repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
MINDER | MINDER-main/scripts/training/multiprocessing_bpe_encoder.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import sys
from collections import Counter
from multiprocessing import Pool
from fairseq.data.encoders.gpt2_bpe import get_encoder
def main():
    """
    Helper script to encode raw text with the GPT-2 BPE using multiple processes.
    The encoder.json and vocab.bpe files can be obtained here:
    - https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json
    - https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--encoder-json",
        help="path to encoder.json",
    )
    parser.add_argument(
        "--vocab-bpe",
        type=str,
        help="path to vocab.bpe",
    )
    parser.add_argument(
        "--inputs",
        nargs="+",
        default=["-"],
        help="input files to filter/encode",
    )
    parser.add_argument(
        "--outputs",
        nargs="+",
        default=["-"],
        help="path to save encoded outputs",
    )
    parser.add_argument(
        "--keep-empty",
        action="store_true",
        help="keep empty lines",
    )
    parser.add_argument("--workers", type=int, default=20)
    args = parser.parse_args()

    assert len(args.inputs) == len(
        args.outputs
    ), "number of input and output paths should match"

    with contextlib.ExitStack() as stack:
        # "-" selects stdin/stdout; real paths are opened and registered on
        # the exit stack so they are all closed when the block exits.
        def _stream(path, mode, std_stream):
            if path == "-":
                return std_stream
            return stack.enter_context(open(path, mode, encoding="utf-8"))

        inputs = [_stream(p, "r", sys.stdin) for p in args.inputs]
        outputs = [_stream(p, "w", sys.stdout) for p in args.outputs]

        encoder = MultiprocessingEncoder(args)
        pool = Pool(args.workers, initializer=encoder.initializer)
        # Lines from all input files are zipped so parallel files stay aligned;
        # chunksize 100 amortizes IPC overhead.
        encoded_lines = pool.imap(encoder.encode_lines, zip(*inputs), 100)

        stats = Counter()
        for i, (status, enc_lines) in enumerate(encoded_lines, start=1):
            if status == "PASS":
                for enc_line, output_h in zip(enc_lines, outputs):
                    print(enc_line, file=output_h)
            else:
                stats["num_filtered_" + status] += 1
            if i % 10000 == 0:
                print("processed {} lines".format(i), file=sys.stderr)

        for k, v in stats.most_common():
            print("[{}] filtered {} lines".format(k, v), file=sys.stderr)
class MultiprocessingEncoder(object):
    """Picklable wrapper around the GPT-2 BPE for use in worker processes."""

    def __init__(self, args):
        self.args = args

    def initializer(self):
        # The BPE object is stored as a process-global so each worker builds
        # it once instead of having it pickled along with every task.
        global bpe
        bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe)

    def encode(self, line):
        global bpe
        return [str(token_id) for token_id in bpe.encode(line)]

    def decode(self, tokens):
        global bpe
        return bpe.decode(tokens)

    def encode_lines(self, lines):
        """
        Encode a set of lines. All lines will be encoded together.
        """
        encoded = []
        for raw in lines:
            raw = raw.rstrip()
            # A single empty line filters the whole group unless --keep-empty.
            if not raw.strip() and not self.args.keep_empty:
                return ["EMPTY", None]
            encoded.append(" ".join(self.encode(raw)))
        return ["PASS", encoded]

    def decode_lines(self, lines):
        decoded = []
        for raw in lines:
            token_ids = [int(tok) for tok in raw.strip().split()]
            decoded.append(self.decode(token_ids))
        return ["PASS", decoded]
# Run the encoder as a command-line tool when executed directly.
if __name__ == "__main__":
    main()
| 3,818 | 27.714286 | 81 | py |
MINDER | MINDER-main/scripts/training/make_supervised_kilt_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from argparse import ArgumentParser
from collections import defaultdict
import json
import multiprocessing
from pathlib import Path
import random
import re
import tqdm
import math
import pickle
import jsonlines
import ftfy
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
banned = set(stopwords.words('english'))
def parse_args():
    """Build and parse the command-line options for dataset generation."""
    parser = ArgumentParser()
    # Positional input/output paths.
    parser.add_argument('input')
    parser.add_argument('output')
    # What kind of target string to generate for each sample.
    parser.add_argument(
        '--target',
        default="span",
        choices=["chunk", "span", "title", "code"])
    # Span sampling parameters.
    parser.add_argument('--min_length', default=10, type=int)
    parser.add_argument('--max_length', default=10, type=int)
    parser.add_argument('--temperature', default=1.0, type=float)
    parser.add_argument('--jobs', default=1, type=int)
    # Source/target decoration flags.
    parser.add_argument('--mark_target', action="store_true")
    parser.add_argument('--mark_silver', action="store_true")
    parser.add_argument('--n_samples', default=1, type=int)
    # Knowledge-base / code-mapping inputs.
    parser.add_argument('--id2code', default=None, type=str)
    parser.add_argument('--kb', required=True, type=str)
    parser.add_argument('--limit', default=300_000, type=int)
    parser.add_argument('--template', action="store_true")
    parser.add_argument('--mode', choices=["w", "a"], default="w")
    return parser.parse_args()
def read_id2code(id2code_path):
    """Load a two-column TSV file mapping passage id -> code string."""
    mapping = {}
    with open(id2code_path) as fin:
        for raw in tqdm.tqdm(fin):
            raw = raw.strip()
            if not raw:
                continue
            doc_id, code = raw.split("\t")
            mapping[doc_id] = code
    return mapping
def span_iterator(tokens, ngrams=3, banned=banned):
    """Yield (start, start + ngrams) windows over *tokens*.

    A window is anchored at every position whose token is not in the
    *banned* stopword set.
    """
    for start, token in enumerate(tokens):
        if token not in banned:
            yield (start, start + ngrams)
def extract_spans(text, source, n_samples, min_length, max_length, temperature=1.0):
    """Sample spans of *text* that fuzzily match trigrams of the query *source*.

    Yields up to *n_samples* spans. When no match is found (or all match
    scores are zero) the fallbacks below set ``indices = [0]``, so only a
    single span starting at offset 0 is yielded in that case.
    *temperature* flattens (>1) or sharpens (<1) the sampling distribution.
    """
    # Only the part before a "||" marker is treated as the query proper.
    source = source.split("||", 1)[0]
    query_tokens = source.split()
    query_tokens_lower = [t.lower() for t in query_tokens]
    passage_tokens = text.split()
    passage_tokens_lower = [t.lower() for t in passage_tokens]
    # Accumulate, per passage offset, fuzzy-match scores (fuzz.ratio in
    # [0, 1]) against every query trigram.
    matches = defaultdict(int)
    for i1, _ in enumerate(query_tokens_lower):
        j1 = i1+3
        str_1 = " ".join(query_tokens_lower[i1:j1])
        for (i2, j2) in span_iterator(passage_tokens_lower, 3):
            str_2 = " ".join(passage_tokens_lower[i2:j2])
            ratio = fuzz.ratio(str_1, str_2) / 100.0
            matches[i2] += ratio
    if not matches:
        indices = [0]
    else:
        # Sort offsets by descending score, then draw n_samples offsets from
        # a temperature-scaled softmax over the scores.
        indices, weights = zip(*sorted(matches.items(), key=lambda x: -(x[1])))
        weights = list(weights)
        sum_weights = float(sum([0] + weights))
        if sum_weights == 0.0 or not weights:
            indices = [0]
            weights = [1.0]
        else:
            weights = [math.exp(float(w) / temperature) for w in weights]
            Z = sum(weights)
            weights = [w / Z for w in weights]
            indices = random.choices(indices, weights=weights, k=n_samples)
    for i in indices:
        # Span length is drawn uniformly from [min_length, max_length].
        subspan_size = random.randint(min_length, max_length)
        span = " ".join(passage_tokens[i:i+subspan_size])
        yield span
def extract_spans_wrapper(args):
    """Adapter for Pool.imap: returns (source, spans) for one argument tuple."""
    spans = list(extract_spans(*args))
    return args[1], spans
def clean(text):
    """Collapse whitespace, repair mojibake, and drop wiki layout markers."""
    text = re.sub(r'\s+', ' ', text)
    text = ftfy.fix_text(text)
    for marker in ('BULLET::::', 'SECTION::::'):
        text = text.replace(marker, '')
    return text.strip()
def _iterator_span_get_arguments(data, mark_target, mark_silver, limit=float('inf'), template=False):
    """Yield (passage_text, source_query) pairs for span extraction.

    Each provenance passage is looked up in the module-global ``kb``
    (populated by ``main``); passages missing from the KB are skipped.
    At most *limit* passages are emitted per sample.
    """
    for sample in tqdm.tqdm(data):
        if template:
            # Use the first templated question instead of the raw input.
            source = sample['meta']['template_questions'][0]
        else:
            source = sample['input']
        source = preprocess_question(source)
        if mark_target:
            source += " || body"
        i = 0
        for out in sample['output']:
            if i >= limit:
                break
            if "provenance" not in out:
                continue
            for ctx in out['provenance']:
                if i >= limit:
                    break
                # KB keys are "<wikipedia_id>-<start_paragraph_id>".
                idx = ctx['wikipedia_id'] + '-' + str(ctx['start_paragraph_id'])
                try:
                    title, text = kb[idx]
                except KeyError:
                    continue
                if mark_silver:
                    # "+" marks the pair as silver (distant) supervision.
                    yield text, source + " || +"
                else:
                    yield text, source
                i += 1
def iterator_span(args):
    """Yield (source, target) pairs where targets are sampled passage spans."""
    with jsonlines.open(args.input) as data:
        pairs = _iterator_span_get_arguments(
            data, args.mark_target, args.mark_silver, args.limit, args.template)
        # Expand each (text, source) pair into the full argument tuple
        # expected by extract_spans_wrapper.
        work_items = (
            (text, source, args.n_samples, args.min_length,
             args.max_length, args.temperature)
            for text, source in pairs
        )
        if args.jobs > 1:
            with multiprocessing.Pool(args.jobs) as pool:
                for source, spans in pool.imap(extract_spans_wrapper, work_items):
                    for target in spans:
                        yield source, target
        else:
            for source, spans in map(extract_spans_wrapper, work_items):
                for target in spans:
                    yield source, target
def iterator(args):
    """Yield (source, target) training pairs for chunk/title/code targets.

    Reads KILT-format samples from ``args.input``, looks each provenance
    passage up in the module-global ``kb``, and emits up to ``args.limit``
    passages per sample, each repeated ``args.n_samples`` times.

    Raises:
        ValueError: if ``args.target`` is not one of chunk/title/code, or
            target is "code" but no ``--id2code`` mapping was given.
    """
    # Fix: the original `if ... and args.mark_target / elif ... / else: raise`
    # chain raised ValueError("Wrong target") for every VALID target whenever
    # --mark_target was not passed. Validate the target separately from the
    # optional marking.
    suffixes = {"chunk": " || body", "title": " || title", "code": " || code"}
    if args.target not in suffixes:
        raise ValueError("Wrong target")
    if args.target == "code":
        # Fix: the original referenced an undefined `id2code` (NameError)
        # when --id2code was omitted; fail early with a clear message.
        if not args.id2code:
            raise ValueError("--id2code is required when target is 'code'")
        id2code = read_id2code(args.id2code)
    with jsonlines.open(args.input) as data:
        for sample in tqdm.tqdm(data):
            if args.template:
                source = sample['meta']['template_questions'][0]
            else:
                source = sample['input']
            source = preprocess_question(source)
            if args.mark_target:
                source += suffixes[args.target]
            i = 0
            for out in sample['output']:
                if i >= args.limit:
                    break
                if "provenance" not in out:
                    continue
                for ctx in out['provenance']:
                    if i >= args.limit:
                        break
                    # KB keys are "<wikipedia_id>-<start_paragraph_id>".
                    idx = ctx['wikipedia_id'] + '-' + str(ctx['start_paragraph_id'])
                    try:
                        title, text = kb[idx]
                    except KeyError:
                        continue
                    # Note: i counts every KB hit, including code-target
                    # passages that are later skipped for a missing code,
                    # matching the original accounting.
                    i += 1
                    if args.target == "chunk":
                        target = text
                    elif args.target == "title":
                        target = title + " @@"
                    else:  # "code"
                        code = id2code.get(idx)
                        if not code:
                            continue
                        target = code.strip() + " ||"
                    for _ in range(args.n_samples):
                        if args.mark_silver:
                            yield source + " || +", target
                        else:
                            yield source, target
def preprocess(line):
    """Parse one KB TSV line ``idx<TAB>title<TAB>text`` into a cleaned tuple.

    Returns (idx, title, text) with wiki layout markers removed and
    whitespace collapsed, or None when the line is blank, malformed, or any
    field ends up empty after cleaning.
    """
    line = line.strip()
    if not line:
        return None
    try:
        idx, title, text = line.split('\t', 2)
    except ValueError:
        # Fix: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; only a failed 3-way split is expected.
        return None
    idx, title = idx.strip(), title.strip()
    text = text.replace('BULLET::::', '').strip()
    text = text.replace('Section::::', '').strip()
    text = re.sub(r'\s+', ' ', text)
    text = text.strip()
    if not (idx and title and text):
        return None
    return idx, title, text
def preprocess_question(question):
    """Normalize a question to a single line with single spaces."""
    cleaned = question.strip().replace('\n', ' / ')
    return re.sub(r'\s+', ' ', cleaned)
def main():
    """Entry point: build or load the KB cache, then write the paired
    ``<output>.source`` / ``<output>.target`` training files."""
    global kb
    args = parse_args()
    cache_path = args.kb + '.cached'
    if Path(cache_path).exists():
        # NOTE: pickle.load of a cache file; only safe because the cache is
        # produced locally by this same script — do not point --kb at
        # untrusted data.
        with open(cache_path, 'rb') as fin:
            kb = pickle.load(fin)
    else:
        kb = {}
        # Fix: the original also opened `multiprocessing.Pool(15)` here but
        # never used it, spawning 15 idle worker processes; preprocessing is
        # done serially via map() below.
        with open(args.kb) as fin:
            pipe = tqdm.tqdm(fin)
            pipe = map(preprocess, pipe)
            pipe = (x for x in pipe if x is not None)
            for idx, title, text in pipe:
                kb[idx] = (title, text)
        with open(cache_path, 'wb') as fout:
            pickle.dump(kb, fout)
    # args.mode is "w" (overwrite) or "a" (append).
    with open(args.output + '.source', mode=args.mode) as src, open(args.output + '.target', mode=args.mode) as tgt:
        pair_iter = iterator_span(args) if args.target == "span" else iterator(args)
        for source, target in pair_iter:
            # A leading space matches the BPE tokenization convention.
            src.write(" " + source.strip() + "\n")
            tgt.write(" " + target.strip() + "\n")
# Run the dataset builder as a command-line tool when executed directly.
if __name__ == '__main__':
    main()
| 10,245 | 29.861446 | 128 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/xcode/Scripts/versiongenerate.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version information for use in the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re

# Read the command line arguments (the input and output directories).
if len(sys.argv) < 3:
    # Fix: the original used a Python 2 `print` statement, which is a syntax
    # error under Python 3; the call form works on both major versions.
    print("Usage: versiongenerate.py input_dir output_dir")
    sys.exit(1)
input_dir = sys.argv[1]
output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file; AC_INIT is
# assumed to appear within them (see the module docstring).
buffer_size = 1024
with open("%s/configure.ac" % input_dir, 'r') as config_file:
    opening_string = config_file.read(buffer_size)

# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
version_values = version_expression.search(opening_string)
if version_values is None:
    # Fix: the original dereferenced `.group()` on a possibly-None match,
    # producing an opaque AttributeError; fail with a clear diagnostic.
    print("versiongenerate.py: no AC_INIT version found in configure.ac")
    sys.exit(1)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
with open("%s/Version.h" % output_dir, 'w') as version_file:
    version_file.write(file_data)
| 4,536 | 43.920792 | 80 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_list_tests_unittest.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
  """Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
  process = gtest_test_utils.Subprocess([EXE_PATH] + args,
                                        capture_stderr=False)
  return process.output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
  """Tests using the --gtest_list_tests flag to list all tests."""

  def RunAndVerify(self, flag_value, expected_output_re, other_flag):
    """Runs gtest_list_tests_unittest_ and verifies that it prints
    the correct tests.

    Args:
      flag_value:         value of the --gtest_list_tests flag;
                          None if the flag should not be present.
      expected_output_re: regular expression that matches the expected
                          output after running command; None means the
                          listing output is expected to be absent.
      other_flag:         a different flag to be passed to command
                          along with gtest_list_tests;
                          None if the flag should not be present.
    """
    # Build the --gtest_list_tests flag variant and a human-readable
    # description of it for failure messages.
    if flag_value is None:
      flag = ''
      flag_expression = 'not set'
    elif flag_value == '0':
      flag = '--%s=0' % LIST_TESTS_FLAG
      flag_expression = '0'
    else:
      flag = '--%s' % LIST_TESTS_FLAG
      flag_expression = '1'
    args = [flag]
    if other_flag is not None:
      args += [other_flag]
    output = Run(args)
    if expected_output_re:
      self.assert_(
          expected_output_re.match(output),
          ('when %s is %s, the output of "%s" is "%s",\n'
           'which does not match regex "%s"' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
            expected_output_re.pattern)))
    else:
      # When no listing is expected, the output must NOT look like the
      # unfiltered test listing.
      self.assert_(
          not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
          ('when %s is %s, the output of "%s" is "%s"'%
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(flag_value=None,
                      expected_output_re=None,
                      other_flag=None)

  def testFlag(self):
    """Tests using the --gtest_list_tests flag."""
    self.RunAndVerify(flag_value='0',
                      expected_output_re=None,
                      other_flag=None)
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag=None)

  def testOverrideNonFilterFlags(self):
    """Tests that --gtest_list_tests overrides the non-filter flags."""
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag='--gtest_break_on_failure')

  def testWithFilterFlags(self):
    """Tests that --gtest_list_tests takes into account the
    --gtest_filter flag."""
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
                      other_flag='--gtest_filter=Foo*')
# Delegate to the shared gtest test-runner entry point.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 6,537 | 30.432692 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_throw_on_failure_test.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None.

  The variable name is upper-cased before being applied.
  """
  name = env_var.upper()
  if value is None:
    os.environ.pop(name, None)
  else:
    os.environ[name] = value
def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""
  print('Running "%s". . .' % ' '.join(command))
  proc = gtest_test_utils.Subprocess(command)
  return proc.exited and proc.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value:    value of the GTEST_THROW_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_throw_on_failure flag;
                        None if the flag should not be present.
      should_fail:      True iff the program is expected to fail.
    """
    SetEnvVar(THROW_ON_FAILURE, env_var_value)
    # Human-readable description of the env var state for failure messages.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    failed = not Run(command)
    # Restore a clean environment before asserting, so a failure here does
    # not leak state into the next test.
    SetEnvVar(THROW_ON_FAILURE, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
# Delegate to the shared gtest test-runner entry point.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 5,767 | 32.534884 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_xml_outfiles_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single
    # file for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    # Best-effort cleanup: each removal failure is ignored so a missing
    # file or directory does not abort the test.
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

  def _TestOutFile(self, test_name, expected_xml):
    """Runs *test_name* with --gtest_output=xml:<dir> and compares the
    produced XML file against *expected_xml* after normalization."""
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    # TODO(wan@google.com): libtool causes the built test binary to be
    # named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_. To account for this possibility, we
    # allow both names in the following code. We should remove this
    # hack when Chandler Carruth's libtool replacement tool is ready.
    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                 output_file1)
    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
  # NOTE(review): presumably limits gtest stack-trace depth in failure
  # output for deterministic comparison — confirm against gtest docs.
  os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
  gtest_test_utils.Main()
| 5,340 | 39.157895 | 140 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_filter_unittest.py | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
# Fall back gracefully on modern Pythons where the deprecated 'sets' module
# (needed only for Python 2.3) no longer exists; the builtin set is used.
try:
  from sets import Set as set  # For Python 2.3 compatibility
except ImportError:
  pass
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes.  We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ.  We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
# NOTE(review): 'DISABED' is a long-standing typo in this constant's name;
# the flag *value* below is correct, so behavior is unaffected.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.  Detected by listing
# the binary's tests and looking for the HasDeathTest test case.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
    'SeqP/ParamTest.TestX/0',
    'SeqP/ParamTest.TestX/1',
    'SeqP/ParamTest.TestY/0',
    'SeqP/ParamTest.TestY/1',
    'SeqQ/ParamTest.TestX/0',
    'SeqQ/ParamTest.TestX/1',
    'SeqQ/ParamTest.TestY/0',
    'SeqQ/ParamTest.TestY/1',
    ]
DISABLED_TESTS = [
    'BarTest.DISABLED_TestFour',
    'BarTest.DISABLED_TestFive',
    'BazTest.DISABLED_TestC',
    'DISABLED_FoobarTest.Test1',
    'DISABLED_FoobarTest.DISABLED_Test2',
    'DISABLED_FoobarbazTest.TestA',
    ]
if SUPPORTS_DEATH_TESTS:
  DEATH_TESTS = [
    'HasDeathTest.Test1',
    'HasDeathTest.Test2',
  ]
else:
  DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
    'FooTest.Abc',
    'FooTest.Xyz',
    'BarTest.TestOne',
    'BarTest.TestTwo',
    'BarTest.TestThree',
    'BazTest.TestOne',
    'BazTest.TestA',
    'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS
# Lazily determined in setUp(): whether the binary was built with
# value-parameterized tests enabled.
param_tests_present = None
# Utilities.
# Private copy of the environment; mutated by SetEnvVar() and passed to
# every child process instead of touching os.environ directly.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Unsetting: drop the key if present, otherwise do nothing.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def RunAndReturnOutput(args=None):
  """Runs the test program and returns its output."""
  command = [COMMAND]
  if args:
    command.extend(args)
  return gtest_test_utils.Subprocess(command, env=environ).output
def RunAndExtractTestList(args=None):
  """Runs the test program and returns (list of tests run, exit code)."""
  proc = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  current_case = ''
  for line in proc.output.split('\n'):
    # A test-case banner updates the current case; a RUN line names a test
    # within the most recently seen case.
    case_match = TEST_CASE_REGEX.match(line)
    if case_match is not None:
      current_case = case_match.group(1)
      continue
    test_match = TEST_REGEX.match(line)
    if test_match is not None:
      tests_run.append('%s.%s' % (current_case, test_match.group(1)))
  return (tests_run, proc.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment."""
  # Snapshot first so the finally-block can always restore it.
  original_env = environ.copy()
  try:
    environ.update(extra_env)
    return function(*args, **kwargs)
  finally:
    environ.clear()
    environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
  """Runs a test program shard and returns exit code and a list of tests run."""
  sharding_env = {
      TOTAL_SHARDS_ENV_VAR: str(total_shards),
      SHARD_INDEX_ENV_VAR: str(shard_index),
  }
  return InvokeWithModifiedEnv(sharding_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag to filter tests."""
  # Utilities.
  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal (mutual containment)."""
    for elem in lhs:
      self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
    for elem in rhs:
      self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var."""
    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    # A valid partition covers every element exactly once: the sizes match
    # (no duplicates across slices) and the memberships match.
    self.assertEqual(len(set_var), len(full_partition))
    self.assertEqual(set(set_var), set(full_partition))
  def AdjustForParameterizedTests(self, tests_to_run):
    """Adjust tests_to_run in case value parameterized tests are disabled."""
    global param_tests_present
    if not param_tests_present:
      return list(set(tests_to_run) - set(PARAM_TESTS))
    else:
      return tests_to_run
  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter."""
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
    # First, tests using the environment variable.
    # Windows removes empty variables from the environment when passing it
    # to a new process. This means it is impossible to pass an empty filter
    # into a process using the environment variable. However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403
    # Next, tests using the command line flag.
    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)
  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.
    Runs all shards of gtest_filter_unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.
    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args : Arguments to pass to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
                    return 0.
    """
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
    # Windows removes empty variables from the environment when passing it
    # to a new process. This means it is impossible to pass an empty filter
    # into a process using the environment variable. However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)
      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403
  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.
    Runs gtest_filter_unittest_ with the given filter, and enables
    disabled tests. Verifies that the right set of tests were run.
    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
    # Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)
  def setUp(self):
    """Sets up test case.
    Determines whether value-parameterized tests are enabled in the binary and
    sets the flags accordingly.
    """
    global param_tests_present
    if param_tests_present is None:
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None
  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""
    self.RunAndVerify(None, ACTIVE_TESTS)
  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""
    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
  def testEmptyFilter(self):
    """Tests an empty filter."""
    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])
  def testBadFilter(self):
    """Tests a filter that matches nothing."""
    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])
  def testFullName(self):
    """Tests filtering by full name."""
    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
  def testUniversalFilters(self):
    """Tests filters that match everything."""
    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
  def testFilterByTestCase(self):
    """Tests filtering by test case name."""
    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])
  def testFilterByTest(self):
    """Tests filtering by test name."""
    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""
    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])
    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
        ])
    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
        ])
  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""
    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""
    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""
    self.RunAndVerify('*z*', [
        'FooTest.Xyz',
        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
        ])
  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""
    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BazTest.TestA',
        ])
    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""
    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BazTest.TestOne',
        'BazTest.TestA',
        ])
    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BazTest.TestOne',
        ])
    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
        ])
  def testNegativeFilters(self):
    """Tests negative ('-') filter patterns, with and without positives."""
    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        'BazTest.TestA',
        'BazTest.TestB',
        ] + DEATH_TESTS + PARAM_TESTS)
    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)
    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ])
    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)
    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)
    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ])
    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
        ])
  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""
    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)
    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""
    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assert_(not os.path.exists(shard_status_file))
    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)
  def testShardStatusFileIsCreatedWithListTests(self):
    """Tests that the shard file is created with the "list_tests" flag."""
    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file2')
    self.assert_(not os.path.exists(shard_status_file))
    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      output = InvokeWithModifiedEnv(extra_env,
                                     RunAndReturnOutput,
                                     [LIST_TESTS_FLAG])
    finally:
      # This assertion ensures that Google Test enumerated the tests as
      # opposed to running them.
      self.assert_('[==========]' not in output,
                   'Unexpected output during test enumeration.\n'
                   'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                   'correct flag value for listing Google Test tests.')
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)
  # Only defined when the binary was built with death-test support (detected
  # above via SUPPORTS_DEATH_TESTS).
  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""
      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',
          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
          ]
      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])
# Script entry point: gtest_test_utils.Main() strips --gtest_* flags before
# delegating to unittest's runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 21,325 | 32.478807 | 80 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_xml_test_utils.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
# Command line flag (value appended by callers) requesting output from a
# Google Test binary.
GTEST_OUTPUT_FLAG = '--gtest_output'
# File name Google Test writes when XML output is requested without a path.
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
  """
  Base class for tests of Google Test's XML output functionality.
  """
  def AssertEquivalentNodes(self, expected_node, actual_node):
    """
    Asserts that actual_node (a DOM node object) is equivalent to
    expected_node (another DOM node object), in that either both of
    them are CDATA nodes and have the same value, or both are DOM
    elements and actual_node meets all of the following conditions:
    *  It has the same tag name as expected_node.
    *  It has the same set of attributes as expected_node, each with
       the same value as the corresponding attribute of expected_node.
       Exceptions are any attribute named "time", which needs only be
       convertible to a floating-point number and any attribute named
       "type_param" which only has to be non-empty.
    *  It has an equivalent set of child nodes (including elements and
       CDATA sections) as expected_node.  Note that we ignore the
       order of the children as they are not guaranteed to be in any
       particular order.
    """
    # CDATA nodes compare by value only.
    if expected_node.nodeType == Node.CDATA_SECTION_NODE:
      self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
      self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
      return
    self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
    self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
    self.assertEquals(expected_node.tagName, actual_node.tagName)
    expected_attributes = expected_node.attributes
    actual_attributes = actual_node .attributes
    self.assertEquals(
        expected_attributes.length, actual_attributes.length,
        'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
            actual_node.tagName, expected_attributes.keys(),
            actual_attributes.keys()))
    for i in range(expected_attributes.length):
      expected_attr = expected_attributes.item(i)
      actual_attr = actual_attributes.get(expected_attr.name)
      self.assert_(
          actual_attr is not None,
          'expected attribute %s not found in element %s' %
          (expected_attr.name, actual_node.tagName))
      # NOTE(review): attribute values are compared verbatim here; the
      # "time"/"type_param" exceptions in the docstring rely on callers
      # normalizing both trees with NormalizeXml() first.
      self.assertEquals(
          expected_attr.value, actual_attr.value,
          ' values of attribute %s in element %s differ: %s vs %s' %
          (expected_attr.name, actual_node.tagName,
           expected_attr.value, actual_attr.value))
    expected_children = self._GetChildren(expected_node)
    actual_children = self._GetChildren(actual_node)
    self.assertEquals(
        len(expected_children), len(actual_children),
        'number of child elements differ in element ' + actual_node.tagName)
    for child_id, child in expected_children.items():
      self.assert_(child_id in actual_children,
                   '<%s> is not in <%s> (in element %s)' %
                   (child_id, actual_children, actual_node.tagName))
      self.AssertEquivalentNodes(child, actual_children[child_id])
  # Maps each recognized element tag to the attribute whose value uniquely
  # identifies a child of that type; used by _GetChildren().
  identifying_attribute = {
    'testsuites': 'name',
    'testsuite': 'name',
    'testcase':  'name',
    'failure':   'message',
    }
  def _GetChildren(self, element):
    """
    Fetches all of the child nodes of element, a DOM Element object.
    Returns them as the values of a dictionary keyed by the IDs of the
    children.  For <testsuites>, <testsuite> and <testcase> elements, the ID
    is the value of their "name" attribute; for <failure> elements, it is
    the value of the "message" attribute; CDATA sections and non-whitespace
    text nodes are concatenated into a single CDATA section with ID
    "detail".  An exception is raised if any element other than the above
    four is encountered, if two child elements with the same identifying
    attributes are encountered, or if any other type of node is encountered.
    """
    children = {}
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.assert_(child.tagName in self.identifying_attribute,
                     'Encountered unknown element <%s>' % child.tagName)
        childID = child.getAttribute(self.identifying_attribute[child.tagName])
        self.assert_(childID not in children)
        children[childID] = child
      elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
        # Merge all text/CDATA content under the single 'detail' key,
        # skipping whitespace-only text nodes.
        if 'detail' not in children:
          if (child.nodeType == Node.CDATA_SECTION_NODE or
              not child.nodeValue.isspace()):
            children['detail'] = child.ownerDocument.createCDATASection(
                child.nodeValue)
        else:
          children['detail'].nodeValue += child.nodeValue
      else:
        self.fail('Encountered unexpected node type %d' % child.nodeType)
    return children
  def NormalizeXml(self, element):
    """
    Normalizes Google Test's XML output to eliminate references to transient
    information that may change from run to run.
    *  The "time" attribute of <testsuites>, <testsuite> and <testcase>
       elements is replaced with a single asterisk, if it contains
       only digit characters.
    *  The "timestamp" attribute of <testsuites> elements is replaced with a
       single asterisk, if it contains a valid ISO8601 datetime value.
    *  The "type_param" attribute of <testcase> elements is replaced with a
       single asterisk (if it is non-empty) as it is the type name returned
       by the compiler and is platform dependent.
    *  The line info reported in the first line of the "message"
       attribute and CDATA section of <failure> elements is replaced with the
       file's basename and a single asterisk for the line number.
    *  The directory names in file paths are removed.
    *  The stack traces are removed.
    """
    if element.tagName == 'testsuites':
      timestamp = element.getAttributeNode('timestamp')
      timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
                               '*', timestamp.value)
    if element.tagName in ('testsuites', 'testsuite', 'testcase'):
      time = element.getAttributeNode('time')
      time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
      type_param = element.getAttributeNode('type_param')
      if type_param and type_param.value:
        type_param.value = '*'
    elif element.tagName == 'failure':
      source_line_pat = r'^.*[/\\](.*:)\d+\n'
      # Replaces the source line information with a normalized form.
      message = element.getAttributeNode('message')
      message.value = re.sub(source_line_pat, '\\1*\n', message.value)
      for child in element.childNodes:
        if child.nodeType == Node.CDATA_SECTION_NODE:
          # Replaces the source line information with a normalized form.
          cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
          # Removes the actual stack trace.
          child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
                                   '', cdata)
    # Recurse into element children so the whole tree is normalized.
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.NormalizeXml(child)
| 8,872 | 44.502564 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_test_utils.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
# Prefer the subprocess module (Python 2.4+); fall back to popen2 on very
# old interpreters.  Catch only ImportError so unrelated failures (e.g.
# KeyboardInterrupt during startup) are not silently swallowed by the old
# bare 'except:' clause this replaces.
try:
  import subprocess
  _SUBPROCESS_MODULE_AVAILABLE = True
except ImportError:
  import popen2
  _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
# Env variable that tells Google Test binaries where to write report files;
# Main() removes it so tested binaries don't emit XML unless asked to.
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
# Private copy of the environment, mutated via SetEnvVar() below.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets/unsets an environment variable to a given value."""
  if value is None:
    # Unsetting: remove the key if present, otherwise no-op.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
# Guard that makes _ParseAndStripGTestFlags() idempotent.
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv.  This is idempotent.

  For each known flag, precedence is: command-line flag > environment
  variable > default.  The first matching '--flag=value' argument is
  removed from argv in place.
  """
  # Module-wide state keeps this a one-shot operation.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return
  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    env_name = flag.upper()
    if env_name in os.environ:
      _flag_map[flag] = os.environ[env_name]
    # The command line flag overrides the environment variable.  Only the
    # first occurrence is consumed; scanning starts at 1 to skip the
    # program name.
    prefix = '--' + flag + '='
    for i in range(1, len(argv)):
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
def GetFlag(flag):
  """Returns the value of the given flag."""
  # GetFlag() may legitimately run before Main(); parsing is idempotent, so
  # always make sure the --gtest_* flags have been consumed first.
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  source_dir = GetFlag('source_dir')
  return os.path.abspath(source_dir)
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""
  build_dir = GetFlag('build_dir')
  return os.path.abspath(build_dir)
# Lazily-created temp directory shared by all callers of GetTempDir().
_temp_dir = None
def _RemoveTempDir():
  # Best-effort cleanup at interpreter exit; removal errors are ignored.
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
  """Returns a directory for temporary files."""
  global _temp_dir
  if _temp_dir is None:
    # Created on first use; cleaned up by the atexit hook in this module.
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting
  file doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir: directory where to look for executables, by default
               the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """
  build_dir = build_dir or GetBuildDir()
  path = os.path.abspath(os.path.join(build_dir, executable_name))
  # On Windows (and Cygwin) binaries carry an .exe suffix.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'
  if os.path.exists(path):
    return path
  sys.stdout.write(
      'Unable to find the test binary "%s". Please make sure to provide\n'
      'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
      'environment variable.' % path)
  sys.exit(1)
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.system() already yields the exit() argument directly
    # (os.WEXITSTATUS() doesn't work there).
    return exit_code
  # On Unix, decode the wait()-style status word.
  if not os.WIFEXITED(exit_code):
    return -1  # Process was terminated by a signal rather than exit().
  return os.WEXITSTATUS(exit_code)
class Subprocess:
  """Runs a child process and captures its combined output and exit status."""
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Sets the following attributes on the constructed object:
      terminated_by_signal   True iff the child process has been terminated
                             by a signal.
      signal                 Signal that terminated the child process.
      exited                 True iff the child process exited normally.
      exit_code              The code with which the child process exited.
      output                 Child process's stdout and stderr output
                             combined in a string.
    """
    # The subprocess module is the preferable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE
      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()
      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)
      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)
      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)
        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)
      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else: # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)
    # A negative return code means the child was killed by a signal
    # (subprocess.Popen semantics).
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Strips gtest flags from argv, then hands control to the test runner."""
  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main().  Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to, so drop any inherited setting.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  os.environ.pop(GTEST_OUTPUT_VAR_NAME, None)
  _test_module.main()
| 10,823 | 32.719626 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_shuffle_test.py | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# --gtest_filter value used by the "filtered" scenarios below.
TEST_FILTER = 'A*.A:A*.B:C*'

# Lazily-populated caches of test lists; CalculateTestLists() extends each
# one the first time it is needed, so repeated setUp() calls are cheap.
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the command-line flag that makes disabled tests run too."""
  return '--gtest_' + 'also_run_disabled_tests'
def FilterFlag(test_filter):
  """Returns a --gtest_filter flag selecting the given filter pattern."""
  return '--gtest_filter=' + test_filter
def RepeatFlag(n):
  """Returns a --gtest_repeat flag repeating the tests n times."""
  return '--gtest_repeat={0}'.format(n)
def ShuffleFlag():
  """Returns the command-line flag that turns on test shuffling."""
  flag = '--gtest_shuffle'
  return flag
def RandomSeedFlag(n):
  """Returns a --gtest_random_seed flag carrying the given seed."""
  return '--gtest_random_seed={0}'.format(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test binary under a merged environment and returns its output.

  Args:
    extra_env: a map of environment variables layered over os.environ.
    args: command-line flags passed to gtest_shuffle_test_.
  """
  merged_env = dict(os.environ)
  merged_env.update(extra_env)
  child = gtest_test_utils.Subprocess([COMMAND] + args, env=merged_env)
  return child.output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """
  iterations = []
  for raw_line in RunAndReturnOutput(extra_env, args).split('\n'):
    name = raw_line.strip()
    if raw_line.startswith('----'):
      # A dashed separator marks the start of a new iteration's test list.
      current = []
      iterations.append(current)
    elif name:
      current.append(name)  # 'TestCaseName.TestName'
  return iterations
def GetTestCases(tests):
  """Returns the test case names appearing in the given full test names.

  Args:
    tests: a list of full test names ('TestCase.TestName')

  Returns:
    The test case names from 'tests', in first-appearance order, with
    duplicates removed.
  """
  seen_cases = []
  for full_name in tests:
    case_name = full_name.split('.')[0]
    if case_name not in seen_cases:
      seen_cases.append(case_name)
  return seen_cases
def CalculateTestLists():
  """Populates the module-level test-list caches (no-op once filled)."""

  def first_iteration(extra_env, args):
    # Every cache below only needs the tests run in the first iteration.
    return GetTestsForAllIterations(extra_env, args)[0]

  # Shard 1 of 3, reused by both sharded scenarios.
  sharding_env = {TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}

  if not ALL_TESTS:
    ALL_TESTS.extend(first_iteration({}, [AlsoRunDisabledTestsFlag()]))
  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(first_iteration({}, []))
  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(first_iteration({}, [FilterFlag(TEST_FILTER)]))
  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(first_iteration(sharding_env, []))
  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(first_iteration(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)]))
  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(first_iteration(
        {}, [ShuffleFlag(), RandomSeedFlag(1)]))
  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(first_iteration(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)]))
  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        first_iteration(sharding_env, [ShuffleFlag(), RandomSeedFlag(1)]))
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    # Fills the module-level test-list caches; cheap after the first call.
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    # A shuffled run must execute exactly as many tests as the plain run.
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    # The second argument to assert_ is the message printed on failure.
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    # Shuffling must reorder whole test cases, not just tests within a case.
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    # Each test name must occur exactly once in every shuffled list.
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    # Every shuffled test must exist in the corresponding unshuffled list.
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    # Conversely, no test may be dropped by shuffling.
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    # All death tests must precede any non-death test, even when shuffled.
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    # Walks the run order and checks that each test case forms one
    # contiguous group (a case name never re-appears after another case).
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    # With --gtest_repeat, each iteration must use a fresh shuffle order.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
# Allow running this suite directly from the command line.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 12,549 | 37.496933 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_env_var_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Platform detection; IS_LINUX is deliberately Linux-only, not generic POSIX.
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'

# Path to the gtest_env_var_test_ binary exercised by this script.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')

# Mutable copy of the environment; SetEnvVar() edits this copy (passed to
# each child process), never os.environ itself.
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError (after printing both values) unless they match."""
  if expected == actual:
    return
  print('Expected: %s' % (expected,))
  print(' Actual: %s' % (actual,))
  raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop with a default is a no-op when the variable is already absent.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ (optionally with one flag) and returns its output."""
  command = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(command, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_%s' % flag.upper()
  # With the variable set the flag must report test_val; once unset it must
  # fall back to default_val.
  for value, expected in ((test_val, test_val), (None, default_val)):
    SetEnvVar(env_var, value)
    AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """Verifies that GTEST_* environment variables set the matching flags."""

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    SetEnvVar('XML_OUTPUT_FILE', None)  # For 'output' test
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')

    if IS_LINUX:
      # These two flags are only exercised on Linux.
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')

  def testXmlOutputFile(self):
    """Tests that $XML_OUTPUT_FILE affects the output flag."""
    SetEnvVar('GTEST_OUTPUT', None)
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/bar.xml', GetFlag('output'))

  def testXmlOutputFileOverride(self):
    """Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT."""
    SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/foo.xml', GetFlag('output'))
# Allow running this suite directly from the command line.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 4,036 | 33.211864 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_help_test.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform detection used to decide which flags the binary should document.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'

# Path to the gtest_help_test_ binary exercised by this script.
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Mis-styled spellings of --gtest_list_tests (single dash, slash, and
# dashes instead of underscores); these must also trigger the help message.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'

# Whether the binary supports death tests, detected by listing its tests
# and looking for a case whose name contains 'DeathTest'.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output

# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  command = [PROGRAM_PATH]
  if flag is not None:
    command.append(flag)
  child = gtest_test_utils.Subprocess(command)
  return (child.exit_code, child.output)
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.

    The right message must be printed and the tests must be
    skipped when the given flag is specified.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)

    # --gtest_stream_result_to is only documented on Linux builds.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)

    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # NOTE(review): a non-zero exit code is treated as proof the tests ran —
    # presumably the binary contains failing tests by design; confirm
    # against gtest_help_test_.cc.
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
# Allow running this suite directly from the command line.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 5,856 | 32.855491 | 75 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_break_on_failure_unittest.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.

IS_WINDOWS = os.name == 'nt'

# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'

# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'

# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'

# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'

# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')

# Environment dict and helper shared with (re-exported by) gtest_test_utils.
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar

# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely.  Therefore they are incompatible with
# the premature-exit-file protocol by design.  Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
  child = gtest_test_utils.Subprocess(command, env=environ)
  return int(bool(child.terminated_by_signal))
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

    # Describe the environment setting for the failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    # Translate flag_value into the actual command-line flag text.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    has_seg_fault = Run(command)

    # Restore the environment so later scenarios start clean.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""
      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
# Allow running this suite directly from the command line.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 7,339 | 33.460094 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_output_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import difflib
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'

IS_WINDOWS = os.name == 'nt'

# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'

# Path to the gtest_output_test_ binary whose output is compared to golden.
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')

# Each COMMAND_* below is an (extra_env, argv) pair.
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          'internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         'internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     'internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])

GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Normalizes line endings: converts CRLF pairs and bare CRs in s to LF."""
  # First collapse CRLF to LF, then treat any remaining CR as a line break.
  crlf_fixed = s.replace('\r\n', '\n')
  return '\n'.join(crlf_fixed.split('\r'))
def RemoveLocations(test_output):
  """Strips file location info from a Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    test_output with every location marker of the form
    'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ' rewritten as 'FILE_NAME:#: '.
  """
  location_re = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ')
  return location_re.sub(r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Replaces each stack trace in output with an '(omitted)' placeholder."""
  # The non-greedy quantifier stops each match at the first blank line,
  # keeping one match per stack trace.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
  """Deletes every stack trace, header line included, from output."""
  # Same non-greedy pattern as RemoveStackTraceDetails, but the match is
  # dropped entirely instead of being replaced with a placeholder.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('', output)
def RemoveTime(output):
  """Replaces elapsed-time figures like '(123 ms' with '(? ms' in output."""
  elapsed_re = re.compile(r'\(\d+ ms')
  return elapsed_re.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with type information normalized to canonical form.
  """
  # Some compilers spell the type 'unsigned int' as just 'unsigned';
  # the pattern is a plain literal, so str.replace is equivalent to re.sub.
  return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  if not IS_WINDOWS:
    # Non-Windows output is already in canonical form.
    return test_output
  # Removes the color information that is not present on Windows.
  test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
  # Changes failure message headers into the Windows format.
  test_output = re.sub(r': Failure\n', r': error: ', test_output)
  # Changes file(line_number) to file:line_number.
  test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  return test_output
def RemoveTestCounts(output):
  """Replaces concrete test counts in a Google Test program's output with '?'."""
  # Applied strictly in order: the more specific patterns must run before
  # the generic trailing '\d+ tests?\.' rule.
  substitutions = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in substitutions:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output: A string containing the test output.
    pattern: A regex string that matches names of test cases or
             tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First drop each whole RUN..FAILED/OK span for a matching test, then
  # drop any remaining single lines that mention the pattern.
  body_re = (r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n'
             % (pattern, pattern))
  mention_re = r'.*%s.*\n' % pattern
  without_bodies = re.sub(body_re, '', test_output)
  return re.sub(mention_re, '', without_bodies)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe).

  Applies, in order: line-ending normalization, file-location stripping,
  stack-trace elision, and timing removal, so the result can be compared
  against the golden file.
  """
  output = ToUnixLineEnding(output)
  output = RemoveLocations(output)
  output = RemoveStackTraceDetails(output)
  output = RemoveTime(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  # Spawns cmd in a sub-process, and gets its standard I/O file objects.
  # Set and save the environment properly.
  # Copy the inherited environment so the extra variables do not leak
  # into this process's own os.environ.
  environ = os.environ.copy()
  environ.update(env_cmd[0])
  p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
  return p.output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    The command's output, normalized via NormalizeOutput.
  """
  # Disables exception pop-ups on Windows.
  environ, cmdline = env_cmd
  environ = dict(environ)  # Ensures we are modifying a copy.
  environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  # Each command exercises a different flag combination; the concatenation
  # is what gets diffed against the golden file.
  return (GetCommandOutput(COMMAND_WITH_COLOR) +
          GetCommandOutput(COMMAND_WITH_TIME) +
          GetCommandOutput(COMMAND_WITH_DISABLED) +
          GetCommandOutput(COMMAND_WITH_SHARDING))
# Probe the test binary once to discover which optional features it was
# built with; the listed test names reveal the available feature set.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# Hard-coded off: stack traces are stripped from all output below.
SUPPORTS_STACK_TRACES = False

# The golden file can only be regenerated on a build that supports every
# feature the test program exercises (and not on Windows).
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS and
                            not IS_WINDOWS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares the test program's normalized output against the golden file."""

  def RemoveUnsupportedTests(self, test_output):
    # Strips from the golden output the tests this build of the test binary
    # cannot run, so the comparison in testOutput stays meaningful.
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()
    golden_file = open(GOLDEN_PATH, 'r')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()
    # We want the test to pass regardless of certain features being
    # supported or not.
    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)
    if CAN_GENERATE_GOLDEN_FILE:
      # Full-fidelity comparison: every feature is available, so the actual
      # output must match the golden file exactly (modulo normalization).
      self.assertEqual(normalized_golden, normalized_actual,
                       '\n'.join(difflib.unified_diff(
                           normalized_golden.split('\n'),
                           normalized_actual.split('\n'),
                           'golden', 'actual')))
    else:
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        # NOTE(review): 'wb' mode with a str argument is a Python 2 idiom;
        # on Python 3 these writes would raise TypeError — confirm runtime.
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb').write(
                normalized_golden)
      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    # Regeneration mode: overwrite the golden file with the current output.
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      # NOTE(review): writing a str to a 'wb' handle is a Python 2 idiom;
      # on Python 3 this would raise TypeError — confirm intended runtime.
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
      sys.stderr.write(message)
      sys.exit(1)
  else:
    # Normal mode: run the comparison tests via the shared gtest runner.
    gtest_test_utils.Main()
| 12,259 | 34.953079 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_catch_exceptions_test.py | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""

    def TestSehExceptions(self, test_output):
      # Verifies that an SEH exception raised at each interesting point of a
      # test's lifecycle is reported in the given program output.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)

    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      # Exercises the binary built with C++ exceptions enabled.
      self.TestSehExceptions(EX_BINARY_OUTPUT)

    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      # Exercises the binary built with C++ exceptions disabled.
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
  * C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exception thrown affect the remainder of the test work flow in the
    expected manner.
  """

  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  # NOTE(review): this test method is defined only when the binary's output
  # shows the destructor test actually ran — presumably some configurations
  # skip throwing from a destructor; confirm against the C++ test source.
  if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
      EX_BINARY_OUTPUT):

    def testCatchesCxxExceptionsInFixtureDestructor(self):
      self.assert_('C++ exception with description '
                   '"Standard C++ exception" thrown '
                   'in the test fixture\'s destructor'
                   in EX_BINARY_OUTPUT)
      self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                   'called as expected.'
                   in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)

  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # NOTE(review): 'FITLER' is a misspelling of 'FILTER'; harmless since
    # it is only a local name.
    FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FITLER_OUT_SEH_TESTS_FLAG],
        env=environ).output
    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
# Delegate to the shared gtest test runner when executed directly.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 9,901 | 40.605042 | 78 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_uninitialized_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
  """Raises AssertionError when condition is falsy; otherwise does nothing."""
  if condition:
    return
  raise AssertionError
def AssertEq(expected, actual):
  """Raises AssertionError, after printing both values, unless they are equal."""
  if expected == actual:
    return
  print('Expected: %s' % (expected,))
  print(' Actual: %s' % (actual,))
  raise AssertionError
def TestExitCodeAndOutput(command):
  """Runs the given command and verifies its exit code and output."""
  # Verifies that 'command' exits with code 1.
  p = gtest_test_utils.Subprocess(command)
  Assert(p.exited)
  AssertEq(1, p.exit_code)
  # The binary must tell the user to call InitGoogleTest() first.
  Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
  """Drives the uninitialized-gtest binary and checks its diagnostics."""

  def testExitCodeAndOutput(self):
    TestExitCodeAndOutput(COMMAND)
# Delegate to the shared gtest test runner when executed directly.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 2,482 | 33.971831 | 77 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_color_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# BUG FIX: this previously read "IS_WINDOWS = os.name = 'nt'" — a chained
# assignment that *assigned* 'nt' to os.name on every platform instead of
# comparing it. '==' is the intended equality test.
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop with a default is a no-op when the variable is absent.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code.

  Args:
    term: value for the TERM environment variable, or None to unset it.
    color_env_var: value for GTEST_COLOR, or None to unset it.
    color_flag: value for --gtest_color, or None to omit the flag.

  Returns:
    Truthy when the child did not exit normally or exited nonzero —
    presumably the binary signals "uses color" that way; confirm against
    gtest_color_test_.cc.
  """
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  if color_flag is None:
    args = []
  else:
    args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """End-to-end checks of Google Test's color-decision logic."""

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Terminals with no color support (and an unset TERM) get no color.
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' defers to the terminal's capabilities.
      self.assert_(not UsesColor('emacs', None, 'auto'))
      self.assert_(UsesColor('xterm', None, 'auto'))
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The command-line flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
# Delegate to the shared gtest test runner when executed directly.
if __name__ == '__main__':
  gtest_test_utils.Main()
| 4,911 | 36.496183 | 76 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/test/gtest_xml_output_unittest.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# Flag names and defaults used when driving the XML-producing test binary.
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"

# Hard-coded off here, so the expected XML below embeds no stack traces.
SUPPORTS_STACK_TRACES = False

if SUPPORTS_STACK_TRACES:
  STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
  STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
 Expected: 1
To be equal to: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected: 1
To be equal to: 2%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
 Expected: 1
To be equal to: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected: 1
To be equal to: 2%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
 Expected: 2
To be equal to: 3" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected: 2
To be equal to: 3%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
  """
  Unit test for Google Test's XML output functionality.

  Each test runs one of the helper gtest binaries with --gtest_output=xml
  and compares the produced document against a golden XML template.
  """

  # This test currently breaks on platforms that do not support typed and
  # type-parameterized tests, so we don't run it under them.
  if SUPPORTS_TYPED_TESTS:
    def testNonEmptyXmlOutput(self):
      """
      Runs a test program that generates a non-empty XML output, and
      tests that the XML output is expected.
      """
      self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)

  def testEmptyXmlOutput(self):
    """Verifies XML output for a Google Test binary without actual tests.

    Runs a test program that generates an empty XML output, and
    tests that the XML output is expected.
    """
    self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)

  def testTimestampValue(self):
    """Checks whether the timestamp attribute in the XML output is valid.

    Runs a test program that generates an empty XML output, and checks if
    the timestamp attribute in the testsuites tag is valid.
    """
    actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
    date_time_str = actual.documentElement.getAttributeNode('timestamp').value
    # datetime.strptime() is only available in Python 2.5+ so we have to
    # parse the expected datetime manually.
    match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
    # BUG FIX: the original asserted on the re.match *function object*
    # (always truthy) instead of the match result, so a malformed timestamp
    # was never reported here and the int() calls below would crash instead.
    self.assertTrue(
        match,
        'XML datetime string %s has incorrect format' % date_time_str)
    date_time_from_xml = datetime.datetime(
        year=int(match.group(1)), month=int(match.group(2)),
        day=int(match.group(3)), hour=int(match.group(4)),
        minute=int(match.group(5)), second=int(match.group(6)))

    time_delta = abs(datetime.datetime.now() - date_time_from_xml)
    # timestamp value should be near the current local time
    self.assertTrue(time_delta < datetime.timedelta(seconds=600),
                    'time_delta is %s' % time_delta)
    actual.unlink()

  def testDefaultOutputFile(self):
    """
    Confirms that Google Test produces an XML output file with the expected
    default name if no name is explicitly specified.
    """
    output_file = os.path.join(gtest_test_utils.GetTempDir(),
                               GTEST_DEFAULT_OUTPUT_FILE)
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
        'gtest_no_test_unittest')
    # Remove any stale output file; ENOENT just means there was none.
    try:
      os.remove(output_file)
    except OSError:
      e = sys.exc_info()[1]
      if e.errno != errno.ENOENT:
        raise

    p = gtest_test_utils.Subprocess(
        [gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
        working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    self.assert_(os.path.isfile(output_file))

  def testSuppressedXmlOutput(self):
    """
    Tests that no XML file is generated if the default XML listener is
    shut down before RUN_ALL_TESTS is invoked.
    """
    xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                            GTEST_PROGRAM_NAME + 'out.xml')
    if os.path.isfile(xml_path):
      os.remove(xml_path)

    command = [GTEST_PROGRAM_PATH,
               '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
               '--shut_down_xml']
    p = gtest_test_utils.Subprocess(command)
    if p.terminated_by_signal:
      # p.signal is available only if p.terminated_by_signal is True.
      self.assertFalse(
          p.terminated_by_signal,
          '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
    else:
      self.assert_(p.exited)
      self.assertEquals(1, p.exit_code,
                        "'%s' exited with code %s, which doesn't match "
                        'the expected exit code %s.'
                        % (command, p.exit_code, 1))

    self.assert_(not os.path.isfile(xml_path))

  def testFilteredTestXmlOutput(self):
    """Verifies XML output when a filter is applied.

    Runs a test program that executes only some tests and verifies that
    non-selected tests do not show up in the XML output.
    """
    self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
                        extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])

  def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
    """
    Returns the xml output generated by running the program gtest_prog_name.
    Furthermore, the program's exit code must be expected_exit_code.
    """
    xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                            gtest_prog_name + 'out.xml')
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)

    command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
               extra_args)
    p = gtest_test_utils.Subprocess(command)
    if p.terminated_by_signal:
      self.assert_(False,
                   '%s was killed by signal %d' % (gtest_prog_name, p.signal))
    else:
      self.assert_(p.exited)
      self.assertEquals(expected_exit_code, p.exit_code,
                        "'%s' exited with code %s, which doesn't match "
                        'the expected exit code %s.'
                        % (command, p.exit_code, expected_exit_code))
    actual = minidom.parse(xml_path)
    return actual

  def _TestXmlOutput(self, gtest_prog_name, expected_xml,
                     expected_exit_code, extra_args=None):
    """
    Asserts that the XML document generated by running the program
    gtest_prog_name matches expected_xml, a string containing another
    XML document. Furthermore, the program's exit code must be
    expected_exit_code.
    """
    actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
                                expected_exit_code)
    expected = minidom.parseString(expected_xml)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == '__main__':
  # Force a depth-1 stack trace so failure output matches the golden data
  # regardless of the platform's default stack trace depth.
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
| 14,677 | 46.501618 | 225 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/scripts/fuse_gtest_files.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

# Regex for matching '#include "gtest/..."' (whitespace-tolerant).
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')

# Regex for matching '#include "src/..."' (whitespace-tolerant).
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')

# Where to find the source seed files, relative to the gtest root.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'

# Where to put the generated files, relative to the output directory.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Aborts the program unless relative_path exists under directory.

  relative_path is the file path relative to the given directory.
  """
  candidate = os.path.join(directory, relative_path)
  if os.path.isfile(candidate):
    return
  # Not found: tell the user how to fix the invocation, then bail out.
  print('ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                    directory))
  print('Please either specify a valid project root directory '
        'or omit it on the command line.')
  sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure.
  """
  # The root is considered valid iff it contains both seed files the
  # fusing steps start from.
  VerifyFileExists(gtest_root, GTEST_H_SEED)
  VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.
  Prompts for confirmation before overwriting an existing file and
  aborts the program if the user declines.
  """
  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes. We should provide a way for the
    # Makefile to force overwriting the files.
    print('%s already exists in directory %s - overwrite it? (y/N) ' %
          (relative_path, output_dir))
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      print('ABORTED.')
      sys.exit(1)

  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  # Both generated files must be creatable (or confirmed overwritable).
  VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
  VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir.

  Every '#include "gtest/..."' directive is expanded recursively so the
  result is a single self-contained header; all other lines are copied
  through verbatim.  Fix over the original: files are opened with context
  managers so handles are closed deterministically instead of relying on
  the garbage collector.
  """
  with open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w') as output_file:
    processed_files = set()  # Holds all gtest headers we've processed.

    def ProcessFile(gtest_header_path):
      """Appends the given gtest header, recursively expanded, to the output."""
      # We don't process the same header twice.
      if gtest_header_path in processed_files:
        return

      processed_files.add(gtest_header_path)

      # Reads each line in the given gtest header.
      with open(os.path.join(gtest_root, gtest_header_path), 'r') as input_file:
        for line in input_file:
          m = INCLUDE_GTEST_FILE_REGEX.match(line)
          if m:
            # It's '#include "gtest/..."' - let's process it recursively.
            ProcessFile('include/' + m.group(1))
          else:
            # Otherwise we copy the line unchanged to the output file.
            output_file.write(line)

    ProcessFile(GTEST_H_SEED)
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.

  output_file is an already-open writable file object.  Fix over the
  original: source files are opened with context managers so handles are
  closed deterministically instead of relying on the garbage collector.
  """
  processed_files = set()

  def ProcessFile(gtest_source_file):
    """Appends the given gtest source file, recursively expanded, to output."""
    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return

    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.
    with open(os.path.join(gtest_root, gtest_source_file), 'r') as input_file:
      for line in input_file:
        m = INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
            # It's '#include "gtest/gtest-spi.h"'. This file is not
            # #included by "gtest/gtest.h", so we need to process it.
            ProcessFile(GTEST_SPI_H_SEED)
          else:
            # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
            # We treat it as '#include "gtest/gtest.h"', as all other
            # gtest headers are being fused into gtest.h and cannot be
            # #included directly.

            # There is no need to #include "gtest/gtest.h" more than once.
            if not GTEST_H_SEED in processed_files:
              processed_files.add(GTEST_H_SEED)
              output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
        else:
          m = INCLUDE_SRC_FILE_REGEX.match(line)
          if m:
            # It's '#include "src/foo"' - let's process it recursively.
            ProcessFile(m.group(1))
          else:
            output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir.

  Fix over the original: the output file is opened with a context manager
  so it is flushed and closed even if fusing raises part-way through.
  """
  with open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') as output_file:
    FuseGTestAllCcToFile(gtest_root, output_file)
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc.

  Validates both directories first (each validator aborts on failure),
  then generates the two fused files under output_dir.
  """
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)
  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Command-line entry point: dispatch on the number of arguments.

  One argument: output dir (gtest root defaults to the parent of the
  scripts/ directory).  Two arguments: gtest root and output dir.
  Anything else prints the usage docstring and exits with status 1.
  """
  args = sys.argv[1:]
  if len(args) == 1:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, args[0])
  elif len(args) == 2:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(args[0], args[1])
  else:
    print(__doc__)
    sys.exit(1)
# Script entry point.
if __name__ == '__main__':
  main()
| 8,884 | 33.980315 | 78 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/scripts/upload.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# Max size (in bytes) of patch or base file that will be uploaded;
# larger files are skipped by the uploader.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError, e:
      # Best-effort: if the cached address can't be read we simply prompt
      # without a suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError, e:
      # Best-effort: failing to persist the address is not fatal.
      pass
  else:
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If 'verbosity' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  print >>sys.stderr, msg
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    # ClientLogin reports the failure category in the "Error" field of its
    # response body (e.g. "BadAuthentication", "CaptchaRequired").
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    # NOTE(review): extra_headers uses a mutable default argument; this is
    # only safe because the dict is never mutated by this class.
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # _GetOpener is supplied by the subclass (see HttpRpcServer).
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email: The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The response body is newline-separated "key=value" pairs; keep only
      # the non-empty lines.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 from ClientLogin carries a machine-readable failure reason.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener installed by HttpRpcServer has no redirect handler, so
      # the expected 302 surfaces as an HTTPError - treat it as the response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Allow up to three attempts so the user can correct a mistyped password.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        # Only BadAuthentication is retryable; every other reason is
        # reported and the loop is abandoned.
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()

    # The socket timeout is process-global; save and restore it around the
    # request so other code is not affected.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # The session cookie expired - re-authenticate and retry.
            self._Authenticate()
##           elif e.code >= 500 and e.code < 600:
##             # Server Error - try again.
##             continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    No redirect handler is installed on purpose: _GetAuthCookie detects a
    successful login by observing the raw 302 response.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file so the credentials stay private.
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface.  The parser and its option groups are built at
# import time so main() can simply call parser.parse_args().
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Args:
    options: The parsed command-line options (server, email, host, ...).

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """

  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
           uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  boundary = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  parts = []
  # Plain form fields: header, blank separator line, then the value.
  for name, value in fields:
    parts.extend([
        '--' + boundary,
        'Content-Disposition: form-data; name="%s"' % name,
        '',
        value,
    ])
  # File fields additionally carry a filename and a guessed Content-Type.
  for name, filename, value in files:
    parts.extend([
        '--' + boundary,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
        (name, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  # Closing boundary plus a trailing CRLF.
  parts.extend(['--' + boundary + '--', ''])
  body = '\r\n'.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % boundary
  return content_type, body
def GetContentType(filename):
  """Helper to guess the content-type from the filename.

  Falls back to the generic binary type when the extension is unknown.
  """
  guessed, _ = mimetypes.guess_type(filename)
  return guessed if guessed else 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search; on other
# platforms subprocess performs the PATH search itself.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line by line as it arrives, while also capturing it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # stderr is read only after the process exits; it is echoed (not
  # returned) and only when print_output is set.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Runs command and returns its stdout; exits the program on failure.

  Args:
    command: Command to execute.
    silent_ok: If False, empty output is treated as an error.
    universal_newlines: Use universal_newlines flag (default: True).
    print_output: If True, the output is echoed to stdout while running.

  Returns:
    The command's stdout as a string.
  """
  data, retcode = RunShellWithReturnCode(command, print_output,
                                         universal_newlines)
  if retcode:
    ErrorExit("Got error status from %s:\n%s" % (command, data))
  if not silent_ok and not data:
    ErrorExit("No output from %s" % command)
  return data
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""
  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options
  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      # Only a lowercase "y" continues; anything else (including "Y") aborts.
      if answer != "y":
        ErrorExit("User aborted")
  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty.  For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetBaseFiles(self, diff):
    """Helper that calls GetBaseFile for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files
  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""
    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a single base or current file to the server."""
      file_too_large = False
      # 'type' labels which side of the diff is being uploaded (note: the
      # name shadows the builtin, kept as-is).
      if is_base:
        type = "base"
      else:
        type = "current"
      if len(content) > MAX_UPLOAD_SIZE:
        # Oversized files are announced with an empty body and a marker field
        # so the server still records their status.
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)
    # Invert patch_list entries ([patch_key, filename]) into a
    # filename -> patch_key map; the list comprehension is used purely for
    # its setdefault side effect.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker in the patch key means the server does not want the
      # base file; only the trailing numeric id after '_' is kept.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)
  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""
  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "--rev N" or "--rev N:M"; group(3) is the optional end rev.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)
  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    # The expensive guess already happened in __init__; 'required' is kept
    # for interface compatibility but is unused here.
    return self.svn_base
  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting sites get hand-built "checkout" URLs so base files
        # can be fetched over plain HTTP.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None
  def GenerateDiff(self, args):
    """Runs "svn diff" (optionally for --rev) and sanity-checks the output."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data
  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],
      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
    }
    def repl(m):
      # Keep the keyword name but blank out its expanded value so the
      # content matches what "svn diff" produced.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)
    # Only collapse the keywords (and their aliases) enabled on this file.
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
  def GetUnknownFiles(self):
    """Returns the "?" lines of "svn status" (untracked files)."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result
  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      # Present in old rev only -> deleted; in both -> modified; else added.
      old_files, new_files = self.svnls_cache[dirname]
      if relfilename in old_files and relfilename not in new_files:
        status = "D "
      elif relfilename in old_files and relfilename in new_files:
        status = "M "
      else:
        status = "A "
    return status
  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile; drives "svn cat"/"propget"."""
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = mimetype and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = mimetype and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary content must not have its newlines translated.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""
  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Maps each filename seen in the diff to the git hash of its base
    # version; filled in while GenerateDiff() parses the diff output.
    self.base_hashes = {}
  def GenerateDiff(self, extra_args):
    """Returns a git diff rewritten into svn-style "Index:" form.

    The review server's parser expects svn-like diffs, so every
    "diff --git" header is replaced by an "Index:" line.  The left-hand
    hash of each "index" line is recorded so the base file can be fetched
    and uploaded later.
    """
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    raw_diff = RunShell(["git", "diff", "--full-index"] + extra_args)
    converted = []
    current_file = None
    num_files = 0
    for diff_line in raw_diff.splitlines():
      header = re.match(r"diff --git a/(.*) b/.*$", diff_line)
      if header:
        num_files += 1
        current_file = header.group(1)
        converted.append("Index: %s\n" % current_file)
        continue
      # The "index" line in a git diff looks like this (long hashes elided):
      #   index 82c0d44..b2cee3f 100755
      # We want to save the left hash, as that identifies the base file.
      index_match = re.match(r"index (\w+)\.\.", diff_line)
      if index_match:
        self.base_hashes[current_file] = index_match.group(1)
      converted.append(diff_line + "\n")
    if num_files == 0:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(converted)
  def GetUnknownFiles(self):
    """Lists files git does not track (respecting the standard excludes)."""
    out = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                   silent_ok=True)
    return out.splitlines()
  def GetBaseFile(self, filename):
    """Fetches the base revision of filename via "git show"."""
    base_hash = self.base_hashes[filename]
    new_content = None
    is_binary = False
    if base_hash == "0" * 40:  # All-zero hash indicates no base file.
      return ("", new_content, is_binary, "A")
    base_content, exit_code = RunShellWithReturnCode(["git", "show", base_hash])
    if exit_code:
      ErrorExit("Got error status from 'git show %s'" % base_hash)
    return (base_content, new_content, is_binary, "M")
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""
  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # "hg parent -q" prints "rev:node"; keep the node part.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")
  def GenerateDiff(self, extra_args):
    """Returns "hg diff --git" output rewritten into svn-style form."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files
  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile; uses "hg status -C"/"hg cat"."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      # NOTE(review): the file handle is never closed explicitly here;
      # relies on refcounting to release it.
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  patches = []
  current_name = None
  current_chunk = []
  for line in data.splitlines(True):
    next_name = None
    if line.startswith('Index:'):
      unused, next_name = line.split(':', 1)
      next_name = next_name.strip()
    elif line.startswith('Property changes on:'):
      unused, candidate = line.split(':', 1)
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the same
      # otherwise the file shows up twice.
      candidate = candidate.strip().replace('\\', '/')
      if candidate != current_name:
        # File has property changes but no modifications, create a new diff.
        next_name = candidate
    if next_name:
      # Flush the diff accumulated for the previous file, then start a
      # fresh chunk beginning with this header line.
      if current_name and current_chunk:
        patches.append((current_name, ''.join(current_chunk)))
      current_name = next_name
      current_chunk = [line]
      continue
    if current_chunk is not None:
      current_chunk.append(line)
  if current_name and current_chunk:
    patches.append((current_name, ''.join(current_chunk)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Used when the combined diff exceeds MAX_UPLOAD_SIZE; each file's diff is
  posted individually, and oversized per-file patches are skipped.

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    # The server replies "OK\n<patch_key>" on success.
    lines = response_body.splitlines()
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class. Exit with an
  error if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  # NOTE: "except OSError, (errno, message)" is Python 2-only syntax.
  except OSError, (errno, message):
    if errno != 2: # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2: # ENOENT -- they don't have git installed.
      raise
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force a stable locale so VCS command output parses predictably.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  # NOTE(review): download_base is already true on this path, so the
  # assignment below looks redundant -- confirm intent.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    # NOTE(review): this check only accepts domains with exactly one dot
    # (e.g. rejects "user@mail.example.co.uk") -- naive validation.
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # The server replies "<msg>\n<patchset id>\n<patch lines...>".
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the last path component of the issue URL in msg.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Script entry point: runs RealMain and maps Ctrl-C to a clean exit."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    # Emit a newline so the interrupt message starts on its own line.
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)
if __name__ == "__main__":
main()
| 51,024 | 35.761527 | 80 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/scripts/pump.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# Regex -> token-type table for the Pump tokenizer, tried in order.  More
# specific '$'-directives appear before the bare '$' pattern so that e.g.
# '$var' is not split into '$' followed by an identifier.
TOKEN_TABLE = [
  (re.compile(r'\$var\s+'), '$var'),
  (re.compile(r'\$elif\s+'), '$elif'),
  (re.compile(r'\$else\s+'), '$else'),
  (re.compile(r'\$for\s+'), '$for'),
  (re.compile(r'\$if\s+'), '$if'),
  (re.compile(r'\$range\s+'), '$range'),
  (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
  (re.compile(r'\$\(\$\)'), '$($)'),
  (re.compile(r'\$'), '$'),
  (re.compile(r'\[\[\n?'), '[['),
  (re.compile(r'\]\]\n?'), ']]'),
  ]
class Cursor:
  """A (line, column) position in a text file.

  Cursors compare lexicographically (line first, then column) and support
  adding or subtracting a column offset.  The special Eof() cursor marks
  the end of the file.
  """
  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column
  def __eq__(self, rhs):
    return (self.line, self.column) == (rhs.line, rhs.column)
  def __ne__(self, rhs):
    return not self == rhs
  def __lt__(self, rhs):
    # Tuple comparison gives the line-then-column lexicographic order.
    return (self.line, self.column) < (rhs.line, rhs.column)
  def __le__(self, rhs):
    return not rhs < self
  def __gt__(self, rhs):
    return rhs < self
  def __ge__(self, rhs):
    return not self < rhs
  def __str__(self):
    if self == Eof():
      return 'EOF'
    return '%s(%s)' % (self.line + 1, self.column)
  def __add__(self, offset):
    return Cursor(self.line, self.column + offset)
  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)
  def Clone(self):
    """Returns a copy of self."""
    return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file."""
  # (-1, -1) is the reserved EOF sentinel position.
  return Cursor(line=-1, column=-1)
class Token:
  """A lexical token in a Pump source file.

  A token spans [start, end) in the source, carries the covered text in
  'value', and is classified by 'token_type'.  Missing positions default
  to the Eof() sentinel.
  """
  def __init__(self, start=None, end=None, value=None, token_type=None):
    self.start = Eof() if start is None else start
    self.end = Eof() if end is None else end
    self.value = value
    self.token_type = token_type
  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)
  def Clone(self):
    """Returns a copy of self."""
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
def StartsWith(lines, pos, string):
  """Returns True iff the given position in lines starts with 'string'."""
  remainder = lines[pos.line][pos.column:]
  return remainder.startswith(string)
def FindFirstInLine(line, token_table):
  """Finds the earliest match of any pattern in token_table within line.

  Returns (start_column, match_length, token_type) for the leftmost match,
  or None when no pattern matches.  On a tie, the pattern listed first in
  token_table wins.
  """
  best = None
  for regex, token_type in token_table:
    m = regex.search(line)
    if m and (best is None or m.start() < best[0]):
      best = (m.start(), m.end() - m.start(), token_type)
  return best
def FindFirst(lines, token_table, cursor):
  """Finds the first token from token_table at or after cursor.

  Scans line by line starting at cursor and returns a Token for the first
  match, or None when nothing matches through end-of-input.
  """
  start = cursor.Clone()
  cur_line_number = cursor.line
  for line in lines[start.line:]:
    # On the starting line, search only from the cursor's column onward.
    if cur_line_number == start.line:
      line = line[start.column:]
    m = FindFirstInLine(line, token_table)
    if m:
      # We found a regex in line.
      (start_column, length, token_type) = m
      # Re-add the column offset that was sliced off the first line.
      if cur_line_number == start.line:
        start_column += start.column
      found_start = Cursor(cur_line_number, start_column)
      found_end = found_start + length
      return MakeToken(lines, found_start, found_end, token_type)
    cur_line_number += 1
  # We failed to find str in lines
  return None
def SubString(lines, start, end):
  """Returns the text between cursors start (inclusive) and end (exclusive)."""
  # Normalize the EOF sentinel to a concrete end-of-text position.
  if end == Eof():
    end = Cursor(len(lines) - 1, len(lines[-1]))
  if start >= end:
    return ''
  if start.line == end.line:
    return lines[start.line][start.column:end.column]
  # Span covers several lines: partial first line, whole middle lines,
  # partial last line.
  pieces = [lines[start.line][start.column:]]
  pieces.extend(lines[start.line + 1:end.line])
  pieces.append(lines[end.line][:end.column])
  return ''.join(pieces)
def StripMetaComments(str):
  """Strip meta comments from each line in the given string."""
  # A leading line that is nothing but a meta comment is dropped whole,
  # including its trailing newline.  (No MULTILINE flag: '^' anchors only
  # to the start of the string, matching the original behavior.)
  without_leading = re.sub(r'^\s*\$\$.*\n', '', str)
  # Then remove trailing meta comments from contentful lines.
  return re.sub(r'\s*\$\$.*', '', without_leading)
def MakeToken(lines, start, end, token_type):
  """Creates a new instance of Token."""
  value = SubString(lines, start, end)
  return Token(start, end, value, token_type)
def ParseToken(lines, pos, regex, token_type):
  """Parses the token matching regex exactly at pos; exits on a mismatch."""
  remainder = lines[pos.line][pos.column:]
  m = regex.search(remainder)
  # The match must be anchored at the cursor position itself.
  if not (m and m.start() == 0):
    print('ERROR: %s expected at %s.' % (token_type, pos))
    sys.exit(1)
  return MakeToken(lines, pos, pos + m.end(), token_type)
# Pre-compiled regexes shared by the parser functions below.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')  # a Pump identifier
EQ_REGEX = re.compile(r'=')  # the '=' in "$var ID = ..."
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')  # up to EOL or meta comment
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')  # the '..' in "$range ID LO..HI"
def Skip(lines, pos, regex):
  """Returns the cursor past the regex match at pos, or pos if none."""
  remainder = lines[pos.line][pos.column:]
  match = re.search(regex, remainder)
  if match and match.start() == 0:
    # The match starts right at pos; advance past it.
    return pos + match.end()
  return pos
def SkipUntil(lines, pos, regex, token_type):
  """Returns the cursor of the first regex match at or after pos.

  Exits the program with an error naming token_type if the regex does
  not occur on the rest of the current line.
  """
  remainder = lines[pos.line][pos.column:]
  match = re.search(regex, remainder)
  if match:
    return pos + match.start()
  print('ERROR: %s expected on line %s after column %s.' %
        (token_type, pos.line + 1, pos.column))
  sys.exit(1)
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesized meta expression starting at pos.

  Consumes optional whitespace, an opening '(', the balanced contents,
  and the closing ')', and returns the whole span as an 'exp' Token.
  """
  def ParseInParens(pos):
    # Skip leading whitespace and the opening '(', consume the balanced
    # body, then the closing ')'.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos
  def Parse(pos):
    # Advance to the next '(' or ')'.  A '(' opens a nested group that
    # is consumed recursively; a ')' ends the current group.
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos
  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
  """Returns token with one trailing newline removed from its value."""
  if not token.value.endswith('\n'):
    return token
  return Token(token.start, token.end, token.value[:-1], token.token_type)
def TokenizeLines(lines, pos):
  """A generator that yields the Pump tokens found in lines from pos on.

  Plain text between meta tokens is yielded as 'code' tokens.  For
  block-level directives ($var/$for/$range/$if/$elif/$else/]]) the
  newline ending the preceding code token is stripped so the directive
  line leaves no blank line in the output.
  """
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No more meta tokens: the rest of the input is literal code.
      yield MakeToken(lines, pos, Eof(), 'code')
      return
    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      # Literal code between the cursor and the meta token.
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)
    if found.token_type == '$var':
      # $var id = <exp or [[ block ]]>
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')
      if SubString(lines, pos, pos + 2) != '[[':
        # Inline value: the rest of the line is the expression.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      # $for id <separator code> [[ body ]]
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      # $range id exp1..exp2
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      # $(expr): inline expression in parentheses.
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      # Block delimiters/conditionals: strip the preceding newline.
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if s == '':
    return
  # splitlines(True) keeps the newline characters on each line.
  for token in TokenizeLines(s.splitlines(True), Cursor(0, 0)):
    yield token
class CodeNode:
  """AST node: an ordered sequence of atomic code nodes."""
  def __init__(self, atomic_code_list=None):
    # Child nodes, executed in order.
    self.atomic_code = atomic_code_list


class VarNode:
  """AST node for a $var definition: identifier bound to atomic_code."""
  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code


class RangeNode:
  """AST node for a $range declaration: identifier spans exp1..exp2."""
  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2


class ForNode:
  """AST node for a $for loop: code expanded per value, joined by sep."""
  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code


class ElseNode:
  """AST node holding an else branch."""
  def __init__(self, else_branch=None):
    self.else_branch = else_branch


class IfNode:
  """AST node for $if/$elif/$else: exp selects then_branch or else_branch."""
  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch


class RawCodeNode:
  """AST node for literal output text (a 'code' token)."""
  def __init__(self, token=None):
    self.raw_code = token


class LiteralDollarNode:
  """AST node for the escaped dollar sign written $($)."""
  def __init__(self, token):
    self.token = token


class ExpNode:
  """AST node for a meta expression and its translated Python form."""
  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of a_list."""
  return a_list.pop(0)

def PushFront(a_list, elem):
  """Inserts elem at the front of a_list, in place."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  """Pops and returns the first token in a_list.

  Args:
    a_list: non-empty list of tokens.
    token_type: expected token_type of the popped token, or None to
        accept any token.

  Returns:
    The popped token.  Prints an error and exits the program if
    token_type is given and does not match.
  """
  token = a_list.pop(0)  # idiomatic equivalent of the PopFront() helper
  if token_type is not None and token.token_type != token_type:
    # print() is valid in both Python 2 and 3; the original used the
    # Python-2-only print statement.
    print('ERROR: %s expected at %s' % (token_type, token.start))
    print('ERROR: %s found instead' % (token,))
    sys.exit(1)
  return token
def PeekToken(a_list):
  """Returns the first token without removing it, or None if empty."""
  return a_list[0] if a_list else None
def ParseExpNode(token):
  """Translates an expression token into an ExpNode.

  Every identifier in the expression is rewritten as a lookup in the
  evaluation environment, so the result can be eval()'d by Env.EvalExp.
  """
  identifier_pattern = r'([_A-Za-z]\w*)'
  python_exp = re.sub(identifier_pattern, r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
def ParseElseNode(tokens):
  """Parses an optional $elif/$else continuation of an $if.

  Returns the AST of the else branch, or None if the next tokens do
  not form one.  Skips whitespace-only code tokens between branches.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    # $else [[ body ]] -- the body itself is the else branch.
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    # $elif cond [[ body ]] ... -- desugars to a nested IfNode.
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Whitespace-only code token: discard it and keep looking.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
def ParseAtomicCodeNode(tokens):
  """Parses one atomic AST node from the front of tokens.

  Dispatches on the type of the first token.  Returns the parsed node,
  or None (with the token pushed back) if the token cannot start an
  atomic node -- e.g. a stray ']]' that ends an enclosing block.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    # Literal output text.
    return RawCodeNode(head)
  elif t == '$var':
    # $var id = <exp>  or  $var id = [[ body ]]
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    # $for id <optional separator> [[ body ]]
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    # $if cond [[ body ]] with optional $elif/$else continuation.
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    # $range id exp1..exp2
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # $identifier: strip the leading '$' and treat the rest as an exp.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    # Escaped literal dollar sign.
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    # Anonymous nested block.
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not the start of an atomic node; undo the pop and let the caller
    # decide (e.g. the enclosing block's ']]').
    PushFront(tokens, head)
    return None
def ParseCodeNode(tokens):
  """Consumes atomic nodes from tokens until none can be parsed.

  Returns a CodeNode wrapping the parsed children (possibly empty).
  """
  children = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if node is None:
      # The next token ends the enclosing block; stop here.
      break
    children.append(node)
  return CodeNode(children)
def ParseToAST(pump_src_text):
  """Convert the given Pump source text into an AST."""
  token_list = list(Tokenize(pump_src_text))
  return ParseCodeNode(token_list)
class Env:
  """Evaluation environment: stacks of meta variables and ranges.

  The most recently pushed binding shadows earlier ones with the same
  name; Pop* removes only the most recent entry.
  """

  def __init__(self):
    self.variables = []  # stack of (name, value), most recent first
    self.ranges = []     # stack of (name, lower, upper), most recent first

  def Clone(self):
    """Returns a copy whose stacks are independent of this one."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    """Binds var to value, converting numeric-looking strings to int."""
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    """Removes the most recent variable binding."""
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    """Binds var to the inclusive range [lower, upper]."""
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    """Removes the most recent range binding."""
    self.ranges[:1] = []

  def GetValue(self, identifier):
    """Returns the value bound to identifier; exits if it is undefined."""
    for (var, value) in self.variables:
      if identifier == var:
        return value
    print('ERROR: meta variable %s is undefined.' % (identifier,))
    sys.exit(1)

  def EvalExp(self, exp):
    """Evaluates an ExpNode in this environment and returns the result.

    NOTE: uses eval() on code derived from the .pump source, so only
    trusted input should be processed with this tool.
    """
    try:
      result = eval(exp.python_exp)
    except Exception as e:  # 'as' form is valid in Python 2.6+ and 3.
      print('ERROR: caught exception %s: %s' % (e.__class__.__name__, e))
      print('ERROR: failed to evaluate meta expression %s at %s' %
            (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    """Returns (lower, upper) for identifier; exits if it is undefined."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)
    print('ERROR: range %s is undefined.' % (identifier,))
    sys.exit(1)
class Output:
  """Accumulates generated text in a single string buffer."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the final newline, or '' if there is none."""
    newline_index = self.string.rfind('\n')
    if newline_index < 0:
      return ''
    return self.string[newline_index + 1:]

  def Append(self, s):
    """Appends s to the buffer."""
    self.string += s
def RunAtomicCode(env, node, output):
  """Executes a single AST node under env, appending its expansion to output.

  Dispatches on the node's concrete type; prints an error and exits on
  an unrecognized node.
  """
  if isinstance(node, VarNode):
    # $var: evaluate the definition in a cloned env, bind the result.
    identifier = node.identifier.value.strip()
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    # $range: record the inclusive [lower, upper] bounds for the id.
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    # $for: expand the body once per range value, joined by the separator.
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    # print() with one argument works in both Python 2 and 3; the
    # original used the Python-2-only print statement.
    print('BAD')
    print(node)
    sys.exit(1)
def RunCode(env, code_node, output):
  """Runs every child of code_node under env, appending to output."""
  for child in code_node.atomic_code:
    RunAtomicCode(env, child, output)
def IsSingleLineComment(cur_line):
  """Returns True if cur_line contains a // comment marker."""
  return cur_line.find('//') != -1
def IsInPreprocessorDirective(prev_lines, cur_line):
  """Returns True if cur_line is part of a preprocessor directive.

  A line belongs to a directive when it starts with '#', or when the
  previous emitted line ends with a backslash continuation.

  Args:
    prev_lines: list of lines emitted so far.
    cur_line: the line being classified.
  """
  if cur_line.lstrip().startswith('#'):
    return True
  # bool() fixes the original, which returned the prev_lines list
  # itself (e.g. []) as the falsy result instead of a boolean.
  return bool(prev_lines) and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps a long line containing a // comment, appending pieces to output.

  Code before the comment (if any) is emitted unchanged; the comment
  text is re-flowed at word boundaries to fit in 80 columns under a
  '// ' prefix aligned with the original comment.
  """
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    # Whole-line comment: keep the comment at its original column.
    indent = loc
  else:
    # Emit the code part as-is and align the comment with the code.
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split into word-plus-punctuation segments to wrap between words.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
  """Wraps a long code line to 80 columns, appending pieces to output.

  Args:
    line: the over-long line to wrap.
    line_concat: suffix appended to every wrapped piece (' \\' for
        preprocessor directives, '' for plain code).
    output: list receiving the wrapped lines.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len
    if len((cur_line + seg).rstrip()) < max_len:
      # The segment still fits on the current line.
      cur_line = (cur_line + seg).lstrip()
    else:
      # Flush the current line and start a continuation line.
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
  """Wraps a long preprocessor directive, continuing lines with ' \\'."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps a long plain code line with no continuation suffix."""
  WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
  """Returns a truthy match if line holds a /* IWYU pragma, else None."""
  pragma_pattern = re.compile(r'/\* IWYU pragma: ')
  return pragma_pattern.search(line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Returns a truthy match if line is a header guard, #include,
  or a one-line IWYU pragma; otherwise a falsy value."""
  guard = re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line)
  if guard:
    return guard
  include = re.match(r'^#include\s', line)
  if include:
    return include
  # Don't break IWYU pragmas, either; that causes iwyu.py problems.
  return re.search(r'// IWYU pragma: ', line)
def WrapLongLine(line, output):
  """Appends line to output, wrapping it first if it exceeds 80 columns.

  The wrapping strategy depends on the kind of line: comment,
  preprocessor directive, IWYU pragma, or plain code.
  """
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
    return
  if IsSingleLineComment(line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapComment(line, output)
    return
  if IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # Same style-guide exception as above.
      output.append(line)
    else:
      WrapPreprocessorDirective(line, output)
    return
  if IsMultiLineIWYUPragma(line):
    output.append(line)
    return
  WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace from the given text."""
  wrapped = []
  for line in string.splitlines():
    WrapLongLine(line, wrapped)
  stripped = [line.rstrip() for line in wrapped]
  return '\n'.join(stripped) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  ast = ParseToAST(StripMetaComments(src_text))
  result = Output()
  RunCode(Env(), ast, result)
  return BeautifyCode(result.string)
def main(argv):
  """Command-line entry point.

  Expands the Pump source named by the last argument.  A *.pump input
  is written to the same path without the suffix; anything else goes
  to stdout.
  """
  if len(argv) == 1:
    print(__doc__)
    sys.exit(1)
  file_path = argv[-1]
  # open() replaces the Python-2-only file() builtin; 'with' guarantees
  # the handle is closed (the original leaked it).
  with open(file_path, 'r') as source_file:
    output_str = ConvertFromPumpSource(source_file.read())
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'
  if output_file_path == '-':
    # sys.stdout.write preserves the original's no-trailing-newline
    # behavior (py2 'print x,') and works under Python 3 as well.
    sys.stdout.write(output_str)
  else:
    with open(output_file_path, 'w') as output_file:
      output_file.write('// This file was GENERATED by command:\n')
      output_file.write('// %s %s\n' %
                        (os.path.basename(__file__), os.path.basename(file_path)))
      output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
      output_file.write(output_str)

if __name__ == '__main__':
  main(sys.argv)
| 23,673 | 26.656542 | 80 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/scripts/gen_gtest_pred_impl.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
# NOTE(review): derived from sys.argv[0], so the generated-file paths
# below are relative to how the script was invoked, not to __file__.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the generated header file.

  Args:
    n: the maximum arity of the predicate macros to be generated.
  """
  # A map that defines the values used in the preamble template.
  # NOTE(review): 'year' is defined but the template below does not
  # reference %(year)s.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
    'n' : n
  }
  # The template below is emitted verbatim (after %-substitution) as
  # the top of gtest_pred_impl.h.
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'.  DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly.  Include gtest.h instead.
#endif  // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
//   ASSERT_PRED_FORMAT1(pred_format, v1)
//   ASSERT_PRED_FORMAT2(pred_format, v1, v2)
//   ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult.  See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
//   ASSERT_PRED1(pred, v1)
//   ASSERT_PRED2(pred, v1, v2)
//   ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce.  Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
  if (const ::testing::AssertionResult gtest_ar = (expression)) \\
    ; \\
  else \\
    on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
  """Returns the English name of the given arity, or None if n < 0."""
  small_names = ['nullary', 'unary', 'binary', 'ternary']
  if n < 0:
    return None
  if n < len(small_names):
    return small_names[n]
  return '%s-ary' % n
def Title(word):
  """Returns word with only its first character upper-cased.

  Unlike str.title(), 'Title("4-ary")' is '4-ary' rather than '4-Ary'.
  """
  first, rest = word[0], word[1:]
  return first.upper() + rest
def OneTo(n):
  """Returns the list [1, 2, 3, ..., n].

  Wrapping range() in list() keeps the documented list return type on
  Python 3, where range() is a lazy object; on Python 2 it is a no-op
  copy.
  """
  return list(range(1, n + 1))
def Iter(n, format, sep=''):
  """Joins n formatted strings, one per i in 1..n.

  The format string may contain any number of '%s' specs; each is
  filled with the same iterator value i.

  Example:
    Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """
  # Number of '%s' specs in the format string.
  spec_count = len(format.split('%s')) - 1
  pieces = [format % (spec_count * (i,)) for i in range(1, n + 1)]
  return sep.join(pieces)
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions.

  Concatenates C++ template fragments for the AssertPredNHelper
  function and the GTEST_PRED*/EXPECT_PRED*/ASSERT_PRED* macros.
  """
  # A map the defines the values used in the implementation template.
  DEFS = {
    'n' : str(n),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n))
  }
  # Header of the AssertPredNHelper template function.
  impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
// this in your code.
template <typename Pred""" % DEFS
  impl += Iter(n, """,
          typename T%s""")
  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
  impl += Iter(n, """,
                                  const char* e%s""")
  impl += """,
                                  Pred pred"""
  impl += Iter(n, """,
                                  const T%s& v%s""")
  # Body: success path, then the streamed failure message.
  impl += """) {
  if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
  impl += '  return AssertionFailure() << pred_text << "("'
  impl += Iter(n, """
                            << e%s""", sep=' << ", "')
  impl += ' << ") evaluates to false, where"'
  impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
  # The internal and public assertion macros.
  impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
  GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
                on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
  GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
  impl += Iter(n, """, \\
                                             #v%s""")
  impl += """, \\
                                             pred"""
  impl += Iter(n, """, \\
                                             v%s""")
  impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
  return impl
def HeaderPostamble():
  """Returns the postamble for the header file."""
  postamble = """
#endif  // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
  return postamble
def GenerateFile(path, content):
  """Overwrites the file at path with the given content.

  Uses print()/open()/with so the code runs under both Python 2 and 3:
  the original used the Python-2-only 'print >>f' redirection and the
  file() builtin (removed in Python 3), and never closed the handle.
  """
  print('Updating file %s . . .' % path)
  with open(path, 'w+') as f:
    # The original's trailing-comma print wrote content without adding
    # a newline; f.write matches that exactly.
    f.write(content)
  print('File %s has been updated.' % path)
def GenerateHeader(n):
  """Regenerates the header implementing up-to n-ary predicate assertions."""
  sections = [HeaderPreamble(n)]
  sections.extend(ImplementationForArity(i) for i in OneTo(n))
  sections.append(HeaderPostamble())
  GenerateFile(HEADER, ''.join(sections))
def UnitTestPreamble():
  """Returns the preamble for the generated unit test file."""
  # A map that defines the values used in the preamble template.
  # NOTE(review): 'year' is defined but the template below does not
  # reference %(year)s.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
  }
  # Emitted verbatim (after %-substitution) at the top of
  # gtest_pred_impl_unittest.cc.
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'.  DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long.  If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions.  We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon.  In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
  explicit Bool(int val) : value(val != 0) {}
  bool operator>(int n) const { return value > Bool(n).value; }
  Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
  bool operator==(const Bool& rhs) const { return value == rhs.value; }
  bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
  return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
  """Returns the C++ tests for n-ary predicate assertions.

  Emits sample predicates (function, functor, formatter function and
  functor), a test fixture that verifies single evaluation of every
  argument, and one TEST_F per combination of the five GenTest flags.
  """
  # A map that defines the values used in the template for the tests.
  DEFS = {
    'n' : n,
    'es' : Iter(n, 'e%s', sep=', '),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'tvs' : Iter(n, 'T%s v%s', sep=', '),
    'int_vs' : Iter(n, 'int v%s', sep=', '),
    'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
    'types' : Iter(n, 'typename T%s', sep=', '),
    'v_sum' : Iter(n, 'v%s', sep=' + '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n)),
  }
  # Sample predicate functions (template plus int/Bool specializations).
  tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
  return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
  return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
  return %(v_sum)s > 0;
}
""" % DEFS)
  # Sample predicate functor.
  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
  template <%(types)s>
  bool operator()(""" % DEFS
  tests += Iter(n, 'const T%s& v%s', sep=""",
                  """)
  tests += """) {
    return %(v_sum)s > 0;
  }
};
""" % DEFS
  # Sample predicate-formatter function.
  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
                                             """)
  tests += Iter(n, """,
                                             const T%s& v%s""")
  tests += """) {
  if (PredFunction%(n)s(%(vs)s))
    return testing::AssertionSuccess();
  return testing::AssertionFailure()
      << """ % DEFS
  tests += Iter(n, 'e%s', sep=' << " + " << ')
  tests += """
      << " is expected to be positive, but evaluates to "
      << %(v_sum)s << ".";
}
""" % DEFS
  # Sample predicate-formatter functor.
  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
  template <%(types)s>
  testing::AssertionResult operator()(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
                                      """)
  tests += Iter(n, """,
                                      const T%s& v%s""")
  tests += """) const {
    return PredFormatFunction%(n)s(%(es)s, %(vs)s);
  }
};
""" % DEFS
  # Test fixture: tracks per-argument evaluation counts and whether the
  # test body ran to completion.
  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
 protected:
  virtual void SetUp() {
    expected_to_finish_ = true;
    finished_ = false;""" % DEFS
  tests += """
    """ + Iter(n, 'n%s_ = ') + """0;
  }
"""
  tests += """
  virtual void TearDown() {
    // Verifies that each of the predicate's arguments was evaluated
    // exactly once."""
  tests += ''.join(["""
    EXPECT_EQ(1, n%s_) <<
        "The predicate assertion didn't evaluate argument %s "
        "exactly once.";""" % (i, i + 1) for i in OneTo(n)])
  tests += """
    // Verifies that the control flow in the test function is expected.
    if (expected_to_finish_ && !finished_) {
      FAIL() << "The predicate assertion unexpactedly aborted the test.";
    } else if (!expected_to_finish_ && finished_) {
      FAIL() << "The failed predicate assertion didn't abort the test "
                "as expected.";
    }
  }
  // true iff the test function is expected to run to finish.
  static bool expected_to_finish_;
  // true iff the test function did run to finish.
  static bool finished_;
""" % DEFS
  tests += Iter(n, """
  static int n%s_;""")
  tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.

    Args:
      use_format: true iff the assertion is a *_PRED_FORMAT*.
      use_assert: true iff the assertion is a ASSERT_*.
      expect_failure: true iff the assertion is expected to fail.
      use_functor: true iff the first argument of the assertion is
          a functor (as opposed to a function)
      use_user_type: true iff the predicate functor/function takes
          argument(s) of a user-defined type.

    Example:
      GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
      of a successful EXPECT_PRED_FORMATn() that takes a functor
      whose arguments have built-in types."""
    if use_assert:
      assrt = 'ASSERT'  # 'assert' is reserved, so we cannot use
                        # that identifier here.
    else:
      assrt = 'EXPECT'
    assertion = assrt + '_PRED'
    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'
    assertion += '%(n)s' % DEFS
    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      if not use_format:
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'
    test_name = pred_format_type.title()
    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'
    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'
    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
      'assert' : assrt,
      'assertion' : assertion,
      'test_name' : test_name,
      'pf_type' : pred_format_type,
      'pf' : pred_format,
      'arg_type' : arg_type,
      'arg' : arg,
      'successful' : successful_or_failed,
      'expected' : expected_or_not,
    })
    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
    indent = (len(assertion) + 3)*' '
    extra_indent = ''
    # A failing assertion is wrapped in EXPECT_{NON,}FATAL_FAILURE.
    if expect_failure:
      extra_indent = '  '
      if use_assert:
        test += """
  expected_to_finish_ = false;
  EXPECT_FATAL_FAILURE({  // NOLINT"""
      else:
        test += """
  EXPECT_NONFATAL_FAILURE({  // NOLINT"""
    test += '\n' + extra_indent + """  %(assertion)s(%(pf)s""" % defs
    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + '  finished_ = true;\n'
    if expect_failure:
      test += '  }, "");\n'
    test += '}\n'
    return test
  # Generates tests for all 2**5 = 32 combinations of the five flags.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]
                    ])
  return tests
def UnitTestPostamble():
  """Returns the postamble appended to the generated tests (empty)."""
  return ''
def GenerateUnitTest(n):
  """Writes the unit test covering up-to n-ary predicate assertions."""
  body = ''.join(TestsForArity(i) for i in OneTo(n))
  GenerateFile(UNIT_TEST,
               UnitTestPreamble() + body + UnitTestPostamble())
def _Main():
  """Entry point of the script: generates the header and its unit test.

  Expects exactly one command-line argument, the maximum arity n;
  otherwise prints the usage text and exits with status 1.
  """
  if len(sys.argv) != 2:
    print(__doc__)
    print('Author: ' + __author__)
    sys.exit(1)
  arity = int(sys.argv[1])
  GenerateHeader(arity)
  GenerateUnitTest(arity)
# Allow running as a script as well as importing as a module.
if __name__ == '__main__':
  _Main()
| 21,986 | 29.077975 | 76 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/scripts/upload_gtest.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
  """Invokes upload.py, making sure the Google Test group is cc'ed.

  Passes all command-line flags through unchanged, except that
  GTEST_GROUP is appended to an existing --cc= flag (or a new --cc=
  flag is added if none was given).
  """
  # upload.py is assumed to live in the same directory as this script.
  script_dir = os.path.dirname(os.path.abspath(__file__))
  upload_py_path = os.path.join(script_dir, 'upload.py')

  # Rebuild the argument list, augmenting the cc line as needed.
  upload_py_argv = [upload_py_path]
  saw_cc_flag = False
  for arg in sys.argv[1:]:
    if not arg.startswith(CC_FLAG):
      upload_py_argv.append(arg)
      continue
    saw_cc_flag = True
    cc_line = arg[len(CC_FLAG):]
    cc_list = [addr for addr in cc_line.split(',') if addr]
    if GTEST_GROUP not in cc_list:
      cc_list.append(GTEST_GROUP)
    upload_py_argv.append(CC_FLAG + ','.join(cc_list))
  if not saw_cc_flag:
    upload_py_argv.append(CC_FLAG + GTEST_GROUP)

  # Replaces the current process with upload.py.
  os.execv(upload_py_path, upload_py_argv)
# Script entry point.
if __name__ == '__main__':
  main()
| 2,851 | 35.101266 | 72 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/scripts/common.py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Shared utilities for writing scripts for Google Test/Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
# Matches the line from 'svn info .' output that describes what SVN
# path the current local directory corresponds to. For example, in
# a googletest SVN workspace's trunk/test directory, the output will be:
#
# URL: https://googletest.googlecode.com/svn/trunk/test
_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)')
def GetCommandOutput(command):
  """Runs the shell command and returns its stdout as a list of lines.

  Each line has leading/trailing whitespace (including the newline)
  stripped.
  """
  pipe = os.popen(command, 'r')
  try:
    return [line.strip() for line in pipe.readlines()]
  finally:
    pipe.close()
def GetSvnInfo():
  """Returns (project name, SVN workspace root path).

  Returns (None, None) when the current directory is not inside a
  recognized SVN workspace.
  """
  for line in GetCommandOutput('svn info .'):
    match = _SVN_INFO_URL_RE.match(line)
    if match:
      project = match.group(1)  # googletest or googlemock
      rel_path = match.group(2)
      # One '..' per path component takes us back to the workspace root.
      root = os.path.realpath(rel_path.count('/') * '../')
      return project, root
  return None, None
def GetSvnTrunk():
  """Returns the current SVN workspace's trunk root path, or None."""
  _, root = GetSvnInfo()
  if root:
    return root + '/trunk'
  return None
def IsInGTestSvn():
  """Returns True iff the current directory is in a googletest workspace."""
  return GetSvnInfo()[0] == 'googletest'
def IsInGMockSvn():
  """Returns True iff the current directory is in a googlemock workspace."""
  return GetSvnInfo()[0] == 'googlemock'
| 2,919 | 33.761905 | 78 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googletest/scripts/release_docs.py | #!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for branching Google Test/Mock wiki pages for a new version.
SYNOPSIS
release_docs.py NEW_RELEASE_VERSION
Google Test and Google Mock's external user documentation is in
interlinked wiki files. When we release a new version of
Google Test or Google Mock, we need to branch the wiki files
such that users of a specific version of Google Test/Mock can
look up documentation relevant for that version. This script
automates that process by:
- branching the current wiki pages (which document the
behavior of the SVN trunk head) to pages for the specified
version (e.g. branching FAQ.wiki to V2_6_FAQ.wiki when
NEW_RELEASE_VERSION is 2.6);
- updating the links in the branched files to point to the branched
version (e.g. a link in V2_6_FAQ.wiki that pointed to
Primer.wiki#Anchor will now point to V2_6_Primer.wiki#Anchor).
NOTE: NEW_RELEASE_VERSION must be a NEW version number for
which the wiki pages don't yet exist; otherwise you'll get SVN
errors like "svn: Path 'V1_7_PumpManual.wiki' is not a
directory" when running the script.
EXAMPLE
$ cd PATH/TO/GTEST_SVN_WORKSPACE/trunk
$ scripts/release_docs.py 2.6 # create wiki pages for v2.6
$ svn status # verify the file list
$ svn diff # verify the file contents
$ svn commit -m "release wiki pages for v2.6"
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import common
# Wiki pages that shouldn't be branched for every gtest/gmock release.
GTEST_UNVERSIONED_WIKIS = ['DevGuide.wiki']
GMOCK_UNVERSIONED_WIKIS = [
'DesignDoc.wiki',
'DevGuide.wiki',
'KnownIssues.wiki'
]
def DropWikiSuffix(wiki_filename):
  """Removes the .wiki suffix (if any) from the given filename."""
  suffix = '.wiki'
  if wiki_filename.endswith(suffix):
    return wiki_filename[:-len(suffix)]
  return wiki_filename
class WikiBrancher(object):
  """Branches the wiki pages of a gtest/gmock workspace for a release.

  Copies every versionable .wiki page to a 'V<major>_<minor>_'-prefixed
  sibling via 'svn cp', then rewrites links in the copies so they point
  at the branched pages.
  """

  def __init__(self, dot_version):
    """Args:
      dot_version: the release version in dotted form, e.g. '2.6'.

    Aborts the program if not run inside a gtest/gmock SVN workspace.
    """
    self.project, svn_root_path = common.GetSvnInfo()
    if self.project not in ('googletest', 'googlemock'):
      sys.exit('This script must be run in a gtest or gmock SVN workspace.')
    self.wiki_dir = svn_root_path + '/wiki'
    # Turn '2.6' to 'V2_6_'.
    self.version_prefix = 'V' + dot_version.replace('.', '_') + '_'
    self.files_to_branch = self.GetFilesToBranch()
    page_names = [DropWikiSuffix(f) for f in self.files_to_branch]
    # A link to Foo.wiki is in one of the following forms:
    #   [Foo words]
    #   [Foo#Anchor words]
    #   [http://code.google.com/.../wiki/Foo words]
    #   [http://code.google.com/.../wiki/Foo#Anchor words]
    # We want to replace 'Foo' with 'V2_6_Foo' in the above cases.
    self.search_for_re = re.compile(
        # This regex matches either '[Foo' or '/wiki/Foo' followed by a
        # space or a '#', where Foo is the name of an unversioned wiki
        # page.
        r'(\[|/wiki/)(%s)([ #])' % '|'.join(page_names))
    self.replace_with = r'\1%s\2\3' % (self.version_prefix,)

  def GetFilesToBranch(self):
    """Returns a list of .wiki file names that need to be branched."""
    unversioned_wikis = (GTEST_UNVERSIONED_WIKIS if self.project == 'googletest'
                         else GMOCK_UNVERSIONED_WIKIS)
    return [f for f in os.listdir(self.wiki_dir)
            if (f.endswith('.wiki') and
                not re.match(r'^V\d', f) and  # Exclude versioned .wiki files.
                f not in unversioned_wikis)]

  def BranchFiles(self):
    """Branches the .wiki files via 'svn cp'."""
    print('Branching %d .wiki files:' % (len(self.files_to_branch),))
    os.chdir(self.wiki_dir)
    for f in self.files_to_branch:
      command = 'svn cp %s %s%s' % (f, self.version_prefix, f)
      print(command)
      os.system(command)

  def UpdateLinksInBranchedFiles(self):
    """Rewrites wiki links in each branched copy to the versioned names."""
    for f in self.files_to_branch:
      source_file = os.path.join(self.wiki_dir, f)
      versioned_file = os.path.join(self.wiki_dir, self.version_prefix + f)
      print('Updating links in %s.' % (versioned_file,))
      # Use open() + 'with' instead of the Python-2-only file() builtin,
      # which also left the handles to be closed by garbage collection.
      with open(source_file, 'r') as src:
        text = src.read()
      new_text = self.search_for_re.sub(self.replace_with, text)
      with open(versioned_file, 'w') as dst:
        dst.write(new_text)
def main():
  """Branches the wiki pages for the version given on the command line."""
  if len(sys.argv) != 2:
    sys.exit(__doc__)
  wiki_brancher = WikiBrancher(sys.argv[1])
  wiki_brancher.BranchFiles()
  wiki_brancher.UpdateLinksInBranchedFiles()
# Script entry point.
if __name__ == '__main__':
  main()
| 6,132 | 37.572327 | 80 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/test/gmock_leak_test.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gmock_test_utils
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
environ = gmock_test_utils.environ
SetEnvVar = gmock_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gmock_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
class GMockLeakTest(gmock_test_utils.TestCase):
  """Checks that gmock_leak_test_ flags leaked mocks as configured."""

  def _ExitCode(self, command_line):
    # Runs the leak-test binary with the given command line and returns
    # its exit code (non-zero means a leak was reported).
    return gmock_test_utils.Subprocess(command_line, env=environ).exit_code

  def testCatchesLeakedMockByDefault(self):
    self.assertNotEqual(0, self._ExitCode(TEST_WITH_EXPECT_CALL))
    self.assertNotEqual(0, self._ExitCode(TEST_WITH_ON_CALL))

  def testDoesNotCatchLeakedMockWhenDisabled(self):
    disable_flag = ['--gmock_catch_leaked_mocks=0']
    self.assertEquals(0, self._ExitCode(TEST_WITH_EXPECT_CALL + disable_flag))
    self.assertEquals(0, self._ExitCode(TEST_WITH_ON_CALL + disable_flag))

  def testCatchesLeakedMockWhenEnabled(self):
    enable_flag = ['--gmock_catch_leaked_mocks']
    self.assertNotEqual(0, self._ExitCode(TEST_WITH_EXPECT_CALL + enable_flag))
    self.assertNotEqual(0, self._ExitCode(TEST_WITH_ON_CALL + enable_flag))

  def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
    self.assertNotEqual(
        0,
        self._ExitCode(TEST_WITH_EXPECT_CALL + ['--gmock_catch_leaked_mocks=1']))

  def testCatchesMultipleLeakedMocks(self):
    self.assertNotEqual(
        0,
        self._ExitCode(TEST_MULTIPLE_LEAKS + ['--gmock_catch_leaked_mocks']))
# Script entry point.
if __name__ == '__main__':
  gmock_test_utils.Main()
| 4,384 | 39.229358 | 73 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/test/gmock_test_utils.py | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils # pylint: disable-msg=C6204
def GetSourceDir():
  """Returns the absolute path of the directory holding the .py files."""
  return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.

  Prints a message and aborts the program if the resulting file does
  not exist.

  Args:
    executable_name: name of the test binary that the test script runs.

  Returns:
    The absolute path of the test binary.
  """
  return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
  """Extracts the argument of exit() from an os.system() result.

  Args:
    exit_code: the result value of os.system(command).

  Returns:
    The exit status, or -1 if exit() wasn't called.
  """
  if os.name == 'nt':
    # On Windows, os.system() returns the exit() argument directly, so
    # os.WEXITSTATUS() is neither needed nor available.
    return exit_code
  # On Unix, the status must be unpacked with os.WEXITSTATUS().
  if os.WIFEXITED(exit_code):
    return os.WEXITSTATUS(exit_code)
  return -1
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
  """Runs the unit test by delegating to gtest_test_utils."""
  gtest_test_utils.Main()
| 3,684 | 31.610619 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/test/gmock_output_test.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Mocking Framework.
SYNOPSIS
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # Replace '\r\n' first so the bare-'\r' pass only sees Mac endings.
  for ending in ('\r\n', '\r'):
    s = s.replace(ending, '\n')
  return s
def RemoveReportHeaderAndFooter(output):
  """Removes Google Test result report's header and footer from the output."""
  # Applied in order; each pattern strips one kind of boilerplate line.
  boilerplate_patterns = (
      r'.*gtest_main.*\n',
      r'\[.*\d+ tests.*\n',
      r'\[.* test environment .*\n',
      r'\[=+\] \d+ tests .* ran.*',
      r'.* FAILED TESTS\n',
  )
  for pattern in boilerplate_patterns:
    output = re.sub(pattern, '', output)
  return output
def RemoveLocations(output):
  """Removes all file location info from a Google Test program's output.

  Args:
    output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE:#: '.
  """
  # Handles both the POSIX 'path/file:NN:' and MSVC 'path\file(NN):' forms.
  location_re = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\:')
  return location_re.sub('FILE:#:', output)
def NormalizeErrorMarker(output):
  """Normalizes the error marker, which is different on Windows vs on Linux."""
  # ' error: ' contains no regex metacharacters, so a plain string
  # replacement is equivalent to the re.sub() form.
  return output.replace(' error: ', ' Failure\n')
def RemoveMemoryAddresses(output):
  """Removes memory addresses from the test output."""
  address_re = re.compile(r'@\w+')
  return address_re.sub('@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
  """Removes the test names of leaked mock objects from the test output."""
  leak_note_re = re.compile(r'\(used in test .+\) ')
  return leak_note_re.sub('', output)
def GetLeakyTests(output):
  """Returns a list of test names that leak mock objects."""
  # Each '(used in test FooTest.Bar)' occurrence in the output
  # contributes the name 'FooTest.Bar' to the result.
  leak_re = re.compile(r'\(used in test (.+)\)')
  return leak_re.findall(output)
def GetNormalizedOutputAndLeakyTests(output):
  """Normalizes the output of gmock_output_test_.

  Args:
    output: The test output.

  Returns:
    A tuple (the normalized test output, the list of test names that
    have leaked mocks).
  """
  # The normalization steps must run in this order.
  for transform in (ToUnixLineEnding,
                    RemoveReportHeaderAndFooter,
                    NormalizeErrorMarker,
                    RemoveLocations,
                    RemoveMemoryAddresses):
    output = transform(output)
  return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def GetShellCommandOutput(cmd):
  """Runs a command in a sub-process, and returns its STDOUT in a string."""
  result = gmock_test_utils.Subprocess(cmd, capture_stderr=False)
  return result.output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
  """Runs a command and returns its normalized output and a list of leaky tests.

  Args:
    cmd: the shell command.
  """
  # Disables exception pop-ups on Windows.
  os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
  raw_output = GetShellCommandOutput(cmd)
  return GetNormalizedOutputAndLeakyTests(raw_output)
class GMockOutputTest(gmock_test_utils.TestCase):
  """Compares normalized gmock_output_test_ output against the golden file."""

  def testOutput(self):
    (output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    with open(GOLDEN_PATH, 'rb') as golden_file:
      golden = golden_file.read()
    # The normalized output should match the golden file.
    self.assertEquals(golden, output)
    # The raw output should contain 2 leaked mock object errors for
    # test GMockOutputTest.CatchesLeakedMocks.
    self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
                       'GMockOutputTest.CatchesLeakedMocks'],
                      leaky_tests)
if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    # Regenerate the golden file from the current test output.
    (output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    with open(GOLDEN_PATH, 'wb') as golden_file:
      golden_file.write(output)
  else:
    gmock_test_utils.Main()
| 5,999 | 32.149171 | 80 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/fuse_gmock_files.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/../googletest
directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to googlemock@googlegroups.com. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into googletest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, '../googletest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
  """Returns the Google Test root, assumed to be a sibling of gmock_root."""
  return os.path.join(gmock_root, '../googletest')
def ValidateGMockRootDir(gmock_root):
  """Makes sure gmock_root points to a valid gmock root directory.

  The function aborts the program on failure.
  """
  gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
  for seed in (GMOCK_H_SEED, GMOCK_ALL_CC_SEED):
    gtest.VerifyFileExists(gmock_root, seed)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  for output_name in (gtest.GTEST_H_OUTPUT,
                      GMOCK_H_OUTPUT,
                      GMOCK_GTEST_ALL_CC_OUTPUT):
    gtest.VerifyOutputFile(output_dir, output_name)
def FuseGMockH(gmock_root, output_dir):
  """Scans folder gmock_root to generate gmock/gmock.h in output_dir.

  Args:
    gmock_root: the root directory of the Google Mock source tree.
    output_dir: the directory where gmock/gmock.h is written.
  """
  output_file = open(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
  # Holds all gmock headers we've processed.  Use the builtin set type:
  # the 'sets' module is deprecated (and removed in Python 3).
  processed_files = set()

  def ProcessFile(gmock_header_path):
    """Processes the given gmock header file."""
    # We don't process the same header twice.
    if gmock_header_path in processed_files:
      return
    processed_files.add(gmock_header_path)
    # Read the header eagerly so the handle is closed deterministically.
    header = open(os.path.join(gmock_root, gmock_header_path), 'r')
    try:
      lines = header.readlines()
    finally:
      header.close()
    for line in lines:
      m = INCLUDE_GMOCK_FILE_REGEX.match(line)
      if m:
        # It's '#include "gmock/..."' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
        continue
      m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        # It's '#include "gtest/foo.h"'.  We translate it to
        # "gtest/gtest.h", regardless of what foo is, since all gtest
        # headers are fused into gtest/gtest.h; it only needs to be
        # emitted once.
        if not gtest.GTEST_H_SEED in processed_files:
          processed_files.add(gtest.GTEST_H_SEED)
          output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
      else:
        # Otherwise we copy the line unchanged to the output file.
        output_file.write(line)

  ProcessFile(GMOCK_H_SEED)
  output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
  """Scans folder gmock_root to fuse gmock-all.cc into output_file.

  output_file must be an already-open writable file object; the caller
  is responsible for closing it.
  """
  processed_files = sets.Set()
  def ProcessFile(gmock_source_file):
    """Processes the given gmock source file."""
    # We don't process the same #included file twice.
    if gmock_source_file in processed_files:
      return
    processed_files.add(gmock_source_file)
    # Reads each line in the given gmock source file.
    for line in file(os.path.join(gmock_root, gmock_source_file), 'r'):
      m = INCLUDE_GMOCK_FILE_REGEX.match(line)
      if m:
        # It's '#include "gmock/foo.h"'. We treat it as '#include
        # "gmock/gmock.h"', as all other gmock headers are being fused
        # into gmock.h and cannot be #included directly.
        # There is no need to #include "gmock/gmock.h" more than once.
        if not GMOCK_H_SEED in processed_files:
          processed_files.add(GMOCK_H_SEED)
          output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
      else:
        m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          # It's '#include "gtest/..."'.
          # There is no need to #include gtest.h as it has been
          # #included by gtest-all.cc.
          pass
        else:
          m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
          if m:
            # It's '#include "src/foo"' - let's process it recursively.
            ProcessFile(m.group(1))
          else:
            # Otherwise we copy the line unchanged to the output file.
            output_file.write(line)
  ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
  """Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
  output_file = file(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), 'w')
  # First, fuse gtest-all.cc into gmock-gtest-all.cc.
  gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
  # Next, append fused gmock-all.cc to gmock-gtest-all.cc.
  FuseGMockAllCcToFile(gmock_root, output_file)
  output_file.close()
def FuseGMock(gmock_root, output_dir):
  """Fuses gtest.h, gmock.h, and gmock-gtest-all.h.

  Validates both directories first (aborting on failure), then writes the
  three fused files into output_dir.
  """
  ValidateGMockRootDir(gmock_root)
  ValidateOutputDir(output_dir)
  gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir)
  FuseGMockH(gmock_root, output_dir)
  FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
  """Parses sys.argv and runs the fusing step; exits 1 on bad usage."""
  argc = len(sys.argv)
  if argc == 2:
    # fuse_gmock_files.py OUTPUT_DIR
    FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
    FuseGMock(sys.argv[1], sys.argv[2])
  else:
    # Wrong number of arguments: show the module docstring as usage help.
    print __doc__
    sys.exit(1)
# Script entry point.
if __name__ == '__main__':
  main()
| 8,631 | 34.817427 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/upload.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# readline is optional: when present it provides line editing for the
# interactive email/password prompts; silently continue without it.
try:
  import readline
except ImportError:
  pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError, e:
      # Best effort only: an unreadable cache file just means no suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError, e:
      # Failing to save the address for next time is not fatal.
      pass
  else:
    # Empty input means "use the remembered address".
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If 'verbosity' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  print >>sys.stderr, msg
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    # The "Error" entry carries the ClientLogin failure code,
    # e.g. "BadAuthentication" or "CaptchaRequired".
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server.

  Subclasses must implement _GetOpener(); HttpRpcServer below is the
  concrete implementation used by this script.
  """
  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
        NOTE(review): mutable default argument; it is only read here, but
        callers should not rely on mutating it.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # The opener comes from the subclass; it owns cookie handling.
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)
  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()
  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req
  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email: The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The body is "key=value" lines; we only need the "Auth" token.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 from ClientLogin carries an "Error=<reason>" body.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise
  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects, so a 302 surfaces as an HTTPError;
      # that is the expected success path and is checked below.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True
  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Up to three attempts; only "BadAuthentication" re-prompts, all other
    # ClientLogin failures print a message and give up (or re-raise).
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return
  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    # The timeout is installed process-wide, so restore the old one after.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Stale/missing cookie: re-authenticate and retry the request.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Redirects must NOT be followed: _GetAuthCookie relies on seeing the raw
    302 response from /_ah/login.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface definition, grouped by concern (logging, server,
# issue, patch).  Parsed later by the (not shown here) RealMain/main code.
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Args:
    options: Parsed command-line options (server, host, email, save_cookies).

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer
  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)
  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s.  Override with --email" % email)
    server = rpc_server_class(
        options.server,
        # The dev_appserver accepts any password for the debug user.
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server
  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
           uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  # Plain form fields come first, each in its own boundary-delimited part.
  for (name, value) in fields:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % name,
        '',
        value,
    ])
  # Then the file attachments, which also carry a filename and content type.
  for (name, filename, value) in files:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
            (name, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  # Closing boundary, plus a trailing empty element so the body ends in CRLF.
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, CRLF.join(parts)
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed, _ = mimetypes.guess_type(filename)
  if guessed:
    return guessed
  # Unknown extension: fall back to the generic binary content type.
  return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (subprocess on Windows only searches PATH when shell=True.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line by line while also capturing it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # stderr is read after the process exits; echoed only in print_output mode.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Runs a command and returns its stdout, aborting the program on failure.

  A non-zero exit status is always fatal; empty output is fatal too unless
  silent_ok is set.
  """
  output, status = RunShellWithReturnCode(command, print_output,
                                          universal_newlines)
  if status:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not (silent_ok or output):
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""
  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options
  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")
  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty.  For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files
  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""
    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      # Over-size files are not uploaded; the server is told via a flag.
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate("  --> %s" % response_body)
        sys.exit(1)
    # Invert patch_list into {filename: file_id_str}.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker in the id means the server doesn't want the base.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)
  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""
  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # --rev accepts "N" or "N:M"; rev_end stays None for the single form.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)
  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base
  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting setups get hand-crafted web-view URLs; anything else
        # falls through to the repository URL itself.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None
  def GenerateDiff(self, args):
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data
  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date':                ['Date', 'LastChangedDate'],
      'Revision':            ['Revision', 'LastChangedRevision', 'Rev'],
      'Author':              ['Author', 'LastChangedBy'],
      'HeadURL':             ['HeadURL', 'URL'],
      'Id':                  ['Id'],
      # Aliases
      'LastChangedDate':     ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy':       ['LastChangedBy', 'Author'],
      'URL':                 ['URL', 'HeadURL'],
    }
    def repl(m):
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
  def GetUnknownFiles(self):
    # Lines beginning with "?" in "svn status" are unversioned files.
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result
  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n".  See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Present-in-old-only = deleted; in both = modified; new-only = added.
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status
  def GetBaseFile(self, filename):
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history".  See "svn st" for more information.  We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = mimetype and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = mimetype and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary files must be fetched without newline translation.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            # Collapse expanded keywords so the diff applies cleanly.
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Maps each diffed filename to the blob hash of its base revision,
    # filled in by GenerateDiff and consumed by GetBaseFile.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Return git's diff reshaped into svn-style output.

    Each file header is replaced with an svn-style "Index:" line, and the
    left-hand hash of every git "index" line is recorded so the base file
    contents can be uploaded alongside the diff.
    """
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
    out_lines = []
    num_files = 0
    current_file = None
    for line in gitdiff.splitlines():
      header = re.match(r"diff --git a/(.*) b/.*$", line)
      if header:
        num_files += 1
        current_file = header.group(1)
        out_lines.append("Index: %s\n" % current_file)
      else:
        # A git "index" line looks like this (long hashes elided):
        #   index 82c0d44..b2cee3f 100755
        # The left hash identifies the base blob of the current file.
        index_match = re.match(r"index (\w+)\.\.", line)
        if index_match:
          self.base_hashes[current_file] = index_match.group(1)
        out_lines.append(line + "\n")
    if not num_files:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(out_lines)

  def GetUnknownFiles(self):
    """Return the files in the working tree that git does not track."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetBaseFile(self, filename):
    """Fetch base contents via the hash remembered by GenerateDiff."""
    blob_hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if blob_hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(
          ["git", "show", blob_hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % blob_hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    # Base revision: either given explicitly, or the working directory's
    # first parent ("hg parent -q" prints e.g. "5:abc123" -> "abc123").
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    # "hg status -u" lines look like "? filename"; keep the "?" ones only.
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      # File existed in the base revision; fetch its old contents.
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      # File still exists in the working copy; read its new contents.
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    # Only binary images need their new contents uploaded separately; for
    # everything else the diff itself carries the change.
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    next_name = None
    if line.startswith('Index:'):
      next_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # Modified files use '/' between directories while property changes
      # use '\' on Windows; normalize so a file doesn't show up twice.
      candidate = line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # Property changes without modifications start a new diff.
        next_name = candidate
    if next_name:
      # Flush the diff accumulated for the previous file, if any.
      if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
      current_name = next_name
      current_lines = [line]
    else:
      current_lines.append(line)
  if current_name and current_lines:
    patches.append((current_name, ''.join(current_lines)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Used when the combined diff is too large to upload in one request.
  (Python 2 script: uses print statements.)

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # Skip single-file patches that are themselves over the size limit.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    # Server replies "OK" on the first line, then the patch key.
    lines = response_body.splitlines()
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class. Exit with an
  error if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  except OSError, (errno, message):  # Python 2-only except syntax.
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Guesses the VCS, generates (or accepts) a diff, and uploads the change
  to the review server. (Python 2 script: raw_input, iteritems, md5.new,
  print statements.)

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  # NOTE(review): download_base is already true on this path, so the
  # assignment below looks like a no-op; only the log line has effect.
  # Confirm whether "False" (force upload of bases) was intended.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  # NOTE(review): this validation rejects addresses whose domain has more
  # than one dot (e.g. user@mail.example.com) -- verify that's intended.
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    # NOTE(review): "file" shadows the builtin here; harmless but worth
    # renaming if this code is ever modernized.
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Server response: message line, patchset id line, then one line per
    # patch ("<patch_key> <filename>").
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the last path component of the issue URL in msg.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Command-line entry point: run RealMain and exit 1 on Ctrl-C."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    # Bare Python 2 print: emit a blank line so the status message
    # starts on its own line after ^C.
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)


if __name__ == "__main__":
  main()
| 51,024 | 35.761527 | 80 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/upload_gmock.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.
This simple wrapper passes all command line flags and
--cc=googlemock@googlegroups.com to upload.py.
USAGE: upload_gmock.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GMOCK_GROUP = 'googlemock@googlegroups.com'
def main():
  """Forward all flags to upload.py, ensuring the gmock group is cc'd."""
  # upload.py is expected to live next to this script.
  script_dir = os.path.dirname(os.path.abspath(__file__))
  upload_py_path = os.path.join(script_dir, 'upload.py')
  new_argv = [upload_py_path]
  saw_cc_flag = False
  for arg in sys.argv[1:]:
    if not arg.startswith(CC_FLAG):
      new_argv.append(arg)
      continue
    # Merge the Google Mock discussion group into the user's cc list
    # unless it is already present.
    saw_cc_flag = True
    addresses = [addr for addr in arg[len(CC_FLAG):].split(',') if addr]
    if GMOCK_GROUP not in addresses:
      addresses.append(GMOCK_GROUP)
    new_argv.append(CC_FLAG + ','.join(addresses))
  if not saw_cc_flag:
    new_argv.append(CC_FLAG + GMOCK_GROUP)
  # Replace the current process with upload.py.
  os.execv(upload_py_path, new_argv)


if __name__ == '__main__':
  main()
| 2,833 | 34.873418 | 72 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/gmock_doctor.py | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts compiler's errors in code using Google Mock to plain English."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import re
import sys
# Script version and contact address reported to users.
_VERSION = '1.0.3'

_EMAIL = 'googlemock@googlegroups.com'

# Google Mock names that users commonly forget to pull into scope with a
# "using testing::Foo;" declaration; consulted by _NeedToUseSymbolDiagnoser
# (and by _TypeInTemplatedBaseDiagnoser to avoid overlapping diagnoses).
_COMMON_GMOCK_SYMBOLS = [
    # Matchers
    '_',
    'A',
    'AddressSatisfies',
    'AllOf',
    'An',
    'AnyOf',
    'ContainerEq',
    'Contains',
    'ContainsRegex',
    'DoubleEq',
    'ElementsAre',
    'ElementsAreArray',
    'EndsWith',
    'Eq',
    'Field',
    'FloatEq',
    'Ge',
    'Gt',
    'HasSubstr',
    'IsInitializedProto',
    'Le',
    'Lt',
    'MatcherCast',
    'Matches',
    'MatchesRegex',
    'NanSensitiveDoubleEq',
    'NanSensitiveFloatEq',
    'Ne',
    'Not',
    'NotNull',
    'Pointee',
    'Property',
    'Ref',
    'ResultOf',
    'SafeMatcherCast',
    'StartsWith',
    'StrCaseEq',
    'StrCaseNe',
    'StrEq',
    'StrNe',
    'Truly',
    'TypedEq',
    'Value',

    # Actions
    'Assign',
    'ByRef',
    'DeleteArg',
    'DoAll',
    'DoDefault',
    'IgnoreResult',
    'Invoke',
    'InvokeArgument',
    'InvokeWithoutArgs',
    'Return',
    'ReturnNew',
    'ReturnNull',
    'ReturnRef',
    'SaveArg',
    'SetArgReferee',
    'SetArgPointee',
    'SetArgumentPointee',
    'SetArrayArgument',
    'SetErrnoAndReturn',
    'Throw',
    'WithArg',
    'WithArgs',
    'WithoutArgs',

    # Cardinalities
    'AnyNumber',
    'AtLeast',
    'AtMost',
    'Between',
    'Exactly',

    # Sequences
    'InSequence',
    'Sequence',

    # Misc
    'DefaultValue',
    'Mock',
    ]

# Regex for matching source file path and line number in the compiler's errors.
# The clang variants additionally capture the column number; the NON_GMOCK
# form skips locations inside gmock's own headers.
_GCC_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(\d+:)?\s+'
_CLANG_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(?P<column>\d+):\s+'
_CLANG_NON_GMOCK_FILE_LINE_RE = (
    r'(?P<file>.*[/\\^](?!gmock-)[^/\\]+):(?P<line>\d+):(?P<column>\d+):\s+')
def _FindAllMatches(regex, s):
"""Generates all matches of regex in string s."""
r = re.compile(regex)
return r.finditer(s)
def _GenericDiagnoser(short_name, long_name, diagnoses, msg):
"""Diagnoses the given disease by pattern matching.
Can provide different diagnoses for different patterns.
Args:
short_name: Short name of the disease.
long_name: Long name of the disease.
diagnoses: A list of pairs (regex, pattern for formatting the diagnosis
for matching regex).
msg: Compiler's error messages.
Yields:
Tuples of the form
(short name of disease, long name of disease, diagnosis).
"""
for regex, diagnosis in diagnoses:
if re.search(regex, msg):
diagnosis = '%(file)s:%(line)s:' + diagnosis
for m in _FindAllMatches(regex, msg):
yield (short_name, long_name, diagnosis % m.groupdict())
def _NeedToReturnReferenceDiagnoser(msg):
  """Diagnoses the NRR disease, given the error messages by the compiler."""
  # The same mistake surfaces differently in GCC, older Clang, and Clang 11+
  # (which hits a named static_assert); one pattern per compiler family.
  gcc_regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
               + _GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gmock-actions\.h.*error: creating array with negative size')
  clang_regex = (r'error:.*array.*negative.*\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'note: in instantiation of function template specialization '
                 r'\'testing::internal::ReturnAction<(?P<type>.*)>'
                 r'::operator Action<.*>\' requested here')
  clang11_re = (r'use_ReturnRef_instead_of_Return_to_return_a_reference.*'
                r'(.*\n)*?' + _CLANG_NON_GMOCK_FILE_LINE_RE)
  diagnosis = """
You are using a Return() action in a function that returns a reference to
%(type)s. Please use ReturnRef() instead."""
  return _GenericDiagnoser('NRR', 'Need to Return Reference',
                           [(clang_regex, diagnosis),
                            (clang11_re, diagnosis % {'type': 'a type'}),
                            (gcc_regex, diagnosis % {'type': 'a type'})],
                           msg)
def _NeedToReturnSomethingDiagnoser(msg):
  """Diagnoses the NRS disease, given the error messages by the compiler."""
  # GCC can't name the expected return type, so the generic wording is
  # substituted for it; the two Clang patterns differ in how the Result
  # typedef is spelled in the error.
  gcc_regex = (_GCC_FILE_LINE_RE + r'(instantiated from here\n.'
               r'*gmock.*actions\.h.*error: void value not ignored)'
               r'|(error: control reaches end of non-void function)')
  clang_regex1 = (_CLANG_FILE_LINE_RE +
                  r'error: cannot initialize return object '
                  r'of type \'Result\' \(aka \'(?P<return_type>.*)\'\) '
                  r'with an rvalue of type \'void\'')
  clang_regex2 = (_CLANG_FILE_LINE_RE +
                  r'error: cannot initialize return object '
                  r'of type \'(?P<return_type>.*)\' '
                  r'with an rvalue of type \'void\'')
  diagnosis = """
You are using an action that returns void, but it needs to return
%(return_type)s. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
  return _GenericDiagnoser(
      'NRS',
      'Need to Return Something',
      [(gcc_regex, diagnosis % {'return_type': '*something*'}),
       (clang_regex1, diagnosis),
       (clang_regex2, diagnosis)],
      msg)
def _NeedToReturnNothingDiagnoser(msg):
  """Diagnoses the NRN disease, given the error messages by the compiler."""
  # Clang emits the "incomplete type 'Result' (aka 'void')" error from two
  # different instantiation paths (ReturnAction vs. DoBothAction/DoAll).
  gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gmock-actions\.h.*error: instantiation of '
               r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
               r'as type \'void\'')
  clang_regex1 = (r'error: field has incomplete type '
                  r'\'Result\' \(aka \'void\'\)(\r)?\n'
                  r'(.*\n)*?' +
                  _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                  r'of function template specialization '
                  r'\'testing::internal::ReturnAction<(?P<return_type>.*)>'
                  r'::operator Action<void \(.*\)>\' requested here')
  clang_regex2 = (r'error: field has incomplete type '
                  r'\'Result\' \(aka \'void\'\)(\r)?\n'
                  r'(.*\n)*?' +
                  _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                  r'of function template specialization '
                  r'\'testing::internal::DoBothAction<.*>'
                  r'::operator Action<(?P<return_type>.*) \(.*\)>\' '
                  r'requested here')
  diagnosis = """
You are using an action that returns %(return_type)s, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
  return _GenericDiagnoser(
      'NRN',
      'Need to Return Nothing',
      [(gcc_regex, diagnosis % {'return_type': '*something*'}),
       (clang_regex1, diagnosis),
       (clang_regex2, diagnosis)],
      msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
  """Diagnoses the IBRA disease, given the error messages by the compiler."""
  # Both compilers fail inside gtest-printers.h when Universal Printing
  # needs sizeof() of a type that was only forward-declared.
  gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gtest-printers\.h.*error: invalid application of '
               r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
  clang_regex = (r'.*gtest-printers\.h.*error: invalid application of '
                 r'\'sizeof\' to an incomplete type '
                 r'\'(?P<type>.*)( const)?\'\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'note: in instantiation of member function '
                 r'\'testing::internal2::TypeWithoutFormatter<.*>::'
                 r'PrintValue\' requested here')
  diagnosis = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
  return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
  """Diagnoses the OFM disease, given the error messages by the compiler."""
  # Triggered when an overloaded function is passed to Truly() without a
  # disambiguating cast.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
               r'call to \'Truly\(<unresolved overloaded function type>\)')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function for '
                 r'call to \'Truly')
  diagnosis = """
The argument you gave to Truly() is an overloaded function. Please tell
your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool Foo(int n);
you should write
Truly(static_cast<bool (*)(int n)>(Foo))"""
  return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedFunctionActionDiagnoser(msg):
  """Diagnoses the OFA disease, given the error messages by the compiler."""
  # Triggered when an overloaded free function is passed to Invoke() --
  # the FunctionImpl template parameter cannot be deduced.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for call to '
               r'\'Invoke\(<unresolved overloaded function type>')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching '
                 r'function for call to \'Invoke\'\r?\n'
                 r'(.*\n)*?'
                 r'.*\bgmock-generated-actions\.h:\d+:\d+:\s+'
                 r'note: candidate template ignored:\s+'
                 r'couldn\'t infer template argument \'FunctionImpl\'')
  diagnosis = """
Function you are passing to Invoke is overloaded. Please tell your compiler
which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool MyFunction(int n, double x);
you should write something like
Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
  return _GenericDiagnoser('OFA', 'Overloaded Function Action',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedMethodActionDiagnoser(msg):
  """Diagnoses the OMA disease, given the error messages by the compiler."""
  # Like OFA, but for the two-argument Invoke(object, &Class::Method) form
  # when the member function is overloaded.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
               r'call to \'Invoke\(.+, <unresolved overloaded function '
               r'type>\)')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function '
                 r'for call to \'Invoke\'\r?\n'
                 r'(.*\n)*?'
                 r'.*\bgmock-generated-actions\.h:\d+:\d+: '
                 r'note: candidate function template not viable: '
                 r'requires .*, but 2 (arguments )?were provided')
  diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
  return _GenericDiagnoser('OMA', 'Overloaded Method Action',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _MockObjectPointerDiagnoser(msg):
  """Diagnoses the MOP disease, given the error messages by the compiler."""
  # Triggered when a pointer-to-mock is passed to ON_CALL/EXPECT_CALL;
  # GCC's message yields the method and object names, Clang's does not,
  # so placeholders are substituted for the Clang match.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: request for member '
               r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
               r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: member reference type '
                 r'\'(?P<class_name>.*?) *\' is a pointer; '
                 r'(did you mean|maybe you meant) to use \'->\'\?')
  diagnosis = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
class %(class_name)s : public ... {
...
MOCK_METHOD0(%(method)s, ...);
};
and the following mock instance:
%(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
EXPECT_CALL(*mock_ptr, %(method)s(...));"""
  return _GenericDiagnoser(
      'MOP',
      'Mock Object Pointer',
      [(gcc_regex, diagnosis),
       (clang_regex, diagnosis % {'mock_object': 'mock_object',
                                  'method': 'method',
                                  'class_name': '%(class_name)s'})],
      msg)
def _NeedToUseSymbolDiagnoser(msg):
  """Diagnoses the NUS disease, given the error messages by the compiler."""
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
               r'(was not declared in this scope|has not been declared)')
  clang_regex = (_CLANG_FILE_LINE_RE +
                 r'error: (use of undeclared identifier|unknown type name|'
                 r'no template named) \'(?P<symbol>[^\']+)\'')
  diagnosis = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
using testing::%(symbol)s;
?"""
  # Unlike the other diagnosers, this one filters matches itself: only
  # undeclared identifiers that are known gmock names get a diagnosis.
  for m in (list(_FindAllMatches(gcc_regex, msg)) +
            list(_FindAllMatches(clang_regex, msg))):
    symbol = m.groupdict()['symbol']
    if symbol in _COMMON_GMOCK_SYMBOLS:
      yield ('NUS', 'Need to Use Symbol', diagnosis % m.groupdict())
def _NeedToUseReturnNullDiagnoser(msg):
  """Diagnoses the NRNULL disease, given the error messages by the compiler."""
  # Return(NULL) instantiates ReturnAction<int/long>, which then fails to
  # convert to the pointer-returning Action type inside ImplicitCast_.
  gcc_regex = ('instantiated from \'testing::internal::ReturnAction<R>'
               '::operator testing::Action<Func>\(\) const.*\n' +
               _GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*error: no matching function for call to \'ImplicitCast_\('
               r'(:?long )?int&\)')
  clang_regex = (r'\bgmock-actions.h:.* error: no matching function for '
                 r'call to \'ImplicitCast_\'\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                 r'of function template specialization '
                 r'\'testing::internal::ReturnAction<(int|long)>::operator '
                 r'Action<(?P<type>.*)\(\)>\' requested here')
  diagnosis = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into %(type)s. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
  return _GenericDiagnoser(
      'NRNULL', 'Need to use ReturnNull',
      [(clang_regex, diagnosis),
       (gcc_regex, diagnosis % {'type': 'the right type'})],
      msg)
def _TypeInTemplatedBaseDiagnoser(msg):
  """Diagnoses the TTB disease, given the error messages by the compiler."""
  # This version works when the type is used as the mock function's return
  # type.
  gcc_4_3_1_regex_type_in_retval = (
      r'In member function \'int .*\n' + _GCC_FILE_LINE_RE +
      r'error: a function call cannot appear in a constant-expression')
  gcc_4_4_0_regex_type_in_retval = (
      r'error: a function call cannot appear in a constant-expression'
      + _GCC_FILE_LINE_RE + r'error: template argument 1 is invalid\n')
  # This version works when the type is used as the mock function's sole
  # parameter type.
  gcc_regex_type_of_sole_param = (
      _GCC_FILE_LINE_RE +
      r'error: \'(?P<type>.+)\' was not declared in this scope\n'
      r'.*error: template argument 1 is invalid\n')
  # This version works when the type is used as a parameter of a mock
  # function that has multiple parameters.
  gcc_regex_type_of_a_param = (
      r'error: expected `;\' before \'::\' token\n'
      + _GCC_FILE_LINE_RE +
      r'error: \'(?P<type>.+)\' was not declared in this scope\n'
      r'.*error: template argument 1 is invalid\n'
      r'.*error: \'.+\' was not declared in this scope')
  clang_regex_type_of_retval_or_sole_param = (
      _CLANG_FILE_LINE_RE +
      r'error: use of undeclared identifier \'(?P<type>.*)\'\n'
      r'(.*\n)*?'
      r'(?P=file):(?P=line):\d+: error: '
      r'non-friend class member \'Result\' cannot have a qualified name'
      )
  clang_regex_type_of_a_param = (
      _CLANG_FILE_LINE_RE +
      r'error: C\+\+ requires a type specifier for all declarations\n'
      r'(.*\n)*?'
      r'(?P=file):(?P=line):(?P=column): error: '
      r'C\+\+ requires a type specifier for all declarations'
      )
  clang_regex_unknown_type = (
      _CLANG_FILE_LINE_RE +
      r'error: unknown type name \'(?P<type>[^\']+)\''
      )
  diagnosis = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
typedef typename Base<T>::%(type)s %(type)s;"""
  for diag in _GenericDiagnoser(
      'TTB', 'Type in Template Base',
      [(gcc_4_3_1_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
       (gcc_4_4_0_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
       (gcc_regex_type_of_sole_param, diagnosis),
       (gcc_regex_type_of_a_param, diagnosis),
       (clang_regex_type_of_retval_or_sole_param, diagnosis),
       (clang_regex_type_of_a_param, diagnosis % {'type': 'Foo'})],
      msg):
    yield diag
  # Avoid overlap with the NUS pattern.
  for m in _FindAllMatches(clang_regex_unknown_type, msg):
    type_ = m.groupdict()['type']
    if type_ not in _COMMON_GMOCK_SYMBOLS:
      yield ('TTB', 'Type in Template Base', diagnosis % m.groupdict())
def _WrongMockMethodMacroDiagnoser(msg):
  """Diagnoses the WMM disease, given the error messages by the compiler.

  Detects MOCK_METHODn used with the wrong argument count n and delegates
  formatting to _GenericDiagnoser.
  """

  # GCC surfaces the mismatch via the this_method_does_not_take_N_argument
  # compile-time assertion plus the FunctionMocker<...(A<args>)> candidate.
  gcc_regex = (_GCC_FILE_LINE_RE +
               r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
               r'.*\n'
               r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
  clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'error:.*array.*negative.*r?\n'
                 r'(.*\n)*?'
                 r'(?P=file):(?P=line):(?P=column): error: too few arguments '
                 r'to function call, expected (?P<args>\d+), '
                 r'have (?P<wrong_args>\d+)')
  # Clang 11's message only carries the wrong count, so the correct count is
  # not recoverable; generic 'm'/'n' placeholders are substituted below.
  clang11_re = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                r'.*this_method_does_not_take_'
                r'(?P<wrong_args>\d+)_argument.*')
  diagnosis = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
  return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
                           [(gcc_regex, diagnosis),
                            (clang11_re, diagnosis % {'wrong_args': 'm',
                                                      'args': 'n'}),
                            (clang_regex, diagnosis)],
                           msg)
def _WrongParenPositionDiagnoser(msg):
  """Diagnoses the WPP disease, given the error messages by the compiler.

  Detects a .WillOnce()/.Times()-style clause accidentally placed inside the
  EXPECT_CALL/ON_CALL argument list (seen as a missing member on MockSpec).
  """

  gcc_regex = (_GCC_FILE_LINE_RE +
               r'error:.*testing::internal::MockSpec<.* has no member named \''
               r'(?P<method>\w+)\'')
  clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'error: no member named \'(?P<method>\w+)\' in '
                 r'\'testing::internal::MockSpec<.*>\'')
  diagnosis = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:

  EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);

instead of:

  EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
  return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
# All diagnoser generators, applied in this order by Diagnose(). Each takes
# the compiler output string and yields (code, name, diagnosis) tuples.
_DIAGNOSERS = [
    _IncompleteByReferenceArgumentDiagnoser,
    _MockObjectPointerDiagnoser,
    _NeedToReturnNothingDiagnoser,
    _NeedToReturnReferenceDiagnoser,
    _NeedToReturnSomethingDiagnoser,
    _NeedToUseReturnNullDiagnoser,
    _NeedToUseSymbolDiagnoser,
    _OverloadedFunctionActionDiagnoser,
    _OverloadedFunctionMatcherDiagnoser,
    _OverloadedMethodActionDiagnoser,
    _TypeInTemplatedBaseDiagnoser,
    _WrongMockMethodMacroDiagnoser,
    _WrongParenPositionDiagnoser,
    ]
def Diagnose(msg):
  """Generates all possible diagnoses given the compiler error message.

  Args:
    msg: The compiler's error output as a single string.

  Returns:
    A list of unique formatted diagnosis strings ('[CODE - Name]\\n<text>'),
    in the order the diagnosers produced them.
  """
  msg = re.sub(r'\x1b\[[^m]*m', '', msg)  # Strips all color formatting.
  # Assuming the string is using the UTF-8 encoding, replaces the left and
  # the right single quote characters with apostrophes.
  msg = re.sub(r'(\xe2\x80\x98|\xe2\x80\x99)', "'", msg)

  diagnoses = []
  for diagnoser in _DIAGNOSERS:
    for diag in diagnoser(msg):
      diagnosis = '[%s - %s]\n%s' % diag
      # Idiomatic membership test; keeps first-seen order while de-duplicating.
      if diagnosis not in diagnoses:
        diagnoses.append(diagnosis)
  return diagnoses
def main():
  """Reads compiler errors from stdin and prints all matching diagnoses."""
  print ('Google Mock Doctor v%s - '
         'diagnoses problems in code using Google Mock.' % _VERSION)

  if sys.stdin.isatty():
    print ('Please copy and paste the compiler errors here. Press c-D when '
           'you are done:')
  else:
    print ('Waiting for compiler errors on stdin . . .')

  msg = sys.stdin.read().strip()
  diagnoses = Diagnose(msg)
  count = len(diagnoses)
  if not count:
    # No diagnoser recognized the message; echo it back and ask for a report.
    print ("""
Your compiler complained:
8<------------------------------------------------------------
%s
------------------------------------------------------------>8

Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and the compiler's error messages to
%s, you can be helped and I can get smarter --
win-win for us!""" % (msg, _EMAIL))
  else:
    print ('------------------------------------------------------------')
    print ('Your code appears to have the following',)
    if count > 1:
      print ('%s diseases:' % (count,))
    else:
      print ('disease:')
    i = 0
    for d in diagnoses:
      i += 1
      if count > 1:
        print ('\n#%s:' % (i,))
      print (d)
    print ("""
How did I do? If you think I'm wrong or unhelpful, please send your
source code and the compiler's error messages to %s.
Then you can be helped and I can get smarter -- I promise I won't be upset!""" %
           _EMAIL)


if __name__ == '__main__':
  main()
| 24,131 | 36.647426 | 80 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/generator/gmock_gen.py | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Driver for starting up Google Mock class generator."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
if __name__ == '__main__':
  # Add the directory of this script to the path so we can import gmock_class.
  sys.path.append(os.path.dirname(__file__))

  from cpp import gmock_class
  # Fix the docstring in case they require the usage, so that the usage text
  # names this wrapper script instead of the underlying module.
  gmock_class.__doc__ = gmock_class.__doc__.replace('gmock_class.py', __file__)
  gmock_class.main()
| 1,091 | 33.125 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/generator/cpp/gmock_class.py | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Google Mock classes from base classes.
This program will read in a C++ source file and output the Google Mock
classes for the specified classes. If no class is specified, all
classes in the source file are emitted.
Usage:
gmock_class.py header-file.h [ClassName]...
Output is sent to stdout.
"""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import re
import sys
from cpp import ast
from cpp import utils
# Preserve compatibility with Python 2.3.
try:
_dummy = set
except NameError:
import sets
set = sets.Set
_VERSION = (1, 0, 1) # The version of this script.
# How many spaces to indent. Can set me with the INDENT environment variable.
_INDENT = 2
def _GenerateMethods(output_lines, source, class_node):
  """Appends MOCK_METHOD* lines for each mockable method of class_node.

  Args:
    output_lines: list of strings; mock method lines are appended in place.
    source: the full C++ source text (used to slice parameter text).
    class_node: an ast.Class whose body is scanned for virtual methods.
  """
  function_type = (ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL |
                   ast.FUNCTION_OVERRIDE)
  ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR
  indent = ' ' * _INDENT

  for node in class_node.body:
    # We only care about virtual functions.
    if (isinstance(node, ast.Function) and
        node.modifiers & function_type and
        not node.modifiers & ctor_or_dtor):
      # Pick out all the elements we need from the original function.
      const = ''
      if node.modifiers & ast.FUNCTION_CONST:
        const = 'CONST_'
      return_type = 'void'
      if node.return_type:
        # Add modifiers like 'const'.
        modifiers = ''
        if node.return_type.modifiers:
          modifiers = ' '.join(node.return_type.modifiers) + ' '
        return_type = modifiers + node.return_type.name
        template_args = [arg.name for arg in node.return_type.templated_types]
        if template_args:
          return_type += '<' + ', '.join(template_args) + '>'
          if len(template_args) > 1:
            for line in [
                '// The following line won\'t really compile, as the return',
                '// type has multiple template arguments. To fix it, use a',
                '// typedef for the return type.']:
              output_lines.append(indent + line)
        if node.return_type.pointer:
          return_type += '*'
        if node.return_type.reference:
          return_type += '&'
        num_parameters = len(node.parameters)
        if len(node.parameters) == 1:
          first_param = node.parameters[0]
          if source[first_param.start:first_param.end].strip() == 'void':
            # We must treat T(void) as a function with no parameters.
            num_parameters = 0
      tmpl = ''
      if class_node.templated_types:
        tmpl = '_T'
      mock_method_macro = 'MOCK_%sMETHOD%d%s' % (const, num_parameters, tmpl)

      args = ''
      if node.parameters:
        # Due to the parser limitations, it is impossible to keep comments
        # while stripping the default parameters. When defaults are
        # present, we choose to strip them and comments (and produce
        # compilable code).
        # TODO(nnorwitz@google.com): Investigate whether it is possible to
        # preserve parameter name when reconstructing parameter text from
        # the AST.
        if len([param for param in node.parameters if param.default]) > 0:
          args = ', '.join(param.type.name for param in node.parameters)
        else:
          # Get the full text of the parameters from the start
          # of the first parameter to the end of the last parameter.
          start = node.parameters[0].start
          end = node.parameters[-1].end
          # Remove // comments.
          args_strings = re.sub(r'//.*', '', source[start:end])
          # Condense multiple spaces and eliminate newlines putting the
          # parameters together on a single line. Ensure there is a
          # space in an argument which is split by a newline without
          # intervening whitespace, e.g.: int\nBar
          args = re.sub(' +', ' ', args_strings.replace('\n', ' '))

      # Create the mock method definition.
      output_lines.extend(['%s%s(%s,' % (indent, mock_method_macro, node.name),
                           '%s%s(%s));' % (indent*3, return_type, args)])
def _GenerateMocks(filename, source, ast_list, desired_class_names):
  """Builds the full MockFoo class text for each selected class in the AST.

  Args:
    filename: source file name, used only in warning messages.
    source: full C++ source text (passed through to _GenerateMethods).
    ast_list: iterable of AST nodes produced by the parser.
    desired_class_names: set of class names to mock, or None for all classes.

  Returns:
    A list of output lines (no trailing newlines).
  """
  processed_class_names = set()
  lines = []
  for node in ast_list:
    if (isinstance(node, ast.Class) and node.body and
        # desired_class_names being None means that all classes are selected.
        (not desired_class_names or node.name in desired_class_names)):
      class_name = node.name
      parent_name = class_name
      processed_class_names.add(class_name)
      class_node = node
      # Add namespace before the class.
      if class_node.namespace:
        lines.extend(['namespace %s {' % n for n in class_node.namespace])  # }
        lines.append('')

      # Add template args for templated classes.
      if class_node.templated_types:
        # TODO(paulchang): The AST doesn't preserve template argument order,
        # so we have to make up names here.
        # TODO(paulchang): Handle non-type template arguments (e.g.
        # template<typename T, int N>).
        template_arg_count = len(class_node.templated_types.keys())
        template_args = ['T%d' % n for n in range(template_arg_count)]
        template_decls = ['typename ' + arg for arg in template_args]
        lines.append('template <' + ', '.join(template_decls) + '>')
        parent_name += '<' + ', '.join(template_args) + '>'

      # Add the class prolog.
      lines.append('class Mock%s : public %s {'  # }
                   % (class_name, parent_name))
      lines.append('%spublic:' % (' ' * (_INDENT // 2)))

      # Add all the methods.
      _GenerateMethods(lines, source, class_node)

      # Close the class.
      if lines:
        # If there are no virtual methods, no need for a public label.
        if len(lines) == 2:
          del lines[-1]

        # Only close the class if there really is a class.
        lines.append('};')
        lines.append('')  # Add an extra newline.

      # Close the namespace.
      if class_node.namespace:
        for i in range(len(class_node.namespace)-1, -1, -1):
          lines.append('} // namespace %s' % class_node.namespace[i])
        lines.append('')  # Add an extra newline.

  if desired_class_names:
    # Warn about any requested class that was never seen in the AST.
    missing_class_name_list = list(desired_class_names - processed_class_names)
    if missing_class_name_list:
      missing_class_name_list.sort()
      sys.stderr.write('Class(es) not found in %s: %s\n' %
                       (filename, ', '.join(missing_class_name_list)))
  elif not processed_class_names:
    sys.stderr.write('No class found in %s\n' % filename)

  return lines
def main(argv=sys.argv):
  """Command-line entry point: prints mocks for classes in a header file.

  Args:
    argv: argument vector; argv[1] is the header file, argv[2:] optional
        class names (default: all classes in the file).

  Returns:
    1 on usage/read error; None on success or KeyboardInterrupt.
  """
  if len(argv) < 2:
    sys.stderr.write('Google Mock Class Generator v%s\n\n' %
                     '.'.join(map(str, _VERSION)))
    sys.stderr.write(__doc__)
    return 1

  global _INDENT
  try:
    _INDENT = int(os.environ['INDENT'])
  except KeyError:
    pass  # INDENT not set; keep the default.
  except ValueError:
    # Was a bare `except:`; int() on a non-numeric value raises ValueError,
    # which is the only failure mode left once KeyError is handled above.
    sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT'))

  filename = argv[1]
  desired_class_names = None  # None means all classes in the source file.
  if len(argv) >= 3:
    desired_class_names = set(argv[2:])
  source = utils.ReadFile(filename)
  if source is None:
    return 1

  builder = ast.BuilderFromSource(source, filename)
  try:
    entire_ast = filter(None, builder.Generate())
  except KeyboardInterrupt:
    return
  except Exception:
    # Was a bare `except:` (hides SystemExit and other BaseExceptions).
    # An error message was already printed since we couldn't parse.
    sys.exit(1)
  else:
    lines = _GenerateMocks(filename, source, entire_ast, desired_class_names)
    sys.stdout.write('\n'.join(lines))
| 8,293 | 35.377193 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/generator/cpp/keywords.py | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
# Keyword categories. Each is a set of the C++ tokens in that category.
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())

CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())

OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())

CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())

# The union of every category above: all recognized C++ keywords.
ALL = set().union(TYPES, TYPE_MODIFIERS, ACCESS, CASTS, OTHERS,
                  OTHER_TYPES, CONTROL, EXCEPTION, LOOP)


def IsKeyword(token):
  """Returns True iff token is any C++ keyword."""
  return token in ALL


def IsBuiltinType(token):
  """Returns True iff token names a builtin type or a type modifier."""
  if token in ('virtual', 'inline'):
    # These only apply to methods, they can't be types by themselves.
    return False
  return token in TYPES or token in TYPE_MODIFIERS
| 2,004 | 32.416667 | 97 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py | #!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cpp import ast
from cpp import gmock_class
class TestCase(unittest.TestCase):
  """Helper class that adds assert methods."""

  def StripLeadingWhitespace(self, lines):
    """Strip leading whitespace in each line in 'lines'."""
    stripped = [line.lstrip() for line in lines.split('\n')]
    return '\n'.join(stripped)

  def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
    """Specialized assert that ignores the indent level."""
    self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines))
class GenerateMethodsTest(TestCase):
  """Tests for gmock_class._GenerateMethods: C++ class body -> MOCK_METHOD*."""

  def GenerateMethodSource(self, cpp_source):
    """Convert C++ source to Google Mock output source lines."""
    method_source_lines = []
    # <test> is a pseudo-filename, it is not read or written.
    builder = ast.BuilderFromSource(cpp_source, '<test>')
    ast_list = list(builder.Generate())
    gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
    return '\n'.join(method_source_lines)

  def testSimpleMethod(self):
    source = """
class Foo {
 public:
  virtual int Bar();
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testSimpleConstructorsAndDestructor(self):
    source = """
class Foo {
 public:
  Foo();
  Foo(int x);
  Foo(const Foo& f);
  Foo(Foo&& f);
  ~Foo();
  virtual int Bar() = 0;
};
"""
    # The constructors and destructor should be ignored.
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testVirtualDestructor(self):
    source = """
class Foo {
 public:
  virtual ~Foo();
  virtual int Bar() = 0;
};
"""
    # The destructor should be ignored.
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testExplicitlyDefaultedConstructorsAndDestructor(self):
    source = """
class Foo {
 public:
  Foo() = default;
  Foo(const Foo& f) = default;
  Foo(Foo&& f) = default;
  ~Foo() = default;
  virtual int Bar() = 0;
};
"""
    # The constructors and destructor should be ignored.
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testExplicitlyDeletedConstructorsAndDestructor(self):
    source = """
class Foo {
 public:
  Foo() = delete;
  Foo(const Foo& f) = delete;
  Foo(Foo&& f) = delete;
  ~Foo() = delete;
  virtual int Bar() = 0;
};
"""
    # The constructors and destructor should be ignored.
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testSimpleOverrideMethod(self):
    source = """
class Foo {
 public:
  int Bar() override;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testSimpleConstMethod(self):
    source = """
class Foo {
 public:
  virtual void Bar(bool flag) const;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));',
        self.GenerateMethodSource(source))

  def testExplicitVoid(self):
    source = """
class Foo {
 public:
  virtual int Bar(void);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0(Bar,\nint(void));',
        self.GenerateMethodSource(source))

  def testStrangeNewlineInParameter(self):
    source = """
class Foo {
 public:
  virtual void Bar(int
a) = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nvoid(int a));',
        self.GenerateMethodSource(source))

  def testDefaultParameters(self):
    source = """
class Foo {
 public:
  virtual void Bar(int a, char c = 'x') = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\nvoid(int, char));',
        self.GenerateMethodSource(source))

  def testMultipleDefaultParameters(self):
    source = """
class Foo {
 public:
  virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\nvoid(int, char));',
        self.GenerateMethodSource(source))

  def testRemovesCommentsWhenDefaultsArePresent(self):
    source = """
class Foo {
 public:
  virtual void Bar(int a = 42 /* a comment */,
                   char /* other comment */ c= 'x') = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\nvoid(int, char));',
        self.GenerateMethodSource(source))

  def testDoubleSlashCommentsInParameterListAreRemoved(self):
    source = """
class Foo {
 public:
  virtual void Bar(int a,  // inline comments should be elided.
                   int b   // inline comments should be elided.
                   ) const = 0;
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));',
        self.GenerateMethodSource(source))

  def testCStyleCommentsInParameterListAreNotRemoved(self):
    # NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
    # comments. Also note that C style comments after the last parameter
    # are still elided.
    source = """
class Foo {
 public:
  virtual const string& Bar(int /* keeper */, int b);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));',
        self.GenerateMethodSource(source))

  def testArgsOfTemplateTypes(self):
    source = """
class Foo {
 public:
  virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD2(Bar,\n'
        'int(const vector<int>& v, map<int, string>* output));',
        self.GenerateMethodSource(source))

  def testReturnTypeWithOneTemplateArg(self):
    source = """
class Foo {
 public:
  virtual vector<int>* Bar(int n);
};"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nvector<int>*(int n));',
        self.GenerateMethodSource(source))

  def testReturnTypeWithManyTemplateArgs(self):
    source = """
class Foo {
 public:
  virtual map<int, string> Bar();
};"""
    # Comparing the comment text is brittle - we'll think of something
    # better in case this gets annoying, but for now let's keep it simple.
    self.assertEqualIgnoreLeadingWhitespace(
        '// The following line won\'t really compile, as the return\n'
        '// type has multiple template arguments. To fix it, use a\n'
        '// typedef for the return type.\n'
        'MOCK_METHOD0(Bar,\nmap<int, string>());',
        self.GenerateMethodSource(source))

  def testSimpleMethodInTemplatedClass(self):
    source = """
template<class T>
class Foo {
 public:
  virtual int Bar();
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD0_T(Bar,\nint());',
        self.GenerateMethodSource(source))

  def testPointerArgWithoutNames(self):
    source = """
class Foo {
  virtual int Bar(C*);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nint(C*));',
        self.GenerateMethodSource(source))

  def testReferenceArgWithoutNames(self):
    source = """
class Foo {
  virtual int Bar(C&);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nint(C&));',
        self.GenerateMethodSource(source))

  def testArrayArgWithoutNames(self):
    source = """
class Foo {
  virtual int Bar(C[]);
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        'MOCK_METHOD1(Bar,\nint(C[]));',
        self.GenerateMethodSource(source))
class GenerateMocksTest(TestCase):
  """Tests for gmock_class._GenerateMocks: whole-file mock class generation."""

  def GenerateMocks(self, cpp_source):
    """Convert C++ source to complete Google Mock output source."""
    # <test> is a pseudo-filename, it is not read or written.
    filename = '<test>'
    builder = ast.BuilderFromSource(cpp_source, filename)
    ast_list = list(builder.Generate())
    lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
    return '\n'.join(lines)

  def testNamespaces(self):
    source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {

class Test {
 public:
  virtual void Foo();
};

} // namespace Baz
} // namespace Foo
"""
    expected = """\
namespace Foo {
namespace Baz {

class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};

} // namespace Baz
} // namespace Foo
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testClassWithStorageSpecifierMacro(self):
    source = """
class STORAGE_SPECIFIER Test {
 public:
  virtual void Foo();
};
"""
    expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testTemplatedForwardDeclaration(self):
    source = """
template <class T> class Forward; // Forward declaration should be ignored.
class Test {
 public:
  virtual void Foo();
};
"""
    expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testTemplatedClass(self):
    source = """
template <typename S, typename T>
class Test {
 public:
  virtual void Foo();
};
"""
    expected = """\
template <typename T0, typename T1>
class MockTest : public Test<T0, T1> {
public:
MOCK_METHOD0_T(Foo,
void());
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testTemplateInATemplateTypedef(self):
    source = """
class Test {
 public:
  typedef std::vector<std::list<int>> FooType;
  virtual void Bar(const FooType& test_arg);
};
"""
    expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))

  def testTemplateInATemplateTypedefWithComma(self):
    # The typedef below is deliberately malformed (unbalanced brackets) to
    # exercise the parser's tolerance; only the typedef name matters.
    source = """
class Test {
 public:
  typedef std::function<void(
      const vector<std::list<int>>&, int> FooType;
  virtual void Bar(const FooType& test_arg);
};
"""
    expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
    self.assertEqualIgnoreLeadingWhitespace(
        expected, self.GenerateMocks(source))


if __name__ == '__main__':
  unittest.main()
| 11,356 | 24.293987 | 78 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/generator/cpp/utils.py | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import sys
# Set to True to see the start/end token indices.
DEBUG = True
def ReadFile(filename, print_error=True):
  """Returns the contents of a file, or None if it cannot be read.

  Args:
    filename: path of the file to read.
    print_error: if True, print a message when the read fails.
  """
  try:
    # Context manager replaces the original try/finally close() pair.
    with open(filename) as fp:
      return fp.read()
  except IOError:
    if print_error:
      print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
    return None
| 1,153 | 26.47619 | 74 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/generator/cpp/__init__.py | 0 | 0 | 0 | py | |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/generator/cpp/ast.py | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
# Fallback definitions of builtins added in later Python versions; these are
# no-ops on any modern interpreter.
if not hasattr(builtins, 'reversed'):
  # Support Python 2.3 and earlier.
  def reversed(seq):
    for i in range(len(seq)-1, -1, -1):
      yield seq[i]

if not hasattr(builtins, 'next'):
  # Support Python 2.5 and earlier.
  def next(obj):
    return obj.next()
# Member visibility levels.
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)

# Bit flags describing properties of a parsed function/method; combined with
# bitwise OR in Function.modifiers.
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
  """Base AST node.

  Attributes:
    start, end: offsets delimiting this node in the token/source stream.
  """

  def __init__(self, start, end):
    self.start = start
    self.end = end

  def IsDeclaration(self):
    """Returns bool if this node is a declaration."""
    return False

  def IsDefinition(self):
    """Returns bool if this node is a definition."""
    return False

  def IsExportable(self):
    """Returns bool if this node exportable from a header file."""
    return False

  def Requires(self, node):
    """Does this AST node require the definition of the node passed in?"""
    return False

  def XXX__str__(self):
    # NOTE(review): deliberately not named __str__ (the XXX prefix disables
    # it); subclasses provide their own __str__ via _StringHelper.
    return self._StringHelper(self.__class__.__name__, '')

  def _StringHelper(self, name, suffix):
    # Include the start/end offsets only when utils.DEBUG is enabled.
    if not utils.DEBUG:
      return '%s(%s)' % (name, suffix)
    return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)

  def __repr__(self):
    return str(self)
class Define(Node):
  """A preprocessor `#define name definition` node."""

  def __init__(self, start, end, name, definition):
    Node.__init__(self, start, end)
    self.name = name
    self.definition = definition

  def __str__(self):
    return self._StringHelper(self.__class__.__name__,
                              '%s %s' % (self.name, self.definition))
class Include(Node):
  """An `#include` node; `system` is True for angle-bracket includes."""

  def __init__(self, start, end, filename, system):
    Node.__init__(self, start, end)
    self.filename = filename
    self.system = system

  def __str__(self):
    # System includes render as <name>, local includes as "name".
    fmt = '<%s>' if self.system else '"%s"'
    return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
  """A `goto label` statement node."""

  def __init__(self, start, end, label):
    Node.__init__(self, start, end)
    self.label = label

  def __str__(self):
    return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
  """A generic expression statement; `expr` holds the expression tokens."""

  def __init__(self, start, end, expr):
    Node.__init__(self, start, end)
    self.expr = expr

  def Requires(self, node):
    # TODO(nnorwitz): impl.
    return False

  def __str__(self):
    return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
    """A delete expression; the operand tokens live in Expr.expr."""
    pass
class Friend(Expr):
    """A friend declaration, recorded with the namespace it appeared in."""
    def __init__(self, start, end, expr, namespace):
        Expr.__init__(self, start, end, expr)
        # Copy: the caller's namespace stack mutates as parsing proceeds.
        self.namespace = namespace[:]
class Using(Node):
    """A using declaration/directive; `names` holds its token sequence."""

    def __init__(self, start, end, names):
        Node.__init__(self, start, end)
        self.names = names

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
    """A single function parameter: type, name, and optional default tokens."""

    def __init__(self, start, end, name, parameter_type, default):
        Node.__init__(self, start, end)
        self.name = name
        self.type = parameter_type
        self.default = default

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return self.type.name == node.name

    def __str__(self):
        text = '%s %s' % (str(self.type), self.name)
        if self.default:
            text += ' = ' + ''.join(d.name for d in self.default)
        return self._StringHelper(self.__class__.__name__, text)
class _GenericDeclaration(Node):
    """Base for named declarations that carry a namespace context."""

    def __init__(self, start, end, name, namespace):
        Node.__init__(self, start, end)
        self.name = name
        # Copy: the caller's namespace stack mutates as parsing proceeds.
        self.namespace = namespace[:]

    def FullName(self):
        """Return the name qualified with its namespace, when one exists."""
        if self.namespace and self.namespace[-1]:
            return '::'.join(self.namespace) + '::' + self.name
        return self.name

    def _TypeStringHelper(self, suffix):
        # Append "in ns1::ns2" context; None entries mark anonymous namespaces.
        if self.namespace:
            labels = [ns or '<anonymous>' for ns in self.namespace]
            suffix += ' in ' + '::'.join(labels)
        return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
    """A variable declaration: type, name, and optional initializer."""

    def __init__(self, start, end, name, var_type, initial_value, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.type = var_type
        self.initial_value = initial_value

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return self.type.name == node.name

    def ToString(self):
        """Return a string that tries to reconstitute the variable decl."""
        text = '%s %s' % (self.type, self.name)
        if self.initial_value:
            text += ' = ' + self.initial_value
        return text

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
    """A typedef mapping `name` to the aliased type tokens in `alias`."""

    def __init__(self, start, end, name, alias, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.alias = alias

    def IsDefinition(self):
        return True

    def IsExportable(self):
        return True

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return any(tok is not None and tok.name == node.name
                   for tok in self.alias)

    def __str__(self):
        return self._TypeStringHelper('%s, %s' % (self.name, self.alias))
class _NestedType(_GenericDeclaration):
    """Base for enum/union declarations that carry their field tokens."""

    def __init__(self, start, end, name, fields, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.fields = fields

    def IsDefinition(self):
        return True

    def IsExportable(self):
        return True

    def __str__(self):
        return self._TypeStringHelper('%s, {%s}' % (self.name, self.fields))
class Union(_NestedType):
    """A union declaration."""
    pass
class Enum(_NestedType):
    """An enum declaration."""
    pass
class Class(_GenericDeclaration):
    """A class declaration or definition, with base-class and body tokens."""

    def __init__(self, start, end, name, bases, templated_types, body, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.bases = bases
        self.body = body
        self.templated_types = templated_types

    def IsDeclaration(self):
        # A forward declaration has neither bases nor a body.
        return self.bases is None and self.body is None

    def IsDefinition(self):
        return not self.IsDeclaration()

    def IsExportable(self):
        return not self.IsDeclaration()

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        if self.bases:
            # TODO(nnorwitz): bases are tokens, do name comparison.
            for base_tokens in self.bases:
                if any(tok.name == node.name for tok in base_tokens):
                    return True
        # TODO(nnorwitz): search in body too.
        return False

    def __str__(self):
        name = self.name
        if self.templated_types:
            name += '<%s>' % self.templated_types
        return self._TypeStringHelper(
            '%s, %s, %s' % (name, self.bases, self.body))
class Struct(Class):
    """A struct declaration; behaves exactly like Class."""
    pass
class Function(_GenericDeclaration):
    """A free function with return type, parameters, modifiers, and body."""

    def __init__(self, start, end, name, return_type, parameters,
                 modifiers, templated_types, body, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        converter = TypeConverter(namespace)
        # Convert raw token sequences into structured Type/Parameter nodes.
        self.return_type = converter.CreateReturnType(return_type)
        self.parameters = converter.ToParameters(parameters)
        self.modifiers = modifiers
        self.body = body
        self.templated_types = templated_types

    def IsDeclaration(self):
        return self.body is None

    def IsDefinition(self):
        return self.body is not None

    def IsExportable(self):
        # static functions aren't visible outside the translation unit.
        if self.return_type and 'static' in self.return_type.modifiers:
            return False
        # A None namespace entry denotes an anonymous namespace: not exportable.
        return None not in self.namespace

    def Requires(self, node):
        if self.parameters:
            # TODO(nnorwitz): parameters are tokens, do name comparison.
            if any(p.name == node.name for p in self.parameters):
                return True
        # TODO(nnorwitz): search in body too.
        return False

    def __str__(self):
        # TODO(nnorwitz): add templated_types.
        suffix = ('%s %s(%s), 0x%02x, %s' %
                  (self.return_type, self.name, self.parameters,
                   self.modifiers, self.body))
        return self._TypeStringHelper(suffix)
class Method(Function):
    """A member function; `in_class` names the class it belongs to."""
    def __init__(self, start, end, name, in_class, return_type, parameters,
                 modifiers, templated_types, body, namespace):
        Function.__init__(self, start, end, name, return_type, parameters,
                          modifiers, templated_types, body, namespace)
        # TODO(nnorwitz): in_class could also be a namespace which can
        # mess up finding functions properly.
        self.in_class = in_class
class Type(_GenericDeclaration):
    """Type used for any variable (eg class, primitive, struct, etc)."""

    def __init__(self, start, end, name, templated_types, modifiers,
                 reference, pointer, array):
        """
        Args:
          name: str name of main type
          templated_types: [Class (Type?)] template type info between <>
          modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
          reference, pointer, array: bools
        """
        _GenericDeclaration.__init__(self, start, end, name, [])
        self.templated_types = templated_types
        # With no explicit name, promote the last modifier (e.g. "unsigned").
        if not name and modifiers:
            self.name = modifiers.pop()
        self.modifiers = modifiers
        self.reference = reference
        self.pointer = pointer
        self.array = array

    def __str__(self):
        pieces = []
        if self.modifiers:
            pieces.append(' '.join(self.modifiers) + ' ')
        pieces.append(str(self.name))
        if self.templated_types:
            pieces.append('<%s>' % self.templated_types)
        if self.reference:
            pieces.append('&')
        if self.pointer:
            pieces.append('*')
        if self.array:
            pieces.append('[]')
        return self._TypeStringHelper(''.join(pieces))

    # By definition, Is* are always False.  A Type can only exist in
    # some sort of variable declaration, parameter, or return value.
    def IsDeclaration(self):
        return False

    def IsDefinition(self):
        return False

    def IsExportable(self):
        return False
class TypeConverter(object):
    """Converts raw token sequences into Type/Parameter AST nodes.

    Operates relative to a namespace stack so created nodes know their
    enclosing namespaces.
    """
    def __init__(self, namespace_stack):
        self.namespace_stack = namespace_stack
    def _GetTemplateEnd(self, tokens, start):
        # Return (tokens strictly inside <...>, index just past the closing '>').
        # Assumes the opening '<' was already consumed (count starts at 1).
        count = 1
        end = start
        while 1:
            token = tokens[end]
            end += 1
            if token.name == '<':
                count += 1
            elif token.name == '>':
                count -= 1
                if count == 0:
                    break
        return tokens[start:end-1], end
    def ToType(self, tokens):
        """Convert [Token,...] to [Class(...), ] useful for base classes.
        For example, code like class Foo : public Bar<x, y> { ... };
        the "Bar<x, y>" portion gets converted to an AST.
        Returns:
          [Class(...), ...]
        """
        result = []
        name_tokens = []
        reference = pointer = array = False
        def AddType(templated_types):
            # Partition tokens into name and modifier tokens.
            names = []
            modifiers = []
            for t in name_tokens:
                if keywords.IsKeyword(t.name):
                    modifiers.append(t.name)
                else:
                    names.append(t.name)
            name = ''.join(names)
            if name_tokens:
                result.append(Type(name_tokens[0].start, name_tokens[-1].end,
                                   name, templated_types, modifiers,
                                   reference, pointer, array))
            del name_tokens[:]
        i = 0
        end = len(tokens)
        while i < end:
            token = tokens[i]
            if token.name == '<':
                # Recurse to convert the template arguments themselves.
                new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
                AddType(self.ToType(new_tokens))
                # If there is a comma after the template, we need to consume
                # that here otherwise it becomes part of the name.
                i = new_end
                reference = pointer = array = False
            elif token.name == ',':
                AddType([])
                reference = pointer = array = False
            elif token.name == '*':
                pointer = True
            elif token.name == '&':
                reference = True
            elif token.name == '[':
                # Arrays are treated as pointer-like here.
                pointer = True
            elif token.name == ']':
                pass
            else:
                name_tokens.append(token)
            i += 1
        if name_tokens:
            # No '<' in the tokens, just a simple name and no template.
            AddType([])
        return result
    def DeclarationToParts(self, parts, needs_name_removed):
        """Split declaration tokens into (name, type_name, templated_types,
        modifiers, default, other_tokens)."""
        name = None
        default = []
        if needs_name_removed:
            # Handle default (initial) values properly.
            for i, t in enumerate(parts):
                if t.name == '=':
                    default = parts[i+1:]
                    name = parts[i-1].name
                    # An array declarator: the real name precedes "[...]".
                    if name == ']' and parts[i-2].name == '[':
                        name = parts[i-3].name
                        i -= 1
                    parts = parts[:i-1]
                    break
            else:
                # No '=' found: the trailing NAME token is the declared name.
                if parts[-1].token_type == tokenize.NAME:
                    name = parts.pop().name
                else:
                    # TODO(nnorwitz): this is a hack that happens for code like
                    # Register(Foo<T>); where it thinks this is a function call
                    # but it's actually a declaration.
                    name = '???'
        modifiers = []
        type_name = []
        other_tokens = []
        templated_types = []
        i = 0
        end = len(parts)
        while i < end:
            p = parts[i]
            if keywords.IsKeyword(p.name):
                modifiers.append(p.name)
            elif p.name == '<':
                templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
                templated_types = self.ToType(templated_tokens)
                i = new_end - 1
                # Don't add a spurious :: to data members being initialized.
                next_index = i + 1
                if next_index < end and parts[next_index].name == '::':
                    i += 1
            elif p.name in ('[', ']', '='):
                # These are handled elsewhere.
                other_tokens.append(p)
            elif p.name not in ('*', '&', '>'):
                # Ensure that names have a space between them.
                if (type_name and type_name[-1].token_type == tokenize.NAME and
                        p.token_type == tokenize.NAME):
                    type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
                type_name.append(p)
            else:
                other_tokens.append(p)
            i += 1
        type_name = ''.join([t.name for t in type_name])
        return name, type_name, templated_types, modifiers, default, other_tokens
    def ToParameters(self, tokens):
        """Convert a flat parameter-list token sequence to [Parameter, ...]."""
        if not tokens:
            return []
        result = []
        name = type_name = ''
        type_modifiers = []
        pointer = reference = array = False
        first_token = None
        default = []
        def AddParameter(end):
            # Flush the accumulated tokens as one Parameter ending at `end`.
            if default:
                del default[0]  # Remove flag.
            parts = self.DeclarationToParts(type_modifiers, True)
            (name, type_name, templated_types, modifiers,
             unused_default, unused_other_tokens) = parts
            parameter_type = Type(first_token.start, first_token.end,
                                  type_name, templated_types, modifiers,
                                  reference, pointer, array)
            p = Parameter(first_token.start, end, name,
                          parameter_type, default)
            result.append(p)
        template_count = 0
        for s in tokens:
            if not first_token:
                first_token = s
            if s.name == '<':
                template_count += 1
            elif s.name == '>':
                template_count -= 1
            if template_count > 0:
                # Inside template args: commas there don't split parameters.
                type_modifiers.append(s)
                continue
            if s.name == ',':
                AddParameter(s.start)
                # Reset accumulators for the next parameter.
                name = type_name = ''
                type_modifiers = []
                pointer = reference = array = False
                first_token = None
                default = []
            elif s.name == '*':
                pointer = True
            elif s.name == '&':
                reference = True
            elif s.name == '[':
                array = True
            elif s.name == ']':
                pass  # Just don't add to type_modifiers.
            elif s.name == '=':
                # Got a default value.  Add any value (None) as a flag.
                default.append(None)
            elif default:
                default.append(s)
            else:
                type_modifiers.append(s)
        AddParameter(tokens[-1].end)
        return result
    def CreateReturnType(self, return_type_seq):
        """Build a Type node for a return-type token sequence (or None)."""
        if not return_type_seq:
            return None
        start = return_type_seq[0].start
        end = return_type_seq[-1].end
        _, name, templated_types, modifiers, default, other_tokens = \
            self.DeclarationToParts(return_type_seq, False)
        names = [n.name for n in other_tokens]
        reference = '&' in names
        pointer = '*' in names
        array = '[' in names
        return Type(start, end, name, templated_types, modifiers,
                    reference, pointer, array)
    def GetTemplateIndices(self, names):
        # names is a list of strings.
        # Return (index of first '<', index just past the last '>').
        start = names.index('<')
        end = len(names) - 1
        while end > 0:
            if names[end] == '>':
                break
            end -= 1
        return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
             namespace_stack=None):
    """Build an AST builder over a token stream.

    Args:
      token_stream: iterator yielding tokenize.Token objects.
      filename: str, used in error reports.
      in_class: name of the enclosing class ('' for file scope).
      visibility: current access level, if inside a class.
      namespace_stack: list of enclosing namespace names (copied).
    """
    self.tokens = token_stream
    self.filename = filename
    # TODO(nnorwitz): use a better data structure (deque) for the queue.
    # Switching directions of the "queue" improved perf by about 25%.
    # Using a deque should be even better since we access from both sides.
    self.token_queue = []
    # BUG FIX: the default used to be a mutable `[]` argument; default to
    # None and create a fresh list so no state can leak between instances.
    self.namespace_stack = [] if namespace_stack is None else namespace_stack[:]
    self.in_class = in_class
    if in_class is None:
        self.in_class_name_only = None
    else:
        # Strip any qualification; dtors/ctors compare against the bare name.
        self.in_class_name_only = in_class.split('::')[-1]
    self.visibility = visibility
    self.in_function = False
    self.current_token = None
    # Keep the state whether we are currently handling a typedef or not.
    self._handling_typedef = False
    self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
    """Report a parse problem, with recent queue context, to stderr."""
    recent = list(reversed(self.token_queue[-20:]))
    sys.stderr.write('Got %s in %s @ %s %s\n' %
                     (msg, self.filename, token, recent))
def Generate(self):
    """Yield top-level AST nodes until the token stream is exhausted."""
    while 1:
        token = self._GetNextToken()
        if not token:
            break
        # Get the next token.
        self.current_token = token
        # Dispatch on the next token type.
        if token.token_type == _INTERNAL_TOKEN:
            # Internal bookkeeping token: leave the current namespace.
            if token.name == _NAMESPACE_POP:
                self.namespace_stack.pop()
            continue
        try:
            result = self._GenerateOne(token)
            if result is not None:
                yield result
        except:
            # Log parsing context before propagating any error.
            self.HandleError('exception', token)
            raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
                    ref_pointer_name_seq, templated_types, value=None):
    """Build a VariableDeclaration node positioned at pos_token."""
    # Derive ref/pointer/array-ness from the raw declarator sequence.
    is_ref = '&' in ref_pointer_name_seq
    is_ptr = '*' in ref_pointer_name_seq
    is_array = '[' in ref_pointer_name_seq
    var_type = Type(pos_token.start, pos_token.end, type_name,
                    templated_types, type_modifiers,
                    is_ref, is_ptr, is_array)
    return VariableDeclaration(pos_token.start, pos_token.end,
                               name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
    """Parse one top-level construct starting at `token`.

    Returns an AST node, or None when the construct produces nothing
    (e.g. unhandled syntax, skipped preprocessor blocks).
    """
    if token.token_type == tokenize.NAME:
        if (keywords.IsKeyword(token.name) and
                not keywords.IsBuiltinType(token.name)):
            # Non-type keyword: dispatch to the matching handle_* method.
            method = getattr(self, 'handle_' + token.name)
            return method()
        elif token.name == self.in_class_name_only:
            # The token name is the same as the class, must be a ctor if
            # there is a paren.  Otherwise, it's the return type.
            # Peek ahead to get the next token to figure out which.
            next = self._GetNextToken()
            self._AddBackToken(next)
            if next.token_type == tokenize.SYNTAX and next.name == '(':
                return self._GetMethod([token], FUNCTION_CTOR, None, True)
            # Fall through--handle like any other method.
        # Handle data or function declaration/definition.
        syntax = tokenize.SYNTAX
        temp_tokens, last_token = \
            self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
        temp_tokens.insert(0, token)
        if last_token.name == '(':
            # If there is an assignment before the paren,
            # this is an expression, not a method.
            expr = bool([e for e in temp_tokens if e.name == '='])
            if expr:
                new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
                temp_tokens.append(last_token)
                temp_tokens.extend(new_temp)
                # Synthesize a ';' so the data-handling branch below runs.
                last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
        if last_token.name == '[':
            # Handle array, this isn't a method, unless it's an operator.
            # TODO(nnorwitz): keep the size somewhere.
            # unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
            temp_tokens.append(last_token)
            if temp_tokens[-2].name == 'operator':
                temp_tokens.append(self._GetNextToken())
            else:
                temp_tokens2, last_token = \
                    self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
                temp_tokens.extend(temp_tokens2)
        if last_token.name == ';':
            # Handle data, this isn't a method.
            parts = self.converter.DeclarationToParts(temp_tokens, True)
            (name, type_name, templated_types, modifiers, default,
             unused_other_tokens) = parts
            t0 = temp_tokens[0]
            names = [t.name for t in temp_tokens]
            if templated_types:
                # Drop the template argument tokens from the declarator names.
                start, end = self.converter.GetTemplateIndices(names)
                names = names[:start] + names[end:]
            default = ''.join([t.name for t in default])
            return self._CreateVariable(t0, name, type_name, modifiers,
                                        names, templated_types, default)
        if last_token.name == '{':
            # Re-queue everything after the first token and re-dispatch on it.
            self._AddBackTokens(temp_tokens[1:])
            self._AddBackToken(last_token)
            method_name = temp_tokens[0].name
            method = getattr(self, 'handle_' + method_name, None)
            if not method:
                # Must be declaring a variable.
                # TODO(nnorwitz): handle the declaration.
                return None
            return method()
        return self._GetMethod(temp_tokens, 0, None, False)
    elif token.token_type == tokenize.SYNTAX:
        if token.name == '~' and self.in_class:
            # Must be a dtor (probably not in method body).
            token = self._GetNextToken()
            # self.in_class can contain A::Name, but the dtor will only
            # be Name.  Make sure to compare against the right value.
            if (token.token_type == tokenize.NAME and
                    token.name == self.in_class_name_only):
                return self._GetMethod([token], FUNCTION_DTOR, None, True)
        # TODO(nnorwitz): handle a lot more syntax.
    elif token.token_type == tokenize.PREPROCESSOR:
        # TODO(nnorwitz): handle more preprocessor directives.
        # token starts with a #, so remove it and strip whitespace.
        name = token.name[1:].lstrip()
        if name.startswith('include'):
            # Remove "include".
            name = name[7:].strip()
            assert name
            # Handle #include \<newline> "header-on-second-line.h".
            if name.startswith('\\'):
                name = name[1:].strip()
            assert name[0] in '<"', token
            assert name[-1] in '>"', token
            system = name[0] == '<'
            filename = name[1:-1]
            return Include(token.start, token.end, filename, system)
        if name.startswith('define'):
            # Remove "define".
            name = name[6:].strip()
            assert name
            # Split macro name from its replacement text at first whitespace.
            value = ''
            for i, c in enumerate(name):
                if c.isspace():
                    value = name[i:].lstrip()
                    name = name[:i]
                    break
            return Define(token.start, token.end, name, value)
        if name.startswith('if') and name[2:3].isspace():
            condition = name[3:].strip()
            if condition.startswith('0') or condition.startswith('(0)'):
                # Dead #if 0 block: skip everything up to the matching #endif.
                self._SkipIf0Blocks()
    return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
    """Like _GetVarTokensUpTo, but discard the terminating token."""
    tokens, _ = self._GetVarTokensUpTo(expected_token_type, expected_token)
    return tokens
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
    """Collect tokens until one matches; return (tokens, terminator)."""
    collected = []
    current = self._GetNextToken()
    while not (current.token_type == expected_token_type and
               current.name in expected_tokens):
        collected.append(current)
        current = self._GetNextToken()
    return collected, current
# TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
    # Consume and discard tokens up to and including the given token.
    unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
    """Consume tokens until the #endif matching the current #if is seen."""
    count = 1
    while 1:
        token = self._GetNextToken()
        if token.token_type != tokenize.PREPROCESSOR:
            continue
        name = token.name[1:].lstrip()
        if name.startswith('endif'):
            count -= 1
            if count == 0:
                break
        elif name.startswith('if'):
            # Nested conditional: require one more matching #endif.
            count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
    """Yield tokens up to and including the balanced close_paren.

    Tracks nesting, so inner open/close pairs are passed through intact.
    """
    if GetNextToken is None:
        GetNextToken = self._GetNextToken
    # Assumes the current token is open_paren and we will consume
    # and return up to the close_paren.
    count = 1
    token = GetNextToken()
    while 1:
        if token.token_type == tokenize.SYNTAX:
            if token.name == open_paren:
                count += 1
            elif token.name == close_paren:
                count -= 1
                if count == 0:
                    break
        yield token
        token = GetNextToken()
    # Also yield the final closing token.
    yield token
def _GetParameters(self):
    """Yield parameter-list tokens, including the trailing ')'."""
    return self._GetMatchingChar('(', ')')
def GetScope(self):
    """Yield tokens of a brace-delimited scope, including the trailing '}'."""
    return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
    """Return the next token, draining pushed-back tokens first."""
    queue = self.token_queue
    return queue.pop() if queue else next(self.tokens)
def _AddBackToken(self, token):
    """Push one token back so _GetNextToken returns it again later."""
    if token.whence == tokenize.WHENCE_STREAM:
        token.whence = tokenize.WHENCE_QUEUE
        # The queue pops from the end, so a token freshly read from the
        # stream goes to the front: previously queued tokens come out first.
        self.token_queue.insert(0, token)
    else:
        assert token.whence == tokenize.WHENCE_QUEUE, token
        self.token_queue.append(token)
def _AddBackTokens(self, tokens):
    """Push a token sequence back, preserving its original order.

    Same front/back policy as _AddBackToken, applied to the whole list;
    the list is reversed because the queue pops from the end.
    """
    if tokens:
        if tokens[-1].whence == tokenize.WHENCE_STREAM:
            for token in tokens:
                token.whence = tokenize.WHENCE_QUEUE
            self.token_queue[:0] = reversed(tokens)
        else:
            assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
            self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
    """Returns ([tokens], next_token_info).

    Reads a (possibly qualified/templated) identifier either from the
    token stream or, if `seq` is given, from that sequence.
    """
    GetNextToken = self._GetNextToken
    if seq is not None:
        it = iter(seq)
        GetNextToken = lambda: next(it)
    next_token = GetNextToken()
    tokens = []
    last_token_was_name = False
    while (next_token.token_type == tokenize.NAME or
           (next_token.token_type == tokenize.SYNTAX and
            next_token.name in ('::', '<'))):
        # Two NAMEs in a row means the identifier should terminate.
        # It's probably some sort of variable declaration.
        if last_token_was_name and next_token.token_type == tokenize.NAME:
            break
        last_token_was_name = next_token.token_type == tokenize.NAME
        tokens.append(next_token)
        # Handle templated names.
        if next_token.name == '<':
            tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
            last_token_was_name = True
        next_token = GetNextToken()
    return tokens, next_token
def GetMethod(self, modifiers, templated_types):
    """Parse a method whose return type and name run up to the next '('."""
    return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
    assert len(return_type_and_name) >= 1
    return self._GetMethod(return_type_and_name, modifiers,
                           templated_types, False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
               get_paren):
    """Parse the rest of a function/method after its return type and name.

    Returns a Function or Method node, or a VariableDeclaration for
    constructs that only look like methods (function pointers).
    `get_paren` means the '(' has not been consumed yet.
    """
    template_portion = None
    if get_paren:
        token = self._GetNextToken()
        assert token.token_type == tokenize.SYNTAX, token
        if token.name == '<':
            # Handle templatized dtors.
            template_portion = [token]
            template_portion.extend(self._GetMatchingChar('<', '>'))
            token = self._GetNextToken()
            assert token.token_type == tokenize.SYNTAX, token
        assert token.name == '(', token
    name = return_type_and_name.pop()
    # Handle templatized ctors.
    if name.name == '>':
        index = 1
        while return_type_and_name[index].name != '<':
            index += 1
        template_portion = return_type_and_name[index:] + [name]
        del return_type_and_name[index:]
        name = return_type_and_name.pop()
    elif name.name == ']':
        # operator[]: rebuild a single synthetic NAME token for it.
        rt = return_type_and_name
        assert rt[-1].name == '[', return_type_and_name
        assert rt[-2].name == 'operator', return_type_and_name
        name_seq = return_type_and_name[-2:]
        del return_type_and_name[-2:]
        name = tokenize.Token(tokenize.NAME, 'operator[]',
                              name_seq[0].start, name.end)
        # Get the open paren so _GetParameters() below works.
        unused_open_paren = self._GetNextToken()
    # TODO(nnorwitz): store template_portion.
    return_type = return_type_and_name
    indices = name
    if return_type:
        indices = return_type[0]
    # Force ctor for templatized ctors.
    if name.name == self.in_class and not modifiers:
        modifiers |= FUNCTION_CTOR
    parameters = list(self._GetParameters())
    del parameters[-1]  # Remove trailing ')'.
    # Handling operator() is especially weird.
    if name.name == 'operator' and not parameters:
        token = self._GetNextToken()
        assert token.name == '(', token
        parameters = list(self._GetParameters())
        del parameters[-1]  # Remove trailing ')'.
    # Collect trailing modifiers (const, throw(...), override, macros...).
    token = self._GetNextToken()
    while token.token_type == tokenize.NAME:
        modifier_token = token
        token = self._GetNextToken()
        if modifier_token.name == 'const':
            modifiers |= FUNCTION_CONST
        elif modifier_token.name == '__attribute__':
            # TODO(nnorwitz): handle more __attribute__ details.
            modifiers |= FUNCTION_ATTRIBUTE
            assert token.name == '(', token
            # Consume everything between the (parens).
            unused_tokens = list(self._GetMatchingChar('(', ')'))
            token = self._GetNextToken()
        elif modifier_token.name == 'throw':
            modifiers |= FUNCTION_THROW
            assert token.name == '(', token
            # Consume everything between the (parens).
            unused_tokens = list(self._GetMatchingChar('(', ')'))
            token = self._GetNextToken()
        elif modifier_token.name == 'override':
            modifiers |= FUNCTION_OVERRIDE
        elif modifier_token.name == modifier_token.name.upper():
            # HACK(nnorwitz): assume that all upper-case names
            # are some macro we aren't expanding.
            modifiers |= FUNCTION_UNKNOWN_ANNOTATION
        else:
            self.HandleError('unexpected token', modifier_token)
    assert token.token_type == tokenize.SYNTAX, token
    # Handle ctor initializers.
    if token.name == ':':
        # TODO(nnorwitz): anything else to handle for initializer list?
        while token.name != ';' and token.name != '{':
            token = self._GetNextToken()
    # Handle pointer to functions that are really data but look
    # like method declarations.
    if token.name == '(':
        if parameters[0].name == '*':
            # name contains the return type.
            name = parameters.pop()
            # parameters contains the name of the data.
            modifiers = [p.name for p in parameters]
            # Already at the ( to open the parameter list.
            function_parameters = list(self._GetMatchingChar('(', ')'))
            del function_parameters[-1]  # Remove trailing ')'.
            # TODO(nnorwitz): store the function_parameters.
            token = self._GetNextToken()
            assert token.token_type == tokenize.SYNTAX, token
            assert token.name == ';', token
            return self._CreateVariable(indices, name.name, indices.name,
                                        modifiers, '', None)
        # At this point, we got something like:
        #  return_type (type::*name_)(params);
        # This is a data member called name_ that is a function pointer.
        # With this code: void (sq_type::*field_)(string&);
        # We get: name=void return_type=[] parameters=sq_type ... field_
        # TODO(nnorwitz): is return_type always empty?
        # TODO(nnorwitz): this isn't even close to being correct.
        # Just put in something so we don't crash and can move on.
        real_name = parameters[-1]
        modifiers = [p.name for p in self._GetParameters()]
        del modifiers[-1]  # Remove trailing ')'.
        return self._CreateVariable(indices, real_name.name, indices.name,
                                    modifiers, '', None)
    if token.name == '{':
        body = list(self.GetScope())
        del body[-1]  # Remove trailing '}'.
    else:
        body = None
        if token.name == '=':
            token = self._GetNextToken()
            if token.name == 'default' or token.name == 'delete':
                # Ignore explicitly defaulted and deleted special members
                # in C++11.
                token = self._GetNextToken()
            else:
                # Handle pure-virtual declarations.
                assert token.token_type == tokenize.CONSTANT, token
                assert token.name == '0', token
                modifiers |= FUNCTION_PURE_VIRTUAL
                token = self._GetNextToken()
        if token.name == '[':
            # TODO(nnorwitz): store tokens and improve parsing.
            # template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
            tokens = list(self._GetMatchingChar('[', ']'))
            token = self._GetNextToken()
        assert token.name == ';', (token, return_type_and_name, parameters)
    # Looks like we got a method, not a function.
    if len(return_type) > 2 and return_type[-1].name == '::':
        return_type, in_class = \
            self._GetReturnTypeAndClassName(return_type)
        return Method(indices.start, indices.end, name.name, in_class,
                      return_type, parameters, modifiers, templated_types,
                      body, self.namespace_stack)
    return Function(indices.start, indices.end, name.name, return_type,
                    parameters, modifiers, templated_types, body,
                    self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
    """Split `ReturnType Class::` tokens into (return_type, class_name)."""
    # Splitting the return type from the class name in a method
    # can be tricky.  For example, Return::Type::Is::Hard::To::Find().
    # Where is the return type and where is the class name?
    # The heuristic used is to pull the last name as the class name.
    # This includes all the templated type info.
    # TODO(nnorwitz): if there is only One name like in the
    # example above, punt and assume the last bit is the class name.
    # Ignore a :: prefix, if exists so we can find the first real name.
    i = 0
    if token_seq[0].name == '::':
        i = 1
    # Ignore a :: suffix, if exists.
    end = len(token_seq) - 1
    if token_seq[end-1].name == '::':
        end -= 1
    # Make a copy of the sequence so we can append a sentinel
    # value. This is required for GetName will has to have some
    # terminating condition beyond the last name.
    seq_copy = token_seq[i:end]
    seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
    names = []
    while i < end:
        # Iterate through the sequence parsing out each name.
        new_name, next = self.GetName(seq_copy[i:])
        assert new_name, 'Got empty new_name, next=%s' % next
        # We got a pointer or ref.  Add it to the name.
        if next and next.token_type == tokenize.SYNTAX:
            new_name.append(next)
        names.append(new_name)
        i += len(new_name)
    # Now that we have the names, it's time to undo what we did.
    # Remove the sentinel value.
    names[-1].pop()
    # Flatten the token sequence for the return type.
    return_type = [e for seq in names[:-1] for e in seq]
    # The class name is the last name.
    class_name = names[-1]
    return return_type, class_name
# No-op handlers for builtin type keywords; declarations that start with
# these are parsed by the generic declaration logic in _GenerateOne.
def handle_bool(self):
    pass
def handle_char(self):
    pass
def handle_int(self):
    pass
def handle_long(self):
    pass
def handle_short(self):
    pass
def handle_double(self):
    pass
def handle_float(self):
    pass
def handle_void(self):
    pass
def handle_wchar_t(self):
    pass
def handle_unsigned(self):
    pass
def handle_signed(self):
    pass
def _GetNestedType(self, ctor):
    """Parse an enum/union (node class `ctor`), or the variable it declares."""
    name = None
    name_tokens, token = self.GetName()
    if name_tokens:
        name = ''.join([t.name for t in name_tokens])
    # Handle forward declarations.
    if token.token_type == tokenize.SYNTAX and token.name == ';':
        return ctor(token.start, token.end, name, None,
                    self.namespace_stack)
    if token.token_type == tokenize.NAME and self._handling_typedef:
        # Inside a typedef the NAME belongs to the typedef; push it back.
        self._AddBackToken(token)
        return ctor(token.start, token.end, name, None,
                    self.namespace_stack)
    # Must be the type declaration.
    fields = list(self._GetMatchingChar('{', '}'))
    del fields[-1]  # Remove trailing '}'.
    if token.token_type == tokenize.SYNTAX and token.name == '{':
        next = self._GetNextToken()
        new_type = ctor(token.start, token.end, name, fields,
                        self.namespace_stack)
        # A name means this is an anonymous type and the name
        # is the variable declaration.
        if next.token_type != tokenize.NAME:
            return new_type
        name = new_type
        token = next
    # Must be variable declaration using the type prefixed with keyword.
    assert token.token_type == tokenize.NAME, token
    return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
    """Parse a struct: definition, variable declaration, or return type."""
    # Special case the handling typedef/aliasing of structs here.
    # It would be a pain to handle in the class code.
    name_tokens, var_token = self.GetName()
    if name_tokens:
        next_token = self._GetNextToken()
        # "struct Foo*" / "struct Foo&" style declarator?
        is_syntax = (var_token.token_type == tokenize.SYNTAX and
                     var_token.name[0] in '*&')
        # "struct Foo bar;" style variable?
        is_variable = (var_token.token_type == tokenize.NAME and
                       next_token.name == ';')
        variable = var_token
        if is_syntax and not is_variable:
            variable = next_token
            temp = self._GetNextToken()
            if temp.token_type == tokenize.SYNTAX and temp.name == '(':
                # Handle methods declared to return a struct.
                t0 = name_tokens[0]
                # Synthesize the consumed 'struct' keyword token back in.
                struct = tokenize.Token(tokenize.NAME, 'struct',
                                        t0.start-7, t0.start-2)
                type_and_name = [struct]
                type_and_name.extend(name_tokens)
                type_and_name.extend((var_token, next_token))
                return self._GetMethod(type_and_name, 0, None, False)
            assert temp.name == ';', (temp, name_tokens, var_token)
        if is_syntax or (is_variable and not self._handling_typedef):
            modifiers = ['struct']
            type_name = ''.join([t.name for t in name_tokens])
            position = name_tokens[0]
            return self._CreateVariable(position, variable.name, type_name,
                                        modifiers, var_token.name, None)
        name_tokens.extend((var_token, next_token))
        self._AddBackTokens(name_tokens)
    else:
        self._AddBackToken(var_token)
    # NOTE(review): _GetClass is defined elsewhere in this file (outside
    # this excerpt); it parses the struct body like a class.
    return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
    """Parse a union declaration/definition."""
    return self._GetNestedType(Union)
def handle_enum(self):
    """Parse an enum declaration/definition."""
    return self._GetNestedType(Enum)
def handle_auto(self):
    # TODO(nnorwitz): warn about using auto?  Probably not since it
    # will be reclaimed and useful for C++0x.
    pass
# Storage-class/qualifier keywords that need no dedicated parsing here.
def handle_register(self):
    pass
def handle_const(self):
    pass
def handle_inline(self):
    pass
def handle_extern(self):
    pass
def handle_static(self):
    pass
def handle_virtual(self):
    """Parse a virtual method declaration/definition."""
    # What follows must be a method.
    token = token2 = self._GetNextToken()
    if token.name == 'inline':
        # HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
        token2 = self._GetNextToken()
    if token2.token_type == tokenize.SYNTAX and token2.name == '~':
        # Virtual destructor.  (FUNCTION_VIRTUAL + FUNCTION_DTOR presumably
        # equals the OR of the two flags -- TODO confirm they are disjoint bits.)
        return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
    assert token.token_type == tokenize.NAME or token.name == '::', token
    return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')  # )
    return_type_and_name.insert(0, token)
    if token2 is not token:
        return_type_and_name.insert(1, token2)
    return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
                           None, False)
# Qualifier keywords that need no dedicated parsing here.
def handle_volatile(self):
    pass
def handle_mutable(self):
    pass
# Access specifiers: only valid inside a class; update current visibility.
def handle_public(self):
    assert self.in_class
    self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
    assert self.in_class
    self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
    assert self.in_class
    self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
    """Consume a friend declaration through ';' and return a Friend node."""
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    assert tokens
    first = tokens[0]
    return Friend(first.start, first.end, tokens, self.namespace_stack)
    def handle_static_cast(self):
        # Cast expressions carry no declaration info; nothing to record.
        pass
    def handle_const_cast(self):
        pass
    def handle_dynamic_cast(self):
        pass
    def handle_reinterpret_cast(self):
        pass
    def handle_new(self):
        pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
    def handle_typedef(self):
        """Parse a typedef declaration and return a Typedef node."""
        token = self._GetNextToken()
        if (token.token_type == tokenize.NAME and
            keywords.IsKeyword(token.name)):
            # Token must be struct/enum/union/class.
            method = getattr(self, 'handle_' + token.name)
            # Flag lets the nested-type handler know not to consume the
            # trailing name/semicolon itself.
            self._handling_typedef = True
            tokens = [method()]
            self._handling_typedef = False
        else:
            tokens = [token]
        # Get the remainder of the typedef up to the semi-colon.
        tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
        # TODO(nnorwitz): clean all this up.
        assert tokens
        # The last token is normally the new type's name.
        name = tokens.pop()
        indices = name
        if tokens:
            indices = tokens[0]
        if not indices:
            indices = token
        if name.name == ')':
            # HACK(nnorwitz): Handle pointers to functions "properly".
            if (len(tokens) >= 4 and
                tokens[1].name == '(' and tokens[2].name == '*'):
                tokens.append(name)
                name = tokens[3]
        elif name.name == ']':
            # HACK(nnorwitz): Handle arrays properly.
            if len(tokens) >= 2:
                tokens.append(name)
                name = tokens[1]
        new_type = tokens
        if tokens and isinstance(tokens[0], tokenize.Token):
            new_type = self.converter.ToType(tokens)[0]
        return Typedef(indices.start, indices.end, name.name,
                       new_type, self.namespace_stack)
    def handle_typeid(self):
        pass  # Not needed yet.
    def handle_typename(self):
        pass  # Not needed yet.
    def _GetTemplatedTypes(self):
        """Parse '<...>' template parameters.

        Returns:
          dict mapping parameter name -> (type_name or None, default or None)
        """
        result = {}
        tokens = list(self._GetMatchingChar('<', '>'))
        len_tokens = len(tokens) - 1  # Ignore trailing '>'.
        i = 0
        while i < len_tokens:
            key = tokens[i].name
            i += 1
            if keywords.IsKeyword(key) or key == ',':
                # 'typename'/'class' keywords and separators are skipped.
                continue
            type_name = default = None
            if i < len_tokens:
                i += 1
                if tokens[i-1].name == '=':
                    # Parameter has a default value; capture it.
                    assert i < len_tokens, '%s %s' % (i, tokens)
                    default, unused_next_token = self.GetName(tokens[i:])
                    i += len(default)
                else:
                    if tokens[i-1].name != ',':
                        # We got something like: Type variable.
                        # Re-adjust the key (variable) and type_name (Type).
                        key = tokens[i-1].name
                        type_name = tokens[i-2]
            result[key] = (type_name, default)
        return result
    def handle_template(self):
        """Parse a template declaration (class/struct/friend/method/var)."""
        token = self._GetNextToken()
        assert token.token_type == tokenize.SYNTAX, token
        assert token.name == '<', token
        templated_types = self._GetTemplatedTypes()
        # TODO(nnorwitz): for now, just ignore the template params.
        token = self._GetNextToken()
        if token.token_type == tokenize.NAME:
            if token.name == 'class':
                return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
            elif token.name == 'struct':
                return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
            elif token.name == 'friend':
                return self.handle_friend()
        self._AddBackToken(token)
        # Not a type: scan ahead to decide between a method and a variable.
        tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
        tokens.append(last)
        self._AddBackTokens(tokens)
        if last.name == '(':
            return self.GetMethod(FUNCTION_NONE, templated_types)
        # Must be a variable definition.
        return None
    def handle_true(self):
        pass  # Nothing to do.
    def handle_false(self):
        pass  # Nothing to do.
    def handle_asm(self):
        pass  # Not needed yet.
    def handle_class(self):
        """Parse a 'class' definition (members default to private)."""
        return self._GetClass(Class, VISIBILITY_PRIVATE, None)
    def _GetBases(self):
        """Parse a class's base-class list after ':'.

        Returns:
          (list of base-class AST nodes, the '{' token that follows them)
        """
        # Get base classes.
        bases = []
        while 1:
            token = self._GetNextToken()
            assert token.token_type == tokenize.NAME, token
            # TODO(nnorwitz): store kind of inheritance...maybe.
            if token.name not in ('public', 'protected', 'private'):
                # If inheritance type is not specified, it is private.
                # Just put the token back so we can form a name.
                # TODO(nnorwitz): it would be good to warn about this.
                self._AddBackToken(token)
            else:
                # Check for virtual inheritance.
                token = self._GetNextToken()
                if token.name != 'virtual':
                    self._AddBackToken(token)
                else:
                    # TODO(nnorwitz): store that we got virtual for this base.
                    pass
            base, next_token = self.GetName()
            bases_ast = self.converter.ToType(base)
            assert len(bases_ast) == 1, bases_ast
            bases.append(bases_ast[0])
            assert next_token.token_type == tokenize.SYNTAX, next_token
            if next_token.name == '{':
                # End of base list; class body starts here.
                token = next_token
                break
            # Support multiple inheritance.
            assert next_token.name == ',', next_token
        return bases, token
    def _GetClass(self, class_type, visibility, templated_types):
        """Parse a class/struct definition, forward declaration, or
        inline declaration, returning the corresponding AST node."""
        class_name = None
        class_token = self._GetNextToken()
        if class_token.token_type != tokenize.NAME:
            # Anonymous class/struct: the token is syntax (e.g. '{').
            assert class_token.token_type == tokenize.SYNTAX, class_token
            token = class_token
        else:
            # Skip any macro (e.g. storage class specifiers) after the
            # 'class' keyword.
            next_token = self._GetNextToken()
            if next_token.token_type == tokenize.NAME:
                self._AddBackToken(next_token)
            else:
                self._AddBackTokens([class_token, next_token])
            name_tokens, token = self.GetName()
            class_name = ''.join([t.name for t in name_tokens])
        bases = None
        if token.token_type == tokenize.SYNTAX:
            if token.name == ';':
                # Forward declaration.
                return class_type(class_token.start, class_token.end,
                                  class_name, None, templated_types, None,
                                  self.namespace_stack)
            if token.name in '*&':
                # Inline forward declaration.  Could be method or data.
                name_token = self._GetNextToken()
                next_token = self._GetNextToken()
                if next_token.name == ';':
                    # Handle data
                    modifiers = ['class']
                    return self._CreateVariable(class_token, name_token.name,
                                                class_name,
                                                modifiers, token.name, None)
                else:
                    # Assume this is a method.
                    tokens = (class_token, token, name_token, next_token)
                    self._AddBackTokens(tokens)
                    return self.GetMethod(FUNCTION_NONE, None)
            if token.name == ':':
                bases, token = self._GetBases()
        body = None
        if token.token_type == tokenize.SYNTAX and token.name == '{':
            assert token.token_type == tokenize.SYNTAX, token
            assert token.name == '{', token
            # Recursively parse the class body with a nested builder.
            ast = AstBuilder(self.GetScope(), self.filename, class_name,
                             visibility, self.namespace_stack)
            body = list(ast.Generate())
            if not self._handling_typedef:
                token = self._GetNextToken()
                if token.token_type != tokenize.NAME:
                    assert token.token_type == tokenize.SYNTAX, token
                    assert token.name == ';', token
                else:
                    # 'class Foo { ... } var;' — a variable of the new type.
                    new_class = class_type(class_token.start, class_token.end,
                                           class_name, bases, None,
                                           body, self.namespace_stack)
                    modifiers = []
                    return self._CreateVariable(class_token,
                                                token.name, new_class,
                                                modifiers, token.name, None)
        else:
            if not self._handling_typedef:
                self.HandleError('non-typedef token', token)
            self._AddBackToken(token)
        return class_type(class_token.start, class_token.end, class_name,
                          bases, templated_types, body, self.namespace_stack)
    def handle_namespace(self):
        """Parse a namespace (named, anonymous, or alias) declaration."""
        token = self._GetNextToken()
        # Support anonymous namespaces.
        name = None
        if token.token_type == tokenize.NAME:
            name = token.name
            token = self._GetNextToken()
        self.namespace_stack.append(name)
        assert token.token_type == tokenize.SYNTAX, token
        # Create an internal token that denotes when the namespace is complete.
        internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
                                        None, None)
        internal_token.whence = token.whence
        if token.name == '=':
            # TODO(nnorwitz): handle aliasing namespaces.
            name, next_token = self.GetName()
            assert next_token.name == ';', next_token
            self._AddBackToken(internal_token)
        else:
            assert token.name == '{', token
            tokens = list(self.GetScope())
            # Replace the trailing } with the internal namespace pop token.
            tokens[-1] = internal_token
            # Handle namespace with nothing in it.
            self._AddBackTokens(tokens)
        return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
    def handle_explicit(self):
        """'explicit' must precede a constructor; parse it as one."""
        assert self.in_class
        # Nothing much to do.
        # TODO(nnorwitz): maybe verify the method name == class name.
        # This must be a ctor.
        return self.GetMethod(FUNCTION_CTOR, None)
    def handle_this(self):
        pass  # Nothing to do.
    def handle_operator(self):
        # Pull off the next token(s?) and make that part of the method name.
        pass
    def handle_sizeof(self):
        pass
    def handle_case(self):
        pass
    def handle_switch(self):
        pass
    def handle_default(self):
        """Consume the ':' that must follow 'default' in a switch."""
        token = self._GetNextToken()
        assert token.token_type == tokenize.SYNTAX
        assert token.name == ':'
    def handle_if(self):
        pass
    def handle_else(self):
        pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
    def handle_try(self):
        pass  # Not needed yet.
    def handle_catch(self):
        pass  # Not needed yet.
    def handle_throw(self):
        pass  # Not needed yet.
    def handle_while(self):
        pass
    def handle_do(self):
        pass
    def handle_for(self):
        pass
    def handle_break(self):
        # Statement adds nothing to the AST; just consume through ';'.
        self._IgnoreUpTo(tokenize.SYNTAX, ';')
    def handle_continue(self):
        self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
    """Utility method that returns an AstBuilder from source code.

    Args:
      source: 'C++ source code'
      filename: 'file1'

    Returns:
      AstBuilder
    """
    token_stream = tokenize.GetTokens(source)
    return AstBuilder(token_stream, filename)
def PrintIndentifiers(filename, should_print):
    """Prints all identifiers for a C++ source file.

    Best-effort: parse errors are silently ignored so one bad file does
    not abort a batch run.

    Args:
      filename: 'file1'
      should_print: predicate with signature: bool Function(token)
    """
    source = utils.ReadFile(filename, False)
    if source is None:
        sys.stderr.write('Unable to find: %s\n' % filename)
        return
    builder = BuilderFromSource(source, filename)
    try:
        for node in builder.Generate():
            if should_print(node):
                print(node.name)
    except KeyboardInterrupt:
        return
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # GeneratorExit; narrowed to Exception while keeping the
        # deliberate best-effort behavior (skip unparseable files).
        pass
def PrintAllIndentifiers(filenames, should_print):
    """Prints all identifiers for each C++ source file in filenames.

    Args:
      filenames: ['file1', 'file2', ...]
      should_print: predicate with signature: bool Function(token)
    """
    for source_path in filenames:
        PrintIndentifiers(source_path, should_print)
def main(argv):
    """Parse each file named in argv[1:], printing the AST when DEBUG is set."""
    for filename in argv[1:]:
        source = utils.ReadFile(filename)
        if source is None:
            continue
        print('Processing %s' % filename)
        builder = BuilderFromSource(source, filename)
        try:
            entire_ast = filter(None, builder.Generate())
        except KeyboardInterrupt:
            return
        except Exception:
            # Was a bare 'except:', which also caught SystemExit and
            # GeneratorExit; narrowed to Exception.
            # Already printed a warning, print the traceback and continue.
            traceback.print_exc()
        else:
            if utils.DEBUG:
                for ast in entire_ast:
                    print(ast)
if __name__ == '__main__':
    main(sys.argv)
| 62,773 | 35.201845 | 82 | py |
MINDER | MINDER-main/res/external/sdsl-lite/external/googletest/googlemock/scripts/generator/cpp/tokenize.py | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenize C++ source code."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
from cpp import utils
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
# Add $ as a valid identifier char since so much code uses it.
_letters = 'abcdefghijklmnopqrstuvwxyz'
VALID_IDENTIFIER_CHARS = set(_letters + _letters.upper() + '_0123456789$')
HEX_DIGITS = set('0123456789abcdefABCDEF')
INT_OR_FLOAT_DIGITS = set('01234567890eE-+')
# C++0x string prefixes.
_STR_PREFIXES = set(('R', 'u8', 'u8R', 'u', 'uR', 'U', 'UR', 'L', 'LR'))
# Token types.
UNKNOWN = 'UNKNOWN'
SYNTAX = 'SYNTAX'
CONSTANT = 'CONSTANT'
NAME = 'NAME'
PREPROCESSOR = 'PREPROCESSOR'
# Where the token originated from. This can be used for backtracking.
# It is always set to WHENCE_STREAM in this code.
WHENCE_STREAM, WHENCE_QUEUE = range(2)
class Token(object):
    """Data container to represent a C++ token.

    Tokens can be identifiers, syntax char(s), constants, or
    pre-processor directives.

    start contains the index of the first char of the token in the source
    end contains the index of the last char of the token in the source
    """

    def __init__(self, token_type, name, start, end):
        self.token_type = token_type
        self.name = name
        self.start = start
        self.end = end
        # All tokens produced here originate from the stream.
        self.whence = WHENCE_STREAM

    def __str__(self):
        if utils.DEBUG:
            return 'Token(%r, %s, %s)' % (self.name, self.start, self.end)
        return 'Token(%r)' % self.name

    __repr__ = __str__
def _GetString(source, start, i):
i = source.find('"', i+1)
while source[i-1] == '\\':
# Count the trailing backslashes.
backslash_count = 1
j = i - 2
while source[j] == '\\':
backslash_count += 1
j -= 1
# When trailing backslashes are even, they escape each other.
if (backslash_count % 2) == 0:
break
i = source.find('"', i+1)
return i + 1
def _GetChar(source, start, i):
# NOTE(nnorwitz): may not be quite correct, should be good enough.
i = source.find("'", i+1)
while source[i-1] == '\\':
# Need to special case '\\'.
if (i - 2) > start and source[i-2] == '\\':
break
i = source.find("'", i+1)
# Try to handle unterminated single quotes (in a #if 0 block).
if i < 0:
i = start
return i + 1
def GetTokens(source):
    """Returns a sequence of Tokens.

    Single-pass hand-written scanner over the raw source text.

    Args:
      source: string of C++ source code.

    Yields:
      Token that represents the next token in the source.
    """
    # Cache various valid character sets for speed.
    valid_identifier_chars = VALID_IDENTIFIER_CHARS
    hex_digits = HEX_DIGITS
    int_or_float_digits = INT_OR_FLOAT_DIGITS
    int_or_float_digits2 = int_or_float_digits | set('.')
    # Only ignore errors while in a #if 0 block.
    ignore_errors = False
    count_ifs = 0
    i = 0
    end = len(source)
    while i < end:
        # Skip whitespace.
        while i < end and source[i].isspace():
            i += 1
        if i >= end:
            return
        token_type = UNKNOWN
        start = i
        c = source[i]
        if c.isalpha() or c == '_':  # Find a string token.
            token_type = NAME
            while source[i] in valid_identifier_chars:
                i += 1
            # String and character constants can look like a name if
            # they are something like L"".
            if (source[i] == "'" and (i - start) == 1 and
                source[start:i] in 'uUL'):
                # u, U, and L are valid C++0x character prefixes.
                token_type = CONSTANT
                i = _GetChar(source, start, i)
            elif source[i] == "'" and source[start:i] in _STR_PREFIXES:
                # NOTE(review): this branch tests for a single quote but
                # then calls _GetString (the double-quote scanner);
                # upstream versions test for '"' here -- confirm.
                token_type = CONSTANT
                i = _GetString(source, start, i)
        elif c == '/' and source[i+1] == '/':  # Find // comments.
            i = source.find('\n', i)
            if i == -1:  # Handle EOF.
                i = end
            continue
        elif c == '/' and source[i+1] == '*':  # Find /* comments. */
            i = source.find('*/', i) + 2
            continue
        elif c in ':+-<>&|*=':  # : or :: (plus other chars).
            token_type = SYNTAX
            i += 1
            new_ch = source[i]
            if new_ch == c and c != '>':  # Treat ">>" as two tokens.
                i += 1
            elif c == '-' and new_ch == '>':
                i += 1
            elif new_ch == '=':
                i += 1
        elif c in '()[]{}~!?^%;/.,':  # Handle single char tokens.
            token_type = SYNTAX
            i += 1
            if c == '.' and source[i].isdigit():
                # A leading '.' followed by a digit is a float constant.
                token_type = CONSTANT
                i += 1
                while source[i] in int_or_float_digits:
                    i += 1
                # Handle float suffixes.
                for suffix in ('l', 'f'):
                    if suffix == source[i:i+1].lower():
                        i += 1
                        break
        elif c.isdigit():  # Find integer.
            token_type = CONSTANT
            if c == '0' and source[i+1] in 'xX':
                # Handle hex digits.
                i += 2
                while source[i] in hex_digits:
                    i += 1
            else:
                while source[i] in int_or_float_digits2:
                    i += 1
            # Handle integer (and float) suffixes.
            for suffix in ('ull', 'll', 'ul', 'l', 'f', 'u'):
                size = len(suffix)
                if suffix == source[i:i+size].lower():
                    i += size
                    break
        elif c == '"':  # Find string.
            token_type = CONSTANT
            i = _GetString(source, start, i)
        elif c == "'":  # Find char.
            token_type = CONSTANT
            i = _GetChar(source, start, i)
        elif c == '#':  # Find pre-processor command.
            token_type = PREPROCESSOR
            got_if = source[i:i+3] == '#if' and source[i+3:i+4].isspace()
            if got_if:
                count_ifs += 1
            elif source[i:i+6] == '#endif':
                count_ifs -= 1
                if count_ifs == 0:
                    ignore_errors = False
            # TODO(nnorwitz): handle preprocessor statements (\ continuations).
            while 1:
                i1 = source.find('\n', i)
                i2 = source.find('//', i)
                i3 = source.find('/*', i)
                i4 = source.find('"', i)
                # NOTE(nnorwitz): doesn't handle comments in #define macros.
                # Get the first important symbol (newline, comment, EOF/end).
                i = min([x for x in (i1, i2, i3, i4, end) if x != -1])
                # Handle #include "dir//foo.h" properly.
                if source[i] == '"':
                    i = source.find('"', i+1) + 1
                    assert i > 0
                    continue
                # Keep going if end of the line and the line ends with \.
                if not (i == i1 and source[i-1] == '\\'):
                    if got_if:
                        # '#if 0' / '#if (0)' starts an ignore-errors region.
                        condition = source[start+4:i].lstrip()
                        if (condition.startswith('0') or
                            condition.startswith('(0)')):
                            ignore_errors = True
                    break
                i += 1
        elif c == '\\':  # Handle \ in code.
            # This is different from the pre-processor \ handling.
            i += 1
            continue
        elif ignore_errors:
            # The tokenizer seems to be in pretty good shape.  This
            # raise is conditionally disabled so that bogus code
            # in an #if 0 block can be handled.  Since we will ignore
            # it anyways, this is probably fine.  So disable the
            # exception and return the bogus char.
            i += 1
        else:
            sys.stderr.write('Got invalid token in %s @ %d token:%s: %r\n' %
                             ('?', i, c, source[i-10:i+10]))
            raise RuntimeError('unexpected token')
        if i <= 0:
            print('Invalid index, exiting now.')
            return
        yield Token(token_type, source[start:i], start, i)
if __name__ == '__main__':
    def main(argv):
        """Driver mostly for testing purposes."""
        for filename in argv[1:]:
            source = utils.ReadFile(filename)
            if source is None:
                continue
            # Dump every token with its type, one per line.
            for token in GetTokens(source):
                print('%-12s: %s' % (token.token_type, token.name))
            # print('\r%6.2f%%' % (100.0 * index / token.end),)
            sys.stdout.write('\n')
    main(sys.argv)
| 9,752 | 32.864583 | 79 | py |
MINDER | MINDER-main/res/external/sdsl-lite/tutorial/document_listing/generate_collection.py | #!/usr/bin/python
import fnmatch
import sys
import os
import re
includes = ['*.cpp','*.hpp'] # sources only
includes = r'|'.join([fnmatch.translate(x) for x in includes])
collection_path = "collection.txt"
def main():
    """Concatenate all matching source files under argv[1] into a single
    collection file, separating documents with the 0x01 control byte.

    NOTE: this is Python 2 code (print statements).
    """
    if len(sys.argv) == 1:
        print "Usage ./", sys.argv[0], "directory"
        sys.exit(0)
    cur_dir = sys.argv[1]
    doc_paths = []
    for root, dirs, files in os.walk(cur_dir):
        # Keep only filenames matching the module-level include patterns.
        files = [f for f in files if re.match(includes, f)]
        for f in files:
            doc_path = root+"/"+f
            doc_paths.append(doc_path)
    print "Found ", len(doc_paths), "source files in", cur_dir
    collection_f = open(collection_path,'w')
    # NOTE(review): neither collection_f nor the per-document handles are
    # explicitly closed; relies on interpreter cleanup at process exit.
    for doc_path in doc_paths:
        f = open(doc_path, 'r')
        content = f.read()
        collection_f.write(content)
        collection_f.write('\1')
if __name__ == '__main__':
    main()
| 877 | 23.388889 | 62 | py |
MINDER | MINDER-main/seal/evaluate_output.py | import json
from tqdm import tqdm
import unicodedata
import json, string, re
from collections import Counter, defaultdict
from argparse import ArgumentParser
import unicodedata
import regex
import logging
import copy
logger = logging.getLogger(__name__)
class Tokenizer(object):
    """Base tokenizer class.

    Tokenizers implement tokenize, which should return a Tokens class.
    """

    def tokenize(self, text):
        # Subclasses must override this and return a Tokens instance.
        raise NotImplementedError

    def shutdown(self):
        # Hook for subclasses that hold external resources; no-op here.
        pass

    def __del__(self):
        self.shutdown()
class Tokens(object):
    """A class to represent a list of tokenized text."""
    # Column indices into each per-token tuple stored in self.data.
    TEXT = 0
    TEXT_WS = 1
    SPAN = 2
    POS = 3
    LEMMA = 4
    NER = 5
    def __init__(self, data, annotators, opts=None):
        # data: list of tuples laid out per the column indices above.
        self.data = data
        self.annotators = annotators
        self.opts = opts or {}
    def __len__(self):
        """The number of tokens."""
        return len(self.data)
    def slice(self, i=None, j=None):
        """Return a view of the list of tokens from [i, j)."""
        # Shallow copy so the slice shares annotators/opts with the parent.
        new_tokens = copy.copy(self)
        new_tokens.data = self.data[i:j]
        return new_tokens
    def untokenize(self):
        """Returns the original text (with whitespace reinserted)."""
        return "".join([t[self.TEXT_WS] for t in self.data]).strip()
    def words(self, uncased=False):
        """Returns a list of the text of each token

        Args:
            uncased: lower cases text
        """
        if uncased:
            return [t[self.TEXT].lower() for t in self.data]
        else:
            return [t[self.TEXT] for t in self.data]
    def offsets(self):
        """Returns a list of [start, end) character offsets of each token."""
        return [t[self.SPAN] for t in self.data]
    def pos(self):
        """Returns a list of part-of-speech tags of each token.

        Returns None if this annotation was not included.
        """
        if "pos" not in self.annotators:
            return None
        return [t[self.POS] for t in self.data]
    def lemmas(self):
        """Returns a list of the lemmatized text of each token.

        Returns None if this annotation was not included.
        """
        if "lemma" not in self.annotators:
            return None
        return [t[self.LEMMA] for t in self.data]
    def entities(self):
        """Returns a list of named-entity-recognition tags of each token.

        Returns None if this annotation was not included.
        """
        if "ner" not in self.annotators:
            return None
        return [t[self.NER] for t in self.data]
    def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
        """Returns a list of all ngrams from length 1 to n.

        Args:
            n: upper limit of ngram length
            uncased: lower cases text
            filter_fn: user function that takes in an ngram list and returns
              True or False to keep or not keep the ngram
            as_string: return the ngram as a string vs list
        """
        def _skip(gram):
            if not filter_fn:
                return False
            return filter_fn(gram)
        words = self.words(uncased)
        # (start, end) index pairs for every kept n-gram, end exclusive.
        ngrams = [
            (s, e + 1)
            for s in range(len(words))
            for e in range(s, min(s + n, len(words)))
            if not _skip(words[s : e + 1])
        ]
        # Concatenate into strings
        if as_strings:
            ngrams = ["{}".format(" ".join(words[s:e])) for (s, e) in ngrams]
        return ngrams
    def entity_groups(self):
        """Group consecutive entity tokens with the same NER tag."""
        entities = self.entities()
        if not entities:
            return None
        non_ent = self.opts.get("non_ent", "O")
        groups = []
        idx = 0
        while idx < len(entities):
            ner_tag = entities[idx]
            # Check for entity tag
            if ner_tag != non_ent:
                # Chomp the sequence
                start = idx
                while idx < len(entities) and entities[idx] == ner_tag:
                    idx += 1
                groups.append((self.slice(start, idx).untokenize(), ner_tag))
            else:
                idx += 1
        return groups
class SimpleTokenizer(Tokenizer):
    """Regex tokenizer: alphanumeric runs or single non-whitespace chars."""
    # Runs of letters/digits/combining marks (Unicode categories L, N, M).
    ALPHA_NUM = r"[\p{L}\p{N}\p{M}]+"
    # Any single character that is not whitespace/control.
    NON_WS = r"[^\p{Z}\p{C}]"
    def __init__(self, **kwargs):
        """
        Args:
            annotators: None or empty set (only tokenizes).
        """
        self._regexp = regex.compile(
            "(%s)|(%s)" % (self.ALPHA_NUM, self.NON_WS),
            flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE,
        )
        if len(kwargs.get("annotators", {})) > 0:
            logger.warning(
                "%s only tokenizes! Skipping annotators: %s" % (type(self).__name__, kwargs.get("annotators"))
            )
        self.annotators = set()
    def tokenize(self, text):
        """Tokenize text into a Tokens object (text, trailing-ws, span)."""
        data = []
        matches = [m for m in self._regexp.finditer(text)]
        for i in range(len(matches)):
            # Get text
            token = matches[i].group()
            # Get whitespace
            span = matches[i].span()
            start_ws = span[0]
            if i + 1 < len(matches):
                # Trailing whitespace runs up to the next token's start.
                end_ws = matches[i + 1].span()[0]
            else:
                end_ws = span[1]
            # Format data
            data.append(
                (
                    token,
                    text[start_ws:end_ws],
                    span,
                )
            )
        return Tokens(data, self.annotators)
def _normalize(text):
return unicodedata.normalize("NFD", text)
def has_answer(answers, text, match_type="string") -> bool:
    """Check if a document contains an answer string.

    If `match_type` is string, token matching is done between the text and answer.
    If `match_type` is regex, we search the whole text with the regex.

    NOTE(review): the regex branch below is commented out, so any
    match_type other than "string" silently returns False.
    """
    tok_opts = {}
    # A fresh SimpleTokenizer is built on every call.
    tokenizer = SimpleTokenizer(**tok_opts)
    text = _normalize(text)
    if match_type == "string":
        # Answer is a list of possible strings
        text = tokenizer.tokenize(text).words(uncased=True)
        for single_answer in answers:
            single_answer = _normalize(single_answer)
            single_answer = tokenizer.tokenize(single_answer)
            single_answer = single_answer.words(uncased=True)
            # Sliding-window comparison of token sequences.
            for i in range(0, len(text) - len(single_answer) + 1):
                if single_answer == text[i : i + len(single_answer)]:
                    return True
    # elif match_type == "regex":
    #     # Answer is a regex
    #     for single_answer in answers:
    #         single_answer = _normalize(single_answer)
    #         if regex_match(text, single_answer):
    #             return True
    return False
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--file", type=str, default="")
args = parser.parse_args()

# question -> 1 once an answer is found among its retrieved passages.
query_dict = {}
# question -> 1 for every question seen (recall denominator).
all_dict = {}
# hits[i]: number of questions whose first answer-bearing passage is at rank i.
hits=[0.0]*100
with open(args.file, "r") as f:
    data = json.load(f)
    for line in data:
        question = line["question"]
        answers = line["answers"]
        all_dict[line["question"]] = 1
        for i in range(len(line["ctxs"])):
            retrieved_p_text = line["ctxs"][i]
            # Only the passage body before the first '||' separator counts.
            if has_answer(answers, retrieved_p_text['title']+ " "+retrieved_p_text['text'].split("||")[0]):
                query_dict[line["question"]] = 1
                # Guard: some result files may list more than 100 ctxs;
                # the original indexed hits[i] unconditionally (IndexError).
                if i < len(hits):
                    hits[i]+=1
                break
print(len(query_dict))
print(len(all_dict))
# Fixed output label typo ('reccall@100' -> 'recall@100').
print('recall@100', float(len(query_dict))/len(all_dict))
for i in [1,3,5,20,50,100]:
    new_hits = hits[:i]
    print('recall @ ' + str(i), sum(new_hits)/len(all_dict))
| 7,721 | 29.76494 | 111 | py |
MINDER | MINDER-main/seal/keys.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import defaultdict
from heapq import heappop, heappush
from itertools import chain, islice, product
from typing import *
import torch
from more_itertools import chunked
from tqdm import tqdm
from seal import FMIndex
def deduplicate(list_of_lists):
    """Drop later duplicates from a list, keeping first-seen order.

    Elements may be token lists/tensors, or (score, tokens) pairs; for
    pairs only the token part determines equality.
    """
    seen = set()
    unique = []
    for item in list_of_lists:
        tokens = item
        if isinstance(tokens[0], float):
            # (score, tokens) pair: dedupe on the token sequence only.
            tokens = tokens[1]
        if isinstance(tokens, torch.Tensor):
            key = tuple(tokens.tolist())
        else:
            key = tuple(tokens)
        if key in seen:
            continue
        seen.add(key)
        unique.append(item)
    return unique
def decompose_query_into_keys(query, word_tokenizer, length=3):
    """Enumerate every word n-gram (1 <= n <= length) of the query, in all
    per-word first-letter capitalization variants.

    Each key is prefixed with a single space (matching mid-sentence word
    pieces). Returns the keys as a list in unspecified order.
    """
    keys = set()
    words = [tok.text for tok in word_tokenizer(query.strip())]
    n_words = len(words)
    for start in range(n_words):
        max_end = min(n_words + 1, start + length + 1)
        for end in range(start + 1, max_end):
            span = words[start:end]
            # Toggle capitalization of each word in the span independently.
            for flags in product([True, False], repeat=end - start):
                variant = [
                    w[0].upper() + w[1:] if flag else w
                    for flag, w in zip(flags, span)
                ]
                keys.add(" " + " ".join(variant))
    return list(keys)
def strip(seq, symbols_start, symbols_end):
    """Trim leading tokens found in symbols_start and trailing tokens found
    in symbols_end, returning the remaining slice of seq."""
    lo = 0
    n = len(seq)
    while lo < n and seq[lo] in symbols_start:
        lo += 1
    hi = n
    while hi > lo and seq[hi - 1] in symbols_end:
        hi -= 1
    return seq[lo:hi]
@torch.inference_mode()
def rescore_keys(model, inputs, list_of_decoded, batch_size=100, length_penalty=0.0, progress_bar=False, prefix=[],
                 strip_from_bos=[], strip_from_eos=[]):
    """Score each candidate key under the seq2seq model and return, per
    input, a list of (score, original_candidate) pairs.

    NOTE(review): prefix/strip_from_bos/strip_from_eos are mutable
    defaults; they appear to be read-only here, but confirm.
    """
    device = next(model.parameters()).device
    if inputs is None:
        # No encoder inputs given: score against a minimal BOS/EOS source.
        batch_in = [[model.config.bos_token_id, model.config.eos_token_id]] * len(list_of_decoded)
    else:
        batch_in = list(inputs)
    # Candidates may be (score, tokens) pairs; keep only the token part.
    list_of_decoded = [[x[1] if isinstance(x[0], float) else x for x in xx] for xx in list_of_decoded]
    # Right-pad encoder inputs to a rectangular batch.
    maxlen = max([len(i) for i in batch_in])
    input_ids = [i + ([model.config.pad_token_id] * (maxlen - len(i))) for i in batch_in]
    input_ids = [torch.LongTensor(i).to(device) for i in input_ids]
    input_ids = torch.stack(input_ids, 0)
    attention_mask = input_ids != model.config.pad_token_id
    attention_mask = attention_mask.byte()
    # Run the encoder once; reuse its states for every candidate batch.
    encoder_outputs = model._prepare_encoder_decoder_kwargs_for_generation(
        input_ids, {'attention_mask': attention_mask})['encoder_outputs'].last_hidden_state
    # Flatten to (input_index, candidate) pairs for batching.
    decoder_inputs = enumerate(list_of_decoded)
    decoder_inputs = [(idx, di) for idx, ddi in decoder_inputs for di in ddi]
    all_out = {i: [] for i, _ in enumerate(list_of_decoded)}
    for batch in chunked(tqdm(decoder_inputs) if progress_bar else decoder_inputs, batch_size):
        idxs = []
        batch_in_decoder_orig = []
        batch_in_decoder = []
        for i, di in batch:
            # Strip boundary symbols, then prepend decoder start + prefix.
            stripped = [model.config.decoder_start_token_id] + prefix + strip(di, strip_from_bos, strip_from_eos)
            if stripped:
                idxs.append(i)
                batch_in_decoder_orig.append(di)
                batch_in_decoder.append(stripped)
        batch_in_decoder = [torch.LongTensor(di) for di in batch_in_decoder]
        # Ensure each decoder sequence starts with the decoder start token.
        batch_in_decoder = [
            torch.cat(
                [torch.LongTensor([model.config.decoder_start_token_id]), di]
            ) if di[0] != model.config.decoder_start_token_id else di for di in batch_in_decoder]
        # Right-pad decoder inputs to a rectangular batch.
        maxlen = max([len(di) for di in batch_in_decoder])
        batch_decoder_input_ids = [
            torch.cat(
                [di, torch.LongTensor([model.config.pad_token_id] * (maxlen - len(di)))])
            for di in batch_in_decoder]
        batch_decoder_input_ids = [di for di in batch_decoder_input_ids]
        batch_decoder_input_ids = torch.stack(batch_decoder_input_ids, 0).to(device)
        # Gather the encoder rows corresponding to this batch's inputs.
        batch_input_ids = torch.stack([input_ids[idx] for idx in idxs], 0)
        batch_attention_mask = torch.stack([attention_mask[idx] for idx in idxs], 0)
        batch_encoder_outputs = torch.stack([encoder_outputs[idx] for idx in idxs], 0)
        logits = model(
            input_ids=batch_input_ids,
            attention_mask=batch_attention_mask,
            encoder_outputs=(batch_encoder_outputs, None, None),
            decoder_input_ids=batch_decoder_input_ids[:, :-1],
        ).logits
        logprobs = logits.log_softmax(-1)
        # Log-probability of each gold next token (teacher forcing).
        logprobs = torch.gather(logprobs, -1, batch_decoder_input_ids[:, 1:].unsqueeze(-1))
        # Zero out special/pad positions (token ids < 2).
        logprobs[batch_decoder_input_ids[:, 1:] < 2] = 0.0
        # Exclude the forced prefix from the score.
        logprobs = logprobs[:, len(prefix):]
        logprobs = logprobs.squeeze(-1).sum(-1)
        logprobs = logprobs.tolist()
        for i, di, bdi, ll in zip(idxs, batch_in_decoder_orig, batch_decoder_input_ids, logprobs):
            # Length-normalized score.
            sco = ll / (len(di) ** length_penalty)
            all_out[i].append((sco, di))
    return [v for k, v in sorted(all_out.items())]
# @torch.inference_mode()
@torch.no_grad()
def compute_unigram_scores(model, inputs, index: FMIndex, tokenizer=None, tolist=True, temperature=1.0, prefix=[]):
    """Return per-input log-probabilities over the vocabulary for the first
    decoded position after the (optional) forced prefix.

    NOTE(review): the `index` parameter is not used in this function body.
    """
    device = next(model.parameters()).device
    if isinstance(inputs[0], str):
        # Raw strings: delegate padding/tensorization to the tokenizer.
        batch = tokenizer(inputs, padding=True, return_tensors='pt')
    else:
        # Pre-tokenized id lists: right-pad manually.
        batch_in = list(inputs)
        maxlen = max([len(i) for i in batch_in])
        input_ids = [i + ([model.config.pad_token_id] * (maxlen - len(i))) for i in batch_in]
        input_ids = [torch.LongTensor(i).to(device) for i in input_ids]
        input_ids = torch.stack(input_ids, 0)
        attention_mask = input_ids != model.config.pad_token_id
        attention_mask = attention_mask.byte()
        batch = dict(input_ids=input_ids, attention_mask=attention_mask)
    batch = {k: v.to(device) for k, v in batch.items()}
    # Decoder input: start token followed by the forced prefix ids.
    decoder_input_ids = torch.full_like(batch['input_ids'][:, :1 + len(prefix)], model.config.decoder_start_token_id)
    for i, idx in enumerate(prefix, start=1):
        decoder_input_ids[:, i] = idx
    # Logits at the first free position (right after the prefix).
    logits = model(**batch, decoder_input_ids=decoder_input_ids).logits[:, 0 + len(prefix)]
    if temperature != 1.0:
        logits /= temperature
    logprobs = logits.log_softmax(-1)
    if tolist:
        return logprobs.tolist()
    else:
        return logprobs
def aggregate_evidence(ngrams_and_scores: List[Tuple[List[int], float]], unigram_scores: Optional[List[float]] = None,
                       index: Optional[FMIndex] = None, max_occurrences_1: int = 1500,
                       max_occurrences_2: int = 10_000_000, n_docs_complete_score: int = 500, alpha: float = 2.0,
                       beta: float = 0.8, length_penalty: float = 0.0, use_fm_index_frequency: bool = True,
                       add_best_unigrams_to_ngrams: bool = False, use_top_k_unigrams=1000, sort_by_length=False,
                       sort_by_freq=False, smoothing=5.0, allow_overlaps=False, single_key=0.0,
                       single_key_add_unigrams=False, unigrams_ignore_free_places=False, tokenizer=None) -> Tuple[List[int], List[float]]:
    """Aggregate generated ngram keys (and optional unigram scores) into
    per-document scores using FM-index occurrence statistics.

    Two stages: (1) rare ngrams are located in the index and accumulated
    into a cheap first-stage score per document; (2) the top
    `n_docs_complete_score` documents are fully re-scored by matching all
    ngrams against their token sequence with a token trie and a greedy,
    coverage-aware assignment.

    Returns:
        (results, all_ngrams) where `results` is a dict keyed by document
        index, sorted by descending score, each value being
        [score, matched ngrams, places filled, doc tokens, best single ngram],
        and `all_ngrams` maps ngram tuples to their scores.
        NOTE(review): the declared return annotation does not match the
        actual (dict, dict) return value — kept as-is.
    """
    def repetition(ngram, score, coverage):
        # Discount `score` by how much of the ngram's token set is already
        # covered; an entirely new ngram keeps its full score.
        if not coverage:
            return score
        ngram = set(ngram)
        coeff = 1.0 - beta + (beta * len(ngram.difference(coverage)) / len(ngram))
        return coeff * score
    # Total number of tokens in the corpus (end of the last document).
    ntokens = float(index.beginnings[-1])
    ngrams_and_scores = [(ngram.tolist() if isinstance(ngram, torch.Tensor) else ngram, sr) for ngram, sr in ngrams_and_scores]
    # Occurrence cache; the empty ngram maps to the corpus size.
    counts = {tuple(): len(index)}
    if not use_fm_index_frequency:
        try:
            # Cutoff just below the worst generated score.
            cutoff = sorted(ngrams_and_scores, key=lambda x: x[1])[0][1] - 0.1
        except IndexError as e:
            print(ngrams_and_scores)
            raise e
    else:
        cutoff = None
    # Token ids never scored as unigrams (special tokens, presumably BART's
    # bos/pad/eos — TODO confirm).
    unigrams = {0, 1, 2}
    # --- Stage 0: turn raw (ngram, model score) pairs into aggregate scores.
    for i in range(len(ngrams_and_scores)):
        ngram, sr = ngrams_and_scores[i]
        if ngram[0] == 45056:
            # 45056 appears to be the ' ||' marker token (see SEALSearcher);
            # its keys are heavily down-weighted — TODO confirm intent.
            sr = min(sr + 14, -1.0)
            # sr *= 0.1
        if len(ngram) == 1:
            unigrams.add(ngram[0])
        count = index.get_count(ngram)
        # print(tokenizer.decode(ngram),sr,count)
        # if count < 2000:
        #     count = index.get_doc_count(ngram)
        # print(ngram, count)
        counts[tuple(ngram)] = count
        if count == 0:
            sco = 0.0
        elif use_fm_index_frequency:
            # Contrast the LM probability (sr) with the smoothed corpus
            # frequency (snr): high score when the model likes an ngram far
            # more than chance.
            sr -= 1e-10
            sr *= (1.0 - length_penalty) ** (len(ngram) - 1.0)
            snr = math.log((count + smoothing) / (ntokens + smoothing))
            sco = \
                (sr + math.log(1 - math.exp(snr))) - \
                (snr + math.log(1 - math.exp(sr)))
            sco = max(sco, 0.0)
            sco **= alpha
        else:
            sco = sr - cutoff
            sco = max(sco, 0.0)
            sco *= (1.0 - length_penalty) ** (len(ngram) - 1.0)
            sco **= alpha
        ngrams_and_scores[i] = (ngram, sco)
        # print(tokenizer.decode(ngram),'**', sco)
    # --- Optional unigram scoring: keep only the top-k, rescale like above.
    if unigram_scores is not None:
        unigram_scores = unigram_scores[:]
        best = sorted(range(len(unigram_scores)), reverse=True, key=lambda i: unigram_scores[i])
        best = best[:use_top_k_unigrams]
        best = set(best)
        unigram_scores = [s if i in best else float('-inf') for i, s in enumerate(unigram_scores)]
        for i in range(len(unigram_scores)):
            if i in unigrams:
                # Already covered by a generated unigram, or a special token.
                unigram_scores[i] = 0.0
                continue
            sr = unigram_scores[i]
            ngram = [i]
            count = index.get_count(ngram)
            if count == 0:
                sco = 0.0
            elif use_fm_index_frequency:
                snr = math.log((count + smoothing) / (ntokens + smoothing))
                sco = \
                    (sr + math.log(1 - math.exp(snr))) - \
                    (snr + math.log(1 - math.exp(sr)))
                sco = max(sco, 0.0)
            else:
                sco = sr - cutoff
                sco = max(sco, 0.0)
            sco **= alpha
            if sco == 0.0:
                unigram_scores[i] = 0.0
                continue
            unigram_scores[i] = sco
        if add_best_unigrams_to_ngrams:
            # Promote the strongest unigrams into the ngram pool.
            best_unigrams = sorted(list(range(len(unigram_scores))), key=lambda x: -unigram_scores[x])[:len(ngrams_and_scores)]
            for i in best_unigrams:
                counts[tuple([i])] = index.get_count([i])
                ngrams_and_scores.append(([i], unigram_scores[i]))
    # rare ngrams (occurring less than max_hits) --> used for the first stage and full scoring
    rare_ngrams = defaultdict(float)
    # frequent ngrams --> used just for full scoring
    freq_ngrams = defaultdict(float)
    # computing scores for all ngrams
    for ngram, sco in ngrams_and_scores:
        count = index.get_count(ngram)
        if count > max_occurrences_2:
            continue
        elif sco == 0.0:
            continue
        elif count > max_occurrences_1 or sco < 0.0:
            ngrams = freq_ngrams
            # ngrams = rare_ngrams
        else:
            ngrams = rare_ngrams
        ngram = tuple(ngram)
        ngrams[ngram] = sco
        # else:
    rare_ngrams = {k: v for k, v in sorted(rare_ngrams.items(), key=lambda x: x[1], reverse=True)}
    # rare_ngrams = remove_redundant_ngrams(rare_ngrams)
    freq_ngrams = {k: v for k, v in sorted(freq_ngrams.items(), key=lambda x: x[1], reverse=True)}
    # freq_ngrams = remove_redundant_ngrams(freq_ngrams)
    all_ngrams = {k: v for k, v in
                  sorted(
                      chain(rare_ngrams.items(), freq_ngrams.items()),
                      key=lambda x: x[1], reverse=True)}
    # --- Stage 1: locate rare ngrams in the index, accumulate cheap
    # per-document scores; `covered_points` prevents double-counting
    # overlapping corpus positions.
    covered_points = set()
    first_stage = defaultdict(lambda: [0.0, [], [[], 0.0]])
    for ngram, sco in rare_ngrams.items():
        # idfs[ngram] = idf(ngram, index)
        # each ngram only considered once for doc
        doc_done = defaultdict(set)
        for row in islice(range(*index.get_range(list(ngram))), max_occurrences_1):
            tok_end = index.locate(row)
            tok_start = tok_end - len(ngram)
            doc = index.get_doc_index(tok_end)
            new = all([i not in covered_points for i in range(tok_start, tok_end)])
            if sort_by_length:
                order = (len(ngram), sco)
                max_order = (len(first_stage[doc][2][0]), first_stage[doc][2][1])
            elif sort_by_freq:
                order = (-counts[tuple(ngram)], sco)
                max_order = (-counts[tuple(first_stage[doc][2][0])], first_stage[doc][2][1])
            else:
                order = sco
                max_order = first_stage[doc][2][1]
            if order > max_order:
                # Track the single best ngram seen for this document.
                first_stage[doc][2] = [ngram, sco]
            if new:
                for tok in range(tok_start, tok_end):
                    covered_points.add(tok)
            if new or allow_overlaps:
                if ngram not in doc_done[doc]:
                    doc_done[doc].add(ngram)
                    first_stage[doc][0] += sco
                    first_stage[doc][1].append((ngram, sco))
    # Re-apply the repetition discount within each document.
    for doc, doc_info in first_stage.items():
        current_coverage = set()
        current_score = 0.0
        for i in range(len(doc_info[1])):
            tt, sco = doc_info[1][i]
            tts = set(tt)
            new_sco = repetition(tts, sco, current_coverage)
            current_score += new_sco
            doc_info[1][i] = [tt, new_sco]
            current_coverage |= tts
        doc_info[0] = current_score
    # Pick the documents worth full scoring; `single_key` interpolates
    # between the summed score and the best single-ngram score.
    to_fully_score = sorted(first_stage.items(),
                            key=lambda x: (1.0 - single_key) * (-x[1][0]) + single_key * (-x[1][2][1]))[:n_docs_complete_score]
    results = defaultdict(lambda:
        [
            0.0,  # score
            [],  # ngrams found
            None,  # places filled
            None,  # full doc tokens
            [[], 0.0]  # max ngram
        ])
    # --- Stage 2: build a token trie over all ngrams (key -1 holds the
    # score at a terminal node) and match it against each document.
    trie = {}
    for ngram, score in all_ngrams.items():
        if len(ngram) < 1 or score <= 0.0:
            continue
        current = trie
        for t in ngram:
            current = current.setdefault(t, {})
        current[-1] = score
    for doc, _ in to_fully_score:
        # Prepend token 2 and drop the final token — presumably aligning to
        # how documents were indexed (TODO confirm).
        doc_tokens = [2] + index.get_doc(doc)[:-1]
        results[doc][3] = doc_tokens
        if unigram_scores is not None:
            type_scores = {t: unigram_scores[t] for t in doc_tokens}
        else:
            type_scores = {t: 0.0 for t in doc_tokens}
        # Scan the document once, advancing all partial trie matches.
        matches = {}
        open_matches = []
        for i in range(len(doc_tokens)):
            open_matches = [(m.get(doc_tokens[i]), l + 1, n) for (m, l, n) in open_matches] + [
                (trie.get(doc_tokens[i]), 1, [])]
            for _, _, n in open_matches:
                n.append(doc_tokens[i])
            new_open_matches = []
            while open_matches:
                m, l, n = open_matches.pop()
                if m is None:
                    continue
                new_open_matches.append((m, l, n))
                if -1 in m:
                    # Terminal node: record a complete ngram occurrence.
                    start = i - l + 1
                    end = i + 1
                    matches.setdefault(tuple(n), [m[-1], []])[1].append((start, end))
            open_matches = new_open_matches
        # Greedy assignment: best-scoring occurrences claim positions first.
        greedy_matches = []
        for n, (s, d) in matches.items():
            if sort_by_length:
                order = (-len(n), -s)
                max_order = (-len(results[doc][4][0]), -results[doc][4][1])
            elif sort_by_freq:
                order = (counts[tuple(n)], -s)
                max_order = (counts[tuple(results[doc][4][0])], -results[doc][4][1])
            else:
                order = -s
                max_order = -results[doc][4][1]
            for (i, j) in d:
                heappush(greedy_matches, (-s, n, s, i, j))
            if order < max_order:
                results[doc][4] = [n, s]
        current_coverage = set()
        ngrams = []
        prev = None
        f = 0
        free = [True] * len(doc_tokens)
        while greedy_matches:
            order, n, s, i, j = heappop(greedy_matches)
            n_set = set(n)
            if prev == n:
                # Repeated occurrence of the same ngram reuses its score.
                new_s = ngrams[-1][1]
            elif not n_set:
                new_s = 0.0
            else:
                new_s = repetition(n_set, s, current_coverage)
            if new_s <= 0.0:
                continue
            if allow_overlaps or all(free[i:j]):
                pass
            else:
                continue
            if prev == n:
                f += 1
                ngrams[-1] = (n, new_s)
            else:
                f = 1
                prev = n
                current_coverage |= n_set
                ngrams.append((n, new_s))
            free[i:j] = [False] * (j - i)
        if unigrams_ignore_free_places:
            free = [True for _ in free]
        single_key_score = results[doc][4][1]
        multi_key_score = sum([s for n, s in ngrams])
        # Unigrams score only positions not already claimed by an ngram.
        unigram_score = 0.0
        for t, f in Counter([t for t, b in zip(doc_tokens, free) if b]).items():
            s = type_scores[t]
            if s > 0.0:
                n = (t,)
                s = repetition(n, s, current_coverage)
                if s != 0.0:
                    unigram_score += s
                    ngrams.append((n, s))
        if single_key_add_unigrams:
            single_key_score += unigram_score
        multi_key_score += unigram_score
        results[doc][0] = (1.0 - single_key) * multi_key_score + single_key * single_key_score
        results[doc][1] = ngrams
    results = {k: v for k, v in sorted(results.items(), key=lambda x: -x[1][0])}
    return results, all_ngrams
###################test for remove intersestive
# if unigrams_ignore_free_places:
# free = [True for _ in free]
# single_key_score = results[doc][4][1]
# multi_key_score = sum([s for n, s in ngrams])
# unigram_score = 0.0
# for t, f in Counter([t for t, b in zip(doc_tokens, free) if b]).items():
# s = type_scores[t]
# if s > 0.0:
# n = (t,)
# s = repetition(n, s, current_coverage)
# if s != 0.0:
# unigram_score += s
# ngrams.append((n, s))
# max_s = 0.0
# max_n = ''
# for n, s in ngrams:
# if s > max_s:
# max_s = s
# max_n = n
# results[doc][0] = max_s
# results[doc][1] = [(max_n,max_s)]
# results = {k: v for k, v in sorted(results.items(), key=lambda x: -x[1][0])}
# return results, all_ngrams | 18,977 | 34.079482 | 138 | py |
MINDER | MINDER-main/seal/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
def _remove_ignore_keys_(state_dict):
    """Drop bookkeeping entries (version markers, float tensor, output
    projection) from a fairseq state dict, in place. Missing keys are ignored."""
    for key in (
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ):
        state_dict.pop(key, None)
def _make_linear_from_emb(emb):
    """Return a bias-free Linear layer that shares the embedding's weight.

    NOTE(review): the constructor arguments look transposed relative to the
    assigned weight, but the `.data` assignment below determines the actual
    weight shape — preserved exactly as in the original.
    """
    num_embeddings, embedding_dim = emb.weight.shape
    linear = nn.Linear(num_embeddings, embedding_dim, bias=False)
    linear.weight.data = emb.weight.data
    return linear
def load_state_dict_from_lightning_checkpoint(model, path):
    """Load a checkpoint file saved from PyTorch Lightning and apply it to
    `model` with a strict `load_state_dict` (keys must match exactly)."""
    checkpoint = torch.load(path, map_location="cpu")
    model.load_state_dict(checkpoint)
def load_state_dict_from_fairseq_checkpoint(model, path):
    """Load a fairseq checkpoint into a HF-style wrapper model.

    Fairseq stores weights under the "model" key; embeddings get one extra
    zero row appended (presumably for an added special token — TODO confirm),
    bookkeeping keys are dropped, the LM head is re-tied to the shared
    embedding, and only keys present in `model.model` are loaded.
    """
    state_dict = torch.load(path, map_location="cpu")["model"]
    # Tie the shared embedding to the decoder embedding.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    for key in ["shared.weight", "encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]:
        state_dict[key] = torch.cat([state_dict[key], torch.zeros_like(state_dict[key][:1])], 0)
    _remove_ignore_keys_(state_dict)
    if hasattr(model, "lm_head"):
        model.lm_head = _make_linear_from_emb(model.model.shared)
    # Keep only the keys the target model actually declares.
    new_state_dict = {}
    for key in model.model.state_dict():
        new_state_dict[key] = state_dict[key]
    model.model.load_state_dict(new_state_dict) | 2,021 | 35.107143 | 102 | py |
MINDER | MINDER-main/seal/data.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import jsonlines
import csv
import ast
import pathlib
import json
from enum import unique, Enum
from pyserini.query_iterator import QueryIterator, DefaultQueryIterator, KiltQueryIterator
from pyserini.output_writer import OutputWriter, TrecWriter, MsMarcoWriter
from seal.retrieval import SEALDocument
from datasets import load_dataset
@unique
class TopicsFormat(Enum):
    """Supported on-disk formats for topic/query files (see get_query_iterator)."""
    DEFAULT = 'default'
    KILT = 'kilt'
    KILT_TEMPLATE = 'kilt_template'
    DPR = 'dpr'
    DPR_QAS = 'dpr_qas'
    NQ = 'nq'
    MSMARCO = 'msmarco'
@unique
class OutputFormat(Enum):
    """Supported output run formats (see get_output_writer)."""
    TREC = 'trec'
    MSMARCO = 'msmarco'
    KILT = 'kilt'
    DPR = 'dpr'
class MsmarcoQueryIterator(QueryIterator):
    """Query iterator over the MS MARCO dev split loaded via `datasets`."""

    def get_query(self, id_):
        """Return the raw query string for a query id."""
        return self.topics[id_]['query']

    @classmethod
    def from_topics(cls, topics_path: str):
        """Build the iterator from a HuggingFace dataset name/path (dev split)."""
        split = load_dataset(topics_path, split="dev")
        topics = {}
        order = []
        for record in split:
            qid = int(record['query_id'])
            topics[qid] = record
            order.append(qid)
        return cls(topics, order)
class DprQueryIterator(QueryIterator):
    """Iterator over DPR-style retrieval files (a JSON list of dicts with a
    'question' field); ids are the positions in the file."""

    def get_query(self, id_):
        return self.topics[id_]['question']

    @classmethod
    def from_topics(cls, topics_path: str):
        with open(topics_path) as fin:
            records = json.load(fin)
        topics = dict(enumerate(records))
        order = list(range(len(records)))
        return cls(topics, order)
class DprQueryQasIterator(QueryIterator):
    """Iterator over DPR "qas" TSV files: each row is a question plus a
    Python-literal list of answer strings."""

    def get_query(self, id_):
        return self.topics[id_]['question']

    @classmethod
    def from_topics(cls, topics_path: str):
        topics = {}
        order = []
        with open(topics_path) as handle:
            reader = csv.reader(handle, delimiter="\t", quotechar='"')
            for row_id, (query, answers) in enumerate(reader):
                parsed = ast.literal_eval(answers)
                assert isinstance(parsed, list) and isinstance(parsed[0], str)
                topics[row_id] = {
                    "question": query,
                    "answers": parsed,
                }
                order.append(row_id)
        return cls(topics, order)
class KiltTemplateQueryIterator(KiltQueryIterator):
    """KILT iterator that uses the first template question as the query."""

    def get_query(self, id_):
        meta = self.topics[id_]['meta']
        return meta['template_questions'][0]
class NqQueryIterator(QueryIterator):
    """Iterator over Natural Questions jsonlines files, keyed by example id."""

    def get_query(self, id_):
        """Return the raw question text for an example id."""
        return self.topics[id_]['question_text']

    @classmethod
    def from_topics(cls, topics_path: str):
        """Read a jsonlines NQ file into (topics, order).

        Bug fix: the original never appended to `order`, so the resulting
        iterator yielded no queries; every other iterator in this module
        records each id in `order` as it goes.
        """
        topics = {}
        order = []
        with jsonlines.open(topics_path) as fin:
            for instance in fin:
                topics[instance['example_id']] = instance
                order.append(instance['example_id'])
        return cls(topics, order)
def get_query_iterator(topics_path: str, topics_format: TopicsFormat, queries_path: Optional[str] = None):
    """Instantiate the query iterator class matching `topics_format`.

    `queries_path` is accepted for API compatibility but is not used.
    """
    iterator_cls = {
        TopicsFormat.DEFAULT: DefaultQueryIterator,
        TopicsFormat.KILT: KiltQueryIterator,
        TopicsFormat.KILT_TEMPLATE: KiltTemplateQueryIterator,
        TopicsFormat.DPR: DprQueryIterator,
        TopicsFormat.DPR_QAS: DprQueryQasIterator,
        TopicsFormat.NQ: NqQueryIterator,
        TopicsFormat.MSMARCO: MsmarcoQueryIterator,
    }[topics_format]
    return iterator_cls.from_topics(topics_path)
class KiltWriter(OutputWriter):
    """Writes retrieval hits in the KILT provenance JSONL format."""

    def write(self, topic: str, hits: list):
        """Serialize one topic's hits as a single KILT JSON line."""
        provenance = []
        datapoint = {'id': topic, 'input': None, 'output': [{'provenance': provenance}]}
        for docid, rank, score, hit in self.hits_iterator(hits):
            if isinstance(hit, SEALDocument):
                if datapoint['input'] is None and hit.query is not None:
                    datapoint['input'] = hit.query
                # docid encodes "<wikipedia_id>[-<start_par>[-<end_par>]]".
                docid = docid.split("-")
                wikipedia_id = int(docid[0])
                start_paragraph_id = end_paragraph_id = 0
                if len(docid) == 2:
                    start_paragraph_id = end_paragraph_id = int(docid[1])
                elif len(docid) >= 3:
                    start_paragraph_id = int(docid[1])
                    end_paragraph_id = int(docid[2])
                title, body = hit.text()
                provenance.append({
                    "wikipedia_id": wikipedia_id,
                    "start_paragraph_id": start_paragraph_id,
                    "end_paragraph_id": end_paragraph_id,
                    "text": f'{title} @@ {body}',
                    "score": score,
                })
                if hit.keys is not None:
                    provenance[-1]['meta'] = {'keys': hit.keys}
            else:
                # Non-SEAL hits only carry a document id.
                provenance.append({"wikipedia_id": docid})
        json.dump(datapoint, self._file)
        self._file.write('\n')
class DprWriter(OutputWriter):
    """Accumulates hits per topic and dumps a DPR-style JSON file on exit."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Topics in the order they were written; drives the final dump order.
        self.order = []

    def write(self, topic: str, hits: list):
        """Attach the hits as a 'ctxs' list on the topic's datapoint."""
        datapoint = self.topics[topic]
        self.order.append(topic)
        ctxs = datapoint['ctxs'] = []
        for docid, rank, score, hit in self.hits_iterator(hits):
            title, body = hit.text()
            ctx = {
                "title": title.strip(),
                "text": body.strip(),
                "score": score,
                "passage_id": docid,
                "keys":hit.keys
            }
            ctxs.append(ctx)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Dump everything only when the writer context closes.
        data = [self.topics[t] for t in self.order]
        json.dump(data, self._file, indent="    ")
        return super().__exit__(exc_type, exc_value, exc_traceback)
def get_output_writer(file_path: str, output_format: OutputFormat, *args, **kwargs) -> OutputWriter:
    """Instantiate the output writer class matching `output_format`."""
    writer_classes = {
        OutputFormat.TREC: TrecWriter,
        OutputFormat.MSMARCO: MsMarcoWriter,
        OutputFormat.KILT: KiltWriter,
        OutputFormat.DPR: DprWriter,
    }
    writer_cls = writer_classes[output_format]
    return writer_cls(file_path, *args, **kwargs)
| 6,374 | 31.692308 | 106 | py |
MINDER | MINDER-main/seal/retrieval.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import psutil
import logging
import sys
from typing import *
from dataclasses import dataclass
from itertools import islice
import multiprocessing
from more_itertools import ichunked
import tqdm
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, BartTokenizer, BartForConditionalGeneration
from seal.index import FMIndex
from seal.beam_search import fm_index_generate
from seal import keys as rk
from seal.utils import \
load_state_dict_from_lightning_checkpoint, \
load_state_dict_from_fairseq_checkpoint
# Lazily-initialized spaCy tokenizer; populated by _init_word_tokenizer().
word_tokenizer = None
# Module logger: DEBUG-level messages through a dedicated stream handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Global debug flag (not referenced in this portion of the module).
DEBUG = False
def _init_word_tokenizer():
    """Create the module-level spaCy English tokenizer on first use."""
    global word_tokenizer
    if word_tokenizer is not None:
        return
    from spacy.lang.en import English
    word_tokenizer = English().tokenizer
def _get_process_memory():
    """Return the resident set size (bytes) of the current process."""
    return psutil.Process(os.getpid()).memory_info().rss
def batch_generate_keys(searcher, queries, constrained_generation=True):
    """Generate retrieval keys for a stream of queries, one batch at a time.

    Depending on the searcher's flags, keys are decoded from the body, the
    title, the query channel ("free" or "stable"), and/or the code channel,
    each via FM-index-constrained beam search; they may then be rescored and
    augmented with query-derived keys and unigram scores.

    Yields, per query: the list of (key tokens, score) pairs, or a
    (keys, unigram_scores) pair when `searcher.unigram_scores` is set.
    """
    if searcher.add_query_to_keys:
        _init_word_tokenizer()
    def process_batch(inputs):
        # Normalize inputs; BART vocabularies expect a leading space.
        inputs = [(" " + q.strip()) if searcher.prepend_space else q.strip() for q in inputs]
        input_tokens = searcher.bart_tokenizer(inputs, padding=False)['input_ids']
        # --- Channel 1: decode ngrams from passage bodies.
        if searcher.decode_body:
            batch_str = inputs
            if searcher.use_markers:
                batch_str = [i + " || body" for i in batch_str]
            if searcher.value_conditioning:
                batch_str = [i + " || +" for i in batch_str]
            batch = searcher.bart_tokenizer(batch_str, return_tensors='pt', padding=True, truncation=True)
            batch = {k: v.to(searcher.device) for k, v in batch.items()}
            found_keys = fm_index_generate(
                searcher.bart_model, searcher.fm_index,
                **batch,
                min_length=searcher.length,
                max_length=searcher.length,
                length_penalty=searcher.length_penalty,
                num_beams=searcher.beam,
                disable_fm_index=not constrained_generation,
                diverse_bs_groups=searcher.diverse_bs_groups,
                diverse_bs_penalty=searcher.diverse_bs_penalty,
                stop_at_count=searcher.stop_at_count,
                keep_history=True,
                topk=searcher.topk,
            )
            # Strip special tokens from both ends; keep only keys that still
            # occur in the FM-index.
            for fk in found_keys:
                fk[:] = [(s, k[1:] if k[0] in searcher.strip_token_ids else k) for s, k in fk if k]
                fk[:] = [(s, k[1:] if k[0] in searcher.strip_token_ids else k) for s, k in fk if k]
                fk[:] = [(s, k[:-1] if k[-1] in searcher.strip_token_ids else k) for s, k in fk if k]
                if searcher.min_length > 0:
                    fk[:] = [(s, k) for s, k in fk if len(k) == searcher.min_length]
                fk[:] = [(s, k) for s, k in fk if k and searcher.fm_index.get_count(k) > 0]
            if searcher.rescore and searcher.use_markers:
                input_tokens = searcher.bart_tokenizer(inputs, padding=False)['input_ids']
                found_keys = rk.rescore_keys(
                    searcher.bart_model,
                    input_tokens,
                    found_keys,
                    batch_size=100,
                    length_penalty=0.0,
                    strip_from_bos=[
                        searcher.title_bos_token_id,
                        searcher.code_bos_token_id,
                        searcher.bart_model.config.decoder_start_token_id],
                    strip_from_eos=[
                        searcher.title_eos_token_id,
                        searcher.code_eos_token_id,
                        searcher.bart_model.config.eos_token_id])
            # for i in range(len(found_keys)):
            #     print(batch_str[i])
            #     new_fk = found_keys[i]
            #     for s, k in new_fk:
            #         print(s, searcher.bart_tokenizer.decode(k))
        else:
            found_keys = [[] for _ in inputs]
        # --- Channel 2: keys extracted directly from the query text.
        if searcher.add_query_to_keys:
            found_keys_input_no_score = []
            for inp in inputs:
                new_fk = searcher.bart_tokenizer(
                    rk.decompose_query_into_keys(inp, word_tokenizer, 3),
                    padding=False,
                    add_special_tokens=False)['input_ids']
                # Re-encode through the target tokenizer side to normalize.
                with searcher.bart_tokenizer.as_target_tokenizer():
                    new_fk = searcher.bart_tokenizer(searcher.bart_tokenizer.batch_decode(new_fk), padding=False)['input_ids']
                new_fk = [k[:-1] if k and k[-1] in searcher.strip_token_ids else k for k in new_fk if k]
                new_fk = [k[1:] if k and k[0] in searcher.strip_token_ids else k for k in new_fk if k]
                new_fk = [k[1:] if k and k[0] in searcher.strip_token_ids else k for k in new_fk if k]
                if searcher.min_length > 0:
                    new_fk = [k for k in new_fk if len(k) == searcher.min_length]
                new_fk = [k for k in new_fk if k and searcher.fm_index.get_count(k) > 0]
                found_keys_input_no_score.append(new_fk)
            batch_str = inputs
            if searcher.use_markers:
                batch_str = [i + " || body" for i in batch_str]
            if searcher.value_conditioning:
                batch_str = [i + " || +" for i in batch_str]
            input_tokens = searcher.bart_tokenizer(batch_str, padding=False)['input_ids']
            # Query-derived keys carry no generation score: score them now.
            found_keys_input = rk.rescore_keys(
                searcher.bart_model,
                input_tokens,
                found_keys_input_no_score,
                batch_size=100,
                length_penalty=0.0)
            for fk, nfk in zip(found_keys, found_keys_input):
                fk += nfk
        # --- Channel 3: decode document titles.
        if searcher.decode_titles:
            batch_str = inputs
            if searcher.use_markers:
                batch_str = [i + " || title" for i in batch_str]
            if searcher.value_conditioning:
                batch_str = [i + " || +" for i in batch_str]
            batch = searcher.bart_tokenizer(batch_str, return_tensors='pt', padding=True, truncation=True)
            batch = {k: v.to(searcher.device) for k, v in batch.items()}
            decoded_title = fm_index_generate(
                searcher.bart_title_model, searcher.fm_index,
                **batch,
                min_length=1,
                max_length=15,
                num_beams=searcher.beam,
                length_penalty=searcher.length_penalty,
                force_decoding_from=[searcher.title_bos_token_id],
                eos_token_id=searcher.title_eos_token_id,
                diverse_bs_groups=searcher.diverse_bs_groups,
                diverse_bs_penalty=searcher.diverse_bs_penalty,
                keep_history=True,
                disable_fm_index=not constrained_generation,
                topk=searcher.topk,
            )
            found_keys_title = [[(sco, hyp) for sco, hyp in dec] for dec in decoded_title]
            for new_fk, fk in zip(found_keys_title, found_keys):
                if searcher.force_decoding_second_token >= 0:
                    # Drop the forced second token from the hypothesis.
                    new_fk[:] = [(s, k[:1] + k[2:]) for s, k in new_fk if len(k) >= 3]
                new_fk[:] = [(s, k[:-1] if k[-1] in searcher.strip_token_ids else k) for s, k in new_fk]
                if not searcher.partial_titles:
                    # Keep only complete titles, i.e. ending with the marker.
                    new_fk[:] = [(s, k) for s, k in new_fk if k[-1] == searcher.title_eos_token_id]
                if searcher.min_length > 0:
                    new_fk[:] = [(s, k) for s, k in new_fk if len(k) == (searcher.min_length+1)]
                new_fk[:] = [(s, [searcher.title_bos_token_id] + k if k[0] != searcher.title_bos_token_id else k) for s, k in new_fk]
                new_fk[:] = [(s, k) for s, k in new_fk if k and searcher.fm_index.get_count(k) > 0]
            if searcher.rescore and searcher.use_markers:
                input_tokens = searcher.bart_tokenizer(batch_str, padding=False)['input_ids']
                found_keys_title = rk.rescore_keys(
                    searcher.bart_title_model,
                    input_tokens,
                    found_keys_title,
                    batch_size=100,
                    length_penalty=0.0,
                    strip_from_bos=[
                        searcher.title_bos_token_id,
                        searcher.code_bos_token_id,
                        searcher.bart_model.config.decoder_start_token_id],
                    strip_from_eos=[searcher.bart_model.config.eos_token_id])
            for new_fk, fk in zip(found_keys_title, found_keys):
                fk += new_fk
        # --- Channel 4a: unconstrained-format query decoding.
        if searcher.decode_query =="free":
            batch_str = inputs
            if searcher.use_markers:
                batch_str = [i + " || query" for i in batch_str]
            if searcher.value_conditioning:
                batch_str = [i + " || +" for i in batch_str]
            batch = searcher.bart_tokenizer(batch_str, return_tensors='pt', padding=True, truncation=True)
            batch = {k: v.to(searcher.device) for k, v in batch.items()}
            found_keys_query = fm_index_generate(
                searcher.bart_model, searcher.fm_index,
                **batch,
                min_length=30,
                max_length=30,
                length_penalty=searcher.length_penalty,
                num_beams=searcher.beam,
                disable_fm_index=not constrained_generation,
                diverse_bs_groups=searcher.diverse_bs_groups,
                diverse_bs_penalty=searcher.diverse_bs_penalty,
                stop_at_count=searcher.stop_at_count,
                keep_history=True,
                topk=searcher.topk,
            )
            for new_fk, fk in zip(found_keys_query, found_keys):
                new_fk[:] = [(s, k[1:] if k[0] in searcher.strip_token_ids else k) for s, k in new_fk]
                new_fk[:] = [(s, k[:-1] if k[-1] in searcher.strip_token_ids else k) for s, k in new_fk]
                new_fk[:] = [(s, k) for s, k in new_fk if k and searcher.fm_index.get_count(k) > 0]
            if searcher.rescore and searcher.use_markers:
                input_tokens = searcher.bart_tokenizer(inputs, padding=False)['input_ids']
                found_keys_query = rk.rescore_keys(
                    searcher.bart_model,
                    input_tokens,
                    found_keys_query,
                    batch_size=100,
                    length_penalty=0.0,
                    strip_from_bos=[
                        searcher.title_bos_token_id,
                        searcher.code_bos_token_id,
                        searcher.bart_model.config.decoder_start_token_id],
                    strip_from_eos=[
                        searcher.title_eos_token_id,
                        searcher.code_eos_token_id,
                        searcher.bart_model.config.eos_token_id])
            for new_fk, fk in zip(found_keys_query, found_keys):
                fk += new_fk
        # --- Channel 4b: marker-delimited ("stable") query decoding.
        if searcher.decode_query =="stable":
            batch_str = inputs
            if searcher.use_markers:
                batch_str = [i + " || query" for i in batch_str]
            if searcher.value_conditioning:
                batch_str = [i + " || +" for i in batch_str]
            batch = searcher.bart_tokenizer(batch_str, return_tensors='pt', padding=True, truncation=True)
            batch = {k: v.to(searcher.device) for k, v in batch.items()}
            decoded_query = fm_index_generate(
                searcher.bart_model, searcher.fm_index,
                **batch,
                min_length=1,
                max_length=30,
                num_beams=searcher.beam,
                length_penalty=searcher.length_penalty,
                forced_bos_token_id=searcher.query_bos_token_id,
                eos_token_id=searcher.query_eos_token_id,
                diverse_bs_groups=searcher.diverse_bs_groups,
                diverse_bs_penalty=searcher.diverse_bs_penalty,
                keep_history=True,
                disable_fm_index=not constrained_generation,
                topk=searcher.topk,
            )
            found_keys_query = [[(sco, hyp) for sco, hyp in dec] for dec in decoded_query]
            for new_fk, fk in zip(found_keys_query, found_keys):
                if searcher.force_decoding_second_token >= 0:
                    new_fk[:] = [(s, k[:1] + k[2:]) for s, k in new_fk if len(k) >= 3]
                new_fk[:] = [(s, k[1:] if k[0] in searcher.strip_token_ids else k) for s, k in new_fk]
                new_fk[:] = [(s, k[:-1] if k[-1] in searcher.strip_token_ids else k) for s, k in new_fk]
                if not searcher.partial_titles:
                    new_fk[:] = [(s, k) for s, k in new_fk if k[-1] == searcher.query_eos_token_id]
                if searcher.min_length > 0:
                    new_fk[:] = [(s, k) for s, k in new_fk if len(k) == (searcher.min_length+1)]
                new_fk[:] = [(s, [searcher.query_bos_token_id] + k if k[0] != searcher.query_bos_token_id else k) for s, k in new_fk]
                new_fk[:] = [(s, k) for s, k in new_fk if k and searcher.fm_index.get_count(k) > 0]
            if searcher.rescore and searcher.use_markers:
                input_tokens = searcher.bart_tokenizer(batch_str, padding=False)['input_ids']
                found_keys_query = rk.rescore_keys(
                    searcher.bart_model,
                    input_tokens,
                    found_keys_query,
                    batch_size=100,
                    length_penalty=0.0,
                    strip_from_bos=[
                        searcher.title_bos_token_id,
                        searcher.query_bos_token_id,
                        searcher.code_bos_token_id,
                        searcher.bart_model.config.decoder_start_token_id],
                    strip_from_eos=[searcher.bart_model.config.eos_token_id])
            for new_fk, fk in zip(found_keys_query, found_keys):
                fk += new_fk
            # for i in range(len(found_keys)):
            #     print(batch_str[i])
            #     new_fk = found_keys[i]
            #     for s, k in new_fk:
            #         print(s, searcher.bart_tokenizer.decode(k))
        # --- Channel 5: decode document codes.
        if searcher.decode_code:
            batch_str = inputs
            if searcher.use_markers:
                batch_str = [i + " || code" for i in batch_str]
            if searcher.value_conditioning:
                batch_str = [i + " || +" for i in batch_str]
            batch = searcher.bart_tokenizer(batch_str, return_tensors='pt', padding=True, truncation=True)
            batch = {k: v.to(searcher.device) for k, v in batch.items()}
            decoded_code = fm_index_generate(
                searcher.bart_code_model, searcher.fm_index,
                **batch,
                min_length=1,
                max_length=15,
                num_beams=searcher.beam,
                length_penalty=searcher.length_penalty,
                eos_token_id=searcher.code_eos_token_id,
                diverse_bs_groups=searcher.diverse_bs_groups,
                diverse_bs_penalty=searcher.diverse_bs_penalty,
                keep_history=True,
                force_decoding_from=[searcher.code_bos_token_id],
                disable_fm_index=not constrained_generation,
            )
            found_keys_code = [[(sco, hyp) for sco, hyp in dec] for dec in decoded_code]
            for new_fk, fk in zip(found_keys_code, found_keys):
                if searcher.force_decoding_second_token >= 0:
                    new_fk[:] = [(s, k[:1] + k[2:]) for s, k in new_fk if len(k) >= 2]
                new_fk[:] = [(s, k[1:-1] if k[-1] in searcher.strip_token_ids else k[1:]) for s, k in new_fk if k]
                if not searcher.partial_code:
                    new_fk[:] = [(s, k) for s, k in new_fk if k and (k[-1] == searcher.code_eos_token_id)]
                new_fk[:] = [(s, [searcher.code_bos_token_id] + k if k[0] != searcher.code_bos_token_id else k) for s, k in new_fk if k]
                new_fk[:] = [(s, k) for s, k in new_fk if k and searcher.fm_index.get_count(k) > 0]
            if searcher.rescore and searcher.use_markers:
                input_tokens = searcher.bart_tokenizer(batch_str, padding=False)['input_ids']
                found_keys_code = rk.rescore_keys(
                    searcher.bart_code_model,
                    input_tokens,
                    found_keys_code,
                    batch_size=100,
                    length_penalty=0.0,
                    strip_from_bos=[
                        searcher.title_bos_token_id,
                        searcher.code_bos_token_id,
                        searcher.bart_model.config.decoder_start_token_id],
                    strip_from_eos=[searcher.bart_model.config.eos_token_id])
            for new_fk, fk in zip(found_keys_code, found_keys):
                fk += new_fk
            # for i in range(len(found_keys)):
            #     print(batch_str[i])
            #     new_fk = found_keys[i]
            #     for s, k in new_fk:
            #         print(s, searcher.bart_tokenizer.decode(k))
        # Marker-free configs rescore everything once, with the scorer model.
        if searcher.rescore and not searcher.use_markers:
            found_keys = rk.rescore_keys(
                searcher.bart_scorer_model,
                input_tokens,
                found_keys,
                batch_size=100,
                length_penalty=0.0,
                strip_from_bos=[
                    searcher.title_bos_token_id,
                    searcher.code_bos_token_id,
                    searcher.bart_model.config.decoder_start_token_id],
                strip_from_eos=[searcher.bart_model.config.eos_token_id])
        # Reorder each (score, key) pair into (key, score).
        found_keys = [[(n, s) for s, n in xx] for xx in found_keys]
        if searcher.unigram_scores:
            batch_str = inputs
            if searcher.use_markers:
                batch_str = [i + " || body" for i in batch_str]
            if searcher.value_conditioning:
                batch_str = [i + " || +" for i in batch_str]
            input_tokens = searcher.bart_tokenizer(batch_str, padding=False)['input_ids']
            unigram_scores = rk.compute_unigram_scores(
                searcher.bart_scorer_model,
                input_tokens,
                searcher.fm_index,
                prefix=[searcher.force_decoding_second_token] if searcher.force_decoding_second_token >= 0 else [],
            )
            found_keys = list(zip(found_keys, unigram_scores))
            return found_keys
        else:
            return found_keys
    # Stream the queries through process_batch, yielding one result per query.
    with tqdm.tqdm(total=len(queries), desc="Generating keys", disable=not searcher.progress) as bar:
        batches = ichunked(queries, searcher.batch_size)
        for batch in batches:
            for instance in process_batch(batch):
                bar.update()
                yield instance
class SEALDocument:
    """A retrieved document backed by the FM-index.

    Tokens and decoded text are fetched lazily and cached; `delim1` separates
    the title from the body and `delim2` optionally marks where the body
    proper starts (token ids, defaults match the BART setup in SEALSearcher).
    """

    def __init__(
            self, idx: int,
            score: float,
            fm_index: FMIndex,
            bart_tokenizer: BartTokenizer,
            delim1: int = 49314,
            delim2: int = None,
            keys=None,
            query=None):
        self.idx = idx
        self.score = score
        self.fm_index = fm_index
        self.bart_tokenizer = bart_tokenizer
        self.delim1 = delim1
        self.delim2 = delim2
        self.keys = keys
        self.query = query
        # Lazy caches for raw tokens and the decoded title/body.
        self._raw_tokens = None
        self._body = None
        self._title = None

    @property
    def docid(self):
        """External document id, looked up from the FM-index labels."""
        return self.fm_index.labels[self.idx]

    def id(self):
        """Internal (FM-index) document index."""
        return self.idx

    def raw_tokens(self):
        """Fetch (and cache) the document's token ids from the FM-index."""
        if self._raw_tokens is None:
            self._raw_tokens = self.fm_index.get_doc(self.idx)
        return self._raw_tokens

    def raw_text(self):
        """Decode the full token sequence, special tokens included."""
        tokens = self.raw_tokens()
        return self.bart_tokenizer.decode(tokens, clean_up_tokenization_spaces=False)

    def text(self):
        """Return (title, body) as decoded strings, cached after first call."""
        if self._body is None or self._title is None:
            tokens = self.raw_tokens()
            title_tokens, body_tokens = self.split_tokens(tokens)
            if title_tokens:
                title = self.bart_tokenizer.decode(title_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False)
            else:
                title = ""
            body = self.bart_tokenizer.decode(body_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False)
            self._title = title
            self._body = body
        return self._title, self._body

    def split_tokens(self, tokens):
        """Split token ids into (title_tokens, body_tokens) on the delimiters.

        If `delim1` is absent the whole sequence is treated as body; if
        `delim2` is set, the body starts right after its first occurrence.
        """
        if self.delim1 is None:
            title_tokens = []
            body_tokens = []
        else:
            try:
                i = tokens.index(self.delim1)
                title_tokens = tokens[:i]
                body_tokens = tokens[i+1:]
            except IndexError:
                title_tokens = []
                body_tokens = tokens
            except ValueError:
                # Delimiter not present: no title.
                title_tokens = []
                body_tokens = tokens
        if self.delim2 is not None:
            try:
                i = body_tokens.index(self.delim2) + 1
            except IndexError:
                i = 0
            except ValueError:
                i = 0
            body_tokens = body_tokens[i:]
        return title_tokens, body_tokens

    def __repr__(self):
        return f'<GRDocument: {self.idx}, "{self.raw_text()[:30]}[...]">'
class SEALSearcher:
    """Generate-then-retrieve searcher.

    A seq2seq model decodes, under FM-index constraints, n-grams ("keys")
    that are guaranteed to occur in the corpus; the keys' occurrences are
    then aggregated into ranked documents (``retrieve_from_keys``).
    """
    # Default hyper-parameters.  Each key becomes an instance attribute via
    # ``set_params`` and a CLI flag via ``add_args``.
    DEFAULTS = \
        {
            "backbone": 'facebook/bart-large',
            "fairseq_checkpoint": True,
            "length": 10,
            "min_length": 0,
            "length_penalty": 0.0,
            "scoring_length_penalty": 0.0,
            "repetition_penalty": 0.8,
            "score_exponent": 2.0,
            "beam": 15,
            "max_hits": 1500,
            "fully_score": 1500,
            "skip_frequent_keys": 10_000_000,
            "add_query_to_keys": True,
            "batch_size": 20,
            "jobs": 1,
            "progress": False,
            "free_generation": False,
            "use_fm_index_frequency": True,
            "unigram_scores": True,
            "add_best_unigrams_to_ngrams": True,
            "use_top_k_ngrams": 5000,
            "sort_by_length": False,
            "sort_by_freq": False,
            "print_n_doc": False,
            "allow_overlaps": False,
            "diverse_bs_groups": 1,
            "diverse_bs_penalty": 0.0,
            "rescore": True,
            "detokenize": True,
            "include_keys": False,
            "single_key": 0.0,
            "unigrams_ignore_free_places": False,
            "use_markers": True,
            "value_conditioning": True,
            "decode_body": True,
            "decode_titles": True,
            "decode_query": "no",
            "decode_code": False,
            "partial_code": False,
            "partial_titles": False,
            "smoothing": 5.0,
            "stop_at_count": 0,
            "topk": 0,
            "force_decoding_second_token": -1,
        }
    def __init__(
            self,
            fm_index: FMIndex,
            bart_tokenizer: BartTokenizer,
            bart_model: BartForConditionalGeneration,
            bart_scorer_model: Optional[BartForConditionalGeneration] = None,
            bart_title_model: Optional[BartForConditionalGeneration] = None,
            bart_code_model: Optional[BartForConditionalGeneration] = None,
            **params):
        """Build a searcher from an FM-index plus one or more seq2seq models.

        The scorer/title/code models all fall back to ``bart_model`` when
        not supplied.  ``params`` entries override ``DEFAULTS``.
        """
        self.fm_index = fm_index
        # Map corpus document labels to their integer positions in the index.
        self.docid2idx = {k: i for i, k in enumerate(self.fm_index.labels)}
        self.bart_tokenizer = bart_tokenizer
        self.bart_model = bart_model
        if bart_scorer_model is None:
            self.bart_scorer_model = self.bart_model
        else:
            self.bart_scorer_model = bart_scorer_model
        if bart_title_model is None:
            self.bart_title_model = self.bart_model
        else:
            self.bart_title_model = bart_title_model
        if bart_code_model is None:
            self.bart_code_model = self.bart_model
        else:
            self.bart_code_model= bart_code_model
        self.num_docs = fm_index.n_docs
        self.docids = fm_index.labels
        self.set_params(params)
        # Sentinel tokens delimiting title/query/code spans in the indexed
        # documents; their ids depend on the backbone vocabulary.
        if 'bart' in self.backbone:
            self.title_bos_token = '</s>'
            self.title_bos_token_id = 2
            self.title_eos_token = '@@'
            self.title_eos_token_id = 49314
            self.query_bos_token = '||'
            self.query_bos_token_id = 45056
            self.query_eos_token = '@@'
            self.query_eos_token_id = 49314
            self.code_bos_token = '@@'
            self.code_bos_token_id = 49314
            self.code_eos_token = '||'
            self.code_eos_token_id = 45056
            self.prepend_space = True
            self.strip_token_ids = (0, 2)
        elif 't5' in self.backbone:
            self.title_bos_token = '</s>'
            self.title_bos_token_id = 1
            self.title_eos_token = '<extra_id_99>'
            self.title_eos_token_id = 32000
            self.code_bos_token = '<extra_id_99>'
            self.code_bos_token_id = 32000
            self.code_eos_token = '<extra_id_98>'
            self.code_eos_token_id = 32001
            self.prepend_space = False
            self.strip_token_ids = (0, 1)
        else:
            raise NotImplementedError
    @property
    def device(self):
        # Device of the (main) generation model's parameters.
        return next(self.bart_model.parameters()).device
    @device.setter
    def device(self, device: str):
        # NOTE(review): only moves the main model; scorer/title/code models
        # (when distinct objects) stay where they are — confirm intended.
        self.bart_model.to(device)
    def set_params(self, params):
        """Set every DEFAULTS key as an attribute, ``params`` taking priority."""
        for key, val in self.DEFAULTS.items():
            setattr(self, key, params.get(key, val))
    @classmethod
    def add_args(cls, parser):
        """Register CLI flags: one per DEFAULTS entry plus checkpoint paths.

        Boolean defaults become switches: True -> ``--dont_<name>`` (store
        False), False -> ``--<name>`` (store True).
        """
        parser.add_argument('--fm_index', required=True, type=str)
        parser.add_argument('--checkpoint', required=False, type=str)
        parser.add_argument('--checkpoint_scorer', required=False, type=str, default=None)
        parser.add_argument('--checkpoint_title', required=False, type=str, default=None)
        parser.add_argument('--checkpoint_code', required=False, type=str, default=None)
        parser.add_argument('--device', default="cpu", type=str)
        for name, value in cls.DEFAULTS.items():
            if value is True:
                parser.add_argument(f'--dont_{name}', action="store_false", dest=name)
            elif value is False:
                parser.add_argument(f'--{name}', action="store_true")
            else:
                parser.add_argument(f'--{name}', required=False, type=type(value), default=value)
    @classmethod
    def from_args(cls, args):
        """Instantiate from parsed argparse args (see ``add_args``)."""
        params = {}
        for name, value in cls.DEFAULTS.items():
            params[name] = getattr(args, name)
        return cls.load(
            args.fm_index,
            args.checkpoint,
            bart_scorer_model_path=args.checkpoint_scorer,
            bart_title_model_path=args.checkpoint_title,
            bart_code_model_path=args.checkpoint_code,
            device=args.device,
            **params
        )
    @staticmethod
    def load_fm_index(fm_index_path: str):
        """Load an FM-index from disk, logging its resident-memory cost."""
        mem_before = _get_process_memory()
        logger.log(logging.WARN, f"initializing FM-index from {fm_index_path}")
        index = FMIndex.load(fm_index_path)
        mem_after = _get_process_memory()
        logger.log(logging.WARN, f"FM-index initialized ({(mem_after - mem_before) // 1024 ** 2} MBs)")
        return index
    @staticmethod
    def load_bart(bart_model_path: str, device: str = "cpu", backbone="facebook/bart-large", fairseq_checkpoint=True):
        """Load tokenizer + seq2seq model.

        When ``bart_model_path`` is falsy, pretrained ``backbone`` weights
        are used; otherwise weights come from a fairseq or lightning
        checkpoint depending on ``fairseq_checkpoint``.
        """
        logger.log(logging.WARN, f"initializing BART large")
        config = AutoConfig.from_pretrained(backbone)
        config.forced_bos_token_id = None
        tokenizer = AutoTokenizer.from_pretrained(backbone)
        if bart_model_path:
            model = AutoModelForSeq2SeqLM.from_config(config)
            model.resize_token_embeddings(len(tokenizer))
            logger.log(logging.WARN, f"loading weights from checkpoint: {bart_model_path}")
            if fairseq_checkpoint:
                load_state_dict_from_fairseq_checkpoint(model, bart_model_path)
            else:
                load_state_dict_from_lightning_checkpoint(model, bart_model_path)
        else:
            model = AutoModelForSeq2SeqLM.from_pretrained(backbone)
            model.resize_token_embeddings(len(tokenizer))
        model.config.forced_bos_token_id = None
        model.eval()
        # for some trained models, the mask logit is set to 0 for some reason. This ugly hack fixes it
        if hasattr(model, 'final_logits_bias'):
            model.config.add_bias_logits = True
            # Hard-ban pad/bos/mask from ever being generated.
            model.final_logits_bias[0, tokenizer.pad_token_id] = float('-inf')
            model.final_logits_bias[0, tokenizer.bos_token_id] = float('-inf')
            model.final_logits_bias[0, tokenizer.mask_token_id] = float('-inf')
        model.to(device)
        logger.log(logging.WARN, f"model successfully loaded")
        return tokenizer, model
    @classmethod
    def load(cls, fm_index_path, bart_model_path, device="cpu", **params):
        """Load index + all configured models and return a ready searcher."""
        fm_index = cls.load_fm_index(fm_index_path)
        bart_tokenizer, bart_model = cls.load_bart(
            bart_model_path,
            device,
            backbone=params.get('backbone', "facebook/bart-large"),
            fairseq_checkpoint=params.get('fairseq_checkpoint', True)
        )
        if params.get('bart_scorer_model_path') is None:
            bart_scorer_model = None
        else:
            _, bart_scorer_model = cls.load_bart(
                params.get('bart_scorer_model_path'),
                device,
                backbone=params.get('backbone', "facebook/bart-large"),
                fairseq_checkpoint=params.get('fairseq_checkpoint', True)
            )
        if params.get('bart_title_model_path') is None:
            bart_title_model = None
        else:
            _, bart_title_model = cls.load_bart(
                params.get('bart_title_model_path'),
                device,
                backbone=params.get('backbone', "facebook/bart-large"),
                fairseq_checkpoint=params.get('fairseq_checkpoint', True)
            )
        if params.get('bart_code_model_path') is None:
            bart_code_model = None
        else:
            _, bart_code_model = cls.load_bart(
                params.get('bart_code_model_path'),
                device,
                backbone=params.get('backbone', "facebook/bart-large"),
                fairseq_checkpoint=params.get('fairseq_checkpoint', True)
            )
        searcher = cls(
            fm_index,
            bart_tokenizer,
            bart_model,
            bart_scorer_model=bart_scorer_model,
            bart_title_model=bart_title_model,
            bart_code_model=bart_code_model,
            **params
        )
        return searcher
    def search(self, query: str, k: int = 10, added_documents=None, detokenize=True) -> List[SEALDocument]:
        """Retrieve the top-``k`` documents for a single query.

        NOTE(review): the ``detokenize`` argument is ignored — a literal
        ``True`` is forwarded to ``batch_search``; confirm intended.
        """
        if added_documents is not None:
            added_documents = [added_documents]
        return self.batch_search([query], k=k, added_documents=added_documents, detokenize=True)[0]
    def batch_search(self, queries, k: int = 10, added_documents=None, detokenize=None) -> List[List[SEALDocument]]:
        """Retrieve top-``k`` documents for each query in ``queries``.

        Generates keys, aggregates them into per-document scores, and wraps
        the results in ``SEALDocument`` objects (optionally detokenized).
        """
        if detokenize is None:
            detokenize = self.detokenize
        retrieved = []
        keys = self.batch_generate_keys(queries)
        if added_documents is not None:
            # Attach per-query forced documents to each key bundle.
            if self.unigram_scores:
                keys = ((kk, us, added_documents[i]) for i, (kk, us) in enumerate(keys))
            else:
                keys = ((kk, None, added_documents[i]) for i, kk in enumerate(keys))
        results, keys = zip(*self.batch_retrieve_from_keys(keys))
        # Decode every distinct key once and cache (surface form, corpus count).
        keys = list({k for kk in keys for k in kk})
        vals = self.bart_tokenizer.batch_decode([list(k) for k in keys], clean_up_tokenization_spaces=False)
        keys = {k: (v, self.fm_index.get_count(list(k))) for k, v in zip(keys, vals)}
        for query, res in zip(queries, results):
            docs = []
            # ``res`` maps doc idx -> (score, keys, _, full_tokens, _),
            # assumed ordered by decreasing score (top-k via islice).
            for idx, (score, kk, _, full, _) in islice(res.items(), k):
                doc = SEALDocument(
                    idx,
                    score,
                    self.fm_index,
                    self.bart_tokenizer,
                    delim1=self.title_eos_token_id,
                    # delim2=self.code_eos_token_id,
                    keys=None,
                    query=query
                )
                if self.include_keys:
                    for k, _ in kk:
                        if k not in keys:
                            keys[k] = (self.bart_tokenizer.decode(list(k), clean_up_tokenization_spaces=False), self.fm_index.get_count(list(k)))
                    kk = [(*keys[k], s) for k, s in kk]
                    doc.keys = kk
                doc._raw_tokens = full
                docs.append(doc)
            retrieved.append(docs)
        if detokenize:
            return self.detokenize_retrieved(retrieved)
        else:
            return retrieved
    def detokenize_retrieved(self, retrieved):
        """Fill in ``_title``/``_body`` strings for every retrieved document.

        Title and body token lists are interleaved into one flat batch so
        they can be decoded together (optionally with multiprocessing).
        NOTE(review): uses ``jobs > 2`` here but ``jobs >= 2`` elsewhere —
        confirm the threshold difference is intended.
        """
        flat = [d for dd in retrieved for d in dd]
        batch_tokens = []
        for d in flat:
            if d._raw_tokens is not None:
                title, body = d.split_tokens(d._raw_tokens)
            else:
                title, body = d.split_tokens(d.raw_tokens())
            batch_tokens.append(title)
            batch_tokens.append(body)
        if self.jobs > 2:
            batch_tokens = list(self._mp_batch_detokenize(batch_tokens))
        else:
            batch_tokens = self._batch_detokenize(batch_tokens)
        # Even positions hold titles, odd positions hold bodies.
        for i in range(len(flat)):
            j = i * 2
            flat[i]._title = batch_tokens[j]
            flat[i]._body = batch_tokens[j+1]
        return retrieved
    def generate_keys(self, query):
        # Single-query convenience wrapper over ``batch_generate_keys``.
        return next(self.batch_generate_keys([query]))
    def batch_generate_keys(self, queries):
        # Delegates to the module-level generator; constrained decoding
        # unless free generation was requested.
        return batch_generate_keys(self, queries, constrained_generation=not self.free_generation)
    def retrieve_from_keys(self, keys):
        """Aggregate one query's keys into scored documents.

        ``keys`` may be the keys alone or a tuple additionally carrying
        unigram scores and/or forced documents.  Returns
        ``(results, ngrams)`` from ``rk.aggregate_evidence``.
        """
        unigram_scores = None
        added_documents = None
        if isinstance(keys, tuple) and len(keys) == 1:
            keys = keys[0]
        elif isinstance(keys, tuple) and len(keys) == 2:
            keys, unigram_scores = keys
        elif isinstance(keys, tuple) and len(keys) == 3:
            keys, unigram_scores, added_documents = keys
        # if self.single_term_based:
        results, ngrams = rk.aggregate_evidence(
            ngrams_and_scores=keys,
            unigram_scores=unigram_scores,
            index=self.fm_index,
            max_occurrences_1=self.max_hits,
            n_docs_complete_score=self.fully_score,
            alpha=self.score_exponent,
            beta=self.repetition_penalty,
            length_penalty=self.scoring_length_penalty,
            use_fm_index_frequency=self.use_fm_index_frequency,
            add_best_unigrams_to_ngrams=self.add_best_unigrams_to_ngrams,
            use_top_k_unigrams=self.use_top_k_ngrams,
            sort_by_length=self.sort_by_length,
            sort_by_freq=self.sort_by_freq,
            smoothing=self.smoothing,
            allow_overlaps=self.allow_overlaps,
            single_key=self.single_key,
            unigrams_ignore_free_places=self.unigrams_ignore_free_places,
            tokenizer = self.bart_tokenizer)
        if DEBUG:
            for n, s in ngrams.items():
                print(s, self.bart_tokenizer.decode(n))
        return results, ngrams
    def batch_retrieve_from_keys(self, keys):
        # Dispatch to the multiprocessing path when jobs >= 2.
        if self.jobs >= 2:
            yield from self._mp_batch_retrieve_from_keys(keys)
        else:
            yield from self._batch_retrieve_from_keys(keys)
    def _mp_batch_retrieve_from_keys(self, keys):
        """Parallel retrieval: workers find this searcher via a temporary
        attribute on ``__main__`` (keyed by ``id(self)``), since the
        searcher itself is not picklable."""
        assert self.jobs >= 2
        idx = id(self)
        setattr(sys.modules['__main__'], f'_searcher_global_{idx}', self)
        with multiprocessing.Pool(self.jobs) as pool:
            for i, (res, ngrams) in enumerate(pool.imap(_retrieve_from_keys_mp_aux, tqdm.tqdm(
                [(idx, kk) for kk in keys],
                desc="Retrieving from keys",
                disable=not self.progress
            ))):
                if self.print_n_doc:
                    print(i)
                yield res, ngrams
        delattr(sys.modules['__main__'], f'_searcher_global_{idx}')
    def _batch_detokenize(self, seqs):
        # Sequential decode; empty sequences map to "".
        return [self.bart_tokenizer.decode(seq, skip_special_tokens=True, clean_up_tokenization_spaces=False).strip() if seq else "" for seq in seqs]
    def _mp_batch_detokenize(self, seqs):
        """Parallel decode via the same ``__main__``-registration trick as
        ``_mp_batch_retrieve_from_keys``."""
        assert self.jobs >= 2
        idx = id(self)
        setattr(sys.modules['__main__'], f'_searcher_global_{idx}', self)
        with multiprocessing.Pool(self.jobs) as pool:
            for i, out in enumerate(pool.imap(_detokenize_mp_aux, tqdm.tqdm(
                [(idx, seq) for seq in seqs],
                desc="Detokenizing",
                disable=not self.progress
            ))):
                if self.print_n_doc:
                    print(i)
                yield out
        delattr(sys.modules['__main__'], f'_searcher_global_{idx}')
    def _batch_retrieve_from_keys(self, keys):
        # Sequential retrieval with an optional progress bar.
        keys = tqdm.tqdm(
            keys,
            desc="Retrieving from keys",
            disable=not self.progress
        )
        for i, kk in enumerate(keys):
            if self.print_n_doc:
                print(i)
            yield self.retrieve_from_keys(kk)
    def doc(self, docid: Union[str, int]) -> Optional[SEALDocument]:
        """Fetch a document by label (str) or index (int), without a score."""
        if isinstance(docid, str):
            idx = self.docid2idx[docid]
        else:
            idx = docid
        return SEALDocument(idx, None, self.fm_index, self.bart_tokenizer, delim1=self.title_eos_token_id, delim2=self.code_eos_token_id)
def _retrieve_from_keys_mp_aux(args):
idx, keys = args
return getattr(sys.modules['__main__'], f'_searcher_global_{idx}').retrieve_from_keys(keys)
def _detokenize_mp_aux(args):
idx, seq = args
if not seq:
return ""
return getattr(sys.modules['__main__'], f'_searcher_global_{idx}').bart_tokenizer.decode(seq, skip_special_tokens=True, clean_up_tokenization_spaces=False).strip()
| 38,931 | 38.807771 | 167 | py |
MINDER | MINDER-main/seal/beam_search.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import UserDict
from typing import *
import warnings
from more_itertools import chunked
import torch
from torch import nn
import torch.distributed as dist
from transformers import LogitsProcessor, BeamScorer, BeamSearchScorer, LogitsProcessorList, StoppingCriteriaList, HammingDiversityLogitsProcessor
from transformers.generation_utils import BeamSearchOutput, validate_stopping_criteria, BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput
from transformers.generation_logits_process import TopKLogitsWarper
from seal.index import FMIndex
# BPE token ids (BART/GPT-2 vocabulary) for common English articles and
# function words; each inline comment gives the decoded surface form.
stopword_token_ids = [
    10, # a
    41, # an
    660, # An
    5, # the
    1941, # THE
    20, # The
    7, # to
    6, # and
]
class IndexBasedLogitsProcessor(LogitsProcessor):
    """
    Class that masks logit, meant to be used during decoding. The logit mask is determined by finding the range of rows
    in the FM-index that correspond to the previously decoded token ( $O(n log V)$ ), then finding all tokens in that
    interval ( $O(V log V)$ ).
    """
    def __init__(
        self,
        index: FMIndex,
        num_beams: int,
        pad_token_id: int = 0,
        eos_token_id: int = 2,
        force_decoding_from: Optional[List[int]] = None,
        stop_at_count: int = 0,
        always_allow_eos: bool = False,
        forced_bos_token_id: Optional[int] = None,
    ):
        # index: FM-index over the corpus; num_beams: beams per batch element
        # (used to reshape the flat beam dimension).
        self.index = index
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        self._num_beams = num_beams
        self.log_odds_weight = 0.0
        # Optional token prefix the decoder is treated as continuing from.
        self.force_decoding_from = force_decoding_from
        self.force_decoding_second_token = None
        self.block_initial_stopwords = False
        # When > 0: force EOS once the decoded prefix occurs this rarely.
        self.stop_at_count = stop_at_count
        self.always_allow_eos = always_allow_eos
        self.forced_bos_token_id = forced_bos_token_id
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Additive mask: -inf everywhere, 0 on tokens permitted by the index.
        mask = torch.full_like(scores, float('-inf'))
        if self.forced_bos_token_id is not None:
            if input_ids.size(1) == 1:
                # Very first step: only the forced BOS token may be produced.
                mask[:, self.forced_bos_token_id] = 0.0
                return scores + mask
            # else:
            #     input_ids = input_ids[:, 1:]
        if input_ids.size(1) == 1:
            # First content step: allow every token occurring in the corpus.
            distinct = self.index.occurring_distinct
            distinct = torch.LongTensor(distinct).to(scores.device)
            mask[:, distinct] = 0.0
        else:
            # Batch all FM-index range queries (one per beam) together.
            input_ids_list = input_ids.view(-1, self._num_beams, input_ids.shape[-1]).tolist()
            lows = []
            highs = []
            fm_index_counts = []
            for batch_id, beam_sent in enumerate(input_ids_list):
                for beam_id, sent in enumerate(beam_sent):
                    if sent[-1] in (self.eos_token_id, self.pad_token_id):
                        # Finished beam: empty range, no continuations needed.
                        low = 0
                        high = 0
                        count = 0
                    elif self.force_decoding_from is not None:
                        # Query with the forced prefix prepended (sent[1:]
                        # drops the decoder-start token).
                        low, high = self.index.get_range(self.force_decoding_from + sent[1:])
                        count = self.index.get_count(self.force_decoding_from + sent[1:-1])
                    else:
                        low, high = self.index.get_range(sent[1:])
                        count = self.index.get_count(sent[1:-1])
                    lows.append(low)
                    highs.append(high)
                    fm_index_counts.append(count)
            fm_index_result = self.index.get_distinct_count_multi(lows, highs)
            # Reverse both lists so .pop() below consumes them in the
            # original beam order.
            fm_index_result = fm_index_result[::-1]
            fm_index_counts = fm_index_counts[::-1]
            for batch_id, beam_sent in enumerate(input_ids_list):
                for beam_id, sent in enumerate(beam_sent):
                    if self.stop_at_count > 0 and fm_index_counts[-1] <= self.stop_at_count:
                        # Prefix is already rare enough: only EOS is allowed.
                        fm_index_result.pop()
                        fm_index_counts.pop()
                        distinct = [self.eos_token_id]
                    elif sent[-1] == self.eos_token_id:
                        fm_index_result.pop()
                        fm_index_counts.pop()
                        distinct = [self.pad_token_id]
                    elif sent[-1] == self.pad_token_id:
                        fm_index_result.pop()
                        fm_index_counts.pop()
                        distinct = [self.pad_token_id]
                    else:
                        # Allow exactly the corpus continuations of this beam.
                        fm_index_counts.pop()
                        distinct, _ = fm_index_result.pop()
                    distinct = torch.LongTensor(distinct).to(scores.device)
                    mask[batch_id * self._num_beams + beam_id, distinct] = 0
        if self.always_allow_eos:
            mask[:, self.eos_token_id] = 0.0
        return scores + mask
def constrained_beam_search(
    model,
    input_ids: torch.LongTensor,
    beam_scorer: BeamScorer,
    logits_processor: Optional[LogitsProcessorList] = None,
    constrained_decoding_processor: Optional[IndexBasedLogitsProcessor] = None,
    stopping_criteria: Optional[StoppingCriteriaList] = None,
    max_length: Optional[int] = None,
    pad_token_id: Optional[int] = None,
    eos_token_id: Optional[int] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    output_scores: Optional[bool] = None,
    return_dict_in_generate: Optional[bool] = None,
    synced_gpus: Optional[bool] = None,
    sample: bool = False,
    topk: int = 0,
    **model_kwargs,
) -> Union[BeamSearchOutput, torch.LongTensor]:
    """Beam search where the FM-index constraint selects candidates.

    Adapted from ``transformers`` ``generation_utils.beam_search``: the
    ``constrained_decoding_processor`` masks the scores used to PICK the
    top candidates, while the accumulated/returned beam scores remain the
    unconstrained log-probabilities.  ``sample=True`` switches to ancestral
    sampling from the constrained distribution; ``topk`` optionally applies
    top-k warping to the raw logits first.
    """
    if topk > 0:
        topk_warper = TopKLogitsWarper(topk)
    # init values
    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
    if max_length is not None:
        warnings.warn(
            "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
            UserWarning,
        )
        stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
    if len(stopping_criteria) == 0:
        warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning)
    pad_token_id = pad_token_id if pad_token_id is not None else model.config.pad_token_id
    eos_token_id = eos_token_id if eos_token_id is not None else model.config.eos_token_id
    output_scores = output_scores if output_scores is not None else model.config.output_scores
    output_attentions = output_attentions if output_attentions is not None else model.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else model.config.output_hidden_states
    )
    return_dict_in_generate = (
        return_dict_in_generate if return_dict_in_generate is not None else model.config.return_dict_in_generate
    )
    batch_size = len(beam_scorer._beam_hyps)
    num_beams = beam_scorer.num_beams
    batch_beam_size, cur_len = input_ids.shape
    if num_beams * batch_size != batch_beam_size:
        raise ValueError(
            f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
        )
    # init attention / hidden states / scores tuples
    scores = () if (return_dict_in_generate and output_scores) else None
    beam_indices = (
        tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
    )
    decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
    cross_attentions = () if (return_dict_in_generate and output_attentions) else None
    decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
    # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
    if return_dict_in_generate and model.config.is_encoder_decoder:
        encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
        encoder_hidden_states = (
            model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
        )
    # Only beam 0 starts "alive"; the rest are suppressed so the first step
    # does not select num_beams copies of the same hypothesis.
    beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
    beam_scores[:, 1:] = -1e9
    beam_scores = beam_scores.view((batch_size * num_beams,))
    this_peer_finished = False  # used by synced_gpus only
    # Main decoding loop (mirrors transformers' beam_search).
    while True:
        if synced_gpus:
            # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
            # The following logic allows an early break if all peers finished generating their sequence
            this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
            # send 0.0 if we finished, 1.0 otherwise
            dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
            # did all peers finish? the reduced sum will be 0.0 then
            if this_peer_finished_flag.item() == 0.0:
                break
        model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)
        outputs = model(
            **model_inputs,
            return_dict=True,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        if synced_gpus and this_peer_finished:
            cur_len = cur_len + 1
            continue  # don't waste resources running the code we don't need
        next_token_logits = outputs.logits[:, -1, :]
        # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id`
        # cannot be generated both before and after the `nn.functional.log_softmax` operation.
        next_token_logits = model.adjust_logits_during_generation(next_token_logits, cur_len=cur_len)
        if topk:
            next_token_logits = topk_warper(input_ids, next_token_logits)
        next_token_scores = nn.functional.log_softmax(
            next_token_logits, dim=-1
        )  # (batch_size * num_beams, vocab_size)
        next_token_scores_processed = logits_processor(input_ids, next_token_scores)
        next_token_scores_no_prev = next_token_scores_processed
        next_token_scores = next_token_scores_no_prev + beam_scores[:, None].expand_as(next_token_scores)
        if constrained_decoding_processor is not None:
            # Constrained scores are used only to CHOOSE candidates below;
            # the unconstrained scores are what gets accumulated/returned.
            next_token_scores_constrained_no_prev = constrained_decoding_processor(input_ids, next_token_scores_processed)
            next_token_scores_constrained = next_token_scores_constrained_no_prev + beam_scores[:, None].expand_as(next_token_scores)
            # if return_masked_scores:
            #     next_token_scores = next_token_scores_constrained
        else:
            next_token_scores_constrained_no_prev = next_token_scores_no_prev
            next_token_scores_constrained = next_token_scores
        # Store scores, attentions and hidden_states when required
        if return_dict_in_generate:
            if output_scores:
                scores += (next_token_scores_processed,)
            if output_attentions:
                decoder_attentions += (
                    (outputs.decoder_attentions,) if model.config.is_encoder_decoder else (outputs.attentions,)
                )
                if model.config.is_encoder_decoder:
                    cross_attentions += (outputs.cross_attentions,)
            if output_hidden_states:
                decoder_hidden_states += (
                    (outputs.decoder_hidden_states,)
                    if model.config.is_encoder_decoder
                    else (outputs.hidden_states,)
                )
        # reshape for beam search
        vocab_size = next_token_scores.shape[-1]
        if sample:
            # Ancestral sampling: each "beam" draws one token from its own
            # constrained next-token distribution.
            next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
            weights = next_token_scores_constrained_no_prev.view(batch_size * num_beams, vocab_size).log_softmax(-1).exp()
            # Rows whose constrained distribution is all -inf become NaN;
            # force those beams to emit EOS.
            nans = torch.isnan(weights.sum(-1))
            weights[nans, :] = 0.0
            weights[nans, eos_token_id] = 1.0
            next_tokens = torch.multinomial(weights, 1, replacement=True).view(batch_size, 1 * num_beams)
            next_token_scores = next_token_scores.gather(-1, next_tokens)
            # next_token_scores = next_token_scores.reshape(batch_size, num_beams, 1)
            # next_token_scores[:, :, :] = 0.0
            # next_token_scores = next_token_scores.reshape(batch_size, 1 * num_beams)
        else:
            # Standard beam step: top 2*num_beams candidates by CONSTRAINED
            # score, then gather their unconstrained scores.
            next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
            next_token_scores_constrained = next_token_scores_constrained.view(batch_size, num_beams * vocab_size)
            next_token_scores_constrained, next_tokens = torch.topk(
                next_token_scores_constrained, 2 * num_beams, dim=1, largest=True, sorted=True
            )
            next_token_scores = next_token_scores.gather(-1, next_tokens)
        # Decompose flat indices into (beam index, token id).
        next_indices = (next_tokens / vocab_size).long()
        next_tokens = next_tokens % vocab_size
        # stateless
        beam_outputs = beam_scorer.process(
            input_ids,
            next_token_scores,
            next_tokens,
            next_indices,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
        )
        beam_scores = beam_outputs["next_beam_scores"]
        beam_next_tokens = beam_outputs["next_beam_tokens"]
        beam_idx = beam_outputs["next_beam_indices"]
        input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
        model_kwargs = model._update_model_kwargs_for_generation(
            outputs, model_kwargs, is_encoder_decoder=model.config.is_encoder_decoder
        )
        if model_kwargs["past"] is not None:
            model_kwargs["past"] = model._reorder_cache(model_kwargs["past"], beam_idx)
        if return_dict_in_generate and output_scores:
            beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
        # increase cur_len
        cur_len = cur_len + 1
        if beam_scorer.is_done or stopping_criteria(input_ids, scores):
            if not synced_gpus:
                break
            else:
                this_peer_finished = True
    sequence_outputs = beam_scorer.finalize(
        input_ids,
        beam_scores,
        next_tokens,
        next_indices,
        pad_token_id=pad_token_id,
        eos_token_id=eos_token_id,
        max_length=stopping_criteria.max_length,
    )
    if return_dict_in_generate:
        if not output_scores:
            sequence_outputs["sequence_scores"] = None
        else:
            num_return_sequences = beam_scorer.num_beam_hyps_to_keep
            # return only as many indices as sequences
            beam_indices = tuple(
                (beam_indices[i * num_beams : i * num_beams + num_return_sequences] for i in range(batch_size))
            )
            beam_indices = sum(beam_indices, ())
        if model.config.is_encoder_decoder:
            return BeamSearchEncoderDecoderOutput(
                sequences=sequence_outputs["sequences"],
                sequences_scores=sequence_outputs["sequence_scores"],
                scores=scores,
                beam_indices=beam_indices,
                encoder_attentions=encoder_attentions,
                encoder_hidden_states=encoder_hidden_states,
                decoder_attentions=decoder_attentions,
                cross_attentions=cross_attentions,
                decoder_hidden_states=decoder_hidden_states,
            )
        else:
            return BeamSearchDecoderOnlyOutput(
                sequences=sequence_outputs["sequences"],
                sequences_scores=sequence_outputs["sequence_scores"],
                scores=scores,
                beam_indices=beam_indices,
                attentions=decoder_attentions,
                hidden_states=decoder_hidden_states,
            )
    else:
        return sequence_outputs["sequences"]
@torch.inference_mode()
def fm_index_generate(
    model,
    index: FMIndex,
    input_ids: torch.LongTensor,
    attention_mask: torch.LongTensor,
    min_length: int = 3,
    max_length: int = 25,
    length_penalty: float = 1.0,
    num_beams: int = 3,
    diverse_bs_groups: int = 1,
    diverse_bs_penalty: float = 0.0,
    eos_token_id: Optional[int] = None,
    force_decoding_from: Optional[List[int]] = None,
    always_allow_eos: bool = False,
    keep_history: bool = False,
    disable_fm_index: bool = False,
    sample: bool = False,
    stop_at_count: int = 0,
    topk: int = 0,
    transformers_output: bool = False,
    **kwargs,
):
    """Generate sequences with ``model``, constrained to the FM-index.

    Unless ``transformers_output=True``, returns one list per input of
    ``(length_penalized_score, token_id_list)`` hypotheses.  ``sample=True``
    draws ``num_beams`` independent samples (single-beam decoders) instead
    of running beam search; ``keep_history=True`` records every candidate
    hypothesis via ``BeamSearchScorerWithMemory``.
    """
    if 'forced_bos_token_id' in kwargs:
        forced_bos_token_id = kwargs.pop('forced_bos_token_id')
    else:
        forced_bos_token_id = model.config.forced_bos_token_id
    if sample:
        # Sampling is implemented as num_beams independent 1-beam decoders:
        # duplicate the inputs and shrink the beam to 1.
        orig_num_beams = num_beams
        input_ids = input_ids.repeat(num_beams, 1)
        attention_mask = attention_mask.repeat(num_beams, 1)
        num_beams = 1
    device = input_ids.device
    if eos_token_id is None:
        eos_token_id = model.config.eos_token_id
    logits_processor = model._get_logits_processor(
        encoder_input_ids=input_ids,
        repetition_penalty=None,
        no_repeat_ngram_size=0,
        encoder_no_repeat_ngram_size=0,
        bad_words_ids=None,
        min_length=min_length,
        max_length=max_length,
        eos_token_id=None,
        prefix_allowed_tokens_fn=None,
        forced_bos_token_id=forced_bos_token_id,
        forced_eos_token_id=None,
        num_beams=num_beams,
        num_beam_groups=1,
        diversity_penalty=0.0,
        remove_invalid_values=True)
    if diverse_bs_groups > 1 and diverse_bs_penalty > 0.0:
        logits_processor.append(
            HammingDiversityLogitsProcessor(
                diversity_penalty=diverse_bs_penalty,
                num_beams=num_beams,
                num_beam_groups=diverse_bs_groups,
            )
        )
    if not disable_fm_index:
        constrained_decoding_processor = IndexBasedLogitsProcessor(
            num_beams=num_beams // diverse_bs_groups,
            index=index,
            pad_token_id=model.config.pad_token_id,
            eos_token_id=eos_token_id or model.config.eos_token_id,
            force_decoding_from=force_decoding_from,
            stop_at_count=stop_at_count,
            always_allow_eos=always_allow_eos,
            forced_bos_token_id=forced_bos_token_id,
        )
        # With diverse beam search the constraint must run inside HF's
        # group_beam_search, so it is appended to the regular processors.
        if diverse_bs_groups > 1:
            logits_processor.append(constrained_decoding_processor)
    else:
        constrained_decoding_processor = None
    stopping_criteria = model._get_stopping_criteria(
        max_length=max_length,
        max_time=None,
        #max_new_tokens=None,
        #start_length=None
    )
    model_kwargs = model._prepare_encoder_decoder_kwargs_for_generation(
        input_ids, {'attention_mask': attention_mask})
    model_kwargs['use_cache'] = True
    decoder_input_ids = model._prepare_decoder_input_ids_for_generation(
        batch_size=input_ids.size(0),
        decoder_start_token_id=model.config.decoder_start_token_id,
        bos_token_id=model.config.bos_token_id,
    )
    if keep_history:
        # Memory-keeping scorer records all candidates, not just finished ones.
        beam_scorer = BeamSearchScorerWithMemory(
            batch_size=decoder_input_ids.shape[0],
            num_beams=num_beams,
            device=device,
            length_penalty=length_penalty,
            do_early_stopping=False,
            num_beam_hyps_to_keep=num_beams,
            min_length=min_length,
            max_length=max_length,
            num_beam_groups=diverse_bs_groups,
        )
    else:
        beam_scorer = BeamSearchScorer(
            batch_size=decoder_input_ids.shape[0],
            num_beams=num_beams,
            device=device,
            length_penalty=length_penalty,
            do_early_stopping=False,
            num_beam_hyps_to_keep=num_beams,
            num_beam_groups=diverse_bs_groups,
        )
    decoder_input_ids, model_kwargs = model._expand_inputs_for_generation(
        decoder_input_ids,
        expand_size=num_beams,
        is_encoder_decoder=True,
        **model_kwargs)
    if diverse_bs_groups > 1:
        out = model.group_beam_search(
            input_ids=decoder_input_ids,
            beam_scorer=beam_scorer,
            logits_processor=logits_processor,
            stopping_criteria=stopping_criteria,
            output_scores=True,
            pad_token_id=model.config.pad_token_id,
            eos_token_id=eos_token_id,
            **model_kwargs)
    else:
        out = constrained_beam_search(
            model,
            input_ids=decoder_input_ids,
            beam_scorer=beam_scorer,
            logits_processor=logits_processor,
            constrained_decoding_processor=constrained_decoding_processor,
            stopping_criteria=stopping_criteria,
            output_scores=True,
            pad_token_id=model.config.pad_token_id,
            eos_token_id=eos_token_id,
            sample=sample,
            topk=topk,
            **model_kwargs)
    if transformers_output:
        return out
    # Extract (length-penalized score, token ids) pairs from the scorer's
    # hypotheses; -inf-scored placeholders are dropped.  For sampling,
    # regroup the duplicated single-beam batches per original input.
    if sample:
        out = [[(h[0] * h[1].size(0) ** length_penalty, h[1].tolist()) for b in bb for h in b.beams if h[0] > float('-inf')] for bb in chunked(beam_scorer._beam_hyps, orig_num_beams)]
    else:
        out = [[(h[0] * h[1].size(0) ** length_penalty, h[1].tolist()) for h in b.beams if h[0] > float('-inf')] for b in beam_scorer._beam_hyps]
    return out
class BeamSearchScorerWithMemory(BeamScorer):
    """Beam-search scorer that remembers *every* expanded hypothesis.

    Unlike the stock HuggingFace ``BeamSearchScorer``, ``process`` adds each
    candidate continuation to a per-sentence ``BeamHypothesesWithMemory``
    container regardless of whether it ends in EOS, so the full set of partial
    hypotheses can be harvested after generation by reading ``_beam_hyps``.
    ``finalize`` consequently returns only placeholder tensors.
    """

    def __init__(
        self,
        batch_size: int,
        num_beams: int,
        device: torch.device,
        length_penalty: Optional[float] = 1.0,
        do_early_stopping: Optional[bool] = False,
        num_beam_hyps_to_keep: Optional[int] = 1,
        num_beam_groups: Optional[int] = 1,
        min_length: Optional[int] = 15,
        max_length: Optional[int] = 25,
        **kwargs,
    ):
        self.num_beams = num_beams
        self.device = device
        self.length_penalty = length_penalty
        self.do_early_stopping = do_early_stopping
        self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
        self.num_beam_groups = num_beam_groups
        # Beams are split evenly across groups (diverse/group beam search).
        self.group_size = self.num_beams // self.num_beam_groups
        self.min_length = min_length
        self.max_length = max_length
        self._is_init = False
        # One hypothesis memory per input sentence.
        self._beam_hyps = [
            BeamHypothesesWithMemory(
                num_beams=self.num_beams,
                length_penalty=self.length_penalty,
                early_stopping=self.do_early_stopping,
                min_length=self.min_length,
                max_length=self.max_length)
            for _ in range(batch_size)
        ]
        # Per-sentence "generation finished" flags.
        self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device)

        if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
            raise ValueError(
                f"`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` "
                f"has to be divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
            )

        if "max_length" in kwargs:
            warnings.warn(
                "Passing `max_length` to BeamSearchScorer is deprecated and has no effect."
                "`max_length` should be passed directly to `beam_search(...)`, `beam_sample(...)`"
                ",or `group_beam_search(...)`."
            )

    @property
    def is_done(self) -> bool:
        """True once every sentence in the batch has finished generating."""
        return self._done.all()

    def process(
        self,
        input_ids: torch.LongTensor,
        next_scores: torch.FloatTensor,
        next_tokens: torch.LongTensor,
        next_indices: torch.LongTensor,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
    ) -> Tuple[torch.Tensor]:
        """Select the next `group_size` beams per sentence and record candidates.

        Every candidate continuation (EOS or not) is appended to the sentence's
        `BeamHypothesesWithMemory`; only non-EOS candidates are used to extend
        the active beams for the next step.
        """
        cur_len = input_ids.shape[-1]
        batch_size = len(self._beam_hyps)
        if not (batch_size == (input_ids.shape[0] // self.group_size)):
            if self.num_beam_groups > 1:
                raise ValueError(
                    f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
                    f"size of {self.group_size} is expected by the beam scorer."
                )
            else:
                raise ValueError(
                    f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
                    f"{self.group_size} is expected by the beam scorer."
                )

        device = input_ids.device
        next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
        next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
        next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)

        for batch_idx, beam_hyp in enumerate(self._beam_hyps):
            if self._done[batch_idx]:
                if self.num_beams < len(beam_hyp):
                    raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
                if eos_token_id is None or pad_token_id is None:
                    raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
                # pad the batch
                next_beam_scores[batch_idx, :] = 0
                next_beam_tokens[batch_idx, :] = pad_token_id
                next_beam_indices[batch_idx, :] = 0
                continue

            # next tokens for this sentence
            beam_idx = 0
            broken = False  # set once the next-step beam is full
            for beam_token_rank, (next_token, next_score, next_index) in enumerate(
                zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
            ):
                batch_beam_idx = batch_idx * self.group_size + next_index
                # Memorize *every* candidate continuation, EOS or not.
                beam_hyp.add(
                    torch.cat([
                        input_ids[batch_beam_idx],
                        next_token.view(1),
                    ]),
                    next_score.item(),
                )
                # add to generated hypotheses if end of sentence
                if broken:
                    pass  # beam already full: candidate was only memorized above
                elif (eos_token_id is not None) and (next_token.item() == eos_token_id):
                    pass  # EOS candidates are memorized but never extend a beam
                else:
                    # add next predicted token since it is not eos_token
                    next_beam_scores[batch_idx, beam_idx] = next_score
                    next_beam_tokens[batch_idx, beam_idx] = next_token
                    next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
                    beam_idx += 1

                # once the beam for next step is full, don't add more tokens to it.
                if beam_idx == self.group_size:
                    broken = True

            if beam_idx < self.group_size:
                raise ValueError(
                    f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id: {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
                )

            # Check if we are done so that we can save a pad step if all(done)
            self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
                next_scores[batch_idx].max().item(), cur_len
            )

        return UserDict(
            {
                "next_beam_scores": next_beam_scores.view(-1),
                "next_beam_tokens": next_beam_tokens.view(-1),
                "next_beam_indices": next_beam_indices.view(-1),
            }
        )

    def finalize(
        self,
        input_ids: torch.LongTensor,
        final_beam_scores: torch.FloatTensor,
        final_beam_tokens: torch.LongTensor,
        final_beam_indices: torch.LongTensor,
        max_length: int,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
    ) -> Tuple[torch.LongTensor]:
        """Flush the surviving beams into the per-sentence memories.

        NOTE: the returned "sequences" tensor is an *uninitialized placeholder*
        (`input_ids.new(..., 3)`) and "sequence_scores" are all zeros; callers
        are expected to read the collected hypotheses from `_beam_hyps`.
        """
        batch_size = len(self._beam_hyps)
        # Add every still-open beam to its sentence's memory.
        for batch_idx, beam_hyp in enumerate(self._beam_hyps):
            for beam_id in range(self.num_beams):
                batch_beam_idx = batch_idx * self.num_beams + beam_id
                final_score = final_beam_scores[batch_beam_idx].item()
                final_tokens = input_ids[batch_beam_idx]
                beam_hyp.add(
                    final_tokens.clone(),
                    final_score)
        decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, 3)
        best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
        return UserDict(
            {
                "sequences": decoded,
                "sequence_scores": best_scores,
            }
        )
class BeamHypothesesWithMemory:
    """Accumulator that keeps *every* hypothesis it is given.

    Each call to :meth:`add` stores a ``(length-normalised score, tokens)``
    pair in ``beams``; nothing is ever pruned, which is what lets callers
    harvest all partial hypotheses after generation.
    """

    def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, min_length: int, max_length: int):
        # Generation limits.
        self.min_length = min_length
        self.max_length = max_length
        # Beam-search configuration.
        self.num_beams = num_beams
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        # Stored hypotheses: list of (normalised score, token tensor) tuples.
        self.beams = []
        self.worst_score = 1e9
        self._best = None

    def __len__(self):
        """Number of hypotheses stored so far."""
        return len(self.beams)

    def add(self, hyp: torch.LongTensor, sum_logprobs: float):
        """Store ``hyp`` together with its length-penalised score."""
        normalised = sum_logprobs / (hyp.size(0) ** self.length_penalty)
        self.beams.append((normalised, hyp))

    def is_done(self, best_sum_logprobs: float, cur_len: int) -> bool:
        """Generation for this sentence stops only once ``max_length`` is hit."""
        return cur_len >= self.max_length
| 31,228 | 40.090789 | 183 | py |
MINDER | MINDER-main/seal/search.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from more_itertools import chunked
from seal.retrieval import SEALSearcher
from seal.data import TopicsFormat, OutputFormat, get_query_iterator, get_output_writer
if __name__ == "__main__":
    # CLI entry point: run SEAL retrieval over a topic set and write the hits
    # in the requested output format (TREC by default).
    import argparse

    parser = argparse.ArgumentParser()
    # NOTE(review): --hybrid and --remove_duplicates are parsed here but not
    # read in this script — presumably consumed by SEALSearcher.from_args; verify.
    parser.add_argument("--hybrid", default="none", choices=["none", "ensemble", "recall", "recall-ensemble"])
    parser.add_argument("--topics", type=str, metavar="topic_name", required=True, help="Name of topics.")
    parser.add_argument("--hits", type=int, metavar="num", required=False, default=100, help="Number of hits.")
    parser.add_argument(
        "--topics_format",
        type=str,
        metavar="format",
        default=TopicsFormat.DEFAULT.value,
        help=f"Format of topics. Available: {[x.value for x in list(TopicsFormat)]}",
    )
    parser.add_argument(
        "--output_format",
        type=str,
        metavar="format",
        default=OutputFormat.TREC.value,
        help=f"Format of output. Available: {[x.value for x in list(OutputFormat)]}",
    )
    parser.add_argument("--output", type=str, metavar="path", help="Path to output file.")
    parser.add_argument(
        "--max_passage", action="store_true", default=False, help="Select only max passage from document."
    )
    parser.add_argument(
        "--max_passage_hits",
        type=int,
        metavar="num",
        required=False,
        default=100,
        help="Final number of hits when selecting only max passage.",
    )
    parser.add_argument(
        "--max_passage_delimiter",
        type=str,
        metavar="str",
        required=False,
        default="#",
        help="Delimiter between docid and passage id.",
    )
    parser.add_argument("--remove_duplicates", action="store_true", default=False, help="Remove duplicate docs.")
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--keep_samples", type=int, default=None)
    parser.add_argument("--chunked", type=int, default=0)
    # Let the searcher register its own CLI options.
    SEALSearcher.add_args(parser)
    args = parser.parse_args()
    print(args)

    query_iterator = get_query_iterator(args.topics, TopicsFormat(args.topics_format))
    output_writer = get_output_writer(
        args.output,
        OutputFormat(args.output_format),
        "w",
        max_hits=args.hits,
        tag="SEAL",
        topics=query_iterator.topics,
        use_max_passage=args.max_passage,
        max_passage_delimiter=args.max_passage_delimiter,
        max_passage_hits=args.max_passage_hits,
    )

    # --debug: restrict the run to the first 100 topics.
    if args.debug:
        query_iterator.order = query_iterator.order[:100]
        query_iterator.topics = {topic: query_iterator.topics[topic] for topic in query_iterator.order}

    # --keep_samples: deterministic random subsample of the topics (seed 42).
    if args.keep_samples is not None and args.keep_samples < len(query_iterator.order):
        random.seed(42)
        random.shuffle(query_iterator.order)
        query_iterator.order = query_iterator.order[: args.keep_samples]
        query_iterator.topics = {topic: query_iterator.topics[topic] for topic in query_iterator.order}

    searcher = SEALSearcher.from_args(args)

    with output_writer:
        if args.chunked <= 0:
            # Single batch over all queries.
            topic_ids, texts = zip(*query_iterator)
            for topic_id, hits in zip(topic_ids, searcher.batch_search(texts, k=args.hits)):
                output_writer.write(topic_id, hits)
        else:
            # Process queries in chunks of args.chunked to bound memory use.
            for batch_query_iterator in chunked(query_iterator, args.chunked):
                topic_ids, texts = zip(*batch_query_iterator)
                for topic_id, hits in zip(topic_ids, searcher.batch_search(texts, k=args.hits)):
                    output_writer.write(topic_id, hits)
| 3,852 | 37.148515 | 113 | py |
MINDER | MINDER-main/seal/evaluate_output_msmarco.py | """
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
"""
import re
import sys
import statistics
from collections import Counter
MaxMRRRank = 10
def load_reference_from_stream(f):
    """Load reference relevant passages from an open qrels stream.

    Each line has whitespace-separated fields ``qid iteration pid rel``; only
    field 0 (query id) and field 2 (passage id) are used.

    Args:
        f (stream): stream to load.
    Returns:
        qids_to_relevant_passageids (dict): dictionary mapping from query_id
        (int) to relevant passages (list of ints).
    Raises:
        IOError: if a line cannot be parsed.
    """
    qids_to_relevant_passageids = {}
    for l in f:
        try:
            # str.split() collapses runs of tabs/spaces, which is more robust
            # than the old re.split('[\t\s]', ...) (that produced empty fields
            # on consecutive delimiters).
            fields = l.strip().split()
            qid = int(fields[0])
            pid = int(fields[2])
        except (IndexError, ValueError) as err:
            # Narrow except: only malformed lines are reported; control-flow
            # exceptions (e.g. KeyboardInterrupt) are no longer swallowed.
            raise IOError('\"%s\" is not valid format' % l) from err
        # setdefault avoids the double-lookup if/else dance.
        qids_to_relevant_passageids.setdefault(qid, []).append(pid)
    return qids_to_relevant_passageids
def load_reference(path_to_reference):
    """Load the reference relevant passages from a qrels file on disk.

    Args:
        path_to_reference (str): path to the qrels file to load.
    Returns:
        dict: mapping from query_id (int) to relevant passage ids (list of ints).
    """
    with open(path_to_reference, 'r') as qrels_file:
        return load_reference_from_stream(qrels_file)
def load_candidate_from_stream(f):
    """Load candidate (run) data from a stream.

    Each line has tab-separated fields ``qid pid rank`` with rank in
    ``[1, 1000]``.  Unfilled ranks remain 0.

    Args:
        f (stream): stream to load.
    Returns:
        qid_to_ranked_candidate_passages (dict): dictionary mapping from
        query_id (int) to a list of 1000 passage ids (int) ranked by relevance
        and importance.
    Raises:
        IOError: if a line cannot be parsed or its rank is out of range.
    """
    qid_to_ranked_candidate_passages = {}
    for l in f:
        try:
            fields = l.strip().split('\t')
            qid = int(fields[0])
            pid = int(fields[1])
            rank = int(fields[2])
            # Previously rank=0 silently wrote slot -1 (the *last* position)
            # and rank>1000 raised a misleading IndexError; validate instead.
            if not 1 <= rank <= 1000:
                raise ValueError("rank out of range [1, 1000]")
        except (IndexError, ValueError) as err:
            # Narrow except: only malformed lines are reported.
            raise IOError('\"%s\" is not valid format' % l) from err
        # By default, all PIDs in the list of 1000 are 0. Only override those that are given.
        ranked = qid_to_ranked_candidate_passages.setdefault(qid, [0] * 1000)
        ranked[rank - 1] = pid
    return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
    """Load candidate (run) data from a file on disk.

    Args:
        path_to_candidate (str): path to the run file to load.
    Returns:
        dict: mapping from query_id (int) to a list of 1000 passage ids (int)
        ranked by relevance and importance.
    """
    with open(path_to_candidate, 'r') as run_file:
        return load_candidate_from_stream(run_file)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the dictionaries.

    Verifies that no passage is ranked more than once for the same query
    (the ``0`` entries used to pad rankings shorter than 1000 are ignored).

    Args:
        qids_to_relevant_passageids (dict): query -> relevant passage ids,
            as read in with load_reference or load_reference_from_stream.
        qids_to_ranked_candidate_passages (dict): query -> ranked candidates.
    Returns:
        bool, str: whether the run is allowed, and a message describing the
        (last) problem found, or '' if none.
    """
    message = ''
    allowed = True
    # Check that we do not have multiple passages per query.
    for qid in qids_to_ranked_candidate_passages:
        counts = Counter(qids_to_ranked_candidate_passages[qid])
        # Ignore the 0 placeholders; previously a padding 0 could be the PID
        # reported in the message even though the real duplicate was another id.
        duplicate_pids = {pid for pid, count in counts.items() if count > 1 and pid != 0}
        if duplicate_pids:
            message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
                qid=qid, pid=sorted(duplicate_pids)[0])
            allowed = False
    return allowed, message
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Compute MRR@10 and Recall@{5,20,100} over the scored queries.

    Args:
        qids_to_relevant_passageids (dict): query -> relevant passage ids,
            as read in with load_reference or load_reference_from_stream.
        qids_to_ranked_candidate_passages (dict): query -> ranked candidates.
    Returns:
        dict: dictionary of metrics {'MRR @10': ..., 'Recalll @5': ...,
        'Recalll @20': ..., 'Recalll @100': ..., 'QueriesRanked': ...}.
    Raises:
        IOError: if no candidate query id matches the reference.
    """
    all_scores = {}
    MRR = 0
    ranking = []
    recall_num = [0.0] * 100
    for qid in qids_to_ranked_candidate_passages:
        if qid in qids_to_relevant_passageids:
            ranking.append(0)
            # set() gives O(1) membership tests inside the loops below.
            target_pid = set(qids_to_relevant_passageids[qid])
            candidate_pid = qids_to_ranked_candidate_passages[qid]
            # MRR@10: reciprocal rank of the first relevant passage in the
            # top MaxMRRRank (bounded so short candidate lists cannot raise).
            for i in range(min(MaxMRRRank, len(candidate_pid))):
                if candidate_pid[i] in target_pid:
                    MRR += 1 / (i + 1)
                    ranking[-1] = i + 1
                    break
            # Recall histogram over the top 100 candidates.
            # Fixed off-by-one: range(len(candidate_pid) - 1) skipped the
            # last candidate in the list.
            for i in range(min(100, len(candidate_pid))):
                if candidate_pid[i] in target_pid:
                    recall_num[i] += 1
    if len(ranking) == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")

    MRR = MRR / len(qids_to_relevant_passageids)
    all_scores['MRR @10'] = MRR
    # NOTE: the 'Recalll' (sic) keys are kept as-is for backward compatibility
    # with downstream consumers of this dictionary.
    all_scores['Recalll @5'] = sum(recall_num[:5]) / len(qids_to_relevant_passageids)
    all_scores['Recalll @20'] = sum(recall_num[:20]) / len(qids_to_relevant_passageids)
    all_scores['Recalll @100'] = sum(recall_num[:100]) / len(qids_to_relevant_passageids)
    all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
    return all_scores
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
    """Compute the MRR metric from a reference file and a candidate file.

    Args:
        path_to_reference (str): path to the reference (qrels) file, with
            lines ``QUERYID\tPASSAGEID`` (QUERYID may repeat with different
            PASSAGEIDs).
        path_to_candidate (str): path to the candidate (run) file, with lines
            ``QUERYID\tPASSAGEID1\tRank`` (for TREC format run the script
            with a ``-t`` flag: ``QUERYID\tITER\tDOCNO\tRANK\tSIM\tRUNID``),
            tab-separated and ranked in order of relevance.
        perform_checks (bool): whether to run duplicate-passage sanity checks.
    Returns:
        dict: dictionary of metrics {'MRR': <MRR Score>}.
    """
    reference = load_reference(path_to_reference)
    candidates = load_candidate(path_to_candidate)
    if perform_checks:
        # Only the message is surfaced; the allowed flag is intentionally
        # ignored and scoring proceeds regardless.
        _, message = quality_checks_qids(reference, candidates)
        if message != '':
            print(message)
    return compute_metrics(reference, candidates)
def main():
    """Command line:
    python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
    """
    # Guard clause: bail out early on wrong usage.
    if len(sys.argv) != 3:
        print('Usage: msmarco_eval_ranking.py <reference ranking> <candidate ranking>')
        exit()
    path_to_reference, path_to_candidate = sys.argv[1], sys.argv[2]
    metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
    print('#####################')
    for metric in sorted(metrics):
        print('{}: {}'.format(metric, metrics[metric]))
    print('#####################')


if __name__ == '__main__':
    main()
| 8,065 | 40.792746 | 161 | py |
MINDER | MINDER-main/seal/index.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import pickle
import struct
import tempfile
from typing import List, Set, Tuple, Optional, Iterable, Iterator
from .cpp_modules.fm_index import FMIndex as _FMIndex
from .cpp_modules.fm_index import load_FMIndex
# Offset added to every token id before it enters the index (see initialize /
# get_range / get_doc) — presumably so ids below SHIFT stay reserved for the
# index's internal symbols; confirm against the sdsl-lite wrapper.
SHIFT = 10
# NOTE(review): not referenced anywhere in this chunk — presumably an I/O
# buffer size used elsewhere; verify before removing.
BUFSZ = 1_000_000
# struct format for one token when spooling to the temp file in initialize():
# little-endian 32-bit signed int (4 bytes, matching initialize_from_file(..., 4)).
FORMAT = '<l'
class FMIndex(_FMIndex):
    """
    FMIndex class that interfaces with the low-level `sdsl-lite` implementation.

    Documents are concatenated into one token stream; `beginnings[i]` is the
    start offset of document i (so `beginnings` has n_docs + 1 entries).
    Every token id is stored offset by `SHIFT`, and each document is reversed
    before indexing, so extending a match with `backward_search_step` walks the
    query left-to-right (prefix search over the original text).
    """

    beginnings: List[int]
    occurring: Set[int]
    occurring_distinct: List[int]
    occurring_counts: List[int]
    labels: Optional[List[str]]

    def __init__(self):
        super().__init__()
        self.beginnings = [0]          # cumulative document boundaries
        self.occurring = set()         # token ids seen at least once
        self.occurring_distinct = []   # distinct ids over the whole index
        self.occurring_counts = []     # counts aligned with occurring_distinct
        self.labels = None             # per-doc labels; not set in this chunk — assigned externally, verify

    def initialize(self, sequences: Iterable[List[int]], in_memory: bool = False) -> None:
        """
        Initialize the FM-index.
        Params:
            sequences: An iterable of list of integers, e.g. token ids.
            in_memory: If False, builds the FM-index using a temporary cache file
        """
        occurring = set()
        if in_memory:
            data = []
            for seq in sequences:
                # Track the running document boundary.
                self.beginnings.append(self.beginnings[-1] + len(seq))
                occurring |= set(seq)
                # Reverse the document and offset ids by SHIFT before indexing.
                seq = [x + SHIFT for x in seq[::-1]]
                data.extend(seq)
            # NOTE(review): attribute is annotated as Set but stored as a list here.
            self.occurring = list(occurring)
            super().initialize(data)
        else:
            # Spool the (reversed, shifted) token stream to a temp file and
            # build from disk to cap peak memory use.
            with tempfile.NamedTemporaryFile() as tmp:
                for seq in sequences:
                    self.beginnings.append(self.beginnings[-1] + len(seq))
                    occurring |= set(seq)
                    seq = [x + SHIFT for x in seq[::-1]]
                    tmp.write(b''.join([struct.pack(FORMAT, x) for x in seq]))
                tmp.flush()
                self.occurring = list(occurring)
                # 4 = bytes per token, matching FORMAT '<l'.
                super().initialize_from_file(tmp.name, 4)
        # Precompute global distinct-token statistics over the full index.
        self.occurring_distinct, self.occurring_counts = self.get_distinct_count(0, len(self))

    def get_doc(self, doc_index: int) -> List[int]:
        """
        Returns the document (as a list of ids) given its index in the index.
        """
        doc = self.extract_text(
            self.beginnings[doc_index], self.beginnings[doc_index+1])
        # Undo the SHIFT offset applied at build time.
        doc = [x - SHIFT for x in doc]
        return doc

    def get_doc_index(self, token_index: int) -> int:
        """
        Returns the index of the document containing the token identified by the input index.
        """
        # bisect_right - 1 finds the last boundary <= token_index.
        doc_index = bisect.bisect_right(self.beginnings, token_index) - 1
        return doc_index

    def get_doc_length(self, doc_index: int) -> int:
        """
        Returns the length of the document matching `doc_index`.
        """
        return self.beginnings[doc_index + 1] - self.beginnings[doc_index]

    def get_token_index_from_row(self, row: int) -> int:
        """
        Maps an FM-index row to a token position in the concatenated corpus
        (via the low-level `locate`).
        """
        return self.locate(row)

    def get_doc_index_from_row(self, row: int) -> int:
        """
        Returns the `doc_index` of the document containing the token in the input row of the Wavelet Tree.
        """
        return self.get_doc_index(self.locate(row))

    def get_range(self, sequence: List[int]) -> Tuple[int, int]:
        """
        Finds the FM-index rows that match the input prefix `sequence`.
        Returns a half-open row interval [start_row, end_row).
        """
        start_row = 0
        end_row = self.size()
        for token in sequence:
            # Tokens are SHIFT-offset in the index (see initialize).
            start_row, end_row = self.backward_search_step(token + SHIFT, start_row, end_row)
        # backward_search_step returns an inclusive end; +1 makes it half-open.
        end_row += 1
        return start_row, end_row

    def get_count(self, sequence: List[int]) -> int:
        """
        Counts the number of occurrences of the input prefix `sequence` in the FM-index.
        """
        start, end = self.get_range(sequence)
        return end - start

    def get_doc_count(self, sequence: List[int]) -> Iterator[int]:
        """
        Finds the number of *distinct* documents that contain the input prefix
        `sequence` (each document counted once, however often it matches).
        """
        start, end = self.get_range(sequence)
        doc_dict={}
        for row in range(start, end):
            doc_index = self.get_doc_index_from_row(row)
            doc_dict[doc_index] = 1
        return len(doc_dict)

    def get_doc_indices(self, sequence: List[int]) -> Iterator[int]:
        """
        Finds the documents that contain the input prefix `sequence`.
        Yields one doc_index per occurrence (duplicates possible).
        """
        start, end = self.get_range(sequence)
        for row in range(start, end):
            yield self.get_doc_index_from_row(row)

    def get_continuations(self, sequence: List[int]) -> List[int]:
        """
        Finds all tokens that appear at least once as successors for the input prefix.
        """
        start, end = self.get_range(sequence)
        conts = self.get_distinct(start, end)
        return conts

    def get_distinct(self, low: int, high: int) -> List[int]:
        """
        Finds all distinct symbols that appear in the last column of the FM-index in a given range.
        """
        distinct = self.distinct(low, high)
        # Drop internal symbols (<= 0) and undo the SHIFT offset.
        distinct = [c - SHIFT for c in distinct if c > 0]
        return distinct

    def get_distinct_count(self, low: int, high: int) -> Tuple[List[int], List[int]]:
        """
        Finds all distinct symbols that appear in the last column of the FM-index in a given range, and also return their
        counts.
        """
        # The low-level call returns a flat [symbol, count, symbol, count, ...] list.
        data = self.distinct_count(low, high)
        distinct = []
        counts = []
        for d, c in zip(data[0::2], data[1::2]):
            if d > 0:
                distinct.append(d - SHIFT)
                counts.append(c)
        return distinct, counts

    def get_distinct_count_multi(self, lows: List[int], highs: List[int]) -> List[Tuple[List[int], List[int]]]:
        """
        Multithreaded version of `get_distinct_count`: one (distinct, counts)
        pair per (low, high) input range.
        """
        ret = []
        for data in self.distinct_count_multi(lows, highs):
            distinct = []
            counts = []
            for d, c in zip(data[0::2], data[1::2]):
                if d > 0:
                    distinct.append(d - SHIFT)
                    counts.append(c)
            ret.append((distinct, counts))
        return ret

    def __len__(self) -> int:
        """
        FM-index length (in tokens).
        """
        return self.beginnings[-1]

    @property
    def n_docs(self) -> int:
        """
        Number of documents in the FM-index.
        """
        return len(self.beginnings) - 1

    def save(self, path: str) -> None:
        """
        Serialize the FM-index at the given path.
        Writes two files: `<path>.oth` (pickled Python-side metadata) and
        `<path>.fmi` (the low-level sdsl structure).
        """
        with open(path + '.oth', 'wb') as f:
            pickle.dump((self.beginnings, self.occurring, self.labels), f)
        return super().save(path + '.fmi')

    @classmethod
    def load(cls, path: str) -> 'FMIndex':
        """
        Initialize the FM-index from the given path (counterpart of `save`).
        """
        index = load_FMIndex(path + '.fmi')
        # Re-brand the low-level object as this Python subclass.
        index.__class__ = cls
        with open(path + '.oth', 'rb') as f:
            index.beginnings, index.occurring, index.labels = pickle.load(f)
        index.occurring_distinct, index.occurring_counts = index.get_distinct_count(0, len(index))
        return index
MINDER | MINDER-main/seal/cpp_modules/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| 196 | 31.833333 | 61 | py |
SRU_for_GCI | SRU_for_GCI-master/main.py | #!/usr/bin/env python
# coding: utf-8
# Import header files
import math
import argparse
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
import sys
import numpy as np
import pylab
from matplotlib import pyplot as plt
import time
import sys
from models.sru import SRU, trainSRU
from models.eSRU_1LF import eSRU_1LF, train_eSRU_1LF
from models.eSRU_2LF import eSRU_2LF, train_eSRU_2LF
from utils.utilFuncs import env_config, loadTrainingData, loadTrueNetwork, getCausalNodes, count_parameters, getGeneTrainingData
# Read input command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='cuda:3',
                    help='device, default: cuda:3')
# NOTE(review): default is 'VAR' but the comparisons below use lowercase
# 'var' — with the default value no dataset branch matches; confirm intended.
parser.add_argument('--dataset', type=str, default='VAR',
                    help='dataset type, default: VAR')
parser.add_argument('--dsid', type=int, default=1,
                    help='dataset id, default: 1')
parser.add_argument('--T', type=int, default=10,
                    help='training size, default: 10')
parser.add_argument('--F', type=int, default=10,
                    help='chaos, default: 10')
parser.add_argument('--n', type=int, default=10,
                    help='num of timeseries, default: 10')
parser.add_argument('--model', type=str, default='sru',
                    help='[sru, gru, lstm]: select your model')
parser.add_argument('--nepochs', type=int, default=500,
                    help='sets max_iter, default: 500')
parser.add_argument('--mu1', type=float, default=1,
                    help='sets mu1 parameter, default: 1')
parser.add_argument('--mu2', type=float, default=1,
                    help='sets mu2 parameter, default: 1')
parser.add_argument('--mu3', type=float, default=1,
                    help='sets mu3 parameter, default: 1')
parser.add_argument('--lr', type=float, default=0.005,
                    help='sets learning rate, default: 0.005')
parser.add_argument('--joblog', type=str, default="",
                    help='name of job logfile, default=""')
args = parser.parse_args()

# Unpack CLI arguments into module-level names used throughout the script.
deviceName = args.device
model_name = args.model
max_iter = args.nepochs
mu1 = args.mu1
mu2 = args.mu2
mu3 = args.mu3
dataset = args.dataset
dataset_id = args.dsid
T = args.T
F = args.F
n = args.n
lr = args.lr
jobLogFilename = args.joblog

###############################
# Global simulation settings
###############################
verbose = 0  # Verbosity level (>= 1 shows the input-signal plot below)

#################################
# Pytorch environment
#################################
device, seed = env_config(True, deviceName) # true --> use GPU
print("Computational Resource: %s" % (device))

######################################
# Create input data in batch format
######################################
# Each branch produces: Xtrain (n x numTotalSamples float tensor on `device`,
# possibly rescaled) and Gref (ground-truth causal graph).
if(dataset == 'gene'):
    Xtrain, Gref = getGeneTrainingData(dataset_id, device)
    n1 = Xtrain.shape[0]
    if(n != n1):
        print("Error::Dimension mismatch for input training data..")
    numTotalSamples = Xtrain.shape[1]
    Xtrain = Xtrain.float().to(device)
    # Make input signal zero mean and appropriately scaled
    Xtrain = Xtrain - Xtrain.mean()
    inputSignalMultiplier = 50
    Xtrain = inputSignalMultiplier * Xtrain
elif(dataset == 'var'):
    fileName = "data/var/S_%s_T_%s_dataset_%s.npz" % (F, T, dataset_id)
    ld = np.load(fileName)
    X_np = ld['X_np']
    Gref = ld['Gref']
    numTotalSamples = T
    Xtrain = torch.from_numpy(X_np)
    Xtrain = Xtrain.float().to(device)
    inputSignalMultiplier = 1
    Xtrain = inputSignalMultiplier * Xtrain
elif(dataset == 'lorenz'):
    fileName = "data/lorenz96/F_%s_T_%s_dataset_%s.npz" % (F, T, dataset_id)
    ld = np.load(fileName)
    X_np = ld['X_np']
    Gref = ld['Gref']
    numTotalSamples = T
    Xtrain = torch.from_numpy(X_np)
    Xtrain = Xtrain.float().to(device)
    inputSignalMultiplier = 1
    Xtrain = inputSignalMultiplier * Xtrain
elif(dataset == 'netsim'):
    fileName = "data/netsim/sim3_subject_%s.npz" % (dataset_id)
    ld = np.load(fileName)
    X_np = ld['X_np']
    Gref = ld['Gref']
    numTotalSamples = T
    Xtrain = torch.from_numpy(X_np)
    Xtrain = Xtrain.float().to(device)
    inputSignalMultiplier = 1
    Xtrain = inputSignalMultiplier * Xtrain
else:
    print("Dataset is not supported")

# Optional sanity plot of the first time series.
if(verbose >= 1):
    plt.figure(1)
    plt.xlabel("t")
    plt.ylabel("x0(t)")
    plt.plot(range(numTotalSamples),Xtrain.cpu().numpy()[0][:])
    plt.show(block=False)
    plt.pause(0.1)

######################################
# SRU Cell parameters
######################################

#######################################
# Model training parameters
######################################
# Per-model, per-dataset hyperparameters: EWMA scale set A, hidden dimensions,
# batching, learning-rate schedule and regularization weights (mu1..mu3).
if(model_name == 'sru'):
    lr_gamma = 0.99
    lr_update_gap = 4
    staggerTrainWin = 1
    stoppingThresh = 1e-5;
    trainVerboseLvl = 2
    lr = lr  # NOTE(review): no-op self-assignment
    lambda1 = mu1
    lambda2 = mu2
    n_inp_channels = n
    n_out_channels = 1
    if(dataset == 'gene'):
        A = [0.0, 0.01, 0.1, 0.5, 0.99]; #0.75
        dim_iid_stats = 10 #math.ceil(n) #1.5n
        dim_rec_stats = 10 #math.ceil(n) #1.5n
        dim_final_stats = 10 #d * len(A) #math.ceil(n/2)
        dim_rec_stats_feedback = 10 #d * len(A)
        batchSize = 21
        blk_size = batchSize
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'var'):
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10 #math.ceil(n) #1.5n
        dim_rec_stats = 10 #math.ceil(n) #1.5n
        dim_final_stats = 10 #d * len(A) #math.ceil(n/2) #n
        dim_rec_stats_feedback = 10 #d * len(A) #math.ceil(n/2) #n
        batchSize = 250
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'lorenz'):
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10
        dim_rec_stats_feedback = 10
        batchSize = 250
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'netsim'):
        A = [0.0, 0.01, 0.05, 0.1, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10
        dim_rec_stats_feedback = 10
        batchSize = 10 #100
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    else:
        print("Unsupported dataset encountered")
elif(model_name == 'eSRU_1LF' or model_name == 'eSRU_2LF'):
    lr_gamma = 0.99
    lr_update_gap = 4
    staggerTrainWin = 1
    stoppingThresh = 1e-5;
    trainVerboseLvl = 2
    lr = lr  # NOTE(review): no-op self-assignment
    lambda1 = mu1
    lambda2 = mu2
    lambda3 = mu3
    n_inp_channels = n
    n_out_channels = 1
    if(dataset == 'gene'):
        A = [0.05, 0.1, 0.2, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10
        dim_rec_stats_feedback = 10
        batchSize = 21
        blk_size = int(batchSize)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'var'):
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10 #math.ceil(n) #1.5n
        dim_rec_stats = 10 #math.ceil(n) #1.5n
        dim_final_stats = 10 #d * len(A) #math.ceil(n/2) #n
        dim_rec_stats_feedback = 10 #d * len(A) #math.ceil(n/2) #n
        batchSize = 250
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'lorenz'):
        #lr = 0.01
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10 #d*len(A)
        dim_rec_stats_feedback = 10 #d*len(A)
        batchSize = 250
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'netsim'):
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10 #d*len(A)
        dim_rec_stats_feedback = 10 #d*len(A)
        batchSize = 10 #10 #100
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    else:
        print("Unsupported dataset encountered")
else:
    print("Unsupported model encountered")

############################################
# Evaluate ROC plots (regress mu2)
############################################
# Train one model per target node; row i of Gest holds the group-norm of the
# input weights of node i's model, i.e. the estimated causal influence of
# every series on node i.
if 1:
    Gest = torch.zeros(n, n, requires_grad = False)
    if(model_name == 'sru'):
        for predictedNode in range(n):
            start = time.time()
            print("node = %d" % (predictedNode))
            model = SRU(n_inp_channels, n_out_channels, dim_iid_stats, dim_rec_stats, dim_rec_stats_feedback, dim_final_stats,A, device)
            model.to(device) # shift to CPU/GPU memory
            print(count_parameters(model))
            model, lossVec = trainSRU(model, Xtrain, device, numBatches, batchSize, blk_size, predictedNode, max_iter,
                                      lambda1, lambda2, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, trainVerboseLvl)
            # Column-wise L2 norm of the first n input columns = estimated edge strengths.
            Gest.data[predictedNode, :] = torch.norm(model.lin_xr2phi.weight.data[:,:n], p=2, dim=0)
            print("Elapsed time (1) = % s seconds" % (time.time() - start))
    elif(model_name == 'eSRU_1LF'):
        for predictedNode in range(n):
            start = time.time()
            print("node = %d" % (predictedNode))
            model = eSRU_1LF(n_inp_channels, n_out_channels, dim_iid_stats, dim_rec_stats, dim_rec_stats_feedback, dim_final_stats,A, device)
            model.to(device) # shift to CPU/GPU memory
            print(count_parameters(model))
            model, lossVec = train_eSRU_1LF(model, Xtrain, device, numBatches, batchSize, blk_size, predictedNode, max_iter,
                                            lambda1, lambda2, lambda3, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, trainVerboseLvl)
            Gest.data[predictedNode, :] = torch.norm(model.lin_xr2phi.weight.data[:,:n], p=2, dim=0)
            print("Elapsed time (1) = % s seconds" % (time.time() - start))
    elif(model_name == 'eSRU_2LF'):
        for predictedNode in range(n):
            start = time.time()
            print("node = %d" % (predictedNode))
            model = eSRU_2LF(n_inp_channels, n_out_channels, dim_iid_stats, dim_rec_stats, dim_rec_stats_feedback, dim_final_stats,A, device)
            model.to(device) # shift to CPU/GPU memory
            print(count_parameters(model))
            model, lossVec = train_eSRU_2LF(model, Xtrain, device, numBatches, batchSize, blk_size, predictedNode, max_iter,
                                            lambda1, lambda2, lambda3, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, trainVerboseLvl)
            Gest.data[predictedNode, :] = torch.norm(model.lin_xr2phi.weight.data[:,:n], p=2, dim=0)
            print("Elapsed time (1) = % s seconds" % (time.time() - start))
    else:
        print("Unsupported model encountered")

print(Gref)
print(Gest)

# Persist the run (reference graph, estimate and hyperparameters) if requested.
if(jobLogFilename != ""):
    if(model_name == 'eSRU_1LF' or model_name == 'eSRU_2LF'):
        np.savez(jobLogFilename,
                 Gref=Gref,
                 Gest=Gest.detach().cpu().numpy(),
                 model=model_name,
                 dataset=dataset,
                 dsid=dataset_id,
                 T=T,
                 F=F,
                 nepochs=max_iter,
                 mu1=mu1,
                 mu2=mu2,
                 mu3=mu3,
                 lr=lr,
                 batchSize=batchSize,
                 blk_size=blk_size,
                 numBatches=numBatches,
                 dim_iid_stats=dim_iid_stats,
                 dim_rec_stats=dim_rec_stats,
                 dim_final_stats=dim_final_stats,
                 dim_rec_stats_feedback=dim_rec_stats_feedback)
    else:
        np.savez(jobLogFilename, Gref=Gref, Gest=Gest.detach().cpu().numpy(), model=model_name, dataset=dataset, dsid=dataset_id, T=T, F=F, nepochs=max_iter, mu1=mu1, mu2=mu2, lr=lr)

# sleep for one seconds followed by printing
# the exit key for tmux consumption
time.sleep(1)
print("#RUN_COMPLETE #RUN_COMPLETE #RUN_COMPLETE #RUN_COMPLETE")
| 12,432 | 33.72905 | 186 | py |
SRU_for_GCI | SRU_for_GCI-master/models/esru_2LF.py | import time
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from copy import deepcopy
# Statistical Recurrent Unit class (based on paper by Junier B. Oliva, arXiv:1703.00381v1)
class eSRU_2LF(torch.nn.Module):
    """Economy Statistical Recurrent Unit with a two-layer feedback MLP.

    Variant of the SRU (Oliva et al., arXiv:1703.00381v1) in which the
    multi-scale recurrent state is sketched down to a low-dimensional
    feedback vector through a fixed random projection before being mixed
    with the current input.
    """

    def __init__(self,
                 n_inp_channels,          # dimension of the input sequence
                 n_out_channels,          # dimension of the predicted output
                 dim_iid_stats,           # dimension of the iid statistics phi_t
                 dim_rec_stats,           # dimension of the recurrent stats u_t
                 dim_rec_stats_feedback,  # dimension of the feedback vector r_t
                 dim_final_stats,         # dimension of the final stats o_t
                 A,                       # decay scales of the moving averages
                 device):                 # torch device holding the tensors
        super(eSRU_2LF, self).__init__()

        # Cell geometry bookkeeping.
        self.type = 'eSRU_2LF'
        self.n_inp_channels = n_inp_channels
        self.n_out_channels = n_out_channels
        self.dim_iid_stats = dim_iid_stats
        self.dim_rec_stats = dim_rec_stats
        self.dim_final_stats = dim_final_stats
        self.dim_rec_stats_feedback = dim_rec_stats_feedback
        self.numScales = len(A)

        # Fixed decay mask: Kronecker product A (x) 1_{dim_iid_stats}.
        repeated_scales = [scale for scale in A for _ in range(dim_iid_stats)]
        self.A_mask = torch.Tensor(repeated_scales).view(1, -1)
        self.A_mask.requires_grad = False
        self.A_mask = self.A_mask.to(device)

        # Cell state tensors; all are overwritten on each forward pass.
        self.phi_t = torch.zeros(dim_iid_stats, 1, requires_grad=True, device=device)
        self.phi_tile = torch.zeros(dim_iid_stats * self.numScales, 1, requires_grad=True, device=device)
        self.r_t = torch.zeros(dim_rec_stats_feedback, 1, requires_grad=True, device=device)
        self.o_t = torch.zeros(dim_final_stats, 1, requires_grad=True, device=device)
        self.y_t = torch.zeros(n_out_channels, 1, requires_grad=True, device=device)
        self.u_t = torch.zeros(1, dim_rec_stats * self.numScales, requires_grad=True, device=device)
        self.u_t_prev = torch.zeros(1, dim_rec_stats * self.numScales, device=device)

        # Trainable affine maps of the cell.
        self.lin_xr2phi = nn.Linear(n_inp_channels + dim_rec_stats_feedback, dim_iid_stats, bias=True)
        self.lin_r1 = nn.Linear(dim_rec_stats_feedback, dim_rec_stats_feedback, bias=True)
        self.lin_r2 = nn.Linear(dim_rec_stats_feedback, dim_rec_stats_feedback, bias=True)
        self.lin_o = nn.Linear(self.numScales * dim_rec_stats, dim_final_stats, bias=True)
        self.lin_y = nn.Linear(dim_final_stats, n_out_channels, bias=True)

        # Fixed (untrained) random sketching matrix mapping u_{t-1} to r-space.
        self.intrMat_h2r_transpose = (1 / math.sqrt(dim_rec_stats_feedback)) * torch.randn(
            self.numScales * dim_rec_stats, dim_rec_stats_feedback,
            requires_grad=False, device=device)

    def forward(self, x_t):
        """Advance the cell by one time step and return the prediction y_t."""
        # Feedback statistics: sketch of the previous multi-scale state,
        # pushed through two ELU layers.
        sketch = torch.matmul(self.u_t_prev, self.intrMat_h2r_transpose)
        self.r_t = F.elu(self.lin_r2(F.elu(self.lin_r1(sketch))))
        # iid statistics phi_t from the current input plus the feedback.
        self.phi_t = F.elu(self.lin_xr2phi(torch.cat((x_t, torch.flatten(self.r_t)))))
        # Multi-scale recurrent statistics: exponentially weighted averages.
        self.phi_tile = self.phi_t.repeat(1, self.numScales)
        self.u_t = self.A_mask * self.u_t_prev + (1 - self.A_mask) * self.phi_tile
        self.u_t_prev.data = self.u_t.data  # carry state forward, detached
        # Final statistics and the one-step-ahead prediction.
        self.o_t = F.elu(self.lin_o(self.u_t))
        self.y_t = self.lin_y(self.o_t)
        return self.y_t

    def reset_recurrent_stats(self):
        """Zero the carried recurrent state before a new training window."""
        self.u_t_prev.zero_()
############################################
# trainSRU_eSRU_2LF
############################################
def train_eSRU_2LF(model, trainingData, device, numBatches, batchSize, blk_size, predictedIdx, max_iter,
                lambda1, lambda2, lambda3, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, verbose):
    """Train an eSRU_2LF cell to predict series `predictedIdx` one step ahead.

    Each epoch takes one Adam step on the summed squared prediction error over
    a sliding training window, then applies proximal (soft-threshold) updates:
      * elementwise L1 shrinkage (lambda1) on most weights/biases,
      * column-wise group shrinkage (lambda2) on the first `n` input columns
        of lin_xr2phi (one column group per candidate input series),
      * scale-wise group shrinkage (lambda3) on the rows of lin_o.
    Training stops early once the parameter movement between epochs stays
    below `stoppingThresh` for `stoppingCntrThr` consecutive epochs.

    Returns (model, lossVec) where lossVec is (max_iter x 2): column 0 holds
    the per-epoch fit error, column 1 the total (fit + regularization) loss.
    """
    stoppingCntr = 0
    stoppingCntrThr = 10  # consecutive small-delta epochs required to stop
    n = trainingData.shape[0]  # number of time series / input channels
    numTotalSamples = trainingData.shape[1]
    # Scratch row used while applying group shrinkage to lin_o.
    # NOTE(review): allocated with numScales*dim_final_stats entries although a
    # row of lin_o.weight has numScales*dim_rec_stats elements; harmless only
    # because `wtMtxRow.data = ...` below rebinds the storage — confirm intent.
    wtMtxRow = torch.zeros(model.numScales * model.dim_final_stats, 1, requires_grad = False, device=device)
    # Snapshots of all trainable parameters, refreshed each epoch and used by
    # the stopping criterion (parameter movement between epochs).
    lin_xr2phi_weight = deepcopy(model.lin_xr2phi.weight.data)
    lin_xr2phi_bias = deepcopy(model.lin_xr2phi.bias.data)
    lin_r1_weight = deepcopy(model.lin_r1.weight.data)
    lin_r1_bias = deepcopy(model.lin_r1.bias.data)
    lin_r2_weight = deepcopy(model.lin_r2.weight.data)
    lin_r2_bias = deepcopy(model.lin_r2.bias.data)
    lin_o_weight = deepcopy(model.lin_o.weight.data)
    lin_o_bias = deepcopy(model.lin_o.bias.data)
    lin_y_weight = deepcopy(model.lin_y.weight.data)
    lin_y_bias = deepcopy(model.lin_y.bias.data)

    #####################################
    # Initialize miscellaneous tensors
    #####################################
    IdxArr = torch.unsqueeze(torch.arange(1,n+1, dtype=torch.float),1) # 1 to n array for plotting purposes
    estWeights = torch.zeros(n, 1, requires_grad = False)
    prevWeights = torch.zeros(model.dim_iid_stats, n, requires_grad = False, device=device)
    lossVec = torch.zeros(max_iter,2)  # [:,0] fit error, [:,1] total loss
    # NOTE(review): Tensor.to() is not in-place; the returned tensor is
    # discarded here, so lossVec stays on the CPU — confirm intent.
    lossVec.to(device)
    mseLoss = nn.MSELoss(reduction = 'sum')
    L1Loss = nn.L1Loss(reduction = 'sum')
    softshrink1 = torch.nn.Softshrink(lambda1)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_update_gap, lr_gamma)

    batchCntr = 0
    trainingLoss = 0
    fitErr = 0
    start_time = 0
    stop_time = start_time + blk_size -1
    optimizer.zero_grad()
    for epoch in range(max_iter):
        start1 = time.time()
        # Make deep copy of trainable model parameters to use later in checking the stopping criterion
        with torch.no_grad():
            lin_xr2phi_weight[:,:] = model.lin_xr2phi.weight.data[:,:]
            lin_xr2phi_bias[:] = model.lin_xr2phi.bias.data[:]
            lin_r1_weight[:,:] = model.lin_r1.weight.data[:,:]
            lin_r1_bias[:] = model.lin_r1.bias.data[:]
            lin_r2_weight[:,:] = model.lin_r2.weight.data[:,:]
            lin_r2_bias[:] = model.lin_r2.bias.data[:]
            lin_o_weight[:,:] = model.lin_o.weight.data[:,:]
            lin_o_bias[:] = model.lin_o.bias.data[:]
            lin_y_weight[:,:] = model.lin_y.weight.data[:,:]
            lin_y_bias[:] = model.lin_y.bias.data[:]

        # Update start and stop times for next training batch
        printEpoch = 0
        batchCntr = batchCntr + 1
        if(batchCntr == numBatches+1):
            # Wrapped around the data: restart the running totals and
            # (optionally) log a summary for this sweep.
            batchCntr = 1
            trainingLoss = 0
            fitErr = 0
            # print epoch summary
            if(verbose > 0):
                printEpoch = 1
        if(staggerTrainWin == 0):
            offset = 0
        else:
            # Random offset so successive sweeps see staggered windows.
            offset = math.floor(np.random.uniform()*(batchSize-blk_size))
        start_time = (batchCntr-1)*batchSize + offset
        stop_time = start_time + blk_size - 1

        # Reset recurrent stats u_t
        optimizer.zero_grad()
        model.reset_recurrent_stats()

        # Forward pass: unroll the cell over the window, accumulating the
        # per-step one-step-ahead prediction losses.
        smooth_loss_list = []
        for tt in range(start_time,stop_time,1):
            model.forward(trainingData[:,tt])
            smooth_loss = (1/(blk_size-1))*mseLoss(torch.flatten(model.y_t), torch.unsqueeze(trainingData[predictedIdx,tt+1], 0))
            smooth_loss_list.append(smooth_loss)
            #lossVec[epoch][0] = smooth_loss.item()

        # Use autograd to compute the backward pass (accumulate gradients on each pass).
        model.lin_xr2phi.weight.retain_grad()
        sum([smooth_loss_list[i] for i in range(blk_size-1)]).backward()
        lossVec[epoch][0] = sum([smooth_loss_list[i].item() for i in range(blk_size-1)])
        #print("111: %s" % torch.cuda.memory_allocated(device))
        optimizer.step()
        optimizer.zero_grad()

        #Adjust for regularization (proximal soft-threshold step)
        lr_current = optimizer.param_groups[0]['lr']
        # NOTE(review): the shrinkage thresholds below scale with the *initial*
        # lr, while lr_current (fetched above) is only used in the clamp
        # floors — confirm whether lambda*lr_current was intended here.
        softshrink1 = nn.Softshrink(lambda1*lr)
        softshrink2 = nn.Softshrink(lambda2*lr)
        softshrink3 = nn.Softshrink(lambda3*lr)
        with torch.no_grad():
            # Update all network parameters except for input layer weight matrix
            model.lin_xr2phi.weight[:,n:].data = softshrink1(model.lin_xr2phi.weight[:,n:]).data
            model.lin_xr2phi.bias.data = softshrink1(model.lin_xr2phi.bias).data
            model.lin_r1.weight.data = softshrink1(model.lin_r1.weight).data
            model.lin_r1.bias.data = softshrink1(model.lin_r1.bias).data
            model.lin_r2.weight.data = softshrink1(model.lin_r2.weight).data
            model.lin_r2.bias.data = softshrink1(model.lin_r2.bias).data
            #model.lin_o.weight.data = softshrink1(model.lin_o.weight).data
            model.lin_o.bias.data = softshrink1(model.lin_o.bias).data
            model.lin_y.weight.data = softshrink1(model.lin_y.weight).data
            model.lin_y.bias.data = softshrink1(model.lin_y.bias).data

            # Update input layer weight matrix: group shrinkage of each of the
            # first n columns by its L2 norm (the clamp avoids divide-by-zero).
            inpWgtMtx = model.lin_xr2phi.weight[:,:n]
            l2normTensor = torch.norm(inpWgtMtx, p=2, dim=0, keepdim=True) # 1 x n row tensor
            #model.lin_xr2phi.weight.data[:,:n] = ((inpWgtMtx / torch.clamp(l2normTensor, min=(lambda2 * lr_current * 0.1)))
            #                                      * torch.clamp(l2normTensor - (lr_current * lambda2), min=0.0))
            model.lin_xr2phi.weight.data[:,:n] = inpWgtMtx*(softshrink2(l2normTensor)/torch.clamp(l2normTensor, min=lambda2*lr_current*0.1))

            # Update the weight matrix mapping multi-time scale hidden state to
            # the lag sensitive features for prediction purpose
            for rr in range(model.dim_final_stats):
                wtMtxRow.data = model.lin_o.weight.data[rr,:]
                #reshape wtMtxRow as (numScales x dim_rec_stats) matrix
                wtMtxRowReshaped = wtMtxRow.view(model.numScales, model.dim_rec_stats)
                l2normTensor1 = torch.norm(wtMtxRowReshaped, p=2, dim=0, keepdim=True) # 1 x dim_final_stats row tensor
                model.lin_o.weight.data[rr,:] = (wtMtxRowReshaped*(softshrink3(l2normTensor1)/torch.clamp(l2normTensor1, min=lambda3*lr_current*0.1))).flatten().data[:]

            # Compute and log regularization loss without updating gradients
            loss1 = lambda1*((torch.norm(model.lin_y.weight.data, 1)+ torch.norm(model.lin_y.bias.data, 1) +
                torch.norm(model.lin_xr2phi.weight[:,n:].data, 1)) + torch.norm(model.lin_xr2phi.bias.data, 1) +
                torch.norm(model.lin_o.weight.data, 1) + torch.norm(model.lin_o.bias.data, 1) +
                torch.norm(model.lin_r1.weight.data, 1) + torch.norm(model.lin_r1.bias.data, 1) +
                torch.norm(model.lin_r2.weight.data, 1) + torch.norm(model.lin_r2.bias.data, 1))
            lossVec[epoch][1] = lossVec[epoch][1] + loss1.item()

            loss2 = lambda2*torch.sum(torch.norm(model.lin_xr2phi.weight.data, p=2, dim=0)[:n])
            lossVec[epoch][1] = lossVec[epoch][1] + loss2.item()

        # Again force gradient to be zero (just to be extra safe)
        optimizer.zero_grad()
        scheduler.step()

        # Record total-loss for current epoch
        lossVec[epoch][1] = lossVec[epoch][1] + lossVec[epoch][0]
        trainingLoss = trainingLoss + lossVec[epoch][1]
        fitErr = fitErr + lossVec[epoch][0]

        # Parameter movement since the start of the epoch (stopping statistic).
        with torch.no_grad():
            paramDelta = (mseLoss(model.lin_y.weight, lin_y_weight)
                + mseLoss(model.lin_y.bias, lin_y_bias)
                + mseLoss(model.lin_xr2phi.weight, lin_xr2phi_weight)
                + mseLoss(model.lin_xr2phi.bias, lin_xr2phi_bias)
                + mseLoss(model.lin_o.weight, lin_o_weight)
                + mseLoss(model.lin_o.bias, lin_o_bias)
                + mseLoss(model.lin_r1.weight, lin_r1_weight)
                + mseLoss(model.lin_r1.bias, lin_r1_bias)
                + mseLoss(model.lin_r2.weight, lin_r2_weight)
                + mseLoss(model.lin_r2.bias, lin_r2_bias)).data

        if(printEpoch == 1):
            print('Predicted Node = %d \t epoch = %s \t lr = %.4f \t Training loss = %.4f \t Fit error = %.4f \t Delta = %f' % (predictedIdx, epoch, optimizer.param_groups[0]['lr'], trainingLoss, fitErr, paramDelta))
            #for col in range(n):
            #    estWeights.data[col] = torch.norm(model.lin_xr2phi.weight.data[:,col], 2)
            #print(torch.cat((IdxArr, estWeights), 1)[:10])
            #print(sruCell.lin_xr2phi.weight.grad.data[:,:n_inp_channels])
            #print(optimizer.param_groups[0]['lr']*sruCell.lin_o.weight.grad.data[0,:])
            #print(model.lin_o.weight.grad.data)
            #print(model.lin_xr2phi.weight.data[0,:])
            #print(optimizer.param_groups[0]['lr']*model.lin_xr2phi.weight.grad.data[0,:])
            #print(model.o_t.data)
            #print(model.lin_r.weight.data[0,:])
            #print(model.lin_y.weight.data)
            #print("-------")

        # Stopping criterion: require several consecutive quiet epochs.
        if(paramDelta < stoppingThresh):
            stoppingCntr = stoppingCntr + 1
            if(stoppingCntr == stoppingCntrThr):
                break
        else:
            stoppingCntr = 0

        # run your code
        if(printEpoch == 1):
            print("Elapsed time (1) = % s seconds" % (time.time() - start1))

    return model, lossVec
| 14,315 | 47.040268 | 216 | py |
SRU_for_GCI | SRU_for_GCI-master/models/esru_1LF.py | import time
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from copy import deepcopy
# Statistical Recurrent Unit class (based on paper by Junier B. Oliva, arXiv:1703.00381v1)
class eSRU_1LF(torch.nn.Module):
    """Economy Statistical Recurrent Unit with a one-layer feedback MLP.

    Variant of the SRU (Oliva et al., arXiv:1703.00381v1) in which the
    multi-scale recurrent state is sketched down to a low-dimensional
    feedback vector through a fixed random projection before being mixed
    with the current input.
    """

    def __init__(self,
                 n_inp_channels,          # dimension of the input sequence
                 n_out_channels,          # dimension of the predicted output
                 dim_iid_stats,           # dimension of the iid statistics phi_t
                 dim_rec_stats,           # dimension of the recurrent stats u_t
                 dim_rec_stats_feedback,  # dimension of the feedback vector r_t
                 dim_final_stats,         # dimension of the final stats o_t
                 A,                       # decay scales of the moving averages
                 device):                 # torch device holding the tensors
        super(eSRU_1LF, self).__init__()

        # Cell geometry bookkeeping.
        self.type = 'eSRU_1LF'
        self.n_inp_channels = n_inp_channels
        self.n_out_channels = n_out_channels
        self.dim_iid_stats = dim_iid_stats
        self.dim_rec_stats = dim_rec_stats
        self.dim_final_stats = dim_final_stats
        self.dim_rec_stats_feedback = dim_rec_stats_feedback
        self.numScales = len(A)

        # Fixed decay mask: Kronecker product A (x) 1_{dim_iid_stats}.
        repeated_scales = [scale for scale in A for _ in range(dim_iid_stats)]
        self.A_mask = torch.Tensor(repeated_scales).view(1, -1)
        self.A_mask.requires_grad = False
        self.A_mask = self.A_mask.to(device)

        # Cell state tensors; all are overwritten on each forward pass.
        self.phi_t = torch.zeros(dim_iid_stats, 1, requires_grad=True, device=device)
        self.phi_tile = torch.zeros(dim_iid_stats * self.numScales, 1, requires_grad=True, device=device)
        self.r_t = torch.zeros(dim_rec_stats_feedback, 1, requires_grad=True, device=device)
        self.o_t = torch.zeros(dim_final_stats, 1, requires_grad=True, device=device)
        self.y_t = torch.zeros(n_out_channels, 1, requires_grad=True, device=device)
        self.u_t = torch.zeros(1, dim_rec_stats * self.numScales, requires_grad=True, device=device)
        self.u_t_prev = torch.zeros(1, dim_rec_stats * self.numScales, device=device)

        # Trainable affine maps of the cell.
        self.lin_xr2phi = nn.Linear(n_inp_channels + dim_rec_stats_feedback, dim_iid_stats, bias=True)
        self.lin_r1 = nn.Linear(dim_rec_stats_feedback, dim_rec_stats_feedback, bias=True)
        self.lin_o = nn.Linear(self.numScales * dim_rec_stats, dim_final_stats, bias=True)
        self.lin_y = nn.Linear(dim_final_stats, n_out_channels, bias=True)

        # Fixed (untrained) random sketching matrix mapping u_{t-1} to r-space.
        self.intrMat_h2r_transpose = (1 / math.sqrt(dim_rec_stats_feedback)) * torch.randn(
            self.numScales * dim_rec_stats, dim_rec_stats_feedback,
            requires_grad=False, device=device)

    def forward(self, x_t):
        """Advance the cell by one time step and return the prediction y_t."""
        # Feedback statistics: sketch of the previous multi-scale state,
        # pushed through one ELU layer.
        sketch = torch.matmul(self.u_t_prev, self.intrMat_h2r_transpose)
        self.r_t = F.elu(self.lin_r1(sketch))
        # iid statistics phi_t from the current input plus the feedback.
        self.phi_t = F.elu(self.lin_xr2phi(torch.cat((x_t, torch.flatten(self.r_t)))))
        # Multi-scale recurrent statistics: exponentially weighted averages.
        self.phi_tile = self.phi_t.repeat(1, self.numScales)
        self.u_t = self.A_mask * self.u_t_prev + (1 - self.A_mask) * self.phi_tile
        self.u_t_prev.data = self.u_t.data  # carry state forward, detached
        # Final statistics and the one-step-ahead prediction.
        self.o_t = F.elu(self.lin_o(self.u_t))
        self.y_t = self.lin_y(self.o_t)
        return self.y_t

    def reset_recurrent_stats(self):
        """Zero the carried recurrent state before a new training window."""
        self.u_t_prev.zero_()
############################################
# trainSRU
############################################
def train_eSRU_1LF(model, trainingData, device, numBatches, batchSize, blk_size, predictedIdx, max_iter,
                lambda1, lambda2, lambda3, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, verbose):
    """Train an eSRU_1LF cell to predict series `predictedIdx` one step ahead.

    Each epoch takes one Adam step on the summed squared prediction error over
    a sliding training window, then applies proximal (soft-threshold) updates:
      * elementwise L1 shrinkage (lambda1) on most weights/biases,
      * column-wise group shrinkage (lambda2) on the first `n` input columns
        of lin_xr2phi (one column group per candidate input series),
      * scale-wise group shrinkage (lambda3) on the rows of lin_o.
    Training stops early once the parameter movement between epochs stays
    below `stoppingThresh` for `stoppingCntrThr` consecutive epochs.

    Returns (model, lossVec) where lossVec is (max_iter x 2): column 0 holds
    the per-epoch fit error, column 1 the total (fit + regularization) loss.
    """
    stoppingCntr = 0
    stoppingCntrThr = 10  # consecutive small-delta epochs required to stop
    n = trainingData.shape[0]  # number of time series / input channels
    numTotalSamples = trainingData.shape[1]
    # Scratch row used while applying group shrinkage to lin_o.
    # NOTE(review): allocated with numScales*dim_final_stats entries although a
    # row of lin_o.weight has numScales*dim_rec_stats elements; harmless only
    # because `wtMtxRow.data = ...` below rebinds the storage — confirm intent.
    wtMtxRow = torch.zeros(model.numScales * model.dim_final_stats, 1, requires_grad = False, device=device)
    # Snapshots of all trainable parameters, refreshed each epoch and used by
    # the stopping criterion (parameter movement between epochs).
    lin_xr2phi_weight = deepcopy(model.lin_xr2phi.weight.data)
    lin_xr2phi_bias = deepcopy(model.lin_xr2phi.bias.data)
    lin_r1_weight = deepcopy(model.lin_r1.weight.data)
    lin_r1_bias = deepcopy(model.lin_r1.bias.data)
    lin_o_weight = deepcopy(model.lin_o.weight.data)
    lin_o_bias = deepcopy(model.lin_o.bias.data)
    lin_y_weight = deepcopy(model.lin_y.weight.data)
    lin_y_bias = deepcopy(model.lin_y.bias.data)

    #####################################
    # Initialize miscellaneous tensors
    #####################################
    IdxArr = torch.unsqueeze(torch.arange(1,n+1, dtype=torch.float),1) # 1 to n array for plotting purposes
    estWeights = torch.zeros(n, 1, requires_grad = False)
    prevWeights = torch.zeros(model.dim_iid_stats, n, requires_grad = False, device=device)
    lossVec = torch.zeros(max_iter,2)  # [:,0] fit error, [:,1] total loss
    # NOTE(review): Tensor.to() is not in-place; the returned tensor is
    # discarded here, so lossVec stays on the CPU — confirm intent.
    lossVec.to(device)
    mseLoss = nn.MSELoss(reduction = 'sum')
    L1Loss = nn.L1Loss(reduction = 'sum')
    softshrink1 = torch.nn.Softshrink(lambda1)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_update_gap, lr_gamma)

    batchCntr = 0
    trainingLoss = 0
    fitErr = 0
    start_time = 0
    stop_time = start_time + blk_size -1
    optimizer.zero_grad()
    for epoch in range(max_iter):
        start1 = time.time()
        # Make deep copy of trainable model parameters
        with torch.no_grad():
            lin_xr2phi_weight[:,:] = model.lin_xr2phi.weight.data[:,:]
            lin_xr2phi_bias[:] = model.lin_xr2phi.bias.data[:]
            lin_r1_weight[:,:] = model.lin_r1.weight.data[:,:]
            lin_r1_bias[:] = model.lin_r1.bias.data[:]
            lin_o_weight[:,:] = deepcopy(model.lin_o.weight.data[:,:])
            lin_o_bias[:] = deepcopy(model.lin_o.bias.data[:])
            lin_y_weight[:,:] = deepcopy(model.lin_y.weight.data[:,:])
            lin_y_bias[:] = deepcopy(model.lin_y.bias.data[:])

        # Update start and stop times for next training batch
        printEpoch = 0
        batchCntr = batchCntr + 1
        if(batchCntr == numBatches+1):
            # Wrapped around the data: restart the running totals and
            # (optionally) log a summary for this sweep.
            batchCntr = 1
            trainingLoss = 0
            fitErr = 0
            # print epoch summary
            if(verbose > 0):
                printEpoch = 1
        if(staggerTrainWin == 0):
            offset = 0
        else:
            # Random offset so successive sweeps see staggered windows.
            offset = math.floor(np.random.uniform()*(batchSize-blk_size))
        start_time = (batchCntr-1)*batchSize + offset
        stop_time = start_time + blk_size - 1

        # Reset recurrent stats u_t
        optimizer.zero_grad()
        model.reset_recurrent_stats()

        # Forward pass: unroll the cell over the window, accumulating the
        # per-step one-step-ahead prediction losses.
        smooth_loss_list = []
        for tt in range(start_time,stop_time,1):
            model.forward(trainingData[:,tt])
            smooth_loss = (1/(blk_size-1))*mseLoss(torch.flatten(model.y_t), torch.unsqueeze(trainingData[predictedIdx,tt+1], 0))
            smooth_loss_list.append(smooth_loss)
            #lossVec[epoch][0] = smooth_loss.item()

        # Use autograd to compute the backward pass (accumulate gradients on each pass).
        model.lin_xr2phi.weight.retain_grad()
        sum([smooth_loss_list[i] for i in range(blk_size-1)]).backward()
        lossVec[epoch][0] = sum([smooth_loss_list[i].item() for i in range(blk_size-1)])
        #print("111: %s" % torch.cuda.memory_allocated(device))
        optimizer.step()
        optimizer.zero_grad()

        #Adjust for regularization (proximal soft-threshold step)
        lr_current = optimizer.param_groups[0]['lr']
        # NOTE(review): the shrinkage thresholds below scale with the *initial*
        # lr, while lr_current (fetched above) is only used in the clamp
        # floors — confirm whether lambda*lr_current was intended here.
        softshrink1 = nn.Softshrink(lambda1*lr)
        softshrink2 = nn.Softshrink(lambda2*lr)
        softshrink3 = nn.Softshrink(lambda3*lr)
        with torch.no_grad():
            # Update all network parameters except for input layer weight matrix
            model.lin_xr2phi.weight[:,n:].data = softshrink1(model.lin_xr2phi.weight[:,n:]).data
            model.lin_xr2phi.bias.data = softshrink1(model.lin_xr2phi.bias).data
            model.lin_r1.weight.data = softshrink1(model.lin_r1.weight).data
            model.lin_r1.bias.data = softshrink1(model.lin_r1.bias).data
            #model.lin_o.weight.data = softshrink1(model.lin_o.weight).data
            model.lin_o.bias.data = softshrink1(model.lin_o.bias).data
            model.lin_y.weight.data = softshrink1(model.lin_y.weight).data
            model.lin_y.bias.data = softshrink1(model.lin_y.bias).data

            # Update input layer weight matrix: group shrinkage of each of the
            # first n columns by its L2 norm (the clamp avoids divide-by-zero).
            inpWgtMtx = model.lin_xr2phi.weight[:,:n]
            l2normTensor = torch.norm(inpWgtMtx, p=2, dim=0, keepdim=True) # 1 x n row tensor
            #model.lin_xr2phi.weight.data[:,:n] = ((inpWgtMtx / torch.clamp(l2normTensor, min=(lambda2 * lr_current * 0.1)))
            #                                      * torch.clamp(l2normTensor - (lr_current * lambda2), min=0.0))
            model.lin_xr2phi.weight.data[:,:n] = inpWgtMtx*(softshrink2(l2normTensor)/torch.clamp(l2normTensor, min=lambda2*lr_current*0.1))

            # Update the weight matrix mapping multi-time scale hidden state to
            # the lag sensitive features for prediction purpose
            for rr in range(model.dim_final_stats):
                wtMtxRow.data = model.lin_o.weight.data[rr,:]
                #reshape wtMtxRow as (numScales x dim_rec_stats) matrix
                wtMtxRowReshaped = wtMtxRow.view(model.numScales, model.dim_rec_stats)
                l2normTensor1 = torch.norm(wtMtxRowReshaped, p=2, dim=0, keepdim=True) # 1 x dim_final_stats row tensor
                model.lin_o.weight.data[rr,:] = (wtMtxRowReshaped*(softshrink3(l2normTensor1)/torch.clamp(l2normTensor1, min=lambda3*lr_current*0.1))).flatten().data[:]

            # Compute and log regularization loss without updating gradients
            loss1 = lambda1*((torch.norm(model.lin_y.weight.data, 1)+ torch.norm(model.lin_y.bias.data, 1) +
                torch.norm(model.lin_xr2phi.weight[:,n:].data, 1)) + torch.norm(model.lin_xr2phi.bias.data, 1) +
                torch.norm(model.lin_o.weight.data, 1) + torch.norm(model.lin_o.bias.data, 1) +
                torch.norm(model.lin_r1.weight.data, 1) + torch.norm(model.lin_r1.bias.data, 1))
            lossVec[epoch][1] = lossVec[epoch][1] + loss1.item()

            loss2 = lambda2*torch.sum(torch.norm(model.lin_xr2phi.weight.data, p=2, dim=0)[:n])
            lossVec[epoch][1] = lossVec[epoch][1] + loss2.item()

        # Again force gradient to be zero (just to be extra safe)
        optimizer.zero_grad()
        scheduler.step()

        # Record total-loss for current epoch
        lossVec[epoch][1] = lossVec[epoch][1] + lossVec[epoch][0]
        trainingLoss = trainingLoss + lossVec[epoch][1]
        fitErr = fitErr + lossVec[epoch][0]

        # Parameter movement since the start of the epoch (stopping statistic).
        with torch.no_grad():
            paramDelta = (mseLoss(model.lin_y.weight, lin_y_weight)
                + mseLoss(model.lin_y.bias, lin_y_bias)
                + mseLoss(model.lin_xr2phi.weight, lin_xr2phi_weight)
                + mseLoss(model.lin_xr2phi.bias, lin_xr2phi_bias)
                + mseLoss(model.lin_o.weight, lin_o_weight)
                + mseLoss(model.lin_o.bias, lin_o_bias)
                + mseLoss(model.lin_r1.weight, lin_r1_weight)
                + mseLoss(model.lin_r1.bias, lin_r1_bias)).data

        if(printEpoch == 1):
            print('Predicted Node = %d \t epoch = %s \t lr = %.4f \t Training loss = %.4f \t Fit error = %.4f \t Delta = %f' % (predictedIdx, epoch, optimizer.param_groups[0]['lr'], trainingLoss, fitErr, paramDelta))
            #for col in range(n):
            #    estWeights.data[col] = torch.norm(model.lin_xr2phi.weight.data[:,col], 2)
            #print(torch.cat((IdxArr, estWeights), 1)[:10])
            #print(sruCell.lin_xr2phi.weight.grad.data[:,:n_inp_channels])
            #print(optimizer.param_groups[0]['lr']*sruCell.lin_o.weight.grad.data[0,:])
            #print(model.lin_o.weight.grad.data)
            #print(model.lin_xr2phi.weight.data[0,:])
            #print(optimizer.param_groups[0]['lr']*model.lin_xr2phi.weight.grad.data[0,:])
            #print(model.o_t.data)
            #print(model.lin_r.weight.data[0,:])
            #print(model.lin_y.weight.data)
            #print("-------")

        # Stopping criterion: require several consecutive quiet epochs.
        if(paramDelta < stoppingThresh):
            stoppingCntr = stoppingCntr + 1
            if(stoppingCntr == stoppingCntrThr):
                break
        else:
            stoppingCntr = 0

        # run your code
        if(printEpoch == 1):
            print("Elapsed time (1) = % s seconds" % (time.time() - start1))

    return model, lossVec
| 13,531 | 45.501718 | 216 | py |
SRU_for_GCI | SRU_for_GCI-master/models/sru.py | import time
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from copy import deepcopy
# Statistical Recurrent Unit class (based on paper by Junier B. Oliva, arXiv:1703.00381v1)
class SRU(torch.nn.Module):
    """Statistical Recurrent Unit (Oliva et al., arXiv:1703.00381v1).

    The cell keeps exponentially weighted moving averages of learned iid
    statistics at several time scales; a trained linear map (lin_r) feeds
    the full multi-scale state back into the statistics generator.
    """

    def __init__(self,
                 n_inp_channels,          # dimension of the input sequence
                 n_out_channels,          # dimension of the predicted output
                 dim_iid_stats,           # dimension of the iid statistics phi_t
                 dim_rec_stats,           # dimension of the recurrent stats u_t
                 dim_rec_stats_feedback,  # dimension of the feedback vector r_t
                 dim_final_stats,         # dimension of the final stats o_t
                 A,                       # decay scales of the moving averages
                 device):                 # torch device holding the tensors
        super(SRU, self).__init__()

        # Cell geometry bookkeeping.
        self.type = 'sru'
        self.n_inp_channels = n_inp_channels
        self.n_out_channels = n_out_channels
        self.dim_iid_stats = dim_iid_stats
        self.dim_rec_stats = dim_rec_stats
        self.dim_final_stats = dim_final_stats
        self.dim_rec_stats_feedback = dim_rec_stats_feedback
        self.numScales = len(A)

        # Fixed decay mask: Kronecker product A (x) 1_{dim_iid_stats}.
        repeated_scales = [scale for scale in A for _ in range(dim_iid_stats)]
        self.A_mask = torch.Tensor(repeated_scales).view(1, -1)
        self.A_mask.requires_grad = False
        self.A_mask = self.A_mask.to(device)

        # Cell state tensors; all are overwritten on each forward pass.
        self.phi_t = torch.zeros(dim_iid_stats, 1, requires_grad=True, device=device)
        self.phi_tile = torch.zeros(dim_iid_stats * self.numScales, 1, requires_grad=True, device=device)
        self.r_t = torch.zeros(dim_rec_stats_feedback, 1, requires_grad=True, device=device)
        self.o_t = torch.zeros(dim_final_stats, 1, requires_grad=True, device=device)
        self.y_t = torch.zeros(n_out_channels, 1, requires_grad=True, device=device)
        self.u_t = torch.zeros(1, dim_rec_stats * self.numScales, requires_grad=True, device=device)
        self.u_t_prev = torch.zeros(1, dim_rec_stats * self.numScales, device=device)

        # Trainable affine maps of the cell.
        self.lin_xr2phi = nn.Linear(n_inp_channels + dim_rec_stats_feedback, dim_iid_stats, bias=True)
        self.lin_r = nn.Linear(self.numScales * dim_rec_stats, dim_rec_stats_feedback, bias=True)
        self.lin_o = nn.Linear(self.numScales * dim_rec_stats, dim_final_stats, bias=True)
        self.lin_y = nn.Linear(dim_final_stats, n_out_channels, bias=True)

        # Total number of parameters: per-layer weight counts plus biases.
        weight_counts = (
            (n_inp_channels + dim_rec_stats_feedback) * dim_iid_stats,
            self.numScales * dim_rec_stats * dim_rec_stats_feedback,
            self.numScales * dim_rec_stats * dim_final_stats,
            dim_final_stats * n_out_channels,
        )
        bias_counts = (dim_iid_stats, dim_rec_stats_feedback, dim_final_stats, n_out_channels)
        self.numParams = sum(weight_counts) + sum(bias_counts)

    def forward(self, x_t):
        """Advance the cell by one time step and return the prediction y_t."""
        # Feedback statistics from the full previous multi-scale state.
        self.r_t = F.elu(self.lin_r(self.u_t_prev))
        # iid statistics phi_t from the current input plus the feedback.
        self.phi_t = F.elu(self.lin_xr2phi(torch.cat((x_t, torch.flatten(self.r_t)))))
        # Multi-scale recurrent statistics: exponentially weighted averages.
        self.phi_tile = self.phi_t.repeat(1, self.numScales)
        self.u_t = self.A_mask * self.u_t_prev + (1 - self.A_mask) * self.phi_tile
        self.u_t_prev.data = self.u_t.data  # carry state forward, detached
        # Final statistics and the one-step-ahead prediction.
        self.o_t = F.elu(self.lin_o(self.u_t))
        self.y_t = self.lin_y(self.o_t)
        return self.y_t

    def reset_recurrent_stats(self):
        """Zero the carried recurrent state before a new training window."""
        self.u_t_prev.zero_()
############################################
# trainSRU
############################################
def trainSRU(model, trainingData, device, numBatches, batchSize, blk_size, predictedIdx, max_iter,
lambda1, lambda2, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, verbose):
stoppingCntr = 0
stoppingCntrThr = 10
proxUpdate = True
n = trainingData.shape[0]
numTotalSamples = trainingData.shape[1]
lin_xr2phi_weight = deepcopy(model.lin_xr2phi.weight.data)
lin_xr2phi_bias = deepcopy(model.lin_xr2phi.bias.data)
lin_r_weight = deepcopy(model.lin_r.weight.data)
lin_r_bias = deepcopy(model.lin_r.bias.data)
lin_o_weight = deepcopy(model.lin_o.weight.data)
lin_o_bias = deepcopy(model.lin_o.bias.data)
lin_y_weight = deepcopy(model.lin_y.weight.data)
lin_y_bias = deepcopy(model.lin_y.bias.data)
#####################################
# Initialize miscellaneous tensors
#####################################
IdxArr = torch.unsqueeze(torch.arange(1,n+1, dtype=torch.float),1) # 1 to n array for plotting purposes
estWeights = torch.zeros(n, 1, requires_grad = False)
prevWeights = torch.zeros(model.dim_iid_stats, n, requires_grad = False, device=device)
lossVec = torch.zeros(max_iter,2)
lossVec.to(device)
mseLoss = nn.MSELoss(reduction = 'sum')
L1Loss = nn.L1Loss(reduction = 'sum')
if(proxUpdate):
softshrink1 = torch.nn.Softshrink(lambda1)
#hardshrink2 = nn.Hardshrink(hs2)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_update_gap, lr_gamma)
batchCntr = 0
trainingLoss = 0
fitErr = 0
start_time = 0
stop_time = start_time + blk_size -1
optimizer.zero_grad()
for epoch in range(max_iter):
start1 = time.time()
# Make deep copy of trainable model parameters
with torch.no_grad():
lin_xr2phi_weight[:,:] = model.lin_xr2phi.weight.data[:,:]
lin_xr2phi_bias[:] = model.lin_xr2phi.bias.data[:]
lin_r_weight[:,:] = model.lin_r.weight.data[:,:]
lin_r_bias[:] = model.lin_r.bias.data[:]
lin_o_weight[:,:] = deepcopy(model.lin_o.weight.data[:,:])
lin_o_bias[:] = deepcopy(model.lin_o.bias.data[:])
lin_y_weight[:,:] = deepcopy(model.lin_y.weight.data[:,:])
lin_y_bias[:] = deepcopy(model.lin_y.bias.data[:])
# Update start and stop times for next training batch
printEpoch = 0
batchCntr = batchCntr + 1
if(batchCntr == numBatches+1):
batchCntr = 1
trainingLoss = 0
fitErr = 0
# print epoch summary
if(verbose > 0):
printEpoch = 1
if(staggerTrainWin == 0):
offset = 0
else:
offset = math.floor(np.random.uniform()*(batchSize-blk_size))
start_time = (batchCntr-1)*batchSize + offset
stop_time = start_time + blk_size - 1
# Reset recurrent stats u_t
optimizer.zero_grad()
model.reset_recurrent_stats()
# Forward pass
smooth_loss_list = []
for tt in range(start_time,stop_time,1):
model.forward(trainingData[:,tt])
smooth_loss = (1/(blk_size-1))*mseLoss(torch.flatten(model.y_t), torch.unsqueeze(trainingData[predictedIdx,tt+1], 0))
smooth_loss_list.append(smooth_loss)
#lossVec[epoch][0] = smooth_loss.item()
# Use autograd to compute the backward pass (accumulate gradients on each pass).
model.lin_xr2phi.weight.retain_grad()
sum([smooth_loss_list[i] for i in range(blk_size-1)]).backward()
lossVec[epoch][0] = sum([smooth_loss_list[i].item() for i in range(blk_size-1)])
#smooth_loss.backward(retain_graph = True)
#print("111: %s" % torch.cuda.memory_allocated(device))
# Compute gradient energy (without accounting for regularization terms)
#total_grad_norm = 0;
#for p in list(filter(lambda p: p.grad is not None, model.parameters())):
# total_grad_norm = total_grad_norm + p.grad.data.norm(2).item()
optimizer.step()
optimizer.zero_grad()
#Adjust for regularization
if(proxUpdate):
lr_current = optimizer.param_groups[0]['lr']
softshrink1 = nn.Softshrink(lambda1*lr)
softshrink2 = nn.Softshrink(lambda2*lr)
with torch.no_grad():
# Update all network parameters except for input layer weight matrix
model.lin_xr2phi.weight[:,n:].data = softshrink1(model.lin_xr2phi.weight[:,n:]).data
model.lin_xr2phi.bias.data = softshrink1(model.lin_xr2phi.bias).data
model.lin_r.weight.data = softshrink1(model.lin_r.weight).data
model.lin_r.bias.data = softshrink1(model.lin_r.bias).data
model.lin_o.weight.data = softshrink1(model.lin_o.weight).data
model.lin_o.bias.data = softshrink1(model.lin_o.bias).data
model.lin_y.weight.data = softshrink1(model.lin_y.weight).data
model.lin_y.bias.data = softshrink1(model.lin_y.bias).data
# Update input layer weight matrix
inpWgtMtx = model.lin_xr2phi.weight[:,:n]
l2normTensor = torch.norm(inpWgtMtx, p=2, dim=0, keepdim=True) # 1 x n row tensor
#model.lin_xr2phi.weight.data[:,:n] = ((inpWgtMtx / torch.clamp(l2normTensor, min=(lambda2 * lr_current * 0.1)))
# * torch.clamp(l2normTensor - (lr_current * lambda2), min=0.0))
model.lin_xr2phi.weight.data[:,:n] = inpWgtMtx*(softshrink2(l2normTensor)/torch.clamp(l2normTensor, min=1e-8))
# Compute and log regularization loss without updating gadients
loss1 = lambda1*((torch.norm(model.lin_y.weight.data, 1)+ torch.norm(model.lin_y.bias.data, 1) +
torch.norm(model.lin_xr2phi.weight[:,n:].data, 1)) + torch.norm(model.lin_xr2phi.bias.data, 1) +
torch.norm(model.lin_o.weight.data, 1) + torch.norm(model.lin_o.bias.data, 1) +
torch.norm(model.lin_r.weight.data, 1) + torch.norm(model.lin_r.bias.data, 1))
lossVec[epoch][1] = lossVec[epoch][1] + loss1.item()
loss2 = lambda2*torch.sum(torch.norm(model.lin_xr2phi.weight.data, p=2, dim=0)[:n])
lossVec[epoch][1] = lossVec[epoch][1] + loss2.item()
# Again force gradient to be zero (just to be extra safe)
optimizer.zero_grad()
scheduler.step()
else:
loss1 = lambda1*((torch.norm(model.lin_y.weight, 1)+ torch.norm(model.lin_y.bias, 1) +
torch.norm(model.lin_xr2phi.weight[:,n:], 1)) + torch.norm(model.lin_xr2phi.bias, 1) +
torch.norm(model.lin_o.weight, 1) + torch.norm(model.lin_o.bias, 1) +
torch.norm(model.lin_r.weight, 1) + torch.norm(model.lin_r.bias, 1))
lossVec[epoch][1] = lossVec[epoch][1] + loss1.item()
model.lin_xr2phi.weight.retain_grad()
loss1.backward(retain_graph = True)
optimizer.step()
optimizer.zero_grad()
#for col in range(n):
# loss2 = lambda2*torch.norm(model.lin_xr2phi.weight[:,col], 2)
# model.lin_xr2phi.weight.retain_grad()
# loss2.backward(retain_graph = True)
# lossVec[epoch][1] = lossVec[epoch][1] + loss2.item()
loss2 = lambda2*torch.sum(torch.norm(model.lin_xr2phi.weight, p=2, dim=0)[:n])
lossVec[epoch][1] = lossVec[epoch][1] + loss2.item()
model.lin_xr2phi.weight.retain_grad()
loss2.backward(retain_graph = True)
optimizer.step()
scheduler.step()
# prune small weights
#model.lin_xr2phi.weight.data[:,predictedIdx] = 0
#model.lin_xr2phi.weight.grad.data[:,predictedIdx] = 0
#model.lin_xr2phi.weight.data = hardshrink1(model.lin_xr2phi.weight.data)
#model.lin_r.weight.data = hardshrink2(model.lin_r.weight.data)
#model.lin_o.weight.data = hardshrink2(model.lin_o.weight.data)
# Record total-loss for current epoch
lossVec[epoch][1] = lossVec[epoch][1] + lossVec[epoch][0]
trainingLoss = trainingLoss + lossVec[epoch][1]
fitErr = fitErr + lossVec[epoch][0]
with torch.no_grad():
paramDelta = (mseLoss(model.lin_y.weight, lin_y_weight)
+ mseLoss(model.lin_y.bias, lin_y_bias)
+ mseLoss(model.lin_xr2phi.weight, lin_xr2phi_weight)
+ mseLoss(model.lin_xr2phi.bias, lin_xr2phi_bias)
+ mseLoss(model.lin_o.weight, lin_o_weight)
+ mseLoss(model.lin_o.bias, lin_o_bias)
+ mseLoss(model.lin_r.weight, lin_r_weight)
+ mseLoss(model.lin_r.bias, lin_r_bias)).data
if(printEpoch == 1):
print('Predicted Node = %d \t epoch = %s \t lr = %.4f \t Training loss = %.4f \t Fit error = %.4f \t Delta = %f' % (predictedIdx, epoch, optimizer.param_groups[0]['lr'], trainingLoss, fitErr, paramDelta))
#for col in range(n):
# estWeights.data[col] = torch.norm(model.lin_xr2phi.weight.data[:,col], 2)
#print(torch.cat((IdxArr, estWeights), 1)[:10])
#print(sruCell.lin_xr2phi.weight.grad.data[:,:n_inp_channels])
#print(optimizer.param_groups[0]['lr']*sruCell.lin_o.weight.grad.data[0,:])
#print(model.lin_o.weight.grad.data)
#print(model.lin_xr2phi.weight.data[0,:])
#print(optimizer.param_groups[0]['lr']*model.lin_xr2phi.weight.grad.data[0,:])
#print(model.o_t.data)
#print(model.lin_r.weight.data[0,:])
#print(model.lin_y.weight.data)
#print("-------")
# Stopping criterion
if(paramDelta < stoppingThresh):
stoppingCntr = stoppingCntr + 1
if(stoppingCntr == stoppingCntrThr):
break
else:
stoppingCntr = 0
# run your code
if(printEpoch == 1):
print("Elapsed time (1) = % s seconds" % (time.time() - start1))
return model, lossVec
| 15,047 | 45.018349 | 216 | py |
# Import header files
import math
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
import sys
import numpy as np
import pylab
from matplotlib import pyplot as plt
import time
import sys
import csv
###########################################
# Python/numpy/pytorch environment config
###########################################
def env_config(GPUTrue, deviceName):
    """Configure the torch/numpy runtime for reproducible experiments.

    Disables autograd anomaly detection, flushes denormal floats to zero
    for speed, seeds the torch (CPU + CUDA) and numpy RNGs, and picks the
    compute device.

    Args:
        GPUTrue: if truthy, try to use the CUDA device named `deviceName`.
        deviceName: CUDA device string, e.g. "cuda:0".

    Returns:
        (device, global_seed) tuple.
    """
    seed = 2
    # Disable debug mode
    #torch.backends.cudnn.enabled=False
    torch.autograd.set_detect_anomaly(False)
    # Shrink very small values to zero in tensors for computational speedup
    torch.set_flush_denormal(True)
    # Seed every RNG so runs are reproducible
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    # Fall back to CPU when no GPU was requested or none is available
    if GPUTrue and torch.cuda.is_available():
        chosen = torch.device(deviceName)
    else:
        chosen = torch.device("cpu")
    return chosen, seed
######################################
# Function for loading input data
######################################
def loadTrainingData(inputDataFilePath, device):
    """Load a saved time-series tensor and return it transposed on `device`.

    The file must contain a dict with key 'TsData' of shape (T, n); the
    returned tensor has shape (n, T) and requires_grad disabled.
    """
    saved = torch.load(inputDataFilePath)
    series = saved['TsData'].t()
    Xtrain = torch.zeros(series.shape[0], series.shape[1],
                         requires_grad=False, device=device)
    Xtrain.data[:, :] = series.data[:, :]
    return Xtrain
#######################################################
# Function for reading ground truth network from file
#######################################################
def loadTrueNetwork(inputFilePath, networkSize):
    """Read a ground-truth network from a TSV edge list.

    Each row looks like "G<src>\\tG<tgt>" (1-based node ids prefixed with a
    single letter).  Returns an (networkSize x networkSize) int16 adjacency
    matrix with Gtrue[tgt-1][src-1] = 1 for every listed edge.

    Improvement over the previous version: the file is read in a single
    pass instead of once to count rows and again to fill a temporary
    edge array.
    """
    Gtrue = np.zeros((networkSize, networkSize), dtype=np.int16)
    with open(inputFilePath) as tsvin:
        for row in csv.reader(tsvin, delimiter='\t'):
            src = int(row[0][1:])   # strip the leading letter, e.g. "G12" -> 12
            tgt = int(row[1][1:])
            Gtrue[tgt - 1][src - 1] = 1
    return Gtrue
#############################################
# getCausalNodes
######################################
def getCausalNodes(model, threshold):
    """Mask the input channels the model treats as causal parents.

    A channel counts as causal when the L2 norm of its column in the
    input-layer weight matrix exceeds `threshold`.

    Returns:
        (n x 1) int16 tensor with 1 for causal channels, 0 otherwise.
    """
    n = model.n_inp_channels
    # Per-column L2 norms over the first n input columns
    col_norms = torch.norm(model.lin_xr2phi.weight.data[:, :n], p=2, dim=0)
    mask = torch.zeros(n, 1, requires_grad=False, dtype=torch.int16)
    mask.data[:, 0] = (col_norms > threshold).to(torch.int16)
    return mask
#######################################################################
# Calculates false positive negatives and true positives negatives
#####################################################################
def calcPerfMetrics(Gtrue, Gest):
    """Score a 0/1 estimated adjacency matrix against the ground truth.

    Args:
        Gtrue: ground-truth 0/1 matrix.
        Gest: estimated 0/1 matrix of the same shape.

    Returns:
        (TPR, FPR, Precision, Recall).  Precision is reported as 0 unless
        there is at least one true positive AND one false positive
        (original convention, preserved here).
    """
    true_pos = np.sum(Gtrue * Gest)
    false_pos = np.sum(Gest) - true_pos
    # Complements flip 0 <-> 1
    Gest_c = -1 * (Gest - 1)
    Gtrue_c = -1 * (Gtrue - 1)
    true_neg = np.sum(Gtrue_c * Gest_c)
    false_neg = np.sum(Gest_c) - true_neg
    TPR = float(true_pos) / float(true_pos + false_neg)
    FPR = float(false_pos) / float(false_pos + true_neg)
    Recall = TPR
    if true_pos > 0 and false_pos > 0:
        Precision = float(true_pos) / float(true_pos + false_pos)
    else:
        Precision = 0
    return TPR, FPR, Precision, Recall
####################################################
# Calculates area under ROC curve
#
# (In) xin: numpy float array of false positive entries
# (In) yin: numpy float array of true positive entries
# (Out) auroc: calculated area under ROC curve
#
# Notes: xin and yin should sorted and be of same dimension
# and contain bounded entries in (0,1)
####################################################
def calcAUROC(xin, yin, verbose):
    """Area under the ROC curve via the trapezoidal rule.

    Args:
        xin: numpy float array of false positive rates, entries in (0,1).
        yin: numpy float array of matching true positive rates.
        verbose: if > 0, print each sorted (x, y) point.

    Returns:
        (auroc, xin, yin) where the returned arrays additionally contain
        the extreme ROC points (0,0) and (1,1).
    """
    xin, yin = parallel_sort(xin, yin)
    if verbose > 0:
        for idx in range(len(xin)):
            print("%d\t %.6f \t %.6f" % (idx, xin[idx], yin[idx]))
    # Anchor the curve at (0,0) and (1,1)
    xin = np.insert(xin, 0, 0)
    yin = np.insert(yin, 0, 0)
    xin = np.append(xin, 1)
    yin = np.append(yin, 1)
    auroc = 0
    for idx in range(len(xin) - 1):
        width = xin[idx + 1] - xin[idx]
        auroc = auroc + 0.5 * width * (yin[idx] + yin[idx + 1])
    return auroc, xin, yin
####################################################
# Calculates area under Precision-Recall curve
#
# (In) xin: numpy float array of precision values
# (In) yin: numpy float array of recall values
# (Out) aupr: calculated area under precision-recall curve
#
# Notes: xin and yin should sorted and be of same dimension
# and contain bounded entries in (0,1)
####################################################
def calcAUPR(xin, yin):
    """Area under a precision-recall curve via the trapezoidal rule.

    Args:
        xin: sorted x-axis values (recall, as used by callers), in (0,1).
        yin: matching y-axis values (precision).

    Returns:
        The trapezoidal area, with endpoints (0,1) and (1,0) added when
        missing.

    Bug fix: the previous version cached ``ll = len(xin)`` before the
    possible ``np.insert`` and then tested ``xin[ll-1]``, which indexes the
    second-to-last element whenever the insert happened.  The last element
    is now checked directly.
    """
    if xin[0] > 0:
        xin = np.insert(xin, 0, 0)
        yin = np.insert(yin, 0, 1)
    if xin[-1] < 1:
        xin = np.append(xin, 1)
        yin = np.append(yin, 0)
    aupr = 0
    for k in range(len(xin) - 1):
        aupr += 0.5 * (xin[k + 1] - xin[k]) * (yin[k] + yin[k + 1])
    return aupr
###########################
# Count the number of tunable parameters in the model
##########################
def count_parameters(model):
    """Number of trainable (requires_grad) scalar parameters in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
##################################
# Calc metrics
##################################
def calcMetrics(jobLogFilename, model, dataset, verbose):
    """Load a saved experiment log (.npz) and score its estimated network.

    Thresholds Gest at 0, strips self-loops when the stored dataset is
    'gene', and returns the run metadata together with the performance
    metrics.  Note: the `model` and `dataset` arguments are superseded by
    the values stored inside the log file.

    Returns:
        (model_name, dataset, dsid, T, F, nepochs, lr, mu1, mu2, mu3,
         TPR, FPR, Precision, Recall)
    """
    ld = np.load(jobLogFilename)
    Gest = ld['Gest']
    Gref = ld['Gref']
    model_name = ld['model']
    dataset = ld['dataset']
    dsid = ld['dsid']
    nepochs = ld['nepochs']
    T = ld['T']
    F = ld['F']
    mu1 = ld['mu1']
    mu2 = ld['mu2']
    lr = ld['lr']
    # mu3 is only logged for the esru2/esru3 models
    if model_name == 'esru2' or model_name == 'esru3':
        mu3 = ld['mu3']
    else:
        mu3 = 0
    n = Gest.shape[0]
    thresh = 0
    # Binarise the estimate: anything <= thresh is "no edge"
    Gest1 = np.ones((n, n), dtype=np.int16)
    Gest1[Gest <= thresh] = 0
    # remove self loops for gene causal network estimate
    if dataset == 'gene':
        np.fill_diagonal(Gest1, 0)
    TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest1)
    if verbose > 0:
        print("thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (thresh, TPR, FPR, Precision, Recall))
    return model_name, dataset, dsid, T, F, nepochs, lr, mu1, mu2, mu3, TPR, FPR, Precision, Recall
##################################
# Calc metrics
##################################
def calcMetricsTCDF(jobLogFilename, model, dataset, threshold, verbose):
    """Load a saved TCDF experiment log (.npz) and score its network.

    Gest entries <= `threshold` are treated as no-edge; self-loops are
    removed when the stored dataset is 'gene'.

    Returns:
        (model_name, dataset, dsid, T, F, nepochs, lr, kernel, level,
         dilation, TPR, FPR, Precision, Recall)

    Bug fix: the verbose print previously referenced the undefined name
    ``thresh`` (copy-paste from calcMetrics) and raised a NameError when
    verbose > 0; it now prints the `threshold` argument.  A duplicate
    read of ld['nepochs'] was also removed.
    """
    ld = np.load(jobLogFilename)
    Gest = ld['Gest']
    Gref = ld['Gref']
    model_name = ld['model']
    dataset = ld['dataset']
    dsid = ld['dsid']
    nepochs = ld['nepochs']
    T = ld['T']
    F = ld['F']
    kernel = ld['kernel_size']
    level = ld['levels']
    lr = ld['lr']
    dilation = ld['dilation_c']
    n = Gest.shape[0]
    # Binarise the estimate at the requested threshold
    Gest1 = np.ones((n, n), dtype=np.int16)
    Gest1[Gest <= threshold] = 0
    # remove self loops for gene causal network estimate
    if dataset == 'gene':
        np.fill_diagonal(Gest1, 0)
    TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest1)
    if verbose > 0:
        print("thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (threshold, TPR, FPR, Precision, Recall))
    return model_name, dataset, dsid, T, F, nepochs, lr, kernel, level, dilation, TPR, FPR, Precision, Recall
###################################################
# parallel sort in ascending order
###################################################
def parallel_sort(xin, yin):
    """Sort paired arrays by x ascending; sort y within runs of equal x.

    Args:
        xin: numpy array of x values.
        yin: numpy array of matching y values.

    Returns:
        (xout, yout) numpy arrays; yout is float.

    Bug fix: in the previous version a *new* x value appearing at the
    last index was folded into the preceding group, so its y value was
    sorted together with the wrong group's values.  Groups are now closed
    exactly at every change of x (and at the end of the array), and the
    unused argsort of yin was dropped.  Empty input is handled.
    """
    xin = np.asarray(xin)
    yin = np.asarray(yin)
    order = np.argsort(xin)
    xout = xin[order]
    y_by_x = yin[order]
    yout = np.array(y_by_x, dtype=float)
    n = len(xout)
    start = 0
    for i in range(1, n + 1):
        # Close the current group at the end or when x changes
        if i == n or xout[i] != xout[start]:
            yout[start:i] = np.sort(y_by_x[start:i])
            start = i
    return xout, yout
def getGeneTrainingData(dataset_id, device):
    """Load one of the five DREAM3 size-100 gene datasets.

    Args:
        dataset_id: integer in 1..5 selecting Ecoli1/2 or Yeast1/2/3.
        device: torch device for the training tensor.

    Returns:
        (Xtrain, Gref): the training tensor on `device` and the
        ground-truth adjacency matrix.

    Raises:
        ValueError: for an unknown dataset_id.  (Previously the error was
        only printed and execution continued into a NameError on the
        undefined file paths.)
    """
    datasets = {
        1: ("Size100Ecoli1.pt", "InSilicoSize100-Ecoli1.tsv"),
        2: ("Size100Ecoli2.pt", "InSilicoSize100-Ecoli2.tsv"),
        3: ("Size100Yeast1.pt", "InSilicoSize100-Yeast1.tsv"),
        4: ("Size100Yeast2.pt", "InSilicoSize100-Yeast2.tsv"),
        5: ("Size100Yeast3.pt", "InSilicoSize100-Yeast3.tsv"),
    }
    if dataset_id not in datasets:
        print("Error while loading gene training data")
        raise ValueError("unknown gene dataset id: %s" % dataset_id)
    tensor_file, network_file = datasets[dataset_id]
    InputDataFilePath = "data/dream3/Dream3TensorData/" + tensor_file
    RefNetworkFilePath = "data/dream3/TrueGeneNetworks/" + network_file
    Xtrain = loadTrainingData(InputDataFilePath, device)
    n = Xtrain.shape[0]
    Gref = loadTrueNetwork(RefNetworkFilePath, n)
    return Xtrain, Gref
import numpy as np
# Score saved LORENZ-96 runs for one model: sweep a decision threshold
# over each dataset's estimated network, compute ROC and PR operating
# points against the ground truth, and report AUROC / AUPR per dataset
# plus their means.
import torch
from utilFuncs import calcPerfMetrics, calcAUROC, calcAUPR
# lorenz96 params
T = 1000
F = 40.0
model_name = 'lstm'
mu = 6.6 # F = 10, mu = 0.2| F = 40, mu = 4.0
n = 10
numDatasets = 5
max_iter = 500
verbose = 0
thresholdVec = np.arange(0, 1, 0.05)
#thresholdVec = np.arange(0, 0.1, 0.001)
TPRVec = np.zeros(len(thresholdVec), dtype=np.float32)
FPRVec = np.zeros(len(thresholdVec), dtype=np.float32)
RecallVec = np.zeros(len(thresholdVec), dtype=np.float32)
PrecisionVec = np.zeros(len(thresholdVec), dtype=np.float32)
Gest1 = np.ones((n,n), dtype=np.int16)
Gest2 = np.ones((n,n), dtype=np.int16)
Gref1 = np.zeros((n,n), dtype=np.int16)
AUROCList = np.zeros(numDatasets)
AUPRList = np.zeros(numDatasets)
for dsid in range(numDatasets):
    filename = "../logs/lorenz96/LORENZ%s_T%s_F%s_%s_niter%s_mu_%s.pt" % (dsid+1, T, F, model_name, max_iter, mu)
    savedTensors = torch.load(filename)
    Gest = savedTensors['Gest']
    Gref = savedTensors['Gref']
    Gest.requires_grad = False
    Gest1 = Gest.cpu().numpy()
    Gest2.fill(1)
    # Thresholds are ascending, so zeroing is cumulative but equivalent
    # to a fresh threshold at each step.
    for ii in range(len(thresholdVec)):
        thresh = thresholdVec[ii]
        thresh_idx = (Gest1 < thresh)
        Gest2[thresh_idx] = 0
        TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest2)
        if(verbose > 0):
            print("thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (thresh, TPR, FPR, Precision, Recall))
        TPRVec[ii] = TPR
        FPRVec[ii] = FPR
        PrecisionVec[ii] = Precision
        RecallVec[ii] = Recall
    # Bug fix: calcAUROC takes a verbosity flag and returns
    # (auroc, x, y); the old 2-argument call raised a TypeError.
    AUROCList[dsid], _, _ = calcAUROC(np.flip(FPRVec), np.flip(TPRVec), verbose)
    AUPRList[dsid] = calcAUPR(RecallVec, PrecisionVec)
    print("%s_LORENZ%d_T%s_F%s: AUROC = %.4f, \t AUPR = %.4f" % (model_name, dsid, T, F, AUROCList[dsid], AUPRList[dsid]))
print("Mean AUROC = %.4f, \t Mean AUPR = %.4f" % (AUROCList.mean(), AUPRList.mean()))
| 1,909 | 32.508772 | 146 | py |
import math
# Threshold a saved causal-network estimate at a fixed cutoff for each
# regularisation weight mu, score it against the ground truth, and plot
# the resulting ROC and precision-recall curves.
import torch
import matplotlib
#import sys
import numpy as np
import pylab
from matplotlib import pyplot as plt
#import time
from utilFuncs import loadTrueNetwork, getCausalNodes, calcPerfMetrics, calcAUROC, calcAUPR
dataset = 'LORENZ'
#dataset = 'VAR'
#dataset = 'GENE'
if(dataset == 'LORENZ'):
    dataset_id = 1
    T = 1000
    F = 40.0
    model_name = 'sru'
    max_iter = 500
    n = 10
    thresh = 0.05
    muVec = np.arange(1.0, 11.0, 1.0)
    #muVec = np.arange(18.0, 40.0, 1.0)
    TPRVec = np.zeros(len(muVec), dtype=np.float32)
    FPRVec = np.zeros(len(muVec), dtype=np.float32)
    RecallVec = np.zeros(len(muVec), dtype=np.float32)
    PrecisionVec = np.zeros(len(muVec), dtype=np.float32)
    Gest1 = np.zeros((n,n), dtype=np.int16)
    for muidx in range(len(muVec)):
        mu = muVec[muidx]
        LogPath = "logs/lorenz96/%s/%s%s_T%s_F%s_%s_niter%s_mu_%s.pt" % (model_name, dataset, dataset_id, T, F, model_name, max_iter, mu)
        savedTensors = torch.load(LogPath)
        Gref = savedTensors['Gref']
        Gest = savedTensors['Gest']
        Gest.requires_grad = False
        Gest1 = Gest.cpu().numpy()
        #print(Gest1)
        # Binarise the estimate at the fixed threshold
        Gest1[Gest1 <= thresh] = 0
        Gest1[Gest1 > 0] = 1
        TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest1)
        print("mu = %.4f, \t thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (mu, thresh, TPR, FPR, Precision, Recall))
        TPRVec[muidx] = TPR
        FPRVec[muidx] = FPR
        PrecisionVec[muidx] = Precision
        RecallVec[muidx] = Recall
    # Bug fix: calcAUROC requires a verbosity argument and returns
    # (auroc, x, y); the old 2-argument call raised a TypeError.
    AUROC, FPRVec, TPRVec = calcAUROC(np.flip(FPRVec), np.flip(TPRVec), 0)
    AUPR = calcAUPR(RecallVec, PrecisionVec)
    print("AUROC = %.4f, \t AUPR = %.4f" % (AUROC, AUPR))
    plt.figure(1)
    plt.title("ROC (TPR vs FPR)")
    plt.xlabel("False positive rate")
    plt.ylabel("True positive rate")
    plt.xlim(0,1)
    plt.ylim(0,1)
    plt.plot(FPRVec, TPRVec)
    plt.show()
    plt.figure(2)
    plt.title("ROC (Precision vs Recall)")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim(0,1)
    plt.ylim(0,1)
    plt.plot(RecallVec, PrecisionVec)
    plt.show()
else:
    print("Dataset is not supported")
##########################
# old stuff
###########################
# NOTE(review): legacy block kept for reference and permanently disabled
# via `if 0:`.  Enabling it as-is would raise a NameError because every
# LogPath assignment below is commented out.
if 0:
    #LogPath = "logs/sru_niter1000_mu_2.0.pt"
    #LogPath = "logs/sru_mod_niter1000_mu_1.0.pt"
    #LogPath = "logs/lstm_niter200_mu_1.0.pt"
    #LogPath = "logs/lstm_niter200_mu_1.0.pt"
    #LogPath = "LORENZ_mlp_niter50000_mu_1.pt"
    InputDataFilePath = "Dream3TensorData/Size100Ecoli1.pt"
    RefNetworkFilePath = "Dream3TrueGeneNetworks/InSilicoSize100-Ecoli1.tsv"
    n = 100;
    Gref = loadTrueNetwork(RefNetworkFilePath, n)
    savedTensors = torch.load(LogPath)
    Gest = savedTensors['Gest']
    print(Gest)
    Gest1 = torch.zeros(n,n, requires_grad = False, dtype=torch.int16)
    #thresholdVec = np.arange(0, 0.2, 0.001)
    thresholdVec = np.arange(0, 2, 0.05)
    TPRVec = np.zeros(len(thresholdVec), dtype=np.float32)
    FPRVec = np.zeros(len(thresholdVec), dtype=np.float32)
    RecallVec = np.zeros(len(thresholdVec), dtype=np.float32)
    PrecisionVec = np.zeros(len(thresholdVec), dtype=np.float32)
    #ignore self loops
    for ii in range(n):
        Gest.data[ii][ii] = 0
    # Sweep thresholds, rebuilding the binarised estimate element by element
    for ii in range(len(thresholdVec)):
        thresh = thresholdVec[ii]
        for rr in range(n):
            for cc in range(n):
                Gest1.data[rr][cc] = Gest.data[rr][cc] > thresh
        TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest1)
        print("thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (thresh, TPR, FPR, Precision, Recall))
        TPRVec[ii] = TPR
        FPRVec[ii] = FPR
        PrecisionVec[ii] = Precision
        RecallVec[ii] = Recall
| 3,856 | 30.357724 | 160 | py |
from pathlib import Path
from setuptools import setup, find_packages

# Directory containing this setup.py.
base_dir = Path(__file__).resolve().parent
# Use the README as the PyPI long description.
readme = (base_dir / "README.md").read_text()
setup(
    name="errant",
    version="2.3.3",
    license="MIT",
    description="The ERRor ANnotation Toolkit (ERRANT). Automatically extract and classify edits in parallel sentences.",
    long_description=readme,
    long_description_content_type="text/markdown",
    author="Christopher Bryant, Mariano Felice",
    author_email="christopher.bryant@cl.cam.ac.uk",
    url="https://github.com/chrisjbryant/errant",
    keywords=["automatic annotation", "grammatical errors", "natural language processing"],
    python_requires=">= 3.6",
    install_requires=["spacy>=2.2.0,<3", "rapidfuzz>=2.0.0"],
    packages=find_packages(),
    include_package_data=True,
    # Command line entry points.
    entry_points={
        "console_scripts": [
            "errant_compare = errant.commands.compare_m2:main",
            "errant_m2 = errant.commands.m2_to_m2:main",
            "errant_parallel = errant.commands.parallel_to_m2:main"]},
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        "Topic :: Education",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Text Processing :: Linguistic"]
)
| 1,900 | 39.446809 | 123 | py |
from itertools import groupby
from rapidfuzz.distance import Indel
import spacy.parts_of_speech as POS
from errant.edit import Edit
class Alignment:
    """Token-level alignment between an original and a corrected sentence.

    Builds a Damerau-Levenshtein style alignment with linguistically
    motivated substitution costs, then exposes the cheapest operation
    sequence and several edit-merging strategies over it.
    """
    # Protected class resource
    # POS tags treated as open-class (content) words when costing substitutions.
    _open_pos = {POS.ADJ, POS.ADV, POS.NOUN, POS.VERB}
    # Input 1: An original text string parsed by spacy
    # Input 2: A corrected text string parsed by spacy
    # Input 3: A flag for standard Levenshtein alignment
    def __init__(self, orig, cor, lev=False):
        # Set orig and cor
        self.orig = orig
        self.cor = cor
        # Align orig and cor and get the cost and op matrices
        self.cost_matrix, self.op_matrix = self.align(lev)
        # Get the cheapest align sequence from the op matrix
        self.align_seq = self.get_cheapest_align_seq()
    # Input: A flag for standard Levenshtein alignment
    # Output: The cost matrix and the operation matrix of the alignment
    def align(self, lev):
        """Fill and return the DP cost and operation matrices.

        Ops: "M" match, "S" substitution, "I" insertion, "D" deletion,
        "T<k>" transposition spanning k tokens.
        """
        # Sentence lengths
        o_len = len(self.orig)
        c_len = len(self.cor)
        # Lower case token IDs (for transpositions)
        o_low = [o.lower for o in self.orig]
        c_low = [c.lower for c in self.cor]
        # Create the cost_matrix and the op_matrix
        cost_matrix = [[0.0 for j in range(c_len+1)] for i in range(o_len+1)]
        op_matrix = [["O" for j in range(c_len+1)] for i in range(o_len+1)]
        # Fill in the edges
        for i in range(1, o_len+1):
            cost_matrix[i][0] = cost_matrix[i-1][0] + 1
            op_matrix[i][0] = "D"
        for j in range(1, c_len+1):
            cost_matrix[0][j] = cost_matrix[0][j-1] + 1
            op_matrix[0][j] = "I"
        # Loop through the cost_matrix
        for i in range(o_len):
            for j in range(c_len):
                # Matches
                if self.orig[i].orth == self.cor[j].orth:
                    cost_matrix[i+1][j+1] = cost_matrix[i][j]
                    op_matrix[i+1][j+1] = "M"
                # Non-matches
                else:
                    del_cost = cost_matrix[i][j+1] + 1
                    ins_cost = cost_matrix[i+1][j] + 1
                    trans_cost = float("inf")
                    # Standard Levenshtein (S = 1)
                    if lev: sub_cost = cost_matrix[i][j] + 1
                    # Linguistic Damerau-Levenshtein
                    else:
                        # Custom substitution
                        sub_cost = cost_matrix[i][j] + \
                            self.get_sub_cost(self.orig[i], self.cor[j])
                        # Transpositions require >=2 tokens
                        # Traverse the diagonal while there is not a Match.
                        k = 1
                        while i-k >= 0 and j-k >= 0 and \
                                cost_matrix[i-k+1][j-k+1] != cost_matrix[i-k][j-k]:
                            if sorted(o_low[i-k:i+1]) == sorted(c_low[j-k:j+1]):
                                trans_cost = cost_matrix[i-k][j-k] + k
                                break
                            k += 1
                    # Costs
                    costs = [trans_cost, sub_cost, ins_cost, del_cost]
                    # Get the index of the cheapest (first cheapest if tied)
                    l = costs.index(min(costs))
                    # Save the cost and the op in the matrices
                    cost_matrix[i+1][j+1] = costs[l]
                    if l == 0: op_matrix[i+1][j+1] = "T"+str(k+1)
                    elif l == 1: op_matrix[i+1][j+1] = "S"
                    elif l == 2: op_matrix[i+1][j+1] = "I"
                    else: op_matrix[i+1][j+1] = "D"
        # Return the matrices
        return cost_matrix, op_matrix
    # Input 1: A spacy orig Token
    # Input 2: A spacy cor Token
    # Output: A linguistic cost between 0 < x < 2
    def get_sub_cost(self, o, c):
        """Substitution cost combining lemma, POS and character similarity."""
        # Short circuit if the only difference is case
        if o.lower == c.lower: return 0
        # Lemma cost
        if o.lemma == c.lemma: lemma_cost = 0
        else: lemma_cost = 0.499
        # POS cost
        if o.pos == c.pos: pos_cost = 0
        elif o.pos in self._open_pos and c.pos in self._open_pos: pos_cost = 0.25
        else: pos_cost = 0.5
        # Char cost
        char_cost = Indel.normalized_distance(o.text, c.text)
        # Combine the costs
        return lemma_cost + pos_cost + char_cost
    # Get the cheapest alignment sequence and indices from the op matrix
    # align_seq = [(op, o_start, o_end, c_start, c_end), ...]
    def get_cheapest_align_seq(self):
        """Backtrack through op_matrix to recover the cheapest op sequence."""
        i = len(self.op_matrix)-1
        j = len(self.op_matrix[0])-1
        align_seq = []
        # Work backwards from bottom right until we hit top left
        while i + j != 0:
            # Get the edit operation in the current cell
            op = self.op_matrix[i][j]
            # Matches and substitutions
            if op in {"M", "S"}:
                align_seq.append((op, i-1, i, j-1, j))
                i -= 1
                j -= 1
            # Deletions
            elif op == "D":
                align_seq.append((op, i-1, i, j, j))
                i -= 1
            # Insertions
            elif op == "I":
                align_seq.append((op, i, i, j-1, j))
                j -= 1
            # Transpositions
            else:
                # Get the size of the transposition
                k = int(op[1:])
                align_seq.append((op, i-k, i, j-k, j))
                i -= k
                j -= k
        # Reverse the list to go from left to right and return
        align_seq.reverse()
        return align_seq
    # all-split: Don't merge anything
    def get_all_split_edits(self):
        """One Edit per non-match alignment op."""
        edits = []
        for align in self.align_seq:
            if align[0] != "M":
                edits.append(Edit(self.orig, self.cor, align[1:]))
        return edits
    # all-merge: Merge all adjacent non-match ops
    def get_all_merge_edits(self):
        """Merge each run of adjacent non-match ops into a single Edit."""
        edits = []
        for op, group in groupby(self.align_seq,
                lambda x: True if x[0] == "M" else False):
            if not op:
                merged = self.merge_edits(list(group))
                edits.append(Edit(self.orig, self.cor, merged[0][1:]))
        return edits
    # all-equal: Merge all edits of the same operation type.
    def get_all_equal_edits(self):
        """Merge each run of adjacent same-type non-match ops into one Edit."""
        edits = []
        for op, group in groupby(self.align_seq, lambda x: x[0]):
            if op != "M":
                merged = self.merge_edits(list(group))
                edits.append(Edit(self.orig, self.cor, merged[0][1:]))
        return edits
    # Merge the input alignment sequence to a single edit span
    def merge_edits(self, seq):
        """Collapse a non-empty op sequence to a single spanning "X" op."""
        if seq: return [("X", seq[0][1], seq[-1][2], seq[0][3], seq[-1][4])]
        else: return seq
    # Alignment object string representation
    def __str__(self):
        """Human-readable dump of the inputs, matrices and best alignment."""
        orig = " ".join(["Orig:"]+[tok.text for tok in self.orig])
        cor = " ".join(["Cor:"]+[tok.text for tok in self.cor])
        cost_matrix = "\n".join(["Cost Matrix:"]+[str(row) for row in self.cost_matrix])
        op_matrix = "\n".join(["Operation Matrix:"]+[str(row) for row in self.op_matrix])
        seq = "Best alignment: "+str([a[0] for a in self.align_seq])
        return "\n".join([orig, cor, cost_matrix, op_matrix, seq])
| 7,294 | 40.685714 | 89 | py |
from importlib import import_module
import spacy
from errant.annotator import Annotator
# ERRANT version
__version__ = '2.3.3'
# Load an ERRANT Annotator object for a given language
def load(lang, nlp=None):
# Make sure the language is supported
supported = {"en"}
if lang not in supported:
raise Exception("%s is an unsupported or unknown language" % lang)
# Load spacy
nlp = nlp or spacy.load(lang, disable=["ner"])
# Load language edit merger
merger = import_module("errant.%s.merger" % lang)
# Load language edit classifier
classifier = import_module("errant.%s.classifier" % lang)
# The English classifier needs spacy
if lang == "en": classifier.nlp = nlp
# Return a configured ERRANT annotator
return Annotator(lang, nlp, merger, classifier) | 813 | 29.148148 | 74 | py |
from errant.alignment import Alignment
from errant.edit import Edit
from spacy.tokens import Doc
# Main ERRANT Annotator class
# Main ERRANT Annotator class
class Annotator:
    """Ties together a spacy pipeline, an edit merger and an edit
    classifier for one language, and drives the annotation workflow."""

    # Input 1: A string language id: e.g. "en"
    # Input 2: A spacy processing object for the language
    # Input 3: A merging module for the language
    # Input 4: A classifier module for the language
    def __init__(self, lang, nlp=None, merger=None, classifier=None):
        self.lang = lang
        self.nlp = nlp
        self.merger = merger
        self.classifier = classifier

    def parse(self, text, tokenise=False):
        """Parse a text string with spacy.

        When `tokenise` is True the full pipeline (including the
        tokenizer) runs; otherwise the text is split on whitespace and
        only tagged and parsed.
        """
        if tokenise:
            return self.nlp(text)
        doc = Doc(self.nlp.vocab, text.split())
        self.nlp.tagger(doc)
        self.nlp.parser(doc)
        return doc

    def align(self, orig, cor, lev=False):
        """Align two spacy-parsed texts; `lev` selects plain Levenshtein."""
        return Alignment(orig, cor, lev)

    def merge(self, alignment, merging="rules"):
        """Turn an Alignment into a list of Edit objects.

        Strategies: "rules" (rule-based), "all-split" (no merging),
        "all-merge" (merge adjacent non-matches), "all-equal" (merge
        same-type runs).
        """
        if merging == "rules":
            return self.merger.get_rule_edits(alignment)
        if merging == "all-split":
            return alignment.get_all_split_edits()
        if merging == "all-merge":
            return alignment.get_all_merge_edits()
        if merging == "all-equal":
            return alignment.get_all_equal_edits()
        raise Exception("Unknown merging strategy. Choose from: "
            "rules, all-split, all-merge, all-equal.")

    def classify(self, edit):
        """Assign an error type to `edit` via the language classifier."""
        return self.classifier.classify(edit)

    def annotate(self, orig, cor, lev=False, merging="rules"):
        """Align, merge and classify: returns typed Edit objects for the
        differences between the parsed `orig` and `cor` texts."""
        edits = self.merge(self.align(orig, cor, lev), merging)
        # classify() updates each edit in place
        for e in edits:
            self.classify(e)
        return edits

    def import_edit(self, orig, cor, edit, min=True, old_cat=False):
        """Build an Edit from a token-span list.

        Args:
            edit: [o_start, o_end, c_start, c_end] or the same plus a
                trailing category string.
            min: shrink the edit span, e.g. [a b -> a c] becomes [b -> c].
            old_cat: keep the supplied category instead of re-classifying.
        """
        if len(edit) == 4:
            new_edit = Edit(orig, cor, edit)
        elif len(edit) == 5:
            new_edit = Edit(orig, cor, edit[:4], edit[4])
        else:
            raise Exception("Edit not of the form: "
                "[o_start, o_end, c_start, c_end, (cat)]")
        if min:
            new_edit = new_edit.minimise()
        if not old_cat:
            new_edit = self.classify(new_edit)
        return new_edit
| 3,788 | 36.89 | 83 | py |
# ERRANT edit class
class Edit:
    """A single edit between an original and a corrected token sequence.

    Stores the token offsets into both sides, the corresponding spacy
    spans and their text, plus an error type string ("NA" when unknown).
    """

    # Input 1: An original text string parsed by spacy
    # Input 2: A corrected text string parsed by spacy
    # Input 3: A token span edit list: [o_start, o_end, c_start, c_end]
    # Input 4: An error type string, if known
    def __init__(self, orig, cor, edit, type="NA"):
        # Offsets into the original side
        self.o_start = edit[0]
        self.o_end = edit[1]
        self.o_toks = orig[self.o_start:self.o_end]
        # Offsets into the corrected side
        self.c_start = edit[2]
        self.c_end = edit[3]
        self.c_toks = cor[self.c_start:self.c_end]
        # Cached surface strings ("" for empty spans)
        self.o_str = self.o_toks.text if self.o_toks else ""
        self.c_str = self.c_toks.text if self.c_toks else ""
        self.type = type

    def minimise(self):
        """Shrink the edit in place by trimming tokens shared at both
        ends; e.g. [a b -> a c] becomes [b -> c].  Returns self."""
        # Drop identical leading tokens, advancing both start offsets
        while self.o_toks and self.c_toks and \
                self.o_toks[0].text == self.c_toks[0].text:
            self.o_toks, self.c_toks = self.o_toks[1:], self.c_toks[1:]
            self.o_start, self.c_start = self.o_start + 1, self.c_start + 1
        # Drop identical trailing tokens, retreating both end offsets
        while self.o_toks and self.c_toks and \
                self.o_toks[-1].text == self.c_toks[-1].text:
            self.o_toks, self.c_toks = self.o_toks[:-1], self.c_toks[:-1]
            self.o_end, self.c_end = self.o_end - 1, self.c_end - 1
        # Refresh the cached strings
        self.o_str = self.o_toks.text if self.o_toks else ""
        self.c_str = self.c_toks.text if self.c_toks else ""
        return self

    def to_m2(self, id=0):
        """Format the edit as an M2 annotation line for annotator `id`."""
        span = " ".join(["A", str(self.o_start), str(self.o_end)])
        cor = " ".join([tok.text for tok in self.c_toks])
        return "|||".join([span, self.type, cor, "REQUIRED", "-NONE-", str(id)])

    def __str__(self):
        parts = [
            "Orig: " + str([self.o_start, self.o_end, self.o_str]),
            "Cor: " + str([self.c_start, self.c_end, self.c_str]),
            "Type: " + repr(self.type),
        ]
        return ", ".join(parts)
# Taken from NLTK
# https://github.com/nltk/nltk/blob/develop/nltk/stem/lancaster.py
#
# Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Steven Tomcavage <stomcava@law.upenn.edu>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A word stemmer based on the Lancaster (Paice/Husk) stemming algorithm.
Paice, Chris D. "Another Stemmer." ACM SIGIR Forum 24.3 (1990): 56-61.
"""
import re
class LancasterStemmer:
    """
    Lancaster Stemmer

        >>> from nltk.stem.lancaster import LancasterStemmer
        >>> st = LancasterStemmer()
        >>> st.stem('maximum')     # Remove "-um" when word is intact
        'maxim'
        >>> st.stem('presumably')  # Don't remove "-um" when word is not intact
        'presum'
        >>> st.stem('multiply')    # No action taken if word ends with "-ply"
        'multiply'
        >>> st.stem('provision')   # Replace "-sion" with "-j" to trigger "j" set of rules
        'provid'
        >>> st.stem('owed')        # Word starting with vowel must contain at least 2 letters
        'ow'
        >>> st.stem('ear')         # ditto
        'ear'
        >>> st.stem('saying')      # Words starting with consonant must contain at least 3
        'say'
        >>> st.stem('crying')      # letters and one of those letters must be a vowel
        'cry'
        >>> st.stem('string')      # ditto
        'string'
        >>> st.stem('meant')       # ditto
        'meant'
        >>> st.stem('cement')      # ditto
        'cem'
        >>> st_pre = LancasterStemmer(strip_prefix_flag=True)
        >>> st_pre.stem('kilometer')   # Test Prefix
        'met'
        >>> st_custom = LancasterStemmer(rule_tuple=("ssen4>", "s1t."))
        >>> st_custom.stem("ness")     # Change s to t
        'nest'
    """

    # The rule list is static since it doesn't change between instances.
    # Rule format: <reversed ending><optional * = word must be intact>
    # <digit = chars to remove><letters to append><"." stop | ">" continue>
    default_rule_tuple = (
        "ai*2.",     # -ia > - if intact
        "a*1.",      # -a > - if intact
        "bb1.",      # -bb > -b
        "city3s.",   # -ytic > -ys
        "ci2>",      # -ic > -
        "cn1t>",     # -nc > -nt
        "dd1.",      # -dd > -d
        "dei3y>",    # -ied > -y
        "deec2ss.",  # -ceed >", -cess
        "dee1.",     # -eed > -ee
        "de2>",      # -ed > -
        "dooh4>",    # -hood > -
        "e1>",       # -e > -
        "feil1v.",   # -lief > -liev
        "fi2>",      # -if > -
        "gni3>",     # -ing > -
        "gai3y.",    # -iag > -y
        "ga2>",      # -ag > -
        "gg1.",      # -gg > -g
        "ht*2.",     # -th > - if intact
        "hsiug5ct.", # -guish > -ct
        "hsi3>",     # -ish > -
        "i*1.",      # -i > - if intact
        "i1y>",      # -i > -y
        "ji1d.",     # -ij > -id -- see nois4j> & vis3j>
        "juf1s.",    # -fuj > -fus
        "ju1d.",     # -uj > -ud
        "jo1d.",     # -oj > -od
        "jeh1r.",    # -hej > -her
        "jrev1t.",   # -verj > -vert
        "jsim2t.",   # -misj > -mit
        "jn1d.",     # -nj > -nd
        "j1s.",      # -j > -s
        "lbaifi6.",  # -ifiabl > -
        "lbai4y.",   # -iabl > -y
        "lba3>",     # -abl > -
        "lbi3.",     # -ibl > -
        "lib2l>",    # -bil > -bl
        "lc1.",      # -cl > c
        "lufi4y.",   # -iful > -y
        "luf3>",     # -ful > -
        "lu2.",      # -ul > -
        "lai3>",     # -ial > -
        "lau3>",     # -ual > -
        "la2>",      # -al > -
        "ll1.",      # -ll > -l
        "mui3.",     # -ium > -
        "mu*2.",     # -um > - if intact
        "msi3>",     # -ism > -
        "mm1.",      # -mm > -m
        "nois4j>",   # -sion > -j
        "noix4ct.",  # -xion > -ct
        "noi3>",     # -ion > -
        "nai3>",     # -ian > -
        "na2>",      # -an > -
        "nee0.",     # protect -een
        "ne2>",      # -en > -
        "nn1.",      # -nn > -n
        "pihs4>",    # -ship > -
        "pp1.",      # -pp > -p
        "re2>",      # -er > -
        "rae0.",     # protect -ear
        "ra2.",      # -ar > -
        "ro2>",      # -or > -
        "ru2>",      # -ur > -
        "rr1.",      # -rr > -r
        "rt1>",      # -tr > -t
        "rei3y>",    # -ier > -y
        "sei3y>",    # -ies > -y
        "sis2.",     # -sis > -s
        "si2>",      # -is > -
        "ssen4>",    # -ness > -
        "ss0.",      # protect -ss
        "suo3>",     # -ous > -
        "su*2.",     # -us > - if intact
        "s*1>",      # -s > - if intact
        "s0.",       # -s > -s
        "tacilp4y.", # -plicat > -ply
        "ta2>",      # -at > -
        "tnem4>",    # -ment > -
        "tne3>",     # -ent > -
        "tna3>",     # -ant > -
        "tpir2b.",   # -ript > -rib
        "tpro2b.",   # -orpt > -orb
        "tcud1.",    # -duct > -duc
        "tpmus2.",   # -sumpt > -sum
        "tpec2iv.",  # -cept > -ceiv
        "tulo2v.",   # -olut > -olv
        "tsis0.",    # protect -sist
        "tsi3>",     # -ist > -
        "tt1.",      # -tt > -t
        "uqi3.",     # -iqu > -
        "ugo1.",     # -ogu > -og
        "vis3j>",    # -siv > -j
        "vie0.",     # protect -eiv
        "vi2>",      # -iv > -
        "ylb1>",     # -bly > -bl
        "yli3y>",    # -ily > -y
        "ylp0.",     # protect -ply
        "yl2>",      # -ly > -
        "ygo1.",     # -ogy > -og
        "yhp1.",     # -phy > -ph
        "ymo1.",     # -omy > -om
        "ypo1.",     # -opy > -op
        "yti3>",     # -ity > -
        "yte3>",     # -ety > -
        "ytl2.",     # -lty > -l
        "yrtsi5.",   # -istry > -
        "yra3>",     # -ary > -
        "yro3>",     # -ory > -
        "yfi3.",     # -ify > -
        "ycn2t>",    # -ncy > -nt
        "yca3>",     # -acy > -
        "zi2>",      # -iz > -
        "zy1s.",     # -yz > -ys
    )

    def __init__(self, rule_tuple=None, strip_prefix_flag=False):
        """Create an instance of the Lancaster stemmer.

        :param rule_tuple: optional custom rule set; defaults to
            ``default_rule_tuple``
        :param strip_prefix_flag: if True, strip common prefixes such as
            "kilo-" or "pseudo-" before stemming
        """
        # Setup an empty rule dictionary - it is filled in lazily by
        # parseRules() on the first call to stem()
        self.rule_dictionary = {}
        # Check if a user wants to strip prefix
        self._strip_prefix = strip_prefix_flag
        # Check if a user wants to use his/her own rule tuples.
        self._rule_tuple = rule_tuple if rule_tuple else self.default_rule_tuple

    def parseRules(self, rule_tuple=None):
        """Validate the set of rules used in this stemmer.

        If this function is called as an individual method, without using stem
        method, rule_tuple argument will be compiled into self.rule_dictionary.
        If this function is called within stem, self._rule_tuple will be used.

        :raises ValueError: if any rule does not match the expected format.
        """
        # If there is no argument for the function, use class' own rule tuple.
        rule_tuple = rule_tuple if rule_tuple else self._rule_tuple
        # FIX: raw string; the original non-raw pattern contained invalid
        # escape sequences ("\*", "\.") which warn on modern Pythons
        valid_rule = re.compile(r"^[a-z]+\*?\d[a-z]*[>\.]?$")
        # Empty any old rules from the rule set before adding new ones
        self.rule_dictionary = {}
        for rule in rule_tuple:
            if not valid_rule.match(rule):
                raise ValueError(f"The rule {rule} is invalid")
            # Rules are bucketed by the (reversed) last letter they apply to
            first_letter = rule[0:1]
            if first_letter in self.rule_dictionary:
                self.rule_dictionary[first_letter].append(rule)
            else:
                self.rule_dictionary[first_letter] = [rule]

    def stem(self, word):
        """Stem a word using the Lancaster stemmer and return the stem."""
        # Lower-case the word, since all the rules are lower-cased
        word = word.lower()
        word = self.__stripPrefix(word) if self._strip_prefix else word
        # Save a copy of the original word; "intact" rules only fire on it
        intact_word = word
        # If rule dictionary is empty, parse rule tuple.
        if not self.rule_dictionary:
            self.parseRules()
        return self.__doStemming(word, intact_word)

    def __doStemming(self, word, intact_word):
        """Repeatedly apply matching rules to *word* until none apply."""
        # FIX: raw string, as in parseRules above
        valid_rule = re.compile(r"^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$")
        proceed = True
        while proceed:
            # Find the position of the last letter of the word to be stemmed
            last_letter_position = self.__getLastLetter(word)
            # Only stem the word if it has a last letter and a rule
            # matching that last letter
            if (
                last_letter_position < 0
                or word[last_letter_position] not in self.rule_dictionary
            ):
                proceed = False
            else:
                rule_was_applied = False
                # Go through each rule that matches the word's final letter
                for rule in self.rule_dictionary[word[last_letter_position]]:
                    rule_match = valid_rule.match(rule)
                    if rule_match:
                        (
                            ending_string,
                            intact_flag,
                            remove_total,
                            append_string,
                            cont_flag,
                        ) = rule_match.groups()
                        # Convert the number of chars to remove when stemming
                        # from a string to an integer
                        remove_total = int(remove_total)
                        # Proceed if word's ending matches rule's word ending
                        # (rule endings are stored reversed)
                        if word.endswith(ending_string[::-1]):
                            if intact_flag:
                                # "*" rules only apply to the unstemmed word
                                if word == intact_word and self.__isAcceptable(
                                    word, remove_total
                                ):
                                    word = self.__applyRule(
                                        word, remove_total, append_string
                                    )
                                    rule_was_applied = True
                                    if cont_flag == ".":
                                        proceed = False
                                    break
                            elif self.__isAcceptable(word, remove_total):
                                word = self.__applyRule(
                                    word, remove_total, append_string
                                )
                                rule_was_applied = True
                                if cont_flag == ".":
                                    proceed = False
                                break
                # If no rules apply, the word doesn't need any more stemming
                # FIX: idiomatic truth test instead of "== False"
                if not rule_was_applied:
                    proceed = False
        return word

    def __getLastLetter(self, word):
        """Get the zero-based index of the last alphabetic character in this
        string.

        NOTE: scanning stops at the first non-alphabetic character, so this
        returns the end of the leading alphabetic run (original behaviour).
        """
        last_letter = -1
        for position in range(len(word)):
            if word[position].isalpha():
                last_letter = position
            else:
                break
        return last_letter

    def __isAcceptable(self, word, remove_total):
        """Determine if the word is acceptable for stemming."""
        word_is_acceptable = False
        # If the word starts with a vowel, it must be at least 2
        # characters long to be stemmed
        if word[0] in "aeiouy":
            if len(word) - remove_total >= 2:
                word_is_acceptable = True
        # If the word starts with a consonant, it must be at least 3
        # characters long (including one vowel) to be stemmed
        elif len(word) - remove_total >= 3:
            if word[1] in "aeiouy":
                word_is_acceptable = True
            elif word[2] in "aeiouy":
                word_is_acceptable = True
        return word_is_acceptable

    def __applyRule(self, word, remove_total, append_string):
        """Apply the stemming rule to the word and return the result."""
        # Remove letters from the end of the word
        new_word_length = len(word) - remove_total
        word = word[0:new_word_length]
        # And add new letters to the end of the truncated word
        if append_string:
            word += append_string
        return word

    def __stripPrefix(self, word):
        """Remove a known measurement/pseudo prefix from a word.

        This function originally taken from Whoosh.
        """
        for prefix in (
            "kilo",
            "micro",
            "milli",
            "intra",
            "ultra",
            "mega",
            "nano",
            "pico",
            "pseudo",
        ):
            if word.startswith(prefix):
                return word[len(prefix) :]
        return word

    def __repr__(self):
        return "<LancasterStemmer>"
| 12,340 | 34.26 | 93 | py |
errant | errant-master/errant/en/merger.py | from itertools import combinations, groupby
from re import sub
from string import punctuation
from rapidfuzz.distance import Indel
import spacy.symbols as POS
from errant.edit import Edit
# Merger resources
# Coarse spaCy POS tags for open-class (content) words; process_seq merges
# any leftover candidate span that contains at least one of these
open_pos = {POS.ADJ, POS.AUX, POS.ADV, POS.NOUN, POS.VERB}
# Input: An Alignment object
# Output: A list of Edit objects
def get_rule_edits(alignment):
    """Convert an Alignment into a list of Edit objects via rule-based merging."""
    edits = []

    # Group the alignment ops into runs of M, runs of T* (transpositions,
    # which carry a number after the T) and runs of everything else.
    def op_key(align_op):
        first_char = align_op[0][0]
        return first_char if first_char in {"M", "T"} else False

    for key, run in groupby(alignment.align_seq, op_key):
        run = list(run)
        # Matches produce no edits
        if key == "M":
            continue
        # Transpositions are always kept as-is, one edit per op
        if key == "T":
            seqs = run
        # Runs of D, I and S ops go through the merging/splitting cascade
        else:
            seqs = process_seq(run, alignment)
        for seq in seqs:
            edits.append(Edit(alignment.orig, alignment.cor, seq[1:]))
    return edits
# Input 1: A sequence of adjacent D, I and/or S alignments
# Input 2: An Alignment object
# Output: A sequence of merged/split alignments
def process_seq(seq, alignment):
    """Merge and/or split a run of adjacent D/I/S alignment ops.

    Applies an ordered cascade of heuristics over all start-end subspans of
    the sequence (largest spans first); when a rule fires, it merges or
    splits that subspan and recurses on the remainder either side. Rule
    order is significant. Returns a sequence of alignment op tuples.
    """
    # Return single alignments
    if len(seq) <= 1: return seq
    # Get the ops for the whole sequence
    ops = [op[0] for op in seq]
    # Merge all D xor I ops. (95% of human multi-token edits contain S).
    if set(ops) == {"D"} or set(ops) == {"I"}: return merge_edits(seq)
    content = False # True if edit includes a content word
    # Get indices of all start-end combinations in the seq: 012 = 01, 02, 12
    combos = list(combinations(range(0, len(seq)), 2))
    # Sort them starting with largest spans first
    combos.sort(key = lambda x: x[1]-x[0], reverse=True)
    # Loop through combos
    for start, end in combos:
        # Ignore ranges that do NOT contain a substitution.
        if "S" not in ops[start:end+1]: continue
        # Get the tokens in orig and cor. They will now never be empty.
        o = alignment.orig[seq[start][1]:seq[end][2]]
        c = alignment.cor[seq[start][3]:seq[end][4]]
        # First token possessive suffixes
        if start == 0 and (o[0].tag_ == "POS" or c[0].tag_ == "POS"):
            return [seq[0]] + process_seq(seq[1:], alignment)
        # Merge possessive suffixes: [friends -> friend 's]
        if o[-1].tag_ == "POS" or c[-1].tag_ == "POS":
            return process_seq(seq[:end-1], alignment) + \
                merge_edits(seq[end-1:end+1]) + \
                process_seq(seq[end+1:], alignment)
        # Case changes
        if o[-1].lower == c[-1].lower:
            # Merge first token I or D: [Cat -> The big cat]
            if start == 0 and ((len(o) == 1 and c[0].text[0].isupper()) or \
                    (len(c) == 1 and o[0].text[0].isupper())):
                return merge_edits(seq[start:end+1]) + \
                    process_seq(seq[end+1:], alignment)
            # Merge with previous punctuation: [, we -> . We], [we -> . We]
            if (len(o) > 1 and is_punct(o[-2])) or \
                    (len(c) > 1 and is_punct(c[-2])):
                return process_seq(seq[:end-1], alignment) + \
                    merge_edits(seq[end-1:end+1]) + \
                    process_seq(seq[end+1:], alignment)
        # Merge whitespace/hyphens: [acat -> a cat], [sub - way -> subway]
        s_str = sub("['-]", "", "".join([tok.lower_ for tok in o]))
        t_str = sub("['-]", "", "".join([tok.lower_ for tok in c]))
        if s_str == t_str:
            return process_seq(seq[:start], alignment) + \
                merge_edits(seq[start:end+1]) + \
                process_seq(seq[end+1:], alignment)
        # Merge same POS or auxiliary/infinitive/phrasal verbs:
        # [to eat -> eating], [watch -> look at]
        pos_set = set([tok.pos for tok in o]+[tok.pos for tok in c])
        if len(o) != len(c) and (len(pos_set) == 1 or \
                pos_set.issubset({POS.AUX, POS.PART, POS.VERB})):
            return process_seq(seq[:start], alignment) + \
                merge_edits(seq[start:end+1]) + \
                process_seq(seq[end+1:], alignment)
        # Split rules take effect when we get to smallest chunks
        if end-start < 2:
            # Split adjacent substitutions
            if len(o) == len(c) == 2:
                return process_seq(seq[:start+1], alignment) + \
                    process_seq(seq[start+1:], alignment)
            # Split similar substitutions at sequence boundaries
            if (ops[start] == "S" and char_cost(o[0], c[0]) > 0.75) or \
                    (ops[end] == "S" and char_cost(o[-1], c[-1]) > 0.75):
                return process_seq(seq[:start+1], alignment) + \
                    process_seq(seq[start+1:], alignment)
            # Split final determiners
            if end == len(seq)-1 and ((ops[-1] in {"D", "S"} and \
                    o[-1].pos == POS.DET) or (ops[-1] in {"I", "S"} and \
                    c[-1].pos == POS.DET)):
                return process_seq(seq[:-1], alignment) + [seq[-1]]
        # Set content word flag
        if not pos_set.isdisjoint(open_pos): content = True
    # Merge sequences that contain content words
    if content: return merge_edits(seq)
    else: return seq
# Check whether token is punctuation
def is_punct(token):
    """True if the token is punctuation by POS tag or by surface form."""
    if token.pos == POS.PUNCT:
        return True
    return token.text in punctuation
# Calculate the cost of character alignment; i.e. char similarity
def char_cost(a, b):
    """Character-level similarity of two tokens' texts, in [0, 1]."""
    distance = Indel.normalized_distance(a.text, b.text)
    return 1 - distance
# Merge the input alignment sequence to a single edit span
def merge_edits(seq):
    """Collapse an alignment op sequence into a single "X" span.

    The merged span runs from the first op's start offsets to the last
    op's end offsets. An empty sequence is returned unchanged.
    """
    if not seq:
        return seq
    first, last = seq[0], seq[-1]
    return [("X", first[1], last[2], first[3], last[4])]
errant | errant-master/errant/en/classifier.py | from pathlib import Path
from rapidfuzz.distance import Levenshtein
from errant.en.lancaster import LancasterStemmer
import spacy
import spacy.symbols as POS
# Load Hunspell word list
def load_word_list(path):
    """Load a word list file (one word per line) into a set of stripped words."""
    with open(path) as word_list:
        return {word.strip() for word in word_list}
# Load Universal Dependency POS Tags map file.
# https://universaldependencies.org/tagset-conversion/en-penn-uposf.html
def load_pos_map(path):
    """Load a tab-separated PTB-tag -> Universal-POS map file into a dict.

    A few UD tags are renamed for ERRANT's purposes (ADP -> PREP for
    readability, PROPN -> NOUN, CCONJ -> CONJ), and some spaCy-specific
    PTB tags missing from the original mapping are added by hand.
    """
    # UD tags that ERRANT renames
    renames = {"ADP": "PREP", "PROPN": "NOUN", "CCONJ": "CONJ"}
    map_dict = {}
    with open(path) as map_file:
        for line in map_file:
            cols = line.strip().split("\t")
            ptb, ud = cols[0], cols[1]
            map_dict[ptb] = renames.get(ud, ud.strip())
    # Add some spacy PTB tags not in the original mapping.
    map_dict.update({
        '""': "PUNCT",
        "SP": "SPACE",
        "_SP": "SPACE",
        "BES": "VERB",
        "HVS": "VERB",
        "ADD": "X",
        "GW": "X",
        "NFP": "X",
        "XX": "X",
    })
    return map_dict
# Classifier resources
base_dir = Path(__file__).resolve().parent
# Spacy model slot; set to None here (presumably assigned elsewhere — not
# visible in this file)
nlp = None
# Lancaster Stemmer, used for derivational morphology checks
stemmer = LancasterStemmer()
# GB English word list (inc -ise and -ize), used for spelling checks
spell = load_word_list(base_dir/"resources"/"en_GB-large.txt")
# Part of speech map file: PTB fine tags -> coarse UD-style tags
pos_map = load_pos_map(base_dir/"resources"/"en-ptb_map")
# Open class coarse Spacy POS tags
open_pos1 = {POS.ADJ, POS.ADV, POS.NOUN, POS.VERB}
# Open class coarse Spacy POS tags (strings)
open_pos2 = {"ADJ", "ADV", "NOUN", "VERB"}
# Rare POS tags that make uninformative error categories
rare_pos = {"INTJ", "NUM", "SYM", "X"}
# Contractions
conts = {"'d", "'ll", "'m", "n't", "'re", "'s", "'ve"}
# Special auxiliaries in contractions.
aux_conts = {"ca": "can", "sha": "shall", "wo": "will"}
# Some dep labels that map to pos tags.
dep_map = {
    "acomp": "ADJ",
    "amod": "ADJ",
    "advmod": "ADV",
    "det": "DET",
    "prep": "PREP",
    "prt": "PART",
    "punct": "PUNCT"}
# Input: An Edit object
# Output: The same Edit object with an updated error type
def classify(edit):
    """Set and return *edit* with its error type string filled in.

    Types are prefixed M: (missing), U: (unnecessary) or R: (replacement);
    detected-but-uncorrected edits get the type UNK.
    """
    # Nothing to nothing is a detected but not corrected edit
    if not edit.o_toks and not edit.c_toks:
        edit.type = "UNK"
        return edit
    # Only a correction present: missing token(s)
    if not edit.o_toks:
        edit.type = "M:" + get_one_sided_type(edit.c_toks)
        return edit
    # Only an original present: unnecessary token(s)
    if not edit.c_toks:
        edit.type = "U:" + get_one_sided_type(edit.o_toks)
        return edit
    # Both sides present: replacement or a special case
    if edit.o_str == edit.c_str:
        # Same to same is a detected but not corrected edit
        edit.type = "UNK"
    elif edit.o_toks[-1].lower == edit.c_toks[-1].lower and \
            (len(edit.o_toks) > 1 or len(edit.c_toks) > 1):
        # Ignore a shared (case-insensitive) final token in multi-token
        # edits; e.g. [Doctor -> The doctor], [, since -> . Since].
        # Classify the edit as if the last token wasn't there, then put
        # the full token spans back.
        full_o_toks = edit.o_toks[:]
        full_c_toks = edit.c_toks[:]
        edit.o_toks = edit.o_toks[:-1]
        edit.c_toks = edit.c_toks[:-1]
        edit = classify(edit)
        edit.o_toks = full_o_toks
        edit.c_toks = full_c_toks
    else:
        edit.type = "R:" + get_two_sided_type(edit.o_toks, edit.c_toks)
    return edit
# Input: Spacy tokens
# Output: A list of pos and dep tag strings
def get_edit_info(toks):
    """Return parallel lists of mapped POS tag strings and dep label strings."""
    pos = [pos_map[tok.tag_] for tok in toks]
    dep = [tok.dep_ for tok in toks]
    return pos, dep
# Input: Spacy tokens
# Output: An error type string based on input tokens from orig or cor
# When one side of the edit is null, we can only use the other side
def get_one_sided_type(toks):
    """Classify an edit from one side only (used for M: and U: edits).

    Rules are checked in priority order; the first match wins.
    """
    # Single-token special cases
    if len(toks) == 1:
        tok = toks[0]
        # Possessive noun suffixes; e.g. ' -> 's
        if tok.tag_ == "POS":
            return "NOUN:POSS"
        # Contractions; this rule must come after the possessive rule
        if tok.lower_ in conts:
            return "CONTR"
        # Infinitival "to" is treated as part of a verb form
        if tok.lower_ == "to" and tok.pos == POS.PART and tok.dep_ != "prep":
            return "VERB:FORM"
    # Extract pos tags and parse info from the toks
    pos_list, dep_list = get_edit_info(toks)
    # Auxiliary verbs
    if set(dep_list) <= {"aux", "auxpass"}:
        return "VERB:TENSE"
    # A single POS tag, unless it is rare and uninformative
    if len(set(pos_list)) == 1 and pos_list[0] not in rare_pos:
        return pos_list[0]
    # A single dependency label with a POS-equivalent mapping
    if len(set(dep_list)) == 1 and dep_list[0] in dep_map:
        return dep_map[dep_list[0]]
    # To-infinitives and phrasal verbs
    if set(pos_list) == {"PART", "VERB"}:
        return "VERB"
    # Everything else
    return "OTHER"
# Input 1: Spacy orig tokens
# Input 2: Spacy cor tokens
# Output: An error type string based on orig AND cor
def get_two_sided_type(o_toks, c_toks):
    """Classify a replacement edit using both original and corrected tokens.

    The rules below form an ordered cascade (the first match wins) and are
    grouped into: special cases, spelling/inflection, morphology, general
    POS/dep heuristics, string-similarity heuristics, and multi-token
    rules. The numeric similarity thresholds were tuned manually (per the
    inline notes) — treat them as approximations, not exact boundaries.
    """
    # Extract pos tags and parse info from the toks as lists
    o_pos, o_dep = get_edit_info(o_toks)
    c_pos, c_dep = get_edit_info(c_toks)
    # Orthography; i.e. whitespace and/or case errors.
    if only_orth_change(o_toks, c_toks):
        return "ORTH"
    # Word Order; only matches exact reordering.
    if exact_reordering(o_toks, c_toks):
        return "WO"
    # 1:1 replacements (very common)
    if len(o_toks) == len(c_toks) == 1:
        # 1. SPECIAL CASES
        # Possessive noun suffixes; e.g. ' -> 's
        if o_toks[0].tag_ == "POS" or c_toks[0].tag_ == "POS":
            return "NOUN:POSS"
        # Contraction. Rule must come after possessive.
        if (o_toks[0].lower_ in conts or \
                c_toks[0].lower_ in conts) and \
                o_pos == c_pos:
            return "CONTR"
        # Special auxiliaries in contractions (1); e.g. ca -> can, wo -> will
        # Rule was broken in V1. Turned off this fix for compatibility.
        if (o_toks[0].lower_ in aux_conts and \
                c_toks[0].lower_ == aux_conts[o_toks[0].lower_]) or \
                (c_toks[0].lower_ in aux_conts and \
                o_toks[0].lower_ == aux_conts[c_toks[0].lower_]):
            return "CONTR"
        # Special auxiliaries in contractions (2); e.g. ca -> could, wo -> should
        if o_toks[0].lower_ in aux_conts or \
                c_toks[0].lower_ in aux_conts:
            return "VERB:TENSE"
        # Special: "was" and "were" are the only past tense SVA
        if {o_toks[0].lower_, c_toks[0].lower_} == {"was", "were"}:
            return "VERB:SVA"
        # 2. SPELLING AND INFLECTION
        # Only check alphabetical strings on the original side
        # Spelling errors take precedence over POS errors; this rule is ordered
        if o_toks[0].text.isalpha():
            # Check a GB English dict for both orig and lower case.
            # E.g. "cat" is in the dict, but "Cat" is not.
            if o_toks[0].text not in spell and \
                    o_toks[0].lower_ not in spell:
                # Check if both sides have a common lemma
                if o_toks[0].lemma == c_toks[0].lemma:
                    # Inflection; often count vs mass nouns or e.g. got vs getted
                    if o_pos == c_pos and o_pos[0] in {"NOUN", "VERB"}:
                        return o_pos[0]+":INFL"
                    # Unknown morphology; i.e. we cannot be more specific.
                    else:
                        return "MORPH"
                # Use string similarity to detect true spelling errors.
                else:
                    # Normalised Lev distance works better than Lev ratio
                    str_sim = Levenshtein.normalized_similarity(o_toks[0].lower_, c_toks[0].lower_)
                    # WARNING: THIS IS AN APPROXIMATION.
                    # Thresholds tuned manually on FCE_train + W&I_train
                    # str_sim > 0.55 is almost always a true spelling error
                    if str_sim > 0.55:
                        return "SPELL"
                    # Special scores for shorter sequences are usually SPELL
                    if str_sim == 0.5 or round(str_sim, 3) == 0.333:
                        # Short strings are more likely to be spell: eles -> else
                        if len(o_toks[0].text) <= 4 and len(c_toks[0].text) <= 4:
                            return "SPELL"
                    # The remainder are usually word choice: amounght -> number
                    # Classifying based on cor_pos alone is generally enough.
                    if c_pos[0] not in rare_pos:
                        return c_pos[0]
                    # Anything that remains is OTHER
                    else:
                        return "OTHER"
        # 3. MORPHOLOGY
        # Only ADJ, ADV, NOUN and VERB can have inflectional changes.
        if o_toks[0].lemma == c_toks[0].lemma and \
                o_pos[0] in open_pos2 and \
                c_pos[0] in open_pos2:
            # Same POS on both sides
            if o_pos == c_pos:
                # Adjective form; e.g. comparatives
                if o_pos[0] == "ADJ":
                    return "ADJ:FORM"
                # Noun number
                if o_pos[0] == "NOUN":
                    return "NOUN:NUM"
                # Verbs - various types
                if o_pos[0] == "VERB":
                    # NOTE: These rules are carefully ordered.
                    # Use the dep parse to find some form errors.
                    # Main verbs preceded by aux cannot be tense or SVA.
                    if preceded_by_aux(o_toks, c_toks):
                        return "VERB:FORM"
                    # Use fine PTB tags to find various errors.
                    # FORM errors normally involve VBG or VBN.
                    if o_toks[0].tag_ in {"VBG", "VBN"} or \
                            c_toks[0].tag_ in {"VBG", "VBN"}:
                        return "VERB:FORM"
                    # Of what's left, TENSE errors normally involved VBD.
                    if o_toks[0].tag_ == "VBD" or c_toks[0].tag_ == "VBD":
                        return "VERB:TENSE"
                    # Of what's left, SVA errors normally involve VBZ.
                    if o_toks[0].tag_ == "VBZ" or c_toks[0].tag_ == "VBZ":
                        return "VERB:SVA"
                    # Any remaining aux verbs are called TENSE.
                    if o_dep[0].startswith("aux") and \
                            c_dep[0].startswith("aux"):
                        return "VERB:TENSE"
            # Use dep labels to find some more ADJ:FORM
            if set(o_dep+c_dep).issubset({"acomp", "amod"}):
                return "ADJ:FORM"
            # Adj to plural noun is usually noun number; e.g. musical -> musicals.
            if o_pos[0] == "ADJ" and c_toks[0].tag_ == "NNS":
                return "NOUN:NUM"
            # For remaining verb errors (rare), rely on c_pos
            if c_toks[0].tag_ in {"VBG", "VBN"}:
                return "VERB:FORM"
            if c_toks[0].tag_ == "VBD":
                return "VERB:TENSE"
            if c_toks[0].tag_ == "VBZ":
                return "VERB:SVA"
            # Tricky cases that all have the same lemma.
            else:
                return "MORPH"
        # Derivational morphology.
        if stemmer.stem(o_toks[0].text) == stemmer.stem(c_toks[0].text) and \
                o_pos[0] in open_pos2 and c_pos[0] in open_pos2:
            return "MORPH"
        # 4. GENERAL
        # Auxiliaries with different lemmas
        if o_dep[0].startswith("aux") and c_dep[0].startswith("aux"):
            return "VERB:TENSE"
        # POS-based tags. Some of these are context sensitive mispellings.
        if o_pos == c_pos and o_pos[0] not in rare_pos:
            return o_pos[0]
        # Some dep labels map to POS-based tags.
        if o_dep == c_dep and o_dep[0] in dep_map.keys():
            return dep_map[o_dep[0]]
        # Phrasal verb particles.
        if set(o_pos+c_pos) == {"PART", "PREP"} or \
                set(o_dep+c_dep) == {"prt", "prep"}:
            return "PART"
        # Can use dep labels to resolve DET + PRON combinations.
        if set(o_pos+c_pos) == {"DET", "PRON"}:
            # DET cannot be a subject or object.
            if c_dep[0] in {"nsubj", "nsubjpass", "dobj", "pobj"}:
                return "PRON"
            # "poss" indicates possessive determiner
            if c_dep[0] == "poss":
                return "DET"
        # NUM and DET are usually DET; e.g. a <-> one
        if set(o_pos+c_pos) == {"NUM", "DET"}:
            return "DET"
        # Special: other <-> another
        if {o_toks[0].lower_, c_toks[0].lower_} == {"other", "another"}:
            return "DET"
        # Special: your (sincerely) -> yours (sincerely)
        if o_toks[0].lower_ == "your" and c_toks[0].lower_ == "yours":
            return "PRON"
        # Special: no <-> not; this is very context sensitive
        if {o_toks[0].lower_, c_toks[0].lower_} == {"no", "not"}:
            return "OTHER"
        # 5. STRING SIMILARITY
        # These rules are quite language specific.
        if o_toks[0].text.isalpha() and c_toks[0].text.isalpha():
            # Normalised Lev distance works better than Lev ratio
            str_sim = Levenshtein.normalized_similarity(o_toks[0].lower_, c_toks[0].lower_)
            # WARNING: THIS IS AN APPROXIMATION.
            # Thresholds tuned manually on FCE_train + W&I_train
            # A. Short sequences are likely to be SPELL or function word errors
            if len(o_toks[0].text) == 1:
                # i -> in, a -> at
                if len(c_toks[0].text) == 2 and str_sim == 0.5:
                    return "SPELL"
            if len(o_toks[0].text) == 2:
                # in -> is, he -> the, to -> too
                if 2 <= len(c_toks[0].text) <= 3 and str_sim >= 0.5:
                    return "SPELL"
            if len(o_toks[0].text) == 3:
                # Special: the -> that (relative pronoun)
                if o_toks[0].lower_ == "the" and c_toks[0].lower_ == "that":
                    return "PRON"
                # Special: all -> everything
                if o_toks[0].lower_ == "all" and c_toks[0].lower_ == "everything":
                    return "PRON"
                # off -> of, too -> to, out -> our, now -> know
                if 2 <= len(c_toks[0].text) <= 4 and str_sim >= 0.5:
                    return "SPELL"
            # B. Longer sequences are also likely to include content word errors
            if len(o_toks[0].text) == 4:
                # Special: that <-> what
                if {o_toks[0].lower_, c_toks[0].lower_} == {"that", "what"}:
                    return "PRON"
                # Special: well <-> good
                if {o_toks[0].lower_, c_toks[0].lower_} == {"good", "well"} and \
                        c_pos[0] not in rare_pos:
                    return c_pos[0]
                # knew -> new,
                if len(c_toks[0].text) == 3 and str_sim > 0.5:
                    return "SPELL"
                # then <-> than, form -> from
                if len(c_toks[0].text) == 4 and str_sim >= 0.5:
                    return "SPELL"
                # gong -> going, hole -> whole
                if len(c_toks[0].text) == 5 and str_sim == 0.8:
                    return "SPELL"
                # high -> height, west -> western
                if len(c_toks[0].text) > 5 and str_sim > 0.5 and \
                        c_pos[0] not in rare_pos:
                    return c_pos[0]
            if len(o_toks[0].text) == 5:
                # Special: after -> later
                if {o_toks[0].lower_, c_toks[0].lower_} == {"after", "later"} and \
                        c_pos[0] not in rare_pos:
                    return c_pos[0]
                # where -> were, found -> fund
                if len(c_toks[0].text) == 4 and str_sim == 0.8:
                    return "SPELL"
                # thing <-> think, quite -> quiet, their <-> there
                if len(c_toks[0].text) == 5 and str_sim >= 0.6:
                    return "SPELL"
                # house -> domestic, human -> people
                if len(c_toks[0].text) > 5 and c_pos[0] not in rare_pos:
                    return c_pos[0]
            # C. Longest sequences include MORPH errors
            if len(o_toks[0].text) > 5 and len(c_toks[0].text) > 5:
                # Special: therefor -> therefore
                if o_toks[0].lower_ == "therefor" and c_toks[0].lower_ == "therefore":
                    return "SPELL"
                # Special: though <-> thought
                if {o_toks[0].lower_, c_toks[0].lower_} == {"though", "thought"}:
                    return "SPELL"
                # Morphology errors: stress -> stressed, health -> healthy
                if (o_toks[0].text.startswith(c_toks[0].text) or \
                        c_toks[0].text.startswith(o_toks[0].text)) and \
                        str_sim >= 0.66:
                    return "MORPH"
                # Spelling errors: exiting -> exciting, wether -> whether
                if str_sim > 0.8:
                    return "SPELL"
                # Content word errors: learning -> studying, transport -> travel
                if str_sim < 0.55 and c_pos[0] not in rare_pos:
                    return c_pos[0]
                # NOTE: Errors between 0.55 and 0.8 are a mix of SPELL, MORPH and POS
        # Tricky cases
        else:
            return "OTHER"
    # Multi-token replacements (uncommon)
    # All auxiliaries
    if set(o_dep+c_dep).issubset({"aux", "auxpass"}):
        return "VERB:TENSE"
    # All same POS
    if len(set(o_pos+c_pos)) == 1:
        # Final verbs with the same lemma are tense; e.g. eat -> has eaten
        if o_pos[0] == "VERB" and \
                o_toks[-1].lemma == c_toks[-1].lemma:
            return "VERB:TENSE"
        # POS-based tags.
        elif o_pos[0] not in rare_pos:
            return o_pos[0]
    # All same special dep labels.
    if len(set(o_dep+c_dep)) == 1 and \
            o_dep[0] in dep_map.keys():
        return dep_map[o_dep[0]]
    # Infinitives, gerunds, phrasal verbs.
    if set(o_pos+c_pos) == {"PART", "VERB"}:
        # Final verbs with the same lemma are form; e.g. to eat -> eating
        if o_toks[-1].lemma == c_toks[-1].lemma:
            return "VERB:FORM"
        # Remaining edits are often verb; e.g. to eat -> consuming, look at -> see
        else:
            return "VERB"
    # Possessive nouns; e.g. friends -> friend 's
    if (o_pos == ["NOUN", "PART"] or c_pos == ["NOUN", "PART"]) and \
            o_toks[0].lemma == c_toks[0].lemma:
        return "NOUN:POSS"
    # Adjective forms with "most" and "more"; e.g. more free -> freer
    if (o_toks[0].lower_ in {"most", "more"} or \
            c_toks[0].lower_ in {"most", "more"}) and \
            o_toks[-1].lemma == c_toks[-1].lemma and \
            len(o_toks) <= 2 and len(c_toks) <= 2:
        return "ADJ:FORM"
    # Tricky cases.
    else:
        return "OTHER"
# Input 1: Spacy orig tokens
# Input 2: Spacy cor tokens
# Output: Boolean; the difference between orig and cor is only whitespace or case
def only_orth_change(o_toks, c_toks):
    """True if orig and cor differ only in whitespace and/or casing.

    Joining the lower-cased token texts with no separator makes both
    whitespace and case differences disappear.
    """
    orig_joined = "".join(o.lower_ for o in o_toks)
    cor_joined = "".join(c.lower_ for c in c_toks)
    return orig_joined == cor_joined
# Input 1: Spacy orig tokens
# Input 2: Spacy cor tokens
# Output: Boolean; the tokens are exactly the same but in a different order
def exact_reordering(o_toks, c_toks):
    """True if both sides contain exactly the same tokens, possibly reordered.

    Comparing sorted lists (rather than sets) preserves duplicates.
    """
    orig_sorted = sorted(o.lower_ for o in o_toks)
    cor_sorted = sorted(c.lower_ for c in c_toks)
    return orig_sorted == cor_sorted
# Input 1: An original text spacy token.
# Input 2: A corrected text spacy token.
# Output: Boolean; both tokens have a dependant auxiliary verb.
def preceded_by_aux(o_tok, c_tok):
    """True if both inputs' verbs have a dependant auxiliary verb.

    NOTE(review): despite the singular parameter names, both arguments are
    indexed with [0], so callers pass 1-token sequences/spans.

    If the tokens are themselves auxiliaries, they only count when they are
    NOT the first auxiliary of their head verb; the nested loops below find
    the first aux child on each side and compare it to the input token.
    """
    # If the toks are aux, we need to check if they are the first aux.
    if o_tok[0].dep_.startswith("aux") and c_tok[0].dep_.startswith("aux"):
        # Find the parent verb
        o_head = o_tok[0].head
        c_head = c_tok[0].head
        # Find the children of the parent
        o_children = o_head.children
        c_children = c_head.children
        # Check the orig children.
        for o_child in o_children:
            # Look at the first aux...
            if o_child.dep_.startswith("aux"):
                # Check if the string matches o_tok
                if o_child.text != o_tok[0].text:
                    # If it doesn't, o_tok is not first so check cor
                    for c_child in c_children:
                        # Find the first aux in cor...
                        if c_child.dep_.startswith("aux"):
                            # If that doesn't match either, neither are first aux
                            if c_child.text != c_tok[0].text:
                                return True
                            # Break after the first cor aux
                            break
                # Break after the first orig aux.
                break
    # Otherwise, the toks are main verbs so we need to look for any aux.
    else:
        o_deps = [o_dep.dep_ for o_dep in o_tok[0].children]
        c_deps = [c_dep.dep_ for c_dep in c_tok[0].children]
        if "aux" in o_deps or "auxpass" in o_deps:
            if "aux" in c_deps or "auxpass" in c_deps:
                return True
    return False
| 22,283 | 42.608611 | 99 | py |
errant | errant-master/errant/en/__init__.py | 0 | 0 | 0 | py | |
errant | errant-master/errant/commands/m2_to_m2.py | import argparse
import errant
def main():
    """Command line entry point: (re-)annotate an M2 file with ERRANT.

    Reads the input M2 file block by block (a block is an original sentence
    followed by its edit lines, terminated by a blank line). For each block
    it writes the original sentence plus, per coder id, either the gold
    edits (optionally minimised/reclassified), automatically extracted
    edits, or both, depending on the -gold/-auto flags.
    """
    # Parse command line args
    args = parse_args()
    print("Loading resources...")
    # Load Errant
    annotator = errant.load("en")
    print("Processing M2 file...")
    # Open the m2 file and split it into text+edits blocks. Also open out_m2.
    with open(args.m2_file) as m2, open(args.out, "w") as out_m2:
        # Store the current m2_block here
        m2_block = []
        # Loop through m2 lines
        for line in m2:
            line = line.strip()
            # If the line isn't empty, add it to the m2_block
            if line: m2_block.append(line)
            # Otherwise, process the complete blocks
            else:
                # Write the original text to the output M2 file
                out_m2.write(m2_block[0]+"\n")
                # Parse orig with spacy ([2:] strips the leading "S ")
                orig = annotator.parse(m2_block[0][2:])
                # Simplify the edits and sort by coder id
                edit_dict = simplify_edits(m2_block[1:])
                # Loop through coder ids
                for id, raw_edits in sorted(edit_dict.items()):
                    # If the first edit is a noop
                    if raw_edits[0][2] == "noop":
                        # Write the noop and continue
                        out_m2.write(noop_edit(id)+"\n")
                        continue
                    # Apply the edits to generate the corrected text
                    # Also redefine the edits as orig and cor token offsets
                    cor, gold_edits = get_cor_and_edits(m2_block[0][2:], raw_edits)
                    # Parse cor with spacy
                    cor = annotator.parse(cor)
                    # Save detection edits here for auto
                    det_edits = []
                    # Loop through the gold edits
                    for gold_edit in gold_edits:
                        # Do not minimise detection edits (Um/UNK types)
                        if gold_edit[-2] in {"Um", "UNK"}:
                            edit = annotator.import_edit(orig, cor, gold_edit[:-1],
                                min=False, old_cat=args.old_cats)
                            # Overwrite the pseudo correction and set it in the edit
                            edit.c_toks = annotator.parse(gold_edit[-1])
                            # Save the edit for auto
                            det_edits.append(edit)
                            # Write the edit for gold
                            if args.gold:
                                # Write the edit
                                out_m2.write(edit.to_m2(id)+"\n")
                        # Gold annotation
                        elif args.gold:
                            edit = annotator.import_edit(orig, cor, gold_edit[:-1],
                                not args.no_min, args.old_cats)
                            # Write the edit
                            out_m2.write(edit.to_m2(id)+"\n")
                    # Auto annotations
                    if args.auto:
                        # Auto edits
                        edits = annotator.annotate(orig, cor, args.lev, args.merge)
                        # Combine detection and auto edits and sort by orig offsets
                        edits = sorted(det_edits+edits, key=lambda e:(e.o_start, e.o_end))
                        # Write the edits to the output M2 file
                        for edit in edits:
                            out_m2.write(edit.to_m2(id)+"\n")
                # Write a newline when there are no more edits
                out_m2.write("\n")
                # Reset the m2 block
                m2_block = []
# Parse command line args
# Parse command line args
def parse_args():
    """Define and parse the CLI for the m2-file edit extraction/classification tool.

    Returns the parsed argparse.Namespace. Exactly one of -auto/-gold is
    required; -out is always required.
    """
    parser = argparse.ArgumentParser(
        description = "Automatically extract and/or classify edits in an m2 file.",
        formatter_class = argparse.RawTextHelpFormatter,
        usage = "%(prog)s [-h] (-auto | -gold) [options] m2_file -out OUT")
    parser.add_argument(
        "m2_file",
        help = "The path to an m2 file.")
    # -auto and -gold are mutually exclusive; exactly one must be given.
    type_group = parser.add_mutually_exclusive_group(required = True)
    type_group.add_argument(
        "-auto",
        help = "Extract edits automatically.",
        action = "store_true")
    type_group.add_argument(
        "-gold",
        help = "Use existing edit alignments.",
        action = "store_true")
    parser.add_argument(
        "-out",
        help = "The output filepath.",
        required = True)
    parser.add_argument(
        "-no_min",
        help = "Do not minimise edit spans (gold only).",
        action = "store_true")
    parser.add_argument(
        "-old_cats",
        help = "Preserve old error types (gold only); i.e. turn off the classifier.",
        action = "store_true")
    parser.add_argument(
        "-lev",
        help = "Align using standard Levenshtein.",
        action = "store_true")
    parser.add_argument(
        "-merge",
        help = "Choose a merging strategy for automatic alignment.\n"
            "rules: Use a rule-based merging strategy (default)\n"
            "all-split: Merge nothing: MSSDI -> M, S, S, D, I\n"
            "all-merge: Merge adjacent non-matches: MSSDI -> M, SSDI\n"
            "all-equal: Merge adjacent same-type non-matches: MSSDI -> M, SS, D, I",
        choices = ["rules", "all-split", "all-merge", "all-equal"],
        default = "rules")
    args = parser.parse_args()
    return args
# Input: A list of edit lines from an m2 file
# Output: An edit dictionary; key is coder id, value is a list of edits
def simplify_edits(edits):
    """Group raw m2 edit lines by coder id.

    Input: A list of edit lines from an m2 file.
    Output: A dict; key is coder id (string), value is a list of
        [start, end, cat, cor] lists.
    """
    edit_dict = {}
    for line in edits:
        fields = line.split("|||")
        # Strip the leading "A " before reading the token span.
        start_tok, end_tok = fields[0][2:].split()
        proc_edit = [int(start_tok), int(end_tok), fields[1], fields[2]]
        # The final field is the coder id; group the edit under it.
        edit_dict.setdefault(fields[-1], []).append(proc_edit)
    return edit_dict
# Input 1: A tokenised original text string
# Input 2: A list of edits; [o_start, o_end, cat, cor]
# Output 1: A tokenised corrected text string
# Output 2: A list of edits; [o_start, o_end, c_start, c_end, cat, cor]
def get_cor_and_edits(orig, edits):
    """Apply edits to an original text and compute corrected-side spans.

    Input 1: A tokenised original text string.
    Input 2: A list of edits; [o_start, o_end, cat, cor].
    Output 1: A tokenised corrected text string.
    Output 2: A list of edits; [o_start, o_end, c_start, c_end, cat, cor].
    """
    orig_toks = orig.split()
    # Start from a copy of orig and rewrite it edit by edit.
    cor = orig.split()
    new_edits = []
    offset = 0
    # Process edits left to right so the running offset stays consistent.
    for o_start, o_end, cat, cor_str in sorted(edits, key=lambda e: (e[0], e[1])):
        cor_toks = cor_str.split()
        is_detection = cat in {"Um", "UNK"}
        if is_detection:
            # Detection edits keep the original text in cor; remember the
            # pseudo correction so it can be restored in the output edit.
            det_toks = cor_toks
            cor_toks = orig_toks[o_start:o_end]
        # Corrected-side span, shifted by the edits applied so far.
        c_start = o_start + offset
        c_end = c_start + len(cor_toks)
        cor[c_start:o_end + offset] = cor_toks
        # Track how this edit shifts later original offsets.
        offset += len(cor_toks) - (o_end - o_start)
        if is_detection:
            cor_toks = det_toks
        new_edits.append([o_start, o_end, c_start, c_end, cat, " ".join(cor_toks)])
    return " ".join(cor), new_edits
# Input: A coder id
# Output: A noop edit; i.e. text contains no edits
def noop_edit(id=0):
    """Return a noop m2 edit line (i.e. the text contains no edits) for coder *id*."""
    fields = ["A -1 -1", "noop", "-NONE-", "REQUIRED", "-NONE-", str(id)]
    return "|||".join(fields)
errant | errant-master/errant/commands/compare_m2.py | import argparse
from collections import Counter
def main():
    """Score a hypothesis M2 file against a reference M2 file and print results.

    Reads both files, evaluates each sentence's best hyp/ref annotator
    combination, accumulates corpus-level TP/FP/FN and per-category counts,
    and prints the final scores.
    """
    # Parse command line args
    args = parse_args()
    # Open hypothesis and reference m2 files and split into chunks
    # NOTE(review): files are opened without close/with — fine for a短 CLI run, but worth tidying.
    hyp_m2 = open(args.hyp).read().strip().split("\n\n")
    ref_m2 = open(args.ref).read().strip().split("\n\n")
    # Make sure they have the same number of sentences
    assert len(hyp_m2) == len(ref_m2)
    # Store global corpus level best counts here
    best_dict = Counter({"tp":0, "fp":0, "fn":0})
    best_cats = {}
    # Process each sentence
    sents = zip(hyp_m2, ref_m2)
    for sent_id, sent in enumerate(sents):
        # Simplify the edits into lists of lists
        hyp_edits = simplify_edits(sent[0])
        ref_edits = simplify_edits(sent[1])
        # Process the edits for detection/correction based on args
        hyp_dict = process_edits(hyp_edits, args)
        ref_dict = process_edits(ref_edits, args)
        # original sentence for logging (strip the leading "S " and edit lines)
        original_sentence = sent[0][2:].split("\nA")[0]
        # Evaluate edits and get best TP, FP, FN hyp+ref combo.
        count_dict, cat_dict = evaluate_edits(
            hyp_dict, ref_dict, best_dict, sent_id, original_sentence, args)
        # Merge these dicts with best_dict and best_cats
        best_dict += Counter(count_dict)
        best_cats = merge_dict(best_cats, cat_dict)
    # Print results
    print_results(best_dict, best_cats, args)
# Parse command line args
# Parse command line args
def parse_args():
    """Define and parse the CLI for the M2 evaluation script.

    Returns the parsed argparse.Namespace. Evaluation granularity flags
    (-dt/-ds/-cs/-cse) are mutually exclusive; span-based correction is
    the default when none is given.
    """
    parser = argparse.ArgumentParser(
        description="Calculate F-scores for error detection and/or correction.\n"
            "Flags let you evaluate at different levels of granularity.",
        formatter_class=argparse.RawTextHelpFormatter,
        usage="%(prog)s [options] -hyp HYP -ref REF")
    parser.add_argument(
        "-hyp",
        help="A hypothesis M2 file.",
        required=True)
    parser.add_argument(
        "-ref",
        help="A reference M2 file.",
        required=True)
    parser.add_argument(
        "-b",
        "--beta",
        help="Value of beta in F-score. (default: 0.5)",
        default=0.5,
        type=float)
    parser.add_argument(
        "-v",
        "--verbose",
        help="Print verbose output.",
        action="store_true")
    # Only one evaluation granularity can be chosen at a time.
    eval_type = parser.add_mutually_exclusive_group()
    eval_type.add_argument(
        "-dt",
        help="Evaluate Detection in terms of Tokens.",
        action="store_true")
    eval_type.add_argument(
        "-ds",
        help="Evaluate Detection in terms of Spans.",
        action="store_true")
    eval_type.add_argument(
        "-cs",
        help="Evaluate Correction in terms of Spans. (default)",
        action="store_true")
    eval_type.add_argument(
        "-cse",
        help="Evaluate Correction in terms of Spans and Error types.",
        action="store_true")
    parser.add_argument(
        "-single",
        help="Only evaluate single token edits; i.e. 0:1, 1:0 or 1:1",
        action="store_true")
    parser.add_argument(
        "-multi",
        help="Only evaluate multi token edits; i.e. 2+:n or n:2+",
        action="store_true")
    parser.add_argument(
        "-filt",
        help="Do not evaluate the specified error types.",
        nargs="+",
        default=[])
    parser.add_argument(
        "-cat",
        help="Show error category scores.\n"
            "1: Only show operation tier scores; e.g. R.\n"
            "2: Only show main tier scores; e.g. NOUN.\n"
            "3: Show all category scores; e.g. R:NOUN.",
        choices=[1, 2, 3],
        type=int)
    args = parser.parse_args()
    return args
# Input: An m2 format sentence with edits.
# Output: A list of lists. Each edit: [start, end, cat, cor, coder]
def simplify_edits(sent):
    """Convert an m2 sentence block into a flat list of simple edits.

    Input: An m2 format sentence (original line plus "A ..." edit lines).
    Output: A list of lists; each edit is [start, end, cat, cor, coder].
    """
    out_edits = []
    # Skip the first line (the original sentence); the rest are edit lines.
    for line in sent.split("\n")[1:]:
        fields = line[2:].split("|||")  # drop the leading "A " then split
        start_tok, end_tok = fields[0].split()
        out_edits.append(
            [int(start_tok), int(end_tok), fields[1], fields[2], int(fields[-1])])
    return out_edits
# Input 1: A list of edits. Each edit: [start, end, cat, cor, coder]
# Input 2: Command line args
# Output: A dict; key is coder, value is edit dict.
def process_edits(edits, args):
    """Re-index simplified edits per coder for the chosen evaluation mode.

    Input 1: A list of edits. Each edit: [start, end, cat, cor, coder].
    Input 2: Command line args (dt/ds/cse, single/multi/filt filters).
    Output: A dict; key is coder, value is a dict whose keys are span
        tuples (shape depends on the evaluation mode) and whose values
        are lists of categories at that key.
    """
    coder_dict = {}
    # Add an explicit noop edit if there are no edits.
    if not edits: edits = [[-1, -1, "noop", "-NONE-", 0]]
    # Loop through the edits
    for edit in edits:
        # Name the edit elements for clarity
        start = edit[0]
        end = edit[1]
        cat = edit[2]
        cor = edit[3]
        coder = edit[4]
        # Add the coder to the coder_dict if necessary
        if coder not in coder_dict: coder_dict[coder] = {}
        # Optionally apply filters based on args
        # 1. UNK type edits are only useful for detection, not correction.
        if not args.dt and not args.ds and cat == "UNK": continue
        # 2. Only evaluate single token edits; i.e. 0:1, 1:0 or 1:1
        if args.single and (end-start >= 2 or len(cor.split()) >= 2): continue
        # 3. Only evaluate multi token edits; i.e. 2+:n or n:2+
        if args.multi and end-start < 2 and len(cor.split()) < 2: continue
        # 4. If there is a filter, ignore the specified error types
        if args.filt and cat in args.filt: continue
        # Token Based Detection
        if args.dt:
            # Preserve noop edits.
            if start == -1:
                if (start, start) in coder_dict[coder].keys():
                    coder_dict[coder][(start, start)].append(cat)
                else:
                    coder_dict[coder][(start, start)] = [cat]
            # Insertions defined as affecting the token on the right
            elif start == end and start >= 0:
                if (start, start+1) in coder_dict[coder].keys():
                    coder_dict[coder][(start, start+1)].append(cat)
                else:
                    coder_dict[coder][(start, start+1)] = [cat]
            # Edit spans are split for each token in the range.
            else:
                for tok_id in range(start, end):
                    if (tok_id, tok_id+1) in coder_dict[coder].keys():
                        coder_dict[coder][(tok_id, tok_id+1)].append(cat)
                    else:
                        coder_dict[coder][(tok_id, tok_id+1)] = [cat]
        # Span Based Detection
        elif args.ds:
            if (start, end) in coder_dict[coder].keys():
                coder_dict[coder][(start, end)].append(cat)
            else:
                coder_dict[coder][(start, end)] = [cat]
        # Span Based Correction
        else:
            # With error type classification
            if args.cse:
                if (start, end, cat, cor) in coder_dict[coder].keys():
                    coder_dict[coder][(start, end, cat, cor)].append(cat)
                else:
                    coder_dict[coder][(start, end, cat, cor)] = [cat]
            # Without error type classification
            else:
                if (start, end, cor) in coder_dict[coder].keys():
                    coder_dict[coder][(start, end, cor)].append(cat)
                else:
                    coder_dict[coder][(start, end, cor)] = [cat]
    return coder_dict
# Input 1: A hyp dict; key is coder_id, value is dict of processed hyp edits.
# Input 2: A ref dict; key is coder_id, value is dict of processed ref edits.
# Input 3: A dictionary of the best corpus level TP, FP and FN counts so far.
# Input 4: Sentence ID (for verbose output only)
# Input 5: Command line args
# Output 1: A dict of the best corpus level TP, FP and FN for the input sentence.
# Output 2: The corresponding error type dict for the above dict.
def evaluate_edits(hyp_dict, ref_dict, best, sent_id, original_sentence, args):
    """Pick the hyp/ref annotator pair that maximises the GLOBAL F-score.

    Input 1: A hyp dict; key is coder_id, value is dict of processed hyp edits.
    Input 2: A ref dict; key is coder_id, value is dict of processed ref edits.
    Input 3: A dictionary of the best corpus level TP, FP and FN counts so far.
    Input 4: Sentence ID and original sentence (verbose output only).
    Input 5: Command line args.
    Output 1: Best sentence-level {"tp","fp","fn"} counts for this sentence.
    Output 2: The corresponding per-category count dict.
    """
    # Verbose output: display the original sentence
    if args.verbose:
        print('{:-^40}'.format(""))
        print("Original sentence " + str(sent_id) + ": " + original_sentence)
    # Store the best sentence level scores and hyp+ref combination IDs
    # best_f is initialised as -1 cause 0 is a valid result.
    best_tp, best_fp, best_fn, best_f, best_hyp, best_ref = 0, 0, 0, -1, 0, 0
    best_cat = {}
    # Compare each hyp and ref combination
    for hyp_id in hyp_dict.keys():
        for ref_id in ref_dict.keys():
            # Get the local counts for the current combination.
            tp, fp, fn, cat_dict = compareEdits(hyp_dict[hyp_id], ref_dict[ref_id])
            # Compute the local sentence scores (for verbose output only)
            loc_p, loc_r, loc_f = computeFScore(tp, fp, fn, args.beta)
            # Compute the global sentence scores
            p, r, f = computeFScore(
                tp+best["tp"], fp+best["fp"], fn+best["fn"], args.beta)
            # Save the scores if they are better in terms of:
            # 1. Higher F-score
            # 2. Same F-score, higher TP
            # 3. Same F-score and TP, lower FP
            # 4. Same F-score, TP and FP, lower FN
            if (f > best_f) or \
                (f == best_f and tp > best_tp) or \
                (f == best_f and tp == best_tp and fp < best_fp) or \
                (f == best_f and tp == best_tp and fp == best_fp and fn < best_fn):
                best_tp, best_fp, best_fn = tp, fp, fn
                best_f, best_hyp, best_ref = f, hyp_id, ref_id
                best_cat = cat_dict
            # Verbose output
            if args.verbose:
                # Prepare verbose output edits.
                hyp_verb = list(sorted(hyp_dict[hyp_id].keys()))
                ref_verb = list(sorted(ref_dict[ref_id].keys()))
                # add categories
                # hyp_dict[hyp_id] looks like (0, 1, "str")
                # hyp_dict[hyp_id][h] is a list, always length one, of the corresponding category
                hyp_verb = [h + (hyp_dict[hyp_id][h][0],) for h in hyp_verb]
                ref_verb = [r + (ref_dict[ref_id][r][0],) for r in ref_verb]
                # Ignore noop edits
                if not hyp_verb or hyp_verb[0][0] == -1: hyp_verb = []
                if not ref_verb or ref_verb[0][0] == -1: ref_verb = []
                # Print verbose info
                print('{:-^40}'.format(""))
                print("SENTENCE "+str(sent_id)+" - HYP "+str(hyp_id)+" - REF "+str(ref_id))
                print("HYPOTHESIS EDITS :", hyp_verb)
                print("REFERENCE EDITS :", ref_verb)
                print("Local TP/FP/FN :", str(tp), str(fp), str(fn))
                print("Local P/R/F"+str(args.beta)+" :", str(loc_p), str(loc_r), str(loc_f))
                print("Global TP/FP/FN :", str(tp+best["tp"]), str(fp+best["fp"]), str(fn+best["fn"]))
                print("Global P/R/F"+str(args.beta)+" :", str(p), str(r), str(f))
    # Verbose output: display the best hyp+ref combination
    if args.verbose:
        print('{:-^40}'.format(""))
        print("^^ HYP "+str(best_hyp)+", REF "+str(best_ref)+" chosen for sentence "+str(sent_id))
        print("Local results:")
        header = ["Category", "TP", "FP", "FN"]
        body = [[k, *v] for k, v in best_cat.items()]
        print_table([header] + body)
    # Save the best TP, FP and FNs as a dict, and return this and the best_cat dict
    best_dict = {"tp":best_tp, "fp":best_fp, "fn":best_fn}
    return best_dict, best_cat
# Input 1: A dictionary of hypothesis edits for a single system.
# Input 2: A dictionary of reference edits for a single annotator.
# Output 1-3: The TP, FP and FN for the hyp vs the given ref annotator.
# Output 4: A dictionary of the error type counts.
def compareEdits(hyp_edits, ref_edits):
    """Compare hypothesis edits against one annotator's reference edits.

    Input 1: A dictionary of hypothesis edits for a single system.
    Input 2: A dictionary of reference edits for a single annotator.
    Output 1-3: The TP, FP and FN counts for hyp vs the given annotator.
    Output 4: A dictionary mapping error type -> [tp, fp, fn].
    """
    tp = fp = fn = 0
    cat_dict = {}  # {cat: [tp, fp, fn], ...}

    def bump(cat, slot):
        # Increment one of the [tp, fp, fn] counters for this category.
        cat_dict.setdefault(cat, [0, 0, 0])[slot] += 1

    for h_edit, h_cats in hyp_edits.items():
        # noop hyp edits can be neither TP nor FP.
        if h_cats[0] == "noop":
            continue
        if h_edit in ref_edits:
            # TRUE POSITIVE: count once per reference category at this key.
            for r_cat in ref_edits[h_edit]:
                tp += 1
                bump(r_cat, 0)
        else:
            # FALSE POSITIVE: count once per hypothesis category at this key.
            for h_cat in h_cats:
                fp += 1
                bump(h_cat, 1)
    for r_edit, r_cats in ref_edits.items():
        # noop ref edits cannot be FN.
        if r_cats[0] == "noop":
            continue
        if r_edit not in hyp_edits:
            # FALSE NEGATIVE: a reference edit the hypothesis missed.
            for r_cat in r_cats:
                fn += 1
                bump(r_cat, 2)
    return tp, fp, fn, cat_dict
# Input 1-3: True positives, false positives, false negatives
# Input 4: Value of beta in F-score.
# Output 1-3: Precision, Recall and F-score rounded to 4dp.
def computeFScore(tp, fp, fn, beta):
    """Compute precision, recall and F-beta, each rounded to 4dp.

    By scorer convention, precision is 1.0 when there are no false
    positives and recall is 1.0 when there are no false negatives.
    """
    p = 1.0 if not fp else float(tp) / (tp + fp)
    r = 1.0 if not fn else float(tp) / (tp + fn)
    b2 = beta ** 2
    f = ((1 + b2) * p * r) / ((b2 * p) + r) if p + r else 0.0
    return round(p, 4), round(r, 4), round(f, 4)
# Input 1-2: Two error category dicts. Key is cat, value is list of TP, FP, FN.
# Output: The dictionaries combined with cumulative TP, FP, FN.
def merge_dict(dict1, dict2):
    """Fold dict2's per-category [tp, fp, fn] counts into dict1.

    Input 1-2: Two error category dicts; key is cat, value is [tp, fp, fn].
    Output: dict1, updated in place with cumulative counts.
    """
    for cat, stats in dict2.items():
        if cat not in dict1:
            dict1[cat] = stats
        else:
            dict1[cat] = [a + b for a, b in zip(dict1[cat], stats)]
    return dict1
# Input 1: A dict; key is error cat, value is counts for [tp, fp, fn]
# Input 2: Integer value denoting level of error category granularity.
# 1: Operation tier; e.g. M, R, U. 2: Main tier; e.g. NOUN, VERB 3: Everything.
# Output: A dictionary of category TP, FP and FN based on Input 2.
def processCategories(cat_dict, setting):
    """Aggregate category counts at the requested granularity.

    Input 1: A dict; key is error cat, value is [tp, fp, fn] counts.
    Input 2: Granularity; 1: operation tier (e.g. M, R, U), 2: main tier
        (e.g. NOUN, VERB), anything else: return categories untouched.
    Output: A dict of category [tp, fp, fn] counts per the setting.
    """
    proc_cat_dict = {}
    for cat, cnt in cat_dict.items():
        # UNK has no tiers; keep it whole at any granularity.
        if cat == "UNK":
            proc_cat_dict[cat] = cnt
            continue
        if setting == 1:
            key = cat[0]        # operation tier only, e.g. "R"
        elif setting == 2:
            key = cat[2:]       # main tier only, e.g. "NOUN"
        else:
            # Full categories requested: nothing to merge.
            return cat_dict
        if key in proc_cat_dict:
            proc_cat_dict[key] = [a + b for a, b in zip(proc_cat_dict[key], cnt)]
        else:
            proc_cat_dict[key] = cnt
    return proc_cat_dict
# Input 1: A dict of global best TP, FP and FNs
# Input 2: A dict of error types and counts for those TP, FP and FNs
# Input 3: Command line args
def print_results(best, best_cats, args):
    """Print corpus-level (and optional per-category) scores.

    Input 1: A dict of global best TP, FP and FNs.
    Input 2: A dict of error types and counts for those TP, FP and FNs.
    Input 3: Command line args (evaluation mode, beta, -cat level).
    """
    # Prepare output title.
    if args.dt: title = " Token-Based Detection "
    elif args.ds: title = " Span-Based Detection "
    elif args.cse: title = " Span-Based Correction + Classification "
    else: title = " Span-Based Correction "
    # Category Scores
    if args.cat:
        best_cats = processCategories(best_cats, args.cat)
        print("")
        print('{:=^66}'.format(title))
        print("Category".ljust(14), "TP".ljust(8), "FP".ljust(8), "FN".ljust(8),
            "P".ljust(8), "R".ljust(8), "F"+str(args.beta))
        for cat, cnts in sorted(best_cats.items()):
            cat_p, cat_r, cat_f = computeFScore(cnts[0], cnts[1], cnts[2], args.beta)
            print(cat.ljust(14), str(cnts[0]).ljust(8), str(cnts[1]).ljust(8),
                str(cnts[2]).ljust(8), str(cat_p).ljust(8), str(cat_r).ljust(8), cat_f)
    # Print the overall results.
    print("")
    print('{:=^46}'.format(title))
    print("\t".join(["TP", "FP", "FN", "Prec", "Rec", "F"+str(args.beta)]))
    print("\t".join(map(str, [best["tp"], best["fp"],
        best["fn"]]+list(computeFScore(best["tp"], best["fp"], best["fn"], args.beta)))))
    print('{:=^46}'.format(""))
    print("")
def print_table(table):
    """Pretty-print the rows of *table* with right-aligned, padded columns."""
    num_cols = len(table[0])
    # Pad each column to its widest cell plus three spaces.
    widths = [max(len(str(row[c])) for row in table) + 3 for c in range(num_cols)]
    fmt = "".join("{:>" + str(w) + "}" for w in widths)
    for row in table:
        print(fmt.format(*row))
if __name__ == "__main__":
    # Script entry point: run the M2 evaluation.
    main()
| 17,092 | 41.100985 | 103 | py |
errant | errant-master/errant/commands/parallel_to_m2.py | import argparse
from contextlib import ExitStack
import errant
def main():
    """Convert parallel original/corrected text files into one M2 file.

    Reads the original file and >=1 corrected files line by line in
    lockstep, annotates each correction with ERRANT, and writes the
    resulting edits (or a noop edit for identical lines) to args.out.
    """
    # Parse command line args
    args = parse_args()
    print("Loading resources...")
    # Load Errant
    annotator = errant.load("en")
    print("Processing parallel files...")
    # Process an arbitrary number of files line by line simultaneously. Python 3.3+
    # See https://tinyurl.com/y4cj4gth . Also opens the output m2 file.
    with ExitStack() as stack, open(args.out, "w") as out_m2:
        in_files = [stack.enter_context(open(i)) for i in [args.orig]+args.cor]
        # Process each line of all input files
        for line in zip(*in_files):
            # Get the original and all the corrected texts
            orig = line[0].strip()
            cors = line[1:]
            # Skip the line if orig is empty
            if not orig: continue
            # Parse orig with spacy
            orig = annotator.parse(orig, args.tok)
            # Write orig to the output m2 file
            out_m2.write(" ".join(["S"]+[token.text for token in orig])+"\n")
            # Loop through the corrected texts
            for cor_id, cor in enumerate(cors):
                cor = cor.strip()
                # If the texts are the same, write a noop edit
                if orig.text.strip() == cor:
                    out_m2.write(noop_edit(cor_id)+"\n")
                # Otherwise, do extra processing
                else:
                    # Parse cor with spacy
                    cor = annotator.parse(cor, args.tok)
                    # Align the texts and extract and classify the edits
                    edits = annotator.annotate(orig, cor, args.lev, args.merge)
                    # Loop through the edits
                    for edit in edits:
                        # Write the edit to the output m2 file
                        out_m2.write(edit.to_m2(cor_id)+"\n")
            # Write a newline when we have processed all corrections for each line
            out_m2.write("\n")
# Parse command line args
# Parse command line args
def parse_args():
    """Define and parse the CLI for the parallel-text to M2 converter.

    Returns the parsed argparse.Namespace; -orig, -cor and -out are required.
    """
    parser=argparse.ArgumentParser(
        description="Align parallel text files and extract and classify the edits.\n",
        formatter_class=argparse.RawTextHelpFormatter,
        usage="%(prog)s [-h] [options] -orig ORIG -cor COR [COR ...] -out OUT")
    parser.add_argument(
        "-orig",
        help="The path to the original text file.",
        required=True)
    parser.add_argument(
        "-cor",
        help="The paths to >= 1 corrected text files.",
        nargs="+",
        default=[],
        required=True)
    parser.add_argument(
        "-out",
        help="The output filepath.",
        required=True)
    parser.add_argument(
        "-tok",
        help="Word tokenise the text using spacy (default: False).",
        action="store_true")
    parser.add_argument(
        "-lev",
        help="Align using standard Levenshtein (default: False).",
        action="store_true")
    parser.add_argument(
        "-merge",
        help="Choose a merging strategy for automatic alignment.\n"
            "rules: Use a rule-based merging strategy (default)\n"
            "all-split: Merge nothing: MSSDI -> M, S, S, D, I\n"
            "all-merge: Merge adjacent non-matches: MSSDI -> M, SSDI\n"
            "all-equal: Merge adjacent same-type non-matches: MSSDI -> M, SS, D, I",
        choices=["rules", "all-split", "all-merge", "all-equal"],
        default="rules")
    args=parser.parse_args()
    return args
# Input: A coder id
# Output: A noop edit; i.e. text contains no edits
def noop_edit(id=0):
    """Return a noop m2 edit line (text contains no edits) for coder *id*."""
    return "|||".join(["A -1 -1", "noop", "-NONE-", "REQUIRED", "-NONE-", str(id)])
errant | errant-master/errant/commands/__init__.py | 0 | 0 | 0 | py | |
Quantized-GBDT | Quantized-GBDT-master/experiments/generate_script.py | import os
import argparse
# Command line interface for the experiment-script generator.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("data_path", type=str)
arg_parser.add_argument("--use-discretized-grad", action='store_true')
arg_parser.add_argument("--discretized-grad-renew", action='store_true')
arg_parser.add_argument("--stochastic-rounding", action='store_true')
arg_parser.add_argument("--for-speed", action='store_true')
arg_parser.add_argument("--device", type=str, default='cpu')
arg_parser.add_argument("--algorithm", type=str, default='lgb')
arg_parser.add_argument("--log-path", type=str, default='logs')
# NOTE(review): opening/chmod-ing run.sh happens at import time (module side
# effect); generate_script() appends commands to this global handle.
script_fname = 'run.sh'
running = open(script_fname, 'w')
os.system(f"chmod +x {script_fname}")
# Benchmark datasets and their task types (parallel lists, index-aligned).
data = [
    'higgs',
    'epsilon',
    'criteo',
    'bosch',
    'kitsune',
    'yahoo',
    'msltr',
    'year'
]
task = [
    'binary',
    'binary',
    'binary',
    'binary',
    'binary',
    'ranking',
    'ranking',
    'regression'
]
# Datasets that run faster with column-wise histogram construction.
col_wise_data = ['epsilon', 'year', 'yahoo', 'bosch']
# Gradient-discretization bit widths swept when --use-discretized-grad is set.
bins = [2, 3, 4, 5]
def generate_script(data_path, use_discretized_grad, discretized_grad_renew, stochastic_rounding, for_speed, device, algorithm, log_dir):
    """Write one shell command per (dataset, seed[, bins]) run into run.sh.

    Supports three tools via *algorithm*: 'lgb' (LightGBM, optionally with
    discretized gradients over the global `bins` sweep), 'xgb' (XGBoost)
    and 'cat' (CatBoost). When *for_speed* is set, validation sets and
    eval metrics are omitted so timings are not polluted by evaluation.
    Commands are appended to the module-level `running` file handle.
    """
    data_path = data_path.rstrip('/')
    dataset = [
        f'data={data_path}/higgs.train',
        f'data={data_path}/epsilon.train',
        f'data={data_path}/criteo.train',
        f'data={data_path}/bosch.train',
        f'data={data_path}/kitsune.train',
        f'data={data_path}/yahoo.train',
        f'data={data_path}/msltr.train',
        f'data={data_path}/year.train',
    ]
    validset = [
        f'valid={data_path}/higgs.test',
        f'valid={data_path}/epsilon.test',
        f'valid={data_path}/criteo.test',
        f'valid={data_path}/bosch.test',
        f'valid={data_path}/kitsune.test',
        f'valid={data_path}/yahoo.test',
        f'valid={data_path}/msltr.test',
        f'valid={data_path}/year.test'
    ]
    os.system(f"mkdir -p {log_dir}")
    if algorithm == 'lgb':
        use_discretized_grad_str = str(use_discretized_grad).lower()
        discretized_grad_renew_str = str(discretized_grad_renew).lower()
        stochastic_rounding_str = str(stochastic_rounding).lower()
        # With discretized gradients, sweep all 4 bit widths; otherwise one run.
        num_k = 4 if use_discretized_grad else 1
        for i in range(8):
            for j in range(5):
                for k in range(num_k):
                    base_conf_fname = 'train_model.conf' if task[i] == 'binary' else ('train_rank_model.conf' if task[i] == 'ranking' else 'train_reg_model.conf')
                    args = ''
                    args += dataset[i]
                    if not for_speed:
                        args += ' ' + validset[i]
                    args += ' seed=' + str(j)
                    if use_discretized_grad:
                        # 2**bins - 2 gradient levels (one code reserved per sign).
                        args += ' grad_discretize_bins='+str(2**bins[k]-2)
                        log_name = f'./{log_dir}/train_' + data[i] + '_seed'+str(j) + '_bins' + str(bins[k])+'.log'
                    else:
                        log_name = f'./{log_dir}/train_' + data[i] + '_seed'+str(j)+ '_fp32' + '.log'
                    args += f' use_discretized_grad={use_discretized_grad_str} discretized_grad_renew={discretized_grad_renew_str} stochastic_rounding={stochastic_rounding_str}'
                    if data[i] == 'bosch':
                        args += ' learning_rate=0.015 num_leaves=45'
                    if data[i] in col_wise_data:
                        args += ' force_row_wise=false force_col_wise=true'
                    if device != 'cpu':
                        args += f' device_type=cuda gpu_device_id=0 num_threads=24'
                    # fp32 baseline uses the stock LightGBM build; quantized uses the modified one.
                    if not use_discretized_grad:
                        running.write(f'../LightGBM-master/lightgbm config={base_conf_fname} {args} > {log_name} 2>&1\n')
                    else:
                        running.write(f'../LightGBM/lightgbm config={base_conf_fname} {args} > {log_name} 2>&1\n')
    elif algorithm == 'xgb':
        for i in range(8):
            for j in range(5):
                log_name = f'./{log_dir}/train_' + data[i] + '_seed'+str(j)+ '_xgb' + '.log'
                base_conf_fname = 'xgboost.conf'
                args = ''
                args += dataset[i]
                if task[i] == 'ranking':
                    args += '.xgb?format=libsvm'
                args += ' seed=' + str(j)
                if not for_speed:
                    args += ' ' + validset[i].replace('valid=', 'eval[test]=')
                    if task[i] == 'ranking':
                        args += '.xgb?format=libsvm'
                    metric = 'auc' if task[i] == 'binary' else ('rmse' if task[i] == 'regression' else 'ndcg@10')
                    args += f' eval_metric={metric}'
                objective = 'binary:logistic' if task[i] == 'binary' else ('reg:linear' if task[i] == 'regression' else 'rank:pairwise')
                args += f' objective={objective}'
                if data[i] == 'bosch':
                    args += ' eta=0.015 max_leaves=45' # max_leaves=45 for xgboost to reduce time cost for post pruning
                if device != 'cpu':
                    args += ' tree_method=gpu_hist nthread=24'
                running.write(f'../xgboost/xgboost {base_conf_fname} {args} > {log_name} 2>&1\n')
    elif algorithm == 'cat':
        for i in range(8):
            for j in range(5):
                log_name = f'./{log_dir}/train_' + data[i] + '_seed'+str(j)+ '_cat' + '.log'
                base_conf_fname = 'catboost.json'
                args = ''
                args += f"--params-file {base_conf_fname}"
                if data[i] == 'bosch':
                    args += " --learning-rate 0.015 --max-leaves 45"
                # CatBoost reads libsvm via a URI prefix; ranking data uses '.cat' files.
                data_path_prefix = 'libsvm://' if task[i] != 'ranking' else ''
                data_path_suffix = '' if task[i] != 'ranking' else '.cat'
                data_path_for_catboost = dataset[i].split('=')[-1]
                args += f" --learn-set {data_path_prefix}{data_path_for_catboost}{data_path_suffix}"
                if not for_speed:
                    valid_path_for_catboost = validset[i].split('=')[-1]
                    args += f" --test-set {data_path_prefix}{valid_path_for_catboost}{data_path_suffix}"
                args += f" --column-description {data_path_for_catboost.split('.')[0]}.cd"
                loss_function = "Logloss" if task[i] == 'binary' else ("RMSE" if task[i] == 'regression' else "YetiRank")
                args += f" --loss-function {loss_function}"
                if not for_speed:
                    eval_metric = "AUC" if task[i] == 'binary' else ("RMSE" if task[i] == 'regression' else "NDCG:top=10\\;type=Exp")
                    args += f" --eval-metric {eval_metric}"
                args += " --metric-period 1"
                task_type = "CPU" if device == 'cpu' else "GPU --devices 0 --thread-count 24"
                args += f" --task-type {task_type}"
                args += f" --random-seed {j}"
                args += f" --bootstrap-type No --random-strength 0.0 --rsm 1.0" # remove known randomness
                running.write(f"../catboost/catboost/app/catboost fit {args} > {log_name} 2>&1\n")
if __name__ == '__main__':
    # Script entry point: parse args and emit run.sh.
    args = arg_parser.parse_args()
    generate_script(args.data_path, args.use_discretized_grad, args.discretized_grad_renew, args.stochastic_rounding, args.for_speed, args.device, args.algorithm, args.log_path)
| 7,377 | 46.294872 | 177 | py |
Quantized-GBDT | Quantized-GBDT-master/experiments/parse_logs.py | from argparse import ArgumentParser
import os
import numpy as np
# Command line interface for the log parser.
parser = ArgumentParser()
parser.add_argument("log_dir", type=str)
parser.add_argument("out_fname", type=str)
parser.add_argument("num_seeds", type=int)
parser.add_argument("--for-speed", action='store_true')
# Column order of the datasets in the output markdown table.
datasets = ["higgs", "epsilon", "kitsune", "criteo", "bosch", "year", "yahoo", "msltr"]
def parse(log_dir, out_fname, num_seeds, for_speed):
    """Scrape training logs and write a markdown table of results.

    Log filenames are expected as <prefix>_<data>_seed<j>_<bins|fp32|xgb|cat>.log.
    In accuracy mode (for_speed=False) the best metric per run is kept
    (max, or min for the 'year' RMSE task); in speed mode the elapsed
    training time is extracted per tool. Writes mean/std per (bins, data)
    cell to *out_fname*.
    """
    results = {}
    for fname in os.listdir(log_dir):
        _, data, seed, bins = fname.split(".")[0].split("_")
        if data not in results:
            results[data] = {}
        if bins not in results[data]:
            results[data][bins] = np.zeros(num_seeds)
            # LightGBM speed runs also track histogram-construction time separately.
            if bins != 'xgb' and bins != 'cat' and for_speed:
                results[data][f"{bins} hist"] = np.zeros(num_seeds)
        seed = int(seed.split("seed")[-1])
        # 'year' is RMSE (lower is better); everything else is higher-is-better.
        compare_func = max if data != "year" else min
        metric_val = 0.0 if compare_func == max else 1000000.0
        time_val = 0.0
        if not for_speed:
            if (fname.find("fp32") != -1 or fname.find("bins") != -1) and (fname.find("cat") == -1 and fname.find("xgb") == -1):
                # LightGBM log: metric appears on "valid_1 ..." lines.
                val_index = "valid_1 " if (data != "yahoo" and data != "msltr") else "valid_1 ndcg@10 "
                with open(f"{log_dir}/{fname}", "r") as in_file:
                    for line in in_file:
                        if line.find(val_index) != -1:
                            metric_val = compare_func(metric_val, float(line.strip().split(" ")[-1]))
            elif fname.find("cat") != -1:
                # CatBoost log: metric appears after "test:".
                with open(f"{log_dir}/{fname}", "r") as in_file:
                    for line in in_file:
                        if line.find("test:") != -1:
                            metric_val = compare_func(metric_val, float(line.strip().split("test:")[1].strip().split(" ")[0].split("best:")[0].strip()))
            elif fname.find("xgb") != -1:
                # XGBoost log: metric appears after "test-<metric>:".
                with open(f"{log_dir}/{fname}", "r") as in_file:
                    for line in in_file:
                        if line.find("test-") != -1:
                            metric_val = compare_func(metric_val, float(line.strip().split(":")[-1]))
            results[data][bins][seed] = metric_val
        else:
            if (fname.find("fp32") != -1 or fname.find("bins") != -1) and (fname.find("cat") == -1 and fname.find("xgb") == -1):
                with open(f"{log_dir}/{fname}", "r") as in_file:
                    for line in in_file:
                        if line.find("seconds elapsed") != -1:
                            time_val = float(line.strip().split("[Info]")[-1].strip().split(" ")[0])
                        if line.find("ConstructHistograms costs:") != -1 or line.find("ConstructHistogramForLeaf costs:") != -1:
                            hist_time_val = float(line.strip().split(" ")[-1])
                            results[data][f"{bins} hist"][seed] = hist_time_val
            elif fname.find("xgb") != -1:
                all_times = []
                with open(f"{log_dir}/{fname}", "r") as in_file:
                    for line in in_file:
                        if line.find("sec elapsed") != -1:
                            time_val = float(line.strip().split("sec")[0].strip().split(" ")[-1])
                            all_times.append(time_val)
                time_val = all_times[-1] - all_times[1] # subtract time of 1st iteration which contains some time for data preprocessing
            elif fname.find("cat") != -1:
                with open(f"{log_dir}/{fname}", "r") as in_file:
                    for line in in_file:
                        if line.find("total:") != -1:
                            # CatBoost prints "total: 1h 2m 3s 400ms"; sum the parts in seconds.
                            time_str = line.strip().split("total:")[-1].strip().split("remaining:")[0].strip()
                            time_str_splits = time_str.split(" ")
                            time_val = 0.0
                            for time_str_split in time_str_splits:
                                if time_str_split.endswith("ms"):
                                    time_val += float(time_str_split[:-2]) * 1e-3
                                elif time_str_split.endswith("s"):
                                    time_val += float(time_str_split[:-1])
                                elif time_str_split.endswith("m"):
                                    time_val += float(time_str_split[:-1]) * 60.0
                                elif time_str_split.endswith("h"):
                                    time_val += float(time_str_split[:-1]) * 3600.0
            results[data][bins][seed] = time_val
    # Emit a markdown table: one row per bins setting, one column per dataset.
    with open(out_fname, "w") as out_file:
        out_file.write(f"| algorithm |")
        binss = np.hstack([np.sort(list(filter(lambda x: x.find("hist") == -1, results[datasets[0]].keys()))),
            np.sort(list(filter(lambda x: x.find("hist") != -1, results[datasets[0]].keys())))])
        for data in datasets:
            out_file.write(f" {data} |")
        out_file.write("\n")
        out_file.write(f"|-------|-------|-------|-------|-------|-------|-------|-------|-------|\n")
        for bins in binss:
            out_file.write(f"| {bins} |")
            for data in datasets:
                mean = np.mean(results[data][bins])
                std = np.std(results[data][bins])
                if not for_speed:
                    out_file.write(f" {mean:.6f}/{std:.6f} |")
                else:
                    out_file.write(f" {mean:.2f}/{std:.2f} |")
            out_file.write("\n")
if __name__ == "__main__":
    # Script entry point: parse args and scrape the logs.
    args = parser.parse_args()
    parse(args.log_dir, args.out_fname, args.num_seeds, args.for_speed)
| 5,658 | 53.941748 | 152 | py |
neuron-merging | neuron-merging-main/main.py | from __future__ import print_function
import warnings
warnings.simplefilter("ignore", UserWarning)
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import sys
import pickle
import copy
cwd = os.getcwd()
sys.path.append(cwd+'/../')
import models
from torchvision import datasets, transforms
from torch.autograd import Variable
from decompose import Decompose
def save_state(model, acc):
    """Save the model's state_dict (with accuracy) to saved_models/.

    The checkpoint filename is derived from the global command line
    ``args``: arch, dataset and model_type, plus depth/width for
    (Wide)ResNet and, for retrained models, the pruning criterion and
    ratio. DataParallel 'module.' key prefixes are stripped first.
    """
    print('==> Saving model ...')
    state = {
        'acc': acc,
        'state_dict': model.state_dict(),
    }
    # Strip any DataParallel 'module.' prefixes from the keys.
    # Iterate over a snapshot of the keys: popping/inserting entries while
    # iterating the live keys view can raise or skip keys.
    for key in list(state['state_dict'].keys()):
        if 'module' in key:
            print(key)
            state['state_dict'][key.replace('module.', '')] = \
                state['state_dict'].pop(key)

    # Build the dot-separated checkpoint filename from the run settings.
    parts = [args.arch]
    if args.model_type == 'original':
        parts += [args.dataset, args.model_type]
        # (Wide)ResNet checkpoints also encode their depth (and width).
        if args.arch == 'WideResNet':
            parts.append('_'.join(map(str, args.depth_wide)))
        elif args.arch == 'ResNet':
            parts.append(str(args.depth_wide))
    else:  # pruned / retrained model: depth comes right after arch
        if args.arch == 'WideResNet':
            parts.append('_'.join(map(str, args.depth_wide)))
        elif args.arch == 'ResNet':
            parts.append(str(args.depth_wide))
        parts += [args.dataset, args.model_type, args.criterion,
                  str(args.pruning_ratio)]
    model_filename = '.'.join(parts + ['pth.tar'])
    torch.save(state, os.path.join('saved_models/', model_filename))
def train(epoch):
    """Run one training epoch over `train_loader`.

    Uses the module globals `model`, `optimizer`, `criterion`, `args`
    and `train_loader`; logs loss every `args.log_interval` batches.
    """
    model.train()
    for step, (inputs, labels) in enumerate(train_loader):
        if args.cuda:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)
        optimizer.zero_grad()
        preds = model(inputs)
        loss = criterion(preds, labels)
        loss.backward()
        optimizer.step()
        if step % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(inputs), len(train_loader.dataset),
                100. * step / len(train_loader), loss.data))
    return
def test(epoch, evaluate=False):
    """Evaluate the global `model` on `test_loader`.

    Tracks the best accuracy seen so far in the module globals
    `best_acc` / `best_epoch`, checkpointing via save_state() whenever a
    new best occurs during training.

    Args:
        epoch: current epoch number (recorded when a new best occurs).
        evaluate: if True, only report metrics; never save a checkpoint.
    """
    global best_acc
    global best_epoch
    model.eval()
    test_loss = 0
    correct = 0
    # `Variable(..., volatile=True)` has been removed from PyTorch;
    # disable autograd for the whole evaluation loop instead.
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += criterion(output, target).data
            pred = output.data.max(1, keepdim=True)[1]  # index of max logit
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    acc = 100. * float(correct) / len(test_loader.dataset)
    if acc > best_acc:
        best_acc = acc
        best_epoch = epoch
        if not evaluate:
            save_state(model, best_acc)
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss * args.batch_size, correct, len(test_loader.dataset),
        100. * float(correct) / len(test_loader.dataset)))
    print('Best Accuracy: {:.2f}%, Best Epoch: {}\n'.format(best_acc, best_epoch))
    return
def adjust_learning_rate(optimizer, epoch, gammas, schedule):
    """Step the learning rate down at each reached schedule milestone.

    For milestones reached while epoch <= 3/4 of args.epochs, the rate is
    multiplied by gamma; after that point each reached milestone applies
    gamma twice (gamma**2). The new rate is written into every parameter
    group of `optimizer`.
    """
    lr = args.lr
    late_phase = args.epochs * 3 // 4 < epoch
    for gamma, milestone in zip(gammas, schedule):
        if epoch < milestone:
            break  # milestones are ordered; later ones are unreached too
        lr *= gamma * gamma if late_phase else gamma
    print('learning rate : ', lr)
    for group in optimizer.param_groups:
        group['lr'] = lr
    return
def weight_init(model, decomposed_weight_list, target):
    """Copy `decomposed_weight_list` into `model`, in state_dict order.

    Args:
        model: network whose parameters/buffers are overwritten in place.
        decomposed_weight_list: one tensor per state_dict entry, in the
            same order as `model.state_dict()`.
        target: unused; kept for backward compatibility with callers.

    Returns:
        The same `model` instance, with the weights copied in.
    """
    # Hoist the state_dict: calling model.state_dict() every iteration
    # rebuilds the whole ordered dict.  The returned tensors alias the
    # live parameters, so copy_ updates the model in place.
    state = model.state_dict()
    weights = iter(decomposed_weight_list)  # avoids O(n) list.pop(0) per layer
    for layer in state:
        state[layer].copy_(next(weights))
    return model
if __name__=='__main__':
    # settings
    parser = argparse.ArgumentParser(description='Neuron Merging Example')
    parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
                        help='input batch size for testing (default: 256)')
    parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help='number of epochs to train (default: 200)')
    parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                        help='learning rate (default: 0.1)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                        metavar='W', help='weight decay (default: 5e-4)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--arch', action='store', default='VGG',
                        help='network structure: VGG | ResNet | WideResNet | LeNet_300_100')
    parser.add_argument('--pretrained', action='store', default=None,
                        help='pretrained model')
    parser.add_argument('--evaluate', action='store_true', default=False,
                        help='whether to run evaluation')
    parser.add_argument('--retrain', action='store_true', default=False,
                        help='whether to retrain')
    parser.add_argument('--model-type', action='store', default='original',
                        help='model type: original | prune | merge')
    parser.add_argument('--target', action='store', default='conv',
                        help='decomposing target: default=None | conv | ip')
    parser.add_argument('--dataset', action='store', default='cifar10',
                        help='dataset: cifar10 | cifar100 | FashionMNIST')
    parser.add_argument('--criterion', action='store', default='l1-norm',
                        help='criterion : l1-norm | l2-norm | l2-GM')
    parser.add_argument('--threshold', type=float, default=1,
                        help='threshold (default: 1)')
    parser.add_argument('--lamda', type=float, default=0.8,
                        help='lamda (default: 0.8)')
    parser.add_argument('--pruning-ratio', type=float, default=0.7,
                        help='pruning ratio : (default: 0.7)')
    parser.add_argument('--gammas', type=float, nargs='+', default=[0.1,0.1],
                        help='gammas : (default: [0.1,0.1])')
    parser.add_argument('--schedule', type=int, nargs='+', default=[100,200],
                        help='schedule : (default: [100,200])')
    parser.add_argument('--depth-wide', action='store', default=None,
                        help='depth and wide (default: None)')
    args = parser.parse_args()
    # check options
    if not (args.model_type in [None, 'original', 'merge', 'prune']):
        print('ERROR: Please choose the correct model type')
        exit()
    if not (args.target in [None, 'conv', 'ip']):
        print('ERROR: Please choose the correct decompose target')
        exit()
    if not (args.arch in ['VGG','ResNet','WideResNet','LeNet_300_100']):
        print('ERROR: specified arch is not suppported')
        exit()
    # reproducibility: seed both CPU and (if used) GPU RNGs
    torch.manual_seed(args.seed)
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic=True
    # load data
    num_classes = 10
    if args.dataset == 'cifar10':
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        train_data = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
        test_data = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, num_workers=2)
        num_classes = 10
    elif args.dataset == 'cifar100':
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        train_data = datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
        test_data = datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, num_workers=2)
        num_classes = 100
    elif args.dataset == 'FashionMNIST':
        transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)) ])
        train_data = datasets.FashionMNIST('data', train=True, download=True, transform=transform)
        test_data = datasets.FashionMNIST('data', train=False, download=True, transform=transform)
        kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, **kwargs)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, **kwargs)
        num_classes = 10
    else :
        pass
    # SECURITY NOTE(review): eval() on a raw CLI string executes arbitrary
    # code; intended use is a tuple literal like "(28,10)" — confirm, or
    # replace with ast.literal_eval.
    if args.depth_wide:
        args.depth_wide = eval(args.depth_wide)
    cfg = None
    # make cfg: per-layer kept-channel counts for the pruned/merged model
    if args.retrain:
        if args.target == 'conv' :
            if args.arch == 'VGG':
                if args.dataset == 'cifar10':
                    cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M', 256, 256, 256]
                elif args.dataset == 'cifar100':
                    cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 256, 'M', 256, 256, 256]
                # temp_cfg: conv widths only ('M' pooling markers removed)
                temp_cfg = list(filter(('M').__ne__, cfg))
            elif args.arch == 'ResNet':
                cfg = [16, 32, 64]
                for i in range(len(cfg)):
                    cfg[i] = int(cfg[i] * (1 - args.pruning_ratio))
                temp_cfg = cfg
            elif args.arch == 'WideResNet':
                cfg = [16, 32, 64]
                temp_cfg = [16, 32, 32]
                for i in range(len(cfg)):
                    cfg[i] = int(cfg[i] * (1 - args.pruning_ratio))
                    temp_cfg[i] = cfg[i] * args.depth_wide[1]
        elif args.target == 'ip' :
            if args.arch == 'LeNet_300_100':
                cfg = [300,100]
                for i in range(len(cfg)):
                    cfg[i] = round(cfg[i] * (1 - args.pruning_ratio))
                temp_cfg = cfg
            pass
    # generate the model
    if args.arch == 'VGG':
        model = models.VGG(num_classes, cfg=cfg)
    elif args.arch == 'LeNet_300_100':
        model = models.LeNet_300_100(bias_flag=True, cfg=cfg)
    elif args.arch == 'ResNet':
        model = models.ResNet(int(args.depth_wide) ,num_classes,cfg=cfg)
    elif args.arch == 'WideResNet':
        model = models.WideResNet(args.depth_wide[0], num_classes, widen_factor=args.depth_wide[1], cfg=cfg)
    else:
        pass
    if args.cuda:
        model.cuda()
    # pretrain: optionally load a checkpoint saved by save_state()
    # (a dict with 'acc' and 'state_dict' keys)
    best_acc = 0.0
    best_epoch = 0
    if args.pretrained:
        pretrained_model = torch.load(args.pretrained)
        best_epoch = 0
        if args.model_type == 'original':
            best_acc = pretrained_model['acc']
            model.load_state_dict(pretrained_model['state_dict'])
    # weight initialization: prune/merge the pretrained weights into the
    # smaller model before retraining
    if args.retrain:
        decomposed_list = Decompose(args.arch, pretrained_model['state_dict'], args.criterion, args.threshold, args.lamda, args.model_type, temp_cfg, args.cuda).main()
        model = weight_init(model, decomposed_list, args.target)
    # print the number of model parameters
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print('Total parameter number:', params, '\n')
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss()
    if args.evaluate:
        test(0, evaluate=True)
        exit()
    for epoch in range(1, args.epochs + 1):
        adjust_learning_rate(optimizer, epoch, args.gammas, args.schedule)
        train(epoch)
        test(epoch)
neuron-merging | neuron-merging-main/decompose.py | from __future__ import print_function
import argparse
import pickle
import numpy as np
from sklearn.utils.extmath import randomized_svd
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cosine
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.spatial import distance
import sys
import os
import scipy
import random
cwd = os.getcwd()
sys.path.append(cwd+'/../')
def create_scaling_mat_ip_thres_bias(weight, ind, threshold, model_type):
    '''Build the (n_{i+1} x len(ind)) scaling matrix for a linear layer.

    Kept neurons (rows in `ind`) map to identity columns; each pruned
    neuron is redirected to its most cosine-similar kept neuron, scaled
    by the ratio of their L2 norms so the merged neuron preserves the
    removed one's magnitude.

    weight - 2D matrix (n_{i+1}, n_i), np.ndarray
    ind - chosen indices to remain, np.ndarray
    threshold - cosine similarity threshold; below it a pruned neuron is
        dropped (zero row) instead of merged
    model_type - 'merge' redirects pruned neurons; 'prune' just drops them
    '''
    assert(type(weight) == np.ndarray)
    assert(type(ind) == np.ndarray)
    # Cosine similarity between all neuron pairs.  scipy's cdist computes
    # the same cosine distances as sklearn's pairwise_distances and is
    # already imported by this module.
    cosine_sim = 1 - distance.cdist(weight, weight, 'cosine')
    weight_chosen = weight[ind, :]
    scaling_mat = np.zeros([weight.shape[0], weight_chosen.shape[0]])
    for i in range(weight.shape[0]):
        if i in ind:  # kept neuron: identity column
            ind_i, = np.where(ind == i)
            assert(len(ind_i) == 1)  # check if only one index is found
            scaling_mat[i, ind_i] = 1
        else:  # not chosen
            if model_type == 'prune':
                continue
            # np.argmax replaces the original argpartition(...)[-1] idiom:
            # same result, clearer intent.
            sims = cosine_sim[i][ind]
            max_cos_value = sims.max()
            max_cos_value_index = int(np.argmax(sims))
            if threshold and max_cos_value < threshold:
                continue
            baseline_weight = weight_chosen[max_cos_value_index]
            current_weight = weight[i]
            baseline_norm = np.linalg.norm(baseline_weight)
            current_norm = np.linalg.norm(current_weight)
            scaling_mat[i, max_cos_value_index] = current_norm / baseline_norm
    return scaling_mat
def create_scaling_mat_conv_thres_bn(weight, ind, threshold,
                                     bn_weight, bn_bias,
                                     bn_mean, bn_var, lam, model_type):
    '''Build the scaling matrix for a conv layer followed by batch norm.

    Kept filters (rows in `ind`) map to identity columns; each pruned
    filter is redirected to the kept filter minimizing a weighted score of
    cosine distance and BN-compensated bias distance, with a scale that
    folds in both the filter-norm ratio and the two filters' BN affine
    parameters (inference-time running statistics).

    weight - 4D tensor(n, c, h, w), np.ndarray
    ind - chosen indices to remain
    threshold - cosine similarity threshold
    bn_weight, bn_bias - parameters of batch norm layer right after the conv layer
    bn_mean, bn_var - running_mean, running_var of BN (for inference)
    lam - how much to consider cosine sim over bias, float value between 0 and 1
    model_type - 'merge' redirects pruned filters; 'prune' just drops them
    '''
    assert(type(weight) == np.ndarray)
    assert(type(ind) == np.ndarray)
    assert(type(bn_weight) == np.ndarray)
    assert(type(bn_bias) == np.ndarray)
    assert(type(bn_mean) == np.ndarray)
    assert(type(bn_var) == np.ndarray)
    assert(bn_weight.shape[0] == weight.shape[0])
    assert(bn_bias.shape[0] == weight.shape[0])
    assert(bn_mean.shape[0] == weight.shape[0])
    assert(bn_var.shape[0] == weight.shape[0])
    # Flatten each filter to a vector so cosine distances are per-filter.
    weight = weight.reshape(weight.shape[0], -1)
    cosine_dist = pairwise_distances(weight, metric="cosine")
    weight_chosen = weight[ind, :]
    scaling_mat = np.zeros([weight.shape[0], weight_chosen.shape[0]])
    for i in range(weight.shape[0]):
        if i in ind: # chosen
            ind_i, = np.where(ind == i)
            assert(len(ind_i) == 1) # check if only one index is found
            scaling_mat[i, ind_i] = 1
        else: # not chosen
            if model_type == 'prune':
                continue
            current_weight = weight[i]
            current_norm = np.linalg.norm(current_weight)
            current_cos = cosine_dist[i]
            gamma_1 = bn_weight[i]
            beta_1 = bn_bias[i]
            mu_1 = bn_mean[i]
            sigma_1 = bn_var[i]
            # choose one: score every kept filter as a merge candidate
            cos_list = []
            scale_list = []
            bias_list = []
            for chosen_i in ind:
                chosen_weight = weight[chosen_i]
                chosen_norm = np.linalg.norm(chosen_weight, ord = 2)
                chosen_cos = current_cos[chosen_i]
                gamma_2 = bn_weight[chosen_i]
                beta_2 = bn_bias[chosen_i]
                mu_2 = bn_mean[chosen_i]
                sigma_2 = bn_var[chosen_i]
                # compute cosine sim (actually cosine distance; smaller is better)
                cos_list.append(chosen_cos)
                # compute s: filter-norm ratio
                s = current_norm/chosen_norm
                # compute scale term: folds BN affine params of both filters
                scale_term_inference = s * (gamma_2 / gamma_1) * (sigma_1 / sigma_2)
                scale_list.append(scale_term_inference)
                # compute bias term: residual bias error of the substitution,
                # normalized by the scale
                bias_term_inference = abs((gamma_2/sigma_2) * (s * (-(sigma_1*beta_1/gamma_1) + mu_1) - mu_2) + beta_2)
                bias_term_inference = bias_term_inference/scale_term_inference
                bias_list.append(bias_term_inference)
            assert(len(cos_list) == len(ind))
            assert(len(scale_list) == len(ind))
            assert(len(bias_list) == len(ind))
            # merge cosine distance and bias distance.
            # NOTE(review): min-max normalization divides by zero (-> NaN
            # scores) when all bias terms are equal or len(ind) == 1 —
            # confirm inputs avoid this.
            bias_list = (bias_list - np.min(bias_list)) / (np.max(bias_list)-np.min(bias_list))
            score_list = lam * np.array(cos_list) + (1-lam) * np.array(bias_list)
            # find index and scale with minimum distance
            min_ind = np.argmin(score_list)
            min_scale = scale_list[min_ind]
            min_cosine_sim = 1-cos_list[min_ind]
            # check threshold - second
            if threshold and min_cosine_sim < threshold:
                continue
            scaling_mat[i, min_ind] = min_scale
    return scaling_mat
class Decompose:
    """Produce the decomposed (pruned/merged) weight list for a network.

    Walks a trained model's state_dict, selects which output channels or
    neurons to keep per layer according to `criterion`, builds scaling
    matrices via create_scaling_mat_* and returns the full list of
    decomposed tensors in state_dict order (consumed by weight_init in
    main.py).
    """

    def __init__(self, arch, param_dict, criterion, threshold, lamda, model_type, cfg, cuda):
        # arch: 'VGG' | 'ResNet' | 'WideResNet' | 'LeNet_300_100'
        # param_dict: the pretrained model's state_dict
        # criterion: channel-importance measure ('l1-norm' | 'l2-norm' | 'l2-GM')
        # threshold: cosine-similarity cutoff below which channels are dropped
        # lamda: cosine-vs-bias weighting for the conv+BN merge score
        # model_type: 'prune' or 'merge'
        # cfg: number of channels/neurons to keep per prunable layer
        # cuda: whether tensors live on GPU
        self.param_dict = param_dict
        self.arch = arch
        self.criterion = criterion
        self.threshold = threshold
        self.lamda = lamda
        self.model_type = model_type
        self.cfg = cfg
        self.cuda = cuda
        self.output_channel_index = {}
        self.decompose_weight = []

    def get_output_channel_index(self, value, layer_id):
        """Return the indices of the cfg[layer_id] most important output
        channels of `value` under self.criterion."""
        output_channel_index = []
        if len(value.size()) :
            weight_vec = value.view(value.size()[0], -1)
            # NOTE(review): unconditional .cuda() fails on CPU-only runs,
            # even though main() moves tensors to CPU when cuda is False —
            # confirm intended.
            weight_vec = weight_vec.cuda()
            # l1-norm: keep channels with the largest L1 norms
            if self.criterion == 'l1-norm':
                norm = torch.norm(weight_vec, 1, 1)
                norm_np = norm.cpu().detach().numpy()
                arg_max = np.argsort(norm_np)
                arg_max_rev = arg_max[::-1][:self.cfg[layer_id]]
                output_channel_index = sorted(arg_max_rev.tolist())
            # l2-norm: keep channels with the largest L2 norms
            elif self.criterion == 'l2-norm':
                norm = torch.norm(weight_vec, 2, 1)
                norm_np = norm.cpu().detach().numpy()
                arg_max = np.argsort(norm_np)
                arg_max_rev = arg_max[::-1][:self.cfg[layer_id]]
                output_channel_index = sorted(arg_max_rev.tolist())
            # l2-GM: keep channels farthest (in summed Euclidean distance)
            # from all others, i.e. away from the geometric median
            elif self.criterion == 'l2-GM':
                weight_vec = weight_vec.cpu().detach().numpy()
                matrix = distance.cdist(weight_vec, weight_vec, 'euclidean')
                similar_sum = np.sum(np.abs(matrix), axis=0)
                output_channel_index = np.argpartition(similar_sum, -self.cfg[layer_id])[-self.cfg[layer_id]:]
        return output_channel_index

    def get_decompose_weight(self):
        """Walk the state_dict once and fill self.decompose_weight.

        For each prunable conv/linear layer: choose kept channels, build
        the scaling matrix z (via the create_scaling_mat_* helpers), slice
        the layer's weights/BN stats to the kept channels, and fold z into
        the NEXT layer's input channels so the overall function is
        (approximately) preserved.
        """
        # scale matrix (carried from one pruned layer to the next)
        z = None
        # copy original weight
        self.decompose_weight = list(self.param_dict.values())
        # cfg index
        layer_id = -1
        for index, layer in enumerate(self.param_dict):
            original = self.param_dict[layer]
            # VGG
            if self.arch == 'VGG':
                # feature
                if 'feature' in layer :
                    # conv
                    if len(self.param_dict[layer].shape) == 4:
                        layer_id += 1
                        # get index
                        self.output_channel_index[index] = self.get_output_channel_index(self.param_dict[layer], layer_id)
                        # Merge scale matrix from the PREVIOUS layer into
                        # this layer's input channels.
                        # NOTE(review): 'z is not None' would be the safe
                        # form; 'z != None' relies on Tensor.__ne__
                        # special-casing None.
                        if z != None:
                            original = original[:,input_channel_index,:,:]
                            for i, f in enumerate(self.param_dict[layer]):
                                o = f.view(f.shape[0],-1)
                                o = torch.mm(z,o)
                                o = o.view(z.shape[0],f.shape[1],f.shape[2])
                                original[i,:,:,:] = o
                        # make scale matrix with batchNorm
                        # (BN params follow the conv weight in state_dict
                        # order: weight, bias, running_mean, running_var)
                        bn = list(self.param_dict.values())
                        bn_weight = bn[index+1].cpu().detach().numpy()
                        bn_bias = bn[index+2].cpu().detach().numpy()
                        bn_mean = bn[index+3].cpu().detach().numpy()
                        bn_var = bn[index+4].cpu().detach().numpy()
                        x = create_scaling_mat_conv_thres_bn(self.param_dict[layer].cpu().detach().numpy(), np.array(self.output_channel_index[index]), self.threshold,
                                                                bn_weight, bn_bias, bn_mean, bn_var, self.lamda, self.model_type)
                        z = torch.from_numpy(x).type(dtype=torch.float)
                        if self.cuda:
                            z = z.cuda()
                        z = z.t()
                        # pruned
                        pruned = original[self.output_channel_index[index],:,:,:]
                        # update next input channel
                        input_channel_index = self.output_channel_index[index]
                        # update decompose weight
                        self.decompose_weight[index] = pruned
                    # batchNorm: slice per-channel params to kept channels
                    elif len(self.param_dict[layer].shape):
                        # pruned
                        pruned = self.param_dict[layer][input_channel_index]
                        # update decompose weight
                        self.decompose_weight[index] = pruned
                # first classifier: fold the last conv's z into the first
                # linear layer, then stop (remaining layers unchanged)
                else:
                    pruned = torch.zeros(original.shape[0],z.shape[0])
                    if self.cuda:
                        pruned = pruned.cuda()
                    for i, f in enumerate(original):
                        o_old = f.view(z.shape[1],-1)
                        o = torch.mm(z,o_old).view(-1)
                        pruned[i,:] = o
                    self.decompose_weight[index] = pruned
                    break
            # ResNet: only the first conv of each BasicBlock is pruned;
            # conv2 absorbs z so block outputs keep their channel count.
            elif self.arch == 'ResNet':
                # block
                if 'layer' in layer :
                    # new stage starts -> next cfg entry
                    if '0.conv1.weight' in layer :
                        layer_id += 1
                    # Pruning
                    if 'conv1' in layer :
                        # get index
                        self.output_channel_index[index] = self.get_output_channel_index(self.param_dict[layer], layer_id)
                        # make scale matrix with batchNorm
                        bn = list(self.param_dict.values())
                        bn_weight = bn[index+1].cpu().detach().numpy()
                        bn_bias = bn[index+2].cpu().detach().numpy()
                        bn_mean = bn[index+3].cpu().detach().numpy()
                        bn_var = bn[index+4].cpu().detach().numpy()
                        x = create_scaling_mat_conv_thres_bn(self.param_dict[layer].cpu().detach().numpy(), np.array(self.output_channel_index[index]), self.threshold,
                                                                bn_weight, bn_bias, bn_mean, bn_var, self.lamda, self.model_type)
                        z = torch.from_numpy(x).type(dtype=torch.float)
                        if self.cuda:
                            z = z.cuda()
                        z = z.t()
                        # pruned
                        pruned = original[self.output_channel_index[index],:,:,:]
                        # update next input channel
                        input_channel_index = self.output_channel_index[index]
                        # update decompose weight
                        self.decompose_weight[index] = pruned
                    # batchNorm
                    elif 'bn1' in layer :
                        if len(self.param_dict[layer].shape):
                            # pruned
                            pruned = self.param_dict[layer][input_channel_index]
                            # update decompose weight
                            self.decompose_weight[index] = pruned
                    # Merge scale matrix into conv2's input channels
                    elif 'conv2' in layer :
                        if z != None:
                            original = original[:,input_channel_index,:,:]
                            for i, f in enumerate(self.param_dict[layer]):
                                o = f.view(f.shape[0],-1)
                                o = torch.mm(z,o)
                                o = o.view(z.shape[0],f.shape[1],f.shape[2])
                                original[i,:,:,:] = o
                            scaled = original
                        # update decompose weight
                        self.decompose_weight[index] = scaled
            # WideResNet: same scheme as ResNet, per 'block'
            elif self.arch == 'WideResNet':
                # block
                if 'block' in layer :
                    # new stage starts -> next cfg entry
                    if '0.conv1.weight' in layer :
                        layer_id += 1
                    # Pruning
                    if 'conv1' in layer :
                        # get index
                        self.output_channel_index[index] = self.get_output_channel_index(self.param_dict[layer], layer_id)
                        # make scale matrix with batchNorm
                        bn = list(self.param_dict.values())
                        bn_weight = bn[index+1].cpu().detach().numpy()
                        bn_bias = bn[index+2].cpu().detach().numpy()
                        bn_mean = bn[index+3].cpu().detach().numpy()
                        bn_var= bn[index+4].cpu().detach().numpy()
                        x = create_scaling_mat_conv_thres_bn(self.param_dict[layer].cpu().detach().numpy(), np.array(self.output_channel_index[index]), self.threshold,
                                                                bn_weight, bn_bias, bn_mean, bn_var, self.lamda, self.model_type)
                        z = torch.from_numpy(x).type(dtype=torch.float)
                        if self.cuda:
                            z = z.cuda()
                        z = z.t()
                        # pruned
                        pruned = original[self.output_channel_index[index],:,:,:]
                        # update next input channel
                        input_channel_index = self.output_channel_index[index]
                        # update decompose weight
                        self.decompose_weight[index] = pruned
                    # BatchNorm (pre-activation: bn2 normalizes conv1's output)
                    elif 'bn2' in layer :
                        if len(self.param_dict[layer].shape):
                            # pruned
                            pruned = self.param_dict[layer][input_channel_index]
                            # update decompose weight
                            self.decompose_weight[index] = pruned
                    # Merge scale matrix
                    elif 'conv2' in layer :
                        # scale
                        if z != None:
                            original = original[:,input_channel_index,:,:]
                            for i, f in enumerate(self.param_dict[layer]):
                                o = f.view(f.shape[0],-1)
                                o = torch.mm(z,o)
                                o = o.view(z.shape[0],f.shape[1],f.shape[2])
                                original[i,:,:,:] = o
                            scaled = original
                        # update decompose weight
                        self.decompose_weight[index] = scaled
            # LeNet_300_100: fully connected layers; weight and bias are
            # concatenated so the merge accounts for the bias too.
            elif self.arch == 'LeNet_300_100':
                # ip
                if layer in ['ip1.weight','ip2.weight'] :
                    # Merge previous layer's scale matrix into this input
                    if z != None:
                        original = torch.mm(original,z)
                    layer_id += 1
                    # concatenate weight and bias
                    if layer in 'ip1.weight' :
                        weight = self.param_dict['ip1.weight'].cpu().detach().numpy()
                        bias = self.param_dict['ip1.bias'].cpu().detach().numpy()
                    elif layer in 'ip2.weight' :
                        weight = self.param_dict['ip2.weight'].cpu().detach().numpy()
                        bias = self.param_dict['ip2.bias'].cpu().detach().numpy()
                    bias_reshaped = bias.reshape(bias.shape[0],-1)
                    concat_weight = np.concatenate([weight, bias_reshaped], axis = 1)
                    # get index
                    self.output_channel_index[index] = self.get_output_channel_index(torch.from_numpy(concat_weight), layer_id)
                    # make scale matrix with bias
                    x = create_scaling_mat_ip_thres_bias(concat_weight, np.array(self.output_channel_index[index]), self.threshold, self.model_type)
                    z = torch.from_numpy(x).type(dtype=torch.float)
                    if self.cuda:
                        z = z.cuda()
                    # pruned
                    pruned = original[self.output_channel_index[index],:]
                    # update next input channel
                    input_channel_index = self.output_channel_index[index]
                    # update decompose weight
                    self.decompose_weight[index] = pruned
                elif layer in 'ip3.weight':
                    original = torch.mm(original,z)
                    # update decompose weight
                    self.decompose_weight[index] = original
                # update bias
                elif layer in ['ip1.bias','ip2.bias']:
                    self.decompose_weight[index] = original[input_channel_index]
            else :
                pass

    def main(self):
        """Run the decomposition and return the new weight list."""
        if self.cuda == False:
            for layer in self.param_dict:
                self.param_dict[layer] = self.param_dict[layer].cpu()
        self.get_decompose_weight()
        return self.decompose_weight
neuron-merging | neuron-merging-main/models/ResNet.py | import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 conv layer (padding 1, no bias) mapping
    in_planes -> out_planes channels."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Pre-activation ResNet basic block: ReLU is applied to the input
    first, and the residual sum is returned WITHOUT a trailing ReLU
    (the next block's input ReLU supplies it).

    `cfg` is the (possibly pruned) width of the intermediate conv.
    """
    expansion = 1

    def __init__(self, inplanes, planes, cfg, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, cfg, stride)
        self.bn1 = nn.BatchNorm2d(cfg)
        self.conv2 = conv3x3(cfg, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # 1x1 conv path when shape changes
        self.stride = stride

    def forward(self, x):
        # pre-activation: ReLU before the residual split
        x = F.relu(x)
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # out = self.relu(out)
        return out

    def bn_feature(self, x):
        """Same as forward, but also return the post-bn2 activation
        (pre-residual) for feature analysis."""
        x = F.relu(x)
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        bn_feature = out
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # out = self.relu(out)
        return out, bn_feature
class Bottleneck(nn.Module):
    """Pre-activation ResNet bottleneck block (1x1 -> 3x3 -> 1x1,
    expansion 4). Like BasicBlock, ReLU is applied to the input and no
    ReLU follows the residual sum.

    Note: the `cfg` parameter is accepted for interface parity with
    BasicBlock but is not used here.
    """
    expansion = 4

    def __init__(self, inplanes, planes, cfg, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * Bottleneck.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * Bottleneck.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # 1x1 conv path when shape changes
        self.stride = stride

    def forward(self, x):
        # pre-activation: ReLU before the residual split
        x = F.relu(x)
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # out = self.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style pre-activation ResNet with three stages (16/32/64 base
    widths) for 32x32 inputs.

    Args:
        depth: total depth; 6n+2 for BasicBlock, 9n+2 for Bottleneck.
        num_classes: output dimension of the final linear layer.
        cfg: per-stage intermediate conv widths (pruned widths); defaults
            to [16, 32, 64].
        bottleneck: use Bottleneck blocks instead of BasicBlock.
    """

    def __init__(self, depth, num_classes, cfg=None, bottleneck=False):
        super(ResNet, self).__init__()
        if cfg == None:
            cfg = [16, 32, 64]
        self.inplanes = 16
        #print(bottleneck)
        if bottleneck == True:
            n = int((depth - 2) / 9)
            block = Bottleneck
        else:
            n = int((depth - 2) / 6)
            block = BasicBlock
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, n, cfg[0])
        self.layer2 = self._make_layer(block, 32, n, cfg[1], stride=2)
        self.layer3 = self._make_layer(block, 64, n, cfg[2], stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He-style init for convs; BN starts as identity (weight=1, bias=0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, cfg, stride=1):
        """Build one stage of `blocks` residual blocks; the first block
        may downsample (stride/1x1-conv shortcut)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, cfg, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, cfg))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        # final ReLU (blocks are pre-activation and omit it)
        x = F.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x

    def get_bn_before_relu(self):
        """Return the last BN module of each stage (feeds the next ReLU)."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
        else:
            print('ResNet unknown block error !!!')
        return [bn1, bn2, bn3]

    def get_channel_num(self):
        """Unpruned base widths of the three stages."""
        return [16, 32, 64]

    def extract_feature(self, x, preReLU=False):
        """Return the three stage outputs (optionally before ReLU).
        NOTE(review): moves the input to GPU unconditionally."""
        x = x.cuda()
        x = self.conv1(x)
        x = self.bn1(x)
        feat1 = self.layer1(x)
        feat2 = self.layer2(feat1)
        feat3 = self.layer3(feat2)
        x = F.relu(feat3)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        #out = self.fc(x)
        if not preReLU:
            feat1 = F.relu(feat1)
            feat2 = F.relu(feat2)
            feat3 = F.relu(feat3)
        return [feat1, feat2, feat3]#, out

    def bn_feature(self,x):
        """Collect each BasicBlock's post-bn2 activation as numpy arrays
        (only populated for BasicBlock stages)."""
        bn_feature_list = []
        x = self.conv1(x)
        x = self.bn1(x)
        for block in self.layer1:
            if isinstance(block,BasicBlock):
                x, bn_feature = block.bn_feature(x)
                temp = bn_feature.cpu().detach().numpy()
                bn_feature_list.append(temp)
        for block in self.layer2:
            if isinstance(block,BasicBlock):
                x, bn_feature = block.bn_feature(x)
                temp = bn_feature.cpu().detach().numpy()
                bn_feature_list.append(temp)
        for block in self.layer3:
            if isinstance(block,BasicBlock):
                x, bn_feature = block.bn_feature(x)
                temp = bn_feature.cpu().detach().numpy()
                bn_feature_list.append(temp)
        return bn_feature_list

    def ware(self, x):
        """Forward pass returning logits as a numpy array."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = F.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x.cpu().detach().numpy()
| 7,173 | 26.381679 | 99 | py |
neuron-merging | neuron-merging-main/models/LeNet_300_100.py | from __future__ import print_function
import torch
import torch.nn as nn
import os
class LeNet_300_100(nn.Module):
    """LeNet-300-100 MLP for 28x28 single-channel inputs.

    Args:
        bias_flag: whether the linear layers carry bias terms.
        cfg: [hidden1, hidden2] widths (possibly pruned); defaults to
            [300, 100] when None.
    """

    def __init__(self, bias_flag, cfg):
        super(LeNet_300_100, self).__init__()
        if cfg is None:  # 'is None' instead of '== None' (PEP 8 identity test)
            cfg = [300, 100]
        self.ip1 = nn.Linear(28*28, cfg[0], bias=bias_flag)
        self.relu_ip1 = nn.ReLU(inplace=True)
        self.ip2 = nn.Linear(cfg[0], cfg[1], bias=bias_flag)
        self.relu_ip2 = nn.ReLU(inplace=True)
        self.ip3 = nn.Linear(cfg[1], 10, bias=bias_flag)
        return

    def forward(self, x):
        # Flatten (N, 1, 28, 28) -> (N, 784) and run the three FC layers;
        # returns raw logits (no softmax).
        x = x.view(x.size(0), 28*28)
        x = self.ip1(x)
        x = self.relu_ip1(x)
        x = self.ip2(x)
        x = self.relu_ip2(x)
        x = self.ip3(x)
        return x
neuron-merging | neuron-merging-main/models/VGG.py | from __future__ import print_function
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
__all__ = ['VGG']

# Standard VGG configurations keyed by depth; integers are conv output
# channel counts, 'M' marks a 2x2 max-pool.
defaultcfg = {
    11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    13: [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
    19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
}
class VGG(nn.Module):
    """VGG with batch norm for 32x32 inputs (CIFAR).

    Args:
        out_classes: classifier output dimension.
        depth: key into `defaultcfg` when `cfg` is not given.
        init_weights: apply the custom initialization below.
        cfg: explicit (possibly pruned) layer configuration.
    """

    def __init__(self, out_classes=10, depth=16, init_weights=True, cfg=None):
        super(VGG, self).__init__()
        if cfg is None:
            cfg = defaultcfg[depth]
        self.cfg = cfg
        self.feature = self.make_layers(cfg, True)
        self.classifier = nn.Sequential(
              nn.Linear(cfg[-1], 512),
              nn.BatchNorm1d(512),
              nn.ReLU(inplace=True),
              nn.Linear(512, out_classes)
            )
        if init_weights:
            self._initialize_weights()

    def make_layers(self, cfg, batch_norm=False):
        """Build the conv stack from cfg; 'M' entries become max-pools."""
        layers = []
        in_channels = 3
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, bias=False)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.feature(x)
        x = nn.AvgPool2d(2)(x)
        x = x.view(x.size(0), -1)
        y = self.classifier(x)
        return y

    def _initialize_weights(self):
        # He-style init for convs.
        # NOTE(review): BN weights are initialized to 0.5 rather than the
        # conventional 1.0 — presumably deliberate for these experiments;
        # confirm before changing.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(0.5)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def extract_feature(self, x, preReLU=False):
        """Return the feature-stack output (optionally post-ReLU).
        NOTE(review): moves the input to GPU unconditionally."""
        x = x.cuda()
        feat1 = self.feature(x)
        if not preReLU:
            feat1 = F.relu(feat1)
        return [feat1]

    def bn_feature(self,x):
        """Collect each BatchNorm2d output of the feature stack as numpy
        arrays."""
        bn_feature = []
        for layer in self.feature:
            x = layer(x)
            if isinstance(layer, nn.BatchNorm2d) :
                # temp = torch.sum(x,0).cpu().detach().numpy()
                temp = x.cpu().detach().numpy()
                bn_feature.append(temp)
        return bn_feature

    def ware(self, x):
        """Forward pass returning logits as a numpy array."""
        x = self.feature(x)
        x = nn.AvgPool2d(2)(x)
        x = x.view(x.size(0), -1)
        y = self.classifier(x)
        return y.cpu().detach().numpy()
| 3,154 | 26.920354 | 107 | py |
neuron-merging | neuron-merging-main/models/__init__.py | from .LeNet_300_100 import *
from .VGG import *
from .ResNet import *
from .WideResNet import * | 111 | 27 | 32 | py |
neuron-merging | neuron-merging-main/models/WideResNet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """WideResNet pre-activation basic block (BN-ReLU-conv twice).

    `cfg` is the (possibly pruned) width of the intermediate conv. When
    input and output widths differ, the shortcut is a 1x1 conv applied to
    the pre-activated input.
    """

    def __init__(self, in_planes, out_planes, stride, cfg, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, cfg, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(cfg)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(cfg, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 shortcut only when widths differ; otherwise identity.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                               padding=0, bias=False) or None

    def forward(self, x):
        # When widths differ, the pre-activated input feeds BOTH the conv
        # path and the shortcut; otherwise the raw input is the residual.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """Stack of ``nb_layers`` blocks forming one stage of the network.

    The first block maps ``in_planes`` -> ``out_planes`` at the given
    ``stride``; every later block keeps ``out_planes`` at stride 1.  All
    blocks share the intermediate width ``cfg`` and ``dropRate``.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, cfg, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, cfg, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, cfg, dropRate):
        # The original used the `cond and a or b` idiom, which silently picks
        # the wrong value whenever the middle operand is falsy (e.g. a plane
        # count or stride of 0); real conditional expressions avoid that trap.
        layers = []
        for i in range(int(nb_layers)):
            layers.append(block(
                in_planes if i == 0 else out_planes,
                out_planes,
                stride if i == 0 else 1,
                cfg,
                dropRate,
            ))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide residual network (WRN-depth-widen_factor) for 32x32 inputs.

    ``cfg`` optionally overrides the base widths of the three block groups
    (defaults to [16, 32, 64]); actual widths are scaled by ``widen_factor``.
    """
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0, cfg=None):
        super(WideResNet, self).__init__()
        if cfg == None:
            cfg = [16, 32, 64]
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        # depth must be 6n + 4: n blocks per group, 3 groups, plus stem/head.
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        # 1st block (stride 1: resolution unchanged)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, cfg[0]*widen_factor, dropRate)
        # 2nd block (stride 2: downsample)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, cfg[1]*widen_factor, dropRate)
        # 3rd block (stride 2: downsample)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, cfg[2]*widen_factor, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels
        # He-style conv init, unit batch-norm scales, zero linear bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        """Standard classification forward pass: logits of shape (N, num_classes)."""
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # 8x8 average pool assumes 32x32 input spatially downsampled twice.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels[3])
        return self.fc(out)
    def get_bn_before_relu(self):
        """BN layers sitting immediately before each group-boundary ReLU."""
        bn1 = self.block2.layer[0].bn1
        bn2 = self.block3.layer[0].bn1
        bn3 = self.bn1
        return [bn1, bn2, bn3]
    def get_channel_num(self):
        """Channel counts of the three block-group outputs."""
        return self.nChannels[1:]
    def extract_feature(self, x, preReLU=False):
        """Return the three group feature maps (input is moved to GPU).

        NOTE(review): the classifier output computed here is discarded;
        only the feature list is returned.  When ``preReLU`` is True the
        features are additionally passed through the following BN layer.
        """
        x = x.cuda()
        out = self.conv1(x)
        feat1 = self.block1(out)
        feat2 = self.block2(feat1)
        feat3 = self.block3(feat2)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels[3])
        out = self.fc(out)
        if preReLU:
            feat1 = self.block2.layer[0].bn1(feat1)
            feat2 = self.block3.layer[0].bn1(feat2)
            feat3 = self.bn1(feat3)
        return [feat1, feat2, feat3]
    def ware(self, x):
        """Same computation as forward(), returned as a numpy array."""
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels[3])
        return self.fc(out).cpu().detach().numpy() | 4,955 | 35.711111 | 119 | py
LAP-PAL | LAP-PAL-master/continuous/main.py | import numpy as np
import torch
import gym
import argparse
import os
import time
import utils
import TD3
import LAP_TD3
import PAL_TD3
import PER_TD3
# Runs policy for X episodes and returns average reward
def eval_policy(policy, env, seed, eval_episodes=10):
	"""Run ``policy`` for ``eval_episodes`` episodes and return the mean return.

	A fresh environment is created and seeded with ``seed + 100`` so that
	evaluation never reuses the training seed.
	"""
	eval_env = gym.make(env)
	eval_env.seed(seed + 100)
	total_reward = 0.
	for _ in range(eval_episodes):
		state, done = eval_env.reset(), False
		while not done:
			action = policy.select_action(np.array(state), test=True)
			state, reward, done, _ = eval_env.step(action)
			total_reward += reward
	avg_reward = total_reward / eval_episodes
	print("---------------------------------------")
	print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
	print("---------------------------------------")
	return avg_reward
if __name__ == "__main__":
	# Entry point: parse hyperparameters, build the chosen TD3 variant and
	# its matching replay buffer, then run the standard off-policy loop.
	parser = argparse.ArgumentParser()
	parser.add_argument("--algorithm", default="LAP_TD3")			# Algorithm name
	parser.add_argument("--env", default="HalfCheetah-v3")          # OpenAI gym environment name
	parser.add_argument("--seed", default=0, type=int)              # Sets Gym, PyTorch and Numpy seeds
	parser.add_argument("--start_timesteps", default=25e3, type=int)# Time steps initial random policy is used
	parser.add_argument("--eval_freq", default=5e3, type=int)       # How often (time steps) we evaluate
	parser.add_argument("--max_timesteps", default=3e6, type=int)   # Max time steps to run environment
	parser.add_argument("--expl_noise", default=0.1)                # Std of Gaussian exploration noise
	parser.add_argument("--batch_size", default=256, type=int)      # Batch size for both actor and critic
	parser.add_argument("--discount", default=0.99)                 # Discount factor
	parser.add_argument("--tau", default=0.005)                     # Target network update rate
	parser.add_argument("--policy_noise", default=0.2)              # Noise added to target policy during critic update
	parser.add_argument("--noise_clip", default=0.5)                # Range to clip target policy noise
	parser.add_argument("--policy_freq", default=2, type=int)       # Frequency of delayed policy updates
	parser.add_argument("--alpha", default=0.4)                     # Priority = TD^alpha (only used by LAP/PAL)
	parser.add_argument("--min_priority", default=1, type=int)      # Minimum priority (set to 1 in paper, only used by LAP/PAL)
	args = parser.parse_args()
	file_name = "%s_%s_%s" % (args.algorithm, args.env, str(args.seed))
	print("---------------------------------------")
	print(f"Settings: {file_name}")
	print("---------------------------------------")
	if not os.path.exists("./results"):
		os.makedirs("./results")
	env = gym.make(args.env)
	# Set seeds
	env.seed(args.seed)
	torch.manual_seed(args.seed)
	np.random.seed(args.seed)
	state_dim = env.observation_space.shape[0]
	action_dim = env.action_space.shape[0]
	max_action = float(env.action_space.high[0])
	# Shared constructor arguments for every TD3 variant.
	kwargs = {
		"state_dim": state_dim,
		"action_dim": action_dim,
		"max_action": max_action,
		"discount": args.discount,
		"tau": args.tau,
		"policy_noise": args.policy_noise * max_action,
		"noise_clip": args.noise_clip * max_action,
		"policy_freq": args.policy_freq
	}
	# Initialize policy and replay buffer.  TD3/PAL use a uniform buffer;
	# PER/LAP use the prioritized (sum-tree) buffer.
	if args.algorithm == "TD3":
		policy = TD3.TD3(**kwargs)
		replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
	elif args.algorithm == "PER_TD3":
		policy = PER_TD3.PER_TD3(**kwargs)
		replay_buffer = utils.PrioritizedReplayBuffer(state_dim, action_dim)
	# alpha / min_priority only exist on the LAP/PAL constructors.
	kwargs["alpha"] = args.alpha
	kwargs["min_priority"] = args.min_priority
	if args.algorithm == "LAP_TD3":
		policy = LAP_TD3.LAP_TD3(**kwargs)
		replay_buffer = utils.PrioritizedReplayBuffer(state_dim, action_dim)
	elif args.algorithm == "PAL_TD3":
		policy = PAL_TD3.PAL_TD3(**kwargs)
		replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
	# Evaluate untrained policy
	evaluations = [eval_policy(policy, args.env, args.seed)]
	state, done = env.reset(), False
	episode_reward = 0
	episode_timesteps = 0
	episode_num = 0
	for t in range(int(args.max_timesteps)):
		episode_timesteps += 1
		# Select action randomly (warm-up) or according to policy + noise
		if t < args.start_timesteps:
			action = env.action_space.sample()
		else:
			action = (
				policy.select_action(np.array(state))
				+ np.random.normal(0, max_action * args.expl_noise, size=action_dim)
			).clip(-max_action, max_action)
		# Perform action
		next_state, reward, done, _ = env.step(action)
		# Only count "done" if the episode ended before the time limit.
		done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0
		# Store data in replay buffer
		replay_buffer.add(state, action, next_state, reward, done_bool)
		state = next_state
		episode_reward += reward
		# Train agent after collecting sufficient data
		if t >= args.start_timesteps:
			policy.train(replay_buffer, args.batch_size)
		if done:
			# +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
			print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
			state, done = env.reset(), False
			episode_reward = 0
			episode_timesteps = 0
			episode_num += 1
		# Evaluate episode
		if (t + 1) % args.eval_freq == 0:
			evaluations.append(eval_policy(policy, args.env, args.seed))
			np.save("./results/%s" % (file_name), evaluations) | 5,222 | 33.361842 | 121 | py
LAP-PAL | LAP-PAL-master/continuous/PAL_TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
	"""Deterministic policy: state -> action in [-max_action, max_action]."""
	def __init__(self, state_dim, action_dim, max_action):
		super(Actor, self).__init__()
		self.l1 = nn.Linear(state_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, action_dim)
		self.max_action = max_action
	def forward(self, state):
		hidden = F.relu(self.l2(F.relu(self.l1(state))))
		return self.max_action * torch.tanh(self.l3(hidden))
	def act(self, state):
		"""Return both the squashed action and the pre-tanh activation."""
		hidden = F.relu(self.l2(F.relu(self.l1(state))))
		pre_activation = self.l3(hidden)
		return self.max_action * torch.tanh(pre_activation), pre_activation
class Critic(nn.Module):
	"""Twin Q-networks over (state, action) pairs, as used by TD3."""
	def __init__(self, state_dim, action_dim):
		super(Critic, self).__init__()
		# Q1 architecture
		self.l1 = nn.Linear(state_dim + action_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, 1)
		# Q2 architecture
		self.l4 = nn.Linear(state_dim + action_dim, 256)
		self.l5 = nn.Linear(256, 256)
		self.l6 = nn.Linear(256, 1)
	def forward(self, state, action):
		sa = torch.cat([state, action], 1)
		q1 = self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
		q2 = self.l6(F.relu(self.l5(F.relu(self.l4(sa)))))
		return q1, q2
	def Q1(self, state, action):
		"""Evaluate only the first Q-head (used for the actor update)."""
		sa = torch.cat([state, action], 1)
		return self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
class PAL_TD3(object):
	"""TD3 trained with a Prioritized Approximation Loss (PAL).

	Uses a plain uniform replay buffer; instead of prioritized sampling,
	the critic loss is shaped (see :meth:`PAL`) and renormalized so that
	large TD errors are emphasized the way a prioritized buffer would.
	"""
	def __init__(
		self,
		state_dim,
		action_dim,
		max_action,
		discount=0.99,
		tau=0.005,
		policy_noise=0.2,
		noise_clip=0.5,
		policy_freq=2,
		alpha=0.4,
		min_priority=1
	):
		# Actor, its Polyak-averaged target copy, and optimizer.
		self.actor = Actor(state_dim, action_dim, max_action).to(device)
		self.actor_target = copy.deepcopy(self.actor)
		self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
		# Twin critics, target copy, and optimizer.
		self.critic = Critic(state_dim, action_dim).to(device)
		self.critic_target = copy.deepcopy(self.critic)
		self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
		self.max_action = max_action
		self.discount = discount
		self.tau = tau
		self.policy_noise = policy_noise
		self.noise_clip = noise_clip
		self.policy_freq = policy_freq
		# alpha: loss-shaping exponent; min_priority: threshold between the
		# quadratic and power regions of the PAL loss.
		self.alpha = alpha
		self.min_priority = min_priority
		self.total_it = 0
	def select_action(self, state, test=False):
		"""Greedy action for one unbatched state; ``test`` is accepted but unused."""
		state = torch.FloatTensor(state.reshape(1, -1)).to(device)
		return self.actor(state).cpu().data.numpy().flatten()
	def train(self, replay_buffer, batch_size=256):
		"""One optimization step on a uniformly sampled batch."""
		self.total_it += 1
		# Sample replay buffer (uniform; PAL needs no priorities)
		state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
		with torch.no_grad():
			# Select action according to policy and add clipped noise
			noise = (
				torch.randn_like(action) * self.policy_noise
			).clamp(-self.noise_clip, self.noise_clip)
			next_action = (
				self.actor_target(next_state) + noise
			).clamp(-self.max_action, self.max_action)
			# Compute the target Q value (clipped double-Q)
			target_Q1, target_Q2 = self.critic_target(next_state, next_action)
			target_Q = torch.min(target_Q1, target_Q2)
			target_Q = reward + not_done * self.discount * target_Q
		# Get current Q estimates
		current_Q1, current_Q2 = self.critic(state, action)
		td_loss1 = (current_Q1 - target_Q)
		td_loss2 = (current_Q2 - target_Q)
		critic_loss = self.PAL(td_loss1) + self.PAL(td_loss2)
		# Normalize by the batch's mean (clamped, alpha-powered) TD error so
		# the gradient scale matches prioritized replay's expectation.
		critic_loss /= torch.max(td_loss1.abs(), td_loss2.abs()).clamp(min=self.min_priority).pow(self.alpha).mean().detach()
		# Optimize the critic
		self.critic_optimizer.zero_grad()
		critic_loss.backward()
		self.critic_optimizer.step()
		# Delayed policy updates
		if self.total_it % self.policy_freq == 0:
			actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
			# Optimize the actor
			self.actor_optimizer.zero_grad()
			actor_loss.backward()
			self.actor_optimizer.step()
			# Update the frozen target models (Polyak averaging with rate tau)
			for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
			for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
	# PAL loss: quadratic below min_priority, |x|^(1+alpha) above.
	# If min_priority=1, this can be simplified.
	def PAL(self, x):
		return torch.where(
			x.abs() < self.min_priority,
			(self.min_priority ** self.alpha) * 0.5 * x.pow(2),
			self.min_priority * x.abs().pow(1. + self.alpha)/(1. + self.alpha)
		).mean()
	def save(self, filename):
		"""Persist networks and optimizer states to ``filename_*`` files."""
		torch.save(self.critic.state_dict(), filename + "_critic")
		torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
		torch.save(self.actor.state_dict(), filename + "_actor")
		torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
	def load(self, filename):
		"""Restore networks and optimizers saved by :meth:`save`."""
		self.critic.load_state_dict(torch.load(filename + "_critic"))
		self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
		self.actor.load_state_dict(torch.load(filename + "_actor"))
		self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 5,164 | 26.768817 | 119 | py |
LAP-PAL | LAP-PAL-master/continuous/PER_TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
	"""Deterministic policy: state -> action in [-max_action, max_action]."""
	def __init__(self, state_dim, action_dim, max_action):
		super(Actor, self).__init__()
		self.l1 = nn.Linear(state_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, action_dim)
		self.max_action = max_action
	def forward(self, state):
		hidden = F.relu(self.l2(F.relu(self.l1(state))))
		return self.max_action * torch.tanh(self.l3(hidden))
	def act(self, state):
		"""Return both the squashed action and the pre-tanh activation."""
		hidden = F.relu(self.l2(F.relu(self.l1(state))))
		pre_activation = self.l3(hidden)
		return self.max_action * torch.tanh(pre_activation), pre_activation
class Critic(nn.Module):
	"""Twin Q-networks over (state, action) pairs, as used by TD3."""
	def __init__(self, state_dim, action_dim):
		super(Critic, self).__init__()
		# Q1 architecture
		self.l1 = nn.Linear(state_dim + action_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, 1)
		# Q2 architecture
		self.l4 = nn.Linear(state_dim + action_dim, 256)
		self.l5 = nn.Linear(256, 256)
		self.l6 = nn.Linear(256, 1)
	def forward(self, state, action):
		sa = torch.cat([state, action], 1)
		q1 = self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
		q2 = self.l6(F.relu(self.l5(F.relu(self.l4(sa)))))
		return q1, q2
	def Q1(self, state, action):
		"""Evaluate only the first Q-head (used for the actor update)."""
		sa = torch.cat([state, action], 1)
		return self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
class PER_TD3(object):
	"""TD3 trained with Prioritized Experience Replay (proportional PER).

	Samples from a priority-weighted buffer, corrects the sampling bias with
	importance-sampling weights, and refreshes priorities from the max of
	the two TD errors after every critic update.
	"""
	def __init__(
		self,
		state_dim,
		action_dim,
		max_action,
		discount=0.99,
		tau=0.005,
		policy_noise=0.2,
		noise_clip=0.5,
		policy_freq=2,
		alpha=0.6,
	):
		# Actor, its Polyak-averaged target copy, and optimizer.
		self.actor = Actor(state_dim, action_dim, max_action).to(device)
		self.actor_target = copy.deepcopy(self.actor)
		self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
		# Twin critics, target copy, and optimizer.
		self.critic = Critic(state_dim, action_dim).to(device)
		self.critic_target = copy.deepcopy(self.critic)
		self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
		self.max_action = max_action
		self.discount = discount
		self.tau = tau
		self.policy_noise = policy_noise
		self.noise_clip = noise_clip
		self.policy_freq = policy_freq
		# Priority exponent: priority = |TD error| ** alpha.
		self.alpha = alpha
		self.total_it = 0
	def select_action(self, state, test=False):
		"""Greedy action for one unbatched state; ``test`` is accepted but unused."""
		state = torch.FloatTensor(state.reshape(1, -1)).to(device)
		return self.actor(state).cpu().data.numpy().flatten()
	def train(self, replay_buffer, batch_size=256):
		"""One optimization step on a prioritized batch."""
		self.total_it += 1
		# Sample replay buffer (also returns indices and IS weights)
		state, action, next_state, reward, not_done, ind, weights = replay_buffer.sample(batch_size)
		with torch.no_grad():
			# Select action according to policy and add clipped noise
			noise = (
				torch.randn_like(action) * self.policy_noise
			).clamp(-self.noise_clip, self.noise_clip)
			next_action = (
				self.actor_target(next_state) + noise
			).clamp(-self.max_action, self.max_action)
			# Compute the target Q value (clipped double-Q)
			target_Q1, target_Q2 = self.critic_target(next_state, next_action)
			target_Q = torch.min(target_Q1, target_Q2)
			target_Q = reward + not_done * self.discount * target_Q
		# Get current Q estimates
		current_Q1, current_Q2 = self.critic(state, action)
		td_loss1 = (current_Q1 - target_Q).abs()
		td_loss2 = (current_Q2 - target_Q).abs()
		# Compute critic loss (importance-weighted MSE)
		critic_loss = (
			(weights * F.mse_loss(current_Q1, target_Q, reduction='none')).mean()
			+ (weights * F.mse_loss(current_Q2, target_Q, reduction='none')).mean()
		)
		# Optimize the critic
		self.critic_optimizer.zero_grad()
		critic_loss.backward()
		self.critic_optimizer.step()
		# Refresh priorities with the larger of the two TD errors.
		priority = torch.max(td_loss1, td_loss2).pow(self.alpha).cpu().data.numpy().flatten()
		replay_buffer.update_priority(ind, priority)
		# Delayed policy updates
		if self.total_it % self.policy_freq == 0:
			actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
			# Optimize the actor
			self.actor_optimizer.zero_grad()
			actor_loss.backward()
			self.actor_optimizer.step()
			# Update the frozen target models (Polyak averaging with rate tau)
			for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
			for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
	def save(self, filename):
		"""Persist networks and optimizer states to ``filename_*`` files."""
		torch.save(self.critic.state_dict(), filename + "_critic")
		torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
		torch.save(self.actor.state_dict(), filename + "_actor")
		torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
	def load(self, filename):
		"""Restore networks and optimizers saved by :meth:`save`."""
		self.critic.load_state_dict(torch.load(filename + "_critic"))
		self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
		self.actor.load_state_dict(torch.load(filename + "_actor"))
		self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 5,032 | 26.653846 | 94 | py |
LAP-PAL | LAP-PAL-master/continuous/utils.py | import numpy as np
import torch
class ReplayBuffer(object):
	"""Fixed-capacity uniform experience replay backed by ring-buffer arrays."""
	def __init__(self, state_dim, action_dim, max_size=int(1e6)):
		self.max_size = max_size
		self.ptr = 0    # next write position
		self.size = 0   # number of valid entries
		self.state = np.zeros((max_size, state_dim))
		self.action = np.zeros((max_size, action_dim))
		self.next_state = np.zeros((max_size, state_dim))
		self.reward = np.zeros((max_size, 1))
		self.not_done = np.zeros((max_size, 1))
		self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
	def add(self, state, action, next_state, reward, done):
		"""Overwrite the oldest slot with one transition."""
		slot = self.ptr
		self.state[slot] = state
		self.action[slot] = action
		self.next_state[slot] = next_state
		self.reward[slot] = reward
		self.not_done[slot] = 1. - done
		self.ptr = (slot + 1) % self.max_size
		self.size = min(self.size + 1, self.max_size)
	def sample(self, batch_size):
		"""Draw a uniform random batch and move it to the buffer's device."""
		ind = np.random.randint(self.size, size=batch_size)
		as_tensor = lambda arr: torch.FloatTensor(arr[ind]).to(self.device)
		return (
			as_tensor(self.state),
			as_tensor(self.action),
			as_tensor(self.next_state),
			as_tensor(self.reward),
			as_tensor(self.not_done),
		)
class PrioritizedReplayBuffer():
	"""Replay buffer whose sampling follows priorities stored in a sum tree."""
	def __init__(self, state_dim, action_dim, max_size=int(1e6)):
		self.max_size = max_size
		self.ptr = 0    # next write position
		self.size = 0   # number of valid entries
		self.state = np.zeros((max_size, state_dim))
		self.action = np.zeros((max_size, action_dim))
		self.next_state = np.zeros((max_size, state_dim))
		self.reward = np.zeros((max_size, 1))
		self.not_done = np.zeros((max_size, 1))
		self.tree = SumTree(max_size)
		self.max_priority = 1.0  # new samples always enter at the running max
		self.beta = 0.4          # IS-weight exponent, annealed toward 1
		self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
	def add(self, state, action, next_state, reward, done):
		"""Store one transition at the current maximum priority."""
		slot = self.ptr
		self.state[slot] = state
		self.action[slot] = action
		self.next_state[slot] = next_state
		self.reward[slot] = reward
		self.not_done[slot] = 1. - done
		self.tree.set(slot, self.max_priority)
		self.ptr = (slot + 1) % self.max_size
		self.size = min(self.size + 1, self.max_size)
	def sample(self, batch_size):
		"""Draw a prioritized batch plus indices and normalized IS weights."""
		ind = self.tree.sample(batch_size)
		weights = self.tree.levels[-1][ind] ** -self.beta
		weights /= weights.max()
		self.beta = min(self.beta + 2e-7, 1)  # Hardcoded: 0.4 + 2e-7 * 3e6 = 1.0. Only used by PER.
		as_tensor = lambda arr: torch.FloatTensor(arr[ind]).to(self.device)
		return (
			as_tensor(self.state),
			as_tensor(self.action),
			as_tensor(self.next_state),
			as_tensor(self.reward),
			as_tensor(self.not_done),
			ind,
			torch.FloatTensor(weights).to(self.device).reshape(-1, 1)
		)
	def update_priority(self, ind, priority):
		"""Write new priorities and track the running maximum."""
		self.max_priority = max(priority.max(), self.max_priority)
		self.tree.batch_set(ind, priority)
class SumTree(object):
	"""Binary sum tree over priorities; supports O(log n) sampling and updates.

	``levels[0]`` is the root (total priority); ``levels[-1]`` holds the
	per-index leaf priorities.
	"""
	def __init__(self, max_size):
		self.levels = [np.zeros(1)]
		# Tree construction
		# Double the number of nodes at each level
		level_size = 1
		while level_size < max_size:
			level_size *= 2
			self.levels.append(np.zeros(level_size))
	# Batch binary search through sum tree
	# Sample a priority between 0 and the max priority
	# and then search the tree for the corresponding index
	def sample(self, batch_size):
		value = np.random.uniform(0, self.levels[0][0], size=batch_size)
		ind = np.zeros(batch_size, dtype=int)
		for nodes in self.levels[1:]:
			ind *= 2
			left_sum = nodes[ind]
			is_greater = np.greater(value, left_sum)
			# If value > left_sum -> go right (+1), else go left (+0)
			ind += is_greater
			# If we go right, we only need to consider the values in the right tree
			# so we subtract the sum of values in the left tree
			value -= left_sum * is_greater
		return ind
	def set(self, ind, new_priority):
		"""Set one leaf priority, propagating the delta up to the root."""
		priority_diff = new_priority - self.levels[-1][ind]
		for nodes in self.levels[::-1]:
			np.add.at(nodes, ind, priority_diff)
			ind //= 2
	def batch_set(self, ind, new_priority):
		"""Set many leaf priorities at once (duplicate indices collapsed)."""
		# Confirm we don't increment a node twice
		ind, unique_ind = np.unique(ind, return_index=True)
		priority_diff = new_priority[unique_ind] - self.levels[-1][ind]
		for nodes in self.levels[::-1]:
			np.add.at(nodes, ind, priority_diff)
			ind //= 2 | 4,293 | 28.410959 | 93 | py
LAP-PAL | LAP-PAL-master/continuous/TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
	"""Deterministic policy: state -> action in [-max_action, max_action]."""
	def __init__(self, state_dim, action_dim, max_action):
		super(Actor, self).__init__()
		self.l1 = nn.Linear(state_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, action_dim)
		self.max_action = max_action
	def forward(self, state):
		hidden = F.relu(self.l2(F.relu(self.l1(state))))
		return self.max_action * torch.tanh(self.l3(hidden))
class Critic(nn.Module):
	"""Twin Q-networks over (state, action) pairs, as used by TD3."""
	def __init__(self, state_dim, action_dim):
		super(Critic, self).__init__()
		# Q1 architecture
		self.l1 = nn.Linear(state_dim + action_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, 1)
		# Q2 architecture
		self.l4 = nn.Linear(state_dim + action_dim, 256)
		self.l5 = nn.Linear(256, 256)
		self.l6 = nn.Linear(256, 1)
	def forward(self, state, action):
		sa = torch.cat([state, action], 1)
		q1 = self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
		q2 = self.l6(F.relu(self.l5(F.relu(self.l4(sa)))))
		return q1, q2
	def Q1(self, state, action):
		"""Evaluate only the first Q-head (used for the actor update)."""
		sa = torch.cat([state, action], 1)
		return self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
class TD3(object):
	"""Twin Delayed DDPG (TD3) with uniform replay.

	Clipped double-Q targets, target-policy smoothing noise, and delayed
	actor / target-network updates.
	"""
	def __init__(
		self,
		state_dim,
		action_dim,
		max_action,
		discount=0.99,
		tau=0.005,
		policy_noise=0.2,
		noise_clip=0.5,
		policy_freq=2
	):
		# Actor, its Polyak-averaged target copy, and optimizer.
		self.actor = Actor(state_dim, action_dim, max_action).to(device)
		self.actor_target = copy.deepcopy(self.actor)
		self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
		# Twin critics, target copy, and optimizer.
		self.critic = Critic(state_dim, action_dim).to(device)
		self.critic_target = copy.deepcopy(self.critic)
		self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
		self.max_action = max_action
		self.discount = discount
		self.tau = tau
		self.policy_noise = policy_noise
		self.noise_clip = noise_clip
		self.policy_freq = policy_freq
		self.total_it = 0
	def select_action(self, state, test=False):
		"""Greedy action for one unbatched state; ``test`` is accepted but unused."""
		state = torch.FloatTensor(state.reshape(1, -1)).to(device)
		return self.actor(state).cpu().data.numpy().flatten()
	def train(self, replay_buffer, batch_size=256):
		"""One optimization step on a uniformly sampled batch."""
		self.total_it += 1
		# Sample replay buffer 
		state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
		with torch.no_grad():
			# Select action according to policy and add clipped noise
			noise = (
				torch.randn_like(action) * self.policy_noise
			).clamp(-self.noise_clip, self.noise_clip)
			next_action = (
				self.actor_target(next_state) + noise
			).clamp(-self.max_action, self.max_action)
			# Compute the target Q value (clipped double-Q)
			target_Q1, target_Q2 = self.critic_target(next_state, next_action)
			target_Q = torch.min(target_Q1, target_Q2)
			target_Q = reward + not_done * self.discount * target_Q
		# Get current Q estimates
		current_Q1, current_Q2 = self.critic(state, action)
		# Compute critic loss
		critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
		# Optimize the critic
		self.critic_optimizer.zero_grad()
		critic_loss.backward()
		self.critic_optimizer.step()
		# Delayed policy updates
		if self.total_it % self.policy_freq == 0:
			# Compute actor loss
			actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
			# Optimize the actor
			self.actor_optimizer.zero_grad()
			actor_loss.backward()
			self.actor_optimizer.step()
			# Update the frozen target models (Polyak averaging with rate tau)
			for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
			for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
	def save(self, filename):
		"""Persist networks and optimizer states to ``filename_*`` files."""
		torch.save(self.critic.state_dict(), filename + "_critic")
		torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
		torch.save(self.actor.state_dict(), filename + "_actor")
		torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
	def load(self, filename):
		"""Restore networks and optimizers saved by :meth:`save`."""
		self.critic.load_state_dict(torch.load(filename + "_critic"))
		self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
		self.actor.load_state_dict(torch.load(filename + "_actor"))
		self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 4,551 | 26.756098 | 93 | py |
LAP-PAL | LAP-PAL-master/continuous/LAP_TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
	"""Deterministic policy: state -> action in [-max_action, max_action]."""
	def __init__(self, state_dim, action_dim, max_action):
		super(Actor, self).__init__()
		self.l1 = nn.Linear(state_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, action_dim)
		self.max_action = max_action
	def forward(self, state):
		hidden = F.relu(self.l2(F.relu(self.l1(state))))
		return self.max_action * torch.tanh(self.l3(hidden))
	def act(self, state):
		"""Return both the squashed action and the pre-tanh activation."""
		hidden = F.relu(self.l2(F.relu(self.l1(state))))
		pre_activation = self.l3(hidden)
		return self.max_action * torch.tanh(pre_activation), pre_activation
class Critic(nn.Module):
	"""Twin Q-networks over (state, action) pairs, as used by TD3."""
	def __init__(self, state_dim, action_dim):
		super(Critic, self).__init__()
		# Q1 architecture
		self.l1 = nn.Linear(state_dim + action_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, 1)
		# Q2 architecture
		self.l4 = nn.Linear(state_dim + action_dim, 256)
		self.l5 = nn.Linear(256, 256)
		self.l6 = nn.Linear(256, 1)
	def forward(self, state, action):
		sa = torch.cat([state, action], 1)
		q1 = self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
		q2 = self.l6(F.relu(self.l5(F.relu(self.l4(sa)))))
		return q1, q2
	def Q1(self, state, action):
		"""Evaluate only the first Q-head (used for the actor update)."""
		sa = torch.cat([state, action], 1)
		return self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
class LAP_TD3(object):
	"""TD3 with Loss-Adjusted Prioritized (LAP) experience replay.

	Samples from a prioritized buffer, trains the critic with a Huber-style
	loss thresholded at ``min_priority``, and writes back priorities that are
	clamped below ``min_priority`` and raised to the power ``alpha``.
	"""
	def __init__(
		self,
		state_dim,
		action_dim,
		max_action,
		discount=0.99,
		tau=0.005,
		policy_noise=0.2,
		noise_clip=0.5,
		policy_freq=2,
		alpha=0.4,
		min_priority=1
	):
		# Actor, its Polyak-averaged target copy, and optimizer.
		self.actor = Actor(state_dim, action_dim, max_action).to(device)
		self.actor_target = copy.deepcopy(self.actor)
		self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
		# Twin critics, target copy, and optimizer.
		self.critic = Critic(state_dim, action_dim).to(device)
		self.critic_target = copy.deepcopy(self.critic)
		self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
		self.max_action = max_action
		self.discount = discount
		self.tau = tau
		self.policy_noise = policy_noise
		self.noise_clip = noise_clip
		self.policy_freq = policy_freq
		# alpha: priority exponent; min_priority: floor on stored priorities
		# and threshold of the Huber loss.
		self.alpha = alpha
		self.min_priority = min_priority
		self.total_it = 0
	def select_action(self, state, test=False):
		"""Greedy action for one unbatched state; ``test`` is accepted but unused."""
		state = torch.FloatTensor(state.reshape(1, -1)).to(device)
		return self.actor(state).cpu().data.numpy().flatten()
	def train(self, replay_buffer, batch_size=256):
		"""One optimization step on a prioritized batch."""
		self.total_it += 1
		# Sample replay buffer (IS weights are returned but unused by LAP)
		state, action, next_state, reward, not_done, ind, weights = replay_buffer.sample(batch_size)
		with torch.no_grad():
			# Select action according to policy and add clipped noise
			noise = (
				torch.randn_like(action) * self.policy_noise
			).clamp(-self.noise_clip, self.noise_clip)
			next_action = (
				self.actor_target(next_state) + noise
			).clamp(-self.max_action, self.max_action)
			# Compute the target Q value (clipped double-Q)
			target_Q1, target_Q2 = self.critic_target(next_state, next_action)
			target_Q = torch.min(target_Q1, target_Q2)
			target_Q = reward + not_done * self.discount * target_Q
		# Get current Q estimates
		current_Q1, current_Q2 = self.critic(state, action)
		td_loss1 = (current_Q1 - target_Q).abs()
		td_loss2 = (current_Q2 - target_Q).abs()
		# Compute critic loss (Huber with threshold min_priority)
		critic_loss = self.huber(td_loss1) + self.huber(td_loss2)
		# Optimize the critic
		self.critic_optimizer.zero_grad()
		critic_loss.backward()
		self.critic_optimizer.step()
		# New priority: max TD error, clamped from below, raised to alpha.
		priority = torch.max(td_loss1, td_loss2).clamp(min=self.min_priority).pow(self.alpha).cpu().data.numpy().flatten()
		replay_buffer.update_priority(ind, priority)
		# Delayed policy updates
		if self.total_it % self.policy_freq == 0:
			actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
			# Optimize the actor
			self.actor_optimizer.zero_grad()
			actor_loss.backward()
			self.actor_optimizer.step()
			# Update the frozen target models (Polyak averaging with rate tau)
			for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
			for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
	def huber(self, x):
		# Expects non-negative x (absolute TD errors): quadratic below the
		# min_priority threshold, linear above it.
		return torch.where(x < self.min_priority, 0.5 * x.pow(2), self.min_priority * x).mean()
	def save(self, filename):
		"""Persist networks and optimizer states to ``filename_*`` files."""
		torch.save(self.critic.state_dict(), filename + "_critic")
		torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
		torch.save(self.actor.state_dict(), filename + "_actor")
		torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
	def load(self, filename):
		"""Restore networks and optimizers saved by :meth:`save`."""
		self.critic.load_state_dict(torch.load(filename + "_critic"))
		self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
		self.actor.load_state_dict(torch.load(filename + "_actor"))
		self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 5,118 | 26.67027 | 116 | py |
LAP-PAL | LAP-PAL-master/discrete/main.py | import argparse
import copy
import importlib
import json
import os
import numpy as np
import torch
import DDQN
import PER_DDQN
import LAP_DDQN
import PAL_DDQN
import utils
def main(env, replay_buffer, is_atari, state_dim, num_actions, args, parameters, device):
    """Run the training loop: act in `env`, store transitions, train `policy`.

    Periodically evaluates and writes scores to ./results/{setting}.npy,
    where `setting` is a module-level global set in the __main__ block.
    """
    # Initialize and load policy
    kwargs = {
        "is_atari": is_atari,
        "num_actions": num_actions,
        "state_dim": state_dim,
        "device": device,
        "discount": parameters["discount"],
        "optimizer": parameters["optimizer"],
        "optimizer_parameters": parameters["optimizer_parameters"],
        "polyak_target_update": parameters["polyak_target_update"],
        "target_update_frequency": parameters["target_update_freq"],
        "tau": parameters["tau"],
        "initial_eps": parameters["initial_eps"],
        "end_eps": parameters["end_eps"],
        "eps_decay_period": parameters["eps_decay_period"],
        "eval_eps": parameters["eval_eps"]
    }
    if args.algorithm == "DDQN":
        policy = DDQN.DDQN(**kwargs)
    elif args.algorithm == "PER_DDQN":
        policy = PER_DDQN.PER_DDQN(**kwargs)
    # alpha/min_priority are only accepted by the LAP/PAL variants, so they
    # are added to kwargs only after DDQN/PER_DDQN have been constructed.
    kwargs["alpha"] = parameters["alpha"]
    kwargs["min_priority"] = parameters["min_priority"]
    if args.algorithm == "LAP_DDQN":
        policy = LAP_DDQN.LAP_DDQN(**kwargs)
    elif args.algorithm == "PAL_DDQN":
        policy = PAL_DDQN.PAL_DDQN(**kwargs)
    # NOTE(review): an unrecognized --algorithm leaves `policy` unbound and
    # fails later with NameError -- confirm whether explicit validation is wanted.
    evaluations = []
    state, done = env.reset(), False
    episode_start = True
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0
    # Interact with the environment for max_timesteps
    for t in range(int(args.max_timesteps)):
        episode_timesteps += 1
        #if args.train_behavioral:
        # Pure random exploration before start_timesteps; eps-greedy afterwards.
        if t < parameters["start_timesteps"]:
            action = env.action_space.sample()
        else:
            action = policy.select_action(np.array(state))
        # Perform action and log results
        next_state, reward, done, info = env.step(action)
        episode_reward += reward
        # Only consider "done" if episode terminates due to failure condition
        done_float = float(done) if episode_timesteps < env._max_episode_steps else 0
        # For atari, info[0] = clipped reward, info[1] = done_float
        if is_atari:
            reward = info[0]
            done_float = info[1]
        # Store data in replay buffer
        replay_buffer.add(state, action, next_state, reward, done_float, done, episode_start)
        state = copy.copy(next_state)
        episode_start = False
        # Train agent after collecting sufficient data
        if t >= parameters["start_timesteps"] and (t + 1) % parameters["train_freq"] == 0:
            policy.train(replay_buffer)
        if done:
            # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
            print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
            # Reset environment
            state, done = env.reset(), False
            episode_start = True
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1
        # Evaluate episode
        if (t + 1) % parameters["eval_freq"] == 0:
            evaluations.append(eval_policy(policy, args.env, args.seed))
            np.save(f"./results/{setting}.npy", evaluations)
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(policy, env_name, seed, eval_episodes=10):
    """Average return of `policy` (with its eval eps) over `eval_episodes` episodes."""
    # `atari_preprocessing` is a module-level global defined in the __main__ block.
    eval_env, _, _, _ = utils.make_env(env_name, atari_preprocessing)
    # Offset the seed so eval episodes differ from the training stream.
    eval_env.seed(seed + 100)
    avg_reward = 0.
    for _ in range(eval_episodes):
        state, done = eval_env.reset(), False
        while not done:
            action = policy.select_action(np.array(state), eval=True)
            state, reward, done, _ = eval_env.step(action)
            avg_reward += reward
    avg_reward /= eval_episodes
    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
if __name__ == "__main__":
    # Atari Specific
    # Frame/observation preprocessing passed to utils.AtariPreprocessing.
    atari_preprocessing = {
        "frame_skip": 4,
        "frame_size": 84,
        "state_history": 4,
        "done_on_life_loss": False,
        "reward_clipping": True,
        "max_episode_timesteps": 27e3
    }
    atari_parameters = {
        # LAP/PAL
        "alpha": 0.6,
        "min_priority": 1e-2,
        # Exploration
        "start_timesteps": 2e4,
        "initial_eps": 1,
        "end_eps": 1e-2,
        "eps_decay_period": 25e4,
        # Evaluation
        "eval_freq": 5e4,
        "eval_eps": 1e-3,
        # Learning
        "discount": 0.99,
        "buffer_size": 1e6,
        "batch_size": 32,
        "optimizer": "RMSprop",
        "optimizer_parameters": {
            "lr": 0.0000625,
            "alpha": 0.95,
            "centered": True,
            "eps": 0.00001
        },
        "train_freq": 4,
        "polyak_target_update": False,
        "target_update_freq": 8e3,
        "tau": 1
    }
    # Hyper-parameters for non-Atari (vector-observation) gym tasks.
    regular_parameters = {
        # LAP/PAL
        "alpha": 0.4,
        "min_priority": 1,
        # Exploration
        "start_timesteps": 1e3,
        "initial_eps": 0.1,
        "end_eps": 0.1,
        "eps_decay_period": 1,
        # Evaluation
        "eval_freq": 5e3,
        "eval_eps": 0,
        # Learning
        "discount": 0.99,
        "buffer_size": 1e6,
        "batch_size": 64,
        "optimizer": "Adam",
        "optimizer_parameters": {
            "lr": 3e-4
        },
        "train_freq": 1,
        "polyak_target_update": True,
        "target_update_freq": 1,
        "tau": 0.005
    }
    # Load parameters
    parser = argparse.ArgumentParser()
    parser.add_argument("--algorithm", default="LAP_DDQN")  # One of DDQN / PER_DDQN / LAP_DDQN / PAL_DDQN
    parser.add_argument("--env", default="PongNoFrameskip-v0")  # OpenAI gym environment name #PongNoFrameskip-v0
    parser.add_argument("--seed", default=0, type=int)  # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--buffer_name", default="Default")  # Prepends name to filename
    # NOTE: `type=int` only converts command-line strings; the default stays 50e6
    # (a float) and is cast with int() inside main().
    parser.add_argument("--max_timesteps", default=50e6, type=int)  # Max time steps to run environment or train for
    args = parser.parse_args()
    print("---------------------------------------")
    print(f"Setting: Algorithm: {args.algorithm}, Env: {args.env}, Seed: {args.seed}")
    print("---------------------------------------")
    # Used by main() when saving evaluation results.
    setting = f"{args.algorithm}_{args.env}_{args.seed}"
    if not os.path.exists("./results"):
        os.makedirs("./results")
    # Make env and determine properties
    env, is_atari, state_dim, num_actions = utils.make_env(args.env, atari_preprocessing)
    parameters = atari_parameters if is_atari else regular_parameters
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Initialize buffer
    # Only PER and LAP sample from a prioritized buffer; PAL approximates
    # prioritization in its loss and uses uniform sampling.
    prioritized = True if args.algorithm == "PER_DDQN" or args.algorithm == "LAP_DDQN" else False
    replay_buffer = utils.ReplayBuffer(
        state_dim,
        prioritized,
        is_atari,
        atari_preprocessing,
        parameters["batch_size"],
        parameters["buffer_size"],
        device
    )
    main(env, replay_buffer, is_atari, state_dim, num_actions, args, parameters, device)
| 6,543 | 26.846809 | 116 | py |
LAP-PAL | LAP-PAL-master/discrete/utils.py | import cv2
import gym
import numpy as np
import torch
def ReplayBuffer(state_dim, prioritized, is_atari, atari_preprocessing, batch_size, buffer_size, device):
    """Factory: pick the frame-stacking Atari buffer or the standard buffer.

    Both variants support uniform and prioritized sampling via `prioritized`.
    """
    if not is_atari:
        return PrioritizedStandardBuffer(state_dim, batch_size, buffer_size, device, prioritized)
    return PrioritizedAtariBuffer(state_dim, atari_preprocessing, batch_size, buffer_size, device, prioritized)
class PrioritizedAtariBuffer(object):
    """Replay buffer for Atari: stores single uint8 frames and reconstructs
    frame-stacked states at sample time; optionally prioritized via a SumTree."""

    def __init__(self, state_dim, atari_preprocessing, batch_size, buffer_size, device, prioritized):
        self.batch_size = batch_size
        self.max_size = int(buffer_size)
        self.device = device
        self.state_history = atari_preprocessing["state_history"]
        self.ptr = 0
        self.size = 0
        # One frame per transition (+1 slot so next_state indexing can wrap).
        self.state = np.zeros((
            self.max_size + 1,
            atari_preprocessing["frame_size"],
            atari_preprocessing["frame_size"]
        ), dtype=np.uint8)
        self.action = np.zeros((self.max_size, 1), dtype=np.int64)
        self.reward = np.zeros((self.max_size, 1))
        # not_done only consider "done" if episode terminates due to failure condition
        # if episode terminates due to timelimit, the transition is not added to the buffer
        self.not_done = np.zeros((self.max_size, 1))
        self.first_timestep = np.zeros(self.max_size, dtype=np.uint8)
        self.prioritized = prioritized
        if self.prioritized:
            self.tree = SumTree(self.max_size)
            self.max_priority = 1.0
            # PER importance-sampling exponent, annealed toward 1 in sample().
            self.beta = 0.4

    def add(self, state, action, next_state, reward, done, env_done, first_timestep):
        # If dones don't match, env has reset due to timelimit
        # and we don't add the transition to the buffer
        if done != env_done:
            return
        # Only the newest frame of the stacked state is stored.
        self.state[self.ptr] = state[0]
        self.action[self.ptr] = action
        self.reward[self.ptr] = reward
        self.not_done[self.ptr] = 1. - done
        self.first_timestep[self.ptr] = first_timestep
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
        if self.prioritized:
            # NOTE(review): this writes the max priority at the *incremented*
            # pointer (the next slot), whereas PrioritizedStandardBuffer.add
            # sets it for the slot just filled -- confirm which is intended.
            self.tree.set(self.ptr, self.max_priority)

    def sample(self):
        """Return a batch; prioritized sampling adds (indices, IS weights)."""
        ind = self.tree.sample(self.batch_size) if self.prioritized \
            else np.random.randint(0, self.size, size=self.batch_size)
        # Note + is concatenate here
        state = np.zeros(((self.batch_size, self.state_history) + self.state.shape[1:]), dtype=np.uint8)
        next_state = np.array(state)
        state_not_done = 1.
        next_not_done = 1.
        # Rebuild the history stack by walking backwards frame by frame.
        for i in range(self.state_history):
            # Wrap around if the buffer is filled
            if self.size == self.max_size:
                j = (ind - i) % self.max_size
                k = (ind - i + 1) % self.max_size
            else:
                j = ind - i
                k = (ind - i + 1).clip(min=0)
            # If j == -1, then we set state_not_done to 0.
            state_not_done *= (j + 1).clip(min=0, max=1).reshape(-1, 1, 1)
            j = j.clip(min=0)
            # State should be all 0s if the episode terminated previously
            state[:, i] = self.state[j] * state_not_done
            next_state[:, i] = self.state[k] * next_not_done
            # If this was the first timestep, make everything previous = 0
            next_not_done *= state_not_done
            state_not_done *= (1. - self.first_timestep[j]).reshape(-1, 1, 1)
        batch = (
            torch.ByteTensor(state).to(self.device).float(),
            torch.LongTensor(self.action[ind]).to(self.device),
            torch.ByteTensor(next_state).to(self.device).float(),
            torch.FloatTensor(self.reward[ind]).to(self.device),
            torch.FloatTensor(self.not_done[ind]).to(self.device)
        )
        if self.prioritized:
            # Weights are leaf priorities ^ -beta, normalized by the batch max.
            weights = np.array(self.tree.nodes[-1][ind]) ** -self.beta
            weights /= weights.max()
            self.beta = min(self.beta + 4.8e-8, 1)  # Hardcoded: 0.4 + 4.8e-8 * 12.5e6 = 1.0. Only used by PER.
            batch += (ind, torch.FloatTensor(weights).to(self.device).reshape(-1, 1))
        return batch

    def update_priority(self, ind, priority):
        """Write new leaf priorities and track the running max for add()."""
        self.max_priority = max(priority.max(), self.max_priority)
        self.tree.batch_set(ind, priority)
# Replay buffer for standard gym tasks
class PrioritizedStandardBuffer():
    """Flat (non-frame-stacked) replay buffer; optionally prioritized via a SumTree."""

    def __init__(self, state_dim, batch_size, buffer_size, device, prioritized):
        self.batch_size = batch_size
        self.max_size = int(buffer_size)
        self.device = device
        self.ptr = 0
        self.size = 0
        self.state = np.zeros((self.max_size, state_dim))
        self.action = np.zeros((self.max_size, 1))
        self.next_state = np.array(self.state)
        self.reward = np.zeros((self.max_size, 1))
        self.not_done = np.zeros((self.max_size, 1))
        self.prioritized = prioritized
        if self.prioritized:
            self.tree = SumTree(self.max_size)
            self.max_priority = 1.0
            # PER importance-sampling exponent, annealed toward 1 in sample().
            self.beta = 0.4

    def add(self, state, action, next_state, reward, done, env_done, first_timestep):
        # env_done/first_timestep are accepted for API parity with the Atari
        # buffer but unused here.
        self.state[self.ptr] = state
        self.action[self.ptr] = action
        self.next_state[self.ptr] = next_state
        self.reward[self.ptr] = reward
        self.not_done[self.ptr] = 1. - done
        if self.prioritized:
            # New transitions get the current max priority so they are
            # sampled at least once.
            self.tree.set(self.ptr, self.max_priority)
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample(self):
        """Return a batch; prioritized sampling adds (indices, IS weights)."""
        ind = self.tree.sample(self.batch_size) if self.prioritized \
            else np.random.randint(0, self.size, size=self.batch_size)
        batch = (
            torch.FloatTensor(self.state[ind]).to(self.device),
            torch.LongTensor(self.action[ind]).to(self.device),
            torch.FloatTensor(self.next_state[ind]).to(self.device),
            torch.FloatTensor(self.reward[ind]).to(self.device),
            torch.FloatTensor(self.not_done[ind]).to(self.device)
        )
        if self.prioritized:
            weights = np.array(self.tree.nodes[-1][ind]) ** -self.beta
            weights /= weights.max()
            self.beta = min(self.beta + 2e-7, 1)  # Hardcoded: 0.4 + 2e-7 * 3e6 = 1.0. Only used by PER.
            batch += (ind, torch.FloatTensor(weights).to(self.device).reshape(-1, 1))
        return batch

    def update_priority(self, ind, priority):
        """Write new leaf priorities and track the running max for add()."""
        self.max_priority = max(priority.max(), self.max_priority)
        self.tree.batch_set(ind, priority)
class SumTree(object):
    """Array-backed binary sum tree over leaf priorities.

    Level k holds 2**k nodes; each internal node stores the sum of its two
    children, so the root is the total priority mass and proportional
    sampling is a top-down binary search.
    """

    def __init__(self, max_size):
        # Enough levels so the leaf level can hold max_size entries.
        depth = int(np.ceil(np.log2(max_size))) + 1
        self.nodes = [np.zeros(2 ** level) for level in range(depth)]

    def sample(self, batch_size):
        """Draw batch_size leaf indices, each with probability ~ its priority."""
        remaining = np.random.uniform(0, self.nodes[0][0], size=batch_size)
        index = np.zeros(batch_size, dtype=int)
        for level_nodes in self.nodes[1:]:
            index *= 2
            left = level_nodes[index]
            # Descend right when the query mass exceeds the left child's sum;
            # in that case the left subtree's mass is consumed.
            go_right = np.greater(remaining, left)
            index += go_right
            remaining -= left * go_right
        return index

    def set(self, node_index, new_priority):
        """Set one leaf's priority, propagating the delta up to the root."""
        delta = new_priority - self.nodes[-1][node_index]
        for level_nodes in reversed(self.nodes):
            np.add.at(level_nodes, node_index, delta)
            node_index //= 2

    def batch_set(self, node_index, new_priority):
        """Set many leaves at once; duplicates keep their first priority."""
        # Deduplicate so no node is incremented twice in one np.add.at call.
        node_index, first_pos = np.unique(node_index, return_index=True)
        delta = new_priority[first_pos] - self.nodes[-1][node_index]
        for level_nodes in reversed(self.nodes):
            np.add.at(level_nodes, node_index, delta)
            node_index //= 2
# Atari Preprocessing
# Code is based on https://github.com/openai/gym/blob/master/gym/wrappers/atari_preprocessing.py
class AtariPreprocessing(object):
    """Wraps an ALE gym env: frame skip, max-pooling over the last two frames,
    grayscale + resize to frame_size, and a stacked state of the last
    `state_history` processed frames."""

    def __init__(
        self,
        env,
        frame_skip=4,
        frame_size=84,
        state_history=4,
        done_on_life_loss=False,
        reward_clipping=True,  # Clips to a range of -1,1
        max_episode_timesteps=27000
    ):
        # Unwrap the TimeLimit wrapper; this class enforces its own limit.
        self.env = env.env
        self.done_on_life_loss = done_on_life_loss
        self.frame_skip = frame_skip
        self.frame_size = frame_size
        self.reward_clipping = reward_clipping
        self._max_episode_steps = max_episode_timesteps
        self.observation_space = np.zeros((frame_size, frame_size))
        self.action_space = self.env.action_space
        self.lives = 0
        self.episode_length = 0
        # Tracks previous 2 frames
        self.frame_buffer = np.zeros(
            (2,
             self.env.observation_space.shape[0],
             self.env.observation_space.shape[1]),
            dtype=np.uint8
        )
        # Tracks previous 4 states
        self.state_buffer = np.zeros((state_history, frame_size, frame_size), dtype=np.uint8)

    def reset(self):
        """Reset the env and return the initial stacked state (older slots zeroed)."""
        self.env.reset()
        self.lives = self.env.ale.lives()
        self.episode_length = 0
        self.env.ale.getScreenGrayscale(self.frame_buffer[0])
        self.frame_buffer[1] = 0
        self.state_buffer[0] = self.adjust_frame()
        self.state_buffer[1:] = 0
        return self.state_buffer

    # Takes single action is repeated for frame_skip frames (usually 4)
    # Reward is accumulated over those frames
    def step(self, action):
        """Return (stacked_state, total_reward, done, [clipped_reward, done_float])."""
        total_reward = 0.
        self.episode_length += 1
        for frame in range(self.frame_skip):
            _, reward, done, _ = self.env.step(action)
            total_reward += reward
            if self.done_on_life_loss:
                crt_lives = self.env.ale.lives()
                done = True if crt_lives < self.lives else done
                self.lives = crt_lives
            if done:
                break
            # Second last and last frame
            f = frame + 2 - self.frame_skip
            if f >= 0:
                self.env.ale.getScreenGrayscale(self.frame_buffer[f])
        # Shift the history stack and insert the newest processed frame first.
        self.state_buffer[1:] = self.state_buffer[:-1]
        self.state_buffer[0] = self.adjust_frame()
        # done_float excludes the self-imposed time limit below.
        done_float = float(done)
        if self.episode_length >= self._max_episode_steps:
            done = True
        return self.state_buffer, total_reward, done, [np.clip(total_reward, -1, 1), done_float]

    def adjust_frame(self):
        """Max-pool the two most recent frames and resize to frame_size."""
        # Take maximum over last two frames
        np.maximum(
            self.frame_buffer[0],
            self.frame_buffer[1],
            out=self.frame_buffer[0]
        )
        # Resize
        image = cv2.resize(
            self.frame_buffer[0],
            (self.frame_size, self.frame_size),
            interpolation=cv2.INTER_AREA
        )
        return np.array(image, dtype=np.uint8)

    def seed(self, seed):
        self.env.seed(seed)
# Create environment, add wrapper if necessary and create env_properties
def make_env(env_name, atari_preprocessing):
    """Build `env_name`, wrapping Atari envs with AtariPreprocessing.

    Returns (env, is_atari, state_dim, num_actions); for Atari, state_dim
    is the stacked-frame shape tuple, otherwise the flat observation size.
    Assumes a discrete action space (uses env.action_space.n).
    """
    env = gym.make(env_name)
    is_atari = gym.envs.registry.spec(env_name).entry_point == 'gym.envs.atari:AtariEnv'
    env = AtariPreprocessing(env, **atari_preprocessing) if is_atari else env
    state_dim = (
        atari_preprocessing["state_history"],
        atari_preprocessing["frame_size"],
        atari_preprocessing["frame_size"]
    ) if is_atari else env.observation_space.shape[0]
    return (
        env,
        is_atari,
        state_dim,
        env.action_space.n
    )
LAP-PAL | LAP-PAL-master/discrete/PER_DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
    """Convolutional Q-network: (batch, frames, 84, 84) -> (batch, num_actions)."""

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        # Classic DQN trunk: three conv layers, then two linear layers.
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.l1 = nn.Linear(3136, 512)
        self.l2 = nn.Linear(512, num_actions)

    def forward(self, state):
        hidden = F.relu(self.c1(state))
        hidden = F.relu(self.c2(hidden))
        hidden = F.relu(self.c3(hidden))
        # 3136 = 64 channels * 7 * 7 spatial for 84x84 inputs.
        hidden = F.relu(self.l1(hidden.reshape(-1, 3136)))
        return self.l2(hidden)
# Used for Box2D / Toy problems
class FC_Q(nn.Module):
    """Two-hidden-layer MLP Q-network: (batch, state_dim) -> (batch, num_actions)."""

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class PER_DDQN(object):
    """Double DQN with Prioritized Experience Replay (PER).

    Samples from a prioritized buffer, corrects the sampling bias with the
    importance weights the buffer returns, and writes |TD error|-based
    priorities back after every step.

    Fixes vs. the original file:
      * `train` was defined twice; the first definition was dead code
        (silently replaced by the second) and called a nonexistent
        `replay_buffer.reinsert`. Only the live implementation is kept.
      * save/load used the literal "(unknown)" as the checkpoint prefix,
        ignoring the `filename` argument; they now honor `filename`.
    """

    def __init__(
        self,
        is_atari,
        num_actions,
        state_dim,
        device,
        discount=0.99,
        optimizer="Adam",
        optimizer_parameters={},
        polyak_target_update=False,
        target_update_frequency=8e3,
        tau=0.005,
        initial_eps = 1,
        end_eps = 0.001,
        eps_decay_period = 25e4,
        eval_eps=0.001,
    ):
        self.device = device
        # Determine network type: conv net for Atari pixels, MLP otherwise.
        self.Q = Conv_Q(4, num_actions).to(self.device) if is_atari else FC_Q(state_dim, num_actions).to(self.device)
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer = getattr(torch.optim, optimizer)(self.Q.parameters(), **optimizer_parameters)
        self.discount = discount
        # Target update rule
        self.maybe_update_target = self.polyak_target_update if polyak_target_update else self.copy_target_update
        self.target_update_frequency = target_update_frequency
        self.tau = tau
        # Linear decay for the exploration eps
        self.initial_eps = initial_eps
        self.end_eps = end_eps
        self.slope = (self.end_eps - self.initial_eps) / eps_decay_period
        # Evaluation hyper-parameters
        self.state_shape = (-1, 4, 84, 84) if is_atari else (-1, state_dim) ### need to pass framesize
        self.eval_eps = eval_eps
        self.num_actions = num_actions
        # Number of training iterations
        self.iterations = 0

    def select_action(self, state, eval=False):
        """Eps-greedy action; uses the small eval_eps when eval=True."""
        eps = self.eval_eps if eval \
            else max(self.slope * self.iterations + self.initial_eps, self.end_eps)
        # Select action according to policy with probability (1-eps)
        # otherwise, select random action
        if np.random.uniform(0, 1) > eps:
            with torch.no_grad():
                state = torch.FloatTensor(state).reshape(self.state_shape).to(self.device)
                return int(self.Q(state).argmax(1))
        else:
            return np.random.randint(self.num_actions)

    def train(self, replay_buffer):
        """One PER training step: weighted TD update, then priority refresh."""
        # Sample replay buffer; `done` is the buffer's not-done mask
        # (1 while the episode continues) and `weights` the IS corrections.
        state, action, next_state, reward, done, ind, weights = replay_buffer.sample()
        # Compute the target Q value (Double DQN: online net selects the
        # action, target net evaluates it).
        with torch.no_grad():
            next_action = self.Q(next_state).argmax(1, keepdim=True)
            target_Q = (
                reward + done * self.discount *
                self.Q_target(next_state).gather(1, next_action).reshape(-1, 1)
            )
        # Get current Q estimate
        current_Q = self.Q(state).gather(1, action)
        # Importance-weighted Huber loss
        Q_loss = (weights * F.smooth_l1_loss(current_Q, target_Q, reduction='none')).mean()
        # Optimize the Q network
        self.Q_optimizer.zero_grad()
        Q_loss.backward()
        self.Q_optimizer.step()
        # Update target network by polyak or full copy every X iterations.
        self.iterations += 1
        self.maybe_update_target()
        # New priorities: |TD error|^0.6, with a small epsilon so no
        # transition ends up with exactly zero sampling probability.
        priority = ((current_Q - target_Q).abs() + 1e-10).pow(0.6).cpu().data.numpy().flatten()
        replay_buffer.update_priority(ind, priority)

    def polyak_target_update(self):
        """Soft target update: target <- tau * online + (1 - tau) * target."""
        for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def copy_target_update(self):
        """Hard target copy every `target_update_frequency` iterations."""
        if self.iterations % self.target_update_frequency == 0:
            self.Q_target.load_state_dict(self.Q.state_dict())

    def save(self, filename):
        torch.save(self.iterations, filename + "iterations")
        torch.save(self.Q.state_dict(), f"{filename}Q_{self.iterations}")
        torch.save(self.Q_optimizer.state_dict(), filename + "optimizer")

    def load(self, filename):
        self.iterations = torch.load(filename + "iterations")
        self.Q.load_state_dict(torch.load(f"{filename}Q_{self.iterations}"))
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer.load_state_dict(torch.load(filename + "optimizer"))
LAP-PAL | LAP-PAL-master/discrete/PAL_DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
    """Convolutional Q-network: (batch, frames, 84, 84) -> (batch, num_actions)."""

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        # Classic DQN trunk: three conv layers, then two linear layers.
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.l1 = nn.Linear(3136, 512)
        self.l2 = nn.Linear(512, num_actions)

    def forward(self, state):
        hidden = F.relu(self.c1(state))
        hidden = F.relu(self.c2(hidden))
        hidden = F.relu(self.c3(hidden))
        # 3136 = 64 channels * 7 * 7 spatial for 84x84 inputs.
        hidden = F.relu(self.l1(hidden.reshape(-1, 3136)))
        return self.l2(hidden)
# Used for Box2D / Toy problems
class FC_Q(nn.Module):
    """Two-hidden-layer MLP Q-network: (batch, state_dim) -> (batch, num_actions)."""

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class PAL_DDQN(object):
    """Double DQN trained with the Prioritized Approximation Loss (PAL).

    Uses a uniform replay buffer; the effect of prioritization is folded
    directly into the loss (see PAL below) instead of importance sampling.
    """

    def __init__(
        self,
        is_atari,
        num_actions,
        state_dim,
        device,
        discount=0.99,
        optimizer="Adam",
        optimizer_parameters={},
        polyak_target_update=False,
        target_update_frequency=8e3,
        tau=0.005,
        initial_eps = 1,
        end_eps = 0.001,
        eps_decay_period = 25e4,
        eval_eps=0.001,
        alpha=0.6,
        min_priority=1e-2
    ):
        self.device = device
        # Determine network type
        self.Q = Conv_Q(4, num_actions).to(self.device) if is_atari else FC_Q(state_dim, num_actions).to(self.device)
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer = getattr(torch.optim, optimizer)(self.Q.parameters(), **optimizer_parameters)
        self.discount = discount
        # PAL hyper-parameters
        self.alpha = alpha
        self.min_priority = min_priority
        # Target update rule
        self.maybe_update_target = self.polyak_target_update if polyak_target_update else self.copy_target_update
        self.target_update_frequency = target_update_frequency
        self.tau = tau
        # Decay for eps
        self.initial_eps = initial_eps
        self.end_eps = end_eps
        self.slope = (self.end_eps - self.initial_eps) / eps_decay_period
        # Evaluation hyper-parameters
        # NOTE(review): the Atari branch assumes state_dim is a tuple -- confirm callers.
        self.state_shape = (-1,) + state_dim if is_atari else (-1, state_dim)
        self.eval_eps = eval_eps
        self.num_actions = num_actions
        # Number of training iterations
        self.iterations = 0

    def select_action(self, state, eval=False):
        """Eps-greedy action; uses the small eval_eps when eval=True."""
        eps = self.eval_eps if eval \
            else max(self.slope * self.iterations + self.initial_eps, self.end_eps)
        # Select action according to policy with probability (1-eps)
        # otherwise, select random action
        if np.random.uniform(0,1) > eps:
            with torch.no_grad():
                state = torch.FloatTensor(state).reshape(self.state_shape).to(self.device)
                return int(self.Q(state).argmax(1))
        else:
            return np.random.randint(self.num_actions)

    def train(self, replay_buffer):
        """One PAL training step on a uniformly sampled batch."""
        # Sample replay buffer
        state, action, next_state, reward, done = replay_buffer.sample()
        # Compute the target Q value (Double DQN: online net picks the action,
        # target net evaluates it; `done` is the buffer's not-done mask).
        with torch.no_grad():
            next_action = self.Q(next_state).argmax(1, keepdim=True)
            target_Q = (
                reward + done * self.discount *
                self.Q_target(next_state).gather(1, next_action).reshape(-1, 1)
            )
        # Get current Q estimate
        current_Q = self.Q(state).gather(1, action)
        td_loss = (current_Q - target_Q).abs()
        # Normalizer: mean clipped priority of the batch (detached).
        weight = td_loss.clamp(min=self.min_priority).pow(self.alpha).mean().detach()
        # Compute critic loss
        Q_loss = self.PAL(td_loss)/weight.detach()
        # Optimize the Q
        self.Q_optimizer.zero_grad()
        Q_loss.backward()
        self.Q_optimizer.step()
        # Update target network by polyak or full copy every X iterations.
        self.iterations += 1
        self.maybe_update_target()

    def PAL(self, x):
        """Prioritized Approximation Loss: quadratic below min_priority,
        power-law (exponent 1 + alpha) above it; mean over the batch."""
        return torch.where(
            x.abs() < self.min_priority,
            (self.min_priority ** self.alpha) * 0.5 * x.pow(2),
            self.min_priority * x.abs().pow(1. + self.alpha)/(1. + self.alpha)
        ).mean()

    def polyak_target_update(self):
        """Soft target update: target <- tau * online + (1 - tau) * target."""
        for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def copy_target_update(self):
        """Hard target copy every `target_update_frequency` iterations."""
        if self.iterations % self.target_update_frequency == 0:
            self.Q_target.load_state_dict(self.Q.state_dict())

    def save(self, filename):
        torch.save(self.iterations, filename + "iterations")
        # NOTE(review): the "(unknown)" literal below looks like a mangled
        # "{filename}" prefix -- confirm the intended checkpoint path.
        torch.save(self.Q.state_dict(), f"(unknown)Q_{self.iterations}")
        torch.save(self.Q_optimizer.state_dict(), filename + "optimizer")

    def load(self, filename):
        self.iterations = torch.load(filename + "iterations")
        # NOTE(review): same "(unknown)" prefix concern as in save().
        self.Q.load_state_dict(torch.load(f"(unknown)Q_{self.iterations}"))
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer.load_state_dict(torch.load(filename + "optimizer"))
LAP-PAL | LAP-PAL-master/discrete/LAP_DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
    """Convolutional Q-network: (batch, frames, 84, 84) -> (batch, num_actions)."""

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        # Classic DQN trunk: three conv layers, then two linear layers.
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.l1 = nn.Linear(3136, 512)
        self.l2 = nn.Linear(512, num_actions)

    def forward(self, state):
        hidden = F.relu(self.c1(state))
        hidden = F.relu(self.c2(hidden))
        hidden = F.relu(self.c3(hidden))
        # 3136 = 64 channels * 7 * 7 spatial for 84x84 inputs.
        hidden = F.relu(self.l1(hidden.reshape(-1, 3136)))
        return self.l2(hidden)
# Used for Box2D / Toy problems
class FC_Q(nn.Module):
    """Two-hidden-layer MLP Q-network: (batch, state_dim) -> (batch, num_actions)."""

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class LAP_DDQN(object):
    """Double DQN with Loss-Adjusted Prioritized (LAP) experience replay.

    Samples from a prioritized buffer but trains with a Huber-style loss
    (see huber below); the importance weights returned by the buffer are
    unused here.
    """

    def __init__(
        self,
        is_atari,
        num_actions,
        state_dim,
        device,
        discount=0.99,
        optimizer="Adam",
        optimizer_parameters={},
        polyak_target_update=False,
        target_update_frequency=8e3,
        tau=0.005,
        initial_eps = 1,
        end_eps = 0.001,
        eps_decay_period = 25e4,
        eval_eps=0.001,
        alpha=0.6,
        min_priority=1e-2
    ):
        self.device = device
        # Determine network type
        self.Q = Conv_Q(4, num_actions).to(self.device) if is_atari else FC_Q(state_dim, num_actions).to(self.device)
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer = getattr(torch.optim, optimizer)(self.Q.parameters(), **optimizer_parameters)
        self.discount = discount
        # LAP hyper-parameters
        self.alpha = alpha
        self.min_priority = min_priority
        # Target update rule
        self.maybe_update_target = self.polyak_target_update if polyak_target_update else self.copy_target_update
        self.target_update_frequency = target_update_frequency
        self.tau = tau
        # Decay for eps
        self.initial_eps = initial_eps
        self.end_eps = end_eps
        self.slope = (self.end_eps - self.initial_eps) / eps_decay_period
        # Evaluation hyper-parameters
        # NOTE(review): the Atari branch assumes state_dim is a tuple -- confirm callers.
        self.state_shape = (-1,) + state_dim if is_atari else (-1, state_dim)
        self.eval_eps = eval_eps
        self.num_actions = num_actions
        # Number of training iterations
        self.iterations = 0

    def select_action(self, state, eval=False):
        """Eps-greedy action; uses the small eval_eps when eval=True."""
        eps = self.eval_eps if eval \
            else max(self.slope * self.iterations + self.initial_eps, self.end_eps)
        # Select action according to policy with probability (1-eps)
        # otherwise, select random action
        if np.random.uniform(0,1) > eps:
            with torch.no_grad():
                state = torch.FloatTensor(state).reshape(self.state_shape).to(self.device)
                return int(self.Q(state).argmax(1))
        else:
            return np.random.randint(self.num_actions)

    def train(self, replay_buffer):
        """One LAP training step: Huber TD update + priority refresh."""
        # Sample replay buffer (prioritized: also returns leaf indices and
        # IS weights; the weights are intentionally unused below).
        state, action, next_state, reward, done, ind, weights = replay_buffer.sample()
        # Compute the target Q value (Double DQN: online net picks the action,
        # target net evaluates it; `done` is the buffer's not-done mask).
        with torch.no_grad():
            next_action = self.Q(next_state).argmax(1, keepdim=True)
            target_Q = (
                reward + done * self.discount *
                self.Q_target(next_state).gather(1, next_action).reshape(-1, 1)
            )
        # Get current Q estimate
        current_Q = self.Q(state).gather(1, action)
        td_loss = (current_Q - target_Q).abs()
        Q_loss = self.huber(td_loss)
        # Optimize the Q network
        self.Q_optimizer.zero_grad()
        Q_loss.backward()
        self.Q_optimizer.step()
        # Update target network by polyak or full copy every X iterations.
        self.iterations += 1
        self.maybe_update_target()
        # New priorities: clipped |TD error| ^ alpha.
        priority = td_loss.clamp(min=self.min_priority).pow(self.alpha).cpu().data.numpy().flatten()
        replay_buffer.update_priority(ind, priority)

    def huber(self, x):
        """Mean Huber-style loss: quadratic below min_priority, linear above."""
        return torch.where(x < self.min_priority, 0.5 * x.pow(2), self.min_priority * x).mean()

    def polyak_target_update(self):
        """Soft target update: target <- tau * online + (1 - tau) * target."""
        for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def copy_target_update(self):
        """Hard target copy every `target_update_frequency` iterations."""
        if self.iterations % self.target_update_frequency == 0:
            self.Q_target.load_state_dict(self.Q.state_dict())

    def save(self, filename):
        torch.save(self.iterations, filename + "iterations")
        # NOTE(review): the "(unknown)" literal below looks like a mangled
        # "{filename}" prefix -- confirm the intended checkpoint path.
        torch.save(self.Q.state_dict(), f"(unknown)Q_{self.iterations}")
        torch.save(self.Q_optimizer.state_dict(), filename + "optimizer")

    def load(self, filename):
        self.iterations = torch.load(filename + "iterations")
        # NOTE(review): same "(unknown)" prefix concern as in save().
        self.Q.load_state_dict(torch.load(f"(unknown)Q_{self.iterations}"))
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer.load_state_dict(torch.load(filename + "optimizer"))
LAP-PAL | LAP-PAL-master/discrete/DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
    """Convolutional Q-network: (batch, frames, 84, 84) -> (batch, num_actions)."""

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        # Classic DQN trunk: three conv layers, then two linear layers.
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.l1 = nn.Linear(3136, 512)
        self.l2 = nn.Linear(512, num_actions)

    def forward(self, state):
        hidden = F.relu(self.c1(state))
        hidden = F.relu(self.c2(hidden))
        hidden = F.relu(self.c3(hidden))
        # 3136 = 64 channels * 7 * 7 spatial for 84x84 inputs.
        hidden = F.relu(self.l1(hidden.reshape(-1, 3136)))
        return self.l2(hidden)
# Used for Box2D / Toy problems
class FC_Q(nn.Module):
    """Two-hidden-layer MLP Q-network: (batch, state_dim) -> (batch, num_actions)."""

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class DDQN(object):
    """Plain Double DQN baseline: uniform replay, smooth-L1 TD loss."""

    def __init__(
        self,
        is_atari,
        num_actions,
        state_dim,
        device,
        discount=0.99,
        optimizer="Adam",
        optimizer_parameters={},
        polyak_target_update=False,
        target_update_frequency=8e3,
        tau=0.005,
        initial_eps = 1,
        end_eps = 0.001,
        eps_decay_period = 25e4,
        eval_eps=0.001,
    ):
        self.device = device
        # Determine network type
        self.Q = Conv_Q(4, num_actions).to(self.device) if is_atari else FC_Q(state_dim, num_actions).to(self.device)
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer = getattr(torch.optim, optimizer)(self.Q.parameters(), **optimizer_parameters)
        self.discount = discount
        # Target update rule
        self.maybe_update_target = self.polyak_target_update if polyak_target_update else self.copy_target_update
        self.target_update_frequency = target_update_frequency
        self.tau = tau
        # Decay for eps
        self.initial_eps = initial_eps
        self.end_eps = end_eps
        self.slope = (self.end_eps - self.initial_eps) / eps_decay_period
        # Evaluation hyper-parameters
        # NOTE(review): the Atari branch assumes state_dim is a tuple -- confirm callers.
        self.state_shape = (-1,) + state_dim if is_atari else (-1, state_dim)
        self.eval_eps = eval_eps
        self.num_actions = num_actions
        # Number of training iterations
        self.iterations = 0

    def select_action(self, state, eval=False):
        """Eps-greedy action; uses the small eval_eps when eval=True."""
        eps = self.eval_eps if eval \
            else max(self.slope * self.iterations + self.initial_eps, self.end_eps)
        # Select action according to policy with probability (1-eps)
        # otherwise, select random action
        if np.random.uniform(0,1) > eps:
            with torch.no_grad():
                state = torch.FloatTensor(state).reshape(self.state_shape).to(self.device)
                return int(self.Q(state).argmax(1))
        else:
            return np.random.randint(self.num_actions)

    def train(self, replay_buffer):
        """One Double-DQN training step on a uniformly sampled batch."""
        # Sample replay buffer
        state, action, next_state, reward, done = replay_buffer.sample()
        # Compute the target Q value (Double DQN: online net picks the action,
        # target net evaluates it; `done` is the buffer's not-done mask).
        with torch.no_grad():
            next_action = self.Q(next_state).argmax(1, keepdim=True)
            target_Q = (
                reward + done * self.discount *
                self.Q_target(next_state).gather(1, next_action).reshape(-1, 1)
            )
        # Get current Q estimate
        current_Q = self.Q(state).gather(1, action)
        # Compute Q loss
        Q_loss = F.smooth_l1_loss(current_Q, target_Q)
        # Optimize the Q network
        self.Q_optimizer.zero_grad()
        Q_loss.backward()
        self.Q_optimizer.step()
        # Update target network by polyak or full copy every X iterations.
        self.iterations += 1
        self.maybe_update_target()

    def polyak_target_update(self):
        """Soft target update: target <- tau * online + (1 - tau) * target."""
        for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def copy_target_update(self):
        """Hard target copy every `target_update_frequency` iterations."""
        if self.iterations % self.target_update_frequency == 0:
            self.Q_target.load_state_dict(self.Q.state_dict())

    def save(self, filename):
        torch.save(self.iterations, filename + "iterations")
        # NOTE(review): the "(unknown)" literal below looks like a mangled
        # "{filename}" prefix -- confirm the intended checkpoint path.
        torch.save(self.Q.state_dict(), f"(unknown)Q_{self.iterations}")
        torch.save(self.Q_optimizer.state_dict(), filename + "optimizer")

    def load(self, filename):
        self.iterations = torch.load(filename + "iterations")
        # NOTE(review): same "(unknown)" prefix concern as in save().
        self.Q.load_state_dict(torch.load(f"(unknown)Q_{self.iterations}"))
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer.load_state_dict(torch.load(filename + "optimizer"))
# repo: inversegraphics — file: process_mesh.py
import meshtool.filters.simplify_filters.sander_simplify
from meshtool.filters.base_filters import MetaFilter
from meshtool.filters import factory
from itertools import chain, izip, combinations
import collada
import numpy
def process(meshPath):
    """Load a COLLADA file and run meshtool's optimization filters on it.

    Returns the optimized `collada.Collada` mesh object.
    """
    mesh = collada.Collada(meshPath)

    # Filters run in this order; 'triangulate' is deliberately left out.
    for filter_name in ('combine_primitives',
                        'optimize_sources',
                        'strip_unused_sources',
                        'normalize_indices'):
        mesh = factory.getInstance(filter_name).apply(mesh)

    # Sander simplification kept for reference but disabled:
    # f = 'sander_simplify'
    # pmout = open('pm_file', 'w')
    # inst = factory.getInstance(f)
    # mesh = inst.apply(mesh, pmout)
    # s = meshtool.filters.simplify_filters.sander_simplify.SanderSimplify(mesh, pmout)
    # meshsimple = s.simplify()
    return mesh
# (extraction metadata for process_mesh.py: 921 | 23.918919 | 87 | py)
# repo: inversegraphics — file: teapots.py
#!/usr/bin/env python3.4m
from blender_utils import *
# --- Scene, camera and lighting setup for the Blender render-and-match script ---
baseDir = '../databaseFull/models/'
lines = [line.strip() for line in open('teapots.txt')]
for object in bpy.data.scenes['Scene'].objects: print(object.name)
lamp = bpy.data.scenes['Scene'].objects[1]
lamp.location = (0,0.0,1.5)
camera = bpy.data.scenes['Scene'].objects[2]
# NOTE(review): 60 * 180 / pi looks like an inverted deg->rad conversion
# (60 * pi / 180 would be a 60-degree FOV) — confirm intended value.
camera.data.angle = 60 * 180 / numpy.pi
distance = 0.5
originalLoc = mathutils.Vector((0,-distance,0.0))
# Camera placed on a hemisphere around the origin at (azimuth, elevation).
elevation = 0.0
azimuth = 0.0
elevationRot = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
azimuthRot = mathutils.Matrix.Rotation(radians(-azimuth), 4, 'Z')
location = azimuthRot * elevationRot * originalLoc
camera.location = location
look_at(camera, mathutils.Vector((0,0,0)))
world = bpy.context.scene.world
# Environment lighting
world.light_settings.use_environment_light = True
world.light_settings.environment_energy = 0.2
world.horizon_color = mathutils.Color((0.0,0.0,0.0))
width = 200
height = 200
# Ground-truth poses and train/test splits loaded from .mat experiment files.
data, images, experiments = loadData()
# NOTE(review): 'azimuths' is stored into *Els and 'altitudes' into *Azs —
# these look swapped; verify against loadData()'s field layout.
groundTruthEls = data['azimuths'][0][0][0]
groundTruthAzs = data['altitudes'][0][0][0]
filenames = [name[0] for name in data['filenames'][0][0][0][:]]
# images["images"][i]
labels = numpy.column_stack((numpy.cos(groundTruthAzs*numpy.pi/180), numpy.sin(groundTruthAzs*numpy.pi/180), numpy.cos(groundTruthAzs*numpy.pi/180.0), numpy.sin(groundTruthAzs*numpy.pi/180.0)))
expi = 3
experiment = experiments['experiments'][0][0][0][expi]
selTrain = experiment['selTrain'][0][0][0]
selTest = experiment['selTest'][0][0][0]
output = scipy.io.loadmat('../data/crossval6div2-hog8-alldataexperiments.mat')['output_data']
idx = output['idx'][0][0][expi][0]
# filenames = [u''.join(chr(c[0]) for c in fdata[name[0]]) for name in numpy.array(data["filenames"])[:][:]]
# Nearest-neighbour pose predictions, converted radians -> degrees.
nnpredazs = output['nnpredradazs'][0][0][expi][0]*180.0/numpy.pi
nnpredalts = output['nnpredradalts'][0][0][expi][0]*180.0/numpy.pi
# rtpredazs = output['rtpredradazs'][0][0][expi][0]*180.0/numpy.pi
# rtpredalts = output['rtpredradalts'][0][0][expi][0]*180.0/numpy.pi
predazs =nnpredazs.squeeze()
predalts=nnpredalts.squeeze()
# Best-match bookkeeping, one slot per test image.
numTests = selTest.size
bestModels= [""]*numTests
bestScores = numpy.ones(numTests)*999999
bestAzimuths = numpy.zeros(numTests)
bestElevations = numpy.zeros(numTests)
predi = 0
# selTest[[10384,10397,10408,10440,10442,10446,10458,10469,10478,10492]]:
# For each selected test image: render each candidate teapot model at the
# predicted pose and keep the model whose render best matches (Chamfer
# distance on edge maps).
for selTestNum in [10384, 10397, 10408]:
    test = selTest[selTestNum]
    rgbTestImage = numpy.transpose(images["images"][test])
    testImage = cv2.cvtColor(numpy.float32(rgbTestImage*255), cv2.COLOR_RGB2BGR)/255.0
    testImageEdges = cv2.Canny(numpy.uint8(testImage*255), 50,150)
    cv2.imwrite("canny_" + str(test) + ".png" , testImageEdges)
    cv2.imwrite("image_" + str(test) + ".png" , numpy.uint8(testImage*255))
    score = 9999999
    for teapot in lines[0:5]:
        fullTeapot = baseDir + teapot
        print("Reading " + fullTeapot + '.dae')
        # Fresh Blender scene per candidate model, sharing lamp/camera/world.
        bpy.ops.scene.new()
        bpy.context.scene.name = teapot
        scene = bpy.context.scene
        scene.objects.link(lamp)
        scene.camera = camera
        # scene.render.use_raytrace = True
        # scene.render.antialiasing_samples = '16'
        scene.render.resolution_x = width #perhaps set resolution in code
        scene.render.resolution_y = height
        scene.world = world
        scene.render.filepath = teapot + '.png'
        bpy.utils.collada_import(fullTeapot + '.dae')
        # Normalize the imported model to a fixed real-world size, then
        # center it at the origin.
        minZ, maxZ = modelHeight(scene)
        minY, maxY = modelWidth(scene)
        scaleZ = 0.254/(maxZ-minZ)
        scaleY = 0.1778/(maxY-minY)
        scale = min(scaleZ, scaleY)
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                scaleMat = mathutils.Matrix.Scale(scale, 4)
                mesh.matrix_world = scaleMat * mesh.matrix_world
        minZ, maxZ = modelHeight(scene)
        center = centerOfGeometry(scene)
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                mesh.matrix_world = mathutils.Matrix.Translation(-center) * mesh.matrix_world
        #Rotate the object to the azimuth angle we define as 0.
        rot = mathutils.Matrix.Rotation(radians(90), 4, 'Z')
        rotateMatrixWorld(scene, rot)
        camera.data.angle = 60 * 180 / numpy.pi
        stopSearchEl = False
        stopSearchAz = False
        dirEl = 1
        dirAz = 1
        # Start from the predicted pose for this test image.
        elevation = predalts[selTestNum]
        azimuth = predazs[selTestNum]
        center = centerOfGeometry(scene)
        elevationRot = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
        azimuthRot = mathutils.Matrix.Rotation(radians(azimuth), 4, 'Z')
        location = azimuthRot * elevationRot * (center + originalLoc)
        camera.location = location
        scene.update()
        look_at(camera, center)
        scene.update()
        bpy.ops.render.render( write_still=False )
        blendImage = bpy.data.images['Render Result']
        # NOTE(review): height/2 and width/2 are floats under Python 3 —
        # reshape would need integer dims; presumably written for Python 2.
        image = numpy.flipud(numpy.array(blendImage.extract_render(scene=scene)).reshape([height/2,width/2,4]))
        image[numpy.where(image > 1)] = 1
        # image = cv2.imread(teapot + '.png', cv2.IMREAD_ANYDEPTH)
        # image = numpy.float16(image)/255.0
        distance = getChamferDistance(testImage, image)
        if distance < score:
            score = distance
            bestModels[predi] = teapot
            bestScores[predi] = score
            bestElevations[predi] = elevation
            bestAzimuths[predi] = azimuth
        # The local pose-refinement search below is disabled.
        # while not stopSearchEl:
        #     elevation = (elevation + dirEl*2) % 90
        #     elevationRot = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
        #     location = azimuthRot * elevationRot * (center + originalLoc)
        #     camera.location = location
        #     scene.update()
        #     look_at(camera, center)
        #     bpy.ops.render.render( write_still=False )
        #     blendImage = bpy.data.images['Render Result']
        #     image = numpy.flipud(numpy.array(blendImage.extract_render(scene=scene)).reshape([height/2,width/2,4]))
        #     # # Truncate intensities larger than 1.
        #     image[numpy.where(image > 1)] = 1
        ##     image = cv2.imread(teapot + '.png', cv2.IMREAD_ANYDEPTH)
        ##     image = numpy.float16(image)/255.0
        #     distance = getChamferDistance(image, testImage)
        #     if distance < score:
        #         score = distance
        #         bestModels[predi] = teapot
        #         bestScores[predi] = score
        #         bestElevations[predi] = elevation
        #     elif dirEl > 0:
        #         elevation = predalts[selTestNum]
        #         dirEl = -1
        #     else:
        #         stopSearchEl = True
        # iaz = 0
        # azimuth = 0
        # while not stopSearchAz:
        #     azimuth = (azimuth + dirAz*5) % 360
        #     azimuthRot = mathutils.Matrix.Rotation(radians(azimuth), 4, 'Z')
        #     location = azimuthRot * elevationRot * (center + originalLoc)
        #     camera.location = location
        #     scene.update()
        #     look_at(camera, center)
        #     scene.update()
        #     bpy.ops.render.render( write_still=False )
        #     blendImage = bpy.data.images['Render Result']
        #     # image = numpy.flipud(numpy.array(blendImage.extract_render(scene=scene)).reshape([height/2,width/2,4]))[:,:,0:3]
        #     # # Truncate intensities larger than 1.
        #     # image[numpy.where(image > 1)] = 1
        #     image = cv2.imread(teapot + '.png', cv2.IMREAD_ANYDEPTH)
        #     image = numpy.float32(image)/255.0
        #     distance = getChamferDistance(testImage, image)
        #     if distance < score:
        #         score = distance
        #         bestModels[predi] = teapot
        #         bestScores[predi] = score
        #         bestAzimuths[predi] = azimuth
        #     imageEdges = cv2.Canny(numpy.uint8(image*255.0), 25,225)
        #     cv2.imwrite(teapot + "_canny_" + str(test) + ".png" , imageEdges)
        #     # elif dirAz > 0:
        #     #     azimuth = predazs[selTestNum]
        #     #     dirAz = -1
        #     # else:
        #     #     stopSearchAz = True
        #     if azimuth >= 355:
        #         stopSearchAz = True
        # # Save best image.
        # # im = Image.fromarray(numpy.uint8(image*255))
        # # im.save(teapot + '.png')
        # # Cleanup
        # for obji in scene.objects:
        #     if obji.type == 'MESH':
        #         obji.user_clear()
        #         bpy.data.objects.remove(obji)
        # scene.user_clear()
        # bpy.ops.scene.delete()
    predi = predi + 1
# (extraction metadata for teapots.py: 8,858 | 29.030508 | 193 | py)
# repo: inversegraphics — file: diffrender_groundtruth.py
__author__ = 'pol'
import matplotlib
# matplotlib.use('Qt4Agg')
import bpy
import scene_io_utils
import mathutils
from math import radians
import timeit
import time
import opendr
import chumpy as ch
import geometry
import image_processing
import numpy as np
import cv2
from blender_utils import *
import generative_models
import matplotlib.pyplot as plt
from opendr_utils import *
import OpenGL.GL as GL
import light_probes
import imageio
from OpenGL import contextdata
plt.ion()
#########################################
# Initialization starts here
#########################################
# Output prefixes for this ground-truth generation run.
prefix = 'cian_example'
previousGTPrefix = 'cian_example'
#Main script options:
renderFromPreviousGT = False
useShapeModel = True
renderOcclusions = False
useOpenDR = True
useBlender = False
loadBlenderSceneFile = True
useCycles = True
unpackModelsFromBlender = False
unpackSceneFromBlender = False
glModes = ['glfw','mesa']
glMode = glModes[0]
width, height = (150, 150)
win = -1
if useOpenDR:
    if glMode == 'glfw':
        import glfw
        #Initialize base GLFW context for the Demo and to share context among all renderers.
        glfw.init()
        glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
        glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
        # glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
        glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
        glfw.window_hint(glfw.DEPTH_BITS,32)
        glfw.window_hint(glfw.VISIBLE, GL.GL_FALSE)
        # win = glfw.create_window(width, height, "Demo", None, None)
        # glfw.make_context_current(win)
# NOTE(review): 60 * 180 / pi looks like an inverted deg->rad conversion —
# confirm intended camera angle (60 * pi / 180 would be 60 degrees).
angle = 60 * 180 / numpy.pi
clip_start = 0.01
clip_end = 10
frustum = {'near': clip_start, 'far': clip_end, 'width': width, 'height': height}
camDistance = 0.4
teapots = [line.strip() for line in open('teapots.txt')]
renderTeapotsList = np.arange(len(teapots))[0:1]
# Load the target scene description and remove the replaceable object slot.
sceneIdx = 0
replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup.txt'
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
targetParentIdx = 0
targetIndex = targetIndices[targetParentIdx]
targetParentPosition = targetPositions[targetParentIdx]
targetPosition = targetParentPosition
tex_srgb2lin = True
v, f_list, vc, vn, uv, haveTextures_list, textures_list = scene_io_utils.loadSavedScene(sceneDicFile, tex_srgb2lin)
removeObjectData(int(targetIndex), v, f_list, vc, vn, uv, haveTextures_list, textures_list)
targetModels = []
blender_teapots = []
teapots = [line.strip() for line in open('teapots.txt')]
selection = [ teapots[i] for i in renderTeapotsList]
if useBlender:
    # Blender-side teapot instances are only needed when Cycles renders GT.
    scene_io_utils.loadTargetsBlendData()
    for teapotIdx, teapotName in enumerate(selection):
        teapot = bpy.data.scenes[teapotName[0:63]].objects['teapotInstance' + str(renderTeapotsList[teapotIdx])]
        teapot.layers[1] = True
        teapot.layers[2] = True
        targetModels = targetModels + [teapot]
        blender_teapots = blender_teapots + [teapot]
v_teapots, f_list_teapots, vc_teapots, vn_teapots, uv_teapots, haveTextures_list_teapots, textures_list_teapots, vflat, varray, center_teapots = scene_io_utils.loadTeapotsOpenDRData(renderTeapotsList, useBlender, unpackModelsFromBlender, targetModels)
# --- Differentiable (chumpy) ground-truth parameters: pose, lighting, SH ---
chObjAzGT = ch.Ch([0])
chAzGT = ch.Ch([0])
chAzRelGT = chAzGT - chObjAzGT
chElGT = ch.Ch([0])
chDistGT = ch.Ch([camDistance])
chComponentGT = ch.Ch(np.array([2, 0.25, 0.25, 0.12,-0.17,0.36,0.1,0.,0.]))
chComponent = ch.Ch(np.array([2, 0.25, 0.25, 0.12,-0.17,0.36,0.1,0.,0.]))
chPointLightIntensityGT = ch.Ch([1])
chLightDistGT = ch.Ch([0.5])
chLightAzGT = ch.Ch([0.0])
chLightElGT = ch.Ch([np.pi/4])
ligthTransfGT = computeHemisphereTransformation(chLightAzGT, chLightElGT, chLightDistGT, targetPosition)
lightPosGT = ch.dot(ligthTransfGT, ch.Ch([0.,0.,0.,1.]))[0:3]
chGlobalConstantGT = ch.Ch([0.5])
light_colorGT = ch.ones(3)*chPointLightIntensityGT
chVColorsGT = ch.Ch([0.8,0.8,0.8])
shCoefficientsFile = 'data/sceneSH' + str(sceneIdx) + '.pickle'
chAmbientIntensityGT = ch.Ch([0.1])
clampedCosCoeffs = clampedCosineCoefficients()
chAmbientSHGT = ch.zeros([9])
# Environment-map spherical-harmonic coefficients, precomputed offline.
envMapDic = {}
SHFilename = 'data/LightSHCoefficients.pickle'
with open(SHFilename, 'rb') as pfile:
    envMapDic = pickle.load(pfile)
phiOffset = ch.Ch([0])
totalOffset = phiOffset + chObjAzGT
envMapCoeffs = ch.Ch(list(envMapDic.items())[0][1][1])
# Rotate SH coefficients about Z; the [0,3,2,1,...] index swap converts
# between the two SH component orderings used here.
envMapCoeffsRotated = ch.Ch(np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]])
envMapCoeffsRotatedRel = ch.Ch(np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]])
shCoeffsRGB = envMapCoeffsRotated
shCoeffsRGBRel = envMapCoeffsRotatedRel
# Luma-weighted grayscale SH from RGB SH.
chShCoeffs = 0.3*shCoeffsRGB[:,0] + 0.59*shCoeffsRGB[:,1] + 0.11*shCoeffsRGB[:,2]
chShCoeffsRel = 0.3*shCoeffsRGBRel[:,0] + 0.59*shCoeffsRGBRel[:,1] + 0.11*shCoeffsRGBRel[:,2]
chAmbientSHGT = chShCoeffs.ravel() * chAmbientIntensityGT * clampedCosCoeffs
chAmbientSHGTRel = chShCoeffsRel.ravel() * chAmbientIntensityGT * clampedCosCoeffs
# Directional (area) light modelled via zonal harmonics.
chLightRadGT = ch.Ch([0.1])
chLightDistGT = ch.Ch([0.5])
chLightIntensityGT = ch.Ch([0])
chLightAzGT = ch.Ch([np.pi*3/2])
chLightElGT = ch.Ch([np.pi/4])
angle = ch.arcsin(chLightRadGT/chLightDistGT)
zGT = chZonalHarmonics(angle)
shDirLightGT = chZonalToSphericalHarmonics(zGT, np.pi/2 - chLightElGT, chLightAzGT + chObjAzGT - np.pi/2) * clampedCosCoeffs
shDirLightGTRel = chZonalToSphericalHarmonics(zGT, np.pi/2 - chLightElGT, chLightAzGT - np.pi/2) * clampedCosCoeffs
chComponentGT = chAmbientSHGT
# chComponentGT = ch.Ch(chAmbientSHGT.r[:].copy())
# + shDirLightGT*chLightIntensityGT
chComponentGTRel = chAmbientSHGTRel
# chComponentGTRel = ch.Ch(chAmbientSHGTRel.r[:].copy())
# chComponentGT = chAmbientSHGT.r[:] + shDirLightGT.r[:]*chLightIntensityGT.r[:]
chDisplacementGT = ch.Ch([0.0,0.0,0.0])
chScaleGT = ch.Ch([1, 1.,1.])
# Insert the first teapot model into the scene geometry and build the renderer.
currentTeapotModel = 0
addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
center = center_teapots[currentTeapotModel]
if useOpenDR:
    rendererGT = createRendererGT(glMode, chAzGT, chElGT, chDistGT, center, v, vc, f_list, vn, light_colorGT, chComponentGT, chVColorsGT, targetPosition.copy(), chDisplacementGT, width, height, uv, haveTextures_list, textures_list, frustum, None)
    # NOTE(review): np.bool is removed in NumPy >= 1.24; this needs plain
    # `bool` under newer NumPy versions.
    vis_gt = np.array(rendererGT.indices_image!=1).copy().astype(np.bool)
    vis_mask = np.array(rendererGT.indices_image==1).copy().astype(np.bool)
    shapeIm = vis_gt.shape
numPixels = height * width
import multiprocessing
# Second assignment deliberately overrides the cpu-count heuristic.
numTileAxis = np.ceil(np.sqrt(multiprocessing.cpu_count())/2)
numTileAxis = 3
#########################################
# Initialization ends here
#########################################
# Optional PPCA teapot shape model: builds differentiable vertices from a
# low-dimensional latent shape vector.
if useShapeModel:
    teapot_i = -1
    import shape_model
    #%% Load data
    filePath = 'data/teapotModel.pkl'
    teapotModel = shape_model.loadObject(filePath)
    faces = teapotModel['faces']
    #%% Sample random shape Params
    latentDim = np.shape(teapotModel['ppcaW'])[1]
    shapeParams = np.random.randn(latentDim)
    chShapeParamsGT = ch.Ch(shapeParams)
    meshLinearTransform=teapotModel['meshLinearTransform']
    W=teapotModel['ppcaW']
    b=teapotModel['ppcaB']
    chVerticesGT = shape_model.VerticesModel(chShapeParams=chShapeParamsGT,meshLinearTransform=meshLinearTransform,W = W,b=b)
    chVerticesGT.init()
    chVerticesGT = ch.dot(geometry.RotateZ(-np.pi/2)[0:3,0:3],chVerticesGT.T).T
    chNormalsGT = shape_model.chGetNormals(chVerticesGT, faces)
    smNormalsGT = [chNormalsGT]
    smFacesGT = [[faces]]
    smVColorsGT = [chVColorsGT*np.ones(chVerticesGT.shape)]
    smUVsGT = [ch.Ch(np.zeros([chVerticesGT.shape[0],2]))]
    smHaveTexturesGT = [[False]]
    smTexturesListGT = [[None]]
    # Center the mesh, rest it on z=0, and scale it to scene units.
    chVerticesGT = chVerticesGT - ch.mean(chVerticesGT, axis=0)
    minZ = ch.min(chVerticesGT[:,2])
    chMinZ = ch.min(chVerticesGT[:,2])
    zeroZVerts = chVerticesGT[:,2]- chMinZ
    chVerticesGT = ch.hstack([chVerticesGT[:,0:2] , zeroZVerts.reshape([-1,1])])
    chVerticesGT = chVerticesGT*0.09
    smCenterGT = ch.array([0,0,0.1])
    smVerticesGT = [chVerticesGT]
    chNormalsGT = shape_model.chGetNormals(chVerticesGT, faces)
    smNormalsGT = [chNormalsGT]
else:
    latentDim = 1
    chShapeParamsGT = ch.array([0])
print("Creating Ground Truth")
# Empty accumulators defining one column each of the ground-truth table.
trainAzsGT = np.array([])
trainObjAzsGT = np.array([])
trainElevsGT = np.array([])
trainLightAzsGT = np.array([])
trainLightElevsGT = np.array([])
trainLightIntensitiesGT = np.array([])
trainVColorGT = np.array([])
trainScenes = np.array([], dtype=np.uint8)
trainTeapotIds = np.array([], dtype=np.uint8)
trainEnvMaps = np.array([], dtype=np.uint8)
trainOcclusions = np.array([])
trainTargetIndices = np.array([], dtype=np.uint8)
trainIds = np.array([], dtype=np.uint32)
#zeros
trainLightCoefficientsGT = np.array([]).reshape([0,9])
trainLightCoefficientsGTRel = np.array([]).reshape([0,9])
trainAmbientIntensityGT = np.array([])
trainEnvMapPhiOffsets = np.array([])
trainShapeModelCoeffsGT = np.array([]).reshape([0,latentDim])
# Output directory layout for rendered ground truth.
gtDir = 'groundtruth/' + prefix + '/'
if not os.path.exists(gtDir + 'images/'):
    os.makedirs(gtDir + 'images/')
if not os.path.exists(gtDir + 'sphericalharmonics/'):
    os.makedirs(gtDir + 'sphericalharmonics/')
if not os.path.exists(gtDir + 'images_opendr/'):
    os.makedirs(gtDir + 'images_opendr/')
if not os.path.exists(gtDir + 'masks_occlusion/'):
    os.makedirs(gtDir + 'masks_occlusion/')
print("Generating renders")
sceneLines = [line.strip() for line in open(replaceableScenesFile)]
scenesToRender = range(len(sceneLines))[:]
trainSize = 20000
renderTeapotsList = np.arange(len(teapots))[0:1]
# for hdrit, hdri in enumerate(list(envMapDic.items())):
#     if hdri[0] == 'data/hdr/dataset/canada_montreal_nad_photorealism.exr':
#         hdrtorenderi = hdrit
# Filter out environment maps flagged as bad.
ignoreEnvMaps = np.loadtxt('data/bad_envmaps.txt')
hdritems = list(envMapDic.items())[:]
hdrstorender = []
phiOffsets = [0, np.pi/2, np.pi, 3*np.pi/2]
for hdrFile, hdrValues in hdritems:
    hdridx = hdrValues[0]
    envMapCoeffs = hdrValues[1]
    if hdridx not in ignoreEnvMaps:
        hdrstorender = hdrstorender + [(hdrFile,hdrValues)]
    # if not os.path.exists('light_probes/envMap' + str(hdridx)):
    #     os.makedirs('light_probes/envMap' + str(hdridx))
    #
    # for phiOffset in phiOffsets:
    #
    #     # phiOffset = np.random.uniform(0,2*np.pi, 1)
    #     from numpy.random import choice
    #     objAzGT = np.pi/2
    #     chObjAzGT[:] = 0
    #     totalOffset = phiOffset + chObjAzGT.r
    #     envMapCoeffsRotated = np.dot(light_probes.sphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]].copy()
    #     envMapCoeffsRotatedRel = np.dot(light_probes.sphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]].copy()
    #     shCoeffsRGB = envMapCoeffsRotated.copy()
    #     shCoeffsRGBRel = envMapCoeffsRotatedRel.copy()
    #     chShCoeffs = 0.3*shCoeffsRGB[:,0] + 0.59*shCoeffsRGB[:,1] + 0.11*shCoeffsRGB[:,2]
    #     chShCoeffsRel = 0.3*shCoeffsRGBRel[:,0] + 0.59*shCoeffsRGBRel[:,1] + 0.11*shCoeffsRGBRel[:,2]
    #     chAmbientSHGT = chShCoeffs * chAmbientIntensityGT * clampedCosCoeffs
    #     chAmbientSHGTRel = chShCoeffsRel * chAmbientIntensityGT * clampedCosCoeffs
    #     chComponentGT[:] = chAmbientSHGT.r[:].copy()
    #     chComponentGTRel[:] = chAmbientSHGTRel.r[:].copy()
    #     cv2.imwrite('light_probes/envMap' + str(hdridx) + '/opendr_' + str(np.int(180*phiOffset/np.pi)) + '.png' , 255*rendererGT.r[:,:,[2,1,0]])
    # sys.exit("")
# Structured dtype describing one ground-truth record (one row per render).
gtDtype = [('trainIds', trainIds.dtype.name), ('trainAzsGT', trainAzsGT.dtype.name),('trainObjAzsGT', trainObjAzsGT.dtype.name),('trainElevsGT', trainElevsGT.dtype.name),('trainLightAzsGT', trainLightAzsGT.dtype.name),('trainLightElevsGT', trainLightElevsGT.dtype.name),('trainLightIntensitiesGT', trainLightIntensitiesGT.dtype.name),('trainVColorGT', trainVColorGT.dtype.name, (3,) ),('trainScenes', trainScenes.dtype.name),('trainTeapotIds', trainTeapotIds.dtype.name),('trainEnvMaps', trainEnvMaps.dtype.name),('trainOcclusions', trainOcclusions.dtype.name),('trainTargetIndices', trainTargetIndices.dtype.name), ('trainLightCoefficientsGT',trainLightCoefficientsGT.dtype, (9,)), ('trainLightCoefficientsGTRel', trainLightCoefficientsGTRel.dtype, (9,)), ('trainAmbientIntensityGT', trainAmbientIntensityGT.dtype), ('trainEnvMapPhiOffsets', trainEnvMapPhiOffsets.dtype), ('trainShapeModelCoeffsGT', trainShapeModelCoeffsGT.dtype, (latentDim,))]
groundTruth = np.array([], dtype = gtDtype)
groundTruthFilename = gtDir + 'groundTruth.h5'
gtDataFile = h5py.File(groundTruthFilename, 'a')
gtDataFileToRender = h5py.File(gtDir + 'groundTruthToRender.h5', 'w')
gtDatasetToRender = gtDataFileToRender.create_dataset(prefix, data=groundTruth, maxshape=(None,))
# Resume numbering from an existing dataset if one is present.
nextId = 0
try:
    gtDataset = gtDataFile[prefix]
    if gtDataset.size > 0:
        nextId = gtDataset['trainIds'][-1] + 1
# NOTE(review): bare except silently covers both "dataset missing" and any
# other h5py error — a narrower exception type would be safer.
except:
    gtDataset = gtDataFile.create_dataset(prefix, data=groundTruth, maxshape=(None,))
train_i = nextId
#Re-producible groundtruth generation.
if train_i == 0:
    np.random.seed(1)
unlinkedObj = None
scenesToRenderOcclusions = []
scenes = []
lenScenes = 0
#Compute how many different locations can the teapot be instantiated across all scenes.
for sceneIdx in scenesToRender:
    sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
    sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
    if renderOcclusions:
        # Keep only target slots that have a usable occlusion interval.
        targetIndicesNew = []
        occlusionSceneFile = 'data/occlusions/occlusionScene' + str(sceneNumber) + '.pickle'
        with open(occlusionSceneFile, 'rb') as pfile:
            occlusions = pickle.load(pfile)
        for targetidx, targetIndex in enumerate(targetIndices):
            if not occlusions[targetIndex][1]:
                print("Scene idx " + str(sceneIdx) + " at index " + str(targetIndex) + " has no proper occlusion.")
            else:
                targetIndicesNew = targetIndicesNew + [targetIndex]
        targetIndices = targetIndicesNew
    collisionSceneFile = 'data/collisions/collisionScene' + str(sceneNumber) + '.pickle'
    scenes = scenes + [targetIndices]
    with open(collisionSceneFile, 'rb') as pfile:
        collisions = pickle.load(pfile)
    for targetidx, targetIndex in enumerate(targetIndices):
        if not collisions[targetIndex][1]:
            print("Scene idx " + str(sceneIdx) + " at index " + str(targetIndex) + " collides everywhere.")
    lenScenes += len(targetIndices)
#Generate GT labels before rendering them.
# For every (scene, target slot, teapot, environment map) combination,
# sample random pose/lighting/shape parameters and append one record to
# the to-render HDF5 dataset.
if not renderFromPreviousGT:
    for scene_i, sceneIdx in enumerate(scenesToRender):
        print("Generating groundtruth for scene: " + str(sceneIdx))
        sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndicesScene, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
        targetIndices = scenes[scene_i]
        if not targetIndices:
            continue
        sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
        collisionSceneFile = 'data/collisions/collisionScene' + str(sceneNumber) + '.pickle'
        with open(collisionSceneFile, 'rb') as pfile:
            collisions = pickle.load(pfile)
        if renderOcclusions:
            occlusionSceneFile = 'data/occlusions/occlusionScene' + str(sceneNumber) + '.pickle'
            with open(occlusionSceneFile, 'rb') as pfile:
                occlusions = pickle.load(pfile)
        unlinkedObj = None
        envMapFilename = None
        for targetidx, targetIndex in enumerate(targetIndices):
            targetPosition = targetPositions[np.where(targetIndex==np.array(targetIndicesScene))[0]]
            if not collisions[targetIndex][1]:
                continue
            # Collision-free azimuth intervals weighted by their length, so
            # wider intervals are sampled proportionally more often.
            collisionProbs = np.zeros(len(collisions[targetIndex][1]))
            # removeObjectData(int(targetIndex-1), v, f_list, vc, vn, uv, haveTextures_list, textures_list)
            for intervalIdx, interval in enumerate(collisions[targetIndex][1]):
                collisionProbs[intervalIdx] = collisions[targetIndex][1][intervalIdx][1] - collisions[targetIndex][1][intervalIdx][0]
            collisionsProbs = collisionProbs / np.sum(collisionProbs)
            if renderOcclusions:
                occlusionProbs = np.zeros(len(occlusions[targetIndex][1]))
                for intervalIdx, interval in enumerate(occlusions[targetIndex][1]):
                    occlusionProbs[intervalIdx] = abs(occlusions[targetIndex][1][intervalIdx][1] - occlusions[targetIndex][1][intervalIdx][0])
                occlusionProbs = occlusionProbs / np.sum(occlusionProbs)
            # if useShapeModel
            for teapot_i in renderTeapotsList:
                if useShapeModel:
                    teapot_i = -1
                else:
                    currentTeapotModel = teapot_i
                    center = center_teapots[teapot_i]
                print("Ground truth on new teapot" + str(teapot_i))
                for hdrFile, hdrValues in hdrstorender:
                    hdridx = hdrValues[0]
                    envMapCoeffsVals = hdrValues[1]
                    # envMapCoeffs[:] = np.array([[0.5,0,0.0,1,0,0,0,0,0], [0.5,0,0.0,1,0,0,0,0,0],[0.5,0,0.0,1,0,0,0,0,0]]).T
                    envMapFilename = hdrFile
                    # updateEnviornmentMap(envMapFilename, scene)
                    envMapTexture = np.array(imageio.imread(envMapFilename))[:,:,0:3]
                    for numTeapotTrain in range(max(int(trainSize/(lenScenes*len(hdrstorender)*len(renderTeapotsList))),1)):
                        ignore = False
                        # Normalize ambient intensity by the env map's DC luma term.
                        chAmbientIntensityGTVals = 0.75/(0.3*envMapCoeffs[0,0] + 0.59*envMapCoeffs[0,1]+ 0.11*envMapCoeffs[0,2])
                        phiOffsetVals = np.random.uniform(0,2*np.pi, 1)
                        # phiOffset[:] = 0
                        from numpy.random import choice
                        # Sample an object azimuth uniformly within a
                        # length-weighted collision-free interval (degrees -> radians).
                        objAzInterval = choice(len(collisionsProbs), size=1, p=collisionsProbs)
                        objAzGT = np.random.uniform(0,1)*(collisions[targetIndex][1][objAzInterval][1] - collisions[targetIndex][1][objAzInterval][0]) + collisions[targetIndex][1][objAzInterval][0]
                        objAzGT = objAzGT*np.pi/180
                        chObjAzGTVals = objAzGT.copy()
                        if renderOcclusions:
                            azInterval = choice(len(occlusionProbs), size=1, p=occlusionProbs)
                            azGT = np.random.uniform(0,1)*(occlusions[targetIndex][1][azInterval][1] - occlusions[targetIndex][1][azInterval][0]) + occlusions[targetIndex][1][azInterval][0]
                            chAzGTVals = azGT*np.pi/180
                        else:
                            chAzGTVals = np.mod(np.random.uniform(0,np.pi, 1) - np.pi/2, 2*np.pi)
                        chElGTVals = np.random.uniform(0.05,np.pi/2, 1)
                        chLightAzGTVals = np.random.uniform(0,2*np.pi, 1)
                        chLightElGTVals = np.random.uniform(0,np.pi/3, 1)
                        chLightIntensityGTVals = 0
                        chVColorsGTVals = np.random.uniform(0.1,0.9, [1, 3])
                        envMapCoeffsRotatedVals = np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
                        envMapCoeffsRotatedRelVals = np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
                        shapeParams = np.random.randn(latentDim)
                        chShapeParamsGTVals = shapeParams
                        # pEnvMap = SHProjection(envMapTexture, envMapCoeffsRotated)
                        # approxProjection = np.sum(pEnvMap, axis=3)
                        # cv2.imwrite(gtDir + 'sphericalharmonics/envMapProjectionRot' + str(hdridx) + '_rot' + str(int(totalOffset*180/np.pi)) + '_' + str(str(train_i)) + '.jpeg' , 255*approxProjection[:,:,[2,1,0]])
                        #Add groundtruth to arrays
                        trainAzsGT = chAzGTVals
                        trainObjAzsGT = chObjAzGTVals
                        trainElevsGT = chElGTVals
                        trainLightAzsGT = chLightAzGTVals
                        trainLightElevsGT = chLightElGTVals
                        trainLightIntensitiesGT = chLightIntensityGTVals
                        trainVColorGT = chVColorsGTVals
                        # Collapse RGB SH to grayscale with luma weights.
                        lightCoeffs = envMapCoeffsRotatedVals[None, :].copy().squeeze()
                        lightCoeffs = 0.3*lightCoeffs[:,0] + 0.59*lightCoeffs[:,1] + 0.11*lightCoeffs[:,2]
                        trainLightCoefficientsGT = lightCoeffs
                        lightCoeffsRel = envMapCoeffsRotatedRelVals[None, :].copy().squeeze()
                        lightCoeffsRel = 0.3*lightCoeffsRel[:,0] + 0.59*lightCoeffsRel[:,1] + 0.11*lightCoeffsRel[:,2]
                        trainLightCoefficientsGTRel = lightCoeffsRel
                        trainAmbientIntensityGT = chAmbientIntensityGTVals
                        trainEnvMapPhiOffsets = phiOffset
                        trainScenes = sceneNumber
                        trainTeapotIds = teapot_i
                        trainEnvMaps = hdridx
                        trainShapeModelCoeffsGT = chShapeParamsGTVals.copy()
                        trainOcclusions = -1
                        trainIds = train_i
                        trainTargetIndices = targetIndex
                        # Append one record to the resizable HDF5 dataset.
                        gtDatasetToRender.resize(gtDatasetToRender.shape[0]+1, axis=0)
                        gtDatasetToRender[-1] = np.array([(trainIds, trainAzsGT,trainObjAzsGT,trainElevsGT,trainLightAzsGT,trainLightElevsGT,trainLightIntensitiesGT,trainVColorGT,trainScenes,trainTeapotIds,trainEnvMaps,trainOcclusions,trainTargetIndices, trainLightCoefficientsGT, trainLightCoefficientsGTRel, trainAmbientIntensityGT, phiOffsetVals, trainShapeModelCoeffsGT)],dtype=gtDtype)
                        train_i = train_i + 1
                        if np.mod(train_i, 100) == 0:
                            print("Generated " + str(train_i) + " GT instances.")
                            print("Generating groundtruth. Iteration of " + str(range(int(trainSize/(lenScenes*len(hdrstorender)*len(renderTeapotsList))))) + " teapots")
# Choose the ground-truth source for rendering: a previous run's HDF5 file,
# or the records generated above.
if renderFromPreviousGT:
    groundTruthFilename = 'groundtruth/' + previousGTPrefix + '/groundTruth.h5'
    gtDataFileToRender = h5py.File(groundTruthFilename, 'r')
    groundTruthToRender = gtDataFileToRender[previousGTPrefix]
else:
    groundTruthToRender = gtDataFileToRender[prefix]
train_i = nextId
# Sentinels so scene/teapot resources are (re)loaded on first use.
currentScene = -1
currentTeapot = -1
currentTargetIndex = -1
teapot = None
if renderFromPreviousGT:
    rangeGT = np.arange(0, len(groundTruthToRender))
else:
    rangeGT = np.arange(len(groundTruthToRender))
teapot_i = 0
if useShapeModel:
    teapot_i = -1
# addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
for gtIdx in rangeGT[:]:
sceneNumber = groundTruthToRender['trainScenes'][gtIdx]
sceneIdx = scene_io_utils.getSceneIdx(sceneNumber, replaceableScenesFile)
print("Rendering scene: " + str(sceneIdx))
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndicesScene, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
if sceneIdx != currentScene:
# v, f_list, vc, vn, uv, haveTextures_list, textures_list = sceneimport.loadSavedScene(sceneDicFile)
import copy
v2, f_list2, vc2, vn2, uv2, haveTextures_list2, textures_list2 = scene_io_utils.loadSavedScene(sceneDicFile, tex_srgb2lin)
if sceneIdx != currentScene:
if useBlender and not loadBlenderSceneFile:
bpy.ops.wm.read_factory_settings()
scene = scene_io_utils.loadBlenderScene(sceneIdx, replaceableScenesFile)
scene_io_utils.setupScene(scene, roomInstanceNum, scene.world, scene.camera, width, height, 16, useCycles, True)
scene.update()
#Save barebones scene.
elif useBlender and loadBlenderSceneFile:
bpy.ops.wm.read_factory_settings()
scene_io_utils.loadSceneBlendData(sceneIdx, replaceableScenesFile)
scene = bpy.data.scenes['Main Scene']
if useBlender:
scene.render.resolution_x = width #perhaps set resolution in code
scene.render.resolution_y = height
scene.render.tile_x = height
scene.render.tile_y = width
scene.cycles.samples = 3000
bpy.context.screen.scene = scene
addEnvironmentMapWorld(scene)
scene.render.image_settings.file_format = 'OPEN_EXR'
scene.render.filepath = 'opendr_blender.exr'
scene.sequencer_colorspace_settings.name = 'Linear'
scene.display_settings.display_device = 'None'
bpy.context.user_preferences.filepaths.render_cache_directory = '/disk/scratch1/pol/.cache/'
targetModels = []
blender_teapots = []
teapots = [line.strip() for line in open('teapots.txt')]
selection = [ teapots[i] for i in renderTeapotsList]
scene_io_utils.loadTargetsBlendData()
for teapotIdx, teapotName in enumerate(selection):
teapot = bpy.data.scenes[teapotName[0:63]].objects['teapotInstance' + str(renderTeapotsList[teapotIdx])]
teapot.layers[1] = True
teapot.layers[2] = True
targetModels = targetModels + [teapotIdx]
blender_teapots = blender_teapots + [teapot]
scene.cycles.device = 'GPU'
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
bpy.context.user_preferences.system.compute_device = 'CUDA_MULTI_2'
bpy.context.user_preferences.system.compute_device = 'CUDA_0'
bpy.ops.wm.save_userpref()
scene.world.horizon_color = mathutils.Color((1.0,1.0,1.0))
scene.camera.data.clip_start = clip_start
treeNodes=scene.world.node_tree
links = treeNodes.links
unlinkedObj = None
envMapFilename = None
targetIndex = groundTruthToRender['trainTargetIndices'][gtIdx]
if sceneIdx != currentScene or targetIndex != currentTargetIndex:
targetPosition = targetPositions[np.where(targetIndex==np.array(targetIndicesScene))[0]]
import copy
v, f_list, vc, vn, uv, haveTextures_list, textures_list = copy.deepcopy(v2), copy.deepcopy(f_list2), copy.deepcopy(vc2), copy.deepcopy(vn2), copy.deepcopy(uv2), copy.deepcopy(haveTextures_list2), copy.deepcopy(textures_list2)
removeObjectData(len(v) -1 - targetIndex, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
if sceneIdx != currentScene or targetIndex != currentTargetIndex:
if useBlender:
if unlinkedObj != None:
scene.objects.link(unlinkedObj)
unlinkedObj = scene.objects[str(targetIndex)]
scene.objects.unlink(unlinkedObj)
teapot_i = groundTruthToRender['trainTeapotIds'][gtIdx]
teapot_i = 0
if useShapeModel:
teapot_i = -1
if sceneIdx != currentScene or targetIndex != currentTargetIndex or teapot_i != currentTeapot:
if useOpenDR:
rendererGT.makeCurrentContext()
rendererGT.clear()
contextdata.cleanupContext(contextdata.getContext())
if glMode == 'glfw':
glfw.destroy_window(rendererGT.win)
del rendererGT
currentTeapotModel = teapot_i
center = center_teapots[teapot_i]
if currentScene != -1 and currentTargetIndex != -1 and currentTeapot != -1 and (targetIndex != currentTargetIndex or teapot_i != currentTeapot):
removeObjectData(0, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
if useShapeModel:
center = smCenterGT
vGT, vnGT = transformObject(smVerticesGT, smNormalsGT, chScaleGT, chObjAzGT, ch.Ch([0]), ch.Ch([0]), np.array([0, 0, 0]))
addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, smVerticesGT, smFacesGT, smVColorsGT, smNormalsGT, smUVsGT, smHaveTexturesGT, smTexturesListGT)
else:
vGT, vnGT = transformObject(v_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], chScaleGT, chObjAzGT, ch.Ch([0]), ch.Ch([0]), np.array([0, 0, 0]))
addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, vGT, f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vnGT, uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
if useOpenDR:
rendererGT = createRendererGT(glMode, chAzGT, chElGT, chDistGT, center, v, vc, f_list, vn, light_colorGT, chComponentGT, chVColorsGT, targetPosition.copy(), chDisplacementGT, width,height, uv, haveTextures_list, textures_list, frustum, None )
print("Ground truth on new teapot" + str(teapot_i))
if useBlender:
if currentScene != -1 and currentTargetIndex != -1 and currentTeapot != -1 and teapot != None:
if teapot.name in scene.objects:
scene.objects.unlink(teapot)
if useShapeModel:
deleteInstance(teapot)
if not useShapeModel:
teapot = blender_teapots[currentTeapotModel]
else:
teapotMesh = createMeshFromData('teapotShapeModelMesh', chVerticesGT.r.tolist(), faces.astype(np.int32).tolist())
teapotMesh.layers[0] = True
teapotMesh.layers[1] = True
teapotMesh.pass_index = 1
targetGroup = bpy.data.groups.new('teapotShapeModelGroup')
targetGroup.objects.link(teapotMesh)
teapot = bpy.data.objects.new('teapotShapeModel', None)
teapot.dupli_type = 'GROUP'
teapot.dupli_group = targetGroup
teapot.pass_index = 1
mat = makeMaterial('teapotMat', (0,0,0), (0,0,0), 1)
setMaterial(teapotMesh, mat)
# center = centerOfGeometry(teapot.dupli_group.objects, teapot.matrix_world)
placeNewTarget(scene, teapot, targetPosition[:].copy())
teapot.layers[1]=True
teapot.layers[0]=True
original_matrix_world = teapot.matrix_world.copy()
hdridx = groundTruthToRender['trainEnvMaps'][gtIdx]
envMapFilename = ""
for hdrFile, hdrValues in hdritems:
if hdridx == hdrValues[0]:
envMapCoeffs[:] = hdrValues[1]
envMapFilename = hdrFile
# envMapCoeffs[:] = np.array([[0.5,0,0.0,1,0,0,0,0,0], [0.5,0,0.0,1,0,0,0,0,0],[0.5,0,0.0,1,0,0,0,0,0]]).T
# updateEnviornmentMap(envMapFilename, scene)
envMapTexture = np.array(imageio.imread(envMapFilename))[:,:,0:3]
break
if envMapFilename == "":
ipdb.set_trace()
print("Render " + str(gtIdx) + "of " + str(len(groundTruthToRender)))
ignore = False
# chAmbientIntensityGT[:] = groundTruthToRender['trainAmbientIntensityGT'][gtIdx]
chAmbientIntensityGT[:] = 1
phiOffset[:] = groundTruthToRender['trainEnvMapPhiOffsets'][gtIdx]
chObjAzGT[:] = groundTruthToRender['trainObjAzsGT'][gtIdx]
chAzGT[:] = groundTruthToRender['trainAzsGT'][gtIdx]
chElGT[:] = groundTruthToRender['trainElevsGT'][gtIdx]
chLightAzGT[:] = groundTruthToRender['trainLightAzsGT'][gtIdx]
chLightElGT[:] = groundTruthToRender['trainLightElevsGT'][gtIdx]
# chLightIntensityGT[:] = np.random.uniform(5,10, 1)
chVColorsGT[:] = groundTruthToRender['trainVColorGT'][gtIdx]
try:
chShapeParamsGT[:] = groundTruthToRender['trainShapeModelCoeffsGT'][gtIdx]
except:
chShapeParamsGT[:] = np.random.randn(latentDim)
if useOpenDR:
occlusion = getOcclusionFraction(rendererGT)
vis_occluded = np.array(rendererGT.indices_image==1).copy().astype(np.bool)
vis_im = np.array(rendererGT.image_mesh_bool([0])).copy().astype(np.bool)
if occlusion > 0.9:
ignore = True
if not ignore:
#Ignore if camera collides with occluding object as there are inconsistencies with OpenDR and Blender.
cameraEye = np.linalg.inv(np.r_[rendererGT.camera.view_mtx, np.array([[0,0,0,1]])])[0:3,3]
vDists = rendererGT.v.r[rendererGT.f[rendererGT.visibility_image[rendererGT.visibility_image != 4294967295].ravel()].ravel()] - cameraEye
if np.min(np.linalg.norm(vDists,axis=1)) <= clip_start:
ignore = True
if not ignore and useBlender:
envMapTexture = cv2.resize(src=envMapTexture, dsize=(360,180))
# envMapTexture = skimage.transform.resize(images[test_i], [height,width])
envMapGray = 0.3*envMapTexture[:,:,0] + 0.59*envMapTexture[:,:,1] + 0.11*envMapTexture[:,:,2]
envMapGrayMean = np.mean(envMapGray, axis=(0,1))
envMapGrayRGB = np.concatenate([envMapGray[...,None], envMapGray[...,None], envMapGray[...,None]], axis=2)/envMapGrayMean
envMapCoeffsNew = light_probes.getEnvironmentMapCoefficients(envMapGrayRGB, 1, 0, 'equirectangular')
pEnvMap = SHProjection(envMapTexture, envMapCoeffsNew)
# pEnvMap = SHProjection(envMapGrayRGB, envMapCoeffs)
approxProjection = np.sum(pEnvMap, axis=3).astype(np.float32)
# envMapCoeffsNewRE = light_probes.getEnvironmentMapCoefficients(approxProjectionRE, 1, 0, 'equirectangular')
# pEnvMapRE = SHProjection(envMapTexture, envMapCoeffsNewRE)
# # pEnvMap = SHProjection(envMapGrayRGB, envMapCoeffs)
# approxProjectionRE = np.sum(pEnvMapRE, axis=3).astype(np.float32)
approxProjection[approxProjection<0] = 0
cv2.imwrite(gtDir + 'im.exr',approxProjection)
# updateEnviornmentMap(envMapFilename, scene)
updateEnviornmentMap(gtDir + 'im.exr', scene)
rotateEnviornmentMap(totalOffset.r.copy(), scene)
cv2.imwrite(gtDir + 'sphericalharmonics/envMapProjOr' + str(train_i) + '.jpeg' , 255*approxProjection[:,:,[2,1,0]])
cv2.imwrite(gtDir + 'sphericalharmonics/envMapGrayOr' + str(train_i) + '.jpeg' , 255*envMapGrayRGB[:,:,[2,1,0]])
links.remove(treeNodes.nodes['lightPathNode'].outputs[0].links[0])
scene.world.cycles_visibility.camera = True
scene.camera.data.type ='PANO'
scene.camera.data.cycles.panorama_type = 'EQUIRECTANGULAR'
scene.render.resolution_x = 360#perhaps set resolution in code
scene.render.resolution_y = 180
roomInstance = scene.objects[str(roomInstanceNum)]
roomInstance.cycles_visibility.camera = False
roomInstance.cycles_visibility.shadow = False
teapot.cycles_visibility.camera = False
teapot.cycles_visibility.shadow = True
# image = cv2.imread(scene.render.filepath)
# image = np.float64(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))/255.0
scene.render.image_settings.file_format = 'OPEN_EXR'
scene.render.filepath = gtDir + 'sphericalharmonics/envMap' + str(train_i) + '.exr'
# bpy.context.user_preferences.system.compute_device_type = 'NONE'
# bpy.context.user_preferences.system.compute_device = 'CPU'
scene.cycles.samples = 1000
scene.camera.up_axis = 'Z'
# placeCamera(scene.camera, 0, 0, 1, )
scene.camera.location = center[:].copy() + targetPosition[:].copy()
look_at(scene.camera, center[:].copy() + targetPosition[:].copy() + mathutils.Vector((1,0,0)))
scene.update()
bpy.ops.render.render( write_still=True )
imageEnvMap = np.array(imageio.imread(scene.render.filepath))[:,:,0:3]
cv2.imwrite(gtDir + 'sphericalharmonics/envMapCycles' + str(train_i) + '.jpeg' , 255*imageEnvMap[:,:,[2,1,0]])
envMapCoeffs = light_probes.getEnvironmentMapCoefficients(imageEnvMap, 1, 0, 'equirectangular')
pEnvMap = SHProjection(envMapTexture, envMapCoeffs)
approxProjection = np.sum(pEnvMap, axis=3)
cv2.imwrite(gtDir + 'sphericalharmonics/envMapCyclesProjection' + str(train_i) + '.jpeg' , 255*approxProjection[:,:,[2,1,0]])
links.new(treeNodes.nodes['lightPathNode'].outputs[0], treeNodes.nodes['mixShaderNode'].inputs[0])
scene.cycles.samples = 3000
scene.render.filepath = 'opendr_blender.exr'
roomInstance.cycles_visibility.camera = True
scene.render.image_settings.file_format = 'OPEN_EXR'
scene.render.resolution_x = width#perhaps set resolution in code
scene.render.resolution_y = height
scene.camera.data.type ='PERSP'
scene.world.cycles_visibility.camera = True
scene.camera.data.cycles.panorama_type = 'FISHEYE_EQUISOLID'
teapot.cycles_visibility.camera = True
teapot.cycles_visibility.shadow = True
# updateEnviornmentMap(envMapFilename, scene)
if useBlender:
envMapCoeffsRotated[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(0), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
envMapCoeffsRotatedRel[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(-chObjAzGT.r), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
else:
envMapCoeffsRotated[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
envMapCoeffsRotatedRel[:] = np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]]
if useBlender and not ignore:
azimuthRot = mathutils.Matrix.Rotation(chObjAzGT.r[:].copy(), 4, 'Z')
teapot.matrix_world = mathutils.Matrix.Translation(original_matrix_world.to_translation()) * azimuthRot * (mathutils.Matrix.Translation(-original_matrix_world.to_translation())) * original_matrix_world
placeCamera(scene.camera, -chAzGT.r[:].copy()*180/np.pi, chElGT.r[:].copy()*180/np.pi, chDistGT.r[0].copy(), center[:].copy() + targetPosition[:].copy())
setObjectDiffuseColor(teapot, chVColorsGT.r.copy())
if useShapeModel:
mesh = teapot.dupli_group.objects[0]
for vertex_i, vertex in enumerate(mesh.data.vertices):
vertex.co = mathutils.Vector(chVerticesGT.r[vertex_i])
# ipdb.set_trace()
scene.update()
bpy.ops.render.render( write_still=True )
image = np.array(imageio.imread(scene.render.filepath))[:,:,0:3]
image[image>1]=1
blenderRender = image
blenderRenderGray = 0.3*blenderRender[:,:,0] + 0.59*blenderRender[:,:,1] + 0.11*blenderRender[:,:,2]
rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
#For some unkown (yet) reason I need to correct average intensity in OpenDR a few times before it gets it right:
meanIntensityScale = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale
rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
meanIntensityScale2 = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale2
rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
meanIntensityScale3 = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale3
rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
meanIntensityScale4 = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale4
rendererGTGray = 0.3*rendererGT[:,:,0].r[:] + 0.59*rendererGT[:,:,1].r[:] + 0.11*rendererGT[:,:,2].r[:]
meanIntensityScale5 = np.mean(blenderRenderGray[vis_occluded])/np.mean(rendererGTGray[vis_occluded]).copy()
chAmbientIntensityGT[:] = chAmbientIntensityGT.r[:].copy()*meanIntensityScale5
lin2srgb(blenderRender)
if useOpenDR:
image = rendererGT.r[:].copy()
lin2srgb(image)
if useBlender and not ignore and useOpenDR and np.mean(rendererGTGray,axis=(0,1)) < 0.01:
ignore = True
if not ignore:
# hogs = hogs + [imageproc.computeHoG(image).reshape([1,-1])]
# illumfeats = illumfeats + [imageproc.featuresIlluminationDirection(image,20)]
if useBlender:
cv2.imwrite(gtDir + 'images/im' + str(train_i) + '.jpeg' , 255*blenderRender[:,:,[2,1,0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if useOpenDR:
cv2.imwrite(gtDir + 'images_opendr/im' + str(train_i) + '.jpeg' , 255*image[:,:,[2,1,0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
# cv2.imwrite(gtDir + 'images_opendr/im' + str(train_i) + '.jpeg' , 255*image[:,:,[2,1,0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if useOpenDR:
np.save(gtDir + 'masks_occlusion/mask' + str(train_i)+ '.npy', vis_occluded)
#Add groundtruth to arrays
trainAzsGT = chAzGT.r
trainObjAzsGT = chObjAzGT.r
trainElevsGT = chElGT.r
trainLightAzsGT = chLightAzGT.r
trainLightElevsGT = chLightElGT.r
trainLightIntensitiesGT = groundTruthToRender['trainLightIntensitiesGT'][gtIdx]
trainVColorGT = chVColorsGT.r
lightCoeffs = envMapCoeffsRotated.r[None, :].copy().squeeze()
lightCoeffs = 0.3*lightCoeffs[:,0] + 0.59*lightCoeffs[:,1] + 0.11*lightCoeffs[:,2]
trainLightCoefficientsGT = lightCoeffs
lightCoeffsRel = envMapCoeffsRotatedRel.r[None, :].copy().squeeze()
lightCoeffsRel = 0.3*lightCoeffsRel[:,0] + 0.59*lightCoeffsRel[:,1] + 0.11*lightCoeffsRel[:,2]
trainLightCoefficientsGTRel = lightCoeffsRel
trainAmbientIntensityGT = chAmbientIntensityGT.r
trainEnvMapPhiOffsets = phiOffset
trainScenes = sceneNumber
trainTeapotIds = teapot_i
trainEnvMaps = hdridx
trainShapeModelCoeffsGT = chShapeParamsGT.r.copy()
trainOcclusions = occlusion
trainIds = train_i
trainTargetIndices = targetIndex
gtDataset.resize(gtDataset.shape[0]+1, axis=0)
gtDataset[-1] = np.array([(trainIds, trainAzsGT,trainObjAzsGT,trainElevsGT,trainLightAzsGT,trainLightElevsGT,trainLightIntensitiesGT,trainVColorGT,trainScenes,trainTeapotIds,trainEnvMaps,trainOcclusions,trainTargetIndices, trainLightCoefficientsGT, trainLightCoefficientsGTRel, trainAmbientIntensityGT, phiOffset.r, trainShapeModelCoeffsGT)],dtype=gtDtype)
gtDataFile.flush()
train_i = train_i + 1
currentScene = sceneIdx
currentTargetIndex = targetIndex
currentTeapot = teapot_i
# np.savetxt(gtDir + 'data.txt',np.array(np.hstack([trainIds[:,None], trainAzsGT[:,None], trainObjAzsGT[:,None], trainElevsGT[:,None], phiOffsets[:,None], trainOcclusions[:,None]])), fmt="%g")
gtDataFile.close()
gtDataFileToRender.close() | 44,438 | 44.392237 | 946 | py |
inversegraphics | inversegraphics-master/azimuth_test.py | #!/usr/bin/env python3.4m
from blender_utils import *
# ---------------------------------------------------------------------------
# Blender scene setup for the azimuth-matching experiment.
# NOTE(review): bpy, mathutils, numpy, scipy, cv2, plt and the helpers used
# below (look_at, loadData, modelHeight, ...) are presumably re-exported by
# the star import of blender_utils above -- verify before touching imports.
# ---------------------------------------------------------------------------
# bpy.ops.render.render( write_still=True )
lines = [line.strip() for line in open('teapots.txt')]  # teapot model paths, one per line
# lamp = bpy.data.scenes['Scene'].objects[1]
# lamp.location = (0,0.0,1.5)
# Top area light placed above the object.
lamp_data = bpy.data.lamps.new(name="LampTopData", type='AREA')
lamp = bpy.data.objects.new(name="LampTop", object_data=lamp_data)
lamp.location = (0,0.0,2)
lamp.data.energy = 0.004
lamp.data.size = 0.5
lamp.data.use_diffuse = True
# lamp.data.use_nodes = True
# Bottom point fill light: diffuse only, no specular highlights.
lamp_data2 = bpy.data.lamps.new(name="LampBotData", type='POINT')
lamp2 = bpy.data.objects.new(name="LampBot", object_data=lamp_data2)
lamp2.location = (0,0.0,-1.0)
lamp2.data.energy = 0.2
# lamp.data.size = 0.25
lamp2.data.use_diffuse = True
lamp2.data.use_specular = False
# lamp2.data.use_nodes = True
# Camera taken from Blender's default scene; placed at the reference pose
# (azimuth 0, elevation 0, distance 0.5) looking at the origin.
camera = bpy.data.scenes['Scene'].objects[2]
# NOTE(review): 60 * 180 / numpy.pi looks like an inverted degree-to-radian
# conversion (radians(60) would be 60*numpy.pi/180); the same expression
# appears elsewhere in this project -- confirm the intended field of view.
camera.data.angle = 60 * 180 / numpy.pi
distance = 0.5
originalLoc = mathutils.Vector((0,-distance,0.0))
elevation = 0.0
azimuth = 0.0
elevationRot = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
azimuthRot = mathutils.Matrix.Rotation(radians(-azimuth), 4, 'Z')
location = azimuthRot * elevationRot * originalLoc
camera.location = location
look_at(camera, mathutils.Vector((0,0,0)))
world = bpy.context.scene.world
# Environment lighting
world.light_settings.use_environment_light = True
world.light_settings.environment_energy = 0.15
world.horizon_color = mathutils.Color((0.0,0.0,0.0))
# Render resolution in pixels.
width = 230
height = 230
# Ground-truth annotations and test images (loadData is a star-imported helper).
data, images, experiments = loadData()
groundTruthEls = data['altitudes'][0][0][0]
groundTruthAzs = data['azimuths'][0][0][0]
filenames = [name[0] for name in data['filenames'][0][0][0][:]]
ids = [name[0] for name in data['ids'][0][0][0][:]]
# Ground-truth azimuth encoded as (cos, sin) pairs (duplicated columns).
labels = numpy.column_stack((numpy.cos(groundTruthAzs*numpy.pi/180), numpy.sin(groundTruthAzs*numpy.pi/180), numpy.cos(groundTruthAzs*numpy.pi/180.0), numpy.sin(groundTruthAzs*numpy.pi/180.0)))
output = scipy.io.loadmat('../data/crossval6div2-hog8-alldataexperiments.mat')['output_data']
numpy.random.seed(1)  # fixed seed so the test-case permutation is reproducible
# Canny edge-detector thresholds for rendered templates vs. test images.
minThresTemplate = 10
maxThresTemplate = 100
minThresImage = 50
maxThresImage = 150
baseDir = '../databaseFull/models/'
# Teapot models evaluated in the experiment.
experimentTeapots = ['teapots/fa1fa0818738e932924ed4f13e49b59d/Teapot N300912','teapots/c7549b28656181c91bff71a472da9153/Teapot N311012', 'teapots/1c43a79bd6d814c84a0fee00d66a5e35/Teapot', 'teapots/a7fa82f5982edfd033da2d90df7af046/Teapot_fixed', 'teapots/8e6a162e707ecdf323c90f8b869f2ce9/Teapot N280912', 'teapots/12b81ec72a967dc1714fc48a3b0c961a/Teapot N260113_fixed']
# experimentTeapots = ['teapots/fa1fa0818738e932924ed4f13e49b59d/Teapot N300912','teapots/c7549b28656181c91bff71a472da9153/Teapot N311012', 'teapots/1c43a79bd6d814c84a0fee00d66a5e35/Teapot']
outputExperiments = []
# distanceTypes = ['chamferDataToModel', 'robustChamferDataToModel', 'sqDistImages', 'robustSqDistImages']
distanceTypes = ['chamferDataToModel', 'ignoreSqDistImages', 'sqDistImages', 'chamferModelToData']
# Azimuth-fitting experiment: for each test teapot model and each distance
# metric, sweep the camera azimuth in 5-degree steps, render the model with
# Blender, score every render against the ground-truth test image, and record
# the best-matching azimuth together with its signed angular error.
for teapotTest in experimentTeapots:
    robust = True
    robustScale = 0
    for distanceType in distanceTypes:
        # distanceTypes alternate non-robust / robust metric variants, so the
        # flag is flipped on every iteration.  BUGFIX: the original used the
        # bitwise operator (robust = ~robust); on a bool that yields -2 / 1,
        # never False, which left both 'if robust is False' branches dead and
        # robustScale permanently 0.
        robust = not robust
        if robust is False:
            robustScale = 0
        experiment = {}
        # Test images whose id refers to the current teapot model.
        indices = [i for i, s in enumerate(ids) if teapotTest in s]
        selTest = indices
        selTest = numpy.random.permutation(selTest)
        numTests = len(selTest)
        teapot = teapotTest + '_cleaned'
        fullTeapot = baseDir + teapot
        print("Reading " + fullTeapot + '.dae')
        # Fresh Blender scene for this (teapot, distanceType) pass.
        bpy.ops.scene.new()
        bpy.context.scene.name = teapot
        scene = bpy.context.scene
        bpy.context.scene.render.engine = 'CYCLES'
        # bpy.context.scene.cycles.samples = 128
        scene.camera = camera
        scene.render.resolution_x = width #perhaps set resolution in code
        scene.render.resolution_y = height
        scene.render.resolution_percentage = 100
        scene.world = world
        scene.render.filepath = teapot + '.png'
        bpy.utils.collada_import(fullTeapot + '.dae')
        # modifySpecular(scene, 0.3)
        # ipdb.set_trace()
        # Scale the imported model so it fits canonical height/width bounds.
        minZ, maxZ = modelHeight(scene)
        minY, maxY = modelWidth(scene)
        scaleZ = 0.254/(maxZ-minZ)
        scaleY = 0.1778/(maxY-minY)
        scale = min(scaleZ, scaleY)
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                scaleMat = mathutils.Matrix.Scale(scale, 4)
                mesh.matrix_world = scaleMat * mesh.matrix_world
        minZ, maxZ = modelHeight(scene)
        scene.objects.link(lamp2)
        scene.objects.link(lamp)
        # lamp2.location = (0,0, 2)
        # Re-center the model's geometry at the origin.
        center = centerOfGeometry(scene)
        for mesh in scene.objects:
            if mesh.type == 'MESH':
                mesh.matrix_world = mathutils.Matrix.Translation(-center) * mesh.matrix_world
        #Rotate the object to the azimuth angle we define as 0.
        rot = mathutils.Matrix.Rotation(radians(90), 4, 'Z')
        rotateMatrixWorld(scene, rot)
        scene.update()
        camera.data.angle = 60 * 180 / numpy.pi
        # Per-pass result accumulators.
        performance = numpy.array([])
        elevations = numpy.array([])
        groundTruthAzimuths = numpy.array([])
        bestAzimuths= numpy.array([])
        # Subsample roughly 100 evenly spaced test cases.
        expSelTest = numpy.arange(0,numTests,int(numTests/100))
        for selTestNum in expSelTest:
            test = selTest[selTestNum]
            groundTruthAz = groundTruthAzs[test]
            groundTruthEl = groundTruthEls[test]
            scores = []
            azimuths = []
            directory = 'aztest/' + '_' + teapot.replace("/", "") + '/' + distanceType
            if not os.path.exists(directory):
                os.makedirs(directory)
            if not os.path.exists(directory + 'test_samples'):
                os.makedirs(directory + 'test_samples')
            numDir = directory + 'test_samples/num' + str(test) + '_azim' + str(int(groundTruthAz)) + '_elev' + str(int(groundTruthEl)) + '/'
            if not os.path.exists(numDir):
                os.makedirs(numDir)
            # Ground-truth image (RGB -> BGR for OpenCV) and its Canny edges.
            rgbTestImage = numpy.transpose(images["images"][test])
            testImage = cv2.cvtColor(numpy.float32(rgbTestImage*255), cv2.COLOR_RGB2BGR)/255.0
            testImageEdges = cv2.Canny(numpy.uint8(testImage*255), minThresImage,maxThresImage)
            cv2.imwrite(numDir + "image_canny" + ".png" , testImageEdges)
            cv2.imwrite(numDir + "image" + ".png" , numpy.uint8(testImage*255))
            score = numpy.finfo(numpy.float64).max
            elevation = groundTruthEls[test]
            # elevation = -45
            azimuth = 0
            center = centerOfGeometry(scene)
            elevationRot = mathutils.Matrix.Rotation(radians(-elevation), 4, 'X')
            # azimuthRot = mathutils.Matrix.Rotation(radians(azimuth), 4, 'Z')
            # location = azimuthRot * elevationRot * (center + originalLoc)
            # camera.location = location
            # scene.update()
            # look_at(camera, center)
            # scene.update()
            # bpy.ops.render.render( write_still=False )
            # blendImage = bpy.data.images['Render Result']
            # image = numpy.flipud(numpy.array(blendImage.extract_render(scene=scene)).reshape([height/2,width/2,4]))
            # image[numpy.where(image > 1)] = 1
            # distance = getChamferDistance(testImage, image, minThresImage, maxThresImage, minThresTemplate, maxThresTemplate)
            # Sweep the camera around the object in 5-degree azimuth steps.
            for azimuth in numpy.arange(0,360,5):
                azimuthRot = mathutils.Matrix.Rotation(radians(azimuth), 4, 'Z')
                location = azimuthRot * elevationRot * (center + originalLoc)
                camera.location = location
                scene.update()
                look_at(camera, center)
                scene.update()
                # NOTE: 'distance' here still holds the previous iteration's
                # score (or the module-level 0.5 default); it only affects this
                # debug filename, which is never written (write_still=False).
                scene.render.filepath = directory + teapot.replace("/", "") + "blender_" + '_' + str(test) + "_az" + '%.1f' % azimuth + '_dist' + '%.1f' % distance + '.png'
                bpy.ops.render.render( write_still=False )
                # image = cv2.imread(scene.render.filepath, cv2.IMREAD_ANYDEPTH)
                blendImage = bpy.data.images['Render Result']
                image = numpy.flipud(numpy.array(blendImage.extract_render(scene=scene)).reshape([height/scene.render.resolution_percentage/100,width/scene.render.resolution_percentage/100,4]))[7:107,7:107,0:3]
                # Truncate intensities larger than 1.
                image[numpy.where(image > 1)] = 1
                # ipdb.set_trace()
                image[0:20, 75:100, :] = 0
                image = cv2.cvtColor(numpy.float32(image*255), cv2.COLOR_RGB2BGR)/255.0
                methodParams = {'scale': robustScale, 'minThresImage': minThresImage, 'maxThresImage': maxThresImage, 'minThresTemplate': minThresTemplate, 'maxThresTemplate': maxThresTemplate}
                distance = scoreImage(testImage, image, distanceType, methodParams)
                cv2.imwrite(numDir + 'image' + "_az" + '%.1f' % azimuth + '_dist' + '%.1f' % distance + '.png', numpy.uint8(image*255.0))
                # Keep the best-scoring render (and its edge map) seen so far.
                if distance <= score:
                    imageEdges = cv2.Canny(numpy.uint8(image*255.0), minThresTemplate,maxThresTemplate)
                    bestImageEdges = imageEdges
                    bestImage = image
                    score = distance
                scores.append(distance)
                azimuths.append(azimuth)
            bestAzimuth = azimuths[numpy.argmin(scores)]
            if robust is False:
                # Robust scale estimate (MAD-style 1.4826 factor on the median
                # score), consumed via methodParams by the following robust
                # variant of the same metric.
                robustScale = 1.4826 * numpy.sqrt(numpy.median(scores))
            # Signed angular error wrapped to (-180, 180].
            error = numpy.arctan2(numpy.sin((groundTruthAz-bestAzimuth)*numpy.pi/180), numpy.cos((groundTruthAz-bestAzimuth)*numpy.pi/180))*180/numpy.pi
            performance = numpy.append(performance, error)
            elevations = numpy.append(elevations, elevation)
            bestAzimuths = numpy.append(bestAzimuths, bestAzimuth)
            groundTruthAzimuths = numpy.append(groundTruthAzimuths, groundTruthAz)
            cv2.imwrite(numDir + 'bestImage' + "_canny_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png' , bestImageEdges)
            cv2.imwrite(numDir + 'bestImage' + "_az" + '%.1f' % bestAzimuth + '_dist' + '%.1f' % score + '.png', numpy.uint8(bestImage*255.0))
            imgEdges = cv2.Canny(numpy.uint8(testImage*255), minThresImage,maxThresImage)
            bwEdges1 = cv2.distanceTransform(~imgEdges, cv2.DIST_L2, 5)
            disp = cv2.normalize(bwEdges1, bwEdges1, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            cv2.imwrite(numDir + 'dist_transform' + '.png', disp)
            # Per-test-case distance-vs-azimuth diagnostic plot.
            plt.plot(azimuths, numpy.array(scores))
            plt.xlabel('Azimuth (degrees)')
            plt.ylabel('Distance')
            plt.title('Chamfer distance')
            plt.axvline(x=bestAzimuth, linewidth=2, color='b', label='Minimum distance azimuth')
            plt.axvline(x=groundTruthAz, linewidth=2, color='g', label='Ground truth azimuth')
            plt.axvline(x=(bestAzimuth + 180) % 360, linewidth=1, color='b', ls='--', label='Minimum distance azimuth + 180')
            fontP = FontProperties()
            fontP.set_size('small')
            x1,x2,y1,y2 = plt.axis()
            plt.axis((0,360,0,y2))
            # plt.legend()
            plt.savefig(numDir + 'performance.png')
            plt.clf()
        experiment = {'methodParams': methodParams, 'distanceType': distanceType, 'teapot':teapot, 'bestAzimuths':bestAzimuths, 'performance': performance, 'elevations':elevations, 'groundTruthAzimuths': groundTruthAzimuths, 'selTest':selTest, 'expSelTest':expSelTest}
        outputExperiments.append(experiment)
        with open(directory + 'experiment.pickle', 'wb') as pfile:
            pickle.dump(experiment, pfile)
        # Summary scatter plots / histogram for this (teapot, metric) pass.
        plt.scatter(elevations, performance)
        plt.xlabel('Elevation (degrees)')
        plt.ylabel('Angular error')
        x1,x2,y1,y2 = plt.axis()
        plt.axis((0,90,-180,180))
        plt.title('Performance scatter plot')
        plt.savefig(directory + '_elev-performance-scatter.png')
        plt.clf()
        plt.scatter(groundTruthAzimuths, performance)
        plt.xlabel('Azimuth (degrees)')
        plt.ylabel('Angular error')
        x1,x2,y1,y2 = plt.axis()
        plt.axis((0,360,-180,180))
        plt.title('Performance scatter plot')
        plt.savefig(directory + '_azimuth-performance-scatter.png')
        plt.clf()
        plt.hist(performance, bins=36)
        plt.xlabel('Angular error')
        plt.ylabel('Counts')
        x1,x2,y1,y2 = plt.axis()
        plt.axis((-180,180,0, y2))
        plt.title('Performance histogram')
        plt.savefig(directory + '_performance-histogram.png')
        plt.clf()
    # experimentFile = 'aztest/teapotsc7549b28656181c91bff71a472da9153Teapot N311012_cleaned.pickle'
    # with open(experimentFile, 'rb') as pfile:
    #     experiment = pickle.load( pfile)
    # LaTeX summary table (mean/median absolute angular error of the last pass).
    headers=["Best global fit", ""]
    table = [["Mean angular error", numpy.mean(numpy.abs(performance))],["Median angualar error",numpy.median(numpy.abs(performance))]]
    performanceTable = tabulate(table, tablefmt="latex", floatfmt=".1f")
    with open(directory + 'performance.tex', 'w') as expfile:
        expfile.write(performanceTable)
    # Cleanup
    # for obji in scene.objects:
    # if obji.type == 'MESH':
    # obji.user_clear()
    # bpy.data.objects.remove(obji)
    # scene.user_clear()
    # bpy.ops.scene.delete()
    print("Finished the experiment")
| 13,769 | 36.82967 | 369 | py |
inversegraphics | inversegraphics-master/export_occlusions.py | __author__ = 'pol'
import matplotlib
matplotlib.use('Qt4Agg')
import bpy
import scene_io_utils
import mathutils
from math import radians
import timeit
import time
import opendr
import chumpy as ch
import geometry
import image_processing
import numpy as np
import cv2
from blender_utils import *
import glfw
import generative_models
import matplotlib.pyplot as plt
from opendr_utils import *
import OpenGL.GL as GL
import light_probes
import imageio
from OpenGL import contextdata
plt.ion()  # interactive matplotlib so figures do not block the script
#########################################
# Initialization starts here
#########################################
#Main script options:
useBlender = False
loadBlenderSceneFile = True
groundTruthBlender = False
useCycles = True
unpackModelsFromBlender = False
unpackSceneFromBlender = False
loadSavedSH = False
# OpenGL context backend used by the OpenDR renderers.
glModes = ['glfw','mesa']
glMode = glModes[0]
width, height = (150, 150)  # render resolution in pixels
win = -1
if glMode == 'glfw':
    #Initialize base GLFW context for the Demo and to share context among all renderers.
    glfw.init()
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    # glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    glfw.window_hint(glfw.DEPTH_BITS,32)
    glfw.window_hint(glfw.VISIBLE, GL.GL_FALSE)  # off-screen: keep the window hidden
    # win = glfw.create_window(width, height, "Demo", None, None)
    # glfw.make_context_current(win)
# Camera parameters.
# NOTE(review): 60 * 180 / numpy.pi looks like an inverted degree-to-radian
# conversion (radians(60) would be 60*numpy.pi/180); the same expression is
# used elsewhere in the project -- confirm the intended field of view.
angle = 60 * 180 / numpy.pi
clip_start = 0.01
clip_end = 10
frustum = {'near': clip_start, 'far': clip_end, 'width': width, 'height': height}
camDistance = 0.4
# Teapot (target) models to render, one path per line in teapots.txt.
teapots = [line.strip() for line in open('teapots.txt')]
renderTeapotsList = np.arange(len(teapots))
# Scene selection: look up scene metadata in the replaceable-scenes index.
sceneIdx = 0
replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup_new.txt'
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
targetParentIdx = 0  # which replaceable slot of the scene hosts the target
targetIndex = targetIndices[targetParentIdx]
targetParentPosition = targetPositions[targetParentIdx]
targetPosition = targetParentPosition
tex_srgb2lin = True  # convert textures from sRGB to linear on load
v, f_list, vc, vn, uv, haveTextures_list, textures_list = scene_io_utils.loadSavedScene(sceneDicFile, tex_srgb2lin)
# Remove the object currently occupying the target slot from the scene arrays.
removeObjectData(int(targetIndex), v, f_list, vc, vn, uv, haveTextures_list, textures_list)
targetModels = []
blender_teapots = []
teapots = [line.strip() for line in open('teapots.txt')]
selection = [ teapots[i] for i in renderTeapotsList]
scene_io_utils.loadTargetsBlendData()
# Collect the Blender object of every teapot instance and enable it on
# layers 1 and 2 (scene names are truncated to Blender's 63-char limit).
for teapotIdx, teapotName in enumerate(selection):
    teapot = bpy.data.scenes[teapotName[0:63]].objects['teapotInstance' + str(renderTeapotsList[teapotIdx])]
    teapot.layers[1] = True
    teapot.layers[2] = True
    targetModels = targetModels + [teapot]
    blender_teapots = blender_teapots + [teapot]
v_teapots, f_list_teapots, vc_teapots, vn_teapots, uv_teapots, haveTextures_list_teapots, textures_list_teapots, vflat, varray, center_teapots = scene_io_utils.loadTeapotsOpenDRData(renderTeapotsList, useBlender, unpackModelsFromBlender, targetModels)
azimuth = np.pi
chCosAz = ch.Ch([np.cos(azimuth)])
chSinAz = ch.Ch([np.sin(azimuth)])
chAz = 2*ch.arctan(chSinAz/(ch.sqrt(chCosAz**2 + chSinAz**2) + chCosAz))
chAz = ch.Ch([np.pi/4])
chObjAz = ch.Ch([np.pi/4])
chAzRel = chAz - chObjAz
elevation = 0
chLogCosEl = ch.Ch(np.log(np.cos(elevation)))
chLogSinEl = ch.Ch(np.log(np.sin(elevation)))
chEl = 2*ch.arctan(ch.exp(chLogSinEl)/(ch.sqrt(ch.exp(chLogCosEl)**2 + ch.exp(chLogSinEl)**2) + ch.exp(chLogCosEl)))
chEl = ch.Ch([0.95993109])
chDist = ch.Ch([camDistance])
chObjAzGT = ch.Ch([np.pi*3/2])
chAzGT = ch.Ch([np.pi*3/2])
chAzRelGT = chAzGT - chObjAzGT
chElGT = ch.Ch(chEl.r[0])
chDistGT = ch.Ch([camDistance])
chComponentGT = ch.Ch(np.array([2, 0.25, 0.25, 0.12,-0.17,0.36,0.1,0.,0.]))
chComponent = ch.Ch(np.array([2, 0.25, 0.25, 0.12,-0.17,0.36,0.1,0.,0.]))
chPointLightIntensity = ch.Ch([1])
chPointLightIntensityGT = ch.Ch([1])
chLightAz = ch.Ch([0.0])
chLightEl = ch.Ch([np.pi/2])
chLightDist = ch.Ch([0.5])
chLightDistGT = ch.Ch([0.5])
chLightAzGT = ch.Ch([0.0])
chLightElGT = ch.Ch([np.pi/4])
ligthTransf = computeHemisphereTransformation(chLightAz, chLightEl, chLightDist, targetPosition)
ligthTransfGT = computeHemisphereTransformation(chLightAzGT, chLightElGT, chLightDistGT, targetPosition)
lightPos = ch.dot(ligthTransf, ch.Ch([0.,0.,0.,1.]))[0:3]
lightPos = ch.Ch([targetPosition[0]+0.5,targetPosition[1],targetPosition[2] + 0.5])
lightPosGT = ch.dot(ligthTransfGT, ch.Ch([0.,0.,0.,1.]))[0:3]
chGlobalConstant = ch.Ch([0.5])
chGlobalConstantGT = ch.Ch([0.5])
light_color = ch.ones(3)*chPointLightIntensity
light_colorGT = ch.ones(3)*chPointLightIntensityGT
chVColors = ch.Ch([0.8,0.8,0.8])
chVColorsGT = ch.Ch([0.8,0.8,0.8])
shCoefficientsFile = 'data/sceneSH' + str(sceneIdx) + '.pickle'
chAmbientIntensityGT = ch.Ch([0.025])
clampedCosCoeffs = clampedCosineCoefficients()
chAmbientSHGT = ch.zeros([9])
envMapDic = {}
SHFilename = 'data/LightSHCoefficients.pickle'
with open(SHFilename, 'rb') as pfile:
envMapDic = pickle.load(pfile)
phiOffset = ch.Ch([0])
totalOffset = phiOffset + chObjAzGT
envMapCoeffs = ch.Ch(list(envMapDic.items())[0][1][1])
envMapCoeffsRotated = ch.Ch(np.dot(light_probes.chSphericalHarmonicsZRotation(totalOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]])
envMapCoeffsRotatedRel = ch.Ch(np.dot(light_probes.chSphericalHarmonicsZRotation(phiOffset), envMapCoeffs[[0,3,2,1,4,5,6,7,8]])[[0,3,2,1,4,5,6,7,8]])
shCoeffsRGB = envMapCoeffsRotated
shCoeffsRGBRel = envMapCoeffsRotatedRel
chShCoeffs = 0.3*shCoeffsRGB[:,0] + 0.59*shCoeffsRGB[:,1] + 0.11*shCoeffsRGB[:,2]
chShCoeffsRel = 0.3*shCoeffsRGBRel[:,0] + 0.59*shCoeffsRGBRel[:,1] + 0.11*shCoeffsRGBRel[:,2]
chAmbientSHGT = chShCoeffs.ravel() * chAmbientIntensityGT * clampedCosCoeffs * 10
chAmbientSHGTRel = chShCoeffsRel.ravel() * chAmbientIntensityGT * clampedCosCoeffs
chLightRadGT = ch.Ch([0.1])
chLightDistGT = ch.Ch([0.5])
chLightIntensityGT = ch.Ch([0])
chLightAzGT = ch.Ch([np.pi*3/2])
chLightElGT = ch.Ch([np.pi/4])
angle = ch.arcsin(chLightRadGT/chLightDistGT)
zGT = chZonalHarmonics(angle)
shDirLightGT = chZonalToSphericalHarmonics(zGT, np.pi/2 - chLightElGT, chLightAzGT + chObjAzGT - np.pi/2) * clampedCosCoeffs
shDirLightGTRel = chZonalToSphericalHarmonics(zGT, np.pi/2 - chLightElGT, chLightAzGT - np.pi/2) * clampedCosCoeffs
chComponentGT = chAmbientSHGT
# chComponentGT = ch.Ch(chAmbientSHGT.r[:].copy())
# + shDirLightGT*chLightIntensityGT
chComponentGTRel = chAmbientSHGTRel
# chComponentGTRel = ch.Ch(chAmbientSHGTRel.r[:].copy())
# chComponentGT = chAmbientSHGT.r[:] + shDirLightGT.r[:]*chLightIntensityGT.r[:]
chDisplacement = ch.Ch([0.0, 0.0,0.0])
chDisplacementGT = ch.Ch([0.0,0.0,0.0])
chScale = ch.Ch([1.0,1.0,1.0])
chScaleGT = ch.Ch([1, 1.,1.])
currentTeapotModel = 0
addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
center = center_teapots[currentTeapotModel]
rendererGT = createRendererGT(glMode, chAzGT, chObjAzGT, chElGT, chDistGT, center, v, vc, f_list, vn, light_colorGT, chComponentGT, chVColorsGT, targetPosition[:].copy(), chDisplacementGT, chScaleGT, width,height, uv, haveTextures_list, textures_list, frustum, None )
vis_gt = np.array(rendererGT.indices_image!=1).copy().astype(np.bool)
vis_mask = np.array(rendererGT.indices_image==1).copy().astype(np.bool)
shapeIm = vis_gt.shape
numPixels = shapeIm[0] * shapeIm[1]
def imageGT():
    """Return the current ground-truth image.

    When groundTruthBlender is set, the pre-rendered Blender image is used;
    otherwise a float64 copy of the OpenDR ground-truth renderer's output
    is returned.
    """
    global groundTruthBlender
    global rendererGT
    global blenderRender
    if not groundTruthBlender:
        return np.copy(np.array(rendererGT.r)).astype(np.float64)
    return blenderRender
#########################################
# Initialization ends here
#########################################
replaceableScenesFile = '../databaseFull/fields/scene_replaceables_backup_new.txt'
sceneLines = [line.strip() for line in open(replaceableScenesFile)]
scenesToRender = range(len(sceneLines))[:]
lenScenes = 0
for sceneIdx in scenesToRender:
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
lenScenes += len(targetIndices)
collisionSceneFile = 'data/collisions_new/collisionScene' + str(sceneNumber) + '.pickle'
with open(collisionSceneFile, 'rb') as pfile:
collisions = pickle.load(pfile)
for targetidx, targetIndex in enumerate(targetIndices):
if not collisions[targetIndex][1]:
print("Scene idx " + str(sceneIdx) + " at index " + str(targetIndex) + " collides everywhere.")
sceneOcclusions = {}
for sceneIdx in scenesToRender[:]:
print("Rendering scene: " + str(sceneIdx))
sceneNumber, sceneFileName, instances, roomName, roomInstanceNum, targetIndices, targetPositions = scene_io_utils.getSceneInformation(sceneIdx, replaceableScenesFile)
sceneDicFile = 'data/scene' + str(sceneNumber) + '.pickle'
# v, f_list, vc, vn, uv, haveTextures_list, textures_list = sceneimport.loadSavedScene(sceneDicFile)
import copy
v2, f_list2, vc2, vn2, uv2, haveTextures_list2, textures_list2 = scene_io_utils.loadSavedScene(sceneDicFile, tex_srgb2lin)
collisionSceneFile = 'data/collisions_new/collisionScene' + str(sceneNumber) + '.pickle'
with open(collisionSceneFile, 'rb') as pfile:
collisions = pickle.load(pfile)
targetOcclusions = {}
for targetidx, targetIndex in enumerate(targetIndices):
targetPosition = targetPositions[targetidx]
if collisions[targetIndex][1]:
collisionProbs = np.zeros(len(collisions[targetIndex][1]))
import copy
rendererGT.makeCurrentContext()
rendererGT.clear()
contextdata.cleanupContext(contextdata.getContext())
glfw.destroy_window(rendererGT.win)
del rendererGT
v, f_list, vc, vn, uv, haveTextures_list, textures_list = copy.deepcopy(v2), copy.deepcopy(f_list2), copy.deepcopy(vc2), copy.deepcopy(vn2), copy.deepcopy(uv2), copy.deepcopy(haveTextures_list2), copy.deepcopy(textures_list2)
removeObjectData(len(v) -1 - targetIndex, v, f_list, vc, vn, uv, haveTextures_list, textures_list)
addObjectData(v, f_list, vc, vn, uv, haveTextures_list, textures_list, v_teapots[currentTeapotModel][0], f_list_teapots[currentTeapotModel][0], vc_teapots[currentTeapotModel][0], vn_teapots[currentTeapotModel][0], uv_teapots[currentTeapotModel][0], haveTextures_list_teapots[currentTeapotModel][0], textures_list_teapots[currentTeapotModel][0])
rendererGT = createRendererGT(glMode, chAzGT, chObjAzGT, chElGT, chDistGT, center, v, vc, f_list, vn, light_colorGT, chComponentGT, chVColorsGT, targetPosition.copy(), chDisplacementGT, chScaleGT, width,height, uv, haveTextures_list, textures_list, frustum, None )
# removeObjectData(int(targetIndex-1), v, f_list, vc, vn, uv, haveTextures_list, textures_list)
for intervalIdx, interval in enumerate(collisions[targetIndex][1]):
collisionProbs[intervalIdx] = collisions[targetIndex][1][intervalIdx][1] - collisions[targetIndex][1][intervalIdx][0]
collisionsProbs = collisionProbs / np.sum(collisionProbs)
teapot = None
intersections = []
cameraInterval = 5
for azimuth in np.mod(numpy.arange(270,270+180,cameraInterval), 360):
occludes = False
from numpy.random import choice
objAzInterval = choice(len(collisionsProbs), size=1, p=collisionsProbs)
chAzGT[:] = azimuth*np.pi/180
for elevation in numpy.arange(0,90,cameraInterval):
chElGT[:] = elevation*np.pi/180
#occludes =
occlusion = getOcclusionFraction(rendererGT)
if occlusion > 0.01 and occlusion < 0.95:
occludes = True
print("Found occlusion!")
# cv2.imwrite('tmp/imOcclusion_scene' + str(sceneNumber) + '_tgIndex' + str(targetIndex) + '_az' + str(int(azimuth)) + '.jpeg' , 255*rendererGT.r[:,:,[2,1,0]], [int(cv2.IMWRITE_JPEG_QUALITY), 100])
break
intersections = intersections + [[azimuth, occludes]]
startInterval = True
intervals = []
initInterval = 0
endInterval = 0
for idx, intersection in enumerate(intersections):
if intersection[1]:
if startInterval:
initInterval = intersection[0]
startInterval = False
else:
if not startInterval:
if idx >= 1 and intersections[idx-1][0] != initInterval:
endInterval = intersection[0] - cameraInterval
intervals = intervals + [[initInterval, endInterval]]
startInterval = True
if intersection[1]:
endInterval = intersection[0]
if intersections[0][1]:
intervals = intervals + [[initInterval, endInterval+cameraInterval]]
else:
intervals = intervals + [[initInterval, endInterval]]
targetOcclusions[targetIndex] = (targetParentPosition, intervals)
sceneOcclusions[sceneNumber] = targetOcclusions
with open('data/occlusions_new/occlusionScene' + str(sceneNumber) + '.pickle', 'wb') as pfile:
pickle.dump(targetOcclusions, pfile)
print("Occlusion detection ended.") | 14,109 | 40.378299 | 357 | py |
inversegraphics | inversegraphics-master/recognition_models.py | import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import r2_score
from collections import defaultdict
from sklearn.ensemble import RandomForestRegressor
from sklearn import mixture
import ipdb
from chumpy.ch import MatVecMult, Ch
class segmentCRFModel(Ch):
    """Chumpy node wrapping dense-CRF image segmentation.

    Evaluating the node runs CRF inference on the ground-truth image, using
    the renderer's object mask and boundary map together with ``priorProbs``
    as class priors, and returns the per-class marginals Q.
    """
    dterms = ['renderer', 'groundtruth', 'priorProbs']
    def compute_r(self):
        return self.segmentation()
    def compute_dr_wrt(self, wrt):
        # The CRF segmentation is treated as non-differentiable: no Jacobian
        # is propagated with respect to any input term.
        return None
    def segmentation(self):
        import densecrf_model
        # Pixels where the renderer drew the target object (object index 1).
        vis_im = np.array(self.renderer.indices_image == 1).copy().astype(np.bool)
        bound_im = self.renderer.boundarybool_image.astype(np.bool)
        # NOTE(review): self.resultDir and self.test_i are not dterms; they
        # are presumably attached to the instance by the caller — verify.
        segmentation, Q = densecrf_model.crfInference(self.groundtruth.r, vis_im, bound_im, [self.priorProbs[0], self.priorProbs[1], self.priorProbs[2]],
                                                  self.resultDir + 'imgs/crf/Q_' + str(self.test_i))
        return Q
def evaluatePrediction(azsGT, elevsGT, azsPred, elevsPred):
    """Return signed angular errors (in degrees) between ground truth and
    prediction for azimuth and elevation, wrapped to (-180, 180]."""
    def _wrapped_error_deg(gt, pred):
        # atan2(sin d, cos d) wraps the angular difference d into (-pi, pi].
        delta = gt - pred
        return np.arctan2(np.sin(delta), np.cos(delta)) * 180 / np.pi

    return _wrapped_error_deg(azsGT, azsPred), _wrapped_error_deg(elevsGT, elevsPred)
def trainRandomForest(xtrain, ytrain):
    """Fit a 400-tree random forest regressor (using all CPU cores) on the
    given training data and return the fitted model."""
    forest = RandomForestRegressor(n_estimators=400, n_jobs=-1)
    return forest.fit(xtrain, ytrain)
def testRandomForest(randForest, xtest):
    # Thin wrapper: delegate prediction to the fitted forest.
    return randForest.predict(xtest)
def filteredMean(image, win):
    """Robust mean colour of the central (2*win x 2*win) window of ``image``.

    Pixels whose luma deviates from the window's mean luma by more than two
    standard deviations are treated as outliers and excluded before averaging.

    Args:
        image: H x W x 3 array of pixel values.
        win: half-size (in pixels) of the square window around the centre.

    Returns:
        Length-3 array: per-channel mean of the inlier window pixels.
    """
    # Floor division: float slice indices raise under Python 3 / modern NumPy.
    cy, cx = image.shape[0] // 2, image.shape[1] // 2
    pixels = image[cy - win:cy + win, cx - win:cx + win, :].reshape([-1, 3])
    # ITU-R BT.601-style luma approximation, used only for outlier rejection.
    gray = 0.3 * pixels[:, 0] + 0.59 * pixels[:, 1] + 0.11 * pixels[:, 2]
    stdM = 2
    # Keep pixels whose luma lies within stdM standard deviations of the mean.
    inliers = pixels[np.abs(gray - np.mean(gray)) < stdM * np.std(gray), :]
    # Bug fix: the original discarded the filter result and returned the grand
    # mean of the whole image; return the inlier mean, matching the evident
    # intent (and the per-channel convention of meanColor/medianColor).
    color = np.mean(inliers, axis=0)
    return color
def meanColor(image, win):
    """Per-channel mean colour of the central (2*win x 2*win) window.

    Args:
        image: H x W x 3 array.
        win: half-size of the square window around the image centre.

    Returns:
        Length-3 array: the window mean over both spatial axes.
    """
    # Floor division: float slice indices raise under Python 3 / modern NumPy.
    cy, cx = image.shape[0] // 2, image.shape[1] // 2
    window = image[cy - win:cy + win, cx - win:cx + win, :]
    return np.mean(window.reshape(-1, 3), axis=0)
def medianColor(image, win):
    """Per-channel median colour of the central (2*win x 2*win) window.

    Args:
        image: H x W x 3 array.
        win: half-size of the square window around the image centre.

    Returns:
        Length-3 array of per-channel medians over the window pixels.
    """
    # Floor division: float slice indices raise under Python 3 / modern NumPy.
    cy, cx = image.shape[0] // 2, image.shape[1] // 2
    imageWin = image[cy - win:cy + win, cx - win:cx + win, :]
    return np.median(imageWin.reshape([-1, 3]), axis=0)
def midColor(image):
    """Return the colour (length-3 slice) of the single centre pixel."""
    # Floor division: float pixel indices raise under Python 3 / modern NumPy.
    color = image[image.shape[0] // 2, image.shape[1] // 2, :]
    return color
def colorGMM(image, win):
    # Fit an 8-component spherical Gaussian mixture to colours taken from the
    # image's central (2*win x 2*win) window, then overwrite the mixture
    # weights with a fixed prior favouring the first three components.
    np.random.seed(1)
    gmm = mixture.GMM(n_components=8, covariance_type='spherical')
    # NOTE(review): the trailing [:,3] selects column index 3 of the window
    # instead of flattening it into an (N, 3) pixel list — presumably a bug;
    # verify against the caller. Also `/2` produces float slice bounds under
    # Python 3, so this code appears to target Python 2.
    colors = image[image.shape[0]/2-win:image.shape[0]/2+win,image.shape[1]/2-win:image.shape[1]/2+win,:][:,3]
    gmm.fit(colors)
    # NOTE(review): sklearn's public attribute is `weights_`; assigning the
    # private `_weights` may have no effect — confirm this override works.
    gmm._weights=np.array([0.6,0.3,0.1,0,0,0,0,0])
    return gmm
from scipy.stats import vonmises
def poseGMM(azimuth, elevation):
    """Build a 7-component von Mises mixture over object pose.

    The first component is centred on the supplied (azimuth, elevation) and
    carries most of the mass (0.7); the remaining six are random distractor
    modes drawn uniformly (seeded, so deterministic).

    Args:
        azimuth: azimuth of the dominant mode, in radians.
        elevation: elevation of the dominant mode, in radians.

    Returns:
        (components, vmParamsAz, vmParamsEl): mixture weights and the
        (location, kappa) parameter tuples for azimuth and elevation.
    """
    np.random.seed(1)  # reproducible distractor modes
    components = [0.7, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
    azs = np.random.uniform(0, 2 * np.pi, 6)
    elevs = np.random.uniform(0, np.pi / 2, 6)
    kappa = 50
    # Bug fix: the original indexed azs/elevs with their own float values
    # ("for i in azs: azs[i]"), which fails — iterate the samples directly.
    vmParamsAz = [(az, kappa) for az in azs]
    vmParamsEl = [(el, kappa) for el in elevs]
    vmParamsAz = [(azimuth, kappa)] + vmParamsAz
    vmParamsEl = [(elevation, kappa)] + vmParamsEl
    return components, vmParamsAz, vmParamsEl
def trainLinearRegression(xtrain, ytrain):
    """Fit an ordinary least-squares linear model on (xtrain, ytrain) and
    return the fitted regressor."""
    model = linear_model.LinearRegression(n_jobs=-1)
    model.fit(xtrain, ytrain)
    return model
def testLinearRegression(lrmodel, xtest):
    # Thin wrapper: delegate prediction to the fitted linear model.
    return lrmodel.predict(xtest)
class sphericalHarmonicsModel():
    """Collects (vertex-normal, irradiance) training pairs for fitting
    spherical-harmonics lighting coefficients.

    From a rendered image plus its per-pixel visibility and barycentric maps,
    the constructor selects pixels that lie (almost) on a mesh vertex, divides
    the observed colour by the vertex albedo to estimate irradiance, and
    stores the corresponding vertex normals in ``self.X`` and irradiance
    estimates in ``self.y``.
    """
    def __init__(self, image=None, barycentric=None, visibility=None, SHComponents=None, f=None, vc=None, vn=None):
        self.SHComponents = SHComponents
        self.visibility = visibility
        self.f = f
        self.vc = vc
        self.vn = vn
        # Bug fix: the barycentric argument was dropped (self.barycentric was
        # set to None and then immediately indexed below, a TypeError).
        self.barycentric = barycentric
        # Flat indices of pixels that hit geometry (0xFFFFFFFF marks "no face").
        visible = np.nonzero(visibility.ravel() != 4294967295)[0]
        # Among visible pixels, keep entries whose barycentric coordinate is
        # ~0, i.e. samples lying essentially on a triangle vertex.
        vertexpix = np.where(self.barycentric[visible].ravel() <= 0.01)
        fvis = f[visibility.ravel()[visible]].ravel()[vertexpix]
        vcvis = vc[fvis]
        vnvis = vn[fvis]
        imvis = image[visible]
        # Irradiance estimate: observed colour / albedo (epsilon avoids /0).
        evis = imvis/(vcvis+1e-5)
        self.X = vnvis
        self.y = evis
def solveSHCoefficients(groundtruth, visibility, f, vn, vc):
    """Placeholder for a RANSAC-style spherical-harmonics solver.

    Planned sketch: sample nine vertices, recover irradiance E by dividing
    the observed colour by the albedo vc, relate E to the SH basis evaluated
    at the vertex normals vn, and solve for the coefficients.

    Not implemented: always returns None.
    """
    bestVertices = None
    return bestVertices
| 5,058 | 30.03681 | 153 | py |
inversegraphics | inversegraphics-master/clean_collada.py | import pyassimp
from pyassimp.postprocess import *
modelPath = '../databaseFull/models/teapots/78ed1a0383cd2eab7552b099aebcb24e/Teapot_fixed.dae'
# aiProcessPreset_TargetRealtime_Quality = ( \
# aiProcess_CalcTangentSpace | \
# aiProcess_GenSmoothNormals | \
# aiProcess_JoinIdenticalVertices | \
# aiProcess_ImproveCacheLocality | \
# aiProcess_LimitBoneWeights | \
# aiProcess_RemoveRedundantMaterials | \
# aiProcess_SplitLargeMeshes | \
# aiProcess_Triangulate | \
# aiProcess_GenUVCoords | \
# aiProcess_SortByPType | \
# aiProcess_FindDegenerates | \
# aiProcess_FindInvalidData | \
# 0 )
postprocess = aiProcessPreset_TargetRealtime_Quality
scene = pyassimp.load(modelPath, postprocess)
| 918 | 37.291667 | 94 | py |
inversegraphics | inversegraphics-master/diffrender_opt.py | # Copyright (c) 2015, Javier Gonzalez
# Copyright (c) 2015, the GPy Authors (see GPy AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import GPyOpt
import GPy
import ipdb
from numpy.random import seed
import chumpy as ch
"""
This is a simple demo to demonstrate the use of Bayesian optimization with GPyOpt with some simple options. Run the example by writing:
import GPyOpt
BO_demo_2d = GPyOpt.demos.advanced_optimization_2d()
As a result you should see:
- A plot with the model and the current acquisition function
- A plot with the diagnostic plots of the optimization.
- An object call BO_demo_auto that contains the results of the optimization process (see reference manual for details). Among the available results you have access to the GP model via
>> BO_demo_2d.model
and to the location of the best found location writing.
BO_demo_2d.x_opt
"""
def opendrObjectiveFunction(obj, free_variables):
    # Wrap a chumpy objective `obj` as a plain function of a flat parameter
    # vector, suitable for black-box optimisers (e.g. GPyOpt): each call
    # scatters the candidate values into `free_variables` and reads obj.r.
    def changevars(vs, obj, free_variables):
        # Write the flat vector `vs` back into the chumpy free variables;
        # returns True if any variable actually changed (which triggers a
        # lazy re-evaluation of `obj` on the next .r access).
        vs = vs.ravel()
        cur = 0
        changed = False
        for idx, freevar in enumerate(free_variables):
            sz = freevar.r.size
            newvals = vs[cur:cur+sz].copy().reshape(free_variables[idx].shape)
            if np.max(np.abs(newvals-free_variables[idx]).ravel()) > 0:
                free_variables[idx][:] = newvals
                changed = True
            cur += sz
        return changed
    def objFun(vs):
        # Evaluate the objective for a single candidate (shape (1, d)) or a
        # batch (shape (k, d)); always returns a column vector of values.
        vs = np.array(vs)
        if vs.shape[0] == 1:
            changevars(vs, obj, free_variables)
            return obj.r.reshape([1,1])
        else:
            res = []
            for vs_i in vs:
                changevars(vs_i, obj, free_variables)
                res = res + [obj.r.reshape([1,1])]
            return np.vstack(res)
    return objFun
def opendrObjectiveFunctionCRF(free_variables, rendererGT, renderer, color, chVColors, chSHLightCoeffs, lightCoeffs, free_variables_app_light, resultDir, test_i, stds, method, updateColor=False,minAppLight=False):
    # Build a batch objective for black-box pose optimisation: for each
    # candidate parameter vector it updates the free variables, runs dense-CRF
    # segmentation of the ground-truth image, optionally re-estimates the
    # object colour from the foreground segment, and scores the render with a
    # per-pixel foreground/occluder/background likelihood.
    def changevars(vs, free_variables):
        # Scatter the flat vector `vs` into the chumpy free variables; returns
        # True if any variable actually changed.
        vs = vs.ravel()
        cur = 0
        changed = False
        for idx, freevar in enumerate(free_variables):
            sz = freevar.r.size
            newvals = vs[cur:cur+sz].copy().reshape(free_variables[idx].shape)
            if np.max(np.abs(newvals-free_variables[idx]).ravel()) > 0:
                free_variables[idx][:] = newvals
                changed = True
            cur += sz
        return changed
    def objFun(vs):
        vs = np.array(vs)
        res = []
        for vs_it, vs_i in enumerate(vs):
            changevars(vs_i, free_variables)
            import densecrf_model
            # Object mask (index 1) and boundary map from the current render.
            vis_im = np.array(renderer.indices_image == 1).copy().astype(np.bool)
            bound_im = renderer.boundarybool_image.astype(np.bool)
            # CRF marginals Q: Q[0]=foreground, Q[1]=occluder, Q[2]=background.
            segmentation, Q = densecrf_model.crfInference(rendererGT.r, vis_im, bound_im, [0.75,0.25,0.01], resultDir + 'imgs/crf/Q_' + str(test_i) + '_it' + str(vs_it))
            vColor = color
            if updateColor:
                # Re-estimate object colour from the segmented foreground when
                # enough pixels support it; scale and clamp to [0, 1].
                if np.sum(segmentation == 0) > 5:
                    segmentRegion = segmentation == 0
                    vColor = np.median(rendererGT.reshape([-1, 3])[segmentRegion.ravel()], axis=0) * 1.4
                    vColor = vColor / max(np.max(vColor), 1.)
            chVColors[:] = vColor
            chSHLightCoeffs[:] = lightCoeffs
            variances = stds**2
            # Gaussian per-pixel foreground likelihood of the render vs GT.
            fgProb = ch.exp( - (renderer - rendererGT)**2 / (2 * variances)) * (1./(stds * np.sqrt(2 * np.pi)))
            h = renderer.r.shape[0]
            w = renderer.r.shape[1]
            # Occluder/background likelihoods are uniform (uninformative).
            occProb = np.ones([h,w])
            bgProb = np.ones([h,w])
            # Negative mean log-likelihood, mixing the three classes by their
            # CRF marginals; pixels outside the object mask contribute log(1).
            errorFun = -ch.sum(ch.log(vis_im[:, :, None]*((Q[0].reshape([h, w, 1]) * fgProb) + (Q[1].reshape([h, w]) * occProb + Q[2].reshape([h, w]) * bgProb)[:, :, None]) + (1- vis_im[:, :, None])))/(h*w)
            if minAppLight:
                # Optionally refine appearance/lighting variables for a few
                # iterations before reporting the objective value.
                options = {'disp': False, 'maxiter': 10}
                def cb(_):
                    print("Error: " + str(errorFun.r))
                ch.minimize({'raw': errorFun}, bounds=None, method=method, x0=free_variables_app_light, callback=cb, options=options)
            res = res + [errorFun.r.reshape([1,1])]
        return np.vstack(res)
    return objFun
def bayesOpt(objFun, initX, initF, bounds):
    """Run GPyOpt Bayesian optimisation of ``objFun`` over box ``bounds``.

    Args:
        objFun: callable mapping an (n, d) array of candidates to an (n, 1)
            array of objective values.
        initX: initial design points, shape (n0, d).
        initF: objective values at initX, shape (n0, 1).
        bounds: list of (low, high) tuples, one per input dimension.

    Returns:
        The fitted GPyOpt BayesianOptimization object (best point in .x_opt).
    """
    seed(12345)  # reproducible runs
    input_dim = len(bounds)
    # RBF kernel plus a bias term (the bias helps with normalized outputs).
    kernel = GPy.kern.RBF(input_dim, variance=.1, lengthscale=1) + GPy.kern.Bias(input_dim)
    # --- Problem definition and optimization
    BO_model = GPyOpt.methods.BayesianOptimization(f=objFun,               # function to optimize
                                             kernel = kernel,              # pre-specified model
                                             X = initX,
                                             Y = initF,
                                             bounds=bounds,                # box-constrains of the problem
                                             acquisition='EI',             # Expected Improvement
                                             numdata_initial_design=len(initX),
                                             type_initial_design='random',
                                             normalize = True)             # normalized y
    max_iter = 10
    # Bug fix: removed a leftover `ipdb.set_trace()` breakpoint that halted
    # every run here waiting for interactive debugger input.
    BO_model.run_optimization(max_iter, n_inbatch=10, n_procs=10,          # iteration/evaluation budget
                              acqu_optimize_method = 'DIRECT',             # acq. function optimizer
                              acqu_optimize_restarts = 1,                  # number of local optimizers
                              eps=10e-2,                                   # secondary stop criterion
                              true_gradients = True)                       # exact acq. gradients
    return BO_model
def advanced_optimization_2d(plots=True):
    # GPyOpt demo: Bayesian optimization of the (noisy) six-hump camel
    # function in 2D. Returns the BayesianOptimization object; when `plots`
    # is True it also shows the true objective, acquisition, and convergence.
    import GPyOpt
    import GPy
    from numpy.random import seed
    seed(12345)
    # --- Objective function
    objective_true = GPyOpt.fmodels.experiments2d.sixhumpcamel()            # true function
    objective_noisy = GPyOpt.fmodels.experiments2d.sixhumpcamel(sd = 0.1)   # noisy version
    bounds = objective_noisy.bounds                                         # problem constrains
    input_dim = len(bounds)
    # Select an specific kernel from GPy
    kernel = GPy.kern.RBF(input_dim, variance=.1, lengthscale=.1) + GPy.kern.Bias(input_dim)  # we add a bias kernel
    # --- Problem definition and optimization
    BO_demo_2d = GPyOpt.methods.BayesianOptimization(f=objective_noisy.f,   # function to optimize
                                             kernel = kernel,               # pre-specified model
                                             bounds=bounds,                 # box-constrains of the problem
                                             acquisition='EI',              # Selects the Expected improvement
                                             acquisition_par = 2,           # parameter of the acquisition function
                                             numdata_initial_design = 15,   # 15 initial points
                                             type_initial_design='random',  # latin desing of the initial points
                                             model_optimize_interval= 2,    # model updated every two points
                                             normalize = True)              # normalized y
    # Run the optimization
    max_iter = 20
    # --- Run the optimization                                              # evaluation budget
    BO_demo_2d.run_optimization(max_iter,                                   # Number of iterations
                                acqu_optimize_method = 'DIRECT',            # method to optimize the acq. function
                                acqu_optimize_restarts = 30,                # number of local optimizers
                                eps=10e-6,                                  # secondary stop criteria
                                true_gradients = True)                      # approximated acq. gradients (faster)
    # --- Plots
    if plots:
        objective_true.plot()
        BO_demo_2d.plot_acquisition()
        BO_demo_2d.plot_convergence()
    return BO_demo_2d
return BO_demo_2d | 8,886 | 39.031532 | 213 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.