Column schema:

| column | type | observed size |
|---|---|---|
| partition | string | 3 classes |
| func_name | string | 1–134 chars |
| docstring | string | 1–46.9k chars |
| path | string | 4–223 chars |
| original_string | string | 75–104k chars |
| code | string | 75–104k chars |
| docstring_tokens | list | 1–1.97k items |
| repo | string | 7–55 chars |
| language | string | 1 class |
| url | string | 87–315 chars |
| code_tokens | list | 19–28.4k items |
| sha | string | 40 chars |

partition: test
func_name: check_arguments
docstring: Check arguments passed by user that are not checked by argparse itself.
path: kerncraft/kerncraft.py
original_string / code (identical in this dump):

```python
def check_arguments(args, parser):
"""Check arguments passed by user that are not checked by argparse itself."""
if args.asm_block not in ['auto', 'manual']:
try:
args.asm_block = int(args.asm_block)
except ValueError:
parser.error('--asm-block can only be "auto", "manual" or an integer')
# Set default unit depending on performance model requested
if not args.unit:
if 'Roofline' in args.pmodel or 'RooflineIACA' in args.pmodel:
args.unit = 'FLOP/s'
else:
args.unit = 'cy/CL'
```
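
A quick usage sketch. The flag names below are a hypothetical, minimal reconstruction of the surrounding CLI (the real parser is built by kerncraft's create_parser, which is not part of this record):

```python
import argparse

# Hypothetical minimal parser standing in for kerncraft's create_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--asm-block', default='auto')
parser.add_argument('--unit', default=None)
parser.add_argument('--pmodel', action='append', default=[])

args = parser.parse_args(['--asm-block', '3', '--pmodel', 'Roofline'])
check_arguments(args, parser)
assert args.asm_block == 3      # numeric strings are converted to int
assert args.unit == 'FLOP/s'    # default unit follows the Roofline model
```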

docstring_tokens:

```json
[
"Check",
"arguments",
"passed",
"by",
"user",
"that",
"are",
"not",
"checked",
"by",
"argparse",
"itself",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kerncraft.py#L189-L202

code_tokens:

```json
[
"def",
"check_arguments",
"(",
"args",
",",
"parser",
")",
":",
"if",
"args",
".",
"asm_block",
"not",
"in",
"[",
"'auto'",
",",
"'manual'",
"]",
":",
"try",
":",
"args",
".",
"asm_block",
"=",
"int",
"(",
"args",
".",
"asm_block",
")",
"except",
"ValueError",
":",
"parser",
".",
"error",
"(",
"'--asm-block can only be \"auto\", \"manual\" or an integer'",
")",
"# Set default unit depending on performance model requested",
"if",
"not",
"args",
".",
"unit",
":",
"if",
"'Roofline'",
"in",
"args",
".",
"pmodel",
"or",
"'RooflineIACA'",
"in",
"args",
".",
"pmodel",
":",
"args",
".",
"unit",
"=",
"'FLOP/s'",
"else",
":",
"args",
".",
"unit",
"=",
"'cy/CL'"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: run
docstring: Run command line interface.
path: kerncraft/kerncraft.py
original_string / code (identical in this dump):

```python
def run(parser, args, output_file=sys.stdout):
"""Run command line interface."""
# Try loading results file (if requested)
result_storage = {}
if args.store:
args.store.seek(0)
try:
result_storage = pickle.load(args.store)
except EOFError:
pass
args.store.close()
# machine information
# Read machine description
machine = MachineModel(args.machine.name, args=args)
# process kernel
if not args.kernel_description:
code = str(args.code_file.read())
code = clean_code(code)
kernel = KernelCode(code, filename=args.code_file.name, machine=machine,
keep_intermediates=not args.clean_intermediates)
else:
description = str(args.code_file.read())
kernel = KernelDescription(yaml.load(description, Loader=yaml.Loader), machine=machine)
# if no defines were given, guess suitable defines in-mem
# TODO support in-cache
# TODO broaden cases to n-dimensions
# TODO make configurable (no hardcoded 512MB/1GB/min. 3 iteration ...)
# works only for up to 3 dimensions
required_consts = [v[1] for v in kernel.variables.values() if v[1] is not None]
required_consts += [[l['start'], l['stop']] for l in kernel.get_loop_stack()]
# split into individual consts
required_consts = [i for l in required_consts for i in l]
required_consts = set([i for l in required_consts for i in l.free_symbols])
if len(required_consts) > 0:
# build defines permutations
define_dict = {}
for name, values in args.define:
if name not in define_dict:
define_dict[name] = [[name, v] for v in values]
continue
for v in values:
if v not in define_dict[name]:
define_dict[name].append([name, v])
define_product = list(itertools.product(*list(define_dict.values())))
# Check that all consts have been defined
if set(required_consts).difference(set([symbol_pos_int(k) for k in define_dict.keys()])):
raise ValueError("Not all constants have been defined. Required are: {}".format(
required_consts))
else:
define_product = [{}]
for define in define_product:
# Reset state of kernel
kernel.clear_state()
# Add constants from define arguments
for k, v in define:
kernel.set_constant(k, v)
for model_name in uniquify(args.pmodel):
# print header
print('{:^80}'.format(' kerncraft '), file=output_file)
print('{:<40}{:>40}'.format(args.code_file.name, '-m ' + args.machine.name),
file=output_file)
print(' '.join(['-D {} {}'.format(k, v) for k, v in define]), file=output_file)
print('{:-^80}'.format(' ' + model_name + ' '), file=output_file)
if args.verbose > 1:
if not args.kernel_description:
kernel.print_kernel_code(output_file=output_file)
print('', file=output_file)
kernel.print_variables_info(output_file=output_file)
kernel.print_kernel_info(output_file=output_file)
if args.verbose > 0:
kernel.print_constants_info(output_file=output_file)
model = getattr(models, model_name)(kernel, machine, args, parser)
model.analyze()
model.report(output_file=output_file)
# Add results to storage
kernel_name = os.path.split(args.code_file.name)[1]
if kernel_name not in result_storage:
result_storage[kernel_name] = {}
if tuple(kernel.constants.items()) not in result_storage[kernel_name]:
result_storage[kernel_name][tuple(kernel.constants.items())] = {}
result_storage[kernel_name][tuple(kernel.constants.items())][model_name] = \
model.results
print('', file=output_file)
# Save storage to file (if requested)
if args.store:
temp_name = args.store.name + '.tmp'
with open(temp_name, 'wb+') as f:
pickle.dump(result_storage, f)
shutil.move(temp_name, args.store.name)
```

docstring_tokens:

```json
[
"Run",
"command",
"line",
"interface",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kerncraft.py#L205-L305

code_tokens:

```json
[
"def",
"run",
"(",
"parser",
",",
"args",
",",
"output_file",
"=",
"sys",
".",
"stdout",
")",
":",
"# Try loading results file (if requested)",
"result_storage",
"=",
"{",
"}",
"if",
"args",
".",
"store",
":",
"args",
".",
"store",
".",
"seek",
"(",
"0",
")",
"try",
":",
"result_storage",
"=",
"pickle",
".",
"load",
"(",
"args",
".",
"store",
")",
"except",
"EOFError",
":",
"pass",
"args",
".",
"store",
".",
"close",
"(",
")",
"# machine information",
"# Read machine description",
"machine",
"=",
"MachineModel",
"(",
"args",
".",
"machine",
".",
"name",
",",
"args",
"=",
"args",
")",
"# process kernel",
"if",
"not",
"args",
".",
"kernel_description",
":",
"code",
"=",
"str",
"(",
"args",
".",
"code_file",
".",
"read",
"(",
")",
")",
"code",
"=",
"clean_code",
"(",
"code",
")",
"kernel",
"=",
"KernelCode",
"(",
"code",
",",
"filename",
"=",
"args",
".",
"code_file",
".",
"name",
",",
"machine",
"=",
"machine",
",",
"keep_intermediates",
"=",
"not",
"args",
".",
"clean_intermediates",
")",
"else",
":",
"description",
"=",
"str",
"(",
"args",
".",
"code_file",
".",
"read",
"(",
")",
")",
"kernel",
"=",
"KernelDescription",
"(",
"yaml",
".",
"load",
"(",
"description",
",",
"Loader",
"=",
"yaml",
".",
"Loader",
")",
",",
"machine",
"=",
"machine",
")",
"# if no defines were given, guess suitable defines in-mem",
"# TODO support in-cache",
"# TODO broaden cases to n-dimensions",
"# TODO make configurable (no hardcoded 512MB/1GB/min. 3 iteration ...)",
"# works only for up to 3 dimensions",
"required_consts",
"=",
"[",
"v",
"[",
"1",
"]",
"for",
"v",
"in",
"kernel",
".",
"variables",
".",
"values",
"(",
")",
"if",
"v",
"[",
"1",
"]",
"is",
"not",
"None",
"]",
"required_consts",
"+=",
"[",
"[",
"l",
"[",
"'start'",
"]",
",",
"l",
"[",
"'stop'",
"]",
"]",
"for",
"l",
"in",
"kernel",
".",
"get_loop_stack",
"(",
")",
"]",
"# split into individual consts",
"required_consts",
"=",
"[",
"i",
"for",
"l",
"in",
"required_consts",
"for",
"i",
"in",
"l",
"]",
"required_consts",
"=",
"set",
"(",
"[",
"i",
"for",
"l",
"in",
"required_consts",
"for",
"i",
"in",
"l",
".",
"free_symbols",
"]",
")",
"if",
"len",
"(",
"required_consts",
")",
">",
"0",
":",
"# build defines permutations",
"define_dict",
"=",
"{",
"}",
"for",
"name",
",",
"values",
"in",
"args",
".",
"define",
":",
"if",
"name",
"not",
"in",
"define_dict",
":",
"define_dict",
"[",
"name",
"]",
"=",
"[",
"[",
"name",
",",
"v",
"]",
"for",
"v",
"in",
"values",
"]",
"continue",
"for",
"v",
"in",
"values",
":",
"if",
"v",
"not",
"in",
"define_dict",
"[",
"name",
"]",
":",
"define_dict",
"[",
"name",
"]",
".",
"append",
"(",
"[",
"name",
",",
"v",
"]",
")",
"define_product",
"=",
"list",
"(",
"itertools",
".",
"product",
"(",
"*",
"list",
"(",
"define_dict",
".",
"values",
"(",
")",
")",
")",
")",
"# Check that all consts have been defined",
"if",
"set",
"(",
"required_consts",
")",
".",
"difference",
"(",
"set",
"(",
"[",
"symbol_pos_int",
"(",
"k",
")",
"for",
"k",
"in",
"define_dict",
".",
"keys",
"(",
")",
"]",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Not all constants have been defined. Required are: {}\"",
".",
"format",
"(",
"required_consts",
")",
")",
"else",
":",
"define_product",
"=",
"[",
"{",
"}",
"]",
"for",
"define",
"in",
"define_product",
":",
"# Reset state of kernel",
"kernel",
".",
"clear_state",
"(",
")",
"# Add constants from define arguments",
"for",
"k",
",",
"v",
"in",
"define",
":",
"kernel",
".",
"set_constant",
"(",
"k",
",",
"v",
")",
"for",
"model_name",
"in",
"uniquify",
"(",
"args",
".",
"pmodel",
")",
":",
"# print header",
"print",
"(",
"'{:^80}'",
".",
"format",
"(",
"' kerncraft '",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"'{:<40}{:>40}'",
".",
"format",
"(",
"args",
".",
"code_file",
".",
"name",
",",
"'-m '",
"+",
"args",
".",
"machine",
".",
"name",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"' '",
".",
"join",
"(",
"[",
"'-D {} {}'",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"define",
"]",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"'{:-^80}'",
".",
"format",
"(",
"' '",
"+",
"model_name",
"+",
"' '",
")",
",",
"file",
"=",
"output_file",
")",
"if",
"args",
".",
"verbose",
">",
"1",
":",
"if",
"not",
"args",
".",
"kernel_description",
":",
"kernel",
".",
"print_kernel_code",
"(",
"output_file",
"=",
"output_file",
")",
"print",
"(",
"''",
",",
"file",
"=",
"output_file",
")",
"kernel",
".",
"print_variables_info",
"(",
"output_file",
"=",
"output_file",
")",
"kernel",
".",
"print_kernel_info",
"(",
"output_file",
"=",
"output_file",
")",
"if",
"args",
".",
"verbose",
">",
"0",
":",
"kernel",
".",
"print_constants_info",
"(",
"output_file",
"=",
"output_file",
")",
"model",
"=",
"getattr",
"(",
"models",
",",
"model_name",
")",
"(",
"kernel",
",",
"machine",
",",
"args",
",",
"parser",
")",
"model",
".",
"analyze",
"(",
")",
"model",
".",
"report",
"(",
"output_file",
"=",
"output_file",
")",
"# Add results to storage",
"kernel_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"args",
".",
"code_file",
".",
"name",
")",
"[",
"1",
"]",
"if",
"kernel_name",
"not",
"in",
"result_storage",
":",
"result_storage",
"[",
"kernel_name",
"]",
"=",
"{",
"}",
"if",
"tuple",
"(",
"kernel",
".",
"constants",
".",
"items",
"(",
")",
")",
"not",
"in",
"result_storage",
"[",
"kernel_name",
"]",
":",
"result_storage",
"[",
"kernel_name",
"]",
"[",
"tuple",
"(",
"kernel",
".",
"constants",
".",
"items",
"(",
")",
")",
"]",
"=",
"{",
"}",
"result_storage",
"[",
"kernel_name",
"]",
"[",
"tuple",
"(",
"kernel",
".",
"constants",
".",
"items",
"(",
")",
")",
"]",
"[",
"model_name",
"]",
"=",
"model",
".",
"results",
"print",
"(",
"''",
",",
"file",
"=",
"output_file",
")",
"# Save storage to file (if requested)",
"if",
"args",
".",
"store",
":",
"temp_name",
"=",
"args",
".",
"store",
".",
"name",
"+",
"'.tmp'",
"with",
"open",
"(",
"temp_name",
",",
"'wb+'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"result_storage",
",",
"f",
")",
"shutil",
".",
"move",
"(",
"temp_name",
",",
"args",
".",
"store",
".",
"name",
")"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: main
docstring: Initialize and run command line interface.
path: kerncraft/kerncraft.py
original_string / code (identical in this dump):

```python
def main():
"""Initialize and run command line interface."""
# Create and populate parser
parser = create_parser()
# Parse given arguments
args = parser.parse_args()
# Checking arguments
check_arguments(args, parser)
# BUSINESS LOGIC IS FOLLOWING
run(parser, args)
```

docstring_tokens:

```json
[
"Initialize",
"and",
"run",
"command",
"line",
"interface",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kerncraft.py#L308-L320

code_tokens:

```json
[
"def",
"main",
"(",
")",
":",
"# Create and populate parser",
"parser",
"=",
"create_parser",
"(",
")",
"# Parse given arguments",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# Checking arguments",
"check_arguments",
"(",
"args",
",",
"parser",
")",
"# BUSINESS LOGIC IS FOLLOWING",
"run",
"(",
"parser",
",",
"args",
")"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: main
docstring: Comand line interface of picklemerge.
path: kerncraft/picklemerge.py
original_string / code (identical in this dump):

```python
def main():
"""Comand line interface of picklemerge."""
parser = argparse.ArgumentParser(
description='Recursively merges two or more pickle files. Only supports pickles consisting '
'of a single dictionary object.')
parser.add_argument('destination', type=argparse.FileType('r+b'),
help='File to write to and include in resulting pickle. (WILL BE CHANGED)')
parser.add_argument('source', type=argparse.FileType('rb'), nargs='+',
help='File to include in resulting pickle.')
args = parser.parse_args()
result = pickle.load(args.destination)
assert isinstance(result, collections.Mapping), "only Mapping types can be handled."
for s in args.source:
data = pickle.load(s)
assert isinstance(data, collections.Mapping), "only Mapping types can be handled."
update(result, data)
args.destination.seek(0)
args.destination.truncate()
pickle.dump(result, args.destination)
```
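
The update helper called above is not shown in this record. A minimal recursive-merge sketch consistent with its call site might look like the following; note it uses collections.abc.Mapping, since the bare collections.Mapping alias used above was removed in Python 3.10:

```python
from collections.abc import Mapping

def update(d, u):
    """Hypothetical helper: recursively merge mapping u into mapping d."""
    for key, value in u.items():
        if isinstance(value, Mapping) and isinstance(d.get(key), Mapping):
            update(d[key], value)  # descend into nested dictionaries
        else:
            d[key] = value         # leaves from u overwrite leaves in d
    return d
```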

docstring_tokens:

```json
[
"Comand",
"line",
"interface",
"of",
"picklemerge",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/picklemerge.py#L23-L46

code_tokens:

```json
[
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Recursively merges two or more pickle files. Only supports pickles consisting '",
"'of a single dictionary object.'",
")",
"parser",
".",
"add_argument",
"(",
"'destination'",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'r+b'",
")",
",",
"help",
"=",
"'File to write to and include in resulting pickle. (WILL BE CHANGED)'",
")",
"parser",
".",
"add_argument",
"(",
"'source'",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'rb'",
")",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"'File to include in resulting pickle.'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"result",
"=",
"pickle",
".",
"load",
"(",
"args",
".",
"destination",
")",
"assert",
"isinstance",
"(",
"result",
",",
"collections",
".",
"Mapping",
")",
",",
"\"only Mapping types can be handled.\"",
"for",
"s",
"in",
"args",
".",
"source",
":",
"data",
"=",
"pickle",
".",
"load",
"(",
"s",
")",
"assert",
"isinstance",
"(",
"data",
",",
"collections",
".",
"Mapping",
")",
",",
"\"only Mapping types can be handled.\"",
"update",
"(",
"result",
",",
"data",
")",
"args",
".",
"destination",
".",
"seek",
"(",
"0",
")",
"args",
".",
"destination",
".",
"truncate",
"(",
")",
"pickle",
".",
"dump",
"(",
"result",
",",
"args",
".",
"destination",
")"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: symbol_pos_int
docstring: Create a sympy.Symbol with positive and integer assumptions.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def symbol_pos_int(*args, **kwargs):
"""Create a sympy.Symbol with positive and integer assumptions."""
kwargs.update({'positive': True,
'integer': True})
return sympy.Symbol(*args, **kwargs)
```
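
A short usage sketch. The assumptions matter for dictionary lookups elsewhere in kerncraft, because sympy symbols with different assumptions compare unequal even when their names match:

```python
import sympy

n = symbol_pos_int('N')
assert n.is_positive and n.is_integer
assert sympy.Symbol('N') != n  # a plain Symbol of the same name is a different key
```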

docstring_tokens:

```json
[
"Create",
"a",
"sympy",
".",
"Symbol",
"with",
"positive",
"and",
"integer",
"assumptions",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L36-L40

code_tokens:

```json
[
"def",
"symbol_pos_int",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"{",
"'positive'",
":",
"True",
",",
"'integer'",
":",
"True",
"}",
")",
"return",
"sympy",
".",
"Symbol",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: prefix_indent
docstring:
  Prefix and indent all lines in *textblock*.
  *prefix* is a prefix string
  *later_prefix* is used on all but the first line, if it is a single character
  it will be repeated to match length of *prefix*
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def prefix_indent(prefix, textblock, later_prefix=' '):
"""
Prefix and indent all lines in *textblock*.
*prefix* is a prefix string
*later_prefix* is used on all but the first line, if it is a single character
it will be repeated to match length of *prefix*
"""
textblock = textblock.split('\n')
line = prefix + textblock[0] + '\n'
if len(later_prefix) == 1:
later_prefix = ' '*len(prefix)
line = line + '\n'.join([later_prefix + x for x in textblock[1:]])
if line[-1] != '\n':
return line + '\n'
else:
return line
```
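
A usage sketch of the documented behavior:

```python
print(prefix_indent('vars: ', 'a\nb\nc'), end='')
# vars: a
#       b
#       c
```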

docstring_tokens:

```json
[
"Prefix",
"and",
"indent",
"all",
"lines",
"in",
"*",
"textblock",
"*",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L43-L59

code_tokens:

```json
[
"def",
"prefix_indent",
"(",
"prefix",
",",
"textblock",
",",
"later_prefix",
"=",
"' '",
")",
":",
"textblock",
"=",
"textblock",
".",
"split",
"(",
"'\\n'",
")",
"line",
"=",
"prefix",
"+",
"textblock",
"[",
"0",
"]",
"+",
"'\\n'",
"if",
"len",
"(",
"later_prefix",
")",
"==",
"1",
":",
"later_prefix",
"=",
"' '",
"*",
"len",
"(",
"prefix",
")",
"line",
"=",
"line",
"+",
"'\\n'",
".",
"join",
"(",
"[",
"later_prefix",
"+",
"x",
"for",
"x",
"in",
"textblock",
"[",
"1",
":",
"]",
"]",
")",
"if",
"line",
"[",
"-",
"1",
"]",
"!=",
"'\\n'",
":",
"return",
"line",
"+",
"'\\n'",
"else",
":",
"return",
"line"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: transform_multidim_to_1d_decl
docstring:
  Transform ast of multidimensional declaration to a single dimension declaration.
  In-place operation!
  Returns name and dimensions of array (to be used with transform_multidim_to_1d_ref())
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def transform_multidim_to_1d_decl(decl):
"""
Transform ast of multidimensional declaration to a single dimension declaration.
In-place operation!
Returns name and dimensions of array (to be used with transform_multidim_to_1d_ref())
"""
dims = []
type_ = decl.type
while type(type_) is c_ast.ArrayDecl:
dims.append(type_.dim)
type_ = type_.type
if dims:
# Multidimensional array
decl.type.dim = reduce(lambda l, r: c_ast.BinaryOp('*', l, r), dims)
decl.type.type = type_
return decl.name, dims
```
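
A sketch assuming pycparser is installed (it provides the c_ast module used above):

```python
from pycparser import c_parser, c_generator

ast = c_parser.CParser().parse('void f() { double a[10][20]; }')
decl = ast.ext[0].body.block_items[0]
name, dims = transform_multidim_to_1d_decl(decl)
print(name, '->', c_generator.CGenerator().visit(decl))
# a -> double a[10 * 20]
```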

docstring_tokens:

```json
[
"Transform",
"ast",
"of",
"multidimensional",
"declaration",
"to",
"a",
"single",
"dimension",
"declaration",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L62-L81

code_tokens:

```json
[
"def",
"transform_multidim_to_1d_decl",
"(",
"decl",
")",
":",
"dims",
"=",
"[",
"]",
"type_",
"=",
"decl",
".",
"type",
"while",
"type",
"(",
"type_",
")",
"is",
"c_ast",
".",
"ArrayDecl",
":",
"dims",
".",
"append",
"(",
"type_",
".",
"dim",
")",
"type_",
"=",
"type_",
".",
"type",
"if",
"dims",
":",
"# Multidimensional array",
"decl",
".",
"type",
".",
"dim",
"=",
"reduce",
"(",
"lambda",
"l",
",",
"r",
":",
"c_ast",
".",
"BinaryOp",
"(",
"'*'",
",",
"l",
",",
"r",
")",
",",
"dims",
")",
"decl",
".",
"type",
".",
"type",
"=",
"type_",
"return",
"decl",
".",
"name",
",",
"dims"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: transform_multidim_to_1d_ref
docstring:
  Transform ast of multidimensional reference to a single dimension reference.
  In-place operation!
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def transform_multidim_to_1d_ref(aref, dimension_dict):
"""
Transform ast of multidimensional reference to a single dimension reference.
In-place operation!
"""
dims = []
name = aref
while type(name) is c_ast.ArrayRef:
dims.append(name.subscript)
name = name.name
subscript_list = []
for i, d in enumerate(dims):
if i == 0:
subscript_list.append(d)
else:
subscript_list.append(c_ast.BinaryOp('*', d, reduce(
lambda l, r: c_ast.BinaryOp('*', l, r),
dimension_dict[name.name][-1:-i-1:-1])))
aref.subscript = reduce(
lambda l, r: c_ast.BinaryOp('+', l, r), subscript_list)
aref.name = name
```
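
Continuing the pycparser sketch above, flattening a two-dimensional reference with the dimensions recorded by transform_multidim_to_1d_decl():

```python
from pycparser import c_parser, c_generator

src = 'void f() { double a[10][20]; a[1][2] = 0.0; }'
body = c_parser.CParser().parse(src).ext[0].body.block_items
name, dims = transform_multidim_to_1d_decl(body[0])
aref = body[1].lvalue  # the a[1][2] reference
transform_multidim_to_1d_ref(aref, {name: dims})
print(c_generator.CGenerator().visit(aref))
# a[2 + (1 * 20)]
```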

docstring_tokens:

```json
[
"Transform",
"ast",
"of",
"multidimensional",
"reference",
"to",
"a",
"single",
"dimension",
"reference",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L84-L107

code_tokens:

```json
[
"def",
"transform_multidim_to_1d_ref",
"(",
"aref",
",",
"dimension_dict",
")",
":",
"dims",
"=",
"[",
"]",
"name",
"=",
"aref",
"while",
"type",
"(",
"name",
")",
"is",
"c_ast",
".",
"ArrayRef",
":",
"dims",
".",
"append",
"(",
"name",
".",
"subscript",
")",
"name",
"=",
"name",
".",
"name",
"subscript_list",
"=",
"[",
"]",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"dims",
")",
":",
"if",
"i",
"==",
"0",
":",
"subscript_list",
".",
"append",
"(",
"d",
")",
"else",
":",
"subscript_list",
".",
"append",
"(",
"c_ast",
".",
"BinaryOp",
"(",
"'*'",
",",
"d",
",",
"reduce",
"(",
"lambda",
"l",
",",
"r",
":",
"c_ast",
".",
"BinaryOp",
"(",
"'*'",
",",
"l",
",",
"r",
")",
",",
"dimension_dict",
"[",
"name",
".",
"name",
"]",
"[",
"-",
"1",
":",
"-",
"i",
"-",
"1",
":",
"-",
"1",
"]",
")",
")",
")",
"aref",
".",
"subscript",
"=",
"reduce",
"(",
"lambda",
"l",
",",
"r",
":",
"c_ast",
".",
"BinaryOp",
"(",
"'+'",
",",
"l",
",",
"r",
")",
",",
"subscript_list",
")",
"aref",
".",
"name",
"=",
"name"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: transform_array_decl_to_malloc
docstring:
  Transform ast of "type var_name[N]" to "type* var_name = aligned_malloc(sizeof(type)*N, 32)"
  In-place operation.
  :param with_init: if False, ommit malloc
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def transform_array_decl_to_malloc(decl, with_init=True):
"""
Transform ast of "type var_name[N]" to "type* var_name = aligned_malloc(sizeof(type)*N, 32)"
In-place operation.
:param with_init: if False, ommit malloc
"""
if type(decl.type) is not c_ast.ArrayDecl:
# Not an array declaration, can be ignored
return
type_ = c_ast.PtrDecl([], decl.type.type)
if with_init:
decl.init = c_ast.FuncCall(
c_ast.ID('aligned_malloc'),
c_ast.ExprList([
c_ast.BinaryOp(
'*',
c_ast.UnaryOp(
'sizeof',
c_ast.Typename(None, [], c_ast.TypeDecl(
None, [], decl.type.type.type))),
decl.type.dim),
c_ast.Constant('int', '32')]))
decl.type = type_
```
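
A pycparser sketch of the rewrite (exact spacing and parenthesization of the generated C may vary):

```python
from pycparser import c_parser, c_generator

ast = c_parser.CParser().parse('void f() { double a[100]; }')
decl = ast.ext[0].body.block_items[0]
transform_array_decl_to_malloc(decl)
print(c_generator.CGenerator().visit(decl))
# double *a = aligned_malloc((sizeof(double)) * 100, 32)
```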

docstring_tokens:

```json
[
"Transform",
"ast",
"of",
"type",
"var_name",
"[",
"N",
"]",
"to",
"type",
"*",
"var_name",
"=",
"aligned_malloc",
"(",
"sizeof",
"(",
"type",
")",
"*",
"N",
"32",
")"
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L110-L135

code_tokens:

```json
[
"def",
"transform_array_decl_to_malloc",
"(",
"decl",
",",
"with_init",
"=",
"True",
")",
":",
"if",
"type",
"(",
"decl",
".",
"type",
")",
"is",
"not",
"c_ast",
".",
"ArrayDecl",
":",
"# Not an array declaration, can be ignored",
"return",
"type_",
"=",
"c_ast",
".",
"PtrDecl",
"(",
"[",
"]",
",",
"decl",
".",
"type",
".",
"type",
")",
"if",
"with_init",
":",
"decl",
".",
"init",
"=",
"c_ast",
".",
"FuncCall",
"(",
"c_ast",
".",
"ID",
"(",
"'aligned_malloc'",
")",
",",
"c_ast",
".",
"ExprList",
"(",
"[",
"c_ast",
".",
"BinaryOp",
"(",
"'*'",
",",
"c_ast",
".",
"UnaryOp",
"(",
"'sizeof'",
",",
"c_ast",
".",
"Typename",
"(",
"None",
",",
"[",
"]",
",",
"c_ast",
".",
"TypeDecl",
"(",
"None",
",",
"[",
"]",
",",
"decl",
".",
"type",
".",
"type",
".",
"type",
")",
")",
")",
",",
"decl",
".",
"type",
".",
"dim",
")",
",",
"c_ast",
".",
"Constant",
"(",
"'int'",
",",
"'32'",
")",
"]",
")",
")",
"decl",
".",
"type",
"=",
"type_"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: find_node_type
docstring: Return list of array references in AST.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def find_node_type(ast, node_type):
"""Return list of array references in AST."""
if type(ast) is node_type:
return [ast]
elif type(ast) is list:
return reduce(operator.add, list(map(lambda a: find_node_type(a, node_type), ast)), [])
elif ast is None:
return []
else:
return reduce(operator.add,
[find_node_type(o[1], node_type) for o in ast.children()], [])
```
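
Despite the docstring, the function collects nodes of any requested type; a sketch assuming pycparser:

```python
from pycparser import c_parser, c_ast

ast = c_parser.CParser().parse('void f() { double a[10]; a[0] = a[1]; }')
refs = find_node_type(ast, c_ast.ArrayRef)
print(len(refs))  # 2
```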

docstring_tokens:

```json
[
"Return",
"list",
"of",
"array",
"references",
"in",
"AST",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L138-L148

code_tokens:

```json
[
"def",
"find_node_type",
"(",
"ast",
",",
"node_type",
")",
":",
"if",
"type",
"(",
"ast",
")",
"is",
"node_type",
":",
"return",
"[",
"ast",
"]",
"elif",
"type",
"(",
"ast",
")",
"is",
"list",
":",
"return",
"reduce",
"(",
"operator",
".",
"add",
",",
"list",
"(",
"map",
"(",
"lambda",
"a",
":",
"find_node_type",
"(",
"a",
",",
"node_type",
")",
",",
"ast",
")",
")",
",",
"[",
"]",
")",
"elif",
"ast",
"is",
"None",
":",
"return",
"[",
"]",
"else",
":",
"return",
"reduce",
"(",
"operator",
".",
"add",
",",
"[",
"find_node_type",
"(",
"o",
"[",
"1",
"]",
",",
"node_type",
")",
"for",
"o",
"in",
"ast",
".",
"children",
"(",
")",
"]",
",",
"[",
"]",
")"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: force_iterable
docstring: Will make any functions return an iterable objects by wrapping its result in a list.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def force_iterable(f):
"""Will make any functions return an iterable objects by wrapping its result in a list."""
def wrapper(*args, **kwargs):
r = f(*args, **kwargs)
if hasattr(r, '__iter__'):
return r
else:
return [r]
return wrapper
```
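
A usage sketch of the decorator:

```python
@force_iterable
def scalar():
    return 1

@force_iterable
def already_iterable():
    return [1, 2]

assert scalar() == [1]             # scalar result gets wrapped
assert already_iterable() == [1, 2]  # iterables pass through unchanged
```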

docstring_tokens:

```json
[
"Will",
"make",
"any",
"functions",
"return",
"an",
"iterable",
"objects",
"by",
"wrapping",
"its",
"result",
"in",
"a",
"list",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L157-L165

code_tokens:

```json
[
"def",
"force_iterable",
"(",
"f",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"hasattr",
"(",
"r",
",",
"'__iter__'",
")",
":",
"return",
"r",
"else",
":",
"return",
"[",
"r",
"]",
"return",
"wrapper"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: reduce_path
docstring: Reduce absolute path to relative (if shorter) for easier readability.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def reduce_path(path):
"""Reduce absolute path to relative (if shorter) for easier readability."""
relative_path = os.path.relpath(path)
if len(relative_path) < len(path):
return relative_path
else:
return path
```
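
A usage sketch:

```python
import os

inside = os.path.join(os.getcwd(), 'kernels', 'foo.c')
print(reduce_path(inside))             # kernels/foo.c
print(reduce_path('/somewhere/else'))  # unchanged when the relative form is longer
```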

docstring_tokens:

```json
[
"Reduce",
"absolute",
"path",
"to",
"relative",
"(",
"if",
"shorter",
")",
"for",
"easier",
"readability",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L168-L174

code_tokens:

```json
[
"def",
"reduce_path",
"(",
"path",
")",
":",
"relative_path",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
")",
"if",
"len",
"(",
"relative_path",
")",
"<",
"len",
"(",
"path",
")",
":",
"return",
"relative_path",
"else",
":",
"return",
"path"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.check
docstring: Check that information about kernel makes sens and is valid.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def check(self):
"""Check that information about kernel makes sens and is valid."""
datatypes = [v[0] for v in self.variables.values()]
assert len(set(datatypes)) <= 1, 'mixing of datatypes within a kernel is not supported.'
```

docstring_tokens:

```json
[
"Check",
"that",
"information",
"about",
"kernel",
"makes",
"sens",
"and",
"is",
"valid",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L196-L199

code_tokens:

```json
[
"def",
"check",
"(",
"self",
")",
":",
"datatypes",
"=",
"[",
"v",
"[",
"0",
"]",
"for",
"v",
"in",
"self",
".",
"variables",
".",
"values",
"(",
")",
"]",
"assert",
"len",
"(",
"set",
"(",
"datatypes",
")",
")",
"<=",
"1",
",",
"'mixing of datatypes within a kernel is not supported.'"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.set_constant
docstring:
  Set constant of name to value.
  :param name: may be a str or a sympy.Symbol
  :param value: must be an int
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def set_constant(self, name, value):
"""
Set constant of name to value.
:param name: may be a str or a sympy.Symbol
:param value: must be an int
"""
assert isinstance(name, str) or isinstance(name, sympy.Symbol), \
"constant name needs to be of type str, unicode or a sympy.Symbol"
assert type(value) is int, "constant value needs to be of type int"
if isinstance(name, sympy.Symbol):
self.constants[name] = value
else:
self.constants[symbol_pos_int(name)] = value
```

docstring_tokens:

```json
[
"Set",
"constant",
"of",
"name",
"to",
"value",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L203-L216

code_tokens:

```json
[
"def",
"set_constant",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"str",
")",
"or",
"isinstance",
"(",
"name",
",",
"sympy",
".",
"Symbol",
")",
",",
"\"constant name needs to be of type str, unicode or a sympy.Symbol\"",
"assert",
"type",
"(",
"value",
")",
"is",
"int",
",",
"\"constant value needs to be of type int\"",
"if",
"isinstance",
"(",
"name",
",",
"sympy",
".",
"Symbol",
")",
":",
"self",
".",
"constants",
"[",
"name",
"]",
"=",
"value",
"else",
":",
"self",
".",
"constants",
"[",
"symbol_pos_int",
"(",
"name",
")",
"]",
"=",
"value"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.set_variable
docstring:
  Register variable of name and type_, with a (multidimensional) size.
  :param name: variable name as it appears in code
  :param type_: may be any key from Kernel.datatypes_size (typically float or double)
  :param size: either None for scalars or an n-tuple of ints for an n-dimensional array
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def set_variable(self, name, type_, size):
"""
Register variable of name and type_, with a (multidimensional) size.
:param name: variable name as it appears in code
:param type_: may be any key from Kernel.datatypes_size (typically float or double)
:param size: either None for scalars or an n-tuple of ints for an n-dimensional array
"""
assert type_ in self.datatypes_size, 'only float and double variables are supported'
if self.datatype is None:
self.datatype = type_
else:
assert type_ == self.datatype, 'mixing of datatypes within a kernel is not supported.'
assert type(size) in [tuple, type(None)], 'size has to be defined as tuple or None'
self.variables[name] = (type_, size)
```

docstring_tokens:

```json
[
"Register",
"variable",
"of",
"name",
"and",
"type_",
"with",
"a",
"(",
"multidimensional",
")",
"size",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L218-L232

code_tokens:

```json
[
"def",
"set_variable",
"(",
"self",
",",
"name",
",",
"type_",
",",
"size",
")",
":",
"assert",
"type_",
"in",
"self",
".",
"datatypes_size",
",",
"'only float and double variables are supported'",
"if",
"self",
".",
"datatype",
"is",
"None",
":",
"self",
".",
"datatype",
"=",
"type_",
"else",
":",
"assert",
"type_",
"==",
"self",
".",
"datatype",
",",
"'mixing of datatypes within a kernel is not supported.'",
"assert",
"type",
"(",
"size",
")",
"in",
"[",
"tuple",
",",
"type",
"(",
"None",
")",
"]",
",",
"'size has to be defined as tuple or None'",
"self",
".",
"variables",
"[",
"name",
"]",
"=",
"(",
"type_",
",",
"size",
")"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.subs_consts
docstring: Substitute constants in expression unless it is already a number.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def subs_consts(self, expr):
"""Substitute constants in expression unless it is already a number."""
if isinstance(expr, numbers.Number):
return expr
else:
return expr.subs(self.constants)
```
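
The isinstance guard exists because plain Python numbers have no .subs method; for sympy expressions the call behaves like:

```python
N = symbol_pos_int('N')
print((N ** 2 + 1).subs({N: 100}))  # 10001
```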

docstring_tokens:

```json
[
"Substitute",
"constants",
"in",
"expression",
"unless",
"it",
"is",
"already",
"a",
"number",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L240-L245

code_tokens:

```json
[
"def",
"subs_consts",
"(",
"self",
",",
"expr",
")",
":",
"if",
"isinstance",
"(",
"expr",
",",
"numbers",
".",
"Number",
")",
":",
"return",
"expr",
"else",
":",
"return",
"expr",
".",
"subs",
"(",
"self",
".",
"constants",
")"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.array_sizes
docstring:
  Return a dictionary with all arrays sizes.
  :param in_bytes: If True, output will be in bytes, not element counts.
  :param subs_consts: If True, output will be numbers and not symbolic.
  Scalar variables are ignored.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def array_sizes(self, in_bytes=False, subs_consts=False):
"""
Return a dictionary with all arrays sizes.
:param in_bytes: If True, output will be in bytes, not element counts.
:param subs_consts: If True, output will be numbers and not symbolic.
Scalar variables are ignored.
"""
var_sizes = {}
for var_name, var_info in self.variables.items():
var_type, var_size = var_info
# Skiping sclars
if var_size is None:
continue
var_sizes[var_name] = reduce(operator.mul, var_size, 1)
# Multiply by bytes per element if requested
if in_bytes:
element_size = self.datatypes_size[var_type]
var_sizes[var_name] *= element_size
if subs_consts:
return {k: self.subs_consts(v) for k, v in var_sizes.items()}
else:
return var_sizes
```
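
The size of each array is the product of its dimension tuple, optionally multiplied by the element size; in isolation the arithmetic looks like:

```python
import operator
from functools import reduce

N = symbol_pos_int('N')
size = (N, N + 2)                  # a 2D array, symbolically sized
elements = reduce(operator.mul, size, 1)
print(elements * 8)                # bytes, assuming 8-byte doubles: 8*N*(N + 2)
```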

docstring_tokens:

```json
[
"Return",
"a",
"dictionary",
"with",
"all",
"arrays",
"sizes",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L247-L275

code_tokens:

```json
[
"def",
"array_sizes",
"(",
"self",
",",
"in_bytes",
"=",
"False",
",",
"subs_consts",
"=",
"False",
")",
":",
"var_sizes",
"=",
"{",
"}",
"for",
"var_name",
",",
"var_info",
"in",
"self",
".",
"variables",
".",
"items",
"(",
")",
":",
"var_type",
",",
"var_size",
"=",
"var_info",
"# Skiping sclars",
"if",
"var_size",
"is",
"None",
":",
"continue",
"var_sizes",
"[",
"var_name",
"]",
"=",
"reduce",
"(",
"operator",
".",
"mul",
",",
"var_size",
",",
"1",
")",
"# Multiply by bytes per element if requested",
"if",
"in_bytes",
":",
"element_size",
"=",
"self",
".",
"datatypes_size",
"[",
"var_type",
"]",
"var_sizes",
"[",
"var_name",
"]",
"*=",
"element_size",
"if",
"subs_consts",
":",
"return",
"{",
"k",
":",
"self",
".",
"subs_consts",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"var_sizes",
".",
"items",
"(",
")",
"}",
"else",
":",
"return",
"var_sizes"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel._calculate_relative_offset
docstring:
  Return the offset from the iteration center in number of elements.
  The order of indices used in access is preserved.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def _calculate_relative_offset(self, name, access_dimensions):
"""
Return the offset from the iteration center in number of elements.
The order of indices used in access is preserved.
"""
# TODO to be replaced with compile_global_offsets
offset = 0
base_dims = self.variables[name][1]
for dim, offset_info in enumerate(access_dimensions):
offset_type, idx_name, dim_offset = offset_info
assert offset_type == 'rel', 'Only relative access to arrays is supported at the moment'
if offset_type == 'rel':
offset += self.subs_consts(
dim_offset*reduce(operator.mul, base_dims[dim+1:], sympy.Integer(1)))
else:
# should not happen
pass
return offset
```

docstring_tokens:

```json
[
"Return",
"the",
"offset",
"from",
"the",
"iteration",
"center",
"in",
"number",
"of",
"elements",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L277-L298

code_tokens:

```json
[
"def",
"_calculate_relative_offset",
"(",
"self",
",",
"name",
",",
"access_dimensions",
")",
":",
"# TODO to be replaced with compile_global_offsets",
"offset",
"=",
"0",
"base_dims",
"=",
"self",
".",
"variables",
"[",
"name",
"]",
"[",
"1",
"]",
"for",
"dim",
",",
"offset_info",
"in",
"enumerate",
"(",
"access_dimensions",
")",
":",
"offset_type",
",",
"idx_name",
",",
"dim_offset",
"=",
"offset_info",
"assert",
"offset_type",
"==",
"'rel'",
",",
"'Only relative access to arrays is supported at the moment'",
"if",
"offset_type",
"==",
"'rel'",
":",
"offset",
"+=",
"self",
".",
"subs_consts",
"(",
"dim_offset",
"*",
"reduce",
"(",
"operator",
".",
"mul",
",",
"base_dims",
"[",
"dim",
"+",
"1",
":",
"]",
",",
"sympy",
".",
"Integer",
"(",
"1",
")",
")",
")",
"else",
":",
"# should not happen",
"pass",
"return",
"offset"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel._remove_duplicate_accesses
docstring: Remove duplicate source and destination accesses
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def _remove_duplicate_accesses(self):
"""
Remove duplicate source and destination accesses
"""
self.destinations = {var_name: set(acs) for var_name, acs in self.destinations.items()}
self.sources = {var_name: set(acs) for var_name, acs in self.sources.items()}
```

docstring_tokens:

```json
[
"Remove",
"duplicate",
"source",
"and",
"destination",
"accesses"
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L300-L305

code_tokens:

```json
[
"def",
"_remove_duplicate_accesses",
"(",
"self",
")",
":",
"self",
".",
"destinations",
"=",
"{",
"var_name",
":",
"set",
"(",
"acs",
")",
"for",
"var_name",
",",
"acs",
"in",
"self",
".",
"destinations",
".",
"items",
"(",
")",
"}",
"self",
".",
"sources",
"=",
"{",
"var_name",
":",
"set",
"(",
"acs",
")",
"for",
"var_name",
",",
"acs",
"in",
"self",
".",
"sources",
".",
"items",
"(",
")",
"}"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.access_to_sympy
docstring:
  Transform a (multidimensional) variable access to a flattend sympy expression.
  Also works with flat array accesses.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def access_to_sympy(self, var_name, access):
"""
Transform a (multidimensional) variable access to a flattend sympy expression.
Also works with flat array accesses.
"""
base_sizes = self.variables[var_name][1]
expr = sympy.Number(0)
for dimension, a in enumerate(access):
base_size = reduce(operator.mul, base_sizes[dimension+1:], sympy.Integer(1))
expr += base_size*a
return expr
```
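
Row-major flattening: dimension d of the access is weighted by the product of all later array sizes. A standalone sketch of the same arithmetic:

```python
import operator
import sympy
from functools import reduce

M, N = symbol_pos_int('M'), symbol_pos_int('N')
i, j = sympy.symbols('i j', positive=True, integer=True)
sizes, access = (M, N), (i, j)
expr = sum(a * reduce(operator.mul, sizes[d + 1:], sympy.Integer(1))
           for d, a in enumerate(access))
print(expr)  # N*i + j
```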

docstring_tokens:

```json
[
"Transform",
"a",
"(",
"multidimensional",
")",
"variable",
"access",
"to",
"a",
"flattend",
"sympy",
"expression",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L307-L322

code_tokens:

```json
[
"def",
"access_to_sympy",
"(",
"self",
",",
"var_name",
",",
"access",
")",
":",
"base_sizes",
"=",
"self",
".",
"variables",
"[",
"var_name",
"]",
"[",
"1",
"]",
"expr",
"=",
"sympy",
".",
"Number",
"(",
"0",
")",
"for",
"dimension",
",",
"a",
"in",
"enumerate",
"(",
"access",
")",
":",
"base_size",
"=",
"reduce",
"(",
"operator",
".",
"mul",
",",
"base_sizes",
"[",
"dimension",
"+",
"1",
":",
"]",
",",
"sympy",
".",
"Integer",
"(",
"1",
")",
")",
"expr",
"+=",
"base_size",
"*",
"a",
"return",
"expr"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.iteration_length
docstring:
  Return the number of global loop iterations that are performed.
  If dimension is not None, it is the loop dimension that is returned
  (-1 is the inner most loop and 0 the outermost)
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def iteration_length(self, dimension=None):
"""
Return the number of global loop iterations that are performed.
If dimension is not None, it is the loop dimension that is returned
(-1 is the inner most loop and 0 the outermost)
"""
total_length = 1
if dimension is not None:
loops = [self._loop_stack[dimension]]
else:
loops = reversed(self._loop_stack)
for var_name, start, end, incr in loops:
# This unspools the iterations:
length = end-start
total_length = total_length*length
return self.subs_consts(total_length)
```

docstring_tokens:

```json
[
"Return",
"the",
"number",
"of",
"global",
"loop",
"iterations",
"that",
"are",
"performed",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L324-L342

code_tokens:

```json
[
"def",
"iteration_length",
"(",
"self",
",",
"dimension",
"=",
"None",
")",
":",
"total_length",
"=",
"1",
"if",
"dimension",
"is",
"not",
"None",
":",
"loops",
"=",
"[",
"self",
".",
"_loop_stack",
"[",
"dimension",
"]",
"]",
"else",
":",
"loops",
"=",
"reversed",
"(",
"self",
".",
"_loop_stack",
")",
"for",
"var_name",
",",
"start",
",",
"end",
",",
"incr",
"in",
"loops",
":",
"# This unspools the iterations:",
"length",
"=",
"end",
"-",
"start",
"total_length",
"=",
"total_length",
"*",
"length",
"return",
"self",
".",
"subs_consts",
"(",
"total_length",
")"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.get_loop_stack
docstring: Yield loop stack dictionaries in order from outer to inner.
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def get_loop_stack(self, subs_consts=False):
"""Yield loop stack dictionaries in order from outer to inner."""
for l in self._loop_stack:
if subs_consts:
yield {'index': l[0],
'start': self.subs_consts(l[1]),
'stop': self.subs_consts(l[2]),
'increment': self.subs_consts(l[3])}
else:
yield {'index': l[0], 'start': l[1], 'stop': l[2], 'increment': l[3]}
```

docstring_tokens:

```json
[
"Yield",
"loop",
"stack",
"dictionaries",
"in",
"order",
"from",
"outer",
"to",
"inner",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L344-L353

code_tokens:

```json
[
"def",
"get_loop_stack",
"(",
"self",
",",
"subs_consts",
"=",
"False",
")",
":",
"for",
"l",
"in",
"self",
".",
"_loop_stack",
":",
"if",
"subs_consts",
":",
"yield",
"{",
"'index'",
":",
"l",
"[",
"0",
"]",
",",
"'start'",
":",
"self",
".",
"subs_consts",
"(",
"l",
"[",
"1",
"]",
")",
",",
"'stop'",
":",
"self",
".",
"subs_consts",
"(",
"l",
"[",
"2",
"]",
")",
",",
"'increment'",
":",
"self",
".",
"subs_consts",
"(",
"l",
"[",
"3",
"]",
")",
"}",
"else",
":",
"yield",
"{",
"'index'",
":",
"l",
"[",
"0",
"]",
",",
"'start'",
":",
"l",
"[",
"1",
"]",
",",
"'stop'",
":",
"l",
"[",
"2",
"]",
",",
"'increment'",
":",
"l",
"[",
"3",
"]",
"}"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.index_order
docstring:
  Return the order of indices as they appear in array references.
  Use *source* and *destination* to filter output
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def index_order(self, sources=True, destinations=True):
"""
Return the order of indices as they appear in array references.
Use *source* and *destination* to filter output
"""
if sources:
arefs = chain(*self.sources.values())
else:
arefs = []
if destinations:
arefs = chain(arefs, *self.destinations.values())
ret = []
for a in [aref for aref in arefs if aref is not None]:
ref = []
for expr in a:
ref.append(expr.free_symbols)
ret.append(ref)
return ret
```

docstring_tokens:

```json
[
"Return",
"the",
"order",
"of",
"indices",
"as",
"they",
"appear",
"in",
"array",
"references",
"."
]
```

repo: RRZE-HPC/kerncraft
language: python
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L355-L375

code_tokens:

```json
[
"def",
"index_order",
"(",
"self",
",",
"sources",
"=",
"True",
",",
"destinations",
"=",
"True",
")",
":",
"if",
"sources",
":",
"arefs",
"=",
"chain",
"(",
"*",
"self",
".",
"sources",
".",
"values",
"(",
")",
")",
"else",
":",
"arefs",
"=",
"[",
"]",
"if",
"destinations",
":",
"arefs",
"=",
"chain",
"(",
"arefs",
",",
"*",
"self",
".",
"destinations",
".",
"values",
"(",
")",
")",
"ret",
"=",
"[",
"]",
"for",
"a",
"in",
"[",
"aref",
"for",
"aref",
"in",
"arefs",
"if",
"aref",
"is",
"not",
"None",
"]",
":",
"ref",
"=",
"[",
"]",
"for",
"expr",
"in",
"a",
":",
"ref",
".",
"append",
"(",
"expr",
".",
"free_symbols",
")",
"ret",
".",
"append",
"(",
"ref",
")",
"return",
"ret"
]
```

sha: c60baf8043e4da8d8d66da7575021c2f4c6c78af

partition: test
func_name: Kernel.compile_sympy_accesses
docstring:
  Return a dictionary of lists of sympy accesses, for each variable.
  Use *source* and *destination* to filter output
path: kerncraft/kernel.py
original_string / code (identical in this dump):

```python
def compile_sympy_accesses(self, sources=True, destinations=True):
"""
Return a dictionary of lists of sympy accesses, for each variable.
Use *source* and *destination* to filter output
"""
sympy_accesses = defaultdict(list)
# Compile sympy accesses
for var_name in self.variables:
if sources:
for r in self.sources.get(var_name, []):
if r is None:
continue
sympy_accesses[var_name].append(self.access_to_sympy(var_name, r))
if destinations:
for w in self.destinations.get(var_name, []):
if w is None:
continue
sympy_accesses[var_name].append(self.access_to_sympy(var_name, w))
return sympy_accesses
|
def compile_sympy_accesses(self, sources=True, destinations=True):
"""
Return a dictionary of lists of sympy accesses, for each variable.
        Use *sources* and *destinations* to filter output
"""
sympy_accesses = defaultdict(list)
# Compile sympy accesses
for var_name in self.variables:
if sources:
for r in self.sources.get(var_name, []):
if r is None:
continue
sympy_accesses[var_name].append(self.access_to_sympy(var_name, r))
if destinations:
for w in self.destinations.get(var_name, []):
if w is None:
continue
sympy_accesses[var_name].append(self.access_to_sympy(var_name, w))
return sympy_accesses
|
[
"Return",
"a",
"dictionary",
"of",
"lists",
"of",
"sympy",
"accesses",
"for",
"each",
"variable",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L377-L397
|
[
"def",
"compile_sympy_accesses",
"(",
"self",
",",
"sources",
"=",
"True",
",",
"destinations",
"=",
"True",
")",
":",
"sympy_accesses",
"=",
"defaultdict",
"(",
"list",
")",
"# Compile sympy accesses",
"for",
"var_name",
"in",
"self",
".",
"variables",
":",
"if",
"sources",
":",
"for",
"r",
"in",
"self",
".",
"sources",
".",
"get",
"(",
"var_name",
",",
"[",
"]",
")",
":",
"if",
"r",
"is",
"None",
":",
"continue",
"sympy_accesses",
"[",
"var_name",
"]",
".",
"append",
"(",
"self",
".",
"access_to_sympy",
"(",
"var_name",
",",
"r",
")",
")",
"if",
"destinations",
":",
"for",
"w",
"in",
"self",
".",
"destinations",
".",
"get",
"(",
"var_name",
",",
"[",
"]",
")",
":",
"if",
"w",
"is",
"None",
":",
"continue",
"sympy_accesses",
"[",
"var_name",
"]",
".",
"append",
"(",
"self",
".",
"access_to_sympy",
"(",
"var_name",
",",
"w",
")",
")",
"return",
"sympy_accesses"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
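compile_sympy_accesses delegates to access_to_sympy, which is not part of this record; the sketch below substitutes a hypothetical row-major linearization for it, just to show how the accesses are grouped per variable:

import sympy
from collections import defaultdict

i, j, N = sympy.symbols('i j N', positive=True, integer=True)

def access_to_sympy(offsets, row_length):
    # Hypothetical stand-in for Kernel.access_to_sympy: row-major linearization
    expr = sympy.Integer(0)
    for off in offsets:
        expr = expr * row_length + off
    return expr

sources = {'a': [(i, j + 1)], 'b': [(i, j)]}
sympy_accesses = defaultdict(list)
for var_name, refs in sources.items():
    for r in refs:
        sympy_accesses[var_name].append(access_to_sympy(r, N))
print(dict(sympy_accesses))  # {'a': [N*i + j + 1], 'b': [N*i + j]}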
test
|
Kernel.compile_relative_distances
|
Return load and store distances between accesses.
:param sympy_accesses: optionally restrict accesses, default from compile_sympy_accesses()
e.g. if accesses are to [+N, +1, -1, -N], relative distances are [N-1, 2, N-1]
returned is a dict of list of sympy expressions, for each variable
|
kerncraft/kernel.py
|
def compile_relative_distances(self, sympy_accesses=None):
"""
Return load and store distances between accesses.
:param sympy_accesses: optionally restrict accesses, default from compile_sympy_accesses()
e.g. if accesses are to [+N, +1, -1, -N], relative distances are [N-1, 2, N-1]
returned is a dict of list of sympy expressions, for each variable
"""
if sympy_accesses is None:
sympy_accesses = self.compile_sympy_accesses()
sympy_distances = defaultdict(list)
for var_name, accesses in sympy_accesses.items():
for i in range(1, len(accesses)):
sympy_distances[var_name].append((accesses[i-1]-accesses[i]).simplify())
return sympy_distances
|
def compile_relative_distances(self, sympy_accesses=None):
"""
Return load and store distances between accesses.
:param sympy_accesses: optionally restrict accesses, default from compile_sympy_accesses()
e.g. if accesses are to [+N, +1, -1, -N], relative distances are [N-1, 2, N-1]
returned is a dict of list of sympy expressions, for each variable
"""
if sympy_accesses is None:
sympy_accesses = self.compile_sympy_accesses()
sympy_distances = defaultdict(list)
for var_name, accesses in sympy_accesses.items():
for i in range(1, len(accesses)):
sympy_distances[var_name].append((accesses[i-1]-accesses[i]).simplify())
return sympy_distances
|
[
"Return",
"load",
"and",
"store",
"distances",
"between",
"accesses",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L399-L417
|
[
"def",
"compile_relative_distances",
"(",
"self",
",",
"sympy_accesses",
"=",
"None",
")",
":",
"if",
"sympy_accesses",
"is",
"None",
":",
"sympy_accesses",
"=",
"self",
".",
"compile_sympy_accesses",
"(",
")",
"sympy_distances",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"var_name",
",",
"accesses",
"in",
"sympy_accesses",
".",
"items",
"(",
")",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"accesses",
")",
")",
":",
"sympy_distances",
"[",
"var_name",
"]",
".",
"append",
"(",
"(",
"accesses",
"[",
"i",
"-",
"1",
"]",
"-",
"accesses",
"[",
"i",
"]",
")",
".",
"simplify",
"(",
")",
")",
"return",
"sympy_distances"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
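The docstring's own example can be reproduced with a few lines of standalone sympy (accesses at i+N, i+1, i-1 and i-N):

import sympy
from collections import defaultdict

i, N = sympy.symbols('i N', positive=True, integer=True)
sympy_accesses = {'a': [i + N, i + 1, i - 1, i - N]}
sympy_distances = defaultdict(list)
for var_name, accesses in sympy_accesses.items():
    for k in range(1, len(accesses)):
        sympy_distances[var_name].append((accesses[k - 1] - accesses[k]).simplify())
print(sympy_distances['a'])  # [N - 1, 2, N - 1]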
test
|
Kernel.global_iterator_to_indices
|
Return sympy expressions translating global_iterator to loop indices.
If global_iterator is given, an integer is returned
|
kerncraft/kernel.py
|
def global_iterator_to_indices(self, git=None):
"""
Return sympy expressions translating global_iterator to loop indices.
If global_iterator is given, an integer is returned
"""
# unwind global iteration count into loop counters:
base_loop_counters = {}
global_iterator = symbol_pos_int('global_iterator')
idiv = implemented_function(sympy.Function(str('idiv')), lambda x, y: x//y)
total_length = 1
last_incr = 1
for var_name, start, end, incr in reversed(self._loop_stack):
loop_var = symbol_pos_int(var_name)
# This unspools the iterations:
            length = end-start  # FIXME is incr handled correctly here?
counter = start+(idiv(global_iterator*last_incr, total_length)*incr) % length
total_length = total_length*length
last_incr = incr
base_loop_counters[loop_var] = sympy.lambdify(
global_iterator,
self.subs_consts(counter), modules=[numpy, {'Mod': numpy.mod}])
if git is not None:
try: # Try to resolve to integer if global_iterator was given
base_loop_counters[loop_var] = sympy.Integer(self.subs_consts(counter))
continue
except (ValueError, TypeError):
base_loop_counters[loop_var] = base_loop_counters[loop_var](git)
return base_loop_counters
|
def global_iterator_to_indices(self, git=None):
"""
Return sympy expressions translating global_iterator to loop indices.
If global_iterator is given, an integer is returned
"""
# unwind global iteration count into loop counters:
base_loop_counters = {}
global_iterator = symbol_pos_int('global_iterator')
idiv = implemented_function(sympy.Function(str('idiv')), lambda x, y: x//y)
total_length = 1
last_incr = 1
for var_name, start, end, incr in reversed(self._loop_stack):
loop_var = symbol_pos_int(var_name)
# This unspools the iterations:
            length = end-start  # FIXME is incr handled correctly here?
counter = start+(idiv(global_iterator*last_incr, total_length)*incr) % length
total_length = total_length*length
last_incr = incr
base_loop_counters[loop_var] = sympy.lambdify(
global_iterator,
self.subs_consts(counter), modules=[numpy, {'Mod': numpy.mod}])
if git is not None:
try: # Try to resolve to integer if global_iterator was given
base_loop_counters[loop_var] = sympy.Integer(self.subs_consts(counter))
continue
except (ValueError, TypeError):
base_loop_counters[loop_var] = base_loop_counters[loop_var](git)
return base_loop_counters
|
[
"Return",
"sympy",
"expressions",
"translating",
"global_iterator",
"to",
"loop",
"indices",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L419-L451
|
[
"def",
"global_iterator_to_indices",
"(",
"self",
",",
"git",
"=",
"None",
")",
":",
"# unwind global iteration count into loop counters:",
"base_loop_counters",
"=",
"{",
"}",
"global_iterator",
"=",
"symbol_pos_int",
"(",
"'global_iterator'",
")",
"idiv",
"=",
"implemented_function",
"(",
"sympy",
".",
"Function",
"(",
"str",
"(",
"'idiv'",
")",
")",
",",
"lambda",
"x",
",",
"y",
":",
"x",
"//",
"y",
")",
"total_length",
"=",
"1",
"last_incr",
"=",
"1",
"for",
"var_name",
",",
"start",
",",
"end",
",",
"incr",
"in",
"reversed",
"(",
"self",
".",
"_loop_stack",
")",
":",
"loop_var",
"=",
"symbol_pos_int",
"(",
"var_name",
")",
"# This unspools the iterations:",
"length",
"=",
"end",
"-",
"start",
"# FIXME is incr handled correct here?",
"counter",
"=",
"start",
"+",
"(",
"idiv",
"(",
"global_iterator",
"*",
"last_incr",
",",
"total_length",
")",
"*",
"incr",
")",
"%",
"length",
"total_length",
"=",
"total_length",
"*",
"length",
"last_incr",
"=",
"incr",
"base_loop_counters",
"[",
"loop_var",
"]",
"=",
"sympy",
".",
"lambdify",
"(",
"global_iterator",
",",
"self",
".",
"subs_consts",
"(",
"counter",
")",
",",
"modules",
"=",
"[",
"numpy",
",",
"{",
"'Mod'",
":",
"numpy",
".",
"mod",
"}",
"]",
")",
"if",
"git",
"is",
"not",
"None",
":",
"try",
":",
"# Try to resolve to integer if global_iterator was given",
"base_loop_counters",
"[",
"loop_var",
"]",
"=",
"sympy",
".",
"Integer",
"(",
"self",
".",
"subs_consts",
"(",
"counter",
")",
")",
"continue",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"base_loop_counters",
"[",
"loop_var",
"]",
"=",
"base_loop_counters",
"[",
"loop_var",
"]",
"(",
"git",
")",
"return",
"base_loop_counters"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
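The unwinding arithmetic is easiest to follow on a concrete nest. This standalone sketch uses the same idiv/modulo construction on a made-up two-level loop (lambdified against the math module instead of numpy, to stay dependency-light):

import sympy
from sympy.utilities.lambdify import implemented_function

git = sympy.Symbol('global_iterator', positive=True, integer=True)
idiv = implemented_function(sympy.Function('idiv'), lambda x, y: x // y)
loop_stack = [('i', 0, 4, 1), ('j', 0, 3, 1)]  # outer to inner, unit increments
total_length, last_incr, counters = 1, 1, {}
for name, start, end, incr in reversed(loop_stack):
    length = end - start
    counter = start + (idiv(git * last_incr, total_length) * incr) % length
    counters[name] = sympy.lambdify(git, counter, modules=['math'])
    total_length *= length
    last_incr = incr
print(counters['i'](7), counters['j'](7))  # global iteration 7 -> 2 1, i.e. i=2, j=1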
test
|
Kernel.global_iterator
|
Return global iterator sympy expression
|
kerncraft/kernel.py
|
def global_iterator(self):
"""
Return global iterator sympy expression
"""
global_iterator = sympy.Integer(0)
total_length = sympy.Integer(1)
for var_name, start, end, incr in reversed(self._loop_stack):
loop_var = symbol_pos_int(var_name)
            length = end - start  # FIXME is incr handled correctly here?
global_iterator += (loop_var - start) * total_length
total_length *= length
return global_iterator
|
def global_iterator(self):
"""
Return global iterator sympy expression
"""
global_iterator = sympy.Integer(0)
total_length = sympy.Integer(1)
for var_name, start, end, incr in reversed(self._loop_stack):
loop_var = symbol_pos_int(var_name)
            length = end - start  # FIXME is incr handled correctly here?
global_iterator += (loop_var - start) * total_length
total_length *= length
return global_iterator
|
[
"Return",
"global",
"iterator",
"sympy",
"expression"
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L454-L465
|
[
"def",
"global_iterator",
"(",
"self",
")",
":",
"global_iterator",
"=",
"sympy",
".",
"Integer",
"(",
"0",
")",
"total_length",
"=",
"sympy",
".",
"Integer",
"(",
"1",
")",
"for",
"var_name",
",",
"start",
",",
"end",
",",
"incr",
"in",
"reversed",
"(",
"self",
".",
"_loop_stack",
")",
":",
"loop_var",
"=",
"symbol_pos_int",
"(",
"var_name",
")",
"length",
"=",
"end",
"-",
"start",
"# FIXME is incr handled correct here?",
"global_iterator",
"+=",
"(",
"loop_var",
"-",
"start",
")",
"*",
"total_length",
"total_length",
"*=",
"length",
"return",
"global_iterator"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
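For the same made-up two-level nest, the forward mapping yields a closed-form expression; this round-trips the unwinding sketch shown after global_iterator_to_indices above:

import sympy

i, j = sympy.symbols('i j', positive=True, integer=True)
loop_stack = [('i', 0, 4, 1, i), ('j', 0, 3, 1, j)]  # (name, start, end, incr, symbol)
global_iterator = sympy.Integer(0)
total_length = sympy.Integer(1)
for name, start, end, incr, sym in reversed(loop_stack):
    length = end - start
    global_iterator += (sym - start) * total_length
    total_length *= length
print(global_iterator)                     # 3*i + j
print(global_iterator.subs({i: 2, j: 1}))  # 7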
test
|
Kernel.indices_to_global_iterator
|
Transform a dictionary of indices to a global iterator integer.
Inverse of global_iterator_to_indices().
|
kerncraft/kernel.py
|
def indices_to_global_iterator(self, indices):
"""
Transform a dictionary of indices to a global iterator integer.
Inverse of global_iterator_to_indices().
"""
global_iterator = self.subs_consts(self.global_iterator().subs(indices))
return global_iterator
|
def indices_to_global_iterator(self, indices):
"""
Transform a dictionary of indices to a global iterator integer.
Inverse of global_iterator_to_indices().
"""
global_iterator = self.subs_consts(self.global_iterator().subs(indices))
return global_iterator
|
[
"Transform",
"a",
"dictionary",
"of",
"indices",
"to",
"a",
"global",
"iterator",
"integer",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L467-L474
|
[
"def",
"indices_to_global_iterator",
"(",
"self",
",",
"indices",
")",
":",
"global_iterator",
"=",
"self",
".",
"subs_consts",
"(",
"self",
".",
"global_iterator",
"(",
")",
".",
"subs",
"(",
"indices",
")",
")",
"return",
"global_iterator"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
Kernel.max_global_iteration
|
Return global iterator with last iteration number
|
kerncraft/kernel.py
|
def max_global_iteration(self):
"""Return global iterator with last iteration number"""
return self.indices_to_global_iterator({
symbol_pos_int(var_name): end-1 for var_name, start, end, incr in self._loop_stack
})
|
def max_global_iteration(self):
"""Return global iterator with last iteration number"""
return self.indices_to_global_iterator({
symbol_pos_int(var_name): end-1 for var_name, start, end, incr in self._loop_stack
})
|
[
"Return",
"global",
"iterator",
"with",
"last",
"iteration",
"number"
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L476-L480
|
[
"def",
"max_global_iteration",
"(",
"self",
")",
":",
"return",
"self",
".",
"indices_to_global_iterator",
"(",
"{",
"symbol_pos_int",
"(",
"var_name",
")",
":",
"end",
"-",
"1",
"for",
"var_name",
",",
"start",
",",
"end",
",",
"incr",
"in",
"self",
".",
"_loop_stack",
"}",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
Kernel.compile_global_offsets
|
Return load and store offsets on a virtual address space.
:param iteration: controls the inner index counter
:param spacing: sets a spacing between the arrays, default is 0
All array variables (non scalars) are laid out linearly starting from 0. An optional
spacing can be set. The accesses are based on this layout.
The iteration 0 is the first iteration. All loops are mapped to this linear iteration
space.
Accesses to scalars are ignored.
Returned are load and store byte-offset pairs for each iteration.
|
kerncraft/kernel.py
|
def compile_global_offsets(self, iteration=0, spacing=0):
"""
Return load and store offsets on a virtual address space.
:param iteration: controls the inner index counter
:param spacing: sets a spacing between the arrays, default is 0
All array variables (non scalars) are laid out linearly starting from 0. An optional
spacing can be set. The accesses are based on this layout.
The iteration 0 is the first iteration. All loops are mapped to this linear iteration
space.
Accesses to scalars are ignored.
Returned are load and store byte-offset pairs for each iteration.
"""
global_load_offsets = []
global_store_offsets = []
if isinstance(iteration, range):
iteration = numpy.arange(iteration.start, iteration.stop, iteration.step, dtype='O')
else:
if not isinstance(iteration, collections.Sequence):
iteration = [iteration]
iteration = numpy.array(iteration, dtype='O')
# loop indices based on iteration
# unwind global iteration count into loop counters:
base_loop_counters = self.global_iterator_to_indices()
total_length = self.iteration_length()
assert iteration.max() < self.subs_consts(total_length), \
"Iterations go beyond what is possible in the original code ({} vs {}). " \
"One common reason, is that the iteration length are unrealistically small.".format(
iteration.max(), self.subs_consts(total_length))
# Get sizes of arrays and base offsets for each array
var_sizes = self.array_sizes(in_bytes=True, subs_consts=True)
base_offsets = {}
base = 0
# Always arrange arrays in alphabetical order in memory, for reproducibility
for var_name, var_size in sorted(var_sizes.items(), key=lambda v: v[0]):
base_offsets[var_name] = base
array_total_size = self.subs_consts(var_size + spacing)
            # Add bytes to align to 64 bytes (typical cacheline size):
array_total_size = ((int(array_total_size) + 63) & ~63)
base += array_total_size
# Gather all read and write accesses to the array:
for var_name, var_size in var_sizes.items():
element_size = self.datatypes_size[self.variables[var_name][0]]
for r in self.sources.get(var_name, []):
offset_expr = self.access_to_sympy(var_name, r)
# Ignore accesses that always go to the same location (constant offsets)
if not any([s in base_loop_counters.keys() for s in offset_expr.free_symbols]):
continue
offset = force_iterable(sympy.lambdify(
base_loop_counters.keys(),
self.subs_consts(
offset_expr*element_size
+ base_offsets[var_name]), numpy))
# TODO possibly differentiate between index order
global_load_offsets.append(offset)
for w in self.destinations.get(var_name, []):
offset_expr = self.access_to_sympy(var_name, w)
# Ignore accesses that always go to the same location (constant offsets)
if not any([s in base_loop_counters.keys() for s in offset_expr.free_symbols]):
continue
offset = force_iterable(sympy.lambdify(
base_loop_counters.keys(),
self.subs_consts(
offset_expr*element_size
+ base_offsets[var_name]), numpy))
# TODO possibly differentiate between index order
global_store_offsets.append(offset)
# TODO take element sizes into account, return in bytes
# Generate numpy.array for each counter
counter_per_it = [v(iteration) for v in base_loop_counters.values()]
# Old and slow - left for reference
## Data access as they appear with iteration order
#return zip_longest(zip(*[o(*counter_per_it) for o in global_load_offsets]),
# zip(*[o(*counter_per_it) for o in global_store_offsets]),
# fillvalue=None)
# Data access as they appear with iteration order
load_offsets = []
for o in global_load_offsets:
load_offsets.append(o(*counter_per_it))
        # Convert to numpy ndarray and transpose to get offsets per iteration
load_offsets = numpy.asarray(load_offsets).T
store_offsets = []
for o in global_store_offsets:
store_offsets.append(o(*counter_per_it))
store_offsets = numpy.asarray(store_offsets).T
# Combine loads and stores
store_width = store_offsets.shape[1] if len(store_offsets.shape) > 1 else 0
dtype = [('load', load_offsets.dtype, (load_offsets.shape[1],)),
('store', store_offsets.dtype, (store_width,))]
offsets = numpy.empty(max(load_offsets.shape[0], store_offsets.shape[0]), dtype=dtype)
offsets['load'] = load_offsets
offsets['store'] = store_offsets
return offsets
|
def compile_global_offsets(self, iteration=0, spacing=0):
"""
Return load and store offsets on a virtual address space.
:param iteration: controls the inner index counter
:param spacing: sets a spacing between the arrays, default is 0
All array variables (non scalars) are laid out linearly starting from 0. An optional
spacing can be set. The accesses are based on this layout.
The iteration 0 is the first iteration. All loops are mapped to this linear iteration
space.
Accesses to scalars are ignored.
Returned are load and store byte-offset pairs for each iteration.
"""
global_load_offsets = []
global_store_offsets = []
if isinstance(iteration, range):
iteration = numpy.arange(iteration.start, iteration.stop, iteration.step, dtype='O')
else:
if not isinstance(iteration, collections.Sequence):
iteration = [iteration]
iteration = numpy.array(iteration, dtype='O')
# loop indices based on iteration
# unwind global iteration count into loop counters:
base_loop_counters = self.global_iterator_to_indices()
total_length = self.iteration_length()
assert iteration.max() < self.subs_consts(total_length), \
"Iterations go beyond what is possible in the original code ({} vs {}). " \
"One common reason, is that the iteration length are unrealistically small.".format(
iteration.max(), self.subs_consts(total_length))
# Get sizes of arrays and base offsets for each array
var_sizes = self.array_sizes(in_bytes=True, subs_consts=True)
base_offsets = {}
base = 0
# Always arrange arrays in alphabetical order in memory, for reproducibility
for var_name, var_size in sorted(var_sizes.items(), key=lambda v: v[0]):
base_offsets[var_name] = base
array_total_size = self.subs_consts(var_size + spacing)
            # Add bytes to align to 64 bytes (typical cacheline size):
array_total_size = ((int(array_total_size) + 63) & ~63)
base += array_total_size
# Gather all read and write accesses to the array:
for var_name, var_size in var_sizes.items():
element_size = self.datatypes_size[self.variables[var_name][0]]
for r in self.sources.get(var_name, []):
offset_expr = self.access_to_sympy(var_name, r)
# Ignore accesses that always go to the same location (constant offsets)
if not any([s in base_loop_counters.keys() for s in offset_expr.free_symbols]):
continue
offset = force_iterable(sympy.lambdify(
base_loop_counters.keys(),
self.subs_consts(
offset_expr*element_size
+ base_offsets[var_name]), numpy))
# TODO possibly differentiate between index order
global_load_offsets.append(offset)
for w in self.destinations.get(var_name, []):
offset_expr = self.access_to_sympy(var_name, w)
# Ignore accesses that always go to the same location (constant offsets)
if not any([s in base_loop_counters.keys() for s in offset_expr.free_symbols]):
continue
offset = force_iterable(sympy.lambdify(
base_loop_counters.keys(),
self.subs_consts(
offset_expr*element_size
+ base_offsets[var_name]), numpy))
# TODO possibly differentiate between index order
global_store_offsets.append(offset)
# TODO take element sizes into account, return in bytes
# Generate numpy.array for each counter
counter_per_it = [v(iteration) for v in base_loop_counters.values()]
# Old and slow - left for reference
## Data access as they appear with iteration order
#return zip_longest(zip(*[o(*counter_per_it) for o in global_load_offsets]),
# zip(*[o(*counter_per_it) for o in global_store_offsets]),
# fillvalue=None)
# Data access as they appear with iteration order
load_offsets = []
for o in global_load_offsets:
load_offsets.append(o(*counter_per_it))
        # Convert to numpy ndarray and transpose to get offsets per iteration
load_offsets = numpy.asarray(load_offsets).T
store_offsets = []
for o in global_store_offsets:
store_offsets.append(o(*counter_per_it))
store_offsets = numpy.asarray(store_offsets).T
# Combine loads and stores
store_width = store_offsets.shape[1] if len(store_offsets.shape) > 1 else 0
dtype = [('load', load_offsets.dtype, (load_offsets.shape[1],)),
('store', store_offsets.dtype, (store_width,))]
offsets = numpy.empty(max(load_offsets.shape[0], store_offsets.shape[0]), dtype=dtype)
offsets['load'] = load_offsets
offsets['store'] = store_offsets
return offsets
|
[
"Return",
"load",
"and",
"store",
"offsets",
"on",
"a",
"virtual",
"address",
"space",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L482-L589
|
[
"def",
"compile_global_offsets",
"(",
"self",
",",
"iteration",
"=",
"0",
",",
"spacing",
"=",
"0",
")",
":",
"global_load_offsets",
"=",
"[",
"]",
"global_store_offsets",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"iteration",
",",
"range",
")",
":",
"iteration",
"=",
"numpy",
".",
"arange",
"(",
"iteration",
".",
"start",
",",
"iteration",
".",
"stop",
",",
"iteration",
".",
"step",
",",
"dtype",
"=",
"'O'",
")",
"else",
":",
"if",
"not",
"isinstance",
"(",
"iteration",
",",
"collections",
".",
"Sequence",
")",
":",
"iteration",
"=",
"[",
"iteration",
"]",
"iteration",
"=",
"numpy",
".",
"array",
"(",
"iteration",
",",
"dtype",
"=",
"'O'",
")",
"# loop indices based on iteration",
"# unwind global iteration count into loop counters:",
"base_loop_counters",
"=",
"self",
".",
"global_iterator_to_indices",
"(",
")",
"total_length",
"=",
"self",
".",
"iteration_length",
"(",
")",
"assert",
"iteration",
".",
"max",
"(",
")",
"<",
"self",
".",
"subs_consts",
"(",
"total_length",
")",
",",
"\"Iterations go beyond what is possible in the original code ({} vs {}). \"",
"\"One common reason, is that the iteration length are unrealistically small.\"",
".",
"format",
"(",
"iteration",
".",
"max",
"(",
")",
",",
"self",
".",
"subs_consts",
"(",
"total_length",
")",
")",
"# Get sizes of arrays and base offsets for each array",
"var_sizes",
"=",
"self",
".",
"array_sizes",
"(",
"in_bytes",
"=",
"True",
",",
"subs_consts",
"=",
"True",
")",
"base_offsets",
"=",
"{",
"}",
"base",
"=",
"0",
"# Always arrange arrays in alphabetical order in memory, for reproducibility",
"for",
"var_name",
",",
"var_size",
"in",
"sorted",
"(",
"var_sizes",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"v",
":",
"v",
"[",
"0",
"]",
")",
":",
"base_offsets",
"[",
"var_name",
"]",
"=",
"base",
"array_total_size",
"=",
"self",
".",
"subs_consts",
"(",
"var_size",
"+",
"spacing",
")",
"# Add bytes to align by 64 byte (typical cacheline size):",
"array_total_size",
"=",
"(",
"(",
"int",
"(",
"array_total_size",
")",
"+",
"63",
")",
"&",
"~",
"63",
")",
"base",
"+=",
"array_total_size",
"# Gather all read and write accesses to the array:",
"for",
"var_name",
",",
"var_size",
"in",
"var_sizes",
".",
"items",
"(",
")",
":",
"element_size",
"=",
"self",
".",
"datatypes_size",
"[",
"self",
".",
"variables",
"[",
"var_name",
"]",
"[",
"0",
"]",
"]",
"for",
"r",
"in",
"self",
".",
"sources",
".",
"get",
"(",
"var_name",
",",
"[",
"]",
")",
":",
"offset_expr",
"=",
"self",
".",
"access_to_sympy",
"(",
"var_name",
",",
"r",
")",
"# Ignore accesses that always go to the same location (constant offsets)",
"if",
"not",
"any",
"(",
"[",
"s",
"in",
"base_loop_counters",
".",
"keys",
"(",
")",
"for",
"s",
"in",
"offset_expr",
".",
"free_symbols",
"]",
")",
":",
"continue",
"offset",
"=",
"force_iterable",
"(",
"sympy",
".",
"lambdify",
"(",
"base_loop_counters",
".",
"keys",
"(",
")",
",",
"self",
".",
"subs_consts",
"(",
"offset_expr",
"*",
"element_size",
"+",
"base_offsets",
"[",
"var_name",
"]",
")",
",",
"numpy",
")",
")",
"# TODO possibly differentiate between index order",
"global_load_offsets",
".",
"append",
"(",
"offset",
")",
"for",
"w",
"in",
"self",
".",
"destinations",
".",
"get",
"(",
"var_name",
",",
"[",
"]",
")",
":",
"offset_expr",
"=",
"self",
".",
"access_to_sympy",
"(",
"var_name",
",",
"w",
")",
"# Ignore accesses that always go to the same location (constant offsets)",
"if",
"not",
"any",
"(",
"[",
"s",
"in",
"base_loop_counters",
".",
"keys",
"(",
")",
"for",
"s",
"in",
"offset_expr",
".",
"free_symbols",
"]",
")",
":",
"continue",
"offset",
"=",
"force_iterable",
"(",
"sympy",
".",
"lambdify",
"(",
"base_loop_counters",
".",
"keys",
"(",
")",
",",
"self",
".",
"subs_consts",
"(",
"offset_expr",
"*",
"element_size",
"+",
"base_offsets",
"[",
"var_name",
"]",
")",
",",
"numpy",
")",
")",
"# TODO possibly differentiate between index order",
"global_store_offsets",
".",
"append",
"(",
"offset",
")",
"# TODO take element sizes into account, return in bytes",
"# Generate numpy.array for each counter",
"counter_per_it",
"=",
"[",
"v",
"(",
"iteration",
")",
"for",
"v",
"in",
"base_loop_counters",
".",
"values",
"(",
")",
"]",
"# Old and slow - left for reference",
"## Data access as they appear with iteration order",
"#return zip_longest(zip(*[o(*counter_per_it) for o in global_load_offsets]),",
"# zip(*[o(*counter_per_it) for o in global_store_offsets]),",
"# fillvalue=None)",
"# Data access as they appear with iteration order",
"load_offsets",
"=",
"[",
"]",
"for",
"o",
"in",
"global_load_offsets",
":",
"load_offsets",
".",
"append",
"(",
"o",
"(",
"*",
"counter_per_it",
")",
")",
"# Convert to numpy ndarray and transpose to get offsets per iterations",
"load_offsets",
"=",
"numpy",
".",
"asarray",
"(",
"load_offsets",
")",
".",
"T",
"store_offsets",
"=",
"[",
"]",
"for",
"o",
"in",
"global_store_offsets",
":",
"store_offsets",
".",
"append",
"(",
"o",
"(",
"*",
"counter_per_it",
")",
")",
"store_offsets",
"=",
"numpy",
".",
"asarray",
"(",
"store_offsets",
")",
".",
"T",
"# Combine loads and stores",
"store_width",
"=",
"store_offsets",
".",
"shape",
"[",
"1",
"]",
"if",
"len",
"(",
"store_offsets",
".",
"shape",
")",
">",
"1",
"else",
"0",
"dtype",
"=",
"[",
"(",
"'load'",
",",
"load_offsets",
".",
"dtype",
",",
"(",
"load_offsets",
".",
"shape",
"[",
"1",
"]",
",",
")",
")",
",",
"(",
"'store'",
",",
"store_offsets",
".",
"dtype",
",",
"(",
"store_width",
",",
")",
")",
"]",
"offsets",
"=",
"numpy",
".",
"empty",
"(",
"max",
"(",
"load_offsets",
".",
"shape",
"[",
"0",
"]",
",",
"store_offsets",
".",
"shape",
"[",
"0",
"]",
")",
",",
"dtype",
"=",
"dtype",
")",
"offsets",
"[",
"'load'",
"]",
"=",
"load_offsets",
"offsets",
"[",
"'store'",
"]",
"=",
"store_offsets",
"return",
"offsets"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
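The least obvious part of compile_global_offsets is the structured numpy record it returns; this isolated sketch replays the packing from the method's final lines with made-up byte offsets (two loads and one store per iteration, three iterations):

import numpy

load_offsets = numpy.array([[0, 64], [8, 72], [16, 80]])  # shape: (iterations, loads)
store_offsets = numpy.array([[128], [136], [144]])        # shape: (iterations, stores)
store_width = store_offsets.shape[1] if len(store_offsets.shape) > 1 else 0
dtype = [('load', load_offsets.dtype, (load_offsets.shape[1],)),
         ('store', store_offsets.dtype, (store_width,))]
offsets = numpy.empty(max(load_offsets.shape[0], store_offsets.shape[0]), dtype=dtype)
offsets['load'] = load_offsets
offsets['store'] = store_offsets
print(offsets[0])  # ([ 0, 64], [128]) -> load/store byte offsets of iteration 0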
test
|
Kernel.bytes_per_iteration
|
Consecutive bytes written out per high-level iteration (as counted by loop stack).
Is used to compute the number of iterations per cacheline.
|
kerncraft/kernel.py
|
def bytes_per_iteration(self):
"""
        Consecutive bytes written out per high-level iteration (as counted by loop stack).
        Is used to compute the number of iterations per cacheline.
"""
        # TODO Find longest consecutive writes to any variable and use as basis
var_name = list(self.destinations)[0]
var_type = self.variables[var_name][0]
# FIXME this is correct most of the time, but not guaranteed:
# Multiplying datatype size with step increment of inner-most loop
return self.datatypes_size[var_type] * self._loop_stack[-1][3]
|
def bytes_per_iteration(self):
"""
        Consecutive bytes written out per high-level iteration (as counted by loop stack).
        Is used to compute the number of iterations per cacheline.
"""
        # TODO Find longest consecutive writes to any variable and use as basis
var_name = list(self.destinations)[0]
var_type = self.variables[var_name][0]
# FIXME this is correct most of the time, but not guaranteed:
# Multiplying datatype size with step increment of inner-most loop
return self.datatypes_size[var_type] * self._loop_stack[-1][3]
|
[
"Consecutive",
"bytes",
"written",
"out",
"per",
"high",
"-",
"level",
"iterations",
"(",
"as",
"counted",
"by",
"loop",
"stack",
")",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L592-L603
|
[
"def",
"bytes_per_iteration",
"(",
"self",
")",
":",
"# TODO Find longst consecutive writes to any variable and use as basis",
"var_name",
"=",
"list",
"(",
"self",
".",
"destinations",
")",
"[",
"0",
"]",
"var_type",
"=",
"self",
".",
"variables",
"[",
"var_name",
"]",
"[",
"0",
"]",
"# FIXME this is correct most of the time, but not guaranteed:",
"# Multiplying datatype size with step increment of inner-most loop",
"return",
"self",
".",
"datatypes_size",
"[",
"var_type",
"]",
"*",
"self",
".",
"_loop_stack",
"[",
"-",
"1",
"]",
"[",
"3",
"]"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
Kernel.print_kernel_info
|
Print kernel information in human readable format.
|
kerncraft/kernel.py
|
def print_kernel_info(self, output_file=sys.stdout):
"""Print kernel information in human readble format."""
table = (' idx | min max step\n' +
'---------+---------------------------------\n')
for l in self._loop_stack:
table += '{:>8} | {!r:>10} {!r:>10} {!r:>10}\n'.format(*l)
print(prefix_indent('loop stack: ', table), file=output_file)
table = (' name | offsets ...\n' +
'---------+------------...\n')
for name, offsets in list(self.sources.items()):
prefix = '{:>8} | '.format(name)
right_side = '\n'.join(['{!r:}'.format(o) for o in offsets])
table += prefix_indent(prefix, right_side, later_prefix=' | ')
print(prefix_indent('data sources: ', table), file=output_file)
table = (' name | offsets ...\n' +
'---------+------------...\n')
for name, offsets in list(self.destinations.items()):
prefix = '{:>8} | '.format(name)
right_side = '\n'.join(['{!r:}'.format(o) for o in offsets])
table += prefix_indent(prefix, right_side, later_prefix=' | ')
print(prefix_indent('data destinations: ', table), file=output_file)
table = (' op | count \n' +
'----+-------\n')
for op, count in list(self._flops.items()):
table += '{:>3} | {:>4}\n'.format(op, count)
table += ' =======\n'
table += ' {:>4}'.format(sum(self._flops.values()))
print(prefix_indent('FLOPs: ', table), file=output_file)
|
def print_kernel_info(self, output_file=sys.stdout):
"""Print kernel information in human readble format."""
table = (' idx | min max step\n' +
'---------+---------------------------------\n')
for l in self._loop_stack:
table += '{:>8} | {!r:>10} {!r:>10} {!r:>10}\n'.format(*l)
print(prefix_indent('loop stack: ', table), file=output_file)
table = (' name | offsets ...\n' +
'---------+------------...\n')
for name, offsets in list(self.sources.items()):
prefix = '{:>8} | '.format(name)
right_side = '\n'.join(['{!r:}'.format(o) for o in offsets])
table += prefix_indent(prefix, right_side, later_prefix=' | ')
print(prefix_indent('data sources: ', table), file=output_file)
table = (' name | offsets ...\n' +
'---------+------------...\n')
for name, offsets in list(self.destinations.items()):
prefix = '{:>8} | '.format(name)
right_side = '\n'.join(['{!r:}'.format(o) for o in offsets])
table += prefix_indent(prefix, right_side, later_prefix=' | ')
print(prefix_indent('data destinations: ', table), file=output_file)
table = (' op | count \n' +
'----+-------\n')
for op, count in list(self._flops.items()):
table += '{:>3} | {:>4}\n'.format(op, count)
table += ' =======\n'
table += ' {:>4}'.format(sum(self._flops.values()))
print(prefix_indent('FLOPs: ', table), file=output_file)
|
[
"Print",
"kernel",
"information",
"in",
"human",
"readble",
"format",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L605-L635
|
[
"def",
"print_kernel_info",
"(",
"self",
",",
"output_file",
"=",
"sys",
".",
"stdout",
")",
":",
"table",
"=",
"(",
"' idx | min max step\\n'",
"+",
"'---------+---------------------------------\\n'",
")",
"for",
"l",
"in",
"self",
".",
"_loop_stack",
":",
"table",
"+=",
"'{:>8} | {!r:>10} {!r:>10} {!r:>10}\\n'",
".",
"format",
"(",
"*",
"l",
")",
"print",
"(",
"prefix_indent",
"(",
"'loop stack: '",
",",
"table",
")",
",",
"file",
"=",
"output_file",
")",
"table",
"=",
"(",
"' name | offsets ...\\n'",
"+",
"'---------+------------...\\n'",
")",
"for",
"name",
",",
"offsets",
"in",
"list",
"(",
"self",
".",
"sources",
".",
"items",
"(",
")",
")",
":",
"prefix",
"=",
"'{:>8} | '",
".",
"format",
"(",
"name",
")",
"right_side",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"'{!r:}'",
".",
"format",
"(",
"o",
")",
"for",
"o",
"in",
"offsets",
"]",
")",
"table",
"+=",
"prefix_indent",
"(",
"prefix",
",",
"right_side",
",",
"later_prefix",
"=",
"' | '",
")",
"print",
"(",
"prefix_indent",
"(",
"'data sources: '",
",",
"table",
")",
",",
"file",
"=",
"output_file",
")",
"table",
"=",
"(",
"' name | offsets ...\\n'",
"+",
"'---------+------------...\\n'",
")",
"for",
"name",
",",
"offsets",
"in",
"list",
"(",
"self",
".",
"destinations",
".",
"items",
"(",
")",
")",
":",
"prefix",
"=",
"'{:>8} | '",
".",
"format",
"(",
"name",
")",
"right_side",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"'{!r:}'",
".",
"format",
"(",
"o",
")",
"for",
"o",
"in",
"offsets",
"]",
")",
"table",
"+=",
"prefix_indent",
"(",
"prefix",
",",
"right_side",
",",
"later_prefix",
"=",
"' | '",
")",
"print",
"(",
"prefix_indent",
"(",
"'data destinations: '",
",",
"table",
")",
",",
"file",
"=",
"output_file",
")",
"table",
"=",
"(",
"' op | count \\n'",
"+",
"'----+-------\\n'",
")",
"for",
"op",
",",
"count",
"in",
"list",
"(",
"self",
".",
"_flops",
".",
"items",
"(",
")",
")",
":",
"table",
"+=",
"'{:>3} | {:>4}\\n'",
".",
"format",
"(",
"op",
",",
"count",
")",
"table",
"+=",
"' =======\\n'",
"table",
"+=",
"' {:>4}'",
".",
"format",
"(",
"sum",
"(",
"self",
".",
"_flops",
".",
"values",
"(",
")",
")",
")",
"print",
"(",
"prefix_indent",
"(",
"'FLOPs: '",
",",
"table",
")",
",",
"file",
"=",
"output_file",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
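print_kernel_info leans on a prefix_indent helper defined elsewhere in kernel.py and not shown in this record. A plausible minimal version (an assumption for illustration; the real helper may differ) is:

def prefix_indent(prefix, textblock, later_prefix=' '):
    # Prefix the first line; align continuation lines under it
    lines = textblock.split('\n')
    if len(later_prefix) == 1:
        later_prefix = ' ' * len(prefix)
    out = prefix + lines[0] + '\n' + '\n'.join(later_prefix + l for l in lines[1:])
    return out if out.endswith('\n') else out + '\n'

print(prefix_indent('loop stack: ', ' idx | min max step\n---------+----'), end='')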
test
|
Kernel.print_variables_info
|
Print variables information in human readable format.
|
kerncraft/kernel.py
|
def print_variables_info(self, output_file=sys.stdout):
"""Print variables information in human readble format."""
table = (' name | type size \n' +
'---------+-------------------------\n')
for name, var_info in list(self.variables.items()):
table += '{:>8} | {:>6} {!s:<10}\n'.format(name, var_info[0], var_info[1])
print(prefix_indent('variables: ', table), file=output_file)
|
def print_variables_info(self, output_file=sys.stdout):
"""Print variables information in human readble format."""
table = (' name | type size \n' +
'---------+-------------------------\n')
for name, var_info in list(self.variables.items()):
table += '{:>8} | {:>6} {!s:<10}\n'.format(name, var_info[0], var_info[1])
print(prefix_indent('variables: ', table), file=output_file)
|
[
"Print",
"variables",
"information",
"in",
"human",
"readble",
"format",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L637-L643
|
[
"def",
"print_variables_info",
"(",
"self",
",",
"output_file",
"=",
"sys",
".",
"stdout",
")",
":",
"table",
"=",
"(",
"' name | type size \\n'",
"+",
"'---------+-------------------------\\n'",
")",
"for",
"name",
",",
"var_info",
"in",
"list",
"(",
"self",
".",
"variables",
".",
"items",
"(",
")",
")",
":",
"table",
"+=",
"'{:>8} | {:>6} {!s:<10}\\n'",
".",
"format",
"(",
"name",
",",
"var_info",
"[",
"0",
"]",
",",
"var_info",
"[",
"1",
"]",
")",
"print",
"(",
"prefix_indent",
"(",
"'variables: '",
",",
"table",
")",
",",
"file",
"=",
"output_file",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
Kernel.print_constants_info
|
Print constants information in human readable format.
|
kerncraft/kernel.py
|
def print_constants_info(self, output_file=sys.stdout):
"""Print constants information in human readble format."""
table = (' name | value \n' +
'---------+-----------\n')
for name, value in list(self.constants.items()):
table += '{!s:>8} | {:<10}\n'.format(name, value)
print(prefix_indent('constants: ', table), file=output_file)
|
def print_constants_info(self, output_file=sys.stdout):
"""Print constants information in human readble format."""
table = (' name | value \n' +
'---------+-----------\n')
for name, value in list(self.constants.items()):
table += '{!s:>8} | {:<10}\n'.format(name, value)
print(prefix_indent('constants: ', table), file=output_file)
|
[
"Print",
"constants",
"information",
"in",
"human",
"readble",
"format",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L645-L651
|
[
"def",
"print_constants_info",
"(",
"self",
",",
"output_file",
"=",
"sys",
".",
"stdout",
")",
":",
"table",
"=",
"(",
"' name | value \\n'",
"+",
"'---------+-----------\\n'",
")",
"for",
"name",
",",
"value",
"in",
"list",
"(",
"self",
".",
"constants",
".",
"items",
"(",
")",
")",
":",
"table",
"+=",
"'{!s:>8} | {:<10}\\n'",
".",
"format",
"(",
"name",
",",
"value",
")",
"print",
"(",
"prefix_indent",
"(",
"'constants: '",
",",
"table",
")",
",",
"file",
"=",
"output_file",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
KernelCode._get_intermediate_file
|
Create or open intermediate file (may be used for caching).
Will replace files older than kernel file, machine file or kerncraft version.
:param machine_and_compiler_dependent: set to False if file content does not depend on
machine file or compiler settings
:param fp: if False, will only return file name, not file object
:param binary: if True, use binary mode for file access
:return: (file object or file name, boolean indicating whether it already exists and is up-to-date)
|
kerncraft/kernel.py
|
def _get_intermediate_file(self, name, machine_and_compiler_dependent=True, binary=False,
fp=True):
"""
Create or open intermediate file (may be used for caching).
Will replace files older than kernel file, machine file or kerncraft version.
:param machine_and_compiler_dependent: set to False if file content does not depend on
machine file or compiler settings
:param fp: if False, will only return file name, not file object
        :param binary: if True, use binary mode for file access
        :return: (file object or file name, boolean indicating whether it already exists and is up-to-date)
"""
if self._filename:
base_name = os.path.join(os.path.dirname(self._filename),
'.' + os.path.basename(self._filename) + '_kerncraft')
else:
base_name = tempfile.mkdtemp()
if not self._keep_intermediates:
                # Remove directory and all content upon program exit
atexit.register(shutil.rmtree, base_name)
if machine_and_compiler_dependent:
compiler, compiler_args = self._machine.get_compiler()
compiler_args = '_'.join(compiler_args).replace('/', '')
base_name += '/{}/{}/{}/'.format(
self._machine.get_identifier(), compiler, compiler_args)
# Create dirs recursively
os.makedirs(base_name, exist_ok=True)
# Build actual file path
file_path = os.path.join(base_name, name)
already_exists = False
# Check if file exists and is still fresh
if os.path.exists(file_path):
file_modified = datetime.utcfromtimestamp(os.stat(file_path).st_mtime)
if (file_modified < self._machine.get_last_modified_datetime() or
file_modified < kerncraft.get_last_modified_datetime() or
(self._filename and
file_modified < datetime.utcfromtimestamp(os.stat(self._filename).st_mtime))):
os.remove(file_path)
else:
already_exists = True
if fp:
if already_exists:
mode = 'r+'
else:
mode = 'w'
if binary:
mode += 'b'
f = open(file_path, mode)
return f, already_exists
else:
return reduce_path(file_path), already_exists
|
def _get_intermediate_file(self, name, machine_and_compiler_dependent=True, binary=False,
fp=True):
"""
Create or open intermediate file (may be used for caching).
Will replace files older than kernel file, machine file or kerncraft version.
:param machine_and_compiler_dependent: set to False if file content does not depend on
machine file or compiler settings
:param fp: if False, will only return file name, not file object
        :param binary: if True, use binary mode for file access
        :return: (file object or file name, boolean indicating whether it already exists and is up-to-date)
"""
if self._filename:
base_name = os.path.join(os.path.dirname(self._filename),
'.' + os.path.basename(self._filename) + '_kerncraft')
else:
base_name = tempfile.mkdtemp()
if not self._keep_intermediates:
                # Remove directory and all content upon program exit
atexit.register(shutil.rmtree, base_name)
if machine_and_compiler_dependent:
compiler, compiler_args = self._machine.get_compiler()
compiler_args = '_'.join(compiler_args).replace('/', '')
base_name += '/{}/{}/{}/'.format(
self._machine.get_identifier(), compiler, compiler_args)
# Create dirs recursively
os.makedirs(base_name, exist_ok=True)
# Build actual file path
file_path = os.path.join(base_name, name)
already_exists = False
# Check if file exists and is still fresh
if os.path.exists(file_path):
file_modified = datetime.utcfromtimestamp(os.stat(file_path).st_mtime)
if (file_modified < self._machine.get_last_modified_datetime() or
file_modified < kerncraft.get_last_modified_datetime() or
(self._filename and
file_modified < datetime.utcfromtimestamp(os.stat(self._filename).st_mtime))):
os.remove(file_path)
else:
already_exists = True
if fp:
if already_exists:
mode = 'r+'
else:
mode = 'w'
if binary:
mode += 'b'
f = open(file_path, mode)
return f, already_exists
else:
return reduce_path(file_path), already_exists
|
[
"Create",
"or",
"open",
"intermediate",
"file",
"(",
"may",
"be",
"used",
"for",
"caching",
")",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L701-L760
|
[
"def",
"_get_intermediate_file",
"(",
"self",
",",
"name",
",",
"machine_and_compiler_dependent",
"=",
"True",
",",
"binary",
"=",
"False",
",",
"fp",
"=",
"True",
")",
":",
"if",
"self",
".",
"_filename",
":",
"base_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"_filename",
")",
",",
"'.'",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"_filename",
")",
"+",
"'_kerncraft'",
")",
"else",
":",
"base_name",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"if",
"not",
"self",
".",
"_keep_intermediates",
":",
"# Remove directory and all content up on program exit",
"atexit",
".",
"register",
"(",
"shutil",
".",
"rmtree",
",",
"base_name",
")",
"if",
"machine_and_compiler_dependent",
":",
"compiler",
",",
"compiler_args",
"=",
"self",
".",
"_machine",
".",
"get_compiler",
"(",
")",
"compiler_args",
"=",
"'_'",
".",
"join",
"(",
"compiler_args",
")",
".",
"replace",
"(",
"'/'",
",",
"''",
")",
"base_name",
"+=",
"'/{}/{}/{}/'",
".",
"format",
"(",
"self",
".",
"_machine",
".",
"get_identifier",
"(",
")",
",",
"compiler",
",",
"compiler_args",
")",
"# Create dirs recursively",
"os",
".",
"makedirs",
"(",
"base_name",
",",
"exist_ok",
"=",
"True",
")",
"# Build actual file path",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_name",
",",
"name",
")",
"already_exists",
"=",
"False",
"# Check if file exists and is still fresh",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"file_modified",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"os",
".",
"stat",
"(",
"file_path",
")",
".",
"st_mtime",
")",
"if",
"(",
"file_modified",
"<",
"self",
".",
"_machine",
".",
"get_last_modified_datetime",
"(",
")",
"or",
"file_modified",
"<",
"kerncraft",
".",
"get_last_modified_datetime",
"(",
")",
"or",
"(",
"self",
".",
"_filename",
"and",
"file_modified",
"<",
"datetime",
".",
"utcfromtimestamp",
"(",
"os",
".",
"stat",
"(",
"self",
".",
"_filename",
")",
".",
"st_mtime",
")",
")",
")",
":",
"os",
".",
"remove",
"(",
"file_path",
")",
"else",
":",
"already_exists",
"=",
"True",
"if",
"fp",
":",
"if",
"already_exists",
":",
"mode",
"=",
"'r+'",
"else",
":",
"mode",
"=",
"'w'",
"if",
"binary",
":",
"mode",
"+=",
"'b'",
"f",
"=",
"open",
"(",
"file_path",
",",
"mode",
")",
"return",
"f",
",",
"already_exists",
"else",
":",
"return",
"reduce_path",
"(",
"file_path",
")",
",",
"already_exists"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
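The freshness test buried in the middle of _get_intermediate_file generalizes to a small reusable pattern; a hedged sketch (the helper name and example paths are made up):

import os
from datetime import datetime

def is_stale(cache_path, *dependency_paths):
    # A cached file is stale if it is absent or older than any dependency
    # (for kerncraft: the kernel file, the machine file, the tool itself)
    if not os.path.exists(cache_path):
        return True
    cache_mtime = datetime.utcfromtimestamp(os.stat(cache_path).st_mtime)
    return any(datetime.utcfromtimestamp(os.stat(d).st_mtime) > cache_mtime
               for d in dependency_paths)

# e.g.: if is_stale('.kernel.c_kerncraft/kernel.s', 'kernel.c', 'machine.yml'): rebuild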
test
|
KernelCode.print_kernel_code
|
Print source code of kernel.
|
kerncraft/kernel.py
|
def print_kernel_code(self, output_file=sys.stdout):
"""Print source code of kernel."""
print(self.kernel_code, file=output_file)
|
def print_kernel_code(self, output_file=sys.stdout):
"""Print source code of kernel."""
print(self.kernel_code, file=output_file)
|
[
"Print",
"source",
"code",
"of",
"kernel",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L772-L774
|
[
"def",
"print_kernel_code",
"(",
"self",
",",
"output_file",
"=",
"sys",
".",
"stdout",
")",
":",
"print",
"(",
"self",
".",
"kernel_code",
",",
"file",
"=",
"output_file",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
KernelCode.conv_ast_to_sym
|
Convert mathematical expressions to a sympy representation.
May only contain parentheses, addition, subtraction and multiplication from the AST.
|
kerncraft/kernel.py
|
def conv_ast_to_sym(self, math_ast):
"""
Convert mathematical expressions to a sympy representation.
        May only contain parentheses, addition, subtraction and multiplication from the AST.
"""
if type(math_ast) is c_ast.ID:
return symbol_pos_int(math_ast.name)
elif type(math_ast) is c_ast.Constant:
return sympy.Integer(math_ast.value)
else: # elif type(dim) is c_ast.BinaryOp:
op = {
'*': operator.mul,
'+': operator.add,
'-': operator.sub
}
return op[math_ast.op](
self.conv_ast_to_sym(math_ast.left),
self.conv_ast_to_sym(math_ast.right))
|
def conv_ast_to_sym(self, math_ast):
"""
Convert mathematical expressions to a sympy representation.
        May only contain parentheses, addition, subtraction and multiplication from the AST.
"""
if type(math_ast) is c_ast.ID:
return symbol_pos_int(math_ast.name)
elif type(math_ast) is c_ast.Constant:
return sympy.Integer(math_ast.value)
else: # elif type(dim) is c_ast.BinaryOp:
op = {
'*': operator.mul,
'+': operator.add,
'-': operator.sub
}
return op[math_ast.op](
self.conv_ast_to_sym(math_ast.left),
self.conv_ast_to_sym(math_ast.right))
|
[
"Convert",
"mathematical",
"expressions",
"to",
"a",
"sympy",
"representation",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L852-L871
|
[
"def",
"conv_ast_to_sym",
"(",
"self",
",",
"math_ast",
")",
":",
"if",
"type",
"(",
"math_ast",
")",
"is",
"c_ast",
".",
"ID",
":",
"return",
"symbol_pos_int",
"(",
"math_ast",
".",
"name",
")",
"elif",
"type",
"(",
"math_ast",
")",
"is",
"c_ast",
".",
"Constant",
":",
"return",
"sympy",
".",
"Integer",
"(",
"math_ast",
".",
"value",
")",
"else",
":",
"# elif type(dim) is c_ast.BinaryOp:",
"op",
"=",
"{",
"'*'",
":",
"operator",
".",
"mul",
",",
"'+'",
":",
"operator",
".",
"add",
",",
"'-'",
":",
"operator",
".",
"sub",
"}",
"return",
"op",
"[",
"math_ast",
".",
"op",
"]",
"(",
"self",
".",
"conv_ast_to_sym",
"(",
"math_ast",
".",
"left",
")",
",",
"self",
".",
"conv_ast_to_sym",
"(",
"math_ast",
".",
"right",
")",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
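The mapping can be exercised end to end with pycparser on a tiny translation unit; plain sympy.Symbol stands in for kerncraft's symbol_pos_int:

import operator
import sympy
from pycparser import CParser, c_ast

def conv_ast_to_sym(node):
    # Same ID/Constant/BinaryOp mapping as the method above
    if type(node) is c_ast.ID:
        return sympy.Symbol(node.name, positive=True, integer=True)
    elif type(node) is c_ast.Constant:
        return sympy.Integer(node.value)
    else:
        op = {'*': operator.mul, '+': operator.add, '-': operator.sub}
        return op[node.op](conv_ast_to_sym(node.left), conv_ast_to_sym(node.right))

ast = CParser().parse('int f(int N, int i) { return 2*(N + i) - 3; }')
expr = ast.ext[0].body.block_items[0].expr  # the return expression
print(conv_ast_to_sym(expr))  # 2*N + 2*i - 3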
test
|
KernelCode._get_offsets
|
Return a tuple of offsets of an ArrayRef object in all dimensions.
The subscripts are returned left to right (C source order).
e.g. c[i+1][j-2] -> (i+1, j-2)
If aref is actually a c_ast.ID, None will be returned.
|
kerncraft/kernel.py
|
def _get_offsets(self, aref, dim=0):
"""
Return a tuple of offsets of an ArrayRef object in all dimensions.
        The subscripts are returned left to right (C source order).
        e.g. c[i+1][j-2] -> (i+1, j-2)
If aref is actually a c_ast.ID, None will be returned.
"""
if isinstance(aref, c_ast.ID):
return None
# Check for restrictions
assert type(aref.name) in [c_ast.ArrayRef, c_ast.ID], \
"array references must only be used with variables or other array references"
assert type(aref.subscript) in [c_ast.ID, c_ast.Constant, c_ast.BinaryOp], \
'array subscript must only contain variables or binary operations'
# Convert subscript to sympy and append
idxs = [self.conv_ast_to_sym(aref.subscript)]
# Check for more indices (multi-dimensional access)
if type(aref.name) is c_ast.ArrayRef:
idxs += self._get_offsets(aref.name, dim=dim+1)
        # Reverse to preserve order (the subscripts in the AST are traversed backwards)
if dim == 0:
idxs.reverse()
return tuple(idxs)
|
def _get_offsets(self, aref, dim=0):
"""
Return a tuple of offsets of an ArrayRef object in all dimensions.
        The subscripts are returned left to right (C source order).
        e.g. c[i+1][j-2] -> (i+1, j-2)
If aref is actually a c_ast.ID, None will be returned.
"""
if isinstance(aref, c_ast.ID):
return None
# Check for restrictions
assert type(aref.name) in [c_ast.ArrayRef, c_ast.ID], \
"array references must only be used with variables or other array references"
assert type(aref.subscript) in [c_ast.ID, c_ast.Constant, c_ast.BinaryOp], \
'array subscript must only contain variables or binary operations'
# Convert subscript to sympy and append
idxs = [self.conv_ast_to_sym(aref.subscript)]
# Check for more indices (multi-dimensional access)
if type(aref.name) is c_ast.ArrayRef:
idxs += self._get_offsets(aref.name, dim=dim+1)
        # Reverse to preserve order (the subscripts in the AST are traversed backwards)
if dim == 0:
idxs.reverse()
return tuple(idxs)
|
[
"Return",
"a",
"tuple",
"of",
"offsets",
"of",
"an",
"ArrayRef",
"object",
"in",
"all",
"dimensions",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L873-L902
|
[
"def",
"_get_offsets",
"(",
"self",
",",
"aref",
",",
"dim",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"aref",
",",
"c_ast",
".",
"ID",
")",
":",
"return",
"None",
"# Check for restrictions",
"assert",
"type",
"(",
"aref",
".",
"name",
")",
"in",
"[",
"c_ast",
".",
"ArrayRef",
",",
"c_ast",
".",
"ID",
"]",
",",
"\"array references must only be used with variables or other array references\"",
"assert",
"type",
"(",
"aref",
".",
"subscript",
")",
"in",
"[",
"c_ast",
".",
"ID",
",",
"c_ast",
".",
"Constant",
",",
"c_ast",
".",
"BinaryOp",
"]",
",",
"'array subscript must only contain variables or binary operations'",
"# Convert subscript to sympy and append",
"idxs",
"=",
"[",
"self",
".",
"conv_ast_to_sym",
"(",
"aref",
".",
"subscript",
")",
"]",
"# Check for more indices (multi-dimensional access)",
"if",
"type",
"(",
"aref",
".",
"name",
")",
"is",
"c_ast",
".",
"ArrayRef",
":",
"idxs",
"+=",
"self",
".",
"_get_offsets",
"(",
"aref",
".",
"name",
",",
"dim",
"=",
"dim",
"+",
"1",
")",
"# Reverse to preserver order (the subscripts in the AST are traversed backwards)",
"if",
"dim",
"==",
"0",
":",
"idxs",
".",
"reverse",
"(",
")",
"return",
"tuple",
"(",
"idxs",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
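The recursion is easiest to see on a parsed two-dimensional reference. The sketch below is a simplified standalone variant (subscripts restricted to +, - and * of IDs and constants); after the final reverse the tuple follows C source order:

import sympy
from pycparser import CParser, c_ast

def get_offsets(aref):
    # Simplified _get_offsets: collect subscripts inner node first, reverse at top
    def to_sym(n):
        if type(n) is c_ast.ID:
            return sympy.Symbol(n.name, integer=True)
        if type(n) is c_ast.Constant:
            return sympy.Integer(n.value)
        ops = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
               '*': lambda a, b: a * b}
        return ops[n.op](to_sym(n.left), to_sym(n.right))
    idxs = [to_sym(aref.subscript)]
    if type(aref.name) is c_ast.ArrayRef:
        idxs += get_offsets(aref.name)
    return idxs

ast = CParser().parse('void f(int i, int j, int **c) { c[i+1][j-2]; }')
aref = ast.ext[0].body.block_items[0]  # the ArrayRef expression statement
print(tuple(reversed(get_offsets(aref))))  # (i + 1, j - 2)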
test
|
KernelCode._get_basename
|
Return base name of ArrayRef object.
e.g. c[i+1][j-2] -> 'c'
|
kerncraft/kernel.py
|
def _get_basename(cls, aref):
"""
Return base name of ArrayRef object.
e.g. c[i+1][j-2] -> 'c'
"""
if isinstance(aref.name, c_ast.ArrayRef):
return cls._get_basename(aref.name)
elif isinstance(aref.name, str):
return aref.name
else:
return aref.name.name
|
def _get_basename(cls, aref):
"""
Return base name of ArrayRef object.
e.g. c[i+1][j-2] -> 'c'
"""
if isinstance(aref.name, c_ast.ArrayRef):
return cls._get_basename(aref.name)
elif isinstance(aref.name, str):
return aref.name
else:
return aref.name.name
|
[
"Return",
"base",
"name",
"of",
"ArrayRef",
"object",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L905-L916
|
[
"def",
"_get_basename",
"(",
"cls",
",",
"aref",
")",
":",
"if",
"isinstance",
"(",
"aref",
".",
"name",
",",
"c_ast",
".",
"ArrayRef",
")",
":",
"return",
"cls",
".",
"_get_basename",
"(",
"aref",
".",
"name",
")",
"elif",
"isinstance",
"(",
"aref",
".",
"name",
",",
"str",
")",
":",
"return",
"aref",
".",
"name",
"else",
":",
"return",
"aref",
".",
"name",
".",
"name"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
KernelCode.get_index_type
|
Return index type used in loop nest.
If index types between loops differ, an exception is raised.
|
kerncraft/kernel.py
|
def get_index_type(self, loop_nest=None):
"""
Return index type used in loop nest.
        If index types between loops differ, an exception is raised.
"""
if loop_nest is None:
loop_nest = self.get_kernel_loop_nest()
if type(loop_nest) is c_ast.For:
loop_nest = [loop_nest]
index_types = (None, None)
for s in loop_nest:
if type(s) is c_ast.For:
if type(s.stmt) in [c_ast.For, c_ast.Compound]:
other = self.get_index_type(loop_nest=s.stmt)
else:
other = None
index_types = (s.init.decls[0].type.type.names, other)
break
if index_types[0] == index_types[1] or index_types[1] is None:
return index_types[0]
else:
raise ValueError("Loop indices must have same type, found {}.".format(index_types))
|
def get_index_type(self, loop_nest=None):
"""
Return index type used in loop nest.
        If index types between loops differ, an exception is raised.
"""
if loop_nest is None:
loop_nest = self.get_kernel_loop_nest()
if type(loop_nest) is c_ast.For:
loop_nest = [loop_nest]
index_types = (None, None)
for s in loop_nest:
if type(s) is c_ast.For:
if type(s.stmt) in [c_ast.For, c_ast.Compound]:
other = self.get_index_type(loop_nest=s.stmt)
else:
other = None
index_types = (s.init.decls[0].type.type.names, other)
break
if index_types[0] == index_types[1] or index_types[1] is None:
return index_types[0]
else:
raise ValueError("Loop indices must have same type, found {}.".format(index_types))
|
[
"Return",
"index",
"type",
"used",
"in",
"loop",
"nest",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1051-L1073
|
[
"def",
"get_index_type",
"(",
"self",
",",
"loop_nest",
"=",
"None",
")",
":",
"if",
"loop_nest",
"is",
"None",
":",
"loop_nest",
"=",
"self",
".",
"get_kernel_loop_nest",
"(",
")",
"if",
"type",
"(",
"loop_nest",
")",
"is",
"c_ast",
".",
"For",
":",
"loop_nest",
"=",
"[",
"loop_nest",
"]",
"index_types",
"=",
"(",
"None",
",",
"None",
")",
"for",
"s",
"in",
"loop_nest",
":",
"if",
"type",
"(",
"s",
")",
"is",
"c_ast",
".",
"For",
":",
"if",
"type",
"(",
"s",
".",
"stmt",
")",
"in",
"[",
"c_ast",
".",
"For",
",",
"c_ast",
".",
"Compound",
"]",
":",
"other",
"=",
"self",
".",
"get_index_type",
"(",
"loop_nest",
"=",
"s",
".",
"stmt",
")",
"else",
":",
"other",
"=",
"None",
"index_types",
"=",
"(",
"s",
".",
"init",
".",
"decls",
"[",
"0",
"]",
".",
"type",
".",
"type",
".",
"names",
",",
"other",
")",
"break",
"if",
"index_types",
"[",
"0",
"]",
"==",
"index_types",
"[",
"1",
"]",
"or",
"index_types",
"[",
"1",
"]",
"is",
"None",
":",
"return",
"index_types",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Loop indices must have same type, found {}.\"",
".",
"format",
"(",
"index_types",
")",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
KernelCode._build_const_declartions
|
Generate constants declarations
:return: list of declarations
|
kerncraft/kernel.py
|
def _build_const_declartions(self, with_init=True):
"""
Generate constants declarations
:return: list of declarations
"""
decls = []
# Use type as provided by user in loop indices
index_type = self.get_index_type()
i = 2 # subscript for cli input, 1 is reserved for repeat
for k in self.constants:
# const long long N = strtoul(argv[2])
# with increasing N and 1
# TODO change subscript of argv depending on constant count
type_decl = c_ast.TypeDecl(k.name, ['const'], c_ast.IdentifierType(index_type))
init = None
if with_init:
init = c_ast.FuncCall(
c_ast.ID('atoi'),
c_ast.ExprList([c_ast.ArrayRef(c_ast.ID('argv'),
c_ast.Constant('int', str(i)))]))
i += 1
decls.append(c_ast.Decl(
k.name, ['const'], [], [],
type_decl, init, None))
return decls
|
def _build_const_declartions(self, with_init=True):
"""
    Generate constant declarations
:return: list of declarations
"""
decls = []
# Use type as provided by user in loop indices
index_type = self.get_index_type()
i = 2 # subscript for cli input, 1 is reserved for repeat
for k in self.constants:
# const long long N = strtoul(argv[2])
# with increasing N and 1
# TODO change subscript of argv depending on constant count
type_decl = c_ast.TypeDecl(k.name, ['const'], c_ast.IdentifierType(index_type))
init = None
if with_init:
init = c_ast.FuncCall(
c_ast.ID('atoi'),
c_ast.ExprList([c_ast.ArrayRef(c_ast.ID('argv'),
c_ast.Constant('int', str(i)))]))
i += 1
decls.append(c_ast.Decl(
k.name, ['const'], [], [],
type_decl, init, None))
return decls
|
[
"Generate",
"constants",
"declarations"
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1075-L1103
|
[
"def",
"_build_const_declartions",
"(",
"self",
",",
"with_init",
"=",
"True",
")",
":",
"decls",
"=",
"[",
"]",
"# Use type as provided by user in loop indices",
"index_type",
"=",
"self",
".",
"get_index_type",
"(",
")",
"i",
"=",
"2",
"# subscript for cli input, 1 is reserved for repeat",
"for",
"k",
"in",
"self",
".",
"constants",
":",
"# const long long N = strtoul(argv[2])",
"# with increasing N and 1",
"# TODO change subscript of argv depending on constant count",
"type_decl",
"=",
"c_ast",
".",
"TypeDecl",
"(",
"k",
".",
"name",
",",
"[",
"'const'",
"]",
",",
"c_ast",
".",
"IdentifierType",
"(",
"index_type",
")",
")",
"init",
"=",
"None",
"if",
"with_init",
":",
"init",
"=",
"c_ast",
".",
"FuncCall",
"(",
"c_ast",
".",
"ID",
"(",
"'atoi'",
")",
",",
"c_ast",
".",
"ExprList",
"(",
"[",
"c_ast",
".",
"ArrayRef",
"(",
"c_ast",
".",
"ID",
"(",
"'argv'",
")",
",",
"c_ast",
".",
"Constant",
"(",
"'int'",
",",
"str",
"(",
"i",
")",
")",
")",
"]",
")",
")",
"i",
"+=",
"1",
"decls",
".",
"append",
"(",
"c_ast",
".",
"Decl",
"(",
"k",
".",
"name",
",",
"[",
"'const'",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"type_decl",
",",
"init",
",",
"None",
")",
")",
"return",
"decls"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
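Note: to see what one generated declaration looks like, here is a sketch that mirrors the AST construction above for a single hypothetical constant N; it uses the same positional pycparser signatures as the dataset code (older API without the align argument).

from pycparser import c_ast
from pycparser.c_generator import CGenerator

type_decl = c_ast.TypeDecl('N', ['const'], c_ast.IdentifierType(['long', 'long']))
init = c_ast.FuncCall(
    c_ast.ID('atoi'),
    c_ast.ExprList([c_ast.ArrayRef(c_ast.ID('argv'), c_ast.Constant('int', '2'))]))
decl = c_ast.Decl('N', ['const'], [], [], type_decl, init, None)
print(CGenerator().visit(decl))  # -> const long long N = atoi(argv[2])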
test
|
KernelCode.get_array_declarations
|
Return array declarations.
|
kerncraft/kernel.py
|
def get_array_declarations(self):
"""Return array declarations."""
return [d for d in self.kernel_ast.block_items
if type(d) is c_ast.Decl and type(d.type) is c_ast.ArrayDecl]
|
def get_array_declarations(self):
"""Return array declarations."""
return [d for d in self.kernel_ast.block_items
if type(d) is c_ast.Decl and type(d.type) is c_ast.ArrayDecl]
|
[
"Return",
"array",
"declarations",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1105-L1108
|
[
"def",
"get_array_declarations",
"(",
"self",
")",
":",
"return",
"[",
"d",
"for",
"d",
"in",
"self",
".",
"kernel_ast",
".",
"block_items",
"if",
"type",
"(",
"d",
")",
"is",
"c_ast",
".",
"Decl",
"and",
"type",
"(",
"d",
".",
"type",
")",
"is",
"c_ast",
".",
"ArrayDecl",
"]"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
KernelCode.get_kernel_loop_nest
|
Return kernel loop nest including any preceding pragmas and following swaps.
|
kerncraft/kernel.py
|
def get_kernel_loop_nest(self):
"""Return kernel loop nest including any preceding pragmas and following swaps."""
loop_nest = [s for s in self.kernel_ast.block_items
if type(s) in [c_ast.For, c_ast.Pragma, c_ast.FuncCall]]
    assert len(loop_nest) >= 1, "Found too few for statements in kernel"
return loop_nest
|
def get_kernel_loop_nest(self):
"""Return kernel loop nest including any preceding pragmas and following swaps."""
loop_nest = [s for s in self.kernel_ast.block_items
if type(s) in [c_ast.For, c_ast.Pragma, c_ast.FuncCall]]
    assert len(loop_nest) >= 1, "Found too few for statements in kernel"
return loop_nest
|
[
"Return",
"kernel",
"loop",
"nest",
"including",
"any",
"preceding",
"pragmas",
"and",
"following",
"swaps",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1110-L1115
|
[
"def",
"get_kernel_loop_nest",
"(",
"self",
")",
":",
"loop_nest",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"kernel_ast",
".",
"block_items",
"if",
"type",
"(",
"s",
")",
"in",
"[",
"c_ast",
".",
"For",
",",
"c_ast",
".",
"Pragma",
",",
"c_ast",
".",
"FuncCall",
"]",
"]",
"assert",
"len",
"(",
"loop_nest",
")",
">=",
"1",
",",
"\"Found to few for statements in kernel\"",
"return",
"loop_nest"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
KernelCode._build_array_declarations
|
Generate declaration statements for arrays.
Also transforms multi-dim to 1d arrays and initializes them with malloc.
:param with_init: if False, omit the malloc initialization
:return: list of declaration nodes, dictionary of array names and original dimensions
|
kerncraft/kernel.py
|
def _build_array_declarations(self, with_init=True):
"""
Generate declaration statements for arrays.
    Also transforms multi-dim to 1d arrays and initializes them with malloc.
    :param with_init: if False, omit the malloc initialization
    :return: list of declaration nodes, dictionary of array names and original dimensions
"""
    # copy array declarations from kernel ast
array_declarations = deepcopy(self.get_array_declarations())
array_dict = []
for d in array_declarations:
# We need to transform
array_dict.append(transform_multidim_to_1d_decl(d))
transform_array_decl_to_malloc(d, with_init=with_init)
return array_declarations, dict(array_dict)
|
def _build_array_declarations(self, with_init=True):
"""
Generate declaration statements for arrays.
    Also transforms multi-dim to 1d arrays and initializes them with malloc.
    :param with_init: if False, omit the malloc initialization
    :return: list of declaration nodes, dictionary of array names and original dimensions
"""
    # copy array declarations from kernel ast
array_declarations = deepcopy(self.get_array_declarations())
array_dict = []
for d in array_declarations:
# We need to transform
array_dict.append(transform_multidim_to_1d_decl(d))
transform_array_decl_to_malloc(d, with_init=with_init)
return array_declarations, dict(array_dict)
|
[
"Generate",
"declaration",
"statements",
"for",
"arrays",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1117-L1134
|
[
"def",
"_build_array_declarations",
"(",
"self",
",",
"with_init",
"=",
"True",
")",
":",
"# copy array declarations from from kernel ast",
"array_declarations",
"=",
"deepcopy",
"(",
"self",
".",
"get_array_declarations",
"(",
")",
")",
"array_dict",
"=",
"[",
"]",
"for",
"d",
"in",
"array_declarations",
":",
"# We need to transform",
"array_dict",
".",
"append",
"(",
"transform_multidim_to_1d_decl",
"(",
"d",
")",
")",
"transform_array_decl_to_malloc",
"(",
"d",
",",
"with_init",
"=",
"with_init",
")",
"return",
"array_declarations",
",",
"dict",
"(",
"array_dict",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
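Note: the multi-dimension-to-1d transformation delegated to transform_multidim_to_1d_decl boils down to row-major index linearization; the following is a conceptual sketch, not the actual kerncraft helper.

def flatten_index(indices, dims):
    """Row-major linearization: (i, j) with dims (N, M) maps to i*M + j."""
    flat = 0
    for idx, dim in zip(indices, dims):
        flat = flat * dim + idx
    return flat

# a[2][3] in a double a[5][7] array becomes a[2*7 + 3] == a[17]
assert flatten_index((2, 3), (5, 7)) == 17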
test
|
KernelCode._find_inner_most_loop
|
Return innermost for loop in loop nest
|
kerncraft/kernel.py
|
def _find_inner_most_loop(self, loop_nest):
"""Return inner most for loop in loop nest"""
r = None
for s in loop_nest:
if type(s) is c_ast.For:
return self._find_inner_most_loop(s) or s
else:
r = r or self._find_inner_most_loop(s)
return r
|
def _find_inner_most_loop(self, loop_nest):
"""Return inner most for loop in loop nest"""
r = None
for s in loop_nest:
if type(s) is c_ast.For:
return self._find_inner_most_loop(s) or s
else:
r = r or self._find_inner_most_loop(s)
return r
|
[
"Return",
"inner",
"most",
"for",
"loop",
"in",
"loop",
"nest"
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1136-L1144
|
[
"def",
"_find_inner_most_loop",
"(",
"self",
",",
"loop_nest",
")",
":",
"r",
"=",
"None",
"for",
"s",
"in",
"loop_nest",
":",
"if",
"type",
"(",
"s",
")",
"is",
"c_ast",
".",
"For",
":",
"return",
"self",
".",
"_find_inner_most_loop",
"(",
"s",
")",
"or",
"s",
"else",
":",
"r",
"=",
"r",
"or",
"self",
".",
"_find_inner_most_loop",
"(",
"s",
")",
"return",
"r"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
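Note: a standalone sketch of the same depth-first search, written against pycparser's Node.children() (which every AST node provides); the kernel source is hypothetical.

from pycparser import c_parser, c_ast

src = """
void f(double *a, int N, int M) {
    for (int i = 0; i < N; i++)
        for (int j = 0; j < M; j++)
            a[i*M + j] = 0.0;
}
"""
ast = c_parser.CParser().parse(src)

def innermost(node):
    # Depth-first: the deepest c_ast.For wins.
    found = None
    for _, child in node.children():
        if isinstance(child, c_ast.For):
            return innermost(child) or child
        found = found or innermost(child)
    return found

print(innermost(ast).init.decls[0].name)  # -> j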
test
|
KernelCode._build_array_initializations
|
Generate initialization statements for arrays.
:param array_dimensions: dictionary of array dimensions
:return: list of nodes
|
kerncraft/kernel.py
|
def _build_array_initializations(self, array_dimensions):
"""
Generate initialization statements for arrays.
:param array_dimensions: dictionary of array dimensions
:return: list of nodes
"""
kernel = deepcopy(deepcopy(self.get_kernel_loop_nest()))
# traverse to the inner most for loop:
inner_most = self._find_inner_most_loop(kernel)
orig_inner_stmt = inner_most.stmt
inner_most.stmt = c_ast.Compound([])
rand_float_str = str(random.uniform(1.0, 0.1))
# find all array references in original orig_inner_stmt
for aref in find_node_type(orig_inner_stmt, c_ast.ArrayRef):
# transform to 1d references
transform_multidim_to_1d_ref(aref, array_dimensions)
# build static assignments and inject into inner_most.stmt
inner_most.stmt.block_items.append(c_ast.Assignment(
'=', aref, c_ast.Constant('float', rand_float_str)))
return kernel
|
def _build_array_initializations(self, array_dimensions):
"""
Generate initialization statements for arrays.
:param array_dimensions: dictionary of array dimensions
:return: list of nodes
"""
kernel = deepcopy(deepcopy(self.get_kernel_loop_nest()))
# traverse to the inner most for loop:
inner_most = self._find_inner_most_loop(kernel)
orig_inner_stmt = inner_most.stmt
inner_most.stmt = c_ast.Compound([])
rand_float_str = str(random.uniform(1.0, 0.1))
# find all array references in original orig_inner_stmt
for aref in find_node_type(orig_inner_stmt, c_ast.ArrayRef):
# transform to 1d references
transform_multidim_to_1d_ref(aref, array_dimensions)
# build static assignments and inject into inner_most.stmt
inner_most.stmt.block_items.append(c_ast.Assignment(
'=', aref, c_ast.Constant('float', rand_float_str)))
return kernel
|
[
"Generate",
"initialization",
"statements",
"for",
"arrays",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1146-L1170
|
[
"def",
"_build_array_initializations",
"(",
"self",
",",
"array_dimensions",
")",
":",
"kernel",
"=",
"deepcopy",
"(",
"deepcopy",
"(",
"self",
".",
"get_kernel_loop_nest",
"(",
")",
")",
")",
"# traverse to the inner most for loop:",
"inner_most",
"=",
"self",
".",
"_find_inner_most_loop",
"(",
"kernel",
")",
"orig_inner_stmt",
"=",
"inner_most",
".",
"stmt",
"inner_most",
".",
"stmt",
"=",
"c_ast",
".",
"Compound",
"(",
"[",
"]",
")",
"rand_float_str",
"=",
"str",
"(",
"random",
".",
"uniform",
"(",
"1.0",
",",
"0.1",
")",
")",
"# find all array references in original orig_inner_stmt",
"for",
"aref",
"in",
"find_node_type",
"(",
"orig_inner_stmt",
",",
"c_ast",
".",
"ArrayRef",
")",
":",
"# transform to 1d references",
"transform_multidim_to_1d_ref",
"(",
"aref",
",",
"array_dimensions",
")",
"# build static assignments and inject into inner_most.stmt",
"inner_most",
".",
"stmt",
".",
"block_items",
".",
"append",
"(",
"c_ast",
".",
"Assignment",
"(",
"'='",
",",
"aref",
",",
"c_ast",
".",
"Constant",
"(",
"'float'",
",",
"rand_float_str",
")",
")",
")",
"return",
"kernel"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
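Note: the injected per-element initialization amounts to appending an assignment to a fresh compound block; below is a reduced sketch with a hand-built 1d reference (the names a and i are illustrative). Incidentally, random.uniform accepts its bounds in either order, so uniform(1.0, 0.1) above is valid and yields a value between 0.1 and 1.0.

from pycparser import c_ast
from pycparser.c_generator import CGenerator

aref = c_ast.ArrayRef(c_ast.ID('a'), c_ast.ID('i'))
body = c_ast.Compound([])
body.block_items.append(
    c_ast.Assignment('=', aref, c_ast.Constant('float', '0.23')))
print(CGenerator().visit(body))  # prints a block containing: a[i] = 0.23;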
test
|
KernelCode._build_dummy_calls
|
Generate false if branch with dummy calls
Requires kerncraft.h to be included, which defines dummy(...) and var_false.
:return: dummy statement
|
kerncraft/kernel.py
|
def _build_dummy_calls(self):
"""
Generate false if branch with dummy calls
Requires kerncraft.h to be included, which defines dummy(...) and var_false.
:return: dummy statement
"""
# Make sure nothing gets removed by inserting dummy calls
dummy_calls = []
for d in self.kernel_ast.block_items:
# Only consider toplevel declarations from kernel ast
if type(d) is not c_ast.Decl: continue
if type(d.type) is c_ast.ArrayDecl:
dummy_calls.append(c_ast.FuncCall(
c_ast.ID('dummy'),
c_ast.ExprList([c_ast.ID(d.name)])))
else:
dummy_calls.append(c_ast.FuncCall(
c_ast.ID('dummy'),
c_ast.ExprList([c_ast.UnaryOp('&', c_ast.ID(d.name))])))
dummy_stmt = c_ast.If(
cond=c_ast.ID('var_false'),
iftrue=c_ast.Compound(dummy_calls),
iffalse=None)
return dummy_stmt
|
def _build_dummy_calls(self):
"""
Generate false if branch with dummy calls
Requires kerncraft.h to be included, which defines dummy(...) and var_false.
:return: dummy statement
"""
# Make sure nothing gets removed by inserting dummy calls
dummy_calls = []
for d in self.kernel_ast.block_items:
# Only consider toplevel declarations from kernel ast
if type(d) is not c_ast.Decl: continue
if type(d.type) is c_ast.ArrayDecl:
dummy_calls.append(c_ast.FuncCall(
c_ast.ID('dummy'),
c_ast.ExprList([c_ast.ID(d.name)])))
else:
dummy_calls.append(c_ast.FuncCall(
c_ast.ID('dummy'),
c_ast.ExprList([c_ast.UnaryOp('&', c_ast.ID(d.name))])))
dummy_stmt = c_ast.If(
cond=c_ast.ID('var_false'),
iftrue=c_ast.Compound(dummy_calls),
iffalse=None)
return dummy_stmt
|
[
"Generate",
"false",
"if",
"branch",
"with",
"dummy",
"calls"
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1172-L1197
|
[
"def",
"_build_dummy_calls",
"(",
"self",
")",
":",
"# Make sure nothing gets removed by inserting dummy calls",
"dummy_calls",
"=",
"[",
"]",
"for",
"d",
"in",
"self",
".",
"kernel_ast",
".",
"block_items",
":",
"# Only consider toplevel declarations from kernel ast",
"if",
"type",
"(",
"d",
")",
"is",
"not",
"c_ast",
".",
"Decl",
":",
"continue",
"if",
"type",
"(",
"d",
".",
"type",
")",
"is",
"c_ast",
".",
"ArrayDecl",
":",
"dummy_calls",
".",
"append",
"(",
"c_ast",
".",
"FuncCall",
"(",
"c_ast",
".",
"ID",
"(",
"'dummy'",
")",
",",
"c_ast",
".",
"ExprList",
"(",
"[",
"c_ast",
".",
"ID",
"(",
"d",
".",
"name",
")",
"]",
")",
")",
")",
"else",
":",
"dummy_calls",
".",
"append",
"(",
"c_ast",
".",
"FuncCall",
"(",
"c_ast",
".",
"ID",
"(",
"'dummy'",
")",
",",
"c_ast",
".",
"ExprList",
"(",
"[",
"c_ast",
".",
"UnaryOp",
"(",
"'&'",
",",
"c_ast",
".",
"ID",
"(",
"d",
".",
"name",
")",
")",
"]",
")",
")",
")",
"dummy_stmt",
"=",
"c_ast",
".",
"If",
"(",
"cond",
"=",
"c_ast",
".",
"ID",
"(",
"'var_false'",
")",
",",
"iftrue",
"=",
"c_ast",
".",
"Compound",
"(",
"dummy_calls",
")",
",",
"iffalse",
"=",
"None",
")",
"return",
"dummy_stmt"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
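Note: for illustration, building the same guard for two hypothetical variables (an array a and a scalar s) and printing the resulting C.

from pycparser import c_ast
from pycparser.c_generator import CGenerator

dummy_calls = [
    c_ast.FuncCall(c_ast.ID('dummy'), c_ast.ExprList([c_ast.ID('a')])),
    c_ast.FuncCall(c_ast.ID('dummy'),
                   c_ast.ExprList([c_ast.UnaryOp('&', c_ast.ID('s'))])),
]
guard = c_ast.If(cond=c_ast.ID('var_false'),
                 iftrue=c_ast.Compound(dummy_calls), iffalse=None)
print(CGenerator().visit(guard))
# if (var_false) { dummy(a); dummy(&s); }  (modulo line breaks)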
test
|
KernelCode._build_kernel_function_declaration
|
Build and return kernel function declaration
|
kerncraft/kernel.py
|
def _build_kernel_function_declaration(self, name='kernel'):
"""Build and return kernel function declaration"""
array_declarations, array_dimensions = self._build_array_declarations(with_init=False)
scalar_declarations = self._build_scalar_declarations(with_init=False)
const_declarations = self._build_const_declartions(with_init=False)
return c_ast.FuncDecl(args=c_ast.ParamList(params=array_declarations + scalar_declarations +
const_declarations),
type=c_ast.TypeDecl(declname=name,
quals=[],
type=c_ast.IdentifierType(names=['void'])))
|
def _build_kernel_function_declaration(self, name='kernel'):
"""Build and return kernel function declaration"""
array_declarations, array_dimensions = self._build_array_declarations(with_init=False)
scalar_declarations = self._build_scalar_declarations(with_init=False)
const_declarations = self._build_const_declartions(with_init=False)
return c_ast.FuncDecl(args=c_ast.ParamList(params=array_declarations + scalar_declarations +
const_declarations),
type=c_ast.TypeDecl(declname=name,
quals=[],
type=c_ast.IdentifierType(names=['void'])))
|
[
"Build",
"and",
"return",
"kernel",
"function",
"declaration"
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1199-L1208
|
[
"def",
"_build_kernel_function_declaration",
"(",
"self",
",",
"name",
"=",
"'kernel'",
")",
":",
"array_declarations",
",",
"array_dimensions",
"=",
"self",
".",
"_build_array_declarations",
"(",
"with_init",
"=",
"False",
")",
"scalar_declarations",
"=",
"self",
".",
"_build_scalar_declarations",
"(",
"with_init",
"=",
"False",
")",
"const_declarations",
"=",
"self",
".",
"_build_const_declartions",
"(",
"with_init",
"=",
"False",
")",
"return",
"c_ast",
".",
"FuncDecl",
"(",
"args",
"=",
"c_ast",
".",
"ParamList",
"(",
"params",
"=",
"array_declarations",
"+",
"scalar_declarations",
"+",
"const_declarations",
")",
",",
"type",
"=",
"c_ast",
".",
"TypeDecl",
"(",
"declname",
"=",
"name",
",",
"quals",
"=",
"[",
"]",
",",
"type",
"=",
"c_ast",
".",
"IdentifierType",
"(",
"names",
"=",
"[",
"'void'",
"]",
")",
")",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
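Note: a trimmed-down sketch of the same FuncDecl construction with a single hypothetical parameter, to show the C prototype that falls out; it uses the older positional pycparser signatures, matching the dataset code.

from pycparser import c_ast
from pycparser.c_generator import CGenerator

param = c_ast.Decl('N', ['const'], [], [],
                   c_ast.TypeDecl('N', ['const'], c_ast.IdentifierType(['long'])),
                   None, None)
fdecl = c_ast.FuncDecl(args=c_ast.ParamList(params=[param]),
                       type=c_ast.TypeDecl(declname='kernel', quals=[],
                                           type=c_ast.IdentifierType(names=['void'])))
decl = c_ast.Decl('kernel', [], [], [], fdecl, None, None)
print(CGenerator().visit(decl))  # -> void kernel(const long N)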
test
|
KernelCode._build_scalar_declarations
|
Build and return scalar variable declarations
|
kerncraft/kernel.py
|
def _build_scalar_declarations(self, with_init=True):
"""Build and return scalar variable declarations"""
    # copy scalar declarations from kernel ast
scalar_declarations = [deepcopy(d) for d in self.kernel_ast.block_items
if type(d) is c_ast.Decl and type(d.type) is c_ast.TypeDecl]
# add init values to declarations
if with_init:
random.seed(2342) # we want reproducible random numbers
for d in scalar_declarations:
if d.type.type.names[0] in ['double', 'float']:
d.init = c_ast.Constant('float', str(random.uniform(1.0, 0.1)))
elif d.type.type.names[0] in ['int', 'long', 'long long',
'unsigned int', 'unsigned long', 'unsigned long long']:
                d.init = c_ast.Constant('int', '2')
return scalar_declarations
|
def _build_scalar_declarations(self, with_init=True):
"""Build and return scalar variable declarations"""
    # copy scalar declarations from kernel ast
scalar_declarations = [deepcopy(d) for d in self.kernel_ast.block_items
if type(d) is c_ast.Decl and type(d.type) is c_ast.TypeDecl]
# add init values to declarations
if with_init:
random.seed(2342) # we want reproducible random numbers
for d in scalar_declarations:
if d.type.type.names[0] in ['double', 'float']:
d.init = c_ast.Constant('float', str(random.uniform(1.0, 0.1)))
elif d.type.type.names[0] in ['int', 'long', 'long long',
'unsigned int', 'unsigned long', 'unsigned long long']:
                d.init = c_ast.Constant('int', '2')
return scalar_declarations
|
[
"Build",
"and",
"return",
"scalar",
"variable",
"declarations"
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1210-L1225
|
[
"def",
"_build_scalar_declarations",
"(",
"self",
",",
"with_init",
"=",
"True",
")",
":",
"# copy scalar declarations from from kernel ast",
"scalar_declarations",
"=",
"[",
"deepcopy",
"(",
"d",
")",
"for",
"d",
"in",
"self",
".",
"kernel_ast",
".",
"block_items",
"if",
"type",
"(",
"d",
")",
"is",
"c_ast",
".",
"Decl",
"and",
"type",
"(",
"d",
".",
"type",
")",
"is",
"c_ast",
".",
"TypeDecl",
"]",
"# add init values to declarations",
"if",
"with_init",
":",
"random",
".",
"seed",
"(",
"2342",
")",
"# we want reproducible random numbers",
"for",
"d",
"in",
"scalar_declarations",
":",
"if",
"d",
".",
"type",
".",
"type",
".",
"names",
"[",
"0",
"]",
"in",
"[",
"'double'",
",",
"'float'",
"]",
":",
"d",
".",
"init",
"=",
"c_ast",
".",
"Constant",
"(",
"'float'",
",",
"str",
"(",
"random",
".",
"uniform",
"(",
"1.0",
",",
"0.1",
")",
")",
")",
"elif",
"d",
".",
"type",
".",
"type",
".",
"names",
"[",
"0",
"]",
"in",
"[",
"'int'",
",",
"'long'",
",",
"'long long'",
",",
"'unsigned int'",
",",
"'unsigned long'",
",",
"'unsigned long long'",
"]",
":",
"d",
".",
"init",
"=",
"c_ast",
".",
"Constant",
"(",
"'int'",
",",
"2",
")",
"return",
"scalar_declarations"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
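Note: because the seed is fixed, the generated initial values are reproducible across runs; a quick standalone check of that property.

import random

# The fixed seed makes scalar init values identical across invocations.
random.seed(2342)
first = random.uniform(1.0, 0.1)
random.seed(2342)
assert first == random.uniform(1.0, 0.1)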
test
|
KernelCode.get_kernel_code
|
Generate and return compilable source code with kernel function from AST.
:param openmp: if true, OpenMP code will be generated
:param as_filename: if true, will save to file and return filename
:param name: name of kernel function
|
kerncraft/kernel.py
|
def get_kernel_code(self, openmp=False, as_filename=False, name='kernel'):
"""
Generate and return compilable source code with kernel function from AST.
:param openmp: if true, OpenMP code will be generated
:param as_filename: if true, will save to file and return filename
:param name: name of kernel function
"""
assert self.kernel_ast is not None, "AST does not exist, this could be due to running " \
"based on a kernel description rather than code."
file_name = 'kernel'
if openmp:
file_name += '-omp'
file_name += '.c'
fp, already_available = self._get_intermediate_file(
file_name, machine_and_compiler_dependent=False)
# Use already cached version
if already_available:
code = fp.read()
else:
array_declarations, array_dimensions = self._build_array_declarations()
# Prepare actual kernel loop nest
if openmp:
# with OpenMP code
kernel = deepcopy(self.get_kernel_loop_nest())
# find all array references in kernel
for aref in find_node_type(kernel, c_ast.ArrayRef):
# transform to 1d references
transform_multidim_to_1d_ref(aref, array_dimensions)
omp_pragmas = [p for p in find_node_type(kernel, c_ast.Pragma)
if 'omp' in p.string]
# TODO if omp parallel was found, remove it (also replace "parallel for" -> "for")
# if no omp for pragmas are present, insert suitable ones
if not omp_pragmas:
kernel.insert(0, c_ast.Pragma("omp for"))
# otherwise do not change anything
else:
# with original code
kernel = deepcopy(self.get_kernel_loop_nest())
# find all array references in kernel
for aref in find_node_type(kernel, c_ast.ArrayRef):
# transform to 1d references
transform_multidim_to_1d_ref(aref, array_dimensions)
function_ast = c_ast.FuncDef(decl=c_ast.Decl(
name=name, type=self._build_kernel_function_declaration(name=name), quals=[],
storage=[], funcspec=[], init=None, bitsize=None),
body=c_ast.Compound(block_items=kernel),
param_decls=None)
# Generate code
code = CGenerator().visit(function_ast)
# Insert missing #includes from template to top of code
code = '#include "kerncraft.h"\n\n' + code
# Store to file
fp.write(code)
fp.close()
if as_filename:
return fp.name
else:
return code
|
def get_kernel_code(self, openmp=False, as_filename=False, name='kernel'):
"""
Generate and return compilable source code with kernel function from AST.
:param openmp: if true, OpenMP code will be generated
:param as_filename: if true, will save to file and return filename
:param name: name of kernel function
"""
assert self.kernel_ast is not None, "AST does not exist, this could be due to running " \
"based on a kernel description rather than code."
file_name = 'kernel'
if openmp:
file_name += '-omp'
file_name += '.c'
fp, already_available = self._get_intermediate_file(
file_name, machine_and_compiler_dependent=False)
# Use already cached version
if already_available:
code = fp.read()
else:
array_declarations, array_dimensions = self._build_array_declarations()
# Prepare actual kernel loop nest
if openmp:
# with OpenMP code
kernel = deepcopy(self.get_kernel_loop_nest())
# find all array references in kernel
for aref in find_node_type(kernel, c_ast.ArrayRef):
# transform to 1d references
transform_multidim_to_1d_ref(aref, array_dimensions)
omp_pragmas = [p for p in find_node_type(kernel, c_ast.Pragma)
if 'omp' in p.string]
# TODO if omp parallel was found, remove it (also replace "parallel for" -> "for")
# if no omp for pragmas are present, insert suitable ones
if not omp_pragmas:
kernel.insert(0, c_ast.Pragma("omp for"))
# otherwise do not change anything
else:
# with original code
kernel = deepcopy(self.get_kernel_loop_nest())
# find all array references in kernel
for aref in find_node_type(kernel, c_ast.ArrayRef):
# transform to 1d references
transform_multidim_to_1d_ref(aref, array_dimensions)
function_ast = c_ast.FuncDef(decl=c_ast.Decl(
name=name, type=self._build_kernel_function_declaration(name=name), quals=[],
storage=[], funcspec=[], init=None, bitsize=None),
body=c_ast.Compound(block_items=kernel),
param_decls=None)
# Generate code
code = CGenerator().visit(function_ast)
# Insert missing #includes from template to top of code
code = '#include "kerncraft.h"\n\n' + code
# Store to file
fp.write(code)
fp.close()
if as_filename:
return fp.name
else:
return code
|
[
"Generate",
"and",
"return",
"compilable",
"source",
"code",
"with",
"kernel",
"function",
"from",
"AST",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1228-L1294
|
[
"def",
"get_kernel_code",
"(",
"self",
",",
"openmp",
"=",
"False",
",",
"as_filename",
"=",
"False",
",",
"name",
"=",
"'kernel'",
")",
":",
"assert",
"self",
".",
"kernel_ast",
"is",
"not",
"None",
",",
"\"AST does not exist, this could be due to running \"",
"\"based on a kernel description rather than code.\"",
"file_name",
"=",
"'kernel'",
"if",
"openmp",
":",
"file_name",
"+=",
"'-omp'",
"file_name",
"+=",
"'.c'",
"fp",
",",
"already_available",
"=",
"self",
".",
"_get_intermediate_file",
"(",
"file_name",
",",
"machine_and_compiler_dependent",
"=",
"False",
")",
"# Use already cached version",
"if",
"already_available",
":",
"code",
"=",
"fp",
".",
"read",
"(",
")",
"else",
":",
"array_declarations",
",",
"array_dimensions",
"=",
"self",
".",
"_build_array_declarations",
"(",
")",
"# Prepare actual kernel loop nest",
"if",
"openmp",
":",
"# with OpenMP code",
"kernel",
"=",
"deepcopy",
"(",
"self",
".",
"get_kernel_loop_nest",
"(",
")",
")",
"# find all array references in kernel",
"for",
"aref",
"in",
"find_node_type",
"(",
"kernel",
",",
"c_ast",
".",
"ArrayRef",
")",
":",
"# transform to 1d references",
"transform_multidim_to_1d_ref",
"(",
"aref",
",",
"array_dimensions",
")",
"omp_pragmas",
"=",
"[",
"p",
"for",
"p",
"in",
"find_node_type",
"(",
"kernel",
",",
"c_ast",
".",
"Pragma",
")",
"if",
"'omp'",
"in",
"p",
".",
"string",
"]",
"# TODO if omp parallel was found, remove it (also replace \"parallel for\" -> \"for\")",
"# if no omp for pragmas are present, insert suitable ones",
"if",
"not",
"omp_pragmas",
":",
"kernel",
".",
"insert",
"(",
"0",
",",
"c_ast",
".",
"Pragma",
"(",
"\"omp for\"",
")",
")",
"# otherwise do not change anything",
"else",
":",
"# with original code",
"kernel",
"=",
"deepcopy",
"(",
"self",
".",
"get_kernel_loop_nest",
"(",
")",
")",
"# find all array references in kernel",
"for",
"aref",
"in",
"find_node_type",
"(",
"kernel",
",",
"c_ast",
".",
"ArrayRef",
")",
":",
"# transform to 1d references",
"transform_multidim_to_1d_ref",
"(",
"aref",
",",
"array_dimensions",
")",
"function_ast",
"=",
"c_ast",
".",
"FuncDef",
"(",
"decl",
"=",
"c_ast",
".",
"Decl",
"(",
"name",
"=",
"name",
",",
"type",
"=",
"self",
".",
"_build_kernel_function_declaration",
"(",
"name",
"=",
"name",
")",
",",
"quals",
"=",
"[",
"]",
",",
"storage",
"=",
"[",
"]",
",",
"funcspec",
"=",
"[",
"]",
",",
"init",
"=",
"None",
",",
"bitsize",
"=",
"None",
")",
",",
"body",
"=",
"c_ast",
".",
"Compound",
"(",
"block_items",
"=",
"kernel",
")",
",",
"param_decls",
"=",
"None",
")",
"# Generate code",
"code",
"=",
"CGenerator",
"(",
")",
".",
"visit",
"(",
"function_ast",
")",
"# Insert missing #includes from template to top of code",
"code",
"=",
"'#include \"kerncraft.h\"\\n\\n'",
"+",
"code",
"# Store to file",
"fp",
".",
"write",
"(",
"code",
")",
"fp",
".",
"close",
"(",
")",
"if",
"as_filename",
":",
"return",
"fp",
".",
"name",
"else",
":",
"return",
"code"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
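Note: the OpenMP branch only inserts a pragma when none is present; below is a simplified stand-in for the find_node_type scan over an already-flat statement list (the nodes are hand-built).

from pycparser import c_ast

kernel = [c_ast.Pragma('omp parallel for'),
          c_ast.For(None, None, None, c_ast.Compound([]))]
omp_pragmas = [p for p in kernel
               if isinstance(p, c_ast.Pragma) and 'omp' in p.string]
if not omp_pragmas:
    kernel.insert(0, c_ast.Pragma('omp for'))
print(len(omp_pragmas))  # -> 1, so no extra pragma is inserted here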
test
|
KernelCode._build_kernel_call
|
Generate and return kernel call ast.
|
kerncraft/kernel.py
|
def _build_kernel_call(self, name='kernel'):
"""Generate and return kernel call ast."""
return c_ast.FuncCall(name=c_ast.ID(name=name), args=c_ast.ExprList(exprs=[
c_ast.ID(name=d.name) for d in (
self._build_array_declarations()[0] +
self._build_scalar_declarations() +
self._build_const_declartions())]))
|
def _build_kernel_call(self, name='kernel'):
"""Generate and return kernel call ast."""
return c_ast.FuncCall(name=c_ast.ID(name=name), args=c_ast.ExprList(exprs=[
c_ast.ID(name=d.name) for d in (
self._build_array_declarations()[0] +
self._build_scalar_declarations() +
self._build_const_declartions())]))
|
[
"Generate",
"and",
"return",
"kernel",
"call",
"ast",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1296-L1302
|
[
"def",
"_build_kernel_call",
"(",
"self",
",",
"name",
"=",
"'kernel'",
")",
":",
"return",
"c_ast",
".",
"FuncCall",
"(",
"name",
"=",
"c_ast",
".",
"ID",
"(",
"name",
"=",
"name",
")",
",",
"args",
"=",
"c_ast",
".",
"ExprList",
"(",
"exprs",
"=",
"[",
"c_ast",
".",
"ID",
"(",
"name",
"=",
"d",
".",
"name",
")",
"for",
"d",
"in",
"(",
"self",
".",
"_build_array_declarations",
"(",
")",
"[",
"0",
"]",
"+",
"self",
".",
"_build_scalar_declarations",
"(",
")",
"+",
"self",
".",
"_build_const_declartions",
"(",
")",
")",
"]",
")",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
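Note: printing such a call node shows the shape of the generated invocation; the argument names here are placeholders.

from pycparser import c_ast
from pycparser.c_generator import CGenerator

call = c_ast.FuncCall(
    name=c_ast.ID(name='kernel'),
    args=c_ast.ExprList(exprs=[c_ast.ID(name=n) for n in ('a', 'b', 's', 'N')]))
print(CGenerator().visit(call))  # -> kernel(a, b, s, N)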
test
|
KernelCode.get_main_code
|
Generate and return compilable source code from AST.
|
kerncraft/kernel.py
|
def get_main_code(self, as_filename=False, kernel_function_name='kernel'):
"""
Generate and return compilable source code from AST.
"""
# TODO produce nicer code, including help text and other "comfort features".
assert self.kernel_ast is not None, "AST does not exist, this could be due to running " \
"based on a kernel description rather than code."
fp, already_available = self._get_intermediate_file('main.c',
machine_and_compiler_dependent=False)
# Use already cached version
if already_available:
code = fp.read()
else:
parser = CParser()
template_code = self.CODE_TEMPLATE
template_ast = parser.parse(clean_code(template_code,
macros=True, comments=True, pragmas=False))
ast = deepcopy(template_ast)
# Define and replace DECLARE_CONSTS
replace_id(ast, "DECLARE_CONSTS", self._build_const_declartions(with_init=True))
# Define and replace DECLARE_ARRAYS
array_declarations, array_dimensions = self._build_array_declarations()
replace_id(ast, "DECLARE_ARRAYS", array_declarations)
# Define and replace DECLARE_INIT_SCALARS
replace_id(ast, "DECLARE_INIT_SCALARS", self._build_scalar_declarations())
# Define and replace DUMMY_CALLS
replace_id(ast, "DUMMY_CALLS", self._build_dummy_calls())
# Define and replace KERNEL_DECL
ast.ext.insert(0, self._build_kernel_function_declaration(
name=kernel_function_name))
# Define and replace KERNEL_CALL
replace_id(ast, "KERNEL_CALL", self._build_kernel_call())
# Define and replace INIT_ARRAYS based on previously generated kernel
replace_id(ast, "INIT_ARRAYS", self._build_array_initializations(array_dimensions))
# Generate code
code = CGenerator().visit(ast)
# Insert missing #includes from template to top of code
code = '\n'.join([l for l in template_code.split('\n') if l.startswith("#include")]) + \
'\n\n' + code
# Store to file
fp.write(code)
fp.close()
if as_filename:
return fp.name
else:
return code
|
def get_main_code(self, as_filename=False, kernel_function_name='kernel'):
"""
Generate and return compilable source code from AST.
"""
# TODO produce nicer code, including help text and other "comfort features".
assert self.kernel_ast is not None, "AST does not exist, this could be due to running " \
"based on a kernel description rather than code."
fp, already_available = self._get_intermediate_file('main.c',
machine_and_compiler_dependent=False)
# Use already cached version
if already_available:
code = fp.read()
else:
parser = CParser()
template_code = self.CODE_TEMPLATE
template_ast = parser.parse(clean_code(template_code,
macros=True, comments=True, pragmas=False))
ast = deepcopy(template_ast)
# Define and replace DECLARE_CONSTS
replace_id(ast, "DECLARE_CONSTS", self._build_const_declartions(with_init=True))
# Define and replace DECLARE_ARRAYS
array_declarations, array_dimensions = self._build_array_declarations()
replace_id(ast, "DECLARE_ARRAYS", array_declarations)
# Define and replace DECLARE_INIT_SCALARS
replace_id(ast, "DECLARE_INIT_SCALARS", self._build_scalar_declarations())
# Define and replace DUMMY_CALLS
replace_id(ast, "DUMMY_CALLS", self._build_dummy_calls())
# Define and replace KERNEL_DECL
ast.ext.insert(0, self._build_kernel_function_declaration(
name=kernel_function_name))
# Define and replace KERNEL_CALL
replace_id(ast, "KERNEL_CALL", self._build_kernel_call())
# Define and replace INIT_ARRAYS based on previously generated kernel
replace_id(ast, "INIT_ARRAYS", self._build_array_initializations(array_dimensions))
# Generate code
code = CGenerator().visit(ast)
# Insert missing #includes from template to top of code
code = '\n'.join([l for l in template_code.split('\n') if l.startswith("#include")]) + \
'\n\n' + code
# Store to file
fp.write(code)
fp.close()
if as_filename:
return fp.name
else:
return code
|
[
"Generate",
"and",
"return",
"compilable",
"source",
"code",
"from",
"AST",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1351-L1409
|
[
"def",
"get_main_code",
"(",
"self",
",",
"as_filename",
"=",
"False",
",",
"kernel_function_name",
"=",
"'kernel'",
")",
":",
"# TODO produce nicer code, including help text and other \"comfort features\".",
"assert",
"self",
".",
"kernel_ast",
"is",
"not",
"None",
",",
"\"AST does not exist, this could be due to running \"",
"\"based on a kernel description rather than code.\"",
"fp",
",",
"already_available",
"=",
"self",
".",
"_get_intermediate_file",
"(",
"'main.c'",
",",
"machine_and_compiler_dependent",
"=",
"False",
")",
"# Use already cached version",
"if",
"already_available",
":",
"code",
"=",
"fp",
".",
"read",
"(",
")",
"else",
":",
"parser",
"=",
"CParser",
"(",
")",
"template_code",
"=",
"self",
".",
"CODE_TEMPLATE",
"template_ast",
"=",
"parser",
".",
"parse",
"(",
"clean_code",
"(",
"template_code",
",",
"macros",
"=",
"True",
",",
"comments",
"=",
"True",
",",
"pragmas",
"=",
"False",
")",
")",
"ast",
"=",
"deepcopy",
"(",
"template_ast",
")",
"# Define and replace DECLARE_CONSTS",
"replace_id",
"(",
"ast",
",",
"\"DECLARE_CONSTS\"",
",",
"self",
".",
"_build_const_declartions",
"(",
"with_init",
"=",
"True",
")",
")",
"# Define and replace DECLARE_ARRAYS",
"array_declarations",
",",
"array_dimensions",
"=",
"self",
".",
"_build_array_declarations",
"(",
")",
"replace_id",
"(",
"ast",
",",
"\"DECLARE_ARRAYS\"",
",",
"array_declarations",
")",
"# Define and replace DECLARE_INIT_SCALARS",
"replace_id",
"(",
"ast",
",",
"\"DECLARE_INIT_SCALARS\"",
",",
"self",
".",
"_build_scalar_declarations",
"(",
")",
")",
"# Define and replace DUMMY_CALLS",
"replace_id",
"(",
"ast",
",",
"\"DUMMY_CALLS\"",
",",
"self",
".",
"_build_dummy_calls",
"(",
")",
")",
"# Define and replace KERNEL_DECL",
"ast",
".",
"ext",
".",
"insert",
"(",
"0",
",",
"self",
".",
"_build_kernel_function_declaration",
"(",
"name",
"=",
"kernel_function_name",
")",
")",
"# Define and replace KERNEL_CALL",
"replace_id",
"(",
"ast",
",",
"\"KERNEL_CALL\"",
",",
"self",
".",
"_build_kernel_call",
"(",
")",
")",
"# Define and replace INIT_ARRAYS based on previously generated kernel",
"replace_id",
"(",
"ast",
",",
"\"INIT_ARRAYS\"",
",",
"self",
".",
"_build_array_initializations",
"(",
"array_dimensions",
")",
")",
"# Generate code",
"code",
"=",
"CGenerator",
"(",
")",
".",
"visit",
"(",
"ast",
")",
"# Insert missing #includes from template to top of code",
"code",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"l",
"for",
"l",
"in",
"template_code",
".",
"split",
"(",
"'\\n'",
")",
"if",
"l",
".",
"startswith",
"(",
"\"#include\"",
")",
"]",
")",
"+",
"'\\n\\n'",
"+",
"code",
"# Store to file",
"fp",
".",
"write",
"(",
"code",
")",
"fp",
".",
"close",
"(",
")",
"if",
"as_filename",
":",
"return",
"fp",
".",
"name",
"else",
":",
"return",
"code"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
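Note: the include hoisting at the end is plain line filtering; a self-contained sketch with a made-up template.

template_code = '#include "kerncraft.h"\n#include <stdlib.h>\n\nint main() { return 0; }'
includes = '\n'.join(l for l in template_code.split('\n') if l.startswith('#include'))
print(includes)
# #include "kerncraft.h"
# #include <stdlib.h>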
test
|
KernelCode.assemble_to_object
|
Assemble *in_filename* assembly into *out_filename* object.
Returns the filename of the generated object file.
|
kerncraft/kernel.py
|
def assemble_to_object(self, in_filename, verbose=False):
"""
Assemble *in_filename* assembly into *out_filename* object.
    Returns the filename of the generated object file.
"""
# Build file name
file_base_name = os.path.splitext(os.path.basename(in_filename))[0]
out_filename, already_exists = self._get_intermediate_file(file_base_name + '.o',
binary=True,
fp=False)
if already_exists:
# Do not use caching, because pointer_increment or asm_block selection may be different
pass
compiler, compiler_args = self._machine.get_compiler()
# Compile to object file
compiler_args.append('-c')
cmd = [compiler] + [
in_filename] + \
compiler_args + ['-o', out_filename]
if verbose:
print('Executing (assemble_to_object): ', ' '.join(cmd))
try:
# Assemble all to a binary
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Assembly failed:", e, file=sys.stderr)
sys.exit(1)
return out_filename
|
def assemble_to_object(self, in_filename, verbose=False):
"""
Assemble *in_filename* assembly into *out_filename* object.
    Returns the filename of the generated object file.
"""
# Build file name
file_base_name = os.path.splitext(os.path.basename(in_filename))[0]
out_filename, already_exists = self._get_intermediate_file(file_base_name + '.o',
binary=True,
fp=False)
if already_exists:
# Do not use caching, because pointer_increment or asm_block selection may be different
pass
compiler, compiler_args = self._machine.get_compiler()
# Compile to object file
compiler_args.append('-c')
cmd = [compiler] + [
in_filename] + \
compiler_args + ['-o', out_filename]
if verbose:
print('Executing (assemble_to_object): ', ' '.join(cmd))
try:
# Assemble all to a binary
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Assembly failed:", e, file=sys.stderr)
sys.exit(1)
return out_filename
|
[
"Assemble",
"*",
"in_filename",
"*",
"assembly",
"into",
"*",
"out_filename",
"*",
"object",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1411-L1456
|
[
"def",
"assemble_to_object",
"(",
"self",
",",
"in_filename",
",",
"verbose",
"=",
"False",
")",
":",
"# Build file name",
"file_base_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"in_filename",
")",
")",
"[",
"0",
"]",
"out_filename",
",",
"already_exists",
"=",
"self",
".",
"_get_intermediate_file",
"(",
"file_base_name",
"+",
"'.o'",
",",
"binary",
"=",
"True",
",",
"fp",
"=",
"False",
")",
"if",
"already_exists",
":",
"# Do not use caching, because pointer_increment or asm_block selection may be different",
"pass",
"compiler",
",",
"compiler_args",
"=",
"self",
".",
"_machine",
".",
"get_compiler",
"(",
")",
"# Compile to object file",
"compiler_args",
".",
"append",
"(",
"'-c'",
")",
"cmd",
"=",
"[",
"compiler",
"]",
"+",
"[",
"in_filename",
"]",
"+",
"compiler_args",
"+",
"[",
"'-o'",
",",
"out_filename",
"]",
"if",
"verbose",
":",
"print",
"(",
"'Executing (assemble_to_object): '",
",",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"try",
":",
"# Assemble all to a binary",
"subprocess",
".",
"check_output",
"(",
"cmd",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"print",
"(",
"\"Assembly failed:\"",
",",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"out_filename"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
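Note: the object file name is derived purely from the input path; for example (the path is hypothetical).

import os

in_filename = '/tmp/kerncraft/kernel-iaca.s'
file_base_name = os.path.splitext(os.path.basename(in_filename))[0]
print(file_base_name + '.o')  # -> kernel-iaca.o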
test
|
KernelCode.compile_kernel
|
Compile source to assembly or object and return the output filename.
Output can be used with assemble_to_object().
|
kerncraft/kernel.py
|
def compile_kernel(self, openmp=False, assembly=False, verbose=False):
"""
    Compile source to assembly or object and return the output filename.
    Output can be used with assemble_to_object().
"""
compiler, compiler_args = self._machine.get_compiler()
in_filename = self.get_kernel_code(openmp=openmp, as_filename=True)
if assembly:
compiler_args += ['-S']
suffix = '.s'
else:
suffix = '.o'
out_filename, already_exists = self._get_intermediate_file(
os.path.splitext(os.path.basename(in_filename))[0]+suffix, binary=not assembly, fp=False)
if already_exists:
if verbose:
print('Executing (compile_kernel): ', 'using cached', out_filename)
return out_filename
compiler_args += ['-std=c99']
cmd = ([compiler] +
[in_filename,
'-c',
'-I'+reduce_path(os.path.abspath(os.path.dirname(
os.path.realpath(__file__)))+'/headers/'),
'-o', out_filename] +
compiler_args)
if verbose:
print('Executing (compile_kernel): ', ' '.join(cmd))
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Compilation failed:", e, file=sys.stderr)
sys.exit(1)
# FIXME TODO FIXME TODO FIXME TODO
# Hacky workaround for icc issue (icc may issue vkmovb instructions with AVX512, which are
# invalid and should be kmovb):
if compiler == 'icc' and assembly:
with open(out_filename, 'r+') as f:
assembly = f.read()
f.seek(0)
f.write(assembly.replace('vkmovb', 'kmovb'))
f.truncate()
# FIXME TODO FIXME TODO FIXME TODO
# Let's return the out_file name
return out_filename
|
def compile_kernel(self, openmp=False, assembly=False, verbose=False):
"""
    Compile source to assembly or object and return the output filename.
    Output can be used with assemble_to_object().
"""
compiler, compiler_args = self._machine.get_compiler()
in_filename = self.get_kernel_code(openmp=openmp, as_filename=True)
if assembly:
compiler_args += ['-S']
suffix = '.s'
else:
suffix = '.o'
out_filename, already_exists = self._get_intermediate_file(
os.path.splitext(os.path.basename(in_filename))[0]+suffix, binary=not assembly, fp=False)
if already_exists:
if verbose:
print('Executing (compile_kernel): ', 'using cached', out_filename)
return out_filename
compiler_args += ['-std=c99']
cmd = ([compiler] +
[in_filename,
'-c',
'-I'+reduce_path(os.path.abspath(os.path.dirname(
os.path.realpath(__file__)))+'/headers/'),
'-o', out_filename] +
compiler_args)
if verbose:
print('Executing (compile_kernel): ', ' '.join(cmd))
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Compilation failed:", e, file=sys.stderr)
sys.exit(1)
# FIXME TODO FIXME TODO FIXME TODO
# Hacky workaround for icc issue (icc may issue vkmovb instructions with AVX512, which are
# invalid and should be kmovb):
if compiler == 'icc' and assembly:
with open(out_filename, 'r+') as f:
assembly = f.read()
f.seek(0)
f.write(assembly.replace('vkmovb', 'kmovb'))
f.truncate()
# FIXME TODO FIXME TODO FIXME TODO
# Let's return the out_file name
return out_filename
|
[
"Compile",
"source",
"(",
"from",
"as_code",
"(",
"type_",
"))",
"to",
"assembly",
"or",
"object",
"and",
"return",
"(",
"fileptr",
"filename",
")",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1458-L1512
|
[
"def",
"compile_kernel",
"(",
"self",
",",
"openmp",
"=",
"False",
",",
"assembly",
"=",
"False",
",",
"verbose",
"=",
"False",
")",
":",
"compiler",
",",
"compiler_args",
"=",
"self",
".",
"_machine",
".",
"get_compiler",
"(",
")",
"in_filename",
"=",
"self",
".",
"get_kernel_code",
"(",
"openmp",
"=",
"openmp",
",",
"as_filename",
"=",
"True",
")",
"if",
"assembly",
":",
"compiler_args",
"+=",
"[",
"'-S'",
"]",
"suffix",
"=",
"'.s'",
"else",
":",
"suffix",
"=",
"'.o'",
"out_filename",
",",
"already_exists",
"=",
"self",
".",
"_get_intermediate_file",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"in_filename",
")",
")",
"[",
"0",
"]",
"+",
"suffix",
",",
"binary",
"=",
"not",
"assembly",
",",
"fp",
"=",
"False",
")",
"if",
"already_exists",
":",
"if",
"verbose",
":",
"print",
"(",
"'Executing (compile_kernel): '",
",",
"'using cached'",
",",
"out_filename",
")",
"return",
"out_filename",
"compiler_args",
"+=",
"[",
"'-std=c99'",
"]",
"cmd",
"=",
"(",
"[",
"compiler",
"]",
"+",
"[",
"in_filename",
",",
"'-c'",
",",
"'-I'",
"+",
"reduce_path",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
")",
"+",
"'/headers/'",
")",
",",
"'-o'",
",",
"out_filename",
"]",
"+",
"compiler_args",
")",
"if",
"verbose",
":",
"print",
"(",
"'Executing (compile_kernel): '",
",",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"try",
":",
"subprocess",
".",
"check_output",
"(",
"cmd",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"print",
"(",
"\"Compilation failed:\"",
",",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# FIXME TODO FIXME TODO FIXME TODO",
"# Hacky workaround for icc issue (icc may issue vkmovb instructions with AVX512, which are",
"# invalid and should be kmovb):",
"if",
"compiler",
"==",
"'icc'",
"and",
"assembly",
":",
"with",
"open",
"(",
"out_filename",
",",
"'r+'",
")",
"as",
"f",
":",
"assembly",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"seek",
"(",
"0",
")",
"f",
".",
"write",
"(",
"assembly",
".",
"replace",
"(",
"'vkmovb'",
",",
"'kmovb'",
")",
")",
"f",
".",
"truncate",
"(",
")",
"# FIXME TODO FIXME TODO FIXME TODO",
"# Let's return the out_file name",
"return",
"out_filename"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
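Note: the icc workaround rewrites the assembly file in place; because 'kmovb' is shorter than 'vkmovb', the trailing truncate() is what prevents stale bytes from remaining after the shorter content. The same pattern in isolation (the file name is hypothetical):

with open('kernel.s', 'r+') as f:
    asm = f.read()
    f.seek(0)                                # rewind before overwriting
    f.write(asm.replace('vkmovb', 'kmovb'))  # shorter replacement text
    f.truncate()                             # drop leftover bytes at the end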
test
|
KernelCode.iaca_analysis
|
Run an IACA analysis and return its outcome.
*asm_block* controls how the to-be-marked block is chosen. "auto" (default) selects
the largest block, "manual" prompts interactively, and a number selects the corresponding block.
*pointer_increment* is the number of bytes the pointer is incremented after the loop or
- 'auto': automatic detection, RuntimeError is raised in case of failure
- 'auto_with_manual_fallback': automatic detection, fallback to manual input
- 'manual': prompt user
|
kerncraft/kernel.py
|
def iaca_analysis(self, micro_architecture, asm_block='auto',
pointer_increment='auto_with_manual_fallback', verbose=False):
"""
Run an IACA analysis and return its outcome.
    *asm_block* controls how the to-be-marked block is chosen. "auto" (default) selects
    the largest block, "manual" prompts interactively, and a number selects the corresponding block.
*pointer_increment* is the number of bytes the pointer is incremented after the loop or
- 'auto': automatic detection, RuntimeError is raised in case of failure
- 'auto_with_manual_fallback': automatic detection, fallback to manual input
- 'manual': prompt user
"""
asm_filename = self.compile_kernel(assembly=True, verbose=verbose)
asm_marked_filename = os.path.splitext(asm_filename)[0]+'-iaca.s'
with open(asm_filename, 'r') as in_file, open(asm_marked_filename, 'w') as out_file:
self.asm_block = iaca.iaca_instrumentation(
in_file, out_file,
block_selection=asm_block,
pointer_increment=pointer_increment)
obj_name = self.assemble_to_object(asm_marked_filename, verbose=verbose)
return iaca.iaca_analyse_instrumented_binary(obj_name, micro_architecture), self.asm_block
|
def iaca_analysis(self, micro_architecture, asm_block='auto',
pointer_increment='auto_with_manual_fallback', verbose=False):
"""
Run an IACA analysis and return its outcome.
    *asm_block* controls how the to-be-marked block is chosen. "auto" (default) selects
    the largest block, "manual" prompts interactively, and a number selects the corresponding block.
*pointer_increment* is the number of bytes the pointer is incremented after the loop or
- 'auto': automatic detection, RuntimeError is raised in case of failure
- 'auto_with_manual_fallback': automatic detection, fallback to manual input
- 'manual': prompt user
"""
asm_filename = self.compile_kernel(assembly=True, verbose=verbose)
asm_marked_filename = os.path.splitext(asm_filename)[0]+'-iaca.s'
with open(asm_filename, 'r') as in_file, open(asm_marked_filename, 'w') as out_file:
self.asm_block = iaca.iaca_instrumentation(
in_file, out_file,
block_selection=asm_block,
pointer_increment=pointer_increment)
obj_name = self.assemble_to_object(asm_marked_filename, verbose=verbose)
return iaca.iaca_analyse_instrumented_binary(obj_name, micro_architecture), self.asm_block
|
[
"Run",
"an",
"IACA",
"analysis",
"and",
"return",
"its",
"outcome",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1514-L1535
|
[
"def",
"iaca_analysis",
"(",
"self",
",",
"micro_architecture",
",",
"asm_block",
"=",
"'auto'",
",",
"pointer_increment",
"=",
"'auto_with_manual_fallback'",
",",
"verbose",
"=",
"False",
")",
":",
"asm_filename",
"=",
"self",
".",
"compile_kernel",
"(",
"assembly",
"=",
"True",
",",
"verbose",
"=",
"verbose",
")",
"asm_marked_filename",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"asm_filename",
")",
"[",
"0",
"]",
"+",
"'-iaca.s'",
"with",
"open",
"(",
"asm_filename",
",",
"'r'",
")",
"as",
"in_file",
",",
"open",
"(",
"asm_marked_filename",
",",
"'w'",
")",
"as",
"out_file",
":",
"self",
".",
"asm_block",
"=",
"iaca",
".",
"iaca_instrumentation",
"(",
"in_file",
",",
"out_file",
",",
"block_selection",
"=",
"asm_block",
",",
"pointer_increment",
"=",
"pointer_increment",
")",
"obj_name",
"=",
"self",
".",
"assemble_to_object",
"(",
"asm_marked_filename",
",",
"verbose",
"=",
"verbose",
")",
"return",
"iaca",
".",
"iaca_analyse_instrumented_binary",
"(",
"obj_name",
",",
"micro_architecture",
")",
",",
"self",
".",
"asm_block"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
KernelCode.build_executable
|
Compile source to executable with likwid capabilities and return the executable name.
|
kerncraft/kernel.py
|
def build_executable(self, lflags=None, verbose=False, openmp=False):
"""Compile source to executable with likwid capabilities and return the executable name."""
compiler, compiler_args = self._machine.get_compiler()
kernel_obj_filename = self.compile_kernel(openmp=openmp, verbose=verbose)
out_filename, already_exists = self._get_intermediate_file(
os.path.splitext(os.path.basename(kernel_obj_filename))[0], binary=True, fp=False)
if not already_exists:
main_source_filename = self.get_main_code(as_filename=True)
if not (('LIKWID_INCLUDE' in os.environ or 'LIKWID_INC' in os.environ) and
'LIKWID_LIB' in os.environ):
print('Could not find LIKWID_INCLUDE (e.g., "-I/app/likwid/4.1.2/include") and '
'LIKWID_LIB (e.g., "-L/apps/likwid/4.1.2/lib") environment variables',
file=sys.stderr)
sys.exit(1)
compiler_args += [
'-std=c99',
'-I'+reduce_path(os.path.abspath(os.path.dirname(
os.path.realpath(__file__)))+'/headers/'),
os.environ.get('LIKWID_INCLUDE', ''),
os.environ.get('LIKWID_INC', ''),
'-llikwid']
# This is a special case for unittesting
if os.environ.get('LIKWID_LIB') == '':
compiler_args = compiler_args[:-1]
if lflags is None:
lflags = []
lflags += os.environ['LIKWID_LIB'].split(' ') + ['-pthread']
compiler_args += os.environ['LIKWID_LIB'].split(' ') + ['-pthread']
infiles = [reduce_path(os.path.abspath(os.path.dirname(
os.path.realpath(__file__)))+'/headers/dummy.c'),
kernel_obj_filename, main_source_filename]
cmd = [compiler] + infiles + compiler_args + ['-o', out_filename]
# remove empty arguments
cmd = list(filter(bool, cmd))
if verbose:
print('Executing (build_executable): ', ' '.join(cmd))
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Build failed:", e, file=sys.stderr)
sys.exit(1)
else:
if verbose:
print('Executing (build_executable): ', 'using cached', out_filename)
return out_filename
|
def build_executable(self, lflags=None, verbose=False, openmp=False):
"""Compile source to executable with likwid capabilities and return the executable name."""
compiler, compiler_args = self._machine.get_compiler()
kernel_obj_filename = self.compile_kernel(openmp=openmp, verbose=verbose)
out_filename, already_exists = self._get_intermediate_file(
os.path.splitext(os.path.basename(kernel_obj_filename))[0], binary=True, fp=False)
if not already_exists:
main_source_filename = self.get_main_code(as_filename=True)
if not (('LIKWID_INCLUDE' in os.environ or 'LIKWID_INC' in os.environ) and
'LIKWID_LIB' in os.environ):
print('Could not find LIKWID_INCLUDE (e.g., "-I/app/likwid/4.1.2/include") and '
'LIKWID_LIB (e.g., "-L/apps/likwid/4.1.2/lib") environment variables',
file=sys.stderr)
sys.exit(1)
compiler_args += [
'-std=c99',
'-I'+reduce_path(os.path.abspath(os.path.dirname(
os.path.realpath(__file__)))+'/headers/'),
os.environ.get('LIKWID_INCLUDE', ''),
os.environ.get('LIKWID_INC', ''),
'-llikwid']
# This is a special case for unittesting
if os.environ.get('LIKWID_LIB') == '':
compiler_args = compiler_args[:-1]
if lflags is None:
lflags = []
lflags += os.environ['LIKWID_LIB'].split(' ') + ['-pthread']
compiler_args += os.environ['LIKWID_LIB'].split(' ') + ['-pthread']
infiles = [reduce_path(os.path.abspath(os.path.dirname(
os.path.realpath(__file__)))+'/headers/dummy.c'),
kernel_obj_filename, main_source_filename]
cmd = [compiler] + infiles + compiler_args + ['-o', out_filename]
# remove empty arguments
cmd = list(filter(bool, cmd))
if verbose:
print('Executing (build_executable): ', ' '.join(cmd))
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Build failed:", e, file=sys.stderr)
sys.exit(1)
else:
if verbose:
print('Executing (build_executable): ', 'using cached', out_filename)
return out_filename
|
[
"Compile",
"source",
"to",
"executable",
"with",
"likwid",
"capabilities",
"and",
"return",
"the",
"executable",
"name",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1537-L1590
|
[
"def",
"build_executable",
"(",
"self",
",",
"lflags",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"openmp",
"=",
"False",
")",
":",
"compiler",
",",
"compiler_args",
"=",
"self",
".",
"_machine",
".",
"get_compiler",
"(",
")",
"kernel_obj_filename",
"=",
"self",
".",
"compile_kernel",
"(",
"openmp",
"=",
"openmp",
",",
"verbose",
"=",
"verbose",
")",
"out_filename",
",",
"already_exists",
"=",
"self",
".",
"_get_intermediate_file",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"kernel_obj_filename",
")",
")",
"[",
"0",
"]",
",",
"binary",
"=",
"True",
",",
"fp",
"=",
"False",
")",
"if",
"not",
"already_exists",
":",
"main_source_filename",
"=",
"self",
".",
"get_main_code",
"(",
"as_filename",
"=",
"True",
")",
"if",
"not",
"(",
"(",
"'LIKWID_INCLUDE'",
"in",
"os",
".",
"environ",
"or",
"'LIKWID_INC'",
"in",
"os",
".",
"environ",
")",
"and",
"'LIKWID_LIB'",
"in",
"os",
".",
"environ",
")",
":",
"print",
"(",
"'Could not find LIKWID_INCLUDE (e.g., \"-I/app/likwid/4.1.2/include\") and '",
"'LIKWID_LIB (e.g., \"-L/apps/likwid/4.1.2/lib\") environment variables'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"compiler_args",
"+=",
"[",
"'-std=c99'",
",",
"'-I'",
"+",
"reduce_path",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
")",
"+",
"'/headers/'",
")",
",",
"os",
".",
"environ",
".",
"get",
"(",
"'LIKWID_INCLUDE'",
",",
"''",
")",
",",
"os",
".",
"environ",
".",
"get",
"(",
"'LIKWID_INC'",
",",
"''",
")",
",",
"'-llikwid'",
"]",
"# This is a special case for unittesting",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'LIKWID_LIB'",
")",
"==",
"''",
":",
"compiler_args",
"=",
"compiler_args",
"[",
":",
"-",
"1",
"]",
"if",
"lflags",
"is",
"None",
":",
"lflags",
"=",
"[",
"]",
"lflags",
"+=",
"os",
".",
"environ",
"[",
"'LIKWID_LIB'",
"]",
".",
"split",
"(",
"' '",
")",
"+",
"[",
"'-pthread'",
"]",
"compiler_args",
"+=",
"os",
".",
"environ",
"[",
"'LIKWID_LIB'",
"]",
".",
"split",
"(",
"' '",
")",
"+",
"[",
"'-pthread'",
"]",
"infiles",
"=",
"[",
"reduce_path",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
")",
"+",
"'/headers/dummy.c'",
")",
",",
"kernel_obj_filename",
",",
"main_source_filename",
"]",
"cmd",
"=",
"[",
"compiler",
"]",
"+",
"infiles",
"+",
"compiler_args",
"+",
"[",
"'-o'",
",",
"out_filename",
"]",
"# remove empty arguments",
"cmd",
"=",
"list",
"(",
"filter",
"(",
"bool",
",",
"cmd",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"'Executing (build_executable): '",
",",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"try",
":",
"subprocess",
".",
"check_output",
"(",
"cmd",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"print",
"(",
"\"Build failed:\"",
",",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"'Executing (build_executable): '",
",",
"'using cached'",
",",
"out_filename",
")",
"return",
"out_filename"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
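The subtle part of build_executable above is the LIKWID environment handling: LIKWID_INCLUDE/LIKWID_INC and LIKWID_LIB must be set, an empty LIKWID_LIB (the unit-test special case) drops the trailing -llikwid again, and empty strings are filtered out of the final command. A self-contained re-check of just that flag logic, with hypothetical install paths (this is a sketch, not the actual build command):

def likwid_compiler_args(environ):
    # mirrors the include/lib handling above; paths are made up for illustration
    args = [environ.get('LIKWID_INCLUDE', ''), environ.get('LIKWID_INC', ''), '-llikwid']
    if environ.get('LIKWID_LIB') == '':
        args = args[:-1]  # unit-test special case: nothing to link against
    args += environ.get('LIKWID_LIB', '').split(' ') + ['-pthread']
    return list(filter(bool, args))  # drop empty arguments, like the real cmd

print(likwid_compiler_args({'LIKWID_INC': '-I/opt/likwid/include',
                            'LIKWID_LIB': '-L/opt/likwid/lib'}))
# ['-I/opt/likwid/include', '-llikwid', '-L/opt/likwid/lib', '-pthread']
print(likwid_compiler_args({'LIKWID_INCLUDE': '', 'LIKWID_LIB': ''}))
# ['-pthread']
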
test
|
KernelDescription.string_to_sympy
|
Convert any string to a sympy object or None.
|
kerncraft/kernel.py
|
def string_to_sympy(cls, s):
"""Convert any string to a sympy object or None."""
if isinstance(s, int):
return sympy.Integer(s)
elif isinstance(s, list):
return tuple([cls.string_to_sympy(e) for e in s])
elif s is None:
return None
else:
# Step 1 build expression with the whole alphabet redefined:
local_dict = {c: symbol_pos_int(c) for c in s if c in string.ascii_letters}
# TODO find nicer solution for N and other pre-mapped letters
preliminary_expr = parse_expr(s, local_dict=local_dict)
# Replace all free symbols with positive integer versions:
local_dict.update(
{s.name: symbol_pos_int(s.name) for s in preliminary_expr.free_symbols})
return parse_expr(s, local_dict=local_dict)
|
def string_to_sympy(cls, s):
"""Convert any string to a sympy object or None."""
if isinstance(s, int):
return sympy.Integer(s)
elif isinstance(s, list):
return tuple([cls.string_to_sympy(e) for e in s])
elif s is None:
return None
else:
# Step 1 build expression with the whole alphabet redefined:
local_dict = {c: symbol_pos_int(c) for c in s if c in string.ascii_letters}
# TODO find nicer solution for N and other pre-mapped letters
preliminary_expr = parse_expr(s, local_dict=local_dict)
# Replace all free symbols with positive integer versions:
local_dict.update(
{s.name: symbol_pos_int(s.name) for s in preliminary_expr.free_symbols})
return parse_expr(s, local_dict=local_dict)
|
[
"Convert",
"any",
"string",
"to",
"a",
"sympy",
"object",
"or",
"None",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1648-L1664
|
[
"def",
"string_to_sympy",
"(",
"cls",
",",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"int",
")",
":",
"return",
"sympy",
".",
"Integer",
"(",
"s",
")",
"elif",
"isinstance",
"(",
"s",
",",
"list",
")",
":",
"return",
"tuple",
"(",
"[",
"cls",
".",
"string_to_sympy",
"(",
"e",
")",
"for",
"e",
"in",
"s",
"]",
")",
"elif",
"s",
"is",
"None",
":",
"return",
"None",
"else",
":",
"# Step 1 build expression with the whole alphabet redefined:",
"local_dict",
"=",
"{",
"c",
":",
"symbol_pos_int",
"(",
"c",
")",
"for",
"c",
"in",
"s",
"if",
"c",
"in",
"string",
".",
"ascii_letters",
"}",
"# TODO find nicer solution for N and other pre-mapped letters",
"preliminary_expr",
"=",
"parse_expr",
"(",
"s",
",",
"local_dict",
"=",
"local_dict",
")",
"# Replace all free symbols with positive integer versions:",
"local_dict",
".",
"update",
"(",
"{",
"s",
".",
"name",
":",
"symbol_pos_int",
"(",
"s",
".",
"name",
")",
"for",
"s",
"in",
"preliminary_expr",
".",
"free_symbols",
"}",
")",
"return",
"parse_expr",
"(",
"s",
",",
"local_dict",
"=",
"local_dict",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
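string_to_sympy parses in two passes so that even multi-letter symbols only discovered during the first parse end up as positive-integer symbols. A standalone sketch of the same idea; symbol_pos_int is reconstructed here as a positive-integer Symbol factory, which matches its use above but is an assumption about the real helper:

import string
import sympy
from sympy.parsing.sympy_parser import parse_expr

def symbol_pos_int(name):
    # assumed equivalent of kerncraft's helper: a positive integer Symbol
    return sympy.Symbol(name, positive=True, integer=True)

s = "N*M + 3"
# first pass: pre-bind every single letter occurring in the string
local_dict = {c: symbol_pos_int(c) for c in s if c in string.ascii_letters}
preliminary = parse_expr(s, local_dict=local_dict)
# second pass: also bind any free symbols found by the first parse
local_dict.update({f.name: symbol_pos_int(f.name) for f in preliminary.free_symbols})
expr = parse_expr(s, local_dict=local_dict)
print(expr, expr.free_symbols)  # M*N + 3 {M, N}
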
test
|
MachineModel.get_identifier
|
Return identifier which is either the machine file name or sha256 checksum of data.
|
kerncraft/machinemodel.py
|
def get_identifier(self):
"""Return identifier which is either the machine file name or sha256 checksum of data."""
if self._path:
return os.path.basename(self._path)
else:
        return hashlib.sha256(repr(self._data).encode()).hexdigest()
|
def get_identifier(self):
"""Return identifier which is either the machine file name or sha256 checksum of data."""
if self._path:
return os.path.basename(self._path)
else:
        return hashlib.sha256(repr(self._data).encode()).hexdigest()
|
[
"Return",
"identifier",
"which",
"is",
"either",
"the",
"machine",
"file",
"name",
"or",
"sha256",
"checksum",
"of",
"data",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/machinemodel.py#L92-L97
|
[
"def",
"get_identifier",
"(",
"self",
")",
":",
"if",
"self",
".",
"_path",
":",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"_path",
")",
"else",
":",
"return",
"hashlib",
".",
"sha256",
"(",
"hashlib",
".",
"sha256",
"(",
"repr",
"(",
"self",
".",
"_data",
")",
".",
"encode",
"(",
")",
")",
")",
".",
"hexdigest",
"(",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
MachineModel.get_last_modified_datetime
|
Return datetime object of modified time of machine file. Return now if not a file.
|
kerncraft/machinemodel.py
|
def get_last_modified_datetime(self):
"""Return datetime object of modified time of machine file. Return now if not a file."""
if self._path:
statbuf = os.stat(self._path)
return datetime.utcfromtimestamp(statbuf.st_mtime)
else:
return datetime.now()
|
def get_last_modified_datetime(self):
"""Return datetime object of modified time of machine file. Return now if not a file."""
if self._path:
statbuf = os.stat(self._path)
return datetime.utcfromtimestamp(statbuf.st_mtime)
else:
return datetime.now()
|
[
"Return",
"datetime",
"object",
"of",
"modified",
"time",
"of",
"machine",
"file",
".",
"Return",
"now",
"if",
"not",
"a",
"file",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/machinemodel.py#L99-L105
|
[
"def",
"get_last_modified_datetime",
"(",
"self",
")",
":",
"if",
"self",
".",
"_path",
":",
"statbuf",
"=",
"os",
".",
"stat",
"(",
"self",
".",
"_path",
")",
"return",
"datetime",
".",
"utcfromtimestamp",
"(",
"statbuf",
".",
"st_mtime",
")",
"else",
":",
"return",
"datetime",
".",
"now",
"(",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
MachineModel.get_cachesim
|
Return a cachesim.CacheSimulator object based on the machine description.
:param cores: core count (default: 1)
|
kerncraft/machinemodel.py
|
def get_cachesim(self, cores=1):
"""
Return a cachesim.CacheSimulator object based on the machine description.
:param cores: core count (default: 1)
"""
cache_dict = {}
for c in self['memory hierarchy']:
# Skip main memory
if 'cache per group' not in c:
continue
cache_dict[c['level']] = deepcopy(c['cache per group'])
# Scale size of shared caches according to cores
if c['cores per group'] > 1:
cache_dict[c['level']]['sets'] //= cores
cs, caches, mem = cachesim.CacheSimulator.from_dict(cache_dict)
return cs
|
def get_cachesim(self, cores=1):
"""
Return a cachesim.CacheSimulator object based on the machine description.
:param cores: core count (default: 1)
"""
cache_dict = {}
for c in self['memory hierarchy']:
# Skip main memory
if 'cache per group' not in c:
continue
cache_dict[c['level']] = deepcopy(c['cache per group'])
# Scale size of shared caches according to cores
if c['cores per group'] > 1:
cache_dict[c['level']]['sets'] //= cores
cs, caches, mem = cachesim.CacheSimulator.from_dict(cache_dict)
return cs
|
[
"Return",
"a",
"cachesim",
".",
"CacheSimulator",
"object",
"based",
"on",
"the",
"machine",
"description",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/machinemodel.py#L107-L125
|
[
"def",
"get_cachesim",
"(",
"self",
",",
"cores",
"=",
"1",
")",
":",
"cache_dict",
"=",
"{",
"}",
"for",
"c",
"in",
"self",
"[",
"'memory hierarchy'",
"]",
":",
"# Skip main memory",
"if",
"'cache per group'",
"not",
"in",
"c",
":",
"continue",
"cache_dict",
"[",
"c",
"[",
"'level'",
"]",
"]",
"=",
"deepcopy",
"(",
"c",
"[",
"'cache per group'",
"]",
")",
"# Scale size of shared caches according to cores",
"if",
"c",
"[",
"'cores per group'",
"]",
">",
"1",
":",
"cache_dict",
"[",
"c",
"[",
"'level'",
"]",
"]",
"[",
"'sets'",
"]",
"//=",
"cores",
"cs",
",",
"caches",
",",
"mem",
"=",
"cachesim",
".",
"CacheSimulator",
".",
"from_dict",
"(",
"cache_dict",
")",
"return",
"cs"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
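The only non-obvious step in get_cachesim is how shared caches are scaled: dividing the number of sets by the core count shrinks the per-core share of the cache while keeping associativity and line size. A tiny illustration of that scaling with a made-up hierarchy (no cachesim import needed):

from copy import deepcopy

# made-up machine description: private L1, L3 shared by 8 cores
memory_hierarchy = [
    {'level': 'L1', 'cores per group': 1,
     'cache per group': {'sets': 64, 'ways': 8, 'cl_size': 64}},
    {'level': 'L3', 'cores per group': 8,
     'cache per group': {'sets': 16384, 'ways': 16, 'cl_size': 64}},
]

def scaled_cache_dict(hierarchy, cores=1):
    cache_dict = {}
    for c in hierarchy:
        cache_dict[c['level']] = deepcopy(c['cache per group'])
        if c['cores per group'] > 1:
            cache_dict[c['level']]['sets'] //= cores  # per-core share of shared cache
    return cache_dict

print(scaled_cache_dict(memory_hierarchy, cores=4)['L3']['sets'])  # 16384 // 4 = 4096
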
test
|
MachineModel.get_bandwidth
|
Return best fitting bandwidth according to number of threads, read and write streams.
:param cache_level: integer of cache (0 is L1, 1 is L2 ...)
:param read_streams: number of read streams expected
:param write_streams: number of write streams expected
:param threads_per_core: number of threads that are run on each core
:param cores: if not given, will choose maximum bandwidth for single NUMA domain
|
kerncraft/machinemodel.py
|
def get_bandwidth(self, cache_level, read_streams, write_streams, threads_per_core, cores=None):
"""
Return best fitting bandwidth according to number of threads, read and write streams.
:param cache_level: integer of cache (0 is L1, 1 is L2 ...)
:param read_streams: number of read streams expected
:param write_streams: number of write streams expected
:param threads_per_core: number of threads that are run on each core
:param cores: if not given, will choose maximum bandwidth for single NUMA domain
"""
# try to find best fitting kernel (closest to read/write ratio):
# write allocate has to be handled in kernel information (all writes are also reads)
# TODO support for non-write-allocate architectures
try:
target_ratio = read_streams/write_streams
except ZeroDivisionError:
target_ratio = float('inf')
measurement_kernel = 'load'
measurement_kernel_info = self['benchmarks']['kernels'][measurement_kernel]
measurement_kernel_ratio = float('inf')
for kernel_name, kernel_info in sorted(self['benchmarks']['kernels'].items()):
try:
kernel_ratio = ((kernel_info['read streams']['streams'] +
kernel_info['write streams']['streams'] -
kernel_info['read+write streams']['streams']) /
kernel_info['write streams']['streams'])
except ZeroDivisionError:
kernel_ratio = float('inf')
if abs(kernel_ratio - target_ratio) < abs(measurement_kernel_ratio - target_ratio):
measurement_kernel = kernel_name
measurement_kernel_info = kernel_info
measurement_kernel_ratio = kernel_ratio
# choose smt, and then use max/saturation bw
bw_level = self['memory hierarchy'][cache_level]['level']
bw_measurements = \
self['benchmarks']['measurements'][bw_level][threads_per_core]
assert threads_per_core == bw_measurements['threads per core'], \
'malformed measurement dictionary in machine file.'
if cores is not None:
# Used by Roofline model
run_index = bw_measurements['cores'].index(cores)
bw = bw_measurements['results'][measurement_kernel][run_index]
else:
# Used by ECM model
# Choose maximum number of cores to get bandwidth for
max_cores = min(self['memory hierarchy'][cache_level]['cores per group'],
self['cores per NUMA domain'])
bw = max(bw_measurements['results'][measurement_kernel][:max_cores])
    # Correct bandwidth due to mis-measurement of write allocation
# TODO support non-temporal stores and non-write-allocate architectures
if cache_level == 0:
# L1 does not have write-allocate, so everything is measured correctly
factor = 1.0
else:
factor = (float(measurement_kernel_info['read streams']['bytes']) +
2.0*float(measurement_kernel_info['write streams']['bytes']) -
float(measurement_kernel_info['read+write streams']['bytes'])) / \
(float(measurement_kernel_info['read streams']['bytes']) +
float(measurement_kernel_info['write streams']['bytes']))
bw = bw * factor
return bw, measurement_kernel
|
def get_bandwidth(self, cache_level, read_streams, write_streams, threads_per_core, cores=None):
"""
Return best fitting bandwidth according to number of threads, read and write streams.
:param cache_level: integer of cache (0 is L1, 1 is L2 ...)
:param read_streams: number of read streams expected
:param write_streams: number of write streams expected
:param threads_per_core: number of threads that are run on each core
:param cores: if not given, will choose maximum bandwidth for single NUMA domain
"""
# try to find best fitting kernel (closest to read/write ratio):
# write allocate has to be handled in kernel information (all writes are also reads)
# TODO support for non-write-allocate architectures
try:
target_ratio = read_streams/write_streams
except ZeroDivisionError:
target_ratio = float('inf')
measurement_kernel = 'load'
measurement_kernel_info = self['benchmarks']['kernels'][measurement_kernel]
measurement_kernel_ratio = float('inf')
for kernel_name, kernel_info in sorted(self['benchmarks']['kernels'].items()):
try:
kernel_ratio = ((kernel_info['read streams']['streams'] +
kernel_info['write streams']['streams'] -
kernel_info['read+write streams']['streams']) /
kernel_info['write streams']['streams'])
except ZeroDivisionError:
kernel_ratio = float('inf')
if abs(kernel_ratio - target_ratio) < abs(measurement_kernel_ratio - target_ratio):
measurement_kernel = kernel_name
measurement_kernel_info = kernel_info
measurement_kernel_ratio = kernel_ratio
# choose smt, and then use max/saturation bw
bw_level = self['memory hierarchy'][cache_level]['level']
bw_measurements = \
self['benchmarks']['measurements'][bw_level][threads_per_core]
assert threads_per_core == bw_measurements['threads per core'], \
'malformed measurement dictionary in machine file.'
if cores is not None:
# Used by Roofline model
run_index = bw_measurements['cores'].index(cores)
bw = bw_measurements['results'][measurement_kernel][run_index]
else:
# Used by ECM model
# Choose maximum number of cores to get bandwidth for
max_cores = min(self['memory hierarchy'][cache_level]['cores per group'],
self['cores per NUMA domain'])
bw = max(bw_measurements['results'][measurement_kernel][:max_cores])
    # Correct bandwidth due to mis-measurement of write allocation
# TODO support non-temporal stores and non-write-allocate architectures
if cache_level == 0:
# L1 does not have write-allocate, so everything is measured correctly
factor = 1.0
else:
factor = (float(measurement_kernel_info['read streams']['bytes']) +
2.0*float(measurement_kernel_info['write streams']['bytes']) -
float(measurement_kernel_info['read+write streams']['bytes'])) / \
(float(measurement_kernel_info['read streams']['bytes']) +
float(measurement_kernel_info['write streams']['bytes']))
bw = bw * factor
return bw, measurement_kernel
|
[
"Return",
"best",
"fitting",
"bandwidth",
"according",
"to",
"number",
"of",
"threads",
"read",
"and",
"write",
"streams",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/machinemodel.py#L127-L191
|
[
"def",
"get_bandwidth",
"(",
"self",
",",
"cache_level",
",",
"read_streams",
",",
"write_streams",
",",
"threads_per_core",
",",
"cores",
"=",
"None",
")",
":",
"# try to find best fitting kernel (closest to read/write ratio):",
"# write allocate has to be handled in kernel information (all writes are also reads)",
"# TODO support for non-write-allocate architectures",
"try",
":",
"target_ratio",
"=",
"read_streams",
"/",
"write_streams",
"except",
"ZeroDivisionError",
":",
"target_ratio",
"=",
"float",
"(",
"'inf'",
")",
"measurement_kernel",
"=",
"'load'",
"measurement_kernel_info",
"=",
"self",
"[",
"'benchmarks'",
"]",
"[",
"'kernels'",
"]",
"[",
"measurement_kernel",
"]",
"measurement_kernel_ratio",
"=",
"float",
"(",
"'inf'",
")",
"for",
"kernel_name",
",",
"kernel_info",
"in",
"sorted",
"(",
"self",
"[",
"'benchmarks'",
"]",
"[",
"'kernels'",
"]",
".",
"items",
"(",
")",
")",
":",
"try",
":",
"kernel_ratio",
"=",
"(",
"(",
"kernel_info",
"[",
"'read streams'",
"]",
"[",
"'streams'",
"]",
"+",
"kernel_info",
"[",
"'write streams'",
"]",
"[",
"'streams'",
"]",
"-",
"kernel_info",
"[",
"'read+write streams'",
"]",
"[",
"'streams'",
"]",
")",
"/",
"kernel_info",
"[",
"'write streams'",
"]",
"[",
"'streams'",
"]",
")",
"except",
"ZeroDivisionError",
":",
"kernel_ratio",
"=",
"float",
"(",
"'inf'",
")",
"if",
"abs",
"(",
"kernel_ratio",
"-",
"target_ratio",
")",
"<",
"abs",
"(",
"measurement_kernel_ratio",
"-",
"target_ratio",
")",
":",
"measurement_kernel",
"=",
"kernel_name",
"measurement_kernel_info",
"=",
"kernel_info",
"measurement_kernel_ratio",
"=",
"kernel_ratio",
"# choose smt, and then use max/saturation bw",
"bw_level",
"=",
"self",
"[",
"'memory hierarchy'",
"]",
"[",
"cache_level",
"]",
"[",
"'level'",
"]",
"bw_measurements",
"=",
"self",
"[",
"'benchmarks'",
"]",
"[",
"'measurements'",
"]",
"[",
"bw_level",
"]",
"[",
"threads_per_core",
"]",
"assert",
"threads_per_core",
"==",
"bw_measurements",
"[",
"'threads per core'",
"]",
",",
"'malformed measurement dictionary in machine file.'",
"if",
"cores",
"is",
"not",
"None",
":",
"# Used by Roofline model",
"run_index",
"=",
"bw_measurements",
"[",
"'cores'",
"]",
".",
"index",
"(",
"cores",
")",
"bw",
"=",
"bw_measurements",
"[",
"'results'",
"]",
"[",
"measurement_kernel",
"]",
"[",
"run_index",
"]",
"else",
":",
"# Used by ECM model",
"# Choose maximum number of cores to get bandwidth for",
"max_cores",
"=",
"min",
"(",
"self",
"[",
"'memory hierarchy'",
"]",
"[",
"cache_level",
"]",
"[",
"'cores per group'",
"]",
",",
"self",
"[",
"'cores per NUMA domain'",
"]",
")",
"bw",
"=",
"max",
"(",
"bw_measurements",
"[",
"'results'",
"]",
"[",
"measurement_kernel",
"]",
"[",
":",
"max_cores",
"]",
")",
"# Correct bandwidth due to miss-measurement of write allocation",
"# TODO support non-temporal stores and non-write-allocate architectures",
"if",
"cache_level",
"==",
"0",
":",
"# L1 does not have write-allocate, so everything is measured correctly",
"factor",
"=",
"1.0",
"else",
":",
"factor",
"=",
"(",
"float",
"(",
"measurement_kernel_info",
"[",
"'read streams'",
"]",
"[",
"'bytes'",
"]",
")",
"+",
"2.0",
"*",
"float",
"(",
"measurement_kernel_info",
"[",
"'write streams'",
"]",
"[",
"'bytes'",
"]",
")",
"-",
"float",
"(",
"measurement_kernel_info",
"[",
"'read+write streams'",
"]",
"[",
"'bytes'",
"]",
")",
")",
"/",
"(",
"float",
"(",
"measurement_kernel_info",
"[",
"'read streams'",
"]",
"[",
"'bytes'",
"]",
")",
"+",
"float",
"(",
"measurement_kernel_info",
"[",
"'write streams'",
"]",
"[",
"'bytes'",
"]",
")",
")",
"bw",
"=",
"bw",
"*",
"factor",
"return",
"bw",
",",
"measurement_kernel"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
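The correction factor applied at the end of get_bandwidth compensates for write-allocate traffic: the benchmark only counts explicit reads and writes, but every store to a line not yet in cache triggers an additional hidden read. With r read bytes, w written bytes and rw overlapping read+write bytes per iteration of the measurement kernel, the factor is (r + 2w - rw) / (r + w). A worked example with made-up numbers for a copy-like kernel:

r, w, rw = 8.0, 8.0, 0.0   # bytes read / written / read+written per iteration (made up)
factor = (r + 2.0 * w - rw) / (r + w)
measured_bw = 40e9          # hypothetical likwid-bench result in B/s
print(factor, measured_bw * factor)
# 1.5 60000000000.0 -> write-allocate adds one hidden read per store
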
test
|
MachineModel.get_compiler
|
Return tuple of compiler and compiler flags.
Selects compiler and flags from the machine description file, command line arguments, or call
arguments.
|
kerncraft/machinemodel.py
|
def get_compiler(self, compiler=None, flags=None):
"""
Return tuple of compiler and compiler flags.
    Selects compiler and flags from the machine description file, command line arguments, or call
    arguments.
"""
if self._args:
compiler = compiler or self._args.compiler
flags = flags or self._args.compiler_flags
if compiler is None:
# Select first available compiler in machine description file's compiler dict
for c in self['compiler'].keys():
# Making sure compiler is available:
if find_executable(c) is not None:
compiler = c
break
else:
raise RuntimeError("No compiler ({}) was found. Add different one in machine file, "
"via --compiler argument or make sure it will be found in "
"$PATH.".format(list(self['compiler'].keys())))
if flags is None:
# Select from machine description file
flags = self['compiler'].get(compiler, '')
return compiler, flags.split(' ')
|
def get_compiler(self, compiler=None, flags=None):
"""
Return tuple of compiler and compiler flags.
    Selects compiler and flags from the machine description file, command line arguments, or call
    arguments.
"""
if self._args:
compiler = compiler or self._args.compiler
flags = flags or self._args.compiler_flags
if compiler is None:
# Select first available compiler in machine description file's compiler dict
for c in self['compiler'].keys():
# Making sure compiler is available:
if find_executable(c) is not None:
compiler = c
break
else:
raise RuntimeError("No compiler ({}) was found. Add different one in machine file, "
"via --compiler argument or make sure it will be found in "
"$PATH.".format(list(self['compiler'].keys())))
if flags is None:
# Select from machine description file
flags = self['compiler'].get(compiler, '')
return compiler, flags.split(' ')
|
[
"Return",
"tuple",
"of",
"compiler",
"and",
"compiler",
"flags",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/machinemodel.py#L193-L218
|
[
"def",
"get_compiler",
"(",
"self",
",",
"compiler",
"=",
"None",
",",
"flags",
"=",
"None",
")",
":",
"if",
"self",
".",
"_args",
":",
"compiler",
"=",
"compiler",
"or",
"self",
".",
"_args",
".",
"compiler",
"flags",
"=",
"flags",
"or",
"self",
".",
"_args",
".",
"compiler_flags",
"if",
"compiler",
"is",
"None",
":",
"# Select first available compiler in machine description file's compiler dict",
"for",
"c",
"in",
"self",
"[",
"'compiler'",
"]",
".",
"keys",
"(",
")",
":",
"# Making sure compiler is available:",
"if",
"find_executable",
"(",
"c",
")",
"is",
"not",
"None",
":",
"compiler",
"=",
"c",
"break",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No compiler ({}) was found. Add different one in machine file, \"",
"\"via --compiler argument or make sure it will be found in \"",
"\"$PATH.\"",
".",
"format",
"(",
"list",
"(",
"self",
"[",
"'compiler'",
"]",
".",
"keys",
"(",
")",
")",
")",
")",
"if",
"flags",
"is",
"None",
":",
"# Select from machine description file",
"flags",
"=",
"self",
"[",
"'compiler'",
"]",
".",
"get",
"(",
"compiler",
",",
"''",
")",
"return",
"compiler",
",",
"flags",
".",
"split",
"(",
"' '",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
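A condensed sketch of the selection order get_compiler implements: an explicitly requested compiler wins, otherwise the first compiler from the machine file that is actually installed is used. shutil.which stands in for distutils' find_executable here, and the compiler dict is made up:

from shutil import which  # stand-in for distutils.spawn.find_executable

machine_compilers = {'icc': '-O3 -xHost -fno-alias', 'gcc': '-O3 -march=native'}

def pick_compiler(compilers, requested=None):
    compiler = requested or next((c for c in compilers if which(c)), None)
    if compiler is None:
        raise RuntimeError("No compiler ({}) was found.".format(list(compilers)))
    return compiler, compilers.get(compiler, '').split(' ')

print(pick_compiler(machine_compilers))  # e.g. ('gcc', ['-O3', '-march=native'])
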
test
|
MachineModel.parse_perfctr_event
|
Parse events in machine description to tuple representation used in Benchmark module.
Examples:
>>> parse_perfctr_event('PERF_EVENT:REG[0-3]')
('PERF_EVENT', 'REG[0-3]')
>>> parse_perfctr_event('PERF_EVENT:REG[0-3]:STAY:FOO=23:BAR=0x23')
('PERF_EVENT', 'REG[0-3]', {'STAY': None, 'FOO': 23, 'BAR': 35})
|
kerncraft/machinemodel.py
|
def parse_perfctr_event(perfctr):
"""
Parse events in machine description to tuple representation used in Benchmark module.
Examples:
>>> parse_perfctr_event('PERF_EVENT:REG[0-3]')
('PERF_EVENT', 'REG[0-3]')
>>> parse_perfctr_event('PERF_EVENT:REG[0-3]:STAY:FOO=23:BAR=0x23')
('PERF_EVENT', 'REG[0-3]', {'STAY': None, 'FOO': 23, 'BAR': 35})
"""
split_perfctr = perfctr.split(':')
    assert len(split_perfctr) >= 2, "At least one colon (:) is required in the event name"
event_tuple = split_perfctr[:2]
parameters = {}
for p in split_perfctr[2:]:
if '=' in p:
k, v = p.split('=')
if v.startswith('0x'):
parameters[k] = int(v, 16)
else:
parameters[k] = int(v)
else:
parameters[p] = None
event_tuple.append(parameters)
return tuple(event_tuple)
|
def parse_perfctr_event(perfctr):
"""
Parse events in machine description to tuple representation used in Benchmark module.
Examples:
>>> parse_perfctr_event('PERF_EVENT:REG[0-3]')
('PERF_EVENT', 'REG[0-3]')
>>> parse_perfctr_event('PERF_EVENT:REG[0-3]:STAY:FOO=23:BAR=0x23')
('PERF_EVENT', 'REG[0-3]', {'STAY': None, 'FOO': 23, 'BAR': 35})
"""
split_perfctr = perfctr.split(':')
    assert len(split_perfctr) >= 2, "At least one colon (:) is required in the event name"
event_tuple = split_perfctr[:2]
parameters = {}
for p in split_perfctr[2:]:
if '=' in p:
k, v = p.split('=')
if v.startswith('0x'):
parameters[k] = int(v, 16)
else:
parameters[k] = int(v)
else:
parameters[p] = None
event_tuple.append(parameters)
return tuple(event_tuple)
|
[
"Parse",
"events",
"in",
"machine",
"description",
"to",
"tuple",
"representation",
"used",
"in",
"Benchmark",
"module",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/machinemodel.py#L221-L246
|
[
"def",
"parse_perfctr_event",
"(",
"perfctr",
")",
":",
"split_perfctr",
"=",
"perfctr",
".",
"split",
"(",
"':'",
")",
"assert",
"len",
"(",
"split_perfctr",
")",
">=",
"2",
",",
"\"Atleast one colon (:) is required in the event name\"",
"event_tuple",
"=",
"split_perfctr",
"[",
":",
"2",
"]",
"parameters",
"=",
"{",
"}",
"for",
"p",
"in",
"split_perfctr",
"[",
"2",
":",
"]",
":",
"if",
"'='",
"in",
"p",
":",
"k",
",",
"v",
"=",
"p",
".",
"split",
"(",
"'='",
")",
"if",
"v",
".",
"startswith",
"(",
"'0x'",
")",
":",
"parameters",
"[",
"k",
"]",
"=",
"int",
"(",
"v",
",",
"16",
")",
"else",
":",
"parameters",
"[",
"k",
"]",
"=",
"int",
"(",
"v",
")",
"else",
":",
"parameters",
"[",
"p",
"]",
"=",
"None",
"event_tuple",
".",
"append",
"(",
"parameters",
")",
"return",
"tuple",
"(",
"event_tuple",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
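Beyond the doctest above, the value handling is the detail worth re-checking: 0x-prefixed option values parse base-16, everything else base-10, and bare options map to None. A stdlib-only reproduction of that branch:

def parse_options(parts):
    # same branching as the loop over split_perfctr[2:] above
    options = {}
    for p in parts:
        if '=' in p:
            k, v = p.split('=')
            options[k] = int(v, 16) if v.startswith('0x') else int(v)
        else:
            options[p] = None
    return options

print(parse_options(['STAY', 'FOO=23', 'BAR=0x23']))
# {'STAY': None, 'FOO': 23, 'BAR': 35}   (0x23 == 35)
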
test
|
MachineModel.parse_perfmetric
|
Return (sympy expressions, event names and symbols dict) from performance metric str.
|
kerncraft/machinemodel.py
|
def parse_perfmetric(metric):
"""Return (sympy expressions, event names and symbols dict) from performance metric str."""
    # Find all perf counter references
perfcounters = re.findall(r'[A-Z0-9_]+:[A-Z0-9\[\]|\-]+(?::[A-Za-z0-9\-_=]+)*', metric)
# Build a temporary metric, with parser-friendly Symbol names
temp_metric = metric
temp_pc_names = {"SYM{}".format(re.sub("[\[\]\-|=:]", "_", pc)): pc
for i, pc in enumerate(perfcounters)}
for var_name, pc in temp_pc_names.items():
temp_metric = temp_metric.replace(pc, var_name)
# Parse temporary expression
expr = parse_expr(temp_metric)
# Rename symbols to originals
for s in expr.free_symbols:
if s.name in temp_pc_names:
s.name = temp_pc_names[str(s)]
events = {s: MachineModel.parse_perfctr_event(s.name) for s in expr.free_symbols
if s.name in perfcounters}
return expr, events
|
def parse_perfmetric(metric):
"""Return (sympy expressions, event names and symbols dict) from performance metric str."""
    # Find all perf counter references
perfcounters = re.findall(r'[A-Z0-9_]+:[A-Z0-9\[\]|\-]+(?::[A-Za-z0-9\-_=]+)*', metric)
# Build a temporary metric, with parser-friendly Symbol names
temp_metric = metric
temp_pc_names = {"SYM{}".format(re.sub("[\[\]\-|=:]", "_", pc)): pc
for i, pc in enumerate(perfcounters)}
for var_name, pc in temp_pc_names.items():
temp_metric = temp_metric.replace(pc, var_name)
# Parse temporary expression
expr = parse_expr(temp_metric)
# Rename symbols to originals
for s in expr.free_symbols:
if s.name in temp_pc_names:
s.name = temp_pc_names[str(s)]
events = {s: MachineModel.parse_perfctr_event(s.name) for s in expr.free_symbols
if s.name in perfcounters}
return expr, events
|
[
"Return",
"(",
"sympy",
"expressions",
"event",
"names",
"and",
"symbols",
"dict",
")",
"from",
"performance",
"metric",
"str",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/machinemodel.py#L249-L271
|
[
"def",
"parse_perfmetric",
"(",
"metric",
")",
":",
"# Find all perfs counter references",
"perfcounters",
"=",
"re",
".",
"findall",
"(",
"r'[A-Z0-9_]+:[A-Z0-9\\[\\]|\\-]+(?::[A-Za-z0-9\\-_=]+)*'",
",",
"metric",
")",
"# Build a temporary metric, with parser-friendly Symbol names",
"temp_metric",
"=",
"metric",
"temp_pc_names",
"=",
"{",
"\"SYM{}\"",
".",
"format",
"(",
"re",
".",
"sub",
"(",
"\"[\\[\\]\\-|=:]\"",
",",
"\"_\"",
",",
"pc",
")",
")",
":",
"pc",
"for",
"i",
",",
"pc",
"in",
"enumerate",
"(",
"perfcounters",
")",
"}",
"for",
"var_name",
",",
"pc",
"in",
"temp_pc_names",
".",
"items",
"(",
")",
":",
"temp_metric",
"=",
"temp_metric",
".",
"replace",
"(",
"pc",
",",
"var_name",
")",
"# Parse temporary expression",
"expr",
"=",
"parse_expr",
"(",
"temp_metric",
")",
"# Rename symbols to originals",
"for",
"s",
"in",
"expr",
".",
"free_symbols",
":",
"if",
"s",
".",
"name",
"in",
"temp_pc_names",
":",
"s",
".",
"name",
"=",
"temp_pc_names",
"[",
"str",
"(",
"s",
")",
"]",
"events",
"=",
"{",
"s",
":",
"MachineModel",
".",
"parse_perfctr_event",
"(",
"s",
".",
"name",
")",
"for",
"s",
"in",
"expr",
".",
"free_symbols",
"if",
"s",
".",
"name",
"in",
"perfcounters",
"}",
"return",
"expr",
",",
"events"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
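The regular expression in parse_perfmetric is what decides which substrings count as counter references; everything it matches gets rewritten to a parser-friendly SYM... name before sympy sees the metric. A quick check of the pattern against a made-up (but likwid-style) CPI metric:

import re

metric = "CPU_CLK_UNHALTED_CORE:FIXC1 / INSTR_RETIRED_ANY:FIXC0"
pattern = r'[A-Z0-9_]+:[A-Z0-9\[\]|\-]+(?::[A-Za-z0-9\-_=]+)*'
print(re.findall(pattern, metric))
# ['CPU_CLK_UNHALTED_CORE:FIXC1', 'INSTR_RETIRED_ANY:FIXC0']

# the rewrite step replaces []|-=: with _ to build a legal Symbol name
print("SYM" + re.sub(r"[\[\]\-|=:]", "_", "MEM_UOPS_RETIRED_LOADS:PMC[0-3]"))
# SYMMEM_UOPS_RETIRED_LOADS_PMC_0_3_
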
test
|
Intervals._enforce_no_overlap
|
Enforce that no ranges overlap in internal storage.
|
kerncraft/intervals.py
|
def _enforce_no_overlap(self, start_at=0):
"""Enforce that no ranges overlap in internal storage."""
i = start_at
while i+1 < len(self.data):
if self.data[i][1] >= self.data[i+1][0]:
# beginning of i+1-th range is contained in i-th range
if self.data[i][1] < self.data[i+1][1]:
# i+1-th range is longer, thus enlarge i-th range
self.data[i][1] = self.data[i+1][1]
            # remove contained range
del self.data[i+1]
i += 1
|
def _enforce_no_overlap(self, start_at=0):
"""Enforce that no ranges overlap in internal storage."""
i = start_at
while i+1 < len(self.data):
if self.data[i][1] >= self.data[i+1][0]:
# beginning of i+1-th range is contained in i-th range
if self.data[i][1] < self.data[i+1][1]:
# i+1-th range is longer, thus enlarge i-th range
self.data[i][1] = self.data[i+1][1]
            # remove contained range
del self.data[i+1]
i += 1
|
[
"Enforce",
"that",
"no",
"ranges",
"overlap",
"in",
"internal",
"storage",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/intervals.py#L20-L32
|
[
"def",
"_enforce_no_overlap",
"(",
"self",
",",
"start_at",
"=",
"0",
")",
":",
"i",
"=",
"start_at",
"while",
"i",
"+",
"1",
"<",
"len",
"(",
"self",
".",
"data",
")",
":",
"if",
"self",
".",
"data",
"[",
"i",
"]",
"[",
"1",
"]",
">=",
"self",
".",
"data",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
":",
"# beginning of i+1-th range is contained in i-th range",
"if",
"self",
".",
"data",
"[",
"i",
"]",
"[",
"1",
"]",
"<",
"self",
".",
"data",
"[",
"i",
"+",
"1",
"]",
"[",
"1",
"]",
":",
"# i+1-th range is longer, thus enlarge i-th range",
"self",
".",
"data",
"[",
"i",
"]",
"[",
"1",
"]",
"=",
"self",
".",
"data",
"[",
"i",
"+",
"1",
"]",
"[",
"1",
"]",
"# removed contained range",
"del",
"self",
".",
"data",
"[",
"i",
"+",
"1",
"]",
"i",
"+=",
"1"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
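For illustration, a standalone variant of the merge in _enforce_no_overlap that only advances the index when no merge happened, so chains of overlapping ranges collapse in one pass (the data layout, a sorted list of [start, end] pairs, follows the snippet above):

def merge_overlaps(data):
    i = 0
    while i + 1 < len(data):
        if data[i][1] >= data[i + 1][0]:          # ranges touch or overlap
            if data[i][1] < data[i + 1][1]:       # extend i-th range if needed
                data[i][1] = data[i + 1][1]
            del data[i + 1]                       # drop the absorbed range
        else:
            i += 1
    return data

print(merge_overlaps([[0, 5], [4, 10], [9, 15], [20, 30]]))
# [[0, 15], [20, 30]]
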
test
|
get_header_path
|
Return local folder path of header files.
|
kerncraft/__init__.py
|
def get_header_path() -> str:
"""Return local folder path of header files."""
import os
return os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/headers/'
|
def get_header_path() -> str:
"""Return local folder path of header files."""
import os
return os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/headers/'
|
[
"Return",
"local",
"folder",
"path",
"of",
"header",
"files",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/__init__.py#L12-L15
|
[
"def",
"get_header_path",
"(",
")",
"->",
"str",
":",
"import",
"os",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
")",
"+",
"'/headers/'"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
CacheSimulationPredictor._align_iteration_with_cl_boundary
|
Align iteration with cacheline boundary.
|
kerncraft/cacheprediction.py
|
def _align_iteration_with_cl_boundary(self, iteration, subtract=True):
"""Align iteration with cacheline boundary."""
# FIXME handle multiple datatypes
element_size = self.kernel.datatypes_size[self.kernel.datatype]
cacheline_size = self.machine['cacheline size']
elements_per_cacheline = int(cacheline_size // element_size)
# Gathering some loop information:
inner_loop = list(self.kernel.get_loop_stack(subs_consts=True))[-1]
inner_increment = inner_loop['increment']
# do this by aligning either writes (preferred) or reads
# Assumption: writes (and reads) increase linearly
o = self.kernel.compile_global_offsets(iteration=iteration)[0]
if len(o[1]):
# we have a write to work with:
first_offset = min(o[1])
else:
# we use reads
first_offset = min(o[0])
diff = first_offset - \
(int(first_offset) >> self.csim.first_level.cl_bits << self.csim.first_level.cl_bits)
if diff == 0:
return iteration
elif subtract:
return iteration - (diff // element_size) // inner_increment
else:
return iteration + (elements_per_cacheline - diff // element_size) // inner_increment
|
def _align_iteration_with_cl_boundary(self, iteration, subtract=True):
"""Align iteration with cacheline boundary."""
# FIXME handle multiple datatypes
element_size = self.kernel.datatypes_size[self.kernel.datatype]
cacheline_size = self.machine['cacheline size']
elements_per_cacheline = int(cacheline_size // element_size)
# Gathering some loop information:
inner_loop = list(self.kernel.get_loop_stack(subs_consts=True))[-1]
inner_increment = inner_loop['increment']
# do this by aligning either writes (preferred) or reads
# Assumption: writes (and reads) increase linearly
o = self.kernel.compile_global_offsets(iteration=iteration)[0]
if len(o[1]):
# we have a write to work with:
first_offset = min(o[1])
else:
# we use reads
first_offset = min(o[0])
diff = first_offset - \
(int(first_offset) >> self.csim.first_level.cl_bits << self.csim.first_level.cl_bits)
if diff == 0:
return iteration
elif subtract:
return iteration - (diff // element_size) // inner_increment
else:
return iteration + (elements_per_cacheline - diff // element_size) // inner_increment
|
[
"Align",
"iteration",
"with",
"cacheline",
"boundary",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/cacheprediction.py#L352-L380
|
[
"def",
"_align_iteration_with_cl_boundary",
"(",
"self",
",",
"iteration",
",",
"subtract",
"=",
"True",
")",
":",
"# FIXME handle multiple datatypes",
"element_size",
"=",
"self",
".",
"kernel",
".",
"datatypes_size",
"[",
"self",
".",
"kernel",
".",
"datatype",
"]",
"cacheline_size",
"=",
"self",
".",
"machine",
"[",
"'cacheline size'",
"]",
"elements_per_cacheline",
"=",
"int",
"(",
"cacheline_size",
"//",
"element_size",
")",
"# Gathering some loop information:",
"inner_loop",
"=",
"list",
"(",
"self",
".",
"kernel",
".",
"get_loop_stack",
"(",
"subs_consts",
"=",
"True",
")",
")",
"[",
"-",
"1",
"]",
"inner_increment",
"=",
"inner_loop",
"[",
"'increment'",
"]",
"# do this by aligning either writes (preferred) or reads",
"# Assumption: writes (and reads) increase linearly",
"o",
"=",
"self",
".",
"kernel",
".",
"compile_global_offsets",
"(",
"iteration",
"=",
"iteration",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"o",
"[",
"1",
"]",
")",
":",
"# we have a write to work with:",
"first_offset",
"=",
"min",
"(",
"o",
"[",
"1",
"]",
")",
"else",
":",
"# we use reads",
"first_offset",
"=",
"min",
"(",
"o",
"[",
"0",
"]",
")",
"diff",
"=",
"first_offset",
"-",
"(",
"int",
"(",
"first_offset",
")",
">>",
"self",
".",
"csim",
".",
"first_level",
".",
"cl_bits",
"<<",
"self",
".",
"csim",
".",
"first_level",
".",
"cl_bits",
")",
"if",
"diff",
"==",
"0",
":",
"return",
"iteration",
"elif",
"subtract",
":",
"return",
"iteration",
"-",
"(",
"diff",
"//",
"element_size",
")",
"//",
"inner_increment",
"else",
":",
"return",
"iteration",
"+",
"(",
"elements_per_cacheline",
"-",
"diff",
"//",
"element_size",
")",
"//",
"inner_increment"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
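The bit arithmetic in _align_iteration_with_cl_boundary is easiest to see with numbers: shifting a byte offset right and back left by cl_bits rounds it down to its cache-line start, so diff is the offset within the line. A worked example with made-up values (64 B lines, 8 B elements, unit-stride inner loop):

cl_bits, element_size, inner_increment = 6, 8, 1         # 64 B lines, doubles, stride 1
elements_per_cacheline = (1 << cl_bits) // element_size  # 8

first_offset = 200                                       # byte offset of first access
diff = first_offset - (first_offset >> cl_bits << cl_bits)  # 200 - 192 = 8
iteration = 1000
print(iteration - (diff // element_size) // inner_increment)
# 999 (aligned down by one element)
print(iteration + (elements_per_cacheline - diff // element_size) // inner_increment)
# 1007 (aligned up to the next line boundary)
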
test
|
CacheSimulationPredictor.get_loads
|
Return a list with number of loaded cache lines per memory hierarchy level.
|
kerncraft/cacheprediction.py
|
def get_loads(self):
"""Return a list with number of loaded cache lines per memory hierarchy level."""
return [self.stats[cache_level]['LOAD_count'] / self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
def get_loads(self):
"""Return a list with number of loaded cache lines per memory hierarchy level."""
return [self.stats[cache_level]['LOAD_count'] / self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
[
"Return",
"a",
"list",
"with",
"number",
"of",
"loaded",
"cache",
"lines",
"per",
"memory",
"hierarchy",
"level",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/cacheprediction.py#L486-L489
|
[
"def",
"get_loads",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'LOAD_count'",
"]",
"/",
"self",
".",
"first_dim_factor",
"for",
"cache_level",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
")",
")",
"]"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
CacheSimulationPredictor.get_hits
|
Return a list with number of hit cache lines per memory hierarchy level.
|
kerncraft/cacheprediction.py
|
def get_hits(self):
"""Return a list with number of hit cache lines per memory hierarchy level."""
return [self.stats[cache_level]['HIT_count']/self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
def get_hits(self):
"""Return a list with number of hit cache lines per memory hierarchy level."""
return [self.stats[cache_level]['HIT_count']/self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
[
"Return",
"a",
"list",
"with",
"number",
"of",
"hit",
"cache",
"lines",
"per",
"memory",
"hierarchy",
"level",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/cacheprediction.py#L491-L494
|
[
"def",
"get_hits",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'HIT_count'",
"]",
"/",
"self",
".",
"first_dim_factor",
"for",
"cache_level",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
")",
")",
"]"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
CacheSimulationPredictor.get_misses
|
Return a list with number of missed cache lines per memory hierarchy level.
|
kerncraft/cacheprediction.py
|
def get_misses(self):
"""Return a list with number of missed cache lines per memory hierarchy level."""
return [self.stats[cache_level]['MISS_count']/self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
def get_misses(self):
"""Return a list with number of missed cache lines per memory hierarchy level."""
return [self.stats[cache_level]['MISS_count']/self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
[
"Return",
"a",
"list",
"with",
"number",
"of",
"missed",
"cache",
"lines",
"per",
"memory",
"hierarchy",
"level",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/cacheprediction.py#L496-L499
|
[
"def",
"get_misses",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'MISS_count'",
"]",
"/",
"self",
".",
"first_dim_factor",
"for",
"cache_level",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
")",
")",
"]"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
CacheSimulationPredictor.get_stores
|
Return a list with number of stored cache lines per memory hierarchy level.
|
kerncraft/cacheprediction.py
|
def get_stores(self):
"""Return a list with number of stored cache lines per memory hierarchy level."""
return [self.stats[cache_level]['STORE_count']/self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
def get_stores(self):
"""Return a list with number of stored cache lines per memory hierarchy level."""
return [self.stats[cache_level]['STORE_count']/self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
[
"Return",
"a",
"list",
"with",
"number",
"of",
"stored",
"cache",
"lines",
"per",
"memory",
"hierarchy",
"level",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/cacheprediction.py#L501-L504
|
[
"def",
"get_stores",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'STORE_count'",
"]",
"/",
"self",
".",
"first_dim_factor",
"for",
"cache_level",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
")",
")",
"]"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
CacheSimulationPredictor.get_evicts
|
Return a list with number of evicted cache lines per memory hierarchy level.
|
kerncraft/cacheprediction.py
|
def get_evicts(self):
"""Return a list with number of evicted cache lines per memory hierarchy level."""
return [self.stats[cache_level]['EVICT_count']/self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
def get_evicts(self):
"""Return a list with number of evicted cache lines per memory hierarchy level."""
return [self.stats[cache_level]['EVICT_count']/self.first_dim_factor
for cache_level in range(len(self.machine['memory hierarchy']))]
|
[
"Return",
"a",
"list",
"with",
"number",
"of",
"evicted",
"cache",
"lines",
"per",
"memory",
"hierarchy",
"level",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/cacheprediction.py#L506-L509
|
[
"def",
"get_evicts",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'EVICT_count'",
"]",
"/",
"self",
".",
"first_dim_factor",
"for",
"cache_level",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
")",
")",
"]"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
CacheSimulationPredictor.get_infos
|
Return verbose information about the predictor.
|
kerncraft/cacheprediction.py
|
def get_infos(self):
"""Return verbose information about the predictor."""
first_dim_factor = self.first_dim_factor
infos = {'memory hierarchy': [], 'cache stats': self.stats,
'cachelines in stats': first_dim_factor}
for cache_level, cache_info in list(enumerate(self.machine['memory hierarchy'])):
infos['memory hierarchy'].append({
'index': len(infos['memory hierarchy']),
'level': '{}'.format(cache_info['level']),
'total loads': self.stats[cache_level]['LOAD_byte']/first_dim_factor,
'total misses': self.stats[cache_level]['MISS_byte']/first_dim_factor,
'total hits': self.stats[cache_level]['HIT_byte']/first_dim_factor,
'total stores': self.stats[cache_level]['STORE_byte']/first_dim_factor,
'total evicts': self.stats[cache_level]['EVICT_byte']/first_dim_factor,
'total lines load': self.stats[cache_level]['LOAD_count']/first_dim_factor,
'total lines misses': self.stats[cache_level]['MISS_count']/first_dim_factor,
'total lines hits': self.stats[cache_level]['HIT_count']/first_dim_factor,
'total lines stores': self.stats[cache_level]['STORE_count']/first_dim_factor,
'total lines evicts': self.stats[cache_level]['EVICT_count']/first_dim_factor,
'cycles': None})
return infos
|
def get_infos(self):
"""Return verbose information about the predictor."""
first_dim_factor = self.first_dim_factor
infos = {'memory hierarchy': [], 'cache stats': self.stats,
'cachelines in stats': first_dim_factor}
for cache_level, cache_info in list(enumerate(self.machine['memory hierarchy'])):
infos['memory hierarchy'].append({
'index': len(infos['memory hierarchy']),
'level': '{}'.format(cache_info['level']),
'total loads': self.stats[cache_level]['LOAD_byte']/first_dim_factor,
'total misses': self.stats[cache_level]['MISS_byte']/first_dim_factor,
'total hits': self.stats[cache_level]['HIT_byte']/first_dim_factor,
'total stores': self.stats[cache_level]['STORE_byte']/first_dim_factor,
'total evicts': self.stats[cache_level]['EVICT_byte']/first_dim_factor,
'total lines load': self.stats[cache_level]['LOAD_count']/first_dim_factor,
'total lines misses': self.stats[cache_level]['MISS_count']/first_dim_factor,
'total lines hits': self.stats[cache_level]['HIT_count']/first_dim_factor,
'total lines stores': self.stats[cache_level]['STORE_count']/first_dim_factor,
'total lines evicts': self.stats[cache_level]['EVICT_count']/first_dim_factor,
'cycles': None})
return infos
|
[
"Return",
"verbose",
"information",
"about",
"the",
"predictor",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/cacheprediction.py#L511-L531
|
[
"def",
"get_infos",
"(",
"self",
")",
":",
"first_dim_factor",
"=",
"self",
".",
"first_dim_factor",
"infos",
"=",
"{",
"'memory hierarchy'",
":",
"[",
"]",
",",
"'cache stats'",
":",
"self",
".",
"stats",
",",
"'cachelines in stats'",
":",
"first_dim_factor",
"}",
"for",
"cache_level",
",",
"cache_info",
"in",
"list",
"(",
"enumerate",
"(",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
")",
")",
":",
"infos",
"[",
"'memory hierarchy'",
"]",
".",
"append",
"(",
"{",
"'index'",
":",
"len",
"(",
"infos",
"[",
"'memory hierarchy'",
"]",
")",
",",
"'level'",
":",
"'{}'",
".",
"format",
"(",
"cache_info",
"[",
"'level'",
"]",
")",
",",
"'total loads'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'LOAD_byte'",
"]",
"/",
"first_dim_factor",
",",
"'total misses'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'MISS_byte'",
"]",
"/",
"first_dim_factor",
",",
"'total hits'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'HIT_byte'",
"]",
"/",
"first_dim_factor",
",",
"'total stores'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'STORE_byte'",
"]",
"/",
"first_dim_factor",
",",
"'total evicts'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'EVICT_byte'",
"]",
"/",
"first_dim_factor",
",",
"'total lines load'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'LOAD_count'",
"]",
"/",
"first_dim_factor",
",",
"'total lines misses'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'MISS_count'",
"]",
"/",
"first_dim_factor",
",",
"'total lines hits'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'HIT_count'",
"]",
"/",
"first_dim_factor",
",",
"'total lines stores'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'STORE_count'",
"]",
"/",
"first_dim_factor",
",",
"'total lines evicts'",
":",
"self",
".",
"stats",
"[",
"cache_level",
"]",
"[",
"'EVICT_count'",
"]",
"/",
"first_dim_factor",
",",
"'cycles'",
":",
"None",
"}",
")",
"return",
"infos"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
measure_bw
|
*total_size* is given in kilobytes
|
kerncraft/likwid_bench_auto.py
|
def measure_bw(type_, total_size, threads_per_core, max_threads_per_core, cores_per_socket,
sockets):
"""*size* is given in kilo bytes"""
groups = []
for s in range(sockets):
groups += [
'-w',
'S' + str(s) + ':' + str(total_size) + 'kB:' +
str(threads_per_core * cores_per_socket) +
':1:' + str(int(max_threads_per_core / threads_per_core))]
# for older likwid versions add ['-g', str(sockets), '-i', str(iterations)] to cmd
cmd = ['likwid-bench', '-t', type_] + groups
sys.stderr.write(' '.join(cmd))
output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].decode('utf-8')
if not output:
print(' '.join(cmd) + ' returned no output, possibly wrong version installed '
'(requires 4.0 or later)', file=sys.stderr)
sys.exit(1)
bw = float(get_match_or_break(r'^MByte/s:\s+([0-9]+(?:\.[0-9]+)?)\s*$', output)[0])
print(' ', PrefixedUnit(bw, 'MB/s'), file=sys.stderr)
return PrefixedUnit(bw, 'MB/s')
|
def measure_bw(type_, total_size, threads_per_core, max_threads_per_core, cores_per_socket,
sockets):
"""*size* is given in kilo bytes"""
groups = []
for s in range(sockets):
groups += [
'-w',
'S' + str(s) + ':' + str(total_size) + 'kB:' +
str(threads_per_core * cores_per_socket) +
':1:' + str(int(max_threads_per_core / threads_per_core))]
# for older likwid versions add ['-g', str(sockets), '-i', str(iterations)] to cmd
cmd = ['likwid-bench', '-t', type_] + groups
sys.stderr.write(' '.join(cmd))
output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].decode('utf-8')
if not output:
print(' '.join(cmd) + ' returned no output, possibly wrong version installed '
'(requires 4.0 or later)', file=sys.stderr)
sys.exit(1)
bw = float(get_match_or_break(r'^MByte/s:\s+([0-9]+(?:\.[0-9]+)?)\s*$', output)[0])
print(' ', PrefixedUnit(bw, 'MB/s'), file=sys.stderr)
return PrefixedUnit(bw, 'MB/s')
|
[
"*",
"size",
"*",
"is",
"given",
"in",
"kilo",
"bytes"
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/likwid_bench_auto.py#L154-L174
|
[
"def",
"measure_bw",
"(",
"type_",
",",
"total_size",
",",
"threads_per_core",
",",
"max_threads_per_core",
",",
"cores_per_socket",
",",
"sockets",
")",
":",
"groups",
"=",
"[",
"]",
"for",
"s",
"in",
"range",
"(",
"sockets",
")",
":",
"groups",
"+=",
"[",
"'-w'",
",",
"'S'",
"+",
"str",
"(",
"s",
")",
"+",
"':'",
"+",
"str",
"(",
"total_size",
")",
"+",
"'kB:'",
"+",
"str",
"(",
"threads_per_core",
"*",
"cores_per_socket",
")",
"+",
"':1:'",
"+",
"str",
"(",
"int",
"(",
"max_threads_per_core",
"/",
"threads_per_core",
")",
")",
"]",
"# for older likwid versions add ['-g', str(sockets), '-i', str(iterations)] to cmd",
"cmd",
"=",
"[",
"'likwid-bench'",
",",
"'-t'",
",",
"type_",
"]",
"+",
"groups",
"sys",
".",
"stderr",
".",
"write",
"(",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"output",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"not",
"output",
":",
"print",
"(",
"' '",
".",
"join",
"(",
"cmd",
")",
"+",
"' returned no output, possibly wrong version installed '",
"'(requires 4.0 or later)'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"bw",
"=",
"float",
"(",
"get_match_or_break",
"(",
"r'^MByte/s:\\s+([0-9]+(?:\\.[0-9]+)?)\\s*$'",
",",
"output",
")",
"[",
"0",
"]",
")",
"print",
"(",
"' '",
",",
"PrefixedUnit",
"(",
"bw",
",",
"'MB/s'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"PrefixedUnit",
"(",
"bw",
",",
"'MB/s'",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
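What the loop in measure_bw actually builds is one likwid-bench workgroup per socket, encoded as S<socket>:<size>kB:<threads>:1:<SMT stride>. A dry-run of just the command construction with made-up machine parameters (nothing is executed):

type_, total_size = 'load', 100000            # kernel name and size in kB (made up)
threads_per_core, max_threads_per_core = 1, 2
cores_per_socket, sockets = 20, 2

groups = []
for s in range(sockets):
    groups += ['-w',
               'S' + str(s) + ':' + str(total_size) + 'kB:' +
               str(threads_per_core * cores_per_socket) +
               ':1:' + str(int(max_threads_per_core / threads_per_core))]
print(['likwid-bench', '-t', type_] + groups)
# ['likwid-bench', '-t', 'load', '-w', 'S0:100000kB:20:1:2', '-w', 'S1:100000kB:20:1:2']
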
test
|
fix_env_variable
|
Fix environment variable to a value within context. Unset if value is None.
|
kerncraft/models/benchmark.py
|
def fix_env_variable(name, value):
"""Fix environment variable to a value within context. Unset if value is None."""
orig = os.environ.get(name, None)
if value is not None:
# Set if value is not None
os.environ[name] = value
elif name in os.environ:
# Unset if value is None
del os.environ[name]
try:
yield
finally:
if orig is not None:
# Restore original value
os.environ[name] = orig
elif name in os.environ:
# Unset
del os.environ[name]
|
def fix_env_variable(name, value):
"""Fix environment variable to a value within context. Unset if value is None."""
orig = os.environ.get(name, None)
if value is not None:
# Set if value is not None
os.environ[name] = value
elif name in os.environ:
# Unset if value is None
del os.environ[name]
try:
yield
finally:
if orig is not None:
# Restore original value
os.environ[name] = orig
elif name in os.environ:
# Unset
del os.environ[name]
|
[
"Fix",
"environment",
"variable",
"to",
"a",
"value",
"within",
"context",
".",
"Unset",
"if",
"value",
"is",
"None",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/benchmark.py#L41-L58
|
[
"def",
"fix_env_variable",
"(",
"name",
",",
"value",
")",
":",
"orig",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"name",
",",
"None",
")",
"if",
"value",
"is",
"not",
"None",
":",
"# Set if value is not None",
"os",
".",
"environ",
"[",
"name",
"]",
"=",
"value",
"elif",
"name",
"in",
"os",
".",
"environ",
":",
"# Unset if value is None",
"del",
"os",
".",
"environ",
"[",
"name",
"]",
"try",
":",
"yield",
"finally",
":",
"if",
"orig",
"is",
"not",
"None",
":",
"# Restore original value",
"os",
".",
"environ",
"[",
"name",
"]",
"=",
"orig",
"elif",
"name",
"in",
"os",
".",
"environ",
":",
"# Unset",
"del",
"os",
".",
"environ",
"[",
"name",
"]"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
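fix_env_variable is a generator, so in the full module it is presumably wrapped with contextlib.contextmanager (the decorator is not visible in this excerpt). A self-contained version with the decorator made explicit, plus the typical unset-and-restore usage:

import os
from contextlib import contextmanager

@contextmanager
def fix_env_variable(name, value):
    orig = os.environ.get(name, None)
    if value is not None:
        os.environ[name] = value      # set within the context
    elif name in os.environ:
        del os.environ[name]          # unset within the context
    try:
        yield
    finally:
        if orig is not None:
            os.environ[name] = orig   # restore original value
        elif name in os.environ:
            del os.environ[name]

os.environ['OMP_NUM_THREADS'] = '8'
with fix_env_variable('OMP_NUM_THREADS', None):
    print('inside:', os.environ.get('OMP_NUM_THREADS'))  # inside: None
print('after:', os.environ.get('OMP_NUM_THREADS'))       # after: 8
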
test
|
Benchmark.configure_arggroup
|
Configure argument parser.
|
kerncraft/models/benchmark.py
|
def configure_arggroup(cls, parser):
"""Configure argument parser."""
parser.add_argument(
'--no-phenoecm', action='store_true',
help='Disables the phenomenological ECM model building.')
parser.add_argument(
'--iterations', type=int, default=10,
help='Number of outer-loop iterations (e.g. time loop) during benchmarking. '
'Default is 10, but actual number will be adapted to at least 0.2s runtime.')
parser.add_argument(
'--ignore-warnings', action='store_true',
        help='Ignore warnings about mismatched CPU model and frequency.')
|
def configure_arggroup(cls, parser):
"""Configure argument parser."""
parser.add_argument(
'--no-phenoecm', action='store_true',
help='Disables the phenomenological ECM model building.')
parser.add_argument(
'--iterations', type=int, default=10,
help='Number of outer-loop iterations (e.g. time loop) during benchmarking. '
'Default is 10, but actual number will be adapted to at least 0.2s runtime.')
parser.add_argument(
'--ignore-warnings', action='store_true',
        help='Ignore warnings about mismatched CPU model and frequency.')
|
[
"Configure",
"argument",
"parser",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/benchmark.py#L198-L209
|
[
"def",
"configure_arggroup",
"(",
"cls",
",",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'--no-phenoecm'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Disables the phenomenological ECM model building.'",
")",
"parser",
".",
"add_argument",
"(",
"'--iterations'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"10",
",",
"help",
"=",
"'Number of outer-loop iterations (e.g. time loop) during benchmarking. '",
"'Default is 10, but actual number will be adapted to at least 0.2s runtime.'",
")",
"parser",
".",
"add_argument",
"(",
"'--ignore-warnings'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Ignore warnings about missmatched CPU model and frequency.'",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
test
|
Benchmark.perfctr
|
Run *cmd* with likwid-perfctr and return the result as a dict.
*group* may be a performance group known to likwid-perfctr or an event string.
If the CLI argument cores > 1, run multi-core, otherwise single-core.
|
kerncraft/models/benchmark.py
|
def perfctr(self, cmd, group='MEM', code_markers=True):
"""
    Run *cmd* with likwid-perfctr and return the result as a dict.
    *group* may be a performance group known to likwid-perfctr or an event string.
    If the CLI argument cores > 1, run multi-core, otherwise single-core.
"""
# Making sure likwid-perfctr is available:
if find_executable('likwid-perfctr') is None:
print("likwid-perfctr was not found. Make sure likwid is installed and found in PATH.",
file=sys.stderr)
sys.exit(1)
    # FIXME currently only single-core measurements are supported!
perf_cmd = ['likwid-perfctr', '-f', '-O', '-g', group]
cpu = 'S0:0'
if self._args.cores > 1:
cpu += '-'+str(self._args.cores-1)
# Pinned and measured on cpu
perf_cmd += ['-C', cpu]
# code must be marked using likwid markers
perf_cmd.append('-m')
perf_cmd += cmd
if self.verbose > 1:
print(' '.join(perf_cmd))
try:
with fix_env_variable('OMP_NUM_THREADS', None):
output = subprocess.check_output(perf_cmd).decode('utf-8').split('\n')
except subprocess.CalledProcessError as e:
print("Executing benchmark failed: {!s}".format(e), file=sys.stderr)
sys.exit(1)
# TODO multicore output is different and needs to be considered here!
results = {}
for line in output:
line = line.split(',')
try:
# Metrics
results[line[0]] = float(line[1])
except ValueError:
# Would not convert to float
pass
except IndexError:
            # Not a parsable line (did not contain any commas)
continue
try:
# Event counters
if line[2] == '-' or line[2] == 'nan':
counter_value = 0
else:
counter_value = int(line[2])
if re.fullmatch(r'[A-Z0-9_]+', line[0]) and re.fullmatch(r'[A-Z0-9]+', line[1]):
results.setdefault(line[0], {})
results[line[0]][line[1]] = counter_value
except (IndexError, ValueError):
pass
return results
|
def perfctr(self, cmd, group='MEM', code_markers=True):
"""
Run *cmd* with likwid-perfctr and returns result as dict.
*group* may be a performance group known to likwid-perfctr or an event string.
    If CLI argument cores > 1, run multi-core; otherwise single-core.
"""
# Making sure likwid-perfctr is available:
if find_executable('likwid-perfctr') is None:
print("likwid-perfctr was not found. Make sure likwid is installed and found in PATH.",
file=sys.stderr)
sys.exit(1)
    # FIXME currently only single-core measurements are supported!
perf_cmd = ['likwid-perfctr', '-f', '-O', '-g', group]
cpu = 'S0:0'
if self._args.cores > 1:
cpu += '-'+str(self._args.cores-1)
# Pinned and measured on cpu
perf_cmd += ['-C', cpu]
# code must be marked using likwid markers
perf_cmd.append('-m')
perf_cmd += cmd
if self.verbose > 1:
print(' '.join(perf_cmd))
try:
with fix_env_variable('OMP_NUM_THREADS', None):
output = subprocess.check_output(perf_cmd).decode('utf-8').split('\n')
except subprocess.CalledProcessError as e:
print("Executing benchmark failed: {!s}".format(e), file=sys.stderr)
sys.exit(1)
# TODO multicore output is different and needs to be considered here!
results = {}
for line in output:
line = line.split(',')
try:
# Metrics
results[line[0]] = float(line[1])
except ValueError:
# Would not convert to float
pass
except IndexError:
            # Not a parsable line (did not contain any commas)
continue
try:
# Event counters
if line[2] == '-' or line[2] == 'nan':
counter_value = 0
else:
counter_value = int(line[2])
if re.fullmatch(r'[A-Z0-9_]+', line[0]) and re.fullmatch(r'[A-Z0-9]+', line[1]):
results.setdefault(line[0], {})
results[line[0]][line[1]] = counter_value
except (IndexError, ValueError):
pass
return results
|
[
"Run",
"*",
"cmd",
"*",
"with",
"likwid",
"-",
"perfctr",
"and",
"returns",
"result",
"as",
"dict",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/benchmark.py#L279-L341
|
[
"def",
"perfctr",
"(",
"self",
",",
"cmd",
",",
"group",
"=",
"'MEM'",
",",
"code_markers",
"=",
"True",
")",
":",
"# Making sure likwid-perfctr is available:",
"if",
"find_executable",
"(",
"'likwid-perfctr'",
")",
"is",
"None",
":",
"print",
"(",
"\"likwid-perfctr was not found. Make sure likwid is installed and found in PATH.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# FIXME currently only single core measurements support!",
"perf_cmd",
"=",
"[",
"'likwid-perfctr'",
",",
"'-f'",
",",
"'-O'",
",",
"'-g'",
",",
"group",
"]",
"cpu",
"=",
"'S0:0'",
"if",
"self",
".",
"_args",
".",
"cores",
">",
"1",
":",
"cpu",
"+=",
"'-'",
"+",
"str",
"(",
"self",
".",
"_args",
".",
"cores",
"-",
"1",
")",
"# Pinned and measured on cpu",
"perf_cmd",
"+=",
"[",
"'-C'",
",",
"cpu",
"]",
"# code must be marked using likwid markers",
"perf_cmd",
".",
"append",
"(",
"'-m'",
")",
"perf_cmd",
"+=",
"cmd",
"if",
"self",
".",
"verbose",
">",
"1",
":",
"print",
"(",
"' '",
".",
"join",
"(",
"perf_cmd",
")",
")",
"try",
":",
"with",
"fix_env_variable",
"(",
"'OMP_NUM_THREADS'",
",",
"None",
")",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"perf_cmd",
")",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"split",
"(",
"'\\n'",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"print",
"(",
"\"Executing benchmark failed: {!s}\"",
".",
"format",
"(",
"e",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# TODO multicore output is different and needs to be considered here!",
"results",
"=",
"{",
"}",
"for",
"line",
"in",
"output",
":",
"line",
"=",
"line",
".",
"split",
"(",
"','",
")",
"try",
":",
"# Metrics",
"results",
"[",
"line",
"[",
"0",
"]",
"]",
"=",
"float",
"(",
"line",
"[",
"1",
"]",
")",
"except",
"ValueError",
":",
"# Would not convert to float",
"pass",
"except",
"IndexError",
":",
"# Not a parable line (did not contain any commas)",
"continue",
"try",
":",
"# Event counters",
"if",
"line",
"[",
"2",
"]",
"==",
"'-'",
"or",
"line",
"[",
"2",
"]",
"==",
"'nan'",
":",
"counter_value",
"=",
"0",
"else",
":",
"counter_value",
"=",
"int",
"(",
"line",
"[",
"2",
"]",
")",
"if",
"re",
".",
"fullmatch",
"(",
"r'[A-Z0-9_]+'",
",",
"line",
"[",
"0",
"]",
")",
"and",
"re",
".",
"fullmatch",
"(",
"r'[A-Z0-9]+'",
",",
"line",
"[",
"1",
"]",
")",
":",
"results",
".",
"setdefault",
"(",
"line",
"[",
"0",
"]",
",",
"{",
"}",
")",
"results",
"[",
"line",
"[",
"0",
"]",
"]",
"[",
"line",
"[",
"1",
"]",
"]",
"=",
"counter_value",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"pass",
"return",
"results"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
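
The parsing loop above is deliberately tolerant: likwid-perfctr -O emits CSV mixed with non-CSV lines, and the two try blocks classify each row as a metric (name,value) or an event counter (event,register,count). A standalone sketch of the same idea on canned output (sample lines invented for illustration; the regex validation of the real code is omitted):

sample_output = [
    "Runtime (RDTSC) [s],1.2345",        # metric row: name,value
    "INSTR_RETIRED_ANY,FIXC0,123456",    # event row: event,register,count
    "some header line",                   # no commas -> skipped by both branches
]
results = {}
for line in sample_output:
    parts = line.split(',')
    try:
        results[parts[0]] = float(parts[1])
    except (ValueError, IndexError):
        pass
    try:
        value = 0 if parts[2] in ('-', 'nan') else int(parts[2])
        results.setdefault(parts[0], {})[parts[1]] = value
    except (ValueError, IndexError):
        pass
print(results)
# {'Runtime (RDTSC) [s]': 1.2345, 'INSTR_RETIRED_ANY': {'FIXC0': 123456}}
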
test
|
Benchmark.analyze
|
Run analysis.
|
kerncraft/models/benchmark.py
|
def analyze(self):
"""Run analysis."""
bench = self.kernel.build_executable(verbose=self.verbose > 1, openmp=self._args.cores > 1)
element_size = self.kernel.datatypes_size[self.kernel.datatype]
# Build arguments to pass to command:
args = [str(s) for s in list(self.kernel.constants.values())]
# Determine base runtime with 10 iterations
runtime = 0.0
time_per_repetition = 2.0 / 10.0
repetitions = self.iterations // 10
mem_results = {}
# TODO if cores > 1, results are for openmp run. Things might need to be changed here!
while runtime < 1.5:
# Interpolate to a 2.0s run
if time_per_repetition != 0.0:
repetitions = 2.0 // time_per_repetition
else:
repetitions = int(repetitions * 10)
mem_results = self.perfctr([bench] + [str(repetitions)] + args, group="MEM")
runtime = mem_results['Runtime (RDTSC) [s]']
time_per_repetition = runtime / float(repetitions)
raw_results = [mem_results]
# Base metrics for further metric computations:
# An iteration is equal to one high-level code inner-most-loop iteration
iterations_per_repetition = reduce(
operator.mul,
[self.kernel.subs_consts(max_ - min_) / self.kernel.subs_consts(step)
for idx, min_, max_, step in self.kernel._loop_stack],
1)
iterations_per_cacheline = (float(self.machine['cacheline size']) /
self.kernel.bytes_per_iteration)
cys_per_repetition = time_per_repetition * float(self.machine['clock'])
# Gather remaining counters
if not self.no_phenoecm:
# Build events and sympy expressions for all model metrics
T_OL, event_counters = self.machine.parse_perfmetric(
self.machine['overlapping model']['performance counter metric'])
T_data, event_dict = self.machine.parse_perfmetric(
self.machine['non-overlapping model']['performance counter metric'])
event_counters.update(event_dict)
cache_metrics = defaultdict(dict)
for i in range(len(self.machine['memory hierarchy']) - 1):
cache_info = self.machine['memory hierarchy'][i]
name = cache_info['level']
for k, v in cache_info['performance counter metrics'].items():
cache_metrics[name][k], event_dict = self.machine.parse_perfmetric(v)
event_counters.update(event_dict)
# Compile minimal runs to gather all required events
minimal_runs = build_minimal_runs(list(event_counters.values()))
measured_ctrs = {}
for run in minimal_runs:
ctrs = ','.join([eventstr(e) for e in run])
r = self.perfctr([bench] + [str(repetitions)] + args, group=ctrs)
raw_results.append(r)
measured_ctrs.update(r)
# Match measured counters to symbols
event_counter_results = {}
for sym, ctr in event_counters.items():
event, regs, parameter = ctr[0], register_options(ctr[1]), ctr[2]
for r in regs:
if r in measured_ctrs[event]:
event_counter_results[sym] = measured_ctrs[event][r]
        # Analytical metrics needed for further calculation
cl_size = float(self.machine['cacheline size'])
total_iterations = iterations_per_repetition * repetitions
total_cachelines = total_iterations / iterations_per_cacheline
T_OL_result = T_OL.subs(event_counter_results) / total_cachelines
cache_metric_results = defaultdict(dict)
for cache, mtrcs in cache_metrics.items():
for m, e in mtrcs.items():
cache_metric_results[cache][m] = e.subs(event_counter_results)
# Inter-cache transfers per CL
cache_transfers_per_cl = {cache: {k: PrefixedUnit(v / total_cachelines, 'CL/CL')
for k, v in d.items()}
for cache, d in cache_metric_results.items()}
cache_transfers_per_cl['L1']['accesses'].unit = 'LOAD/CL'
# Select appropriate bandwidth
mem_bw, mem_bw_kernel = self.machine.get_bandwidth(
-1, # mem
cache_metric_results['L3']['misses'], # load_streams
cache_metric_results['L3']['evicts'], # store_streams
1)
data_transfers = {
# Assuming 0.5 cy / LOAD (SSE on SNB or IVB; AVX on HSW, BDW, SKL or SKX)
'T_nOL': (cache_metric_results['L1']['accesses'] / total_cachelines * 0.5),
'T_L1L2': ((cache_metric_results['L1']['misses'] +
cache_metric_results['L1']['evicts']) /
total_cachelines * cl_size /
self.machine['memory hierarchy'][1]['non-overlap upstream throughput'][0]),
'T_L2L3': ((cache_metric_results['L2']['misses'] +
cache_metric_results['L2']['evicts']) /
total_cachelines * cl_size /
self.machine['memory hierarchy'][2]['non-overlap upstream throughput'][0]),
'T_L3MEM': ((cache_metric_results['L3']['misses'] +
cache_metric_results['L3']['evicts']) *
float(self.machine['cacheline size']) /
total_cachelines / mem_bw *
float(self.machine['clock']))
}
# Build phenomenological ECM model:
ecm_model = {'T_OL': T_OL_result}
ecm_model.update(data_transfers)
else:
event_counters = {}
ecm_model = None
cache_transfers_per_cl = None
self.results = {'raw output': raw_results, 'ECM': ecm_model,
'data transfers': cache_transfers_per_cl,
'Runtime (per repetition) [s]': time_per_repetition,
'event counters': event_counters,
'Iterations per repetition': iterations_per_repetition,
'Iterations per cacheline': iterations_per_cacheline}
# TODO make more generic to support other (and multiple) constant names
self.results['Runtime (per cacheline update) [cy/CL]'] = \
(cys_per_repetition / iterations_per_repetition) * iterations_per_cacheline
self.results['MEM volume (per repetition) [B]'] = \
mem_results['Memory data volume [GBytes]'] * 1e9 / repetitions
self.results['Performance [MFLOP/s]'] = \
sum(self.kernel._flops.values()) / (
time_per_repetition / iterations_per_repetition) / 1e6
if 'Memory bandwidth [MBytes/s]' in mem_results:
self.results['MEM BW [MByte/s]'] = mem_results['Memory bandwidth [MBytes/s]']
else:
self.results['MEM BW [MByte/s]'] = mem_results['Memory BW [MBytes/s]']
self.results['Performance [MLUP/s]'] = (
iterations_per_repetition / time_per_repetition) / 1e6
self.results['Performance [MIt/s]'] = (
iterations_per_repetition / time_per_repetition) / 1e6
|
def analyze(self):
"""Run analysis."""
bench = self.kernel.build_executable(verbose=self.verbose > 1, openmp=self._args.cores > 1)
element_size = self.kernel.datatypes_size[self.kernel.datatype]
# Build arguments to pass to command:
args = [str(s) for s in list(self.kernel.constants.values())]
# Determine base runtime with 10 iterations
runtime = 0.0
time_per_repetition = 2.0 / 10.0
repetitions = self.iterations // 10
mem_results = {}
# TODO if cores > 1, results are for openmp run. Things might need to be changed here!
while runtime < 1.5:
# Interpolate to a 2.0s run
if time_per_repetition != 0.0:
repetitions = 2.0 // time_per_repetition
else:
repetitions = int(repetitions * 10)
mem_results = self.perfctr([bench] + [str(repetitions)] + args, group="MEM")
runtime = mem_results['Runtime (RDTSC) [s]']
time_per_repetition = runtime / float(repetitions)
raw_results = [mem_results]
# Base metrics for further metric computations:
# An iteration is equal to one high-level code inner-most-loop iteration
iterations_per_repetition = reduce(
operator.mul,
[self.kernel.subs_consts(max_ - min_) / self.kernel.subs_consts(step)
for idx, min_, max_, step in self.kernel._loop_stack],
1)
iterations_per_cacheline = (float(self.machine['cacheline size']) /
self.kernel.bytes_per_iteration)
cys_per_repetition = time_per_repetition * float(self.machine['clock'])
# Gather remaining counters
if not self.no_phenoecm:
# Build events and sympy expressions for all model metrics
T_OL, event_counters = self.machine.parse_perfmetric(
self.machine['overlapping model']['performance counter metric'])
T_data, event_dict = self.machine.parse_perfmetric(
self.machine['non-overlapping model']['performance counter metric'])
event_counters.update(event_dict)
cache_metrics = defaultdict(dict)
for i in range(len(self.machine['memory hierarchy']) - 1):
cache_info = self.machine['memory hierarchy'][i]
name = cache_info['level']
for k, v in cache_info['performance counter metrics'].items():
cache_metrics[name][k], event_dict = self.machine.parse_perfmetric(v)
event_counters.update(event_dict)
# Compile minimal runs to gather all required events
minimal_runs = build_minimal_runs(list(event_counters.values()))
measured_ctrs = {}
for run in minimal_runs:
ctrs = ','.join([eventstr(e) for e in run])
r = self.perfctr([bench] + [str(repetitions)] + args, group=ctrs)
raw_results.append(r)
measured_ctrs.update(r)
# Match measured counters to symbols
event_counter_results = {}
for sym, ctr in event_counters.items():
event, regs, parameter = ctr[0], register_options(ctr[1]), ctr[2]
for r in regs:
if r in measured_ctrs[event]:
event_counter_results[sym] = measured_ctrs[event][r]
        # Analytical metrics needed for further calculation
cl_size = float(self.machine['cacheline size'])
total_iterations = iterations_per_repetition * repetitions
total_cachelines = total_iterations / iterations_per_cacheline
T_OL_result = T_OL.subs(event_counter_results) / total_cachelines
cache_metric_results = defaultdict(dict)
for cache, mtrcs in cache_metrics.items():
for m, e in mtrcs.items():
cache_metric_results[cache][m] = e.subs(event_counter_results)
# Inter-cache transfers per CL
cache_transfers_per_cl = {cache: {k: PrefixedUnit(v / total_cachelines, 'CL/CL')
for k, v in d.items()}
for cache, d in cache_metric_results.items()}
cache_transfers_per_cl['L1']['accesses'].unit = 'LOAD/CL'
# Select appropriate bandwidth
mem_bw, mem_bw_kernel = self.machine.get_bandwidth(
-1, # mem
cache_metric_results['L3']['misses'], # load_streams
cache_metric_results['L3']['evicts'], # store_streams
1)
data_transfers = {
# Assuming 0.5 cy / LOAD (SSE on SNB or IVB; AVX on HSW, BDW, SKL or SKX)
'T_nOL': (cache_metric_results['L1']['accesses'] / total_cachelines * 0.5),
'T_L1L2': ((cache_metric_results['L1']['misses'] +
cache_metric_results['L1']['evicts']) /
total_cachelines * cl_size /
self.machine['memory hierarchy'][1]['non-overlap upstream throughput'][0]),
'T_L2L3': ((cache_metric_results['L2']['misses'] +
cache_metric_results['L2']['evicts']) /
total_cachelines * cl_size /
self.machine['memory hierarchy'][2]['non-overlap upstream throughput'][0]),
'T_L3MEM': ((cache_metric_results['L3']['misses'] +
cache_metric_results['L3']['evicts']) *
float(self.machine['cacheline size']) /
total_cachelines / mem_bw *
float(self.machine['clock']))
}
# Build phenomenological ECM model:
ecm_model = {'T_OL': T_OL_result}
ecm_model.update(data_transfers)
else:
event_counters = {}
ecm_model = None
cache_transfers_per_cl = None
self.results = {'raw output': raw_results, 'ECM': ecm_model,
'data transfers': cache_transfers_per_cl,
'Runtime (per repetition) [s]': time_per_repetition,
'event counters': event_counters,
'Iterations per repetition': iterations_per_repetition,
'Iterations per cacheline': iterations_per_cacheline}
# TODO make more generic to support other (and multiple) constant names
self.results['Runtime (per cacheline update) [cy/CL]'] = \
(cys_per_repetition / iterations_per_repetition) * iterations_per_cacheline
self.results['MEM volume (per repetition) [B]'] = \
mem_results['Memory data volume [GBytes]'] * 1e9 / repetitions
self.results['Performance [MFLOP/s]'] = \
sum(self.kernel._flops.values()) / (
time_per_repetition / iterations_per_repetition) / 1e6
if 'Memory bandwidth [MBytes/s]' in mem_results:
self.results['MEM BW [MByte/s]'] = mem_results['Memory bandwidth [MBytes/s]']
else:
self.results['MEM BW [MByte/s]'] = mem_results['Memory BW [MBytes/s]']
self.results['Performance [MLUP/s]'] = (
iterations_per_repetition / time_per_repetition) / 1e6
self.results['Performance [MIt/s]'] = (
iterations_per_repetition / time_per_repetition) / 1e6
|
[
"Run",
"analysis",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/benchmark.py#L343-L486
|
[
"def",
"analyze",
"(",
"self",
")",
":",
"bench",
"=",
"self",
".",
"kernel",
".",
"build_executable",
"(",
"verbose",
"=",
"self",
".",
"verbose",
">",
"1",
",",
"openmp",
"=",
"self",
".",
"_args",
".",
"cores",
">",
"1",
")",
"element_size",
"=",
"self",
".",
"kernel",
".",
"datatypes_size",
"[",
"self",
".",
"kernel",
".",
"datatype",
"]",
"# Build arguments to pass to command:",
"args",
"=",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"list",
"(",
"self",
".",
"kernel",
".",
"constants",
".",
"values",
"(",
")",
")",
"]",
"# Determine base runtime with 10 iterations",
"runtime",
"=",
"0.0",
"time_per_repetition",
"=",
"2.0",
"/",
"10.0",
"repetitions",
"=",
"self",
".",
"iterations",
"//",
"10",
"mem_results",
"=",
"{",
"}",
"# TODO if cores > 1, results are for openmp run. Things might need to be changed here!",
"while",
"runtime",
"<",
"1.5",
":",
"# Interpolate to a 2.0s run",
"if",
"time_per_repetition",
"!=",
"0.0",
":",
"repetitions",
"=",
"2.0",
"//",
"time_per_repetition",
"else",
":",
"repetitions",
"=",
"int",
"(",
"repetitions",
"*",
"10",
")",
"mem_results",
"=",
"self",
".",
"perfctr",
"(",
"[",
"bench",
"]",
"+",
"[",
"str",
"(",
"repetitions",
")",
"]",
"+",
"args",
",",
"group",
"=",
"\"MEM\"",
")",
"runtime",
"=",
"mem_results",
"[",
"'Runtime (RDTSC) [s]'",
"]",
"time_per_repetition",
"=",
"runtime",
"/",
"float",
"(",
"repetitions",
")",
"raw_results",
"=",
"[",
"mem_results",
"]",
"# Base metrics for further metric computations:",
"# An iteration is equal to one high-level code inner-most-loop iteration",
"iterations_per_repetition",
"=",
"reduce",
"(",
"operator",
".",
"mul",
",",
"[",
"self",
".",
"kernel",
".",
"subs_consts",
"(",
"max_",
"-",
"min_",
")",
"/",
"self",
".",
"kernel",
".",
"subs_consts",
"(",
"step",
")",
"for",
"idx",
",",
"min_",
",",
"max_",
",",
"step",
"in",
"self",
".",
"kernel",
".",
"_loop_stack",
"]",
",",
"1",
")",
"iterations_per_cacheline",
"=",
"(",
"float",
"(",
"self",
".",
"machine",
"[",
"'cacheline size'",
"]",
")",
"/",
"self",
".",
"kernel",
".",
"bytes_per_iteration",
")",
"cys_per_repetition",
"=",
"time_per_repetition",
"*",
"float",
"(",
"self",
".",
"machine",
"[",
"'clock'",
"]",
")",
"# Gather remaining counters",
"if",
"not",
"self",
".",
"no_phenoecm",
":",
"# Build events and sympy expressions for all model metrics",
"T_OL",
",",
"event_counters",
"=",
"self",
".",
"machine",
".",
"parse_perfmetric",
"(",
"self",
".",
"machine",
"[",
"'overlapping model'",
"]",
"[",
"'performance counter metric'",
"]",
")",
"T_data",
",",
"event_dict",
"=",
"self",
".",
"machine",
".",
"parse_perfmetric",
"(",
"self",
".",
"machine",
"[",
"'non-overlapping model'",
"]",
"[",
"'performance counter metric'",
"]",
")",
"event_counters",
".",
"update",
"(",
"event_dict",
")",
"cache_metrics",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
")",
"-",
"1",
")",
":",
"cache_info",
"=",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
"[",
"i",
"]",
"name",
"=",
"cache_info",
"[",
"'level'",
"]",
"for",
"k",
",",
"v",
"in",
"cache_info",
"[",
"'performance counter metrics'",
"]",
".",
"items",
"(",
")",
":",
"cache_metrics",
"[",
"name",
"]",
"[",
"k",
"]",
",",
"event_dict",
"=",
"self",
".",
"machine",
".",
"parse_perfmetric",
"(",
"v",
")",
"event_counters",
".",
"update",
"(",
"event_dict",
")",
"# Compile minimal runs to gather all required events",
"minimal_runs",
"=",
"build_minimal_runs",
"(",
"list",
"(",
"event_counters",
".",
"values",
"(",
")",
")",
")",
"measured_ctrs",
"=",
"{",
"}",
"for",
"run",
"in",
"minimal_runs",
":",
"ctrs",
"=",
"','",
".",
"join",
"(",
"[",
"eventstr",
"(",
"e",
")",
"for",
"e",
"in",
"run",
"]",
")",
"r",
"=",
"self",
".",
"perfctr",
"(",
"[",
"bench",
"]",
"+",
"[",
"str",
"(",
"repetitions",
")",
"]",
"+",
"args",
",",
"group",
"=",
"ctrs",
")",
"raw_results",
".",
"append",
"(",
"r",
")",
"measured_ctrs",
".",
"update",
"(",
"r",
")",
"# Match measured counters to symbols",
"event_counter_results",
"=",
"{",
"}",
"for",
"sym",
",",
"ctr",
"in",
"event_counters",
".",
"items",
"(",
")",
":",
"event",
",",
"regs",
",",
"parameter",
"=",
"ctr",
"[",
"0",
"]",
",",
"register_options",
"(",
"ctr",
"[",
"1",
"]",
")",
",",
"ctr",
"[",
"2",
"]",
"for",
"r",
"in",
"regs",
":",
"if",
"r",
"in",
"measured_ctrs",
"[",
"event",
"]",
":",
"event_counter_results",
"[",
"sym",
"]",
"=",
"measured_ctrs",
"[",
"event",
"]",
"[",
"r",
"]",
"# Analytical metrics needed for futher calculation",
"cl_size",
"=",
"float",
"(",
"self",
".",
"machine",
"[",
"'cacheline size'",
"]",
")",
"total_iterations",
"=",
"iterations_per_repetition",
"*",
"repetitions",
"total_cachelines",
"=",
"total_iterations",
"/",
"iterations_per_cacheline",
"T_OL_result",
"=",
"T_OL",
".",
"subs",
"(",
"event_counter_results",
")",
"/",
"total_cachelines",
"cache_metric_results",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"cache",
",",
"mtrcs",
"in",
"cache_metrics",
".",
"items",
"(",
")",
":",
"for",
"m",
",",
"e",
"in",
"mtrcs",
".",
"items",
"(",
")",
":",
"cache_metric_results",
"[",
"cache",
"]",
"[",
"m",
"]",
"=",
"e",
".",
"subs",
"(",
"event_counter_results",
")",
"# Inter-cache transfers per CL",
"cache_transfers_per_cl",
"=",
"{",
"cache",
":",
"{",
"k",
":",
"PrefixedUnit",
"(",
"v",
"/",
"total_cachelines",
",",
"'CL/CL'",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"}",
"for",
"cache",
",",
"d",
"in",
"cache_metric_results",
".",
"items",
"(",
")",
"}",
"cache_transfers_per_cl",
"[",
"'L1'",
"]",
"[",
"'accesses'",
"]",
".",
"unit",
"=",
"'LOAD/CL'",
"# Select appropriate bandwidth",
"mem_bw",
",",
"mem_bw_kernel",
"=",
"self",
".",
"machine",
".",
"get_bandwidth",
"(",
"-",
"1",
",",
"# mem",
"cache_metric_results",
"[",
"'L3'",
"]",
"[",
"'misses'",
"]",
",",
"# load_streams",
"cache_metric_results",
"[",
"'L3'",
"]",
"[",
"'evicts'",
"]",
",",
"# store_streams",
"1",
")",
"data_transfers",
"=",
"{",
"# Assuming 0.5 cy / LOAD (SSE on SNB or IVB; AVX on HSW, BDW, SKL or SKX)",
"'T_nOL'",
":",
"(",
"cache_metric_results",
"[",
"'L1'",
"]",
"[",
"'accesses'",
"]",
"/",
"total_cachelines",
"*",
"0.5",
")",
",",
"'T_L1L2'",
":",
"(",
"(",
"cache_metric_results",
"[",
"'L1'",
"]",
"[",
"'misses'",
"]",
"+",
"cache_metric_results",
"[",
"'L1'",
"]",
"[",
"'evicts'",
"]",
")",
"/",
"total_cachelines",
"*",
"cl_size",
"/",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
"[",
"1",
"]",
"[",
"'non-overlap upstream throughput'",
"]",
"[",
"0",
"]",
")",
",",
"'T_L2L3'",
":",
"(",
"(",
"cache_metric_results",
"[",
"'L2'",
"]",
"[",
"'misses'",
"]",
"+",
"cache_metric_results",
"[",
"'L2'",
"]",
"[",
"'evicts'",
"]",
")",
"/",
"total_cachelines",
"*",
"cl_size",
"/",
"self",
".",
"machine",
"[",
"'memory hierarchy'",
"]",
"[",
"2",
"]",
"[",
"'non-overlap upstream throughput'",
"]",
"[",
"0",
"]",
")",
",",
"'T_L3MEM'",
":",
"(",
"(",
"cache_metric_results",
"[",
"'L3'",
"]",
"[",
"'misses'",
"]",
"+",
"cache_metric_results",
"[",
"'L3'",
"]",
"[",
"'evicts'",
"]",
")",
"*",
"float",
"(",
"self",
".",
"machine",
"[",
"'cacheline size'",
"]",
")",
"/",
"total_cachelines",
"/",
"mem_bw",
"*",
"float",
"(",
"self",
".",
"machine",
"[",
"'clock'",
"]",
")",
")",
"}",
"# Build phenomenological ECM model:",
"ecm_model",
"=",
"{",
"'T_OL'",
":",
"T_OL_result",
"}",
"ecm_model",
".",
"update",
"(",
"data_transfers",
")",
"else",
":",
"event_counters",
"=",
"{",
"}",
"ecm_model",
"=",
"None",
"cache_transfers_per_cl",
"=",
"None",
"self",
".",
"results",
"=",
"{",
"'raw output'",
":",
"raw_results",
",",
"'ECM'",
":",
"ecm_model",
",",
"'data transfers'",
":",
"cache_transfers_per_cl",
",",
"'Runtime (per repetition) [s]'",
":",
"time_per_repetition",
",",
"'event counters'",
":",
"event_counters",
",",
"'Iterations per repetition'",
":",
"iterations_per_repetition",
",",
"'Iterations per cacheline'",
":",
"iterations_per_cacheline",
"}",
"# TODO make more generic to support other (and multiple) constant names",
"self",
".",
"results",
"[",
"'Runtime (per cacheline update) [cy/CL]'",
"]",
"=",
"(",
"cys_per_repetition",
"/",
"iterations_per_repetition",
")",
"*",
"iterations_per_cacheline",
"self",
".",
"results",
"[",
"'MEM volume (per repetition) [B]'",
"]",
"=",
"mem_results",
"[",
"'Memory data volume [GBytes]'",
"]",
"*",
"1e9",
"/",
"repetitions",
"self",
".",
"results",
"[",
"'Performance [MFLOP/s]'",
"]",
"=",
"sum",
"(",
"self",
".",
"kernel",
".",
"_flops",
".",
"values",
"(",
")",
")",
"/",
"(",
"time_per_repetition",
"/",
"iterations_per_repetition",
")",
"/",
"1e6",
"if",
"'Memory bandwidth [MBytes/s]'",
"in",
"mem_results",
":",
"self",
".",
"results",
"[",
"'MEM BW [MByte/s]'",
"]",
"=",
"mem_results",
"[",
"'Memory bandwidth [MBytes/s]'",
"]",
"else",
":",
"self",
".",
"results",
"[",
"'MEM BW [MByte/s]'",
"]",
"=",
"mem_results",
"[",
"'Memory BW [MBytes/s]'",
"]",
"self",
".",
"results",
"[",
"'Performance [MLUP/s]'",
"]",
"=",
"(",
"iterations_per_repetition",
"/",
"time_per_repetition",
")",
"/",
"1e6",
"self",
".",
"results",
"[",
"'Performance [MIt/s]'",
"]",
"=",
"(",
"iterations_per_repetition",
"/",
"time_per_repetition",
")",
"/",
"1e6"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
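
The while loop in analyze extrapolates the repetition count toward a roughly 2 s measurement via repetitions = 2.0 // time_per_repetition. A worked example of that arithmetic with made-up timings:

# First pass: the initial guess of 0.2 s per repetition targets a 2 s run.
time_per_repetition = 2.0 / 10.0
repetitions = 2.0 // time_per_repetition
print(repetitions)                      # 10.0 -> first measured run
# Suppose that run finished after only 0.3 s (0.03 s per repetition);
# runtime < 1.5 s, so the loop extrapolates again toward 2 s:
time_per_repetition = 0.3 / repetitions
repetitions = 2.0 // time_per_repetition
print(repetitions)                      # 66.0 -> next measured run

Because the loop repeats until runtime reaches 1.5 s, a wildly wrong initial guess only costs one extra short measurement.
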
test
|
Benchmark.report
|
Report gathered analysis data in human readable form.
|
kerncraft/models/benchmark.py
|
def report(self, output_file=sys.stdout):
"""Report gathered analysis data in human readable form."""
if self.verbose > 1:
with pprint_nosort():
pprint.pprint(self.results)
if self.verbose > 0:
print('Runtime (per repetition): {:.2g} s'.format(
self.results['Runtime (per repetition) [s]']),
file=output_file)
if self.verbose > 0:
print('Iterations per repetition: {!s}'.format(
self.results['Iterations per repetition']),
file=output_file)
print('Runtime (per cacheline update): {:.2f} cy/CL'.format(
self.results['Runtime (per cacheline update) [cy/CL]']),
file=output_file)
print('MEM volume (per repetition): {:.0f} Byte'.format(
self.results['MEM volume (per repetition) [B]']),
file=output_file)
print('Performance: {:.2f} MFLOP/s'.format(self.results['Performance [MFLOP/s]']),
file=output_file)
print('Performance: {:.2f} MLUP/s'.format(self.results['Performance [MLUP/s]']),
file=output_file)
    print('Performance: {:.2f} MIt/s'.format(self.results['Performance [MIt/s]']),
file=output_file)
if self.verbose > 0:
print('MEM bandwidth: {:.2f} MByte/s'.format(self.results['MEM BW [MByte/s]']),
file=output_file)
print('', file=output_file)
if not self.no_phenoecm:
print("Data Transfers:")
print("{:^8} |".format("cache"), end='')
for metrics in self.results['data transfers'].values():
for metric_name in sorted(metrics):
print(" {:^14}".format(metric_name), end='')
print()
break
for cache, metrics in sorted(self.results['data transfers'].items()):
print("{!s:^8} |".format(cache), end='')
for k, v in sorted(metrics.items()):
print(" {!s:^14}".format(v), end='')
print()
print()
print('Phenomenological ECM model: {{ {T_OL:.1f} || {T_nOL:.1f} | {T_L1L2:.1f} | '
'{T_L2L3:.1f} | {T_L3MEM:.1f} }} cy/CL'.format(
**{k: float(v) for k, v in self.results['ECM'].items()}),
file=output_file)
        print('T_OL assumes that two loads per cycle may be retired, which is true for '
'128bit SSE/half-AVX loads on SNB and IVY, and 256bit full-AVX loads on HSW, '
'BDW, SKL and SKX, but it also depends on AGU availability.',
file=output_file)
|
def report(self, output_file=sys.stdout):
"""Report gathered analysis data in human readable form."""
if self.verbose > 1:
with pprint_nosort():
pprint.pprint(self.results)
if self.verbose > 0:
print('Runtime (per repetition): {:.2g} s'.format(
self.results['Runtime (per repetition) [s]']),
file=output_file)
if self.verbose > 0:
print('Iterations per repetition: {!s}'.format(
self.results['Iterations per repetition']),
file=output_file)
print('Runtime (per cacheline update): {:.2f} cy/CL'.format(
self.results['Runtime (per cacheline update) [cy/CL]']),
file=output_file)
print('MEM volume (per repetition): {:.0f} Byte'.format(
self.results['MEM volume (per repetition) [B]']),
file=output_file)
print('Performance: {:.2f} MFLOP/s'.format(self.results['Performance [MFLOP/s]']),
file=output_file)
print('Performance: {:.2f} MLUP/s'.format(self.results['Performance [MLUP/s]']),
file=output_file)
    print('Performance: {:.2f} MIt/s'.format(self.results['Performance [MIt/s]']),
file=output_file)
if self.verbose > 0:
print('MEM bandwidth: {:.2f} MByte/s'.format(self.results['MEM BW [MByte/s]']),
file=output_file)
print('', file=output_file)
if not self.no_phenoecm:
print("Data Transfers:")
print("{:^8} |".format("cache"), end='')
for metrics in self.results['data transfers'].values():
for metric_name in sorted(metrics):
print(" {:^14}".format(metric_name), end='')
print()
break
for cache, metrics in sorted(self.results['data transfers'].items()):
print("{!s:^8} |".format(cache), end='')
for k, v in sorted(metrics.items()):
print(" {!s:^14}".format(v), end='')
print()
print()
print('Phenomenological ECM model: {{ {T_OL:.1f} || {T_nOL:.1f} | {T_L1L2:.1f} | '
'{T_L2L3:.1f} | {T_L3MEM:.1f} }} cy/CL'.format(
**{k: float(v) for k, v in self.results['ECM'].items()}),
file=output_file)
        print('T_OL assumes that two loads per cycle may be retired, which is true for '
'128bit SSE/half-AVX loads on SNB and IVY, and 256bit full-AVX loads on HSW, '
'BDW, SKL and SKX, but it also depends on AGU availability.',
file=output_file)
|
[
"Report",
"gathered",
"analysis",
"data",
"in",
"human",
"readable",
"form",
"."
] |
RRZE-HPC/kerncraft
|
python
|
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/benchmark.py#L488-L541
|
[
"def",
"report",
"(",
"self",
",",
"output_file",
"=",
"sys",
".",
"stdout",
")",
":",
"if",
"self",
".",
"verbose",
">",
"1",
":",
"with",
"pprint_nosort",
"(",
")",
":",
"pprint",
".",
"pprint",
"(",
"self",
".",
"results",
")",
"if",
"self",
".",
"verbose",
">",
"0",
":",
"print",
"(",
"'Runtime (per repetition): {:.2g} s'",
".",
"format",
"(",
"self",
".",
"results",
"[",
"'Runtime (per repetition) [s]'",
"]",
")",
",",
"file",
"=",
"output_file",
")",
"if",
"self",
".",
"verbose",
">",
"0",
":",
"print",
"(",
"'Iterations per repetition: {!s}'",
".",
"format",
"(",
"self",
".",
"results",
"[",
"'Iterations per repetition'",
"]",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"'Runtime (per cacheline update): {:.2f} cy/CL'",
".",
"format",
"(",
"self",
".",
"results",
"[",
"'Runtime (per cacheline update) [cy/CL]'",
"]",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"'MEM volume (per repetition): {:.0f} Byte'",
".",
"format",
"(",
"self",
".",
"results",
"[",
"'MEM volume (per repetition) [B]'",
"]",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"'Performance: {:.2f} MFLOP/s'",
".",
"format",
"(",
"self",
".",
"results",
"[",
"'Performance [MFLOP/s]'",
"]",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"'Performance: {:.2f} MLUP/s'",
".",
"format",
"(",
"self",
".",
"results",
"[",
"'Performance [MLUP/s]'",
"]",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"'Performance: {:.2f} It/s'",
".",
"format",
"(",
"self",
".",
"results",
"[",
"'Performance [MIt/s]'",
"]",
")",
",",
"file",
"=",
"output_file",
")",
"if",
"self",
".",
"verbose",
">",
"0",
":",
"print",
"(",
"'MEM bandwidth: {:.2f} MByte/s'",
".",
"format",
"(",
"self",
".",
"results",
"[",
"'MEM BW [MByte/s]'",
"]",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"''",
",",
"file",
"=",
"output_file",
")",
"if",
"not",
"self",
".",
"no_phenoecm",
":",
"print",
"(",
"\"Data Transfers:\"",
")",
"print",
"(",
"\"{:^8} |\"",
".",
"format",
"(",
"\"cache\"",
")",
",",
"end",
"=",
"''",
")",
"for",
"metrics",
"in",
"self",
".",
"results",
"[",
"'data transfers'",
"]",
".",
"values",
"(",
")",
":",
"for",
"metric_name",
"in",
"sorted",
"(",
"metrics",
")",
":",
"print",
"(",
"\" {:^14}\"",
".",
"format",
"(",
"metric_name",
")",
",",
"end",
"=",
"''",
")",
"print",
"(",
")",
"break",
"for",
"cache",
",",
"metrics",
"in",
"sorted",
"(",
"self",
".",
"results",
"[",
"'data transfers'",
"]",
".",
"items",
"(",
")",
")",
":",
"print",
"(",
"\"{!s:^8} |\"",
".",
"format",
"(",
"cache",
")",
",",
"end",
"=",
"''",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"metrics",
".",
"items",
"(",
")",
")",
":",
"print",
"(",
"\" {!s:^14}\"",
".",
"format",
"(",
"v",
")",
",",
"end",
"=",
"''",
")",
"print",
"(",
")",
"print",
"(",
")",
"print",
"(",
"'Phenomenological ECM model: {{ {T_OL:.1f} || {T_nOL:.1f} | {T_L1L2:.1f} | '",
"'{T_L2L3:.1f} | {T_L3MEM:.1f} }} cy/CL'",
".",
"format",
"(",
"*",
"*",
"{",
"k",
":",
"float",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"results",
"[",
"'ECM'",
"]",
".",
"items",
"(",
")",
"}",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"'T_OL assumes that two loads per cycle may be retiered, which is true for '",
"'128bit SSE/half-AVX loads on SNB and IVY, and 256bit full-AVX loads on HSW, '",
"'BDW, SKL and SKX, but it also depends on AGU availability.'",
",",
"file",
"=",
"output_file",
")"
] |
c60baf8043e4da8d8d66da7575021c2f4c6c78af
|
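
The phenomenological ECM line is a plain str.format over the model dict; with invented cycle contributions it renders like this (the doubled braces escape the literal { } of the ECM notation):

ecm = {'T_OL': 4.0, 'T_nOL': 2.0, 'T_L1L2': 3.0, 'T_L2L3': 4.5, 'T_L3MEM': 8.1}
print('Phenomenological ECM model: {{ {T_OL:.1f} || {T_nOL:.1f} | {T_L1L2:.1f} | '
      '{T_L2L3:.1f} | {T_L3MEM:.1f} }} cy/CL'.format(**ecm))
# Phenomenological ECM model: { 4.0 || 2.0 | 3.0 | 4.5 | 8.1 } cy/CL
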
test
|
parse_description
|
Parse the description in the README file
CommandLine:
python -c "import setup; print(setup.parse_description())"
|
setup.py
|
def parse_description():
"""
Parse the description in the README file
CommandLine:
python -c "import setup; print(setup.parse_description())"
"""
from os.path import dirname, join, exists
readme_fpath = join(dirname(__file__), 'README.md')
# print('readme_fpath = %r' % (readme_fpath,))
# This breaks on pip install, so check that it exists.
if exists(readme_fpath):
# try:
# # convert markdown to rst for pypi
# import pypandoc
# return pypandoc.convert(readme_fpath, 'rst')
# except Exception as ex:
# strip out markdown to make a clean readme for pypi
textlines = []
with open(readme_fpath, 'r') as f:
capture = False
for line in f.readlines():
if '# Purpose' in line:
capture = True
elif line.startswith('##'):
break
elif capture:
textlines += [line]
text = ''.join(textlines).strip()
text = text.replace('\n\n', '_NLHACK_')
text = text.replace('\n', ' ')
text = text.replace('_NLHACK_', '\n\n')
return text
return ''
|
def parse_description():
"""
Parse the description in the README file
CommandLine:
python -c "import setup; print(setup.parse_description())"
"""
from os.path import dirname, join, exists
readme_fpath = join(dirname(__file__), 'README.md')
# print('readme_fpath = %r' % (readme_fpath,))
# This breaks on pip install, so check that it exists.
if exists(readme_fpath):
# try:
# # convert markdown to rst for pypi
# import pypandoc
# return pypandoc.convert(readme_fpath, 'rst')
# except Exception as ex:
# strip out markdown to make a clean readme for pypi
textlines = []
with open(readme_fpath, 'r') as f:
capture = False
for line in f.readlines():
if '# Purpose' in line:
capture = True
elif line.startswith('##'):
break
elif capture:
textlines += [line]
text = ''.join(textlines).strip()
text = text.replace('\n\n', '_NLHACK_')
text = text.replace('\n', ' ')
text = text.replace('_NLHACK_', '\n\n')
return text
return ''
|
[
"Parse",
"the",
"description",
"in",
"the",
"README",
"file"
] |
Erotemic/progiter
|
python
|
https://github.com/Erotemic/progiter/blob/24f1ad15d79f76cccef7b5811d341ab33b72bf1e/setup.py#L71-L104
|
[
"def",
"parse_description",
"(",
")",
":",
"from",
"os",
".",
"path",
"import",
"dirname",
",",
"join",
",",
"exists",
"readme_fpath",
"=",
"join",
"(",
"dirname",
"(",
"__file__",
")",
",",
"'README.md'",
")",
"# print('readme_fpath = %r' % (readme_fpath,))",
"# This breaks on pip install, so check that it exists.",
"if",
"exists",
"(",
"readme_fpath",
")",
":",
"# try:",
"# # convert markdown to rst for pypi",
"# import pypandoc",
"# return pypandoc.convert(readme_fpath, 'rst')",
"# except Exception as ex:",
"# strip out markdown to make a clean readme for pypi",
"textlines",
"=",
"[",
"]",
"with",
"open",
"(",
"readme_fpath",
",",
"'r'",
")",
"as",
"f",
":",
"capture",
"=",
"False",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"if",
"'# Purpose'",
"in",
"line",
":",
"capture",
"=",
"True",
"elif",
"line",
".",
"startswith",
"(",
"'##'",
")",
":",
"break",
"elif",
"capture",
":",
"textlines",
"+=",
"[",
"line",
"]",
"text",
"=",
"''",
".",
"join",
"(",
"textlines",
")",
".",
"strip",
"(",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"'\\n\\n'",
",",
"'_NLHACK_'",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"'_NLHACK_'",
",",
"'\\n\\n'",
")",
"return",
"text",
"return",
"''"
] |
24f1ad15d79f76cccef7b5811d341ab33b72bf1e
|
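
A minimal check of the capture logic on an in-memory README (this re-implements just the loop, since parse_description itself reads README.md from disk; the README text is invented):

readme = """# progiter
## Purpose
A fast progress iterator.
It has no dependencies.
## Installation
pip install progiter
"""
textlines, capture = [], False
for line in readme.splitlines(True):
    if '# Purpose' in line:
        capture = True
    elif line.startswith('##'):
        break
    elif capture:
        textlines += [line]
print(''.join(textlines).strip())
# A fast progress iterator.
# It has no dependencies.

Note the check order: '# Purpose' is tested before startswith('##'), so a '## Purpose' heading enables capture instead of terminating the loop.
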
test
|
schedule_retry
|
Schedule a retry
|
ecommerce_worker/sailthru/v1/tasks.py
|
def schedule_retry(self, config):
"""Schedule a retry"""
raise self.retry(countdown=config.get('SAILTHRU_RETRY_SECONDS'),
max_retries=config.get('SAILTHRU_RETRY_ATTEMPTS'))
|
def schedule_retry(self, config):
"""Schedule a retry"""
raise self.retry(countdown=config.get('SAILTHRU_RETRY_SECONDS'),
max_retries=config.get('SAILTHRU_RETRY_ATTEMPTS'))
|
[
"Schedule",
"a",
"retry"
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L22-L25
|
[
"def",
"schedule_retry",
"(",
"self",
",",
"config",
")",
":",
"raise",
"self",
".",
"retry",
"(",
"countdown",
"=",
"config",
".",
"get",
"(",
"'SAILTHRU_RETRY_SECONDS'",
")",
",",
"max_retries",
"=",
"config",
".",
"get",
"(",
"'SAILTHRU_RETRY_ATTEMPTS'",
")",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
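
schedule_retry raises rather than returns because Celery's Task.retry itself raises a Retry exception to abort the current attempt. A hedged sketch of the wiring (the task name, exception, and config values are invented):

from celery import Celery

app = Celery('ecommerce_worker')
RETRY_CONFIG = {'SAILTHRU_RETRY_SECONDS': 3600, 'SAILTHRU_RETRY_ATTEMPTS': 6}

def schedule_retry(task, config):
    # Mirrors the method above: re-raise through Celery's retry machinery.
    raise task.retry(countdown=config.get('SAILTHRU_RETRY_SECONDS'),
                     max_retries=config.get('SAILTHRU_RETRY_ATTEMPTS'))

@app.task(bind=True)
def flaky_sailthru_task(self):
    try:
        raise IOError('transient Sailthru failure')  # stand-in for a real API call
    except IOError:
        schedule_retry(self, RETRY_CONFIG)
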
test
|
_build_purchase_item
|
Build and return Sailthru purchase item object
|
ecommerce_worker/sailthru/v1/tasks.py
|
def _build_purchase_item(course_id, course_url, cost_in_cents, mode, course_data, sku):
"""Build and return Sailthru purchase item object"""
# build item description
item = {
'id': "{}-{}".format(course_id, mode),
'url': course_url,
'price': cost_in_cents,
'qty': 1,
}
# get title from course info if we don't already have it from Sailthru
if 'title' in course_data:
item['title'] = course_data['title']
else:
# can't find, just invent title
item['title'] = 'Course {} mode: {}'.format(course_id, mode)
if 'tags' in course_data:
item['tags'] = course_data['tags']
# add vars to item
item['vars'] = dict(course_data.get('vars', {}), mode=mode, course_run_id=course_id)
item['vars']['purchase_sku'] = sku
return item
|
def _build_purchase_item(course_id, course_url, cost_in_cents, mode, course_data, sku):
"""Build and return Sailthru purchase item object"""
# build item description
item = {
'id': "{}-{}".format(course_id, mode),
'url': course_url,
'price': cost_in_cents,
'qty': 1,
}
# get title from course info if we don't already have it from Sailthru
if 'title' in course_data:
item['title'] = course_data['title']
else:
# can't find, just invent title
item['title'] = 'Course {} mode: {}'.format(course_id, mode)
if 'tags' in course_data:
item['tags'] = course_data['tags']
# add vars to item
item['vars'] = dict(course_data.get('vars', {}), mode=mode, course_run_id=course_id)
item['vars']['purchase_sku'] = sku
return item
|
[
"Build",
"and",
"return",
"Sailthru",
"purchase",
"item",
"object"
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L28-L54
|
[
"def",
"_build_purchase_item",
"(",
"course_id",
",",
"course_url",
",",
"cost_in_cents",
",",
"mode",
",",
"course_data",
",",
"sku",
")",
":",
"# build item description",
"item",
"=",
"{",
"'id'",
":",
"\"{}-{}\"",
".",
"format",
"(",
"course_id",
",",
"mode",
")",
",",
"'url'",
":",
"course_url",
",",
"'price'",
":",
"cost_in_cents",
",",
"'qty'",
":",
"1",
",",
"}",
"# get title from course info if we don't already have it from Sailthru",
"if",
"'title'",
"in",
"course_data",
":",
"item",
"[",
"'title'",
"]",
"=",
"course_data",
"[",
"'title'",
"]",
"else",
":",
"# can't find, just invent title",
"item",
"[",
"'title'",
"]",
"=",
"'Course {} mode: {}'",
".",
"format",
"(",
"course_id",
",",
"mode",
")",
"if",
"'tags'",
"in",
"course_data",
":",
"item",
"[",
"'tags'",
"]",
"=",
"course_data",
"[",
"'tags'",
"]",
"# add vars to item",
"item",
"[",
"'vars'",
"]",
"=",
"dict",
"(",
"course_data",
".",
"get",
"(",
"'vars'",
",",
"{",
"}",
")",
",",
"mode",
"=",
"mode",
",",
"course_run_id",
"=",
"course_id",
")",
"item",
"[",
"'vars'",
"]",
"[",
"'purchase_sku'",
"]",
"=",
"sku",
"return",
"item"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
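
Given the function above, a call with hypothetical inputs shows the resulting item shape (all values invented):

item = _build_purchase_item(
    course_id='course-v1:edX+DemoX+Demo_2024',
    course_url='https://lms.example.com/courses/demo/about',
    cost_in_cents=4900,
    mode='verified',
    course_data={'title': 'Demo Course', 'vars': {'course_start': '2024-01-01'}},
    sku='ABC123',
)
# item['id']   -> 'course-v1:edX+DemoX+Demo_2024-verified'
# item['vars'] -> {'course_start': '2024-01-01', 'mode': 'verified',
#                  'course_run_id': 'course-v1:edX+DemoX+Demo_2024',
#                  'purchase_sku': 'ABC123'}
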
test
|
_record_purchase
|
Record a purchase in Sailthru
Arguments:
sailthru_client (object): SailthruClient
email (str): user's email address
item (dict): Sailthru required information about the course
purchase_incomplete (boolean): True if adding item to shopping cart
message_id (str): Cookie used to identify marketing campaign
options (dict): Sailthru purchase API options (e.g. template name)
Returns:
False if retryable error, else True
|
ecommerce_worker/sailthru/v1/tasks.py
|
def _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
"""Record a purchase in Sailthru
Arguments:
sailthru_client (object): SailthruClient
email (str): user's email address
item (dict): Sailthru required information about the course
purchase_incomplete (boolean): True if adding item to shopping cart
message_id (str): Cookie used to identify marketing campaign
options (dict): Sailthru purchase API options (e.g. template name)
Returns:
False if retryable error, else True
"""
try:
sailthru_response = sailthru_client.purchase(email, [item],
incomplete=purchase_incomplete, message_id=message_id,
options=options)
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
logger.error("Error attempting to record purchase in Sailthru: %s", error.get_message())
return not can_retry_sailthru_request(error)
except SailthruClientError as exc:
logger.exception("Exception attempting to record purchase for %s in Sailthru - %s", email, text_type(exc))
return False
return True
|
def _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
"""Record a purchase in Sailthru
Arguments:
sailthru_client (object): SailthruClient
email (str): user's email address
item (dict): Sailthru required information about the course
purchase_incomplete (boolean): True if adding item to shopping cart
message_id (str): Cookie used to identify marketing campaign
options (dict): Sailthru purchase API options (e.g. template name)
Returns:
False if retryable error, else True
"""
try:
sailthru_response = sailthru_client.purchase(email, [item],
incomplete=purchase_incomplete, message_id=message_id,
options=options)
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
logger.error("Error attempting to record purchase in Sailthru: %s", error.get_message())
return not can_retry_sailthru_request(error)
except SailthruClientError as exc:
logger.exception("Exception attempting to record purchase for %s in Sailthru - %s", email, text_type(exc))
return False
return True
|
[
"Record",
"a",
"purchase",
"in",
"Sailthru"
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L57-L85
|
[
"def",
"_record_purchase",
"(",
"sailthru_client",
",",
"email",
",",
"item",
",",
"purchase_incomplete",
",",
"message_id",
",",
"options",
")",
":",
"try",
":",
"sailthru_response",
"=",
"sailthru_client",
".",
"purchase",
"(",
"email",
",",
"[",
"item",
"]",
",",
"incomplete",
"=",
"purchase_incomplete",
",",
"message_id",
"=",
"message_id",
",",
"options",
"=",
"options",
")",
"if",
"not",
"sailthru_response",
".",
"is_ok",
"(",
")",
":",
"error",
"=",
"sailthru_response",
".",
"get_error",
"(",
")",
"logger",
".",
"error",
"(",
"\"Error attempting to record purchase in Sailthru: %s\"",
",",
"error",
".",
"get_message",
"(",
")",
")",
"return",
"not",
"can_retry_sailthru_request",
"(",
"error",
")",
"except",
"SailthruClientError",
"as",
"exc",
":",
"logger",
".",
"exception",
"(",
"\"Exception attempting to record purchase for %s in Sailthru - %s\"",
",",
"email",
",",
"text_type",
"(",
"exc",
")",
")",
"return",
"False",
"return",
"True"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
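
The boolean contract (False means "retryable, run the task again") pairs with schedule_retry earlier in this dump; a sketch of a call site, with the helper name invented:

def record_or_retry(task, config, sailthru_client, email, item,
                    purchase_incomplete, message_id, options):
    # _record_purchase returns False only on retryable Sailthru errors
    # (or client exceptions), so hand control back to Celery in that case.
    if not _record_purchase(sailthru_client, email, item,
                            purchase_incomplete, message_id, options):
        schedule_retry(task, config)
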
test
|
_get_course_content
|
Get course information using the Sailthru content api or from cache.
If there is an error, just return with an empty response.
Arguments:
course_id (str): course key of the course
course_url (str): LMS url for course info page.
sailthru_client (object): SailthruClient
site_code (str): site code
config (dict): config options
Returns:
course information from Sailthru
|
ecommerce_worker/sailthru/v1/tasks.py
|
def _get_course_content(course_id, course_url, sailthru_client, site_code, config):
"""Get course information using the Sailthru content api or from cache.
If there is an error, just return with an empty response.
Arguments:
course_id (str): course key of the course
course_url (str): LMS url for course info page.
sailthru_client (object): SailthruClient
site_code (str): site code
config (dict): config options
Returns:
course information from Sailthru
"""
# check cache first
cache_key = "{}:{}".format(site_code, course_url)
response = cache.get(cache_key)
if not response:
try:
sailthru_response = sailthru_client.api_get("content", {"id": course_url})
if not sailthru_response.is_ok():
response = {}
else:
response = sailthru_response.json
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
except SailthruClientError:
response = {}
if not response:
logger.error('Could not get course data from Sailthru on enroll/purchase event. '
'Calling Ecommerce Course API to get course info for enrollment confirmation email')
response = _get_course_content_from_ecommerce(course_id, site_code=site_code)
if response:
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
return response
|
def _get_course_content(course_id, course_url, sailthru_client, site_code, config):
"""Get course information using the Sailthru content api or from cache.
If there is an error, just return with an empty response.
Arguments:
course_id (str): course key of the course
course_url (str): LMS url for course info page.
sailthru_client (object): SailthruClient
site_code (str): site code
config (dict): config options
Returns:
course information from Sailthru
"""
# check cache first
cache_key = "{}:{}".format(site_code, course_url)
response = cache.get(cache_key)
if not response:
try:
sailthru_response = sailthru_client.api_get("content", {"id": course_url})
if not sailthru_response.is_ok():
response = {}
else:
response = sailthru_response.json
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
except SailthruClientError:
response = {}
if not response:
logger.error('Could not get course data from Sailthru on enroll/purchase event. '
'Calling Ecommerce Course API to get course info for enrollment confirmation email')
response = _get_course_content_from_ecommerce(course_id, site_code=site_code)
if response:
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
return response
|
[
"Get",
"course",
"information",
"using",
"the",
"Sailthru",
"content",
"api",
"or",
"from",
"cache",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L88-L125
|
[
"def",
"_get_course_content",
"(",
"course_id",
",",
"course_url",
",",
"sailthru_client",
",",
"site_code",
",",
"config",
")",
":",
"# check cache first",
"cache_key",
"=",
"\"{}:{}\"",
".",
"format",
"(",
"site_code",
",",
"course_url",
")",
"response",
"=",
"cache",
".",
"get",
"(",
"cache_key",
")",
"if",
"not",
"response",
":",
"try",
":",
"sailthru_response",
"=",
"sailthru_client",
".",
"api_get",
"(",
"\"content\"",
",",
"{",
"\"id\"",
":",
"course_url",
"}",
")",
"if",
"not",
"sailthru_response",
".",
"is_ok",
"(",
")",
":",
"response",
"=",
"{",
"}",
"else",
":",
"response",
"=",
"sailthru_response",
".",
"json",
"cache",
".",
"set",
"(",
"cache_key",
",",
"response",
",",
"config",
".",
"get",
"(",
"'SAILTHRU_CACHE_TTL_SECONDS'",
")",
")",
"except",
"SailthruClientError",
":",
"response",
"=",
"{",
"}",
"if",
"not",
"response",
":",
"logger",
".",
"error",
"(",
"'Could not get course data from Sailthru on enroll/purchase event. '",
"'Calling Ecommerce Course API to get course info for enrollment confirmation email'",
")",
"response",
"=",
"_get_course_content_from_ecommerce",
"(",
"course_id",
",",
"site_code",
"=",
"site_code",
")",
"if",
"response",
":",
"cache",
".",
"set",
"(",
"cache_key",
",",
"response",
",",
"config",
".",
"get",
"(",
"'SAILTHRU_CACHE_TTL_SECONDS'",
")",
")",
"return",
"response"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
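
_get_course_content is a cache-aside lookup with a Sailthru primary and an Ecommerce fallback. The same pattern in miniature, with a plain dict standing in for the real cache module (TTL handling omitted):

cache = {}

def get_course_info(cache_key, fetch_primary, fetch_fallback):
    # Cache-aside: consult the cache, then the primary source, then the fallback.
    response = cache.get(cache_key)
    if not response:
        response = fetch_primary()
        if response:
            cache[cache_key] = response
    if not response:
        response = fetch_fallback()
        if response:
            cache[cache_key] = response
    return response

info = get_course_info('edx:https://lms/course/about',
                       lambda: {},                        # Sailthru content API came back empty
                       lambda: {'title': 'Demo Course'})  # Ecommerce fallback
print(info)   # {'title': 'Demo Course'} -- and now cached
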
test
|
_get_course_content_from_ecommerce
|
Get course information using the Ecommerce course api.
In case of error, returns an empty response.
Arguments:
course_id (str): course key of the course
site_code (str): site code
Returns:
course information from Ecommerce
|
ecommerce_worker/sailthru/v1/tasks.py
|
def _get_course_content_from_ecommerce(course_id, site_code=None):
"""
Get course information using the Ecommerce course api.
    In case of error, returns an empty response.
Arguments:
course_id (str): course key of the course
site_code (str): site code
Returns:
course information from Ecommerce
"""
api = get_ecommerce_client(site_code=site_code)
try:
api_response = api.courses(course_id).get()
except Exception: # pylint: disable=broad-except
logger.exception(
'An error occurred while retrieving data for course run [%s] from the Catalog API.',
course_id,
exc_info=True
)
return {}
return {
'title': api_response.get('name'),
'verification_deadline': api_response.get('verification_deadline')
}
|
def _get_course_content_from_ecommerce(course_id, site_code=None):
"""
Get course information using the Ecommerce course api.
    In case of error, returns an empty response.
Arguments:
course_id (str): course key of the course
site_code (str): site code
Returns:
course information from Ecommerce
"""
api = get_ecommerce_client(site_code=site_code)
try:
api_response = api.courses(course_id).get()
except Exception: # pylint: disable=broad-except
logger.exception(
'An error occurred while retrieving data for course run [%s] from the Catalog API.',
course_id,
exc_info=True
)
return {}
return {
'title': api_response.get('name'),
'verification_deadline': api_response.get('verification_deadline')
}
|
[
"Get",
"course",
"information",
"using",
"the",
"Ecommerce",
"course",
"api",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L128-L154
|
[
"def",
"_get_course_content_from_ecommerce",
"(",
"course_id",
",",
"site_code",
"=",
"None",
")",
":",
"api",
"=",
"get_ecommerce_client",
"(",
"site_code",
"=",
"site_code",
")",
"try",
":",
"api_response",
"=",
"api",
".",
"courses",
"(",
"course_id",
")",
".",
"get",
"(",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"logger",
".",
"exception",
"(",
"'An error occurred while retrieving data for course run [%s] from the Catalog API.'",
",",
"course_id",
",",
"exc_info",
"=",
"True",
")",
"return",
"{",
"}",
"return",
"{",
"'title'",
":",
"api_response",
".",
"get",
"(",
"'name'",
")",
",",
"'verification_deadline'",
":",
"api_response",
".",
"get",
"(",
"'verification_deadline'",
")",
"}"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
test
|
_update_unenrolled_list
|
Maintain a list of courses the user has unenrolled from in the Sailthru user record
Arguments:
sailthru_client (object): SailthruClient
email (str): user's email address
course_url (str): LMS url for course info page.
unenroll (boolean): True if unenrolling, False if enrolling
Returns:
False if retryable error, else True
|
ecommerce_worker/sailthru/v1/tasks.py
|
def _update_unenrolled_list(sailthru_client, email, course_url, unenroll):
"""Maintain a list of courses the user has unenrolled from in the Sailthru user record
Arguments:
sailthru_client (object): SailthruClient
email (str): user's email address
course_url (str): LMS url for course info page.
unenroll (boolean): True if unenrolling, False if enrolling
Returns:
False if retryable error, else True
"""
try:
# get the user 'vars' values from sailthru
sailthru_response = sailthru_client.api_get("user", {"id": email, "fields": {"vars": 1}})
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
logger.error("Error attempting to read user record from Sailthru: %s", error.get_message())
return not can_retry_sailthru_request(error)
response_json = sailthru_response.json
unenroll_list = []
if response_json and "vars" in response_json and response_json["vars"] \
and "unenrolled" in response_json["vars"]:
unenroll_list = response_json["vars"]["unenrolled"]
changed = False
# if unenrolling, add course to unenroll list
if unenroll:
if course_url not in unenroll_list:
unenroll_list.append(course_url)
changed = True
# if enrolling, remove course from unenroll list
elif course_url in unenroll_list:
unenroll_list.remove(course_url)
changed = True
if changed:
# write user record back
sailthru_response = sailthru_client.api_post(
'user', {'id': email, 'key': 'email', 'vars': {'unenrolled': unenroll_list}})
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
logger.error("Error attempting to update user record in Sailthru: %s", error.get_message())
return not can_retry_sailthru_request(error)
return True
except SailthruClientError as exc:
logger.exception("Exception attempting to update user record for %s in Sailthru - %s", email, text_type(exc))
return False
|
def _update_unenrolled_list(sailthru_client, email, course_url, unenroll):
"""Maintain a list of courses the user has unenrolled from in the Sailthru user record
Arguments:
sailthru_client (object): SailthruClient
email (str): user's email address
course_url (str): LMS url for course info page.
unenroll (boolean): True if unenrolling, False if enrolling
Returns:
False if retryable error, else True
"""
try:
# get the user 'vars' values from sailthru
sailthru_response = sailthru_client.api_get("user", {"id": email, "fields": {"vars": 1}})
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
logger.error("Error attempting to read user record from Sailthru: %s", error.get_message())
return not can_retry_sailthru_request(error)
response_json = sailthru_response.json
unenroll_list = []
if response_json and "vars" in response_json and response_json["vars"] \
and "unenrolled" in response_json["vars"]:
unenroll_list = response_json["vars"]["unenrolled"]
changed = False
# if unenrolling, add course to unenroll list
if unenroll:
if course_url not in unenroll_list:
unenroll_list.append(course_url)
changed = True
# if enrolling, remove course from unenroll list
elif course_url in unenroll_list:
unenroll_list.remove(course_url)
changed = True
if changed:
# write user record back
sailthru_response = sailthru_client.api_post(
'user', {'id': email, 'key': 'email', 'vars': {'unenrolled': unenroll_list}})
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
logger.error("Error attempting to update user record in Sailthru: %s", error.get_message())
return not can_retry_sailthru_request(error)
return True
except SailthruClientError as exc:
logger.exception("Exception attempting to update user record for %s in Sailthru - %s", email, text_type(exc))
return False
|
[
"Maintain",
"a",
"list",
"of",
"courses",
"the",
"user",
"has",
"unenrolled",
"from",
"in",
"the",
"Sailthru",
"user",
"record"
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L157-L210
|
[
"def",
"_update_unenrolled_list",
"(",
"sailthru_client",
",",
"email",
",",
"course_url",
",",
"unenroll",
")",
":",
"try",
":",
"# get the user 'vars' values from sailthru",
"sailthru_response",
"=",
"sailthru_client",
".",
"api_get",
"(",
"\"user\"",
",",
"{",
"\"id\"",
":",
"email",
",",
"\"fields\"",
":",
"{",
"\"vars\"",
":",
"1",
"}",
"}",
")",
"if",
"not",
"sailthru_response",
".",
"is_ok",
"(",
")",
":",
"error",
"=",
"sailthru_response",
".",
"get_error",
"(",
")",
"logger",
".",
"error",
"(",
"\"Error attempting to read user record from Sailthru: %s\"",
",",
"error",
".",
"get_message",
"(",
")",
")",
"return",
"not",
"can_retry_sailthru_request",
"(",
"error",
")",
"response_json",
"=",
"sailthru_response",
".",
"json",
"unenroll_list",
"=",
"[",
"]",
"if",
"response_json",
"and",
"\"vars\"",
"in",
"response_json",
"and",
"response_json",
"[",
"\"vars\"",
"]",
"and",
"\"unenrolled\"",
"in",
"response_json",
"[",
"\"vars\"",
"]",
":",
"unenroll_list",
"=",
"response_json",
"[",
"\"vars\"",
"]",
"[",
"\"unenrolled\"",
"]",
"changed",
"=",
"False",
"# if unenrolling, add course to unenroll list",
"if",
"unenroll",
":",
"if",
"course_url",
"not",
"in",
"unenroll_list",
":",
"unenroll_list",
".",
"append",
"(",
"course_url",
")",
"changed",
"=",
"True",
"# if enrolling, remove course from unenroll list",
"elif",
"course_url",
"in",
"unenroll_list",
":",
"unenroll_list",
".",
"remove",
"(",
"course_url",
")",
"changed",
"=",
"True",
"if",
"changed",
":",
"# write user record back",
"sailthru_response",
"=",
"sailthru_client",
".",
"api_post",
"(",
"'user'",
",",
"{",
"'id'",
":",
"email",
",",
"'key'",
":",
"'email'",
",",
"'vars'",
":",
"{",
"'unenrolled'",
":",
"unenroll_list",
"}",
"}",
")",
"if",
"not",
"sailthru_response",
".",
"is_ok",
"(",
")",
":",
"error",
"=",
"sailthru_response",
".",
"get_error",
"(",
")",
"logger",
".",
"error",
"(",
"\"Error attempting to update user record in Sailthru: %s\"",
",",
"error",
".",
"get_message",
"(",
")",
")",
"return",
"not",
"can_retry_sailthru_request",
"(",
"error",
")",
"return",
"True",
"except",
"SailthruClientError",
"as",
"exc",
":",
"logger",
".",
"exception",
"(",
"\"Exception attempting to update user record for %s in Sailthru - %s\"",
",",
"email",
",",
"text_type",
"(",
"exc",
")",
")",
"return",
"False"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
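The core of `_update_unenrolled_list` above is a pure add/remove rule on the `unenrolled` list; the Sailthru round trip only persists the result when something actually changed. Below is a minimal sketch of that rule in isolation, assuming a hypothetical helper name (`apply_unenroll_change` is not part of the worker):

def apply_unenroll_change(unenroll_list, course_url, unenroll):
    # Return (new_list, changed) following the rule used above.
    new_list = list(unenroll_list)
    if unenroll:
        # unenrolling: record the course if it is not already listed
        if course_url not in new_list:
            new_list.append(course_url)
            return new_list, True
    elif course_url in new_list:
        # enrolling again: drop the course from the unenrolled list
        new_list.remove(course_url)
        return new_list, True
    return new_list, False

# Only a real change would trigger the api_post write-back above.
print(apply_unenroll_change([], 'https://lms/course', True))                       # (['https://lms/course'], True)
print(apply_unenroll_change(['https://lms/course'], 'https://lms/course', False))  # ([], True)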
test
|
update_course_enrollment
|
Adds/updates Sailthru when a user adds to cart/purchases/upgrades a course
Args:
email(str): The user's email address
course_url(str): Course home page url
purchase_incomplete(boolean): True if adding to cart
mode(string): enroll mode (audit, verification, ...)
unit_cost(decimal): cost if purchase event
course_id(CourseKey): course id
currency(str): currency if purchase event - currently ignored since Sailthru only supports USD
message_id(str): value from Sailthru marketing campaign cookie
site_code(str): site code
Returns:
None
|
ecommerce_worker/sailthru/v1/tasks.py
|
def update_course_enrollment(self, email, course_url, purchase_incomplete, mode, unit_cost=None, course_id=None,
currency=None, message_id=None, site_code=None, sku=None):
"""Adds/updates Sailthru when a user adds to cart/purchases/upgrades a course
Args:
email(str): The user's email address
course_url(str): Course home page url
purchase_incomplete(boolean): True if adding to cart
mode(string): enroll mode (audit, verification, ...)
unit_cost(decimal): cost if purchase event
course_id(CourseKey): course id
currency(str): currency if purchase event - currently ignored since Sailthru only supports USD
message_id(str): value from Sailthru marketing campaign cookie
site_code(str): site code
Returns:
None
"""
# Get configuration
config = get_sailthru_configuration(site_code)
try:
sailthru_client = get_sailthru_client(site_code)
except SailthruError:
# NOTE: We rely on the function to log the error for us
return
# Use event type to figure out processing required
new_enroll = False
send_template = None
if not purchase_incomplete:
if mode == 'verified':
# upgrade complete
send_template = config.get('SAILTHRU_UPGRADE_TEMPLATE')
elif mode == 'audit' or mode == 'honor':
# free enroll
new_enroll = True
send_template = config.get('SAILTHRU_ENROLL_TEMPLATE')
else:
# paid course purchase complete
new_enroll = True
send_template = config.get('SAILTHRU_PURCHASE_TEMPLATE')
# calc price in pennies for Sailthru
# https://getstarted.sailthru.com/new-for-developers-overview/advanced-features/purchase/
cost_in_cents = int(unit_cost * 100)
# update the "unenrolled" course array in the user record on Sailthru if new enroll or unenroll
if new_enroll:
if not _update_unenrolled_list(sailthru_client, email, course_url, False):
schedule_retry(self, config)
# Get course data from Sailthru content library or cache
course_data = _get_course_content(course_id, course_url, sailthru_client, site_code, config)
# build item description
item = _build_purchase_item(course_id, course_url, cost_in_cents, mode, course_data, sku)
# build purchase api options list
options = {}
if purchase_incomplete and config.get('SAILTHRU_ABANDONED_CART_TEMPLATE'):
options['reminder_template'] = config.get('SAILTHRU_ABANDONED_CART_TEMPLATE')
# Sailthru reminder time format is '+n time unit'
options['reminder_time'] = "+{} minutes".format(config.get('SAILTHRU_ABANDONED_CART_DELAY'))
# add appropriate send template
if send_template:
options['send_template'] = send_template
if not _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
schedule_retry(self, config)
|
def update_course_enrollment(self, email, course_url, purchase_incomplete, mode, unit_cost=None, course_id=None,
currency=None, message_id=None, site_code=None, sku=None):
"""Adds/updates Sailthru when a user adds to cart/purchases/upgrades a course
Args:
email(str): The user's email address
course_url(str): Course home page url
purchase_incomplete(boolean): True if adding to cart
mode(string): enroll mode (audit, verification, ...)
unit_cost(decimal): cost if purchase event
course_id(CourseKey): course id
currency(str): currency if purchase event - currently ignored since Sailthru only supports USD
message_id(str): value from Sailthru marketing campaign cookie
site_code(str): site code
Returns:
None
"""
# Get configuration
config = get_sailthru_configuration(site_code)
try:
sailthru_client = get_sailthru_client(site_code)
except SailthruError:
# NOTE: We rely on the function to log the error for us
return
# Use event type to figure out processing required
new_enroll = False
send_template = None
if not purchase_incomplete:
if mode == 'verified':
# upgrade complete
send_template = config.get('SAILTHRU_UPGRADE_TEMPLATE')
elif mode == 'audit' or mode == 'honor':
# free enroll
new_enroll = True
send_template = config.get('SAILTHRU_ENROLL_TEMPLATE')
else:
# paid course purchase complete
new_enroll = True
send_template = config.get('SAILTHRU_PURCHASE_TEMPLATE')
# calc price in pennies for Sailthru
# https://getstarted.sailthru.com/new-for-developers-overview/advanced-features/purchase/
cost_in_cents = int(unit_cost * 100)
# update the "unenrolled" course array in the user record on Sailthru if new enroll or unenroll
if new_enroll:
if not _update_unenrolled_list(sailthru_client, email, course_url, False):
schedule_retry(self, config)
# Get course data from Sailthru content library or cache
course_data = _get_course_content(course_id, course_url, sailthru_client, site_code, config)
# build item description
item = _build_purchase_item(course_id, course_url, cost_in_cents, mode, course_data, sku)
# build purchase api options list
options = {}
if purchase_incomplete and config.get('SAILTHRU_ABANDONED_CART_TEMPLATE'):
options['reminder_template'] = config.get('SAILTHRU_ABANDONED_CART_TEMPLATE')
# Sailthru reminder time format is '+n time unit'
options['reminder_time'] = "+{} minutes".format(config.get('SAILTHRU_ABANDONED_CART_DELAY'))
# add appropriate send template
if send_template:
options['send_template'] = send_template
if not _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
schedule_retry(self, config)
|
[
"Adds",
"/",
"updates",
"Sailthru",
"when",
"a",
"user",
"adds",
"to",
"cart",
"/",
"purchases",
"/",
"upgrades",
"a",
"course"
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L234-L305
|
[
"def",
"update_course_enrollment",
"(",
"self",
",",
"email",
",",
"course_url",
",",
"purchase_incomplete",
",",
"mode",
",",
"unit_cost",
"=",
"None",
",",
"course_id",
"=",
"None",
",",
"currency",
"=",
"None",
",",
"message_id",
"=",
"None",
",",
"site_code",
"=",
"None",
",",
"sku",
"=",
"None",
")",
":",
"# Get configuration",
"config",
"=",
"get_sailthru_configuration",
"(",
"site_code",
")",
"try",
":",
"sailthru_client",
"=",
"get_sailthru_client",
"(",
"site_code",
")",
"except",
"SailthruError",
":",
"# NOTE: We rely on the function to log the error for us",
"return",
"# Use event type to figure out processing required",
"new_enroll",
"=",
"False",
"send_template",
"=",
"None",
"if",
"not",
"purchase_incomplete",
":",
"if",
"mode",
"==",
"'verified'",
":",
"# upgrade complete",
"send_template",
"=",
"config",
".",
"get",
"(",
"'SAILTHRU_UPGRADE_TEMPLATE'",
")",
"elif",
"mode",
"==",
"'audit'",
"or",
"mode",
"==",
"'honor'",
":",
"# free enroll",
"new_enroll",
"=",
"True",
"send_template",
"=",
"config",
".",
"get",
"(",
"'SAILTHRU_ENROLL_TEMPLATE'",
")",
"else",
":",
"# paid course purchase complete",
"new_enroll",
"=",
"True",
"send_template",
"=",
"config",
".",
"get",
"(",
"'SAILTHRU_PURCHASE_TEMPLATE'",
")",
"# calc price in pennies for Sailthru",
"# https://getstarted.sailthru.com/new-for-developers-overview/advanced-features/purchase/",
"cost_in_cents",
"=",
"int",
"(",
"unit_cost",
"*",
"100",
")",
"# update the \"unenrolled\" course array in the user record on Sailthru if new enroll or unenroll",
"if",
"new_enroll",
":",
"if",
"not",
"_update_unenrolled_list",
"(",
"sailthru_client",
",",
"email",
",",
"course_url",
",",
"False",
")",
":",
"schedule_retry",
"(",
"self",
",",
"config",
")",
"# Get course data from Sailthru content library or cache",
"course_data",
"=",
"_get_course_content",
"(",
"course_id",
",",
"course_url",
",",
"sailthru_client",
",",
"site_code",
",",
"config",
")",
"# build item description",
"item",
"=",
"_build_purchase_item",
"(",
"course_id",
",",
"course_url",
",",
"cost_in_cents",
",",
"mode",
",",
"course_data",
",",
"sku",
")",
"# build purchase api options list",
"options",
"=",
"{",
"}",
"if",
"purchase_incomplete",
"and",
"config",
".",
"get",
"(",
"'SAILTHRU_ABANDONED_CART_TEMPLATE'",
")",
":",
"options",
"[",
"'reminder_template'",
"]",
"=",
"config",
".",
"get",
"(",
"'SAILTHRU_ABANDONED_CART_TEMPLATE'",
")",
"# Sailthru reminder time format is '+n time unit'",
"options",
"[",
"'reminder_time'",
"]",
"=",
"\"+{} minutes\"",
".",
"format",
"(",
"config",
".",
"get",
"(",
"'SAILTHRU_ABANDONED_CART_DELAY'",
")",
")",
"# add appropriate send template",
"if",
"send_template",
":",
"options",
"[",
"'send_template'",
"]",
"=",
"send_template",
"if",
"not",
"_record_purchase",
"(",
"sailthru_client",
",",
"email",
",",
"item",
",",
"purchase_incomplete",
",",
"message_id",
",",
"options",
")",
":",
"schedule_retry",
"(",
"self",
",",
"config",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
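Two details of `update_course_enrollment` above are worth isolating: the event-type dispatch that picks a Sailthru template, and the cents conversion required by Sailthru's purchase API. A runnable sketch, where `select_template` is a hypothetical name for illustration:

from decimal import Decimal

def select_template(purchase_incomplete, mode, config):
    # Mirror of the dispatch above; returns (new_enroll, send_template).
    if purchase_incomplete:
        return False, None
    if mode == 'verified':
        return False, config.get('SAILTHRU_UPGRADE_TEMPLATE')   # upgrade complete
    if mode in ('audit', 'honor'):
        return True, config.get('SAILTHRU_ENROLL_TEMPLATE')     # free enroll
    return True, config.get('SAILTHRU_PURCHASE_TEMPLATE')       # paid purchase complete

config = {'SAILTHRU_ENROLL_TEMPLATE': 'enroll', 'SAILTHRU_PURCHASE_TEMPLATE': 'purchase'}
print(select_template(False, 'audit', config))   # (True, 'enroll')
print(int(Decimal('49.99') * 100))               # 4999 -- cost in cents, as computed above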
test
|
send_course_refund_email
|
Sends the course refund email.
Args:
self: Ignore.
email (str): Recipient's email address.
refund_id (int): ID of the refund that initiated this task.
amount (str): Formatted amount of the refund.
course_name (str): Name of the course for which payment was refunded.
order_number (str): Order number of the order that was refunded.
order_url (str): Receipt URL of the refunded order.
site_code (str): Identifier of the site sending the email.
|
ecommerce_worker/sailthru/v1/tasks.py
|
def send_course_refund_email(self, email, refund_id, amount, course_name, order_number, order_url, site_code=None):
""" Sends the course refund email.
Args:
self: Ignore.
email (str): Recipient's email address.
refund_id (int): ID of the refund that initiated this task.
amount (str): Formatted amount of the refund.
course_name (str): Name of the course for which payment was refunded.
order_number (str): Order number of the order that was refunded.
order_url (str): Receipt URL of the refunded order.
site_code (str): Identifier of the site sending the email.
"""
config = get_sailthru_configuration(site_code)
try:
sailthru_client = get_sailthru_client(site_code)
except SailthruError:
# NOTE: We rely on the function to log the error for us
return
email_vars = {
'amount': amount,
'course_name': course_name,
'order_number': order_number,
'order_url': order_url,
}
try:
response = sailthru_client.send(
template=config['templates']['course_refund'],
email=email,
_vars=email_vars
)
except SailthruClientError:
logger.exception(
'A client error occurred while attempting to send a course refund notification for refund [%d].',
refund_id
)
return
if response.is_ok():
logger.info('Course refund notification sent for refund %d.', refund_id)
else:
error = response.get_error()
logger.error(
'An error occurred while attempting to send a course refund notification for refund [%d]: %d - %s',
refund_id, error.get_error_code(), error.get_message()
)
if can_retry_sailthru_request(error):
logger.info(
'An attempt will be made again to send a course refund notification for refund [%d].',
refund_id
)
schedule_retry(self, config)
else:
logger.warning(
'No further attempts will be made to send a course refund notification for refund [%d].',
refund_id
)
|
def send_course_refund_email(self, email, refund_id, amount, course_name, order_number, order_url, site_code=None):
""" Sends the course refund email.
Args:
self: Ignore.
email (str): Recipient's email address.
refund_id (int): ID of the refund that initiated this task.
amount (str): Formatted amount of the refund.
course_name (str): Name of the course for which payment was refunded.
order_number (str): Order number of the order that was refunded.
order_url (str): Receipt URL of the refunded order.
site_code (str): Identifier of the site sending the email.
"""
config = get_sailthru_configuration(site_code)
try:
sailthru_client = get_sailthru_client(site_code)
except SailthruError:
# NOTE: We rely on the function to log the error for us
return
email_vars = {
'amount': amount,
'course_name': course_name,
'order_number': order_number,
'order_url': order_url,
}
try:
response = sailthru_client.send(
template=config['templates']['course_refund'],
email=email,
_vars=email_vars
)
except SailthruClientError:
logger.exception(
'A client error occurred while attempting to send a course refund notification for refund [%d].',
refund_id
)
return
if response.is_ok():
logger.info('Course refund notification sent for refund %d.', refund_id)
else:
error = response.get_error()
logger.error(
'An error occurred while attempting to send a course refund notification for refund [%d]: %d - %s',
refund_id, error.get_error_code(), error.get_message()
)
if can_retry_sailthru_request(error):
logger.info(
'An attempt will be made again to send a course refund notification for refund [%d].',
refund_id
)
schedule_retry(self, config)
else:
logger.warning(
'No further attempts will be made to send a course refund notification for refund [%d].',
refund_id
)
|
[
"Sends",
"the",
"course",
"refund",
"email",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L309-L369
|
[
"def",
"send_course_refund_email",
"(",
"self",
",",
"email",
",",
"refund_id",
",",
"amount",
",",
"course_name",
",",
"order_number",
",",
"order_url",
",",
"site_code",
"=",
"None",
")",
":",
"config",
"=",
"get_sailthru_configuration",
"(",
"site_code",
")",
"try",
":",
"sailthru_client",
"=",
"get_sailthru_client",
"(",
"site_code",
")",
"except",
"SailthruError",
":",
"# NOTE: We rely on the function to log the error for us",
"return",
"email_vars",
"=",
"{",
"'amount'",
":",
"amount",
",",
"'course_name'",
":",
"course_name",
",",
"'order_number'",
":",
"order_number",
",",
"'order_url'",
":",
"order_url",
",",
"}",
"try",
":",
"response",
"=",
"sailthru_client",
".",
"send",
"(",
"template",
"=",
"config",
"[",
"'templates'",
"]",
"[",
"'course_refund'",
"]",
",",
"email",
"=",
"email",
",",
"_vars",
"=",
"email_vars",
")",
"except",
"SailthruClientError",
":",
"logger",
".",
"exception",
"(",
"'A client error occurred while attempting to send a course refund notification for refund [%d].'",
",",
"refund_id",
")",
"return",
"if",
"response",
".",
"is_ok",
"(",
")",
":",
"logger",
".",
"info",
"(",
"'Course refund notification sent for refund %d.'",
",",
"refund_id",
")",
"else",
":",
"error",
"=",
"response",
".",
"get_error",
"(",
")",
"logger",
".",
"error",
"(",
"'An error occurred while attempting to send a course refund notification for refund [%d]: %d - %s'",
",",
"refund_id",
",",
"error",
".",
"get_error_code",
"(",
")",
",",
"error",
".",
"get_message",
"(",
")",
")",
"if",
"can_retry_sailthru_request",
"(",
"error",
")",
":",
"logger",
".",
"info",
"(",
"'An attempt will be made again to send a course refund notification for refund [%d].'",
",",
"refund_id",
")",
"schedule_retry",
"(",
"self",
",",
"config",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'No further attempts will be made to send a course refund notification for refund [%d].'",
",",
"refund_id",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
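The tail of `send_course_refund_email` above is a three-way outcome: sent, retry later, or give up. A small sketch of that decision with a stubbed error object; the retryable error codes below are placeholders, since the real list lives in `can_retry_sailthru_request`:

class StubError:
    def __init__(self, code, message):
        self._code, self._message = code, message
    def get_error_code(self):
        return self._code
    def get_message(self):
        return self._message

def refund_email_outcome(response_ok, error, can_retry):
    # Mirror of the branch above.
    if response_ok:
        return 'sent'
    return 'retry' if can_retry(error) else 'give up'

can_retry = lambda err: err.get_error_code() in (9,)   # placeholder retryable codes
print(refund_email_outcome(False, StubError(9, 'Internal Error'), can_retry))   # retry
print(refund_email_outcome(False, StubError(99, 'Bad template'), can_retry))   # give up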
test
|
send_offer_assignment_email
|
Sends the offer assignment email.
Args:
self: Ignore.
user_email (str): Recipient's email address.
offer_assignment_id (str): Key of the entry in the offer_assignment model.
subject (str): Email subject.
email_body (str): The body of the email.
site_code (str): Identifier of the site sending the email.
|
ecommerce_worker/sailthru/v1/tasks.py
|
def send_offer_assignment_email(self, user_email, offer_assignment_id, subject, email_body, site_code=None):
""" Sends the offer assignment email.
Args:
self: Ignore.
user_email (str): Recipient's email address.
offer_assignment_id (str): Key of the entry in the offer_assignment model.
subject (str): Email subject.
email_body (str): The body of the email.
site_code (str): Identifier of the site sending the email.
"""
config = get_sailthru_configuration(site_code)
response = _send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, self)
if response and response.is_ok():
send_id = response.get_body().get('send_id') # pylint: disable=no-member
if _update_assignment_email_status(offer_assignment_id, send_id, 'success'):
logger.info('[Offer Assignment] Offer assignment notification sent with message --- {message}'.format(
message=email_body))
else:
logger.exception(
'[Offer Assignment] An error occurred while updating email status data for '
'offer {token_offer} and email {token_email} via the ecommerce API.'.format(
token_offer=offer_assignment_id,
token_email=user_email,
)
)
|
def send_offer_assignment_email(self, user_email, offer_assignment_id, subject, email_body, site_code=None):
""" Sends the offer assignment email.
Args:
self: Ignore.
user_email (str): Recipient's email address.
offer_assignment_id (str): Key of the entry in the offer_assignment model.
subject (str): Email subject.
email_body (str): The body of the email.
site_code (str): Identifier of the site sending the email.
"""
config = get_sailthru_configuration(site_code)
response = _send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, self)
if response and response.is_ok():
send_id = response.get_body().get('send_id') # pylint: disable=no-member
if _update_assignment_email_status(offer_assignment_id, send_id, 'success'):
logger.info('[Offer Assignment] Offer assignment notification sent with message --- {message}'.format(
message=email_body))
else:
logger.exception(
'[Offer Assignment] An error occurred while updating email status data for '
'offer {token_offer} and email {token_email} via the ecommerce API.'.format(
token_offer=offer_assignment_id,
token_email=user_email,
)
)
|
[
"Sends",
"the",
"offer",
"assignment",
"email",
".",
"Args",
":",
"self",
":",
"Ignore",
".",
"user_email",
"(",
"str",
")",
":",
"Recipient",
"s",
"email",
"address",
".",
"offer_assignment_id",
"(",
"str",
")",
":",
"Key",
"of",
"the",
"entry",
"in",
"the",
"offer_assignment",
"model",
".",
"subject",
"(",
"str",
")",
":",
"Email",
"subject",
".",
"email_body",
"(",
"str",
")",
":",
"The",
"body",
"of",
"the",
"email",
".",
"site_code",
"(",
"str",
")",
":",
"Identifier",
"of",
"the",
"site",
"sending",
"the",
"email",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L373-L397
|
[
"def",
"send_offer_assignment_email",
"(",
"self",
",",
"user_email",
",",
"offer_assignment_id",
",",
"subject",
",",
"email_body",
",",
"site_code",
"=",
"None",
")",
":",
"config",
"=",
"get_sailthru_configuration",
"(",
"site_code",
")",
"response",
"=",
"_send_offer_assignment_notification_email",
"(",
"config",
",",
"user_email",
",",
"subject",
",",
"email_body",
",",
"site_code",
",",
"self",
")",
"if",
"response",
"and",
"response",
".",
"is_ok",
"(",
")",
":",
"send_id",
"=",
"response",
".",
"get_body",
"(",
")",
".",
"get",
"(",
"'send_id'",
")",
"# pylint: disable=no-member",
"if",
"_update_assignment_email_status",
"(",
"offer_assignment_id",
",",
"send_id",
",",
"'success'",
")",
":",
"logger",
".",
"info",
"(",
"'[Offer Assignment] Offer assignment notification sent with message --- {message}'",
".",
"format",
"(",
"message",
"=",
"email_body",
")",
")",
"else",
":",
"logger",
".",
"exception",
"(",
"'[Offer Assignment] An error occurred while updating email status data for '",
"'offer {token_offer} and email {token_email} via the ecommerce API.'",
".",
"format",
"(",
"token_offer",
"=",
"offer_assignment_id",
",",
"token_email",
"=",
"user_email",
",",
")",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
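`send_offer_assignment_email` above only records a `send_id` when the Sailthru response is OK, then reports 'success' to the ecommerce API. A stub-based sketch of that success path (`record_send` and `StubResponse` are illustrative names, not part of the worker):

class StubResponse:
    def __init__(self, body):
        self._body = body
    def is_ok(self):
        return True
    def get_body(self):
        return self._body

def record_send(response, update_status, offer_assignment_id):
    # Mirror of the success path above: pull send_id, then mark 'success'.
    if not (response and response.is_ok()):
        return False
    send_id = response.get_body().get('send_id')
    return update_status(offer_assignment_id, send_id, 'success')

update_status = lambda oid, sid, status: True   # stand-in for _update_assignment_email_status
print(record_send(StubResponse({'send_id': 'abc123'}), update_status, 'offer-1'))   # True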
test
|
_send_offer_assignment_notification_email
|
Handles sending offer assignment notification emails and retrying failed emails when appropriate.
|
ecommerce_worker/sailthru/v1/tasks.py
|
def _send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, task):
"""Handles sending offer assignment notification emails and retrying failed emails when appropriate."""
try:
sailthru_client = get_sailthru_client(site_code)
except SailthruError:
logger.exception(
'[Offer Assignment] A client error occurred while attempting to send an offer assignment notification.'
' Message: {message}'.format(message=email_body)
)
return None
email_vars = {
'subject': subject,
'email_body': email_body,
}
try:
response = sailthru_client.send(
template=config['templates']['assignment_email'],
email=user_email,
_vars=email_vars
)
except SailthruClientError:
logger.exception(
'[Offer Assignment] A client error occurred while attempting to send an offer assignment notification.'
' Message: {message}'.format(message=email_body)
)
return None
if not response.is_ok():
error = response.get_error()
logger.error(
'[Offer Assignment] A {token_error_code} - {token_error_message} error occurred'
' while attempting to send an offer assignment notification.'
' Message: {message}'.format(
message=email_body,
token_error_code=error.get_error_code(),
token_error_message=error.get_message()
)
)
if can_retry_sailthru_request(error):
logger.info(
'[Offer Assignment] An attempt will be made to resend the offer assignment notification.'
' Message: {message}'.format(message=email_body)
)
schedule_retry(task, config)
else:
logger.warning(
'[Offer Assignment] No further attempts will be made to send the offer assignment notification.'
' Failed Message: {message}'.format(message=email_body)
)
return response
|
def _send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, task):
"""Handles sending offer assignment notification emails and retrying failed emails when appropriate."""
try:
sailthru_client = get_sailthru_client(site_code)
except SailthruError:
logger.exception(
'[Offer Assignment] A client error occurred while attempting to send an offer assignment notification.'
' Message: {message}'.format(message=email_body)
)
return None
email_vars = {
'subject': subject,
'email_body': email_body,
}
try:
response = sailthru_client.send(
template=config['templates']['assignment_email'],
email=user_email,
_vars=email_vars
)
except SailthruClientError:
logger.exception(
'[Offer Assignment] A client error occurred while attempting to send an offer assignment notification.'
' Message: {message}'.format(message=email_body)
)
return None
if not response.is_ok():
error = response.get_error()
logger.error(
'[Offer Assignment] A {token_error_code} - {token_error_message} error occurred'
' while attempting to send an offer assignment notification.'
' Message: {message}'.format(
message=email_body,
token_error_code=error.get_error_code(),
token_error_message=error.get_message()
)
)
if can_retry_sailthru_request(error):
logger.info(
'[Offer Assignment] An attempt will be made to resend the offer assignment notification.'
' Message: {message}'.format(message=email_body)
)
schedule_retry(task, config)
else:
logger.warning(
'[Offer Assignment] No further attempts will be made to send the offer assignment notification.'
' Failed Message: {message}'.format(message=email_body)
)
return response
|
[
"Handles",
"sending",
"offer",
"assignment",
"notification",
"emails",
"and",
"retrying",
"failed",
"emails",
"when",
"appropriate",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L400-L450
|
[
"def",
"_send_offer_assignment_notification_email",
"(",
"config",
",",
"user_email",
",",
"subject",
",",
"email_body",
",",
"site_code",
",",
"task",
")",
":",
"try",
":",
"sailthru_client",
"=",
"get_sailthru_client",
"(",
"site_code",
")",
"except",
"SailthruError",
":",
"logger",
".",
"exception",
"(",
"'[Offer Assignment] A client error occurred while attempting to send a offer assignment notification.'",
"' Message: {message}'",
".",
"format",
"(",
"message",
"=",
"email_body",
")",
")",
"return",
"None",
"email_vars",
"=",
"{",
"'subject'",
":",
"subject",
",",
"'email_body'",
":",
"email_body",
",",
"}",
"try",
":",
"response",
"=",
"sailthru_client",
".",
"send",
"(",
"template",
"=",
"config",
"[",
"'templates'",
"]",
"[",
"'assignment_email'",
"]",
",",
"email",
"=",
"user_email",
",",
"_vars",
"=",
"email_vars",
")",
"except",
"SailthruClientError",
":",
"logger",
".",
"exception",
"(",
"'[Offer Assignment] A client error occurred while attempting to send a offer assignment notification.'",
"' Message: {message}'",
".",
"format",
"(",
"message",
"=",
"email_body",
")",
")",
"return",
"None",
"if",
"not",
"response",
".",
"is_ok",
"(",
")",
":",
"error",
"=",
"response",
".",
"get_error",
"(",
")",
"logger",
".",
"error",
"(",
"'[Offer Assignment] A {token_error_code} - {token_error_message} error occurred'",
"' while attempting to send a offer assignment notification.'",
"' Message: {message}'",
".",
"format",
"(",
"message",
"=",
"email_body",
",",
"token_error_code",
"=",
"error",
".",
"get_error_code",
"(",
")",
",",
"token_error_message",
"=",
"error",
".",
"get_message",
"(",
")",
")",
")",
"if",
"can_retry_sailthru_request",
"(",
"error",
")",
":",
"logger",
".",
"info",
"(",
"'[Offer Assignment] An attempt will be made to resend the offer assignment notification.'",
"' Message: {message}'",
".",
"format",
"(",
"message",
"=",
"email_body",
")",
")",
"schedule_retry",
"(",
"task",
",",
"config",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'[Offer Assignment] No further attempts will be made to send the offer assignment notification.'",
"' Failed Message: {message}'",
".",
"format",
"(",
"message",
"=",
"email_body",
")",
")",
"return",
"response"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
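The Sailthru calls in the task above come from the sailthru-client package. A minimal send-with-template sketch; the import paths and credentials are assumptions, and the template name would come from config['templates']['assignment_email']:

from sailthru.sailthru_client import SailthruClient
from sailthru.sailthru_error import SailthruClientError

client = SailthruClient('api-key', 'api-secret')   # placeholder credentials
try:
    response = client.send(
        template='assignment_email',
        email='learner@example.com',
        _vars={'subject': 'Your offer', 'email_body': 'Redeem your code ...'},
    )
except SailthruClientError:
    response = None   # transport/client failure: the task above logs and returns None

if response is not None and not response.is_ok():
    error = response.get_error()
    # API-level failure: retry only when can_retry_sailthru_request(error) says so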
test
|
_update_assignment_email_status
|
Update the offer_assignment and offer_assignment_email model using the Ecommerce assignmentemail api.
Arguments:
offer_assignment_id (str): Key of the entry in the offer_assignment model.
send_id (str): Unique message id from Sailthru
status (str): status to be sent to the api
site_code (str): site code
Returns:
True or False based on model update status from Ecommerce api
|
ecommerce_worker/sailthru/v1/tasks.py
|
def _update_assignment_email_status(offer_assignment_id, send_id, status, site_code=None):
"""
Update the offer_assignment and offer_assignment_email model using the Ecommerce assignmentemail api.
Arguments:
offer_assignment_id (str): Key of the entry in the offer_assignment model.
send_id (str): Unique message id from Sailthru
status (str): status to be sent to the api
site_code (str): site code
Returns:
True or False based on model update status from Ecommerce api
"""
api = get_ecommerce_client(url_postfix='assignment-email/', site_code=site_code)
post_data = {
'offer_assignment_id': offer_assignment_id,
'send_id': send_id,
'status': status,
}
try:
api_response = api.status().post(post_data)
except RequestException:
logger.exception(
'[Offer Assignment] An error occurred while updating offer assignment email status for '
'offer id {token_offer} and message id {token_send_id} via the Ecommerce API.'.format(
token_offer=offer_assignment_id,
token_send_id=send_id
)
)
return False
return True if api_response.get('status') == 'updated' else False
|
def _update_assignment_email_status(offer_assignment_id, send_id, status, site_code=None):
"""
Update the offer_assignment and offer_assignment_email model using the Ecommerce assignmentemail api.
Arguments:
offer_assignment_id (str): Key of the entry in the offer_assignment model.
send_id (str): Unique message id from Sailthru
status (str): status to be sent to the api
site_code (str): site code
Returns:
True or False based on model update status from Ecommerce api
"""
api = get_ecommerce_client(url_postfix='assignment-email/', site_code=site_code)
post_data = {
'offer_assignment_id': offer_assignment_id,
'send_id': send_id,
'status': status,
}
try:
api_response = api.status().post(post_data)
except RequestException:
logger.exception(
'[Offer Assignment] An error occurred while updating offer assignment email status for '
'offer id {token_offer} and message id {token_send_id} via the Ecommerce API.'.format(
token_offer=offer_assignment_id,
token_send_id=send_id
)
)
return False
return True if api_response.get('status') == 'updated' else False
|
[
"Update",
"the",
"offer_assignment",
"and",
"offer_assignment_email",
"model",
"using",
"the",
"Ecommerce",
"assignmentemail",
"api",
".",
"Arguments",
":",
"offer_assignment_id",
"(",
"str",
")",
":",
"Key",
"of",
"the",
"entry",
"in",
"the",
"offer_assignment",
"model",
".",
"send_id",
"(",
"str",
")",
":",
"Unique",
"message",
"id",
"from",
"Sailthru",
"status",
"(",
"str",
")",
":",
"status",
"to",
"be",
"sent",
"to",
"the",
"api",
"site_code",
"(",
"str",
")",
":",
"site",
"code",
"Returns",
":",
"True",
"or",
"False",
"based",
"on",
"model",
"update",
"status",
"from",
"Ecommerce",
"api"
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L453-L481
|
[
"def",
"_update_assignment_email_status",
"(",
"offer_assignment_id",
",",
"send_id",
",",
"status",
",",
"site_code",
"=",
"None",
")",
":",
"api",
"=",
"get_ecommerce_client",
"(",
"url_postfix",
"=",
"'assignment-email/'",
",",
"site_code",
"=",
"site_code",
")",
"post_data",
"=",
"{",
"'offer_assignment_id'",
":",
"offer_assignment_id",
",",
"'send_id'",
":",
"send_id",
",",
"'status'",
":",
"status",
",",
"}",
"try",
":",
"api_response",
"=",
"api",
".",
"status",
"(",
")",
".",
"post",
"(",
"post_data",
")",
"except",
"RequestException",
":",
"logger",
".",
"exception",
"(",
"'[Offer Assignment] An error occurred while updating offer assignment email status for '",
"'offer id {token_offer} and message id {token_send_id} via the Ecommerce API.'",
".",
"format",
"(",
"token_offer",
"=",
"offer_assignment_id",
",",
"token_send_id",
"=",
"send_id",
")",
")",
"return",
"False",
"return",
"True",
"if",
"api_response",
".",
"get",
"(",
"'status'",
")",
"==",
"'updated'",
"else",
"False"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
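`_update_assignment_email_status` above boils down to one POST and one comparison; note that the trailing `True if ... else False` is equivalent to returning the comparison directly. A stubbed sketch of the call shape (the stub classes are illustrative stand-ins for the ecommerce client):

class StubEndpoint:
    def post(self, data):
        # stand-in for the ecommerce assignment-email endpoint
        return {'status': 'updated'}

class StubApi:
    def status(self):
        return StubEndpoint()

def update_status(api, offer_assignment_id, send_id, status):
    post_data = {'offer_assignment_id': offer_assignment_id, 'send_id': send_id, 'status': status}
    api_response = api.status().post(post_data)
    return api_response.get('status') == 'updated'   # same result as the conditional above

print(update_status(StubApi(), 'offer-1', 'abc123', 'success'))   # True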
test
|
send_offer_update_email
|
Sends the offer emails after assignment, either for revoking or reminding.
Args:
self: Ignore.
user_email (str): Recipient's email address.
subject (str): Email subject.
email_body (str): The body of the email.
site_code (str): Identifier of the site sending the email.
|
ecommerce_worker/sailthru/v1/tasks.py
|
def send_offer_update_email(self, user_email, subject, email_body, site_code=None):
""" Sends the offer emails after assignment, either for revoking or reminding.
Args:
self: Ignore.
user_email (str): Recipient's email address.
subject (str): Email subject.
email_body (str): The body of the email.
site_code (str): Identifier of the site sending the email.
"""
config = get_sailthru_configuration(site_code)
_send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, self)
|
def send_offer_update_email(self, user_email, subject, email_body, site_code=None):
""" Sends the offer emails after assignment, either for revoking or reminding.
Args:
self: Ignore.
user_email (str): Recipient's email address.
subject (str): Email subject.
email_body (str): The body of the email.
site_code (str): Identifier of the site sending the email.
"""
config = get_sailthru_configuration(site_code)
_send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, self)
|
[
"Sends",
"the",
"offer",
"emails",
"after",
"assignment",
"either",
"for",
"revoking",
"or",
"reminding",
".",
"Args",
":",
"self",
":",
"Ignore",
".",
"user_email",
"(",
"str",
")",
":",
"Recipient",
"s",
"email",
"address",
".",
"subject",
"(",
"str",
")",
":",
"Email",
"subject",
".",
"email_body",
"(",
"str",
")",
":",
"The",
"body",
"of",
"the",
"email",
".",
"site_code",
"(",
"str",
")",
":",
"Identifier",
"of",
"the",
"site",
"sending",
"the",
"email",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L485-L495
|
[
"def",
"send_offer_update_email",
"(",
"self",
",",
"user_email",
",",
"subject",
",",
"email_body",
",",
"site_code",
"=",
"None",
")",
":",
"config",
"=",
"get_sailthru_configuration",
"(",
"site_code",
")",
"_send_offer_assignment_notification_email",
"(",
"config",
",",
"user_email",
",",
"subject",
",",
"email_body",
",",
"site_code",
",",
"self",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
test
|
get_logger_config
|
Returns a dictionary containing logging configuration.
If dev_env is True, logging will not be done via local rsyslogd.
Instead, application logs will be dropped into log_dir. 'edx_filename'
is ignored unless dev_env is True.
|
ecommerce_worker/configuration/logger.py
|
def get_logger_config(log_dir='/var/tmp',
logging_env='no_env',
edx_filename='edx.log',
dev_env=False,
debug=False,
local_loglevel='INFO',
service_variant='ecomworker'):
"""
Returns a dictionary containing logging configuration.
If dev_env is True, logging will not be done via local rsyslogd.
Instead, application logs will be dropped into log_dir. 'edx_filename'
is ignored unless dev_env is True.
"""
# Revert to INFO if an invalid string is passed in
if local_loglevel not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
local_loglevel = 'INFO'
hostname = platform.node().split('.')[0]
syslog_format = (
'[service_variant={service_variant}]'
'[%(name)s][env:{logging_env}] %(levelname)s '
'[{hostname} %(process)d] [%(filename)s:%(lineno)d] '
'- %(message)s'
).format(
service_variant=service_variant,
logging_env=logging_env, hostname=hostname
)
if debug:
handlers = ['console']
else:
handlers = ['local']
logger_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d '
'[%(name)s] %(filename)s:%(lineno)d - %(message)s',
},
'syslog_format': {'format': syslog_format},
'raw': {'format': '%(message)s'},
},
'handlers': {
'console': {
'level': 'DEBUG' if debug else 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stdout,
},
},
'loggers': {
'requests': {
'handlers': handlers,
'level': 'WARNING',
'propagate': True
},
'': {
'handlers': handlers,
'level': 'DEBUG',
'propagate': False
},
}
}
if dev_env:
edx_file_loc = os.path.join(log_dir, edx_filename)
logger_config['handlers'].update({
'local': {
'class': 'logging.handlers.RotatingFileHandler',
'level': local_loglevel,
'formatter': 'standard',
'filename': edx_file_loc,
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
})
else:
logger_config['handlers'].update({
'local': {
'level': local_loglevel,
'class': 'logging.handlers.SysLogHandler',
# Use a different address for Mac OS X
'address': '/var/run/syslog' if sys.platform == 'darwin' else '/dev/log',
'formatter': 'syslog_format',
'facility': SysLogHandler.LOG_LOCAL0,
},
})
return logger_config
|
def get_logger_config(log_dir='/var/tmp',
logging_env='no_env',
edx_filename='edx.log',
dev_env=False,
debug=False,
local_loglevel='INFO',
service_variant='ecomworker'):
"""
Returns a dictionary containing logging configuration.
If dev_env is True, logging will not be done via local rsyslogd.
Instead, application logs will be dropped into log_dir. 'edx_filename'
is ignored unless dev_env is True.
"""
# Revert to INFO if an invalid string is passed in
if local_loglevel not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
local_loglevel = 'INFO'
hostname = platform.node().split('.')[0]
syslog_format = (
'[service_variant={service_variant}]'
'[%(name)s][env:{logging_env}] %(levelname)s '
'[{hostname} %(process)d] [%(filename)s:%(lineno)d] '
'- %(message)s'
).format(
service_variant=service_variant,
logging_env=logging_env, hostname=hostname
)
if debug:
handlers = ['console']
else:
handlers = ['local']
logger_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d '
'[%(name)s] %(filename)s:%(lineno)d - %(message)s',
},
'syslog_format': {'format': syslog_format},
'raw': {'format': '%(message)s'},
},
'handlers': {
'console': {
'level': 'DEBUG' if debug else 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stdout,
},
},
'loggers': {
'requests': {
'handlers': handlers,
'level': 'WARNING',
'propagate': True
},
'': {
'handlers': handlers,
'level': 'DEBUG',
'propagate': False
},
}
}
if dev_env:
edx_file_loc = os.path.join(log_dir, edx_filename)
logger_config['handlers'].update({
'local': {
'class': 'logging.handlers.RotatingFileHandler',
'level': local_loglevel,
'formatter': 'standard',
'filename': edx_file_loc,
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
})
else:
logger_config['handlers'].update({
'local': {
'level': local_loglevel,
'class': 'logging.handlers.SysLogHandler',
# Use a different address for Mac OS X
'address': '/var/run/syslog' if sys.platform == 'darwin' else '/dev/log',
'formatter': 'syslog_format',
'facility': SysLogHandler.LOG_LOCAL0,
},
})
return logger_config
|
[
"Returns",
"a",
"dictionary",
"containing",
"logging",
"configuration",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/configuration/logger.py#L8-L100
|
[
"def",
"get_logger_config",
"(",
"log_dir",
"=",
"'/var/tmp'",
",",
"logging_env",
"=",
"'no_env'",
",",
"edx_filename",
"=",
"'edx.log'",
",",
"dev_env",
"=",
"False",
",",
"debug",
"=",
"False",
",",
"local_loglevel",
"=",
"'INFO'",
",",
"service_variant",
"=",
"'ecomworker'",
")",
":",
"# Revert to INFO if an invalid string is passed in",
"if",
"local_loglevel",
"not",
"in",
"[",
"'DEBUG'",
",",
"'INFO'",
",",
"'WARNING'",
",",
"'ERROR'",
",",
"'CRITICAL'",
"]",
":",
"local_loglevel",
"=",
"'INFO'",
"hostname",
"=",
"platform",
".",
"node",
"(",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"syslog_format",
"=",
"(",
"'[service_variant={service_variant}]'",
"'[%(name)s][env:{logging_env}] %(levelname)s '",
"'[{hostname} %(process)d] [%(filename)s:%(lineno)d] '",
"'- %(message)s'",
")",
".",
"format",
"(",
"service_variant",
"=",
"service_variant",
",",
"logging_env",
"=",
"logging_env",
",",
"hostname",
"=",
"hostname",
")",
"if",
"debug",
":",
"handlers",
"=",
"[",
"'console'",
"]",
"else",
":",
"handlers",
"=",
"[",
"'local'",
"]",
"logger_config",
"=",
"{",
"'version'",
":",
"1",
",",
"'disable_existing_loggers'",
":",
"False",
",",
"'formatters'",
":",
"{",
"'standard'",
":",
"{",
"'format'",
":",
"'%(asctime)s %(levelname)s %(process)d '",
"'[%(name)s] %(filename)s:%(lineno)d - %(message)s'",
",",
"}",
",",
"'syslog_format'",
":",
"{",
"'format'",
":",
"syslog_format",
"}",
",",
"'raw'",
":",
"{",
"'format'",
":",
"'%(message)s'",
"}",
",",
"}",
",",
"'handlers'",
":",
"{",
"'console'",
":",
"{",
"'level'",
":",
"'DEBUG'",
"if",
"debug",
"else",
"'INFO'",
",",
"'class'",
":",
"'logging.StreamHandler'",
",",
"'formatter'",
":",
"'standard'",
",",
"'stream'",
":",
"sys",
".",
"stdout",
",",
"}",
",",
"}",
",",
"'loggers'",
":",
"{",
"'requests'",
":",
"{",
"'handlers'",
":",
"handlers",
",",
"'level'",
":",
"'WARNING'",
",",
"'propagate'",
":",
"True",
"}",
",",
"''",
":",
"{",
"'handlers'",
":",
"handlers",
",",
"'level'",
":",
"'DEBUG'",
",",
"'propagate'",
":",
"False",
"}",
",",
"}",
"}",
"if",
"dev_env",
":",
"edx_file_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"log_dir",
",",
"edx_filename",
")",
"logger_config",
"[",
"'handlers'",
"]",
".",
"update",
"(",
"{",
"'local'",
":",
"{",
"'class'",
":",
"'logging.handlers.RotatingFileHandler'",
",",
"'level'",
":",
"local_loglevel",
",",
"'formatter'",
":",
"'standard'",
",",
"'filename'",
":",
"edx_file_loc",
",",
"'maxBytes'",
":",
"1024",
"*",
"1024",
"*",
"2",
",",
"'backupCount'",
":",
"5",
",",
"}",
",",
"}",
")",
"else",
":",
"logger_config",
"[",
"'handlers'",
"]",
".",
"update",
"(",
"{",
"'local'",
":",
"{",
"'level'",
":",
"local_loglevel",
",",
"'class'",
":",
"'logging.handlers.SysLogHandler'",
",",
"# Use a different address for Mac OS X",
"'address'",
":",
"'/var/run/syslog'",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
"else",
"'/dev/log'",
",",
"'formatter'",
":",
"'syslog_format'",
",",
"'facility'",
":",
"SysLogHandler",
".",
"LOG_LOCAL0",
",",
"}",
",",
"}",
")",
"return",
"logger_config"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
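`get_logger_config` returns a plain dictConfig dictionary, so wiring it up is a single call. A usage sketch, assuming the package is importable from the file path shown above:

import logging
import logging.config

from ecommerce_worker.configuration.logger import get_logger_config

# dev_env=True writes to a rotating file under log_dir instead of syslog
logging.config.dictConfig(get_logger_config(log_dir='/tmp', dev_env=True, debug=True))
logging.getLogger(__name__).info('logger configured')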
test
|
_retry_order
|
Retry with exponential backoff until fulfillment
succeeds or the retry limit is reached. If the retry limit is exceeded,
the exception is re-raised.
|
ecommerce_worker/fulfillment/v1/tasks.py
|
def _retry_order(self, exception, max_fulfillment_retries, order_number):
"""
Retry with exponential backoff until fulfillment
succeeds or the retry limit is reached. If the retry limit is exceeded,
the exception is re-raised.
"""
retries = self.request.retries
if retries == max_fulfillment_retries:
logger.exception('Fulfillment of order [%s] failed. Giving up.', order_number)
else:
logger.warning('Fulfillment of order [%s] failed. Retrying.', order_number)
countdown = 2 ** retries
raise self.retry(exc=exception, countdown=countdown, max_retries=max_fulfillment_retries)
|
def _retry_order(self, exception, max_fulfillment_retries, order_number):
"""
Retry with exponential backoff until fulfillment
succeeds or the retry limit is reached. If the retry limit is exceeded,
the exception is re-raised.
"""
retries = self.request.retries
if retries == max_fulfillment_retries:
logger.exception('Fulfillment of order [%s] failed. Giving up.', order_number)
else:
logger.warning('Fulfillment of order [%s] failed. Retrying.', order_number)
countdown = 2 ** retries
raise self.retry(exc=exception, countdown=countdown, max_retries=max_fulfillment_retries)
|
[
"Retry",
"with",
"exponential",
"backoff",
"until",
"fulfillment",
"succeeds",
"or",
"the",
"retry",
"limit",
"is",
"reached",
".",
"If",
"the",
"retry",
"limit",
"is",
"exceeded",
"the",
"exception",
"is",
"re",
"-",
"raised",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/fulfillment/v1/tasks.py#L14-L27
|
[
"def",
"_retry_order",
"(",
"self",
",",
"exception",
",",
"max_fulfillment_retries",
",",
"order_number",
")",
":",
"retries",
"=",
"self",
".",
"request",
".",
"retries",
"if",
"retries",
"==",
"max_fulfillment_retries",
":",
"logger",
".",
"exception",
"(",
"'Fulfillment of order [%s] failed. Giving up.'",
",",
"order_number",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'Fulfillment of order [%s] failed. Retrying.'",
",",
"order_number",
")",
"countdown",
"=",
"2",
"**",
"retries",
"raise",
"self",
".",
"retry",
"(",
"exc",
"=",
"exception",
",",
"countdown",
"=",
"countdown",
",",
"max_retries",
"=",
"max_fulfillment_retries",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
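The `countdown = 2 ** retries` line above yields the exponential backoff schedule passed to Celery's `self.retry`. A two-line illustration:

for retries in range(5):
    print('attempt', retries + 1, '-> retry in', 2 ** retries, 's')
# attempt 1 -> retry in 1 s, attempt 2 -> retry in 2 s, ... attempt 5 -> retry in 16 s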
test
|
fulfill_order
|
Fulfills an order.
Arguments:
order_number (str): Order number indicating which order to fulfill.
Returns:
None
|
ecommerce_worker/fulfillment/v1/tasks.py
|
def fulfill_order(self, order_number, site_code=None, email_opt_in=False):
"""Fulfills an order.
Arguments:
order_number (str): Order number indicating which order to fulfill.
Returns:
None
"""
max_fulfillment_retries = get_configuration('MAX_FULFILLMENT_RETRIES', site_code=site_code)
api = get_ecommerce_client(site_code=site_code)
try:
logger.info('Requesting fulfillment of order [%s].', order_number)
api.orders(order_number).fulfill.put(email_opt_in=email_opt_in)
except exceptions.HttpClientError as exc:
status_code = exc.response.status_code # pylint: disable=no-member
if status_code == 406:
# The order is not fulfillable. Therefore, it must be complete.
logger.info('Order [%s] has already been fulfilled. Ignoring.', order_number)
raise Ignore()
else:
# Unknown client error. Let's retry to resolve it.
logger.warning(
'Fulfillment of order [%s] failed because of HttpClientError. Retrying',
order_number,
exc_info=True
)
_retry_order(self, exc, max_fulfillment_retries, order_number)
except (exceptions.HttpServerError, exceptions.Timeout, SSLError) as exc:
# Fulfillment failed, retry
_retry_order(self, exc, max_fulfillment_retries, order_number)
|
def fulfill_order(self, order_number, site_code=None, email_opt_in=False):
"""Fulfills an order.
Arguments:
order_number (str): Order number indicating which order to fulfill.
Returns:
None
"""
max_fulfillment_retries = get_configuration('MAX_FULFILLMENT_RETRIES', site_code=site_code)
api = get_ecommerce_client(site_code=site_code)
try:
logger.info('Requesting fulfillment of order [%s].', order_number)
api.orders(order_number).fulfill.put(email_opt_in=email_opt_in)
except exceptions.HttpClientError as exc:
status_code = exc.response.status_code # pylint: disable=no-member
if status_code == 406:
# The order is not fulfillable. Therefore, it must be complete.
logger.info('Order [%s] has already been fulfilled. Ignoring.', order_number)
raise Ignore()
else:
# Unknown client error. Let's retry to resolve it.
logger.warning(
'Fulfillment of order [%s] failed because of HttpClientError. Retrying',
order_number,
exc_info=True
)
_retry_order(self, exc, max_fulfillment_retries, order_number)
except (exceptions.HttpServerError, exceptions.Timeout, SSLError) as exc:
# Fulfillment failed, retry
_retry_order(self, exc, max_fulfillment_retries, order_number)
|
[
"Fulfills",
"an",
"order",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/fulfillment/v1/tasks.py#L31-L62
|
[
"def",
"fulfill_order",
"(",
"self",
",",
"order_number",
",",
"site_code",
"=",
"None",
",",
"email_opt_in",
"=",
"False",
")",
":",
"max_fulfillment_retries",
"=",
"get_configuration",
"(",
"'MAX_FULFILLMENT_RETRIES'",
",",
"site_code",
"=",
"site_code",
")",
"api",
"=",
"get_ecommerce_client",
"(",
"site_code",
"=",
"site_code",
")",
"try",
":",
"logger",
".",
"info",
"(",
"'Requesting fulfillment of order [%s].'",
",",
"order_number",
")",
"api",
".",
"orders",
"(",
"order_number",
")",
".",
"fulfill",
".",
"put",
"(",
"email_opt_in",
"=",
"email_opt_in",
")",
"except",
"exceptions",
".",
"HttpClientError",
"as",
"exc",
":",
"status_code",
"=",
"exc",
".",
"response",
".",
"status_code",
"# pylint: disable=no-member",
"if",
"status_code",
"==",
"406",
":",
"# The order is not fulfillable. Therefore, it must be complete.",
"logger",
".",
"info",
"(",
"'Order [%s] has already been fulfilled. Ignoring.'",
",",
"order_number",
")",
"raise",
"Ignore",
"(",
")",
"else",
":",
"# Unknown client error. Let's retry to resolve it.",
"logger",
".",
"warning",
"(",
"'Fulfillment of order [%s] failed because of HttpClientError. Retrying'",
",",
"order_number",
",",
"exc_info",
"=",
"True",
")",
"_retry_order",
"(",
"self",
",",
"exc",
",",
"max_fulfillment_retries",
",",
"order_number",
")",
"except",
"(",
"exceptions",
".",
"HttpServerError",
",",
"exceptions",
".",
"Timeout",
",",
"SSLError",
")",
"as",
"exc",
":",
"# Fulfillment failed, retry",
"_retry_order",
"(",
"self",
",",
"exc",
",",
"max_fulfillment_retries",
",",
"order_number",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
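`fulfill_order` above treats HTTP 406 as "already fulfilled" and drops the task via Celery's Ignore, while any other client error is retried. A stub-based sketch of that classification (StubIgnore stands in for celery.exceptions.Ignore):

class StubIgnore(Exception):
    # stand-in for celery.exceptions.Ignore
    pass

def classify_client_error(status_code):
    # Mirror of the HttpClientError branch above.
    if status_code == 406:
        raise StubIgnore()   # order not fulfillable, hence already complete
    return 'retry'           # unknown client error: retry with backoff

try:
    classify_client_error(406)
except StubIgnore:
    print('order already fulfilled; task ignored')
print(classify_client_error(403))   # retry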
test
|
get_sailthru_client
|
Returns a Sailthru client for the specified site.
Args:
site_code (str): Site for which the client should be configured.
Returns:
SailthruClient
Raises:
SailthruNotEnabled: If Sailthru is not enabled for the specified site.
ConfigurationError: If either the Sailthru API key or secret are not set for the site.
|
ecommerce_worker/sailthru/v1/utils.py
|
def get_sailthru_client(site_code):
"""
Returns a Sailthru client for the specified site.
Args:
site_code (str): Site for which the client should be configured.
Returns:
SailthruClient
Raises:
SailthruNotEnabled: If Sailthru is not enabled for the specified site.
ConfigurationError: If either the Sailthru API key or secret are not set for the site.
"""
# Get configuration
config = get_sailthru_configuration(site_code)
# Return if Sailthru integration disabled
if not config.get('SAILTHRU_ENABLE'):
msg = 'Sailthru is not enabled for site {}'.format(site_code)
log.debug(msg)
raise SailthruNotEnabled(msg)
# Make sure key and secret configured
key = config.get('SAILTHRU_KEY')
secret = config.get('SAILTHRU_SECRET')
if not (key and secret):
msg = 'Both key and secret are required for site {}'.format(site_code)
log.error(msg)
raise ConfigurationError(msg)
return SailthruClient(key, secret)
|
def get_sailthru_client(site_code):
"""
Returns a Sailthru client for the specified site.
Args:
site_code (str): Site for which the client should be configured.
Returns:
SailthruClient
Raises:
SailthruNotEnabled: If Sailthru is not enabled for the specified site.
ConfigurationError: If either the Sailthru API key or secret are not set for the site.
"""
# Get configuration
config = get_sailthru_configuration(site_code)
# Return if Sailthru integration disabled
if not config.get('SAILTHRU_ENABLE'):
msg = 'Sailthru is not enabled for site {}'.format(site_code)
log.debug(msg)
raise SailthruNotEnabled(msg)
# Make sure key and secret configured
key = config.get('SAILTHRU_KEY')
secret = config.get('SAILTHRU_SECRET')
if not (key and secret):
msg = 'Both key and secret are required for site {}'.format(site_code)
log.error(msg)
raise ConfigurationError(msg)
return SailthruClient(key, secret)
|
[
"Returns",
"a",
"Sailthru",
"client",
"for",
"the",
"specified",
"site",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/utils.py#L18-L50
|
[
"def",
"get_sailthru_client",
"(",
"site_code",
")",
":",
"# Get configuration",
"config",
"=",
"get_sailthru_configuration",
"(",
"site_code",
")",
"# Return if Sailthru integration disabled",
"if",
"not",
"config",
".",
"get",
"(",
"'SAILTHRU_ENABLE'",
")",
":",
"msg",
"=",
"'Sailthru is not enabled for site {}'",
".",
"format",
"(",
"site_code",
")",
"log",
".",
"debug",
"(",
"msg",
")",
"raise",
"SailthruNotEnabled",
"(",
"msg",
")",
"# Make sure key and secret configured",
"key",
"=",
"config",
".",
"get",
"(",
"'SAILTHRU_KEY'",
")",
"secret",
"=",
"config",
".",
"get",
"(",
"'SAILTHRU_SECRET'",
")",
"if",
"not",
"(",
"key",
"and",
"secret",
")",
":",
"msg",
"=",
"'Both key and secret are required for site {}'",
".",
"format",
"(",
"site_code",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"ConfigurationError",
"(",
"msg",
")",
"return",
"SailthruClient",
"(",
"key",
",",
"secret",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
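The guards in `get_sailthru_client` above follow a reusable validate-then-construct pattern: check the enable flag first, then require the key/secret pair. A self-contained sketch with local exception classes (the real ones live in this package):

class SailthruNotEnabled(Exception):
    pass

class ConfigurationError(Exception):
    pass

def validate_sailthru_config(config, site_code):
    # Same guard order as above.
    if not config.get('SAILTHRU_ENABLE'):
        raise SailthruNotEnabled('Sailthru is not enabled for site {}'.format(site_code))
    key, secret = config.get('SAILTHRU_KEY'), config.get('SAILTHRU_SECRET')
    if not (key and secret):
        raise ConfigurationError('Both key and secret are required for site {}'.format(site_code))
    return key, secret

print(validate_sailthru_config(
    {'SAILTHRU_ENABLE': True, 'SAILTHRU_KEY': 'k', 'SAILTHRU_SECRET': 's'}, 'edx'))   # ('k', 's')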
test
|
Cache.get
|
Get an object from the cache
Arguments:
key (str): Cache key
Returns:
Cached object
|
ecommerce_worker/cache.py
|
def get(self, key):
"""Get an object from the cache
Arguments:
key (str): Cache key
Returns:
Cached object
"""
lock.acquire()
try:
if key not in self:
return None
current_time = time.time()
if self[key].expire > current_time:
return self[key].value
# expired key, clean out all expired keys
deletes = []
for k, val in self.items():
if val.expire <= current_time:
deletes.append(k)
for k in deletes:
del self[k]
return None
finally:
lock.release()
|
def get(self, key):
"""Get an object from the cache
Arguments:
key (str): Cache key
Returns:
Cached object
"""
lock.acquire()
try:
if key not in self:
return None
current_time = time.time()
if self[key].expire > current_time:
return self[key].value
# expired key, clean out all expired keys
deletes = []
for k, val in self.items():
if val.expire <= current_time:
deletes.append(k)
for k in deletes:
del self[k]
return None
finally:
lock.release()
|
[
"Get",
"an",
"object",
"from",
"the",
"cache"
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/cache.py#L23-L51
|
[
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"if",
"key",
"not",
"in",
"self",
":",
"return",
"None",
"current_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
"[",
"key",
"]",
".",
"expire",
">",
"current_time",
":",
"return",
"self",
"[",
"key",
"]",
".",
"value",
"# expired key, clean out all expired keys",
"deletes",
"=",
"[",
"]",
"for",
"k",
",",
"val",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"val",
".",
"expire",
"<=",
"current_time",
":",
"deletes",
".",
"append",
"(",
"k",
")",
"for",
"k",
"in",
"deletes",
":",
"del",
"self",
"[",
"k",
"]",
"return",
"None",
"finally",
":",
"lock",
".",
"release",
"(",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
test
|
Cache.set
|
Save an object in the cache
Arguments:
key (str): Cache key
value (object): object to cache
duration (int): time in seconds to keep object in cache
|
ecommerce_worker/cache.py
|
def set(self, key, value, duration):
"""Save an object in the cache
Arguments:
key (str): Cache key
value (object): object to cache
duration (int): time in seconds to keep object in cache
"""
lock.acquire()
try:
self[key] = CacheObject(value, duration)
finally:
lock.release()
|
def set(self, key, value, duration):
"""Save an object in the cache
Arguments:
key (str): Cache key
value (object): object to cache
duration (int): time in seconds to keep object in cache
"""
lock.acquire()
try:
self[key] = CacheObject(value, duration)
finally:
lock.release()
|
[
"Save",
"an",
"object",
"in",
"the",
"cache"
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/cache.py#L53-L66
|
[
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
",",
"duration",
")",
":",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"self",
"[",
"key",
"]",
"=",
"CacheObject",
"(",
"value",
",",
"duration",
")",
"finally",
":",
"lock",
".",
"release",
"(",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
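Taken together with get(), a hypothetical usage sketch of this record's TTL semantics; it assumes CacheObject(value, duration) records expire = time.time() + duration, which is inferred from the expire > current_time comparison in get() rather than stated here:

import time

cache = Cache()
cache.set('token', 'abc123', duration=2)    # keep for two seconds
assert cache.get('token') == 'abc123'       # fresh hit returns the value
time.sleep(2.1)
assert cache.get('token') is None           # expired entry is swept, then miss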
test
|
get_configuration
|
Get a value from configuration.
Retrieves the value corresponding to the given variable from the configuration module
currently in use by the app. Specify a site_code value to check for a site-specific override.
Arguments:
variable (str): The name of a variable from the configuration module.
Keyword Arguments:
site_code (str): The SITE_OVERRIDES key to inspect for site-specific values
Returns:
The value corresponding to the variable, or None if the variable is not found.
|
ecommerce_worker/utils.py
|
def get_configuration(variable, site_code=None):
"""
Get a value from configuration.
Retrieves the value corresponding to the given variable from the configuration module
currently in use by the app. Specify a site_code value to check for a site-specific override.
Arguments:
variable (str): The name of a variable from the configuration module.
Keyword Arguments:
site_code (str): The SITE_OVERRIDES key to inspect for site-specific values
Returns:
The value corresponding to the variable, or None if the variable is not found.
"""
name = os.environ.get(CONFIGURATION_MODULE)
# __import__ performs a full import, but only returns the top-level
# package, not the targeted module. sys.modules is a dictionary
# mapping module names to loaded modules.
__import__(name)
module = sys.modules[name]
# Locate the setting in the specified module, then attempt to apply a site-specific override
setting_value = getattr(module, variable, None)
site_overrides = getattr(module, 'SITE_OVERRIDES', None)
if site_overrides and site_code is not None:
site_specific_overrides = site_overrides.get(site_code)
if site_specific_overrides:
override_value = site_specific_overrides.get(variable)
if override_value:
setting_value = override_value
if setting_value is None:
raise RuntimeError('Worker is improperly configured: {} is unset in {}.'.format(variable, module))
return setting_value
|
def get_configuration(variable, site_code=None):
"""
Get a value from configuration.
Retrieves the value corresponding to the given variable from the configuration module
currently in use by the app. Specify a site_code value to check for a site-specific override.
Arguments:
variable (str): The name of a variable from the configuration module.
Keyword Arguments:
site_code (str): The SITE_OVERRIDES key to inspect for site-specific values
Returns:
The value corresponding to the variable, or None if the variable is not found.
"""
name = os.environ.get(CONFIGURATION_MODULE)
# __import__ performs a full import, but only returns the top-level
# package, not the targeted module. sys.modules is a dictionary
# mapping module names to loaded modules.
__import__(name)
module = sys.modules[name]
# Locate the setting in the specified module, then attempt to apply a site-specific override
setting_value = getattr(module, variable, None)
site_overrides = getattr(module, 'SITE_OVERRIDES', None)
if site_overrides and site_code is not None:
site_specific_overrides = site_overrides.get(site_code)
if site_specific_overrides:
override_value = site_specific_overrides.get(variable)
if override_value:
setting_value = override_value
if setting_value is None:
raise RuntimeError('Worker is improperly configured: {} is unset in {}.'.format(variable, module))
return setting_value
|
[
"Get",
"a",
"value",
"from",
"configuration",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/utils.py#L10-L46
|
[
"def",
"get_configuration",
"(",
"variable",
",",
"site_code",
"=",
"None",
")",
":",
"name",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"CONFIGURATION_MODULE",
")",
"# __import__ performs a full import, but only returns the top-level",
"# package, not the targeted module. sys.modules is a dictionary",
"# mapping module names to loaded modules.",
"__import__",
"(",
"name",
")",
"module",
"=",
"sys",
".",
"modules",
"[",
"name",
"]",
"# Locate the setting in the specified module, then attempt to apply a site-specific override",
"setting_value",
"=",
"getattr",
"(",
"module",
",",
"variable",
",",
"None",
")",
"site_overrides",
"=",
"getattr",
"(",
"module",
",",
"'SITE_OVERRIDES'",
",",
"None",
")",
"if",
"site_overrides",
"and",
"site_code",
"is",
"not",
"None",
":",
"site_specific_overrides",
"=",
"site_overrides",
".",
"get",
"(",
"site_code",
")",
"if",
"site_specific_overrides",
":",
"override_value",
"=",
"site_specific_overrides",
".",
"get",
"(",
"variable",
")",
"if",
"override_value",
":",
"setting_value",
"=",
"override_value",
"if",
"setting_value",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'Worker is improperly configured: {} is unset in {}.'",
".",
"format",
"(",
"variable",
",",
"module",
")",
")",
"return",
"setting_value"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
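A sketch of the lookup order this function implements, using a hypothetical configuration module; the environment variable name behind CONFIGURATION_MODULE is not shown in this record, so the name used below is an assumption. Note that when a setting resolves to None the code raises RuntimeError rather than returning None as the docstring suggests.

# settings.py -- hypothetical configuration module
ECOMMERCE_API_ROOT = 'https://ecommerce.example.com/api/v2/'
SITE_OVERRIDES = {
    'site-a': {'ECOMMERCE_API_ROOT': 'https://site-a.example.com/api/v2/'},
}

# caller (env var name assumed; use whatever CONFIGURATION_MODULE actually holds)
import os
os.environ['WORKER_CONFIGURATION_MODULE'] = 'settings'
get_configuration('ECOMMERCE_API_ROOT')                      # base value
get_configuration('ECOMMERCE_API_ROOT', site_code='site-a')  # site override wins
get_configuration('MISSING_SETTING')                         # raises RuntimeError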
test
|
get_ecommerce_client
|
Get client for fetching data from ecommerce API.
Arguments:
site_code (str): (Optional) The SITE_OVERRIDES key to inspect for site-specific values
url_postfix (str): (Optional) The URL postfix value to append to the ECOMMERCE_API_ROOT value.
Returns:
EdxRestApiClient object
|
ecommerce_worker/utils.py
|
def get_ecommerce_client(url_postfix='', site_code=None):
"""
Get client for fetching data from ecommerce API.
Arguments:
site_code (str): (Optional) The SITE_OVERRIDES key to inspect for site-specific values
url_postfix (str): (Optional) The URL postfix value to append to the ECOMMERCE_API_ROOT value.
Returns:
EdxRestApiClient object
"""
ecommerce_api_root = get_configuration('ECOMMERCE_API_ROOT', site_code=site_code)
signing_key = get_configuration('JWT_SECRET_KEY', site_code=site_code)
issuer = get_configuration('JWT_ISSUER', site_code=site_code)
service_username = get_configuration('ECOMMERCE_SERVICE_USERNAME', site_code=site_code)
return EdxRestApiClient(
ecommerce_api_root + url_postfix, signing_key=signing_key, issuer=issuer, username=service_username)
|
def get_ecommerce_client(url_postfix='', site_code=None):
"""
Get client for fetching data from ecommerce API.
Arguments:
site_code (str): (Optional) The SITE_OVERRIDES key to inspect for site-specific values
url_postfix (str): (Optional) The URL postfix value to append to the ECOMMERCE_API_ROOT value.
Returns:
EdxRestApiClient object
"""
ecommerce_api_root = get_configuration('ECOMMERCE_API_ROOT', site_code=site_code)
signing_key = get_configuration('JWT_SECRET_KEY', site_code=site_code)
issuer = get_configuration('JWT_ISSUER', site_code=site_code)
service_username = get_configuration('ECOMMERCE_SERVICE_USERNAME', site_code=site_code)
return EdxRestApiClient(
ecommerce_api_root + url_postfix, signing_key=signing_key, issuer=issuer, username=service_username)
|
[
"Get",
"client",
"for",
"fetching",
"data",
"from",
"ecommerce",
"API",
".",
"Arguments",
":",
"site_code",
"(",
"str",
")",
":",
"(",
"Optional",
")",
"The",
"SITE_OVERRIDES",
"key",
"to",
"inspect",
"for",
"site",
"-",
"specific",
"values",
"url_postfix",
"(",
"str",
")",
":",
"(",
"Optional",
")",
"The",
"URL",
"postfix",
"value",
"to",
"append",
"to",
"the",
"ECOMMERCE_API_ROOT",
"value",
"."
] |
edx/ecommerce-worker
|
python
|
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/utils.py#L49-L64
|
[
"def",
"get_ecommerce_client",
"(",
"url_postfix",
"=",
"''",
",",
"site_code",
"=",
"None",
")",
":",
"ecommerce_api_root",
"=",
"get_configuration",
"(",
"'ECOMMERCE_API_ROOT'",
",",
"site_code",
"=",
"site_code",
")",
"signing_key",
"=",
"get_configuration",
"(",
"'JWT_SECRET_KEY'",
",",
"site_code",
"=",
"site_code",
")",
"issuer",
"=",
"get_configuration",
"(",
"'JWT_ISSUER'",
",",
"site_code",
"=",
"site_code",
")",
"service_username",
"=",
"get_configuration",
"(",
"'ECOMMERCE_SERVICE_USERNAME'",
",",
"site_code",
"=",
"site_code",
")",
"return",
"EdxRestApiClient",
"(",
"ecommerce_api_root",
"+",
"url_postfix",
",",
"signing_key",
"=",
"signing_key",
",",
"issuer",
"=",
"issuer",
",",
"username",
"=",
"service_username",
")"
] |
55246961d805b1f64d661a5c0bae0a216589401f
|
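A hedged usage sketch for this record; it assumes the slumber-style attribute chaining that edx-rest-api-client's EdxRestApiClient exposes, and the order number below is hypothetical:

client = get_ecommerce_client(site_code='site-a')
order = client.orders('EDX-100001').get()  # GET <ECOMMERCE_API_ROOT>orders/EDX-100001/

# url_postfix is concatenated verbatim onto ECOMMERCE_API_ROOT, so the caller is
# responsible for the slashes on either side of the join:
coupons_client = get_ecommerce_client(url_postfix='coupons/', site_code='site-a')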