repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
PIRA | PIRA-master/test/integration/GameOfLife/functors/no_instr_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
  """ Returns the shell command that builds the GameOfLife target without instrumentation. """
  build_command = 'CXX=clang++ make gol'
  return build_command


def active(benchmark, **kwargs):
  """ Active mode is not supported by this functor. """
| 172 | 14.727273 | 43 | py |
PIRA | PIRA-master/test/inputs/functors/runner_test_item_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
  """ Returns the marker string used by the runner test. """
  marker = 'Runner'
  return marker


def active(benchmark, **kwargs):
  """ Active mode is not supported by this functor. """
| 158 | 13.454545 | 43 | py |
PIRA | PIRA-master/test/inputs/functors/clean_test_item_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
  """ Returns the marker string used by the clean test. """
  marker = 'Clean'
  return marker


def active(benchmark, **kwargs):
  """ Active mode is not supported by this functor. """
| 157 | 13.363636 | 43 | py |
PIRA | PIRA-master/test/inputs/functors/test_item_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
  """ Returns the marker string used by the item test. """
  marker = 'Test_Item'
  return marker


def active(benchmark, **kwargs):
  """ Active mode is not supported by this functor. """
| 161 | 13.727273 | 43 | py |
PIRA | PIRA-master/test/inputs/functors/analyze_test_item_ct.py |
# Module-level marker flipped to 1 once the passive functor was invoked.
temporary_it = 0


def get_method():
  """ Declares that this functor supports passive mode only. """
  return {'passive': True, 'active': False}


def passive(benchmark, **kwargs):
  """ Records the invocation in temporary_it and returns the command to run. """
  global temporary_it
  temporary_it = 1
  return 'echo '


def active(benchmark, **kwargs):
  """ Active mode is not supported by this functor. """


def get_it():
  """ Returns the invocation marker (0 before, 1 after a passive call). """
  # Reading a module-level name needs no 'global' declaration.
  return temporary_it
| 276 | 12.85 | 43 | py |
PIRA | PIRA-master/test/inputs/functors/no_instr_test_item_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
  """ Returns the marker string used by the no-instrumentation test. """
  marker = 'No_Instr'
  return marker


def active(benchmark, **kwargs):
  """ Active mode is not supported by this functor. """
| 160 | 13.636364 | 43 | py |
PIRA | PIRA-master/lib/db.py | """
File: db.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: --
"""
import sqlite3 as db
import lib.Logging as log
class database:
  """ Thin wrapper around the SQLite database used to persist PIRA run data. """

  def __init__(self, conf_db_name):
    """ Opens a connection to the given SQLite database file.

    :conf_db_name: str: path to the database file (or ':memory:')
    """
    try:
      self.conn = db.connect(conf_db_name)
    except Exception as e:
      log.get_logger().log(str(e), level='warn')
      # Keep the attribute defined so later access fails with an explicit None
      # instead of an AttributeError. (The previous 'return None' in __init__
      # was a no-op and left self.conn undefined.)
      self.conn = None

  def create_cursor(self, conn):
    """ Returns a new cursor for the given connection, or None on failure. """
    try:
      cursor = conn.cursor()
      return cursor
    except Exception as e:
      log.get_logger().log(str(e), level='warn')

  def create_table(self, cursor, table_name):
    """ Executes the given CREATE TABLE statement.

    NOTE: despite its name, 'table_name' is a complete SQL statement; the
    parameter name is kept for backward compatibility with keyword callers.
    """
    try:
      cursor.execute(table_name)
    except Exception as e:
      log.get_logger().log(str(e), level='warn')

  def _insert(self, cursor, sql, values):
    """ Executes a parameterized INSERT and commits the transaction. """
    cursor.execute(sql, values)
    self.conn.commit()

  def insert_data_application(self, cursor, values):
    """ Inserts one row into the Application table. """
    sql = ''' INSERT INTO Application(AppID,App_Name,Global_Flavor,Global_Submitter)
              VALUES(?,?,?,?) '''
    self._insert(cursor, sql, values)

  def insert_data_builds(self, cursor, values):
    """ Inserts one row into the Builds table. """
    sql = ''' INSERT INTO Builds(BuildID,Build_Name,Prefix,Flavors,AppName)
              VALUES(?,?,?,?,?) '''
    self._insert(cursor, sql, values)

  def insert_data_items(self, cursor, values):
    """ Inserts one row into the Items table. """
    sql = ''' INSERT INTO Items(ItemID,Item_Name,Inst_Analysis_Functor_Path,Builders_Funtor_Path,Run_Args,Runner_Functor_Path,Submitter_Functor_Path,Exp_Data_Dir_Base_Path,BuildName)
              VALUES(?,?,?,?,?,?,?,?,?) '''
    self._insert(cursor, sql, values)

  def insert_data_experiment(self, cursor, values):
    """ Inserts one row into the Experiment table. """
    sql = ''' INSERT INTO Experiment(Experiment_ID,BenchmarkName,Iteration_No,IsWithInstrumentation,CubeFilePath,Runtime,Item_ID)
              VALUES(?,?,?,?,?,?,?) '''
    self._insert(cursor, sql, values)
| 2,057 | 32.193548 | 182 | py |
PIRA | PIRA-master/lib/Measurement.py | """
File: Measurement.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module hosts measurement support infrastructure.
"""
import lib.Utility as U
import lib.Logging as L
import lib.DefaultFlags as D
from lib.Configuration import PiraConfig, TargetConfig, InstrumentConfig, InvocationConfig
from lib.Exception import PiraException
import typing
import os
import re
import statistics as stat
class MeasurementSystemException(PiraException):
  """ Raised when problems occur within the measurement runtime system. """

  def __init__(self, message):
    PiraException.__init__(self, message)
class RunResultSeries:
  """ Stores the individual runtime values of repeated measurement runs.

  rt_values is laid out as consecutive groups of `reps` repetitions, one group
  per input data set, e.g. with 2 data sets and 3 repetitions: [ 1 1 1, 2 2 2 ].
  One RunResultSeries object exists per iteration / phase.
  """

  def __init__(self, rt: float = None, reps: int = None, num_data_sets: int = 1):
    # `rt` is accepted for interface compatibility only; values are added via add_values.
    self.rt_values = []
    self.reps = reps
    self.num_data_sets = num_data_sets

  def is_multi_value(self):
    return False

  def add_values(self, rt: float, reps: int) -> None:
    """ Appends one runtime value; reps must equal the configured repetition count. """
    assert (reps == self.reps)
    self.rt_values.append(rt)

  def add_from(self, other) -> None:
    """ Appends all runtime values from another series with matching reps. """
    assert (other.reps == self.reps)
    self.rt_values.extend(other.rt_values)

  def get_num_data_sets(self) -> int:
    return self.num_data_sets

  def get_average(self, pos: int = 0, data_set: int = 0) -> float:
    """ Returns the mean of the repetition group starting at pos for data_set.

    :raises RuntimeError: if data_set is out of bounds.
    """
    # 1 data set and three repetitions
    # [ 1 1 1 ]
    # 2 data sets and three repetitions
    # [ 1 1 1, 2 2 2 ]
    # data_set is zero-based, hence data_set == num_data_sets is already out of
    # bounds (the previous check used '>' and let that case slip through).
    if data_set >= self.num_data_sets:
      raise RuntimeError('Trying to access out-of-bounds data set')
    start_idx = pos + data_set * self.reps
    end_idx = start_idx + self.reps
    L.get_logger().log('Computing mean for values: ' + str(self.rt_values[start_idx:end_idx]) +
                       ' [pos: ' + str(pos) + ' | data_set: ' + str(data_set) + ' => start_idx: ' +
                       str(start_idx) + ' <> ' + str(end_idx))
    return stat.mean(self.rt_values[start_idx:end_idx])

  def get_median(self, pos: int = 0, data_set: int = 0) -> float:
    """ Returns the median of the repetition group starting at pos for data_set. """
    start_idx = pos + data_set * self.reps
    end_idx = start_idx + self.reps
    L.get_logger().log('Computing median for values: ' + str(self.rt_values[start_idx:end_idx]))
    return stat.median(self.rt_values[start_idx:end_idx])

  def get_stdev(self, pos: int = 0, data_set: int = 0) -> float:
    """ Returns the standard deviation of the repetition group; 0.0 for a single value. """
    # prevent calculation of stdev on a single data point (self.reps == 1)
    if self.reps > 1:
      start_idx = pos + data_set * self.reps
      end_idx = start_idx + self.reps
      L.get_logger().log('Computing stdev for values: ' + str(self.rt_values[start_idx:end_idx]))
      return stat.stdev(self.rt_values[start_idx:end_idx])
    else:
      return 0.0

  def compute_overhead(self, base_line, pos: int = 0, data_set: int = 0) -> float:
    """ Returns the relative overhead over base_line's median (0 means no overhead).

    NOTE(review): the data_set parameter is currently unused — confirm intended.
    """
    L.get_logger().log('Computing overhead in RunResultSeries')
    base_median = base_line.get_median()
    if base_median == .0:
      # Avoid division by zero for an (unexpected) zero-second baseline.
      L.get_logger().log('Detected 0 seconds baseline. Setting baseline median to 1.0',
                         level='warn')
      base_median = 1.0
    return (self.get_median(pos) / base_median) - 1

  def get_all_averages(self) -> typing.List[float]:
    """ Returns the mean of every complete group of `reps` consecutive values.

    :raises RuntimeError: if the stored values do not form complete groups.
    """
    if len(self.rt_values) % self.reps != 0:
      raise RuntimeError('number of runtime values must be cleanly divisable by num reps.')
    # One average per group; the previously computed (and unused) float group
    # count has been dropped.
    return [self.get_average(i) for i in range(0, len(self.rt_values), self.reps)]

  def compute_all_overheads(self) -> typing.List[float]:
    """ Not implemented for RunResultSeries.

    Raises NotImplementedError instead of 'assert False', which is silently
    stripped when Python runs with -O.
    """
    raise NotImplementedError('RunResultSeries::compute_all_overheads is not implemented')

  def get_accumulated_runtime(self):
    """ Returns the sum of all stored runtime values (0.0 when empty). """
    return sum(self.rt_values, .0)

  def get_nr_of_repetitions(self):
    return self.reps
class RunResult:
  """ Holds the result of a measurement execution with potentially multiple iterations. """

  def __init__(self,
               accumulated_runtime: float = None,
               nr_of_iterations: int = None,
               rt_trace=None):
    """ Initializes the result holder.

    :accumulated_runtime: total runtime of the first measurement, if any
    :nr_of_iterations: iteration count of the first measurement, if any
    :rt_trace: optional runtime trace of the first measurement
    """
    # Each list holds one entry per recorded measurement.
    self._accumulated_runtime = [] if accumulated_runtime is None else [accumulated_runtime]
    self._nr_of_iterations = [] if nr_of_iterations is None else [nr_of_iterations]
    self._rt_trace = [] if rt_trace is None else [rt_trace]

  def is_multi_value(self):
    """ True iff more than one measurement has been recorded. """
    return len(self._accumulated_runtime) > 1

  def add_values(self, accu_rt: float, nr_iter: int) -> None:
    """ Records an additional measurement (no trace available for added values). """
    self._accumulated_runtime.append(accu_rt)
    self._nr_of_iterations.append(nr_iter)
    self._rt_trace.append(None)

  def add_from(self, other) -> None:
    """ Copies all measurements from another RunResult into this one. """
    for (accu, iters) in zip(other._accumulated_runtime, other._nr_of_iterations):
      self._accumulated_runtime.append(accu)
      self._nr_of_iterations.append(iters)

  def get_average(self, pos: int = 0) -> float:
    """ Returns runtime / iterations of the measurement at index pos.

    :raises RuntimeError: if no measurements have been recorded.
    """
    # The old check compared the list against 0 (always False) and carried
    # unreachable code after the raise; fail explicitly on an empty result.
    if not self._nr_of_iterations:
      L.get_logger().log('Calculating average based on 0 repetitions impossible.', level='warn')
      raise RuntimeError('Calculating average based on 0 repetitions impossible.')
    return self._accumulated_runtime[pos] / self._nr_of_iterations[pos]

  def compute_overhead(self, base_line, pos: int = 0) -> float:
    """ Returns the ratio of this average to the baseline average at pos.

    A zero baseline average is replaced by 1 to avoid division by zero.
    """
    base_line_avg = base_line.get_average(pos)
    if base_line_avg == 0:
      base_line_avg = 1
    return self.get_average(pos) / base_line_avg

  def get_all_averages(self) -> typing.List[float]:
    """ Returns the average runtime of every recorded measurement. """
    return [rt / ni for (rt, ni) in zip(self._accumulated_runtime, self._nr_of_iterations)]

  def compute_all_overheads(self, base_line: typing.List) -> typing.List[float]:
    """ Returns element-wise overhead ratios against the baseline averages. """
    return [
        this_avg / other_avg
        for (this_avg, other_avg) in zip(self.get_all_averages(), base_line.get_all_averages())
    ]

  def get_accumulated_runtime(self):
    """ Returns the list of accumulated runtimes. """
    return self._accumulated_runtime

  def get_nr_of_iterations(self):
    """ Returns the list of iteration counts. """
    return self._nr_of_iterations
class ScorepSystemHelper:
  """ Takes care of setting necessary environment variables appropriately. """
  def __init__(self, config: PiraConfig) -> None:
    # File extensions this helper knows about.
    self.known_files = ['.cubex']
    self.config = config
    # Generic key/value storage, e.g. 'cube_dir' for the database.
    self.data = {}
    # The fields below mirror the Score-P environment variables set by this class.
    self.cur_mem_size = ''
    self.cur_exp_directory = ''
    self.cur_overwrite_exp_dir = 'False'
    self.cur_base_name = ''
    self.cur_filter_file = ''
    self._enable_unwinding = 'False'
    self._MPI_filter_so_path = ''
  def get_data_elem(self, key: str):
    """ Returns the stored value for key, or '' (and logs a message) if unknown. """
    try:
      if key in self.data.keys():
        return self.data[key]
    except KeyError:
      pass
    L.get_logger().log('Key ' + key + ' was not found in ScorepSystemHelper')
    return ''
  def set_up(self,
             target_config: TargetConfig,
             instrumentation_config: InstrumentConfig,
             parameter_mapping=None) -> None:
    """ Prepares the Score-P environment for a measurement run.

    Sets up the runtime filter file (unless compile-time filtering is active)
    and delegates to _set_up for the experiment directory and env variables.
    """
    compile_time_filter = InvocationConfig.get_instance().is_compile_time_filtering()
    if not compile_time_filter:
      # Runtime filtering: Score-P needs a filter file at measurement time.
      scorep_filter_file = self.prepare_scorep_filter_file(target_config.get_instr_file())
      self.set_filter_file(scorep_filter_file)
    self._set_up(target_config.get_build(), target_config.get_target(), target_config.get_flavor(),
                 instrumentation_config.get_instrumentation_iteration(),
                 instrumentation_config.is_instrumentation_run(), parameter_mapping)
  def prepare_scorep_filter_file(self, filter_file: str) -> None:
    '''
    Prepares the file that Score-P uses to include or exclude.
    NOTE: The filter_file is a positive list! We want to include these functions!
    '''
    file_dir = U.get_base_dir(filter_file)
    if InvocationConfig.get_instance().is_hybrid_filtering():
      # Hybrid filtering: the file content is used as-is, only arrow lines are stripped.
      U.remove_arrow_lines(filter_file)
      file_content = U.read_file(filter_file)
      scorep_filter_file_content = file_content
      scorep_filter_file_name = file_dir + '/scorep_filter_file.txt'
    else:
      # Wrap the raw function list in the Score-P filter-file header/footer.
      file_content = U.read_file(filter_file)
      scorep_filter_file_content = self.append_scorep_footer(
          self.prepend_scorep_header(file_content))
      scorep_filter_file_name = file_dir + '/scorep_filter_file.txt'
    U.write_file(scorep_filter_file_name, scorep_filter_file_content)
    return scorep_filter_file_name
  def _set_up(self, build, item, flavor, it_nr, is_instr_run, param_mapping=None) -> None:
    """ Creates the experiment directory and sets all Score-P env variables for one run. """
    L.get_logger().log('ScorepSystemHelper::_set_up: is_instr_run: ' + str(is_instr_run),
                       level='debug')
    if not is_instr_run:
      # Vanilla (uninstrumented) runs need no Score-P environment.
      return
    exp_dir = self.config.get_analyzer_exp_dir(build, item)
    # for batch system scaling experiments: Append parameter mapping to differentiate between jobs
    if param_mapping is not None:
      exp_dir = f"{exp_dir}-{param_mapping}"
    L.get_logger().log('ScorepSystemHelper::_set_up: Retrieved analyzer experiment directory: ' +
                       exp_dir,
                       level='debug')
    effective_dir = U.get_cube_file_path(exp_dir, flavor, it_nr)
    if not U.check_provided_directory(effective_dir):
      L.get_logger().log(
          'ScorepSystemHelper::_set_up: Experiment directory does not exist. \nCreating path: ' +
          effective_dir,
          level='debug')
      U.create_directory(effective_dir)
    # The db path is stored for later retrieval via get_data_elem('cube_dir').
    db_exp_dir = U.build_cube_file_path_for_db(exp_dir, flavor, it_nr)
    self.data['cube_dir'] = db_exp_dir
    self.set_exp_dir(exp_dir, flavor, it_nr)
    self.set_memory_size('500M')
    self.set_overwrite_exp_dir()
    self.set_profiling_basename(flavor, build, item)
    # TODO WHEN FIXED: FOR NOW LET'S ENABLE UNWINDING
    # self.set_enable_unwinding(self)
  def set_memory_size(self, mem_str: str) -> None:
    """ Sets SCOREP_TOTAL_MEMORY to the given size string (e.g. '500M'). """
    self.cur_mem_size = mem_str
    U.set_env('SCOREP_TOTAL_MEMORY', self.cur_mem_size)
  def set_profiling_basename(self, flavor: str, base: str, item: str) -> None:
    """ Sets SCOREP_PROFILING_BASE_NAME to '<flavor>-<item>'. """
    # NOTE(review): the 'base' parameter is currently unused — confirm whether intended.
    self.cur_base_name = flavor + '-' + item
    U.set_env('SCOREP_PROFILING_BASE_NAME', self.cur_base_name)
  def set_exp_dir(self, exp_dir: str, flavor: str, iterationNumber: int) -> None:
    """ Sets SCOREP_EXPERIMENT_DIRECTORY to the cube path for this iteration.

    :raises MeasurementSystemException: if the resulting path is not a valid name.
    """
    effective_dir = U.get_cube_file_path(exp_dir, flavor, iterationNumber)
    if not U.is_valid_file_name(effective_dir):
      raise MeasurementSystemException('Score-p experiment directory invalid.')
    self.cur_exp_directory = effective_dir
    U.set_env('SCOREP_EXPERIMENT_DIRECTORY', self.cur_exp_directory)
    return
  def get_exp_dir(self) -> str:
    """ Returns the experiment directory; set_exp_dir must have been called before. """
    assert (self.cur_exp_directory != '')
    return self.cur_exp_directory
  def set_overwrite_exp_dir(self) -> None:
    """ Allows Score-P to overwrite an existing experiment directory. """
    self.cur_overwrite_exp_dir = 'True'
    U.set_env('SCOREP_OVERWRITE_EXPERIMENT_DIRECTORY', self.cur_overwrite_exp_dir)
  def set_enable_unwinding(self) -> None:
    """ Enables stack unwinding via SCOREP_ENABLE_UNWINDING. """
    self._enable_unwinding = 'True'
    U.set_env('SCOREP_ENABLE_UNWINDING', self._enable_unwinding)
  def set_filter_file(self, file_name: str) -> None:
    """ Sets SCOREP_FILTERING_FILE for runtime filtering.

    :raises MeasurementSystemException: if the file name is not valid.
    """
    L.get_logger().log('ScorepMeasurementSystem::set_filter_file: File for runtime filtering = ' +
                       file_name)
    if not U.is_valid_file_name(file_name):
      raise MeasurementSystemException('Score-P filter file not valid.')
    self.cur_filter_file = file_name
    U.set_env('SCOREP_FILTERING_FILE', self.cur_filter_file)
  def append_scorep_footer(self, input_str: str) -> str:
    """ Appends the closing marker of a Score-P filter file. """
    return input_str + '\nSCOREP_REGION_NAMES_END\n'
  def prepend_scorep_header(self, input_str: str) -> str:
    """ Prepends the Score-P filter header: exclude everything, include the listed names. """
    line = 'SCOREP_REGION_NAMES_BEGIN\nEXCLUDE *\nINCLUDE MANGLED '
    return line + input_str
  @classmethod
  def get_config_libs(cls) -> str:
    """ Returns the scorep-config invocation (shell backticks) yielding the link libraries. """
    return '`scorep-config --nomemory --libs`'
  @classmethod
  def get_config_ldflags(cls) -> str:
    """ Returns the scorep-config invocation (shell backticks) yielding the linker flags. """
    return '`scorep-config --nomemory --ldflags`'
  @classmethod
  def get_additional_libs(cls) -> str:
    """ Returns additional Score-P adapter libraries needed at link time. """
    return '-lscorep_adapter_memory_mgmt -lscorep_alloc_metric'
  @classmethod
  def get_instrumentation_flags(cls, instr_file: str) -> str:
    """ Returns the compiler flags enabling instrumentation; when compile-time
    filtering is active, the selection file is appended. """
    default_provider = D.BackendDefaults()
    flags = default_provider.get_default_instrumentation_flag() + ' '
    compile_time_filter = InvocationConfig.get_instance().is_compile_time_filtering()
    if compile_time_filter:
      flags += default_provider.get_default_instrumentation_selection_flag() + '=' + instr_file
    return flags
  @classmethod
  def get_scorep_compliant_CC_command(cls, instr_file: str) -> str:
    """ Returns instrumentation flags for the C compiler.

    :instr_file: str: The file name to use for filtering
    :compile_time_filter: bool: Should compile-time filtering be used (default)
    """
    default_provider = D.BackendDefaults()
    L.get_logger().log('ScorepSystemHelper::get_scorep_compliant_CC_command: ', level='debug')
    cc_str = default_provider.get_default_c_compiler_name() + ' ' + cls.get_instrumentation_flags(
        instr_file)
    return '\"' + cc_str + '\"'
  @classmethod
  def get_scorep_compliant_CXX_command(cls, instr_file: str) -> str:
    """ Returns instrumentation flags for the C++ compiler.

    :instr_file: str: The file name to use for filtering
    :compile_time_filter: bool: Should compile-time filtering be used (default)
    """
    default_provider = D.BackendDefaults()
    cxx_str = default_provider.get_default_cpp_compiler_name(
    ) + ' ' + cls.get_instrumentation_flags(instr_file)
    return '\"' + cxx_str + '\"'
  @classmethod
  def get_scorep_needed_libs_c(cls) -> str:
    """ Returns the quoted library string to link a C target against Score-P. """
    return '\" scorep.init.o ' + cls.get_config_libs() + ' ' + cls.get_config_ldflags(
    ) + ' ' + cls.get_additional_libs() + '\"'
  @classmethod
  def get_scorep_needed_libs_cxx(cls) -> str:
    """ Returns the quoted library string to link a C++ target against Score-P. """
    return '\" scorep.init.o ' + cls.get_config_libs() + ' ' + cls.get_config_ldflags(
    ) + ' -lscorep_adapter_memory_event_cxx_L64 ' + cls.get_additional_libs() + '\"'
  @classmethod
  def check_build_prerequisites(cls) -> None:
    """ Ensures scorep.init.c is present in the cwd and compiles it with gcc.

    :raises MeasurementSystemException: if the file cannot be provided.
    """
    scorep_init_file_name = 'scorep.init.c'
    L.get_logger().log('ScorepMeasurementSystem::check_build_prerequisites: global home dir: ' +
                       U.get_home_dir())
    pira_scorep_resource = U.get_home_dir() + '/resources/scorep.init.c'
    if not U.is_file(scorep_init_file_name):
      U.copy_file(pira_scorep_resource, U.get_cwd() + '/' + scorep_init_file_name)
    # In case something goes wrong with copying
    if U.is_file(scorep_init_file_name):
      U.shell('gcc -c ' + scorep_init_file_name)
    else:
      raise MeasurementSystemException(
          'ScorepMeasurementSystem::check_build_prerequisites: Missing ' + scorep_init_file_name)
  @classmethod
  def prepare_MPI_filtering(cls, filter_file: str) -> None:
    """ Builds the MPI wrapper shared object that filters out all MPI functions
    NOT listed in the (whitelist) filter file, using the wrap.py generator. """
    # Find which MPI functions to filter
    # Get all MPI functions (our filter_file is a WHITELIST)
    default_provider = D.BackendDefaults()
    mpi_funcs_dump = os.path.join(default_provider.instance.get_pira_dir(), 'mpi_funcs.dump')
    U.shell('wrap.py -d > ' + mpi_funcs_dump)
    all_MPI_functions_decls = U.read_file(mpi_funcs_dump).split('\n')
    all_MPI_functions = []
    for fd in all_MPI_functions_decls:
      # The function name sits between the return type (first space) and the '('.
      name = fd[fd.find(' '):fd.find('(')]
      all_MPI_functions.append(name.strip())
    MPI_functions_to_filter = []
    file_content = U.read_file(filter_file).split('\n')
    # We always want to measure some functions to ensure Score-P works correctly
    always_measure = [
        'MPI_Init', 'MPI_Finalize', 'MPI_Comm_group', 'MPI_Comm_dup', 'MPI_Comm_create_group',
        'MPI_Comm_split', 'MPI_Comm_free', 'MPI_Group_free'
    ]
    file_content.extend(always_measure)
    for l in file_content:
      # Match MPI functions which have been marked for instrumentation
      # Example: (MPI_Barrier is representative for all MPI functions here)
      # MPI_Barrier => match to 'MPI_Barrier'
      # SomeFunction => no match
      # INCLUDE MPI_Barrier => match to 'MPI_Barrier'
      # INCLUDE SomeFunction => no match
      # INCLUDE SomeFunction -> MPI_Barrier => match to 'MPI_Barrier'
      # INCLUDE MPI_Barrier -> SomeFunction => no match
      match_object = re.match(r'^.*(MPI_\S+)\s*?$', l)
      if match_object:
        mpi_func_name = match_object.group(1)
        L.get_logger().log('ScorepSystemHelper::prepare_MPI_filtering: Remove ' + mpi_func_name)
        # prevent double removal
        if mpi_func_name in all_MPI_functions:
          all_MPI_functions.remove(mpi_func_name)
    # Generate the .c file using the mpi wrap.py script
    L.get_logger().log('ScorepSystemHelper::prepare_MPI_filtering: About to filter ' +
                       str(len(all_MPI_functions)) + ' MPI functions')
    wrap_script = '{{fn PIRA_Filter'
    for mpi_func in all_MPI_functions:
      wrap_script += ' ' + mpi_func
    wrap_script += '}}\n{{callfn}}\n{{endfn}}'
    default_provider = D.BackendDefaults()
    wrap_file = default_provider.get_wrap_w_file()
    if U.check_file(wrap_file):
      U.remove_file(wrap_file)
    U.write_file(wrap_file, wrap_script)
    wrap_c_path = default_provider.get_wrap_c_file()
    wrap_command = 'wrap.py -o ' + wrap_c_path + ' ' + wrap_file
    U.shell(wrap_command)
    # Compile it to .so file
    compile_mpi_wrapper_command = 'mpicc -shared -fPIC -o ' + default_provider.get_wrap_so_file(
    ) + ' ' + wrap_c_path
    U.shell(compile_mpi_wrapper_command)
| 17,626 | 37.740659 | 138 | py |
PIRA | PIRA-master/lib/BatchSystemGenerator.py | """
File: BatchSystemGenerator.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description:
"""
import math
import os
import subprocess
import time
from typing import Optional, List, Union, Dict
import lib.Logging as L
import lib.Utility as U
from lib.Exception import PiraException
from lib.Configuration import BatchSystemHardwareConfig, SlurmConfig
# Exceptions:
class SlurmBaseException(PiraException):
  """
  Common base of all SLURM-related exceptions.
  """

  def __init__(self, message):
    PiraException.__init__(self, message)

  def __str__(self):
    return self._message
class ModuleDependencyConflict(SlurmBaseException):
  """
  Signals a dependency conflict between module-system modules.
  """

  def __init__(self, message):
    SlurmBaseException.__init__(self, message)
class ScriptNotFoundException(SlurmBaseException):
  """
  Signals that a referenced script could not be found on disk.
  """

  def __init__(self, message):
    SlurmBaseException.__init__(self, message)
class CommandExecutionException(SlurmBaseException):
  """
  Exception for execution of commands.
  """

  def __init__(self, command, invalid=False, non_zero=True):
    """
    :param command: The command that failed.
    :param invalid: True if the command was not found.
    :param non_zero: True if the command returned a non-zero exit code.
    """
    if invalid:
      super().__init__(f"Error while command execution of '{command}': Command not found.")
    elif non_zero:
      super().__init__(
          f"Error while command execution of '{command}': Returned non-zero exit code.")
    else:
      # Fallback: previously, invalid=False together with non_zero=False left
      # the base initializer uncalled and the exception without a message.
      super().__init__(f"Error while command execution of '{command}'.")
# Helper enum class
class MailType:
  """
  Enum-like collection of SLURM mail notification types
  (legal values for the sbatch --mail-type option).
  """
  BEGIN, END, FAIL, ALL, NONE = "BEGIN", "END", "FAIL", "ALL", "NONE"
# Helper classes
class _Module:
"""
Helper class for module system modules.
"""
def __init__(self,
name: str,
version: Optional[str] = None,
depends_on: Optional[List[str]] = None) -> None:
"""
Constructor.
:param name: The modules name.
:param version: The modules version.
:param depends_on: List of module names the modules depends on. These will be loaded bevor this module.
"""
self.name = name
self.version = version
self.depends_on = depends_on
def check_dependency_conflicts(self, other_modules) -> None:
"""
Checks if there are circular conflicts in dependencies. It only checks on circular imports of two modules.
:param other_modules: List of other modules to check with.
:return: void, raises ModuleDependencyConflict exception if a conflict was detected.
"""
if self.depends_on:
for dependency in self.depends_on:
for compare_module in other_modules:
# if compare module in general is a dependency, or the compare module in its version.
if compare_module.name == dependency or \
(compare_module.version is not None and compare_module.name + "/" + compare_module.version == dependency):
if compare_module.depends_on:
# if either general module or module with version depends back on this module
if self.name in compare_module.depends_on or \
(self.version is not None and self.name + "/" + self.version in compare_module.depends_on):
L.get_logger().log(
f"_Module::check_dependency_conflicts: Dependency conflict: Module {self.name} "
f"depends on {dependency}, but {compare_module.name} "
f"depends on {self.name}.",
level="error")
raise ModuleDependencyConflict(
f"Dependency conflict: Module {self.name} depends on "
f"{dependency}, but {compare_module.name} "
f"depends on {self.name}.")
def deps_satisfied_with(self, others) -> bool:
"""
Return if these modules dependencies are satisfied with the given list of other ones.
:param others: List of modules to compare to.
"""
# If this module has no dependencies it is satisfied with any other list of modules
if not self.depends_on:
return True
all_fulfilled = True
for dependency in self.depends_on:
this_fulfilled = False
for other in others:
if "/" in dependency:
if other.version and dependency == f"{other.name}/{other.version}":
this_fulfilled = True
else:
if dependency == other.name:
this_fulfilled = True
if not this_fulfilled:
all_fulfilled = False
return all_fulfilled
def __str__(self):
"""
Return the module as string.
That is either <name>, or if version given <name/version>.
"""
if self.version is not None:
return f"{self.name}/{self.version}"
else:
return self.name
class BatchSystemGenerator:
  """
  Common base class of all batch system / workload manager generators.

  A generator receives the configuration for the batch system and prepares
  everything required — per the workload manager's specification — to dispatch
  a job to it. For example, the SLURM workload manager is handled by the
  SlurmGenerator subclass, which produces the arguments, options and scripts
  used with SLURM.
  """

  def __init__(self, config: BatchSystemHardwareConfig) -> None:
    """
    Constructor.
    :param config: The batch system configuration to generate from.
    """
    self.config = config
class SlurmGenerator(BatchSystemGenerator):
"""
Generates options, arguments and scripts for the SLURM workload manager. It mainly offers:
- Generating sbatch options for the command line in various formats.
- Generating a sbatch alike option dict to be used with pyslurm
- Generating and saving a slurm script to disk.
- Passively generating and returning, or actively sbatch-ing the options to the machine.
- to do the same with the srun command for interactive jobs.
- Checking with squeue if your job is finished.
- Support of a module system, with module dependencies and sorting.
- Convenience options like checking the output directories exist.
"""
def __init__(self, slurm_config: SlurmConfig) -> None:
"""
Constructor.
"""
super().__init__(slurm_config)
self.job_id = None
self.modules = []
self.commands = []
self.slurm_options = None
def add_module(self,
name: str,
version: str = None,
depends_on: Optional[List[str]] = None) -> None:
"""
Adds a module to be loaded from the module system. If this module (same name) was already added, this will
only override the version, or do nothing.
This is just like adding a commands "module load <name>",
just for more convince on systems with a module system in place.
:param name: The name of the module to load.
:param version: The version of the module. If not give it will be loaded without a specific version,
therefore it will be the default version of the module system.
:param depends_on: List of module names the modules depends on. These will be loaded bevor this module.
:return: void.
"""
module = _Module(name, version, depends_on)
try:
module.check_dependency_conflicts(self.modules)
except ModuleDependencyConflict as e:
print(e)
raise RuntimeError(e)
self.modules.append(module)
def add_modules(self) -> None:
"""
Wrapper for add_module. Will call add_module for
every module that comes with the SlurmConfiguration.
"""
for mod in self.config.modules:
depends_on = None
if "depends_on" in mod and mod["depends_on"] is not None:
depends_on = []
for dep in mod["depends_on"]:
d = dep["name"]
if "version" in dep:
d = f"{d}/{dep['version']}"
depends_on.append(d)
if "version" in mod:
self.add_module(mod["name"], mod["version"], depends_on)
else:
self.add_module(mod["name"], depends_on=depends_on)
  def load_modules(self):
    """
    Actively loads all modules on the machine. Uses add_modules to
    initialize the modules. This is meant for use in cases where you cannot
    load the modules together with the method you use to interface to slurm.
    E.g. with pyslurm, you have to load modules separately, but in a slurm script
    you can put the module loads in there.
    """
    self.add_modules()
    # Bring the module list into a dependency-respecting load order first.
    self.__sort_module_loads()
    if self.config.purge_modules_at_start:
      U.shell("module purge")
    for mod in self.modules:
      U.shell(f"module load {str(mod)}")
def clear_modules(self):
"""
Clears the modules.
"""
self.modules = []
def add_command(self, command: str) -> None:
"""
Adds a commands to run from inside the sbatch job. These are the actual run commands,
not the SLURM config comments.
:param command: The command to add.
:return: void.
"""
self.commands.append(command)
def clear_commands(self) -> None:
"""
Clears commands.
:return: void.
"""
self.commands.clear()
  def __sort_module_loads(self) -> None:
    """
    Sorts the module loads, by dependencies.
    A simple/'stupid' implementation. It just tries to sort the modules correctly, and gives up
    if no correct way can be found after n! tries (all permutations theoretically). No fancy
    cyclic graph detection or similar. It at least should be enough for the most simple cases
    (such as tested in the unit tests).
    :return: void.
    """
    modules_sorted = []
    modules_with_deps = []
    # move modules without dependencies to the front
    for module in self.modules:
      if module.depends_on is None:
        modules_sorted.append(module)
      else:
        modules_with_deps.append(module)
    # Fewest dependencies first: those are the most likely to be placeable early.
    modules_with_deps = sorted(modules_with_deps, key=lambda mod: len(mod.depends_on))
    # n! retries bounds the search over all theoretical orderings.
    max_tries = math.factorial(len(modules_with_deps))
    tries = 0
    while len(modules_with_deps) > 0:
      module = modules_with_deps.pop(0)
      if module.deps_satisfied_with(modules_sorted):
        modules_sorted.append(module)
      else:
        # Requeue at the back and retry once more modules have been placed.
        modules_with_deps.append(module)
      tries = tries + 1
      if tries > max_tries:
        conflicts_on = [
            f"{mod.name} (depends on {mod.depends_on})"
            if mod.version is None else f"{mod.name}/{mod.version} (depends on {mod.depends_on})"
            for mod in modules_with_deps
        ]
        # delete duplicates
        conflicts_on = list(set(conflicts_on))
        # raise error
        L.get_logger().log(
            f"SlurmGenerator::__sort_module_loads: Modules could not be sorted in a way they do not "
            f"conflict each other or some module dependencies cannot be fulfilled. "
            f"Modules that cannot be sorted in are: {conflicts_on}.",
            level="error")
        raise ModuleDependencyConflict(
            f"Modules could not be sorted in a way they do not conflict each other "
            f"or some module dependencies cannot be fulfilled. "
            f"Modules that cannot be sorted in are: {conflicts_on}.")
    self.modules = modules_sorted
  def to_slurm_options(self) -> None:
    """
    Save the contents of the config of the slurm args in key-value format.
    This resembles the sbatch options corresponding to the config.
    """
    self.slurm_options = {}
    if self.config.account:
      self.slurm_options["--account"] = self.config.account
    if self.config.reservation:
      self.slurm_options["--reservation"] = self.config.reservation
    if self.config.partition:
      self.slurm_options["--partition"] = self.config.partition
    if self.config.job_name:
      self.slurm_options["--job-name"] = self.config.job_name
    if self.config.std_out_path:
      # For job arrays, %A (job id) plus %a (task id) disambiguate the files;
      # for single jobs the job id placeholder %j suffices.
      if self.config.job_array_start is not None and self.config.job_array_end is not None:
        self.slurm_options["--output"] = f"{self.config.std_out_path}.%A_%a"
      else:
        self.slurm_options["--output"] = f"{self.config.std_out_path}.%j"
    if self.config.std_err_path:
      if self.config.job_array_start is not None and self.config.job_array_end is not None:
        self.slurm_options["--error"] = f"{self.config.std_err_path}.%A_%a"
      else:
        self.slurm_options["--error"] = f"{self.config.std_err_path}.%j"
    if self.config.time_str:
      self.slurm_options["--time"] = self.config.time_str
    if self.config.mem_per_cpu:
      self.slurm_options["--mem-per-cpu"] = self.config.mem_per_cpu
    if self.config.number_of_tasks:
      self.slurm_options["--ntasks"] = self.config.number_of_tasks
    if self.config.number_of_cores_per_task:
      self.slurm_options["--cpus-per-task"] = self.config.number_of_cores_per_task
    # A value of None marks a bare flag without an "=value" part.
    if self.config.exclusive:
      self.slurm_options["--exclusive"] = None
    if self.config.wait:
      self.slurm_options["--wait"] = None
    if self.config.job_array_start is not None and self.config.job_array_end is not None:
      self.slurm_options["--array"] = f"{self.config.job_array_start}-" \
                                      f"{self.config.job_array_end}:{self.config.job_array_step}"
      if self.config.force_sequential:
        # "%1" means maximal 1 job in parallel
        self.slurm_options["--array"] += "%1"
    if self.config.cpu_frequency_str:
      self.slurm_options["--cpu-freq"] = self.config.cpu_frequency_str
    if self.config.dependencies:
      self.slurm_options["--dependency"] = self.config.dependencies
    if self.config.mail_address:
      self.slurm_options["--mail-user"] = self.config.mail_address
    if self.config.mail_types:
      self.slurm_options["--mail-type"] = ",".join(self.config.mail_types)
def to_arg_strings(self) -> List[str]:
"""
Generate SLURM flags from key-value config.
"""
self.to_slurm_options()
args = []
for flag, value in self.slurm_options.items():
if value:
args.append(f"{flag}={value}")
else:
# if value is None, it is a flag, without a variable to it
# essentially for the exclusive flag
args.append(f"{flag}")
return args
def get_pyslurm_args(self) -> Dict[str, Union[str, int]]:
"""
Returns the slurm arguments as dict for pyslurm.
To dispatch jobs with pyslurm, there is a need to re-format some data.
(It would be best, if the method is not necessary, but at the moment it is)
Changes needed for pyslurm:
(0. All the leading "--" need to be removed)
1. There is no "--time" for pyslurm. Instead, you can set "time_limit": <minutes as int>.
2. There is no "--array" for pyslurm. Instead, you specify "array_inx": "0,1,2" (a comma separated
list of task/array ids as a string - e.g. this is the same as "--array=0-2[:1]").
3. "job-name" needs to be "job_name".
4. "mem-per-cpu" needs to be "mem_per_cpu".
5. "cpus-per-task" needs to be "cpus_per_task".
6. "cpu-freq" needs to be split into min and max, named "cpu_freq_min" and "cpu_freq_max".
7. List of options, that are not supported by pyslurm. For now, we need to exclude them, because
otherwise the dispatching will fail: "wait", "exclusive"
8. Last but not least, put in the commands as "wrap".
"""
res = {}
self.to_slurm_options()
for flag, value in self.slurm_options.items():
flag = flag[2:] # fixes 1.
if flag == "time":
s = value
had_days = False
# calculate number of minutes from time_str
minutes = 0
# See: https://slurm.schedmd.com/sbatch.html: "Acceptable time formats include "minutes",
# "minutes:seconds", "hours:minutes:seconds", "days-hours", "days-hours:minutes"
# and "days-hours:minutes:seconds"."
if "-" in s:
had_days = True
# days in format
days, s = s.split("-", 1)
minutes = minutes + (int(days) * 60 * 24)
if s.count(":") == 2:
# hh:mm:ss layout
hours, mins, secs = s.split(":")
# add a minute if it had seconds not 0 ("ceil")
mins = int(mins) + 1 if int(secs) > 0 else int(mins)
minutes = minutes + (int(hours) * 60 + mins)
elif s.count(":") == 1:
# mm:ss layout - or hh:mm if it had days in it
if had_days:
hours, mins = s.split(":")
minutes = minutes + (int(hours) * 60 + int(mins))
else:
mins, secs = s.split(":")
mins = int(mins) + 1 if int(secs) > 0 else int(mins)
minutes = minutes + mins
elif s.count(":") == 0:
# just minutes - or hours if it had days in it
if had_days:
minutes = minutes + (60 * int(s))
else:
minutes = minutes + int(s)
res["time_limit"] = minutes
elif flag == "array":
rest = value
if "%" in value:
# TODO max_parr jobs not supported by pyslurm
L.get_logger().log(
"SlurmGenerator::get_pyslurm_args: 'force-sequential' cannot be enforced "
"with interface 'pyslurm' for repetitions. They may run in parallel.",
level="warn")
value = value.split("%")[0]
step = 1
if ":" in value:
rest, step = value.split(":")
begin, end = rest.split("-")
ids = []
for i in range(int(begin), int(end) + 1, int(step)):
ids.append(str(i))
res["array_inx"] = ",".join(ids)
elif flag == "job-name":
res["job_name"] = value
elif flag == "cpu-freq":
freq_min, freq_max = value.split("-")
res["cpu_freq_min"] = freq_min
res["cpu_freq_max"] = freq_max
elif flag == "mem-per-cpu":
res["mem_per_cpu"] = value
elif flag == "cpus-per-task":
res["cpus_per_task"] = value
# TODO: wait is not supported by pyslurm
elif flag == "wait":
L.get_logger().log(
"SlurmGenerator::get_pyslurm_args: You try to use SLURMs 'wait' option "
"with interface 'pyslurm', which is not supported by pyslurm.",
level="warn")
continue
# TODO: exclusive is not supported by pyslurm
elif flag == "exclusive":
L.get_logger().log(
"SlurmGenerator::get_pyslurm_args: You try to use SLURMs 'exclusive' option "
"with interface 'pyslurm', which is not supported by pyslurm.",
level="warn")
continue
else:
# put everything else in as is
res[flag] = value
# put in the command(s)
res["wrap"] = ";".join(self.commands)
return res
def get_slurm_cmd_line_args(self) -> str:
"""
Return the plain command line slurm arguments.
"""
self.to_slurm_options()
return " ".join(self.to_arg_strings())
def write_slurm_script(self, script_path: str, load_modules=False) -> None:
"""
Generates a slurm script and writs it to disk.
"""
# sort modules
try:
self.__sort_module_loads()
except ModuleDependencyConflict as e:
print(e)
raise RuntimeError(f"Conflict while module sorting: {e}")
# slurm script
try:
with open(script_path, "w") as f:
# write slurm options
f.write(f"#!{self.config.shell}\n")
for slurm_opt in self.to_arg_strings():
f.write(f"#SBATCH {slurm_opt}\n")
if load_modules:
# write module loads
if self.config.uses_module_system:
if self.config.purge_modules_at_start:
f.write("module purge\n")
for module in self.modules:
if module.version:
f.write(f"module load {module.name}/{module.version}\n")
else:
f.write(f"module load {module.name}\n")
# write commands
for command in self.commands:
f.write(f"{command}\n")
except FileNotFoundError as e:
L.get_logger().log(f"Slurm script file cannot be found or created: {e}", level="error")
raise ScriptNotFoundException(f"Slurm script file cannot be found or created: {e}.")
def make_dirs(self): # TODO: No usages anymore!
"""
Checks if directories to out- and err directory are in place.
Create the necessary directories that are missing.
"""
err_dir = "/".join(self.config.std_err_path.split("/")[:-1])
out_dir = "/".join(self.config.std_out_path.split("/")[:-1])
if not os.path.isdir(err_dir):
res = subprocess.run(["mkdir", "-p", err_dir])
if not res.returncode == 0:
raise CommandExecutionException(f"mkdir -p {err_dir}")
if not os.path.isdir(out_dir):
res = subprocess.run(["mkdir", "-p", out_dir])
if not res.returncode == 0:
L.get_logger().log(
f"SlurmGenerator::make_dirs: Creation of directories with mkdir -p {out_dir} "
f"failed",
level="warn")
raise CommandExecutionException(f"mkdir -p {out_dir}")
  def sbatch(self,
             script_path: Optional[str] = None,
             active: bool = False,
             wait: bool = False,
             load_modules: bool = False) -> Union[int, str]:
    """
    Saves the SLURM script to the file and submits the job on the system via "sbatch"-commands.
    :param script_path: Path for the slurm script. If given a slurm script will be saved and sbatched,
    if not given all options will be appended to the sbatch command directly.
    :param active: Set to False to not run the command, just return it.
    :param wait: Use the wait flag to sbatch the job.
    :param load_modules: Whether to use module system in script or not.
    :return: The job id resulted from sbatch-ing. Or if not active: The sbatch command.
    """
    self.to_slurm_options()
    if wait:
      # an explicit wait request overrides the config; regenerate the options
      self.config.wait = wait
      self.to_slurm_options()
    sbatch_command = ["sbatch"]
    if not script_path:
      # No script file: pass every option, module load and command directly
      # on the sbatch command line.
      # slurm options
      for flag, value in self.slurm_options.items():
        if value:
          sbatch_command.append(f"{flag}={value}")
        else:
          # valueless option, e.g. --exclusive
          sbatch_command.append(f"{flag}")
      if load_modules:
        if self.config.uses_module_system:
          # module purge
          if self.config.purge_modules_at_start:
            sbatch_command.append("module purge;")
          # module loads
          self.__sort_module_loads()
          for module in self.modules:
            sbatch_command.append(f"module load {str(module)};")
      # commands
      sbatch_command.append(";".join(self.commands))
    else:
      # write everything into a batch script and submit that instead
      self.write_slurm_script(script_path, load_modules)
      sbatch_command.append(script_path)
    if not active:
      return " ".join(sbatch_command)
    else:
      # parse the job id out of sbatch's "Submitted batch job <id>" output line
      res, rt = U.shell(" ".join(sbatch_command))
      res = res.split("\n")[0]
      res = res.split("Submitted batch job ")[1]
      self.job_id = int(res)
      L.get_logger().log(f"SlurmGenerator::sbatch: Submitted batch job {self.job_id}",
                         level="debug")
      return self.job_id
def __check_squeue(self, job_ids: List[int] = None) -> bool:
"""
Checks the output of the "squeue" commands and returns whether all jobs in the job_ids are completed or not.
You can give just a list of one, to check for one.
:param job_ids: A list of job ids to find the status for.
:return: True if the job is/jobs are completed, false otherwise. If an empty list was given, None will be returned.
"""
sq = subprocess.run(["bash", "-c", "squeue --format=%F --noheader"], stdout=subprocess.PIPE)
sq = sq.stdout.decode("utf-8")
squeue_ids = sq.splitlines(keepends=False)
map = {}
finished = None
if job_ids:
finished = True
for i, job_id in enumerate(job_ids):
map[job_id] = True
for squeue_job in squeue_ids:
# need to test if id is part of squeue output to make it work with job arrays
if str(job_id) in squeue_job:
finished = False
map[job_id] = False
# print a usefull summary
log = "State summary:\n"
for i, (job, fin) in enumerate(map.items()):
endwith = '' if i == len(list(map.keys())) - 1 else '\n'
log = log + f"Job {job} is{'' if fin else ' not yet'} finished.{endwith}"
L.get_logger().log(f"SlurmGenerator::__check_squeue: {log}", level="debug")
del map, log
return finished
def wait(self, job_id: int = None, job_ids: List[int] = None) -> None:
"""
Wait for the SLURM job given by job_id, or for the list of SLURM jobs given by job_ids to finish.
:param job_id: Job id for waiting for a single job.
:param job_ids: List of job ids for waiting for multiple jobs.
:return: void.
"""
check_for = []
if job_id:
self.job_id = job_id
check_for.append(job_id)
elif job_ids:
check_for += job_ids
while not self.__check_squeue(check_for):
time.sleep(self.config.check_interval)
# | 24,903 | 37.196319 | 126 | py |
# PIRA | PIRA-master/lib/DefaultFlags.py
"""
File: DefaultFlags.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module holds a selection of default flags.
"""
import typing
import os
from lib.Configuration import InvocationConfig
class BackendDefaults:
  """
  Meant to hold different defaults for, e.g., flags.

  Implemented as a singleton: the first construction creates the shared
  _BackendDefaultsImpl instance; all attribute access is forwarded to it
  via __getattr__.
  """

  class _BackendDefaultsImpl:
    """Holds the actual default values (compilers, flags, paths)."""

    def __init__(self):
      # Defaults target the Clang toolchain and its instrumentation flags.
      self._c_compiler = 'clang'
      self._cpp_compiler = 'clang++'
      self._compiler_instr_flag = '-finstrument-functions'
      self._compiler_instr_wl_flag = '-finstrument-functions-whitelist-inputfile'
      self._num_compile_procs = 8
      self._pira_exe_name = 'pira.built.exe'
      self.pira_dir = InvocationConfig.get_instance().get_pira_dir()

    def get_default_c_compiler_name(self) -> str:
      """Default C compiler executable name."""
      return self._c_compiler

    def get_default_cpp_compiler_name(self) -> str:
      """Default C++ compiler executable name."""
      return self._cpp_compiler

    def get_default_instrumentation_flag(self) -> str:
      """Compiler flag that enables function instrumentation."""
      return self._compiler_instr_flag

    def get_default_instrumentation_selection_flag(self) -> str:
      """Compiler flag selecting an instrumentation whitelist input file."""
      return self._compiler_instr_wl_flag

    def get_default_number_of_processes(self) -> int:
      """Default degree of parallelism for compilation."""
      return self._num_compile_procs

    def get_default_exe_name(self) -> str:
      """Name of the executable PIRA builds."""
      return self._pira_exe_name

    def get_default_kwargs(self) -> dict:
      """Default keyword arguments handed to build/analysis functors."""
      kwargs = {
          'CC': '\"' + self.get_default_c_compiler_name() + '\"',
          'CXX': '\"' + self.get_default_cpp_compiler_name() + '\"',
          'PIRANAME': self.get_default_exe_name(),
          'NUMPROCS': self._num_compile_procs
      }
      return kwargs

    def get_pira_dir(self) -> str:
      """PIRA working directory, taken from the InvocationConfig singleton."""
      return self.pira_dir

    def get_wrap_w_file(self) -> str:
      """Path of the MPI wrapper .w input file inside the PIRA directory."""
      return os.path.join(self.pira_dir, 'pira-mpi-filter.w')

    def get_wrap_c_file(self) -> str:
      """Path of the generated MPI wrapper C source file."""
      return os.path.join(self.pira_dir, 'pira-mpi-filter.c')

    def get_wrap_so_file(self) -> str:
      """Path of the compiled MPI filter shared object."""
      return os.path.join(self.pira_dir, 'PIRA_MPI_Filter.so')

    def get_MPI_wrap_LD_PRELOAD(self) -> str:
      """LD_PRELOAD assignment string for the MPI filter shared object."""
      return 'LD_PRELOAD=' + self.get_wrap_so_file()

  # shared singleton instance (created on first construction)
  instance = None

  def __init__(self):
    if not BackendDefaults.instance:
      BackendDefaults.instance = BackendDefaults._BackendDefaultsImpl()

  def __getattr__(self, name):
    # forward attribute lookups to the singleton implementation
    return getattr(self.instance, name)
# | 2,339 | 29 | 126 | py |
# PIRA | PIRA-master/lib/Checker.py
"""
File: Checker.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file
at https://github.com/tudasc/pira
Description: Checks if files and paths in config-file are valid.
"""
import lib.Utility as U
import lib.Configuration as C
import lib.Logging as L
class Checker:
  """
  Validates that the directories and functor files referenced by a PIRA
  configuration exist before a run starts.
  """

  @staticmethod
  def check_configfile_v1(configuration):
    """
    Check a version-1 configuration.

    All problems are collected into a single message; raises
    C.PiraConfigErrorException if any check failed.
    """
    error_message = 'Error in configuration-file:\n\n'
    exception_occured = False
    for build_dir in configuration.directories:
      if not U.check_provided_directory(build_dir):
        error_message += 'Build-directory ' + build_dir + ' does not exist.\n\n'
        exception_occured = True
      for item in configuration.builds[build_dir]['items']:
        analysis_functor_dir = configuration.items[build_dir][item]['instrument_analysis'][0]
        if not U.check_provided_directory(analysis_functor_dir):
          error_message += 'Analysis-functor dir ' + analysis_functor_dir + ' does not exist.\n'
          exception_occured = True
        analysis_binary_dir = configuration.items[build_dir][item]['instrument_analysis'][2]
        if not U.check_provided_directory(analysis_binary_dir):
          error_message += 'Analysis-binary dir ' + analysis_binary_dir + ' does not exist.\n'
          exception_occured = True
        # Instead of prompting an error, we just create a log if the cubes-directory does not exist.
        # This is due to that the directory is created in ProfileSink
        cubes_dir = configuration.items[build_dir][item]['instrument_analysis'][1]
        if not U.check_provided_directory(cubes_dir):
          L.get_logger().log('Creating Cubes directory ' + cubes_dir, level='info')
        if not U.check_provided_directory(configuration.items[build_dir][item]['builders']):
          error_message += 'Builders-directory ' + configuration.items[build_dir][item][
              'builders'] + ' does not exist.\n'
          exception_occured = True
        for arg in configuration.items[build_dir][item]['args']:
          if not U.check_provided_directory(arg):
            # bugfix: the original message lacked spaces around the path
            error_message += 'args ' + arg + ' does not exist.\n'
            exception_occured = True
        if not U.check_provided_directory(configuration.items[build_dir][item]['runner']):
          # bugfix: the original message lacked spaces around the path
          error_message += 'runner ' + configuration.items[build_dir][item][
              'runner'] + ' does not exist.\n'
          exception_occured = True
    if exception_occured:
      raise C.PiraConfigErrorException(error_message)

  @staticmethod
  def check_configfile_v2(configuration):
    """
    Check a version-2 configuration (unwrapping a PiraConfigAdapter first).

    Verifies directories and the presence of all expected functor files for
    every item/flavor combination; raises C.PiraConfigErrorException on
    failure.
    """
    if isinstance(configuration, C.PiraConfigAdapter):
      configuration = configuration.get_adapted()
    error_message = 'Error in configuration-file:\n\n'
    exception_occured = False
    if not bool(configuration.get_directories()):
      raise C.PiraConfigErrorException('Error at Parsing of Pira-config-file, check the syntax!')
    # check if directories exist
    for dir in configuration.get_directories():
      if not U.check_provided_directory(configuration.get_place(dir)):
        error_message += 'Directory ' + dir + ' does not exist.\n'
        exception_occured = True
      for item in configuration.get_items(dir):
        if not U.check_provided_directory(item.get_analyzer_dir()):
          error_message += 'Analyzer-Directory ' + item.get_analyzer_dir() + ' does not exist\n'
          exception_occured = True
        # instead of throwing an error, only an info is logged. This is due to that the directory is created in ProfileSink
        if not U.check_provided_directory(item.get_cubes_dir()):
          L.get_logger().log('Creating Cubes-Directory ' + item.get_cubes_dir(), level='info')
        if not U.check_provided_directory(item.get_functor_base_path()):
          error_message += 'Functors-Base-Directory ' + item.get_functor_base_path(
          ) + ' does not exist\n'
          exception_occured = True
        # if there is no flavor,the flavors-array is filled with an empty entry and the underscore in the filename is removed
        if len(item.get_flavors()) == 0:
          flavors = ['']
          underscore = ''
        else:
          flavors = item.get_flavors()
          underscore = '_'
        # check if functor-files exist
        for flavor in flavors:
          functors = ['analyze_', 'clean_', 'no_instr_', 'runner_', '']
          for functor in functors:
            # NOTE(review): accesses the private item._name attribute — confirm
            # whether a public accessor should be used instead.
            path_to_check = item.get_functor_base_path(
            ) + '/' + functor + item._name + underscore + flavor + '.py'
            L.get_logger().log('Checker::check_v2: ' + path_to_check)
            if not U.is_file(path_to_check):
              error_message += functor + '-functor of flavor ' + flavor + ' does not exist' + '.\n'
              exception_occured = True
    if exception_occured:
      raise C.PiraConfigErrorException(error_message)

  @staticmethod
  def check_configfile(configuration):
    """Dispatch to the checker matching the configured config-file version."""
    if C.InvocationConfig.get_instance().get_config_version() == 1:
      Checker.check_configfile_v1(configuration)
    else:
      Checker.check_configfile_v2(configuration)
# | 5,138 | 41.122951 | 125 | py |
# PIRA | PIRA-master/lib/Analyzer.py
"""
File: Analyzer.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module to encapsulate the underlying analysis engine.
"""
import lib.Utility as U
import lib.Logging as L
import lib.TimeTracking as T
import lib.FunctorManagement as F
import lib.DefaultFlags as D
import lib.Exception as E
from lib.Configuration import TargetConfig, InvocationConfig as InvocCfg
class PiraAnalyzerException(E.PiraException):
  """Raised for errors originating in the Analyzer component."""

  def __init__(self, m):
    super().__init__(m)
class Analyzer:
  """
  Encapsulates running the analysis engine on profiling results.

  A profile sink must be set via set_profile_sink() before analyze() may be
  called; the configuration object provides analyzer/experiment directories
  and benchmark names.
  """

  def __init__(self, configuration) -> None:
    # Everything below depends on a populated configuration.
    if configuration.is_empty():
      raise PiraAnalyzerException('Constructing analyzer from empty configuration not supported.')
    self.config = configuration
    self.error = None
    self._profile_sink = None

  def set_profile_sink(self, sink) -> None:
    # Must be called before analyze(); see the guard there.
    self._profile_sink = sink

  def analyze(self, target_config: TargetConfig, iteration_number: int, was_rebuilt: bool) -> str:
    """
    Run the analysis for one target configuration and iteration.
    Returns the path to the generated instrumentation file (via analyze_local).
    """
    # The sink needs to be set for the Analyzer to run
    if self._profile_sink is None:
      raise RuntimeError('Analyzer::analyze: Profile Sink in Analyzer not set!')
    if target_config is None:
      raise RuntimeError('Analyzer::analyze: TargetConfiguration object is None!')
    default_provider = D.BackendDefaults()
    kwargs = default_provider.get_default_kwargs()
    return self.analyze_local(target_config, kwargs, iteration_number, was_rebuilt)

  def analyze_local(self, target_config: TargetConfig, kwargs: dict, iterationNumber: int,
                    was_rebuild: bool) -> str:
    """
    Invoke the analysis functor for the target, either actively (the functor
    runs itself) or passively (the functor returns a command we execute).
    Returns the path of the instrumentation file produced by the analyzer.
    """
    flavor = target_config.get_flavor()
    build = target_config.get_build()
    benchmark = target_config.get_target()
    # NOTE(review): hybrid_filter is computed but not used below —
    # run_analyzer_command's hybrid_filter parameter defaults to False here.
    # Confirm whether it should be passed along.
    hybrid_filter = InvocCfg.get_instance().is_hybrid_filtering()
    fm = F.FunctorManager()
    analyze_functor = fm.get_or_load_functor(build, benchmark, flavor, 'analyze')
    analyzer_dir = self.config.get_analyzer_dir(build, benchmark)
    kwargs['analyzer_dir'] = analyzer_dir
    # The invoke args can be retrieved from the configuration object.
    # Since the invoke args are iterable, we can create all necessary argument tuples here.
    extrap_config_file = None
    if self._profile_sink.has_config_output():
      extrap_config_file = self._profile_sink.output_config(benchmark, analyzer_dir)
    if analyze_functor.get_method()['active']:
      analyze_functor.active(benchmark, **kwargs)
    else:
      L.get_logger().log('Analyzer::analyze_local: Using passive mode')
      try:
        exp_dir = self.config.get_analyzer_exp_dir(build, benchmark)
        isdirectory_good = U.check_provided_directory(analyzer_dir)
        command = analyze_functor.passive(benchmark, **kwargs)
        L.get_logger().log('Analyzer::analyze_local: Command = ' + command)
        benchmark_name = self.config.get_benchmark_name(benchmark)
        if isdirectory_good:
          # run the analyzer from within its own directory
          U.change_cwd(analyzer_dir)
          L.get_logger().log('Analyzer::analyzer_local: Flavor = ' + flavor +
                             ' | benchmark_name = ' + benchmark_name)
          instr_files = U.build_instr_file_path(analyzer_dir, flavor, benchmark_name)
          L.get_logger().log('Analyzer::analyzer_local: instrumentation file = ' + instr_files)
          numbered_instr_file = U.build_numbered_instr_file_path(analyzer_dir, flavor,
                                                                benchmark_name, iterationNumber)
        else:
          raise RuntimeError(
              f'Analyzer::analyzer_local: Analyzer directory {analyzer_dir} does not exist')
        tracker = T.TimeTracker()
        # TODO: Alternate between expansion and pure filtering.
        if iterationNumber > 0 and U.is_file(instr_files):
          # follow-up iteration: a previous instrumentation file exists
          L.get_logger().log('Analyzer::analyze_local: instr_file available')
          U.remove(instr_files)
          tracker.f_track('Analysis', self.run_analyzer_command, command, analyzer_dir, flavor,
                          benchmark_name, exp_dir, iterationNumber, extrap_config_file, was_rebuild)
          L.get_logger().log('Analyzer::analyze_local: command finished', level='debug')
        else:
          # first iteration: run the static (no-profile) analysis
          tracker.f_track('Initial analysis', self.run_analyzer_command_no_instr, command,
                          analyzer_dir, flavor, benchmark_name)
        # keep a per-iteration copy of the instrumentation file
        U.copy_file(instr_files, numbered_instr_file)
        self.tear_down(build, exp_dir)
        return instr_files
      except Exception as e:
        L.get_logger().log(str(e), level='error')
        raise Exception('Problem in Analyzer')

  def set_up(self):
    # No setup required; kept for interface symmetry with tear_down.
    pass

  def tear_down(self, old_dir, exp_dir):
    # Change back to the previous working directory when exp_dir is valid.
    isdirectory_good = U.check_provided_directory(exp_dir)
    if isdirectory_good:
      try:
        U.change_cwd(old_dir)
      except Exception as e:
        L.get_logger().log(str(e), level='error')

  @staticmethod
  def run_analyzer_command(command: str,
                           analyzer_dir: str,
                           flavor: str,
                           benchmark_name: str,
                           exp_dir: str,
                           iterationNumber: int,
                           extrap_config_file: str,
                           was_rebuilt: bool,
                           hybrid_filter: bool = False) -> None:
    """
    Assemble and run the analyzer shell command for a profiled iteration.

    Selects between the PIRA 1 runtime mode, the load-imbalance-detection
    (LIDe) mode, and the Extra-P model mode, depending on the invocation
    configuration and whether an Extra-P config file is available.
    """
    export_performance_models = InvocCfg.get_instance().is_export()
    export_runtime_only = InvocCfg.get_instance().is_export_runtime_only()
    use_cs_instrumentation = InvocCfg.get_instance().use_cs_instrumentation()
    analysis_parameters_path = InvocCfg.get_instance().get_analysis_parameters_path()
    export_str = ' '
    if export_performance_models:
      export_str += ' --export'
    if export_runtime_only:
      export_str += ' --runtime-only'
    ipcg_file = U.get_ipcg_file_name(analyzer_dir, benchmark_name, flavor)
    # the cube profile of the *previous* iteration is analyzed
    cubex_dir = U.get_cube_file_path(exp_dir, flavor, iterationNumber - 1)
    cubex_file = U.get_cubex_file(cubex_dir, benchmark_name, flavor)
    # PIRA version 1 runner, i.e., only consider raw runtime of single run
    if extrap_config_file is None:
      if InvocCfg.get_instance().is_lide_enabled():
        # load imbalance detection mode
        L.get_logger().log('Utility::run_analyzer_command: using Load Imbalance Detection Analyzer',
                           level='info')
        if analysis_parameters_path == '':
          L.get_logger().log(
              'Utility::run_analyzer_command: An analysis parameters file is required for PIRA LIDe!',
              level='error')
        sh_cmd = command + export_str + ' --scorep-out -c ' + cubex_file + ' --lide 1 --parameter-file ' + analysis_parameters_path + ' --debug 1 --export ' + ipcg_file
      else:
        # vanilla PIRA version 1 runner
        L.get_logger().log('Utility::run_analyzer_command: using PIRA 1 Analyzer', level='info')
        sh_cmd = command + export_str + ' --scorep-out ' + ipcg_file + ' -c ' + cubex_file
      L.get_logger().log('Utility::run_analyzer_command: INSTR: Run cmd: ' + sh_cmd)
      out, _ = U.shell(sh_cmd)
      L.get_logger().log('Utility::run_analyzer_command: Output of analyzer:\n' + out,
                         level='debug')
      return
    # Extra-P model mode below
    if hybrid_filter and not was_rebuilt:
      command += ' --model-filter'
    if use_cs_instrumentation:
      command += ' --use-cs-instrumentation'
    if analysis_parameters_path == '':
      L.get_logger().log(
          'Utility::run_analyzer_command: An analysis parameters file is required for Extra-P mode!',
          level='error')
    sh_cmd = command + export_str + ' --scorep-out --debug 1 --parameter-file ' + analysis_parameters_path + ' --extrap ' + extrap_config_file + ' ' + ipcg_file
    L.get_logger().log('Utility::run_analyzer_command: INSTR: Run cmd: ' + sh_cmd)
    out, _ = U.shell(sh_cmd)
    L.get_logger().log('Utility::run_analyzer_command: Output of analyzer:\n' + out, level='debug')

  @staticmethod
  def run_analyzer_command_no_instr(command: str, analyzer_dir: str, flavor: str,
                                    benchmark_name: str) -> None:
    """Run the analyzer in static (whole-program, no-profile) mode."""
    ipcg_file = U.get_ipcg_file_name(analyzer_dir, benchmark_name, flavor)
    sh_cmd = command + ' --scorep-out --static '
    # load imbalancee detection mode
    if InvocCfg.get_instance().is_lide_enabled():
      sh_cmd = sh_cmd + ' --debug 1 --lide 1 ' + InvocCfg.get_instance(
      ).get_analysis_parameters_path()
    sh_cmd = sh_cmd + ' ' + ipcg_file
    L.get_logger().log('Utility::run_analyzer_command_noInstr: NO INSTR: Run cmd: ' + sh_cmd)
    out, _ = U.shell(sh_cmd)
    L.get_logger().log('Utility::run_analyzer_command_noInstr: Output of analyzer:\n' + out,
                       level='debug')
# | 8,705 | 41.262136 | 168 | py |
# PIRA | PIRA-master/lib/Utility.py
"""
File: Utility.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module to support other tasks.
"""
import re
import sys
sys.path.append('..')
import lib.Logging as L
from lib.Exception import PiraException
import os
import subprocess
import filecmp
import shutil
import tempfile
import typing
from random import choice
from string import ascii_uppercase
from timeit import timeit
home_directory = ''
def exit(code="1"):
  # Terminate the process via sys.exit.
  # NOTE(review): this shadows the builtin exit(), and the default is the
  # *string* "1" — sys.exit("1") prints "1" to stderr and exits with status 1.
  # Confirm whether an int default was intended before changing it.
  sys.exit(code)
# --- Files / Directories --- #
def set_home_dir(home_dir: str) -> None:
  """Store the PIRA home directory in the module-level global."""
  global home_directory
  home_directory = home_dir
def get_home_dir() -> str:
  """Return the previously set home directory; raises if none was set."""
  if home_directory == '':
    raise PiraException('Utility::get_home_dir: No Home Directory Set!')
  return home_directory
def get_pira_code_dir() -> str:
  """
  Returns the top-level directory of the pira code.
  This is useful for testing and to reference the BatchSystemTimer file while dispatching.
  """
  # .../pira/lib/Utility.py -> .../pira/lib (absolute)
  lib_dir = get_absolute_path(os.path.dirname(__file__))
  # drop the trailing "lib" component: .../pira/lib -> .../pira
  return "/".join(lib_dir.split("/")[:-1])
def set_export_perfomance_models(export: bool) -> None:
  # Module-level toggle for exporting performance models.
  # NOTE(review): the name misspells "performance"; renaming would break
  # external callers, so it is kept as-is.
  global export_performance_models
  export_performance_models = export
def set_export_runtime_only(rt_only: bool) -> None:
  # Module-level toggle: export only runtimes instead of full models.
  global export_runtime_only
  export_runtime_only = rt_only
def get_cwd() -> str:
  """Return the current working directory."""
  return os.getcwd()
def change_cwd(path: str) -> None:
  """Change the current working directory to *path* (logged at debug level)."""
  L.get_logger().log('Utility::change_cwd: to ' + path, level='debug')
  os.chdir(path)
def read_file(file_name: str) -> str:
  """Read the whole file *file_name* and return its contents as a string."""
  with open(file_name) as in_file:
    return in_file.read()
def copy_file(source_file: str, target_file: str) -> None:
  """Copy *source_file* to *target_file*; failures are logged, not raised."""
  L.get_logger().log('Utility::copy_file: ' + source_file + ' -to- ' + target_file)
  # Workaround to '[Errno 11] Resource temporarily unavailable' problem on IBM's GPFS
  # see also:
  # - https://bugs.python.org/issue43743
  # - https://www.ibm.com/support/pages/apar/IJ28891
  # This may slow down the copy process and should be removed as soon as the underlying problem has been fixed.
  shutil._USE_CP_SENDFILE = False
  try:
    shutil.copyfile(source_file, target_file)
  except Exception as e:
    # NOTE(review): errors are swallowed after logging — confirm callers can
    # tolerate a silently missing copy.
    L.get_logger().log('Utility::copy_file: shutil.copyfile went wrong ' + str(e))
def lines_in_file(file_name: str) -> int:
  """
  Count the newline-separated segments of *file_name*.
  Returns 0 (and logs) when the file does not exist. Note that a trailing
  newline yields one extra (empty) segment, as in the original split-based count.
  """
  if not is_file(file_name):
    L.get_logger().log('Utility::lines_in_file: No file ' + file_name + ' to read. Return 0 lines',
                       level='debug')
    return 0
  return len(read_file(file_name).split('\n'))
def check_provided_directory(path: str) -> bool:
  """Return True iff *path* exists and is a directory."""
  # os.path.isdir already returns a bool; no if/else needed.
  return os.path.isdir(path)
def get_absolute_path(path: str) -> str:
  """
  Return *path* unchanged if it is already absolute, otherwise resolve it
  against the current working directory.
  """
  # startswith avoids an IndexError on the empty string (path[0] would raise).
  if path.startswith('/'):
    return path
  return os.path.abspath(path)
def is_absolute_path(path: str) -> bool:
  """
  Decide whether *path* is an absolute path.

  Hack for the tests: paths may be faked there, i.e. invalid from the
  system's point of view. If such a non-existing path still starts with '/',
  we report it as absolute anyway; otherwise defer to os.path.isabs.
  """
  if not check_provided_directory(path) and path[0] == '/':
    return True
  return os.path.isabs(path)
def create_directory(path: str) -> None:
  """Recursively create *path*; raises FileExistsError if it already exists."""
  os.makedirs(path)
def get_tempdir():
  """Return the system's temporary directory (tempfile.gettempdir())."""
  return tempfile.gettempdir()
def make_dir(path):
  """Create the single directory *path* if it does not already exist."""
  if not check_provided_directory(path):
    os.mkdir(path)
def make_dirs(path):
  """Recursively create *path* (mode 0o777); existing directories are fine."""
  os.makedirs(path, mode=0o777, exist_ok=True)
def write_file(file_path: str, file_content: str) -> None:
  """
  Write *file_content* to *file_path*, creating or truncating the file.

  Note: returns None — the original annotation incorrectly claimed str.
  """
  L.get_logger().log('Utility::write_file: file_path to write: ' + file_path)
  with open(file_path, 'w+') as out_file:
    out_file.write(file_content)
def get_base_dir(file_path: str) -> str:
  """Return the directory component of *file_path*."""
  return os.path.dirname(file_path)
def is_file(path: str) -> bool:
  """Return True iff *path* exists and is a regular file."""
  # os.path.isfile already returns a bool; no if/else needed.
  return os.path.isfile(path)
def check_file(path: str) -> bool:
  """Return True iff *path* exists (file, directory, or other)."""
  # os.path.exists already returns a bool; no if/else needed.
  return os.path.exists(path)
def is_valid_file_name(file_name: str) -> bool:
  """
  Return True iff *file_name* contains only [a-zA-Z0-9/._-].

  Bugfix: the original character class used 'a-zA-z'; the 'A-z' range also
  covers the punctuation characters between 'Z' and 'a' ('[', '\\', ']',
  '^', '`'), so names containing them were wrongly accepted.
  """
  return not bool(re.search(r'[^a-zA-Z0-9/._-]', file_name))
def rename(old: str, new: str) -> None:
  """Rename/move the filesystem entry *old* to *new*."""
  os.rename(old, new)
def remove(path: str) -> None:
  # Deletes the *contents* of the directory tree rooted at `path`, keeping
  # the root directory itself: files at each walked level are unlinked and
  # sub-directories removed recursively via shutil.rmtree.
  # NOTE(review): after rmtree removes a subtree, os.walk still attempts to
  # descend into it; os.walk suppresses those errors by default, so the net
  # effect is "empty out `path`" — confirm this is the intended semantics.
  for root, dirs, files in os.walk(path):
    for f in files:
      os.unlink(os.path.join(root, f))
    for d in dirs:
      shutil.rmtree(os.path.join(root, d))
def remove_dir(path: str):
  """Recursively delete the directory *path* if it exists; no-op otherwise."""
  if os.path.isdir(path):
    shutil.rmtree(path)
def remove_file(path: str) -> bool:
  """Delete *path* if it is a regular file; return True when deleted."""
  if not is_file(path):
    return False
  os.remove(path)
  return True
def remove_file_with_pattern(path: str, pattern: str):
  """
  Removes all files in *path* whose name is entirely matched by *pattern*.
  Patterns are defined by python's re module.
  """
  for entry in os.listdir(path):
    # run the search only once (the original called re.search twice per entry)
    match = re.search(pattern, entry)
    # delete only when the leftmost match covers the whole file name
    if match and match.group(0) == entry:
      full_path = os.path.join(path, entry)
      if os.path.isfile(full_path):
        os.remove(full_path)
def get_default_pira_dir() -> str:
  """
  Return the default PIRA data directory: $XDG_DATA_HOME/pira when that
  variable is set and non-empty, otherwise ~/.local/share/pira.
  """
  xdg_data_home = os.environ.get('XDG_DATA_HOME', '')
  if xdg_data_home != '':
    return os.path.join(xdg_data_home, 'pira')
  return os.path.join(os.path.expanduser('~'), '.local/share/pira')
def get_default_slurm_config_path() -> str:
  """Return the default path of the batch-system configuration file."""
  return get_default_pira_dir() + "/batchsystem.json"
def get_default_config_file() -> str:
  """Return the default PIRA config file path (CWD/config.json)."""
  return get_cwd() + '/config.json'
def get_default_analysis_parameters_config_file() -> str:
  """Return the default analysis-parameters file path (CWD/parameters.json)."""
  return get_cwd() + '/parameters.json'
# --- File-related utils --- #
def json_to_canonic(json_elem):
  """
  Recursively convert a parsed-JSON structure into canonical built-in types.

  Lists and dicts are rebuilt element-wise; str and int values are re-wrapped;
  None stays None; everything else is stringified. Note that bool is a
  subclass of int, so True/False become 1/0 (preserved from the original).
  """
  if isinstance(json_elem, list):
    return [json_to_canonic(entry) for entry in json_elem]
  elif isinstance(json_elem, str):
    return str(json_elem)
  elif isinstance(json_elem, dict):
    new_dict = {}
    for k in json_elem:
      key_v = json_to_canonic(k)
      # Bugfix: index the source dict with the *original* key. The converted
      # key is not guaranteed to be present in the source dict.
      new_dict[key_v] = json_to_canonic(json_elem[k])
    return new_dict
  elif isinstance(json_elem, int):
    return int(json_elem)
  elif json_elem is None:
    return None
  else:
    return str(json_elem)
def remove_from_pgoe_out_dir(directory: str) -> None:
  """Clear the contents of *directory*/out via remove()."""
  remove(directory + "/" + "out")
def remove_arrow_lines(file_name: str) -> None:
  """
  Filter *file_name* in place, dropping every line containing '->'.
  Raises when the Score-P filter file does not exist.
  """
  if not check_file(file_name):
    L.get_logger().log('Utility.remove_arrow_lines: Scorep-filter-file does not exist!',
                       level='error')
    raise Exception('Utility.remove_arrow_lines: Scorep-filter-file does not exist!')
  with open(file_name, "r") as f:
    kept_lines = [line for line in f.readlines() if '->' not in line]
  with open(file_name, "w") as f:
    f.writelines(kept_lines)
def diff_inst_files(file1: str, file2: str) -> bool:
  """Return True iff the two instrumentation files compare equal (filecmp.cmp)."""
  # filecmp.cmp already returns a bool; no if/else needed.
  return filecmp.cmp(file1, file2)
def set_env(env_var: str, val) -> None:
  """Set the environment variable *env_var* to *val* (logged at debug level)."""
  L.get_logger().log('Utility::set_env: Setting ' + env_var + ' to ' + str(val), level='debug')
  os.environ[env_var] = val
def generate_random_string() -> str:
  """Return a random 12-character string of uppercase ASCII letters."""
  return ''.join(choice(ascii_uppercase) for _ in range(12))
# --- Shell execution and timing --- #
def timed_invocation(command: str, stderr_fd) -> typing.Tuple[str, float]:
  """
  Run *command* through the shell and measure its runtime.

  Returns (raw command output, elapsed wall-clock seconds). Note the output
  is the raw bytes from check_output — callers decode it (the annotation
  says str).
  """
  t1 = os.times()  # start time
  out = subprocess.check_output(command, stderr=stderr_fd, shell=True)
  t2 = os.times()  # end time
  cutime = t2[2] - t1[2]
  cstime = t2[3] - t1[3]
  elapsed = t2[4] - t1[4]
  # FIXME: How to actually compute this? Make it configurable?
  # Problem is: util.shell('sleep 4s') returns cutime + cstime == 0
  runtime = cutime + cstime
  # deliberately overwritten: wall-clock elapsed time is used (see FIXME above)
  runtime = elapsed
  return out, runtime
def shell(command: str,
          silent: bool = True,
          dry: bool = False,
          time_invoc: bool = False) -> typing.Tuple[str, float]:
  """
  Execute *command* in a shell and return (stdout as str, runtime).

  The runtime is -1.0 unless *time_invoc* is True. In dry mode the command
  is only logged and ('', 1.0) is returned. stderr is captured into a
  temporary file which is always removed afterwards. Raises Exception when
  the command exits non-zero.
  NOTE(review): the *silent* parameter is currently unused here.
  """
  if dry:
    L.get_logger().log('Utility::shell: DRY RUN SHELL CALL: ' + command, level='debug')
    return '', 1.0
  # capture stderr into a uniquely named temp file in the PIRA dir
  stderr_fn = os.path.join(get_default_pira_dir(), 'stderr-bp-' + generate_random_string())
  stderr_fd = open(stderr_fn, 'w+')
  try:
    L.get_logger().log('Utility::shell: util executing: ' + str(command), level='debug')
    if time_invoc:
      out, rt = timed_invocation(command, stderr_fd)
      L.get_logger().log('Util::shell: timed_invocation took: ' + str(rt), level='debug')
      return str(out.decode('utf-8')), rt
    else:
      out = subprocess.check_output(command, stderr=stderr_fd, shell=True)
      return str(out.decode('utf-8')), -1.0
  except subprocess.CalledProcessError as e:
    stderr_fd.seek(0)  # jump to beginning of file
    err_out = ''
    L.get_logger().log('Utility::shell: Attempt to write stderr file', level='debug')
    err_out += stderr_fd.read()
    L.get_logger().log('Utility::shell: Error output: ' + str(err_out), level='debug')
    L.get_logger().log('Utility::shell: Caught Exception ' + str(e), level='error')
    raise Exception('Utility::shell: Running command ' + command + ' did not succeed')
  finally:
    # always clean up the stderr capture file
    stderr_fd.close()
    remove_file(stderr_fn)
    L.get_logger().log('Utility::shell Cleaning up temp files for subprocess communication.',
                       level='debug')
def shell_for_submitter(command: str, silent: bool = True, dry: bool = False):
  """
  Run *command* in a shell and return its raw output (bytes).

  In dry mode only logs the command and returns ''. A grep exiting with
  status 1 (no matches) is treated as an empty result rather than an error.
  NOTE(review): the *silent* parameter is currently unused.
  """
  if dry:
    L.get_logger().log('Utility::shell_for_submitter: SHELL CALL: ' + command, level='debug')
    return ''
  try:
    out = subprocess.check_output(command, shell=True)
    return out
  except subprocess.CalledProcessError as e:
    # Bugfix: the original tested `if command.find('grep '):`, which is
    # truthy for -1 ("not found") and any position other than 0 — so the
    # grep special-case fired for almost every command. Test containment.
    if e.returncode == 1 and 'grep ' in command:
      return ''
    L.get_logger().log('Utility.shell: Caught Exception ' + str(e), level='error')
    raise Exception('Utility::shell_for_submitter: Running command ' + command + ' did not succeed')
# --- Functor utilities --- #
def load_functor(directory: str, module: str):
  """
  Dynamically import the functor *module* located in *directory* and return it.

  The directory is temporarily appended to sys.path for the import and removed
  again immediately afterwards. NOTE(review): a module of the same name that
  was imported earlier is served from Python's module cache, not re-read.
  """
  if not check_provided_directory(directory):
    L.get_logger().log('Utility::load_functor: Functor directory invalid', level='warn')
  if not is_valid_file_name(directory + '/' + module):
    L.get_logger().log('Utility::load_functor: Functor filename invalid', level='warn')
  # TODO: Add error if functor path does not exist!!!

  L.get_logger().log('Utility::load_functor: Appending ' + directory + ' to system path.',
                     level='debug')
  append_to_sys_path(directory)
  # Adding 'fromList' argument loads exactly the module.
  functor = __import__(module)
  remove_from_sys_path(directory)
  L.get_logger().log('Utility::load_functor: Returning from load_functor', level='debug')
  return functor
def append_to_sys_path(path: str) -> None:
  """ Add *path* to the end of the module search path. """
  sys.path += [path]
def remove_from_sys_path(path: str) -> None:
  """ Remove the first occurrence of *path* from the module search path. """
  # Same semantics as list.remove: raises ValueError if path is absent.
  del sys.path[sys.path.index(path)]
def concat_a_b_with_sep(a: str, b: str, sep: str) -> str:
  """ Join *a* and *b* with *sep* in between. """
  return f'{a}{sep}{b}'
def build_runner_functor_filename(IsForDB: bool, benchmark_name: str, flavor: str) -> str:
  """ Build the runner functor file name; DB variant has a leading '/' and no separator. """
  if IsForDB:
    return f'/runner_{benchmark_name}{flavor}'
  return f'runner_{benchmark_name}_{flavor}'
def build_builder_functor_filename(IsForDB: bool, IsNoInstr: bool, benchmark_name: str,
                                   flavor: str) -> str:
  """ Build the builder functor file name; DB variant has a leading '/' and no separator. """
  if IsForDB:
    return f'/{benchmark_name}{flavor}'
  prefix = 'no_instr_' if IsNoInstr else ''
  return f'{prefix}{benchmark_name}_{flavor}'
def build_clean_functor_filename(benchmark_name: str, flavor: str) -> str:
  """ Build the clean functor file name. """
  return f'clean_{benchmark_name}_{flavor}'
def build_analyze_functor_filename(IsForDB: bool, benchmark_name: str, flavor: str) -> str:
  """ Build the analyze functor file name; DB variant has a leading '/' and no separator. """
  if IsForDB:
    return f'/analyze_{benchmark_name}{flavor}'
  return f'analyze_{benchmark_name}_{flavor}'
def build_instr_file_path(analyzer_dir: str, flavor: str, benchmark_name: str) -> str:
  """ Path of the instrumentation list file written by the analyzer. """
  return f'{analyzer_dir}/out/instrumented-{benchmark_name}_{flavor}.txt'
def build_numbered_instr_file_path(analyzer_dir: str, flavor: str, benchmark_name: str,
                                   iteration_number: int) -> str:
  """ Path of the per-iteration instrumentation list file written by the analyzer. """
  return f'{analyzer_dir}/out/instrumented-{benchmark_name}_{flavor}_it-{iteration_number}.txt'
def get_ipcg_file_name(base_dir: str, b_name: str, flavor: str) -> str:
  """ Path of the whole-program call-graph (.mcg) file for a benchmark/flavor. """
  return f'{base_dir}/{b_name}_{flavor}.mcg'
def get_cubex_file(cubex_dir: str, b_name: str, flavor: str) -> str:
  """ Path of the Score-P Cube profile (.cubex) inside an experiment directory. """
  return f'{cubex_dir}/{flavor}-{b_name}.cubex'
def get_cube_file_path(experiment_dir: str, flavor: str, iter_nr: int) -> str:
  """ Compose the per-iteration Score-P experiment directory path. """
  path = f'{experiment_dir}-{flavor}-{iter_nr}'
  L.get_logger().log('Utility::get_cube_file_path: ' + path)
  return path
def build_cube_file_path_for_db(exp_dir: str, flavor: str, iterationNumber: int) -> str:
  """ Build and validate the Cube file path; raise if the result is not a valid file name. """
  candidate = get_cube_file_path(exp_dir, flavor, iterationNumber)
  if not is_valid_file_name(candidate):
    raise Exception('Utility::build_cube_file_path_for_db: Built file path to Cube not valid. fp: ' +
                    candidate)
  return candidate
| 13,131 | 27.180258 | 126 | py |
PIRA | PIRA-master/lib/Runner.py | """
File: Runner.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module to run the target software.
"""
import sys
sys.path.append('..')
import lib.Utility as U
import lib.Logging as L
import lib.FunctorManagement as F
import lib.Measurement as M
import lib.DefaultFlags as D
import lib.ProfileSink as S
from lib.Configuration import PiraConfig, TargetConfig, InstrumentConfig, InvocationConfig
from lib.BatchSystemBackends import BatchSystemInterface, SlurmBackend, SlurmInterfaces
from lib.BatchSystemGenerator import SlurmGenerator
from lib.Configuration import SlurmConfig
from lib.Measurement import RunResultSeries
import typing
class Runner:
  """ Common base for all runners; holds the PIRA configuration and the profile sink. """

  def __init__(self, configuration: PiraConfig, sink):
    """ Runner are initialized once with a PiraConfiguration """
    self._config = configuration
    self._sink = sink

  def has_sink(self) -> bool:
    """ True iff a usable sink is attached (neither None nor a NopSink). """
    sink = self._sink
    return not (sink is None or isinstance(sink, S.NopSink))

  def get_sink(self):
    """ Accessor for the attached profile sink. """
    return self._sink
class LocalBaseRunner(Runner):
  """
  The base class for execution on the same machine. It implements the basic *run* method, which invokes the target.
  """

  def __init__(self, configuration: PiraConfig, sink):
    """ Runner are initialized once with a PiraConfiguration """
    super().__init__(configuration, sink)

  def run(
      self,
      target_config: TargetConfig,
      instrument_config: InstrumentConfig,
  ) -> float:
    """ Implements the actual invocation.

    Resolves the 'run' functor for the build/target/flavor, invokes the target
    (actively or passively) and returns the measured runtime in seconds.
    :raises RuntimeError: if the invocation fails.
    """
    functor_manager = F.FunctorManager()
    run_functor = functor_manager.get_or_load_functor(target_config.get_build(),
                                                      target_config.get_target(),
                                                      target_config.get_flavor(), 'run')
    # Default kwargs are handed to the user-provided functor; expose the
    # utility module and the MPI wrapper preload to it.
    default_provider = D.BackendDefaults()
    kwargs = default_provider.get_default_kwargs()
    kwargs['util'] = U
    kwargs['LD_PRELOAD'] = default_provider.get_MPI_wrap_LD_PRELOAD()
    runtime = .0

    if run_functor.get_method()['active']:
      # Active functors execute the target themselves; no meaningful timing is
      # available, so a dummy runtime of 1.0 is reported.
      run_functor.active(target_config.get_target(), **kwargs)
      L.get_logger().log('For the active functor we can barely measure runtime', level='warn')
      runtime = 1.0

    # NOTE(review): this block also runs after an active invocation above and
    # may overwrite the dummy runtime -- confirm that is intended.
    try:
      U.change_cwd(target_config.get_place())
      invoke_arguments = target_config.get_args_for_invocation()
      kwargs['args'] = invoke_arguments
      if invoke_arguments is not None:
        L.get_logger().log('LocalBaseRunner::run: (args) ' + str(invoke_arguments))
      # Passive functors only return the command line; PIRA executes and times it.
      command = run_functor.passive(target_config.get_target(), **kwargs)
      _, runtime = U.shell(command, time_invoc=True)
      L.get_logger().log('LocalBaseRunner::run::passive_invocation -> Returned runtime: ' +
                         str(runtime),
                         level='debug')

    except Exception as e:
      L.get_logger().log('LocalBaseRunner::run Exception\n' + str(e), level='error')
      raise RuntimeError('LocalBaseRunner::run caught exception. ' + str(e))

    # TODO: Insert the data into the database
    return runtime
class LocalRunner(LocalBaseRunner):
  """
  The LocalRunner invokes the target application with the first argument string given in the config.
  For scalability studies, i.e., iterate over all given input sizes, use the LocalScalingRunner.
  """

  def __init__(self, configuration: PiraConfig, sink):
    """ Runner are initialized once with a PiraConfiguration """
    super().__init__(configuration, sink)
    # Number of repeated executions per measurement, from the invocation config.
    self._num_repetitions = InvocationConfig.get_instance().get_num_repetitions()

  def get_num_repetitions(self) -> int:
    """ Number of repetitions performed per measurement. """
    return self._num_repetitions

  def do_baseline_run(self, target_config: TargetConfig) -> M.RunResult:
    """ Run the uninstrumented (vanilla) target for all repetitions and return the RunResultSeries. """
    L.get_logger().log('LocalRunner::do_baseline_run')
    accu_runtime = .0

    if not target_config.has_args_for_invocation():
      L.get_logger().log(
          'LocalRunner::do_baseline_run: BEGIN not target_config.has_args_for_invocation()')
      # This runner only takes into account the first argument string (if not already set)
      args = self._config.get_args(target_config.get_build(), target_config.get_target())
      L.get_logger().log('LocalRunner::do_baseline_run: args: ' + str(args))
      target_config.set_args_for_invocation(args[0])
      L.get_logger().log(
          'LocalRunner::do_baseline_run: END not target_config.has_args_for_invocation()')

    # TODO Better evaluation of the obtained timings.
    time_series = M.RunResultSeries(reps=self.get_num_repetitions())
    for y in range(0, self.get_num_repetitions()):
      L.get_logger().log('LocalRunner::do_baseline_run: Running iteration ' + str(y), level='debug')
      # A default-constructed InstrumentConfig means: no instrumentation.
      l_runtime = self.run(target_config, InstrumentConfig())
      accu_runtime += l_runtime
      time_series.add_values(l_runtime, self.get_num_repetitions())

    run_result = M.RunResult(accu_runtime, self.get_num_repetitions())
    # 'perf'-level lines are consumed downstream; keep their format stable.
    L.get_logger().log('[Vanilla][RUNTIME] Vanilla avg: ' + str(run_result.get_average()) + '\n',
                       level='perf')
    L.get_logger().log('[Vanilla][RTSeries] Average: ' + str(time_series.get_average()),
                       level='perf')
    L.get_logger().log('[Vanilla][RTSeries] Median: ' + str(time_series.get_median()), level='perf')
    L.get_logger().log('[Vanilla][RTSeries] Stdev: ' + str(time_series.get_stdev()), level='perf')
    L.get_logger().log('[Vanilla][REPETITION SUM] Vanilla sum: ' +
                       str(time_series.get_accumulated_runtime()),
                       level='perf')
    return time_series

  def do_profile_run(self, target_config: TargetConfig, instr_iteration: int) -> M.RunResult:
    """ Run the instrumented target for all repetitions, feed each profile to the sink, and return the RunResultSeries. """
    L.get_logger().log('LocalRunner::do_profile_run: Received instrumentation file: ' +
                       target_config.get_instr_file(),
                       level='debug')
    # Prepare the Score-P environment (experiment directory, filter, etc.).
    scorep_helper = M.ScorepSystemHelper(self._config)
    instrument_config = InstrumentConfig(True, instr_iteration)
    scorep_helper.set_up(target_config, instrument_config)
    runtime = .0

    if not target_config.has_args_for_invocation():
      # This runner only takes into account the first argument string (if not already set)
      args = self._config.get_args(target_config.get_build(), target_config.get_target())
      target_config.set_args_for_invocation(args[0])

    time_series = M.RunResultSeries(reps=self.get_num_repetitions())
    for y in range(0, self._num_repetitions):
      L.get_logger().log('LocalRunner::do_profile_run: Running instrumentation iteration ' + str(y),
                         level='debug')
      l_runtime = self.run(target_config, instrument_config)
      runtime += l_runtime
      time_series.add_values(l_runtime, self.get_num_repetitions())
      # Enable further processing of the resulting profile
      self._sink.process(scorep_helper.get_exp_dir(), target_config, instrument_config)

    run_result = M.RunResult(runtime, self.get_num_repetitions())
    # 'perf'-level lines are consumed downstream; keep their format stable.
    L.get_logger().log('[Instrument][RUNTIME] $' + str(instr_iteration) + '$ ' +
                       str(run_result.get_average()),
                       level='perf')
    L.get_logger().log('[Instrument][RTSeries] Average: ' + str(time_series.get_average()),
                       level='perf')
    L.get_logger().log('[Instrument][RTSeries] Median: ' + str(time_series.get_median()),
                       level='perf')
    L.get_logger().log('[Instrument][RTSeries] Stdev: ' + str(time_series.get_stdev()),
                       level='perf')

    return time_series
class LocalScalingRunner(LocalRunner):
  """
  The LocalScalingRunner performs measurements related to Extra-P modelling.
  The arguments given in the configuration are treated as the different input sizes, i.e.,
  the first string is the smallest input configuration, the second is the next larger configuration, etc.
  """

  def __init__(self, configuration: PiraConfig, sink):
    super().__init__(configuration, sink)

  def do_profile_run(self, target_config: TargetConfig, instr_iteration: int) -> M.RunResult:
    """ Run the instrumented target once per configured input size and merge all results. """
    L.get_logger().log('LocalScalingRunner::do_profile_run')
    # We run as many experiments as we have input data configs
    # TODO: How to handle the model parameter <-> input parameter relation, do we care?
    args = self._config.get_args(target_config.get_build(), target_config.get_target())
    # TODO: How to handle multiple MeasurementResult items? We get a vector of these after this function.
    #run_result = M.RunResult()
    run_result = M.RunResultSeries(reps=self.get_num_repetitions(), num_data_sets=5)
    for arg_cfg in args:
      # Call the runner method with the correct arguments.
      target_config.set_args_for_invocation(arg_cfg)
      rr = super().do_profile_run(target_config, instr_iteration)
      run_result.add_from(rr)

    # At this point we have all the data we need to construct an Extra-P model
    return run_result

  def do_baseline_run(self, target_config: TargetConfig) -> M.RunResult:
    """ Run the vanilla target once per configured input size and merge all results. """
    L.get_logger().log('LocalScalingRunner::do_baseline_run')
    args = self._config.get_args(target_config.get_build(), target_config.get_target())
    #run_result = M.RunResult()
    run_result = M.RunResultSeries(reps=self.get_num_repetitions(), num_data_sets=5)
    for arg_cfg in args:
      target_config.set_args_for_invocation(arg_cfg)
      rr = super().do_baseline_run(target_config)
      run_result.add_from(rr)

    return run_result
class SlurmBaseRunner(Runner):
  """
  Base for all slurm runners.
  """

  def __init__(self, configuration: PiraConfig, slurm_configuration: SlurmConfig,
               batch_interface: BatchSystemInterface, sink):
    """ Store the SLURM configuration and the batch system interface next to the PIRA config. """
    super().__init__(configuration, sink)
    self._slurm_config = slurm_configuration
    self.batch_interface = batch_interface

  def add_run_command(self, target_config: TargetConfig,
                      instrument_config: InstrumentConfig) -> str:
    """
    Prepares the command and adds it via the batch interface.
    Returns a key to identify the results after batch system jobs ran.
    """
    functor_manager = F.FunctorManager()
    run_functor = functor_manager.get_or_load_functor(target_config.get_build(),
                                                      target_config.get_target(),
                                                      target_config.get_flavor(), 'run')
    # Default kwargs are handed to the user-provided functor; expose the
    # utility module and the MPI wrapper preload to it.
    default_provider = D.BackendDefaults()
    kwargs = default_provider.get_default_kwargs()
    kwargs['util'] = U
    kwargs['LD_PRELOAD'] = default_provider.get_MPI_wrap_LD_PRELOAD()
    runtime = .0

    if run_functor.get_method()['active']:
      # Active functors would run the target locally, which contradicts
      # dispatching to the cluster; abort.
      L.get_logger().log(
          'SlurmBaseRunner::add_run_command: Active running is not possible while '
          'dispatching to a cluster. Exiting.',
          level='error')
      raise RuntimeError('Active running is not possible while dispatching to a cluster.')

    try:
      U.change_cwd(target_config.get_place())
      invoke_arguments = target_config.get_args_for_invocation()
      kwargs['args'] = invoke_arguments
      if invoke_arguments is not None:
        L.get_logger().log('SlurmBaseRunner::add_run_command: (args) ' + str(invoke_arguments),
                           level="debug")
      command = run_functor.passive(target_config.get_target(), **kwargs)
      # We add the command to the batch config via the interface here. We have to care
      # about telling the interface to run everything later, and care about retrieving
      # to results by "key" later to return them.
      key = U.generate_random_string()
      L.get_logger().log("SlurmBaseRunner::add_run_command: Using key to reference results: " + key,
                         level="debug")
      # Repetition not used, determined by the slurm config
      self.batch_interface.add_timed_command(key=key, cmd=command)
      L.get_logger().log(
          f"SlurmBaseRunner::add_run_command: Added command via batch interface: {command}",
          level="debug")

    except Exception as e:
      L.get_logger().log('SlurmBaseRunner::add_run_command: Exception\n' + str(e), level='error')
      raise RuntimeError('SlurmBaseRunner::add_run_command: Caught exception. ' + str(e))

    # TODO: Insert the data into the database
    return key

  def dispatch(self, key: str) -> int:
    """
    Start the execution of the added command(s) by dispatching to the cluster.
    :return: The job_id of the dispatched job.
    """
    L.get_logger().log(f"SlurmBaseRunner::run: Dispatch added commands.", level="debug")
    job_id = self.batch_interface.dispatch(key)
    return job_id

  def wait(self) -> None:
    """
    Wait for the execution of commands to finish.
    """
    L.get_logger().log(f"SlurmBaseRunner::run: Waiting for added commands to finish.",
                       level="debug")
    self.batch_interface.wait()

  def get_runtime(self, key: str, repetition: int) -> float:
    """
    Return the results from a run, by requesting the batch system interface.
    """
    L.get_logger().log(
        f"SlurmBaseRunner::get_runtime: Reading runtime for key {key}, repetition"
        f" {repetition}",
        level="debug")
    # get_results returns a tuple; index 0 carries the runtime.
    return self.batch_interface.get_results(key, repetition)[0]
class SlurmRunner(SlurmBaseRunner):
  """
  The SlurmRunner invokes the target application with the first argument string given in the config.
  For scalability studies, i.e., iterate over all given input sizes, use the SlurmScalingRunner.
  """

  def __init__(self, configuration: PiraConfig, slurm_configuration: SlurmConfig,
               batch_interface: BatchSystemInterface, sink):
    """ Configure the batch interface with a SLURM script generator and read the repetition count. """
    super().__init__(configuration, slurm_configuration, batch_interface, sink)
    generator = SlurmGenerator(slurm_configuration)
    generator.add_modules()
    self.batch_interface.configure(slurm_configuration, generator)
    self._num_repetitions = InvocationConfig.get_instance().get_num_repetitions()

  def get_num_repetitions(self) -> int:
    """ Number of repetitions performed per measurement. """
    return self._num_repetitions

  def do_profile_run(self, target_config: TargetConfig, instr_iteration: int) -> M.RunResultSeries:
    """ Dispatch the instrumented target to SLURM, collect all repetitions and return the RunResultSeries. """
    L.get_logger().log('SlurmRunner::do_profile_run: Received instrumentation file: ' +
                       target_config.get_instr_file(),
                       level='debug')
    # Prepare the Score-P environment (experiment directory, filter, etc.).
    scorep_helper = M.ScorepSystemHelper(self._config)
    instrument_config = InstrumentConfig(True, instr_iteration)
    scorep_helper.set_up(target_config, instrument_config)
    # List of tupels (iteration number, key)
    command_result_map: typing.List[typing.Tuple[int, str]] = []

    if not target_config.has_args_for_invocation():
      # This runner only takes into account the first argument string (if not already set)
      args = self._config.get_args(target_config.get_build(), target_config.get_target())
      target_config.set_args_for_invocation(args[0])

    # scorep_var_export=True makes each SLURM array task write into its own
    # Score-P experiment directory (suffix = array task id).
    self.dispatch_run(target_config, instrument_config, command_result_map, scorep_var_export=True)
    self.wait_run()

    time_series = M.RunResultSeries(reps=self.get_num_repetitions())
    # instead of setting append_iteration, we need to copy back one of the cubes to the original
    # cube dir here, this is where the analyzer expects to find it
    # this ensures, that we know which cube will be used in the analyzer, instead of just using one of them.
    # one of them would work, but it can be good to know which one it is for error tracing.
    last_rep = str(self.get_num_repetitions() - 1)
    last_rep_dir = f"{scorep_helper.get_exp_dir()}-{last_rep}"
    cube_src = f"{U.get_cubex_file(last_rep_dir, target_config.get_target(), target_config.get_flavor())}"
    cube_to = f"{U.get_cubex_file(scorep_helper.get_exp_dir(), target_config.get_target(), target_config.get_flavor())}"
    U.copy_file(f"{cube_src}", f"{cube_to}")
    L.get_logger().log(
        f"SlurmRunner::do_profile_run: Copied cube form last repetition {cube_src} to {cube_to} for "
        f"examination by the analyzer.",
        level="debug")
    del last_rep, last_rep_dir, cube_src, cube_to

    self.collect_run(command_result_map, time_series, scorep_helper, target_config,
                     instrument_config, instr_iteration)
    # cleanup for the next iteration
    self.batch_interface.cleanup()
    return time_series

  def do_baseline_run(self, target_config: TargetConfig) -> RunResultSeries:
    """
    Do the baseline run.
    """
    L.get_logger().log('SlurmRunner::do_baseline_run', level="debug")
    # List of tupels (iteration number, key)
    command_result_map: typing.List[typing.Tuple[int, str]] = []

    if not target_config.has_args_for_invocation():
      L.get_logger().log(
          'SlurmRunner::do_baseline_run: BEGIN not target_config.has_args_for_invocation()',
          level="debug")
      # This runner only takes into account the first argument string (if not already set)
      args = self._config.get_args(target_config.get_build(), target_config.get_target())
      L.get_logger().log('SlurmRunner::do_baseline_run: args: ' + str(args), level="debug")
      target_config.set_args_for_invocation(args[0])
      L.get_logger().log(
          'SlurmRunner::do_baseline_run: END not target_config.has_args_for_invocation()',
          level="debug")

    self.dispatch_run(target_config, InstrumentConfig(), command_result_map)
    self.wait_run()
    # TODO Better evaluation of the obtained timings.
    time_series = M.RunResultSeries(reps=self.get_num_repetitions())
    self.collect_run(command_result_map, time_series)
    # cleanup for the next iteration
    self.batch_interface.cleanup()
    return time_series

  # Methods for each part of the pipeline. They are puzzled togehter in different orders
  # by the do_baseline_run and do_profile_run methods of each this and the derived Scalability runner
  # in order in which they might be used

  def dispatch_run(self,
                   target_config: TargetConfig,
                   instrumentation_config: InstrumentConfig,
                   command_result_map: typing.List[typing.Tuple[int, str]],
                   scorep_var_export: bool = False) -> int:
    """
    Set up and dispatch a run to SLURM.
    :param target_config: the target config that should be used.
    :param instrumentation_config: the instrumentation configuration to be used by the run.
    :param command_result_map: A out parameter. Will be used to add the key and repetitions
    to the upper methods command_result_map for the run to know about.
    :param scorep_var_export: For the Score-P setup to work with parallel repetitions, it is needed
    that the SCOREP_EXPERIMENT_DIR env var is modified before and together with the actual execution of the
    target, to ensure all repetitions can be resolved later. Set this to true, if you want SLURM to care about this.
    :return: Additionally returns the job_id of the dispatched job. Might be used for
    dependency modelling in scalability experiments (derived runner).
    """
    L.get_logger().log('SlurmRunner::set_up_run: Adding command for repetitions', level='debug')
    key = self.add_run_command(target_config, instrumentation_config)
    if scorep_var_export:
      # add export command for SCOREP_EXPERIMENT_DIRECTORY
      self.batch_interface.add_preparation_command(
          key, "export SCOREP_EXPERIMENT_DIRECTORY="
          "$SCOREP_EXPERIMENT_DIRECTORY-$SLURM_ARRAY_TASK_ID")
    for y in range(0, self.get_num_repetitions()):
      # Add key for all repetitions
      # We can use this key, and the iteration number to retreive the result
      command_result_map.append((y, key))
    L.get_logger().log('SlurmRunner::dispatch_run: Running all iterations on batch system',
                       level='debug')
    job_id = self.dispatch(key)
    return job_id

  def wait_run(self):
    """
    Wait for the runs job to finish.
    """
    self.wait()

  def collect_run(self,
                  command_result_map,
                  time_series,
                  scorep_helper=None,
                  target_config=None,
                  instrument_config=None,
                  instr_iteration: int = None,
                  append_repetition: bool = False) -> float:
    """
    Collect the results for all dispatched runs.
    :param command_result_map: The list of entries to collect results for.
    :param time_series: Out parameter. The RunResultSeries to which the runtimes should be
    added, to later evaluation form the above methods.
    All following parameters: Only needed if called form a do_profile_run method:
    :param scorep_helper: The scorep helper object.
    :param target_config: The target config.
    :param instrument_config: The instrumentation config.
    :param instr_iteration: The current instrumentation iteration (for printing).
    :param append_repetition: If the repetition should be appended to the experiment-dir of score-p.
    :return: The runtime of the job.
    """
    accu_runtime = 0
    for i, (repetition, key) in enumerate(command_result_map):
      l_runtime = self.get_runtime(key, repetition)
      accu_runtime += l_runtime
      time_series.add_values(l_runtime, self.get_num_repetitions())
      if scorep_helper is not None:
        # Enable further processing of the resulting profile
        self._sink.process(
            f"{scorep_helper.get_exp_dir()}{'-'+str(repetition) if append_repetition else ''}",
            target_config, instrument_config)
    run_result = M.RunResult(accu_runtime, self.get_num_repetitions())
    # 'perf'-level lines are consumed downstream; keep their format stable.
    if scorep_helper is not None:
      # instrumentation prints
      L.get_logger().log('[Instrument][RUNTIME] $' + str(instr_iteration) + '$ ' +
                         str(run_result.get_average()),
                         level='perf')
      L.get_logger().log('[Instrument][RTSeries] Average: ' + str(time_series.get_average()),
                         level='perf')
      L.get_logger().log('[Instrument][RTSeries] Median: ' + str(time_series.get_median()),
                         level='perf')
      L.get_logger().log('[Instrument][RTSeries] Stdev: ' + str(time_series.get_stdev()),
                         level='perf')
    else:
      # vanilla perf prints
      L.get_logger().log('[Vanilla][RUNTIME] Vanilla avg: ' + str(run_result.get_average()) + '\n',
                         level='perf')
      L.get_logger().log('[Vanilla][RTSeries] Average: ' + str(time_series.get_average()),
                         level='perf')
      L.get_logger().log('[Vanilla][RTSeries] Median: ' + str(time_series.get_median()),
                         level='perf')
      L.get_logger().log('[Vanilla][RTSeries] Stdev: ' + str(time_series.get_stdev()), level='perf')
      L.get_logger().log('[Vanilla][REPETITION SUM] Vanilla sum: ' +
                         str(time_series.get_accumulated_runtime()),
                         level='perf')
    return accu_runtime
class SlurmScalingRunner(SlurmRunner):
  """
  The SlurmScalingRunner performs measurements related to Extra-P modelling.
  The arguments given in the configuration are treated as the different input sizes, i.e.,
  the first string is the smallest input configuration, the second is the next larger configuration, etc.

  About the "interface" option in scalability experiments:
  Dispatching a scalability experiment, means dispatching
  multiple jobs to the same time. This way the may run in parallel. If self.force_sequential is set
  dependencies between the jobs will be added, SLURM will take care that they actually run sequential.
  So in summary: First dispatch each job, wait for them all to finish, and gather all their results.
  But this brings some problems with the "interface" option along:
  1) We need to make sure that we do use a non-blocking dispatch method - otherwise it will block until
  finished in the dispatch loop, which means it will be sequential again. So, valid is "pyslurm", and "os",
  but not "sbatch-wait".
  2) We need to make sure that we do use a non-blocking wait function for each job, since we now need to
  wait for a set of multiple jobs. Sadly, "pyslurm" as well as "sbatch-wait" (see bevor), are blocking.
  The only method which can handle waiting for a group of jobs at the moment is to use "os" (Since it
  is pulling and not handling the blocking of to sbatch or pyslurm).
  That's because we're manipulating the batch interfaces "interface" option before these steps here, to adhere
  to these rules.
  """

  def __init__(self, configuration: PiraConfig, slurm_configuration: SlurmConfig,
               batch_interface: BatchSystemInterface, sink):
    """
    Constructor.
    """
    super().__init__(configuration, slurm_configuration, batch_interface, sink)
    # force scalability if set in config
    self.force_sequential = slurm_configuration.force_sequential

  def do_profile_run(self, target_config: TargetConfig, instr_iteration: int) -> RunResultSeries:
    """
    Slurm profile scaling run: dispatch one job per configured input size,
    wait for the whole group, then collect and merge all results.
    """
    L.get_logger().log('SlurmScalingRunner::do_baseline_run', level="debug")
    # saving the batch interface from the config, to restore it later
    config_batch_interface = self.batch_interface.interface
    args = self._config.get_args(target_config.get_build(), target_config.get_target())
    # List of command_result_maps for all args
    cmd_maps = []
    # list to save the job_ids
    jobs = []
    # check if interface for dispatching adheres to rule 1), see class docstring
    if self.batch_interface.interface == SlurmInterfaces.SBATCH_WAIT:
      L.get_logger().log(
          "SlurmScalingRunner::do_profile_run: Interface 'sbatch-wait' is a blocking "
          "dispatch interface, which cannot be used with scaling experiments."
          " Downgrading to 'os'.",
          level="warn")
      self.batch_interface.interface = SlurmInterfaces.OS
    # map to save setup for the score-p related stuff
    tool_map = {}
    # dispatch job for each arg
    for i, arg in enumerate(args):
      # setup args for invocation
      target_config.set_args_for_invocation(arg)
      # set up score-p related stuff upfront (needs to be initialized bevor the target runs)
      L.get_logger().log('SlurmScalingRunner::do_profile_run: Received instrumentation file: ' +
                         target_config.get_instr_file(),
                         level='debug')
      scorep_helper = M.ScorepSystemHelper(self._config)
      instrument_config = InstrumentConfig(True, instr_iteration)
      # give the arg along, to set up the experiment dir of score-p:
      # this way we can run multiple args in parallel, and still retrieving
      # the results individually later in this iteration
      scorep_helper.set_up(target_config, instrument_config, arg)
      tool_map[i] = (scorep_helper, instrument_config)
      # List of tupels (iteration number, key)
      command_result_map: typing.List[typing.Tuple[int, str]] = []
      # if force sequential: add dependency to job_id before
      if self.force_sequential and i > 0:
        self.batch_interface.generator.config.dependencies = f"afterok:{jobs[i - 1]}"
      else:
        self.batch_interface.generator.config.dependencies = ""
      # set scorep_var_export to split repetitions
      # BUGFIX: previously a fresh default InstrumentConfig() was dispatched
      # here (copied from the baseline path), discarding the configured
      # instrument_config; pass the configured one, consistent with
      # SlurmRunner.do_profile_run.
      job_id = self.dispatch_run(target_config,
                                 instrument_config,
                                 command_result_map,
                                 scorep_var_export=True)
      jobs.append(job_id)
      cmd_maps.append(command_result_map)

    # waiting needs to be done with non-blocking wait 'os' - rule 2), see class docstring
    if self.batch_interface.interface != SlurmInterfaces.OS:
      L.get_logger().log(
          f"SlurmScalingRunner::do_profile_run: {str(self.batch_interface.interface)} is a blocking "
          "wait interface, which cannot be used with scaling experiments."
          " Downgrading to 'os'.",
          level="warn")
      self.batch_interface.interface = SlurmInterfaces.OS
    # wait for the group of all jobs to finish
    self.wait_run()

    run_result = M.RunResultSeries(reps=self.get_num_repetitions(), num_data_sets=5)
    for i, (cmd_map, arg) in enumerate(zip(cmd_maps, args)):
      # args overwrite each other, so we have to do this also before evaluating, to process for the correct args
      target_config.set_args_for_invocation(arg)
      # get score-p related helpers again, were set up in dispatch loop
      scorep_helper = tool_map[i][0]
      instrument_config = tool_map[i][1]
      # init timing saving container and read results
      time_series = M.RunResultSeries(reps=self.get_num_repetitions())
      # set append_repetition to read from repetition cube-dirs - opponent to scorep_var_export from above
      self.collect_run(cmd_map,
                       time_series,
                       scorep_helper,
                       target_config,
                       instrument_config,
                       instr_iteration,
                       append_repetition=True)
      run_result.add_from(time_series)

    self.batch_interface.cleanup()
    # restore the batch interface
    self.batch_interface.interface = config_batch_interface
    return run_result

  def do_baseline_run(self, target_config: TargetConfig) -> RunResultSeries:
    """
    Slurm baseline scaling run.
    """
    L.get_logger().log('SlurmScalingRunner::do_baseline_run', level="debug")
    # saving the batch interface from the config, to restore it later
    config_batch_interface = self.batch_interface.interface
    args = self._config.get_args(target_config.get_build(), target_config.get_target())
    # List of command_result_maps for all args
    cmd_maps = []
    jobs = []
    # check if interface for dispatching adheres to rule 1), see class docstring
    if self.batch_interface.interface == SlurmInterfaces.SBATCH_WAIT:
      L.get_logger().log(
          "SlurmScalingRunner::do_baseline_run: Interface 'sbatch-wait' is a blocking "
          "dispatch interface, which cannot be used with scaling experiments. "
          "Downgrading to 'os'.",
          level="warn")
      self.batch_interface.interface = SlurmInterfaces.OS
    # dispatch job for each arg
    for i, arg in enumerate(args):
      # setup args for invocation
      target_config.set_args_for_invocation(arg)
      # List of tupels (iteration number, key)
      command_result_map: typing.List[typing.Tuple[int, str]] = []
      # if force sequential: add dependency to job_id before
      if self.force_sequential and i > 0:
        self.batch_interface.generator.config.dependencies = f"afterok:{jobs[i-1]}"
      else:
        self.batch_interface.generator.config.dependencies = ""
      job_id = self.dispatch_run(target_config, InstrumentConfig(), command_result_map)
      jobs.append(job_id)
      cmd_maps.append(command_result_map)

    # waiting needs to be done with non-blocking wait 'os' - rule 2)s
    if self.batch_interface.interface != SlurmInterfaces.OS:
      L.get_logger().log(
          f"SlurmScalingRunner::do_baseline_run: {str(self.batch_interface.interface)} is a blocking "
          "wait interface, which cannot be used with scaling experiments."
          " Downgrading to 'os'.",
          level="warn")
      self.batch_interface.interface = SlurmInterfaces.OS
    # wait for the group of all jobs to finish
    self.wait_run()

    run_result = M.RunResultSeries(reps=self.get_num_repetitions(), num_data_sets=5)
    # avoid shadowing the builtin 'map'
    for cmd_map in cmd_maps:
      time_series = M.RunResultSeries(reps=self.get_num_repetitions())
      self.collect_run(cmd_map, time_series)
      run_result.add_from(time_series)

    self.batch_interface.cleanup()
    # restore the batch interface
    self.batch_interface.interface = config_batch_interface
    return run_result
| 31,852 | 44.374644 | 126 | py |
PIRA | PIRA-master/lib/ConfigurationLoader.py | """
File: ConfigurationLoader.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module to read the PIRA configuration from file.
"""
import lib.Utility as U
import lib.Logging as L
from lib.BatchSystemBackends import BatchSystemInterface, BatchSystemBackendType, SlurmBackend, SlurmInterfaces, \
BatchSystemTimingType
from lib.Configuration import PiraConfig, PiraConfigII, PiraItem, PiraConfigAdapter, PiraConfigErrorException, \
InvocationConfig, SlurmConfig
from lib.ArgumentMapping import CmdlineCartesianProductArgumentMapper, CmdlineLinearArgumentMapper, ArgumentMapperFactory
from lib.Configuration import BatchSystemHardwareConfig
import os
import sys
import json
import typing
"""
These defines are the JSON field names for the configuration
"""
# Top-level description / build keys
_BUILDS = 'builds'
_DESC = 'description'
_DIRS = 'directories'
_ITEMS = 'items'
_FLAVORS = 'flavors'
# Global flavor handling
_G_FLAVORS = 'glob-flavors'
_G_SUBMITTER = 'glob-submitter'
_PREFIX = 'prefix'
_BUILDERS = 'builders'
# Per-item run description keys
_RUN = 'run'
_ARGS = 'args'
_RUNNER = 'runner'
_SUBMITTER = 'submitter'
_BATCH_SCRIPT = 'batch_script'
_INSTRUMENT_ANALYSIS = 'instrument-analysis'
class ConfigurationLoader:
  """
  Loads a provided configuration file.

  Parses the (PIRA v1 style) JSON configuration referenced by the current
  InvocationConfig into a PiraConfig. Parsed files are cached per path, so
  each configuration file is read and parsed at most once.
  """

  def __init__(self):
    # Maps config-file path -> parsed PiraConfig.
    self.config_cache = {}

  def load_conf(self) -> PiraConfig:
    """
    Parse (or fetch from cache) the configuration file of this invocation.

    Exits PIRA on a semantically invalid configuration
    (PiraConfigErrorException). Any other error is logged at error level
    and None is returned.
    """
    config_file = InvocationConfig.get_instance().get_path_to_cfg()
    if config_file in self.config_cache:
      return self.config_cache[config_file]

    try:
      file_content = U.read_file(config_file)
      json_tree = json.loads(file_content)
      configuration = self.construct_from_json(json_tree)
      self.config_cache[config_file] = configuration
      return configuration

    except PiraConfigErrorException as e:
      L.get_logger().log(str(e), level='error')
      sys.exit()

    except Exception as e:
      # Route the failure through the logger instead of a bare print, so it
      # ends up on the log tape like every other diagnostic.
      L.get_logger().log('ConfigurationLoader::load_conf: Exception occurred ' + str(e),
                         level='error')

  def construct_from_json(self, json_tree) -> PiraConfig:
    """
    Translate a parsed JSON tree into a fully populated PiraConfig.

    :param json_tree: dict resulting from json.loads of the config file.
    :returns: populated PiraConfig (marked non-empty).
    """
    conf = PiraConfig()
    # json_to_canonic can construct lists
    conf.set_build_directories(U.json_to_canonic(json_tree[_DESC][_DIRS]))
    conf.populate_build_dict(conf.directories)
    conf.set_global_flavors(U.json_to_canonic(json_tree[_DESC][_G_FLAVORS]))

    # Register the submitter for every globally declared flavor.
    for glob_flav in conf.get_global_flavors():
      conf.set_glob_submitter(U.json_to_canonic(json_tree[_DESC][_G_SUBMITTER][glob_flav]),
                              glob_flav)

    # Per build directory: prefix, item list, and all per-item settings.
    for build_dir in conf.directories:
      conf.set_prefix(U.json_to_canonic(json_tree[_DESC][_BUILDS][build_dir][_PREFIX]), build_dir)
      conf.set_items(U.json_to_canonic(json_tree[_DESC][_BUILDS][build_dir][_ITEMS]), build_dir)
      conf.initialize_item_dict(build_dir, conf.builds[build_dir][_ITEMS])

      for item in conf.builds[build_dir][_ITEMS]:
        conf.set_item_instrument_analysis(
            U.json_to_canonic(
                json_tree[_DESC][_BUILDS][build_dir][_FLAVORS][_INSTRUMENT_ANALYSIS][item]),
            build_dir, item)
        conf.set_item_builders(
            U.json_to_canonic(json_tree[_DESC][_BUILDS][build_dir][_FLAVORS][_BUILDERS][item]),
            build_dir, item)
        conf.set_item_args(
            U.json_to_canonic(json_tree[_DESC][_BUILDS][build_dir][_FLAVORS][_RUN][item][_ARGS]),
            build_dir, item)
        conf.set_item_runner(
            U.json_to_canonic(json_tree[_DESC][_BUILDS][build_dir][_FLAVORS][_RUN][item][_RUNNER]),
            build_dir, item)
        conf.set_item_submitter(
            U.json_to_canonic(
                json_tree[_DESC][_BUILDS][build_dir][_FLAVORS][_RUN][item][_SUBMITTER]), build_dir,
            item)
        conf.set_item_batch_script(
            U.json_to_canonic(
                json_tree[_DESC][_BUILDS][build_dir][_FLAVORS][_RUN][item][_BATCH_SCRIPT]),
            build_dir, item)
        conf.set_flavours(U.json_to_canonic(json_tree[_DESC][_BUILDS][build_dir][_FLAVORS][item]),
                          build_dir)
        conf.set_item_flavor(
            U.json_to_canonic(json_tree[_DESC][_BUILDS][build_dir][_FLAVORS][item]), build_dir,
            item)

    conf._empty = False
    return conf
class SimplifiedConfigurationLoader:
  """
  Loads the simplified (PIRA v2 style) JSON configuration and exposes it
  through a PiraConfigAdapter so it can be used like a PiraConfig.
  """

  def __init__(self):
    self._config = PiraConfigII()
    self.base_mapper = None

  def load_conf(self) -> PiraConfig:
    """
    Read the configuration file of this invocation.

    :returns: PiraConfigAdapter wrapping the parsed PiraConfigII.
    :raises RuntimeError: if the configured path is not a file.
    """
    config_file = InvocationConfig.get_instance().get_path_to_cfg()
    if not U.is_file(config_file):
      raise RuntimeError(
          'SimplifiedConfigurationLoader::load_conf: Invalid config file location "' + config_file +
          '" [no such file].')

    config_abs = U.get_absolute_path(config_file)
    config_abs_path = config_abs[:config_abs.rfind('/')]
    self._config.set_absolute_base_path(config_abs_path)

    try:
      file_content = U.read_file(config_file)
      json_tree = json.loads(file_content)
      self.parse_from_json(json_tree)
    except Exception as e:
      # Fixed the previously unbalanced quote in this log message.
      L.get_logger().log('SimplifiedConfigurationLoader::load_conf: Caught exception "' + str(e) +
                         '"')

    return PiraConfigAdapter(self._config)

  def is_escaped(self, string: str) -> bool:
    """True when the string references a directory alias, e.g. '%astar'."""
    return string.startswith('%')

  def get_parameter(self, item_tree, item_key):
    """
    Extract the run options ('argmap') of an item from the JSON tree.

    :returns: dict with the mapper name, optionally the 'pira-file' names,
              and the canonicalized argument map under 'argmap'.
    """
    run_opts = {}
    run_opts['mapper'] = U.json_to_canonic(item_tree[item_key]['argmap']['mapper'])

    params = {}
    param_tree = item_tree[item_key]['argmap']
    file_mapper = False

    if 'pira-file' in param_tree:
      run_opts['pira-file'] = U.json_to_canonic(param_tree['pira-file']['names'])
      param_tree = param_tree['pira-file']
      file_mapper = True

    for param in param_tree:
      parameter = U.json_to_canonic(param)
      if param == 'mapper':
        # Consumed above into run_opts['mapper'].
        continue
      if file_mapper and param == 'names':
        # Consumed above into run_opts['pira-file'].
        continue

      value = U.json_to_canonic(param_tree[param])
      # For the Slurm integration, json_to_canonic was updated to respect the
      # types int and None. However, PIRA expects argmap parameters as
      # strings, so normalize ints (and ints inside lists) back to str here.
      if isinstance(value, int):
        value = str(value)
      elif isinstance(value, list):
        value = [str(p) if isinstance(p, int) else p for p in value]
      params[parameter] = value

    run_opts['argmap'] = params
    return run_opts

  def create_item_from_json(self, item_key: str, item_tree):
    """
    Build a PiraItem from its JSON subtree.

    Environment variables in directory attributes are expanded; an empty
    'analyzer' entry falls back to the bundled PGIS installation.
    """
    pira_item = PiraItem(item_key)

    analyzer_dir = item_tree[item_key]['analyzer']
    if analyzer_dir == '':
      analyzer_dir = U.get_base_dir(__file__) + '/../extern/install/pgis/bin'
      L.get_logger().log('Analyzer: using analyzer default: ' + analyzer_dir, level='debug')

    cubes_dir = item_tree[item_key]['cubes']
    flavors = item_tree[item_key]['flavors']
    functors_base_path = item_tree[item_key]['functors']
    mode = item_tree[item_key]['mode']

    run_opts = self.get_parameter(item_tree, item_key)
    run_options = ArgumentMapperFactory.get_mapper(run_opts)

    # expand environment variables in directory attributes
    analyzer_dir = os.path.expandvars(analyzer_dir)
    cubes_dir = os.path.expandvars(cubes_dir)
    functors_base_path = os.path.expandvars(functors_base_path)

    pira_item.set_analyzer_dir(analyzer_dir)
    pira_item.set_cubes_dir(cubes_dir)
    pira_item.set_flavors(flavors)
    pira_item.set_functors_base_path(functors_base_path)
    pira_item.set_mode(mode)
    pira_item.set_run_options(run_options)
    return pira_item

  def parse_from_json(self, json_tree) -> None:
    """
    Walk the JSON tree and add every item to the internal PiraConfigII.
    """
    # Top-level key elements // theoretically not required
    try:
      directories = U.json_to_canonic(json_tree[_DIRS])
    except Exception as e:
      L.get_logger().log('SimplifiedConfigurationLoader::parse_from_json: ' + str(e))
      directories = {}

    for tld_build in json_tree[_BUILDS]:
      # These are the elements, i.e., %astar and alike
      directory_for_item = U.json_to_canonic(tld_build)
      if self.is_escaped(directory_for_item):
        directory_for_item = directories[directory_for_item[1:]]

      # expand environment variables in directory value
      directory_for_item = os.path.expandvars(directory_for_item)

      item_tree = U.json_to_canonic(json_tree[_BUILDS][tld_build][_ITEMS])
      for item_key in item_tree:
        L.get_logger().log('SimplifiedConfigurationLoader::parse_from_json: ' + str(item_key))
        pira_item = self.create_item_from_json(item_key, item_tree)
        self._config.add_item(directory_for_item, pira_item)

    self._config._empty = False
class BatchSystemConfigurationLoader:
  """
  Loader for the BatchSystemConfiguration.

  Reads the batch-system (Slurm) JSON config referenced by the
  InvocationConfig and produces a SlurmConfig plus the backend/interface/
  timing selections stored on this loader.
  """

  def __init__(self, invoc_cfg: InvocationConfig):
    """
    Constructor.

    :param invoc_cfg: invocation configuration; provides the config path and
                      the repetition count (used for the job-array bounds).
    """
    self.config_file = invoc_cfg.get_slurm_config_path()
    self.invoc_cfg = invoc_cfg
    self.backend = None
    self.interface = None
    self.timings = None
    self.modules = None

  def get_config(self) -> BatchSystemHardwareConfig:
    """
    Read and return the batch system configuration from the config file.

    :raises RuntimeError: on a missing file or any parse/validation error.
    """
    if not U.is_file(self.config_file):
      raise RuntimeError('BatchSystemConfigLoader::get_config: Invalid config file location "' +
                         self.config_file + '" [no such file].')
    self.config_file = U.get_absolute_path(self.config_file)
    try:
      file_content = U.read_file(self.config_file)
      json_tree = json.loads(file_content)
      return self.load_from_json(json_tree)
    except Exception as e:
      L.get_logger().log('BatchSystemConfigLoader::get_config: Caught exception "' + str(e))
      raise RuntimeError(e)

  def load_from_json(self, json_tree: dict) -> typing.Union[BatchSystemHardwareConfig, SlurmConfig]:
    """
    Loads config values from the json file contents.

    Delegates to three private helpers: the optional 'general' section, the
    optional 'module-loads' section, and the mandatory 'batch-settings'
    section, whose result (a SlurmConfig) is returned.
    """
    general = None
    if "general" in json_tree:
      general = U.json_to_canonic(json_tree["general"])
    mod_loads = None
    if "module-loads" in json_tree:
      mod_loads = U.json_to_canonic(json_tree["module-loads"])
    if "batch-settings" in json_tree:
      batch_settings = U.json_to_canonic(json_tree["batch-settings"])
    else:
      L.get_logger().log(
          "BatchSystemConfigLoader::load_from_json: 'batch-settings' section not found in config "
          "file.",
          level="error")
      raise RuntimeError("'batch-settings' section not found in config file.")

    force_sequential = self._parse_general(general)
    modules = self._parse_modules(mod_loads)
    return self._parse_batch_settings(batch_settings, modules, force_sequential)

  def _parse_general(self, general) -> bool:
    """Fill backend/interface/timings from the 'general' section; returns the force-sequential flag."""
    force_sequential = False
    if not general:
      # defaults for general section
      self.backend = BatchSystemBackendType.SLURM
      self.interface = SlurmInterfaces.PYSLURM
      self.timings = BatchSystemTimingType.SUBPROCESS
      return force_sequential

    # Slurm is currently the only backend, so every (or no) 'backend' value
    # maps to it. Previously the backend stayed None when the key was absent,
    # which made get_batch_interface() return None.
    self.backend = BatchSystemBackendType.SLURM

    if "interface" in general:
      if general["interface"] == "pyslurm":
        self.interface = SlurmInterfaces.PYSLURM
      elif general["interface"] == "sbatch-wait":
        self.interface = SlurmInterfaces.SBATCH_WAIT
      elif general["interface"] == "os":
        self.interface = SlurmInterfaces.OS
      else:
        # BUGFIX: report the offending 'interface' value (the message used to
        # print general["backend"] instead).
        L.get_logger().log(
            "BatchSystemConfigLoader::load_from_json: 'general/interface' holds an illegal value: "
            "" + str(general["interface"]) + ". Choices are: 'pyslurm', 'sbatch-wait' or 'os'.",
            level="error")
        raise RuntimeError("'general/interface' holds an illegal value: " +
                           str(general["interface"]) + ". Choices are: "
                           "'pyslurm', 'sbatch-wait' or 'os'.")
    else:
      # use default
      self.interface = SlurmInterfaces.PYSLURM

    if "timings" in general:
      if general["timings"] == "subprocess":
        self.timings = BatchSystemTimingType.SUBPROCESS
      elif general["timings"] == "os":
        self.timings = BatchSystemTimingType.OS_TIME
      else:
        L.get_logger().log(
            "BatchSystemConfigLoader::load_from_json: 'general/timings' holds an illegal value:"
            " " + str(general["timings"]) + ". Choices are: 'subprocess' or 'os'.",
            level="error")
        raise RuntimeError("'general/timings' holds an illegal value: " +
                           str(general["timings"]) + ". "
                           "Choices are: 'subprocess' or 'os'.")
    else:
      # use default (fixed the former duplicated 'self.timings = self.timings = ...')
      self.timings = BatchSystemTimingType.SUBPROCESS

    if "force-sequential" in general:
      force_sequential = general["force-sequential"]
    return force_sequential

  def _parse_modules(self, mod_loads):
    """Parse 'module-loads' into a list of module dicts (name, version, depends-on), or None."""
    if not mod_loads:
      return None
    modules = []
    for module in mod_loads:
      mod = {}
      if "name" in module:
        mod["name"] = module["name"]
      else:
        L.get_logger().log(
            "BatchSystemConfigLoader::load_from_json: 'module-loads': Every module have to "
            "have a name.",
            level="error")
        raise RuntimeError("'module-loads': Every module have to have a name.")
      if "version" in module and module["version"] is not None:
        mod["version"] = module["version"]
      if "depends-on" in module and module["depends-on"] is not None:
        mod["depends-on"] = []
        for dep_module in module["depends-on"]:
          dep_mod = {}
          if "name" in dep_module:
            dep_mod["name"] = dep_module["name"]
          else:
            L.get_logger().log(
                "BatchSystemConfigLoader::load_from_json: 'module-loads': Every module dependency "
                "have to have a name.",
                level="error")
            raise RuntimeError(f"module-loads': Every module dependency have to have a "
                               f"name (in module {mod['name']}).")
          if "version" in dep_module:
            dep_mod["version"] = dep_module["version"]
          mod["depends-on"].append(dep_mod)
      modules.append(mod)
    return modules

  def _parse_batch_settings(self, batch_settings, modules, force_sequential):
    """Validate the mandatory batch settings and assemble the SlurmConfig."""
    if not batch_settings:
      L.get_logger().log(
          "BatchSystemConfigLoader::load_from_json: 'batch-settings': 'batch-settings' section not found but mandatory.",
          level="error")
      raise RuntimeError(
          "'general/batch-settings': 'batch-settings' section not found but mandatory.")

    if "time" in batch_settings:
      time_str = batch_settings["time"]
    else:
      L.get_logger().log(
          "BatchSystemConfigLoader::load_from_json: 'batch-settings/time' option not found but mandatory.",
          level="error")
      raise RuntimeError("'batch-settings/time' option not found but mandatory.")
    if "mem-per-cpu" in batch_settings:
      memcpu = batch_settings["mem-per-cpu"]
    else:
      L.get_logger().log(
          "BatchSystemConfigLoader::load_from_json: 'batch-settings/mem-per-cpu' option not found but mandatory.",
          level="error")
      raise RuntimeError("'batch-settings/mem-per-cpu' option not found but mandatory.")
    if "ntasks" in batch_settings:
      ntasks = batch_settings["ntasks"]
    else:
      L.get_logger().log(
          "BatchSystemConfigLoader::load_from_json: 'batch-settings/ntasks' option not found but mandatory.",
          level="error")
      raise RuntimeError("'batch-settings/ntasks' option not found but mandatory.")

    # Optional settings with their defaults.
    partition = batch_settings.get("partition")
    reservation = batch_settings.get("reservation")
    account = batch_settings.get("account")
    cpupertask = batch_settings.get("cpus-per-task", 1)
    exclusive = batch_settings.get("exclusive", True)
    cpufreqstr = batch_settings.get("cpu-freq")

    return SlurmConfig(mem_per_cpu=memcpu,
                       number_of_tasks=ntasks,
                       number_of_cores_per_task=cpupertask,
                       time_str=time_str,
                       cpu_frequency_str=cpufreqstr,
                       partition=partition,
                       reservation=reservation,
                       account=account,
                       job_array_start=0,
                       job_array_end=self.invoc_cfg.get_num_repetitions() - 1,
                       job_array_step=1,
                       exclusive=exclusive,
                       uses_module_system=True if modules else False,
                       purge_modules_at_start=True,
                       modules=modules,
                       force_sequential=force_sequential)

  def get_batch_interface(self) -> BatchSystemInterface:
    """
    Get the correct BatchInterface subclass upon the parameters.
    """
    batch_interface = None
    if self.backend == BatchSystemBackendType.SLURM:
      batch_interface = SlurmBackend(backend_type=BatchSystemBackendType.SLURM,
                                     interface_type=self.interface,
                                     timing_type=self.timings)
    return batch_interface
| 17,955 | 37.781857 | 126 | py |
PIRA | PIRA-master/lib/ProfileSink.py | """
File: ProfileSink.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module hosts different profile sinks. These can process resulting profile files outside of regular PIRA iteration.
"""
import sys
sys.path.append('../')
import lib.Logging as L
import lib.Utility as U
from lib.Configuration import TargetConfig, InstrumentConfig, InvocationConfig
from lib.Exception import PiraException
import json
class FolderRenamer:
  """
  Process-wide singleton that appends one stable random suffix to folder
  names, so every rename within a single PIRA run uses the same suffix.
  """

  class __FolderRenamerImpl:
    """Shared implementation; constructed exactly once per process."""

    def __init__(self) -> None:
      # Drawn once; reused for every renamed folder of this run.
      self.currentStr = U.generate_random_string()

    def get_renamed_folder(self, old_folder: str) -> str:
      """Return the folder name with the run-wide suffix appended."""
      return '_'.join([old_folder, self.currentStr])

  instance = None

  def __init__(self):
    # First construction creates the shared implementation; later
    # constructions just reuse it.
    if FolderRenamer.instance is None:
      FolderRenamer.instance = FolderRenamer.__FolderRenamerImpl()

  def __getattr__(self, name):
    # Delegate every attribute lookup to the shared implementation.
    return getattr(self.instance, name)
class ProfileSinkException(PiraException):
  """Raised when a profile sink cannot process or store a profile file."""
  # The base class already stores the message; no override needed.
class ProfileSinkBase:
  """
  Common interface for all profile sinks.

  Subclasses override process() to consume the profile produced in an
  experiment directory; the result location is kept in _sink_target.
  """

  def __init__(self):
    # Where this sink last placed (or found) its output.
    self._sink_target = ''

  def process(self, exp_dir: str, target_config: TargetConfig, instr_config: InstrumentConfig):
    """Abstract; must be overridden by concrete sinks."""
    msg = 'ProfileSinkBase::process. ABSTRACT not implemented. Aborting'
    L.get_logger().log(msg)
    raise RuntimeError(msg)

  def get_target(self):
    """Return the directory/file this sink last wrote to."""
    return self._sink_target

  def has_config_output(self):
    """True when the sink emits an additional config file for the analyzer."""
    return False
class NopSink(ProfileSinkBase):
  '''
  NopSink: To be used whenever a sink is required as an argument, but not needed for functionality
  '''

  def process(self, exp_dir, target_conf, instr_config):
    # Only remember where the experiment lives; nothing is copied or parsed.
    self._sink_target = exp_dir

  def has_config_output(self):
    # A no-op sink never produces an analyzer config file.
    return False
class PiraOneProfileSink(ProfileSinkBase):
  '''
  PiraOneProfileSink: To be used in PIRA version 1 mode.
  '''

  def process(self, exp_dir, target_conf, instr_config):
    # PIRA 1 leaves the profile where the measurement wrote it.
    self._sink_target = exp_dir

  def output_config(self, benchmark, analyzer_dir):
    # No analyzer configuration is produced in PIRA 1 mode.
    return None

  def has_config_output(self):
    return False
class ExtrapProfileSink(ProfileSinkBase):
  """
  Sink that arranges Cube profiles into the directory layout Extra-P expects:
  <base>/i<iteration>/<prefix>.<params>.<postfix>.r<repetition>/<filename>.
  Tracks the current instrumentation iteration and repetition across calls.
  """

  def __init__(self, dir: str, params, prefix: str, postfix: str, filename: str):
    """
    :param dir: base directory of the Extra-P experiment tree.
    :param params: iterable of parameter names written into the PGIS config.
    :param prefix/postfix: fixed parts of the per-measurement directory name.
    :param filename: name under which each cube file is stored.
    """
    super().__init__()
    self._base_dir = dir
    self._params = params
    self._prefix = prefix
    self._postfix = postfix
    self._filename = filename
    # -1 so the first process() call resets the repetition counter.
    self._iteration = -1
    self._repetition = 0
    self._total_reps = InvocationConfig.get_instance().get_num_repetitions()
    # Sentinel for "no invocation args seen yet"; see process().
    self._VALUE = ()

  def has_config_output(self):
    # This sink writes a pgis_cfg_<benchmark>.json for the analyzer.
    return True

  def output_config(self, benchmark, output_dir):
    """Write the PGIS/Extra-P config JSON for this benchmark and return its path."""
    L.get_logger().log('ExtrapProfileSink::output_config:\ndir: ' + self._base_dir + '\nprefix: ' +
                       self._prefix + '\npostfix: ' + self._postfix + '\nreps: ' +
                       str(self._total_reps) + '\nNiter: ' + str(self._iteration + 1))
    s = ''
    for p in self._params:
      s += p + ', '
    L.get_logger().log('params: ' + s)

    json_str = json.dumps({
        'dir': self._base_dir,
        'prefix': self._prefix,
        'postfix': self._postfix,
        'reps': int(self._total_reps),
        'iter': int(self._iteration + 1),
        'params': self._params
    })
    out_file_final = output_dir + '/pgis_cfg_' + benchmark + '.json'
    U.write_file(out_file_final, json_str)
    return out_file_final

  def get_target(self):
    """Return the Extra-P directory the last cube was copied into."""
    return self._sink_target

  def get_param_mapping(self, target_config: TargetConfig) -> str:
    """
    Encode the invocation arguments into the parameter part of the dir name.
    Returns '.' when the target has no invocation arguments.
    """
    if not target_config.has_args_for_invocation():
      return '.'
    args = target_config.get_args_for_invocation()
    param_str = ''
    # TODO FIX ME!
    # NOTE(review): the list branch hard-codes index positions 0..4 — it
    # presumably matches one specific argument layout; confirm against the
    # argument mappers before reuse.
    if isinstance(args, list):
      L.get_logger().log('ExtrapProfileSink::get_param_mapping: isinstance of list')
      param_str = str(args[1]) + str(args[2]) + '.' + str(args[4]) + str(args[0])
    elif not isinstance(args, tuple):
      L.get_logger().log('ExtrapProfileSink::get_param_mapping: not isinstance of tuple')
      param_str = str(args)  # PiraArgument knows how to unparse to string
    else:
      for v in args:
        param_str += v
    L.get_logger().log('ExtrapProfileSink::get_param_mapping: ' + param_str)
    return param_str

  def get_extrap_dir_name(self, target_config: TargetConfig, instr_iteration: int) -> str:
    """Assemble <base>/i<iter>/<prefix>.<params>.<postfix>.r<rep+1>."""
    dir_name = self._base_dir + '/' + 'i' + str(instr_iteration) + '/' + self._prefix + '.'
    dir_name += self.get_param_mapping(target_config)
    dir_name += '.' + self._postfix + '.r' + str(self._repetition + 1)
    return dir_name

  def check_and_prepare(self, experiment_dir: str, target_config: TargetConfig,
                        instr_config: InstrumentConfig) -> str:
    """
    Create (or rotate away) the target Extra-P directory and return the path
    of the source cube file; raises ProfileSinkException on any failure.
    """
    cur_ep_dir = self.get_extrap_dir_name(target_config,
                                          instr_config.get_instrumentation_iteration())
    if not U.is_valid_file_name(cur_ep_dir):
      L.get_logger().log(
          'ExtrapProfileSink::check_and_prepare: Generated directory name no good. Abort\n' +
          cur_ep_dir,
          level='error')
    else:
      if U.check_provided_directory(cur_ep_dir):
        # Directory exists from a previous run: move it aside using the
        # run-wide suffix from the FolderRenamer singleton.
        renamer = FolderRenamer()
        #new_dir_name = cur_ep_dir + '_' + U.generate_random_string()
        new_dir_name = renamer.get_renamed_folder(cur_ep_dir)
        L.get_logger().log(
            'ExtrapProfileSink::check_and_prepare: Moving old experiment directory (' + cur_ep_dir +
            ') to: ' + new_dir_name,
            level='info')
        U.rename(cur_ep_dir, new_dir_name)

      U.create_directory(cur_ep_dir)
      cubex_name = U.get_cubex_file(experiment_dir, target_config.get_target(),
                                    target_config.get_flavor())
      L.get_logger().log(cubex_name)

      if not U.is_file(cubex_name):
        L.get_logger().log(
            'ExtrapProfileSink::check_and_prepare: Returned experiment cube name is no file: ' +
            cubex_name)
      else:
        return cubex_name

    # Reached on invalid dir name or missing cube file.
    raise ProfileSinkException(
        'ExtrapProfileSink: Could not create target directory or Cube dir bad.')

  def do_copy(self, src_cube_name: str, dest_dir: str) -> None:
    """Copy the cube file into the prepared Extra-P directory."""
    L.get_logger().log('ExtrapProfileSink::do_copy: ' + src_cube_name + ' => ' + dest_dir + '/' +
                       self._filename)
    # return  # TODO make this actually work
    U.copy_file(src_cube_name, dest_dir + '/' + self._filename)

  def process(self, exp_dir: str, target_config: TargetConfig,
              instr_config: InstrumentConfig) -> None:
    """
    Sort the cube of one measurement run into the Extra-P tree, advancing
    the repetition counter and resetting it on a new iteration/arg set.
    """
    L.get_logger().log('ExtrapProfileSink::process: ' +
                       str(instr_config.get_instrumentation_iteration()))
    # NOTE(review): 'is not' compares object identity, not equality — a fresh
    # but equal args object also triggers a reset. Looks intentional for
    # detecting "new invocation args", but confirm before changing to '!='.
    if instr_config.get_instrumentation_iteration(
    ) > self._iteration or target_config.get_args_for_invocation() is not self._VALUE:
      self._iteration = instr_config.get_instrumentation_iteration()
      self._repetition = -1
      self._VALUE = ()

    self._repetition += 1
    self._VALUE = target_config.get_args_for_invocation()

    src_cube_name = self.check_and_prepare(exp_dir, target_config, instr_config)
    self._sink_target = self.get_extrap_dir_name(target_config, self._iteration)
    self.do_copy(src_cube_name, self._sink_target)
| 7,292 | 32 | 127 | py |
PIRA | PIRA-master/lib/Logging.py | """
File: Logging.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module to handle output of PIRA.
"""
import logging
import sys
class Logger:
  """
  Legacy print-based logger. Messages are only emitted and recorded when the
  corresponding level is enabled in the state dict; everything recorded ends
  up on the tape, 'perf' messages additionally on the perf tape.
  """

  def __init__(self):
    self.state = {'debug': False, 'info': True, 'warn': True, 'error': True, 'perf': True}
    self.tape = []
    self.perf_tape = []

  def log(self, msg, level='debug') -> None:
    """Print and record msg, provided the level is currently enabled."""
    if not self.state[level]:
      return
    self.print_level(level, msg)
    self.record(level, msg)

  def set_state(self, state_id, val='True') -> None:
    """Set the on/off value of one log level."""
    self.state[state_id] = val

  def toggle_state(self, state_id) -> None:
    """Flip the on/off value of one log level."""
    self.state[state_id] = not self.state[state_id]

  def print_level(self, level, msg) -> None:
    """Dispatch to the printer for the level; 'perf' has no printer."""
    printers = {
        'debug': self.print_debug,
        'info': self.print_info,
        'warn': self.print_warn,
        'error': self.print_error,
    }
    printer = printers.get(level)
    if printer is not None:
      printer(msg)

  def print_debug(self, msg) -> None:
    print('[Debug] // Start\n' + str(msg) + '\n[Debug] // End')

  def print_info(self, msg) -> None:
    print('[Info] ' + str(msg))

  def print_warn(self, msg) -> None:
    print('[Warning!] ' + str(msg))

  def print_error(self, msg) -> None:
    print('[Error] ' + str(msg))

  def record(self, level, msg) -> None:
    """Append to the tape; 'perf' messages also land on the perf tape."""
    entry = '[' + level + '] ' + str(msg)
    self.tape.append(entry)
    if level == 'perf':
      self.perf_tape.append('[PERF] ' + str(msg))

  def show_perf(self) -> None:
    """Print all recorded performance messages."""
    for entry in self.perf_tape:
      print(entry)

  def get_last_msg(self) -> str:
    """Return the most recently recorded tape entry."""
    return self.tape[len(self.tape) - 1]

  def dump_tape(self, out_file=None, cli=False) -> None:
    """Write the tape to out_file (if given) and/or echo it to stdout."""
    if out_file is not None:
      of = open(str(out_file), 'w')
      for entry in self.tape:
        of.write(entry)
        of.write('\n')
      of.close()
    if cli:
      for entry in self.tape:
        print(entry)
# to enable logging with datetime, comment in this line instead
#FORMAT = "[%(levelname)s at %(asctime)s in '%(caller_filename)s' in '%(caller_function)s' line %(caller_line_number)d]: %(message)s"
FORMAT = "[%(levelname)s in '%(caller_filename)s' in '%(caller_function)s' line %(caller_line_number)d]: %(message)s"
DATEFORMAT = "%Y-%m-%d-%H:%M:%S"


class PiraLogger:
  """
  Logger via the logging library to console, can also log to a logfile.

  Mirrors the tape/perf_tape recording of the legacy Logger while emitting
  messages through the standard 'logging' machinery, enriched with the
  caller's file/function/line (passed via 'extra' so FORMAT can use them).
  """

  def __init__(self, file: str = None, log_stack_in_debug=False):
    """
    :param file: optional logfile path that receives a copy of all output.
    :param log_stack_in_debug: include the captured stack in debug messages.
    """
    self.state = {'debug': False, 'info': True, 'warn': True, 'error': True, 'perf': True}
    self.tape = []
    self.perf_tape = []
    self.stack_debug = log_stack_in_debug
    # Derive the effective level from the state dict (was duplicated inline
    # in __init__, set_state and toggle_state).
    self.loglevel = self._compute_loglevel()
    # logger
    # NOTE(review): every PiraLogger instance adds another handler to the
    # same underlying logging.Logger; fine for the single module-level
    # instance, but repeated instantiation duplicates output — confirm
    # before constructing more instances.
    self.logger = logging.getLogger(__name__)
    # formatter
    formatter = logging.Formatter(fmt=FORMAT, datefmt=DATEFORMAT)
    # handler for stdout
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    self.logger.addHandler(handler)
    self.logger.setLevel(self.loglevel)
    # handler for file
    if file:
      handler = logging.FileHandler(file, mode="a", encoding="utf-8")
      handler.setLevel(self.loglevel)
      handler.setFormatter(formatter)
      self.logger.addHandler(handler)

  def _compute_loglevel(self):
    """Map the state dict to a logging level; the least severe enabled state wins."""
    level = None
    if self.state["error"]:
      level = logging.ERROR
    if self.state["warn"]:
      level = logging.WARNING
    if self.state["info"]:
      level = logging.INFO
    if self.state["debug"]:
      level = logging.DEBUG
    return level

  def _apply_loglevel(self):
    """Recompute the level after a state change and push it to the logger."""
    self.loglevel = self._compute_loglevel()
    self.logger.setLevel(self.loglevel)

  def get_caller(self):
    """Return (filename, line_number, function_name, stack_info) of the caller."""
    return self.logger.findCaller(stack_info=True, stacklevel=1)

  def log(self, msg, level='debug') -> None:
    """Emit and record msg at the given level ('debug'|'info'|'warn'|'error'|'perf')."""
    filename, line_number, function_name, stack_info = self.get_caller()
    extras = {
        "caller_filename": filename,
        "caller_line_number": line_number,
        "caller_function": function_name,
        "caller_stack_info": stack_info,
    }
    if self.state[level]:
      self.log_level(level, msg, extras)
      self.record(level, msg)

  def set_state(self, state_id, val='True') -> None:
    """Set a level's on/off state and update the effective logging level."""
    self.state[state_id] = val
    self._apply_loglevel()

  def toggle_state(self, state_id) -> None:
    """Flip a level's on/off state and update the effective logging level."""
    self.state[state_id] = not self.state[state_id]
    self._apply_loglevel()

  def log_level(self, level, msg, extras) -> None:
    """Dispatch to the matching logging call; stack info only for debug."""
    if level != "debug":
      extras.pop("caller_stack_info")
    if level == 'debug':
      if self.stack_debug:
        # BUGFIX: str() now wraps only the stack info; previously the
        # concatenation happened inside str(), which would raise a TypeError
        # when the captured stack info is not a string.
        self.logger.debug("\n[DEBUG] // Start\n" + msg + "\n" +
                          str(extras["caller_stack_info"]) + "\n[DEBUG] // End",
                          extra=extras)
      else:
        self.logger.debug("\n[DEBUG] // Start\n" + msg + "\n[DEBUG] // End", extra=extras)
    elif level == 'info':
      self.logger.info(msg, extra=extras)
    elif level == 'warn':
      self.logger.warning(msg, extra=extras)
    elif level == 'error':
      self.logger.error(msg, extra=extras)

  def record(self, level, msg) -> None:
    """Append to the tape; 'perf' messages also land on the perf tape."""
    msg_str = '[' + level + '] ' + str(msg)
    self.tape.append(msg_str)
    if level == 'perf':
      self.perf_tape.append('[PERF] ' + str(msg))

  def show_perf(self) -> None:
    """Print all recorded performance messages."""
    for p in self.perf_tape:
      print(p)

  def get_last_msg(self) -> str:
    """Return the most recently recorded tape entry."""
    return self.tape[len(self.tape) - 1]

  def dump_tape(self, out_file=None, cli=False) -> None:
    """Write the tape to out_file (if given) and/or echo it to stdout."""
    if out_file is not None:
      # Context manager ensures the file is closed even on write errors.
      with open(str(out_file), 'w') as of:
        for m in self.tape:
          of.write(m)
          of.write('\n')
    if cli:
      for msg in self.tape:
        print(msg)
# Module-level logger instances: the legacy print-based one and the
# logging-library-based default.
logger = Logger()
logger_new = PiraLogger()


def get_logger(old: bool = False):
  """Return the global PIRA logger; pass old=True for the legacy print-based one."""
  return logger if old else logger_new
| 6,342 | 27.443946 | 133 | py |
PIRA | PIRA-master/lib/BatchSystemTimer.py | """
File: BatchSystemTimer.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Wrapper for timings in PIRA Slurm jobs, utilizes subprocess and os.times(). This way it is the
same as in PIRA, with in the U.shell call.
"""
import json
import os
import subprocess
import sys
class BatchSystemTimer:
  """
  A timer class for batch systems.
  This is not meant to be used in pira itself.
  It is a class that is used in the job script,
  as a python wrapper to obtain the timing results
  like it is done in PIRA local, with os.times().
  The times will be written to a json file, which will
  be read by PIRA later.
  For this to work, you have to give four arguments to this script:
  - key: The timings key from within PIRA.
  - job_id: The job id of the slurm job.
  - job_array_id: The index of the Slurm array job.
  - export_path: The path where the json results should go.
  - command: The command to run/to time.
  """

  def __init__(self, key: str, job_id: str, job_array_id: int, export_path: str, command: str):
    """
    Constructor.
    """
    self.key = key
    self.job_id = job_id
    self.job_array_id = job_array_id
    self.export_path = export_path
    self.command = command
    self.results = {}

  def run(self):
    """
    Run and time the command.
    Start the exporting.
    """
    t1 = os.times()  # start time
    # BUGFIX: time the command handed to this instance; previously the global
    # 'command' was used, which only exists when run as a script.
    out = subprocess.check_output(self.command, shell=True)
    t2 = os.times()  # end time
    cutime = t2[2] - t1[2]   # children user time
    cstime = t2[3] - t1[3]   # children system time
    elapsed = t2[4] - t1[4]  # wall-clock time
    out = str(out.decode('utf-8'))
    res = {"cutime": cutime, "cstime": cstime, "elapsed": elapsed, "output": str(out)}
    self.export_results(res)

  def export_results(self, res):
    """
    Export results to the json file.
    """
    filename = f"{self.export_path}/pira-slurm-{self.job_id}-{self.key}-{self.job_array_id}.json"
    with open(filename, "w") as f:
      json.dump(res, f, indent=4)
if __name__ == "__main__":
  # CLI entry point, invoked from the generated Slurm job script.
  # Positional arguments (see class docstring): key, job_id, job_array_id,
  # export_path, command.
  args = sys.argv[1:]
  key = args[0]
  job_id = args[1]
  job_array_id = int(args[2])
  export_path = args[3]
  command = args[4]
  BatchSystemTimer(key, job_id, job_array_id, export_path, command).run()
| 2,266 | 29.226667 | 126 | py |
PIRA | PIRA-master/lib/Database.py | """
File: Database.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module to run the target software.
"""
import sys
sys.path.append('..')
import lib.FunctorManagement as F
import lib.Utility as U
import lib.tables as T
from lib.Exception import PiraException
import sqlite3 as db
class DBException(PiraException):
  """Signals any failure while talking to the PIRA sqlite database."""
  # The base class already stores the message; no override needed.
class DBManager:
  """
  This class is used to communicate with PIRA DB.

  Outer class implements the singleton pattern: all attribute access is
  delegated to the single shared DBImpl instance.
  """

  class DBImpl:
    """
    Inner class to implement singleton pattern.
    Takes care of the actual database connection.
    """

    def __init__(self, name):
      # name: path/name of the sqlite database file.
      self.conn = None
      self.cursor = None
      try:
        self.conn = db.connect(name)
      except Exception:
        # NOTE(review): the original exception is swallowed here (no
        # 'raise ... from e'), so the sqlite error cause is lost.
        raise DBException('Error in creating the database / connection')

    def create_cursor(self):
      """Create and cache a cursor on the current connection."""
      try:
        self.cursor = self.conn.cursor()
        return self.cursor
      except Exception:
        raise DBException('Error in creating cursor')

    def create_table(self, table_name):
      """Execute a CREATE TABLE statement (table_name is the full SQL string)."""
      try:
        self.cursor.execute(table_name)
      except Exception:
        raise DBException('Problem creating tables')

    def insert_data_application(self, values):
      """Insert one row into the Application table; creates the table if needed."""
      self.create_table(T.create_application_table)
      sql = ''' INSERT INTO Application(AppID,App_Name,Global_Flavor,Global_Submitter)
                VALUES(?,?,?,?) '''
      self.cursor.execute(sql, values)
      self.conn.commit()

    def insert_data_builds(self, values):
      """Insert one row into the Builds table; creates the table if needed."""
      self.create_table(T.create_builds_table)
      sql = ''' INSERT INTO Builds(BuildID,Build_Name,Prefix,Flavors,AppName)
                VALUES(?,?,?,?,?) '''
      self.cursor.execute(sql, values)
      self.conn.commit()

    def insert_data_items(self, values):
      """Insert one row into the Items table; creates the table if needed."""
      self.create_table(T.create_items_table)
      sql = ''' INSERT INTO Items(ItemID,Item_Name,Inst_Analysis_Functor_Path,Builders_Funtor_Path,Run_Args,Runner_Functor_Path,Submitter_Functor_Path,Exp_Data_Dir_Base_Path,BuildName)
                VALUES(?,?,?,?,?,?,?,?,?) '''
      self.cursor.execute(sql, values)
      self.conn.commit()

    def insert_data_experiment(self, values):
      """Insert one row into the Experiment table; creates the table if needed."""
      self.create_table(T.create_experiment_table)
      sql = ''' INSERT INTO Experiment(Experiment_ID,BenchmarkName,Iteration_No,IsWithInstrumentation,CubeFilePath,Runtime,Item_ID)
                VALUES(?,?,?,?,?,?,?) '''
      self.cursor.execute(sql, values)
      self.conn.commit()

    def prep_db_for_build_item_in_flavor(self, config, build, item, flavor):
      """Generates all the necessary build work to write to the db.

      :config: PIRA configuration
      :build: the build
      :item: current item
      :flavor: current flavor
      :returns: unique ID for current item
      """
      build_tuple = (U.generate_random_string(), build, '', flavor, build)
      self.insert_data_builds(build_tuple)
      # XXX My implementation returns the full path, including the file extension.
      # In case something in the database goes wild, this could be it.
      func_manager = F.FunctorManager()
      analyze_functor = func_manager.get_analyzer_file(build, item, flavor)
      build_functor = func_manager.get_builder_file(build, item, flavor)
      run_functor = func_manager.get_runner_file(build, item, flavor)
      # TODO implement the get_submitter_file(build, item, flavor) method!
      benchmark_name = config.get_benchmark_name(item)
      submitter_functor = config.get_runner_func(
          build, item) + '/slurm_submitter_' + benchmark_name + flavor
      exp_dir = config.get_analyzer_exp_dir(build, item)

      db_item_id = U.generate_random_string()
      db_item_data = (db_item_id, benchmark_name, analyze_functor, build_functor, '', run_functor,
                      submitter_functor, exp_dir, build)
      self.insert_data_items(db_item_data)
      return db_item_id

    def enter_run_data(self, unique_id: str, item_name: str, iteration_no: int,
                      is_instrumented_run: bool, path_to_cube: str, runtime: float,
                      db_item_id) -> None:
      # NOTE(review): intentionally a stub — run data is currently not
      # persisted to the database.
      pass

    #### END OF INNER CLASS ###

  # Default database file name and extension for PIRA's sqlite database.
  db_name = '_pira'
  db_ext = 'sqlite'

  # Shared DBImpl instance; created on the first DBManager construction.
  instance = None

  def __init__(self, dbname):
    # Only the first constructed DBManager creates the connection; later
    # constructions (even with a different dbname) reuse it.
    if not DBManager.instance:
      DBManager.instance = DBManager.DBImpl(dbname)

  def __getattr__(self, name):
    # Delegate every attribute lookup to the shared implementation.
    return getattr(self.instance, name)
| 4,484 | 31.035714 | 184 | py |
PIRA | PIRA-master/lib/Builder.py | """
File: Builder.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module to build the target software.
"""
import lib.Utility as U
import lib.Logging as L
import lib.FunctorManagement as F
import lib.DefaultFlags as D
from lib.Configuration import TargetConfig, InvocationConfig
from lib.Measurement import ScorepSystemHelper
from lib.Exception import PiraException
import typing
class BuilderException(PiraException):
  """Exception type signaling a failure during the build phase."""

  def __init__(self, message):
    super().__init__(message)
class Builder:
  """
  Class which builds the benchmark executable, given a TargetConfiguration.

  Depending on the 'instrument' flag, the target is built either with the
  Score-P instrumentation toolchain (using the given instrumentation file)
  or as a plain (vanilla) executable.
  """

  def __init__(self, target_config: TargetConfig, instrument: bool, instr_file: str = None) -> None:
    """
    :param target_config: configuration of the target to build; must not be None
    :param instrument: True to build with instrumentation support
    :param instr_file: path to the instrumentation (filter) file, if any
    :raises BuilderException: if target_config is None
    """
    if target_config is None:
      raise BuilderException('Builder::ctor: Target Configuration was None')
    self.target_config = target_config
    self.directory = target_config.get_place()
    self.old_cwd = ''
    self.build_instr = instrument
    self.instrumentation_file = instr_file
    self.error = None

  def build(self) -> None:
    """Runs the full build cycle: cd to the target directory, build, cd back."""
    try:
      self.set_up()
      self.build_detail()
      self.tear_down()
    except BuilderException as e:
      # NOTE(review): set_up raises a plain Exception, which is *not* caught
      # here; only BuilderExceptions reach this handler -- confirm intent.
      L.get_logger().log('Builder::build: Caught exception ' + str(e), level='warn')
      if self.error:
        raise Exception('Severe Problem in Builder::build')

  def set_up(self) -> None:
    """Changes the CWD to the build directory and remembers the previous CWD.

    :raises Exception: if the build directory does not exist
    """
    L.get_logger().log('Builder::set_up for ' + self.directory)
    directory_good = U.check_provided_directory(self.directory)
    if directory_good:
      self.old_cwd = U.get_cwd()
      U.change_cwd(self.directory)
    else:
      self.error = True
      raise Exception('Builder::set_up: Could not change to directory')

  def tear_down(self) -> None:
    """Restores the CWD remembered by set_up."""
    U.change_cwd(self.old_cwd)

  def build_detail(self) -> None:
    kwargs = {'compiler': 'clang++'}
    self.build_flavors(kwargs)

  def construct_pira_instr_kwargs(self) -> typing.Dict:
    """Assembles the keyword arguments for an instrumented (Score-P) build.

    :returns: dict with CC/CXX commands, linker flags, exe name, process count
    :raises BuilderException: if called while not in instrumentation mode
    """
    L.get_logger().log('Builder::construct_pira_instr_keywords', level='debug')
    if not self.build_instr:
      raise BuilderException('Should not construct instrument kwargs in non-instrumentation mode.')

    pira_cc = ScorepSystemHelper.get_scorep_compliant_CC_command(self.instrumentation_file)
    pira_cxx = ScorepSystemHelper.get_scorep_compliant_CXX_command(self.instrumentation_file)
    pira_clflags = ScorepSystemHelper.get_scorep_needed_libs_c()
    pira_cxxlflags = ScorepSystemHelper.get_scorep_needed_libs_cxx()
    default_provider = D.BackendDefaults()
    pira_name = default_provider.get_default_exe_name()
    pira_kwargs = {
        'CC': pira_cc,
        'CXX': pira_cxx,
        'CLFLAGS': pira_clflags,
        'CXXLFLAGS': pira_cxxlflags,
        'PIRANAME': pira_name,
        'NUMPROCS': default_provider.get_default_number_of_processes(),
        'filter-file': self.instrumentation_file
    }
    L.get_logger().log('Builder::construct_pira_instr_keywords Returning.', level='debug')
    return pira_kwargs

  def construct_pira_kwargs(self) -> typing.Dict:
    """Assembles the keyword arguments for a vanilla (non-instrumented) build.

    :raises BuilderException: if called while in instrumentation mode
    """
    L.get_logger().log('Builder::construct_pira_keywords', level='debug')
    if self.build_instr:
      raise BuilderException('Should not construct non-instrument kwargs in instrumentation mode.')

    default_provider = D.BackendDefaults()
    kwargs = default_provider.get_default_kwargs()
    kwargs['CLFLAGS'] = ''
    kwargs['CXXLFLAGS'] = ''
    L.get_logger().log('Builder::construct_pira_keywords Returning.', level='debug')
    return kwargs

  def check_build_prerequisites(self) -> None:
    # NOTE(review): the early return disables the actual prerequisite check
    # below; kept as-is to preserve behavior -- confirm whether this is intended.
    return
    ScorepSystemHelper.check_build_prerequisites()

  def build_flavors(self, kwargs) -> None:
    """Builds the target for its configured flavor.

    Selects the 'build' functor (instrumented) or 'basebuild' functor
    (vanilla) and runs it either actively or passively via the shell,
    cleaning the build directory first in the passive case.
    """
    L.get_logger().log('Builder::build_flavors: Building for ' + self.target_config.get_target() +
                       ' in ' + self.target_config.get_flavor(),
                       level='debug')
    build = self.target_config.get_build()
    benchmark = self.target_config.get_target()
    flavor = self.target_config.get_flavor()
    f_man = F.FunctorManager()  # Returns the currently loaded FM
    clean_functor = f_man.get_or_load_functor(build, benchmark, flavor, 'clean')
    kwargs = {}

    if self.build_instr:
      L.get_logger().log('Builder::build_flavors: Instrumentation', level='debug')
      try:
        self.check_build_prerequisites()
        L.get_logger().log('Builder::build_flavors: Prerequisite check successfull.')
      except Exception as e:
        raise BuilderException('Precheck failed.\n' + str(e))

      if not InvocationConfig.get_instance().is_compile_time_filtering():
        L.get_logger().log('Builder::build_flavors: Runtime filtering enabled.')
        self.target_config.set_instr_file(self.instrumentation_file)

      build_functor = f_man.get_or_load_functor(build, benchmark, flavor, 'build')
      kwargs = self.construct_pira_instr_kwargs()
      ScorepSystemHelper.prepare_MPI_filtering(self.instrumentation_file)
    else:
      L.get_logger().log('Builder::build_flavors: No instrumentation', level='debug')
      build_functor = f_man.get_or_load_functor(build, benchmark, flavor, 'basebuild')
      kwargs = self.construct_pira_kwargs()

    if build_functor.get_method()['active']:
      # FIX: this branch runs the *active* functor; the log message previously
      # (and incorrectly) said 'passive'.
      L.get_logger().log('Builder::build_flavors: Running the active functor.', level='debug')
      build_functor.active(benchmark, **kwargs)
    else:
      try:
        L.get_logger().log('Builder::build_flavors: Running the passive functor.', level='debug')
        ''' The build command uses CC and CXX to pass flags that are needed by PIRA for the given toolchain. '''
        build_command = build_functor.passive(benchmark, **kwargs)
        clean_command = clean_functor.passive(benchmark, **kwargs)
        L.get_logger().log('Builder::build_flavors: Clean in ' + benchmark + '\n Using ' +
                           clean_command,
                           level='debug')
        U.shell(clean_command)
        L.get_logger().log('Builder::build_flavors: Building: ' + build_command, level='debug')
        U.shell(build_command)
      except Exception as e:
        L.get_logger().log('Builder::build_flavors: ' + str(e), level='error')
| 6,185 | 37.42236 | 126 | py |
PIRA | PIRA-master/lib/Exporter.py | """
File: Exporter.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module that implements various exporters, e.g., CSV-export.
"""
import csv
import lib.Logging as L
import typing
from lib.Exception import PiraException
from lib.Measurement import RunResult
class CSVExporter:
  """Collects named value lists destined for a CSV export."""

  def __init__(self, name):
    """Stores the exporter name; name must not be None."""
    if name is None:
      raise RuntimeError('name argument for CSVExport-ctor must not be None')
    self._name = name
    self._exports = {}

  def get_name(self):
    """Returns the name this exporter was created with."""
    return self._name

  def add_new_export(self, name, values):
    """Registers a new export entry; the key must not exist yet."""
    if name is None:
      raise RuntimeError('name argument needs to be not None')
    if values is None:
      raise RuntimeError('values argument needs to be not None')
    if name in self._exports:
      raise KeyError('Key already exists')
    self._exports[name] = values

  def add_export(self, name, values):
    """Appends values to an existing export entry."""
    self._exports[name] += values

  def export(self, file_name, keys=None):
    # NOTE(review): only logs the keys -- no file is written yet.
    export_keys = keys if keys is not None else [str(k) for k in self._exports]
    L.get_logger().log('[CSVExporter::export] Keys to export: ' + str(export_keys))
class RunResultExporter:
  """Accumulates (run-type, runtime, iteration) rows and writes them as CSV."""

  def __init__(self):
    self.rows = []   # one list per added run
    self.width = 0   # widest row seen so far, drives the header width

  def add_row(self, run_type: str, rr: RunResult):
    """Appends one row built from the RunResult's runtimes and iteration counts."""
    runtimes = rr.get_accumulated_runtime()
    iterations = rr.get_nr_of_iterations()
    if len(runtimes) != len(iterations):
      raise PiraException(
          "Could not add row to RunResultExporter; lengths of accumulated runtimes and number of iterations do not match"
      )
    # interleave runtime/iteration pairs after the run type
    row = [run_type]
    for runtime, iteration in zip(runtimes, iterations):
      row.append(runtime)
      row.append(iteration)
    self.rows.append(row)
    self.width = max(self.width, len(row))

  def export(self, file_name: str, dialect='unix'):
    """Writes the header followed by all collected rows to file_name."""
    with open(file_name, 'w', newline='') as csvfile:
      writer = csv.writer(csvfile, dialect)
      header = ['Type of Run']
      for _ in range((self.width - 1) // 2):
        header += ['Accumulated Runtime', 'Number of Runs']
      writer.writerow(header)
      writer.writerows(self.rows)
class PiraRuntimeExporter:
  """Exports per-iteration runtime statistics (average/median/stdev) as CSV."""

  class MetaInformationProvider:
    """Constant provider used for the header column; mimics the rt_info interface."""

    def __init__(self, str_for_average: str, str_for_median: str, str_for_stdev: str):
      self._average = str_for_average
      self._median = str_for_median
      self._stdev = str_for_stdev

    def get_average(self, unused_void, unused_void_2):
      return self._average

    def get_median(self, unused_void, unused_void_2):
      return self._median

    def get_stdev(self, unused_void, unused_void_2):
      return self._stdev

    def get_num_data_sets(self):
      return 1

  def __init__(self):
    # The first entry labels the statistic rows ('Average', 'Median', 'Stdev').
    self._iteration_data = [
        ('Data', PiraRuntimeExporter.MetaInformationProvider('Average', 'Median', 'Stdev'))
    ]

  def add_iteration_data(self, name: str, rt_info) -> None:
    """Appends one (name, runtime-info) pair to be exported."""
    self._iteration_data.append((name, rt_info))

  def export(self, file_name: str, dialect='unix'):
    """Writes four CSV rows (names, averages, medians, stdevs), one column per data set.

    FIX: the four near-identical row-building loops are folded into a single
    loop over per-row value getters; the emitted CSV is unchanged.
    """
    with open(file_name, 'w', newline='') as csvfile:
      writer = csv.writer(csvfile, dialect)
      row_value_getters = [
          lambda name, info, nd: name,
          lambda name, info, nd: info.get_average(0, nd),
          lambda name, info, nd: info.get_median(0, nd),
          lambda name, info, nd: info.get_stdev(0, nd),
      ]
      for get_value in row_value_getters:
        writer.writerow([
            get_value(name, info, nd)
            for name, info in self._iteration_data
            for nd in range(0, info.get_num_data_sets())
        ])
| 4,239 | 27.648649 | 126 | py |
PIRA | PIRA-master/lib/BatchSystemBackends.py | """
File: BatchSystemBackends.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description:
"""
import time
import json
from typing import Type, Tuple, Dict, Union, List, Any
import lib.Logging as L
import lib.Utility as U
from lib.BatchSystemGenerator import SlurmGenerator, BatchSystemGenerator
from lib.Configuration import BatchSystemHardwareConfig, SlurmConfig
class BatchSystemBackendType:
  """
  Enumeration-style holder for the batch system backends PIRA can run on.

  Currently, this supports:
  - SLURM: The SLURM workload manager (https://slurm.schedmd.com/overview.html).
  """
  SLURM = "Slurm"
class BatchSystemInterfaceType:
  """
  Marker base class for workload manager interface enumerations.
  Exists for type-hinting only.
  """
class SlurmInterfaces(BatchSystemInterfaceType):
  """
  Interfaces usable with the SLURM backend_type.

  Currently, supports:
  - PYSLURM: Interface to SLURM via the pyslurm project: https://github.com/PySlurm/pyslurm.
  - SBATCH_WAIT: Use sbatch with the --wait option.
  - OS: Plain sbatch/squeue checking via the OS with pythons subprocess.
  """
  PYSLURM = "pyslurm"
  SBATCH_WAIT = "sbatch_wait"
  OS = "os"
class BatchSystemTimingType:
  """
  Enumeration-style holder for the ways runtime measurements can be taken.
  """
  SUBPROCESS = "subprocess"
  OS_TIME = "os"
class BatchSystemInterface:
  """
  Interface for batch system runs. Derive this, if you are going to write another
  Backend for a workload manager other than SLURM.

  Reference implementation: These functions should be implemented by the concrete
  Backends. Or: The basic implementations of this abstract class can be used for basic
  functionality.

  Along with every of these abstract methods the interface is documented. Each method has either a
  implementation (to also maybe be used by derived classes), or not (e.g. body "pass", or "return None")
  to be implemented by the derived classes. In this case there is documentation in its docstring, which
  will describe how to use these methods, and what is expected to happen when the method is executed.

  The concept of keys:
  For many of the interfaces functions, a key is needed. You may use U.generate_random_string() to generate one.
  A key is used to reference a batch system job. This means, since these keys are used to obtain the results
  for timed commands, only one timed command per job can be added. But this should be enough for PIRA.
  See individual *Backend classes for more detail.
  """

  def __init__(self,
               backend_type: BatchSystemBackendType = BatchSystemBackendType.SLURM,
               interface_type: BatchSystemInterfaceType = None,
               timings_type: BatchSystemTimingType = None,
               check_interval_in_seconds: int = 30) -> None:
    """
    Constructor.
    """
    self.check_interval = check_interval_in_seconds
    self.backend = backend_type
    self.interface = interface_type
    self.timing_type = timings_type
    self.config = None               # batch system configuration, set via configure()
    self.generator = None            # batch script generator, set via configure()
    self.preparation_commands = {}   # key -> un-timed command run before the timed one
    self.timed_commands = {}         # key -> the single timed command of the job
    self.teardown_commands = {}      # key -> un-timed command run after the timed one
    self.job_id_map = {}             # key -> job identifier (None until dispatched)
    self.results = {}                # (key, repetition) -> (runtime, output) or None

  def get_interfaces(self) -> Union[None, Type[BatchSystemInterfaceType]]:
    """
    Returns the interfaces for a specific backend_type. The interfaces are represented as enum classes.
    """
    return None

  def set_interface(self, interface: BatchSystemInterfaceType) -> None:
    """
    Set the interface_type you want to use.

    :param interface: The interface.
    :return: None.
    """
    self.interface = interface

  def configure(self, batch_config: BatchSystemHardwareConfig,
                batch_generator: BatchSystemGenerator) -> None:
    """
    Set up the configuration for the batch system. This is used to add a batch system configuration
    and a batch system generator to be used, form the outside.

    :param batch_config: The configuration for the batch system.
    :param batch_generator: The generator for the batch system.
    :return: None.
    """
    self.config = batch_config
    self.generator = batch_generator

  def add_preparation_command(self, key: str, cmd: str) -> None:
    """
    Add a command that needs to be executed bevor the timed command,
    but should not be timed itself. E.g. do a cd before executing the actual target.

    :param key: A key (see class docstring).
    :param cmd: The command.
    :return: None.
    """
    # TODO: Add pre- post- hook/functor to use this.
    self.preparation_commands[key] = cmd

  def add_teardown_command(self, key: str, cmd: str) -> None:
    """
    Add a command that needs to be executed bevor the timed command,
    but should not be timed itself. E.g. a cd back after executing the actual target.

    :param key: A key (see class docstring).
    :param cmd: The command.
    :return: None.
    """
    # TODO: Add pre- post- hook/functor to use this.
    self.teardown_commands[key] = cmd

  def add_timed_command(self, key: str, cmd: str):
    """
    Add a command, that needs to be timed. Only one command per key is allowed,
    which means this will overwrite the command, if the key exists in the maps.
    Any implementation is expected to put a key-value pair of the key
    and the command in the self.timed_commands map. It is further expected to
    add a key-value pair of the key and None to self.job_id map (placeholder to
    be filled by the dispatching process). Also, it is expected
    to add key-value pairs of tuple(key, repetition) and None to the self.results map
    (effectively adding placeholders for the results to be filled later).

    :param key: A key (see class docstring).
    :param cmd: The command.
    """
    self.timed_commands[key] = cmd
    self.job_id_map[key] = None
    self.results[(key, None)] = None

  def dispatch(self, key: str) -> Any:
    """
    Start execution(s) by dispatching the job details referenced by the key
    to the batch system. To dispatch a group of jobs, use this method on any key you got.
    Any implementation of this method is expected to dispatch the job details referenced
    by the key to the batch system. This means setting up the needed scripts/commands for
    this batch system (preferably be using the generator class meant for this batch system),
    dispatch it, and keeping the output form the dispatch process. It is further expected to
    set the value (overwrite the None-placeholder) for the key in self.job_id_map to a
    job identifier (e.g. job id), retrieved from the dispatch output.

    :param key: The key to reference the job to be dispatched (see class docstring).
    :return: An identifier for the dispatched job, e.g. a job id.
    """
    pass

  def get_job_id_by_key(self, key: str) -> Any:
    """
    Getter for retrieving a job identifier by a known key.

    :param key: A key (see class docstring).
    :return: None if job referenced by key was not yet dispatched (job
    identifier unknown / the placeholder added by add_timed_command), or
    the job identifier otherwise.
    """
    # FIX: this method previously had no body and therefore always returned
    # None, contradicting its own documentation.
    return self.job_id_map.get(key)

  def wait(self) -> None:
    """
    Wait until the results are ready. Blocks until this is the case.
    Any implementation of this is expected to block until the job(s) are finished.
    This means all jobs that can be identified by key-value entries in the self.job_id_map
    (So only jobs that where dispatched before). This way, you can wait for either single jobs,
    or groups of jobs. Further, it is expected to gather the results and put them into
    the self.results map. The keys are tuples of (key, repetition) each, which means this should
    overwrite the None-placeholders set by the add_timed_command methods.
    This way it can be checked later, if errors occurred somehow, by checking for None in the results.
    Results (the map values) are expected to be added as tuples of (runtime, output).

    :return: None.
    """
    pass

  def get_results(self, key: str, repetition: int) -> Tuple[float, str]:
    """
    Get the results (runtime, output), for a job/key.

    :param key: A key (see class docstring).
    :param repetition: A repetition number to obtain results for.
    :return: Tuple of (runtime[float], output[str]) for this jobs repetition of its command.
    """
    return self.results[(key, repetition)]

  def cleanup(self) -> None:
    """
    Cleanup variables for the next run.

    :return: None.
    """
    self.results = {}
    self.job_id_map = {}
    self.preparation_commands = {}
    self.timed_commands = {}
    self.teardown_commands = {}
    # clean up the runtime files
    U.remove_file_with_pattern(U.get_default_pira_dir(), "pira-slurm-.*")
class SlurmBackend(BatchSystemInterface):
  """
  Backend for the SLURM batch system runner.
  """

  def __init__(self,
               backend_type: BatchSystemBackendType = BatchSystemBackendType.SLURM,
               interface_type: BatchSystemInterfaceType = SlurmInterfaces.PYSLURM,
               timing_type: BatchSystemTimingType = BatchSystemTimingType.SUBPROCESS) -> None:
    """
    Constructor
    """
    super(SlurmBackend, self).__init__(backend_type, interface_type, timing_type)

  def get_interfaces(self) -> Type[SlurmInterfaces]:
    """
    Getter for the Slurm Backend interfaces
    """
    return SlurmInterfaces

  def set_interface(self, interface: Type[SlurmInterfaces]) -> None:
    """
    Setter for the interface_type
    """
    self.interface = interface

  def configure(self, slurm_config: SlurmConfig, slurm_generator: SlurmGenerator):
    """
    Configures the batch system to use the SLURM options wanted by the user.
    """
    self.config = slurm_config
    self.generator = slurm_generator

  def add_timed_command(self, key: str, cmd: str):
    """
    Add a command, that needs to be timed. One result placeholder is created
    per job-array repetition.

    :param key: A key to reference the command.
    :param cmd: The command.
    """
    for array_id in range(
        self.config.job_array_start,
        self.config.job_array_end + 1,
        self.config.job_array_step,
    ):
      # add a placeholder for the results
      self.results[(key, array_id)] = None
    # add a placeholder for the job_id
    self.job_id_map[key] = None
    # add command once
    self.timed_commands[key] = cmd

  def dispatch(self, key: str) -> int:
    """
    Submits the job referenced by key to the cluster via SLURM.
    How this is actually done depends on the interface in use.

    :return: The job_id obtained while dispatching as int.
    """
    self.generator.clear_commands()
    self.generator.clear_modules()
    L.get_logger().log(f"SlurmBackend::dispatch: Dispatching for key {key}", level="debug")
    L.get_logger().log(f"SlurmBackend::dispatch: Interface is: {self.interface}", level="debug")
    # pass commands to the config
    if key in self.preparation_commands:
      self.generator.add_command(self.preparation_commands[key])
    if key not in self.timed_commands:
      L.get_logger().log(f"SlurmBackend::dispatch: There is no command to be added for key {key}.",
                         level="error")
    # add the timing with the command
    if self.timing_type == BatchSystemTimingType.SUBPROCESS:
      # the helper script measures the runtime and dumps it as json
      cmd = f"python3 {U.get_pira_code_dir()}/lib/BatchSystemTimer.py {key} $SLURM_ARRAY_JOB_ID " \
            f"$SLURM_ARRAY_TASK_ID {U.get_default_pira_dir()} '{self.timed_commands[key]}'"
      self.generator.add_command(cmd)
      L.get_logger().log(f"SlurmBackend::dispatch: Added command '{cmd}'", level="debug")
    elif self.timing_type == BatchSystemTimingType.OS_TIME:
      if self.timed_commands[key].startswith("mpirun"):
        # /usr/bin/time crashed on mpi targets when command is not in quotes
        self.generator.add_command(f"/usr/bin/time --format=%e '{self.timed_commands[key]}'")
      else:
        # but local commands seem to crash with it when in quotes
        self.generator.add_command(f"/usr/bin/time --format=%e {self.timed_commands[key]}")
      L.get_logger().log(
          f"SlurmBackend::dispatch: Added command '/usr/bin/time --format="
          f"%e {self.timed_commands[key]}'",
          level="debug")
    else:
      L.get_logger().log("SlurmBackend::dispatch: Invalid timing_type. Exiting.", level="error")
      U.exit(1)
    if key in self.teardown_commands:
      self.generator.add_command(self.teardown_commands[key])

    # dispatch for different methods
    if self.interface == SlurmInterfaces.PYSLURM:
      try:
        pyslurm = __import__("pyslurm")
      except ModuleNotFoundError:
        L.get_logger().log("PySlurm Module not found. Exiting.", level="error")
        raise RuntimeError("PySlurm Module not found. Exiting.")
      job_controller = pyslurm.job()
      job_opts = self.generator.get_pyslurm_args()
      job_id = job_controller.submit_batch_job(job_opts)
      job_id = int(job_id)
      L.get_logger().log(f"SlurmBackend::dispatch: Dispatched job {job_id} to slurm via PySlurm.",
                         level="debug")
      del job_controller
    elif self.interface == SlurmInterfaces.SBATCH_WAIT:
      L.get_logger().log(
          f"SlurmBackend::dispatch: Starting execution of repetitions via sbatch --wait...",
          level="debug")
      job_id = self.generator.sbatch(script_path=self.config.slurm_script_file,
                                     active=True,
                                     wait=True)
    elif self.interface == SlurmInterfaces.OS:
      # sbatch it
      job_id = self.generator.sbatch(script_path=self.config.slurm_script_file, active=True)
      L.get_logger().log(
          f"SlurmBackend::dispatch: Sbatch'ed jobscript {self.config.slurm_script_file} via systems sbatch.",
          level="debug")
    else:
      L.get_logger().log(f"SlurmBackend::dispatch: Interface is None. Exiting.", level="error")
      raise RuntimeError("SlurmBackend::dispatch: Interface is None. Exiting.")

    # add job id to map for further processing
    self.job_id_map[key] = job_id
    L.get_logger().log(f"SlurmBackend::dispatch: Dispatched batch job {job_id}.", level="info")
    return job_id

  def wait(self) -> None:
    """
    Waits for all dispatched jobs to finish (single job or group). Saves the results.
    """
    # check which jobs we need to wait for
    jobs = []
    for key, job_id in self.job_id_map.items():
      if job_id is not None:
        jobs.append(job_id)
    # waiting for a single job. All methods allowed
    if len(jobs) == 1:
      if self.interface == SlurmInterfaces.PYSLURM:
        try:
          pyslurm = __import__("pyslurm")
        except ModuleNotFoundError:
          L.get_logger().log("PySlurm Module not found. Exiting.", level="error")
          raise RuntimeError("PySlurm Module not found. Exiting.")
        # FIX: the returned exit code was assigned to an unused local; drop it
        pyslurm.job().wait_finished(jobs[0])
      elif self.interface == SlurmInterfaces.SBATCH_WAIT:
        # waiting is done by SLURM, just pass on
        pass
      elif self.interface == SlurmInterfaces.OS:
        # use the generators wait functions
        # give single job along
        self.generator.wait(job_id=jobs[0])
      else:
        L.get_logger().log(f"SlurmBackend::wait: Interface is None. Exiting.", level="error")
        raise RuntimeError("SlurmBackend::wait: Interface is None. Exiting.")
    # waiting for a group of jobs: only "os" method allowed
    else:
      if self.interface == SlurmInterfaces.OS:
        # use the generators wait functions
        # give all jobs along
        self.generator.wait(job_ids=jobs)
      else:
        # NOTE(review): this branch only logs and then still falls through to
        # populate_result_dict -- confirm whether it should abort instead.
        L.get_logger().log(
            "SlurmBackend::wait: Trying to wait for a group of jobs with method other then 'os'."
            "Only non-blocking wait methods are allowed for waiting on groups: 'os'.",
            level="error")
    # After waiting: Read/obtain the results, and populate the result dict with it
    self.populate_result_dict()

  def populate_result_dict(self) -> None:
    """
    Read the output and runtime from the run methods' results.

    :return: None.
    """
    # filter for dispatched (and finished) jobs
    key_job_map = {key: value for key, value in self.job_id_map.items() if value is not None}
    for job_key, job_id in key_job_map.items():
      L.get_logger().log(
          f"SlurmBackend::populate_result_dict: Obtaining results for job {str(job_id)}, "
          f"key {job_key}",
          level="debug")
      L.get_logger().log("SlurmBackend::populate_result_dict: Timing method is: " +
                         str(self.timing_type),
                         level="debug")
      if self.timing_type == BatchSystemTimingType.SUBPROCESS:
        # for all repetitions of the job_key: read the json the timer script wrote
        for key, repetition in [(k, r) for (k, r) in self.results.keys() if k == job_key]:
          try:
            with open(f"{U.get_default_pira_dir()}/pira-slurm-{job_id}-{key}-{repetition}.json",
                      "r") as f:
              try:
                result_dict = json.load(f)
                self.results[(key,
                              repetition)] = float(result_dict["elapsed"]), result_dict["output"]
              except KeyError:
                L.get_logger().log(
                    f"SlurmBackend::populate_result_dict: Failed to read results for "
                    f"key {key}, repetition {repetition} from json result file.",
                    level="error")
          except FileNotFoundError:
            L.get_logger().log(
                f"SlurmBackend::populate_result_dict: Opening runtime json "
                f"file failed: {U.get_default_pira_dir()}/pira-"
                f"slurm-{job_id}-{key}-{repetition}.json. Exiting.",
                level="error")
            raise RuntimeError(
                f"SlurmBackend::populate_result_dict: Reading runtime from runtime json "
                f"file failed: {U.get_default_pira_dir()}/pira-"
                f"slurm-{job_id}-{key}-{repetition}.json. Exiting.")
      elif self.timing_type == BatchSystemTimingType.OS_TIME:
        # for all repetitions of the job_key: /usr/bin/time wrote the runtime
        # as the last line of the stderr file
        for key, repetition in [(k, r) for (k, r) in self.results.keys() if k == job_key]:
          try:
            with open(f"{self.config.std_err_path}.{job_id}_{repetition}", "r") as f:
              lines = f.readlines()
              try:
                runtime = float(lines[-1].strip())
              except (ValueError, IndexError):
                # FIX: these messages previously named std_out_path although
                # the file actually read is derived from std_err_path.
                L.get_logger().log(
                    f"SlurmBackend::populate_result_dict: Reading runtime from out-file "
                    f"failed: {self.config.std_err_path}.{job_id}_{repetition}. Exiting.",
                    level="error")
                raise RuntimeError(
                    f"SlurmBackend::populate_result_dict: Reading runtime from out-file "
                    f"failed: {self.config.std_err_path}.{job_id}_{repetition}. Exiting.")
              self.results[(key, repetition)] = runtime, "\n".join(lines[:-1])
          except FileNotFoundError:
            L.get_logger().log(
                f"SlurmBackend::populate_result_dict: Opening out-file "
                f"failed: {self.config.std_err_path}.{job_id}_{repetition}. Exiting.",
                level="error")
            raise RuntimeError(
                f"SlurmBackend::populate_result_dict: Opening "
                f"out-file failed: {self.config.std_err_path}.{job_id}_{repetition}. Exiting.")
      else:
        L.get_logger().log(f"SlurmBackend::populate_result_dict: Timing type is None. Exiting.",
                           level="error")
        raise RuntimeError("SlurmBackend::populate_result_dict: Timing type is None. Exiting.")
| 19,657 | 41.366379 | 126 | py |
PIRA | PIRA-master/lib/Configuration.py | """
File: Configuration.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module that provides to main data structures.
"""
import sys
# from lib.BatchSystemGenerator import MailType
sys.path.append('..')
import lib.Logging as L
import lib.Exception as E
import lib.Utility as U
import typing
from argparse import Namespace
class PiraConfigErrorException(E.PiraException):
  """Signals an error in the provided PIRA configuration."""

  def __init__(self, m):
    super().__init__(m)
class PiraItem:
  """One benchmark item of a PIRA configuration: paths, flavors, mode and run options.

  Relative directories are resolved against the base path set via set_base_path.
  """

  def __init__(self, name):
    self._name = name
    self._base_path = None
    self._analyzer_dir = None
    self._cubes_dir = None
    self._flavors = None
    self._functor_base_path = None
    self._mode = None
    self._run_options = None

  def __str__(self):
    return '[PiraItem] ' + self._name

  def get_name(self):
    return self._name

  def get_analyzer_dir(self):
    # absolute paths are used verbatim, relative ones are prefixed
    return self._analyzer_dir if U.is_absolute_path(
        self._analyzer_dir) else self._base_path + '/' + self._analyzer_dir

  def get_cubes_dir(self):
    return self._cubes_dir if U.is_absolute_path(
        self._cubes_dir) else self._base_path + '/' + self._cubes_dir

  def get_flavors(self):
    return self._flavors

  def get_functor_base_path(self):
    return self._functor_base_path if U.is_absolute_path(
        self._functor_base_path) else self._base_path + '/' + self._functor_base_path

  def get_mode(self):
    return self._mode

  def get_run_options(self):
    return self._run_options

  def set_base_path(self, path: str) -> None:
    self._base_path = path

  def set_analyzer_dir(self, directory) -> None:
    self._analyzer_dir = directory

  def set_cubes_dir(self, directory) -> None:
    self._cubes_dir = directory

  def set_flavors(self, flavors) -> None:
    self._flavors = flavors

  def set_functors_base_path(self, path) -> None:
    self._functor_base_path = path

  def set_mode(self, mode) -> None:
    self._mode = mode

  def set_run_options(self, run_opts) -> None:
    self._run_options = run_opts
class PiraConfigII:
  """
  Version-2 PIRA configuration: maps build directory names to lists of items
  and resolves relative directory names against an absolute base path.
  """

  def __init__(self):
    self._directories = {}     # directory name -> list of items
    self._abs_base_path = None
    self._empty = True

  def add_item(self, name, item) -> None:
    """Adds an item under the given directory name, creating the bucket on first use.

    FIX: replaces a try/access with a bare 'except:' (which swallowed every
    exception type) by dict.setdefault.
    """
    self._directories.setdefault(name, [])
    item.set_base_path(self._abs_base_path)
    self._directories[name].append(item)

  def get_directories(self):
    return self._directories.keys()

  def get_place(self, build):
    """Returns the location of a known build directory, absolutized if needed."""
    place = build
    for k in self._directories.keys():
      if place == k:
        if not U.is_absolute_path(k):
          place = self._abs_base_path + '/' + str(k)
    return place

  def get_items(self, directory):
    return self._directories[directory]

  def set_absolute_base_path(self, path):
    self._abs_base_path = path

  def get_absolute_base_path(self):
    return self._abs_base_path

  def is_empty(self) -> bool:
    # NOTE(review): _empty is never set to False inside this class --
    # confirm whether it is (or should be) updated by the config loader.
    return self._empty
class PiraConfigAdapter:
  """Adapts a PiraConfigII to the query interface expected by the rest of PIRA."""

  def __init__(self, pc2):
    self._pcii = pc2

  def get_adapted(self):
    """Returns the wrapped PiraConfigII."""
    return self._pcii

  def get_builds(self):
    return self._pcii.get_directories()

  def get_place(self, build):
    return self._pcii.get_place(build)

  def get_items(self, build):
    return [item.get_name() for item in self._pcii.get_items(build)]

  def has_local_flavors(self, build, item):
    # items in a PiraConfigII always carry their own flavors
    return True

  def get_item_w_name(self, build, item):
    """Looks up the item object with the given name; raises if unknown."""
    found = next(
        (candidate for candidate in self._pcii.get_items(build) if candidate.get_name() == item),
        None)
    if found is None:
      raise RuntimeError('Flavors not found for item ' + item)
    return found

  def get_flavors(self, build, item):
    return self.get_item_w_name(build, item).get_flavors()

  def get_analyzer_path(self, build, item):
    return self.get_item_w_name(build, item).get_functor_base_path()

  def get_analyzer_dir(self, build, item):
    return self.get_item_w_name(build, item).get_analyzer_dir()

  def get_benchmark_name(self, item):
    return item

  def get_builder_path(self, build, item):
    return self.get_item_w_name(build, item).get_functor_base_path()

  def get_runner_path(self, build, item):
    return self.get_item_w_name(build, item).get_functor_base_path()

  def get_runner_func(self, build, item):
    return self.get_item_w_name(build, item).get_functor_base_path()

  def get_cleaner_path(self, build, item):
    return self.get_item_w_name(build, item).get_functor_base_path()

  def get_analyzer_exp_dir(self, build, item):
    return self.get_item_w_name(build, item).get_cubes_dir()

  def get_args(self, build, item):
    return self.get_item_w_name(build, item).get_run_options().as_list()

  def is_empty(self) -> bool:
    return self._pcii.is_empty()
class PiraConfig:
"""
A configuration for PIRA
TODO: Test the actual internal data structure.
Remove unnecessary functions from this interface.
Get rid of direct dependency on this data structure as much as possible.
"""
  def __init__(self) -> None:
    # build directories known to this configuration
    self.directories = []
    # per-directory dicts: build meta data, items and flavors
    self.builds = {}
    self.items = {}
    self.prefix = []
    self.flavors = {}
    self.instrument_analysis = []
    self.builders = []
    self.args = []
    self.runner = []
    self.submitter = []
    self.global_flavors = []
    # global flavor -> submitter mapping
    self.global_submitter = {}
    self.stop_iteration = {}
    self.is_first_iteration = {}
    self.base_mapper = None
    # whether this configuration has been populated yet
    self._empty = True
  def is_empty(self) -> bool:
    # NOTE(review): _empty is initialized to True and not cleared within the
    # visible part of this class -- confirm it is updated by the config loader.
    return self._empty

  def set_build_directories(self, dirs) -> None:
    self.directories = dirs

  def set_global_flavors(self, glob_flavors) -> None:
    self.global_flavors = glob_flavors

  def get_global_flavors(self):
    return self.global_flavors

  def set_glob_submitter(self, glob_submitter, glob_flavor) -> None:
    # maps a global flavor to its submitter
    self.global_submitter.update({glob_flavor: glob_submitter})

  def set_prefix(self, prefix, dir) -> None:
    self.builds[dir].update({'prefix': prefix})

  def set_items(self, items, dir) -> None:
    self.builds[dir].update({'items': items})

  def set_flavours(self, flavours, dir) -> None:
    self.builds[dir].update({'flavours': flavours})

  def populate_build_dict(self, dir) -> None:
    # creates empty per-directory entries in the builds/items/flavors maps
    for dirs in dir:
      self.builds.update({dirs: {}})
      self.items.update({dirs: {}})
      self.flavors.update({dirs: {}})
def initialize_item_dict(self, dir, items) -> None:
for item in items:
self.items[dir].update({item: {}})
self.flavors[dir].update({item: {}})
def set_item_instrument_analysis(self, inst_analysis, dir, item) -> None:
self.items[dir][item].update({'instrument_analysis': inst_analysis})
def set_item_builders(self, builders, dir, item) -> None:
self.items[dir][item].update({'builders': builders})
def set_item_args(self, args, dir, item) -> None:
self.items[dir][item].update({'args': args})
def set_item_runner(self, runner, dir, item) -> None:
self.items[dir][item].update({'runner': runner})
def set_item_submitter(self, submitter, dir, item) -> None:
self.items[dir][item].update({'submitter': submitter})
def set_item_batch_script(self, batch_script, dir, item) -> None:
self.items[dir][item].update({'batch_script': batch_script})
def set_item_flavor(self, flavors, dir, item) -> None:
self.flavors[dir][item].update({'flavors': flavors})
def get_builds(self) -> typing.List[str]:
return [x for x in self.builds.keys()]
def get_place(self, dir):
return dir
def get_items(self, b: str) -> typing.List[str]:
return [x for x in self.items[b].keys()]
def get_flavors(self, b: str, it: str) -> typing.List[str]:
return self.flavors[b][it]['flavors']
def has_local_flavors(self, b: str, it: str) -> bool:
return len(self.flavors[b][it]['flavors']) > 0
def get_args(self, b: str, it: str) -> typing.List[typing.List[str]]:
return [self.items[b][it]['args']]
def get_cleaner_path(self, b: str, i: str) -> str:
return self.items[b][i]['builders']
def get_builder_path(self, b: str, i: str) -> str:
L.get_logger().log('Old: get_builder_path: ' + self.items[b][i]['builders'], level='debug')
return self.items[b][i]['builders']
def get_analyzer_path(self, b: str, i: str) -> str:
return self.items[b][i]['instrument_analysis'][0]
def get_runner_path(self, b: str, i: str) -> str:
return self.items[b][i]['runner']
# FIXME Rename some more reasonable // get_builder_path
def get_flavor_func(self, build: str, item: str) -> str:
L.get_logger().log('Using a deprecated function: get_flavor_func', level='warn')
return self.items[build][item]['builders']
# TODO: We should lift all the accesses to these functor paths etc to the FunctorManagement
# entity.
def get_runner_func(self, build: str, item: str) -> str:
return self.items[build][item]['runner']
def get_analyze_func(self, build, item) -> str:
return self.items[build][item]['instrument_analysis'][0]
def get_analyzer_exp_dir(self, build, item) -> str:
return self.items[build][item]['instrument_analysis'][1]
def get_analyzer_dir(self, build, item) -> str:
return self.items[build][item]['instrument_analysis'][2]
def get_analyze_slurm_func(self, build, item) -> str:
return self.items[build][item]['instrument_analysis'][3]
def is_submitter(self, build: str, item: str) -> bool:
return self.items[build][item]['submitter'] != ''
# XXX Apparrently not used
def get_submitter_func(self, build, item) -> str:
return self.items[build][item]['submitter']
def get_batch_script_func(self, build, item) -> str:
return self.items[build][item]['batch_script']
@staticmethod
def get_benchmark_name(benchmark) -> str:
return benchmark.split('/')[-1:][0]
def initialize_stopping_iterator(self) -> None:
for build in self.builds:
for item in self.builds[build]['items']:
for flavor in self.builds[build]['flavours']:
self.stop_iteration[build + item + flavor] = False
def initialize_first_iteration(self) -> None:
for build in self.builds:
for item in self.builds[build]['items']:
for flavor in self.builds[build]['flavours']:
self.is_first_iteration[build + item + flavor] = False
class TargetConfig:
  """ The TargetConfiguration encapsulates the relevant information for a specific target, i.e., its place and a given flavor.
      Using TargetConfiguration all steps of building and executing are possible. """

  def __init__(self, place: str, build: str, target: str, flavor: str, db_item_id: str):
    """ Initializes the TargetConfiguration with its necessary parameters.

    :place: str: directory the target lives in
    :build: str: top-level build entry this target belongs to
    :target: str: the item (benchmark) name
    :flavor: str: flavor to build and run
    :db_item_id: str: The unique ID for this target
    """
    self._db_item_id: str = db_item_id
    self._flavor: str = flavor
    self._target: str = target
    self._build: str = build
    self._place: str = place
    self._instr_file = ''
    self._args_for_invocation = None

  def get_place(self) -> str:
    """ :returns: the place (directory) stored for this target """
    return self._place

  def get_build(self) -> str:
    """ :returns: the build this target belongs to """
    return self._build

  def get_target(self) -> str:
    """ :returns: the target / item (child of the build) """
    return self._target

  def get_flavor(self) -> str:
    """ :returns: the flavor stored in this TargetConfiguration """
    return self._flavor

  def get_db_item_id(self) -> str:
    """ :returns: the unique DB item id of this target """
    return self._db_item_id

  def has_args_for_invocation(self) -> bool:
    return not (self._args_for_invocation is None)

  def get_args_for_invocation(self) -> str:
    args = self._args_for_invocation
    if args is None:
      L.get_logger().log('TargetConfiguration::get_args_for_invocation: args are None.',
                         level='warn')
    return args

  def set_args_for_invocation(self, args) -> None:
    self._args_for_invocation = args

  def set_instr_file(self, instr_file: str) -> None:
    self._instr_file = instr_file

  def get_instr_file(self) -> str:
    """
    Only valid IFF is_compile_time_filtering returns False!

    :returns: Iff this run is a runtime-filter run, returns the instrumentation file.
    """
    return self._instr_file
class InstrumentConfig:
  """ Holds information how instrumentation is handled in the different run phases. """

  def __init__(self, is_instrumentation_run=False, instrumentation_iteration=None):
    # Which instrumentation iteration this phase belongs to (None outside
    # instrumentation) and whether an instrumented binary is executed.
    self._instrumentation_iteration = instrumentation_iteration
    self._is_instrumentation_run = is_instrumentation_run

  def get_instrumentation_iteration(self) -> int:
    return self._instrumentation_iteration

  def is_instrumentation_run(self) -> bool:
    return self._is_instrumentation_run
class ExtrapConfig:
  """ Bundles the Extra-P related settings: export directory plus file prefix/postfix. """

  def __init__(self, dir: str, prefix: str, postfix: str):
    self._prefix = prefix
    self._dir = dir
    # Stored for completeness; no accessor for the postfix exists in this class.
    self._postfix = postfix

  def get_dir(self) -> str:
    return self._dir

  def get_prefix(self) -> str:
    return self._prefix
class InvocationConfig:
  """ Singleton holding all options of the current PIRA invocation
      (command line arguments or kwargs). """
  __instance = None

  @staticmethod
  def get_instance():
    """ Returns the singleton instance; raises if PIRA was not initialized yet. """
    if InvocationConfig.__instance is None:
      L.get_logger().log('InvocationConfig::get_instance: InvocationConfig not initialized.',
                         level='error')
      raise Exception('InvocationConfiguration not initialized!')
    return InvocationConfig.__instance

  def __init__(self, cmdline_args: Namespace):
    """ Constructs the singleton from the parsed command line arguments.

    :cmdline_args: Namespace: parsed PIRA command line arguments
    :raises Exception: if the singleton was already initialized
    """
    if InvocationConfig.__instance is not None:
      L.get_logger().log('InvocationConfig::__init__: InvocationConfig already initialized!',
                         level='error')
      raise Exception('re-initializing Singleton')
    else:
      InvocationConfig.__instance = self
      self._pira_dir = cmdline_args.pira_dir
      self._slurm_config_path = cmdline_args.slurm_config
      self._config_version = cmdline_args.config_version
      self._config_path = cmdline_args.config
      # Compile-time filtering is active unless runtime or hybrid filtering was requested.
      self._compile_time_filtering = not (cmdline_args.runtime_filter or
                                          (cmdline_args.hybrid_filter_iters != 0))
      self._pira_iters = cmdline_args.iterations
      self._repetitions = cmdline_args.repetitions
      self._hybrid_filter_iters = cmdline_args.hybrid_filter_iters
      self._export = cmdline_args.export
      self._export_runtime_only = cmdline_args.export_runtime_only
      self._use_call_site_instrumentation = cmdline_args.call_site_instrumentation
      self._lide = cmdline_args.lide
      self._analysis_parameters_path = cmdline_args.analysis_parameters

  def __str__(self) -> str:
    cf_str = 'runtime filtering'
    if self.is_hybrid_filtering():
      cf_str = 'hybrid filtering: rebuilding every ' + str(
          self.get_hybrid_filter_iters()) + ' iterations'
    if self.is_compile_time_filtering():
      cf_str = 'compiletime filtering'
    return 'Running PIRA in ' + cf_str + ' with configuration\n ' + str(self.get_path_to_cfg())

  @staticmethod
  def reset_to_default() -> None:
    """ Resets the singleton to PIRA's default invocation options,
        creating the instance first if necessary. """
    if InvocationConfig.__instance is None:
      L.get_logger().log(
          'InvocationConfig::reset_to_default: InvocationConfig not initialized, creating a new instance',
          level='warn')
      cmdline_args = Namespace(pira_dir=U.get_default_pira_dir(),
                               slurm_config=None,
                               config_version=2,
                               config=U.get_default_config_file(),
                               runtime_filter=False,
                               hybrid_filter_iters=0,
                               iterations=4,
                               repetitions=5,
                               export=False,
                               export_runtime_only=False,
                               lide=False,
                               analysis_parameters=U.get_default_analysis_parameters_config_file(),
                               call_site_instrumentation=False)
      InvocationConfig(cmdline_args)
    else:
      instance = InvocationConfig.__instance
      instance._pira_dir = U.get_default_pira_dir()
      instance._slurm_config_path = None
      instance._config_version = 2
      instance._config_path = U.get_default_config_file()
      instance._compile_time_filtering = True
      instance._pira_iters = 4
      # Consistency fix: this branch previously set 3 repetitions while the
      # fresh-instance branch above uses 5; both paths now agree on 5.
      instance._repetitions = 5
      instance._hybrid_filter_iters = 0
      instance._export = False
      instance._export_runtime_only = False
      instance._lide = False
      instance._use_call_site_instrumentation = False
      instance._analysis_parameters_path = U.get_default_analysis_parameters_config_file()

  @staticmethod
  def create_from_kwargs(args: dict) -> None:
    """ (Re-)initializes the singleton from keyword arguments; any missing
        required argument triggers a reset to the defaults first. """
    required_args = [
        'runtime_filter', 'iterations', 'repetitions', 'hybrid_filter_iters', 'export',
        'export_runtime_only', 'config_version'
    ]
    for arg in required_args:
      if args.get(arg) is None or InvocationConfig.__instance is None:
        InvocationConfig.reset_to_default()
        L.get_logger().log(
            "Invocation-Config not fully initialized. Assuming one or more default values",
            level='warn')
        break
    instance = InvocationConfig.__instance
    if args.get('config') is not None:
      instance._config_path = args['config']
    if args.get('pira_dir') is not None:
      instance._pira_dir = args['pira_dir']
    if args.get('slurm-config') is not None:
      instance._slurm_config_path = args['slurm-config']
    else:
      # set it None, if flag was not given
      # (will decide the local vs. slurm branch)
      instance._slurm_config_path = None
    if args.get('config_version') is not None:
      instance._config_version = args['config_version']
    if args.get('hybrid_filter_iters') is not None and args.get('runtime_filter') is not None:
      instance._compile_time_filtering = not (args['runtime_filter'] or
                                              (args['hybrid_filter_iters'] != 0))
    if args.get('iterations') is not None:
      instance._pira_iters = args['iterations']
    if args.get('repetitions') is not None:
      instance._repetitions = args['repetitions']
    if args.get('hybrid_filter_iters') is not None:
      instance._hybrid_filter_iters = args['hybrid_filter_iters']
    if args.get('export') is not None:
      instance._export = args['export']
    if args.get('export_runtime_only') is not None:
      instance._export_runtime_only = args['export_runtime_only']
    if args.get('use_cs_instrumentation') is not None:
      instance._use_call_site_instrumentation = args['use_cs_instrumentation']

  def get_pira_dir(self) -> str:
    return self._pira_dir

  def get_slurm_config_path(self) -> str:
    return self._slurm_config_path

  def get_config_version(self) -> str:
    return self._config_version

  def is_compile_time_filtering(self) -> bool:
    return self._compile_time_filtering

  def get_path_to_cfg(self) -> str:
    return self._config_path

  def get_hybrid_filter_iters(self) -> int:
    return self._hybrid_filter_iters

  def is_hybrid_filtering(self) -> bool:
    return self.get_hybrid_filter_iters() != 0

  def get_pira_iters(self) -> int:
    return self._pira_iters

  def get_num_repetitions(self) -> int:
    return self._repetitions

  def is_export(self) -> bool:
    return self._export

  def is_export_runtime_only(self) -> bool:
    return self._export_runtime_only

  def is_lide_enabled(self) -> bool:
    return self._lide

  def get_analysis_parameters_path(self) -> str:
    return self._analysis_parameters_path

  def use_cs_instrumentation(self) -> bool:
    return self._use_call_site_instrumentation
class CSVConfig:
  """ Settings for exporting measurement results as CSV files. """

  def __init__(self, csv_dir: str, csv_dialect: str):
    self._csv_dialect = csv_dialect
    self._csv_dir = csv_dir

  def should_export(self) -> bool:
    # Export is enabled by configuring a non-empty target directory.
    return not self._csv_dir == ''

  def get_csv_dir(self) -> str:
    return self._csv_dir

  def get_csv_dialect(self) -> str:
    return self._csv_dialect
class BatchSystemHardwareConfig:
  """
  Base class for Hardware config. Holding all hardware data.
  """

  def __init__(self,
               mem_per_cpu: int,
               number_of_tasks: int,
               number_of_cores_per_task: int,
               cpu_frequency_str: typing.Optional[str] = None,
               shell: str = "/bin/bash"):
    """
    Constructor.

    :param mem_per_cpu: Memory per thread, the --mem-per-cpu setting.
    :param number_of_tasks: Number of cores/individual processing units you want, the -n or --ntasks setting.
        E.g. important for MPI.
    :param number_of_cores_per_task: Number of threads per process you want, the --cpus-per-task or -c setting.
    :param cpu_frequency_str: Set this to ensure the processors run on equal speeds
        (disables all fancy overclocking, hyperboots, ... features), the --cpu-freq setting.
        Do not specify if you do not want a fixed cpu speed. Default: None.
    :param shell: The shell/shebang for the system. Default: "/bin/bash".
    """
    self.shell = shell
    self.cpu_frequency_str = cpu_frequency_str
    self.number_of_cores_per_task = number_of_cores_per_task
    self.number_of_tasks = number_of_tasks
    self.mem_per_cpu = mem_per_cpu
class SlurmConfig(BatchSystemHardwareConfig):
  """
  Holds config for SLURM run. This info can than be used to sbatch it as SLURM job via a job script, or srun it.
  """

  def __init__(self,
               slurm_script_file: str = f"{U.get_default_pira_dir()}/pira-slurm-job.sh",
               job_name: str = "pira-slurm-job",
               std_out_path: str = f"{U.get_default_pira_dir()}/pira-slurm-out",
               std_err_path: str = f"{U.get_default_pira_dir()}/pira-slurm-err",
               mem_per_cpu: int = 1000,
               number_of_tasks: int = 1,
               number_of_cores_per_task: int = 1,
               time_str: str = "00:10:00",
               cpu_frequency_str: typing.Optional[str] = None,
               shell: str = "/bin/bash",
               partition: typing.Optional[str] = None,
               reservation: typing.Optional[str] = None,
               account: typing.Optional[str] = None,
               job_array_start: typing.Optional[int] = None,
               job_array_end: typing.Optional[int] = None,
               job_array_step: int = 1,
               exclusive: bool = False,
               wait: bool = False,
               dependencies: typing.Optional[typing.List[str]] = None,
               mail_types=None,
               mail_address: typing.Optional[str] = None,
               uses_module_system: bool = False,
               purge_modules_at_start: bool = True,
               check_interval_in_seconds: int = 5,
               modules: typing.Optional[typing.List[typing.Dict[str, typing.Union[
                   str, typing.List[typing.Dict[str, str]]]]]] = None,
               force_sequential: bool = False) -> None:
    """
    Constructor. Check here for defaults of the config. If not given by the loader, these defaults will be in place
    in the resulting config.

    :param slurm_script_file: The file to save the SLURM script.
    :param job_name: Job name, -J or --job-name setting.
    :param std_out_path: Path to put the stdout of the job, -o or --out setting. Give the absolute file path.
    The job id will be added at the back automatically.
    :param std_err_path: Path to put the stderr of the job, -e or --err setting. Give the absolute file path.
    The job id will be added at the back automatically.
    :param mem_per_cpu: Memory per thread, the --mem-per-cpu setting.
    :param number_of_tasks: Number of cores/individual processing units you want, the -n or --ntasks setting.
    E.g. important for MPI.
    :param number_of_cores_per_task: Number of threads per process you want, the --cpus-per-task or -c setting.
    :param time_str: Time limit for the job, --time or -t setting.
    :param cpu_frequency_str: Set this to ensure the processors run on equal speeds, the --cpu-freq setting.
    Do not specify if you do not want a fixed cpu speed.
    :param shell: The shell/shebang for the system.
    :param partition: Partition for the job, -p or --partition setting.
    :param reservation: Reservation for the job, the --reservation setting.
    :param account: Allocation for the job, the -A option.
    :param job_array_start: Start id for job array, part of the -a setting.
    If you do not want a job array, do not specify job_array_start and job_array_end.
    :param job_array_end: End id for job array, part of the -a setting.
    If you do not want a job array, do not specify job_array_start and job_array_end.
    :param job_array_step: Step for the job array, part of the -a setting.
    :param exclusive: Whether to request the nodes exclusively.
    :param wait: Whether the sbatch command should block until the job is finished.
    :param dependencies: List of dependency stings.
    :param mail_types: Mail types, the --mail-type setting.
    If not specified, --mail-type=NONE will be set automatically.
    :param mail_address: Mail address, the --mail-user setting.
    :param uses_module_system: Whether the system uses a module system, with "module load/purge" commands.
    :param purge_modules_at_start: If modules should be purged bevor module load commands.
    Has no effect if "uses_module_system" is not True.
    :param check_interval_in_seconds: Check interval in seconds for wait method.
    :param modules: Module specifications for "module load" on the target system.
    Omit (or pass None) to get a fresh empty default per instance.
    :param force_sequential: Whether to force all executions to be sequential on slurm runs.
    """
    super().__init__(mem_per_cpu, number_of_tasks, number_of_cores_per_task, cpu_frequency_str,
                     shell)
    self.slurm_script_file = slurm_script_file
    self.job_name = job_name
    self.std_out_path = std_out_path
    self.std_err_path = std_err_path
    self.time_str = time_str
    self.partition = partition
    self.reservation = reservation
    self.account = account
    self.job_array_start = job_array_start
    self.job_array_end = job_array_end
    self.job_array_step = job_array_step
    self.wait = wait
    self.exclusive = exclusive
    self.dependencies = dependencies
    self.mail_types = mail_types
    self.mail_address = mail_address
    self.uses_module_system = uses_module_system
    self.purge_modules_at_start = purge_modules_at_start
    self.check_interval = check_interval_in_seconds
    # Bug fix: the previous signature used the mutable default `modules={}`,
    # so every instance constructed without an explicit value aliased the SAME
    # dict object. None is now the sentinel; each instance gets its own fresh
    # default. Callers passing an explicit value are unaffected.
    self.modules = modules if modules is not None else {}
    self.force_sequential = force_sequential
| 26,954 | 32.609726 | 128 | py |
PIRA | PIRA-master/lib/ArgumentMapping.py | """
File: ArgumentMapping.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module provides mapper classes to handle argument.
"""
import sys
sys.path.append('..')
class PiraArgument:
"""
In this class the 0-th entry is always the name of the parameter.
The 1-st entry is always the value to pass to the target application - it might be a file name.
When a string is constructed from this class, always parametername+parametervalue is returned.
"""
def __init__(self, param_name, param_value, file_name=None):
self._param_name = param_name
self._param_val = param_value
self._file_name = file_name
def __getitem__(self, key):
if key == 0:
return self._param_name
elif key == 1:
if self._file_name is None:
return self._param_val
else:
return self._file_name
else:
raise IndexError('PiraArgument only accepts 0 or 1 index.')
def __str__(self):
return self._param_name + self._param_val
def get_params(self):
return [self._param_name]
class PiraListArgument(PiraArgument):
  """ Tuple-of-parameters variant of PiraArgument.

  Indexing follows a (name, value, file) triple layout: for index ``key``,
  ``key % 3 == 0`` yields a parameter name, ``key % 3 == 1`` the corresponding
  value, and ``key % 3 == 2`` the file list.
  """

  def __init__(self, param_names, param_vals, file=None):
    self._p_names = param_names
    self._p_vals = param_vals
    self._p_files = file

  def __getitem__(self, key):
    if key >= 3 * len(self._p_names):
      raise IndexError('Out of Range in PiraListArgument')
    if key % 3 == 2:
      # The whole file list is returned for every 'file' slot.
      return self._p_files
    elif key % 3 == 1:
      # NOTE(review): the e_key arithmetic only maps correctly for the first
      # two parameter triples (key < 6); for three or more parameters the
      # computed index diverges from key // 3 -- TODO confirm (this matches
      # the 'does not work for more than 2 parameters' FIXME elsewhere).
      if key >= 3:
        e_key = key - 3
      else:
        e_key = key - 1
      return self._p_vals[e_key]
    elif key % 3 == 0:
      if key >= 3:
        e_key = key - 2
      else:
        e_key = key
      return self._p_names[e_key]
    raise IndexError('Wrong index')

  def __str__(self):
    # Concatenation of all name+value pairs, without separators.
    s = ''
    for n, v in zip(self._p_names, self._p_vals):
      s += n + v
    return s

  def get_params(self):
    return self._p_names  # This should already be a list
class ArgumentMapper:
  """ Common interface of the argument mappers; concrete mappers implement __iter__. """

  def __iter__(self):
    # NOTE(review): kept as-is -- subclasses are expected to override this;
    # the base class deliberately signals 'not implemented' via StopIteration.
    raise StopIteration('Not implemented.')

  def as_list(self):
    """ Materializes the iteration into a list. """
    return [param for param in self]

  def as_string(self) -> str:
    """ Concatenates every parameter as '<name><value>.' into one string. """
    return ''.join(str(param[0]) + str(param[1]) + '.' for param in self)

  def __str__(self):
    return self.as_string()
class CmdlineLinearArgumentMapper(ArgumentMapper):
  """
  Mapper to create a linear mapping of one or more commandline passed arguments.
  If given more than one argument, it acts like a zip iterator, therefore, all arguments
  must receive the same number of values.
  """

  def __init__(self, argmap, files=None):
    """
    :argmap: dict: parameter name -> list of values; all lists must be equally long
    :files: optional list of input files, one per value position
    :raises RuntimeError: if the value lists differ in length
    """
    self._argmap = argmap
    self._files = files
    arg_vals = self._argmap.values()
    l_elem = list(arg_vals)[0]
    for e in arg_vals:
      if len(e) != len(l_elem):
        raise RuntimeError(
            'CmdlineLinearArgumentMapper: All parameters need the same number of values')
    self._num_elems = len(l_elem)

  def __iter__(self):
    if len(self._argmap.keys()) == 1:
      key = list(self._argmap.keys())[0]
      # If this is not a file mapper, we just return as normal
      if self._files is None:
        for v in self._argmap[key]:
          yield PiraArgument(key, v)
      else:
        # If this is a file mapper, we need to give the correct file as well
        for v, f in zip(self._argmap[key], self._files):
          yield PiraArgument(key, v, f)
    else:
      # Zip-style iteration over multiple parameters: for every position,
      # collect (name, value) from each parameter list; the file (if any) is
      # appended once per parameter, mirroring the triple layout expected by
      # PiraListArgument.
      keys = self._argmap.keys()
      for counter in range(0, self._num_elems):
        names = []
        values = []
        files = []
        for k in keys:
          names.append(k)
          values.append(self._argmap[k][counter])
          if self._files is not None:
            files.append(self._files[counter])
        yield PiraListArgument(names, values, files)

  def __getitem__(self, key):
    # Only the very first argument combination is directly addressable.
    if key != 0:
      raise IndexError('Only direct access to first element allowed.')
    first_key = list(self._argmap.keys())[0]
    return PiraArgument(first_key, self._argmap[first_key][0])

  def get_argmap(self):
    return self._argmap
class CmdlineCartesianProductArgumentMapper(ArgumentMapper):
  """
  Mapper to create the Cartesian product of all given argument/values. All arguments passed
  via the commandline. Here, the arguments do not need to have equally many values.
  FIXME: Does not work for more than 2 parameters.
  """

  def __init__(self, argmap):
    self._argmap = argmap

  def __iter__(self):
    # For every (key, value) pair, combine it with every value of every
    # *other* key and yield the flat 4-tuple (k, v, other_k, other_v).
    keys = self._argmap.keys()
    for first_key in keys:
      for first_val in self._argmap[first_key]:
        for second_key in keys:
          if second_key == first_key:
            continue
          for second_val in self._argmap[second_key]:
            yield (first_key, first_val, second_key, second_val)

  def __getitem__(self, key):
    # Intentionally unimplemented (returns None).
    pass
class MPIArgumentMapper(ArgumentMapper):
  """ Prepends the number of MPI processes ('np') to each argument tuple
      produced by a wrapped base mapper. """

  def __init__(self, argmap, base_mapper):
    """
    :argmap: dict: must contain the key 'np' listing the process counts
    :base_mapper: ArgumentMapper: mapper supplying the remaining arguments
    """
    self._n_mpi = argmap['np']
    self._base_mapper = base_mapper

  def __iter__(self):
    # For every requested process count, re-iterate the base mapper and yield
    # the flat list [np, *base args]. (A leftover debug print of each yielded
    # list was removed here -- it polluted stdout on every iteration.)
    for nps in self._n_mpi:
      for args in self._base_mapper:
        res = [nps]
        for v in args:
          res.append(v)
        yield res

  def get_argmap(self):
    return self._base_mapper.get_argmap()
class UserArgumentMapper(ArgumentMapper):
  """
  Used for complex mappings of arguments to inputs / files.

  TODO: How should this be implemented? Ideas:
  1) Loads another functor that does the final mapping.
  2) Config has explicit mapping that is loaded.
  """
  # Placeholder: nothing implemented yet; inherits the ArgumentMapper behavior.
  pass
class ArgumentMapperFactory:
  """
  Creates the correct ArgumentMapper for the specific circumstance.
  """

  @classmethod
  def get_mapper(cls, options):
    """ Builds the mapper requested by options['mapper'] ('Linear',
        'CartesianProduct' or 'MPILinear'); raises RuntimeError otherwise. """
    requested_mapper = options['mapper']
    # The term 'pira-file' indicates that a FileMapper needs to be used instead of a regular mapper.
    # The options have a field called pira-file, which holds a list of filenames to be used.
    # Currently, this can only be used with a linear mapper.
    is_file_mapper = 'pira-file' in options
    if requested_mapper == 'Linear':
      if is_file_mapper:
        return CmdlineLinearArgumentMapper(options['argmap'], options['pira-file'])
      return CmdlineLinearArgumentMapper(options['argmap'])
    if requested_mapper == 'CartesianProduct':
      return CmdlineCartesianProductArgumentMapper(options['argmap'])
    if requested_mapper == 'MPILinear':
      linear = (CmdlineLinearArgumentMapper(options['argmap'], options['pira-file'])
                if is_file_mapper else CmdlineLinearArgumentMapper(options['argmap']))
      return MPIArgumentMapper(options['argmap'], linear)
    raise RuntimeError('Unknown Mapper: ' + requested_mapper)
| 7,201 | 26.7 | 132 | py |
PIRA | PIRA-master/lib/TimeTracking.py | """
File: TimeTracking.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: This module allows to track timings of the various bits and pieces with convenience functions.
"""
import os
import lib.Logging as L
class TimeTracker():
  """
  Lightweight covenience class for time tracking of various components
  """

  def __init__(self):
    # Start and end snapshots of os.times(); identical until stop() is called.
    self._s = os.times()
    self._e = self._s

  def f_track(self, sec_name, function, *args):
    """ Times a free function call and logs the duration under sec_name. """
    self._start()
    res = function(*args)
    self.stop()
    time_tuple = self.get_time()
    L.get_logger().log(sec_name + ' took %.3f seconds' % time_tuple[0], level='perf')
    return (res, time_tuple)

  def m_track(self, sec_name, obj, method_name, *args):
    """ Times a method invocation on obj and logs the duration under sec_name. """
    bound_method = self._get_callable(obj, method_name)
    self._start()
    res = bound_method(*args)
    self.stop()
    time_tuple = self.get_time()
    L.get_logger().log(sec_name + ' took %.3f seconds' % time_tuple[0], level='perf')
    return (res, time_tuple)

  def get_time(self):
    """ Returns (user, system) CPU time elapsed between start and stop. """
    # Indices 2/3 of os.times() are the user/system CPU time of *child*
    # processes -- NOTE(review): presumably intentional, since the tracked
    # work is spawned in subprocesses; confirm against the call sites.
    user_elapsed = self._e[2] - self._s[2]
    system_elapsed = self._e[3] - self._s[3]
    return (user_elapsed, system_elapsed)

  def _start(self):
    self._s = os.times()

  def stop(self):
    self._e = os.times()

  def _get_callable(self, obj, name):
    # Resolves obj.name, logging and re-raising when the attribute is missing.
    try:
      return getattr(obj, name)
    except Exception as e:
      L.get_logger().log('No such attribute', level='error')
      raise e
| 1,483 | 25.981818 | 126 | py |
PIRA | PIRA-master/lib/RunnerFactory.py | """
File: RunnerFactory.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module to create different Runner objects, depending on the configuration.
"""
import lib.Logging as L
from lib.BatchSystemBackends import BatchSystemInterface
from lib.Configuration import PiraConfig, ExtrapConfig, SlurmConfig
from lib.Configuration import PiraConfigII, PiraConfigAdapter, PiraConfigErrorException
from lib.Runner import LocalRunner, LocalScalingRunner, SlurmRunner, SlurmScalingRunner
from lib.ProfileSink import NopSink, ExtrapProfileSink, PiraOneProfileSink
class PiraRunnerFactory:
  """ Creates the Runner instances (local or SLURM, simple or scaling) for a PIRA run. """

  def __init__(self, configuration: PiraConfig):
    """
    :configuration: PiraConfig: the (possibly adapted) PIRA configuration
    """
    self._config = configuration

  def get_simple_local_runner(self):
    """ Runner for plain local profiling runs, attached to a PIRA I sink. """
    return LocalRunner(self._config, PiraOneProfileSink())

  def get_scalability_runner(self, extrap_config: ExtrapConfig):
    """ Runner for local scaling experiments feeding an Extra-P profile sink. """
    attached_sink = self._build_extrap_sink(extrap_config, 'get_scalability_runner')
    return LocalScalingRunner(self._config, attached_sink)

  def get_simple_slurm_runner(self, slurm_config: SlurmConfig,
                              batch_interface: BatchSystemInterface):
    """ Runner for plain profiling runs submitted through SLURM. """
    return SlurmRunner(self._config, slurm_config, batch_interface, PiraOneProfileSink())

  def get_scalability_slurm_runner(self, slurm_config: SlurmConfig,
                                   batch_interface: BatchSystemInterface,
                                   extrap_config: ExtrapConfig):
    """ Runner for scaling experiments submitted through SLURM. """
    attached_sink = self._build_extrap_sink(extrap_config, 'get_scalability_slurm_runner')
    return SlurmScalingRunner(self._config, slurm_config, batch_interface, attached_sink)

  def _build_extrap_sink(self, extrap_config: ExtrapConfig, log_ctx: str):
    """ Shared helper (deduplicates the two scalability factory methods):
    extracts the run options from a PiraConfigII (or adapter) and builds the
    ExtrapProfileSink.

    :extrap_config: ExtrapConfig: Extra-P directory / prefix settings
    :log_ctx: str: name of the calling factory method, used in log/error text
    :raises PiraConfigErrorException: if the configuration is empty
    :raises RuntimeError: for old-style configurations or configurations
                          without any item providing run options
    """
    if self._config.is_empty():
      raise PiraConfigErrorException('Configuration is None in RunnerFactory')
    pc_ii = None
    params = None
    ro = None
    if isinstance(self._config, PiraConfigAdapter):
      L.get_logger().log('PiraRunnerFactory::' + log_ctx +
                         ': Configuration is PiraConfigurationAdapter')
      pc_ii = self._config.get_adapted()
    elif isinstance(self._config, PiraConfigII):
      pc_ii = self._config
      if pc_ii is not None:
        L.get_logger().log('PiraRunnerFactory::' + log_ctx + ': pc_ii is not none.')
    if pc_ii is not None and isinstance(pc_ii, PiraConfigII):
      L.get_logger().log('PiraRunnerFactory::' + log_ctx + ': pc_ii is PiraConfigurationII')
      params = {}
      L.get_logger().log('PiraRunnerFactory::' + log_ctx + ': Preparing params')
      for k in pc_ii.get_directories():
        L.get_logger().log('PiraRunnerFactory::' + log_ctx + ': ' + str(k))
        for pi in pc_ii.get_items(k):
          L.get_logger().log('PiraRunnerFactory::' + log_ctx + ': ' + str(pi))
          # This should be only one element anyway; the last item's run
          # options are the ones used for the sink.
          ro = pi.get_run_options()
    if params is None:
      # Bug fix: the SLURM variant used to report 'get_scalability_runner'
      # here even when called from get_scalability_slurm_runner.
      raise RuntimeError('PiraRunnerFactory::' + log_ctx +
                         ': Cannot use extra-p with old configuration')
    if ro is None:
      # Robustness: without any item there are no run options; fail with a
      # clear message instead of an AttributeError on None below.
      raise RuntimeError('PiraRunnerFactory::' + log_ctx +
                         ': Configuration contains no items with run options')
    return ExtrapProfileSink(extrap_config.get_dir(), ro.get_argmap(),
                             extrap_config.get_prefix(), 'pofi', 'profile.cubex')
| 4,947 | 44.814815 | 126 | py |
PIRA | PIRA-master/lib/FunctorManagement.py | """
File: FunctorManagement.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module to load and manage the user-supplied functors.
"""
import typing
import lib.Utility as U
import lib.Logging as L
from lib.Configuration import PiraConfig, PiraConfigErrorException
from lib.Exception import PiraException
class FunctorManagementException(PiraException):
  """The exception indicates a problem with the functor management"""

  def __init__(self, msg):
    super().__init__(msg)
class FunctorManager:
""" Entity to query for functors. Needs to be initialized with a PiraConfiguration once per PIRA configuration file. """
class __FunctorManagerImpl:
    def __init__(self, cfg: PiraConfig) -> None:
      """ Stores the PIRA configuration and prepares the functor cache.

      :cfg: PiraConfig: the loaded configuration; must not be empty
      :raises FunctorManagementException: if the configuration is empty
      """
      if cfg.is_empty():
        raise FunctorManagementException('Cannot construct from empty Configuration')
      self.config = cfg
      # Maps functor names to the already-loaded functor modules.
      self.functor_cache = {}
    def get_or_load_functor(self, build: str, item: str, flavor: str, func: str):
      '''
      Returns the functor of the requested kind for (build, item, flavor),
      loading and caching it on first use.

      We use the wholename, i.e. fully qualified path to the functor, as the key
      in our functor cache.

      :func: str: functor kind -- one of 'basebuild', 'build', 'clean',
                  'analyze' or 'run'
      :raises Exception: if func is none of the supported kinds
      '''
      if func == 'basebuild':
        path, name, wnm = self.get_builder(build, item, flavor, True)
      elif func == 'build':
        path, name, wnm = self.get_builder(build, item, flavor)
      elif func == 'clean':
        path, name, wnm = self.get_cleaner(build, item, flavor)
      elif func == 'analyze':
        path, name, wnm = self.get_analyzer(build, item, flavor)
      elif func == 'run':
        path, name, wnm = self.get_runner(build, item, flavor)
      else:
        raise Exception('No such option available to load functor for. Value = ' + func)
      try:
        # EAFP membership probe: a KeyError means the functor is not cached yet.
        _ = self.functor_cache[name]
      except KeyError:
        self.functor_cache[name] = U.load_functor(path, name)
      L.get_logger().log('FunctorManager::get_or_load: The retrieved ' + func + ' functor: ' +
                        str(self.functor_cache[name]),
                        level='debug')
      return self.functor_cache[name]
    def get_builder(self,
                    build: str,
                    item: str,
                    flavor: str,
                    base: bool = False) -> typing.Tuple[str, str, str]:
      """ Returns (path, functor name, full file path) for the build functor.

      :base: bool: if True, request the non-instrumented base-build functor
                   (name prefixed with 'no_instr').
      """
      p = self.config.get_builder_path(build, item)
      n = self.get_builder_name(build, item, flavor)
      if base:
        n = U.concat_a_b_with_sep('no_instr', n, '_')
      # NOTE(review): the file path is derived from the non-base name even when
      # base=True -- confirm this is intended.
      wnm = self.get_builder_file(build, item, flavor)
      return p, n, wnm
def get_cleaner(self, build: str, item: str, flavor: str) -> typing.Tuple[str, str, str]:
p = self.config.get_cleaner_path(build, item)
n = self.get_cleaner_name(build, item, flavor)
wnm = self.get_cleaner_file(build, item, flavor)
return p, n, wnm
def get_analyzer(self, build: str, item: str, flavor: str) -> typing.Tuple[str, str, str]:
p = self.config.get_analyzer_path(build, item)
n = self.get_analyzer_name(build, item, flavor)
wnm = self.get_analyzer_file(build, item, flavor)
return p, n, wnm
def get_runner(self, build: str, item: str, flavor: str) -> typing.Tuple[str, str, str]:
p = self.config.get_runner_path(build, item)
n = self.get_runner_name(build, item, flavor)
wnm = self.get_runner_file(build, item, flavor)
return p, n, wnm
def get_raw_name(self, build: str, item: str, flavor: str) -> str:
b_nm = self.config.get_benchmark_name(item)
raw_nm = U.concat_a_b_with_sep(b_nm, flavor, '_')
return raw_nm
def get_cleaner_name(self, build: str, item: str, flavor: str) -> str:
raw_nm = self.get_raw_name(build, item, flavor)
cl_nm = U.concat_a_b_with_sep('clean', raw_nm, '_')
return cl_nm
def get_cleaner_file(self, build: str, item: str, flavor: str) -> str:
path = self.config.get_cleaner_path(build, item)
nm = self.get_cleaner_name(build, item, flavor)
file_path = U.concat_a_b_with_sep(path, nm, '/')
full_path = U.concat_a_b_with_sep(file_path, 'py', '.')
return full_path
def get_builder_name(self, build: str, item: str, flavor: str) -> str:
raw_nm = self.get_raw_name(build, item, flavor)
# FIXME: remove as soon as the new uniform naming is in place
return raw_nm
cl_nm = U.concat_a_b_with_sep('build', raw_nm, '_')
return cl_nm
def get_builder_file(self, build: str, item: str, flavor: str) -> str:
path = self.config.get_builder_path(build, item)
nm = self.get_builder_name(build, item, flavor)
file_path = U.concat_a_b_with_sep(path, nm, '/')
full_path = U.concat_a_b_with_sep(file_path, 'py', '.')
return full_path
def get_analyzer_name(self, build: str, item: str, flavor: str) -> str:
raw_nm = self.get_raw_name(build, item, flavor)
cl_nm = U.concat_a_b_with_sep('analyze', raw_nm, '_')
return cl_nm
def get_analyzer_file(self, build: str, item: str, flavor: str) -> str:
path = self.config.get_analyzer_path(build, item)
nm = self.get_analyzer_name(build, item, flavor)
file_path = U.concat_a_b_with_sep(path, nm, '/')
full_path = U.concat_a_b_with_sep(file_path, 'py', '.')
return full_path
def get_runner_name(self, build: str, item: str, flavor: str) -> str:
raw_nm = self.get_raw_name(build, item, flavor)
cl_nm = U.concat_a_b_with_sep('runner', raw_nm, '_')
return cl_nm
def get_runner_file(self, build: str, item: str, flavor: str) -> str:
path = self.config.get_runner_path(build, item)
nm = self.get_runner_name(build, item, flavor)
file_path = U.concat_a_b_with_sep(path, nm, '/')
full_path = U.concat_a_b_with_sep(file_path, 'py', '.')
return full_path
instance = None
def __init__(self, cfg=None):
if not FunctorManager.instance:
if cfg is None:
raise FunctorManagementException('Cannot create from None')
FunctorManager.instance = FunctorManager.__FunctorManagerImpl(cfg)
else:
if cfg is not None:
if not cfg.is_valid():
raise PiraConfigErrorException('Invalid configuration passed to FunctorManager')
FunctorManager.instance.cfg = cfg
FunctorManager.instance.functor_cache.clear()
@classmethod
def from_config(cls, p_config: PiraConfig):
""" Needs to be called once per configuration. """
return cls(p_config)
def __getattr__(self, name):
return getattr(self.instance, name)
def reset(self):
FunctorManager.instance = None
| 6,589 | 37.313953 | 126 | py |
PIRA | PIRA-master/lib/Exception.py | """
File: Exception.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module holding base exception for all PIRA Exceptions.
"""
class PiraException(Exception):
  """Base exception for all PIRA-specific exceptions.

  The message is now also forwarded to Exception.__init__ so that
  ``e.args`` is populated and the exception reprs/pickles correctly;
  ``_message`` and ``__str__`` are kept for backward compatibility.
  """

  def __init__(self, message):
    super().__init__(message)
    self._message = message

  def __str__(self):
    # Return the human-readable message, as before.
    return self._message
| 386 | 23.1875 | 126 | py |
PIRA | PIRA-master/lib/Pira.py | """
File: Pira.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Module implementing the main workflow of PIRA.
"""
import lib.Logging as L
import lib.Utility as U
import lib.FunctorManagement as F
import lib.TimeTracking as T
import lib.Database as D
import lib.Exporter as E
import lib.Checker as C
from lib.DefaultFlags import BackendDefaults
from lib.RunnerFactory import PiraRunnerFactory
from lib.ConfigurationLoader import SimplifiedConfigurationLoader as SCLoader, BatchSystemConfigurationLoader
from lib.ConfigurationLoader import ConfigurationLoader as CLoader
from lib.Configuration import TargetConfig, PiraConfig, ExtrapConfig, InvocationConfig, \
PiraConfigErrorException, CSVConfig, SlurmConfig
from lib.Runner import Runner, LocalRunner, LocalScalingRunner
from lib.Builder import Builder as BU
from lib.Analyzer import Analyzer as A
import typing
import sys
import os
def execute_with_config(runner: Runner, analyzer: A, target_config: TargetConfig,
                        csv_config: CSVConfig) -> None:
  """Run the full PIRA measurement cycle for one target configuration.

  Builds and runs an uninstrumented baseline first, then alternates
  analysis / (re)build / instrumented run for the configured number of
  PIRA iterations, logging runtimes and overhead. Optionally exports all
  per-iteration results to a CSV file. Any failure is re-raised as a
  RuntimeError for the caller.
  """
  try:
    instrument = False
    was_rebuilt = True

    rr_exporter = E.PiraRuntimeExporter()

    # Build without any instrumentation
    L.get_logger().log('Building vanilla version for baseline measurements', level='info')
    vanilla_builder = BU(target_config, instrument)
    tracker = T.TimeTracker()
    tracker.m_track('Vanilla Build', vanilla_builder, 'build')

    # Run without instrumentation for baseline
    L.get_logger().log('Running baseline measurements', level='info')
    vanilla_rr = runner.do_baseline_run(target_config)
    L.get_logger().log('Pira::execute_with_config: RunResult: ' + str(vanilla_rr) + ' | avg: ' +
                       str(vanilla_rr.get_average()),
                       level='debug')
    instr_file = ''

    if (csv_config.should_export()):
      rr_exporter.add_iteration_data('Vanilla', vanilla_rr)

    for iteration in range(0, InvocationConfig.get_instance().get_pira_iters()):
      L.get_logger().log('Running instrumentation iteration ' + str(iteration), level='info')

      # Only run the pgoe to get the functions name
      iteration_tracker = T.TimeTracker()

      # Analysis Phase: produce the instrumentation (whitelist) file for this iteration.
      instr_file = analyzer.analyze(target_config, iteration, was_rebuilt)
      was_rebuilt = False
      L.get_logger().log('[WHITELIST] $' + str(iteration) + '$ ' + str(U.lines_in_file(instr_file)),
                         level='perf')
      U.shell('stat ' + instr_file)

      # After baseline measurement is complete, do the instrumented build/run
      # This is only necessary in every iteration when run in compile-time mode.
      # For hybrid-filtering this is done after the specified amount of iterations
      if needs_rebuild(iteration):
        was_rebuilt = True
        instrument = True
        instr_builder = BU(target_config, instrument, instr_file)
        tracker.m_track('Instrument Build', instr_builder, 'build')

      # Run Phase
      L.get_logger().log('Running profiling measurements', level='info')
      instr_rr = runner.do_profile_run(target_config, iteration)
      if (csv_config.should_export()):
        rr_exporter.add_iteration_data('Instrumented ' + str(iteration), instr_rr)

      # Compute overhead of instrumentation relative to the vanilla baseline.
      ovh_percentage = instr_rr.compute_overhead(vanilla_rr)
      L.get_logger().log('[RUNTIME] $' + str(iteration) + '$ ' + str(instr_rr.get_average()),
                         level='perf')
      L.get_logger().log('[OVERHEAD] $' + str(iteration) + '$ ' + str(ovh_percentage), level='perf')
      L.get_logger().log('[REPETITION SUM] $' + str(iteration) + '$ ' +
                         str(instr_rr.get_accumulated_runtime()),
                         level='perf')

      iteration_tracker.stop()
      user_time, system_time = iteration_tracker.get_time()
      L.get_logger().log('[ITERTIME] $' + str(iteration) + '$ ' + str(user_time) + ', ' +
                         str(system_time),
                         level='perf')

    # CSV export is best-effort: a write failure is logged but does not abort the run.
    if (csv_config.should_export()):
      file_name = target_config.get_target() + '-' + target_config.get_flavor() + '.csv'
      csv_file = os.path.join(csv_config.get_csv_dir(), file_name)
      try:
        U.make_dir(csv_config.get_csv_dir())
        rr_exporter.export(csv_file, csv_config.get_csv_dialect())
      except Exception as e:
        L.get_logger().log('Pira::execute_with_config: Problem writing CSV file\nMessage:\n' +
                           str(e),
                           level='error')
  except Exception as e:
    L.get_logger().log('Pira::execute_with_config: Problem during preparation of run.\nMessage:\n' +
                       str(e),
                       level='error')
    raise RuntimeError(str(e))
def do_rebuild(build_name: str,
               target_config: TargetConfig,
               instrument: bool,
               instr_file: typing.Optional[str] = None) -> None:
  """Build the target once, tracking the build time under *build_name*.

  :param build_name: label used for the time-tracking entry
  :param target_config: target/build/item/flavor description to build
  :param instrument: whether to build with instrumentation
  :param instr_file: instrumentation (whitelist) file; required when
                     *instrument* is True
  :raises Exception: if *instrument* is True but no *instr_file* is given
  """
  # Use identity comparison with None (was: instr_file == None).
  if instrument and instr_file is None:
    L.get_logger().log('Should instrument but no instrumentation file.', level='error')
    raise Exception('instrument and no instrumentation file')

  builder = BU(target_config, instrument, instr_file)
  tracker = T.TimeTracker()
  tracker.m_track(build_name, builder, 'build')
def needs_rebuild(iteration: int) -> bool:
  """Decide whether the target must be recompiled for this PIRA iteration.

  A rebuild is needed in compile-time filtering mode (every iteration),
  in the very first iteration, or - with hybrid filtering - whenever the
  iteration count hits a multiple of the hybrid filter interval.
  """
  invoc = InvocationConfig.get_instance()
  if invoc.is_compile_time_filtering():
    return True
  if iteration == 0:
    return True
  return invoc.is_hybrid_filtering() and iteration % invoc.get_hybrid_filter_iters() == 0
def process_args_for_extrap(cmdline_args) -> typing.Tuple[bool, ExtrapConfig]:
  """Derive the Extra-P settings from the command line.

  Extra-P modelling is enabled iff an extrap directory was given; in that
  case the repetition count is sanity-checked as well.
  """
  use_extra_p = False
  extrap_config = ExtrapConfig('', '', '')
  if cmdline_args.extrap_dir != '':
    use_extra_p = True
    extrap_config = ExtrapConfig(cmdline_args.extrap_dir, cmdline_args.extrap_prefix, '')

    num_reps = cmdline_args.repetitions
    if num_reps < 5:
      L.get_logger().log('At least 5 repetitions are recommended for Extra-P modelling.',
                         level='warn')
      # NOTE(review): this inner branch looks like leftover debug code (see the
      # 'REMEMBER TO REMOVE' log line): it only triggers for negative repetition
      # counts, and its messages disagree ('At least 3 ... required' is logged,
      # but the raise says 'At least 5 ...'). Confirm the intended threshold.
      if num_reps < 0:
        L.get_logger().log('REMEMBER TO REMOVE IN PIRA::process_args_for_extrap', level='warn')
        L.get_logger().log('At least 3 repetitions are required for Extra-P modelling.',
                           level='error')
        raise RuntimeError('At least 5 repetitions are needed for Extra-P modelling.')

  return use_extra_p, extrap_config
def process_args_for_csv(cmdline_args):
  """Bundle the CSV-export command line options into a CSVConfig."""
  return CSVConfig(cmdline_args.csv_dir, cmdline_args.csv_dialect)
def main(cmdline_args) -> None:
  """ Main function for pira framework. Used to invoke the various components.

  Loads the configuration, chooses local or batch-system (Slurm) runners -
  optionally wrapped for Extra-P scaling experiments - and then iterates
  over all builds / items / flavors, executing the measurement cycle for
  each. On RuntimeError the working directory is restored and PIRA exits
  with status -1.
  """
  invoc_cfg = InvocationConfig(cmdline_args)
  L.get_logger().log(str(invoc_cfg), level='info')
  use_extra_p, extrap_config = process_args_for_extrap(cmdline_args)
  home_dir = U.get_cwd()
  U.set_home_dir(home_dir)
  U.make_dir(invoc_cfg.get_pira_dir())
  csv_config = process_args_for_csv(cmdline_args)

  try:
    # Config version 1 uses the full loader; everything else the simplified one.
    if invoc_cfg.get_config_version() == 1:
      config_loader = CLoader()
    else:
      config_loader = SCLoader()

    configuration = config_loader.load_conf()
    C.Checker.check_configfile(configuration)

    # A Slurm config path on the command line switches PIRA into batch mode.
    is_batch_system_run = invoc_cfg.get_slurm_config_path() is not None
    if is_batch_system_run:
      L.get_logger().log("Running the batch system case", "info")
    else:
      L.get_logger().log("Running the local case", "info")

    # The FunctorManager manages loaded functors and generates the respective names
    F.FunctorManager(configuration)
    dbm = D.DBManager(D.DBManager.db_name + '.' + D.DBManager.db_ext)
    dbm.create_cursor()
    analyzer = A(configuration)

    runner_factory = PiraRunnerFactory(configuration)
    if is_batch_system_run:
      # setup slurm config
      slurm_config_loader = BatchSystemConfigurationLoader(invoc_cfg)
      slurm_config = slurm_config_loader.get_config()
      # get slurm runners
      runner = runner_factory.get_simple_slurm_runner(slurm_config,
                                                      slurm_config_loader.get_batch_interface())
      if use_extra_p:
        L.get_logger().log('Running with Extra-P runner')
        runner = runner_factory.get_scalability_slurm_runner(
            slurm_config, slurm_config_loader.get_batch_interface(), extrap_config)
    else:
      # get local runners
      runner = runner_factory.get_simple_local_runner()
      if use_extra_p:
        L.get_logger().log('Running with Extra-P runner')
        runner = runner_factory.get_scalability_runner(extrap_config)

    # Connect the runner's profile sink (if any) to the analyzer.
    if runner.has_sink():
      analyzer.set_profile_sink(runner.get_sink())

    # A build/place is a top-level directory
    for build in configuration.get_builds():
      L.get_logger().log('Build: ' + str(build))
      total_time = T.TimeTracker()
      app_tuple = (U.generate_random_string(), build, '', '')
      dbm.insert_data_application(app_tuple)

      # An item is a target/software in that directory
      for item in configuration.get_items(build):
        L.get_logger().log('Running for item ' + str(item))

        # A flavor is a specific version to build
        if configuration.has_local_flavors(build, item):
          for flavor in configuration.get_flavors(build, item):
            L.get_logger().log('Running for local flavor ' + flavor, level='debug')

            # prepare database, and get a unique handle for current item.
            db_item_id = dbm.prep_db_for_build_item_in_flavor(configuration, build, item, flavor)

            # Create configuration object for the item currently processed.
            place = configuration.get_place(build)
            t_config = TargetConfig(place, build, item, flavor, db_item_id)

            # Execute it given the generated target description
            execute_with_config(runner, analyzer, t_config, csv_config)

        # If global flavor
        else:
          # TODO: Implement
          L.get_logger().log('In this version of PIRA it is not yet implemented', level='error')
          assert False

      total_time.stop()
      L.get_logger().log('PIRA total runtime: {}'.format(total_time.get_time()), level='perf')

    U.change_cwd(home_dir)

  except RuntimeError as rt_err:
    U.change_cwd(home_dir)
    L.get_logger().log('Runner.run caught exception. Message: ' + str(rt_err), level='error')
    L.get_logger().dump_tape()
    sys.exit(-1)
| 10,693 | 40.289575 | 126 | py |
PIRA | PIRA-master/lib/tables.py | """
File: tables.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
"""
import sqlite3 as db
create_application_table = """ CREATE TABLE IF NOT EXISTS Application (
AppID text PRIMARY KEY,
App_Name text,
Global_Flavor text,
Global_Submitter text
); """
create_builds_table = """ CREATE TABLE IF NOT EXISTS Builds (
BuildID text PRIMARY KEY,
Build_Name text NOT NULL,
Prefix text NOT NULL,
Flavors text NOT NULL,
AppName text NOT NULL,
FOREIGN KEY(AppName) REFERENCES Application(App_Name)
); """
create_items_table = """ CREATE TABLE IF NOT EXISTS Items (
ItemID text PRIMARY KEY,
Item_Name text NOT NULL,
Inst_Analysis_Functor_Path text NOT NULL,
Builders_Funtor_Path text NOT NULL,
Run_Args text NOT NULL,
Runner_Functor_Path text NOT NULL,
Submitter_Functor_Path text NOT NULL,
Exp_Data_Dir_Base_Path text NOT NULL,
BuildName text NOT NULL,
FOREIGN KEY(BuildName) REFERENCES Builds(Build_Name)
); """
create_experiment_table = """ CREATE TABLE IF NOT EXISTS Experiment (
Experiment_ID text PRIMARY KEY,
BenchmarkName text,
Iteration_No INTEGER,
IsWithInstrumentation INTEGER,
CubeFilePath text NOT NULL,
Runtime text NOT NULL,
Item_ID text NOT NULL,
FOREIGN KEY(Item_ID) REFERENCES Items(ItemID)
); """
| 2,553 | 54.521739 | 126 | py |
CPM-Live | CPM-Live-master/cpm-live/cpmbee_translator.py | from typing import Dict
from cpm_live.generation.bee import CPMBeeBeamSearch
from cpm_live.models import CPMBeeTorch, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
import torch
import spacy
import re
def is_chinese(ch: str):
    """Return True iff *ch* lies in the CJK Unified Ideographs block (U+4E00-U+9FFF)."""
    return "\u4e00" <= ch <= "\u9fff"
def is_english(ch: str):
    """Return True iff *ch* is an ASCII (English) letter a-z/A-Z.

    Plain ``str.isalpha`` is wrong for this predicate: it returns True for
    any Unicode letter, including CJK ideographs, so it could not
    distinguish English from Chinese characters.
    """
    return ch.isascii() and ch.isalpha()
class Translator:
    """Chinese<->English translator built on a CPM-Bee checkpoint.

    Text is split into batchable sentence chunks, translated with beam
    search, and reassembled. For English->Chinese, spaCy PRODUCT entities
    are masked with <unk_i> placeholders before translation and restored
    afterwards.
    """

    def __init__(self, ckpt_path, batch_size=8):
        # Model/tokenizer setup; the config path is hard-coded relative to CWD.
        config = CPMBeeConfig.from_json_file("config/cpm-bee-10b.json")
        self.tokenizer = CPMBeeTokenizer()
        model = CPMBeeTorch(config=config)
        model.load_state_dict(torch.load(ckpt_path))
        model.cuda()
        self._beam_search = CPMBeeBeamSearch(
            model=model,
            tokenizer=self.tokenizer,
        )
        self._batch_size = batch_size
        # English NER pipeline, used to protect entities during translation.
        self._nlp_eng = spacy.load("en_core_web_trf")
        # self._nlp_chn = spacy.load("zh_core_web_trf")

    def _auto_cut(self, text: str):
        """Cut *text* into chunks, preferring to break at sentence punctuation.

        The value in CUT_TABLE is the minimum chunk length before a break at
        that punctuation character is allowed (Chinese punctuation breaks
        earlier than ASCII).
        """
        CUT_TABLE = {
            ".": 100,
            "?": 100,
            "!": 100,
            "。": 48,
            "?": 48,
            "!": 48,
        }
        st = 0
        sub_text = []
        while st < len(text):
            ed = st
            # Advance until a breakable punctuation mark past the minimum length.
            while ed + 1 < len(text) and (
                text[ed] not in CUT_TABLE or ed < st + CUT_TABLE[text[ed]]
            ):
                ed += 1
            sub_text.append(text[st : ed + 1])
            st = ed + 1
        return sub_text

    def _remove_entity(self, nlp: spacy.language.Language, text: str):
        """Mask PRODUCT entities in *text* with <unk_i> placeholders.

        Returns the masked (tokenizer-escaped) text and a map from
        placeholder to the original entity string.
        """
        doc = nlp(text)
        ent_spans = []
        for ent in doc.ents:
            if ent.label_ in ["PRODUCT"]:
                ent_spans.append((ent.start_char, ent.end_char))
        # NOTE(review): sorted() returns a new list that is discarded here;
        # this effectively relies on doc.ents already being in document order.
        sorted(ent_spans, key=lambda x: x[0])
        sub_text = []
        ent_map = {}
        unk_map = {}
        p = 0
        for ent_s, ent_e in ent_spans:
            sub_text.append(self.tokenizer.escape(text[p:ent_s]))
            ent = text[ent_s:ent_e]
            if ent not in ent_map:
                ent_map[ent] = len(ent_map)
                unk_map["<unk_{}>".format(ent_map[ent])] = ent
            sub_text.append("<unk_{}>".format(ent_map[ent]))
            p = ent_e
        sub_text.append(self.tokenizer.escape(text[p:]))
        return "".join(sub_text), unk_map

    def _replace_entity(self, text: str, table: Dict[str, str]):
        """Substitute <unk_i> placeholders in *text* back with their entities."""
        ret = []
        for token in self.tokenizer.tokenize(text):
            if token.is_special and token.token in table:
                # NOTE(review): `t` is computed (stripping a leading "the ")
                # but never used -- the untouched table entry is appended below.
                t = token.token
                if t.startswith("the "):
                    t = t[4:]
                ret.append(table[token.token])
            else:
                ret.append(token.token)
        return "".join(ret)

    def to_chn(self, text: str) -> str:
        """Translate English *text* to Chinese, preserving line breaks."""
        text, replace_table = self._remove_entity(self._nlp_eng, text)
        sub_text = []
        for line in text.split("\n"):
            sub_text.extend(self._auto_cut(line))
            sub_text.append("")  # empty marker -> newline in the output
        ret = ["\n" for _ in range(len(sub_text))]
        curr_batch = []
        curr_batch_idx = []
        for i, t in enumerate(sub_text):
            if len(t) == 0:
                ret[i] = "\n"
            else:
                curr_batch.append(t)
                curr_batch_idx.append(i)
                # Flush a full batch through beam search.
                if len(curr_batch) >= self._batch_size:
                    inference_results = self._beam_search.generate(
                        [{"document": doc, "task": "英翻中", "<ans>": ""} for doc in curr_batch],
                        max_length=180,
                        repetition_penalty=1.0,
                    )
                    for idx, res in zip(curr_batch_idx, inference_results):
                        ret[idx] = self._replace_entity(res["<ans>"], replace_table)
                    curr_batch = []
                    curr_batch_idx = []
        # Flush the final partial batch.
        if len(curr_batch) > 0:
            inference_results = self._beam_search.generate(
                [{"document": doc, "task": "英翻中", "<ans>": ""} for doc in curr_batch],
                max_length=180,
                repetition_penalty=1.0,
            )
            for idx, res in zip(curr_batch_idx, inference_results):
                ret[idx] = self._replace_entity(res["<ans>"], replace_table)
            curr_batch = []
            curr_batch_idx = []
        return "".join(ret)

    def to_eng(self, text: str):
        """Translate Chinese *text* to English, preserving line breaks."""
        text = self.tokenizer.escape(text)
        # Insert a space between a non-ASCII char and a following Latin letter.
        text = re.sub(r"([^\x00-\x7F])([a-zA-Z])", r"\1 \2", text)
        sub_text = []
        for line in text.split("\n"):
            sub_text.extend(self._auto_cut(line))
            sub_text.append("")  # empty marker -> newline in the output
        ret = ["\n" for _ in range(len(sub_text))]
        curr_batch = []
        curr_batch_idx = []
        for i, t in enumerate(sub_text):
            if len(t) == 0:
                ret[i] = "\n"
            else:
                curr_batch.append(t)
                curr_batch_idx.append(i)
                # Flush a full batch through beam search.
                if len(curr_batch) >= self._batch_size:
                    inference_results = self._beam_search.generate(
                        [{"document": doc, "task": "中翻英", "<ans>": ""} for doc in curr_batch],
                        max_length=180,
                        repetition_penalty=1.0,
                    )
                    for idx, res in zip(curr_batch_idx, inference_results):
                        ret[idx] = self.tokenizer.unescape(res["<ans>"])
                    curr_batch = []
                    curr_batch_idx = []
        # Flush the final partial batch.
        if len(curr_batch) > 0:
            inference_results = self._beam_search.generate(
                [{"document": doc, "task": "中翻英", "<ans>": ""} for doc in curr_batch],
                max_length=180,
                repetition_penalty=1.0,
            )
            for idx, res in zip(curr_batch_idx, inference_results):
                ret[idx] = self.tokenizer.unescape(res["<ans>"])
            curr_batch = []
            curr_batch_idx = []
        # Re-join translated chunks with single spaces (except at line starts).
        is_newline = True
        for i in range(len(ret)):
            if ret[i] == "\n":
                is_newline = True
            elif is_newline:
                is_newline = False
            else:
                ret[i] = " " + ret[i]
        return "".join(ret)
def main():
    """Demo entry point: translate the CPM-Live risk statement to English.

    Note: the checkpoint path below is a placeholder and must be replaced
    with a real model path before running.
    """
    translator = Translator("path/to/model")
    print(
        translator.to_eng(
            """考虑到机器学习模型的“黑盒”本质,模型有可能在不受控的情况下输出包括但不限于虚假信息、错误政治言论、偏见与歧视性话语、对不良行为的煽动与暗示等内容。CPM-Live虽已对相关训练数据进行数据清洗,但仍有可能具有不限于如下所示使用风险。用户使用CPM-Live相关资源前,需明确本节涉及的相关风险,并在使用过程中承担全部风险与责任。
侵犯个人隐私。模型有可能直接或经引导后产生涉及个人隐私的内容。
侵犯内容版权。模型有可能直接或经引导后产生与其他出版物相同、相似的内容。
产生虚假信息。模型有可能直接或经引导后产生不符合事实或客观规律的虚假信息。用户不应故意使用与引导模型制作虚假内容。
产生政治敏感内容。模型有可能直接或经引导后产生与政策、法规等相关的政治敏感内容。
产生偏见与歧视性话语。模型有可能直接或经引导后产生包括但不限于性别、种族等方面的偏见与歧视性话语。
产生对不良行为的煽动与暗示。模型有可能直接或经引导后产生对于违法犯罪等不良行为的煽动与暗示。
产生个体伤害言论。模型有可能直接或经引导后产生对个体进行伤害的言论,如对个人的诋毁、打击言论或鼓励个体进行自我伤害行为的言论等。
"""
        )
    )
if __name__ == "__main__":
main()
| 6,854 | 32.768473 | 180 | py |
CPM-Live | CPM-Live-master/cpm-live/setup.py | from setuptools import setup, find_packages
# Package metadata for the cpm_live toolkit.
setup(
    name="cpm_live",
    version="0.1.0",
    author="OpenBMB",
    author_email="openbmb@gmail.com",
    description="Toolkit for CPM-Live",
    packages=find_packages(),
    # NOTE: "numpy" previously appeared twice in install_requires (once
    # unconstrained, once as numpy>=1.21.0); keep only the constrained spec
    # so pip sees a single unambiguous requirement.
    install_requires=[
        "torch>=1.10",
        "bmtrain>=0.1.8",
        "jieba",
        "tqdm",
        "tensorboard",
        "numpy>=1.21.0",
    ],
    # Ship the bundled vocabulary files with the package.
    package_data={"cpm_live": ["vocabs/*.txt"]},
)
| 452 | 20.571429 | 48 | py |
CPM-Live | CPM-Live-master/cpm-live/pretrain_cpm_bee.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from typing import Any, Dict, List, Union
import torch
import bmtrain as bmt
import os
from cpm_live.arguments import get_args
from cpm_live.models import CPMBee, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
from cpm_live.utils import allgather_objects, LogManager
from cpm_live.training_tasks.bee import MixedDataset
def get_tokenizer(args):
    """Return the CPM-Bee tokenizer (*args* is unused; kept for a uniform factory signature)."""
    return CPMBeeTokenizer()
def get_model(args):
    """Build CPM-Bee from the JSON config; load a checkpoint if given, else random-init."""
    config = CPMBeeConfig.from_json_file(args.model_config)
    model = CPMBee(config)
    if args.load is None:
        bmt.init_parameters(model)
    else:
        bmt.load(model, args.load)
    return model
def get_optimizer(args, model):
    """Create the Adam-offload optimizer; restore its state when resuming.

    When resuming (--load given), the rank-0 state file is used as an
    existence probe, but each rank then loads its *own* rank-specific file.
    NOTE(review): this assumes all rank files exist whenever rank 0's does
    -- confirm for restarts with a different world size.
    """
    optimizer = bmt.optim.AdamOffloadOptimizer(
        model.parameters(), weight_decay=args.weight_decay
    )
    if args.load is not None:
        if os.path.exists(os.path.join(args.save, args.save_name + (".rank-%d.opt" % 0))):
            # optimizer state exists
            states = torch.load(
                os.path.join(args.save, args.save_name + (".rank-%d.opt" % bmt.rank()))
            )
            optimizer.load_state_dict(states)
    return optimizer
def get_learning_rate_scheduler(args, optimizer):
    """Build a Noam LR scheduler; the decay horizon defaults to the total train iterations."""
    if args.lr_decay_iters is None:
        args.lr_decay_iters = args.train_iters
    return bmt.lr_scheduler.Noam(
        optimizer,
        start_lr=args.lr,
        warmup_iter=args.warmup_iters,
        end_iter=args.lr_decay_iters,
        num_iter=args.start_step,
    )
def setup_model_and_optimizer(args):
    """Construct tokenizer, model, optimizer, LR scheduler and the loss-scale
    manager, synchronizing all ranks between the heavyweight steps.

    Returns (tokenizer, model, optimizer, lr_scheduler, optim_manager).
    """
    model = get_model(args)
    tokenizer = get_tokenizer(args)
    bmt.synchronize()
    optimizer = get_optimizer(args, model)
    lr_scheduler = get_learning_rate_scheduler(args, optimizer)
    bmt.synchronize()
    # Dynamic loss scaling: doubles/halves by factor 2, re-evaluated every 512 steps.
    optim_manager = bmt.optim.OptimManager(
        loss_scale=args.loss_scale,
        loss_scale_factor=2,
        loss_scale_steps=512,
    )
    optim_manager.add_optimizer(optimizer, lr_scheduler)
    return tokenizer, model, optimizer, lr_scheduler, optim_manager
def initialize():
    """Parse arguments, initialize the distributed environment and the save dir.

    NOTE(review): MASTER_PORT is shifted by a fixed offset (2333), presumably
    to avoid clashing with the launcher's own port -- confirm; also note this
    shift is applied again on every call, so initialize() is not idempotent.
    """
    os.environ["MASTER_PORT"] = str(int(os.environ["MASTER_PORT"]) + 2333)
    args = get_args(pretrain=True)
    bmt.init_distributed(seed=args.seed)
    if args.save is not None:
        os.makedirs(args.save, exist_ok=True)
    return args
def see_memory(detail=False):
    """Report CUDA memory usage and reset the peak counter.

    Returns the full ``torch.cuda.memory_summary()`` string when *detail*
    is True, otherwise a ``(allocated_gb, max_allocated_gb)`` tuple rounded
    to two decimals.
    """
    gib = 1024 ** 3
    if detail:
        report = torch.cuda.memory_summary()
    else:
        report = (
            round(torch.cuda.memory_allocated() / gib, 2),
            round(torch.cuda.max_memory_allocated() / gib, 2),
        )
    torch.cuda.reset_peak_memory_stats()
    return report
def add_mem_time(info, mem_usage, tim_usage):
    """Record the current memory stats and wall-clock time under key *info*.

    Mutates and returns the two bookkeeping dicts; synchronizes CUDA first
    so the timestamp reflects finished GPU work.
    """
    torch.cuda.synchronize()
    mem_usage[info], tim_usage[info] = see_memory(), time.time()
    return mem_usage, tim_usage
class LossSpikeDetector:
    """Watches per-task training losses and appends a report to *log_path*
    whenever a task's loss jumps to more than 3x its previously seen value.
    The most recent batch (set via update_data) is included in the report."""

    def __init__(self, log_path: str) -> None:
        self._last_loss: Dict[str, float] = {}  # task name -> last seen loss
        self._last_data: List[Any] = [None]     # up to the two most recent batches
        self._log_path = log_path

    def update_data(self, data: Any):
        """Remember *data* as the most recent batch (keeps at most two)."""
        self._last_data = (self._last_data + [data])[-2:]

    def update_loss(self, iteration: int, loss_map: Dict[str, float]):
        """Compare each task's loss against its previous value and log spikes."""
        spikes = []
        for task, loss in loss_map.items():
            previous = self._last_loss.get(task)
            if previous is not None and loss > previous * 3:
                # loss spike!
                spikes.append(
                    {
                        "prev": previous,
                        "curr": loss,
                        "task": task,
                    }
                )
            self._last_loss[task] = float(loss)
        if spikes:
            self._write_log(iteration, self._last_data[-1], spikes)

    def _write_log(self, iteration: int, data: Any, result: List[Dict[str, Any]]):
        """Append one spike report (JSON payload plus the triggering batch)."""
        with open(self._log_path, "a", encoding="utf-8") as fp:
            fp.write("=" * 20)
            fp.write("\nloss spike at {}\n".format(iteration))
            fp.write("{}\n".format(json.dumps(result, indent=4, ensure_ascii=False)))
            fp.write("data: \n")
            for item in data:
                fp.write("{}\n".format(json.dumps(item, indent=4, ensure_ascii=False)))
            fp.write("\n\n")
def pretrain(
    args,
    tokenizer: CPMBeeTokenizer,
    model: CPMBee,
    optimizer: bmt.optim.AdamOffloadOptimizer,
    lr_scheduler: bmt.lr_scheduler.WarmupLRScheduler,
    optim_manager: bmt.optim.OptimManager,
):
    """Main CPM-Bee pre-training loop.

    Streams batches from the mixed dataset, runs forward/backward/step with
    dynamic loss scaling, aggregates per-task losses across ranks, feeds a
    loss-spike detector, and periodically logs, inspects and checkpoints
    (model + per-rank optimizer state + dataloader state).
    """
    average_time = bmt.utils.AverageRecorder()
    loss_func = bmt.loss.FusedCrossEntropy(ignore_index=-100)

    start_step = args.start_step

    # Per-rank spike log (the path literal says 'spile'; kept as-is).
    lsd = LossSpikeDetector("debug/spile.%d.log" % bmt.rank())

    # TensorBoard and file logging are rank-0-only.
    if args.tensorboard is not None and bmt.rank() == 0:
        from torch.utils.tensorboard import SummaryWriter
        import distutils.version  # noqa: F401

        if not os.path.exists(args.tensorboard):
            os.makedirs(args.tensorboard)
        writer = SummaryWriter(log_dir=args.tensorboard)

    if args.log_dir is not None and bmt.rank() == 0:
        log_mgr = LogManager(args.log_dir)

    global_token_pass = 0.0
    global_world_size = bmt.world_size()
    dataloader = MixedDataset(
        args.dataset, args.batch_size, args.max_length, tokenizer, max_depth=8
    )

    if os.path.exists(os.path.join(args.save, args.save_name + ("-%d.data.pt" % start_step))):
        # load dataset states if exists
        dataset_states = torch.load(
            os.path.join(args.save, args.save_name + ("-%d.data.pt" % start_step))
        )
        missing = dataloader.load_state_dict(dataset_states)
        if len(missing) > 0:
            bmt.print_rank("Missing keys when loading dataset states: ", missing)

    dataloader.start()
    try:
        for iteration, data in enumerate(dataloader):
            # Global iteration number, continuing from a resumed start_step.
            iteration = iteration + start_step + 1
            assert data["inputs"].shape[0] == args.batch_size

            # Move the packed numpy batch onto the GPU with the dtypes the model expects.
            input_ids = torch.from_numpy(data["inputs"]).cuda().to(torch.int32)
            input_ids_sub = torch.from_numpy(data["inputs_sub"]).cuda().to(torch.int32)
            input_length = torch.from_numpy(data["length"]).cuda().to(torch.int32)
            input_context = torch.from_numpy(data["context"]).cuda().bool()
            input_sample_ids = torch.from_numpy(data["sample_ids"]).cuda().to(torch.int32)
            input_num_segments = torch.from_numpy(data["num_segments"]).cuda().to(torch.int32)
            input_segment_ids = torch.from_numpy(data["segment_ids"]).cuda().to(torch.int32)
            input_segment_rel_offset = (
                torch.from_numpy(data["segment_rel_offset"]).cuda().to(torch.int32)
            )
            input_segment_rel = torch.from_numpy(data["segment_rel"]).cuda().to(torch.int32)
            input_span = torch.from_numpy(data["spans"]).cuda().to(torch.int32)
            targets = torch.from_numpy(data["target"]).cuda().to(torch.int32)
            ext_table_ids = torch.from_numpy(data["ext_ids"]).cuda().to(torch.int32)
            ext_table_sub = torch.from_numpy(data["ext_sub"]).cuda().to(torch.int32)
            task_ids = torch.from_numpy(data["task_ids"]).cuda().to(torch.int32)
            task_names = data["task_names"]
            lsd.update_data(data["raw_data"])

            # =========== zero grads and start per-phase mem/time bookkeeping
            optim_manager.zero_grad()
            # torch.cuda.empty_cache()
            mem_usage = {}
            tim_usage = {}
            mem_usage, tim_usage = add_mem_time("init", mem_usage, tim_usage)

            # =========== forward
            logits, _ = model(
                input_ids,
                input_ids_sub,
                input_length,
                input_context,
                input_sample_ids,
                input_num_segments,
                input_segment_ids,
                input_segment_rel_offset,
                input_segment_rel,
                input_span,
                ext_table_ids,
                ext_table_sub,
            )
            loss = loss_func(logits.view(-1, logits.size(-1)), targets.view(-1))
            global_loss = bmt.sum_loss(loss).item()
            mem_usage, tim_usage = add_mem_time("forward", mem_usage, tim_usage)

            # =========== backward (scaled by the optim manager)
            optim_manager.backward(loss)
            mem_usage, tim_usage = add_mem_time("backward", mem_usage, tim_usage)

            # =========== clip + optimizer step
            current_stream = torch.cuda.current_stream()
            # some reduce ops of distributed parameter were launched on load stream
            current_stream.wait_stream(bmt.config['load_stream'])
            grad_norm = optim_manager.clip_grad_norm(optimizer.param_groups, max_norm=1.0)
            optim_manager.step()
            mem_usage, tim_usage = add_mem_time("optim", mem_usage, tim_usage)

            # ========== per-iteration timing
            iteration_time = tim_usage["optim"] - tim_usage["init"]
            average_time.record(iteration_time)

            with torch.no_grad():
                # Recompute the loss per task by masking targets of other tasks
                # to the ignore index (-100).
                task_num = len(task_names)
                targets_tmp = targets.expand(task_num, -1, -1)
                task = torch.arange(task_num, dtype=torch.int32, device="cuda")[:, None, None]
                targets_tmp = torch.where(
                    task_ids == task,
                    targets_tmp,
                    torch.scalar_tensor(-100, dtype=torch.int32, device="cuda"),
                )

                task_loss_map: Dict[str, float] = {}
                for i in range(task_num):
                    task_loss = loss_func(
                        logits.view(-1, logits.size(-1)), targets_tmp[i, :].view(-1)
                    )
                    # global_task_loss = float(bmt.sum_loss(task_loss).item())
                    task_loss_map[task_names[i]] = task_loss.item()
                gatherd_task_loss_map: List[Dict[str, float]] = allgather_objects(task_loss_map)

                # Average each task's loss over all ranks that saw it.
                global_task_loss_map: Dict[str, Union[List[float], float]] = {}
                for local_task_loss_map in gatherd_task_loss_map:
                    for task_name, task_loss in local_task_loss_map.items():
                        if task_name not in global_task_loss_map:
                            global_task_loss_map[task_name] = []
                        global_task_loss_map[task_name].append(task_loss)

                task_loss_map = {}
                for task_name in sorted(list(global_task_loss_map.keys())):
                    avg_loss = sum(global_task_loss_map[task_name]) / len(
                        global_task_loss_map[task_name]
                    )
                    task_loss_map[task_name] = avg_loss

            # Throughput accounting: fraction of max_length actually filled.
            local_total_rate = torch.Tensor([input_length.float().mean() / args.max_length]).cuda()
            local_total_rate = bmt.sum_loss(local_total_rate).item()
            global_token_pass += (
                global_world_size * local_total_rate * args.max_length * args.batch_size
            )
            avg_time = average_time.value
            lsd.update_loss(iteration, task_loss_map)

            train_info = {
                "time": tim_usage["init"],
                "iteration": iteration,
                "loss": global_loss,
                "lr": lr_scheduler.current_lr,
                "lr_scale": int(optim_manager.loss_scale),
                "time_usage": tim_usage,
                "mem_usage": mem_usage,
                "avg_time": avg_time,
                "token_max": local_total_rate,
                "token_pass": global_token_pass,
                "throughout": args.max_length * args.batch_size * local_total_rate / avg_time,
                "grad_norm": grad_norm.item(),
                "mask_max": ((targets >= 0).sum(-1).float().mean() / args.max_length).item(),
                "num_gpus": global_world_size,
                "task_loss": task_loss_map,
            }

            bmt.print_rank(
                (
                    "| Iter: {:6d} | loss: {:.4f} | lr: {:.4e}, scale: {:10.4f} | time: {:.4f} |"
                    + " token/max: {:.4f} | mask/max: {:.4f} | grad_norm: {:.4f}"
                ).format(
                    iteration,
                    global_loss,
                    lr_scheduler.current_lr,
                    int(optim_manager.loss_scale),
                    avg_time,
                    input_length.float().mean() / args.max_length,
                    (targets >= 0).sum(-1).float().mean() / args.max_length,
                    grad_norm,
                )
            )
            bmt.print_rank(
                "| "
                + " | ".join(
                    [
                        "{} loss: {:.4f}".format(task_name, loss)
                        for task_name, loss in task_loss_map.items()
                    ]
                )
            )

            # Periodic parameter inspection.
            if iteration % args.inspect_iters == 0:
                model_inspect = bmt.inspect.inspect_model(model, "*")
                bmt.print_rank(bmt.inspect.format_summary(model_inspect))
                train_info["model_inspect"] = model_inspect

            # write log here
            if args.log_dir is not None and bmt.rank() == 0:
                log_mgr.write(**train_info)
            if args.tensorboard is not None and bmt.rank() == 0:
                writer.add_scalar("Loss/train", global_loss, iteration)
                writer.add_scalar("Optimizer/lr", lr_scheduler.current_lr, iteration)
                writer.add_scalar("Optimizer/scale", optim_manager.loss_scale, iteration)
                writer.add_scalar("Optimizer/grad_norm", grad_norm.item(), iteration)
                for task_name, loss in task_loss_map.items():
                    writer.add_scalar("Loss/train/{}".format(task_name), loss, iteration)

            # Periodic checkpoint: model (all ranks), per-rank optimizer state,
            # and (rank 0 only) the gathered dataloader state.
            if args.save is not None and iteration % args.save_iters == 0:
                bmt.save(model, os.path.join(args.save, args.save_name + ("-%d.pt" % iteration)))
                torch.save(
                    optimizer.state_dict(),
                    os.path.join(args.save, args.save_name + (".rank-%d.opt" % bmt.rank())),
                )
                all_states = dataloader.state_dict()
                if bmt.rank() == 0:
                    # rank 0 writes the dataloader state
                    torch.save(
                        all_states,
                        os.path.join(args.save, args.save_name + ("-%d.data.pt" % iteration)),
                    )
                del all_states
    finally:
        dataloader.close()

    # Final model checkpoint after the loop ends (or on error, after cleanup).
    bmt.save(model, os.path.join(args.save, args.save_name + ".pt"))
def main():
    """Entry point: parse arguments, build the training stack, run pretraining."""
    args = initialize()
    components = setup_model_and_optimizer(args)
    pretrain(args, *components)


if __name__ == "__main__":
    main()
| 15,523 | 38.805128 | 99 | py |
CPM-Live | CPM-Live-master/cpm-live/preprocess_dataset.py | import os
from cpm_live.dataset import build_dataset, shuffle_dataset
import shutil
from tqdm import tqdm
import json
import argparse
def get_args():
    """Parse the preprocessing CLI: raw input dir, output location and name."""
    parser = argparse.ArgumentParser()
    for flag, description in (
        ("--input", "raw dataset path"),
        ("--output_path", "output dataset path"),
        ("--output_name", "output dataset name"),
    ):
        parser.add_argument(flag, type=str, help=description, required=True)
    return parser.parse_args()
def reformat_data(data):
    """Convert one raw JSON record into the sample format expected by the dataset.

    Placeholder hook: the default implementation returns ``data`` unchanged —
    edit this function to match the layout of your own corpus.
    """
    return data
def main():
    """Convert each raw file under ``args.input`` into a shuffled binary dataset."""
    args = get_args()
    for raw_name in os.listdir(args.input):
        raw_path = os.path.join(args.input, raw_name)
        # Stage the converted samples into a temporary dataset first.
        with build_dataset("tmp", "data") as dataset:
            with open(raw_path, "r", encoding="utf-8") as fin:
                for line in tqdm(fin.readlines(), desc=raw_path):
                    dataset.write(reformat_data(json.loads(line)))
        # Shuffle the staged samples into their final location, then clean up.
        shuffle_dataset(
            "tmp",
            os.path.join(args.output_path, raw_name.split(".")[0]),
            progress_bar=True,
            output_name=args.output_name
        )
        shutil.rmtree("tmp")


if __name__ == "__main__":
    main()
| 1,268 | 27.2 | 93 | py |
CPM-Live | CPM-Live-master/cpm-live/finetune_cpm_bee.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Dict, List, Union
import torch
import bmtrain as bmt
import os
from opendelta import LoraModel
from cpm_live.arguments import get_args
from cpm_live.models import CPMBee, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
from cpm_live.utils import allgather_objects
from cpm_live.training_tasks.bee import FinetuneDataset
def get_tokenizer(args):
    """Build the CPM-Bee tokenizer (``args`` is unused, kept for API symmetry)."""
    return CPMBeeTokenizer()
def get_model(args):
    """Construct CPM-Bee from ``args.model_config``.

    Loads weights from ``args.load`` when given, otherwise initializes fresh
    parameters.  With ``args.use_delta``, LoRA adapters are injected on the
    attention q/v projections and everything except the adapters is frozen.
    """
    config = CPMBeeConfig.from_json_file(args.model_config)
    model = CPMBee(config)
    if args.load is None:
        bmt.init_parameters(model)
    else:
        bmt.load(model, args.load)
    if args.use_delta:
        # Attach LoRA adapters; only the "deltas" remain trainable.
        delta_model = LoraModel(
            backbone_model=model, modified_modules=["project_q", "project_v"], backend="bmt"
        )
        delta_model.freeze_module(exclude=["deltas"], set_state_dict=True)
        delta_model.log()
    return model
def get_optimizer(args, model):
    """AdamOffload optimizer over all parameters, weight decay from ``args``."""
    return bmt.optim.AdamOffloadOptimizer(
        model.parameters(), weight_decay=args.weight_decay
    )
def get_learning_rate_scheduler(args, optimizer):
    """Create a Noam LR schedule; decay horizon defaults to the full run.

    NOTE: mutates ``args.lr_decay_iters`` in place when it is None, matching
    the behavior callers already rely on.
    """
    if args.lr_decay_iters is None:
        args.lr_decay_iters = args.train_iters
    return bmt.lr_scheduler.Noam(
        optimizer,
        start_lr=args.lr,
        warmup_iter=args.warmup_iters,
        end_iter=args.lr_decay_iters,
        num_iter=args.start_step,
    )
def setup_model_and_optimizer(args):
    """Build tokenizer, model, optimizer and LR scheduler, wired into an OptimManager.

    Returns:
        (tokenizer, model, optimizer, lr_scheduler, optim_manager)
    """
    model = get_model(args)
    tokenizer = get_tokenizer(args)
    # Synchronize all ranks after model construction/loading.
    bmt.synchronize()
    optimizer = get_optimizer(args, model)
    lr_scheduler = get_learning_rate_scheduler(args, optimizer)
    bmt.synchronize()
    # Dynamic loss-scaling configuration for mixed-precision training
    # (factor/steps semantics come from bmtrain's OptimManager).
    optim_manager = bmt.optim.OptimManager(
        loss_scale=args.loss_scale,
        loss_scale_factor=2,
        loss_scale_steps=512,
    )
    optim_manager.add_optimizer(optimizer, lr_scheduler)
    return tokenizer, model, optimizer, lr_scheduler, optim_manager
def initialize():
    """Parse the finetuning CLI arguments and set up the distributed runtime."""
    finetune_args = get_args(finetune=True)
    bmt.init_distributed(seed=finetune_args.seed)
    if finetune_args.save is not None:
        # Create the checkpoint directory up front so later saves cannot fail on it.
        os.makedirs(finetune_args.save, exist_ok=True)
    return finetune_args
def see_memory(detail=False):
    """Report CUDA memory usage, then reset the peak counter.

    Returns the full ``torch.cuda.memory_summary()`` string when ``detail`` is
    True, otherwise a ``(allocated_GiB, peak_GiB)`` tuple rounded to 2 decimals.
    """
    if detail:
        report = torch.cuda.memory_summary()
    else:
        gib = 1024 * 1024 * 1024
        report = (
            round(torch.cuda.memory_allocated() / gib, 2),
            round(torch.cuda.max_memory_allocated() / gib, 2),
        )
    torch.cuda.reset_peak_memory_stats()
    return report
def add_mem_time(info, mem_usage, tim_usage):
    """Record memory and wall-clock snapshots under key ``info``.

    Mutates both dicts in place and returns them for convenience; synchronizes
    CUDA first so the timestamp reflects completed GPU work.
    """
    torch.cuda.synchronize()
    mem_usage[info] = see_memory()
    tim_usage[info] = time.time()
    return mem_usage, tim_usage
def evaluation(model, args, tokenizer, loss_func):
    """Run one pass over ``args.eval_dataset`` and return the mean LM loss.

    Uses batch size 1 and the same packing/encoding as training.  When the
    dataloader yields ``None`` (no complete batch available), the previous
    batch is re-run with its loss zeroed — presumably so every rank executes
    the same step; TODO confirm against FinetuneDataset's contract.
    """
    bmt.print_rank("evaluation begins...")
    eval_dataloader = FinetuneDataset(
        args.eval_dataset,
        1,
        args.max_length,
        tokenizer,
        max_depth=8,
        task_name=args.task_name,
        drop_last=args.drop_last,
    )
    eval_losses = []
    last_data = None
    with torch.no_grad():
        for iteration, data in enumerate(eval_dataloader):
            iteration = iteration + 1
            skip_this_batch = False
            if data is None:
                if last_data is None:
                    raise RuntimeError(
                        "Dataset is too small, please use a smaller batch size or sequence length!"
                    )
                data = last_data
                skip_this_batch = True
            else:
                last_data = data
            # Move the packed numpy batch to the GPU with the dtypes the model expects.
            input_ids = torch.from_numpy(data["inputs"]).cuda().to(torch.int32)
            input_ids_sub = torch.from_numpy(data["inputs_sub"]).cuda().to(torch.int32)
            input_length = torch.from_numpy(data["length"]).cuda().to(torch.int32)
            input_context = torch.from_numpy(data["context"]).cuda().bool()
            input_sample_ids = torch.from_numpy(data["sample_ids"]).cuda().to(torch.int32)
            input_num_segments = torch.from_numpy(data["num_segments"]).cuda().to(torch.int32)
            input_segment_ids = torch.from_numpy(data["segment_ids"]).cuda().to(torch.int32)
            input_segment_rel_offset = (
                torch.from_numpy(data["segment_rel_offset"]).cuda().to(torch.int32)
            )
            input_segment_rel = torch.from_numpy(data["segment_rel"]).cuda().to(torch.int32)
            input_span = torch.from_numpy(data["spans"]).cuda().to(torch.int32)
            targets = torch.from_numpy(data["target"]).cuda().to(torch.int32)
            ext_table_ids = torch.from_numpy(data["ext_ids"]).cuda().to(torch.int32)
            ext_table_sub = torch.from_numpy(data["ext_sub"]).cuda().to(torch.int32)
            # ===========
            # NOTE(review): these mem/time snapshots are taken but never used
            # in this function — kept for parity with the training loop.
            mem_usage = {}
            tim_usage = {}
            mem_usage, tim_usage = add_mem_time("init", mem_usage, tim_usage)
            # ===========
            logits, _ = model(
                input_ids,
                input_ids_sub,
                input_length,
                input_context,
                input_sample_ids,
                input_num_segments,
                input_segment_ids,
                input_segment_rel_offset,
                input_segment_rel,
                input_span,
                ext_table_ids,
                ext_table_sub,
            )
            # Cross-entropy over the flattened vocabulary dimension.
            loss = loss_func(logits.view(-1, logits.size(-1)), targets.view(-1))
            if skip_this_batch:
                loss = loss * 0
            eval_losses.append(bmt.sum_loss(loss))
    overall_loss = torch.stack(eval_losses).mean().item()
    return overall_loss
def finetune(
    args,
    tokenizer: CPMBeeTokenizer,
    model: CPMBee,
    optimizer: bmt.optim.AdamOffloadOptimizer,
    lr_scheduler: bmt.lr_scheduler.WarmupLRScheduler,
    optim_manager: bmt.optim.OptimManager,
):
    """Finetuning loop: epochs over ``args.dataset`` with periodic evaluation.

    Logs per-task losses, optionally writes tensorboard scalars on rank 0,
    saves the best checkpoint by eval loss, and early-stops after
    ``args.early_stop_patience`` consecutive eval-loss increases.
    """
    average_time = bmt.utils.AverageRecorder()
    # ignore_index=-100 masks out non-target positions in the packed batch.
    loss_func = bmt.loss.FusedCrossEntropy(ignore_index=-100)
    if args.tensorboard is not None and bmt.rank() == 0:
        from torch.utils.tensorboard import SummaryWriter
        import distutils.version  # noqa: F401
        if not os.path.exists(args.tensorboard):
            os.makedirs(args.tensorboard)
        writer = SummaryWriter(log_dir=args.tensorboard)
    best_eval_loss, eval_loss_increase = 1e9, 0
    global_token_pass = 0.0
    global_steps = 0
    global_world_size = bmt.world_size()
    dataloader = FinetuneDataset(
        args.dataset,
        args.batch_size,
        args.max_length,
        tokenizer,
        max_depth=8,
        task_name=args.task_name,
        drop_last=args.drop_last,
    )
    for epoch in range(args.epoch):
        epoch = epoch + 1
        last_data = None
        for iteration, data in enumerate(dataloader):
            iteration = iteration + 1
            global_steps = global_steps + 1
            skip_this_batch = False
            # When no full batch is available, replay the previous batch with a
            # zeroed loss so the step still executes on every rank.
            if data is None:
                if last_data is None:
                    raise RuntimeError(
                        "Dataset is too small, please use a smaller batch size or sequence length!"
                    )
                data = last_data  # use last data
                skip_this_batch = True
            else:
                last_data = data
            # Move the packed numpy batch to the GPU with the dtypes the model expects.
            input_ids = torch.from_numpy(data["inputs"]).cuda().to(torch.int32)
            input_ids_sub = torch.from_numpy(data["inputs_sub"]).cuda().to(torch.int32)
            input_length = torch.from_numpy(data["length"]).cuda().to(torch.int32)
            input_context = torch.from_numpy(data["context"]).cuda().bool()
            input_sample_ids = torch.from_numpy(data["sample_ids"]).cuda().to(torch.int32)
            input_num_segments = torch.from_numpy(data["num_segments"]).cuda().to(torch.int32)
            input_segment_ids = torch.from_numpy(data["segment_ids"]).cuda().to(torch.int32)
            input_segment_rel_offset = (
                torch.from_numpy(data["segment_rel_offset"]).cuda().to(torch.int32)
            )
            input_segment_rel = torch.from_numpy(data["segment_rel"]).cuda().to(torch.int32)
            input_span = torch.from_numpy(data["spans"]).cuda().to(torch.int32)
            targets = torch.from_numpy(data["target"]).cuda().to(torch.int32)
            ext_table_ids = torch.from_numpy(data["ext_ids"]).cuda().to(torch.int32)
            ext_table_sub = torch.from_numpy(data["ext_sub"]).cuda().to(torch.int32)
            task_ids = torch.from_numpy(data["task_ids"]).cuda().to(torch.int32)
            task_names = data["task_names"]
            # ===========
            optim_manager.zero_grad()
            mem_usage = {}
            tim_usage = {}
            mem_usage, tim_usage = add_mem_time("init", mem_usage, tim_usage)
            # =========== forward
            logits, _ = model(
                input_ids,
                input_ids_sub,
                input_length,
                input_context,
                input_sample_ids,
                input_num_segments,
                input_segment_ids,
                input_segment_rel_offset,
                input_segment_rel,
                input_span,
                ext_table_ids,
                ext_table_sub,
            )
            loss = loss_func(logits.view(-1, logits.size(-1)), targets.view(-1))
            if skip_this_batch:
                loss = loss * 0
            mem_usage, tim_usage = add_mem_time("forward", mem_usage, tim_usage)
            # =========== backward (loss scaling handled by the optim manager)
            optim_manager.backward(loss)
            mem_usage, tim_usage = add_mem_time("backward", mem_usage, tim_usage)
            # =========== gradient clipping + optimizer step
            grad_norm = optim_manager.clip_grad_norm(optimizer.param_groups, max_norm=1.0)
            optim_manager.step()
            mem_usage, tim_usage = add_mem_time("optim", mem_usage, tim_usage)
            # ==========
            iteration_time = tim_usage["optim"] - tim_usage["init"]
            average_time.record(iteration_time)
            with torch.no_grad():
                # Compute a per-task loss by masking targets to each task id,
                # then average each task's loss across all ranks.
                task_num = len(task_names)
                targets_tmp = targets.expand(task_num, -1, -1)
                task = torch.arange(task_num, dtype=torch.int32, device="cuda")[:, None, None]
                targets_tmp = torch.where(
                    task_ids == task,
                    targets_tmp,
                    torch.scalar_tensor(-100, dtype=torch.int32, device="cuda"),
                )
                task_loss_map: Dict[str, float] = {}
                if not skip_this_batch:
                    for i in range(task_num):
                        task_loss = loss_func(
                            logits.view(-1, logits.size(-1)), targets_tmp[i, :].view(-1)
                        )
                        task_loss_map[task_names[i]] = task_loss.item()
                gatherd_task_loss_map: List[Dict[str, float]] = allgather_objects(task_loss_map)
                global_task_loss_map: Dict[str, Union[List[float], float]] = {}
                for local_task_loss_map in gatherd_task_loss_map:
                    for task_name, task_loss in local_task_loss_map.items():
                        if task_name not in global_task_loss_map:
                            global_task_loss_map[task_name] = []
                        global_task_loss_map[task_name].append(task_loss)
                task_loss_map = {}
                for task_name in sorted(list(global_task_loss_map.keys())):
                    avg_loss = sum(global_task_loss_map[task_name]) / len(
                        global_task_loss_map[task_name]
                    )
                    task_loss_map[task_name] = avg_loss
            # Throughput bookkeeping (fraction of max_length actually filled).
            local_total_rate = torch.Tensor([input_length.float().mean() / args.max_length]).cuda()
            local_total_rate = bmt.sum_loss(local_total_rate).item()
            global_token_pass += (
                global_world_size * local_total_rate * args.max_length * args.batch_size
            )
            avg_time = average_time.value
            train_info = {
                "time": tim_usage["init"],
                "epoch": epoch,
                "iteration": iteration,
                "loss": task_loss_map[args.task_name],
                "lr": lr_scheduler.current_lr,
                "lr_scale": int(optim_manager.loss_scale),
                "time_usage": tim_usage,
                "mem_usage": mem_usage,
                "avg_time": avg_time,
                "token_max": local_total_rate,
                "token_pass": global_token_pass,
                "throughout": args.max_length * args.batch_size * local_total_rate / avg_time,
                "grad_norm": grad_norm.item(),
                "mask_max": ((targets >= 0).sum(-1).float().mean() / args.max_length).item(),
                "num_gpus": global_world_size,
                "task_loss": task_loss_map,
            }
            bmt.print_rank(
                (
                    "| Epoch: {:3d} | Iter: {:6d} | loss: {:.4f} "
                    + "| lr: {:.4e}, scale: {:10.4f} | time: {:.4f} |"
                    + " token/max: {:.4f} | mask/max: {:.4f} | grad_norm: {:.4f}"
                ).format(
                    epoch,
                    iteration,
                    task_loss_map[args.task_name],
                    lr_scheduler.current_lr,
                    int(optim_manager.loss_scale),
                    avg_time,
                    input_length.float().mean() / args.max_length,
                    (targets >= 0).sum(-1).float().mean() / args.max_length,
                    grad_norm,
                )
            )
            bmt.print_rank(
                "| "
                + " | ".join(
                    [
                        "{} loss: {:.4f}".format(task_name, loss)
                        for task_name, loss in task_loss_map.items()
                    ]
                )
            )
            if iteration % args.inspect_iters == 0:
                model_inspect = bmt.inspect.inspect_model(model, "*")
                bmt.print_rank(bmt.inspect.format_summary(model_inspect))
                train_info["model_inspect"] = model_inspect
            # write log here
            if args.tensorboard is not None and bmt.rank() == 0:
                writer.add_scalar("Loss/train", task_loss_map[args.task_name], global_steps)
                for task_name, loss in task_loss_map.items():
                    writer.add_scalar("Loss/train/{}".format(task_name), loss, global_steps)
            # evaluation with best-checkpoint saving and early stopping
            if global_steps % args.eval_interval == 0:
                eval_loss = evaluation(model, args, tokenizer, loss_func)
                if args.tensorboard is not None and bmt.rank() == 0:
                    writer.add_scalar("Loss/eval", eval_loss, global_steps)
                if eval_loss < best_eval_loss:
                    best_eval_loss = eval_loss
                    eval_loss_increase = 0
                    if args.save is not None:
                        bmt.save(model, os.path.join(args.save, args.save_name + "-best.pt"))
                else:
                    eval_loss_increase += 1
                    bmt.print_rank(
                        "| Eval loss: {:.4f} | Increase: {:2d}".format(eval_loss, eval_loss_increase)
                    )
                if eval_loss_increase == args.early_stop_patience:
                    bmt.print_rank(
                        "Eval loss has increased {:d} times, the finetune loop early stopped."
                        .format(eval_loss_increase)
                    )
                    return
    # end of finetune
def main():
    """Entry point: parse arguments, build the training stack, run finetuning."""
    args = initialize()
    components = setup_model_and_optimizer(args)
    finetune(args, *components)


if __name__ == "__main__":
    main()
| 16,454 | 37.900709 | 99 | py |
CPM-Live | CPM-Live-master/cpm-live/text_generation.py | from cpm_live.generation.bee import CPMBeeBeamSearch
from cpm_live.models import CPMBeeTorch, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
import torch
if __name__ == "__main__":
    # One fill-in-the-blank sample; the model generates text for "<mask_0>".
    data_list = [
        {"document": "今天天气是真的<mask_0>", "<ans>": {"<mask_0>": ""}},
    ]
    ckpt_path = "path/to/checkpoint.pt"
    tokenizer = CPMBeeTokenizer()
    model = CPMBeeTorch(config=CPMBeeConfig.from_json_file("config/cpm-bee-10b.json"))
    model.load_state_dict(torch.load(ckpt_path))
    model.cuda()

    # Decode with beam search and print one result per input sample.
    searcher = CPMBeeBeamSearch(model=model, tokenizer=tokenizer)
    for res in searcher.generate(data_list, max_length=100):
        print(res)
| 778 | 26.821429 | 71 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/arguments.py | # coding=utf-8
# Copyright 2020 The OpenBMB team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
def add_model_config_args(parser: argparse.ArgumentParser):
    """Register model-related CLI options on ``parser`` and return it."""
    model_group = parser.add_argument_group("model", "model configuration")
    model_group.add_argument("--model-config", type=str, help="model configuration file")
    return parser
def add_training_args(parser: argparse.ArgumentParser):
    """Register the shared training CLI options on ``parser`` and return it.

    Note: argparse %-interpolates help strings (for ``%(default)s``), so any
    literal percent sign must be written as ``%%`` — a bare ``%`` makes
    ``--help`` raise ``ValueError: unsupported format character``.
    """
    group = parser.add_argument_group("train", "training configurations")
    group.add_argument("--dataset", type=str, default="dataset.json", help="Path to dataset")
    group.add_argument(
        "--load",
        type=str,
        default=None,
        help="Path to a directory containing a model checkpoint.",
    )
    group.add_argument(
        "--save",
        type=str,
        default=None,
        help="Output directory to save checkpoints to.",
    )
    group.add_argument(
        "--save-name",
        type=str,
        default=None,
        help="Output filename to save checkpoints to.",
    )
    group.add_argument(
        "--tensorboard",
        type=str,
        default=None,
        help="tensorboard directory",
    )
    group.add_argument("--inspect-iters", type=int, default=1000, help="number of inspecting")
    group.add_argument("--batch-size", type=int, default=32, help="Data Loader batch size")
    group.add_argument("--clip-grad", type=float, default=1.0, help="gradient clipping")
    group.add_argument(
        "--train-iters",
        type=int,
        default=1000000,
        help="total number of iterations to train over all training runs",
    )
    group.add_argument("--max-length", type=int, default=512, help="max length of input")
    group.add_argument("--seed", type=int, default=1234, help="random seed for reproducibility")
    # Learning rate.
    group.add_argument("--lr", type=float, default=1.0e-4, help="initial learning rate")
    group.add_argument("--weight-decay", type=float, default=1.0e-2, help="weight decay rate")
    group.add_argument("--loss-scale", type=float, default=65536, help="loss scale")
    group.add_argument(
        "--warmup-iters",
        type=float,
        default=0.01,
        # Fixed: "1%" -> "1%%" so argparse's help interpolation doesn't crash.
        help="percentage of data to warmup on (.01 = 1%% of all training iters). Default 0.01",
    )
    group.add_argument(
        "--lr-decay-style",
        type=str,
        default="noam",
        choices=["constant", "linear", "cosine", "exponential", "noam"],
        help="learning rate decay function",
    )
    group.add_argument("--lr-decay-iters", type=int, default=None, help="lr decay steps")
    group.add_argument(
        "--start-step", type=int, default=0, help="step to start or continue training"
    )
    return parser
def add_pretrain_args(parser: argparse.ArgumentParser):
    """Register pretraining-only CLI options on ``parser`` and return it."""
    pretrain_group = parser.add_argument_group("pretrain", "pretrain configurations")
    pretrain_group.add_argument("--save-iters", type=int, default=1000,
                                help="number of iterations between saves")
    pretrain_group.add_argument("--log-dir", type=str, default=None,
                                help="log directory")
    return parser
def add_finetune_args(parser: argparse.ArgumentParser):
    """Register finetuning-only CLI options on ``parser`` and return it."""
    ft_group = parser.add_argument_group("finetune", "fintune configurations")
    ft_group.add_argument("--epoch", type=int, default=1, help="number of training epochs")
    ft_group.add_argument("--task-name", type=str, default="task", help="name of training task")
    ft_group.add_argument(
        "--use-delta", action="store_true", default=False, help="use delta tuning or not"
    )
    # NOTE(review): this flag uses an underscore while its siblings use dashes;
    # kept as-is for backward compatibility with existing launch scripts.
    ft_group.add_argument("--eval_dataset", type=str, help="path to eval dataset")
    ft_group.add_argument(
        "--drop-last",
        action="store_true",
        default=False,
        help="drop data from each epoch that cannot be formed into a complete batch at the end",
    )
    ft_group.add_argument("--eval-interval", type=int, default=500, help="eval interval")
    ft_group.add_argument("--early-stop-patience", type=int, default=5, help="early stop steps")
    return parser
def get_args(pretrain: bool = False, finetune: bool = False):
    """Build the full CLI parser (base groups plus optional pretrain/finetune
    groups) and parse ``sys.argv``."""
    parser = argparse.ArgumentParser()
    group_builders = [add_model_config_args, add_training_args]
    if pretrain:
        group_builders.append(add_pretrain_args)
    if finetune:
        group_builders.append(add_finetune_args)
    for builder in group_builders:
        parser = builder(parser)
    return parser.parse_args()
| 5,021 | 32.704698 | 97 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/__init__.py | 0 | 0 | 0 | py | |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/dataset/distributed_dataset.py | # coding=utf-8
# Copyright 2020 The OpenBMB team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import struct
from typing import List, Optional, Set
import torch
import bisect
import bmtrain as bmt
import json
from .serializer import Serializer, PickleSerializer
import random
import string
import time
def _random_string():
return "".join(random.choices(string.ascii_uppercase + string.digits, k=8))
_DEFAULT_BLOCK_SIZE = 16 << 20  # 16 MiB per block


class FileInfo:
    """Metadata record for one data file inside a dataset.

    Tracks the file name, the half-open range of global block ids it covers,
    raw byte/line counts, a ``mask`` flag (masked files are skipped by
    readers) and the block size the file was written with.
    """

    # Field names, in the order they appear in the serialized state dict.
    _KEYS = (
        "file_name",
        "block_begin",
        "block_end",
        "nbytes",
        "nlines",
        "mask",
        "block_size",
    )

    def __init__(
        self,
        file_name: str = "",
        block_begin: int = 0,
        block_end: int = 0,
        nbytes: int = 0,
        nlines: int = 0,
        mask: bool = False,
        block_size: int = _DEFAULT_BLOCK_SIZE,
    ) -> None:
        self.file_name = file_name
        self.block_begin = block_begin
        self.block_end = block_end
        self.nbytes = nbytes
        self.nlines = nlines
        self.mask = mask
        self.block_size = block_size

    def state_dict(self):
        """Return all fields as a plain JSON-serializable dict."""
        return {key: getattr(self, key) for key in self._KEYS}

    def load_state_dict(self, d):
        """Restore every field from ``d`` (must contain all keys)."""
        for key in self._KEYS:
            setattr(self, key, d[key])

    def dumps(self) -> str:
        """Serialize to a single-line JSON string."""
        return json.dumps(self.state_dict())

    def loads(self, data: str) -> "FileInfo":
        """Deserialize from a JSON string; returns ``self`` for chaining."""
        self.load_state_dict(json.loads(data))
        return self

    def dump(self, fp: io.TextIOWrapper) -> "FileInfo":
        """Write the JSON form to an open text file; returns ``self``."""
        fp.write(self.dumps())
        return self

    def load(self, fp: io.TextIOWrapper) -> "FileInfo":
        """Read the JSON form from an open text file; returns ``self``."""
        self.loads(fp.read())
        return self
def _read_info_list(meta_path: str) -> List[FileInfo]:
    """Read all FileInfo records (one JSON line each) from ``meta_path``.

    Retries forever with a 10 s pause on I/O errors, matching the other
    meta-file helpers in this module.

    Bug fix: the result list is now created inside the retry loop.  The old
    code built it once before the loop, so a failure mid-read left partial
    entries behind and the retry appended every line again, returning
    duplicated records.
    """
    while True:
        info: List[FileInfo] = []
        try:
            with open(meta_path, "r", encoding="utf-8") as f:
                for line in f.readlines():
                    line = line.strip()
                    if len(line) > 0:
                        info.append(FileInfo().loads(line))
            return info
        except Exception as e:
            print("Error: reading info list in _read_info_list!,meta_path={path}, err={err}".
                  format(path=meta_path, err=str(e)))
            time.sleep(10)
def _write_info_list(meta_path: str, info: List[FileInfo]):
    """Atomically replace ``meta_path`` with the serialized ``info`` records.

    Writes to a randomly named sibling file first and renames it over the
    target (rename is atomic on POSIX filesystems); retries forever with a
    10 s pause on failure.
    """
    base_path = os.path.dirname(meta_path)
    random_fname = os.path.join(base_path, ".meta.bin.%s" % _random_string())
    while True:
        try:
            with open(random_fname, "w", encoding="utf-8") as f:
                f.writelines(v.dumps() + "\n" for v in info)
            os.rename(random_fname, meta_path)
            return
        except Exception:
            print("Error: writing info list!")
            time.sleep(10)
def _filtered_range(
begin: int, end: int, rank: int, world_size: int, filter_set: Optional[Set[int]] = None
):
begin = begin + (rank + (world_size - (begin % world_size))) % world_size
if filter_set is not None:
return [i for i in range(begin, end, world_size) if i in filter_set]
else:
return [i for i in range(begin, end, world_size)]
# Wrapper that transparently reopens and reseeks a file after I/O errors —
# works around intermittent failures seen on HDFS-backed mounts.
class SafeFile:
    """File-like object that retries read/seek/tell by reopening the file.

    Keeps the last known good offset and, on any exception, reopens the
    underlying file at that offset and retries the operation.
    NOTE(review): the retry is recursive with no depth bound — a persistently
    failing file will recurse until the interpreter's recursion limit.
    """
    def __init__(self, fname, mode):
        self.fname = None
        self.mode = None
        self._fp = None
        self.open_file(fname, mode)
    def read(self, size=-1):
        """Read up to ``size`` bytes, reopening and retrying on failure."""
        if self._fp is None:
            raise RuntimeError("Dataset is closed")
        try:
            res = self._fp.read(size)
            self.offset = self._fp.tell()  # remember position for recovery
            return res
        except Exception as e:
            print("Error {}: reading blocks in read {}!".format(e, self.fname))
            self.open_file(self.fname, self.mode, self.offset)
            return self.read(size)
    def tell(self):
        """Return the current offset, reopening and retrying on failure."""
        if self._fp is None:
            raise RuntimeError("Dataset is closed")
        try:
            res = self._fp.tell()
            self.offset = res
            return res
        except Exception as e:
            print("Error {}: reading blocks in tell {}!".format(e, self.fname))
            self.open_file(self.fname, self.mode, self.offset)
            return self.tell()
    def seek(self, offset, whence=0):
        """Seek like ``file.seek``, reopening and retrying on failure."""
        if self._fp is None:
            raise RuntimeError("Dataset is closed")
        try:
            res = self._fp.seek(offset, whence)
            self.offset = self._fp.tell()
            return res
        except Exception as e:
            print("Error {}: reading blocks in seek {}!".format(e, self.fname))
            self.open_file(self.fname, self.mode, self.offset)
            return self.seek(offset, whence)
    def close(self):
        """Close the underlying file, swallowing close-time errors."""
        if self._fp is not None:
            try:
                self._fp.close()
            except Exception:
                pass
            self._fp = None
    def open_file(self, fname, mode, offset=None):
        """(Re)open ``fname`` and optionally seek to ``offset``; retries forever."""
        if not os.path.exists(fname):
            raise RuntimeError("Dataset does not exist")
        try:
            self.fname = fname
            self.mode = mode
            self._fp = open(fname, mode)
            if offset is not None:
                self._fp.seek(offset, io.SEEK_SET)
            self.offset = self._fp.tell()
        except Exception as e:
            print("Error {}: reading blocks in open_file {}!".format(e, self.fname))
            time.sleep(10)
            self.open_file(fname, mode, offset)
class DistributedDataset:
"""Open dataset in readonly mode.
`DistributeDataset` is used to read datasets in a distributed manner.
Data in this dataset will be distributed evenly in blocks to each worker in the `distributed communicator`.
**Note** When all data has been read, reading dataset again will revert back to the first data.
Args:
path (str): Path to dataset.
rank (int): Rank in distributed communicator. See: bmtrain.rank()
world_size (int): Total workers in distributed communicator. See: bmtrain.world_size()
block_size (int): Size of each block in bytes. All files in the same dataset should have the same block size. Default: 16MB
Example:
>>> dataset = DistributedDataset("/path/to/dataset")
>>> for i in range(10):
>>> dataset.read()
""" # noqa: E501
def __init__(
self,
path: str,
rank: int = 0,
world_size: int = 1,
serializer: Optional[Serializer] = None,
max_repeat_times: Optional[int] = None,
shuffle: bool = True,
) -> None:
# config
self._path = path
self._rank = rank
self._world_size = world_size
self._max_repeat_times = max_repeat_times
self._repeat_times = 0
self._shuffle = shuffle
if serializer is None:
serializer = PickleSerializer()
self.serializer = serializer
# dataset meta
self._unused_block: List[int] = []
self._file_info: List[FileInfo] = []
self._file_ends: List[int] = []
self._total_blocks = 0
self._nbytes = 0
self._nlines = 0
# states
self._curr_block = None
self._fp = None
# cache
self._last_mod_time = 0
self._curr_fname = None
self._update_states(fast_skip=False)
self._repeat_times += 1
def _update_states(self, fast_skip: bool = True):
meta_path = os.path.join(self._path, "meta.bin")
while True:
try:
mod_time = os.stat(meta_path).st_mtime
break
except Exception as e:
print("Error: reading info list in DistributedDataset._update_states, "
"meta_path={path}, err={err}!".format(path=meta_path, err=str(e)))
time.sleep(10)
if self._last_mod_time < mod_time:
# file changed
pass
else:
if fast_skip:
return
info: List[FileInfo] = []
if os.path.exists(meta_path):
info = _read_info_list(meta_path)
old_len = len(self._file_info)
if old_len > len(info):
raise RuntimeError("Dataset meta file: changed unexpectly")
mask_changed = False
for i in range(old_len):
if self._file_info[i].file_name != info[i].file_name:
raise RuntimeError("Dataset meta file: changed unexpectly")
if self._file_info[i].block_begin != info[i].block_begin:
raise RuntimeError("Dataset meta file: changed unexpectly")
if self._file_info[i].block_end != info[i].block_end:
raise RuntimeError("Dataset meta file: changed unexpectly")
if self._file_info[i].mask != info[i].mask:
mask_changed = True
if info[0].block_begin != 0:
raise RuntimeError("Dataset meta file: block error (0)")
for i in range(len(info) - 1):
if info[i].block_end != info[i + 1].block_begin:
raise RuntimeError("Dataset meta file: block error (%d)" % (i + 1))
if (old_len == len(info) and not mask_changed) and fast_skip:
# fast skip
return
if len(info) > 0:
total_blocks = info[-1].block_end
self._nbytes = 0
self._nlines = 0
for v in info:
self._nbytes += v.nbytes
self._nlines += v.nlines
else:
total_blocks = 0
self._nbytes = 0
self._nlines = 0
if total_blocks > 0:
unused_block_set = set(self._unused_block)
nw_unused_block: List[int] = []
for i in range(len(info)):
v = info[i]
if not v.mask:
if i < old_len:
nw_unused_block.extend(
_filtered_range(
v.block_begin,
v.block_end,
self._rank,
self._world_size,
unused_block_set,
)
)
else:
nw_unused_block.extend(
_filtered_range(
v.block_begin, v.block_end, self._rank, self._world_size
)
)
# re-shuffle unused blocks
if self._shuffle:
random.shuffle(nw_unused_block)
self._unused_block = nw_unused_block
self._file_ends = []
for v in info:
self._file_ends.append(v.block_end)
else:
self._unused_block = []
self._file_ends = []
self._total_blocks = total_blocks
self._file_info = info
assert len(self._file_ends) == len(self._file_info)
def _mask_file(self, f: FileInfo):
self._unused_block = [
block_id
for block_id in self._unused_block
if block_id < f.block_begin or block_id >= f.block_end
]
def _get_block_file(self, block_id: int):
# find block in which file
file_idx = bisect.bisect_right(self._file_ends, block_id)
return self._file_info[file_idx]
def _prepare_new_epoch(self):
if self._max_repeat_times is not None:
if self._repeat_times >= self._max_repeat_times:
raise EOFError("End of dataset")
nw_unused_block: List[int] = []
for v in self._file_info:
if not v.mask:
nw_unused_block.extend(
_filtered_range(v.block_begin, v.block_end, self._rank, self._world_size)
)
if self._shuffle:
random.shuffle(nw_unused_block)
self._unused_block = nw_unused_block
self._repeat_times += 1
def _get_next_block(self):
self._update_states()
if len(self._unused_block) == 0:
self._prepare_new_epoch()
if len(self._unused_block) == 0:
raise RuntimeError("Empty dataset {}".format(self._path))
mn_block: int = self._unused_block.pop()
return mn_block
def _state_dict(self):
self._update_states()
num_unused_block = len(self._unused_block)
if (self._fp is not None) and (self._curr_block is not None):
curr_block = self._curr_block
curr_f = self._get_block_file(curr_block)
inblock_offset = self._fp.tell() - (curr_block - curr_f.block_begin) * curr_f.block_size
else:
curr_block = -1
inblock_offset = 0
return {
"states": torch.tensor(self._unused_block, dtype=torch.long, device="cpu"),
"block": torch.tensor(
[curr_block, inblock_offset, num_unused_block, self._repeat_times],
dtype=torch.long,
device="cpu",
),
}
def state_dict(self):
"""Returns a state dict representing the read states of the dataset.
Example:
>>> state = dataset.state_dict()
>>> dataset.load_state_dict(state)
"""
self._update_states()
num_unused_block = len(self._unused_block)
if (self._fp is not None) and (self._curr_block is not None):
curr_block = self._curr_block
curr_f = self._get_block_file(curr_block)
inblock_offset = self._fp.tell() - (curr_block - curr_f.block_begin) * curr_f.block_size
else:
curr_block = -1
inblock_offset = 0
with torch.no_grad():
if self._world_size > 1:
gpu_num_unused_block = torch.tensor([num_unused_block], dtype=torch.long).cuda()
max_unused_blocks = (
bmt.distributed.all_reduce(gpu_num_unused_block, op="max").cpu().item()
)
gpu_states = torch.full((max_unused_blocks,), -1, dtype=torch.long).cuda()
gpu_states[:num_unused_block] = torch.tensor(
self._unused_block, dtype=torch.long
).cuda()
gpu_block = torch.tensor(
[curr_block, inblock_offset, num_unused_block, self._repeat_times],
dtype=torch.long,
).cuda()
global_states = bmt.distributed.all_gather(
gpu_states
).cpu() # (world_size, max_unused_blocks)
global_block = bmt.distributed.all_gather(gpu_block).cpu() # (world_size, 4)
return {"states": global_states, "block": global_block}
else:
return {
"states": torch.tensor([self._unused_block], dtype=torch.long, device="cpu"),
"block": torch.tensor(
[[curr_block, inblock_offset, num_unused_block, self._repeat_times]],
dtype=torch.long,
device="cpu",
),
}
def load_state_dict(self, state, strict: bool = True):
"""Load dataset state.
Args:
state (dict): dataset state dict.
strict (bool): If `strict` is True, world size needs to be the same as when exported.
Example:
>>> state = dataset.state_dict()
>>>
"""
block_states: torch.LongTensor = state["states"]
block_info: torch.LongTensor = state["block"]
if block_states.size(0) != self._world_size:
if strict:
raise ValueError(
"world_size changed (%d -> %d)" % (state["block"].size(0), self._world_size)
)
else:
self._curr_block = None
self._fp = None
self._curr_fname = None
self._repeat_times = int(block_info[0, 3].item())
# re-shuffle unused blocks
nw_unused_block: List[int] = []
for i in range(block_states.size(0)):
# filter blocks that are not in this rank
num_unused_blocks: int = int(block_info[i, 2].item())
nw_unused_block.extend(
[
block_id
for block_id in block_states[i, :num_unused_blocks].tolist()
if block_id % self._world_size == self._rank
]
)
if self._shuffle:
random.shuffle(nw_unused_block)
self._unused_block = nw_unused_block
else:
curr_block, inblock_offset, num_unused_blocks, self._repeat_times = block_info[
self._rank
].tolist()
if curr_block == -1:
self._curr_block = None
else:
while True:
try:
self._curr_block = curr_block
f_info = self._get_block_file(self._curr_block)
self._open_file(
f_info.file_name,
(self._curr_block - f_info.block_begin)
* f_info.block_size
+ inblock_offset,
)
self._unused_block = block_states[self._rank, :num_unused_blocks].tolist()
break
except Exception:
print("Error: reading block!")
time.sleep(10)
# end
self._update_states()
def _get_file_path(self, fname):
return os.path.join(self._path, fname)
def _open_file(self, fname, offset):
if self._curr_fname != fname:
if self._fp is not None:
self._fp.close()
self._curr_fname = None
# self._fp = open(self._get_file_path(fname), "rb")
self._fp = SafeFile(self._get_file_path(fname), "rb")
self._curr_fname = fname
else:
assert self._fp is not None, "Unexpected error"
self._fp.seek(offset, io.SEEK_SET) # move to block
    def read(self):
        """Read a piece of data from dataset.
        Workers in different ranks will read different data.
        """
        if self._curr_block is None:
            # No block open: claim the next block assigned to this rank.
            next_block_id = self._get_next_block()
            f_info = self._get_block_file(next_block_id)
            try:
                self._open_file(
                    f_info.file_name,
                    (next_block_id - f_info.block_begin) * f_info.block_size,
                )
                self._curr_block = next_block_id
            except FileNotFoundError:
                # The file vanished (e.g. deleted from shared storage): mask
                # it out of the meta info and recurse to try another block.
                print("ERR: reading again!")
                self._mask_file(f_info)
                return self.read()  # read again
        if self._fp is None:
            raise RuntimeError("Dataset is not initialized")
        # Record framing (written by DatasetWriter): a 0x1F marker byte,
        # a uint32 payload length, then the payload; 0x00 marks end-of-block.
        MAGIC = self._fp.read(1)
        if MAGIC == b"\x1F":
            # correct
            size = struct.unpack("I", self._fp.read(4))[0]
            data = self._fp.read(size)
            return self.serializer.deserialize(data)
        elif MAGIC == b"\x00":
            # end of block
            self._curr_block = None
            return self.read()  # read next block
        else:
            raise ValueError("Invalid magic header")
    @property
    def nbytes(self):
        """Size of the dataset in bytes (``self._nbytes``, maintained elsewhere)."""
        return self._nbytes
class SimpleDataset(DistributedDataset):
    """Single-process view over a dataset: rank 0 of a world of size 1,
    iterated through at most once."""

    def __init__(
        self,
        path: str,
        serializer: Optional[Serializer] = None,
        shuffle: bool = True,
    ) -> None:
        # One rank, one pass over the data.
        super().__init__(
            path, 0, 1, serializer=serializer, max_repeat_times=1, shuffle=shuffle
        )

    def __iter__(self):
        """Yield records until the underlying reader is exhausted."""
        while True:
            try:
                item = self.read()
            except EOFError:
                self._repeat_times = 0
                return
            yield item

    def __len__(self):
        return self._nlines
class DatasetWriter:
    """Appends serialized records to one dataset file laid out in fixed-size
    blocks.

    Each record is framed as a 0x1F marker byte, a uint32 payload length and
    the payload itself; a 0x00 byte marks the end of a block and blocks are
    zero-padded to ``block_size`` so every block starts at a fixed offset.
    """

    def __init__(self, fname: str, block_size: int, serializer: Optional["Serializer"] = None):
        self._fname = fname
        self._block_size = block_size
        self._fp = open(self._fname, "wb")
        self._inblock_offset = 0  # bytes already used in the current block
        self._nbytes = 0  # total record bytes written (length prefixes included)
        self._nlines = 0  # number of records written
        self._nblocks = 1  # blocks occupied; the first block is counted up front
        if serializer is None:
            serializer = PickleSerializer()
        self.serializer = serializer

    def write(self, data):
        """Write a piece of data into dataset.
        Args:
            data (Any): Serialized with ``self.serializer`` (pickle by default).
        Example:
            >>> writer.write( "anything you want" )
        Raises:
            ValueError: if the framed record cannot fit into a single block.
        """
        byte_data = self.serializer.serialize(data)
        byte_data = struct.pack("I", len(byte_data)) + byte_data
        # Reject oversized records *before* touching the file: a record needs
        # 1 marker byte plus its payload, with 1 byte reserved for the
        # end-of-block marker, and can never fit if it exceeds the block size
        # even in an empty block. (Checking only after padding would waste a
        # block and corrupt the block count on failure.)
        if 2 + len(byte_data) > self._block_size:
            raise ValueError("data is larger than block size")
        if self._inblock_offset + 2 + len(byte_data) > self._block_size:
            # Not enough room left: pad out the current block and move on.
            self._fp.write(
                b"\x00" * (self._block_size - self._inblock_offset)
            )  # fill the remaining space with 0
            self._inblock_offset = 0
            self._nblocks += 1
        self._nbytes += len(byte_data)
        self._nlines += 1
        self._inblock_offset += 1 + len(byte_data)
        self._fp.write(b"\x1F")
        self._fp.write(byte_data)

    @property
    def nbytes(self):
        """Total record bytes written so far (length prefixes included)."""
        return self._nbytes

    @property
    def nblocks(self):
        """Number of blocks the file occupies, including the current one."""
        return self._nblocks

    @property
    def nlines(self):
        """Number of records written so far."""
        return self._nlines

    def close(self):
        """Zero-pad the current block to ``block_size`` and close the file."""
        if not self._fp.closed:
            self._fp.write(b"\x00" * (self._block_size - self._inblock_offset))
            self._fp.close()
class DatasetBuilder:
    """Context manager that adds one new file to a dataset directory and
    registers it in the directory's ``meta.bin`` on successful exit.

    On exit with an exception, the partially written file is removed and
    nothing is registered.
    """

    def __init__(
        self,
        path: str,
        dbname: str,
        block_size=_DEFAULT_BLOCK_SIZE,
        serializer: Optional[Serializer] = None,
    ) -> None:
        self._block_size = block_size
        self._path = path
        self._dbname = dbname
        # Created in __enter__; None means "not currently writing", so
        # __exit__ called without a matching __enter__ raises the intended
        # RuntimeError instead of an AttributeError.
        self._writer: Optional["DatasetWriter"] = None
        if serializer is None:
            serializer = PickleSerializer()
        self.serializer = serializer
        if not os.path.exists(self._path):
            os.makedirs(self._path)
        meta_path = os.path.join(self._path, "meta.bin")
        info: List[FileInfo] = []
        if os.path.exists(meta_path):
            info = _read_info_list(meta_path)
        # A file name must be unique within the dataset's meta info.
        for v in info:
            if v.file_name == dbname:
                raise ValueError("Dataset name exists")
        self._db_path = os.path.join(self._path, self._dbname)
        if os.path.exists(self._db_path):
            raise ValueError("File exists `%s`" % self._db_path)

    def __enter__(self):
        self._writer = DatasetWriter(self._db_path, self._block_size, self.serializer)
        return self._writer

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if self._writer is None:
            raise RuntimeError("Unexpected call to __exit__")
        self._writer.close()
        if exc_type is not None:
            # Writing failed: drop the partial file instead of registering it.
            print("Error while writing file")
            if os.path.exists(self._db_path):
                os.unlink(self._db_path)
        else:
            # Append this file's stats to the meta info, continuing the
            # global block numbering after the last registered file.
            meta_path = os.path.join(self._path, "meta.bin")
            info: List[FileInfo] = []
            if os.path.exists(meta_path):
                info = _read_info_list(meta_path)
            last_block = 0
            if len(info) > 0:
                last_block = info[-1].block_end
            info.append(
                FileInfo(
                    self._dbname,
                    last_block,
                    last_block + self._writer.nblocks,
                    self._writer.nbytes,
                    self._writer.nlines,
                    False,
                    self._block_size,
                )
            )
            # atomic write to meta file
            _write_info_list(meta_path, info)
        self._writer = None
def build_dataset(
    path: str,
    dbname: str,
    block_size: int = _DEFAULT_BLOCK_SIZE,
    serializer: Optional[Serializer] = None,
):
    """Open the dataset in write mode and return a builder whose
    ``__enter__`` yields a writer for the new file.

    Args:
        path (str): Path to dataset.
        dbname (str): Name of the file the data will be written to; must be
            unique within this dataset.
        block_size (int): Size of each block in bytes. All files in the same
            dataset should use the same block size. Default: 16MB.
        serializer: Optional serializer; pickle is used when omitted.

    Example:
        >>> with build_dataset("/path/to/dataset", "data_part_1") as writer:
        ...     for i in range(10):
        ...         writer.write({"anything you want"})
    """
    builder = DatasetBuilder(path, dbname, block_size=block_size, serializer=serializer)
    return builder
| 25,959 | 32.758127 | 131 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/dataset/utils.py | # coding=utf-8
# Copyright 2020 The OpenBMB team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import struct
from typing import List, Optional
from .distributed_dataset import (
SimpleDataset,
build_dataset,
_read_info_list,
_write_info_list,
_random_string,
_DEFAULT_BLOCK_SIZE,
FileInfo,
)
from .serializer import RawSerializer
import random
import shutil
try:
from tqdm import tqdm
support_tqdm = True
except ModuleNotFoundError:
support_tqdm = False
_DEFAULT_SHUFFLE_BUCKET_SIZE = 1 << 30
def shuffle_dataset(
    path_src: str,
    path_tgt: str,
    block_size: int = _DEFAULT_BLOCK_SIZE,
    bucket_size: int = _DEFAULT_SHUFFLE_BUCKET_SIZE,
    progress_bar: bool = False,
    output_name: Optional[str] = None,
):
    """Shuffle one distributed dataset, writing the result to another dataset.

    Uses a two-pass bucket shuffle: records are first scattered into
    temporary buckets at random, then each bucket is shuffled in memory.
    Args:
        path_src (str): path to source dataset
        path_tgt (str): path to write results
        block_size (int): dataset block size (default: 16MB)
        bucket_size (int): shuffle algorithm bucket size (default: 1GB)
        progress_bar (bool): show progress bar
        output_name (str): optional name of the output file; random if omitted
    Example:
        >>> shuffle_dataset("/path/to/source", "/path/to/output")
    """
    if progress_bar and not support_tqdm:
        raise RuntimeError("Requires `tqdm` to enable progress bar.")
    ds = SimpleDataset(path_src, serializer=RawSerializer())
    # Each bucket holds roughly bucket_size bytes of the source dataset.
    num_buckets = (ds.nbytes + bucket_size - 1) // bucket_size
    tmp_files = [os.path.join(path_src, ".tmp.%s" % _random_string()) for _ in range(num_buckets)]
    try:
        # Step 1: write to bucket randomly
        f_tmp = [open(fname, "wb") for fname in tmp_files]
        try:
            iterator = ds
            if progress_bar:
                iterator = tqdm(ds, desc="Shuffle step 1/2")
            for data in iterator:
                # random.random() < 1.0, so bucket_id is always in range.
                bucket_id = int(random.random() * num_buckets)
                len_data = len(data)
                # Records inside a bucket are length-prefixed (uint32).
                f_tmp[bucket_id].write(struct.pack("I", len_data) + data)
        finally:
            # close all files
            for fp in f_tmp:
                if not fp.closed:
                    fp.close()
            f_tmp = []
        # Step 2: shuffle inside bucket
        if output_name is None:
            output_name = "%s.shuffle" % _random_string()
        with build_dataset(
            path_tgt,
            output_name,
            block_size=block_size,
            serializer=RawSerializer(),
        ) as writer:
            iterator = tmp_files
            if progress_bar:
                iterator = tqdm(tmp_files, desc="Shuffle step 2/2")
            for fname in iterator:
                fp = open(fname, "rb")
                data_in_bucket = []
                while True:
                    try:
                        raw_data = fp.read(4)
                        if len(raw_data) == 0:
                            # EOF
                            break
                        len_data = struct.unpack("I", raw_data)[0]
                        data_in_bucket.append(fp.read(len_data))
                    except EOFError:
                        break
                random.shuffle(data_in_bucket)
                for data in data_in_bucket:
                    writer.write(data)
                fp.close()
                os.unlink(fname)
    finally:
        # cleanup: remove any temporary buckets left behind on error
        for fname in tmp_files:
            if os.path.exists(fname):
                os.unlink(fname)
def compact_dataset(path: str):
    """Compact the dataset, removing entries whose files were deleted.

    Rewrites ``meta.bin`` so the surviving files' block ranges are contiguous
    again. **Note** this may invalidate previously exported dataset states.
    Args:
        path (str): path to dataset
    Raises:
        ValueError: if `path` does not contain a dataset.
    Example:
        >>> compact_dataset("/path/to/dataset")
    """
    meta_path = os.path.join(path, "meta.bin")
    info: List[FileInfo] = []
    if os.path.exists(meta_path):
        info = _read_info_list(meta_path)
    else:
        raise ValueError("Dataset not exists")
    nw_info: List[FileInfo] = []
    curr_block = 0
    for v in info:
        # File names in meta.bin are relative to the dataset directory (see
        # DatasetBuilder / merge_dataset), so the existence check must be
        # anchored there -- checking `v.file_name` directly would resolve
        # against the current working directory and drop valid files.
        if not os.path.exists(os.path.join(path, v.file_name)):
            # file is deleted: drop its entry and reuse its block range
            continue
        num_file_block = v.block_end - v.block_begin
        nw_info.append(
            FileInfo(
                v.file_name,
                curr_block,
                curr_block + num_file_block,
                v.nbytes,
                v.nlines,
                v.mask,
                v.block_size,
            )
        )
        curr_block += num_file_block
    _write_info_list(meta_path, nw_info)
def mask_dataset(path: str, dbname: str, mask: bool = True):
    """Mask one file in a dataset; masked files are skipped by readers.

    Args:
        path (str): path to dataset
        dbname (str): file name inside the dataset to (un)mask
        mask (bool): True for mask, False for unmask
    Example:
        >>> mask_dataset("/path/to/dataset", "data_part_1", mask=True)
    """
    meta_path = os.path.join(path, "meta.bin")
    if not os.path.exists(meta_path):
        raise ValueError("Dataset not exists")
    info: List[FileInfo] = _read_info_list(meta_path)
    # Flip the flag on every entry matching the requested name.
    for file_info in info:
        if file_info.file_name == dbname:
            file_info.mask = mask
    _write_info_list(meta_path, info)
def merge_dataset(dst: str, src: str):
    """Copy every file of dataset *src* into dataset *dst* and register the
    copies in *dst*'s ``meta.bin``, renaming on file-name collisions.

    Args:
        dst (str): path to the destination dataset (modified in place)
        src (str): path to the source dataset (left untouched)
    Raises:
        ValueError: if either path does not contain a dataset.
    """
    meta_path_src = os.path.join(src, "meta.bin")
    meta_path_dst = os.path.join(dst, "meta.bin")
    info_src: List[FileInfo] = []
    if os.path.exists(meta_path_src):
        info_src = _read_info_list(meta_path_src)
    else:
        raise ValueError("Dataset not exists")
    info_dst: List[FileInfo] = []
    if os.path.exists(meta_path_dst):
        info_dst = _read_info_list(meta_path_dst)
    else:
        raise ValueError("Dataset not exists")
    curr_block = 0
    nw_info: List[FileInfo] = []
    # Re-number the destination's own files first so block ranges stay contiguous.
    for v in info_dst:
        num_file_block = v.block_end - v.block_begin
        nw_info.append(
            FileInfo(
                v.file_name,
                curr_block,
                curr_block + num_file_block,
                v.nbytes,
                v.nlines,
                v.mask,
                v.block_size,
            )
        )
        curr_block += num_file_block
    # Then copy the source files in, appending their blocks after the
    # destination's and renaming on collision.
    for v in info_src:
        num_file_block = v.block_end - v.block_begin
        dst_db_name = os.path.join(dst, v.file_name)
        nw_fname = v.file_name
        if os.path.exists(dst_db_name):
            # Name collision: append a numeric suffix until the name is free.
            idx = 0
            while os.path.exists(dst_db_name + "_{}".format(idx)):
                idx += 1
            dst_db_name = dst_db_name + "_{}".format(idx)
            nw_fname = nw_fname + "_{}".format(idx)
        shutil.copy(os.path.join(src, v.file_name), dst_db_name)
        nw_info.append(
            FileInfo(
                nw_fname,
                curr_block,
                curr_block + num_file_block,
                v.nbytes,
                v.nlines,
                v.mask,
                v.block_size,
            )
        )
        curr_block += num_file_block
    _write_info_list(meta_path_dst, nw_info)
| 7,647 | 28.302682 | 98 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/dataset/serializer.py | # coding=utf-8
# Copyright 2020 The OpenBMB team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import json
class Serializer:
    """Abstract codec that converts objects to bytes and back."""

    def serialize(self, obj) -> bytes:
        """Encode *obj* as bytes; subclasses must override."""
        raise NotImplementedError()

    def deserialize(self, data: bytes):
        """Decode *data* back into an object; subclasses must override."""
        raise NotImplementedError()


class PickleSerializer(Serializer):
    """Codec for arbitrary Python objects, backed by :mod:`pickle`."""

    def serialize(self, obj) -> bytes:
        return pickle.dumps(obj)

    def deserialize(self, data: bytes):
        return pickle.loads(data)


class JsonSerializer(Serializer):
    """Codec for JSON-compatible objects, stored as UTF-8 text."""

    def serialize(self, obj) -> bytes:
        return json.dumps(obj, ensure_ascii=False).encode("utf-8")

    def deserialize(self, data: bytes):
        return json.loads(data.decode("utf-8"))


class RawSerializer(Serializer):
    """Identity codec: the payload is already raw bytes."""

    def serialize(self, obj) -> bytes:
        return obj

    def deserialize(self, data: bytes):
        return data
| 1,570 | 24.33871 | 74 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/dataset/__init__.py | from .distributed_dataset import DistributedDataset, SimpleDataset, build_dataset
from .utils import shuffle_dataset, compact_dataset, mask_dataset, merge_dataset
| 163 | 53.666667 | 81 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/bee.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
from typing_extensions import TypedDict
import torch
from ..utils import Config
from ..layers import Encoder, EmbeddingExt, BucketPositionBias
import bmtrain as bmt
from ..utils.gradient_shrink import gradient_shrink
class CPMBeeInferenceState(TypedDict):
    # Incremental-decoding cache returned by CPMBee.inference: per-token
    # metadata for everything already fed to the model, plus each layer's
    # cached key/value tensors.
    buffer_position: torch.Tensor  # positions of cached tokens
    buffer_context: torch.Tensor  # context flags of cached tokens
    buffer_sample_ids: torch.Tensor  # sample ids of cached tokens
    buffer_num_segments: torch.Tensor  # per-token segment counts
    buffer_segments: torch.Tensor  # segment ids of cached tokens
    buffer: List[Tuple[torch.Tensor, torch.Tensor]]  # per-layer (key, value) cache
class CPMBeeConfig(Config):
    """Hyper-parameter container for the CPM-Bee architecture."""

    def __init__(
        self,
        vocab_size=30720,
        dim_model=4096,
        num_heads=64,
        dim_head=64,
        dim_ff=10240,
        num_layers=32,
        dropout_p=0.0,
        position_bias_num_buckets=256,
        position_bias_num_segment_buckets=256,
        position_bias_max_distance=2048,
        eps=1e-6,
        half: bool = True,
        mask_modules: Optional[List[Tuple[bool, bool]]] = None,
    ):
        super().__init__()
        # Transformer geometry.
        self.vocab_size = vocab_size
        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head
        self.dim_ff = dim_ff
        self.num_layers = num_layers
        # Relative position-bias bucketing.
        self.position_bias_num_buckets = position_bias_num_buckets
        self.position_bias_num_segment_buckets = position_bias_num_segment_buckets
        self.position_bias_max_distance = position_bias_max_distance
        # Regularization / numerics.
        self.dropout_p = dropout_p
        self.eps = eps
        # `half` selects fp16 parameters; fp32 otherwise.
        self.dtype = torch.half if half else torch.float
        self.mask_modules = mask_modules
class CPMBee(bmt.DistributedModule):
    """CPM-Bee decoder (BMTrain distributed version).

    ``forward`` scores a full packed batch; ``inference`` decodes
    incrementally using a :class:`CPMBeeInferenceState` key/value cache.
    The output projection is tied to the input embedding table.
    """

    def __init__(self, config: CPMBeeConfig):
        super().__init__()
        self.encoder = Encoder(
            num_layers=config.num_layers,
            dim_model=config.dim_model,
            dim_ff=config.dim_ff,
            num_heads=config.num_heads,
            dim_head=config.dim_head,
            dtype=config.dtype,
            eps=config.eps,
            dropout_p=config.dropout_p,
            mask_modules=config.mask_modules,
        )
        # Token embedding; also provides the tied output projection and the
        # embedding of the external (out-of-vocabulary) table.
        self.input_embedding = EmbeddingExt(
            vocab_size=config.vocab_size,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        # Relative position bias with extra buckets for segment relations.
        self.position_bias = BucketPositionBias(
            num_heads=config.num_heads,
            num_buckets=config.position_bias_num_buckets,
            num_segment_bucket=config.position_bias_num_segment_buckets,
            max_distance=config.position_bias_max_distance,
            dtype=config.dtype,
        )

    def forward(
        self,
        input: torch.Tensor,  # (batch, seqlen) int32
        input_sub: torch.Tensor,  # (batch, seqlen) int32
        length: torch.Tensor,  # (batch) int32
        context: torch.Tensor,  # (batch, seqlen) bool
        sample_ids: torch.Tensor,  # (batch, seq_len) int32
        num_segments: torch.Tensor,  # (batch, seq_len) int32
        segment: torch.Tensor,  # (batch, seqlen) int32
        segment_rel_offset: torch.Tensor,  # (batch, seq_len) int32
        segment_rel: torch.Tensor,  # (batch, num_segment_bucket) int32
        span: torch.Tensor,  # (batch, seqlen) int32
        ext_table_ids: torch.Tensor,  # (ext_table_size) int32
        ext_table_sub: torch.Tensor,  # (ext_table_size) int32
        ):
        """Score a packed batch; returns ``(logits, hidden_states)``.

        Several independent samples may be packed into one row; the masks
        built below keep attention within (sample, span) boundaries.
        """
        batch = input.size(0)
        seqlen = input.size(1)
        # processing masks and position bias bucket
        with torch.no_grad():
            device = input.device
            # calc segment bucket: pair (query-segment, key-segment) ids into
            # a flat index for the per-row `segment_rel` lookup table.
            segment_rel_2d = torch.masked_fill(
                segment[:, :, None] * num_segments[:, :, None]
                + segment[:, None, :]
                + segment_rel_offset[:, :, None],
                ~(
                    (sample_ids[:, :, None] == sample_ids[:, None, :])
                    & (span[:, None, :] == span[:, :, None])
                ),  # not in the same span or sample
                0,  # avoid torch.gather overflow
            ).view(batch, seqlen * seqlen)
            segment_bucket = torch.gather(
                input=segment_rel,
                dim=1,
                index=segment_rel_2d.long(),
            ).view(batch, seqlen, seqlen)
            segment_bucket.masked_fill_(
                ~(
                    (sample_ids[:, :, None] == sample_ids[:, None, :])
                    & (span[:, None, :] == span[:, :, None])
                ),  # not in the same span or sample
                1,  # bucket is used for in-context samples
            )
            # directional mask: key position <= query position (causality)
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            # sample mask: sample id 0 attends everywhere; others stay within
            # their own packed sample.
            sample_mask_2d = (sample_ids[:, :, None] == 0) | (
                sample_ids[:, :, None] == sample_ids[:, None, :]
            )
            # context mask: context (prompt) tokens are fully visible; the
            # rest are causal.
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            # span mask
            attention_mask = (
                attention_mask & sample_mask_2d & (span[:, None, :] == span[:, :, None])
            )
            # length mask: zero out padding past each row's length
            mask_1d = (
                torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
            position = torch.arange(seqlen, device=device).expand(batch, seqlen)
        hidden_states = self.input_embedding(input, input_sub)
        position_bias = self.position_bias(position, position, segment_bucket)
        hidden_states = self.encoder(hidden_states, attention_mask, position_bias)
        # Project with the tied embedding, extended by the external table.
        ext_table = self.input_embedding(ext_table_ids, ext_table_sub)
        logits = self.input_embedding.projection(hidden_states, ext_table)
        return logits, hidden_states

    def inference(
        self,
        input: torch.Tensor,  # (batch, len_q) int32
        input_sub: torch.Tensor,  # (batch, len_q) int32
        position: torch.Tensor,  # (batch, len_q) int32
        context: torch.Tensor,  # (batch, len_q) bool
        sample_ids: torch.Tensor,  # (batch, len_q) int32
        num_segments: torch.Tensor,  # (batch, len_q) int32
        segment: torch.Tensor,  # (batch, len_q) int32
        segment_rel_offset: torch.Tensor,  # (batch, len_q) int32
        segment_rel: torch.Tensor,  # (batch, num_segment_bucket) int32
        ext_table_ids: torch.Tensor,  # (ext_table_size) int32
        ext_table_sub: torch.Tensor,  # (ext_table_size) int32
        past_key_values: Optional[CPMBeeInferenceState] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, CPMBeeInferenceState]:
        """Incremental decoding step over `len_q` new tokens.

        Concatenates the new tokens' metadata onto the cached buffers, builds
        a (len_q, len_buffer) attention mask, and returns logits, hidden
        states and the updated cache.
        """
        with torch.no_grad():
            if past_key_values is None:
                # First step: the "buffer" is just the current query.
                present_position = position
                present_context = context
                present_sample_ids = sample_ids
                present_num_segments = num_segments
                present_segments = segment
                present_buffer = None
            else:
                present_position = torch.cat([past_key_values["buffer_position"], position], dim=-1)
                present_context = torch.cat([past_key_values["buffer_context"], context], dim=-1)
                present_sample_ids = torch.cat(
                    [past_key_values["buffer_sample_ids"], sample_ids], dim=-1
                )
                present_num_segments = torch.cat(
                    [past_key_values["buffer_num_segments"], num_segments], dim=-1
                )
                present_segments = torch.cat([past_key_values["buffer_segments"], segment], dim=-1)
                present_buffer = past_key_values["buffer"]
            batch = input.size(0)
            len_q = input.size(1)
            len_buffer = present_position.size(1)
            # Same bucket construction as in forward, but queries are the new
            # tokens and keys are the whole buffer.
            segment_rel_2d = torch.masked_fill(
                segment[:, :, None] * num_segments[:, :, None]
                + present_segments[:, None, :]
                + segment_rel_offset[:, :, None],
                ~(
                    (sample_ids[:, :, None] == present_sample_ids[:, None, :])
                ),  # not in the same sample
                0,  # avoid torch.gather overflow
            ).view(batch, len_q * len_buffer)
            segment_bucket = torch.gather(
                input=segment_rel,
                dim=1,
                index=segment_rel_2d.long(),
            ).view(batch, len_q, len_buffer)
            segment_bucket.masked_fill_(
                ~(
                    (sample_ids[:, :, None] == present_sample_ids[:, None, :])
                ),  # not in the same span or sample
                1,  # bucket is used for in-context samples
            )
            # directional mask
            directional_mask_2d = present_position[:, None, :] <= position[:, :, None]
            # sample mask
            sample_mask_2d = (sample_ids[:, :, None] == 0) | (
                sample_ids[:, :, None] == present_sample_ids[:, None, :]
            )
            # context mask
            attention_mask = present_context[:, None, :] | (
                context[:, :, None].logical_not()
                & directional_mask_2d.view(batch, len_q, len_buffer)
            )
            # span mask
            attention_mask = attention_mask & sample_mask_2d
            # length mask: buffer slots with num_segments == 0 are padding
            mask_1d = present_num_segments != 0
            attention_mask = mask_1d.view(batch, 1, len_buffer) & attention_mask
        # NOTE(review): gradient_shrink presumably rescales gradients flowing
        # into the shared embeddings/bias during generation-time fine-tuning;
        # forward() does not apply it -- confirm against utils.gradient_shrink.
        hidden_states = gradient_shrink(self.input_embedding(input, input_sub))
        position_bias = gradient_shrink(
            self.position_bias(position, present_position, segment_bucket)
        )
        hidden_states, present_key_values = self.encoder(
            hidden_states,
            attention_mask,
            position_bias,
            True,
            present_buffer,
        )
        ext_table = gradient_shrink(self.input_embedding(ext_table_ids, ext_table_sub))
        logits = self.input_embedding.projection(hidden_states, ext_table)
        return (
            logits,
            hidden_states,
            {
                "buffer_position": present_position,
                "buffer_context": present_context,
                "buffer_sample_ids": present_sample_ids,
                "buffer_num_segments": present_num_segments,
                "buffer_segments": present_segments,
                "buffer": present_key_values,
            },
        )
| 11,433 | 38.157534 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/ant.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
import torch
from ..utils import Config
from ..layers import Encoder, Embedding, SegmentPositionEmbedding
import bmtrain as bmt
class CPMAntConfig(Config):
    """Hyper-parameter container for the CPM-Ant architecture."""

    def __init__(
        self,
        vocab_size=30720,
        dim_model=4096,
        num_heads=64,
        dim_head=64,
        dim_ff=10240,
        num_layers=32,
        dropout_p=0.0,
        position_bias_num_buckets=512,
        position_bias_max_distance=2048,
        eps=1e-6,
        half: bool = True,
        prompt_types: int = 32,
        prompt_length: int = 32,
        segment_types: int = 32,
        mask_modules: Optional[List[Tuple[bool, bool]]] = None,
        **kwargs,
    ):
        # Extra **kwargs are accepted and silently ignored.
        super().__init__()
        # Prompt / segment vocabularies.
        self.prompt_types = prompt_types
        self.prompt_length = prompt_length
        self.segment_types = segment_types
        # Transformer geometry.
        self.vocab_size = vocab_size
        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head
        self.dim_ff = dim_ff
        self.num_layers = num_layers
        # Relative position-bias bucketing.
        self.position_bias_num_buckets = position_bias_num_buckets
        self.position_bias_max_distance = position_bias_max_distance
        # Regularization / numerics.
        self.dropout_p = dropout_p
        self.eps = eps
        # fp16 parameters when `half`, fp32 otherwise.
        self.dtype = torch.half if half else torch.float
        self.mask_modules = mask_modules
class CPMAnt(bmt.DistributedModule):
    """CPM-Ant decoder (BMTrain distributed version).

    Rows are laid out as ``prompt_length`` prompt tokens followed by ordinary
    vocabulary tokens; prompt, segment and token embeddings are summed.
    ``inference`` decodes incrementally with a per-layer key/value cache.
    """

    def __init__(self, config: CPMAntConfig):
        super().__init__()
        self.encoder = Encoder(
            num_layers=config.num_layers,
            dim_model=config.dim_model,
            dim_ff=config.dim_ff,
            num_heads=config.num_heads,
            dim_head=config.dim_head,
            dtype=config.dtype,
            eps=config.eps,
            dropout_p=config.dropout_p,
            mask_modules=config.mask_modules,
        )
        # One row of prompt embeddings per (prompt type, slot) pair.
        self.prompt_embedding = Embedding(
            vocab_size=config.prompt_types * config.prompt_length,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.segment_embedding = Embedding(
            vocab_size=config.segment_types,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        # Token embedding; also provides the tied output projection.
        self.input_embedding = Embedding(
            vocab_size=config.vocab_size,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.position_bias = SegmentPositionEmbedding(
            num_heads=config.num_heads,
            num_segments=config.segment_types,
            num_buckets=config.position_bias_num_buckets,
            max_distance=config.position_bias_max_distance,
            bidirectional=True,
            dtype=config.dtype,
        )
        self.prompt_length = config.prompt_length

    def forward(
        self,
        input: torch.Tensor,  # (batch, seqlen)
        length: torch.Tensor,  # (batch)
        context: torch.Tensor,  # (batch, seqlen)
        position: torch.Tensor,  # (batch, seqlen)
        segment: torch.Tensor,  # (batch, seqlen)
        span: torch.Tensor,  # (batch, seqlen)
    ):
        """Score a full batch; returns ``(logits, hidden_states)``."""
        batch = input.size(0)
        seqlen = input.size(1)
        # Split each row into its prompt prefix and ordinary token ids.
        input_prompt = input[:, : self.prompt_length].contiguous()
        input_ids = input[:, self.prompt_length :].contiguous()
        prompt_states = self.prompt_embedding(input_prompt)
        hidden_states = self.input_embedding(input_ids)
        segment_states = self.segment_embedding(segment)
        hidden_states = torch.cat([prompt_states, hidden_states], 1) + segment_states
        with torch.no_grad():
            device = input.device
            # Causal mask: key position <= query position.
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            # Context (prompt) tokens are fully visible; the rest are causal.
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            # Attention stays within the same span.
            attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
            # Zero out padding past each row's length.
            mask_1d = (
                torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
        position_bias = self.position_bias(position, position, segment, segment)
        hidden_states = self.encoder(hidden_states, attention_mask, position_bias)
        # Project with the tied input embedding.
        logits = self.input_embedding.projection(hidden_states)
        return logits, hidden_states

    def inference(
        self,
        input: torch.Tensor,  # (batch, seqlen)
        length: torch.Tensor,  # (batch)
        context: torch.Tensor,  # (batch, seqlen)
        position: torch.Tensor,  # (batch, seqlen)
        segment: torch.Tensor,  # (batch, seqlen)
        span: torch.Tensor,  # (batch, seqlen)
        past_key_values=None,  # num_layers * 2 * (batch, num_heads, seqlen, dim_head)
    ):
        """Incremental decoding step; returns
        ``(logits, hidden_states, present_key_values)``."""
        batch = input.size(0)
        if past_key_values is None:
            # First step: embed the whole prompt + tokens.
            past_length = 0
            past_key_values = tuple([None] * self.encoder.num_layers)
            input_prompt = input[:, : self.prompt_length].contiguous()
            input_ids = input[:, self.prompt_length :].contiguous()
            prompt_states = self.prompt_embedding(input_prompt)
            hidden_states = self.input_embedding(input_ids)
            segment_states = self.segment_embedding(segment)
            hidden_states = torch.cat([prompt_states, hidden_states], 1) + segment_states
        else:
            # Later steps: only the newly appended token(s) are embedded;
            # the cache length is the key tensor's sequence dimension.
            past_length = past_key_values[0][0].size(-2)
            segment_states = self.segment_embedding(segment)
            hidden_states = self.input_embedding(input) + segment_states[:, -1:, :]
        seqlen = past_length + input.size(1)
        with torch.no_grad():
            device = input.device
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
            # mask for left padding
            mask_1d = (
                torch.tensor(list(range(seqlen))[::-1], device=device)[None, :].repeat(batch, 1)
                < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
        position_bias = self.position_bias(position, position, segment, segment)
        # Only the new query rows are needed against the full key buffer.
        attention_mask = attention_mask[:, past_length:, :]
        position_bias = position_bias[:, :, past_length:, :]
        hidden_states, present_key_values = self.encoder(
            hidden_states, attention_mask, position_bias, True, past_key_values
        )
        logits = self.input_embedding.projection(hidden_states)
        return logits, hidden_states, present_key_values
| 7,842 | 35.47907 | 96 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/bee_torch.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from ..native_layers import Encoder, EmbeddingExt, BucketPositionBias
from .bee import CPMBeeConfig, CPMBeeInferenceState
class CPMBeeTorch(torch.nn.Module):
    def __init__(self, config: CPMBeeConfig):
        """Build the pure-PyTorch CPM-Bee model from *config*.

        Module layout matches the BMTrain version (CPMBee) so that
        checkpoints map one-to-one.
        """
        super().__init__()
        self.encoder = Encoder(
            num_layers=config.num_layers,
            dim_model=config.dim_model,
            dim_ff=config.dim_ff,
            num_heads=config.num_heads,
            dim_head=config.dim_head,
            dtype=config.dtype,
            eps=config.eps,
            dropout_p=config.dropout_p,
            mask_modules=config.mask_modules,
        )
        # Token embedding; also provides the tied output projection.
        self.input_embedding = EmbeddingExt(
            vocab_size=config.vocab_size,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        # Relative position bias with extra buckets for segment relations.
        self.position_bias = BucketPositionBias(
            num_heads=config.num_heads,
            num_buckets=config.position_bias_num_buckets,
            num_segment_bucket=config.position_bias_num_segment_buckets,
            max_distance=config.position_bias_max_distance,
            dtype=config.dtype,
        )
    def forward(
        self,
        input: torch.Tensor,  # (batch, seqlen) int32
        input_sub: torch.Tensor,  # (batch, seqlen) int32
        length: torch.Tensor,  # (batch) int32
        context: torch.Tensor,  # (batch, seqlen) bool
        sample_ids: torch.Tensor,  # (batch, seq_len) int32
        num_segments: torch.Tensor,  # (batch, seq_len) int32
        segment: torch.Tensor,  # (batch, seqlen) int32
        segment_rel_offset: torch.Tensor,  # (batch, seq_len) int32
        segment_rel: torch.Tensor,  # (batch, num_segment_bucket) int32
        span: torch.Tensor,  # (batch, seqlen) int32
        ext_table_ids: torch.Tensor,  # (ext_table_size) int32
        ext_table_sub: torch.Tensor,  # (ext_table_size) int32
    ):
        """Score a packed batch; returns ``(logits, hidden_states)``.

        Mirrors CPMBee.forward: packed samples attend only within their own
        (sample, span) boundaries via the masks built below.
        """
        batch = input.size(0)
        seqlen = input.size(1)
        # processing masks and position bias bucket
        with torch.no_grad():
            device = input.device
            # calc segment bucket: pair (query-segment, key-segment) ids into
            # a flat index for the per-row `segment_rel` lookup table.
            segment_rel_2d = torch.masked_fill(
                segment[:, :, None] * num_segments[:, :, None]
                + segment[:, None, :]
                + segment_rel_offset[:, :, None],
                ~(
                    (sample_ids[:, :, None] == sample_ids[:, None, :])
                    & (span[:, None, :] == span[:, :, None])
                ),  # not in the same span or sample
                0,  # avoid torch.gather overflow
            ).view(batch, seqlen * seqlen)
            segment_bucket = torch.gather(
                input=segment_rel,
                dim=1,
                index=segment_rel_2d.long(),
            ).view(batch, seqlen, seqlen)
            segment_bucket.masked_fill_(
                ~(
                    (sample_ids[:, :, None] == sample_ids[:, None, :])
                    & (span[:, None, :] == span[:, :, None])
                ),  # not in the same span or sample
                1,  # bucket is used for in-context samples
            )
            # directional mask: key position <= query position (causality)
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            # sample mask: sample id 0 attends everywhere; others stay within
            # their own packed sample.
            sample_mask_2d = (sample_ids[:, :, None] == 0) | (
                sample_ids[:, :, None] == sample_ids[:, None, :]
            )
            # context mask: context (prompt) tokens are fully visible; the
            # rest are causal.
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            # span mask
            attention_mask = (
                attention_mask & sample_mask_2d & (span[:, None, :] == span[:, :, None])
            )
            # length mask: zero out padding past each row's length
            mask_1d = (
                torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
            position = torch.arange(seqlen, device=device).expand(batch, seqlen)
        hidden_states = self.input_embedding(input, input_sub)
        position_bias = self.position_bias(position, position, segment_bucket)
        hidden_states = self.encoder(hidden_states, attention_mask, position_bias)
        # Project with the tied embedding, extended by the external table.
        ext_table = self.input_embedding(ext_table_ids, ext_table_sub)
        logits = self.input_embedding.projection(hidden_states, ext_table)
        return logits, hidden_states
    def inference(
        self,
        input: torch.Tensor,  # (batch, len_q) int32
        input_sub: torch.Tensor,  # (batch, len_q) int32
        position: torch.Tensor,  # (batch, len_q) int32
        context: torch.Tensor,  # (batch, len_q) bool
        sample_ids: torch.Tensor,  # (batch, len_q) int32
        num_segments: torch.Tensor,  # (batch, len_q) int32
        segment: torch.Tensor,  # (batch, len_q) int32
        segment_rel_offset: torch.Tensor,  # (batch, len_q) int32
        segment_rel: torch.Tensor,  # (batch, num_segment_bucket) int32
        ext_table_ids: torch.Tensor,  # (ext_table_size) int32
        ext_table_sub: torch.Tensor,  # (ext_table_size) int32
        past_key_values: Optional[CPMBeeInferenceState] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, CPMBeeInferenceState]:
        """Incremental forward pass for generation with a key/value cache.

        The ``len_q`` new tokens are scored against the concatenation of the
        cached prefix (``past_key_values``) and themselves; all masks and the
        position bias are therefore built over ``len_buffer = prefix + len_q``
        positions.  Returns ``(logits, hidden_states, new_state)`` where
        ``new_state`` carries the per-token bookkeeping tensors plus the
        encoder's updated key/value buffers for the next step.
        """
        with torch.no_grad():
            if past_key_values is None:
                # first step: the "buffer" is just the current chunk
                present_position = position
                present_context = context
                present_sample_ids = sample_ids
                present_num_segments = num_segments
                present_segments = segment
                present_buffer = None
            else:
                # extend the cached bookkeeping tensors with the new tokens
                present_position = torch.cat([past_key_values["buffer_position"], position], dim=-1)
                present_context = torch.cat([past_key_values["buffer_context"], context], dim=-1)
                present_sample_ids = torch.cat(
                    [past_key_values["buffer_sample_ids"], sample_ids], dim=-1
                )
                present_num_segments = torch.cat(
                    [past_key_values["buffer_num_segments"], num_segments], dim=-1
                )
                present_segments = torch.cat([past_key_values["buffer_segments"], segment], dim=-1)
                present_buffer = past_key_values["buffer"]
            batch = input.size(0)
            len_q = input.size(1)
            len_buffer = present_position.size(1)
            # flat (query_segment, key_segment) index into the per-sample
            # segment_rel table; zeroed outside the sample to keep gather in range
            segment_rel_2d = torch.masked_fill(
                segment[:, :, None] * num_segments[:, :, None]
                + present_segments[:, None, :]
                + segment_rel_offset[:, :, None],
                ~(
                    (sample_ids[:, :, None] == present_sample_ids[:, None, :])
                ),  # not in the same sample
                0,  # avoid torch.gather overflow
            ).view(batch, len_q * len_buffer)
            segment_bucket = torch.gather(
                input=segment_rel,
                dim=1,
                index=segment_rel_2d.long(),
            ).view(batch, len_q, len_buffer)
            segment_bucket.masked_fill_(
                ~(
                    (sample_ids[:, :, None] == present_sample_ids[:, None, :])
                ),  # not in the same span or sample
                1,  # bucket is used for in-context samples
            )
            # directional (causal) mask: keys may not be to the right of queries
            directional_mask_2d = present_position[:, None, :] <= position[:, :, None]
            # sample mask: sample 0 attends everywhere, others only within their sample
            sample_mask_2d = (sample_ids[:, :, None] == 0) | (
                sample_ids[:, :, None] == present_sample_ids[:, None, :]
            )
            # context mask: context tokens are visible bidirectionally,
            # generated tokens only causally
            attention_mask = present_context[:, None, :] | (
                context[:, :, None].logical_not()
                & directional_mask_2d.view(batch, len_q, len_buffer)
            )
            # span mask
            attention_mask = attention_mask & sample_mask_2d
            # length mask: positions with num_segments == 0 are padding
            mask_1d = present_num_segments != 0
            attention_mask = mask_1d.view(batch, 1, len_buffer) & attention_mask
        hidden_states = self.input_embedding(input, input_sub)
        position_bias = self.position_bias(position, present_position, segment_bucket)
        hidden_states, present_key_values = self.encoder(
            hidden_states,
            attention_mask,
            position_bias,
            True,
            present_buffer,
        )
        # extension-table embeddings let out-of-vocab tokens appear in the logits
        ext_table = self.input_embedding(ext_table_ids, ext_table_sub)
        logits = self.input_embedding.projection(hidden_states, ext_table)
        return (
            logits,
            hidden_states,
            {
                "buffer_position": present_position,
                "buffer_context": present_context,
                "buffer_sample_ids": present_sample_ids,
                "buffer_num_segments": present_num_segments,
                "buffer_segments": present_segments,
                "buffer": present_key_values,
            },
        )
| 9,873 | 39.970954 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/__init__.py | from .ant import CPMAntConfig, CPMAnt
from .bee import CPMBeeConfig, CPMBee
from .ant_torch import CPMAntTorch
from .bee_torch import CPMBeeTorch
| 146 | 28.4 | 37 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/ant_torch.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..native_layers import Encoder, Embedding, SegmentPositionEmbedding
from .ant import CPMAntConfig
class CPMAntTorch(torch.nn.Module):
    """Pure-PyTorch CPM-Ant model.

    Front-end is the sum of three embeddings — a per-(task, position) prompt
    embedding for the first ``prompt_length`` ids, the token embedding for the
    rest, and a segment embedding — followed by a transformer ``Encoder`` with
    a segment-aware relative position bias.
    """

    def __init__(self, config: CPMAntConfig):
        super().__init__()
        self.encoder = Encoder(
            num_layers=config.num_layers,
            dim_model=config.dim_model,
            dim_ff=config.dim_ff,
            num_heads=config.num_heads,
            dim_head=config.dim_head,
            dtype=config.dtype,
            eps=config.eps,
            dropout_p=config.dropout_p,
            mask_modules=config.mask_modules,
        )
        # one row per (prompt type, prompt position) pair
        self.prompt_embedding = Embedding(
            vocab_size=config.prompt_types * config.prompt_length,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.segment_embedding = Embedding(
            vocab_size=config.segment_types,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.input_embedding = Embedding(
            vocab_size=config.vocab_size,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.position_bias = SegmentPositionEmbedding(
            num_heads=config.num_heads,
            num_segments=config.segment_types,
            num_buckets=config.position_bias_num_buckets,
            max_distance=config.position_bias_max_distance,
            bidirectional=True,
            dtype=config.dtype,
        )
        self.prompt_length = config.prompt_length

    def forward(
        self,
        input: torch.Tensor,  # (batch, seqlen)
        length: torch.Tensor,  # (batch)
        context: torch.Tensor,  # (batch, seqlen)
        position: torch.Tensor,  # (batch, seqlen)
        segment: torch.Tensor,  # (batch, seqlen)
        span: torch.Tensor,  # (batch, seqlen)
    ):
        """Full (non-cached) forward pass; returns ``(logits, hidden_states)``."""
        batch = input.size(0)
        seqlen = input.size(1)
        # first `prompt_length` ids index the prompt table, the rest are tokens
        input_prompt = input[:, : self.prompt_length].contiguous()
        input_ids = input[:, self.prompt_length :].contiguous()
        prompt_states = self.prompt_embedding(input_prompt)
        hidden_states = self.input_embedding(input_ids)
        segment_states = self.segment_embedding(segment)
        hidden_states = torch.cat([prompt_states, hidden_states], 1) + segment_states
        with torch.no_grad():
            device = input.device
            # lower-triangular causal mask
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            # context tokens attend bidirectionally, generated tokens causally
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            # tokens only attend within the same span
            attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
            # mask out padding past each sequence's length
            mask_1d = (
                torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
        position_bias = self.position_bias(position, position, segment, segment)
        hidden_states = self.encoder(hidden_states, attention_mask, position_bias)
        logits = self.input_embedding.projection(hidden_states)
        return logits, hidden_states

    def inference(
        self,
        input: torch.Tensor,  # (batch, seqlen)
        length: torch.Tensor,  # (batch)
        context: torch.Tensor,  # (batch, seqlen)
        position: torch.Tensor,  # (batch, seqlen)
        segment: torch.Tensor,  # (batch, seqlen)
        span: torch.Tensor,  # (batch, seqlen)
        past_key_values=None,  # num_layers * 2 * (batch, num_heads, seqlen, dim_head)
    ):
        """Incremental forward pass with key/value caching for generation.

        On the first call (``past_key_values is None``) the full prompt+token
        input is embedded; on subsequent calls only the new tokens are, and
        the masks are sliced to the new rows.  Returns
        ``(logits, hidden_states, present_key_values)``.
        """
        batch = input.size(0)
        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * self.encoder.num_layers)
            input_prompt = input[:, : self.prompt_length].contiguous()
            input_ids = input[:, self.prompt_length :].contiguous()
            prompt_states = self.prompt_embedding(input_prompt)
            hidden_states = self.input_embedding(input_ids)
            segment_states = self.segment_embedding(segment)
            hidden_states = torch.cat([prompt_states, hidden_states], 1) + segment_states
        else:
            past_length = past_key_values[0][0].size(-2)
            segment_states = self.segment_embedding(segment)
            # only the last segment embedding applies to the new token(s)
            hidden_states = self.input_embedding(input) + segment_states[:, -1:, :]
        seqlen = past_length + input.size(1)
        with torch.no_grad():
            device = input.device
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
            # mask for left padding: valid positions are counted from the right
            mask_1d = (
                torch.tensor(list(range(seqlen))[::-1], device=device)[None, :].repeat(batch, 1)
                < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
        position_bias = self.position_bias(position, position, segment, segment)
        # keep only the rows for the newly added tokens
        attention_mask = attention_mask[:, past_length:, :]
        position_bias = position_bias[:, :, past_length:, :]
        hidden_states, present_key_values = self.encoder(
            hidden_states, attention_mask, position_bias, True, past_key_values
        )
        logits = self.input_embedding.projection(hidden_states)
        return logits, hidden_states, present_key_values
| 6,552 | 37.547059 | 96 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/training_tasks/__init__.py | from . import ant
from . import bee
| 36 | 11.333333 | 17 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/training_tasks/ant/pretrain.py | # coding=utf-8
# Copyright 2020 The OpenBMB team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.utils.data as data
import random
import numpy as np
class CPMAntPretrainDataset(data.Dataset):
    """Iterable pretraining dataset for CPM-Ant.

    Reads raw records from ``ctx`` (a reader with ``read()``), converts each
    into (input ids, targets, length, context/position/segment/task arrays),
    and prepends a task-specific prompt segment of ``prompt_length`` ids.
    """

    def __init__(self, ctx, max_length=1024, prompt_length=32, tokenizer=None):
        self.ctx = ctx
        # total budget includes the prepended prompt ids
        self.max_length = max_length + prompt_length
        self.prompt_length = prompt_length
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.ctx)

    @property
    def dataset(self):
        # expose the underlying reader
        return self.ctx

    def __get_item_data(self, raw_data):
        """Decode one raw record into model inputs.

        Record layout: [global_task, n_segment, (len, type, task) * n_segment,
        token ids...].  Returns a 7-tuple of all ``None`` when the record does
        not fit into the length budget.
        """
        global_task = raw_data[0]
        n_segment = raw_data[1]
        len_info = n_segment * 3 + 2
        segment_len = raw_data[2:len_info:3]
        segment_type = raw_data[3:len_info:3]
        segment_task = raw_data[4:len_info:3]
        ctx = raw_data[len_info:]
        if ctx.shape[0] > self.max_length - self.prompt_length:
            return None, None, None, None, None, None, None
        len_ctx = min(ctx.shape[0], self.max_length - self.prompt_length)
        # context_inp[j] == True means token j is given as context (not predicted)
        context_inp = np.full(len_ctx, True)
        position_inp = np.arange(len_ctx, dtype=np.int64)
        segment_inp = np.full(len_ctx, 0, dtype=np.int64)
        task_inp = np.full(len_ctx, 0, dtype=np.int64)
        tgt = np.full(len_ctx, -100, dtype=np.int64)
        # for each segment
        segment_begin = 0
        for i in range(n_segment):
            segment_end = segment_begin + segment_len[i]
            task = segment_task[i]
            # generate target
            if task == 0:
                # random token masking (MLM-style) within the segment
                num_mask = random.randint(1, segment_len[i] - 1)
                mask_idx = (
                    np.random.choice(segment_len[i] - 1, num_mask, replace=False) + segment_begin
                )
                context_inp[mask_idx + 1] = False
                assert segment_type[i] == 1
            elif task == 1:
                # mask a random-length suffix of the segment (LM-style)
                num_mask = random.randint(1, segment_len[i] - 1)
                context_inp[segment_end - num_mask : segment_end] = False
                assert segment_type[i] == 2
            elif task == 3:
                # NOTE(review): this masks context_inp[1:] over the WHOLE
                # sequence, not just this segment — confirm this is intended
                if segment_type[i] == 2:
                    context_inp[1:] = False
            elif task == 4:
                if segment_type[i] == 3:
                    context_inp[1:] = False
            task_inp[segment_begin:segment_end] = task
            segment_inp[segment_begin:segment_end] = segment_type[i]
            # target at position j is the token at j+1 where it is masked
            tgt[segment_begin : segment_end - 1] = np.where(
                context_inp[segment_begin + 1 : segment_end],
                -100,
                ctx[segment_begin + 1 : segment_end],
            )
            segment_begin = segment_end
        # prepend prompt segment
        context_inp = np.concatenate((np.full(self.prompt_length, True), context_inp))
        position_inp = np.concatenate(
            (
                np.arange(self.prompt_length, dtype=np.int64),
                position_inp + self.prompt_length,
            )
        )
        segment_inp = np.concatenate((np.full(self.prompt_length, 0, dtype=np.int64), segment_inp))
        task_inp = np.concatenate((np.full(self.prompt_length, 0, dtype=np.int64), task_inp))
        tgt = np.concatenate((np.full(self.prompt_length, -100, dtype=np.int64), tgt))
        # prompt ids are offset by global_task so each task gets its own rows
        inp = np.concatenate(
            (
                np.arange(self.prompt_length, dtype=np.int64) + self.prompt_length * global_task,
                ctx,
            )
        )
        return inp, tgt, inp.shape[0], context_inp, position_inp, segment_inp, task_inp

    def __iter__(self):
        # NOTE(review): over-long records yield a tuple of Nones here —
        # presumably the consumer filters them out; confirm against the caller.
        while True:
            ctx = self.ctx.read()
            (
                th_ctx,
                th_tgt,
                len_ctx,
                context_ctx,
                position_ctx,
                segment_ctx,
                task_ctx,
            ) = self.__get_item_data(ctx)
            yield th_ctx, th_tgt, len_ctx, context_ctx, position_ctx, segment_ctx, task_ctx
| 4,499 | 37.135593 | 99 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/training_tasks/ant/__init__.py | from .pretrain import CPMAntPretrainDataset
| 44 | 21.5 | 43 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/training_tasks/bee/pretrain.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import multiprocessing
import os
from queue import Empty
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from typing_extensions import TypedDict
from ...dataset import DistributedDataset
from ...tokenizers import CPMBeeTokenizer
from ...utils.config import load_dataset_config
import numpy as np
import time
from numpy.typing import NDArray
import torch
import bmtrain as bmt
import importlib.machinery
import importlib.util
import types
import random
class _MixedDatasetConfig(TypedDict):
    """Configuration entry for one dataset in the training mixture."""

    weight: float  # sampling weight (combined with `lines` for mixing)
    path: str  # dataset location on disk
    transforms: Union[List[Dict[str, Any]], str]  # mapping dicts, or a transform-script path
    task_name: str
    dataset_name: str
    incontext_weight: List[float]  # probabilities of drawing 0,1,2,... in-context samples
    lines: int  # number of records in the dataset
    dataset: DistributedDataset  # open reader bound to `path`
CPMBeeInputType = Union[str, Dict[str, "CPMBeeInputType"]]
class _DictTree(TypedDict):
    """Node of the tree built from a nested-dict input sample."""

    value: str  # dict key (inner node) or string leaf content
    children: List["_DictTree"]
    depth: int  # distance from <root>
    segment_id: int  # index into the flat `segments` list
    need_predict: bool  # True for leaves under <root>.<ans> (targets)
class _PrevExtTableStates(TypedDict):
    """Extension-table state carried across samples of one instance."""

    ext_table: Dict[int, str]  # raw token id -> surface form for unk/special tokens
    token_id_table: Dict[str, Dict[int, int]]  # token name -> {raw id -> sub id}
class _TransformFuncDict(TypedDict):
    """Cache entry for a hot-reloadable transform script."""

    loader: importlib.machinery.SourceFileLoader
    module: types.ModuleType
    last_m: float  # mtime at last (re)load; used to detect script changes
_TransformFunction = Callable[[CPMBeeInputType, int, random.Random], CPMBeeInputType]
class CPMBeeBatch(TypedDict):
    """One packed training batch as produced by ``_MixedDatasetBatchPacker.pack_batch``."""

    inputs: NDArray[np.int32]  # (batch, max_length) token ids
    inputs_sub: NDArray[np.int32]  # (batch, max_length) ext-table sub ids
    length: NDArray[np.int32]  # (batch,) used length per row
    context: NDArray[np.bool_]  # True where the token is given context
    sample_ids: NDArray[np.int32]  # 0 = main sample, >0 = in-context sample index
    num_segments: NDArray[np.int32]
    segment_ids: NDArray[np.int32]
    segment_rel_offset: NDArray[np.int32]  # offset into the row's segment_rel table
    segment_rel: NDArray[np.int32]  # (batch, max_rel) flattened relation buckets
    spans: NDArray[np.int32]  # which packed instance each token belongs to
    target: NDArray[np.int32]  # shifted prediction targets; -100 = ignore
    ext_ids: NDArray[np.int32]  # extension-table raw token ids
    ext_sub: NDArray[np.int32]  # extension-table sub ids
    task_ids: NDArray[np.int32]  # per-token index into `task_names`
    task_names: List[str]
    raw_data: List[Any]  # original (untokenized) samples for logging/debugging
def rel_to_bucket(n_up: int, n_down: int, max_depth: int = 8):
    """Map an (ancestor-steps, descendant-steps) pair to a relation bucket id.

    Bucket 0 is the self-relation; bucket 1 is reserved for in-context
    samples, so every other flat index is shifted up by one.
    """
    flat_index = n_up * max_depth + n_down
    # shift past the reserved in-context bucket for all non-self relations
    return flat_index if flat_index == 0 else flat_index + 1
def convert_data_to_id(
    tokenizer: CPMBeeTokenizer,
    data: Any,
    prev_ext_states: Optional[_PrevExtTableStates] = None,
    shuffle_answer: bool = True,
    max_depth: int = 8,
):
    """Tokenize a nested-dict sample into flat id arrays plus segment relations.

    Builds a tree rooted at ``<root>`` from ``data`` (dict keys are inner
    nodes, strings are leaves), assigns each node a segment id, computes
    pairwise ancestor/descendant relation buckets between segments, and
    encodes every segment's text.  Unknown/special tokens are remapped into
    an extension table carried via ``prev_ext_states`` so multiple samples
    of one instance share a consistent numbering.

    Returns ``(ids, id_subs, context, segs, segment_rel, num_segments,
    curr_ext_table_states)``.
    """
    root: _DictTree = {
        "value": "<root>",
        "children": [],
        "depth": 0,
        "segment_id": 0,
        "need_predict": False,
    }
    segments = [root]

    def _build_dict_tree(data: CPMBeeInputType, depth: int, need_predict: bool) -> List[_DictTree]:
        if isinstance(data, dict):
            ret_list: List[_DictTree] = []
            curr_items = list(data.items())
            if need_predict and shuffle_answer:
                # randomize answer-key order so the model cannot memorize it
                access_idx = np.arange(len(curr_items))
                np.random.shuffle(access_idx)
                curr_items = [curr_items[idx] for idx in access_idx]
            for k, v in curr_items:
                child_info: _DictTree = {
                    "value": k,
                    "children": [],
                    "depth": depth,
                    "segment_id": len(segments),
                    "need_predict": False,  # only leaves are contexts
                }
                segments.append(child_info)
                child_info["children"] = _build_dict_tree(
                    v, depth + 1, need_predict or (depth == 1 and k == "<ans>")
                )  # elements in <root>.<ans>
                ret_list.append(child_info)
            return ret_list
        else:
            assert isinstance(data, str), "Invalid data {}".format(data)
            ret: _DictTree = {
                "value": data,
                "children": [],
                "depth": depth,
                "segment_id": len(segments),
                "need_predict": need_predict,
            }
            segments.append(ret)
            return [ret]

    root["children"] = _build_dict_tree(data, 1, False)

    num_segments = len(segments)
    # flattened (num_segments x num_segments) table of relation buckets
    segment_rel = np.zeros((num_segments * num_segments,), dtype=np.int32)

    def _build_segment_rel(node: _DictTree) -> List[Tuple[int, int]]:
        ret: List[Tuple[int, int]] = [(node["segment_id"], node["depth"])]
        for child in node["children"]:
            sub = _build_segment_rel(child)
            for seg_id_1, depth_1 in sub:
                for seg_id_2, depth_2 in ret:
                    # steps up/down relative to the common ancestor `node`,
                    # clipped to max_depth - 1
                    n_up = min(depth_1 - node["depth"], max_depth - 1)
                    n_down = min(depth_2 - node["depth"], max_depth - 1)
                    segment_rel[seg_id_1 * num_segments + seg_id_2] = rel_to_bucket(
                        n_up, n_down, max_depth=max_depth
                    )
                    segment_rel[seg_id_2 * num_segments + seg_id_1] = rel_to_bucket(
                        n_down, n_up, max_depth=max_depth
                    )
            ret.extend(sub)
        return ret

    _build_segment_rel(root)

    input_ids: List[int] = []
    input_id_subs: List[int] = []
    segment_bound: List[Tuple[int, int]] = []

    ext_table: Dict[int, str] = {}
    token_id_table: Dict[str, Dict[int, int]] = {}
    if prev_ext_states is not None:
        ext_table = prev_ext_states["ext_table"]
        token_id_table = prev_ext_states["token_id_table"]

    for seg in segments:
        tokens, ext_table = tokenizer.encode(seg["value"], ext_table)

        token_id_subs = []
        reid_token_ids = []
        for idx in tokens:
            if idx in ext_table:
                # unk or special token
                token = ext_table[idx]
                if token.startswith("<") and token.endswith(">"):
                    # special token
                    if "_" in token:
                        # e.g. "<mask_3>" is grouped under the "<mask>" name
                        token_name = token[1:-1].split("_", maxsplit=1)[0]
                    else:
                        token_name = token[1:-1]
                    token_name = "<{}>".format(token_name)
                else:
                    token_name = "<unk>"

                if token_name not in token_id_table:
                    token_id_table[token_name] = {}
                if idx not in token_id_table[token_name]:
                    token_id_table[token_name][idx] = len(token_id_table[token_name])
                if token_name not in tokenizer.encoder:
                    raise ValueError("Invalid token {}".format(token))
                reid_token_ids.append(tokenizer.encoder[token_name])
                token_id_subs.append(token_id_table[token_name][idx])
            else:
                reid_token_ids.append(idx)
                token_id_subs.append(0)
        tokens = [tokenizer.bos_id] + reid_token_ids
        token_id_subs = [0] + token_id_subs
        if not seg["need_predict"]:
            tokens = tokens + [tokenizer.eos_id]
            token_id_subs = token_id_subs + [0]
        else:
            # no eos
            pass
        begin = len(input_ids)
        input_ids.extend(tokens)
        input_id_subs.extend(token_id_subs)
        end = len(input_ids)
        segment_bound.append((begin, end))

    ids = np.array(input_ids, dtype=np.int32)
    id_subs = np.array(input_id_subs, dtype=np.int32)
    segs = np.zeros((ids.shape[0],), dtype=np.int32)
    context = np.zeros((ids.shape[0],), dtype=np.int8)
    for i, (begin, end) in enumerate(segment_bound):
        if not segments[i]["need_predict"]:
            # segments that are not predicted count as given context
            context[begin:end] = 1
        segs[begin:end] = i

    curr_ext_table_states: _PrevExtTableStates = {
        "ext_table": ext_table,
        "token_id_table": token_id_table,
    }
    return ids, id_subs, context, segs, segment_rel, num_segments, curr_ext_table_states
def _dataset_identity(c: _MixedDatasetConfig):
    """Return the unique "<task>.<dataset>" identifier for a dataset config."""
    return "{task}.{dataset}".format(task=c["task_name"], dataset=c["dataset_name"])
class _MixedDatasetBatchPacker:
    """Packs variable-length tokenized instances into fixed-size batches.

    Instances built from mixed datasets are bin-packed (best-fit) into rows
    of length ``max_length``; when more than ``batch_size`` rows are full,
    ``pack_batch`` flushes them into a ``CPMBeeBatch``.
    """

    def __init__(
        self,
        batch_size: int,
        max_length: int,
        tokenizer: CPMBeeTokenizer,
        max_depth: int = 16,
    ) -> None:
        self._batch_size = batch_size
        self._max_length = max_length
        self._max_depth = max_depth
        self.tokenizer = tokenizer
        self._transform_func_table: Dict[str, _TransformFuncDict] = {}

        # per-row pending state; parallel lists, one entry per packed row
        self._inputs: List[NDArray[np.int32]] = []
        self._inputs_sub: List[NDArray[np.int32]] = []
        self._context: List[NDArray[np.int8]] = []
        self._sample_ids: List[NDArray[np.int32]] = []
        self._segments: List[NDArray[np.int32]] = []
        self._num_segments: List[NDArray[np.int32]] = []
        self._segment_rel_offset: List[NDArray[np.int32]] = []
        self._segment_rel: List[NDArray[np.int32]] = []
        self._spans: List[List[int]] = []
        self._task_ids: List[List[str]] = []
        self._raw_data: List[List[Any]] = []

    def __len__(self):
        # number of partially/fully packed rows currently buffered
        return len(self._inputs)

    def apply_transform(
        self,
        data: CPMBeeInputType,
        transform: Union[Dict[str, Any], Callable[[CPMBeeInputType], CPMBeeInputType], None],
    ) -> CPMBeeInputType:
        """Apply a transform to a raw sample.

        ``transform`` may be ``None`` (identity), a callable, or a mapping
        dict whose string leaves are either constants or ``$``-prefixed
        source paths (with ``*`` wildcards) copied from ``data``.
        """
        if transform is None:
            return data
        if not isinstance(transform, dict):
            # transform function
            return transform(data)

        mapping_list: List[Tuple[str, str]] = []

        def _walk_transform_dict(data: Union[Dict[str, Any], str], prefix: str = ""):
            # flatten the transform dict into (dotted-target, source) pairs
            if isinstance(data, dict):
                for k, v in data.items():
                    if len(prefix) > 0:
                        _walk_transform_dict(v, prefix + "." + k)
                    else:
                        _walk_transform_dict(v, k)
            else:
                assert isinstance(data, str), "Invalid transform {}".format(data)
                mapping_list.append((prefix, data))

        _walk_transform_dict(transform)

        expanded_mapping_list: List[Tuple[str, Any]] = []

        def _expand_mapping(
            data: CPMBeeInputType, stars: List[str], path: List[str], target: List[str]
        ):
            # resolve `*` wildcards in the source path; each matched key is
            # substituted into the corresponding `*` in the target path
            if len(path) == 0:
                num_stars = 0
                for it in target:
                    if it == "*":
                        num_stars += 1
                if num_stars != len(stars):
                    raise ValueError("Invalid transform {}".format(".".join(target)))

                nw_tgt = []
                num_stars = 0
                for it in target:
                    if it == "*":
                        nw_tgt.append(stars[num_stars])
                        num_stars += 1
                    else:
                        nw_tgt.append(it)
                expanded_mapping_list.append((".".join(nw_tgt), data))
            else:
                if not isinstance(data, dict):
                    raise ValueError("Invalid data {}".format(data))
                if path[0] == "*":
                    for k, v in data.items():
                        _expand_mapping(v, stars + [k], path[1:], target)
                else:
                    _expand_mapping(data[path[0]], stars, path[1:], target)

        # expand mapping list
        for tgt, src in mapping_list:
            if src.startswith("$"):
                # copy from src
                _expand_mapping(data, [], src[1:].split("."), tgt.split("."))
            else:
                if "*" in tgt:
                    raise ValueError("Constant value is not allowed to have `*` in prefix")
                expanded_mapping_list.append((tgt, src))

        # rebuild the output dict from the expanded (target, value) pairs
        # NOTE(review): `cur = cur[tgt[0]]` raises KeyError if an intermediate
        # dict was not created by an earlier mapping — presumably configs only
        # use nested targets whose parents are produced first; confirm.
        ret = {}
        for tgt, val in expanded_mapping_list:
            tgt = tgt.split(".")
            cur = ret
            while len(tgt) > 1:
                cur = cur[tgt[0]]
                tgt = tgt[1:]
            cur[tgt[0]] = val
        return ret

    def data_to_id(
        self,
        data: Any,
        prev_ext_states: Optional[_PrevExtTableStates] = None,
        shuffle_answer: bool = True,
    ):
        """Tokenize one sample via the module-level ``convert_data_to_id``."""
        return convert_data_to_id(
            self.tokenizer, data, prev_ext_states, shuffle_answer, self._max_depth
        )

    def _ensure_transform_function(
        self, module_name: str, transform_script_path: str
    ) -> _TransformFunction:
        """Load (and hot-reload on mtime change) a transform script's ``transform``."""
        module_name = "cpm_live.transforms.{}".format(module_name)
        if transform_script_path not in self._transform_func_table:
            loader = importlib.machinery.SourceFileLoader(module_name, transform_script_path)
            spec = importlib.util.spec_from_loader(loader.name, loader)
            if spec is None:
                raise RuntimeError("spec is none! {}".format(module_name))
            mod = importlib.util.module_from_spec(spec)
            self._transform_func_table[transform_script_path] = {
                "loader": loader,
                "module": mod,
                "last_m": 0,
            }

        transform_script_info = self._transform_func_table[transform_script_path]
        curr_m_time = float(
            transform_script_info["loader"].path_stats(transform_script_path)["mtime"]
        )
        if curr_m_time > transform_script_info["last_m"]:
            # script changed on disk: re-execute it in the cached module
            transform_script_info["last_m"] = curr_m_time
            transform_script_info["loader"].exec_module(transform_script_info["module"])
        transform_func = getattr(transform_script_info["module"], "transform", None)
        if transform_func is None:

            def _empty_transform_func(data: CPMBeeInputType, num_sample: int, r: random.Random):
                raise NotImplementedError(
                    "Transform func for dataset {} not implemented".format(module_name)
                )

            return _empty_transform_func
        else:
            return transform_func

    def build_instance(self, config: _MixedDatasetConfig):
        """Build one packed instance: a main sample plus sampled in-context samples.

        Returns the concatenated id/bookkeeping arrays and the raw data dict.
        """
        # draw the number of in-context samples from the configured weights
        _sample_weight = np.array(config["incontext_weight"], dtype=np.float32)
        _sample_weight = _sample_weight / _sample_weight.sum()
        num_incontext = np.random.choice(_sample_weight.shape[0], p=_sample_weight)
        ds = config["dataset"]
        transforms = config["transforms"]
        if isinstance(transforms, str):
            # retry forever: the transform script may be mid-update on disk
            while True:
                try:
                    if not os.path.exists(transforms):
                        raise RuntimeError(
                            "transform script file {} not exists".format(transforms)
                        )
                    # load transform script
                    transform_func = self._ensure_transform_function(
                        _dataset_identity(config), transforms
                    )
                    seed = random.random()
                    break
                except Exception as e:
                    print(e)
                    time.sleep(10)

            def _transform(data: CPMBeeInputType):
                # fixed seed per instance so all its samples share one RNG stream
                r = random.Random(seed)
                return transform_func(data, num_incontext, r)

            transform = _transform
        elif len(transforms) == 0:
            transform = None
        else:
            transform = transforms[np.random.choice(len(transforms))]

        raw_data = {}
        while True:
            inp = ds.read()
            inp = self.apply_transform(inp, transform)

            (
                input_ids,
                input_id_subs,
                context,
                segment_ids,
                segment_rel,
                n_segments,
                table_states,
            ) = self.data_to_id(inp)
            if input_ids.shape[0] > self._max_length:
                # too long
                continue
            input_ids = input_ids[: self._max_length]
            context = context[: self._max_length]
            segment_ids = segment_ids[: self._max_length]
            raw_data["input"] = inp
            raw_data["samples"] = []
            break

        sample_ids = np.zeros(input_ids.shape, dtype=np.int32)
        segment_rel_offset = np.zeros(input_ids.shape, dtype=np.int32)
        num_segments = np.full(input_ids.shape, n_segments, dtype=np.int32)
        for i in range(num_incontext):
            if input_ids.shape[0] >= self._max_length:
                # early break
                break

            sample = ds.read()
            sample = self.apply_transform(sample, transform)
            (
                sample_input_ids,
                sample_id_subs,
                _,
                sample_segments,
                sample_rel,
                n_segments,
                table_states,
            ) = self.data_to_id(sample, table_states)

            if input_ids.shape[0] + sample_input_ids.shape[0] > self._max_length:
                # too long, break
                break
            raw_data["samples"].append(sample)
            input_ids = np.concatenate([input_ids, sample_input_ids], axis=0)
            input_id_subs = np.concatenate([input_id_subs, sample_id_subs], axis=0)
            # in-context samples are always fully visible context
            context = np.concatenate(
                [context, np.ones(sample_input_ids.shape, dtype=np.int8)], axis=0
            )
            segment_ids = np.concatenate([segment_ids, sample_segments], axis=0)
            segment_rel_offset = np.concatenate(
                [
                    segment_rel_offset,
                    np.full(sample_input_ids.shape, segment_rel.shape[0], dtype=np.int32),
                ],
                axis=0,
            )
            segment_rel = np.concatenate([segment_rel, sample_rel], axis=0)
            sample_ids = np.concatenate(
                [sample_ids, np.full(sample_input_ids.shape, i + 1, dtype=np.int32)], axis=0
            )
            num_segments = np.concatenate(
                [num_segments, np.full(sample_input_ids.shape, n_segments, dtype=np.int32)], axis=0
            )
        return (
            input_ids,
            input_id_subs,
            context,
            segment_ids,
            segment_rel_offset,
            segment_rel,
            sample_ids,
            num_segments,
            raw_data,
        )

    def pack_batch(self, force: bool = False) -> CPMBeeBatch:
        """Flush up to ``batch_size`` buffered rows into padded batch arrays.

        With ``force=True`` a smaller final batch is emitted instead of
        raising ``RuntimeError`` when fewer rows are buffered.
        """
        # pack batch
        if len(self._inputs) < self._batch_size:
            if not force:
                raise RuntimeError("Batch insufficient")
            batch_size = len(self._inputs)
        else:
            batch_size = self._batch_size
        inputs = np.zeros((batch_size, self._max_length), dtype=np.int32)
        inputs_sub = np.zeros((batch_size, self._max_length), dtype=np.int32)
        context = np.zeros((batch_size, self._max_length), dtype=np.int8)
        sample_ids = np.zeros((batch_size, self._max_length), dtype=np.int32)
        segments = np.zeros((batch_size, self._max_length), dtype=np.int32)
        num_segments = np.zeros((batch_size, self._max_length), dtype=np.int32)
        segment_rel_offset = np.zeros((batch_size, self._max_length), dtype=np.int32)
        tgt = np.full((batch_size, self._max_length), -100, dtype=np.int32)

        max_rel = 0
        for i in range(batch_size):
            max_rel = max(max_rel, self._segment_rel[i].shape[0])
        segment_rel = np.zeros((batch_size, max_rel), dtype=np.int32)
        spans = np.zeros((batch_size, self._max_length), dtype=np.int32)
        length = np.zeros((batch_size,), dtype=np.int32)
        task_ids = np.zeros((batch_size, self._max_length), dtype=np.int32)

        # assign a stable integer id to every task name in this batch
        all_task_names: Set[str] = set()
        for i in range(batch_size):
            for task_name in self._task_ids[i]:
                all_task_names.add(task_name)
        task_names: List[str] = list(all_task_names)
        task_name_to_id = {name: i for i, name in enumerate(task_names)}

        batch_ext_table_map: Dict[Tuple[int, int], int] = {}
        batch_ext_table_ids: List[int] = []
        batch_ext_table_sub: List[int] = []
        raw_data_list: List[Any] = []
        for i in range(batch_size):
            instance_length = self._inputs[i].shape[0]
            rel_size = self._segment_rel[i].shape[0]
            inputs[i, :instance_length] = self._inputs[i]
            inputs_sub[i, :instance_length] = self._inputs_sub[i]
            context[i, :instance_length] = self._context[i]
            sample_ids[i, :instance_length] = self._sample_ids[i]
            segments[i, :instance_length] = self._segments[i]
            num_segments[i, :instance_length] = self._num_segments[i]
            segment_rel_offset[i, :instance_length] = self._segment_rel_offset[i]
            segment_rel[i, :rel_size] = self._segment_rel[i]

            span_begin = 0
            for span_id, (span_end, task_name) in enumerate(zip(self._spans[i], self._task_ids[i])):
                spans[i, span_begin:span_end] = span_id
                task_ids[i, span_begin:span_end] = task_name_to_id[task_name]
                span_begin = span_end
            length[i] = instance_length
            raw_data_list.extend(self._raw_data[i])

            # build shifted targets; ext-table tokens get ids past vocab_size
            for j in range(instance_length):
                idx, idx_sub = self._inputs[i][j], self._inputs_sub[i][j]
                tgt_idx = idx
                if idx_sub > 0:
                    # need to be in ext table
                    if (idx, idx_sub) not in batch_ext_table_map:
                        batch_ext_table_map[(idx, idx_sub)] = len(batch_ext_table_map)
                        batch_ext_table_ids.append(idx)
                        batch_ext_table_sub.append(idx_sub)
                    tgt_idx = batch_ext_table_map[(idx, idx_sub)] + self.tokenizer.vocab_size
                if j > 1 and context[i, j - 1] == 0:
                    if idx != self.tokenizer.bos_id:
                        tgt[i, j - 1] = tgt_idx
                    else:
                        # next segment starts: predict eos instead of its bos
                        tgt[i, j - 1] = self.tokenizer.eos_id
            if context[i, instance_length - 1] == 0:
                tgt[i, instance_length - 1] = self.tokenizer.eos_id

        if len(batch_ext_table_map) == 0:
            # placeholder
            batch_ext_table_ids.append(0)
            batch_ext_table_sub.append(1)

        # drop the flushed rows, keep the remainder buffered
        self._inputs = self._inputs[batch_size:]
        self._inputs_sub = self._inputs_sub[batch_size:]
        self._context = self._context[batch_size:]
        self._sample_ids = self._sample_ids[batch_size:]
        self._segments = self._segments[batch_size:]
        self._num_segments = self._num_segments[batch_size:]
        self._segment_rel_offset = self._segment_rel_offset[batch_size:]
        self._segment_rel = self._segment_rel[batch_size:]
        self._spans = self._spans[batch_size:]
        self._task_ids = self._task_ids[batch_size:]
        self._raw_data = self._raw_data[batch_size:]
        return {
            "inputs": inputs,
            "inputs_sub": inputs_sub,
            "length": length,
            "context": context > 0,
            "sample_ids": sample_ids,
            "num_segments": num_segments,
            "segment_ids": segments,
            "segment_rel_offset": segment_rel_offset,
            "segment_rel": segment_rel,
            "spans": spans,
            "target": tgt,
            "ext_ids": np.array(batch_ext_table_ids, dtype=np.int32),
            "ext_sub": np.array(batch_ext_table_sub, dtype=np.int32),
            "task_ids": task_ids,
            "task_names": task_names,
            "raw_data": raw_data_list,
        }

    def add_data(self, config: _MixedDatasetConfig) -> Optional[CPMBeeBatch]:
        """Build one instance from ``config``, best-fit pack it, and maybe flush.

        Returns a ``CPMBeeBatch`` when enough rows are buffered, else ``None``.
        """
        (
            input_ids,
            input_id_subs,
            context,
            segment_ids,
            segment_rel_offset,
            segment_rel,
            sample_ids,
            num_segments,
            raw_data,
        ) = self.build_instance(config)

        # add to batch: best-fit row (smallest remaining space that still fits)
        best_fit: Union[None, int] = None
        best_fit_space: Union[None, int] = None
        for i in range(len(self._inputs)):
            space = self._max_length - self._inputs[i].shape[0]
            if input_ids.shape[0] <= space:
                if best_fit_space is None:
                    best_fit = i
                    best_fit_space = space
                elif best_fit_space > space:
                    best_fit = i
                    best_fit_space = space
        if best_fit is None:
            # add a new instance
            self._inputs.append(input_ids)
            self._inputs_sub.append(input_id_subs)
            self._context.append(context)
            self._sample_ids.append(sample_ids)
            self._segments.append(segment_ids)
            self._num_segments.append(num_segments)
            self._segment_rel_offset.append(segment_rel_offset)
            self._segment_rel.append(segment_rel)
            self._spans.append([input_ids.shape[0]])
            self._task_ids.append([config["task_name"]])
            self._raw_data.append([raw_data])
        else:
            # add to existing instance
            self._inputs[best_fit] = np.concatenate([self._inputs[best_fit], input_ids], axis=0)
            self._inputs_sub[best_fit] = np.concatenate(
                [self._inputs_sub[best_fit], input_id_subs], axis=0
            )
            self._context[best_fit] = np.concatenate([self._context[best_fit], context], axis=0)
            self._sample_ids[best_fit] = np.concatenate(
                [self._sample_ids[best_fit], sample_ids], axis=0
            )
            self._segments[best_fit] = np.concatenate(
                [self._segments[best_fit], segment_ids], axis=0
            )
            self._num_segments[best_fit] = np.concatenate(
                [self._num_segments[best_fit], num_segments], axis=0
            )
            # shift rel offsets past the row's existing segment_rel entries
            self._segment_rel_offset[best_fit] = np.concatenate(
                [
                    self._segment_rel_offset[best_fit],
                    segment_rel_offset + self._segment_rel[best_fit].shape[0],
                ],
                axis=0,
            )
            self._segment_rel[best_fit] = np.concatenate(
                [self._segment_rel[best_fit], segment_rel], axis=0
            )
            self._spans[best_fit].append(self._inputs[best_fit].shape[0])
            self._task_ids[best_fit].append(config["task_name"])
            self._raw_data[best_fit].append(raw_data)

        if len(self._inputs) > self._batch_size:
            return self.pack_batch()
        else:
            # not ready
            return None
class _MixedDatasetConfigMananger:
    """Watches the dataset-mixture config file on disk and reloads it whenever
    its modification time advances past the last successfully loaded version."""

    def __init__(self, config_path: str) -> None:
        self._config_path: str = config_path
        self._config: Union[List[_MixedDatasetConfig], None] = None
        self._last_m = 0  # mtime of the most recently loaded config

    def changed(self):
        """Return True if a newer config was successfully loaded, else False.

        If the file cannot even be stat'ed, retry forever (sleeping 30s between
        attempts). If the stat succeeds but loading fails, report False without
        advancing the cached mtime so the next call retries the load.
        """
        while True:
            try:
                m_time = os.stat(self._config_path).st_mtime
            except Exception as e:
                print(
                    "Error: reading info list in _MixedDatasetConfigMananger.changed!, "
                    "self._config_path={path}, err={err}".format(
                        path=self._config_path, err=str(e)
                    )
                )
                time.sleep(30)
                continue
            if m_time <= self._last_m:
                # nothing new on disk
                return False
            try:
                self._config = load_dataset_config(self._config_path)
            except Exception as e:
                print(
                    "Error: load new config in changed, "
                    "self._config_path={path}, err={err}".format(
                        path=self._config_path, err=str(e)
                    )
                )
                return False
            # new config loaded successfully
            self._last_m = m_time
            return True

    def get_config(self) -> List[_MixedDatasetConfig]:
        """Return the cached config, loading it first if necessary."""
        if self._config is None and not self.changed():
            raise RuntimeError("Failed to load config")
        if self._config is None:
            raise RuntimeError("Failed to load config")
        return self._config
def _mixed_dataset_process(
    config_path: str,
    q_cmd: multiprocessing.Queue,
    q_cmd_out: multiprocessing.Queue,
    q_data: multiprocessing.Queue,
    rank: int,
    world_size: int,
    packer: _MixedDatasetBatchPacker,
):
    """Worker-process entry point that mixes several datasets into batches.

    Loops forever until told to stop, doing three things per iteration:

    1. Hot-reloads the mixture config when the file at ``config_path`` changes,
       reusing already-open datasets whose identity is unchanged.
    2. Serves control commands read from ``q_cmd`` ("start", "stop",
       "state_dict", "load_state_dict"), replying on ``q_cmd_out``.
    3. Samples one dataset (probability proportional to ``weight * lines``),
       feeds its next record to ``packer`` and pushes any completed batch
       onto ``q_data`` (bounded queue; skipped while full).

    Args:
        config_path: path to the JSON-ish mixture config watched for changes.
        q_cmd / q_cmd_out: command request/response queues (parent <-> worker).
        q_data: queue of packed batches consumed by the parent process.
        rank / world_size: distributed shard info forwarded to each dataset.
        packer: accumulates records and emits fixed-size batches.
    """
    # ignore SIGINT so Ctrl-C in the parent does not kill the worker mid-batch;
    # shutdown goes through the "stop" command instead.
    import signal

    signal.signal(signal.SIGINT, signal.SIG_IGN)

    config_base_path = os.path.dirname(os.path.abspath(config_path))

    def _convert_to_abs_path(transform_path: str):
        # Paths in the config may be relative to the config file's directory.
        if transform_path.startswith("/"):
            return transform_path
        else:
            return os.path.join(config_base_path, transform_path)

    def _build_sample_weights(config: List[_MixedDatasetConfig]):
        # Sampling probability of each dataset is weight * lines, normalized.
        if len(config) == 0:
            return np.array([], dtype=np.float32)
        weights = [c["weight"] * c["lines"] for c in config]
        weights = np.array(weights, dtype=np.float32)
        sm_weight = weights.sum()
        if sm_weight > 0:
            weights = weights / sm_weight
            return weights
        else:
            raise RuntimeError("Empty datasets")

    cfg_mgr = _MixedDatasetConfigMananger(config_path)
    config = cfg_mgr.get_config()

    # Open every dataset in the initial config and fill in defaults.
    for c in config:
        ds = DistributedDataset(
            _convert_to_abs_path(c["path"]),
            rank,
            world_size,
        )
        c["lines"] = ds._nlines
        c["dataset"] = ds
        if "weight" not in c:
            c["weight"] = 1.0
        if "transforms" not in c:
            c["transforms"] = []
        elif isinstance(c["transforms"], str):
            c["transforms"] = _convert_to_abs_path(c["transforms"])
        if "incontext_weight" not in c:
            c["incontext_weight"] = [1.0]

    weights = _build_sample_weights(config)

    should_stop = False
    should_start = False
    while not should_stop:
        # update config first
        if cfg_mgr.changed():
            path_ds_map: Dict[str, _MixedDatasetConfig] = {}
            nw_path_set: Set[str] = set()

            # load new config
            nw_config = cfg_mgr.get_config()

            # build path -> dataset map
            for c in config:
                path_ds_map[_dataset_identity(c)] = c

            # add new datasets
            for c in nw_config:
                if _dataset_identity(c) in path_ds_map:
                    # update values only
                    # NOTE(review): this checks "transform" but reads
                    # c["transforms"] — looks like a key typo; confirm intent.
                    if "weight" in c:
                        path_ds_map[_dataset_identity(c)]["weight"] = c["weight"]
                    if "transform" in c:
                        if isinstance(c["transforms"], str):
                            path_ds_map[_dataset_identity(c)]["transforms"] = _convert_to_abs_path(
                                c["transforms"]
                            )
                        else:
                            path_ds_map[_dataset_identity(c)]["transforms"] = c["transforms"]
                    if "incontext_weight" in c:
                        path_ds_map[_dataset_identity(c)]["incontext_weight"] = c[
                            "incontext_weight"
                        ]
                else:
                    # new dataset: open it and fill in defaults (same as startup)
                    ds = DistributedDataset(
                        _convert_to_abs_path(c["path"]),
                        rank,
                        world_size,
                    )
                    c["lines"] = ds._nlines
                    c["dataset"] = ds
                    if "weight" not in c:
                        c["weight"] = 1.0
                    if "transforms" not in c:
                        c["transforms"] = []
                    elif isinstance(c["transforms"], str):
                        c["transforms"] = _convert_to_abs_path(c["transforms"])
                    if "incontext_weight" not in c:
                        c["incontext_weight"] = [1.0]
                    path_ds_map[_dataset_identity(c)] = c
                nw_path_set.add(_dataset_identity(c))

            # remove unused datasets
            for c in config:
                if _dataset_identity(c) not in nw_path_set:
                    del path_ds_map[_dataset_identity(c)]

            # rebuild config in new-config order, then rebuild sampling weights
            config: List[_MixedDatasetConfig] = []
            for c in nw_config:
                config.append(path_ds_map[_dataset_identity(c)])
            del path_ds_map
            del nw_path_set
            del nw_config

            weights = _build_sample_weights(config)

        # get cmds: drain the command queue before producing data
        while True:
            try:
                cmd = q_cmd.get_nowait()
            except Empty:
                break
            if cmd == "stop":
                should_stop = True
                q_cmd_out.put(True)
                break
            elif cmd == "state_dict":
                # snapshot each dataset's read position, keyed by identity
                ret = OrderedDict()
                for c in config:
                    ds_name = _dataset_identity(c)
                    ret[ds_name] = c["dataset"]._state_dict()
                q_cmd_out.put(ret)
            elif cmd == "load_state_dict":
                # the state dict payload follows the command on the same queue
                state_dict = q_cmd.get()
                missing = []
                for c in config:
                    ds_name = _dataset_identity(c)
                    if ds_name in state_dict:
                        c["dataset"].load_state_dict(state_dict[ds_name], strict=False)
                    else:
                        # new dataset
                        missing.append(ds_name)
                q_cmd_out.put(missing)
            elif cmd == "start":
                should_start = True
                q_cmd_out.put(True)
            else:
                raise RuntimeError("Unknown command: {}".format(cmd))
        if should_stop:
            break

        if not should_start:
            # wait for start cmd
            time.sleep(1)
            continue

        if len(config) == 0:
            # no dataset available
            time.sleep(1)
            continue

        if q_data.full():
            # queue full
            time.sleep(1)
            continue

        # sample a dataset; re-sample if any dataset grew/shrank on disk
        ds_id: int = 0
        while True:
            ds_id = np.random.choice(weights.shape[0], p=weights)
            if config[ds_id]["dataset"]._nlines != config[ds_id]["lines"]:
                # dataset size changed: refresh line counts and weights, retry
                for c in config:
                    c["lines"] = c["dataset"]._nlines
                weights = _build_sample_weights(config)
                continue
            else:
                break

        batch = packer.add_data(config[ds_id])
        if batch is not None:
            # new batch coming
            q_data.put(batch)

    # clean queue so the parent's join() is not blocked by unread batches
    while True:
        try:
            q_data.get_nowait()
        except Empty:
            break
class MixedDataset:
    """Parent-process front-end for the background dataset-mixing worker.

    Spawns ``_mixed_dataset_process`` in a child process and talks to it over
    three queues: commands in, command replies out, and a bounded (maxsize=1)
    data queue of packed batches. Iterating the object yields batches forever.
    """

    def __init__(
        self,
        config_path: str,
        batch_size: int,
        max_length: int,
        tokenizer: CPMBeeTokenizer,
        max_depth: int = 16,
    ) -> None:
        self._q_cmd = multiprocessing.Queue()
        self._q_cmd_out = multiprocessing.Queue()
        # maxsize=1 keeps at most one prefetched batch; the worker skips
        # producing while the queue is full.
        self._q_data = multiprocessing.Queue(maxsize=1)
        self._packer = _MixedDatasetBatchPacker(batch_size, max_length, tokenizer, max_depth)
        self._p = multiprocessing.Process(
            target=_mixed_dataset_process,
            args=(
                config_path,
                self._q_cmd,
                self._q_cmd_out,
                self._q_data,
                bmt.rank(),
                bmt.world_size(),
                self._packer,
            ),
        )
        self._p.start()
        self._closed = False

    def close(self):
        """Ask the worker to stop and join it (idempotent)."""
        if not self._closed:
            self._closed = True
            self._q_cmd.put("stop")
            assert self._q_cmd_out.get(), "Failed to stop process"
            self._p.join()

    @property
    def closed(self):
        # True once close() has run (or begun running)
        return self._closed

    def start(self):
        """Tell the worker to begin producing batches; returns its ack."""
        self._q_cmd.put("start")
        return self._q_cmd_out.get()

    def state_dict(self):
        """Collect per-dataset read-position state, gathered across ranks.

        With world_size == 1 the local state is returned with a leading
        rank dimension added in place. Otherwise each rank pads its "states"
        tensor to the global maximum length (pad value -1) so that
        all_gather produces rectangular tensors.
        """
        self._q_cmd.put("state_dict")
        states = self._q_cmd_out.get()
        if not isinstance(states, OrderedDict):
            raise RuntimeError("Invalid state dict {}".format(states))
        if bmt.world_size() == 1:
            for val in states.values():
                val["states"].unsqueeze_(0)
                val["block"].unsqueeze_(0)
            return states

        ret = OrderedDict()
        for k, v in states.items():
            num_unused_block = v["states"].size(0)
            gpu_num_unused_block = torch.tensor([num_unused_block], dtype=torch.long).cuda()
            max_unused_blocks = (
                bmt.distributed.all_reduce(gpu_num_unused_block, op="max").cpu().item()
            )
            if max_unused_blocks == 0:
                # keep at least one slot so all_gather has a non-empty tensor
                max_unused_blocks = 1
            gpu_states = torch.full((max_unused_blocks,), -1, dtype=torch.long).cuda()
            gpu_states[:num_unused_block] = v["states"].cuda()

            gpu_block = v["block"].cuda()
            global_states = bmt.distributed.all_gather(
                gpu_states
            ).cpu()  # (world_size, max_unused_blocks)
            global_block = bmt.distributed.all_gather(gpu_block).cpu()  # (world_size, 4)
            ret[k] = {"states": global_states, "block": global_block}
        return ret

    def load_state_dict(self, data: OrderedDict, strict: bool = False):
        """Restore dataset positions; returns names missing from ``data``.

        Raises:
            RuntimeError: if ``strict`` and any dataset had no saved state.
        """
        self._q_cmd.put("load_state_dict")
        self._q_cmd.put(data)
        missing = self._q_cmd_out.get()
        if strict:
            if len(missing) > 0:
                raise RuntimeError("Missing dataset state: {}".format(missing))
        return missing

    def get(self) -> CPMBeeBatch:
        """Block until the worker produces the next packed batch."""
        ret: CPMBeeBatch = self._q_data.get()  # type: ignore
        if not isinstance(ret, dict):
            raise RuntimeError("Invalid data {}".format(ret))
        return ret

    def __iter__(self):
        # endless stream; callers decide when to stop
        while True:
            yield self.get()

    def __del__(self):
        # best-effort shutdown; never raise from a destructor
        if not self.closed:
            try:
                self.close()
            except Exception:
                pass
| 38,408 | 35.860845 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/training_tasks/bee/finetune.py | from ...tokenizers import CPMBeeTokenizer
from .pretrain import _MixedDatasetBatchPacker, _MixedDatasetConfig, CPMBeeBatch
from ...dataset import SimpleDataset
import bmtrain as bmt
class FinetuneDataset:
    """Iterates one fine-tuning dataset as packed CPM-Bee batches.

    Every rank packs identical *global* batches of ``batch_size * world_size``
    instances, then slices out its own ``batch_size``-sized shard, so packing
    decisions stay in sync across ranks. A rank whose shard of a (short) final
    batch is empty receives ``None`` for that step.
    """

    def __init__(
        self,
        dataset_path: str,
        batch_size: int,
        max_length: int,
        tokenizer: CPMBeeTokenizer,
        max_depth: int = 16,
        task_name: str = "task",
        drop_last: bool = False,
    ) -> None:
        self._world_size = bmt.world_size()
        self._rank = bmt.rank()
        self._batch_size = batch_size
        self._drop_last = drop_last
        # The packer assembles the global (all-ranks) batch.
        self._packer = _MixedDatasetBatchPacker(
            batch_size * self._world_size, max_length, tokenizer, max_depth
        )
        dataset = SimpleDataset(dataset_path, shuffle=False)
        self._ds_cfg: _MixedDatasetConfig = {
            "weight": 1.0,
            "path": dataset_path,
            "transforms": [],
            "task_name": task_name,
            "dataset_name": "finetune",
            "incontext_weight": [1.0],
            "lines": len(dataset),
            "dataset": dataset,
        }

    def __batch_iter(self):
        # Drain the dataset through the packer; EOFError marks exhaustion.
        packer = self._packer
        cfg = self._ds_cfg
        while True:
            try:
                packed = packer.add_data(cfg)
            except EOFError:
                break
            if packed is not None:
                yield packed
        # Flush whatever is left in the packer; discard it when drop_last.
        if len(packer) > 0:
            leftover = packer.pack_batch(force=True)
            if not self._drop_last:
                yield leftover
        # Rewind the underlying dataset so the next epoch starts fresh.
        cfg["dataset"]._repeat_times = 0

    def __iter__(self):
        shard_begin = self._batch_size * self._rank
        shard_end = self._batch_size * (self._rank + 1)
        shared_keys = ("task_names", "raw_data", "ext_ids", "ext_sub")
        for packed in self.__batch_iter():
            if packed["inputs"].shape[0] <= shard_begin:
                # This rank's shard of a short batch is empty.
                yield None
                continue
            sliced: CPMBeeBatch = {}  # type: ignore
            for key, value in packed.items():
                if key not in shared_keys:
                    sliced[key] = value[shard_begin:shard_end]  # type: ignore
            for key in shared_keys:
                sliced[key] = packed[key]  # type: ignore
            yield sliced
| 2,390 | 32.208333 | 81 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/training_tasks/bee/__init__.py | from .pretrain import MixedDataset
from .finetune import FinetuneDataset
| 73 | 23.666667 | 37 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/embedding.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import math
import torch.nn.functional as F
from .position_embedding import RotaryEmbedding
from typing import Optional
class Embedding(torch.nn.Module):
    """Token embedding scaled by 1/sqrt(d), with a weight-tied output head.

    ``forward`` looks tokens up in the table; ``projection`` reuses the same
    table to map hidden states back to vocabulary logits.
    """

    def __init__(
        self,
        vocab_size: int,
        embedding_size: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ):
        super().__init__()
        self.dim_model = embedding_size
        # NOTE(review): init_mean/init_std are accepted but never applied;
        # the table is left uninitialized (weights come from a checkpoint).
        self.weight = torch.nn.parameter.Parameter(
            torch.empty(vocab_size, embedding_size, dtype=dtype)
        )

    def forward(self, ids: torch.Tensor):
        """Embed token indices.

        Args:
            ids (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Indices of input sequence tokens.
        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, embedding_size)``: The embedding output.
        """  # noqa: E501
        scale = math.sqrt(self.dim_model)
        return F.embedding(ids, self.weight) / scale

    def projection(self, x: torch.Tensor):
        """Map hidden states back to vocabulary logits via the tied table.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): Input of projection
        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, vocab_output_size)``: The projection output.
        """  # noqa: E501
        scale = math.sqrt(self.dim_model)
        return F.linear(x / scale, self.weight)
class EmbeddingExt(torch.nn.Module):
    """Scaled token embedding whose outputs are rotated by RoPE, plus a tied
    output head that can be extended with extra (out-of-vocab) rows."""

    def __init__(
        self,
        vocab_size: int,
        embedding_size: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
        distance_scale: int = 16,
    ):
        super().__init__()
        self.dim_model = embedding_size
        self.rotary_emb = RotaryEmbedding(
            dim=embedding_size, distance_scale=distance_scale, dtype=dtype
        )
        self.weight = torch.nn.parameter.Parameter(
            torch.empty(vocab_size, embedding_size, dtype=dtype),
        )

    def forward(self, ids: torch.Tensor, ids_sub: torch.Tensor):
        """Embed token indices, then apply rotary position encoding.

        Args:
            ids (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Indices of input sequence tokens.
            ids_sub (:obj:`torch.Tensor` of shape ``(batch_size)``): Subscript of input sequence tokens.
        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, embedding_size)``: The embedding output.
        """  # noqa: E501
        scaled = F.embedding(ids, self.weight) / math.sqrt(self.dim_model)
        return self.rotary_emb(scaled, ids_sub)

    def projection(self, x: torch.Tensor, ext_table: Optional[torch.Tensor] = None):
        """Map hidden states to logits over the vocabulary (plus ext rows).

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): Input of projection
            ext_table (:obj:`torch.Tensor` of shape ``(ext_table_size, dim_model)``): Ext vocab table.
        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, vocab_size + ext_table_size)``: The projection output.
        """  # noqa: E501
        # Note: ext_table logits are intentionally computed without the
        # 1/sqrt(d) scaling, matching the original behavior.
        logits = F.linear(x / math.sqrt(self.dim_model), self.weight)
        if ext_table is not None:
            logits = torch.cat([logits, F.linear(x, ext_table)], dim=-1)
        return logits
| 4,165 | 36.531532 | 151 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/position_embedding.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Union
import torch
import torch.nn.functional as F
class SegmentPositionEmbedding(torch.nn.Module):
    """T5-style relative position bias with extra per-segment-pair buckets.

    Tokens in the *same* segment get a bucketed relative-position bias;
    tokens in *different* segments get a bias indexed by the (query segment,
    key segment) pair. The bias table therefore has
    ``num_segments**2 + num_buckets`` rows, one column per attention head.
    """

    def __init__(
        self,
        num_heads,
        num_segments=1,
        num_buckets=32,
        max_distance=128,
        bidirectional=False,
        dtype=torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ):
        super().__init__()

        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.bidirectional = bidirectional
        self.num_segments = num_segments

        # Rows [0, num_buckets) hold positional buckets; rows
        # [num_buckets, num_buckets + num_segments**2) hold segment pairs.
        self.relative_attention_bias = torch.nn.parameter.Parameter(
            torch.empty(num_segments * num_segments + num_buckets, num_heads, dtype=dtype)
        )

    def forward(
        self,
        key_pos: torch.Tensor,
        query_pos: torch.Tensor,
        key_segment: torch.Tensor,
        query_segment: torch.Tensor,
    ):
        """Compute the attention bias.

        Args:
            key_pos (:obj:`torch.Tensor` of shape ``(batch, len_k)``): key positions.
            query_pos (:obj:`torch.Tensor` of shape ``(batch, len_q)``): query positions.
            key_segment (:obj:`torch.Tensor` of shape ``(batch, len_k)``): key segment ids.
            query_segment (:obj:`torch.Tensor` of shape ``(batch, len_q)``): query segment ids.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, num_heads, len_q, len_k)``: additive attention bias.
        """  # noqa: E501
        with torch.no_grad():

            batch = key_pos.size(0)
            keylen = key_pos.size(1)
            querylen = query_pos.size(1)

            assert key_pos.size(0) == query_pos.size(0)
            assert keylen == key_segment.size(1) and querylen == query_segment.size(1)

            # reshape for (batch, len_q, len_k) broadcasting
            key_pos = key_pos.view(batch, -1, keylen)
            query_pos = query_pos.view(batch, querylen, -1)
            key_segment = key_segment.view(batch, -1, keylen)
            query_segment = query_segment.view(batch, querylen, -1)

            relative_position_bucket = self._segment_relative_position_bucket(
                query_segment, key_segment
            )
            # shift so segment-pair buckets do not overlap the positional buckets
            relative_position_bucket = relative_position_bucket + self.num_buckets

            # b*q*k
            absolute_position_bucket = self._position_bucket(
                torch.arange(keylen, dtype=torch.int32, device=relative_position_bucket.device)[
                    None, :
                ]
                - torch.arange(querylen, dtype=torch.int32, device=relative_position_bucket.device)[
                    :, None
                ],
                bidirectional=self.bidirectional,
                num_buckets=self.num_buckets,
                max_distance=self.max_distance,
            )
            # within the same segment, use positional buckets instead
            relative_position_bucket = torch.where(
                (key_segment == query_segment),
                absolute_position_bucket[None, :, :],
                relative_position_bucket,
            )
            # (batch, len_q, len_k)

        # (batch, len_q, len_k, num_heads)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        # (batch, num_heads, len_q, len_k)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _segment_relative_position_bucket(self, query_segment, key_segment):
        # flatten the (query segment, key segment) pair to a single index
        return query_segment * self.num_segments + key_segment

    def _position_bucket(
        self, relative_position, bidirectional=True, num_buckets=32, max_distance=128
    ):
        # T5 bucketing: half the buckets cover small exact distances, the
        # other half cover larger distances on a log scale up to max_distance.
        relative_buckets = 0
        if bidirectional:
            # split buckets between negative and positive offsets
            num_buckets //= 2
            relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            # causal: only non-positive offsets are representable
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_postion_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.int32)
        relative_postion_if_large = torch.min(
            relative_postion_if_large,
            torch.full_like(relative_postion_if_large, num_buckets - 1),
        )
        relative_buckets += torch.where(
            is_small, relative_position.to(torch.int32), relative_postion_if_large
        )
        return relative_buckets
class BucketPositionBias(torch.nn.Module):
    """Relative position bias driven by precomputed segment buckets.

    ``rel_buckets`` encodes the cross-segment relation of every (query, key)
    pair; entries equal to 0 mean "same segment" and fall back to a T5-style
    log-bucketed positional bias. The table has
    ``num_buckets + num_segment_bucket`` rows, one column per head.
    """

    def __init__(
        self,
        num_heads: int,
        num_buckets: int = 32,
        num_segment_bucket: int = 32,
        max_distance: int = 128,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ) -> None:
        super().__init__()

        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.num_segment_bucket = num_segment_bucket
        self.max_distance = max_distance

        # rows [0, num_buckets): positional buckets;
        # rows [num_buckets, num_buckets + num_segment_bucket): segment buckets
        self.relative_attention_bias = torch.nn.parameter.Parameter(
            torch.empty(num_buckets + num_segment_bucket, num_heads, dtype=dtype)
        )

    def forward(
        self,
        query_pos: torch.Tensor,  # (batch, len_q)
        key_pos: torch.Tensor,  # (batch, len_k)
        rel_buckets: torch.Tensor,  # (batch, len_q, len_k)
    ):
        """Return an additive attention bias of shape (batch, num_heads, len_q, len_k)."""
        with torch.no_grad():

            batch = key_pos.size(0)
            keylen = key_pos.size(1)
            querylen = query_pos.size(1)

            assert key_pos.size(0) == query_pos.size(0)
            assert (
                rel_buckets.size(0) == batch
                and rel_buckets.size(1) == querylen
                and rel_buckets.size(2) == keylen
            )

            # shift segment buckets (1-based in rel_buckets) past the
            # positional range so the two bucket families do not overlap
            relative_position_bucket = rel_buckets - 1 + self.num_buckets

            # b*q*k
            inner_segment_bucket = self._position_bucket(
                key_pos[..., None, :] - query_pos[..., :, None],
                num_buckets=self.num_buckets,
                max_distance=self.max_distance,
            )
            # rel_buckets == 0 marks same-segment pairs -> positional bias
            relative_position_bucket = torch.where(
                rel_buckets == 0,
                inner_segment_bucket,
                relative_position_bucket,
            )
            # (batch, len_q, len_k)

        # (batch, len_q, len_k, num_heads)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        # (batch, num_heads, len_q, len_k)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _position_bucket(self, relative_position, num_buckets=32, max_distance=128):
        # Bidirectional T5 bucketing: half the buckets for each sign, small
        # offsets mapped exactly, larger ones on a log scale up to max_distance.
        relative_buckets = 0
        num_buckets //= 2
        relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
        relative_position = torch.abs(relative_position)
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_postion_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.int32)
        relative_postion_if_large = torch.min(
            relative_postion_if_large,
            torch.full_like(relative_postion_if_large, num_buckets - 1),
        )
        relative_buckets += torch.where(
            is_small, relative_position.to(torch.int32), relative_postion_if_large
        )
        return relative_buckets
class RotaryEmbedding(torch.nn.Module):
    """Rotary position embedding (RoPE): rotates each feature pair of ``x``
    by an angle proportional to its position.

    Fix: ``inv_freq`` was previously created with ``device="cuda"`` hard-coded,
    which made constructing this module crash on CPU-only machines. It is now
    created on the default device and moved to the input's device on use.
    (Kept as a plain attribute — not a registered buffer — so state_dict keys
    are unchanged and existing checkpoints still load.)
    """

    def __init__(
        self,
        dim,
        base=10000,
        distance_scale: Union[int, float] = 1,
        dtype: torch.dtype = torch.half,
    ):
        super().__init__()
        # Inverse frequencies 1 / base^(2i/dim) for the dim/2 feature pairs.
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)
        )
        inv_freq = inv_freq.to(dtype)
        self.distance_scale = distance_scale
        self.dtype = dtype
        self.inv_freq = inv_freq

    def forward(self, x: torch.Tensor, x_pos: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(..., dim)``): Inputs.
            x_pos (:obj:`torch.Tensor` of shape ``(...)``): Positions of inputs.
        Return:
            :obj:`torch.Tensor` of shape ``(..., dim)``: rotated inputs.
        """
        # no-op when devices already match; otherwise follow the input
        inv_freq = self.inv_freq.to(x.device)
        x_pos = x_pos * self.distance_scale
        freqs = x_pos[..., None].to(self.dtype) * inv_freq[None, :]  # (..., dim/2)

        # the same implementation as sat
        emb = torch.cat((freqs, freqs), dim=-1)  # (..., dim)
        emb_cos = emb.cos()  # (..., dim)
        emb_sin = emb.sin()  # (..., dim)

        # pair feature i with feature i + dim/2 and rotate the pair
        rotate_x = torch.cat(
            [-x[..., x.size(-1) // 2 :], x[..., : x.size(-1) // 2]], dim=-1
        )  # (..., dim)

        return x * emb_cos + rotate_x * emb_sin
| 8,848 | 34.681452 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/feedforward.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from .linear import Linear
class DenseGatedACT(torch.nn.Module):
def __init__(
self,
dim_in: int,
dim_ff: int,
dtype=torch.half,
):
super().__init__()
self.w_0 = Linear(
dim_in=dim_in,
dim_out=dim_ff,
dtype=dtype,
scale_before=False,
)
self.w_1 = Linear(
dim_in=dim_in,
dim_out=dim_ff,
dtype=dtype,
scale_before=False,
)
self.act = torch.nn.GELU()
def forward(self, x: torch.Tensor):
"""Transform an input tensor from one feature space to another via a nonlinear operation
Args:
x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): Tensor that will be subject to nonlinear operations.
Return:
out (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_ff)``)
""" # noqa: E501
gate_score = self.act(self.w_0(x))
x = self.w_1(x)
x = gate_score * x
return x
class FeedForward(torch.nn.Module):
    r"""FeedForward module: gated-GELU up-projection, optional dropout, and a
    down-projection back to the model dimension.

    Args:
        dim_model (int): input and output dimension.
        dim_ff (int): middle (expansion) dimension.
        dtype (optional): Defaults to torch.half.
        dropout_p (float, optional): dropout probability applied between the
            two projections; ``None`` disables dropout. Defaults to None.
    """  # noqa: E501

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        dtype=torch.half,
        dropout_p: Optional[float] = None,
    ):
        super().__init__()
        self.w_in = DenseGatedACT(
            dim_in=dim_model,
            dim_ff=dim_ff,
            dtype=dtype,
        )
        self.dropout = torch.nn.Dropout(dropout_p) if dropout_p is not None else None
        self.w_out = Linear(
            dim_in=dim_ff,
            dim_out=dim_model,
            dtype=dtype,
            scale_before=False,
        )

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): The input of feed-forward module.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, dim_out)``: The output of feed-forward module.
        """  # noqa: E501
        hidden = self.w_in(x)
        if self.dropout is not None:
            hidden = self.dropout(hidden)
        return self.w_out(hidden)
| 3,676 | 29.38843 | 176 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/layernorm.py | import torch
@torch.jit.script  # type: ignore
def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):
    """Scale ``hidden`` by the reciprocal RMS of its last dimension (computed
    in float32 for stability), cast back to the input dtype, and apply the
    per-feature ``weight``. No mean-centering (RMSNorm, not LayerNorm)."""
    input_dtype = hidden.dtype
    mean_square = hidden.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
    normed = (hidden * torch.rsqrt(mean_square + eps)).to(input_dtype)
    return normed * weight
class LayerNorm(torch.nn.Module):
    """RMS LayerNorm over the last dimension, with a learned per-feature gain.

    Args:
        dim_norm (int): size of the normalized (last) dimension.
        dtype (torch.dtype, optional): parameter dtype. Defaults to torch.half.
        eps (float, optional): numerical-stability constant. Defaults to 1e-6.
        init_var (float, optional): initial gain value. Defaults to 1.0.
    """

    def __init__(
        self,
        dim_norm: int,
        dtype: torch.dtype = torch.half,
        eps: float = 1e-6,
        init_var: float = 1.0,
    ):
        super().__init__()
        self.dim_norm = dim_norm
        self.eps = eps
        self.weight = torch.nn.parameter.Parameter(torch.full((dim_norm,), init_var, dtype=dtype))

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(batch_size, seq_len, dim_norm)``): Input tensor that need to be normalized.
        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, dim_norm)``: The layernorm output.
        """  # noqa: E501
        assert x.size(-1) == self.dim_norm
        return rms_layernorm(x, self.weight, self.eps)
| 1,156 | 29.447368 | 122 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/linear.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import math
import torch.nn.functional as F
class Linear(torch.nn.Module):
    """Bias-free linear layer whose output is scaled by 1/sqrt(dim_in).

    ``scale_before`` chooses where the scaling happens: before the matmul
    (helps avoid fp16 overflow) or after it. The two orders are equal in
    exact arithmetic but differ in low-precision rounding.
    """

    def __init__(
        self,
        dim_in: int,
        dim_out: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
        scale_before: bool = False,
    ):
        super().__init__()
        self.dim_in = self.in_features = dim_in
        self.dim_out = self.out_features = dim_out
        self.scale_before = scale_before
        # NOTE(review): init_mean/init_std are accepted but unused; the weight
        # is left uninitialized (expected to be loaded from a checkpoint).
        self.weight = torch.nn.parameter.Parameter(torch.empty((dim_out, dim_in), dtype=dtype))

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): The input of linear layer

        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, dim_out)``: The output of the linear transform y.
        """  # noqa: E501
        scale = math.sqrt(self.dim_in)
        if self.scale_before:
            return F.linear(x / scale, self.weight)
        return F.linear(x, self.weight) / scale
| 1,721 | 32.115385 | 109 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/transformer.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Optional, List, Tuple
from .blocks import TransformerBlock
from .layernorm import LayerNorm
class Encoder(torch.nn.Module):
    """Layers of encoder transformer blocks plus a final layernorm.

    Args:
        num_layers (int): number of layers.
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): dim_ff used in :py:class:`model_center.layer.FeedForward`.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to 0.
        mask_modules (optional): per-layer (mask_att, mask_ffn) pairs that
            disable the attention and/or feed-forward sub-block of each layer.
    """  # noqa: E501

    def __init__(
        self,
        num_layers: int,
        dim_model: int,
        dim_ff: int,
        num_heads: int,
        dim_head: int,
        dtype: torch.dtype = torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
        mask_modules: Optional[List[Tuple[bool, bool]]] = None,
    ):
        super().__init__()
        self.num_layers = num_layers

        if mask_modules is not None:
            assert (
                len(mask_modules) == num_layers
            ), "The total number of masks should equal to num_layers"
            for mask_module in mask_modules:
                assert (
                    len(mask_module) == 2
                ), "For encoder, each mask should be (mask_att, mask_ffn)"
        else:
            # default: nothing masked
            mask_modules = [(False, False)] * num_layers

        self.layers = torch.nn.ModuleList(
            [
                TransformerBlock(
                    dim_model=dim_model,
                    dim_ff=dim_ff,
                    num_heads=num_heads,
                    dim_head=dim_head,
                    dtype=dtype,
                    eps=eps,
                    dropout_p=dropout_p,
                    mask_att=mask_modules[ith][0],
                    mask_ffn=mask_modules[ith][1],
                )
                for ith in range(num_layers)
            ]
        )

        self.output_layernorm = LayerNorm(dim_norm=dim_model, dtype=dtype, eps=eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_bias: torch.Tensor,
        use_cache: bool = False,
        past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
    ):
        """
        Args:
            hidden-states (:obj:`torch.Tensor` of shape ``(batch, seq_enc, dim_model)``): Input of encoder, might be the embedding of a batch of sequences.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_enc, seq_enc)``): Avoid invalid areas to participate in the calculation
            position_bias(:obj:`torch.Tensor` of shape ``(num_heads, seq_enc, seq_enc)``) Provides position information to attention mechanism.
            use_cache (bool): when True, each layer also returns its (key, value)
                tensors; this whole path runs under ``torch.no_grad()`` and is
                therefore inference-only.
            past_key_values: per-layer cached (key, value) pairs from earlier
                decoding steps, or None on the first step.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_enc, dim_model)``: The encoder output.
            When ``use_cache`` is True, returns a tuple of (output, new per-layer
            key/value cache) instead.
        """  # noqa: E501
        if not use_cache:
            for layer in self.layers:
                hidden_states = layer(hidden_states, attention_mask, position_bias)
            hidden_states = self.output_layernorm(hidden_states)
            return hidden_states
        else:
            with torch.no_grad():
                current_key_values = []
                for i, module in enumerate(self.layers):
                    hidden_states = module(
                        hidden_states,
                        attention_mask,
                        position_bias,
                        past_key_value=past_key_values[i] if past_key_values else None,
                        use_cache=use_cache,
                    )
                    if use_cache:
                        # layer returned (hidden, present_kv): stash the cache
                        current_key_values.append(hidden_states[1])
                        hidden_states = hidden_states[0]
                hidden_states = self.output_layernorm(hidden_states)
                if use_cache:
                    return hidden_states, current_key_values
                else:
                    return hidden_states
| 4,852 | 37.515873 | 155 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/__init__.py | from .embedding import Embedding, EmbeddingExt
from .position_embedding import SegmentPositionEmbedding, BucketPositionBias, RotaryEmbedding
from .linear import Linear
from .layernorm import LayerNorm
from .attention import Attention
from .feedforward import FeedForward
from .blocks import TransformerBlock
from .transformer import Encoder
| 341 | 37 | 93 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/attention.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
import math
from .linear import Linear
class Attention(torch.nn.Module):
    def __init__(
        self,
        dim_model: int,
        num_heads: int,
        dim_head: int,
        dtype: torch.dtype = torch.half,
        dropout_p: Optional[float] = None,
    ) -> None:
        """Multi-head attention projections.

        Args:
            dim_model: hidden size of the model (input/output of this module).
            num_heads: number of attention heads.
            dim_head: per-head dimension; Q/K/V project to num_heads * dim_head.
            dtype: parameter dtype. Defaults to torch.half.
            dropout_p: attention-probability dropout; None disables dropout.
        """
        super().__init__()

        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head

        # bias-free Q/K/V projections: dim_model -> num_heads * dim_head
        self.project_q = Linear(self.dim_model, self.num_heads * self.dim_head, dtype=dtype)
        self.project_k = Linear(self.dim_model, self.num_heads * self.dim_head, dtype=dtype)
        self.project_v = Linear(self.dim_model, self.num_heads * self.dim_head, dtype=dtype)

        # output projection back to the model dimension
        self.attention_out = Linear(self.num_heads * self.dim_head, self.dim_model, dtype=dtype)

        # softmax over the key dimension of the score matrix
        self.softmax = torch.nn.Softmax(dim=-1)

        if dropout_p is not None:
            self.dropout = torch.nn.Dropout(p=dropout_p)
        else:
            self.dropout = None
def forward(
self,
hidden_q: torch.Tensor,
hidden_kv: torch.Tensor,
attention_mask: torch.BoolTensor,
position_bias: torch.Tensor,
use_cache: bool = False,
past_kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
):
"""
Args:
hidden_q (:obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``): Indices of input sequence tokens. It will be embedded by model's internal embedding lookup matrix.
hidden_kv (:obj:`torch.Tensor` of shape ``(batch, len_k, dim_model)``): Length of input sequence before padding.
attention_mask (:obj:`torch.Tensor` of shape ``(batch, len_q, len_k)``): Used to avoid performing attention on padding token indices.
position_bias(:obj:`torch.Tensor` of shape ``(num_heads, len_q, len_k)`` or ``(1, num_heads, len_k, len_q)``): Provide positional information about tensor `key_value` and `query`.
Return:
out (:obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``): The attention output.
""" # noqa: E501
batch_size = hidden_q.size(0)
len_q = hidden_q.size(1)
len_k = hidden_kv.size(1)
h_q = self.project_q(hidden_q)
h_k = self.project_k(hidden_kv)
h_v = self.project_v(hidden_kv)
h_q = h_q.view(batch_size, len_q, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
h_k = h_k.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
h_v = h_v.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
if past_kv is not None:
h_k = torch.cat([past_kv[0], h_k], dim=-2)
h_v = torch.cat([past_kv[1], h_v], dim=-2)
len_k = h_k.size(-2)
# (b, n_h, len_q, d_h) @ (b, n_h, d_h, len_k) -> (b, n_h, len_q, len_k)
score = torch.matmul(h_q, h_k.transpose(-1, -2)) / math.sqrt(self.dim_head)
score = score + position_bias
score = torch.masked_fill(
score,
attention_mask.view(batch_size, 1, len_q, len_k) == False,
torch.scalar_tensor(float("-inf"), device=score.device, dtype=score.dtype),
)
score = self.softmax(score)
score = torch.masked_fill(
score,
attention_mask.view(batch_size, 1, len_q, len_k) == False,
torch.scalar_tensor(0, device=score.device, dtype=score.dtype),
)
if self.dropout is not None:
score = self.dropout(score)
# (b, n_h, len_q, len_k) @ (b, n_h, len_k, d_h) -> (b, n_h, len_q, d_h)
score = torch.matmul(score, h_v)
score = score.view(batch_size, self.num_heads, len_q, self.dim_head).permute(0, 2, 1, 3)
score = score.contiguous().view(batch_size, len_q, self.num_heads * self.dim_head)
score = self.attention_out(score)
if use_cache:
return score, (h_k, h_v)
else:
return score
| 4,604 | 38.025424 | 191 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/blocks.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from .layernorm import LayerNorm
from .attention import Attention
from .feedforward import FeedForward
class SelfAttentionBlock(torch.nn.Module):
    """The whole self-attention block. A sequence of operation. Consists of layernorm, self-attention and residual connection.

    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to None (dropout disabled).
    """  # noqa: E501

    def __init__(
        self,
        dim_model: int,
        num_heads: int,
        dim_head: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
    ):
        super().__init__()
        # Pre-norm architecture: LayerNorm runs before the attention, not after.
        self.layernorm_before_attention = LayerNorm(
            dim_model,
            dtype=dtype,
            eps=eps,
        )
        self.self_attention = Attention(
            dim_model=dim_model,
            num_heads=num_heads,
            dim_head=dim_head,
            dtype=dtype,
            dropout_p=dropout_p,
        )
        # NOTE: a falsy dropout_p (None or 0) disables this extra dropout layer.
        if dropout_p:
            self.dropout = torch.nn.Dropout(dropout_p)
        else:
            self.dropout = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_bias: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """
        Args:
            hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``): Input of self-attention block. It can be the embedding of a batch of sequences.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_self, seq_self)``): Avoid invalid areas to participate in the calculation.
            position_bias (:obj:`torch.Tensor` of shape ``(num_heads, seq_self, seq_self)``): Provide positional information to self-attention block.
            use_cache (bool): when True, the attention layer also returns its (key, value) cache.
            past_key_value (tuple, optional): cached (key, value) tensors from earlier decoding steps.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``: The output of attention block.
        """  # noqa: E501
        x = self.layernorm_before_attention(hidden_states)
        # Self-attention: queries and keys/values both come from the normalized input.
        x = self.self_attention(x, x, attention_mask, position_bias, use_cache, past_key_value)
        if use_cache:
            # The attention layer returned (output, (key, value)); unpack the cache.
            x, current_key_value = x
        else:
            current_key_value = None

        if self.dropout is not None:
            x = self.dropout(x)
        # Scaled residual connection; the 1.05 divisor matches the other CPM blocks.
        hidden_states = (hidden_states + x) / 1.05

        if use_cache:
            return hidden_states, current_key_value
        else:
            return hidden_states
class FFNBlock(torch.nn.Module):
    """Pre-norm feed-forward sub-block.

    Applies LayerNorm, then the feed-forward network, optional dropout, and a
    residual connection scaled by 1/1.05 (the same residual scaling used in
    SelfAttentionBlock).

    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): inner dimension of the feed-forward network.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): LayerNorm epsilon. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to 0 (dropout disabled).
    """  # noqa: E501

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = 0,
    ):
        super().__init__()
        self.layernorm_before_ffn = LayerNorm(
            dim_model,
            dtype=dtype,
            eps=eps,
        )
        self.ffn = FeedForward(
            dim_model,
            dim_ff,
            dtype=dtype,
            dropout_p=dropout_p,
        )
        # A falsy dropout_p (None or 0) disables the extra dropout layer.
        self.dropout = torch.nn.Dropout(dropout_p) if dropout_p else None

    def forward(
        self,
        hidden_states: torch.Tensor,
    ):
        """Run the feed-forward sub-block.

        Args:
            hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``): hidden states before the feed-forward layer.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``: the output of the feed-forward block.
        """  # noqa: E501
        ffn_out = self.ffn(self.layernorm_before_ffn(hidden_states))
        if self.dropout is not None:
            ffn_out = self.dropout(ffn_out)
        # Scaled residual connection.
        return (hidden_states + ffn_out) / 1.05
class TransformerBlock(torch.nn.Module):
    """The whole transformer block. A sequence of operation. Consists of self-attention block[, cross-attention block] and feed-forward block.

    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): dim_ff used in :py:class:`model_center.layer.FeedForward`.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to None.
        mask_att (bool, optional): when True, the self-attention sub-block is omitted entirely. Defaults to False.
        mask_ffn (bool, optional): when True, the feed-forward sub-block is omitted entirely. Defaults to False.
    """  # noqa: E501

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        num_heads: int,
        dim_head: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
        mask_att: bool = False,
        mask_ffn: bool = False,
    ):
        super().__init__()
        self.mask_att = mask_att
        self.mask_ffn = mask_ffn

        # Sub-blocks are only constructed when not masked out, so masked layers
        # contribute no parameters at all.
        if not self.mask_att:
            self.self_att = SelfAttentionBlock(
                dim_model=dim_model,
                num_heads=num_heads,
                dim_head=dim_head,
                dtype=dtype,
                eps=eps,
                dropout_p=dropout_p,
            )

        if not self.mask_ffn:
            self.ffn = FFNBlock(
                dim_model=dim_model,
                dim_ff=dim_ff,
                dtype=dtype,
                eps=eps,
                dropout_p=dropout_p,
            )

    def forward(
        self,
        self_hidden_states: torch.Tensor,
        self_attention_mask: torch.Tensor,
        self_position_bias: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """
        Args:
            self_hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``): Input of transformer block(self-attention block). It can be the raw embedding of a batch of sequences.
            self_attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_self, seq_self)``): Avoid invalid areas to participate in the calculation of self-attention.
            self_position_bias (:obj:`torch.Tensor` of shape ``(num_heads, seq_self, seq_self)``): Provide positional information to self-attention block.
            use_cache (bool): when True, also return the attention (key, value) cache (None if the attention block is masked).
            past_key_value (tuple, optional): cached (key, value) tensors for incremental decoding.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``: The output of transformer block.
        """  # noqa: E501
        # (batch, dim_model, seq_self)
        current_key_value = None
        if not self.mask_att:
            hidden_states = self.self_att(
                self_hidden_states,
                attention_mask=self_attention_mask,
                position_bias=self_position_bias,
                use_cache=use_cache,
                past_key_value=past_key_value,
            )
            if use_cache:
                hidden_states, current_key_value = hidden_states
        else:
            # Masked attention: pass the input through unchanged.
            hidden_states = self_hidden_states

        # (batch, dim_model, seq_self)
        if not self.mask_ffn:
            hidden_states = self.ffn(hidden_states)

        if use_cache:
            return hidden_states, current_key_value
        else:
            return hidden_states
| 8,723 | 34.036145 | 198 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/tokenizers/bee.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
import io
from typing import IO, Dict, List, Optional, Tuple
def load_vocab(fp: IO[bytes]) -> Dict[str, int]:
    """Load a vocabulary file (UTF-8, one token per line) into a token -> id dict.

    Only a trailing newline is removed from each line (interior whitespace is
    preserved); empty lines are skipped. Ids are assigned in file order.
    """
    vocab: Dict[str, int] = {}
    text = io.TextIOWrapper(fp, encoding="utf-8")
    for line in text:
        token = line[:-1] if line.endswith("\n") else line
        if token:
            vocab[token] = len(vocab)
    return vocab
class Token(object):
    """A single lexeme produced by the tokenizer.

    Attributes:
        token: the matched text.
        start: character offset of the token in the scanned text.
        is_unk: True if the text was not found in the vocabulary.
        is_special: True if the text is a ``<...>`` special token.
    """

    def __init__(self, token: str, start: int, is_unk: bool, is_special: bool):
        self.token = token
        self.start = start
        self.is_unk = is_unk
        self.is_special = is_special

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return (
            f"Token(token={self.token}, start={self.start}, "
            f"is_unk={self.is_unk}, is_special={self.is_special})"
        )
class CPMBeeTokenizer(object):
    """Tokenizer for CPM-Bee.

    Plain text is segmented greedily (longest match first) against the packaged
    vocabulary. Spans written as ``<...>`` are parsed as special tokens, and a
    literal ``<`` must be escaped as ``<<``. Pieces that are not in the
    vocabulary are mapped to a per-call extension table instead of a single
    UNK id (see :meth:`encode`).
    """

    def __init__(
        self,
    ):
        self.unk_token = "<unk>"
        self.mask_token = "<mask>"
        self.bos_token = "<s>"
        self.eos_token = "</s>"
        self.line_token = "\n"
        self.space_token = " "
        self.encoder = load_vocab(pkg_resources.resource_stream("cpm_live", "vocabs/bee.txt"))
        # Remap the textual placeholders "</n>"/"</_>" to the real characters
        # they stand for, so lookups can use "\n" and " " directly.
        self.encoder[self.line_token] = self.encoder["</n>"]
        self.encoder[self.space_token] = self.encoder["</_>"]
        del self.encoder["</n>"]
        del self.encoder["</_>"]
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Every vocabulary entry of the form "<...>" counts as a special token.
        self._special_tokens = {
            k: v for k, v in self.encoder.items() if k.startswith("<") and k.endswith(">")
        }
        # Length of the longest vocabulary entry; bounds the greedy lookahead
        # in get_piece().
        self._max_word_len = max([len(x) for x in self.encoder.keys()])

    def get_piece(self, text: str) -> str:
        """Return the longest prefix of ``text`` that is a non-special vocabulary
        entry, falling back to the first character when nothing matches."""
        text = text[: self._max_word_len]
        len_text = len(text)
        for i in range(len(text)):
            sub = text[: len_text - i]
            if (sub in self.encoder) and (sub not in self._special_tokens):
                return sub
        return text[0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    @property
    def eos_id(self):
        return self.encoder[self.eos_token]

    @property
    def bos_id(self):
        return self.encoder[self.bos_token]

    @property
    def unk_id(self):
        return self.encoder[self.unk_token]

    @property
    def mask_id(self):
        return self.encoder[self.mask_token]

    def __len__(self):
        return len(self.encoder)

    def tokenize(self, text: str) -> List[Token]:
        """Split ``text`` into :class:`Token` objects.

        Pass 1 splits the input into alternating plain-text and ``<...>``
        special-token parts (``<<`` escapes a literal ``<``). Pass 2 greedily
        matches each plain part against the vocabulary; consecutive unmatched
        characters are merged into a single UNK token.

        Raises:
            ValueError: on a nested ``<`` inside a special token or on text
                ending mid-escape / mid-special-token.

        NOTE: token start offsets count characters of the unescaped parts, so
        they drift from raw-text offsets after a ``<<`` escape.
        """
        output_tokens: List[Token] = []
        # sentence_split alternates: even indices hold plain text, odd indices
        # hold one "<...>" special token each.
        sentence_split = [""]
        is_escape = False
        is_special_token = False
        for i, c in enumerate(text):
            if is_special_token:
                if c == "<":
                    raise ValueError("Invalid special token at pos {}".format(i))
                elif c == ">":
                    # end of special token
                    sentence_split[-1] += c
                    is_special_token = False
                    sentence_split.append("")
                else:
                    sentence_split[-1] += c
            else:
                if c == "<":
                    if is_escape:
                        # case: <<
                        sentence_split[-1] += c
                        is_escape = False
                    else:
                        # case: x<
                        is_escape = True
                else:
                    if is_escape:
                        # case <x
                        is_special_token = True
                        is_escape = False
                        sentence_split.append("<" + c)
                    else:
                        # case xx
                        sentence_split[-1] += c
        if is_escape or is_special_token:
            raise ValueError("Unexpected end of text `{}`".format(text))

        part_pos = 0
        for i, part in enumerate(sentence_split):
            if (i & 1) == 1:
                # special token
                output_tokens.append(Token(part, part_pos, False, True))
            else:
                # Greedy longest-match segmentation of a plain-text part.
                part_st = 0
                last_unk = None
                while part_st < len(part):
                    piece = self.get_piece(part[part_st:])
                    if piece not in self.encoder:
                        # Accumulate consecutive unmatched pieces into one UNK run.
                        if last_unk is None:
                            last_unk = piece
                        else:
                            last_unk += piece
                    else:
                        if last_unk is None:
                            output_tokens.append(Token(piece, part_st + part_pos, False, False))
                        else:
                            # Flush the pending UNK run before the matched piece.
                            output_tokens.append(
                                Token(last_unk, part_st + part_pos - len(last_unk), True, False)
                            )
                            output_tokens.append(Token(piece, part_st + part_pos, False, False))
                            last_unk = None
                    part_st += len(piece)
                if last_unk is not None:
                    # part end with UNK
                    output_tokens.append(
                        Token(last_unk, part_st + part_pos - len(last_unk), True, False)
                    )
            part_pos += len(part)
        return output_tokens

    @staticmethod
    def escape(text: str) -> str:
        """Escape literal ``<`` characters so they survive tokenize()."""
        return text.replace("<", "<<")

    @staticmethod
    def unescape(text: str) -> str:
        """Inverse of :meth:`escape`."""
        return text.replace("<<", "<")

    def encode(
        self, text: str, past_table: Dict[int, str] = {}
    ) -> Tuple[List[int], Dict[int, str]]:
        """Encode ``text`` into ids.

        UNK runs and special tokens that are not in the vocabulary are assigned
        fresh ids starting at ``vocab_size``, extending ``past_table``.

        Returns:
            The list of ids and the (extended) extension table mapping
            out-of-vocabulary ids back to their text.

        NOTE(review): new ext ids are ``vocab_size + len(ext_table_rev)``, which
        assumes ``past_table`` ids are exactly the dense range starting at
        ``vocab_size`` — confirm callers uphold this.
        (Also note ``past_table={}`` is a mutable default; it is only read here.)
        """
        ext_table_rev: Dict[str, int] = {}
        ext_table: Dict[int, str] = {}
        for idx, val in past_table.items():
            ext_table[idx] = val
            ext_table_rev[val] = idx

        ret = []
        for x in self.tokenize(text):
            if x.is_unk or (x.is_special and (x.token not in self.encoder)):
                if x.token not in ext_table_rev:
                    ext_table_rev[x.token] = len(ext_table_rev) + self.vocab_size
                    ext_table[ext_table_rev[x.token]] = x.token
                ret.append(ext_table_rev[x.token])
            elif x.token in self.encoder:
                ret.append(self.encoder[x.token])
            else:
                raise ValueError("Unknown token `{}` at pos {}".format(x.token, x.start))
        return ret, ext_table

    def decode(self, tokens: List[int], ext_table: Optional[Dict[int, str]] = None):
        """Decode ids into a string."""
        if ext_table is None:
            ext_table = {}
        ret = []
        for token in tokens:
            if token in ext_table:
                ret.append(ext_table[token])
            else:
                # Negative ids (e.g. padding) are silently dropped.
                if token >= 0:
                    w = self.decoder[token]
                    if w in self._special_tokens:
                        ret.append(w)
                    else:
                        # Re-escape plain text so decode(encode(s)) round-trips "<".
                        ret.append(self.escape(w))
        return "".join(ret)
| 7,675 | 33.267857 | 96 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/tokenizers/ant.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jieba
import pkg_resources
import io
from typing import IO
def load_vocab(fp: IO[bytes]):
    """Load a vocabulary file (UTF-8, one token per line) into a token -> id dict.

    Each line is stripped of surrounding whitespace; empty lines are skipped.
    Ids are assigned in file order.
    """
    reader = io.TextIOWrapper(fp, encoding="utf-8")
    vocab = {}
    for line in reader:
        token = line.strip()
        if token:
            vocab[token] = len(vocab)
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first tokenizer over a fixed vocabulary.

    Unlike BERT wordpiece there is no ``##`` continuation marker: on a failed
    match a single ``unk_token`` is emitted and scanning advances one character.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split ``token`` into vocabulary pieces (or ``unk_token`` markers).

        Words longer than ``max_input_chars_per_word`` collapse to a single
        ``unk_token``.
        """
        if len(token) > self.max_input_chars_per_word:
            return [self.unk_token]

        pieces = []
        start = 0
        n = len(token)
        while start < n:
            # Try the longest remaining substring first, shrinking until a
            # vocabulary entry is found.
            match = None
            for end in range(n, start, -1):
                candidate = token[start:end]
                if candidate in self.vocab:
                    match = candidate
                    break
            if match is None:
                # No match at this position: emit UNK and advance one character.
                pieces.append(self.unk_token)
                start += 1
            else:
                pieces.append(match)
                start += len(match)
        return pieces
class CPMAntTokenizer(object):
    """Tokenizer for CPM-Ant.

    Text is first segmented with jieba, then each word is split by greedy
    longest-match wordpiece lookup against the packaged vocabulary.
    """

    def __init__(
        self,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
    ):
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.bos_token = bos_token
        self.eos_token = eos_token
        self.pad_token = pad_token
        self.unk_token = unk_token
        self.line_token = line_token
        self.space_token = space_token

        self.encoder = load_vocab(pkg_resources.resource_stream("cpm_live", "vocabs/ant.txt"))
        # Remap the textual placeholders "</_>"/"</n>" to the literal characters
        # they represent, so encoding can look up " " and "\n" directly.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[self.space_token]
        del self.encoder[self.line_token]

        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def vocab_size(self):
        return len(self.encoder)

    @property
    def bod_id(self):
        # Begin-of-document id.
        return self.encoder[self.bod_token]

    @property
    def eod_id(self):
        # End-of-document id.
        return self.encoder[self.eod_token]

    @property
    def eos_id(self):
        return self.encoder[self.eos_token]

    @property
    def bos_id(self):
        return self.encoder[self.bos_token]

    @property
    def pad_id(self):
        return self.encoder[self.pad_token]

    @property
    def unk_id(self):
        return self.encoder[self.unk_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    def __len__(self):
        return len(self.encoder)

    def tokenize(self, text):
        """Tokenize a string."""
        # jieba word segmentation first, then wordpiece within each word;
        # out-of-vocabulary characters become unk_token.
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def encode(self, text):
        """Encode a string into ids."""
        return [self.encoder[x] for x in self.tokenize(text)]

    def decode(self, tokens):
        """Decode ids into a string."""
        # Negative ids (e.g. padding sentinels) are dropped.
        tokens = [i for i in tokens if i >= 0]
        text = "".join([self.decoder[x] for x in tokens])
        return text

    def check(self, token):
        # True if the token is in the vocabulary.
        return token in self.encoder

    def convert_tokens_to_ids(self, tokens):
        # UNK-safe token -> id mapping.
        return [self.encoder.get(x, self.encoder[self.unk_token]) for x in tokens]

    def convert_ids_to_tokens(self, ids):
        # Negative ids map to the unk token.
        return [self.decoder[x] if x >= 0 else self.unk_token for x in ids]
| 4,557 | 27.4875 | 99 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/tokenizers/__init__.py | from .ant import CPMAntTokenizer
from .bee import CPMBeeTokenizer
| 66 | 21.333333 | 32 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/embedding.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import bmtrain as bmt
import math
import torch.nn.functional as F
from .position_embedding import RotaryEmbedding
class Embedding(bmt.DistributedModule):
    """Token embedding with symmetric 1/sqrt(dim) scaling and a weight-tied output projection.

    Args:
        vocab_size (int): number of rows in the embedding table.
        embedding_size (int): dimension of each embedding vector.
        dtype (torch.dtype): parameter dtype. Defaults to torch.half.
        init_mean (float): mean of the normal initializer. Defaults to 0.0.
        init_std (float): std of the normal initializer. Defaults to 1.
    """

    def __init__(
        self,
        vocab_size: int,
        embedding_size: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ):
        super().__init__()

        self.dim_model = embedding_size
        self.weight = bmt.DistributedParameter(
            torch.empty(vocab_size, embedding_size, dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(self, ids: torch.Tensor):
        """
        Args:
            ids (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Indices of input sequence tokens.

        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, embedding_size)``: The embedding output.
        """  # noqa: E501
        # The 1/sqrt(dim_model) scaling mirrors the one in projection(), so the
        # shared weight matrix is used symmetrically in both directions.
        embeds = F.embedding(ids, self.weight) / math.sqrt(self.dim_model)
        return embeds

    def projection(self, x: torch.Tensor):
        """
        Projection based on embedding's weight. For example, embedding map vocab_size to embed_size, than projection map embed_size back to vocab_size.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): Input of projection

        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, vocab_output_size)``: The projection output.
        """  # noqa: E501
        logits = F.linear(x / math.sqrt(self.dim_model), self.weight)
        return logits
class EmbeddingExt(bmt.DistributedModule):
    """Token embedding with rotary position encoding and extendable output projection.

    Like :class:`Embedding`, but the looked-up vectors are rotated by
    :class:`RotaryEmbedding` according to a per-token subscript position, and
    ``projection`` can score extra out-of-vocabulary rows from an external table.

    Args:
        vocab_size (int): number of rows in the embedding table.
        embedding_size (int): dimension of each embedding vector.
        dtype (torch.dtype): parameter dtype. Defaults to torch.half.
        init_mean (float): mean of the normal initializer. Defaults to 0.0.
        init_std (float): std of the normal initializer. Defaults to 1.
        distance_scale (int): position multiplier passed to RotaryEmbedding. Defaults to 16.
    """

    def __init__(
        self,
        vocab_size: int,
        embedding_size: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
        distance_scale: int = 16,
    ):
        super().__init__()

        self.dim_model = embedding_size
        self.rotary_emb = RotaryEmbedding(
            dim=embedding_size, distance_scale=distance_scale, dtype=dtype
        )

        self.weight = bmt.DistributedParameter(
            torch.empty(vocab_size, embedding_size, dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(self, ids: torch.Tensor, ids_sub: torch.Tensor):
        """
        Args:
            ids (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Indices of input sequence tokens.
            ids_sub (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Subscript positions of input sequence tokens, used for the rotary rotation.

        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, embedding_size)``: The embedding output.
        """  # noqa: E501
        embeds = F.embedding(ids, self.weight) / math.sqrt(self.dim_model)
        return self.rotary_emb(embeds, ids_sub)

    def projection(self, x: torch.Tensor, ext_table: Optional[torch.Tensor] = None):
        """
        Projection based on embedding's weight. For example, embedding map vocab_size to embed_size, than projection map embed_size back to vocab_size.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): Input of projection
            ext_table (:obj:`torch.Tensor` of shape ``(ext_table_size, dim_model)``): Ext vocab table.

        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, vocab_size + ext_table_size)``: The projection output.
        """  # noqa: E501
        logits = F.linear(x / math.sqrt(self.dim_model), self.weight)
        if ext_table is not None:
            # NOTE(review): ext-table logits are computed WITHOUT the
            # 1/sqrt(dim_model) scaling applied to the main vocabulary above —
            # confirm this asymmetry is intended.
            logits_ext = F.linear(x, ext_table)
            logits = torch.cat([logits, logits_ext], dim=-1)
        return logits
| 4,458 | 36.788136 | 151 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/position_embedding.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Union
import torch
import bmtrain as bmt
import torch.nn.functional as F
class SegmentPositionEmbedding(bmt.DistributedModule):
    """Relative-position attention bias with extra buckets for cross-segment pairs.

    Produces an additive bias of shape ``(batch, num_heads, len_q, len_k)``.
    Query/key pairs in the same segment use T5-style relative-position buckets;
    pairs in different segments use one of ``num_segments * num_segments``
    segment-pair buckets instead.

    Args:
        num_heads (int): number of attention heads the bias is produced for.
        num_segments (int): number of distinct segment ids. Defaults to 1.
        num_buckets (int): number of relative-position buckets. Defaults to 32.
        max_distance (int): distances beyond this share the last buckets. Defaults to 128.
        bidirectional (bool): whether positions after the query get their own bucket half. Defaults to False.
        dtype (torch.dtype): parameter dtype. Defaults to torch.half.
        init_mean (float): mean of the normal initializer. Defaults to 0.0.
        init_std (float): std of the normal initializer. Defaults to 1.
    """

    def __init__(
        self,
        num_heads: int,
        num_segments: int = 1,
        num_buckets: int = 32,
        max_distance: int = 128,
        bidirectional: bool = False,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ):
        super().__init__()

        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.bidirectional = bidirectional
        self.num_segments = num_segments

        # One row per segment-pair bucket plus one per relative-position bucket.
        self.relative_attention_bias = bmt.DistributedParameter(
            torch.empty(num_segments * num_segments + num_buckets, num_heads, dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(
        self,
        key_pos: torch.Tensor,
        query_pos: torch.Tensor,
        key_segment: torch.Tensor,
        query_segment: torch.Tensor,
    ):
        """
        Args:
            key_pos (:obj:`torch.Tensor` of shape ``(batch, len_k)``): positions of the keys.
            query_pos (:obj:`torch.Tensor` of shape ``(batch, len_q)``): positions of the queries.
            key_segment (:obj:`torch.Tensor` of shape ``(batch, len_k)``): segment id of each key.
            query_segment (:obj:`torch.Tensor` of shape ``(batch, len_q)``): segment id of each query.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, num_heads, len_q, len_k)``: additive attention bias.
        """
        # Bucket computation is integer-only and needs no gradient; only the
        # embedding lookup below participates in autograd.
        with torch.no_grad():

            batch = key_pos.size(0)
            keylen = key_pos.size(1)
            querylen = query_pos.size(1)

            assert key_pos.size(0) == query_pos.size(0)
            assert keylen == key_segment.size(1) and querylen == query_segment.size(1)

            # Reshape for broadcasting to (batch, len_q, len_k).
            key_pos = key_pos.view(batch, -1, keylen)
            query_pos = query_pos.view(batch, querylen, -1)
            key_segment = key_segment.view(batch, -1, keylen)
            query_segment = query_segment.view(batch, querylen, -1)

            relative_position_bucket = self._segment_relative_position_bucket(
                query_segment, key_segment
            )
            # Offset segment buckets past the relative-position range so the two
            # bucket families do not overlap.
            relative_position_bucket = relative_position_bucket + self.num_buckets

            # b*q*k
            absolute_position_bucket = self._position_bucket(
                torch.arange(keylen, dtype=torch.int32, device=relative_position_bucket.device)[
                    None, :
                ]
                - torch.arange(querylen, dtype=torch.int32, device=relative_position_bucket.device)[
                    :, None
                ],
                bidirectional=self.bidirectional,
                num_buckets=self.num_buckets,
                max_distance=self.max_distance,
            )
            # Same-segment pairs fall back to ordinary relative-position buckets.
            relative_position_bucket = torch.where(
                (key_segment == query_segment),
                absolute_position_bucket[None, :, :],
                relative_position_bucket,
            )
            # (batch, len_q, len_k)

        # (batch, len_q, len_k, num_heads)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        # (batch, num_heads, len_q, len_k)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _segment_relative_position_bucket(self, query_segment, key_segment):
        # Flatten a (query_segment, key_segment) pair into a single bucket index.
        return query_segment * self.num_segments + key_segment

    def _position_bucket(
        self, relative_position, bidirectional=True, num_buckets=32, max_distance=128
    ):
        # T5-style bucketing: exact buckets for small distances, logarithmically
        # growing buckets up to max_distance, one shared bucket beyond it.
        relative_buckets = 0
        if bidirectional:
            # Half the buckets for positions after the query, half for before.
            num_buckets //= 2
            relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            # Causal: only non-positive offsets are representable.
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_postion_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.int32)
        # Clamp so anything past max_distance shares the last bucket.
        relative_postion_if_large = torch.min(
            relative_postion_if_large,
            torch.full_like(relative_postion_if_large, num_buckets - 1),
        )
        relative_buckets += torch.where(
            is_small, relative_position.to(torch.int32), relative_postion_if_large
        )
        return relative_buckets
class BucketPositionBias(bmt.DistributedModule):
    """Position bias where cross-segment bucket indices are supplied by the caller.

    Where ``rel_buckets == 0`` (same segment) a T5-style bidirectional
    relative-position bucket is computed from the query/key positions; elsewhere
    the caller-provided bucket (shifted past the position buckets) selects one
    of ``num_segment_bucket`` extra rows.

    Args:
        num_heads (int): number of attention heads the bias is produced for.
        num_buckets (int): number of relative-position buckets. Defaults to 32.
        num_segment_bucket (int): number of caller-defined segment buckets. Defaults to 32.
        max_distance (int): distances beyond this share the last buckets. Defaults to 128.
        dtype (torch.dtype): parameter dtype. Defaults to torch.half.
        init_mean (float): mean of the normal initializer. Defaults to 0.0.
        init_std (float): std of the normal initializer. Defaults to 1.
    """

    def __init__(
        self,
        num_heads: int,
        num_buckets: int = 32,
        num_segment_bucket: int = 32,
        max_distance: int = 128,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ) -> None:
        super().__init__()

        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.num_segment_bucket = num_segment_bucket
        self.max_distance = max_distance

        self.relative_attention_bias = bmt.DistributedParameter(
            torch.empty(num_buckets + num_segment_bucket, num_heads, dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(
        self,
        query_pos: torch.Tensor,  # (batch, len_q)
        key_pos: torch.Tensor,  # (batch, len_k)
        rel_buckets: torch.Tensor,  # (batch, len_q, len_k)
    ):
        """Return an additive bias of shape ``(batch, num_heads, len_q, len_k)``."""
        # Bucket computation needs no gradient; only the embedding lookup below
        # participates in autograd.
        with torch.no_grad():

            batch = key_pos.size(0)
            keylen = key_pos.size(1)
            querylen = query_pos.size(1)

            assert key_pos.size(0) == query_pos.size(0)
            assert (
                rel_buckets.size(0) == batch
                and rel_buckets.size(1) == querylen
                and rel_buckets.size(2) == keylen
            )

            # Shift caller buckets (1-based) past the position-bucket range so
            # the two bucket families do not overlap.
            relative_position_bucket = rel_buckets - 1 + self.num_buckets

            # b*q*k
            inner_segment_bucket = self._position_bucket(
                key_pos[..., None, :] - query_pos[..., :, None],
                num_buckets=self.num_buckets,
                max_distance=self.max_distance,
            )
            # rel_buckets == 0 marks same-segment pairs, which use ordinary
            # relative-position buckets.
            relative_position_bucket = torch.where(
                rel_buckets == 0,
                inner_segment_bucket,
                relative_position_bucket,
            )
            # (batch, len_q, len_k)

        # (batch, len_q, len_k, num_heads)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        # (batch, num_heads, len_q, len_k)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _position_bucket(self, relative_position, num_buckets=32, max_distance=128):
        # T5-style bidirectional bucketing: half the buckets for each sign,
        # exact for small distances, logarithmic up to max_distance.
        relative_buckets = 0
        num_buckets //= 2
        relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
        relative_position = torch.abs(relative_position)
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_postion_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.int32)
        # Clamp so anything past max_distance shares the last bucket.
        relative_postion_if_large = torch.min(
            relative_postion_if_large,
            torch.full_like(relative_postion_if_large, num_buckets - 1),
        )
        relative_buckets += torch.where(
            is_small, relative_position.to(torch.int32), relative_postion_if_large
        )
        return relative_buckets
class RotaryEmbedding(bmt.DistributedModule):
    """Rotary position embedding (RoPE) applied directly to input vectors.

    Args:
        dim (int): embedding dimension (``dim // 2`` frequencies are generated,
            so an even ``dim`` is expected).
        base (int): frequency base of the inverse-frequency spectrum. Defaults to 10000.
        distance_scale (int | float): multiplier applied to positions before rotation. Defaults to 1.
        dtype (torch.dtype): dtype of the cached inverse frequencies. Defaults to torch.half.
    """

    def __init__(
        self,
        dim,
        base=10000,
        distance_scale: Union[int, float] = 1,
        dtype: torch.dtype = torch.half,
    ):
        super().__init__()
        # NOTE(review): inv_freq is created on "cuda" unconditionally, so this
        # module cannot run on a CPU-only build — confirm this is intended.
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, device="cuda", dtype=torch.float32) / dim)
        )
        inv_freq = inv_freq.to(dtype)
        self.distance_scale = distance_scale
        self.dtype = dtype
        # Plain attribute (not a registered buffer), so it is not part of
        # state_dict and is rebuilt on construction.
        self.inv_freq = inv_freq

    def forward(self, x: torch.Tensor, x_pos: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(..., dim)``): Inputs.
            x_pos (:obj:`torch.Tensor` of shape ``(...)``): Positions of inputs.
        """
        x_pos = x_pos * self.distance_scale
        freqs = x_pos[..., None].to(self.dtype) * self.inv_freq[None, :]  # (..., dim/2)

        # the same implementation as sat
        emb = torch.cat((freqs, freqs), dim=-1)  # (..., dim)
        emb_cos = emb.cos()  # (..., dim)
        emb_sin = emb.sin()  # (..., dim)

        # Rotate-half trick: (x1, x2) -> (-x2, x1) over the two halves of the
        # last dimension.
        rotate_x = torch.cat(
            [-x[..., x.size(-1) // 2 :], x[..., : x.size(-1) // 2]], dim=-1
        )  # (..., dim)

        return x * emb_cos + rotate_x * emb_sin
| 9,197 | 35.070588 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/feedforward.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import bmtrain as bmt
from .linear import Linear
class DenseGatedACT(bmt.DistributedModule):
    """Gated activation unit: a GELU gate computed from one projection of the
    input, multiplied element-wise with a second linear projection."""

    def __init__(
        self,
        dim_in: int,
        dim_ff: int,
        dtype=torch.half,
    ):
        super().__init__()
        # Gate branch.
        self.w_0 = Linear(
            dim_in=dim_in,
            dim_out=dim_ff,
            dtype=dtype,
            scale_before=False,
        )
        # Linear branch.
        self.w_1 = Linear(
            dim_in=dim_in,
            dim_out=dim_ff,
            dtype=dtype,
            scale_before=False,
        )
        self.act = torch.nn.GELU()

    def forward(self, x: torch.Tensor):
        """Transform ``x`` from ``dim_in`` to ``dim_ff`` through the gated nonlinearity.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): input activations.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, dim_ff)``: gated output.
        """  # noqa: E501
        gate = self.act(self.w_0(x))
        return gate * self.w_1(x)
class FeedForward(bmt.DistributedModule):
    r"""Position-wise feed-forward network.

    A gated projection up to ``dim_ff`` (:class:`DenseGatedACT`), optional
    dropout, then a projection back down to ``dim_model``.

    Args:
        dim_model (int): input/output dimension.
        dim_ff (int): inner dimension.
        dtype (optional): Defaults to torch.half.
        dropout_p (float, optional): dropout probability. Defaults to None (disabled).
    """  # noqa: E501

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        dtype=torch.half,
        dropout_p: Optional[float] = None,
    ):
        super().__init__()
        self.w_in = DenseGatedACT(
            dim_in=dim_model,
            dim_ff=dim_ff,
            dtype=dtype,
        )
        self.dropout = torch.nn.Dropout(dropout_p) if dropout_p is not None else None
        self.w_out = Linear(
            dim_in=dim_ff,
            dim_out=dim_model,
            dtype=dtype,
            scale_before=False,
        )

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): input of the feed-forward module.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``: output of the feed-forward module.
        """  # noqa: E501
        hidden = self.w_in(x)
        if self.dropout is not None:
            hidden = self.dropout(hidden)
        return self.w_out(hidden)
| 3,770 | 29.658537 | 176 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/layernorm.py | import torch
import bmtrain as bmt
@torch.jit.script  # type: ignore
def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):
    """RMS normalization: scale ``hidden`` by the reciprocal root-mean-square
    of its last dimension (computed in fp32 for numerical stability), then
    apply the per-feature ``weight``.
    """
    input_dtype = hidden.dtype
    mean_square = hidden.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
    normed = (hidden * torch.rsqrt(mean_square + eps)).to(input_dtype)
    return normed * weight
class LayerNorm(bmt.DistributedModule):
    """Root-mean-square layer normalization with a learned per-feature scale.

    Args:
        dim_norm (int): size of the normalized (last) dimension.
        dtype: parameter dtype, defaults to torch.half.
        eps (float): small constant added to the variance for stability.
        init_var (float): constant initial value of the scale parameter.
    """

    def __init__(
        self,
        dim_norm: int,
        dtype: torch.dtype = torch.half,
        eps: float = 1e-6,
        init_var: float = 1.0,
    ):
        super().__init__()
        self.eps = eps
        self.dim_norm = dim_norm
        # Per-feature scale, initialized to a constant.
        self.weight = bmt.DistributedParameter(torch.full((dim_norm,), init_var, dtype=dtype))

    def forward(self, x: torch.Tensor):
        """Normalize the last dimension of ``x`` (must equal ``dim_norm``)."""
        assert x.size(-1) == self.dim_norm
        return rms_layernorm(x, self.weight, self.eps)
| 1,180 | 29.282051 | 122 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/linear.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import bmtrain as bmt
import math
import torch.nn.functional as F
class Linear(bmt.DistributedModule):
    """Bias-free linear layer whose input (when ``scale_before``) or output
    is scaled by ``1/sqrt(dim_in)``.

    Args:
        dim_in (int): input feature dimension.
        dim_out (int): output feature dimension.
        dtype: parameter dtype, defaults to torch.half.
        init_mean (float): mean of the normal weight initializer.
        init_std (float): std of the normal weight initializer.
        scale_before (bool): apply the 1/sqrt(dim_in) factor before the
            matmul instead of after.
    """

    def __init__(
        self,
        dim_in: int,
        dim_out: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
        scale_before: bool = False,
    ):
        super().__init__()
        # Mirror torch.nn.Linear attribute names alongside the local ones.
        self.dim_in = self.in_features = dim_in
        self.dim_out = self.out_features = dim_out
        self.scale_before = scale_before
        self.weight = bmt.DistributedParameter(
            torch.empty((dim_out, dim_in), dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(self, x: torch.Tensor):
        """Apply ``x @ weight.T`` with 1/sqrt(dim_in) scaling.

        Args:
            x: tensor of shape ``(batch, seq_len, dim_in)``.

        Returns:
            Tensor of shape ``(batch, seq_len, dim_out)``.
        """
        scale = math.sqrt(self.dim_in)
        if self.scale_before:
            return F.linear(x / scale, self.weight)
        return F.linear(x, self.weight) / scale
| 1,901 | 31.793103 | 109 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/transformer.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import bmtrain as bmt
from typing import Optional, List, Tuple
from .blocks import TransformerBlock
from .layernorm import LayerNorm
class Encoder(bmt.DistributedModule):
    """Layers of encoder transformer blocks plus an final layernorm.

    Args:
        num_layers (int): number of layers.
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): dim_ff used in :py:class:`model_center.layer.FeedForward`.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to 0.
        mask_modules (List[Tuple[bool, bool]], optional): per-layer ``(mask_att, mask_ffn)``
            flags; a True flag removes that sub-block from the layer.
    """  # noqa: E501

    def __init__(
        self,
        num_layers: int,
        dim_model: int,
        dim_ff: int,
        num_heads: int,
        dim_head: int,
        dtype: torch.dtype = torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
        mask_modules: Optional[List[Tuple[bool, bool]]] = None,
    ):
        super().__init__()
        self.num_layers = num_layers
        # Validate per-layer (mask_att, mask_ffn) flags; default keeps every
        # sub-block in every layer.
        if mask_modules is not None:
            assert (
                len(mask_modules) == num_layers
            ), "The total number of masks should equal to num_layers"
            for mask_module in mask_modules:
                assert (
                    len(mask_module) == 2
                ), "For encoder, each mask should be (mask_att, mask_ffn)"
        else:
            mask_modules = [(False, False)] * num_layers
        # Each layer is wrapped in a bmtrain CheckpointBlock (checkpointed
        # execution under ZeRO-style partitioning).
        self.layers = bmt.TransformerBlockList(
            [
                bmt.CheckpointBlock(
                    TransformerBlock(
                        dim_model=dim_model,
                        dim_ff=dim_ff,
                        num_heads=num_heads,
                        dim_head=dim_head,
                        dtype=dtype,
                        eps=eps,
                        dropout_p=dropout_p,
                        mask_att=mask_modules[ith][0],
                        mask_ffn=mask_modules[ith][1],
                    )
                )
                for ith in range(num_layers)
            ]
        )
        self.output_layernorm = LayerNorm(dim_norm=dim_model, dtype=dtype, eps=eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_bias: torch.Tensor,
        use_cache: bool = False,
        past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
    ):
        """
        Args:
            hidden-states (:obj:`torch.Tensor` of shape ``(batch, seq_enc, dim_model)``): Input of encoder, might be the embedding of a batch of sequences.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_enc, seq_enc)``): Avoid invalid areas to participate in the calculation
            position_bias(:obj:`torch.Tensor` of shape ``(num_heads, seq_enc, seq_enc)``) Provides position information to attention mechanism.
            use_cache (bool): when True, run layer-by-layer under no_grad and also return per-layer key/value caches for incremental decoding.
            past_key_values (optional): per-layer (key, value) caches from previous decoding steps.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_enc, dim_model)``: The encoder output.
        """  # noqa: E501
        if not use_cache:
            # Training / full-sequence path: run the fused block list.
            hidden_states = self.layers(hidden_states, attention_mask, position_bias)
            hidden_states = self.output_layernorm(hidden_states)
            return hidden_states
        else:
            # Inference path: iterate layers manually to collect kv caches.
            with torch.no_grad():
                current_key_values = []
                for i, module in enumerate(self.layers):
                    hidden_states = module(
                        hidden_states,
                        attention_mask,
                        position_bias,
                        past_key_value=past_key_values[i] if past_key_values else None,
                        use_cache=use_cache,
                    )
                    if use_cache:
                        # Each layer returns (hidden_states, (key, value)).
                        current_key_values.append(hidden_states[1])
                    hidden_states = hidden_states[0]
                hidden_states = self.output_layernorm(hidden_states)
                if use_cache:
                    return hidden_states, current_key_values
                else:
                    return hidden_states
| 4,948 | 37.664063 | 155 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/__init__.py | from .embedding import Embedding, EmbeddingExt
from .position_embedding import SegmentPositionEmbedding, BucketPositionBias, RotaryEmbedding
from .linear import Linear
from .layernorm import LayerNorm
from .attention import Attention
from .feedforward import FeedForward
from .blocks import TransformerBlock
from .transformer import Encoder
| 341 | 37 | 93 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/attention.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
import bmtrain as bmt
import math
from .linear import Linear
class Attention(bmt.DistributedModule):
    """Multi-head attention with an additive position bias and boolean mask.

    Args:
        dim_model (int): hidden size of the model.
        num_heads (int): number of attention heads.
        dim_head (int): dimension of each head.
        dtype: parameter dtype. Defaults to torch.half.
        dropout_p (float, optional): attention-probability dropout; ``None`` disables it.
    """

    def __init__(
        self,
        dim_model: int,
        num_heads: int,
        dim_head: int,
        dtype: torch.dtype = torch.half,
        dropout_p: Optional[float] = None,
    ) -> None:
        super().__init__()
        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head
        # Fused projections for all heads: dim_model -> num_heads * dim_head.
        self.project_q = Linear(self.dim_model, self.num_heads * self.dim_head, dtype=dtype)
        self.project_k = Linear(self.dim_model, self.num_heads * self.dim_head, dtype=dtype)
        self.project_v = Linear(self.dim_model, self.num_heads * self.dim_head, dtype=dtype)
        self.attention_out = Linear(self.num_heads * self.dim_head, self.dim_model, dtype=dtype)
        self.softmax = torch.nn.Softmax(dim=-1)
        if dropout_p is not None:
            self.dropout = torch.nn.Dropout(p=dropout_p)
        else:
            self.dropout = None

    def forward(
        self,
        hidden_q: torch.Tensor,
        hidden_kv: torch.Tensor,
        attention_mask: torch.BoolTensor,
        position_bias: torch.Tensor,
        use_cache: bool = False,
        past_kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """This model inherits from bmt.DistributedModule.

        Args:
            hidden_q (:obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``): Indices of input sequence tokens. It will be embedded by model's internal embedding lookup matrix.
            hidden_kv (:obj:`torch.Tensor` of shape ``(batch, len_k, dim_model)``): Length of input sequence before padding.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, len_q, len_k)``): Used to avoid performing attention on padding token indices.
            position_bias(:obj:`torch.Tensor` of shape ``(num_heads, len_q, len_k)`` or ``(1, num_heads, len_k, len_q)``): Provide positional information about tensor `key_value` and `query`.
            use_cache (bool): when True, also return the (key, value) tensors for incremental decoding.
            past_kv (optional): cached (key, value) tensors from earlier steps, prepended to the current keys/values.

        Return:
            out (:obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``): The attention output.
        """  # noqa: E501
        batch_size = hidden_q.size(0)
        len_q = hidden_q.size(1)
        len_k = hidden_kv.size(1)
        # The usual 1/sqrt(dim_head) attention scaling is split evenly between
        # q and k (each divided by dim_head**0.25); see the comment at the
        # score computation below.
        h_q = self.project_q(hidden_q) / math.sqrt(math.sqrt(self.dim_head))
        h_k = self.project_k(hidden_kv) / math.sqrt(math.sqrt(self.dim_head))
        h_v = self.project_v(hidden_kv)
        # (b, len, n_h * d_h) -> (b, n_h, len, d_h)
        h_q = h_q.view(batch_size, len_q, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        h_k = h_k.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        h_v = h_v.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        if past_kv is not None:
            # Prepend cached keys/values for incremental decoding.
            h_k = torch.cat([past_kv[0], h_k], dim=-2)
            h_v = torch.cat([past_kv[1], h_v], dim=-2)
            len_k = h_k.size(-2)
        # (b, n_h, len_q, d_h) @ (b, n_h, d_h, len_k) -> (b, n_h, len_q, len_k)
        score = torch.matmul(
            h_q, h_k.transpose(-1, -2)
        )  # / math.sqrt(self.dim_head) moved to line 75~76
        score = score + position_bias
        # Mask invalid positions with -inf before the softmax ...
        score = torch.masked_fill(
            score,
            attention_mask.view(batch_size, 1, len_q, len_k) == False,
            torch.scalar_tensor(float("-inf"), device=score.device, dtype=score.dtype),
        )
        score = self.softmax(score)
        # ... and zero them again afterwards so rows that are entirely masked
        # contribute 0 instead of the NaNs softmax produces on all -inf input.
        score = torch.masked_fill(
            score,
            attention_mask.view(batch_size, 1, len_q, len_k) == False,
            torch.scalar_tensor(0, device=score.device, dtype=score.dtype),
        )
        if self.dropout is not None:
            score = self.dropout(score)
        # (b, n_h, len_q, len_k) @ (b, n_h, len_k, d_h) -> (b, n_h, len_q, d_h)
        score = torch.matmul(score, h_v)
        # Merge heads back: (b, n_h, len_q, d_h) -> (b, len_q, n_h * d_h)
        score = score.view(batch_size, self.num_heads, len_q, self.dim_head).permute(0, 2, 1, 3)
        score = score.contiguous().view(batch_size, len_q, self.num_heads * self.dim_head)
        score = self.attention_out(score)
        if use_cache:
            return score, (h_k, h_v)
        else:
            return score
| 4,800 | 38.677686 | 191 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/blocks.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
import bmtrain as bmt
from .layernorm import LayerNorm
from .attention import Attention
from .feedforward import FeedForward
class SelfAttentionBlock(bmt.DistributedModule):
    """Pre-norm self-attention sub-block: layernorm -> attention ->
    optional dropout -> scaled residual add.

    Args:
        dim_model (int): model (residual) dimension.
        num_heads (int): number of attention heads.
        dim_head (int): per-head dimension.
        dtype: parameter dtype, defaults to torch.half.
        eps (float): layernorm epsilon.
        dropout_p (float, optional): dropout probability.
    """

    def __init__(
        self,
        dim_model: int,
        num_heads: int,
        dim_head: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
    ):
        super().__init__()
        self.layernorm_before_attention = LayerNorm(
            dim_model,
            dtype=dtype,
            eps=eps,
        )
        self.self_attention = Attention(
            dim_model=dim_model,
            num_heads=num_heads,
            dim_head=dim_head,
            dtype=dtype,
            dropout_p=dropout_p,
        )
        self.dropout = torch.nn.Dropout(dropout_p) if dropout_p else None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_bias: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """Self-attend over ``hidden_states`` with a scaled residual.

        With ``use_cache`` the attention key/value cache is returned
        alongside the hidden states for incremental decoding.
        """
        normed = self.layernorm_before_attention(hidden_states)
        attn_out = self.self_attention(
            normed, normed, attention_mask, position_bias, use_cache, past_key_value
        )
        current_key_value = None
        if use_cache:
            attn_out, current_key_value = attn_out
        if self.dropout is not None:
            attn_out = self.dropout(attn_out)
        # Residual connection with a constant damping factor.
        hidden_states = (hidden_states + attn_out) / 1.05
        if use_cache:
            return hidden_states, current_key_value
        return hidden_states
class FFNBlock(torch.nn.Module):
    """Pre-norm feed-forward sub-block: layernorm -> FFN -> optional dropout
    -> scaled residual add.

    Args:
        dim_model (int): model (residual) dimension.
        dim_ff (int): inner FFN dimension.
        dtype: parameter dtype, defaults to torch.half.
        eps (float): layernorm epsilon.
        dropout_p (float, optional): dropout probability.
    """

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = 0,
    ):
        super().__init__()
        self.layernorm_before_ffn = LayerNorm(
            dim_model,
            dtype=dtype,
            eps=eps,
        )
        self.ffn = FeedForward(
            dim_model,
            dim_ff,
            dtype=dtype,
            dropout_p=dropout_p,
        )
        self.dropout = torch.nn.Dropout(dropout_p) if dropout_p else None

    def forward(
        self,
        hidden_states: torch.Tensor,
    ):
        """Apply the FFN with a scaled residual connection.

        Shape: ``(batch, seq_self, dim_model)`` -> same.
        """
        ffn_out = self.ffn(self.layernorm_before_ffn(hidden_states))
        if self.dropout is not None:
            ffn_out = self.dropout(ffn_out)
        # Residual connection with a constant damping factor.
        return (hidden_states + ffn_out) / 1.05
class TransformerBlock(torch.nn.Module):
    """One transformer layer: an optional self-attention sub-block followed
    by an optional feed-forward sub-block. Either sub-block can be masked
    out at construction time.

    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): inner dimension of the feed-forward sub-block.
        num_heads (int): number of attention heads.
        dim_head (int): per-head dimension.
        dtype: parameter dtype, defaults to torch.half.
        eps (float): layernorm epsilon.
        dropout_p (float, optional): dropout probability.
        mask_att (bool): when True, the attention sub-block is not created
            and is skipped at forward time.
        mask_ffn (bool): when True, the feed-forward sub-block is not
            created and is skipped at forward time.
    """

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        num_heads: int,
        dim_head: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
        mask_att: bool = False,
        mask_ffn: bool = False,
    ):
        super().__init__()
        self.mask_att = mask_att
        self.mask_ffn = mask_ffn
        if not mask_att:
            self.self_att = SelfAttentionBlock(
                dim_model=dim_model,
                num_heads=num_heads,
                dim_head=dim_head,
                dtype=dtype,
                eps=eps,
                dropout_p=dropout_p,
            )
        if not mask_ffn:
            self.ffn = FFNBlock(
                dim_model=dim_model,
                dim_ff=dim_ff,
                dtype=dtype,
                eps=eps,
                dropout_p=dropout_p,
            )

    def forward(
        self,
        self_hidden_states: torch.Tensor,
        self_attention_mask: torch.Tensor,
        self_position_bias: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """Run the (possibly masked) sub-blocks over ``self_hidden_states``.

        Returns a ``(batch, seq_self, dim_model)`` tensor; with ``use_cache``
        also returns the attention key/value cache (``None`` when the
        attention sub-block is masked).
        """
        current_key_value = None
        if self.mask_att:
            hidden_states = self_hidden_states
        else:
            out = self.self_att(
                self_hidden_states,
                attention_mask=self_attention_mask,
                position_bias=self_position_bias,
                use_cache=use_cache,
                past_key_value=past_key_value,
            )
            if use_cache:
                hidden_states, current_key_value = out
            else:
                hidden_states = out
        if not self.mask_ffn:
            hidden_states = self.ffn(hidden_states)
        if use_cache:
            return hidden_states, current_key_value
        return hidden_states
| 8,751 | 34.008 | 198 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/export.py | import os
import time
import functools
import torch
import bmtrain as bmt
import json
from cpm_live.models import CPMBee
from .log import logger
from typing import List, Optional
def rename_if_exists(file_path):
    """If *file_path* exists, rename it in place to
    ``<root>_bak_<timestamp><ext>`` so a subsequent write cannot clobber it.

    Rename failures are logged and swallowed (best effort).
    """
    if not os.path.exists(file_path):
        return
    timestamp = time.strftime('%Y%m%d%H%M%S')
    file_dir, file_name = os.path.split(file_path)
    file_root, file_ext = os.path.splitext(file_name)
    new_file_name = f"{file_root}_bak_{timestamp}{file_ext}"
    new_file_path = os.path.join(file_dir, new_file_name)
    try:
        os.rename(file_path, new_file_path)
    except OSError as e:
        # Narrowed from `except Exception` (os.rename raises OSError) and
        # switched from the deprecated Logger.warn alias to warning().
        logger.warning(
            "rename file failed,file_path={file_path}, new_file_path={new_file_path},err={err}"
            .format(file_path=file_path, new_file_path=new_file_path, err=str(e)))
    else:
        logger.info(f"File '{file_name}' already exists. Renamed to '{new_file_name}'")
def rename_if_exists_decorator(func):
    """Wrap ``func(file_path, ...)`` so any existing file at ``file_path``
    is backed up (renamed) before ``func`` runs.
    """
    @functools.wraps(func)
    def wrapped(file_path, *args, **kwargs):
        rename_if_exists(file_path)
        return func(file_path, *args, **kwargs)
    return wrapped
@rename_if_exists_decorator
def bmt_save(file_path: str, model: CPMBee, export_files: Optional[List[str]] = None):
    # Save the model via bmtrain (existing file is backed up by the
    # decorator); optionally record the written path for later export.
    bmt.save(model, file_path)
    if export_files is not None:
        export_files.append(file_path)
@rename_if_exists_decorator
def torch_save(file_path: str, obj: object, export_files: Optional[List[str]] = None):
    # torch.save *obj* (existing file is backed up by the decorator);
    # optionally record the written path for later export.
    torch.save(obj, file_path)
    if export_files is not None:
        export_files.append(file_path)
@rename_if_exists_decorator
def json_save(file_path: str, obj: object, export_files: Optional[List[str]] = None):
    # Serialize *obj* as JSON (existing file is backed up by the decorator);
    # optionally record the written path for later export.
    with open(file_path, "w") as data_f:
        json.dump(obj, data_f)
    if export_files is not None:
        export_files.append(file_path)
| 1,820 | 30.947368 | 95 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/object.py | import bmtrain as bmt
import pickle
import torch
def allgather_objects(obj):
    """All-gather an arbitrary picklable Python object across bmtrain ranks.

    Returns a list with one entry per rank (this rank's ``obj`` included),
    in rank order.
    """
    if bmt.world_size() == 1:
        return [obj]
    with torch.no_grad():
        # Serialize locally, then exchange payload lengths so every rank can
        # pad its byte buffer to the global maximum before the gather.
        data_bytes: bytes = pickle.dumps(obj)
        data_length: int = len(data_bytes)
        gpu_data_length = torch.tensor([data_length], device="cuda", dtype=torch.long)
        gathered_length = bmt.distributed.all_gather(gpu_data_length).view(-1).cpu()
        max_data_length = gathered_length.max().item()
        # Copy the pickled payload into a zero-padded GPU uint8 buffer.
        gpu_data_bytes = torch.zeros(max_data_length, dtype=torch.uint8, device="cuda")
        byte_storage = torch.ByteStorage.from_buffer(data_bytes)
        gpu_data_bytes[:data_length] = torch.ByteTensor(byte_storage)
        gathered_data = bmt.distributed.all_gather(gpu_data_bytes).cpu()
        # Trim each rank's buffer to its true length and deserialize.
        ret = []
        for i in range(gathered_data.size(0)):
            data_bytes = gathered_data[i, : gathered_length[i].item()].numpy().tobytes()
            ret.append(pickle.loads(data_bytes))
        return ret
| 994 | 33.310345 | 88 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/data_utils.py | import torch
def pad(orig_items, key, padding_value=0, padding_side="left"):
    """Batch the tensors stored under ``item[key]``, padding the last
    dimension to the longest length.

    Each ``item[key]`` is a tensor of shape ``(1, L)`` or ``(1, L, D)`` (or a
    list of such tensors, which is flattened first); 1-D tensors are simply
    concatenated. Padding is added on the left or right per ``padding_side``.
    """
    # Flatten: entries holding a list of tensors expand to one entry each.
    if isinstance(orig_items[0][key], list):
        assert isinstance(orig_items[0][key][0], torch.Tensor)
        items = [{key: t} for entry in orig_items for t in entry[key]]
    else:
        assert isinstance(orig_items[0][key], torch.Tensor)
        items = orig_items

    batch_size = len(items)
    shape = items[0][key].shape
    dim = len(shape)
    assert dim <= 3
    lengths = [item[key].shape[-1] for item in items]
    max_length = max(lengths)
    min_length = min(lengths)
    dtype = items[0][key].dtype

    # Fast paths that need no padding at all.
    if dim == 1:
        return torch.cat([item[key] for item in items], dim=0)
    if dim == 2 and max_length == min_length:
        return torch.cat([item[key] for item in items], dim=0)

    if dim == 2:
        out = torch.full((batch_size, max_length), padding_value, dtype=dtype)
    else:
        out = torch.full((batch_size, max_length, shape[-1]), padding_value, dtype=dtype)

    for i, item in enumerate(items):
        row = item[key][0]
        n = len(row)
        if dim == 2:
            if padding_side == "left":
                out[i, -n:] = row.clone()
            else:
                out[i, :n] = row.clone()
        else:
            if padding_side == "left":
                out[i, -n:, :] = row.clone()
            else:
                out[i, :n, :] = row.clone()
    return out
| 1,550 | 33.466667 | 94 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/log.py | import os
import sys
from typing import Any, Dict, Optional, Tuple, Union
import datetime
import json
import logging
import bmtrain as bmt
# Set up the common logger
def _get_logger():
log = logging.getLogger('__name__')
log.setLevel(logging.INFO)
console_handle = logging.StreamHandler(sys.stdout)
node_name = os.getenv("NODE_NAME", str(bmt.rank()))
console_handle.setFormatter(
logging.Formatter(
'[%(levelname)s][%(asctime)s][{}][%(filename)s:%(lineno)d:%(process)d] - %(message)s'
.format(node_name),
datefmt='%Y-%m-%d %H:%M:%S'
)
)
log.addHandler(console_handle)
return log
# Module-wide logger instance shared by the rest of cpm_live.
logger = _get_logger()
class LogManager:
    """Daily training-log writer.

    Appends one JSON line per ``write()`` call to ``log.YYYYMMDD.txt`` under
    *path*, and carries the cumulative token count ("token pass") across
    restarts by reading the most recent existing log file.
    """

    def __init__(self, path: str):
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path
        now = self.get_log_time()
        latest_log: Union[Dict[str, Any], None] = None
        # Look back up to 15 days for the most recent log file so the global
        # token counter survives pauses in training.
        for _ in range(15):
            log_name = self.get_log_name(now)
            if os.path.exists(log_name):
                with open(log_name, "r") as flog:
                    latest_log = json.loads(flog.readlines()[-1])  # get last log
                break
            now -= datetime.timedelta(days=1)
        if latest_log is None:
            self.global_token_pass = 0
        else:
            self.global_token_pass = latest_log["token pass"]

    def get_log_time(self) -> datetime.datetime:
        # UTC+16 shifts the day boundary used for log-file rollover;
        # presumably chosen to align with a local schedule — TODO confirm.
        return datetime.datetime.utcnow() + datetime.timedelta(hours=16)

    def get_log_name(self, now: Optional[datetime.datetime] = None):
        # One log file per (shifted) calendar day.
        if now is None:
            now = self.get_log_time()
        return os.path.join(self.path, "log.%s.txt" % now.strftime("%Y%m%d"))

    def write(
        self,
        time: float,
        iteration: int,
        loss: float,
        lr: float,
        lr_scale: float,
        time_usage: Dict[str, float],
        mem_usage: Dict[str, Tuple[float, float]],
        avg_time: float,
        token_max: float,
        token_pass: float,
        throughout: float,
        grad_norm: float,
        mask_max: float,
        num_gpus: int,
        task_loss: Dict[str, float],
        model_inspect: Optional[Any] = None,
    ):
        """Append one JSON training record to today's log file.

        ``token_pass`` is offset by the cumulative count recovered at
        startup so "token pass" in the log is monotonically global.
        """
        with open(self.get_log_name(), "a") as fp:
            ret = {
                "time": time,
                "iter": iteration,
                "loss": loss,
                "lr": lr,
                "lr scale": int(lr_scale),
                "time usage": time_usage,
                "mem usage": mem_usage,
                "avg time (s)": avg_time,
                "token/max": token_max,
                "token pass": token_pass + self.global_token_pass,
                "throughout (token/s)": throughout,
                "grad_norm": grad_norm,
                "mask/max": mask_max,
                "num_gpus": num_gpus,
                "task_loss": task_loss,
            }
            if model_inspect is not None:
                ret["model_inspect"] = model_inspect
            fp.write(json.dumps(ret, ensure_ascii=False) + "\n")
| 3,067 | 29.989899 | 97 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/gradient_shrink.py | import torch
class OpGradientShrink(torch.autograd.Function):
    """Autograd op that is the identity in the forward pass and multiplies
    the incoming gradient by a constant factor ``alpha`` in the backward pass.
    """

    @staticmethod
    def forward(ctx, x: torch.Tensor, alpha: float):
        # Stash the shrink factor for backward; no computation here.
        ctx.alpha = alpha
        return x

    @staticmethod
    def backward(ctx, grad_output):
        # Scale the gradient w.r.t. `x`; `alpha` itself receives no gradient.
        return ctx.alpha * grad_output, None
def gradient_shrink(x: torch.Tensor, alpha: float = 0.1) -> torch.Tensor:
    """Identity in the forward pass; scales the gradient of ``x`` by ``alpha`` in backward."""
    return OpGradientShrink.apply(x, alpha)
| 382 | 21.529412 | 57 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/config.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import copy
from typing import Any, Dict, Union
from .log import logger
def load_dataset_config(dataset_path: str):
    """Load a dataset-config JSON list, resolving each dataset's relative
    ``path``/``transforms`` against the platform dataset map when the
    ``PLATFORM_CONFIG_PATH`` environment variable is set.
    """
    # Fix: files were opened via json.load(open(...)) and never closed;
    # use context managers so the handles are released deterministically.
    with open(dataset_path, "r", encoding="utf-8") as f:
        cfg = json.load(f)
    platform_config_path = os.getenv("PLATFORM_CONFIG_PATH")
    if platform_config_path is None:
        logger.info(
            "no platform_config_path. Directly load dataset_path({dataset_path})"
            .format(dataset_path=dataset_path)
        )
        return cfg
    with open(platform_config_path, "r", encoding="utf-8") as f:
        path_dict = json.load(f)["dataset_map"]
    logger.info(
        "load dataset_path({dataset_path}) with platform_config_path({platform_config_path})"
        .format(dataset_path=dataset_path, platform_config_path=platform_config_path)
    )
    for dataset in cfg:
        # Both relative paths resolve against the same mapped base directory.
        base = path_dict[dataset["dataset_name"]]
        dataset["path"] = os.path.join(base, dataset["path"])
        dataset["transforms"] = os.path.join(base, dataset["transforms"])
    return cfg
class Config(object):
    """Base model-configuration object with JSON (de)serialization helpers."""

    def __init__(self):
        super().__init__()

    @classmethod
    def from_json_file(cls, json_file: Union[str, os.PathLike], **args):
        """Construct a config from a JSON file; **args override file values."""
        return cls(**cls._dict_from_json_file(json_file, **args))

    @classmethod
    def _dict_from_json_file(cls, json_file: Union[str, os.PathLike], **args):
        # Read the JSON dict, then layer keyword overrides on top.
        with open(json_file, "r", encoding="utf-8") as reader:
            res = json.loads(reader.read())
        res.update(args)
        return res

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """Write this config to *json_file_path* as pretty-printed JSON."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())

    def to_json_string(self) -> str:
        """Serialize the instance attributes to sorted, indented JSON."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_dict(self) -> Dict[str, Any]:
        """Return a deep copy of the instance ``__dict__``."""
        return copy.deepcopy(self.__dict__)
| 2,672 | 33.269231 | 93 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/__init__.py | from .config import Config
from .data_utils import pad
from .object import allgather_objects
from .log import LogManager, logger
from .config import load_dataset_config
| 169 | 27.333333 | 39 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/generation/bee.py | from typing import Any, Dict, List, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from .generation_utils import BeamHypotheses, apply_repetition_penalty
from ..tokenizers.bee import CPMBeeTokenizer
from ..models.bee import CPMBee
from ..training_tasks.bee.pretrain import convert_data_to_id
from ..utils import pad
class CPMBeeGeneration:
    def __init__(self, model: CPMBee, tokenizer: CPMBeeTokenizer):
        # Generation wrapper: holds an eval-mode model and its tokenizer.
        model.eval()
        self.model = model
        self.tokenizer = tokenizer
    def _convert_to_tensors(self, data: Any, in_context_samples: List[Any] = []):
        """Encode one structured sample (plus optional in-context samples)
        into the flat numpy arrays consumed by the CPM-Bee model.

        Every leaf under ``data["<ans>"]`` is replaced by a numbered
        ``<ans_i>`` placeholder; the positions of those placeholders become
        the segments to predict.

        NOTE(review): ``in_context_samples`` uses a mutable default, but it is
        only iterated here, so the shared-default pitfall does not apply.
        """
        answer_placeholders = []

        def _put_placeholder(data: Any, path: List[str] = []):
            # Depth-first walk: each leaf becomes a numbered placeholder and
            # its key-path is recorded in `answer_placeholders`.
            if isinstance(data, dict):
                ret = {}
                for k, v in data.items():
                    ret[k] = _put_placeholder(v, path + [k])
                return ret
            else:
                answer_placeholders.append(path)
                return "<ans_{}>".format(len(answer_placeholders))

        data["<ans>"] = _put_placeholder(data["<ans>"])
        (
            input_ids,
            input_id_subs,
            context,
            segment_ids,
            segment_rel,
            n_segments,
            table_states,
        ) = convert_data_to_id(self.tokenizer, data, shuffle_answer=False, max_depth=8)
        # Map each <ans> sub-token id back to its placeholder index i
        # (parsed out of the "<ans_i>" string in the extension table).
        sub_ans_map: Dict[int, int] = {}
        for fake_id, token_sub in table_states["token_id_table"]["<ans>"].items():
            token = table_states["ext_table"][fake_id]
            if token.startswith("<ans_") and token.endswith(">"):
                ans_id = int(token[5:-1])
                sub_ans_map[token_sub] = ans_id
        # Split the encoded sequence: context==0 <ans> positions become
        # (segment_id, ans_id) prediction targets; context!=0 tokens are kept
        # as model input.
        tmp_input_ids = []
        tmp_input_sub = []
        tmp_input_seg = []
        predict_segments: List[Tuple[int, int]] = []
        for i in range(input_ids.shape[0]):
            if context[i] == 0:
                if input_ids[i] == self.tokenizer.encoder["<ans>"]:
                    # is ans
                    # (segment_id, ans_id)
                    predict_segments.append((segment_ids[i], sub_ans_map[input_id_subs[i]]))
            else:
                tmp_input_ids.append(input_ids[i])
                tmp_input_sub.append(input_id_subs[i])
                tmp_input_seg.append(segment_ids[i])
        if len(predict_segments) == 0:
            raise ValueError("No answer to predict")
        # Re-pack the kept tokens; all positions are now context (1).
        input_ids = np.array(tmp_input_ids, dtype=np.int32)
        input_id_subs = np.array(tmp_input_sub, dtype=np.int32)
        context = np.full_like(tmp_input_ids, 1, dtype=np.int8)
        segment_ids = np.array(tmp_input_seg, dtype=np.int32)
        sample_ids = np.zeros(input_ids.shape, dtype=np.int32)
        segment_rel_offset = np.zeros(input_ids.shape, dtype=np.int32)
        num_segments = np.full(input_ids.shape, n_segments, dtype=np.int32)
        # Append each in-context sample with its own sample id (i + 1) and an
        # offset into the concatenated segment-relation table.
        for i, sample in enumerate(in_context_samples):
            (
                sample_input_ids,
                sample_id_subs,
                _,
                sample_segments,
                sample_rel,
                n_segments,
                table_states,
            ) = convert_data_to_id(self.tokenizer, sample, table_states, max_depth=8)
            input_ids = np.concatenate([input_ids, sample_input_ids], axis=0)
            input_id_subs = np.concatenate([input_id_subs, sample_id_subs], axis=0)
            context = np.concatenate(
                [context, np.ones(sample_input_ids.shape, dtype=np.int8)], axis=0
            )
            segment_ids = np.concatenate([segment_ids, sample_segments], axis=0)
            segment_rel_offset = np.concatenate(
                [
                    segment_rel_offset,
                    np.full(sample_input_ids.shape, segment_rel.shape[0], dtype=np.int32),
                ],
                axis=0,
            )
            segment_rel = np.concatenate([segment_rel, sample_rel], axis=0)
            sample_ids = np.concatenate(
                [sample_ids, np.full(sample_input_ids.shape, i + 1, dtype=np.int32)], axis=0
            )
            num_segments = np.concatenate(
                [num_segments, np.full(sample_input_ids.shape, n_segments, dtype=np.int32)], axis=0
            )
        input_pos = np.arange(input_ids.shape[0], dtype=np.int32)
        return (
            input_ids,
            input_id_subs,
            input_pos,
            context,
            segment_ids,
            segment_rel_offset,
            segment_rel,
            sample_ids,
            num_segments,
            predict_segments,
            answer_placeholders,
            table_states["ext_table"],
            table_states["token_id_table"],
        )
    def _process_list(self, data_list: List[Any]):
        """Convert a batch of structured data dicts into padded CUDA tensors.

        Each item is converted via ``self._convert_to_tensors``; per-item
        external-vocabulary ("ext") tokens are merged into one batch-wide
        table so that ids >= ``tokenizer.vocab_size`` index into it.

        Returns:
            (padded, other_info) where ``padded`` is a dict of batched CUDA
            tensors fed to the model, and ``other_info`` is a per-item list of
            dicts with ``predict_segments``, ``answer_placeholders`` and the
            reverse ext-table (batch ext id -> surface string).
        """
        pack_tensor = []
        other_info = []
        segment_rel_pack = []
        # Maps (token_id, token_sub) -> batch-wide extended id; shared across items.
        batch_ext_table_map: Dict[Tuple[int, int], int] = {}
        batch_ext_table_ids: List[int] = []
        batch_ext_table_sub: List[int] = []
        for data in data_list:
            (
                input_ids,
                input_id_subs,
                input_pos,
                context,
                segment_ids,
                segment_rel_offset,
                segment_rel,
                sample_ids,
                num_segments,
                predict_segments,
                answer_placeholders,
                ext_table,
                token_id_table,
            ) = self._convert_to_tensors(data, [])
            # Re-key this item's ext table onto batch-wide ids for decoding later.
            rev_ext_table: Dict[int, str] = {}
            for token, mp in token_id_table.items():
                if token == "<ans>":
                    continue
                token_id = self.tokenizer.encoder[token]
                for fake_id, token_sub in mp.items():
                    if token_sub > 0:
                        # Sub-token of an extended token: allocate a batch ext id once.
                        if (token_id, token_sub) not in batch_ext_table_map:
                            batch_ext_table_map[(token_id, token_sub)] = (
                                len(batch_ext_table_ids) + self.tokenizer.vocab_size
                            )
                            batch_ext_table_ids.append(token_id)
                            batch_ext_table_sub.append(token_sub)
                        rev_ext_table[batch_ext_table_map[(token_id, token_sub)]] = ext_table[
                            fake_id
                        ]
                    else:
                        # Plain vocabulary token: keep its normal id.
                        rev_ext_table[token_id] = ext_table[fake_id]
            pack_tensor.append(
                {
                    "input": torch.from_numpy(input_ids).unsqueeze(0),
                    "input_sub": torch.from_numpy(input_id_subs).unsqueeze(0),
                    "input_pos": torch.from_numpy(input_pos).unsqueeze(0),
                    "context": torch.from_numpy(context).unsqueeze(0),
                    "sample_idx": torch.from_numpy(sample_ids).unsqueeze(0),
                    "num_segments": torch.from_numpy(num_segments).unsqueeze(0),
                    "segment": torch.from_numpy(segment_ids).unsqueeze(0),
                    "segment_rel_offset": torch.from_numpy(segment_rel_offset).unsqueeze(0),
                }
            )
            segment_rel_pack.append(torch.from_numpy(segment_rel))
            other_info.append(
                {
                    "predict_segments": predict_segments,
                    "answer_placeholders": answer_placeholders,
                    "ext_table": rev_ext_table,
                }
            )
        # Pad all per-item tensors to a common length and move to GPU.
        keys = set(pack_tensor[0].keys())
        padded = {}
        for key in keys:
            padded[key] = pad(pack_tensor, key).cuda()
        # segment_rel has a variable first dimension; pad it manually with zeros.
        max_num_rels = 0
        for rel in segment_rel_pack:
            max_num_rels = max(max_num_rels, rel.size(0))
        padded_rels = torch.zeros(len(segment_rel_pack), max_num_rels, dtype=torch.int32)
        for i, rel in enumerate(segment_rel_pack):
            padded_rels[i, : rel.size(0)] = rel
        padded["segment_rel"] = padded_rels.cuda()
        padded["batch_ext_table_ids"] = torch.tensor(
            batch_ext_table_ids, dtype=torch.int32, device="cuda"
        )
        padded["batch_ext_table_sub"] = torch.tensor(
            batch_ext_table_sub, dtype=torch.int32, device="cuda"
        )
        return padded, other_info
    def generate(self, data_list, **kwargs):
        """Decode answers for a batch and fill each item's ``<ans>`` slots in place.

        ``_decode`` (implemented by subclasses) returns, per item, a list of
        (raw_word_id, ans_id) pairs; tokens are grouped by ans_id, decoded with
        the item's reverse ext-table, and written back into ``data_list`` at
        the path recorded in ``answer_placeholders``. Placeholders that got no
        tokens are set to None. Returns the mutated ``data_list``.
        """
        model_inputs, other_info = self._process_list(data_list)
        with torch.inference_mode():
            result_ids = self._decode(model_inputs, other_info, **kwargs)
        for sent_id, result in enumerate(result_ids):
            # Group generated token ids by the answer slot they belong to.
            ans_result_map: Dict[int, List[int]] = {}
            for raw_word_id, ans_id in result:
                if ans_id not in ans_result_map:
                    ans_result_map[ans_id] = []
                ans_result_map[ans_id].append(raw_word_id)
            answer_placeholders = other_info[sent_id]["answer_placeholders"]
            ext_table = other_info[sent_id]["ext_table"]
            data = data_list[sent_id]
            for ans_id, token_ids in ans_result_map.items():
                # Strip a trailing EOS before decoding.
                if token_ids[-1] == self.tokenizer.eos_id:
                    token_ids = token_ids[:-1]
                text = self.tokenizer.decode(token_ids, ext_table)
                # ans_id is 1-based; path is a key sequence into the nested "<ans>" dict.
                path = answer_placeholders[ans_id - 1]
                if len(path) > 0:
                    p = data["<ans>"]
                    for part in path[:-1]:
                        p = p[part]
                    p[path[-1]] = text
                else:
                    data["<ans>"] = text
            # Any placeholder that received no generated tokens becomes None.
            for ans_id in range(len(answer_placeholders)):
                if (ans_id + 1) not in ans_result_map:
                    path = answer_placeholders[ans_id]
                    p = data["<ans>"]
                    for part in path[:-1]:
                        p = p[part]
                    p[path[-1]] = None
        return data_list
    def _decode(self, model_inputs, other_info, **kwargs):
        """Abstract hook: subclasses implement the actual decoding strategy
        (e.g. beam search) and return per-item lists of (raw_word_id, ans_id)."""
        raise NotImplementedError("_decode is not implemented.")
class CPMBeeBeamSearch(CPMBeeGeneration):
    """Beam-search decoder for CPM-Bee: keeps per-beam state dicts tracking the
    current answer segment, the accumulated (token, ans_id) pairs, and the
    next token/position to feed the model."""

    def _decode(
        self,
        model_inputs,
        other_info,
        beam_size=3,
        max_length=100,
        repetition_penalty=1.0,
        repetition_window=None,
    ):
        """
        Beam search

        Args:
            model_inputs (dict): input ids.
            beam_size (int, optional, defaults to 3): beam size of beam search.
            generate_length (int, optional, defaults to 100): maximum generation length.
            repetition_penalty (float, optional, defaults to 1.0): repetition penalty coefficient, 1.0 means no penalty.
            repetition_window (int, optional, defaults to None): window size of repetition penalty, None means that all output tokens are penalized.
        """  # noqa: E501
        # generate_length + 1 for EOS token
        max_length += 1

        # expand dimmension
        # Replicate every context tensor beam_size times along a new beam axis,
        # then flatten to (batch_size * beam_size, seq_len).
        batch_size = model_inputs["input"].size(0)
        input: torch.Tensor = (
            model_inputs["input"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        input_sub: torch.Tensor = (
            model_inputs["input_sub"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        input_pos: torch.Tensor = (
            model_inputs["input_pos"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        context: torch.Tensor = (
            model_inputs["context"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        sample_ids: torch.Tensor = (
            model_inputs["sample_idx"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        num_segments: torch.Tensor = (
            model_inputs["num_segments"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        segment: torch.Tensor = (
            model_inputs["segment"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        segment_rel_offset: torch.Tensor = (
            model_inputs["segment_rel_offset"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        segment_rel: torch.Tensor = (
            model_inputs["segment_rel"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        ext_table_ids: torch.Tensor = model_inputs["batch_ext_table_ids"]
        ext_table_sub: torch.Tensor = model_inputs["batch_ext_table_sub"]
        # CPU copies for cheap scalar lookups in the inner loop.
        ext_table_ids_cpu = ext_table_ids.cpu()
        ext_table_sub_cpu = ext_table_sub.cpu()
        done = [False for _ in range(batch_size)]

        # Only beam 0 starts live; the others get -1e9 so the first step
        # effectively expands a single hypothesis per sentence.
        beam_scores = torch.zeros((batch_size, beam_size), dtype=torch.float, device=input.device)
        beam_scores[:, 1:] = -1e9
        beam_scores = beam_scores.view(-1)

        # generated hypotheses
        generated_hyps = [
            BeamHypotheses(beam_size, max_length, length_penalty=1, early_stopping=False)
            for _ in range(batch_size)
        ]

        pred_start_index = input.size(-1)
        # Prime the KV cache with the full prompt; generation then feeds one token at a time.
        _, _, past_key_values = self.model.inference(
            input=input,
            input_sub=input_sub,
            position=input_pos,
            context=context,
            sample_ids=sample_ids,
            num_segments=num_segments,
            segment=segment,
            segment_rel_offset=segment_rel_offset,
            segment_rel=segment_rel,
            ext_table_ids=ext_table_ids,
            ext_table_sub=ext_table_sub,
            past_key_values=None,
        )

        # Per-beam decoding state: which answer slot we are in ("idx"), the
        # tokens emitted so far ("ans"), and the next token to feed the model.
        beam_states = []
        for sent_id in range(batch_size):
            instance_beam_states = []
            for beam_id in range(beam_size):
                instance_beam_states.append(
                    {
                        "idx": 0,
                        "ans": [],
                        "nx_token_id": self.tokenizer.bos_id,
                        "nx_token_sub": 0,
                        "nx_segment_id": other_info[sent_id]["predict_segments"][0][0],
                        "nx_position": 0,
                    }
                )
            beam_states.append(instance_beam_states)

        for i in range(max_length + 1):
            # Gather the next-token inputs for every live beam.
            tmp_input = []
            tmp_input_sub = []
            tmp_position = []
            tmp_segment = []
            for sent_id in range(batch_size):
                for beam_id in range(beam_size):
                    tmp_input.append(beam_states[sent_id][beam_id]["nx_token_id"])
                    tmp_input_sub.append(beam_states[sent_id][beam_id]["nx_token_sub"])
                    tmp_position.append(beam_states[sent_id][beam_id]["nx_position"])
                    tmp_segment.append(beam_states[sent_id][beam_id]["nx_segment_id"])
            with torch.no_grad():
                input = torch.cat(
                    [
                        input,
                        torch.tensor(tmp_input, dtype=torch.int32, device="cuda").view(
                            batch_size * beam_size, 1
                        ),
                    ],
                    dim=-1,
                )
                # Incremental step: only the last token goes in; the cache holds the rest.
                logits, _, past_key_values = self.model.inference(
                    input=input[:, -1:],
                    input_sub=torch.tensor(tmp_input_sub, dtype=torch.int32, device="cuda").view(
                        batch_size * beam_size, 1
                    ),
                    position=torch.tensor(tmp_position, dtype=torch.int32, device="cuda").view(
                        batch_size * beam_size, 1
                    ),
                    context=torch.ones(
                        batch_size * beam_size, dtype=torch.bool, device="cuda"
                    ).view(batch_size * beam_size, 1),
                    sample_ids=torch.zeros(
                        batch_size * beam_size, dtype=torch.int32, device="cuda"
                    ).view(batch_size * beam_size, 1),
                    num_segments=num_segments[:, -1:],
                    segment=torch.tensor(tmp_segment, dtype=torch.int32, device="cuda").view(
                        batch_size * beam_size, 1
                    ),
                    segment_rel_offset=segment_rel_offset[:, -1:],
                    segment_rel=segment_rel,
                    ext_table_ids=ext_table_ids,
                    ext_table_sub=ext_table_sub,
                    past_key_values=past_key_values,
                )
            logits = logits[:, -1, :]

            # skip all steps when we are done with each sentence
            if all(done):
                break

            # Mask disallowed ids: unk (when the item has no unk in its ext table)
            # and extended ids that do not belong to this item's ext table.
            for sent_id in range(batch_size):
                if self.tokenizer.unk_id not in other_info[sent_id]["ext_table"]:
                    # unk is not allowed, mask unk
                    logits[
                        sent_id * beam_size : (sent_id + 1) * beam_size, self.tokenizer.unk_id
                    ] = -10000
                ext_ids = set()
                for v in other_info[sent_id]["ext_table"].keys():
                    ext_ids.add(v)
                for ext_id in range(
                    self.tokenizer.vocab_size, self.tokenizer.vocab_size + ext_table_ids.size(0)
                ):
                    if ext_id not in ext_ids:
                        logits[sent_id * beam_size : (sent_id + 1) * beam_size, ext_id] = -10000
            apply_repetition_penalty(
                logits,
                batch_size,
                beam_size,
                input,
                repetition_penalty,
                pred_start_index,
                input.size(-1) - 1,
                repetition_window,
            )
            scores = F.log_softmax(logits, dim=-1)
            next_scores = scores + beam_scores[:, None].expand_as(
                scores
            )  # (batch_size * beam_size, vocab_size)

            # re-organize to group the beam together (we are keeping top hypothesis accross beams)
            next_scores = next_scores.view(batch_size, -1)  # (batch_size, beam_size * vocab_size)
            # 2 * beam_size candidates so EOS-terminated ones can be absorbed
            # into generated_hyps while still filling beam_size live beams.
            next_scores, next_words = torch.topk(
                next_scores, 2 * beam_size, dim=1, largest=True, sorted=True
            )

            assert next_scores.size() == next_words.size() == (batch_size, 2 * beam_size)
            next_beam_states = []

            for sent_id in range(batch_size):
                # if we are done with this sentence
                done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(
                    next_scores[sent_id].max().item(), i
                )
                if done[sent_id]:
                    next_beam_states.append(
                        [
                            (
                                {
                                    "idx": 0,
                                    "ans": [],
                                    "nx_token_id": 0,
                                    "nx_token_sub": 0,
                                    "nx_segment_id": 0,
                                    "nx_position": 0,
                                },
                                0,
                                0,
                            )
                        ]
                        * beam_size
                    )  # pad the batch
                    continue

                # next sentence beam content
                next_instance_beam_states = []

                # next words for this sentence
                for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
                    # get beam and word IDs
                    beam_id = torch.div(idx, scores.size(-1), rounding_mode="floor").item()
                    word_id = (idx % scores.size(-1)).item()
                    curr_info = beam_states[sent_id][beam_id]
                    # end of sentence, or next word
                    # EOS on the LAST answer segment (or length limit) finishes the hypothesis.
                    if (
                        word_id == self.tokenizer.eos_id
                        and (curr_info["idx"] + 1 == len(other_info[sent_id]["predict_segments"]))
                    ) or i == max_length:
                        generated_hyps[sent_id].add(
                            beam_states[sent_id][beam_id]["ans"]
                            + [
                                (
                                    word_id,
                                    other_info[sent_id]["predict_segments"][curr_info["idx"]][1],
                                )
                            ],
                            value.item(),
                        )
                    elif word_id == self.tokenizer.eos_id:
                        # EOS on a non-final answer segment: advance to the next
                        # segment and restart from BOS at position 0.
                        next_instance_beam_states.append(
                            (
                                {
                                    "idx": curr_info["idx"] + 1,
                                    "ans": curr_info["ans"]
                                    + [
                                        (
                                            word_id,
                                            other_info[sent_id]["predict_segments"][
                                                curr_info["idx"]
                                            ][1],
                                        )
                                    ],
                                    "nx_token_id": self.tokenizer.bos_id,
                                    "nx_token_sub": 0,
                                    "nx_segment_id": other_info[sent_id]["predict_segments"][
                                        curr_info["idx"] + 1
                                    ][0],
                                    "nx_position": 0,
                                },
                                value.item(),
                                sent_id * beam_size + beam_id,
                            )
                        )
                    else:
                        # Ordinary token; extended ids are mapped back through
                        # the batch ext table to a (token_id, token_sub) pair.
                        raw_word_id = word_id
                        word_id_sub = 0
                        if word_id >= self.tokenizer.vocab_size:
                            word_id -= self.tokenizer.vocab_size
                            word_id_sub = int(ext_table_sub_cpu[word_id].item())
                            word_id = int(ext_table_ids_cpu[word_id].item())
                        next_instance_beam_states.append(
                            (
                                {
                                    "idx": curr_info["idx"],
                                    "ans": curr_info["ans"]
                                    + [
                                        (
                                            raw_word_id,
                                            other_info[sent_id]["predict_segments"][
                                                curr_info["idx"]
                                            ][1],
                                        )
                                    ],
                                    "nx_token_id": word_id,
                                    "nx_token_sub": word_id_sub,
                                    "nx_segment_id": curr_info["nx_segment_id"],
                                    "nx_position": curr_info["nx_position"] + 1,
                                },
                                value.item(),
                                sent_id * beam_size + beam_id,
                            )
                        )

                    # the beam for next step is full
                    if len(next_instance_beam_states) == beam_size:
                        break
                # update next beam content
                assert len(next_instance_beam_states) == 0 if i == max_length else beam_size
                next_beam_states.append(next_instance_beam_states)

            # we have reached the last step
            if i == max_length:
                break

            # sanity check / prepare next batch
            beam_reorder_idx = []
            beam_new_scores = []
            beam_states = []
            for sent_id in range(batch_size):
                instance_beam_states = []

                for beam_id in range(beam_size):
                    state, value, beam_idx = next_beam_states[sent_id][beam_id]
                    beam_reorder_idx.append(beam_idx)
                    beam_new_scores.append(value)
                    instance_beam_states.append(state)
                beam_states.append(instance_beam_states)
            # Re-order the running sequences and the KV cache to match the
            # surviving beams.
            input = input[beam_reorder_idx, :]
            beam_scores = torch.tensor(beam_new_scores, dtype=torch.float, device=input.device)
            for kw in past_key_values.keys():
                if kw == "buffer":
                    buf_list = past_key_values[kw]
                    nw_buf_list = []
                    for buf in buf_list:
                        if buf is None:
                            nw_buf_list.append((None, None))
                        else:
                            k_buf, v_buf = buf
                            nw_buf_list.append(
                                (k_buf[beam_reorder_idx, :], v_buf[beam_reorder_idx, :])
                            )
                    past_key_values[kw] = nw_buf_list
                else:
                    past_key_values[kw] = past_key_values[kw][beam_reorder_idx, :]

        # select the best hypotheses
        results = []
        for sent_id, hypotheses in enumerate(generated_hyps):
            best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
            results.append(best_hyp)

        return results
| 26,159 | 40.52381 | 148 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/generation/ant.py | import torch
import torch.nn.functional as F
from .generation_utils import BeamHypotheses, apply_repetition_penalty, top_k_top_p_filtering
from ..utils import pad
class CPMAntGeneration:
    """Base class for CPM-Ant generation: turns raw texts into padded model
    inputs and delegates the decoding strategy to ``_decode``."""

    def __init__(self, model, tokenizer, prompt_length=32):
        # Generation is inference-only, so switch the model to eval mode up front.
        model.eval()
        self.model = model
        self.tokenizer = tokenizer
        self.prompt_length = prompt_length

    def _convert_to_tensors(self, input_text, task_id=2):
        """Build the input dict for one text: task-prompt ids followed by the
        BOS-prefixed, unk-filtered encoding, plus length/position/span/context/
        segment tensors, each int-typed with a leading batch axis of 1."""
        prompt_len = self.prompt_length
        # BOS + encoded text, with unknown tokens dropped afterwards.
        token_ids = [self.tokenizer.bos_id] + self.tokenizer.encode(input_text)
        token_ids = [t for t in token_ids if t != self.tokenizer.unk_id]
        # Task-specific soft-prompt ids occupy a dedicated id range.
        prompt_ids = [offset + prompt_len * task_id for offset in range(prompt_len)]
        full_input = prompt_ids + token_ids
        total_len = len(full_input)
        raw = {
            "input": full_input,
            "length": total_len,
            "position": list(range(total_len)),
            "span": [0] * total_len,
            "context": [True] * total_len,
            # Segment 0 marks prompt positions, segment 2 marks text positions.
            "segment": [0] * prompt_len + [2] * len(token_ids),
        }
        return {key: torch.tensor(value).int().unsqueeze(0) for key, value in raw.items()}

    def _process_texts(self, text_list):
        """Convert every text and left-pad the per-text tensors into one CUDA batch."""
        tensor_dicts = [self._convert_to_tensors(text) for text in text_list]
        return {
            key: pad(tensor_dicts, key, padding_side='left').cuda()
            for key in set(tensor_dicts[0].keys())
        }

    def generate(self, text_list, **kwargs):
        """Generate continuations for a batch of texts using ``_decode``."""
        batch = self._process_texts(text_list)
        with torch.inference_mode():
            return self._decode(batch, **kwargs)

    def _decode(self, model_inputs, **kwargs):
        """Abstract hook: subclasses implement the decoding strategy."""
        raise NotImplementedError("_decode is not implemented.")
class CPMAntBeamSearch(CPMAntGeneration):
    """Beam-search decoder for CPM-Ant."""

    def _decode(
        self,
        model_inputs,
        beam_size=3,
        max_length=100,
        repetition_penalty=1.0,
        repetition_window=None,
        **kwargs
    ):
        """
        Beam search

        Args:
            model_inputs (dict): input ids.
            beam_size (int, optional, defaults to 3): beam size of beam search.
            generate_length (int, optional, defaults to 100): maximum generation length.
            repetition_penalty (float, optional, defaults to 1.0): repetition penalty coefficient, 1.0 means no penalty.
            repetition_window (int, optional, defaults to None): window size of repetition penalty, None means that all output tokens are penalized.
        """  # noqa: E501
        # generate_length + 1 for EOS token
        max_length += 1

        # expand dimmension
        # Replicate each input tensor beam_size times and flatten to
        # (batch_size * beam_size, ...).
        batch_size = model_inputs["input"].size(0)
        input = (
            model_inputs["input"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        length = (
            model_inputs["length"]
            .unsqueeze(1)
            .expand(batch_size, beam_size)
            .contiguous()
            .view(
                batch_size * beam_size,
            )
        )
        context = (
            model_inputs["context"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        position = (
            model_inputs["position"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        segment = (
            model_inputs["segment"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        span = (
            model_inputs["span"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )

        done = [False for _ in range(batch_size)]

        # Only beam 0 starts live; others get -1e9 so step 0 expands a single
        # hypothesis per sentence.
        beam_scores = torch.zeros((batch_size, beam_size), dtype=torch.float, device=input.device)
        beam_scores[:, 1:] = -1e9
        beam_scores = beam_scores.view(-1)

        # generated hypotheses
        generated_hyps = [
            BeamHypotheses(beam_size, max_length, length_penalty=1, early_stopping=False)
            for _ in range(batch_size)
        ]

        pred_start_index = input.size(-1)
        past_key_values = None
        for i in range(max_length + 1):
            if i == 0:
                # First step consumes the whole prompt and primes the KV cache.
                logits, _, past_key_values = self.model.inference(
                    input=input,
                    length=length,
                    context=context,
                    position=position,
                    segment=segment,
                    span=span,
                    past_key_values=past_key_values,
                )
            else:
                # Subsequent steps feed only the newest token.
                logits, _, past_key_values = self.model.inference(
                    input=input[:, -1:],
                    length=length,
                    context=context,
                    position=position,
                    segment=segment,
                    span=span,
                    past_key_values=past_key_values,
                )

            # skip all steps when we are done with each sentence
            if all(done):
                break

            # (batch * beam, seqlen, model_dim)
            logits = logits[:, -1, :]

            if i == 0:
                # Forbid an immediately-empty generation.
                logits[:, self.tokenizer.eos_id] = -float("inf")
                logits[:, self.tokenizer.newline_id] = -float("inf")

            apply_repetition_penalty(
                logits,
                batch_size,
                beam_size,
                input,
                repetition_penalty,
                pred_start_index,
                input.size(-1) - 1,
                repetition_window,
            )
            scores = F.log_softmax(logits, dim=-1)

            next_scores = scores + beam_scores[:, None].expand_as(
                scores
            )  # (batch_size * beam_size, vocab_size)

            # re-organize to group the beam together (we are keeping top hypothesis accross beams)
            next_scores = next_scores.view(batch_size, -1)  # (batch_size, beam_size * vocab_size)
            # 2 * beam_size candidates so EOS hits can be absorbed while still
            # keeping beam_size live continuations.
            next_scores, next_words = torch.topk(
                next_scores, 2 * beam_size, dim=1, largest=True, sorted=True
            )

            assert next_scores.size() == next_words.size() == (batch_size, 2 * beam_size)
            next_batch_beam = []

            for sent_id in range(batch_size):
                # if we are done with this sentence
                done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(
                    next_scores[sent_id].max().item(), i
                )
                if done[sent_id]:
                    next_batch_beam.extend(
                        [(0, self.tokenizer.pad_id, 0)] * beam_size
                    )  # pad the batch
                    continue

                # next sentence beam content
                next_sent_beam = []

                # next words for this sentence
                for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
                    # get beam and word IDs
                    beam_id = torch.div(idx, scores.size(-1), rounding_mode="floor")
                    word_id = idx % scores.size(-1)
                    # end of sentence, or next word
                    if word_id == self.tokenizer.eos_id or i == max_length:
                        generated_hyps[sent_id].add(
                            input[sent_id * beam_size + beam_id, pred_start_index:]
                            .clone()
                            .cpu()
                            .tolist(),
                            value.item(),
                        )
                    else:
                        next_sent_beam.append((value, word_id, sent_id * beam_size + beam_id))

                    # the beam for next step is full
                    if len(next_sent_beam) == beam_size:
                        break

                # update next beam content
                assert len(next_sent_beam) == 0 if i == max_length else beam_size
                if len(next_sent_beam) == 0:
                    next_sent_beam = [(0, self.tokenizer.pad_id, 0)] * beam_size  # pad the batch
                next_batch_beam.extend(next_sent_beam)
                assert len(next_batch_beam) == beam_size * (sent_id + 1)

            # we have reached the last step
            if i == max_length:
                break

            # sanity check / prepare next batch
            assert len(next_batch_beam) == batch_size * beam_size
            beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
            beam_words = input.new([x[1] for x in next_batch_beam])
            beam_idx = length.new([x[2] for x in next_batch_beam]).long()

            # re-order batch and internal states
            input = input[beam_idx, :]
            past_key_values = [list(each) if each is not None else each for each in past_key_values]  # type: ignore # noqa: E501
            for key_value_layer in past_key_values:
                if key_value_layer is not None:
                    key_value_layer[0] = key_value_layer[0][beam_idx]
                    key_value_layer[1] = key_value_layer[1][beam_idx]

            # update input ids
            input = torch.cat([input, beam_words.unsqueeze(1)], dim=-1)
            length += 1
            context = torch.cat(
                [context, torch.ones((context.size(0), 1), dtype=torch.int, device=context.device)],
                dim=-1,
            )
            position = torch.cat([position, position[:, -1:] + 1], dim=-1)
            segment = torch.cat(
                [segment, segment[:, -1:]], dim=-1
            )  # segment id always the same as the previous token
            span = torch.cat([span, span[:, -1:]], dim=-1)

        # select the best hypotheses
        results = []
        for i, hypotheses in enumerate(generated_hyps):
            best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
            results.append(best_hyp)

        result_text = list(map(self.tokenizer.decode, results))
        return result_text
class CPMAntRandomSampling(CPMAntGeneration):
    """Top-k / top-p (nucleus) sampling decoder for CPM-Ant."""

    def _decode(
        self,
        model_inputs,
        max_length=100,
        top_k=0,
        top_p=0.9,
        temperature=0.9,
        repetition_penalty=1.0,
        repetition_window=None,
        **kwargs
    ):
        """
        Top-k and top-p sampling.

        Args:
            model_inputs (dict): input ids
            generate_length (int, optional, defaults to 100): maximum generation length
            top_k (int, optional, defaults to 0): keep only top k tokens with highest probability. 0 means keeping all tokens.
            top_p (int, optional, defaults to 0.9): keep the top tokens with cumulative probability >= top_p.
            temperature (int, optional, defaults to 0.9): the value that can cool down the logits distribution.
            repetition_penalty (float, optional, defaults to 1.0): repetition penalty coefficient, 1.0 means no penalty.
            repetition_window (int, optional, defaults to None): window size of repetition penalty, None means that all output tokens are penalized.
        """  # noqa: E501
        # generate_length + 1 for EOS token
        max_length += 1

        input = model_inputs["input"]
        length = model_inputs["length"]
        context = model_inputs["context"]
        position = model_inputs["position"]
        segment = model_inputs["segment"]
        span = model_inputs["span"]

        batch_size = input.size(0)
        # Tokens at/after this index are the generated continuation.
        pred_start_index = input.size(-1)
        past_key_values = None
        done = [False for _ in range(batch_size)]
        results = [None for _ in range(batch_size)]
        for i in range(max_length):
            if i == 0:
                # First step consumes the whole prompt and primes the KV cache.
                logits, _, past_key_values = self.model.inference(
                    input=input,
                    length=length,
                    context=context,
                    position=position,
                    segment=segment,
                    span=span,
                    past_key_values=past_key_values,
                )
            else:
                # Subsequent steps feed only the newest token.
                logits, _, past_key_values = self.model.inference(
                    input=input[:, -1:],
                    length=length,
                    context=context,
                    position=position,
                    segment=segment,
                    span=span,
                    past_key_values=past_key_values,
                )
            logits = logits[:, -1, :]

            if i == 0:
                # Forbid an immediately-empty generation.
                logits[:, self.tokenizer.eos_id] = -float("inf")
                logits[:, self.tokenizer.newline_id] = -float("inf")

            apply_repetition_penalty(
                logits,
                batch_size,
                1,
                input,
                repetition_penalty,
                pred_start_index,
                input.size(-1) - 1,
                repetition_window,
            )
            logits = logits / temperature
            # Restrict the distribution, then sample one token per sequence.
            logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
            probs = F.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)

            for idx in range(batch_size):
                if not done[idx] and (
                    next_token[idx].item() == self.tokenizer.eos_id or i == max_length - 1
                ):
                    done[idx] = True
                    results[idx] = input[idx, pred_start_index:].clone().cpu().tolist()  # type: ignore # noqa: E501

            if sum(done) == batch_size:
                break

            # update input ids
            input = torch.cat([input, next_token], dim=-1)
            length += 1
            context = torch.cat(
                [context, torch.ones((context.size(0), 1), dtype=torch.int, device=context.device)],
                dim=-1,
            )
            position = torch.cat([position, position[:, -1:] + 1], dim=-1)
            segment = torch.cat(
                [segment, segment[:, -1:]], dim=-1
            )  # segment id always the same as the previous token
            span = torch.cat([span, span[:, -1:]], dim=-1)

        result_text = list(map(self.tokenizer.decode, results))
        return result_text
| 14,654 | 36.966321 | 148 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/generation/generation_utils.py | import torch
import torch.nn.functional as F
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float("inf")):
    """Filter a (batch, vocab) logits tensor in place for top-k / nucleus sampling.

    With ``top_k > 0``, every logit strictly below the k-th largest in its row
    is set to ``filter_value``. With ``top_p > 0``, each row keeps the smallest
    set of highest-probability tokens whose cumulative softmax mass exceeds
    ``top_p`` (the first token over the threshold is always kept).

    Adapted from the HuggingFace conversational-AI example:
    https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313
    """
    if top_k > 0:
        # k-th largest logit per row; anything strictly below it is removed.
        kth_best = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value

    batch_size = logits.size(0)
    if top_p > 0.0:
        logits = logits.view(batch_size, -1).contiguous()
        for row in range(len(logits)):
            desc_vals, desc_idx = torch.sort(logits[row].view(-1), descending=True)
            cum_probs = torch.cumsum(F.softmax(desc_vals, dim=-1), dim=-1)

            # Mark tokens past the cumulative-probability threshold ...
            drop_mask = cum_probs > top_p
            # ... but shift right so the token that crosses the threshold survives.
            drop_mask[..., 1:] = drop_mask[..., :-1].clone()
            drop_mask[..., 0] = 0

            logits[row][desc_idx[drop_mask]] = filter_value
        logits = logits.view(batch_size, -1).contiguous()

    return logits
def apply_repetition_penalty(
    logits,
    batch_size,
    num_beams,
    prev_output_tokens,
    repetition_penalty,
    start_idx=None,
    end_idx=None,
    window_size=None,
):
    """Penalize already-generated tokens in place (CTRL-style,
    https://arxiv.org/abs/1909.05858).

    For each of the ``batch_size * num_beams`` rows, every token that appears
    in the relevant slice of ``prev_output_tokens`` has its logit divided by
    ``repetition_penalty`` when positive and multiplied when negative, so the
    token always becomes less likely. When ``start_idx``/``end_idx`` are given,
    only that inclusive slice (optionally limited to the last ``window_size``
    positions) is penalized; otherwise the whole row is.
    """
    # only conduct repetition penalty for the output
    assert repetition_penalty >= 1, "repetition penalty coefficient should >= 1"
    # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
    for row in range(batch_size * num_beams):
        if start_idx is None or end_idx is None:
            seen = prev_output_tokens[row].tolist()
        elif end_idx < start_idx:
            seen = []
        elif window_size:
            window_start = max(start_idx, end_idx + 1 - window_size)
            seen = prev_output_tokens[row][window_start : end_idx + 1].tolist()
        else:
            seen = prev_output_tokens[row][start_idx : end_idx + 1].tolist()
        for token in set(seen):
            # A negative logit must be multiplied (not divided) so the
            # penalty always pushes the probability down.
            if logits[row, token] < 0:
                logits[row, token] *= repetition_penalty
            else:
                logits[row, token] /= repetition_penalty
class BeamHypotheses:
    """Fixed-capacity n-best list of finished beam-search hypotheses, ranked by
    length-normalized log-probability."""

    def __init__(self, n_hyp, max_len, length_penalty, early_stopping):
        """Create an empty list holding at most ``n_hyp`` hypotheses."""
        self.max_len = max_len
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.n_hyp = n_hyp
        self.hyp = []
        # Sentinel above any real score so the first hypothesis always enters.
        self.worst_score = 1e9

    def __len__(self):
        """Number of stored hypotheses."""
        return len(self.hyp)

    def add(self, hyp, sum_logprobs):
        """Insert ``hyp`` if the list is not full or it beats the worst entry."""
        score = sum_logprobs / len(hyp) ** self.length_penalty
        if len(self) >= self.n_hyp and score <= self.worst_score:
            return
        self.hyp.append((score, hyp))
        if len(self) > self.n_hyp:
            # Over capacity: drop the worst and track the new minimum.
            ranked = sorted((s, idx) for idx, (s, _) in enumerate(self.hyp))
            del self.hyp[ranked[0][1]]
            self.worst_score = ranked[1][0]
        else:
            self.worst_score = min(score, self.worst_score)

    def is_done(self, best_sum_logprobs, cur_len):
        """Return True when the list is full and no in-flight hypothesis (whose
        best achievable score is ``best_sum_logprobs`` normalized by
        ``cur_len``) can displace the current worst entry."""
        if len(self) < self.n_hyp:
            return False
        if self.early_stopping:
            return True
        return self.worst_score >= best_sum_logprobs / cur_len**self.length_penalty
| 4,382 | 37.787611 | 122 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/generation/__init__.py | from .ant import CPMAntBeamSearch, CPMAntRandomSampling, CPMAntGeneration
| 74 | 36.5 | 73 | py |
LOSTIN | LOSTIN-main/GNN-supernode/inference.py | import torch
from torch_geometric.data import DataLoader
import torch.optim as optim
import torch.nn.functional as F
from gnn import GNN
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch_geometric.utils import degree
from tqdm import tqdm
import argparse
import time
import numpy as np
import json
import operator
from functools import reduce
import ARMA
import film
import gat
import pna
import pan
import sage
import sgn
import unet
import rgcn
#import ggnn
### importing OGB
from dataset_pyg import PygGraphPropPredDataset
from evaluate import Evaluator
# Module-level loss functions shared by train(): BCE-with-logits for
# classification tasks, MSE for regression tasks.
cls_criterion = torch.nn.BCEWithLogitsLoss()
reg_criterion = torch.nn.MSELoss()
# Alternative regression loss left for experimentation:
#reg_criterion=torch.nn.SmoothL1Loss(reduction='mean', beta=1.0)
def train(model, device, loader, optimizer, task_type):
    """Run one training epoch over ``loader``.

    Picks ``cls_criterion`` (BCE-with-logits) when ``task_type`` contains
    "classification", otherwise ``reg_criterion`` (MSE). NaN targets are
    treated as unlabeled and excluded from the loss.
    """
    model.train()
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = batch.to(device)
        # Skip degenerate batches (single node, or an empty trailing batch).
        if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
            continue
        pred = model(batch)
        optimizer.zero_grad()
        ## ignore nan targets (unlabeled) when computing training loss.
        is_labeled = batch.y == batch.y
        criterion = cls_criterion if "classification" in task_type else reg_criterion
        loss = criterion(
            pred.to(torch.float32)[is_labeled], batch.y.to(torch.float32)[is_labeled]
        )
        loss.backward()
        optimizer.step()
def eval(model, device, loader, evaluator):
    """Run the model over ``loader`` and score predictions with ``evaluator``.

    NOTE(review): shadows the ``eval`` builtin; callers in this file use it by
    this name, so it is kept.

    Returns:
        (metrics_dict, y_true, y_pred) where the arrays are numpy, stacked
        over all evaluated batches.
    """
    model.eval()
    y_true = []
    y_pred = []

    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = batch.to(device)

        # Single-node graphs are skipped, mirroring train().
        if batch.x.shape[0] == 1:
            pass
        else:
            with torch.no_grad():
                pred = model(batch)

            y_true.append(batch.y.view(pred.shape).detach().cpu())
            y_pred.append(pred.detach().cpu())

    y_true = torch.cat(y_true, dim = 0).numpy()
    y_pred = torch.cat(y_pred, dim = 0).numpy()

    input_dict = {"y_true": y_true, "y_pred": y_pred}

    return evaluator.eval(input_dict), y_true, y_pred
def main():
    """Load a trained GNN checkpoint and run inference on the test split,
    dumping true/predicted values to a JSON file."""
    # Training settings
    parser = argparse.ArgumentParser(description='GNN baselines on ogbgmol* data with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--gnn', type=str, default='gin-virtual',
                        help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gin-virtual)')
    parser.add_argument('--num_layer', type=int, default=5,
                        help='number of GNN message passing layers (default: 5)')
    parser.add_argument('--drop_ratio', type=float, default=0.5,
                        help='dropout ratio (default: 0.5)')
    parser.add_argument('--emb_dim', type=int, default=300,
                        help='dimensionality of hidden units in GNNs (default: 300)')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--dataset', type=str, default="ogbg-molhiv",
                        help='dataset name (default: ogbg-molhiv)')
    args = parser.parse_args()

    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")

    ### automatic dataloading and splitting
    dataset = PygGraphPropPredDataset(name = args.dataset)

    split_idx = dataset.get_idx_split()

    ### automatic evaluator. takes dataset name as input
    evaluator = Evaluator(args.dataset)

    test_loader = DataLoader(dataset[split_idx["test"]], batch_size=args.batch_size, shuffle=False, num_workers = 0)

    # Dispatch on --gnn to build the requested architecture.
    if args.gnn == 'gin':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gin-virtual':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    elif args.gnn == 'gcn':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gcn-virtual':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    elif args.gnn == 'arma':
        model = ARMA.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'film':
        model = film.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'sgn':
        model = sgn.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'sage':
        model = sage.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'gat':
        model = gat.Net(heads=8, num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'pna':
        # PNA needs the in-degree histogram of the training graphs.
        deg = torch.zeros(30, dtype=torch.long)
        train_dataset = dataset[split_idx["train"]]
        for data in train_dataset:
            d = degree(data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long)
            deg += torch.bincount(d, minlength=deg.numel())
        model = pna.Net(deg=deg, num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'pan':
        model = pan.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'unet':
        model= unet.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'rgcn':
        model = rgcn.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'ggnn':
        # NOTE(review): `import ggnn` is commented out at the top of this file,
        # so selecting --gnn ggnn will raise NameError here — confirm intent.
        model = ggnn.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    else:
        raise ValueError('Invalid GNN type')

    # NOTE(review): scheduler/epoch/loss are restored but unused in this
    # inference-only script; optimizer state is loaded only to satisfy the
    # checkpoint format.
    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.8, patience=10,min_lr=0.00001)
    PATH='model/'+args.dataset + '_'+ args.gnn+ '_layer_'+ str(args.num_layer)+'_model.pt'
    checkpoint = torch.load(PATH)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    loss = checkpoint['loss']

    test_perf, t_true, t_pred = eval(model, device, test_loader, evaluator)
    # Flatten the per-graph lists into single lists for JSON output.
    test_true_value=reduce(operator.add, t_true.tolist())
    test_pred_value=reduce(operator.add, t_pred.tolist())
    f = open('inf_'+args.dataset + '_'+ args.gnn+ '_layer_'+ str(args.num_layer)+ '.json', 'w')
    result=dict(test_true=test_true_value, test_pred=test_pred_value)
    json.dump(result, f)
    f.close()
# Script entry point: run checkpoint inference when executed directly.
if __name__ == "__main__":
    main()
| 7,700 | 43.514451 | 183 | py |
LOSTIN | LOSTIN-main/GNN-supernode/node_encoder.py | import torch
from features import get_node_feature_dims, get_edge_feature_dims
full_node_feature_dims = get_node_feature_dims()
full_edge_feature_dims = get_edge_feature_dims()
class NodeEncoder(torch.nn.Module):
    """Embed integer node features into a single dense vector.

    One embedding table is created per feature column (cardinalities taken
    from ``full_node_feature_dims``); the forward pass sums the per-column
    embeddings.
    """

    def __init__(self, emb_dim):
        super(NodeEncoder, self).__init__()

        self.node_embedding_list = torch.nn.ModuleList()

        for num_categories in full_node_feature_dims:
            table = torch.nn.Embedding(num_categories, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.node_embedding_list.append(table)

    def forward(self, x):
        # Column 0 is deliberately left out of the sum — presumably the
        # coarse node-type slot is redundant with the op-type slot
        # (TODO confirm against the feature layout in features.py).
        summed = 0
        for col in range(1, x.shape[1]):
            summed = summed + self.node_embedding_list[col](x[:, col])
        return summed
class EdgeEncoder(torch.nn.Module):
    """Embed integer edge features into a single dense vector.

    One embedding table per edge-feature column (cardinalities from
    ``full_edge_feature_dims``); the forward pass sums over all columns.
    """

    def __init__(self, emb_dim):
        super(EdgeEncoder, self).__init__()

        self.edge_embedding_list = torch.nn.ModuleList()

        for num_categories in full_edge_feature_dims:
            table = torch.nn.Embedding(num_categories, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.edge_embedding_list.append(table)

    def forward(self, edge_attr):
        # Unlike NodeEncoder, every column contributes here.
        summed = 0
        for col in range(edge_attr.shape[1]):
            summed = summed + self.edge_embedding_list[col](edge_attr[:, col])
        return summed
if __name__ == '__main__':
    # Smoke test: embed the node/edge features of one dataset graph.
    from dataset_pyg import PygGraphPropPredDataset

    sample = PygGraphPropPredDataset(name = 'node_embedding_area')[1]
    print(NodeEncoder(2)(sample.x))
    print(EdgeEncoder(5)(sample.edge_attr))
| 1,815 | 28.770492 | 90 | py |
LOSTIN | LOSTIN-main/GNN-supernode/features.py | import json
import pandas as pd
from os import listdir
from os.path import isfile, join
allowable_features = {
'node_type' : ['input', 'intermediate', 'output'],
'command_type' : ['b', 'rf', 'rfz', 'rw', 'rwz', 'resub', 'resub -z'],
'op_type' : ['and_oper', 'or_oper', 'not_oper', 'misc'],
}
node_list = ['input', 'output', 'and_oper', 'or_oper', 'not_oper']
def safe_index(l, e):
    """
    Return index of element e in list l. If e is not present, return the last index
    """
    if e in l:
        return l.index(e)
    # Unknown values collapse onto the final ("misc"/catch-all) slot.
    return len(l) - 1
def node_type(opcode):
    """Collapse an opcode into a coarse node category.

    'input' and 'output' map to themselves; the three logic operators all
    map to 'intermediate'. Any other opcode falls through and returns None.
    """
    if opcode in ('input', 'output'):
        return opcode
    if opcode in ('and_oper', 'or_oper', 'not_oper'):
        return 'intermediate'
def node_to_feature_vector(node):
    """
    Converts node object to feature list of indices
    :return: list
    """
    raw_type = node['node_attributes']['node_type']
    # Two categorical slots (coarse node kind, op kind) followed by six zero
    # placeholders so the node vector matches the 8-slot layout used elsewhere.
    return [
        safe_index(allowable_features['node_type'], node_type(raw_type)),
        safe_index(allowable_features['op_type'], raw_type),
    ] + [0] * 6
def get_node_feature_dims():
    """Return per-column cardinalities for the 8-slot node feature vector."""
    dims = [len(allowable_features['node_type']), len(allowable_features['op_type'])]
    # The six padding columns reuse the last real cardinality.
    return dims + [dims[-1]] * 6
def edge_to_feature_vector(source, sink):
    """
    Converts edge to feature list of indices
    :return: list
    """
    # Edge feature = (source node kind, sink node kind), indexed into node_list.
    return [node_list.index(source), node_list.index(sink)]
def get_edge_feature_dims():
    """Return per-column cardinalities of the edge feature vector."""
    # Both edge-feature columns index into node_list.
    return [len(node_list), len(node_list)]
def get_command_idx(cmd):
    """Map a synthesis command name to its 1-based id; unknown commands raise."""
    try:
        # 1-based so that 0 stays reserved for padding.
        return allowable_features['command_type'].index(cmd) + 1
    except ValueError:
        raise NotImplementedError
if __name__ == '__main__':
    # Build the raw CSV dataset: per-design circuit graphs plus one sample
    # per (design, synthesis flow) with a 25-slot super-node encoding.
    ff_10 = pd.read_csv('flow_10.csv', header=None)
    ff_15 = pd.read_csv('flow_15.csv', header=None)
    ff_20 = pd.read_csv('flow_20.csv', header=None)
    ff_25 = pd.read_csv('flow_25.csv', header=None)

    keyword = 'delay' # area or delay

    label_dir = 'dataset'
    label_list = [f for f in listdir(label_dir) if isfile(join(label_dir, f))]

    read_dir = 'epfl_graph/'
    vgraphs = ['adder', 'arbiter', 'bar', 'div', 'log2', 'max', 'multiplier', 'sin', 'sqrt', 'square', 'voter']

    def encode_flow(flow_str, flow_len):
        """Encode a ';'-separated flow as 25 ints: 1-based command ids, zero-padded.

        get_command_idx raises NotImplementedError on an unknown command,
        matching the previous inline if-chain.
        """
        commands = flow_str.split(';')
        return [get_command_idx(commands[j]) for j in range(flow_len)] + [0] * (25 - flow_len)

    node_feat = []
    super_nodes = []
    edge_list = []
    edge_feat = []
    graph_label = []
    graph_choice = []
    num_node_list = []
    num_edge_list = []

    # Row offsets into the flow files. NOTE(review): these are never advanced
    # between designs, so every design reuses rows 0..N-1 of each flow file —
    # confirm this pairing is intended.
    count_10 = 0
    count_15 = 0
    count_20 = 0
    count_25 = 0

    for idx, vgraph in enumerate(vgraphs):
        with open(read_dir + vgraph + '.json', 'r') as f:
            d = json.load(f)

        # Locate this design's label file for each flow length.
        label_file_10 = None
        label_file_15 = None
        label_file_20 = None
        label_file_25 = None
        for f in label_list:
            if (keyword in f) and (vgraph in f) and ('25' in f):
                label_file_25 = f
            elif (keyword in f) and (vgraph in f) and ('20' in f):
                label_file_20 = f
            elif (keyword in f) and (vgraph in f) and ('15' in f):
                label_file_15 = f
            elif (keyword in f) and (vgraph in f) and ('10' in f):
                label_file_10 = f

        # BUG FIX: each flow length now reads its own label file; previously
        # all four variables were loaded from label_file_25.
        label_10 = pd.read_csv(f'{label_dir}/{label_file_10}', header=None)
        label_15 = pd.read_csv(f'{label_dir}/{label_file_15}', header=None)
        label_20 = pd.read_csv(f'{label_dir}/{label_file_20}', header=None)
        label_25 = pd.read_csv(f'{label_dir}/{label_file_25}', header=None)

        nodes = d['nodes']
        edges = d['edges']

        node_index_map = dict()  # map the node name to the index
        for index, n in enumerate(nodes):
            if n[0] not in node_index_map:
                node_index_map[n[0]] = index
            node_feat.append(node_to_feature_vector(n[1]))

        for e in edges:
            source = node_index_map[e[0]]
            sink = node_index_map[e[1]]
            edge_list.append([source, sink])
            source_type = nodes[source][1]['node_attributes']['node_type']
            sink_type = nodes[sink][1]['node_attributes']['node_type']
            edge_feat.append(edge_to_feature_vector(source_type, sink_type))

        num_node_list.append(len(nodes))
        num_edge_list.append(len(edges))

        # One sample per (design, flow): super-node encoding + scalar label.
        for flow_len, flow_df, label_df, offset, n_rows in (
            (10, ff_10, label_10, count_10, 50000),
            (15, ff_15, label_15, count_15, 50000),
            (20, ff_20, label_20, count_20, 100000),
            (25, ff_25, label_25, count_25, 100000),
        ):
            for i in range(n_rows):
                graph_choice.append(idx)
                graph_label.append([label_df[0][offset + i]])
                super_nodes.append(encode_flow(flow_df[0][offset + i], flow_len))

    pd.DataFrame(node_feat).to_csv('node-feat.csv', index=False, header=False)
    pd.DataFrame(super_nodes).to_csv('node-super.csv', index=False, header=False)
    pd.DataFrame(graph_choice).to_csv('graph-choice.csv', index=False, header=False)
    pd.DataFrame(edge_list).to_csv('edge.csv', index=False, header=False)
    pd.DataFrame(edge_feat).to_csv('edge-feat.csv', index=False, header=False)
    pd.DataFrame(num_node_list).to_csv('num-node-list.csv', index=False, header=False)
    pd.DataFrame(num_edge_list).to_csv('num-edge-list.csv', index=False, header=False)
    pd.DataFrame(graph_label).to_csv('graph-label.csv', index=False, header=False)
| 9,664 | 30.792763 | 111 | py |
LOSTIN | LOSTIN-main/GNN-supernode/test_evaluation.py | import torch
from torch_geometric.loader import DataLoader
from torch.utils.data import TensorDataset
import torch.optim as optim
import torch.nn.functional as F
from gnn import GNN
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tqdm import tqdm
import argparse
import time
import numpy as np
import json
import operator
from functools import reduce
### importing OGB
from dataset_pyg import PygGraphPropPredDataset
from evaluate import Evaluator
cls_criterion = torch.nn.BCEWithLogitsLoss()
reg_criterion = torch.nn.MSELoss()
#reg_criterion=torch.nn.SmoothL1Loss(reduction='mean', beta=1.0)
def gen_batch_dat(batch, graphs):
    """Stitch the per-sample circuit graphs into one disjoint-union batch.

    Each sample in `batch` only carries an index into `graphs`
    (batch.graph_selection); this copies the selected graphs' tensors onto
    the batch object (edge_index, edge_attr, x, batch) and returns it.

    BUG FIX: edge_index of every appended graph is now offset by the number
    of nodes already placed, so its edges point at its own node slice
    (previously all copies' edges referenced the first graph's node ids).
    Also builds each tensor with a single torch.cat instead of repeated
    pairwise concatenation (O(n) instead of O(n^2)).
    """
    edge_indices, edge_attrs, node_feats, assignment = [], [], [], []
    node_offset = 0

    for idx in range(len(batch.y)):
        g = graphs[int(batch.graph_selection[idx])]
        num_nodes = len(g.x)
        edge_indices.append(g.edge_index + node_offset)
        edge_attrs.append(g.edge_attr)
        node_feats.append(g.x)
        # Node-to-sample assignment vector, as required by global pooling.
        assignment.append(torch.full((num_nodes,), idx, dtype=torch.long))
        node_offset += num_nodes

    batch.edge_index = torch.cat(edge_indices, 1)
    batch.edge_attr = torch.cat(edge_attrs, 0)
    batch.x = torch.cat(node_feats, 0)
    batch.batch = torch.cat(assignment, 0)
    return batch
def train(model, device, loader, optimizer, task_type, graphs):
    """Run one training epoch over `loader`, stitching graphs per batch."""
    model.train()

    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = gen_batch_dat(batch, graphs).to(device)

        # Skip degenerate batches: a single node, or a single sample.
        if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
            continue

        pred = model(batch)
        optimizer.zero_grad()
        # Mask out NaN targets (unlabeled entries) when computing the loss.
        labeled_mask = batch.y == batch.y
        criterion = cls_criterion if "classification" in task_type else reg_criterion
        loss = criterion(pred.to(torch.float32)[labeled_mask],
                         batch.y.to(torch.float32)[labeled_mask])
        loss.backward()
        optimizer.step()
def eval(model, device, loader, evaluator, graphs):
    """Run inference over `loader`; return (metric dict, y_true, y_pred).

    NOTE: intentionally shadows the builtin ``eval`` — kept for caller
    compatibility.
    """
    model.eval()
    true_chunks = []
    pred_chunks = []

    for batch in tqdm(loader, desc="Iteration"):
        batch = gen_batch_dat(batch, graphs).to(device)

        if batch.x.shape[0] == 1:
            continue

        with torch.no_grad():
            pred = model(batch)
        true_chunks.append(batch.y.view(pred.shape).detach().cpu())
        pred_chunks.append(pred.detach().cpu())

    y_true = torch.cat(true_chunks, dim=0).numpy()
    y_pred = torch.cat(pred_chunks, dim=0).numpy()

    return evaluator.eval({"y_true": y_true, "y_pred": y_pred}), y_true, y_pred
def main():
    """Evaluate a trained GNN checkpoint on each EPFL design and dump predictions."""
    parser = argparse.ArgumentParser(description='GNN baselines on ogbgmol* data with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--gnn', type=str, default='gin-virtual',
                        help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gin-virtual)')
    parser.add_argument('--drop_ratio', type=float, default=0.5,
                        help='dropout ratio (default: 0.5)')
    parser.add_argument('--num_layer', type=int, default=10,
                        help='number of GNN message passing layers (default: 5)')
    parser.add_argument('--emb_dim', type=int, default=8,
                        help='dimensionality of hidden units in GNNs (default: 300)')
    parser.add_argument('--virtual_emb_dim', type=int, default=25,
                        help='dimensionality of hidden units of virtual node in GNNs (default: 25)')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=20,
                        help='number of epochs to train (default: 300)')
    parser.add_argument('--num_workers', type=int, default=0,
                        help='number of workers (default: 0)')
    parser.add_argument('--dataset', type=str, default="pita_delay",
                        help='dataset name (default: ogbg-molhiv)')
    parser.add_argument('--ckpt', type=str, default="TBD",
                        help='checkpoint file path')
    parser.add_argument('--feature', type=str, default="full",
                        help='full feature or simple feature')
    parser.add_argument('--filename', type=str, default="",
                        help='filename to output result (default: )')
    args = parser.parse_args()

    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")

    ### automatic dataloading and splitting
    dataset = PygGraphPropPredDataset(name = args.dataset)

    if args.feature == 'simple':
        print('using simple feature')
        # only retain the top two node/edge features
        dataset.data.x = dataset.data.x[:, :2]
        dataset.data.edge_attr = dataset.data.edge_attr[:, :2]

    ### automatic evaluator. takes dataset name as input
    evaluator = Evaluator(args.dataset)

    verilog_list = ['adder', 'arbiter', 'bar', 'div', 'log2', 'max', 'multiplier', 'sin', 'sqrt', 'square', 'voter']
    graphs = dataset.graphs

    # '--gnn' encodes architecture and (optional) virtual-node flag, e.g. 'gin-virtual'.
    if args.gnn not in ('gin', 'gin-virtual', 'gcn', 'gcn-virtual'):
        raise ValueError('Invalid GNN type')
    gnn_type, _, suffix = args.gnn.partition('-')
    model = GNN(gnn_type = gnn_type, num_tasks = dataset.num_tasks,
                num_layer = args.num_layer, emb_dim = args.emb_dim,
                drop_ratio = args.drop_ratio,
                virtual_node = (suffix == 'virtual')).to(device)

    # Restore trained weights from the checkpoint directory.
    ckpt_path = f'model/{args.ckpt}.pt'
    model.load_state_dict(torch.load(ckpt_path)['model_state_dict'])

    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.8, patience=10, min_lr=0.00001)

    # Kept for interface parity with the training script; unused here.
    valid_curve = []
    test_curve = []
    train_curve = []
    test_predict_value = []
    test_true_value = []
    valid_predict_value = []
    valid_true_value = []

    for idx, verilog in enumerate(verilog_list):
        # Each design occupies a contiguous 300000-sample region; evaluate on
        # its first 3000 samples.
        data = DataLoader(dataset[300000*idx:(300000*idx+3000)], batch_size=args.batch_size,
                          shuffle=True, num_workers=args.num_workers)
        test_perf, t_true, t_pred = eval(model, device, data, evaluator, graphs)
        print("Done with evaluation design:", verilog)
        print(test_perf)
        np.save(f'delays/{verilog}_true.npy', t_true)
        np.save(f'delays/{verilog}_pred.npy', t_pred)
if __name__ == "__main__":
main()
| 7,667 | 40.225806 | 183 | py |
LOSTIN | LOSTIN-main/GNN-supernode/evaluate.py | from sklearn.metrics import roc_auc_score, average_precision_score
import pandas as pd
import os
import numpy as np
try:
import torch
except ImportError:
torch = None
### Evaluator for graph classification
class Evaluator:
    """Metric evaluator for graph property prediction datasets.

    Dataset metadata (number of tasks and metric name) is looked up in the
    master.csv file located next to this module; eval() then dispatches to
    the matching metric implementation.
    """

    def __init__(self, name):
        self.name = name

        meta_info = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
        if not self.name in meta_info:
            print(self.name)
            error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
            error_mssg += 'Available datasets are as follows:\n'
            error_mssg += '\n'.join(meta_info.keys())
            raise ValueError(error_mssg)

        self.num_tasks = int(meta_info[self.name]['num tasks'])
        self.eval_metric = meta_info[self.name]['eval metric']


    def _parse_and_check_input(self, input_dict):
        """Validate input_dict for the configured metric.

        Returns (y_true, y_pred) as numpy arrays of shape
        (num_graph, num_tasks) for the array metrics, or
        (seq_ref, seq_pred) token lists for 'F1'.
        """
        if self.eval_metric in ('rocauc', 'ap', 'rmse', 'rmae', 'acc'):
            if not 'y_true' in input_dict:
                raise RuntimeError('Missing key of y_true')
            if not 'y_pred' in input_dict:
                raise RuntimeError('Missing key of y_pred')

            y_true, y_pred = input_dict['y_true'], input_dict['y_pred']

            '''
                y_true: numpy ndarray or torch tensor of shape (num_graph, num_tasks)
                y_pred: numpy ndarray or torch tensor of shape (num_graph, num_tasks)
            '''

            # converting to torch.Tensor to numpy on cpu
            if torch is not None and isinstance(y_true, torch.Tensor):
                y_true = y_true.detach().cpu().numpy()

            if torch is not None and isinstance(y_pred, torch.Tensor):
                y_pred = y_pred.detach().cpu().numpy()

            ## check type
            # BUG FIX: the second isinstance previously re-checked y_true,
            # letting a non-array y_pred slip through this guard.
            if not (isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray)):
                raise RuntimeError('Arguments to Evaluator need to be either numpy ndarray or torch tensor')

            if not y_true.shape == y_pred.shape:
                raise RuntimeError('Shape of y_true and y_pred must be the same')

            if not y_true.ndim == 2:
                raise RuntimeError('y_true and y_pred mush to 2-dim arrray, {}-dim array given'.format(y_true.ndim))

            if not y_true.shape[1] == self.num_tasks:
                raise RuntimeError('Number of tasks for {} should be {} but {} given'.format(self.name, self.num_tasks, y_true.shape[1]))

            return y_true, y_pred

        elif self.eval_metric == 'F1':
            if not 'seq_ref' in input_dict:
                raise RuntimeError('Missing key of seq_ref')
            if not 'seq_pred' in input_dict:
                raise RuntimeError('Missing key of seq_pred')

            seq_ref, seq_pred = input_dict['seq_ref'], input_dict['seq_pred']

            if not isinstance(seq_ref, list):
                raise RuntimeError('seq_ref must be of type list')
            if not isinstance(seq_pred, list):
                raise RuntimeError('seq_pred must be of type list')

            if len(seq_ref) != len(seq_pred):
                raise RuntimeError('Length of seq_true and seq_pred should be the same')

            return seq_ref, seq_pred

        else:
            raise ValueError('Undefined eval metric %s ' % (self.eval_metric))

    def eval(self, input_dict):
        """Compute the configured metric on input_dict; returns a one-entry dict."""
        if self.eval_metric == 'rocauc':
            y_true, y_pred = self._parse_and_check_input(input_dict)
            return self._eval_rocauc(y_true, y_pred)
        elif self.eval_metric == 'ap':
            y_true, y_pred = self._parse_and_check_input(input_dict)
            return self._eval_ap(y_true, y_pred)
        elif self.eval_metric == 'rmse':
            y_true, y_pred = self._parse_and_check_input(input_dict)
            return self._eval_rmse(y_true, y_pred)
        elif self.eval_metric == 'rmae':
            y_true, y_pred = self._parse_and_check_input(input_dict)
            return self._eval_rmae(y_true, y_pred)
        elif self.eval_metric == 'acc':
            y_true, y_pred = self._parse_and_check_input(input_dict)
            return self._eval_acc(y_true, y_pred)
        elif self.eval_metric == 'F1':
            seq_ref, seq_pred = self._parse_and_check_input(input_dict)
            return self._eval_F1(seq_ref, seq_pred)
        else:
            raise ValueError('Undefined eval metric %s ' % (self.eval_metric))

    @property
    def expected_input_format(self):
        """Human-readable description of the input eval() expects."""
        desc = '==== Expected input format of Evaluator for {}\n'.format(self.name)
        if self.eval_metric == 'rocauc' or self.eval_metric == 'ap':
            desc += '{\'y_true\': y_true, \'y_pred\': y_pred}\n'
            desc += '- y_true: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
            desc += '- y_pred: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
            desc += 'where y_pred stores score values (for computing AUC score),\n'
            desc += 'num_task is {}, and '.format(self.num_tasks)
            desc += 'each row corresponds to one graph.\n'
            desc += 'nan values in y_true are ignored during evaluation.\n'
        # rmse and rmae share the same description (branches were identical).
        elif self.eval_metric == 'rmse' or self.eval_metric == 'rmae':
            desc += '{\'y_true\': y_true, \'y_pred\': y_pred}\n'
            desc += '- y_true: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
            desc += '- y_pred: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
            desc += 'where num_task is {}, and '.format(self.num_tasks)
            desc += 'each row corresponds to one graph.\n'
            desc += 'nan values in y_true are ignored during evaluation.\n'
        elif self.eval_metric == 'acc':
            desc += '{\'y_true\': y_true, \'y_pred\': y_pred}\n'
            desc += '- y_true: numpy ndarray or torch tensor of shape (num_node, num_task)\n'
            desc += '- y_pred: numpy ndarray or torch tensor of shape (num_node, num_task)\n'
            desc += 'where y_pred stores predicted class label (integer),\n'
            desc += 'num_task is {}, and '.format(self.num_tasks)
            desc += 'each row corresponds to one graph.\n'
        elif self.eval_metric == 'F1':
            desc += '{\'seq_ref\': seq_ref, \'seq_pred\': seq_pred}\n'
            desc += '- seq_ref: a list of lists of strings\n'
            desc += '- seq_pred: a list of lists of strings\n'
            desc += 'where seq_ref stores the reference sequences of sub-tokens, and\n'
            desc += 'seq_pred stores the predicted sequences of sub-tokens.\n'
        else:
            raise ValueError('Undefined eval metric %s ' % (self.eval_metric))

        return desc

    @property
    def expected_output_format(self):
        """Human-readable description of the dict eval() returns."""
        desc = '==== Expected output format of Evaluator for {}\n'.format(self.name)
        if self.eval_metric == 'rocauc':
            desc += '{\'rocauc\': rocauc}\n'
            desc += '- rocauc (float): ROC-AUC score averaged across {} task(s)\n'.format(self.num_tasks)
        elif self.eval_metric == 'ap':
            desc += '{\'ap\': ap}\n'
            desc += '- ap (float): Average Precision (AP) score averaged across {} task(s)\n'.format(self.num_tasks)
        elif self.eval_metric == 'rmse':
            desc += '{\'rmse\': rmse}\n'
            desc += '- rmse (float): root mean squared error averaged across {} task(s)\n'.format(self.num_tasks)
        elif self.eval_metric == 'rmae':
            desc += '{\'rmae\': rmae}\n'
            desc += '- rmae (float): root mean squared error averaged across {} task(s)\n'.format(self.num_tasks)
        elif self.eval_metric == 'acc':
            desc += '{\'acc\': acc}\n'
            desc += '- acc (float): Accuracy score averaged across {} task(s)\n'.format(self.num_tasks)
        elif self.eval_metric == 'F1':
            desc += '{\'F1\': F1}\n'
            desc += '- F1 (float): F1 score averaged over samples.\n'
        else:
            raise ValueError('Undefined eval metric %s ' % (self.eval_metric))

        return desc

    def _eval_rocauc(self, y_true, y_pred):
        '''
            compute ROC-AUC averaged across tasks
        '''

        rocauc_list = []

        for i in range(y_true.shape[1]):
            #AUC is only defined when there is at least one positive data.
            if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == 0) > 0:
                # ignore nan values
                is_labeled = y_true[:,i] == y_true[:,i]
                rocauc_list.append(roc_auc_score(y_true[is_labeled,i], y_pred[is_labeled,i]))

        if len(rocauc_list) == 0:
            raise RuntimeError('No positively labeled data available. Cannot compute ROC-AUC.')

        return {'rocauc': sum(rocauc_list)/len(rocauc_list)}

    def _eval_ap(self, y_true, y_pred):
        '''
            compute Average Precision (AP) averaged across tasks
        '''

        ap_list = []

        for i in range(y_true.shape[1]):
            #AUC is only defined when there is at least one positive data.
            if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == 0) > 0:
                # ignore nan values
                is_labeled = y_true[:,i] == y_true[:,i]
                ap = average_precision_score(y_true[is_labeled,i], y_pred[is_labeled,i])

                ap_list.append(ap)

        if len(ap_list) == 0:
            raise RuntimeError('No positively labeled data available. Cannot compute Average Precision.')

        return {'ap': sum(ap_list)/len(ap_list)}

    def _eval_rmse(self, y_true, y_pred):
        '''
            compute RMSE score averaged across tasks
        '''
        rmse_list = []

        for i in range(y_true.shape[1]):
            # ignore nan values
            is_labeled = y_true[:,i] == y_true[:,i]
            # BUG FIX: restrict the computation to column i (previously all
            # columns were used, giving every task the same value).
            rmse_list.append(np.sqrt(((y_true[is_labeled,i] - y_pred[is_labeled,i])**2).mean()))

        return {'rmse': sum(rmse_list)/len(rmse_list)}

    def _eval_rmae(self, y_true, y_pred):
        '''
            compute RMAE score averaged across tasks
        '''
        # NOTE(review): relative error divides by y_true, so zero targets
        # would yield inf — confirm labels are strictly positive.
        error = np.sum(np.abs((y_true - y_pred) / y_true))
        rmae = error / len(y_true)

        return {'rmae': rmae}

    def _eval_acc(self, y_true, y_pred):
        '''
            compute accuracy averaged across tasks
        '''
        acc_list = []

        for i in range(y_true.shape[1]):
            is_labeled = y_true[:,i] == y_true[:,i]
            correct = y_true[is_labeled,i] == y_pred[is_labeled,i]
            acc_list.append(float(np.sum(correct))/len(correct))

        return {'acc': sum(acc_list)/len(acc_list)}

    def _eval_F1(self, seq_ref, seq_pred):
        '''
            compute F1 score averaged over samples
        '''

        precision_list = []
        recall_list = []
        f1_list = []

        for l, p in zip(seq_ref, seq_pred):
            label = set(l)
            prediction = set(p)
            true_positive = len(label.intersection(prediction))
            false_positive = len(prediction - label)
            false_negative = len(label - prediction)

            if true_positive + false_positive > 0:
                precision = true_positive / (true_positive + false_positive)
            else:
                precision = 0

            if true_positive + false_negative > 0:
                recall = true_positive / (true_positive + false_negative)
            else:
                recall = 0

            if precision + recall > 0:
                f1 = 2 * precision * recall / (precision + recall)
            else:
                f1 = 0

            precision_list.append(precision)
            recall_list.append(recall)
            f1_list.append(f1)

        return {'precision': np.average(precision_list),
                'recall': np.average(recall_list),
                'F1': np.average(f1_list)}
if __name__ == '__main__':
    # Smoke tests against a few OGB dataset configurations.
    ev = Evaluator('ogbg-code')
    print(ev.expected_input_format)
    print(ev.expected_output_format)
    seq_ref = [['tom', 'is'], ['he'], ['he'], ['hey', 'fea', 'he'], ['alpha'], ['fe4qfq', 'beta'], ['aa']]
    seq_pred = [['tom', 'is'], ['he'], ['he'], ['hey', 'he', 'fea'], ['alpha'], ['beta', 'fe4qfq'], ['aa']]
    print(ev.eval({'seq_ref': seq_ref, 'seq_pred': seq_pred}))

    ev = Evaluator('ogbg-molpcba')
    print(ev.expected_input_format)
    print(ev.expected_output_format)
    print(ev.eval({'y_true': torch.tensor(np.random.randint(2, size = (100,128))),
                   'y_pred': torch.tensor(np.random.randn(100,128))}))

    ev = Evaluator('ogbg-molhiv')
    print(ev.expected_input_format)
    print(ev.expected_output_format)
    print(ev.eval({'y_true': torch.tensor(np.random.randint(2, size = (100,1))),
                   'y_pred': torch.tensor(np.random.randn(100,1))}))

    ### rmse case
    ev = Evaluator('ogbg-mollipo')
    print(ev.expected_input_format)
    print(ev.expected_output_format)
    print(ev.eval({'y_true': np.random.randn(100,1),
                   'y_pred': np.random.randn(100,1)}))

    ### acc
    ev = Evaluator('ogbg-ppa')
    print(ev.expected_input_format)
    print(ev.expected_output_format)
    print(ev.eval({'y_true': np.random.randint(5, size = (100,1)),
                   'y_pred': np.random.randint(5, size = (100,1))}))
| 14,336 | 40.677326 | 222 | py |
LOSTIN | LOSTIN-main/GNN-supernode/make_master_file.py | ### script for writing meta information of datasets into master.csv
### for graph property prediction datasets.
import pandas as pd
dataset_list = []
dataset_dict = {}

### add cdfg_lut
# Metadata for the single 'pita_db' dataset, keyed exactly as the loaders
# expect (see read_graph_* / Evaluator).
name = 'pita_db'
dataset_dict[name] = {
    'eval metric': 'rmse',
    'download_name': 'cdfg_lut',
    'version': 1,
    'add_inverse_edge': False,
    'split': 'scaffold',
    'num tasks': 1,
    'has_node_attr': True,
    'has_edge_attr': True,
    'task type': 'regression',
    'num classes': -1,
    'additional node files': 'None',
    'additional edge files': 'None',
    'binary': False,
}

# saving the dataframe
df = pd.DataFrame(dataset_dict)
df.to_csv('master.csv')
LOSTIN | LOSTIN-main/GNN-supernode/read_graph_pyg.py | import pandas as pd
import torch
from torch_geometric.data import Data
import os.path as osp
import numpy as np
from read_graph_raw import read_csv_graph_raw
from tqdm import tqdm
def read_graph_pyg(raw_dir, add_inverse_edge = False, additional_node_files = [], additional_edge_files = [], binary = False):
    """Load raw CSV graphs from raw_dir and convert them to PyG Data objects.

    Returns (pyg_graph_list, graph_info_container): the circuit graphs, and
    one Data per sample holding its super-node encoding plus the index of
    the graph it refers to.
    """
    graph_list, super_node, graph_selection = read_csv_graph_raw(
        raw_dir, add_inverse_edge,
        additional_node_files = additional_node_files,
        additional_edge_files = additional_edge_files)

    # Per-sample info: super-node feature vector + circuit-graph index.
    graph_info_container = []
    for i in range(len(super_node)):
        info = Data()
        info.super_node = torch.from_numpy(super_node[i])
        info.graph_selection = torch.from_numpy(graph_selection[i])
        graph_info_container.append(info)

    print('Converting graphs into PyG objects...')

    pyg_graph_list = []
    for graph in tqdm(graph_list):
        g = Data()
        g.__num_nodes__ = graph.pop('num_nodes')
        g.edge_index = torch.from_numpy(graph.pop('edge_index'))

        edge_feat = graph.pop('edge_feat')
        if edge_feat is not None:
            g.edge_attr = torch.from_numpy(edge_feat)

        node_feat = graph.pop('node_feat')
        if node_feat is not None:
            g.x = torch.from_numpy(node_feat)

        for key in additional_node_files:
            g[key] = torch.from_numpy(graph.pop(key))

        for key in additional_edge_files:
            g[key] = torch.from_numpy(graph.pop(key))

        pyg_graph_list.append(g)

    return pyg_graph_list, graph_info_container
# No standalone script behavior; this module is intended to be imported.
if __name__ == '__main__':
    pass
LOSTIN | LOSTIN-main/GNN-supernode/read_graph_raw.py | import pandas as pd
import os.path as osp
import os
import numpy as np
from tqdm import tqdm
### reading raw files from a directory.
### for homogeneous graph
def read_csv_graph_raw(raw_dir, add_inverse_edge = False, additional_node_files = [], additional_edge_files = []):
'''
raw_dir: path to the raw directory
add_inverse_edge (bool): whether to add inverse edge or not
return: graph_list, which is a list of graphs.
Each graph is a dictionary, containing edge_index, edge_feat, node_feat, and num_nodes
edge_feat and node_feat are optional: if a graph does not contain it, we will have None.
additional_node_files and additional_edge_files must be in the raw directory.
- The name should be {additional_node_file, additional_edge_file}.csv.gz
- The length should be num_nodes or num_edges
additional_node_files must start from 'node_'
additional_edge_files must start from 'edge_'
'''
print('Loading necessary files...')
print('This might take a while.')
# loading necessary files
try:
edge = pd.read_csv(osp.join(raw_dir, 'edge.csv.gz'), compression='gzip', header = None).values.T.astype(np.int64) # (2, num_edge) numpy array
num_node_list = pd.read_csv(osp.join(raw_dir, 'num-node-list.csv.gz'), compression='gzip', header = None).astype(np.int64)[0].tolist() # (num_graph, ) python list
num_edge_list = pd.read_csv(osp.join(raw_dir, 'num-edge-list.csv.gz'), compression='gzip', header = None).astype(np.int64)[0].tolist() # (num_edge, ) python list
except FileNotFoundError:
raise RuntimeError('No necessary file')
try:
node_feat = pd.read_csv(osp.join(raw_dir, 'node-feat.csv.gz'), compression='gzip', header = None).values
if 'int' in str(node_feat.dtype):
node_feat = node_feat.astype(np.int64)
else:
# float
node_feat = node_feat.astype(np.float32)
except FileNotFoundError:
node_feat = None
try:
edge_feat = pd.read_csv(osp.join(raw_dir, 'edge-feat.csv.gz'), compression='gzip', header = None).values
if 'int' in str(edge_feat.dtype):
edge_feat = edge_feat.astype(np.int64)
else:
#float
edge_feat = edge_feat.astype(np.float32)
except FileNotFoundError:
edge_feat = None
try:
super_node = pd.read_csv(osp.join(raw_dir, 'node-super.csv.gz'), compression='gzip', header = None).values
super_node = np.expand_dims(super_node, axis=1)
if 'int' in str(super_node.dtype):
super_node = super_node.astype(np.int64)
else:
#float
super_node = super_node.astype(np.float32)
except FileNotFoundError:
super_node = None
try:
graph_selection = pd.read_csv(osp.join(raw_dir, 'graph-choice.csv.gz'), compression='gzip', header = None).values
if 'int' in str(graph_selection.dtype):
graph_selection = graph_selection.astype(np.int64)
else:
#float
graph_selection = graph_selection.astype(np.float32)
except FileNotFoundError:
graph_selection = None
additional_node_info = {}
for additional_file in additional_node_files:
assert(additional_file[:5] == 'node_')
# hack for ogbn-proteins
if additional_file == 'node_species' and osp.exists(osp.join(raw_dir, 'species.csv.gz')):
os.rename(osp.join(raw_dir, 'species.csv.gz'), osp.join(raw_dir, 'node_species.csv.gz'))
temp = pd.read_csv(osp.join(raw_dir, additional_file + '.csv.gz'), compression='gzip', header = None).values
if 'int' in str(temp.dtype):
additional_node_info[additional_file] = temp.astype(np.int64)
else:
# float
additional_node_info[additional_file] = temp.astype(np.float32)
additional_edge_info = {}
for additional_file in additional_edge_files:
assert(additional_file[:5] == 'edge_')
temp = pd.read_csv(osp.join(raw_dir, additional_file + '.csv.gz'), compression='gzip', header = None).values
if 'int' in str(temp.dtype):
additional_edge_info[additional_file] = temp.astype(np.int64)
else:
# float
additional_edge_info[additional_file] = temp.astype(np.float32)
graph_list = []
num_node_accum = 0
num_edge_accum = 0
print('Processing graphs...')
for num_node, num_edge in tqdm(zip(num_node_list, num_edge_list), total=len(num_node_list)):
graph = dict()
### handling edge
if add_inverse_edge:
### duplicate edge
duplicated_edge = np.repeat(edge[:, num_edge_accum:num_edge_accum+num_edge], 2, axis = 1)
duplicated_edge[0, 1::2] = duplicated_edge[1,0::2]
duplicated_edge[1, 1::2] = duplicated_edge[0,0::2]
graph['edge_index'] = duplicated_edge
if edge_feat is not None:
graph['edge_feat'] = np.repeat(edge_feat[num_edge_accum:num_edge_accum+num_edge], 2, axis = 0)
else:
graph['edge_feat'] = None
for key, value in additional_edge_info.items():
graph[key] = np.repeat(value[num_edge_accum:num_edge_accum+num_edge], 2, axis = 0)
else:
graph['edge_index'] = edge[:, num_edge_accum:num_edge_accum+num_edge]
if edge_feat is not None:
graph['edge_feat'] = edge_feat[num_edge_accum:num_edge_accum+num_edge]
else:
graph['edge_feat'] = None
for key, value in additional_edge_info.items():
graph[key] = value[num_edge_accum:num_edge_accum+num_edge]
num_edge_accum += num_edge
### handling node
if node_feat is not None:
graph['node_feat'] = node_feat[num_node_accum:num_node_accum+num_node]
else:
graph['node_feat'] = None
for key, value in additional_node_info.items():
graph[key] = value[num_node_accum:num_node_accum+num_node]
graph['num_nodes'] = num_node
num_node_accum += num_node
graph_list.append(graph)
return graph_list, super_node, graph_selection | 6,248 | 37.103659 | 170 | py |
LOSTIN | LOSTIN-main/GNN-supernode/main_gnn.py | import torch
from torch_geometric.loader import DataLoader
import torch.optim as optim
import torch.nn.functional as F
from gnn import GNN
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tqdm import tqdm
import argparse
import time
import numpy as np
import json
import operator
from functools import reduce
### importing OGB
from dataset_pyg import PygGraphPropPredDataset
from evaluate import Evaluator
# Loss functions shared by train(): BCE-with-logits for classification
# task types, plain MSE for regression task types (selected per-batch by
# checking "classification" in task_type).
cls_criterion = torch.nn.BCEWithLogitsLoss()
reg_criterion = torch.nn.MSELoss()
#reg_criterion=torch.nn.SmoothL1Loss(reduction='mean', beta=1.0)
def gen_batch_dat(batch, graphs):
    """Materialize the graph tensors for a mini-batch of samples.

    Each sample in `batch` carries only a `graph_selection` index into the
    shared `graphs` list; this function concatenates the selected graphs'
    node features, edge indices and edge attributes into one disconnected
    "big graph" and builds the node-to-sample assignment vector, mirroring
    what torch_geometric's `Batch.from_data_list` does.

    Args:
        batch: a batch object with `y` (one entry per sample) and
            `graph_selection` (per-sample index into `graphs`); its
            `x`, `edge_index`, `edge_attr` and `batch` fields are
            overwritten in place.
        graphs: list of graph objects exposing `x`, `edge_index`
            (shape (2, E), local node ids) and `edge_attr`.

    Returns:
        The same `batch` object with the concatenated tensors attached.

    Fix vs. the original implementation: edge indices are now shifted by
    the cumulative node count of the graphs already appended. Without the
    offset, every graph's edges pointed into the first graph's rows of the
    concatenated `x`, corrupting message passing across the batch.
    """
    edge_chunks, attr_chunks, x_chunks, assign_chunks = [], [], [], []
    node_offset = 0
    for sample_idx in range(len(batch.y)):
        g = graphs[int(batch.graph_selection[sample_idx])]
        num_nodes = g.x.size(0)
        # Shift local node ids into the concatenated node numbering.
        edge_chunks.append(g.edge_index + node_offset)
        attr_chunks.append(g.edge_attr)
        x_chunks.append(g.x)
        assign_chunks.append(torch.full((num_nodes,), sample_idx, dtype=torch.long))
        node_offset += num_nodes
    if not x_chunks:
        # Empty batch: keep the original sentinel behavior (no tensors).
        batch.edge_index = None
        batch.edge_attr = None
        batch.x = None
        batch.batch = None
        return batch
    batch.edge_index = torch.cat(edge_chunks, 1)
    batch.edge_attr = torch.cat(attr_chunks, 0)
    batch.x = torch.cat(x_chunks, 0)
    batch.batch = torch.cat(assign_chunks, 0)
    return batch
def train(model, device, loader, optimizer, task_type, graphs):
    """Run one optimization epoch over `loader`.

    Batches are first expanded into full graph tensors via gen_batch_dat,
    moved to `device`, and scored with the task-appropriate criterion
    (BCE-with-logits when `task_type` mentions "classification", MSE
    otherwise). NaN targets are masked out of the loss.
    """
    model.train()
    for _, raw_batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = gen_batch_dat(raw_batch, graphs).to(device)
        # Degenerate batches (single node, or everything assigned to
        # sample 0) are skipped rather than trained on.
        if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
            continue
        pred = model(batch)
        optimizer.zero_grad()
        # NaN != NaN, so this mask drops unlabeled targets from the loss.
        labeled = batch.y == batch.y
        criterion = cls_criterion if "classification" in task_type else reg_criterion
        loss = criterion(
            pred.to(torch.float32)[labeled],
            batch.y.to(torch.float32)[labeled],
        )
        loss.backward()
        optimizer.step()
def eval(model, device, loader, evaluator, graphs):
    """Score `model` on `loader` without gradient tracking.

    Returns a tuple `(metrics_dict, y_true, y_pred)` where the arrays are
    the concatenated, CPU-resident targets and predictions and
    `metrics_dict` is whatever `evaluator.eval` produces from them.
    (Note: the name shadows the `eval` builtin; kept for caller
    compatibility.)
    """
    model.eval()
    true_chunks = []
    pred_chunks = []
    for _, raw_batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = gen_batch_dat(raw_batch, graphs).to(device)
        # Single-node batches are skipped, matching train().
        if batch.x.shape[0] == 1:
            continue
        with torch.no_grad():
            pred = model(batch)
        true_chunks.append(batch.y.view(pred.shape).detach().cpu())
        pred_chunks.append(pred.detach().cpu())
    y_true = torch.cat(true_chunks, dim=0).numpy()
    y_pred = torch.cat(pred_chunks, dim=0).numpy()
    metrics = evaluator.eval({"y_true": y_true, "y_pred": y_pred})
    return metrics, y_true, y_pred
def main():
    """Command-line driver: configure, train and validate a GNN regressor.

    Parses hyperparameters, loads a PygGraphPropPredDataset, splits it,
    trains for `--epochs` epochs, checkpoints the model whenever the
    validation metric reaches a new best, and dumps the validation curve
    and predictions to a JSON file.

    NOTE(review): `--virtual_emb_dim` is parsed but never read in this
    function, and `scheduler` is created but `scheduler.step` is never
    called here — both look vestigial; confirm before relying on them.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='GNN baselines on ogbgmol* data with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--gnn', type=str, default='gin-virtual',
                        help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gin-virtual)')
    parser.add_argument('--drop_ratio', type=float, default=0.5,
                        help='dropout ratio (default: 0.5)')
    parser.add_argument('--num_layer', type=int, default=10,
                        help='number of GNN message passing layers (default: 5)')
    parser.add_argument('--emb_dim', type=int, default=8,
                        help='dimensionality of hidden units in GNNs (default: 300)')
    parser.add_argument('--virtual_emb_dim', type=int, default=25,
                        help='dimensionality of hidden units of virtual node in GNNs (default: 25)')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=20,
                        help='number of epochs to train (default: 300)')
    parser.add_argument('--num_workers', type=int, default=0,
                        help='number of workers (default: 0)')
    parser.add_argument('--dataset', type=str, default="pita_area",
                        help='dataset name (default: ogbg-molhiv)')
    parser.add_argument('--feature', type=str, default="full",
                        help='full feature or simple feature')
    parser.add_argument('--filename', type=str, default="",
                        help='filename to output result (default: )')
    args = parser.parse_args()

    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")

    ### automatic dataloading and splitting
    dataset = PygGraphPropPredDataset(name = args.dataset)

    if args.feature == 'full':
        pass
    elif args.feature == 'simple':
        print('using simple feature')
        # only retain the top two node/edge features
        dataset.data.x = dataset.data.x[:,:2]
        dataset.data.edge_attr = dataset.data.edge_attr[:,:2]

    ### automatic evaluator. takes dataset name as input
    evaluator = Evaluator(args.dataset)

    # [‘adder’, ‘arbiter’, ‘bar’, ‘div’, ‘log2’, ‘max’] / [‘multiplier’, ‘sin’, ‘sqrt’, ‘square’, ‘voter’]
    # Hard-coded split: first 1.8M samples are further split 660k/165k/975k
    # into train/valid/held-out; the remaining 1.5M form a second test set.
    data_0, dataset_test_0 = dataset[0:1800000], dataset[1800000:3300000]
    dataset_ratio = [660000, 165000, 975000]
    dataset_train, dataset_valid, dataset_test_1 = torch.utils.data.random_split(data_0, dataset_ratio)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    valid_loader = DataLoader(dataset_valid, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    # test_loader = DataLoader(dataset_test_0 + dataset_test_1, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)

    # Shared list of circuit graphs referenced by each sample's
    # graph_selection index (see gen_batch_dat).
    graphs = dataset.graphs

    if args.gnn == 'gin':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gin-virtual':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    elif args.gnn == 'gcn':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gcn-virtual':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    else:
        raise ValueError('Invalid GNN type')

    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.8, patience=10,min_lr=0.00001)

    valid_curve = []
    test_curve = []
    train_curve = []
    test_predict_value= []
    test_true_value= []
    valid_predict_value= []
    valid_true_value= []

    for epoch in range(1, args.epochs + 1):
        print("=====Epoch {}".format(epoch))
        print('Training...')
        train(model, device, train_loader, optimizer, dataset.task_type, graphs)

        print('Evaluating...')
        # train_perf, _, _ = eval(model, device, train_loader, evaluator, graphs)
        valid_perf, v_true, v_pred= eval(model, device, valid_loader, evaluator, graphs)
        # test_perf, t_true, t_pred = eval(model, device, test_loader, evaluator, graphs)

        print({'Validation': valid_perf})

        # train_curve.append(train_perf[dataset.eval_metric])
        valid_curve.append(valid_perf[dataset.eval_metric])
        # test_curve.append(test_perf[dataset.eval_metric])
        # test_predict_value.append(reduce(operator.add, t_pred.tolist()))
        # Flatten the per-sample prediction lists into one flat list.
        valid_predict_value.append(reduce(operator.add, v_pred.tolist()))
        # test_loss=test_perf[dataset.eval_metric]
        valid_loss=valid_perf[dataset.eval_metric]
        # Checkpoint whenever this epoch ties or beats the best metric so far
        # (the current value is already in valid_curve, hence <=).
        if valid_loss<=np.min(np.array(valid_curve)):
            PATH='model/1_'+args.dataset + '_'+ args.gnn+ '_layer_'+ str(args.num_layer)+'_model.pt'
            torch.save({'epoch': epoch,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'loss': valid_loss
                        }, PATH)
            # test_true_value=reduce(operator.add, t_true.tolist())
            valid_true_value=reduce(operator.add, v_true.tolist())

    if 'classification' in dataset.task_type:
        best_val_epoch = np.argmax(np.array(valid_curve))
    else:
        best_val_epoch = np.argmin(np.array(valid_curve))

    print('Finished training!')
    print('Best validation score: {}'.format(valid_curve[best_val_epoch]))

    # Dump the full validation history and best-epoch predictions to JSON.
    f = open('1_'+args.dataset + '_'+ args.gnn+ '_layer_'+ str(args.num_layer)+ '.json', 'w')
    result=dict(val=valid_curve[best_val_epoch],
                valid_pred=valid_predict_value,
                valid_true=valid_true_value,
                valid_curve=valid_curve)
    json.dump(result, f)
    f.close()

    if not args.filename == '':
        torch.save({'Val': valid_curve[best_val_epoch]}, args.filename)
# Script entry point.
if __name__ == "__main__":
    main()
| 9,703 | 41.375546 | 183 | py |
LOSTIN | LOSTIN-main/GNN-supernode/gnn.py | import torch
from torch_geometric.nn import MessagePassing,BatchNorm
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
import torch.nn.functional as F
from torch_geometric.nn.inits import uniform
from torch.nn import Sequential, ReLU, Linear, ModuleList
from conv import GNN_node, GNN_node_Virtualnode
from torch_scatter import scatter_mean
class GNN(torch.nn.Module):
    """Graph-level prediction model: node GNN + pooling + linear head.

    Node embeddings come from GNN_node (or GNN_node_Virtualnode when
    virtual_node=True), are pooled into one vector per graph, and a final
    linear layer maps them to `num_tasks` outputs.
    """

    def __init__(self, num_tasks, num_layer = 5, emb_dim = 300,
                 gnn_type = 'gin', virtual_node = True, residual = False, drop_ratio = 0.5, JK = "sum", graph_pooling = "sum"):
        '''
        num_tasks (int): number of labels to be predicted
        num_layer (int): number of message-passing layers (must be >= 2)
        emb_dim (int): node embedding dimensionality
        gnn_type (str): 'gin' or 'gcn'
        virtual_node (bool): whether to add virtual node or not
        residual (bool): add residual connections between layers
        drop_ratio (float): dropout probability
        JK (str): jumping-knowledge mode, 'last' or 'sum'
        graph_pooling (str): 'sum', 'mean', 'max', 'attention' or 'set2set'
        '''

        super(GNN, self).__init__()

        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks
        self.graph_pooling = graph_pooling

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        ### GNN to generate node embeddings
        if virtual_node:
            self.gnn_node = GNN_node_Virtualnode(num_layer, emb_dim, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)
        else:
            self.gnn_node = GNN_node(num_layer, emb_dim, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)

        ### Pooling function to generate whole-graph embeddings
        if self.graph_pooling == "sum":
            self.pool = global_add_pool
        elif self.graph_pooling == "mean":
            self.pool = global_mean_pool
        elif self.graph_pooling == "max":
            self.pool = global_max_pool
        elif self.graph_pooling == "attention":
            self.pool = GlobalAttention(gate_nn = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, 1)))
        elif self.graph_pooling == "set2set":
            self.pool = Set2Set(emb_dim, processing_steps = 2)
        else:
            raise ValueError("Invalid graph pooling type.")

        # Prediction head. Only graph_pred_linear[2] is applied in forward()
        # (the 2-layer MLP below it is commented out there), so the first two
        # Linear/BatchNorm entries are built but currently unused.
        self.graph_pred_linear=ModuleList()
        self.graph_norm=ModuleList()
        if graph_pooling == "set2set":
            self.graph_pred_linear.append(Linear(2*emb_dim, 2*emb_dim))
            self.graph_pred_linear.append(Linear(2*emb_dim, emb_dim))
            self.graph_pred_linear.append(Linear(emb_dim, self.num_tasks))
            self.graph_norm.append(BatchNorm(2*emb_dim))
            self.graph_norm.append(BatchNorm(emb_dim))
        else:
            self.graph_pred_linear.append(Linear(emb_dim, 2*emb_dim))
            self.graph_pred_linear.append(Linear(2*emb_dim, emb_dim))
            self.graph_pred_linear.append(Linear(emb_dim, self.num_tasks))
            self.graph_norm.append(BatchNorm(2*emb_dim))
            self.graph_norm.append(BatchNorm(emb_dim))

    def forward(self, batched_data):
        # NOTE(review): with graph_pooling == "set2set" the pooled vector has
        # 2*emb_dim features but graph_pred_linear[2] expects emb_dim, which
        # would fail at runtime since the intermediate layers are skipped —
        # confirm set2set is not used before enabling it.
        h_node = self.gnn_node(batched_data)

        h_graph = self.pool(h_node, batched_data.batch)

        ### final predictions
        #h_graph = self.graph_pred_linear[0](h_graph)
        #h_graph = self.graph_norm[0](h_graph)
        #h_graph = F.dropout(F.relu(h_graph), self.drop_ratio, training = self.training)
        #h_graph = self.graph_pred_linear[1](h_graph)
        #h_graph = self.graph_norm[1](h_graph)
        #h_graph = F.dropout(F.relu(h_graph), self.drop_ratio, training = self.training)
        return self.graph_pred_linear[2](h_graph)
# Smoke test: construct a GNN when the module is run directly.
if __name__ == '__main__':
    GNN(num_tasks = 10)
LOSTIN | LOSTIN-main/GNN-supernode/conv.py | import torch
from torch_geometric.nn import MessagePassing
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool, global_add_pool
from node_encoder import NodeEncoder,EdgeEncoder
from torch_geometric.utils import degree
import math
### GIN convolution along the graph structure
### GIN convolution along the graph structure
class GINConv(MessagePassing):
    """GIN layer with edge features: h' = MLP((1+eps)*h + sum relu(h_j + e_ij))."""

    def __init__(self, emb_dim):
        '''
        emb_dim (int): node embedding dimensionality
        '''

        super(GINConv, self).__init__(aggr = "add")

        # Two-layer MLP applied after aggregation.
        self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, emb_dim))
        # Learnable epsilon weighting of the central node, initialized to 0.
        self.eps = torch.nn.Parameter(torch.Tensor([0]))

        self.edge_encoder = EdgeEncoder(emb_dim = emb_dim)

    def forward(self, x, edge_index, edge_attr):
        edge_embedding = self.edge_encoder(edge_attr)
        out = self.mlp((1 + self.eps) *x + self.propagate(edge_index, x=x, edge_attr=edge_embedding))

        return out

    def message(self, x_j, edge_attr):
        # Per-edge message: neighbor embedding plus edge embedding, rectified.
        return F.relu(x_j + edge_attr)

    def update(self, aggr_out):
        return aggr_out
### GCN convolution along the graph structure
### GCN convolution along the graph structure
class GCNConv(MessagePassing):
    """GCN layer with edge features and symmetric degree normalization."""

    def __init__(self, emb_dim):
        super(GCNConv, self).__init__(aggr='add')

        self.linear = torch.nn.Linear(emb_dim, emb_dim)
        # Learnable "root" embedding added to each node's own contribution.
        self.root_emb = torch.nn.Embedding(1, emb_dim)
        self.edge_encoder = EdgeEncoder(emb_dim = emb_dim)

    def forward(self, x, edge_index, edge_attr):
        x = self.linear(x)
        edge_embedding = self.edge_encoder(edge_attr)

        row, col = edge_index

        #edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)
        # Degree + 1 accounts for the implicit self-loop term below.
        deg = degree(row, x.size(0), dtype = x.dtype) + 1
        deg_inv_sqrt = deg.pow(-0.5)
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0

        # Symmetric normalization D^{-1/2} A D^{-1/2} per edge.
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]

        # Aggregated neighbor messages plus the node's own (self-loop) term.
        return self.propagate(edge_index, x=x, edge_attr = edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)

    def message(self, x_j, edge_attr, norm):
        return norm.view(-1, 1) * F.relu(x_j + edge_attr)

    def update(self, aggr_out):
        return aggr_out
### GNN to generate node embedding
class GNN_node(torch.nn.Module):
"""
Output:
node representations
"""
def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
'''
emb_dim (int): node embedding dimensionality
num_layer (int): number of GNN message passing layers
'''
super(GNN_node, self).__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
### add residual connection or not
self.residual = residual
if self.num_layer < 2:
raise ValueError("Number of GNN layers must be greater than 1.")
self.node_encoder = NodeEncoder(emb_dim)
###List of GNNs
self.convs = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
for layer in range(num_layer):
if gnn_type == 'gin':
self.convs.append(GINConv(emb_dim))
elif gnn_type == 'gcn':
self.convs.append(GCNConv(emb_dim))
else:
raise ValueError('Undefined GNN type called {}'.format(gnn_type))
self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
def forward(self, batched_data):
x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
### computing input node embedding
h_list = [self.node_encoder(x)]
for layer in range(self.num_layer):
h = self.convs[layer](h_list[layer], edge_index, edge_attr)
h = self.batch_norms[layer](h)
if layer == self.num_layer - 1:
#remove relu for the last layer
h = F.dropout(h, self.drop_ratio, training = self.training)
else:
h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
if self.residual:
h += h_list[layer]
h_list.append(h)
### Different implementations of Jk-concat
if self.JK == "last":
node_representation = h_list[-1]
elif self.JK == "sum":
node_representation = 0
for layer in range(self.num_layer):
node_representation += h_list[layer]
return node_representation
### Virtual GNN to generate node embedding
class GNN_node_Virtualnode(torch.nn.Module):
"""
Output:
node representations
"""
def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
'''
emb_dim (int): node embedding dimensionality
'''
super(GNN_node_Virtualnode, self).__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
### add residual connection or not
self.residual = residual
if self.num_layer < 2:
raise ValueError("Number of GNN layers must be greater than 1.")
self.node_encoder = NodeEncoder(emb_dim)
### set the initial virtual node embedding to 0.
self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)
torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)
self.supernode_layer = torch.nn.Linear(25, 8)
### List of GNNs
self.convs = torch.nn.ModuleList()
### batch norms applied to node embeddings
self.batch_norms = torch.nn.ModuleList()
### List of MLPs to transform virtual node at every layer
self.mlp_virtualnode_list = torch.nn.ModuleList()
for layer in range(num_layer):
if gnn_type == 'gin':
self.convs.append(GINConv(emb_dim))
elif gnn_type == 'gcn':
self.convs.append(GCNConv(emb_dim))
else:
raise ValueError('Undefined GNN type called {}'.format(gnn_type))
self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
for layer in range(num_layer - 1):
self.mlp_virtualnode_list.append(torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), \
torch.nn.Linear(2*emb_dim, emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU()))
def forward(self, batched_data):
x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
### virtual node embeddings for graphs
virtualnode_data = self.supernode_layer(batched_data.super_node.to(torch.float))
virtualnode_embedding = self.virtualnode_embedding(torch.zeros(batch[-1].item() + 1).to(edge_index.dtype).to(edge_index.device))
virtualnode_embedding += virtualnode_data
h_list = [self.node_encoder(x)]
for layer in range(self.num_layer):
### add message from virtual nodes to graph nodes
h_list[layer] = h_list[layer] + virtualnode_embedding[batch]
### Message passing among graph nodes
h = self.convs[layer](h_list[layer], edge_index, edge_attr)
h = self.batch_norms[layer](h)
if layer == self.num_layer - 1:
#remove relu for the last layer
h = F.dropout(h, self.drop_ratio, training = self.training)
else:
h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
if self.residual:
h = h + h_list[layer]
h_list.append(h)
### update the virtual nodes
if layer < self.num_layer - 1:
### add message from graph nodes to virtual nodes
virtualnode_embedding_temp = global_add_pool(h_list[layer], batch) + virtualnode_embedding
### transform virtual nodes using MLP
if self.residual:
virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
else:
virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
### Different implementations of Jk-concat
if self.JK == "last":
node_representation = h_list[-1]
elif self.JK == "sum":
node_representation = 0
for layer in range(self.num_layer):
node_representation += h_list[layer]
return node_representation
# No script behavior; this module is import-only.
if __name__ == "__main__":
    pass
| 8,910 | 35.520492 | 182 | py |
LOSTIN | LOSTIN-main/GNN-supernode/dataset_pyg.py | from torch_geometric.data import InMemoryDataset
import pandas as pd
import shutil, os
import os.path as osp
import torch
import numpy as np
from read_graph_pyg import read_graph_pyg
class PygGraphPropPredDataset(InMemoryDataset):
    """OGB-style graph-property-prediction dataset for PyTorch Geometric.

    Resolves dataset metadata from a local `master.csv` (or a supplied
    `meta_dict`), converts raw CSV graph files into PyG objects in
    process(), and loads two processed artifacts: the shared graph list
    (`self.graphs`) and the collated per-sample info (`self.data`,
    `self.slices`).
    """

    def __init__(self, name, root = 'dataset', transform=None, pre_transform = None, meta_dict = None):
        '''
        - name (str): name of the dataset
        - root (str): root directory to store the dataset folder
        - transform, pre_transform (optional): transform/pre-transform graph objects
        - meta_dict: dictionary that stores all the meta-information about data. Default is None,
                    but when something is passed, it uses its information. Useful for debugging for external contributers.
        '''

        self.name = name ## original name, e.g., ogbg-molhiv

        if meta_dict is None:
            self.dir_name = '_'.join(name.split('-'))

            # check if previously-downloaded folder exists.
            # If so, use that one.
            if osp.exists(osp.join(root, self.dir_name + '_pyg')):
                self.dir_name = self.dir_name + '_pyg'

            self.original_root = root
            self.root = osp.join(root, self.dir_name)

            # Metadata table shipped next to this module; one column per dataset.
            master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
            if not self.name in master:
                error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
                error_mssg += 'Available datasets are as follows:\n'
                error_mssg += '\n'.join(master.keys())
                raise ValueError(error_mssg)
            self.meta_info = master[self.name]

        else:
            self.dir_name = meta_dict['dir_path']
            self.original_root = ''
            self.root = meta_dict['dir_path']
            self.meta_info = meta_dict

        self.download_name = self.meta_info['download_name'] ## name of downloaded file, e.g., tox21

        self.num_tasks = int(self.meta_info['num tasks'])
        self.eval_metric = self.meta_info['eval metric']
        self.task_type = self.meta_info['task type']
        self.__num_classes__ = int(self.meta_info['num classes'])
        self.binary = self.meta_info['binary'] == 'True'

        super(PygGraphPropPredDataset, self).__init__(self.root, transform, pre_transform)

        # processed_paths[0]: list of shared graphs; processed_paths[1]:
        # collated per-sample data/slices (see process()).
        self.graphs = torch.load(self.processed_paths[0])
        self.data, self.slices = torch.load(self.processed_paths[1])

    def get_idx_split(self, split_type = None):
        """Return {'train','valid','test'} index tensors for a named split."""
        if split_type is None:
            split_type = self.meta_info['split']

        path = osp.join(self.root, 'split', split_type)

        # short-cut if split_dict.pt exists
        if os.path.isfile(os.path.join(path, 'split_dict.pt')):
            return torch.load(os.path.join(path, 'split_dict.pt'))

        train_idx = pd.read_csv(osp.join(path, 'train.csv.gz'), compression='gzip', header = None).values.T[0]
        valid_idx = pd.read_csv(osp.join(path, 'valid.csv.gz'), compression='gzip', header = None).values.T[0]
        test_idx = pd.read_csv(osp.join(path, 'test.csv.gz'), compression='gzip', header = None).values.T[0]

        return {'train': torch.tensor(train_idx, dtype = torch.long), 'valid': torch.tensor(valid_idx, dtype = torch.long), 'test': torch.tensor(test_idx, dtype = torch.long)}

    @property
    def num_classes(self):
        return self.__num_classes__

    @property
    def raw_file_names(self):
        # Names of raw files required before process() can run.
        if self.binary:
            return ['data.npz']
        else:
            file_names = ['edge']
            if self.meta_info['has_node_attr'] == 'True':
                file_names.append('node-feat')
            if self.meta_info['has_edge_attr'] == 'True':
                file_names.append('edge-feat')
            return [file_name + '.csv.gz' for file_name in file_names]

    @property
    def processed_file_names(self):
        return 'geometric_data_processed.pt', 'graph_info_processed.pt'

    def process(self):
        """Convert raw CSVs into PyG graphs, attach labels, and save both
        the shared graph list and the collated per-sample info."""
        ### read pyg graph list
        add_inverse_edge = self.meta_info['add_inverse_edge'] == 'True'

        if self.meta_info['additional node files'] == 'None':
            additional_node_files = []
        else:
            additional_node_files = self.meta_info['additional node files'].split(',')

        if self.meta_info['additional edge files'] == 'None':
            additional_edge_files = []
        else:
            additional_edge_files = self.meta_info['additional edge files'].split(',')

        # data_list: shared graphs; g_info_list: per-sample records that
        # receive the labels and get collated below.
        data_list, g_info_list = read_graph_pyg(self.raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files, binary=self.binary)

        if self.task_type == 'subtoken prediction':
            graph_label_notparsed = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values
            graph_label = [str(graph_label_notparsed[i][0]).split(' ') for i in range(len(graph_label_notparsed))]

            for i, g in enumerate(data_list):
                g.y = graph_label[i]

        else:
            if self.binary:
                graph_label = np.load(osp.join(self.raw_dir, 'graph-label.npz'))['graph_label']
            else:
                graph_label = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values

            has_nan = np.isnan(graph_label).any()

            for i, g in enumerate(g_info_list):
                if 'classification' in self.task_type:
                    # Keep float labels when NaNs are present (masked later);
                    # otherwise cast to long for classification.
                    if has_nan:
                        g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)
                    else:
                        g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.long)
                else:
                    g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)

        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]

        # g_data, g_slices = self.collate(data_list)
        data, slices = self.collate(g_info_list)
        self.slices = slices

        print('Saving...')
        torch.save(data_list, self.processed_paths[0])
        torch.save((data, slices), self.processed_paths[1])
| 6,385 | 41.291391 | 212 | py |
LOSTIN | LOSTIN-main/GNN-LSTM/main_gnn_customized_delay.py | ### Libraries
import numpy as np
import argparse
from tqdm import tqdm
import matplotlib.pyplot as plts
import pandas as pd
import torch
# Preliminaries
# torchtext 0.6.0
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# Evaluation
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# graph loading dependency
from torch_geometric.data import DataLoader
from dataset_pyg import PygGraphPropPredDataset
from gnn import GNN
### dir
# Input directory for the train/valid/test CSVs consumed by main(), and
# the directory where training() writes model checkpoints and metrics.
data_folder = 'lstm/data_area'
destination_folder = 'model_ckt/area'
# Hybrid model
class Hybridmodel(nn.Module):
def __init__(self, input_dim, emb_dim, hidden_dim=64, graph_emb=11):
super(Hybridmodel, self).__init__()
self.embedding = nn.Embedding(input_dim, emb_dim)
self.lstm = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=2,
batch_first=True, bidirectional=False)
self.gmodel = GNN(gnn_type = 'gin', num_tasks = 1, num_layer = 2, emb_dim = graph_emb, drop_ratio = 0.5, virtual_node = False)
self.linear=ModuleList()
self.linear.append(Linear(hidden_dim + graph_emb, 100))
self.linear.append(Linear(100,100))
self.linear.append(Linear(100,1))
self.norm=ModuleList()
self.norm.append(BatchNorm1d(100))
self.norm.append(BatchNorm1d(100))
def forward(self, text, text_len, graph_batch):
text_emb = self.embedding(text)
packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
packed_output, _ = self.lstm(packed_input)
output, _ = pad_packed_sequence(packed_output, batch_first=True)
out = output[:, -1, :]
g_emb = self.gmodel(graph_batch)
combined_emb = torch.cat((out, g_emb[text[:,0]-7]),1)
flow_fea=F.relu(self.linear[0](combined_emb))
flow_fea=self.norm[0](flow_fea)
flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
flow_fea=F.relu(self.linear[1](flow_fea))
flow_fea=self.norm[1](flow_fea)
flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
flow_out=self.linear[2](flow_fea)
flow_out = torch.squeeze(flow_out, 1)
return flow_out
# Save Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
    """Persist model + optimizer state and the validation loss to disk.

    Silently does nothing when `save_path` is None.
    """
    if save_path is None:
        return

    payload = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'valid_loss': valid_loss,
    }
    torch.save(payload, save_path)
    print(f'Model saved to ==> {save_path}')
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
    """Persist the train/valid loss curves and their step axis to disk.

    Silently does nothing when `save_path` is None.
    """
    if save_path is None:
        return

    torch.save(
        {
            'train_loss_list': train_loss_list,
            'valid_loss_list': valid_loss_list,
            'global_steps_list': global_steps_list,
        },
        save_path,
    )
    print(f'Model saved to ==> {save_path}')
# Training Function
def training(model, device,
             optimizer,
             train_loader,
             valid_loader,
             graph_loader,
             num_epochs,
             eval_every,
             args,
             criterion = nn.MSELoss(),
             file_path = destination_folder,
             best_valid_loss = float("Inf"), best_train_loss = float("Inf")):
    """Train the hybrid model, validating every `eval_every` steps.

    Checkpoints model/optimizer state and the loss curves whenever the
    combined (train + valid) average loss improves; writes the final
    metrics once more at the end. The `criterion = nn.MSELoss()` default
    is a shared module instance across calls — harmless here since MSELoss
    is stateless.
    """
    # initialize running values
    running_loss = 0.0
    valid_running_loss = 0.0
    global_step = 0
    train_loss_list = []
    valid_loss_list = []
    global_steps_list = []

    # read graphs
    # NOTE(review): this keeps only the LAST batch yielded by graph_loader;
    # presumably its batch size covers all design graphs so one batch holds
    # everything — confirm, otherwise earlier graphs are silently dropped.
    for graph_batch in graph_loader:
        graph_batch = graph_batch.to(device)

    # training loop
    model.train()
    for epoch in range(num_epochs):
        for ((flow, flow_len), labels), _ in tqdm(train_loader, desc="Iteration"):
            labels = labels.to(device)
            flow = flow.to(device)
            # pack_padded_sequence requires lengths on the CPU.
            flow_len = flow_len.to("cpu")
            output = model(flow, flow_len, graph_batch)

            loss = criterion(output, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # update running values
            running_loss += loss.item()
            global_step += 1

            # evaluation step
            if global_step % eval_every == 0:
                model.eval()
                with torch.no_grad():

                    # validation loop
                    for ((flow, flow_len), labels), _ in valid_loader:
                        labels = labels.to(device)
                        flow = flow.to(device)
                        flow_len = flow_len.to("cpu")
                        output = model(flow, flow_len, graph_batch)

                        loss = criterion(output, labels)
                        valid_running_loss += loss.item()

                # evaluation
                average_train_loss = running_loss / eval_every
                average_valid_loss = valid_running_loss / len(valid_loader)
                train_loss_list.append(average_train_loss)
                valid_loss_list.append(average_valid_loss)
                global_steps_list.append(global_step)

                # resetting running values
                running_loss = 0.0
                valid_running_loss = 0.0
                model.train()

                # print progress
                print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
                      .format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader),
                              average_train_loss, average_valid_loss))

                # checkpoint
                # Improvement is judged on the SUM of train and valid loss.
                if best_valid_loss + best_train_loss > average_valid_loss + average_train_loss:
                    best_valid_loss = average_valid_loss
                    best_train_loss = average_train_loss
                    save_checkpoint(file_path + '/model_batch_'+str(args.batch_size)+'.pt', model, optimizer, best_valid_loss)
                    save_metrics(file_path + '/metrics_batch_'+str(args.batch_size)+'.pt', train_loss_list, valid_loss_list, global_steps_list)

    save_metrics(file_path + '/metrics_batch_'+str(args.batch_size)+'.pt', train_loss_list, valid_loss_list, global_steps_list)
    print('Finished Training!')
def main():
    """CLI entry point: parse arguments, load the circuit graphs and flow
    data, build the hybrid GNN-LSTM model, and run training.

    Relies on module-level names defined elsewhere in this file/project:
    ``Hybridmodel``, ``training``, ``data_folder``, ``PygGraphPropPredDataset``,
    ``DataLoader``, and the torchtext ``Field``/``TabularDataset``/
    ``BucketIterator`` classes.
    """
    # --- arguments ---------------------------------------------------------
    parser = argparse.ArgumentParser(description='Customized model for flow perf prediction')
    parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    # Fix: the help text previously claimed "(default: 300)" but the actual
    # default has always been 1.
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 1)')
    parser.add_argument('--emb_dim', type=int, default=10, help='dimensionality of input embedding of transformations')
    parser.add_argument('--graph_emb', type=int, default=32, help='dimensionality of hidden units in GNNs')
    args = parser.parse_args()

    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")

    # --- load graphs -------------------------------------------------------
    # batch_size=32 covers the whole graph dataset in one batch here.
    pyg_dataset = PygGraphPropPredDataset(name='vgraph')
    graph_loader = DataLoader(pyg_dataset, batch_size=32, shuffle=False)

    # --- torchtext fields --------------------------------------------------
    delay_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    flow_field = Field(lower=True, include_lengths=True, batch_first=True)
    fields = [('flow', flow_field), ('delay', delay_field)]

    # --- datasets ----------------------------------------------------------
    print('Data loading ...')
    # The test split is loaded for completeness but no test iterator is
    # currently built/used below.
    train, valid, test = TabularDataset.splits(path=data_folder, train='train.csv',
                                               validation='valid.csv', test='test.csv',
                                               format='CSV', fields=fields, skip_header=True)

    # --- iterators ---------------------------------------------------------
    train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow),
                                device=device, sort=True, sort_within_batch=True)
    valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow),
                                device=device, sort=True, sort_within_batch=True)

    # Vocabulary is built from the training split only (no leakage).
    flow_field.build_vocab(train, min_freq=1, specials_first=False)

    learning_rate = 2e-3
    weight_decay = 2e-6
    model = Hybridmodel(input_dim=len(flow_field.vocab), emb_dim=args.emb_dim,
                        graph_emb=args.graph_emb).to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)

    # eval_every = len(train_iter) => validate once per epoch.
    training(model=model, device=device, optimizer=optimizer, args=args,
             train_loader=train_iter, valid_loader=valid_iter, graph_loader=graph_loader,
             eval_every=len(train_iter), num_epochs=args.epochs)
if __name__ == "__main__":
    # Run the full train/eval pipeline when invoked as a script.
    main()
LOSTIN | LOSTIN-main/GNN-LSTM/node_encoder.py | import torch
from features import get_node_feature_dims, get_edge_feature_dims
# Cache the per-feature vocabulary sizes once at import time; these size the
# embedding tables created by NodeEncoder/EdgeEncoder below.
full_node_feature_dims = get_node_feature_dims()
full_edge_feature_dims = get_edge_feature_dims()
class NodeEncoder(torch.nn.Module):
    """Embed categorical node features into a dense vector of size ``emb_dim``.

    One embedding table is created per node-feature dimension (as given by
    ``full_node_feature_dims``), each Xavier-initialised.

    NOTE(review): ``forward`` deliberately starts summing at feature index 1,
    so the first feature column is never embedded even though its table is
    allocated; the commented-out lines in the original suggest this was an
    intentional experiment — confirm before changing.
    """

    def __init__(self, emb_dim):
        super(NodeEncoder, self).__init__()
        self.node_embedding_list = torch.nn.ModuleList()
        for cardinality in full_node_feature_dims:
            table = torch.nn.Embedding(cardinality, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.node_embedding_list.append(table)

    def forward(self, x):
        # Sum the embeddings of feature columns 1..N-1; column 0 is skipped.
        x_embedding = 0
        for col in range(1, x.shape[1]):
            x_embedding += self.node_embedding_list[col](x[:, col])
        return x_embedding
class EdgeEncoder(torch.nn.Module):
    """Embed categorical edge features into a dense vector of size ``emb_dim``.

    One Xavier-initialised embedding table is created per edge-feature
    dimension (as given by ``full_edge_feature_dims``).
    """

    def __init__(self, emb_dim):
        super(EdgeEncoder, self).__init__()
        self.edge_embedding_list = torch.nn.ModuleList()
        for cardinality in full_edge_feature_dims:
            table = torch.nn.Embedding(cardinality, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.edge_embedding_list.append(table)

    def forward(self, edge_attr):
        # Unlike NodeEncoder.forward, every feature column is summed here.
        edge_embedding = 0
        for col in range(edge_attr.shape[1]):
            edge_embedding += self.edge_embedding_list[col](edge_attr[:, col])
        return edge_embedding
if __name__ == '__main__':
    # Smoke test: embed a couple of graphs from the 'vgraph' dataset and
    # print the resulting embedding shapes.
    from dataset_pyg import PygGraphPropPredDataset

    graphs = PygGraphPropPredDataset(name='vgraph')
    node_encoder = NodeEncoder(2)
    edge_encoder = EdgeEncoder(5)
    print(node_encoder(graphs[1].x).shape)
    print(node_encoder(graphs[0].x).shape)
    print(edge_encoder(graphs[1].edge_attr).shape[1])
| 1,857 | 28.967742 | 90 | py |
LOSTIN | LOSTIN-main/GNN-LSTM/features.py | import json
import pandas as pd
# Closed vocabularies for each categorical node feature. The integer codes
# produced by node_to_feature_vector are index positions into these lists;
# values not found fall back to the last entry via safe_index.
allowable_features = {
    'node_type': ['input', 'intermediate', 'output'],
    'op_type': ['and_oper', 'or_oper', 'not_oper', 'misc'],
}
def safe_index(l, e):
    """Return the index of element e in list l.

    If e is not present, return the last valid index (``len(l) - 1``) so
    that unknown categories map onto the final "catch-all" vocabulary entry.
    """
    try:
        return l.index(e)
    # Fix: narrow the original bare `except:` — list.index raises ValueError
    # on a miss, and a bare except would also swallow KeyboardInterrupt etc.
    except ValueError:
        return len(l) - 1
def node_type(opcode):
    """Map a raw opcode string to its coarse node category.

    'input' and 'output' pass through unchanged; the three logic operators
    all collapse to 'intermediate'. Any other opcode yields None (which the
    caller's safe_index then maps onto the last vocabulary slot).
    """
    if opcode in ('input', 'output'):
        return opcode
    if opcode in ('and_oper', 'or_oper', 'not_oper'):
        return 'intermediate'
    return None
def node_to_feature_vector(node):
    """Convert a node object into its list of integer feature codes.

    :return: ``[node_type code, op_type code]`` — indices into the
        corresponding ``allowable_features`` vocabularies.
    """
    raw_type = node['node_attributes']['node_type']
    return [
        safe_index(allowable_features['node_type'], node_type(raw_type)),
        safe_index(allowable_features['op_type'], raw_type),
    ]
def get_node_feature_dims():
    """Return the vocabulary size of each node feature, in feature order."""
    return [
        len(allowable_features['node_type']),
        len(allowable_features['op_type']),
    ]
def edge_to_feature_vector(edge):
    """Convert an edge to its feature list of indices.

    Edges currently carry no real attributes, so every edge receives the
    single placeholder feature ``[0]``; the ``edge`` argument is accepted
    for interface symmetry with node_to_feature_vector but is not inspected.

    :return: list
    """
    return [0]
def get_edge_feature_dims():
    """Return the vocabulary size of each edge feature (one dummy feature)."""
    return [1]
# Dead code kept for reference: a one-off script (disabled by being wrapped
# in a module-level string) that serialised the EPFL benchmark circuits to
# the CSV files (node-feat, edge, edge-feat, num-node-list, num-edge-list)
# consumed when building the graph dataset.
'''
num_graph=11
read_dir='epfl_graph/'
vgraphs = ['adder', 'arbiter', 'bar', 'div', 'log2', 'max', 'multiplier', 'sin', 'sqrt', 'square', 'voter']
node_feat=[]
edge_list=[]
edge_feat=[]
num_node_list=[]
num_edge_list=[]
for i in range(num_graph):
f = open(read_dir+vgraphs[i]+'.json', 'r')
d = json.load(f)
f.close()
nodes=d['nodes']
edges=d['edges']
node_index_map=dict() # map the node name to the index
index=0
for n in nodes:
if n[0] not in node_index_map:
node_index_map[n[0]]=index
node_feat.append(node_to_feature_vector(n[1]))
index=index+1
for e in edges:
source=node_index_map[e[0]]
sink=node_index_map[e[1]]
edge_list.append([source,sink])
edge_feat.append(edge_to_feature_vector(e[2]))
num_node_list.append(len(nodes))
num_edge_list.append(len(edges))
NODE=pd.DataFrame(node_feat)
EDGE_list=pd.DataFrame(edge_list)
EDGE_feat=pd.DataFrame(edge_feat)
node_num = pd.DataFrame(num_node_list)
edge_num = pd.DataFrame(num_edge_list)
NODE.to_csv('node-feat.csv',index=False,header=False)
EDGE_list.to_csv('edge.csv',index=False,header=False)
EDGE_feat.to_csv('edge-feat.csv',index=False,header=False)
node_num.to_csv('num-node-list.csv',index=False,header=False)
edge_num.to_csv('num-edge-list.csv',index=False,header=False)
'''
| 2,664 | 23.906542 | 107 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.