Search is not available for this dataset
text
stringlengths
75
104k
def load_pex(path_to_pex, include_deps=True):
  """Loads a pex file — and, when requested, its dependencies — onto sys.path."""
  abs_path_to_pex = os.path.abspath(path_to_pex)
  Log.debug("Add a pex to the path: %s" % abs_path_to_pex)
  pex_dir = os.path.dirname(abs_path_to_pex)
  if abs_path_to_pex not in sys.path:
    sys.path.insert(0, pex_dir)

  # add dependencies to path
  if include_deps:
    for dep in _get_deps_list(abs_path_to_pex):
      dep_path = os.path.join(pex_dir, dep)
      if dep_path not in sys.path:
        Log.debug("Add a new dependency to the path: %s" % dep)
        sys.path.insert(0, dep_path)
  Log.debug("Python path: %s" % str(sys.path))
def resolve_heron_suffix_issue(abs_pex_path, class_path): """Resolves duplicate package suffix problems When dynamically loading a pex file and a corresponding python class (bolt/spout/topology), if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts with this Heron Instance pex package (heron.instance.src.python...), making the Python interpreter unable to find the target class in a given pex file. This function resolves this issue by individually loading packages with suffix `heron` to avoid this issue. However, if a dependent module/class that is not directly specified under ``class_path`` and has conflicts with other native heron packages, there is a possibility that such a class/module might not be imported correctly. For example, if a given ``class_path`` was ``heron.common.src.module.Class``, but it has a dependent module (such as by import statement), ``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that ``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError. The best way to avoid this issue is NOT to dynamically load a pex file whose top level package name is ``heron``. Note that this method is included because some of the example topologies and tests have to have a pex with its top level package name of ``heron``. 
""" # import top-level package named `heron` of a given pex file importer = zipimport.zipimporter(abs_pex_path) importer.load_module("heron") # remove 'heron' and the classname to_load_lst = class_path.split('.')[1:-1] loaded = ['heron'] loaded_mod = None for to_load in to_load_lst: sub_importer = zipimport.zipimporter(os.path.join(abs_pex_path, '/'.join(loaded))) loaded_mod = sub_importer.load_module(to_load) loaded.append(to_load) return loaded_mod
def import_and_get_class(path_to_pex, python_class_name):
  """Imports and loads a class from a given pex file path and python class name

  For example, to get a class called `Sample` located in
  /some-path/sample.pex/heron/examples/src/python/sample.py,
  ``path_to_pex`` needs to be ``/some-path/sample.pex``, and
  ``python_class_name`` needs to be ``heron.examples.src.python.sample.Sample``

  :param path_to_pex: path to the pex file containing the class
  :param python_class_name: fully-qualified dotted name of the class
  :return: the class object
  """
  abs_path_to_pex = os.path.abspath(path_to_pex)

  Log.debug("Add a pex to the path: %s" % abs_path_to_pex)
  Log.debug("In import_and_get_class with cls_name: %s" % python_class_name)
  split = python_class_name.split('.')
  from_path = '.'.join(split[:-1])
  import_name = split[-1]

  Log.debug("From path: %s, import name: %s" % (from_path, import_name))

  # Resolve duplicate package suffix problem (heron.), if the top level package name is heron
  if python_class_name.startswith("heron."):
    try:
      mod = resolve_heron_suffix_issue(abs_path_to_pex, python_class_name)
      return getattr(mod, import_name)
    except Exception:
      # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt still propagate;
      # on failure we fall through to the normal import below
      Log.error("Could not resolve class %s with special handling" % python_class_name)

  mod = __import__(from_path, fromlist=[import_name], level=-1)
  Log.debug("Imported module: %s" % str(mod))
  return getattr(mod, import_name)
def new_source(self, source):
  """Adds a new source to the computation DAG"""
  if callable(source):
    streamlet = SupplierStreamlet(source)
  elif isinstance(source, Generator):
    streamlet = GeneratorStreamlet(source)
  else:
    raise RuntimeError("Builder's new source has to be either a Generator or a function")
  self._sources.append(streamlet)
  return streamlet
def build(self, bldr):
  """Builds the topology using the given topology builder.

  Raises RuntimeError if any source's sub-graph could not be fully built.
  """
  # the `sets` module is deprecated since Python 2.6; the built-in set() is a drop-in here
  stage_names = set()
  for source in self._sources:
    source._build(bldr, stage_names)
  for source in self._sources:
    if not source._all_built():
      raise RuntimeError("Topology cannot be fully built! Are all sources added?")
def get(self, path):
  """Serve the file-viewer page for ``path``.

  Responds 404 for a missing path argument and 403 for non-relative paths.
  """
  if path is None:
    self.set_status(404)
    self.write("No such file")
    self.finish()
    return

  if not utils.check_path(path):
    self.write("Only relative paths are allowed")
    self.set_status(403)
    self.finish()
    return

  # build the template only after the path has been validated, so the
  # error responses above don't pay the cost of loading the page assets
  t = Template(utils.get_asset("file.html"))
  args = dict(
      filename=path,
      jquery=utils.get_asset("jquery.js"),
      pailer=utils.get_asset("jquery.pailer.js"),
      css=utils.get_asset("bootstrap.css"),
  )
  self.write(t.generate(**args))
  self.finish()
def load_state_manager_locations(cluster, state_manager_config_file='heron-conf/statemgr.yaml',
                                 overrides=None):
  """Reads configs to determine which state manager to use and converts them to state
  manager locations. Handles a subset of config wildcard substitution supported in the
  substitute method in org.apache.heron.spi.common.Misc.java

  :param cluster: cluster name, substituted for the ${CLUSTER} wildcard
  :param state_manager_config_file: path to the statemgr yaml config file
  :param overrides: optional dict of config overrides applied on top of the file config
                    (was a mutable default ``{}``; ``None`` avoids shared-state pitfalls)
  :return: a list containing a single state manager location dict
  """
  with open(state_manager_config_file, 'r') as stream:
    # safe_load: a config file must not be able to construct arbitrary python objects
    config = yaml.safe_load(stream)

  home_dir = os.path.expanduser("~")
  wildcards = {
      "~": home_dir,
      "${HOME}": home_dir,
      "${CLUSTER}": cluster,
  }
  if os.getenv('JAVA_HOME'):
    wildcards["${JAVA_HOME}"] = os.getenv('JAVA_HOME')
  config = __replace(config, wildcards, state_manager_config_file)

  # merge with overrides
  if overrides:
    config.update(overrides)

  # need to convert from the format in statemgr.yaml to the format that the python state
  # managers take. first, set reasonable defaults to local
  state_manager_location = {
      'type': 'file',
      'name': 'local',
      'tunnelhost': '127.0.0.1',
      'rootpath': '~/.herondata/repository/state/local',
  }

  # then map the statemgr.yaml config keys to the python state manager location
  key_mappings = {
      'heron.statemgr.connection.string': 'hostport',
      'heron.statemgr.tunnel.host': 'tunnelhost',
      'heron.statemgr.root.path': 'rootpath',
  }
  for config_key in key_mappings:
    if config_key in config:
      state_manager_location[key_mappings[config_key]] = config[config_key]

  state_manager_class = config['heron.class.state.manager']
  if state_manager_class == 'org.apache.heron.statemgr.zookeeper.curator.CuratorStateManager':
    state_manager_location['type'] = 'zookeeper'
    state_manager_location['name'] = 'zk'

  return [state_manager_location]
def __replace(config, wildcards, config_file):
  """For each key/value pair in config, do wildcard substitution on string values.

  Raises ValueError if a value still contains an unrecognized ${...} token
  after substitution.
  """
  for config_key in config:
    original_value = config[config_key]
    if not isinstance(original_value, str):
      continue
    substituted = original_value
    for token, replacement in wildcards.items():
      if replacement:
        substituted = substituted.replace(token, replacement)
    leftover = re.findall(r'\${[A-Z_]+}', substituted)
    if leftover:
      raise ValueError("%s=%s in file %s contains unsupported or unset wildcard tokens: %s"
                       % (config_key, original_value, config_file, ", ".join(leftover)))
    config[config_key] = substituted
  return config
def get_command_handlers():
  ''' Create a map of command names and handlers '''
  # Each value is a module-level handler; each is expected to expose
  # create_parser() and run() for its subcommand (see create_parser/run below).
  return {
      'activate': activate,
      'config': hconfig,
      'deactivate': deactivate,
      'help': cli_help,
      'kill': kill,
      'restart': restart,
      'submit': submit,
      'update': update,
      'version': version
  }
def create_parser(command_handlers):
  """Build the top-level `heron` argument parser with one subparser per command.

  :param command_handlers: map of command name -> handler exposing create_parser()
  :return: the configured argparse parser
  """
  parser = argparse.ArgumentParser(
      prog='heron',
      epilog=HELP_EPILOG,
      formatter_class=config.SubcommandHelpFormatter,
      add_help=True)

  subparsers = parser.add_subparsers(
      title="Available commands",
      metavar='<command> <options>')

  # register subcommands in alphabetical order so `heron help` output is stable
  for _, handler in sorted(command_handlers.items()):
    handler.create_parser(subparsers)

  return parser
def run(handlers, command, parser, command_args, unknown_args):
  """Dispatch ``command`` to its registered handler.

  :return: the handler's result, or a SimpleResult carrying InvocationError
           when the subcommand is unknown
  """
  handler = handlers.get(command)
  if handler is not None:
    return handler.run(command, parser, command_args, unknown_args)
  err_context = 'Unknown subcommand: %s' % command
  return result.SimpleResult(result.Status.InvocationError, err_context)
def cleanup(files):
  '''Remove the given generated files/directories from disk.

  :param files: iterable of paths to remove; directories are removed recursively
  :return:
  '''
  for cur_file in files:
    if os.path.isdir(cur_file):
      shutil.rmtree(cur_file)
    else:
      # NOTE(review): for a plain file this removes the file's entire PARENT
      # directory, not just the file — presumably these files live in dedicated
      # temp directories; confirm before reusing this helper elsewhere.
      shutil.rmtree(os.path.dirname(cur_file))
def server_deployment_mode(command, parser, cluster, cl_args):
  '''Check the server deployment mode for the given cluster.
  If it is valid, return the valid set of args.

  :param cluster: cluster name
  :param cl_args: parsed command line args
  :return: updated cl_args on success; an empty dict when no cluster definition exists
  '''
  # Read the cluster definition, if not found
  client_confs = cdefs.read_server_mode_cluster_definition(cluster, cl_args)
  if not client_confs[cluster]:
    return dict()

  # tell the user which definition that we are using
  if not cl_args.get('service_url', None):
    Log.debug("Using cluster definition from file %s"
              % cliconfig.get_cluster_config_file(cluster))
  else:
    Log.debug("Using cluster service url %s" % cl_args['service_url'])

  # if cluster definition exists, but service_url is not set, it is an error
  if 'service_url' not in client_confs[cluster]:
    config_file = cliconfig.get_cluster_config_file(cluster)
    Log.error('No service url for %s cluster in %s', cluster, config_file)
    sys.exit(1)

  # (a dead `if 'config_property' in cl_args: pass` branch was removed here)

  try:
    cluster_role_env = (cl_args['cluster'], cl_args['role'], cl_args['environ'])
    config.server_mode_cluster_role_env(cluster_role_env, client_confs)
    cluster_tuple = config.defaults_cluster_role_env(cluster_role_env)
  except Exception as ex:
    Log.error("Argument cluster/[role]/[env] is not correct: %s", str(ex))
    sys.exit(1)

  new_cl_args = dict()
  new_cl_args['cluster'] = cluster_tuple[0]
  new_cl_args['role'] = cluster_tuple[1]
  new_cl_args['environ'] = cluster_tuple[2]
  new_cl_args['service_url'] = client_confs[cluster]['service_url'].rstrip('/')
  new_cl_args['deploy_mode'] = config.SERVER_MODE

  cl_args.update(new_cl_args)
  return cl_args
def direct_deployment_mode(command, parser, cluster, cl_args):
  '''Check the direct deployment mode for the given cluster.

  :param command: subcommand being run
  :param parser: top-level argument parser (used to print help on failure)
  :param cluster: cluster name
  :param cl_args: parsed command line args
  :return: updated cl_args on success; an empty dict on any validation failure
  '''
  cluster = cl_args['cluster']
  try:
    config_path = cl_args['config_path']
    override_config_file = config.parse_override_config_and_write_file(cl_args['config_property'])
  except KeyError:
    # if some of the arguments are not found, print error and exit
    subparser = config.get_subparser(parser, command)
    print(subparser.format_help())
    return dict()

  # check if the cluster config directory exists
  if not cdefs.check_direct_mode_cluster_definition(cluster, config_path):
    Log.error("Cluster config directory \'%s\' does not exist", config_path)
    return dict()

  config_path = config.get_heron_cluster_conf_dir(cluster, config_path)
  if not os.path.isdir(config_path):
    Log.error("Cluster config directory \'%s\' does not exist", config_path)
    return dict()

  Log.info("Using cluster definition in %s" % config_path)

  try:
    cluster_role_env = (cl_args['cluster'], cl_args['role'], cl_args['environ'])
    config.direct_mode_cluster_role_env(cluster_role_env, config_path)
    cluster_tuple = config.defaults_cluster_role_env(cluster_role_env)
  except Exception as ex:
    Log.error("Argument cluster/[role]/[env] is not correct: %s", str(ex))
    return dict()

  cl_args.update({
      'cluster': cluster_tuple[0],
      'role': cluster_tuple[1],
      'environ': cluster_tuple[2],
      'config_path': config_path,
      'override_config_file': override_config_file,
      'deploy_mode': config.DIRECT_MODE,
  })
  return cl_args
def extract_common_args(command, parser, cl_args):
  '''Extract the args common to all commands (cluster/role/env and submit user).

  :param command: subcommand being run
  :param parser: top-level argument parser (used to print help on failure)
  :param cl_args: parsed command line args
  :return: updated cl_args; an empty dict when the cluster argument is missing
  '''
  try:
    cluster_role_env = cl_args.pop('cluster/[role]/[env]')
  except KeyError:
    try:
      # the version command takes a plain cluster argument instead
      cluster_role_env = cl_args.pop('cluster')
    except KeyError:
      # if some of the arguments are not found, print error and exit
      subparser = config.get_subparser(parser, command)
      print(subparser.format_help())
      return dict()

  cluster_tuple = config.get_cluster_role_env(cluster_role_env)
  cl_args.update({
      'cluster': cluster_tuple[0],
      'role': cluster_tuple[1],
      'environ': cluster_tuple[2],
      'submit_user': getpass.getuser(),
  })
  return cl_args
def execute(handlers, local_commands):
  '''Run the command

  :param handlers: map of command name -> handler module
  :param local_commands: commands that skip remote-deployment arg processing
  :return: process exit code (0 on success, 1 on failure)
  '''
  # verify if the environment variables are correctly set
  check_environment()

  # create the argument parser
  parser = create_parser(handlers)

  # if no argument is provided, print help and exit
  if len(sys.argv[1:]) == 0:
    parser.print_help()
    return 0

  # insert the boolean values for some of the options
  sys.argv = config.insert_bool_values(sys.argv)

  try:
    # parse the args
    args, unknown_args = parser.parse_known_args()
  except ValueError as ex:
    Log.error("Error while parsing arguments: %s", str(ex))
    Log.debug(traceback.format_exc())
    sys.exit(1)

  command_line_args = vars(args)

  # set log level
  log.set_logging_level(command_line_args)
  Log.debug("Input Command Line Args: %s", command_line_args)

  # command to be execute
  command = command_line_args['subcommand']
  is_local_command = command in local_commands

  # version is handled early: it needs none of the cluster/deployment args
  if command == 'version':
    results = run(handlers, command, parser, command_line_args, unknown_args)
    return 0 if result.is_successful(results) else 1

  if not is_local_command:
    log.set_logging_level(command_line_args)
    Log.debug("Input Command Line Args: %s", command_line_args)

    # determine the mode of deployment
    command_line_args = extract_common_args(command, parser, command_line_args)
    command_line_args = deployment_mode(command, parser, command_line_args)

    # bail out if args are empty
    if not command_line_args:
      return 1

    # register dirs cleanup function during exit
    if command_line_args['deploy_mode'] == config.DIRECT_MODE and command != "version":
      cleaned_up_files.append(command_line_args['override_config_file'])
      atexit.register(cleanup, cleaned_up_files)

  # print the input parameters, if verbose is enabled
  Log.debug("Processed Command Line Args: %s", command_line_args)

  start = time.time()
  results = run(handlers, command, parser, command_line_args, unknown_args)
  if not is_local_command:
    result.render(results)
  end = time.time()

  if not is_local_command:
    sys.stdout.flush()
    Log.debug('Elapsed time: %.3fs.', (end - start))

  return 0 if result.is_successful(results) else 1
def get(self):
  """Fetch metrics for a topology component and write them as the HTTP response.

  Reads cluster/role/environ/topology/component/metricname (and optional
  interval/instance) query arguments, asks the tmaster for the metrics via
  getComponentMetrics, and writes a success or error JSON response.
  """
  try:
    cluster = self.get_argument_cluster()
    role = self.get_argument_role()
    environ = self.get_argument_environ()
    topology_name = self.get_argument_topology()
    component = self.get_argument_component()
    metric_names = self.get_required_arguments_metricnames()

    topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
        cluster, role, environ, topology_name)

    # -1 means "no interval restriction" downstream — TODO confirm against tmaster docs
    interval = int(self.get_argument(constants.PARAM_INTERVAL, default=-1))
    instances = self.get_arguments(constants.PARAM_INSTANCE)

    metrics = yield tornado.gen.Task(
        self.getComponentMetrics,
        topology.tmaster, component, metric_names, instances, interval)

    self.write_success_response(metrics)
  except Exception as e:
    # any failure (bad args, no topology, tmaster error) becomes an error response
    Log.debug(traceback.format_exc())
    self.write_error_response(e)
def getComponentMetrics(self, tmaster, componentName, metricNames, instances,
                        interval, callback=None):
  """Get the specified metrics for the given component name of this topology.

  Returns the following dict on success::

    {
      "metrics": {
        <metricname>: {
          <instance>: <numeric value>, ...
        }, ...
      },
      "interval": <numeric value>,
      "component": "..."
    }

  Raises exception on failure.
  """
  if not tmaster or not tmaster.host or not tmaster.stats_port:
    raise Exception("No Tmaster found")

  host = tmaster.host
  port = tmaster.stats_port

  metricRequest = tmaster_pb2.MetricRequest()
  metricRequest.component_name = componentName
  # empty `instances` means "all instances of the component"
  if len(instances) > 0:
    for instance in instances:
      metricRequest.instance_id.append(instance)
  for metricName in metricNames:
    metricRequest.metric.append(metricName)
  metricRequest.interval = interval

  # Serialize the metricRequest to send as a payload
  # with the HTTP request.
  metricRequestString = metricRequest.SerializeToString()

  url = "http://{0}:{1}/stats".format(host, port)
  request = tornado.httpclient.HTTPRequest(url,
                                           body=metricRequestString,
                                           method='POST',
                                           request_timeout=5)

  Log.debug("Making HTTP call to fetch metrics")
  Log.debug("url: " + url)
  try:
    client = tornado.httpclient.AsyncHTTPClient()
    result = yield client.fetch(request)
    Log.debug("HTTP call complete.")
  except tornado.httpclient.HTTPError as e:
    raise Exception(str(e))

  # Check the response code - error if it is in 400s or 500s
  responseCode = result.code
  if responseCode >= 400:
    # BUG FIX: responseCode is an int; "str" + int raised TypeError here,
    # masking the real error — convert explicitly
    message = "Error in getting metrics from Tmaster, code: " + str(responseCode)
    Log.error(message)
    raise Exception(message)

  # Parse the response from tmaster.
  metricResponse = tmaster_pb2.MetricResponse()
  metricResponse.ParseFromString(result.body)

  if metricResponse.status.status == common_pb2.NOTOK:
    if metricResponse.status.HasField("message"):
      Log.warn("Received response from Tmaster: %s", metricResponse.status.message)

  # Form the response.
  ret = {}
  ret["interval"] = metricResponse.interval
  ret["component"] = componentName
  ret["metrics"] = {}
  for metric in metricResponse.metric:
    instance = metric.instance_id
    for im in metric.metric:
      metricname = im.name
      value = im.value
      if metricname not in ret["metrics"]:
        ret["metrics"][metricname] = {}
      ret["metrics"][metricname][instance] = value

  raise tornado.gen.Return(ret)
def floorTimestamps(self, start, end, timeline):
  """Floor each timestamp in ``timeline`` to its minute and keep those in [start, end].

  :param start: inclusive lower bound, seconds
  :param end: inclusive upper bound, seconds
  :param timeline: dict of timestamp -> value
  :return: dict of minute-floored timestamp -> value
  """
  ret = {}
  for timestamp, value in timeline.items():
    # `//` keeps this a floor on Python 3 (or with `from __future__ import division`),
    # where plain `/` would be true division and break the minute bucketing
    ts = timestamp // 60 * 60
    if start <= ts <= end:
      ret[ts] = value
  return ret
def setDefault(self, constant, start, end):
  """Fill ``self.timeline`` with ``constant`` for every minute bucket in [start, end]
  that is missing or holds 0.
  """
  # `//` keeps the minute flooring correct on Python 3 / future division,
  # where plain `/` on ints is true division
  starttime = start // 60 * 60
  if starttime < start:
    starttime += 60
  endtime = end // 60 * 60
  while starttime <= endtime:
    # STREAMCOMP-1559
    # Second check is a work around, because the response from tmaster
    # contains value 0, if it is queried for the current timestamp,
    # since the bucket is created in the tmaster, but is not filled
    # by the metrics.
    if starttime not in self.timeline or self.timeline[starttime] == 0:
      self.timeline[starttime] = constant
    starttime += 60
def initialize(self, config, context):
  """We initialize the window duration and slide interval"""
  if SlidingWindowBolt.WINDOW_DURATION_SECS in config:
    self.window_duration = int(config[SlidingWindowBolt.WINDOW_DURATION_SECS])
  else:
    # NOTE(review): logger.fatal only logs — execution continues and
    # self.window_duration stays unset, so the comparison below would raise
    # AttributeError; confirm whether fatal is expected to abort the process.
    self.logger.fatal("Window Duration has to be specified in the config")
  if SlidingWindowBolt.WINDOW_SLIDEINTERVAL_SECS in config:
    self.slide_interval = int(config[SlidingWindowBolt.WINDOW_SLIDEINTERVAL_SECS])
  else:
    # slide interval defaults to the window duration (i.e. tumbling behavior)
    self.slide_interval = self.window_duration
  if self.slide_interval > self.window_duration:
    self.logger.fatal("Slide Interval should be <= Window Duration")

  # By modifying the config, we are able to setup the tick timer
  config[api_constants.TOPOLOGY_TICK_TUPLE_FREQ_SECS] = str(self.slide_interval)

  self.current_tuples = deque()
  # restore the in-flight window from saved state, if a checkpoint exists
  if hasattr(self, 'saved_state'):
    if 'tuples' in self.saved_state:
      self.current_tuples = self.saved_state['tuples']
def process(self, tup):
  """Process a single tuple of input.

  Records the tuple together with its arrival time into current_tuples,
  then evicts any tuples that have fallen out of the window.
  """
  now = int(time.time())
  self.current_tuples.append((tup, now))
  self._expire(now)
def process_tick(self, tup):
  """Called every slide_interval: emit the current window, then expire old tuples."""
  now = int(time.time())
  window_info = WindowContext(now - self.window_duration, now)
  tuple_batch = [stored_tup for (stored_tup, _) in self.current_tuples]
  self.processWindow(window_info, tuple_batch)
  self._expire(now)
def initialize(self, config, context):
  """We initialize the window duration and slide interval"""
  if TumblingWindowBolt.WINDOW_DURATION_SECS in config:
    self.window_duration = int(config[TumblingWindowBolt.WINDOW_DURATION_SECS])
  else:
    # NOTE(review): logger.fatal only logs — execution continues with
    # self.window_duration unset, so str(self.window_duration) below would
    # raise AttributeError; confirm whether fatal is expected to abort.
    self.logger.fatal("Window Duration has to be specified in the config")

  # By modifying the config, we are able to setup the tick timer
  config[api_constants.TOPOLOGY_TICK_TUPLE_FREQ_SECS] = str(self.window_duration)

  self.current_tuples = deque()
  # restore the in-flight window from saved state, if a checkpoint exists
  if hasattr(self, 'saved_state'):
    if 'tuples' in self.saved_state:
      self.current_tuples = self.saved_state['tuples']
def process_tick(self, tup):
  """Called every window_duration: emit the window, ack its tuples, and reset."""
  now = int(time.time())
  window_info = WindowContext(now - self.window_duration, now)
  self.processWindow(window_info, list(self.current_tuples))
  for pending in self.current_tuples:
    self.ack(pending)
  self.current_tuples.clear()
def get(self, path):
  """Stream the file at ``path`` to the client as an attachment download.

  Responds 403 for non-relative paths and 404 for missing files; otherwise
  streams the file in 4 MB chunks, aborting early if the client disconnects.
  """
  logging.debug("request to download: %s", path)
  # If the file is large, we want to abandon downloading
  # if user cancels the requests.
  # pylint: disable=attribute-defined-outside-init
  self.connection_closed = False

  self.set_header("Content-Disposition", "attachment")
  if not utils.check_path(path):
    self.write("Only relative paths are allowed")
    self.set_status(403)
    self.finish()
    return

  if path is None or not os.path.isfile(path):
    self.write("File %s not found" % path)
    self.set_status(404)
    self.finish()
    return

  # stream in 4 MB chunks; a short read marks the final chunk
  length = int(4 * 1024 * 1024)
  offset = int(0)
  while True:
    data = utils.read_chunk(path, offset=offset, length=length, escape_data=False)
    if self.connection_closed or 'data' not in data or len(data['data']) < length:
      # stop on client hang-up, read failure, or the last (short) chunk;
      # the short chunk itself is written after the loop
      break
    offset += length
    self.write(data['data'])
    self.flush()
  if 'data' in data:
    # NOTE(review): this also re-writes the last chunk when the loop exited
    # because connection_closed became true — presumably harmless since the
    # connection is gone; confirm.
    self.write(data['data'])
  self.finish()
def getStmgrsRegSummary(self, tmaster, callback=None):
  """Get summary of stream managers registration summary

  Yields a dict mapping stmgr id -> True (registered) / False (absent),
  delivered via tornado.gen.Return.
  """
  # nothing to query without a reachable tmaster stats endpoint
  if not tmaster or not tmaster.host or not tmaster.stats_port:
    return
  reg_request = tmaster_pb2.StmgrsRegistrationSummaryRequest()
  request_str = reg_request.SerializeToString()
  port = str(tmaster.stats_port)
  host = tmaster.host
  url = "http://{0}:{1}/stmgrsregistrationsummary".format(host, port)
  request = tornado.httpclient.HTTPRequest(url,
                                           body=request_str,
                                           method='POST',
                                           request_timeout=5)
  Log.debug('Making HTTP call to fetch stmgrsregistrationsummary url: %s', url)
  try:
    client = tornado.httpclient.AsyncHTTPClient()
    result = yield client.fetch(request)
    Log.debug("HTTP call complete.")
  except tornado.httpclient.HTTPError as e:
    raise Exception(str(e))

  # Check the response code - error if it is in 400s or 500s
  responseCode = result.code
  if responseCode >= 400:
    # BUG FIX: responseCode is an int; "str" + int raised TypeError here,
    # masking the real error — convert explicitly
    message = "Error in getting exceptions from Tmaster, code: " + str(responseCode)
    Log.error(message)
    raise tornado.gen.Return({
        "message": message
    })

  # Parse the response from tmaster.
  reg_response = tmaster_pb2.StmgrsRegistrationSummaryResponse()
  reg_response.ParseFromString(result.body)

  # Send response
  ret = {}
  for stmgr in reg_response.registered_stmgrs:
    ret[stmgr] = True
  for stmgr in reg_response.absent_stmgrs:
    ret[stmgr] = False
  raise tornado.gen.Return(ret)
def get(self):
  """Write the topology's runtime state, augmented with per-stmgr registration info.

  Looks up the topology by cluster/role/environ/name, copies its runtime_state,
  attaches the release version and each stream manager's registration status,
  and writes a success or error JSON response.
  """
  try:
    cluster = self.get_argument_cluster()
    role = self.get_argument_role()
    environ = self.get_argument_environ()
    topology_name = self.get_argument_topology()
    topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
    runtime_state = topology_info["runtime_state"]
    runtime_state["topology_version"] = topology_info["metadata"]["release_version"]
    topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
        cluster, role, environ, topology_name)
    # ask the tmaster which stream managers are currently registered
    reg_summary = yield tornado.gen.Task(self.getStmgrsRegSummary, topology.tmaster)
    for stmgr, reg in reg_summary.items():
      runtime_state["stmgrs"].setdefault(stmgr, {})["is_registered"] = reg
    self.write_success_response(runtime_state)
  except Exception as e:
    # any failure (bad args, missing topology, tmaster error) becomes an error response
    Log.debug(traceback.format_exc())
    self.write_error_response(e)
def atomic_write_file(path, content):
  """Atomically replace the file at ``path`` with ``content``.

  A plain file.write(...) is not atomic: readers could observe a partially
  written file. We therefore write to a temporary file and rename() it onto
  the target path, since rename is atomic.
  """
  # Write to a randomly named tmp file
  tmp_file = get_tmp_filename()
  with open(tmp_file, 'w') as out:
    out.write(content)
    # make sure that all data is on disk before the rename
    out.flush()
    os.fsync(out.fileno())
  # Rename the tmp file onto the target
  os.rename(tmp_file, path)
def setup(executor):
  """Set up log, process and signal handlers"""
  # pylint: disable=unused-argument
  def signal_handler(signal_to_handle, frame):
    # We would do nothing here but just exit
    # Just catch the SIGTERM and then cleanup(), registered with atexit, would invoke
    Log.info('signal_handler invoked with signal %s', signal_to_handle)
    executor.stop_state_manager_watches()
    sys.exit(signal_to_handle)

  def cleanup():
    """Handler to trigger when receiving the SIGTERM signal
    Do cleanup inside this method, including:
    1. Terminate all children processes
    """
    Log.info('Executor terminated; exiting all process in executor.')
    # Kill child processes first and wait for log collection to finish
    for pid in executor.processes_to_monitor.keys():
      os.kill(pid, signal.SIGTERM)
    time.sleep(5)
    # We would not wait or check whether process spawned dead or not
    os.killpg(0, signal.SIGTERM)

  # Redirect stdout and stderr to files in append mode
  # The filename format is heron-executor-<container_id>.stdxxx
  shardid = executor.shard
  log.configure(logfile='heron-executor-%s.stdout' % shardid)

  pid = os.getpid()
  sid = os.getsid(pid)

  # POSIX prohibits the change of the process group ID of a session leader.
  # (`<>` was Python 2-only syntax; `!=` is equivalent and valid in both 2 and 3)
  if pid != sid:
    Log.info('Set up process group; executor becomes leader')
    os.setpgrp()  # create new process group, become its leader

  Log.info('Register the SIGTERM signal handler')
  signal.signal(signal.SIGTERM, signal_handler)

  Log.info('Register the atexit clean up')
  atexit.register(cleanup)
def main():
  """Register exit handlers, initialize the executor and run it."""
  # Heron on YARN runs as headless users, so pex compiled binaries should be
  # exploded into the container working directory. Point PEX_ROOT there
  # before any process is forked.
  env = os.environ.copy()
  env["PEX_ROOT"] = os.path.join(os.path.abspath('.'), ".pex")

  # Instantiate the executor, bind it to signal handlers and launch it
  heron_executor = HeronExecutor(sys.argv, env)
  heron_executor.initialize()
  start(heron_executor)
def init_from_parsed_args(self, parsed_args):
  """Initialize executor fields from the argparse namespace produced by parse_args."""
  self.shard = parsed_args.shard
  self.topology_name = parsed_args.topology_name
  self.topology_id = parsed_args.topology_id
  self.topology_defn_file = parsed_args.topology_defn_file
  self.state_manager_connection = parsed_args.state_manager_connection
  self.state_manager_root = parsed_args.state_manager_root
  self.state_manager_config_file = parsed_args.state_manager_config_file
  self.tmaster_binary = parsed_args.tmaster_binary
  self.stmgr_binary = parsed_args.stmgr_binary
  self.metrics_manager_classpath = parsed_args.metrics_manager_classpath
  self.metricscache_manager_classpath = parsed_args.metricscache_manager_classpath
  # '=' can be parsed in a wrong way by some schedulers (aurora) hence it needs to be escaped.
  # It is escaped in two different ways. '(61)' is the new escaping. '&equals;' was
  # the original replacement but it is not friendly to bash and is causing issues. The original
  # escaping is still left there for reference and backward compatibility purposes (to be
  # removed after no topology needs it)
  self.instance_jvm_opts =\
      base64.b64decode(parsed_args.instance_jvm_opts.lstrip('"').
                       rstrip('"').replace('(61)', '=').replace('&equals;', '='))
  self.classpath = parsed_args.classpath
  # Needed for Docker environments since the hostname of a docker container is the container's
  # id within docker, rather than the host's hostname. NOTE: this 'HOST' env variable is not
  # guaranteed to be set in all Docker executor environments (outside of Marathon)
  if is_docker_environment():
    self.master_host = os.environ.get('HOST') if 'HOST' in os.environ else socket.gethostname()
  else:
    self.master_host = socket.gethostname()
  self.master_port = parsed_args.master_port
  self.tmaster_controller_port = parsed_args.tmaster_controller_port
  self.tmaster_stats_port = parsed_args.tmaster_stats_port
  self.heron_internals_config_file = parsed_args.heron_internals_config_file
  self.override_config_file = parsed_args.override_config_file
  # "comp1:ram1,comp2:ram2" -> list of single-entry dicts -> one merged dict.
  # NOTE(review): `dict(x.items() + y.items())` and a list-returning map() are
  # Python 2-only; under Python 3 this needs dict unpacking — confirm target
  # interpreter before porting.
  self.component_ram_map =\
      map(lambda x: {x.split(':')[0]: int(x.split(':')[1])},
          parsed_args.component_ram_map.split(','))
  self.component_ram_map =\
      functools.reduce(lambda x, y: dict(x.items() + y.items()), self.component_ram_map)

  # component_jvm_opts_in_base64 itself is a base64-encoding-json-map, which is appended with
  # " at the start and end. It also escapes "=" to "&equals" due to aurora limitation
  # And the json is a map from base64-encoding-component-name to base64-encoding-jvm-options
  self.component_jvm_opts = {}
  # First we need to decode the base64 string back to a json map string.
  # '=' can be parsed in a wrong way by some schedulers (aurora) hence it needs to be escaped.
  # It is escaped in two different ways. '(61)' is the new escaping. '&equals;' was
  # the original replacement but it is not friendly to bash and is causing issues. The original
  # escaping is still left there for reference and backward compatibility purposes (to be
  # removed after no topology needs it)
  component_jvm_opts_in_json =\
      base64.b64decode(parsed_args.component_jvm_opts.
                       lstrip('"').rstrip('"').replace('(61)', '=').replace('&equals;', '='))
  if component_jvm_opts_in_json != "":
    for (k, v) in json.loads(component_jvm_opts_in_json).items():
      # In json, the component name and JVM options are still in base64 encoding
      self.component_jvm_opts[base64.b64decode(k)] = base64.b64decode(v)

  self.pkg_type = parsed_args.pkg_type
  self.topology_binary_file = parsed_args.topology_binary_file
  self.heron_java_home = parsed_args.heron_java_home
  self.shell_port = parsed_args.shell_port
  self.heron_shell_binary = parsed_args.heron_shell_binary
  self.metrics_manager_port = parsed_args.metrics_manager_port
  self.metricscache_manager_master_port = parsed_args.metricscache_manager_master_port
  self.metricscache_manager_stats_port = parsed_args.metricscache_manager_stats_port
  self.cluster = parsed_args.cluster
  self.role = parsed_args.role
  self.environment = parsed_args.environment
  self.instance_classpath = parsed_args.instance_classpath
  self.metrics_sinks_config_file = parsed_args.metrics_sinks_config_file
  self.scheduler_classpath = parsed_args.scheduler_classpath
  self.scheduler_port = parsed_args.scheduler_port
  self.python_instance_binary = parsed_args.python_instance_binary
  self.cpp_instance_binary = parsed_args.cpp_instance_binary

  self.is_stateful_topology = (parsed_args.is_stateful.lower() == 'true')
  self.checkpoint_manager_classpath = parsed_args.checkpoint_manager_classpath
  self.checkpoint_manager_port = parsed_args.checkpoint_manager_port
  self.checkpoint_manager_ram = parsed_args.checkpoint_manager_ram
  self.stateful_config_file = parsed_args.stateful_config_file

  # optional modes default to "disabled" when the flag was not supplied
  self.metricscache_manager_mode = parsed_args.metricscache_manager_mode \
      if parsed_args.metricscache_manager_mode else "disabled"
  self.health_manager_mode = parsed_args.health_manager_mode
  self.health_manager_classpath = '%s:%s'\
      % (self.scheduler_classpath, parsed_args.health_manager_classpath)
  self.jvm_remote_debugger_ports = \
      parsed_args.jvm_remote_debugger_ports.split(",") \
      if parsed_args.jvm_remote_debugger_ports else None
def parse_args(args):
  """Uses python argparse to collect positional args.

  Parses the heron-executor command line. ``args[0]`` is the program name and
  is skipped; every flag below is required unless marked otherwise. Any
  unrecognized flag is treated as fatal: it is logged, the usage is printed,
  and the process exits with status 1.

  :param args: full argv-style list including the program name at index 0
  :returns: the argparse Namespace of parsed known arguments
  """
  Log.info("Input args: %r" % args)
  parser = argparse.ArgumentParser()

  parser.add_argument("--shard", type=int, required=True)
  parser.add_argument("--topology-name", required=True)
  parser.add_argument("--topology-id", required=True)
  parser.add_argument("--topology-defn-file", required=True)
  parser.add_argument("--state-manager-connection", required=True)
  parser.add_argument("--state-manager-root", required=True)
  parser.add_argument("--state-manager-config-file", required=True)
  parser.add_argument("--tmaster-binary", required=True)
  parser.add_argument("--stmgr-binary", required=True)
  parser.add_argument("--metrics-manager-classpath", required=True)
  parser.add_argument("--instance-jvm-opts", required=True)
  parser.add_argument("--classpath", required=True)
  parser.add_argument("--master-port", required=True)
  parser.add_argument("--tmaster-controller-port", required=True)
  parser.add_argument("--tmaster-stats-port", required=True)
  parser.add_argument("--heron-internals-config-file", required=True)
  parser.add_argument("--override-config-file", required=True)
  parser.add_argument("--component-ram-map", required=True)
  parser.add_argument("--component-jvm-opts", required=True)
  parser.add_argument("--pkg-type", required=True)
  parser.add_argument("--topology-binary-file", required=True)
  parser.add_argument("--heron-java-home", required=True)
  parser.add_argument("--shell-port", required=True)
  parser.add_argument("--heron-shell-binary", required=True)
  parser.add_argument("--metrics-manager-port", required=True)
  parser.add_argument("--cluster", required=True)
  parser.add_argument("--role", required=True)
  parser.add_argument("--environment", required=True)
  parser.add_argument("--instance-classpath", required=True)
  parser.add_argument("--metrics-sinks-config-file", required=True)
  parser.add_argument("--scheduler-classpath", required=True)
  parser.add_argument("--scheduler-port", required=True)
  parser.add_argument("--python-instance-binary", required=True)
  parser.add_argument("--cpp-instance-binary", required=True)
  parser.add_argument("--metricscache-manager-classpath", required=True)
  parser.add_argument("--metricscache-manager-master-port", required=True)
  parser.add_argument("--metricscache-manager-stats-port", required=True)
  parser.add_argument("--metricscache-manager-mode", required=False)
  parser.add_argument("--is-stateful", required=True)
  parser.add_argument("--checkpoint-manager-classpath", required=True)
  parser.add_argument("--checkpoint-manager-port", required=True)
  # NOTE: `long` is the Python 2 integer type; this file targets Python 2.
  parser.add_argument("--checkpoint-manager-ram", type=long, required=True)
  parser.add_argument("--stateful-config-file", required=True)
  parser.add_argument("--health-manager-mode", required=True)
  parser.add_argument("--health-manager-classpath", required=True)
  parser.add_argument("--jvm-remote-debugger-ports", required=False,
                      help="ports to be used by a remote debugger for JVM instances")

  # Skip argv[0] (the executable name); anything not declared above is fatal.
  parsed_args, unknown_args = parser.parse_known_args(args[1:])

  if unknown_args:
    Log.error('Unknown argument: %s' % unknown_args[0])
    parser.print_help()
    sys.exit(1)

  return parsed_args
def initialize(self):
  """Prepare the local environment before any process is launched.

  Done with a method call outside of the constructor for 2 reasons:
  1. Unit tests probably won't want/need to do this
  2. We don't initialize the logger (also something unit tests don't want)
     until after the constructor

  Creates and opens up the log directory, makes sure the launcher binaries
  are executable, and records this executor's own pid.
  """
  # Create the log directory and open it (and the cwd) up for reading.
  for shell_cmd in ('mkdir -p %s' % self.log_dir,
                    'chmod a+rx . && chmod a+x %s' % self.log_dir):
    self.run_command_or_exit(Command(shell_cmd, self.shell_env))

  # Ensure each binary we are about to spawn is world-executable.
  for executable in (self.tmaster_binary, self.stmgr_binary, self.heron_shell_binary):
    mode = os.stat(executable)[stat.ST_MODE]
    if not mode & stat.S_IXOTH:
      self.run_command_or_exit(Command('chmod +x %s' % executable, self.shell_env))

  # Log itself pid
  log_pid_for_process(get_heron_executor_process_name(self.shard), os.getpid())
def _get_metricsmgr_cmd(self, metricsManagerId, sink_config_file, port):
  '''Get the command to start a metrics manager process.

  Builds the JVM invocation for org.apache.heron.metricsmgr.MetricsManager
  with a fixed 1GB heap and verbose GC logging to log-files/gc.metricsmgr.log.

  :param metricsManagerId: id passed to the metrics manager via --id
  :param sink_config_file: yaml file describing the metrics sinks
  :param port: port the metrics manager listens on
  :returns: a Command wrapping the full java command line
  '''
  metricsmgr_main_class = 'org.apache.heron.metricsmgr.MetricsManager'

  metricsmgr_cmd = [os.path.join(self.heron_java_home, 'bin/java'),
                    # We could not rely on the default -Xmx setting, which could be very big,
                    # for instance, the default -Xmx in Twitter mesos machine is around 18GB
                    '-Xmx1024M',
                    '-XX:+PrintCommandLineFlags',
                    '-verbosegc',
                    '-XX:+PrintGCDetails',
                    '-XX:+PrintGCTimeStamps',
                    '-XX:+PrintGCDateStamps',
                    '-XX:+PrintGCCause',
                    '-XX:+UseGCLogFileRotation',
                    '-XX:NumberOfGCLogFiles=5',
                    '-XX:GCLogFileSize=100M',
                    '-XX:+PrintPromotionFailure',
                    '-XX:+PrintTenuringDistribution',
                    '-XX:+PrintHeapAtGC',
                    '-XX:+HeapDumpOnOutOfMemoryError',
                    '-XX:+UseConcMarkSweepGC',
                    # NOTE(review): -XX:+PrintCommandLineFlags appears twice in
                    # this list (also above); harmless, but looks like a
                    # copy-paste remnant.
                    '-XX:+PrintCommandLineFlags',
                    '-Xloggc:log-files/gc.metricsmgr.log',
                    '-Djava.net.preferIPv4Stack=true',
                    '-cp', self.metrics_manager_classpath,
                    metricsmgr_main_class,
                    '--id=' + metricsManagerId,
                    '--port=' + str(port),
                    '--topology=' + self.topology_name,
                    '--cluster=' + self.cluster,
                    '--role=' + self.role,
                    '--environment=' + self.environment,
                    '--topology-id=' + self.topology_id,
                    '--system-config-file=' + self.heron_internals_config_file,
                    '--override-config-file=' + self.override_config_file,
                    '--sink-config-file=' + sink_config_file]

  return Command(metricsmgr_cmd, self.shell_env)
def _get_metrics_cache_cmd(self):
  '''Get the command to start the metrics cache manager process.

  Builds the JVM invocation for
  org.apache.heron.metricscachemgr.MetricsCacheManager for this topology,
  with a fixed 1GB heap and GC logging to log-files/gc.metricscache.log.

  :returns: a Command wrapping the full java command line
  '''
  metricscachemgr_main_class = 'org.apache.heron.metricscachemgr.MetricsCacheManager'

  metricscachemgr_cmd = [os.path.join(self.heron_java_home, 'bin/java'),
                         # We could not rely on the default -Xmx setting, which could be very big,
                         # for instance, the default -Xmx in Twitter mesos machine is around 18GB
                         '-Xmx1024M',
                         '-XX:+PrintCommandLineFlags',
                         '-verbosegc',
                         '-XX:+PrintGCDetails',
                         '-XX:+PrintGCTimeStamps',
                         '-XX:+PrintGCDateStamps',
                         '-XX:+PrintGCCause',
                         '-XX:+UseGCLogFileRotation',
                         '-XX:NumberOfGCLogFiles=5',
                         '-XX:GCLogFileSize=100M',
                         '-XX:+PrintPromotionFailure',
                         '-XX:+PrintTenuringDistribution',
                         '-XX:+PrintHeapAtGC',
                         '-XX:+HeapDumpOnOutOfMemoryError',
                         '-XX:+UseConcMarkSweepGC',
                         # NOTE(review): duplicate of the flag above; harmless.
                         '-XX:+PrintCommandLineFlags',
                         '-Xloggc:log-files/gc.metricscache.log',
                         '-Djava.net.preferIPv4Stack=true',
                         '-cp', self.metricscache_manager_classpath,
                         metricscachemgr_main_class,
                         "--metricscache_id", 'metricscache-0',
                         "--master_port", self.metricscache_manager_master_port,
                         "--stats_port", self.metricscache_manager_stats_port,
                         "--topology_name", self.topology_name,
                         "--topology_id", self.topology_id,
                         "--system_config_file", self.heron_internals_config_file,
                         "--override_config_file", self.override_config_file,
                         "--sink_config_file", self.metrics_sinks_config_file,
                         "--cluster", self.cluster,
                         "--role", self.role,
                         "--environment", self.environment]

  return Command(metricscachemgr_cmd, self.shell_env)
def _get_healthmgr_cmd(self):
  '''Get the command to start the topology health manager process.

  Builds the JVM invocation for org.apache.heron.healthmgr.HealthManager
  with a fixed 1GB heap and GC logging to log-files/gc.healthmgr.log.

  :returns: a Command wrapping the full java command line
  '''
  healthmgr_main_class = 'org.apache.heron.healthmgr.HealthManager'

  healthmgr_cmd = [os.path.join(self.heron_java_home, 'bin/java'),
                   # We could not rely on the default -Xmx setting, which could be very big,
                   # for instance, the default -Xmx in Twitter mesos machine is around 18GB
                   '-Xmx1024M',
                   '-XX:+PrintCommandLineFlags',
                   '-verbosegc',
                   '-XX:+PrintGCDetails',
                   '-XX:+PrintGCTimeStamps',
                   '-XX:+PrintGCDateStamps',
                   '-XX:+PrintGCCause',
                   '-XX:+UseGCLogFileRotation',
                   '-XX:NumberOfGCLogFiles=5',
                   '-XX:GCLogFileSize=100M',
                   '-XX:+PrintPromotionFailure',
                   '-XX:+PrintTenuringDistribution',
                   '-XX:+PrintHeapAtGC',
                   '-XX:+HeapDumpOnOutOfMemoryError',
                   '-XX:+UseConcMarkSweepGC',
                   # NOTE(review): duplicate of the flag above; harmless.
                   '-XX:+PrintCommandLineFlags',
                   '-Xloggc:log-files/gc.healthmgr.log',
                   '-Djava.net.preferIPv4Stack=true',
                   '-cp', self.health_manager_classpath,
                   healthmgr_main_class,
                   "--cluster", self.cluster,
                   "--role", self.role,
                   "--environment", self.environment,
                   "--topology_name", self.topology_name,
                   "--metricsmgr_port", self.metrics_manager_port]

  return Command(healthmgr_cmd, self.shell_env)
def _get_tmaster_processes(self):
  '''Get the commands to start the tmaster processes.

  Returns a map of process name -> Command for container 0: the tmaster
  itself, plus (depending on configuration) the metrics cache manager,
  the health manager, the first metrics manager, and the checkpoint
  manager for stateful topologies.
  '''
  retval = {}
  tmaster_cmd_lst = [
      self.tmaster_binary,
      '--topology_name=%s' % self.topology_name,
      '--topology_id=%s' % self.topology_id,
      '--zkhostportlist=%s' % self.state_manager_connection,
      '--zkroot=%s' % self.state_manager_root,
      '--myhost=%s' % self.master_host,
      '--master_port=%s' % str(self.master_port),
      '--controller_port=%s' % str(self.tmaster_controller_port),
      '--stats_port=%s' % str(self.tmaster_stats_port),
      '--config_file=%s' % self.heron_internals_config_file,
      '--override_config_file=%s' % self.override_config_file,
      '--metrics_sinks_yaml=%s' % self.metrics_sinks_config_file,
      '--metricsmgr_port=%s' % str(self.metrics_manager_port),
      '--ckptmgr_port=%s' % str(self.checkpoint_manager_port)]

  # Copy the shell env so per-process additions don't leak back.
  tmaster_env = self.shell_env.copy() if self.shell_env is not None else {}
  tmaster_cmd = Command(tmaster_cmd_lst, tmaster_env)

  # Opt-in tcmalloc heap checking, controlled by an env var on the host.
  if os.environ.get('ENABLE_HEAPCHECK') is not None:
    tmaster_cmd.env.update({
        'LD_PRELOAD': "/usr/lib/libtcmalloc.so",
        'HEAPCHECK': "normal"
    })

  retval["heron-tmaster"] = tmaster_cmd

  if self.metricscache_manager_mode.lower() != "disabled":
    retval["heron-metricscache"] = self._get_metrics_cache_cmd()

  if self.health_manager_mode.lower() != "disabled":
    retval["heron-healthmgr"] = self._get_healthmgr_cmd()

  retval[self.metricsmgr_ids[0]] = self._get_metricsmgr_cmd(
      self.metricsmgr_ids[0],
      self.metrics_sinks_config_file,
      self.metrics_manager_port)

  if self.is_stateful_topology:
    retval.update(self._get_ckptmgr_process())

  return retval
def _get_streaming_processes(self):
  '''
  Returns the processes to handle streams, including the stream-mgr and the
  user code containing the stream logic of the topology

  The returned map contains: this container's stream manager, its metrics
  manager, the checkpoint manager (for stateful topologies), and one entry
  per user instance, whose launcher depends on the package type.
  '''
  retval = {}
  # NOTE(review): _get_instance_plans can return None for containers added by
  # `heron update`; this loop assumes the caller (get_commands_to_run) has
  # already filtered that case out — confirm before calling directly.
  instance_plans = self._get_instance_plans(self.packing_plan, self.shard)
  instance_info = []
  for instance_plan in instance_plans:
    global_task_id = instance_plan.task_id
    component_index = instance_plan.component_index
    component_name = instance_plan.component_name
    instance_id = "container_%s_%s_%d" % (str(self.shard), component_name, global_task_id)
    instance_info.append((instance_id, component_name, global_task_id, component_index))

  stmgr_cmd_lst = [
      self.stmgr_binary,
      '--topology_name=%s' % self.topology_name,
      '--topology_id=%s' % self.topology_id,
      '--topologydefn_file=%s' % self.topology_defn_file,
      '--zkhostportlist=%s' % self.state_manager_connection,
      '--zkroot=%s' % self.state_manager_root,
      '--stmgr_id=%s' % self.stmgr_ids[self.shard],
      '--instance_ids=%s' % ','.join(map(lambda x: x[0], instance_info)),
      '--myhost=%s' % self.master_host,
      '--data_port=%s' % str(self.master_port),
      '--local_data_port=%s' % str(self.tmaster_controller_port),
      '--metricsmgr_port=%s' % str(self.metrics_manager_port),
      '--shell_port=%s' % str(self.shell_port),
      '--config_file=%s' % self.heron_internals_config_file,
      '--override_config_file=%s' % self.override_config_file,
      '--ckptmgr_port=%s' % str(self.checkpoint_manager_port),
      '--ckptmgr_id=%s' % self.ckptmgr_ids[self.shard],
      '--metricscachemgr_mode=%s' % self.metricscache_manager_mode.lower()]

  # Copy the shell env so the heapcheck additions below don't leak back.
  stmgr_env = self.shell_env.copy() if self.shell_env is not None else {}
  stmgr_cmd = Command(stmgr_cmd_lst, stmgr_env)
  if os.environ.get('ENABLE_HEAPCHECK') is not None:
    stmgr_cmd.env.update({
        'LD_PRELOAD': "/usr/lib/libtcmalloc.so",
        'HEAPCHECK': "normal"
    })
  retval[self.stmgr_ids[self.shard]] = stmgr_cmd

  # metricsmgr_metrics_sink_config_file = 'metrics_sinks.yaml'

  retval[self.metricsmgr_ids[self.shard]] = self._get_metricsmgr_cmd(
      self.metricsmgr_ids[self.shard],
      self.metrics_sinks_config_file,
      self.metrics_manager_port
  )

  if self.is_stateful_topology:
    retval.update(self._get_ckptmgr_process())

  # Pick the instance launcher matching the topology package type.
  if self.pkg_type == 'jar' or self.pkg_type == 'tar':
    retval.update(self._get_java_instance_cmd(instance_info))
  elif self.pkg_type == 'pex':
    retval.update(self._get_python_instance_cmd(instance_info))
  elif self.pkg_type == 'so':
    retval.update(self._get_cpp_instance_cmd(instance_info))
  elif self.pkg_type == 'dylib':
    retval.update(self._get_cpp_instance_cmd(instance_info))
  else:
    raise ValueError("Unrecognized package type: %s" % self.pkg_type)

  return retval
def _get_ckptmgr_process(self):
  '''Get the command to start the checkpoint manager process.

  Builds the JVM invocation for org.apache.heron.ckptmgr.CheckpointManager
  for this shard, with the heap fixed (-Xms == -Xmx) to the configured
  checkpoint manager RAM, and returns it as a one-entry map keyed by this
  shard's checkpoint manager id.

  :returns: dict mapping ckptmgr id -> Command
  '''
  ckptmgr_main_class = 'org.apache.heron.ckptmgr.CheckpointManager'
  # Floor division keeps the heap size an integer number of MB on both
  # Python 2 and Python 3 (plain `/` becomes float division on py3).
  ckptmgr_ram_mb = self.checkpoint_manager_ram // (1024 * 1024)
  ckptmgr_cmd = [os.path.join(self.heron_java_home, "bin/java"),
                 '-Xms%dM' % ckptmgr_ram_mb,
                 '-Xmx%dM' % ckptmgr_ram_mb,
                 '-XX:+PrintCommandLineFlags',
                 '-verbosegc',
                 '-XX:+PrintGCDetails',
                 '-XX:+PrintGCTimeStamps',
                 '-XX:+PrintGCDateStamps',
                 '-XX:+PrintGCCause',
                 '-XX:+UseGCLogFileRotation',
                 '-XX:NumberOfGCLogFiles=5',
                 '-XX:GCLogFileSize=100M',
                 '-XX:+PrintPromotionFailure',
                 '-XX:+PrintTenuringDistribution',
                 '-XX:+PrintHeapAtGC',
                 '-XX:+HeapDumpOnOutOfMemoryError',
                 # This flag was previously listed twice (copy-paste remnant);
                 # the JVM treats repeated flags as one, so the duplicate was
                 # dropped with no behavior change.
                 '-XX:+UseConcMarkSweepGC',
                 '-Xloggc:log-files/gc.ckptmgr.log',
                 '-Djava.net.preferIPv4Stack=true',
                 '-cp',
                 self.checkpoint_manager_classpath,
                 ckptmgr_main_class,
                 '-t' + self.topology_name,
                 '-i' + self.topology_id,
                 '-c' + self.ckptmgr_ids[self.shard],
                 '-p' + self.checkpoint_manager_port,
                 '-f' + self.stateful_config_file,
                 '-o' + self.override_config_file,
                 '-g' + self.heron_internals_config_file]
  retval = {}
  retval[self.ckptmgr_ids[self.shard]] = Command(ckptmgr_cmd, self.shell_env)
  return retval
def _get_instance_plans(self, packing_plan, container_id): """ For the given packing_plan, return the container plan with the given container_id. If protobufs supported maps, we could just get the plan by id, but it doesn't so we have a collection of containers to iterate over. """ this_container_plan = None for container_plan in packing_plan.container_plans: if container_plan.id == container_id: this_container_plan = container_plan # When the executor runs in newly added container by `heron update`, # there is no plan for this container. In this situation, # return None to bypass instance processes. if this_container_plan is None: return None return this_container_plan.instance_plans
def _get_heron_support_processes(self):
  """Map each daemon service's name to the Command that starts it.

  The only support daemon today is the heron-shell for this shard, which
  serves logs and pid files over HTTP and authenticates with the topology id.
  """
  shell_args = [
      '%s' % self.heron_shell_binary,
      '--port=%s' % self.shell_port,
      '--log_file_prefix=%s/heron-shell-%s.log' % (self.log_dir, self.shard),
      '--secret=%s' % self.topology_id,
  ]
  return {self.heron_shell_ids[self.shard]: Command(shell_args, self.shell_env)}
def _wait_process_std_out_err(self, name, process):
  '''Wait for the termination of a process and log its stdout & stderr.

  Blocks: first drains the process output through the proc helper, tagging
  each line with ``name``, then joins the process.

  :param name: logical name used to label the logged output
  :param process: Popen-like handle to drain and wait on
  '''
  proc.stream_process_stdout(process, stdout_log_fn(name))
  process.wait()
def _start_processes(self, commands):
  """Start every command and register the resulting processes for monitoring.

  Each command is spawned, its pid is written to a pid file, and all new
  ProcessInfo entries are published to ``self.processes_to_monitor`` in one
  lock-protected update.
  """
  Log.info("Start processes")
  started = {}

  # First start all the processes
  for proc_name, proc_command in commands.items():
    child = self._run_process(proc_name, proc_command)
    started[child.pid] = ProcessInfo(child, proc_name, proc_command)

    # Log down the pid file
    log_pid_for_process(proc_name, child.pid)

  # Publish the whole batch atomically under the lock.
  with self.process_lock:
    self.processes_to_monitor.update(started)
def start_process_monitor(self): """ Monitor all processes in processes_to_monitor dict, restarting any if they fail, up to max_runs times. """ # Now wait for any child to die Log.info("Start process monitor") while True: if len(self.processes_to_monitor) > 0: (pid, status) = os.wait() with self.process_lock: if pid in self.processes_to_monitor.keys(): old_process_info = self.processes_to_monitor[pid] name = old_process_info.name command = old_process_info.command Log.info("%s (pid=%s) exited with status %d. command=%s" % (name, pid, status, command)) # Log the stdout & stderr of the failed process self._wait_process_std_out_err(name, old_process_info.process) # Just make it world readable if os.path.isfile("core.%d" % pid): os.system("chmod a+r core.%d" % pid) if old_process_info.attempts >= self.max_runs: Log.info("%s exited too many times" % name) sys.exit(1) time.sleep(self.interval_between_runs) p = self._run_process(name, command) del self.processes_to_monitor[pid] self.processes_to_monitor[p.pid] =\ ProcessInfo(p, name, command, old_process_info.attempts + 1) # Log down the pid file log_pid_for_process(name, p.pid)
def get_commands_to_run(self): """ Prepare either TMaster or Streaming commands according to shard. The Shell command is attached to all containers. The empty container plan and non-exist container plan are bypassed. """ # During shutdown the watch might get triggered with the empty packing plan if len(self.packing_plan.container_plans) == 0: return {} if self._get_instance_plans(self.packing_plan, self.shard) is None and self.shard != 0: retval = {} retval['heron-shell'] = Command([ '%s' % self.heron_shell_binary, '--port=%s' % self.shell_port, '--log_file_prefix=%s/heron-shell-%s.log' % (self.log_dir, self.shard), '--secret=%s' % self.topology_id], self.shell_env) return retval if self.shard == 0: commands = self._get_tmaster_processes() else: self._untar_if_needed() commands = self._get_streaming_processes() # Attach daemon processes commands.update(self._get_heron_support_processes()) return commands
def get_command_changes(self, current_commands, updated_commands):
  """Diff the current command set against the updated one.

  Returns a 3-tuple of dicts keyed by command name:
  (commands_to_kill, commands_to_keep, commands_to_start).

  A current command survives only when an identical command with the same
  name exists in the update AND it is not a stream manager. Stream managers
  are always restarted (to reset state); tmaster never appears here as
  changed because it watches the packing plan and updates itself.
  """
  commands_to_kill = {}
  commands_to_keep = {}

  # if the current command has a matching command in the updated commands we keep it
  # otherwise we kill it
  for name, command in current_commands.items():
    unchanged = name in updated_commands.keys() and command == updated_commands[name]
    if unchanged and not name.startswith('stmgr-'):
      commands_to_keep[name] = command
    else:
      commands_to_kill[name] = command

  # updated commands not in the keep list need to be started
  commands_to_start = {
      name: command
      for name, command in updated_commands.items()
      if name not in commands_to_keep
  }

  return commands_to_kill, commands_to_keep, commands_to_start
def launch(self):
  '''Reconcile running processes with the commands that should be running.

  Computes the desired command set for this container, diffs it against the
  currently monitored processes, then kills obsolete processes and starts
  the missing ones. The entire reconciliation happens under the process lock.
  '''
  with self.process_lock:
    current_commands = {
        info.name: info.command for info in self.processes_to_monitor.values()
    }
    updated_commands = self.get_commands_to_run()

    # get the commands to kill, keep and start
    to_kill, to_keep, to_start = \
        self.get_command_changes(current_commands, updated_commands)

    Log.info("current commands: %s" % sorted(current_commands.keys()))
    Log.info("new commands : %s" % sorted(updated_commands.keys()))
    Log.info("commands_to_kill: %s" % sorted(to_kill.keys()))
    Log.info("commands_to_keep: %s" % sorted(to_keep.keys()))
    Log.info("commands_to_start: %s" % sorted(to_start.keys()))

    self._kill_processes(to_kill)
    self._start_processes(to_start)
    Log.info("Launch complete - processes killed=%s kept=%s started=%s monitored=%s" %
             (len(to_kill), len(to_keep),
              len(to_start), len(self.processes_to_monitor)))
def start_state_manager_watches(self):
  """
  Receive updates to the packing plan from the statemgrs and update processes as needed.

  Connects to every configured state manager (exiting the executor if any
  fails to start) and registers a packing-plan watch on each. When a watch
  fires with a changed plan, the local plan is updated and launch() is run
  to reconcile processes; unchanged plans are ignored.
  """
  Log.info("Start state manager watches")

  statemgr_config = StateMgrConfig()
  statemgr_config.set_state_locations(configloader.load_state_manager_locations(
      self.cluster, state_manager_config_file=self.state_manager_config_file,
      overrides={"heron.statemgr.connection.string": self.state_manager_connection}))
  try:
    self.state_managers = statemanagerfactory.get_all_state_managers(statemgr_config)
    for state_manager in self.state_managers:
      state_manager.start()
  except Exception as ex:
    # State managers are essential; bail out of the whole executor.
    Log.error("Found exception while initializing state managers: %s. Bailing out..." % ex)
    traceback.print_exc()
    sys.exit(1)

  # pylint: disable=unused-argument
  def on_packing_plan_watch(state_manager, new_packing_plan):
    Log.debug(
        "State watch triggered for PackingPlan update on shard %s. Existing: %s, New: %s"
        % (self.shard, str(self.packing_plan), str(new_packing_plan)))

    if self.packing_plan != new_packing_plan:
      Log.info("PackingPlan change detected on shard %s, relaunching effected processes."
               % self.shard)
      self.update_packing_plan(new_packing_plan)

      Log.info("Updating executor processes")
      self.launch()
    else:
      Log.info(
          "State watch triggered for PackingPlan update but plan not changed so not relaunching.")

  for state_manager in self.state_managers:
    # The callback function with the bound
    # state_manager as first variable.
    onPackingPlanWatch = functools.partial(on_packing_plan_watch, state_manager)
    state_manager.get_packing_plan(self.topology_name, onPackingPlanWatch)
    Log.info("Registered state watch for packing plan changes with state manager %s." %
             str(state_manager))
def run(self, name, config, builder):
  """Build the topology described by ``builder`` and submit it.

  :param name: topology name; must be a str
  :param config: topology-level Config instance
  :param builder: user Builder that wires up the topology graph
  :raises RuntimeError: when any argument has the wrong type
  """
  # Table-driven argument validation with the exact original messages.
  expectations = [
      (name, str, "Name has to be a string type"),
      (config, Config, "config has to be a Config type"),
      (builder, Builder, "builder has to be a Builder type"),
  ]
  for value, expected_type, message in expectations:
    if not isinstance(value, expected_type):
      raise RuntimeError(message)

  topo_builder = TopologyBuilder(name=name)
  builder.build(topo_builder)
  # pylint: disable=protected-access
  topo_builder.set_config(config._api_config)
  topo_builder.build_and_submit()
def _modules_to_main(modList): """Force every module in modList to be placed into main""" if not modList: return main = sys.modules['__main__'] for modname in modList: if isinstance(modname, str): try: mod = __import__(modname) except Exception: sys.stderr.write( 'warning: could not import %s\n. ' 'Your function may unexpectedly error due to this import failing;' 'A version mismatch is likely. Specific error was:\n' % modname) print_exec(sys.stderr) else: setattr(main, mod.__name__, mod)
def _fill_function(func, globalsn, defaults, dictn, module): """ Fills in the rest of function data into the skeleton function object that were created via _make_skel_func(). """ func.__globals__.update(globalsn) func.__defaults__ = defaults func.__dict__ = dictn func.__module__ = module return func
def _make_skel_func(code, closures, base_globals=None): """ Creates a skeleton function object that contains just the provided code and the correct number of cells in func_closure. All other func attributes (e.g. func_globals) are empty. """ closure = _reconstruct_closure(closures) if closures else None if base_globals is None: base_globals = {} base_globals['__builtins__'] = __builtins__ return types.FunctionType(code, base_globals, None, None, closure)
def _load_class(cls, d): """ Loads additional properties into class `cls`. """ for k, v in d.items(): if isinstance(k, tuple): typ, k = k if typ == 'property': v = property(*v) elif typ == 'staticmethod': v = staticmethod(v) # pylint: disable=redefined-variable-type elif typ == 'classmethod': v = classmethod(v) setattr(cls, k, v) return cls
def save_module(self, obj):
  """ Save a module as an import.

  Records the module in self.modules and pickles it as a subimport of its
  dotted name, so unpickling re-imports it rather than serializing contents.
  """
  self.modules.add(obj)
  self.save_reduce(subimport, (obj.__name__,), obj=obj)
def save_function(self, obj, name=None):
  """ Registered with the dispatch to handle all function types.

  Determines what kind of function obj is (e.g. lambda, defined at
  interactive prompt, etc) and handles the pickling appropriately: importable
  module-level functions are saved as globals (by reference); lambdas,
  interactively defined, __main__ or nested functions are pickled by value
  via save_function_tuple.
  """
  write = self.write

  if name is None:
    name = obj.__name__
  try:
    # whichmodule() could fail, see
    # https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling
    modname = pickle.whichmodule(obj, name)
  except Exception:
    modname = None
  # print('which gives %s %s %s' % (modname, obj, name))
  try:
    themodule = sys.modules[modname]
  except KeyError:
    # eval'd items such as namedtuple give invalid items for their function __module__
    modname = '__main__'

  if modname == '__main__':
    themodule = None

  if themodule:
    self.modules.add(themodule)
    if getattr(themodule, name, None) is obj:
      return self.save_global(obj, name)

  # if func is lambda, def'ed at prompt, is in main, or is nested, then
  # we'll pickle the actual function object rather than simply saving a
  # reference (as is done in default pickler), via save_function_tuple.
  if islambda(obj) or obj.__code__.co_filename == '<stdin>' or themodule is None:
    #print("save global", islambda(obj), obj.__code__.co_filename, modname, themodule)
    self.save_function_tuple(obj)
    return
  else:
    # func is nested
    klass = getattr(themodule, name, None)
    if klass is None or klass is not obj:
      self.save_function_tuple(obj)
      return

  if obj.__dict__:
    # essentially save_reduce, but workaround needed to avoid recursion
    self.save(_restore_attr)
    write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n')
    self.memoize(obj)
    self.save(obj.__dict__)
    write(pickle.TUPLE + pickle.REDUCE)
  else:
    write(pickle.GLOBAL + modname + '\n' + name + '\n')
    self.memoize(obj)
def save_function_tuple(self, func):
  """ Pickles an actual func object.

  A func comprises: code, globals, defaults, closure, and dict. We extract
  and save these, injecting reducing functions at certain points to recreate
  the func object.

  Keep in mind that some of these pieces can contain a ref to the
  func itself.  Thus, a naive save on these pieces could trigger an infinite
  loop of save's.  To get around that, we first create a skeleton func
  object using just the code (this is safe, since this won't contain a ref
  to the func), and memoize it as soon as it's created.  The other stuff can
  then be filled in later.

  NOTE: the write/save order below mirrors the pickle stream the reducers
  expect — do not reorder these calls.
  """
  save = self.save
  write = self.write

  code, f_globals, defaults, closure, dct, base_globals = self.extract_func_data(func)

  save(_fill_function)  # skeleton function updater
  write(pickle.MARK)    # beginning of tuple that _fill_function expects

  # create a skeleton function object and memoize it
  save(_make_skel_func)
  save((code, closure, base_globals))
  write(pickle.REDUCE)
  self.memoize(func)

  # save the rest of the func data needed by _fill_function
  save(f_globals)
  save(defaults)
  save(dct)
  save(func.__module__)
  write(pickle.TUPLE)
  write(pickle.REDUCE)  # applies _fill_function on the tuple
def save_file(self, obj):  # pylint: disable=too-many-branches
  """Save a file.

  Pickles an open file handle by snapshotting its readable contents into a
  StringIO positioned at the file's current offset. std streams are pickled
  as references to sys attributes; ttys, write-only handles and unnamed
  objects are rejected. NOTE: uses the Python 2 builtin ``file()``.
  """
  try:
    import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
  except ImportError:
    import io as pystringIO # pylint: disable=reimported

  if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
    raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
  if obj is sys.stdout:
    return self.save_reduce(getattr, (sys, 'stdout'), obj=obj)
  if obj is sys.stderr:
    return self.save_reduce(getattr, (sys, 'stderr'), obj=obj)
  if obj is sys.stdin:
    raise pickle.PicklingError("Cannot pickle standard input")
  if hasattr(obj, 'isatty') and obj.isatty():
    raise pickle.PicklingError("Cannot pickle files that map to tty objects")
  if 'r' not in obj.mode:
    raise pickle.PicklingError("Cannot pickle files that are not opened for reading")
  name = obj.name
  try:
    fsize = os.stat(name).st_size
  except OSError:
    raise pickle.PicklingError("Cannot pickle file %s as it cannot be stat" % name)

  if obj.closed:
    #create an empty closed string io
    retval = pystringIO.StringIO("")
    retval.close()
  elif not fsize: #empty file
    retval = pystringIO.StringIO("")
    # Sanity-check that reading really yields nothing (e.g. not a pipe).
    try:
      tmpfile = file(name)
      tst = tmpfile.read(1)
    except IOError:
      raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
    tmpfile.close()
    if tst != '':
      raise pickle.PicklingError(
          "Cannot pickle file %s as it does not appear to map to a physical, real file" % name)
  else:
    # Snapshot the whole file and seek to the handle's current position.
    try:
      tmpfile = file(name)
      contents = tmpfile.read()
      tmpfile.close()
    except IOError:
      raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
    retval = pystringIO.StringIO(contents)
    curloc = obj.tell()
    retval.seek(curloc)
  retval.name = name
  self.save(retval)
  self.memoize(obj)
def tail(filename, n):
  """Returns last n lines from the filename. No exception handling.

  Memory-maps the file read-only and scans backwards counting newlines;
  returns the lines after the (n+1)-th newline from the end (or the whole
  file if it has fewer than n lines). NOTE: Python 2 only — relies on
  ``xrange`` and on mmap indexing yielding 1-char ``str`` values.
  """
  size = os.path.getsize(filename)
  with open(filename, "rb") as f:
    # MAP_SHARED/PROT_READ: no copy of the file is made, no writes possible.
    fm = mmap.mmap(f.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ)
    try:
      for i in xrange(size - 1, -1, -1):
        if fm[i] == '\n':
          n -= 1
          if n == -1:
            break
      # i == 0 means we hit the start of the file: include the first line.
      return fm[i + 1 if i else 0:].splitlines()
    finally:
      fm.close()
def get_serializer(context):
  """Return the serializer configured for the given topology context.

  Uses the default PythonSerializer unless the cluster config names a custom
  serializer class, in which case that class is loaded from the topology pex
  and instantiated.

  :raises RuntimeError: when the custom serializer class cannot be loaded
  """
  cluster_config = context.get_cluster_config()
  clsname = cluster_config.get(constants.TOPOLOGY_SERIALIZER_CLASSNAME, None)
  if clsname is None:
    return PythonSerializer()
  try:
    pex_path = context.get_topology_pex_path()
    pex_loader.load_pex(pex_path)
    custom_cls = pex_loader.import_and_get_class(pex_path, clsname)
    return custom_cls()
  except Exception as e:
    raise RuntimeError("Error with loading custom serializer class: %s, with error message: %s"
                       % (clsname, str(e)))
def _run_once(self):
  """Run once, should be called only from loop().

  One event-loop iteration: blocks in do_wait(), then runs wakeup tasks and
  any expired timers. Any exception is logged with its traceback and flags
  the loop to exit instead of propagating.
  """
  try:
    self.do_wait()
    self._execute_wakeup_tasks()
    self._trigger_timers()
  except Exception as e:
    Log.error("Error occured during _run_once(): " + str(e))
    Log.error(traceback.format_exc())
    self.should_exit = True
def register_timer_task_in_sec(self, task, second):
  """Register ``task`` to fire ``second`` seconds from now.

  :param task: zero-argument callable invoked when the timer expires
  :param second: delay in seconds before triggering; coerced to float
    because Python time is float-based
  """
  fire_at = time.time() + float(second)
  heappush(self.timer_tasks, (fire_at, task))
def _get_next_timeout_interval(self): """Get the next timeout from now This should be used from do_wait(). :returns (float) next_timeout, or 10.0 if there are no timer events """ if len(self.timer_tasks) == 0: return sys.maxsize else: next_timeout_interval = self.timer_tasks[0][0] - time.time() return next_timeout_interval
def _execute_wakeup_tasks(self): """Executes wakeup tasks, should only be called from loop()""" # Check the length of wakeup tasks first to avoid concurrent issues size = len(self.wakeup_tasks) for i in range(size): self.wakeup_tasks[i]()
def _trigger_timers(self): """Triggers expired timers""" current = time.time() while len(self.timer_tasks) > 0 and (self.timer_tasks[0][0] - current <= 0): task = heappop(self.timer_tasks)[1] task()
def post(self):
  """ post method.

  Handles a kill request. The request body carries url-encoded fields:
  ``secret`` (must match options.secret, else 403) and an optional
  ``instance_id_to_restart``. With an instance id, the pid is read from
  ``<instance_id>.pid`` and that process is SIGTERMed (200); a missing pid
  file yields 422. Without an instance id — or when the id names the
  heron-executor itself — the whole container's process group is SIGTERMed.
  """
  def status_finish(ret):
    self.set_status(ret)
    self.finish()

  def kill_parent():
    status_finish(200)
    logger.info("Killing parent executor")
    # SIGTERM the whole process group rooted at the parent executor.
    os.killpg(os.getppid(), signal.SIGTERM)

  logger = logging.getLogger(__file__)
  logger.info("Received 'Killing process' request")
  data = dict(urlparse.parse_qsl(self.request.body))

  # check shared secret
  sharedSecret = data.get('secret')
  if sharedSecret != options.secret:
    status_finish(403)
    return

  instanceId = data.get('instance_id_to_restart')
  if instanceId:
    filepath = instanceId + '.pid'
    if os.path.isfile(filepath): # instance_id found
      if instanceId.startswith('heron-executor-'): # kill heron-executor
        kill_parent()
      else: # kill other normal instance
        # First line of the pid file is the pid written at process start.
        fh = open(filepath)
        firstLine = int(fh.readline())
        fh.close()
        logger.info("Killing process " + instanceId + " " + str(firstLine))
        os.kill(firstLine, signal.SIGTERM)
        status_finish(200)
    else: # instance_id not found
      logger.info(filepath + " not found")
      status_finish(422)
  else: # instance_id not given, which means kill the container
    kill_parent()
def execute_query(self, tmaster, query_string, start, end):
  """ execute query.

  Tornado generator coroutine: parses ``query_string`` into an operator
  tree, evaluates it against the given tmaster over [start, end], and
  "returns" the metrics via tornado.gen.Return.

  :raises Exception: when no tmaster is provided
  """
  if not tmaster:
    raise Exception("No tmaster found")
  self.tmaster = tmaster
  root = self.parse_query_string(query_string)
  metrics = yield root.execute(self.tracker, self.tmaster, start, end)
  raise tornado.gen.Return(metrics)
def find_closing_braces(self, query):
  """Return the index of the ')' matching the '(' at position 0 of ``query``.

  The first character of the input string must be an opening parenthesis.

  :raises Exception: if the query does not start with '(' or if no matching
    closing parenthesis is found
  """
  if query[0] != '(':
    raise Exception("Trying to find closing braces for no opening braces")
  depth = 0
  for index, ch in enumerate(query):
    if ch == '(':
      depth += 1
    elif ch == ')':
      depth -= 1
    if depth == 0:
      return index
  raise Exception("No closing braces found")
def get_sub_parts(self, query):
  """Split *query* on top-level commas and return the stripped pieces.

  Commas nested inside parentheses stay within their piece, so arguments
  of nested function calls are not split apart.
  """
  pieces = []
  depth = 0
  segment_start = 0
  for index, ch in enumerate(query):
    if ch == '(':
      depth += 1
    elif ch == ')':
      depth -= 1
    elif ch == ',' and depth == 0:
      pieces.append(query[segment_start:index].strip())
      segment_start = index + 1
  # trailing segment after the last top-level comma (or the whole string)
  pieces.append(query[segment_start:].strip())
  return pieces
def parse_query_string(self, query):
  """Returns a parse tree for the query, each of the node is a subclass of
  Operator. This is both a lexical as well as syntax analyzer step.

  Grammar (informal): a query is either a float constant, a redundant
  parenthesized query ``( <query> )``, or ``TOKEN(<part>, <part>, ...)``
  where TOKEN is a key in ``self.operators``.
  """
  if not query:
    return None
  # Just braces do not matter
  if query[0] == '(':
    index = self.find_closing_braces(query)
    # This must be the last index, since this was an NOP starting brace
    if index != len(query) - 1:
      raise Exception("Invalid syntax")
    else:
      # strip the redundant outer parentheses and re-parse
      return self.parse_query_string(query[1:-1])
  start_index = query.find("(")
  # There must be a ( in the query
  if start_index < 0:
    # Otherwise it must be a constant
    try:
      constant = float(query)
      return constant
    except ValueError:
      raise Exception("Invalid syntax")
  # everything before the first '(' is the operator token
  token = query[:start_index]
  if token not in self.operators:
    raise Exception("Invalid token: " + token)
  # Get sub components
  rest_of_the_query = query[start_index:]
  braces_end_index = self.find_closing_braces(rest_of_the_query)
  # the operator's argument list must extend to the end of the query
  if braces_end_index != len(rest_of_the_query) - 1:
    raise Exception("Invalid syntax")
  parts = self.get_sub_parts(rest_of_the_query[1:-1])
  # parts are simple strings in this case
  if token == "TS":
    # This will raise exception if parts are not syntactically correct
    return self.operators[token](parts)
  # all other operators take parsed subtrees as children
  children = []
  for part in parts:
    children.append(self.parse_query_string(part))
  # Make a node for the current token
  node = self.operators[token](children)
  return node
def get(self):
  """ get method """
  # Tornado coroutine handler: resolves the topology from the request
  # arguments, validates the time interval, runs the metrics query against
  # the topology's tmaster, and writes a success or error response.
  try:
    cluster = self.get_argument_cluster()
    role = self.get_argument_role()
    environ = self.get_argument_environ()
    topology_name = self.get_argument_topology()
    topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
        cluster, role, environ, topology_name)
    start_time = self.get_argument_starttime()
    end_time = self.get_argument_endtime()
    # raises if the interval is malformed or inverted
    self.validateInterval(start_time, end_time)
    query = self.get_argument_query()
    metrics = yield tornado.gen.Task(self.executeMetricsQuery,
                                     topology.tmaster, query,
                                     int(start_time), int(end_time))
    self.write_success_response(metrics)
  except Exception as e:
    # any failure (bad args, unknown topology, query error) becomes an
    # error response; full traceback only at debug level
    Log.debug(traceback.format_exc())
    self.write_error_response(e)
def executeMetricsQuery(self, tmaster, queryString, start_time, end_time, callback=None):
  """
  Get the specified metrics for the given query in this topology.
  Returns the following dict on success:
  {
  "timeline": [{
  "instance": <instance>,
  "data": {
  <start_time> : <numeric value>,
  <start_time> : <numeric value>,
  ...
  }
  }, {
  ...
  }, ...
  "starttime": <numeric value>,
  "endtime": <numeric value>,
  },

  Returns the following dict on failure:
  {
  "message": "..."
  }
  """
  # tornado coroutine; ``callback`` is accepted for tornado.gen.Task
  # compatibility and is invoked by the gen machinery, not directly here.
  query = Query(self.tracker)
  metrics = yield query.execute_query(tmaster, queryString, start_time, end_time)

  # Parse the response: flatten each metric into a timeline entry,
  # attaching the instance id only when present.
  ret = {}
  ret["starttime"] = start_time
  ret["endtime"] = end_time
  ret["timeline"] = []
  for metric in metrics:
    tl = {
        "data": metric.timeline
    }
    if metric.instance:
      tl["instance"] = metric.instance
    ret["timeline"].append(tl)
  raise tornado.gen.Return(ret)
def create_parser(subparsers):
  '''Register the ``help`` subcommand and return its parser.

  :param subparsers: argparse subparsers object to attach the command to
  :return: the newly created ``help`` sub-parser
  '''
  help_parser = subparsers.add_parser(
      'help',
      help='Prints help for commands',
      add_help=True)

  # retitle argparse's default groups to match the rest of the CLI
  # pylint: disable=protected-access
  help_parser._positionals.title = "Required arguments"
  help_parser._optionals.title = "Optional arguments"

  # optional positional: the command to show detailed help for;
  # defaults to 'help' itself, which means "print the global usage"
  help_parser.add_argument(
      'help-command',
      nargs='?',
      default='help',
      help='Provide help for a command')

  help_parser.set_defaults(subcommand='help')
  return help_parser
def run(command, parser, args, unknown_args):
  '''Print help — either global usage or a specific subcommand's help.

  :param command: name of the invoked command (unused here)
  :param parser: the top-level argument parser
  :param args: parsed arguments as a dict
  :param unknown_args: unparsed leftover arguments (ignored)
  :return: SimpleResult with Ok, or InvocationError for unknown commands
  '''
  # which command the user asked detailed help for
  target = args['help-command']

  # bare "help" (no command given) -> print the global usage
  if target == 'help':
    parser.print_help()
    return SimpleResult(Status.Ok)

  # look up the subparser for the requested command
  subparser = config.get_subparser(parser, target)
  if not subparser:
    Log.error("Unknown subcommand \'%s\'", target)
    return SimpleResult(Status.InvocationError)
  print(subparser.format_help())
  return SimpleResult(Status.Ok)
def emit(self, tup, stream=Stream.DEFAULT_STREAM_ID, anchors=None,
         direct_task=None, need_task_ids=False):
  """Emits a new tuple from this Bolt

  It is compatible with StreamParse API.

  :type tup: list or tuple
  :param tup: the new output Tuple to send from this bolt,
              should only contain only serializable data.
  :type stream: str
  :param stream: the ID of the stream to emit this Tuple to.
                 Leave empty to emit to the default stream.
  :type anchors: list
  :param anchors: a list of HeronTuples to which the emitted Tuples should be anchored.
  :type direct_task: int
  :param direct_task: the task to send the Tuple to if performing a direct emit.
  :type need_task_ids: bool
  :param need_task_ids: indicate whether or not you would like the task IDs the Tuple was emitted.
  :raises TypeError: if ``direct_task`` is given but is not an integer.
  """
  # first check whether this tuple is sane
  self.pplan_helper.check_output_schema(stream, tup)

  # get custom grouping target task ids; get empty list if not custom grouping
  custom_target_task_ids = self.pplan_helper.choose_tasks_for_custom_grouping(stream, tup)

  self.pplan_helper.context.invoke_hook_emit(tup, stream, None)

  data_tuple = tuple_pb2.HeronDataTuple()
  data_tuple.key = 0

  if direct_task is not None:
    if not isinstance(direct_task, int):
      raise TypeError("direct_task argument needs to be an integer, given: %s"
                      % str(type(direct_task)))
    # performing emit-direct
    data_tuple.dest_task_ids.append(direct_task)
  elif custom_target_task_ids is not None:
    # custom grouping: route to every chosen task
    for task_id in custom_target_task_ids:
      data_tuple.dest_task_ids.append(task_id)

  # Set the anchors for a tuple.
  # BUGFIX: the loop variable was previously named ``tup``, shadowing the
  # tuple being emitted — the serialization loop below then serialized the
  # last anchor instead of the caller's tuple whenever anchors were given.
  if anchors is not None:
    merged_roots = set()
    for anchor in [t for t in anchors if isinstance(t, HeronTuple) and t.roots is not None]:
      merged_roots.update(anchor.roots)
    for rt in merged_roots:
      to_add = data_tuple.roots.add()
      to_add.CopyFrom(rt)

  tuple_size_in_bytes = 0
  start_time = time.time()

  # Serialize each value of the outgoing tuple
  for obj in tup:
    serialized = self.serializer.serialize(obj)
    data_tuple.values.append(serialized)
    tuple_size_in_bytes += len(serialized)
  serialize_latency_ns = (time.time() - start_time) * system_constants.SEC_TO_NS
  self.bolt_metrics.serialize_data_tuple(stream, serialize_latency_ns)

  super(BoltInstance, self).admit_data_tuple(stream_id=stream, data_tuple=data_tuple,
                                             tuple_size_in_bytes=tuple_size_in_bytes)
  self.bolt_metrics.update_emit_count(stream)
  if need_task_ids:
    sent_task_ids = custom_target_task_ids or []
    if direct_task is not None:
      sent_task_ids.append(direct_task)
    return sent_task_ids
def process_incoming_tuples(self):
  """Drain and execute tuples buffered in the in-stream.

  Equivalent to ``addBoltTasks()`` but designed for the event-driven,
  single-threaded bolt: it is invoked whenever a tuple is buffered.
  Applies back-pressure by doing nothing (except counting) while the out
  queue is full.
  """
  if not self.output_helper.is_out_queue_available():
    # back-pressure: out queue is full, just record the occurrence
    self.bolt_metrics.update_out_queue_full_count()
    return
  self._read_tuples_and_execute()
  self.output_helper.send_out_tuples()
def ack(self, tup):
  """Mark processing of *tup* as successful (StreamParse-compatible).

  When acking is enabled, an AckTuple carrying the tuple's roots is sent
  upstream; in every case the bolt-ack hook is fired and the processing
  latency is recorded in the metrics.
  """
  if not isinstance(tup, HeronTuple):
    Log.error("Only HeronTuple type is supported in ack()")
    return

  if self.acking_enabled:
    ack_tuple = tuple_pb2.AckTuple()
    ack_tuple.ackedtuple = int(tup.id)

    total_bytes = 0
    for root in tup.roots:
      added = ack_tuple.roots.add()
      added.CopyFrom(root)
      total_bytes += root.ByteSize()
    super(BoltInstance, self).admit_control_tuple(ack_tuple, total_bytes, True)

  # latency measured from when the tuple entered this instance
  process_latency_ns = (time.time() - tup.creation_time) * system_constants.SEC_TO_NS
  self.pplan_helper.context.invoke_hook_bolt_ack(tup, process_latency_ns)
  self.bolt_metrics.acked_tuple(tup.stream, tup.component, process_latency_ns)
def fail(self, tup):
  """Mark processing of *tup* as failed (StreamParse-compatible).

  When acking is enabled, a fail AckTuple carrying the tuple's roots is
  sent upstream; in every case the bolt-fail hook is fired and the failure
  latency is recorded in the metrics.
  """
  if not isinstance(tup, HeronTuple):
    Log.error("Only HeronTuple type is supported in fail()")
    return

  if self.acking_enabled:
    fail_tuple = tuple_pb2.AckTuple()
    fail_tuple.ackedtuple = int(tup.id)

    total_bytes = 0
    for root in tup.roots:
      added = fail_tuple.roots.add()
      added.CopyFrom(root)
      total_bytes += root.ByteSize()
    super(BoltInstance, self).admit_control_tuple(fail_tuple, total_bytes, False)

  # latency measured from when the tuple entered this instance
  fail_latency_ns = (time.time() - tup.creation_time) * system_constants.SEC_TO_NS
  self.pplan_helper.context.invoke_hook_bolt_fail(tup, fail_latency_ns)
  self.bolt_metrics.failed_tuple(tup.stream, tup.component, fail_latency_ns)
def execute(handlers): ''' Run the command :return: ''' # verify if the environment variables are correctly set check_environment() # create the argument parser parser = create_parser(handlers) # if no argument is provided, print help and exit if len(sys.argv[1:]) == 0: parser.print_help() return 0 # insert the boolean values for some of the options sys.argv = config.insert_bool_values(sys.argv) try: # parse the args args, unknown_args = parser.parse_known_args() except ValueError as ex: Log.error("Error while parsing arguments: %s", str(ex)) Log.debug(traceback.format_exc()) sys.exit(1) command_line_args = vars(args) # set log level log.set_logging_level(command_line_args) Log.debug("Input Command Line Args: %s", command_line_args) # command to be execute command = command_line_args['subcommand'] # print the input parameters, if verbose is enabled Log.debug("Processed Command Line Args: %s", command_line_args) results = run(handlers, command, parser, command_line_args, unknown_args) return 0 if result.is_successful(results) else 1
def create_parser(subparsers):
  '''
  Create a subparser for the standalone command
  :param subparsers: argparse subparsers object to attach the command to
  :return: the 'standalone' sub-parser
  '''
  parser = subparsers.add_parser(
      'standalone',
      help='Start a standalone Heron cluster',
      add_help=True
  )

  cli_args.add_titles(parser)

  # second-level subcommands: cluster / set / template / get / info
  parser_action = parser.add_subparsers()

  # "standalone cluster <start|stop>"
  parser_cluster = parser_action.add_parser(
      Action.CLUSTER,
      help='Start or stop cluster',
      add_help=True,
      formatter_class=argparse.RawTextHelpFormatter,
  )
  parser_cluster.set_defaults(action=Action.CLUSTER)

  # "standalone set" — edit the inventory, then re-template the configs
  parser_set = parser_action.add_parser(
      Action.SET,
      help='Set configurations for standalone cluster e.g. master or slave nodes',
      add_help=True,
      formatter_class=argparse.RawTextHelpFormatter
  )
  parser_set.set_defaults(action=Action.SET)

  # "standalone template configs" — regenerate configs from the inventory
  parser_template = parser_action.add_parser(
      Action.TEMPLATE,
      help='Template Heron configurations based on cluster roles',
      add_help=True,
      formatter_class=argparse.RawTextHelpFormatter
  )
  parser_template.set_defaults(action=Action.TEMPLATE)

  parser_cluster.add_argument(
      TYPE,
      type=str,
      choices={Cluster.START, Cluster.STOP},
      help= \
      """
      Choices supports the following:
        start - Start standalone Heron cluster
        stop - Stop standalone Heron cluster
      """
  )

  parser_template.add_argument(
      TYPE,
      type=str,
      choices={"configs"},
  )

  # "standalone get <url-kind>"
  parser_get = parser_action.add_parser(
      Action.GET,
      help='Get attributes about the standalone cluster',
      add_help=True,
      formatter_class=argparse.RawTextHelpFormatter
  )
  parser_get.set_defaults(action=Action.GET)

  parser_get.add_argument(
      TYPE,
      type=str,
      choices={Get.SERVICE_URL, Get.HERON_TRACKER_URL, Get.HERON_UI_URL},
      help= \
      """
      Choices supports the following:
        service-url - Get the service url for standalone cluster
        heron-tracker-url - Get the url for the heron tracker in standalone cluster
        heron-ui-url - Get the url for the heron ui standalone cluster
      """
  )

  # "standalone info" — dump roles and urls as JSON
  parser_info = parser_action.add_parser(
      Action.INFO,
      help='Get general information about the standalone cluster',
      add_help=True,
      formatter_class=argparse.RawTextHelpFormatter
  )
  parser_info.set_defaults(action=Action.INFO)

  # shared flags (--verbose, --config-path, --heron-dir) on every subcommand
  add_additional_args([parser_set, parser_cluster, parser_template,
                       parser_get, parser_info])

  parser.set_defaults(subcommand='standalone')
  return parser
def run(command, parser, cl_args, unknown_args):
  ''' runs parser '''
  # Dispatch on the parsed standalone sub-action (set/cluster/template/get/info).
  # NOTE: this module uses Python 2 ``print`` statements.
  action = cl_args["action"]
  if action == Action.SET:
    # open the inventory in an editor, then regenerate the config files
    call_editor(get_inventory_file(cl_args))
    update_config_files(cl_args)
  elif action == Action.CLUSTER:
    action_type = cl_args["type"]
    if action_type == Cluster.START:
      start_cluster(cl_args)
    elif action_type == Cluster.STOP:
      # destructive: require confirmation before tearing everything down
      if check_sure(cl_args, "Are you sure you want to stop the cluster?"
                            " This will terminate everything running in "
                            "the cluster and remove any scheduler state."):
        stop_cluster(cl_args)
    else:
      raise ValueError("Invalid cluster action %s" % action_type)
  elif action == Action.TEMPLATE:
    update_config_files(cl_args)
  elif action == Action.GET:
    action_type = cl_args["type"]
    if action_type == Get.SERVICE_URL:
      print get_service_url(cl_args)
    elif action_type == Get.HERON_UI_URL:
      print get_heron_ui_url(cl_args)
    elif action_type == Get.HERON_TRACKER_URL:
      print get_heron_tracker_url(cl_args)
    else:
      raise ValueError("Invalid get action %s" % action_type)
  elif action == Action.INFO:
    print_cluster_info(cl_args)
  else:
    raise ValueError("Invalid action %s" % action)

  return SimpleResult(Status.Ok)
def template_slave_hcl(cl_args, masters):
  '''Render the Nomad slave HCL config from its template, pointing the
  slave at every master address (each quoted for HCL).'''
  config_path = cl_args["config_path"]
  template = "%s/standalone/templates/slave.template.hcl" % config_path
  actual = "%s/standalone/resources/slave.hcl" % config_path
  quoted_masters = ['"%s"' % master for master in masters]
  template_file(template, actual,
                {"<nomad_masters:master_port>": ", ".join(quoted_masters)})
def template_scheduler_yaml(cl_args, masters):
  '''Render scheduler.yaml pointing at the first master's Nomad endpoint.'''
  config_path = cl_args["config_path"]
  primary = masters[0]
  actual = "%s/standalone/scheduler.yaml" % config_path
  template = "%s/standalone/templates/scheduler.template.yaml" % config_path
  # Nomad HTTP API listens on port 4646
  template_file(template, actual,
                {"<scheduler_uri>": "http://%s:4646" % primary})
def template_uploader_yaml(cl_args, masters):
  '''Render uploader.yaml pointing at the first master's HTTP upload endpoint.'''
  config_path = cl_args["config_path"]
  primary = masters[0]
  template = "%s/standalone/templates/uploader.template.yaml" % config_path
  actual = "%s/standalone/uploader.yaml" % config_path
  template_file(template, actual,
                {"<http_uploader_uri>": "http://%s:9000/api/v1/file/upload" % primary})
def template_apiserver_hcl(cl_args, masters, zookeepers):
  """Render the API server Nomad job HCL from its template."""
  config_path = cl_args["config_path"]
  primary = masters[0]
  template = "%s/standalone/templates/apiserver.template.hcl" % config_path
  actual = "%s/standalone/resources/apiserver.hcl" % config_path

  # apiserver binary lives in the local bin dir when the master is this
  # machine, otherwise under the remote user's ~/.heron installation
  if is_self(primary):
    executable = '"%s/heron-apiserver"' % config.get_heron_bin_dir()
  else:
    executable = '"%s/.heron/bin/heron-apiserver"' % get_remote_home(primary, cl_args)

  # default zookeeper client port 2181 is appended when none was given
  zk_endpoints = ",".join(
      ['%s' % zk if ":" in zk else '%s:2181' % zk for zk in zookeepers])

  template_file(template, actual, {
      "<heron_apiserver_hostname>": '"%s"' % get_hostname(primary, cl_args),
      "<heron_apiserver_executable>": executable,
      "<zookeeper_host:zookeeper_port>": zk_endpoints,
      "<scheduler_uri>": "http://%s:4646" % primary
  })
def template_statemgr_yaml(cl_args, zookeepers):
  '''Render statemgr.yaml with the quoted zookeeper connection string.'''
  config_path = cl_args["config_path"]
  template = "%s/standalone/templates/statemgr.template.yaml" % config_path
  actual = "%s/standalone/statemgr.yaml" % config_path
  # each endpoint is quoted for YAML; default port 2181 appended when missing
  endpoints = ['"%s"' % zk if ":" in zk else '"%s:2181"' % zk for zk in zookeepers]
  template_file(template, actual,
                {"<zookeeper_host:zookeeper_port>": ",".join(endpoints)})
def template_heron_tools_hcl(cl_args, masters, zookeepers):
  '''Render the Heron tools (tracker + UI) Nomad job HCL from its template.'''
  config_path = cl_args["config_path"]
  template = "%s/standalone/templates/heron_tools.template.hcl" % config_path
  actual = "%s/standalone/resources/heron_tools.hcl" % config_path
  primary = masters[0]
  # default zookeeper client port 2181 is appended when none was given
  zk_endpoints = ",".join(
      ['%s' % zk if ":" in zk else '%s:2181' % zk for zk in zookeepers])
  template_file(template, actual, {
      "<zookeeper_host:zookeeper_port>": zk_endpoints,
      "<heron_tracker_executable>": '"%s/heron-tracker"' % config.get_heron_bin_dir(),
      "<heron_tools_hostname>": '"%s"' % get_hostname(primary, cl_args),
      "<heron_ui_executable>": '"%s/heron-ui"' % config.get_heron_bin_dir()
  })
def print_cluster_info(cl_args):
  ''' get cluster info for standalone cluster '''
  # Collect the role assignments and service URLs and dump them as an
  # indented JSON document on stdout (Python 2 ``print`` statement).
  parsed_roles = read_and_parse_roles(cl_args)
  masters = list(parsed_roles[Role.MASTERS])
  slaves = list(parsed_roles[Role.SLAVES])
  zookeepers = list(parsed_roles[Role.ZOOKEEPERS])
  cluster = list(parsed_roles[Role.CLUSTER])

  # OrderedDicts are used here so that the key order can be
  # specified directly
  info = OrderedDict()
  info['numNodes'] = len(cluster)
  info['nodes'] = cluster

  roles = OrderedDict()
  roles['masters'] = masters
  roles['slaves'] = slaves
  roles['zookeepers'] = zookeepers

  urls = OrderedDict()
  urls['serviceUrl'] = get_service_url(cl_args)
  urls['heronUi'] = get_heron_ui_url(cl_args)
  urls['heronTracker'] = get_heron_tracker_url(cl_args)

  info['roles'] = roles
  info['urls'] = urls

  print json.dumps(info, indent=2)
def add_additional_args(parsers):
  '''Attach the shared flags (verbose, config, --heron-dir) to each of the
  given sub-parsers.'''
  for sub_parser in parsers:
    cli_args.add_verbose(sub_parser)
    cli_args.add_config(sub_parser)
    sub_parser.add_argument(
        '--heron-dir',
        default=config.get_heron_dir(),
        help='Path to Heron home directory')
def stop_cluster(cl_args):
  '''Tear down the standalone cluster.

  Stops every Nomad job via the first master, kills heron-nomad processes
  on each master/slave node, and removes the local Nomad data directories.
  Teardown is best-effort: individual failures are logged at debug level
  and the remaining steps still run.
  '''
  Log.info("Terminating cluster...")

  roles = read_and_parse_roles(cl_args)
  masters = roles[Role.MASTERS]
  slaves = roles[Role.SLAVES]
  dist_nodes = masters.union(slaves)

  # stop all jobs
  if masters:
    try:
      single_master = list(masters)[0]
      jobs = get_jobs(cl_args, single_master)
      for job in jobs:
        job_id = job["ID"]
        Log.info("Terminating job %s" % job_id)
        delete_job(cl_args, job_id, single_master)
    # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # still propagate; node teardown below proceeds regardless of failures
    except Exception:
      Log.debug("Error stopping jobs")
      Log.debug(sys.exc_info()[0])

  for node in dist_nodes:
    Log.info("Terminating processes on %s" % node)
    if not is_self(node):
      # '$' is escaped so awk's $2 survives the remote shell invocation
      cmd = "ps aux | grep heron-nomad | awk '{print \$2}' " \
            "| xargs kill"
      cmd = ssh_remote_execute(cmd, node, cl_args)
    else:
      cmd = "ps aux | grep heron-nomad | awk '{print $2}' " \
            "| xargs kill"
    Log.debug(cmd)
    pid = subprocess.Popen(cmd,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    return_code = pid.wait()
    output = pid.communicate()
    Log.debug("return code: %s output: %s" % (return_code, output))

    Log.info("Cleaning up directories on %s" % node)
    cmd = "rm -rf /tmp/slave ; rm -rf /tmp/master"
    if not is_self(node):
      cmd = ssh_remote_execute(cmd, node, cl_args)
    Log.debug(cmd)
    pid = subprocess.Popen(cmd,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    return_code = pid.wait()
    output = pid.communicate()
    Log.debug("return code: %s output: %s" % (return_code, output))
def start_cluster(cl_args):
  '''Bring up a standalone Heron cluster: masters, slaves, API server, tools.

  Exits the process when any of the three roles has no hosts configured.
  '''
  roles = read_and_parse_roles(cl_args)
  masters = roles[Role.MASTERS]
  slaves = roles[Role.SLAVES]
  zookeepers = roles[Role.ZOOKEEPERS]
  Log.info("Roles:")
  Log.info(" - Master Servers: %s" % list(masters))
  Log.info(" - Slave Servers: %s" % list(slaves))
  Log.info(" - Zookeeper Servers: %s" % list(zookeepers))

  # every role needs at least one host
  if not masters:
    Log.error("No master servers specified!")
    sys.exit(-1)
  if not slaves:
    Log.error("No slave servers specified!")
    sys.exit(-1)
  if not zookeepers:
    Log.error("No zookeeper servers specified!")
    sys.exit(-1)

  # make sure configs are templated
  update_config_files(cl_args)

  dist_nodes = list(masters.union(slaves))
  # a purely local deployment (single node = this machine) needs no package copy
  local_only = len(dist_nodes) == 1 and is_self(dist_nodes[0])
  if not local_only:
    distribute_package(roles, cl_args)

  start_master_nodes(masters, cl_args)
  start_slave_nodes(slaves, cl_args)
  start_api_server(masters, cl_args)
  start_heron_tools(masters, cl_args)
  Log.info("Heron standalone cluster complete!")
def start_heron_tools(masters, cl_args):
  ''' Start Heron tracker and UI '''
  # The tools run as a Nomad job submitted to the first master; wait for
  # that master's Nomad server to be up before submitting.
  single_master = list(masters)[0]
  wait_for_master_to_start(single_master)

  # background the nomad run; output goes to a local log file
  cmd = "%s run %s >> /tmp/heron_tools_start.log 2>&1 &" \
        % (get_nomad_path(cl_args), get_heron_tools_job_file(cl_args))

  Log.info("Starting Heron Tools on %s" % single_master)

  # wrap in ssh when the master is a remote machine
  if not is_self(single_master):
    cmd = ssh_remote_execute(cmd, single_master, cl_args)
  Log.debug(cmd)
  pid = subprocess.Popen(cmd,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

  return_code = pid.wait()
  output = pid.communicate()
  Log.debug("return code: %s output: %s" % (return_code, output))
  if return_code != 0:
    Log.error("Failed to start Heron Tools on %s with error:\n%s"
              % (single_master, output[1]))
    sys.exit(-1)

  # block until Nomad reports the job as running
  wait_for_job_to_start(single_master, "heron-tools")
  Log.info("Done starting Heron Tools")
def distribute_package(roles, cl_args):
  '''Tar the local Heron directory and scp it to every master/slave node.'''
  Log.info("Distributing heron package to nodes (this might take a while)...")
  masters = roles[Role.MASTERS]
  slaves = roles[Role.SLAVES]
  # stage the tarball under a unique temp name
  tar_file = tempfile.NamedTemporaryFile(suffix=".tmp").name
  Log.debug("TAR file %s to %s" % (cl_args["heron_dir"], tar_file))
  make_tarfile(tar_file, cl_args["heron_dir"])
  scp_package(tar_file, masters.union(slaves), cl_args)
def wait_for_master_to_start(single_master):
  '''Block until the Nomad server on *single_master* elects a leader.

  Polls the leader status endpoint; on failure waits a second and retries,
  giving up (and exiting the process) after ~10 attempts.
  '''
  i = 0
  while True:
    try:
      r = requests.get("http://%s:4646/v1/status/leader" % single_master)
      if r.status_code == 200:
        break
    # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # are not swallowed while polling
    except Exception:
      Log.debug(sys.exc_info()[0])
      Log.info("Waiting for cluster to come up... %s" % i)
      time.sleep(1)
      if i > 10:
        Log.error("Failed to start Nomad Cluster!")
        sys.exit(-1)
    i = i + 1
def wait_for_job_to_start(single_master, job):
  '''Block until Nomad reports *job* on *single_master* as "running".

  Polls the job endpoint once a second, giving up (and exiting the
  process) after ~20 attempts.
  '''
  i = 0
  while True:
    try:
      r = requests.get("http://%s:4646/v1/job/%s" % (single_master, job))
      if r.status_code == 200 and r.json()["Status"] == "running":
        break
      else:
        # deliberate control flow: funnel "not running yet" into the same
        # retry path as connection errors
        raise RuntimeError()
    # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # are not swallowed while polling
    except Exception:
      Log.debug(sys.exc_info()[0])
      Log.info("Waiting for %s to come up... %s" % (job, i))
      time.sleep(1)
      if i > 20:
        Log.error("Failed to start Nomad Cluster!")
        sys.exit(-1)
    i = i + 1
def scp_package(package_file, destinations, cl_args):
  ''' scp and extract package '''
  # Copy the tarball to every remote destination in parallel (one shell
  # pipeline per host), then reap the children and report any failures.
  pids = []
  for dest in destinations:
    if is_self(dest):
      # nothing to copy to the local machine
      continue

    Log.info("Server: %s" % dest)
    file_path = "/tmp/heron.tar.gz"
    dest_file_path = "%s:%s" % (dest, file_path)

    # replace any previous install, then unpack stripping the top-level dir
    remote_cmd = "rm -rf ~/.heron && mkdir ~/.heron " \
                 "&& tar -xzvf %s -C ~/.heron --strip-components 1" % (file_path)
    cmd = '%s && %s' \
          % (scp_cmd(package_file, dest_file_path, cl_args),
             ssh_remote_execute(remote_cmd, dest, cl_args))
    Log.debug(cmd)
    pid = subprocess.Popen(cmd,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    pids.append({"pid": pid, "dest": dest})

  # collect results; only exit after every copy has been attempted
  errors = []
  for entry in pids:
    pid = entry["pid"]
    return_code = pid.wait()
    output = pid.communicate()
    Log.debug("return code: %s output: %s" % (return_code, output))
    if return_code != 0:
      errors.append("Failed to scp package to %s with error:\n%s"
                    % (entry["dest"], output[1]))

  if errors:
    for error in errors:
      Log.error(error)
    sys.exit(-1)

  Log.info("Done distributing packages")
def make_tarfile(output_filename, source_dir):
  '''Create a gzip-compressed tarball of *source_dir*.

  The archive stores the directory under its base name only (no leading
  path components), so extracting yields a single top-level directory.
  '''
  archive_root = os.path.basename(source_dir)
  tar = tarfile.open(output_filename, "w:gz")
  try:
    tar.add(source_dir, arcname=archive_root)
  finally:
    tar.close()
def start_master_nodes(masters, cl_args):
  '''Launch a Nomad server agent on every master node.

  Agents are started in parallel (locally or via ssh); the function exits
  the process if any launch command fails.
  '''
  launched = []
  for master in masters:
    Log.info("Starting master on %s" % master)
    # background the agent; its output goes to a log file on the node
    cmd = "%s agent -config %s >> /tmp/nomad_server_log 2>&1 &" \
          % (get_nomad_path(cl_args), get_nomad_master_config_file(cl_args))
    if not is_self(master):
      cmd = ssh_remote_execute(cmd, master, cl_args)
    Log.debug(cmd)
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    launched.append({"pid": proc, "dest": master})

  # reap every launcher before deciding success/failure
  failures = []
  for entry in launched:
    proc = entry["pid"]
    return_code = proc.wait()
    output = proc.communicate()
    Log.debug("return code: %s output: %s" % (return_code, output))
    if return_code != 0:
      failures.append("Failed to start master on %s with error:\n%s"
                      % (entry["dest"], output[1]))

  if failures:
    for failure in failures:
      Log.error(failure)
    sys.exit(-1)

  Log.info("Done starting masters")
def start_slave_nodes(slaves, cl_args):
  '''Launch a Nomad client agent on every slave node.

  Agents are started in parallel (locally or via ssh); the function exits
  the process if any launch command fails.
  '''
  launched = []
  for slave in slaves:
    Log.info("Starting slave on %s" % slave)
    # background the agent; its output goes to a log file on the node
    cmd = "%s agent -config %s >> /tmp/nomad_client.log 2>&1 &" \
          % (get_nomad_path(cl_args), get_nomad_slave_config_file(cl_args))
    if not is_self(slave):
      cmd = ssh_remote_execute(cmd, slave, cl_args)
    Log.debug(cmd)
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    launched.append({"pid": proc, "dest": slave})

  # reap every launcher before deciding success/failure
  failures = []
  for entry in launched:
    proc = entry["pid"]
    return_code = proc.wait()
    output = proc.communicate()
    Log.debug("return code: %s output: %s" % (return_code, output))
    if return_code != 0:
      failures.append("Failed to start slave on %s with error:\n%s"
                      % (entry["dest"], output[1]))

  if failures:
    for failure in failures:
      Log.error(failure)
    sys.exit(-1)

  Log.info("Done starting slaves")
def read_and_parse_roles(cl_args):
  '''Read the inventory file and derive the role -> hosts mapping.

  Exits the process when the inventory cannot be parsed or is missing
  zookeeper/cluster entries. The first cluster node doubles as the single
  master; every cluster node acts as a slave.
  '''
  roles = dict()
  with open(get_inventory_file(cl_args), 'r') as stream:
    try:
      # FIX: safe_load — the plain yaml.load can construct arbitrary
      # Python objects from tagged YAML in the inventory file
      roles = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
      Log.error("Error parsing inventory file: %s" % exc)
      sys.exit(-1)

  # FIX: error messages previously read "Zookeeper servers node defined!"
  # and "Heron cluster nodes defined!" — the negation was missing
  if Role.ZOOKEEPERS not in roles or not roles[Role.ZOOKEEPERS]:
    Log.error("No zookeeper servers defined!")
    sys.exit(-1)
  if Role.CLUSTER not in roles or not roles[Role.CLUSTER]:
    Log.error("No Heron cluster nodes defined!")
    sys.exit(-1)

  # Set roles
  roles[Role.MASTERS] = set([roles[Role.CLUSTER][0]])
  roles[Role.SLAVES] = set(roles[Role.CLUSTER])
  roles[Role.ZOOKEEPERS] = set(roles[Role.ZOOKEEPERS])
  roles[Role.CLUSTER] = set(roles[Role.CLUSTER])

  return roles