Search is not available for this dataset
text
stringlengths
75
104k
def read_file(file_path):
  ''' Return the non-comment, non-empty lines of a file, newlines stripped '''
  with open(file_path, "r") as src:
    kept = [line.strip("\n") for line in src.readlines() if not line.startswith("#")]
  # drop lines that are empty after stripping
  return [line for line in kept if line]
def call_editor(file_path):
  ''' Open file_path in the user's $EDITOR (falls back to vim) '''
  editor = os.environ.get('EDITOR', 'vim')
  with open(file_path, 'r+') as handle:
    call([editor, handle.name])
def get_remote_home(host, cl_args):
  ''' Return the home directory of a (possibly remote) host.

  Runs ``echo ~`` locally, or over ssh when host is not this machine.
  Logs and exits the process on command failure.
  '''
  cmd = "echo ~"
  if not is_self(host):
    cmd = ssh_remote_execute(cmd, host, cl_args)
  pid = subprocess.Popen(cmd,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
  # communicate() first: calling wait() before draining PIPEs can deadlock
  # if the child fills a pipe buffer before exiting
  output = pid.communicate()
  if pid.returncode != 0:
    Log.error("Failed to get home path for remote host %s with output:\n%s" % (host, output))
    sys.exit(-1)
  return output[0].strip("\n")
def get_hostname(ip_addr, cl_args):
  ''' Return the hostname of a (possibly remote) host given its address.

  Uses the local hostname when ip_addr is this machine, otherwise runs
  ``hostname`` over ssh. Logs and exits the process on command failure.
  '''
  if is_self(ip_addr):
    return get_self_hostname()
  cmd = "hostname"
  ssh_cmd = ssh_remote_execute(cmd, ip_addr, cl_args)
  pid = subprocess.Popen(ssh_cmd,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
  # communicate() first: calling wait() before draining PIPEs can deadlock
  # if the child fills a pipe buffer before exiting
  output = pid.communicate()
  if pid.returncode != 0:
    Log.error("Failed to get hostname for remote host %s with output:\n%s" % (ip_addr, output))
    sys.exit(-1)
  return output[0].strip("\n")
def is_self(addr):
  ''' Return True if addr is an IPv4 address or the hostname of this machine '''
  local_ips = []
  for iface in netifaces.interfaces():
    # only consider interfaces that carry IPv4 addresses
    for ipv4 in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
      if "addr" in ipv4:
        local_ips.append(ipv4["addr"])
  return addr in local_ips or addr == get_self_hostname()
def log(self, message, level=None):
  """Log message, optionally providing a logging level

  It is compatible with StreamParse API.

  :type message: str
  :param message: the log message to send
  :type level: str
  :param level: the logging level, one of: trace (=debug), debug, info,
                warn or error (default: info)
  """
  # map StreamParse level names onto stdlib logging levels
  level_map = {"trace": logging.DEBUG,
               "debug": logging.DEBUG,
               "info": logging.INFO,
               "warn": logging.WARNING,
               "error": logging.ERROR}
  if level is None:
    resolved = logging.INFO
  elif level in level_map:
    resolved = level_map[level]
  else:
    raise ValueError("%s is not supported as logging level" % str(level))
  self.logger.log(resolved, message)
def load_py_instance(self, is_spout):
  """Loads user defined component (spout/bolt)"""
  kind = "spout" if is_spout else "bolt"
  try:
    if is_spout:
      comp_proto = self.pplan_helper.get_my_spout()
      py_classpath = comp_proto.comp.class_name
      self.logger.info("Loading Spout from: %s", py_classpath)
    else:
      comp_proto = self.pplan_helper.get_my_bolt()
      py_classpath = comp_proto.comp.class_name
      self.logger.info("Loading Bolt from: %s", py_classpath)
    # load the pex first, then import the named class out of it
    pex_path = self.pplan_helper.topology_pex_abs_path
    pex_loader.load_pex(pex_path)
    spbl_class = pex_loader.import_and_get_class(pex_path, py_classpath)
  except Exception as e:
    self.logger.error(traceback.format_exc())
    raise RuntimeError("Error when loading a %s from pex: %s" % (kind, str(e)))
  return spbl_class
def get(self):
  """ get method """
  # requested filters; an empty list means "no filter" for that dimension
  clusters = self.get_arguments(constants.PARAM_CLUSTER)
  environs = self.get_arguments(constants.PARAM_ENVIRON)
  role = self.get_argument_role()
  ret = {}
  for topology in self.tracker.topologies:
    cluster = topology.cluster
    environ = topology.environ
    execution_state = topology.execution_state
    # skip topologies with incomplete metadata
    if not cluster or not execution_state or not environ:
      continue
    topo_role = execution_state.role
    if not topo_role:
      continue
    # apply each requested filter; an empty filter matches everything
    if clusters and cluster not in clusters:
      continue
    if environs and environ not in environs:
      continue
    if role and role != topo_role:
      continue
    # group as cluster -> role -> environ -> [topology names]
    ret.setdefault(cluster, {}) \
       .setdefault(topo_role, {}) \
       .setdefault(environ, []) \
       .append(topology.name)
  self.write_success_response(ret)
def dereference_symlinks(src):
  """ Follow symbolic links starting at `src` until a non-link is reached.

  Note that this is different than `os.path.realpath`, as path components
  leading up to the final location may still be symbolic links.
  """
  path = src
  while os.path.islink(path):
    # resolve one level: the link target is relative to the link's directory
    path = os.path.join(os.path.dirname(path), os.readlink(path))
  return path
def get(self):
  """ Respond with the list of known state manager (cluster) names """
  names = [manager.name for manager in self.tracker.state_managers]
  self.write_success_response(names)
def get_cluster_role_env_topologies(cluster, role, env):
  ''' List the topologies in `cluster` submitted by `role` under environment `env`.

  :param cluster: cluster name
  :param role: submitting role
  :param env: environment name
  :return: whatever _get_topologies returns for these filters
  '''
  return _get_topologies(cluster, role=role, env=env)
def get_execution_state(cluster, environ, topology, role=None):
  ''' Fetch the execution state of a topology from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param role: optional role filter
  :return: execution state JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ, 'topology': topology}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(EXECUTION_STATE_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_logical_plan(cluster, environ, topology, role=None):
  ''' Fetch the logical plan of a topology from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param role: optional role filter
  :return: logical plan JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ, 'topology': topology}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(LOGICALPLAN_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_comps(cluster, environ, topology, role=None):
  ''' Get the list of component names (spouts and bolts) for the topology.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param role: optional role filter
  :return: list of component names (via tornado.gen.Return)
  '''
  params = dict(cluster=cluster, environ=environ, topology=topology)
  if role is not None:
    params['role'] = role
  request_url = tornado.httputil.url_concat(
      create_url(LOGICALPLAN_URL_FMT), params)
  lplan = yield fetch_url_as_json(request_url)
  # list() so this works on both py2 and py3: dict views don't support `+`
  comps = list(lplan['spouts'].keys()) + list(lplan['bolts'].keys())
  raise tornado.gen.Return(comps)
def get_instances(cluster, environ, topology, role=None):
  ''' Get the list of instance names for the topology from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param role: optional role filter
  :return: list of instance names (via tornado.gen.Return)
  '''
  params = dict(cluster=cluster, environ=environ, topology=topology)
  if role is not None:
    params['role'] = role
  request_url = tornado.httputil.url_concat(
      create_url(PHYSICALPLAN_URL_FMT), params)
  pplan = yield fetch_url_as_json(request_url)
  # list() so callers get a real list on py3 instead of a dict view
  instances = list(pplan['instances'].keys())
  raise tornado.gen.Return(instances)
def get_physical_plan(cluster, environ, topology, role=None):
  ''' Fetch the physical plan of a topology from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param role: optional role filter
  :return: physical plan JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ, 'topology': topology}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(PHYSICALPLAN_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_scheduler_location(cluster, environ, topology, role=None):
  ''' Fetch the scheduler location of a topology from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param role: optional role filter
  :return: scheduler location JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ, 'topology': topology}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(SCHEDULER_LOCATION_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_component_exceptionsummary(cluster, environ, topology, component, role=None):
  ''' Fetch the exception summary for one component of a topology.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param component: component name
  :param role: optional role filter
  :return: exception summary JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ,
            'topology': topology, 'component': component}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(EXCEPTION_SUMMARY_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_component_exceptions(cluster, environ, topology, component, role=None):
  ''' Fetch the exceptions for one component of a topology.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param component: component name
  :param role: optional role filter
  :return: exceptions JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ,
            'topology': topology, 'component': component}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(EXCEPTIONS_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_comp_instance_metrics(cluster, environ, topology, component,
                              metrics, instances, time_range, role=None):
  ''' Fetch metrics for specific instances of a component from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param component: component name
  :param metrics: dict of display name to cuckoo name
  :param instances: one instance name or a list of them
  :param time_range: 2-tuple (start, end); only the end is sent as interval
  :param role: optional role filter
  :return: metrics JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ,
            'topology': topology, 'component': component}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(METRICS_URL_FMT), params)
  # accept either a single instance or a list of them
  instance_list = instances if isinstance(instances, list) else [instances]
  # NOTE(review): only metric_name[0] is sent — each dict value is indexed
  for _, metric_name in metrics.items():
    url = tornado.httputil.url_concat(url, dict(metricname=metric_name[0]))
  for inst in instance_list:
    url = tornado.httputil.url_concat(url, dict(instance=inst))
  # only the end of the range is sent, as "interval"
  url = tornado.httputil.url_concat(url, dict(interval=time_range[1]))
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_comp_metrics(cluster, environ, topology, component,
                     instances, metricnames, time_range, role=None):
  ''' Fetch metrics for all given instances of a component from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param component: component name
  :param instances: list of instance names
  :param metricnames: dict of display name to cuckoo name (keys are sent)
  :param time_range: 2-tuple (start, end); only the end is sent as interval
  :param role: optional role filter
  :return: metrics JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ,
            'topology': topology, 'component': component}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(METRICS_URL_FMT), params)
  for metric_name in metricnames:
    url = tornado.httputil.url_concat(url, dict(metricname=metric_name))
  for instance in instances:
    url = tornado.httputil.url_concat(url, dict(instance=instance))
  # only the end of the range is sent, as "interval"
  url = tornado.httputil.url_concat(url, dict(interval=time_range[1]))
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_metrics(cluster, environment, topology, timerange, query, role=None):
  ''' Run a metrics query against the tracker over a time range.

  :param cluster: cluster name
  :param environment: environment name
  :param topology: topology name
  :param timerange: 2-tuple (starttime, endtime)
  :param query: metrics query string
  :param role: optional role filter
  :return: query result JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster,
            'environ': environment,
            'topology': topology,
            'starttime': timerange[0],
            'endtime': timerange[1],
            'query': query}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(METRICS_QUERY_URL_FMT), params)
  logging.info("get_metrics %s", url)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_comp_metrics_timeline(cluster, environ, topology, component,
                              instances, metricnames, time_range, role=None):
  ''' Get the minute-by-minute metrics for all instances of a topology from tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param component: component name
  :param instances: list of instance names
  :param metricnames: dict of display name to cuckoo name (keys are sent)
  :param time_range: 2-tuple consisting of start and end of range
  :param role: optional role filter
  :return: timeline JSON (via tornado.gen.Return)
  '''
  params = dict(
      cluster=cluster,
      environ=environ,
      topology=topology,
      component=component)
  if role is not None:
    params['role'] = role
  # role (when given) is already in params; the old code appended it to the
  # URL a second time via another url_concat
  request_url = tornado.httputil.url_concat(create_url(METRICS_TIMELINE_URL_FMT), params)
  # append each metric to the url
  for metric_name in metricnames:
    request_url = tornado.httputil.url_concat(request_url, dict(metricname=metric_name))
  # append each instance to the url
  for instance in instances:
    request_url = tornado.httputil.url_concat(request_url, dict(instance=instance))
  # append the time interval to the url
  request_url = tornado.httputil.url_concat(
      request_url, dict(starttime=time_range[0], endtime=time_range[1]))
  raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
def get_topology_info(cluster, environ, topology, role=None):
  ''' Fetch the full info blob for a topology from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param role: optional role filter
  :return: topology info JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ, 'topology': topology}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(INFO_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_instance_pid(cluster, environ, topology, instance, role=None):
  ''' Fetch the pid of an instance from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param instance: instance name
  :param role: optional role filter
  :return: pid JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ,
            'topology': topology, 'instance': instance}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(PID_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_instance_jstack(cluster, environ, topology, instance, role=None):
  ''' Fetch a jstack dump of an instance from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param instance: instance name
  :param role: optional role filter
  :return: jstack JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ,
            'topology': topology, 'instance': instance}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(JSTACK_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def get_instance_mem_histogram(cluster, environ, topology, instance, role=None):
  ''' Fetch a memory histogram of an instance from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param instance: instance name
  :param role: optional role filter
  :return: histogram JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ,
            'topology': topology, 'instance': instance}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(HISTOGRAM_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def run_instance_jmap(cluster, environ, topology, instance, role=None):
  ''' Trigger a jmap run for an instance via the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param instance: instance name
  :param role: optional role filter
  :return: jmap JSON (via tornado.gen.Return)
  '''
  params = dict(
      cluster=cluster,
      environ=environ,
      topology=topology,
      instance=instance)
  if role is not None:
    params['role'] = role
  # role (when given) is already in params; the old code appended it to the
  # URL a second time via another url_concat
  request_url = tornado.httputil.url_concat(
      create_url(JMAP_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
def get_container_file_download_url(cluster, environ, topology, container, path, role=None):
  ''' Build the tracker URL for downloading a file from a container.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param container: container id
  :param path: file path inside the container
  :param role: optional role filter
  :return: the download URL string
  '''
  params = dict(
      cluster=cluster,
      environ=environ,
      topology=topology,
      container=container,
      path=path)
  if role is not None:
    params['role'] = role
  # role (when given) is already in params; the old code appended it to the
  # URL a second time via another url_concat
  request_url = tornado.httputil.url_concat(
      create_url(FILE_DOWNLOAD_URL_FMT), params)
  return request_url
def get_container_file_data(cluster, environ, topology, container, path,
                            offset, length, role=None):
  ''' Fetch a slice of a container file from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param container: container id
  :param path: file path inside the container
  :param offset: byte offset to read from
  :param length: number of bytes to read
  :param role: optional role filter
  :return: file data JSON (via tornado.gen.Return)
  '''
  params = dict(
      cluster=cluster,
      environ=environ,
      topology=topology,
      container=container,
      path=path,
      offset=offset,
      length=length)
  if role is not None:
    params['role'] = role
  # role (when given) is already in params; the old code appended it to the
  # URL a second time via another url_concat
  request_url = tornado.httputil.url_concat(
      create_url(FILE_DATA_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
def get_filestats(cluster, environ, topology, container, path, role=None):
  ''' Fetch directory/file listing stats for a container path from the tracker.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param container: container id
  :param path: path inside the container
  :param role: optional role filter
  :return: file stats JSON (via tornado.gen.Return)
  '''
  params = {'cluster': cluster, 'environ': environ, 'topology': topology,
            'container': container, 'path': path}
  if role is not None:
    params['role'] = role
  url = tornado.httputil.url_concat(create_url(FILESTATS_URL_FMT), params)
  raise tornado.gen.Return((yield fetch_url_as_json(url)))
def fetch(self, cluster, metric, topology, component, instance, timerange, environ=None):
  ''' Collect a metric timeline across one component, or all of them for "*".

  :param cluster: cluster name
  :param metric: metric display name
  :param topology: topology name
  :param component: component name, or "*" for all components
  :param instance: instance filter for the query
  :param timerange: 2-tuple (start, end)
  :param environ: environment name
  :return: combined metric response (via tornado.gen.Return)
  '''
  if component == "*":
    components = yield get_comps(cluster, environ, topology)
  else:
    components = [component]
  # fire one query per component, then gather all of them
  futures = [get_metrics(cluster, environ, topology, timerange,
                         self.get_query(metric, comp, instance))
             for comp in components]
  results = yield futures
  timelines = []
  for res in results:
    timelines.extend(res["timeline"])
  raise tornado.gen.Return(self.get_metric_response(timerange, timelines, False))
def fetch_max(self, cluster, metric, topology, component, instance, timerange, environ=None):
  ''' Collect the per-timestamp maximum of a metric across components.

  :param cluster: cluster name
  :param metric: metric display name
  :param topology: topology name
  :param component: component name, or "*" for all components
  :param instance: instance filter for the query
  :param timerange: 2-tuple (start, end)
  :param environ: environment name
  :return: max-aggregated metric response (via tornado.gen.Return)
  '''
  components = [component] if component != "*" else (yield get_comps(cluster, environ, topology))
  futures = []
  for comp in components:
    query = self.get_query(metric, comp, instance)
    max_query = "MAX(%s)" % query
    futures.append(get_metrics(cluster, environ, topology, timerange, max_query))
  results = yield futures
  data = self.compute_max(results)
  # removed a dead `result = {}` pre-assignment that was always overwritten
  result = self.get_metric_response(timerange, data, True)
  raise tornado.gen.Return(result)
def fetch_backpressure(self, cluster, metric, topology, component, \
                       instance, timerange, is_max, environ=None):
  ''' Collect backpressure metrics per instance, optionally max-aggregated.

  :param cluster: cluster name
  :param metric: metric display name (key into `queries`)
  :param topology: topology name
  :param component: component name, or "*" for all instances
  :param instance: unused here; instances come from the physical plan
  :param timerange: 2-tuple (start, end)
  :param is_max: if True, aggregate with compute_max
  :param environ: environment name
  :return: metric response (via tornado.gen.Return)
  '''
  instances = yield get_instances(cluster, environ, topology)
  if component != "*":
    # instance names embed the component name as the third "_" field
    filtered_inst = [instance for instance in instances
                     if instance.split("_")[2] == component]
  else:
    filtered_inst = instances
  futures_dict = {}
  for inst in filtered_inst:
    query = queries.get(metric).format(inst)
    futures_dict[inst] = get_metrics(cluster, environ, topology, timerange, query)
  res = yield futures_dict
  if not is_max:
    timelines = []
    for key in res:
      result = res[key]
      # Replacing stream manager instance name with component instance name
      if len(result["timeline"]) > 0:
        result["timeline"][0]["instance"] = key
      timelines.extend(result["timeline"])
    result = self.get_metric_response(timerange, timelines, is_max)
  else:
    # list() so compute_max can index [0] on py3 (dict views aren't indexable)
    data = self.compute_max(list(res.values()))
    result = self.get_metric_response(timerange, data, is_max)
  raise tornado.gen.Return(result)
def compute_max(self, multi_ts):
  ''' Merge several single-entry timelines, taking the per-timestamp maximum.

  :param multi_ts: sequence of results shaped result["timeline"][0]["data"]
  :return: dict mapping timestamp -> max value across all series ({} if empty)
  '''
  if not multi_ts or not multi_ts[0]["timeline"]:
    return {}
  # timestamps are taken from the first series; every series is assumed to
  # carry the same keys
  keys = multi_ts[0]["timeline"][0]["data"].keys()
  merged = {}
  for key in keys:
    merged[key] = max(series["timeline"][0]["data"][key] for series in multi_ts)
  return merged
def get_metric_response(self, timerange, data, isMax):
  ''' Package timeline data into the standard success response shape.

  :param timerange: 2-tuple (start, end)
  :param data: aggregated dict when isMax, otherwise a list of timelines
  :param isMax: whether data is a max-aggregated dict
  :return: response dict
  '''
  timeline = [dict(data=data)] if isMax else data
  return dict(
      status="success",
      starttime=timerange[0],
      endtime=timerange[1],
      result=dict(timeline=timeline)
  )
def get_query(self, metric, component, instance):
  ''' Build the metrics query string for a component/instance pair.

  :param metric: metric display name (key into the module-level `queries`)
  :param component: component name substituted into the template
  :param instance: instance name substituted into the template
  :return: formatted query string
  '''
  template = queries.get(metric)
  return template.format(component, instance)
def to_table(result):
  ''' Flatten {role: {env: [topology, ...]}} into table rows, capped at 20.

  :param result: nested dict of role -> env -> list of topology names
  :return: (table, header, rest_count) where rest_count is how many rows
           were dropped beyond the cap
  '''
  max_count = 20
  table = []
  count = 0
  for role, envs_topos in result.items():
    for env, topos in envs_topos.items():
      for topo in topos:
        count += 1
        # keep counting past the cap so rest_count stays accurate
        if count <= max_count:
          table.append([role, env, topo])
  header = ['role', 'env', 'topology']
  rest_count = max(0, count - max_count)
  return table, header, rest_count
def show_cluster(cl_args, cluster):
  ''' Print the topologies running in a cluster; return True on success '''
  try:
    all_topologies = tracker_access.get_cluster_topologies(cluster)
    if not all_topologies:
      Log.error('No topologies in cluster \'%s\'' % cluster)
      return False
    # a missing cluster key is treated the same as a tracker failure
    cluster_topologies = all_topologies[cluster]
  except Exception:
    Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"])
    return False
  table, header, rest_count = to_table(cluster_topologies)
  print('Topologies running in cluster \'%s\'' % cluster)
  if rest_count:
    print(' with %d more...' % rest_count)
  print(tabulate(table, headers=header))
  return True
def show_cluster_role(cl_args, cluster, role):
  ''' Print the topologies a role is running in a cluster; True on success '''
  try:
    all_topologies = tracker_access.get_cluster_role_topologies(cluster, role)
    if not all_topologies:
      Log.error('Unknown cluster/role \'%s\'' % '/'.join([cluster, role]))
      return False
    # a missing cluster key is treated the same as a tracker failure
    cluster_topologies = all_topologies[cluster]
  except Exception:
    Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"])
    return False
  table, header, rest_count = to_table(cluster_topologies)
  print('Topologies running in cluster \'%s\' submitted by \'%s\':' % (cluster, role))
  if rest_count:
    print(' with %d more...' % rest_count)
  print(tabulate(table, headers=header))
  return True
def show_cluster_role_env(cl_args, cluster, role, env):
  ''' Print the topologies for a cluster/role/env triple; True on success '''
  try:
    all_topologies = tracker_access.get_cluster_role_env_topologies(cluster, role, env)
    if not all_topologies:
      Log.error('Unknown cluster/role/env \'%s\'' % '/'.join([cluster, role, env]))
      return False
    # a missing cluster key is treated the same as a tracker failure
    cluster_topologies = all_topologies[cluster]
  except Exception:
    Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"])
    return False
  table, header, rest_count = to_table(cluster_topologies)
  print('Topologies running in cluster \'%s\', submitted by \'%s\', and'
        ' under environment \'%s\':' % (cluster, role, env))
  if rest_count:
    print(' with %d more...' % rest_count)
  print(tabulate(table, headers=header))
  return True
def run(command, parser, cl_args, unknown_args):
  """ Dispatch to the right show_* handler based on cluster/[role]/[env] """
  location = cl_args['cluster/[role]/[env]'].split('/')
  # choose handler by how many path components were supplied
  handlers = {1: show_cluster, 2: show_cluster_role, 3: show_cluster_role_env}
  handler = handlers.get(len(location))
  if handler is None:
    Log.error('Invalid topologies selection')
    return False
  return handler(cl_args, *location)
def heron_class(class_name, lib_jars, extra_jars=None, args=None, java_defines=None):
  ''' Execute a heron class given the args and the jars needed for class path

  :param class_name: fully qualified name of the Java class to run
  :param lib_jars: jars that always go on the classpath
  :param extra_jars: extra classpath entries (default: none)
  :param args: arguments for the Java class (default: none)
  :param java_defines: values for java -D options (default: none)
  :return: ProcessResult wrapping the spawned JVM
  '''
  extra_jars = [] if extra_jars is None else extra_jars
  args = [] if args is None else args
  java_defines = [] if java_defines is None else java_defines

  # -D options for the JVM
  java_opts = ['-D' + opt for opt in java_defines]

  # java opts must come before -cp and the class name on the command line
  cmd = [config.get_java_path(), "-client", "-Xmx1g"]
  cmd += java_opts
  cmd += ["-cp", config.get_classpath(extra_jars + lib_jars)]
  cmd += [class_name]
  cmd += list(args)

  # heron config is passed through the environment
  heron_env = os.environ.copy()
  heron_env['HERON_OPTIONS'] = opts.get_heron_config()

  Log.debug("Invoking class using command: ``%s''", ' '.join(cmd))
  Log.debug("Heron options: {%s}", str(heron_env["HERON_OPTIONS"]))

  # stdout carries what the Java program sends back; stderr carries extra
  # diagnostics such as debugging messages
  process = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, bufsize=1)
  return ProcessResult(process)
def heron_tar(class_name, topology_tar, arguments, tmpdir_root, java_defines):
  ''' Extract a topology tar and run class_name with its jars on the classpath.

  :param class_name: fully qualified name of the Java class to run
  :param topology_tar: path to the topology tar file
  :param arguments: arguments for the Java class
  :param tmpdir_root: directory under which to create the scratch dir
  :param java_defines: values for java -D options
  :return: result of heron_class
  '''
  # unpack into a scratch directory under tmpdir_root
  tmpdir = tempfile.mkdtemp(dir=tmpdir_root, prefix='tmp')
  with contextlib.closing(tarfile.open(topology_tar)) as tar:
    tar.extractall(path=tmpdir)

  # pants keeps the jar and tar filenames in sync (extension aside);
  # dependency jars live under libs/
  base = os.path.basename(topology_tar).replace(".tar.gz", "").replace(".tar", "")
  topology_jar = base + ".jar"

  extra_jars = [
      os.path.join(tmpdir, topology_jar),
      os.path.join(tmpdir, "*"),
      os.path.join(tmpdir, "libs/*")
  ]
  lib_jars = config.get_heron_libs(jars.topology_jars())
  return heron_class(class_name, lib_jars, extra_jars, arguments, java_defines)
def get(self, cluster, environ, topology, comp_name):
  ''' Aggregate exception counts by class name for one or all components.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param comp_name: component name, or "All" for every spout and bolt
  :return: writes {status, executiontime, result} (or {} if no logical plan)
  '''
  start_time = time.time()
  if comp_name == "All":
    lplan = yield access.get_logical_plan(cluster, environ, topology)
    if not lplan:
      self.write(dict())
      return
    if not 'spouts' in lplan or not 'bolts' in lplan:
      self.write(dict())
      return
    # list() so extend() works on py3, where keys() returns a view
    comp_names = list(lplan['spouts'].keys())
    comp_names.extend(lplan['bolts'].keys())
  else:
    comp_names = [comp_name]
  exception_infos = dict()
  for comp_name in comp_names:
    exception_infos[comp_name] = yield access.get_component_exceptionsummary(
        cluster, environ, topology, comp_name)
  # Combine exceptions from multiple components
  aggregate_exceptions = dict()
  for comp_name, exception_logs in exception_infos.items():
    for exception_log in exception_logs:
      class_name = exception_log['class_name']
      if class_name != '':
        if not class_name in aggregate_exceptions:
          aggregate_exceptions[class_name] = 0
        aggregate_exceptions[class_name] += int(exception_log['count'])
  # Put the exception values in a table for the response
  aggregate_exceptions_table = []
  for key in aggregate_exceptions:
    aggregate_exceptions_table.append([key, str(aggregate_exceptions[key])])
  result = dict(
      status="success",
      executiontime=time.time() - start_time,
      result=aggregate_exceptions_table)
  self.write(result)
def get(self):
  '''
  Fetch all topology states and normalize fields for display.

  :return: writes {cluster: {environ: {topology: info}}}
  '''
  # get all the topologies from heron nest
  topologies = yield access.get_topologies_states()
  result = dict()
  # now convert some of the fields to be displayable
  for cluster, cluster_value in topologies.items():
    result[cluster] = dict()
    for environ, environ_value in cluster_value.items():
      result[cluster][environ] = dict()
      for topology, topology_value in environ_value.items():
        # skip entries with no job name
        if "jobname" not in topology_value or topology_value["jobname"] is None:
          continue
        # default submission_time for display (the old code reassigned the
        # existing value to itself, a no-op)
        if "submission_time" not in topology_value:
          topology_value["submission_time"] = '-'
        result[cluster][environ][topology] = topology_value
  self.write(result)
def get(self, cluster, environ, topology):
  ''' Write the logical plan of a topology as a success response.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :return: writes {status, message, version, executiontime, result}
  '''
  start_time = time.time()
  lplan = yield access.get_logical_plan(cluster, environ, topology)
  self.write(dict(
      status="success",
      message="",
      version=common.VERSION,
      executiontime=time.time() - start_time,
      result=lplan
  ))
def get(self, cluster, environ, topology):
  ''' Write the physical plan of a topology as a success response.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :return: writes {status, message, version, executiontime, result}
  '''
  start_time = time.time()
  pplan = yield access.get_physical_plan(cluster, environ, topology)
  self.write(dict(
      status="success",
      message="",
      version=common.VERSION,
      executiontime=time.time() - start_time,
      result=pplan
  ))
def get(self, cluster, environ, topology, component):
  ''' Write the exceptions of a component as a JSON string.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param component: component name
  :return: writes JSON {status, executiontime, result}
  '''
  start_time = time.time()
  exceptions = yield access.get_component_exceptions(cluster, environ, topology, component)
  payload = dict(
      status='success',
      executiontime=time.time() - start_time,
      result=exceptions)
  self.write(json.dumps(payload))
def get(self, cluster, environ, topology, instance):
  ''' Render the pid command output of an instance as preformatted HTML.

  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param instance: instance name
  :return: writes escaped HTML with the command and its stdout
  '''
  pplan = yield access.get_physical_plan(cluster, environ, topology)
  # host is looked up via the instance's stream manager
  stmgr_id = pplan['instances'][instance]['stmgrId']
  host = pplan['stmgrs'][stmgr_id]['host']
  result = json.loads((yield access.get_instance_pid(
      cluster, environ, topology, instance)))
  self.write('<pre><br/>$%s>: %s<br/><br/>%s</pre>' % (
      host,
      tornado.escape.xhtml_escape(result['command']),
      tornado.escape.xhtml_escape(result['stdout'])))
def add_context(self, err_context, succ_context=None):
  """ Record context messages to prepend to later output.

  The old docstring documented a nonexistent ``pmsg`` parameter.

  :param err_context: context info shown on failure
  :param succ_context: context info shown on success (optional)
  :return: None
  """
  self.err_context = err_context
  self.succ_context = succ_context
def renderProcessStdErr(self, stderr_line):
  """ render stderr of shelled-out process

  stderr could be the error message of a failed invocation, or ordinary
  stderr output from a successfully shelled-out process. In the first case
  ``Popen'' fails fast, so the return code is available immediately and we
  log the failure; in the second case we simply echo the line to stderr.
  Handling the first case this way is shaky, but it is the best we can do
  given conflicting design goals.

  :param stderr_line: one line from shelled-out process
  :return:
  """
  retcode = self.process.poll()
  invocation_failed = retcode is not None and status_type(retcode) == Status.InvocationError
  if invocation_failed:
    self._do_log(Log.error, stderr_line)
  else:
    self._do_print(sys.stderr, stderr_line)
def renderProcessStdOut(self, stdout):
  """ render stdout of shelled-out process

  stdout always contains information the Java process wants to propagate
  back to the cli, so it gets special rendering based on process status.

  :param stdout: all lines from shelled-out process
  :return:
  """
  # status must already be set before stdout can be rendered
  assert self.status is not None
  if self.status == Status.Ok:
    self._do_log(Log.info, stdout)
  elif self.status == Status.HeronError:
    # logging appends its own newline
    self._do_log(Log.error, stdout)
  elif self.status in (Status.DryRun, Status.InvocationError):
    # dry-run / invocation-error output is shown clean, without [INFO] prefix
    self._do_print(sys.stdout, stdout)
  else:
    raise RuntimeError(
        "Unknown status type of value %d. Expected value: %s" % \
        (self.status.value, list(Status)))
def is_host_port_reachable(self):
  """ Return True if any (host, port) in self.hostportlist accepts a TCP
  connection; otherwise False. Some hosts may only be reachable through a
  tunnel, in which case this returns False.
  """
  for hostport in self.hostportlist:
    try:
      # close the probe connection instead of leaking the socket
      sock = socket.create_connection(hostport, StateManager.TIMEOUT_SECONDS)
      sock.close()
      return True
    # narrowed from a bare except: connection failures raise socket.error
    # (and its subclasses such as timeout and gaierror)
    except socket.error:
      LOG.info("StateManager %s Unable to connect to host: %s port %i"
               % (self.name, hostport[0], hostport[1]))
      continue
  return False
def pick_unused_port(self):
  """ Ask the OS for a currently-unused TCP port.

  There is a slight race: the port may be taken again before it is used.
  """
  probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  probe.bind(('127.0.0.1', 0))
  # bind to port 0 lets the kernel choose a free port; read it back
  port = probe.getsockname()[1]
  probe.close()
  return port
def establish_ssh_tunnel(self):
  """ Open an ssh tunnel per (host, port) in self.hostportlist and return
  the local (host, port) endpoints that forward to them.
  """
  local_endpoints = []
  for (host, port) in self.hostportlist:
    localport = self.pick_unused_port()
    # -N: no remote command; -L: forward 127.0.0.1:localport -> host:port
    forward_spec = '-NL127.0.0.1:%d:%s:%d' % (localport, host, port)
    self.tunnel.append(subprocess.Popen(('ssh', self.tunnelhost, forward_spec)))
    local_endpoints.append(('127.0.0.1', localport))
  return local_endpoints
def delete_topology_from_zk(self, topologyName):
  """
  Removes all stored state for a topology, in this order:
  1. physical plan, 2. execution state, 3. the topology entry itself.

  NOTE(review): despite the "_zk" suffix, the backing store here may be
  the local filesystem rather than ZooKeeper -- confirm against the
  enclosing class.
  """
  self.delete_pplan(topologyName)
  self.delete_execution_state(topologyName)
  self.delete_topology(topologyName)
def monitor(self):
  """
  Monitor the rootpath and call the callback
  corresponding to the change.
  This monitoring happens periodically. This function is called in a
  seperate thread from the main thread, because it sleeps
  for the intervals between each poll.
  """
  def trigger_watches_based_on_files(watchers, path, directory, ProtoClass):
    """
    For all the topologies in the watchers, check if the data
    in directory has changed. Trigger the callback if it has.
    """
    for topology, callbacks in watchers.items():
      file_path = os.path.join(path, topology)
      data = ""
      if os.path.exists(file_path):
        with open(os.path.join(path, topology)) as f:
          data = f.read()
      # fire callbacks only when the topology is new or its raw data changed
      if topology not in directory or data != directory[topology]:
        proto_object = ProtoClass()
        # NOTE(review): file is opened in text mode but ParseFromString
        # expects bytes on Python 3 -- confirm intended Python version
        proto_object.ParseFromString(data)
        for callback in callbacks:
          callback(proto_object)
        # cache the raw data so an unchanged file doesn't re-trigger
        directory[topology] = data

  while not self.monitoring_thread_stop_signal:
    topologies_path = self.get_topologies_path()
    topologies = []
    if os.path.isdir(topologies_path):
      # only regular files in the topologies dir count as topologies
      topologies = list(filter(
          lambda f: os.path.isfile(os.path.join(topologies_path, f)),
          os.listdir(topologies_path)))
    # notify list watchers when the set of topologies changed
    if set(topologies) != set(self.topologies_directory):
      for callback in self.topologies_watchers:
        callback(topologies)
      self.topologies_directory = topologies

    trigger_watches_based_on_files(
        self.topology_watchers, topologies_path, self.topologies_directory, Topology)

    # Get the directory name for execution state
    execution_state_path = os.path.dirname(self.get_execution_state_path(""))
    trigger_watches_based_on_files(
        self.execution_state_watchers, execution_state_path,
        self.execution_state_directory, ExecutionState)

    # Get the directory name for packing_plan
    packing_plan_path = os.path.dirname(self.get_packing_plan_path(""))
    trigger_watches_based_on_files(
        self.packing_plan_watchers, packing_plan_path,
        self.packing_plan_directory, PackingPlan)

    # Get the directory name for pplan
    pplan_path = os.path.dirname(self.get_pplan_path(""))
    trigger_watches_based_on_files(
        self.pplan_watchers, pplan_path, self.pplan_directory, PhysicalPlan)

    # Get the directory name for tmaster
    tmaster_path = os.path.dirname(self.get_tmaster_path(""))
    trigger_watches_based_on_files(
        self.tmaster_watchers, tmaster_path, self.tmaster_directory, TMasterLocation)

    # Get the directory name for scheduler location
    scheduler_location_path = os.path.dirname(self.get_scheduler_location_path(""))
    trigger_watches_based_on_files(
        self.scheduler_location_watchers, scheduler_location_path,
        self.scheduler_location_directory, SchedulerLocation)

    # Sleep for some time
    self.event.wait(timeout=5)
def get_topologies(self, callback=None):
  """Get the list of topology names.

  :param callback: if given, it is registered as a watcher and nothing
                   is returned
  :return: list of topology names when no callback is given
  """
  if callback:
    self.topologies_watchers.append(callback)
    return
  topologies_path = self.get_topologies_path()
  # CONSISTENCY FIX: materialize to a list so callers get a reusable
  # sequence on Python 3 (bare ``filter`` is a one-shot iterator), which
  # matches the list(filter(...)) usage in monitor()
  return [f for f in os.listdir(topologies_path)
          if os.path.isfile(os.path.join(topologies_path, f))]
def get_topology(self, topologyName, callback=None):
  """Get the Topology protobuf of a topology.

  :param topologyName: name of the topology
  :param callback: if given, registered as a watcher instead of reading
  :return: parsed ``Topology`` protobuf when no callback is given
  """
  if callback:
    self.topology_watchers[topologyName].append(callback)
    return
  topology_path = self.get_topology_path(topologyName)
  # BUG FIX: protobuf ParseFromString requires bytes on Python 3,
  # so the file must be read in binary mode
  with open(topology_path, 'rb') as f:
    data = f.read()
  topology = Topology()
  topology.ParseFromString(data)
  return topology
def get_packing_plan(self, topologyName, callback=None):
  """Get the packing plan of a topology.

  :param topologyName: name of the topology
  :param callback: if given, registered as a watcher instead of reading
  :return: parsed ``PackingPlan`` protobuf when no callback is given
  """
  if callback:
    self.packing_plan_watchers[topologyName].append(callback)
    return
  packing_plan_path = self.get_packing_plan_path(topologyName)
  # BUG FIX: protobuf ParseFromString requires bytes on Python 3,
  # so the file must be read in binary mode
  with open(packing_plan_path, 'rb') as f:
    data = f.read()
  packing_plan = PackingPlan()
  packing_plan.ParseFromString(data)
  # BUG FIX: the parsed plan was previously dropped (missing return)
  return packing_plan
def get_pplan(self, topologyName, callback=None):
  """Get the physical plan of a topology.

  :param topologyName: name of the topology
  :param callback: if given, registered as a watcher instead of reading
  :return: parsed ``PhysicalPlan`` protobuf when no callback is given
  """
  if callback:
    self.pplan_watchers[topologyName].append(callback)
    return
  pplan_path = self.get_pplan_path(topologyName)
  # BUG FIX: protobuf ParseFromString requires bytes on Python 3,
  # so the file must be read in binary mode
  with open(pplan_path, 'rb') as f:
    data = f.read()
  pplan = PhysicalPlan()
  pplan.ParseFromString(data)
  return pplan
def get_execution_state(self, topologyName, callback=None):
  """Get the execution state of a topology.

  :param topologyName: name of the topology
  :param callback: if given, registered as a watcher instead of reading
  :return: parsed ``ExecutionState`` protobuf when no callback is given
  """
  if callback:
    self.execution_state_watchers[topologyName].append(callback)
    return
  execution_state_path = self.get_execution_state_path(topologyName)
  # BUG FIX: protobuf ParseFromString requires bytes on Python 3,
  # so the file must be read in binary mode
  with open(execution_state_path, 'rb') as f:
    data = f.read()
  executionState = ExecutionState()
  executionState.ParseFromString(data)
  return executionState
def get_tmaster(self, topologyName, callback=None):
  """Get the tmaster location of a topology.

  :param topologyName: name of the topology
  :param callback: if given, registered as a watcher instead of reading
  :return: parsed ``TMasterLocation`` protobuf when no callback is given
  """
  if callback:
    self.tmaster_watchers[topologyName].append(callback)
    return
  tmaster_path = self.get_tmaster_path(topologyName)
  # BUG FIX: protobuf ParseFromString requires bytes on Python 3,
  # so the file must be read in binary mode
  with open(tmaster_path, 'rb') as f:
    data = f.read()
  tmaster = TMasterLocation()
  tmaster.ParseFromString(data)
  return tmaster
def get_scheduler_location(self, topologyName, callback=None):
  """Get the scheduler location of a topology.

  :param topologyName: name of the topology
  :param callback: if given, registered as a watcher instead of reading
  :return: parsed ``SchedulerLocation`` protobuf when no callback is given
  """
  if callback:
    self.scheduler_location_watchers[topologyName].append(callback)
    return
  scheduler_location_path = self.get_scheduler_location_path(topologyName)
  # BUG FIX: protobuf ParseFromString requires bytes on Python 3,
  # so the file must be read in binary mode
  with open(scheduler_location_path, 'rb') as f:
    data = f.read()
  scheduler_location = SchedulerLocation()
  scheduler_location.ParseFromString(data)
  return scheduler_location
def get(self, pid):
  """Serve the output of ``jmap -histo <pid>`` as a JSON response.

  :param pid: process id to inspect
  :return: None; the response is written back to the client
  """
  histo_output = utils.str_cmd(['jmap', '-histo', pid], None, None)
  self.content_type = 'application/json'
  self.write(json.dumps(histo_output))
  self.finish()
def create_socket_options():
  """Create a SocketOptions object from the sys_config map.

  :return: SocketOptions built from the six instance-network settings
  :raises ValueError: if a configured value cannot be converted to int
  :raises KeyError: if a required option is missing from sys_config
  """
  sys_config = system_config.get_sys_config()
  option_keys = (const.INSTANCE_NETWORK_WRITE_BATCH_SIZE_BYTES,
                 const.INSTANCE_NETWORK_WRITE_BATCH_TIME_MS,
                 const.INSTANCE_NETWORK_READ_BATCH_SIZE_BYTES,
                 const.INSTANCE_NETWORK_READ_BATCH_TIME_MS,
                 const.INSTANCE_NETWORK_OPTIONS_SOCKET_RECEIVED_BUFFER_SIZE_BYTES,
                 const.INSTANCE_NETWORK_OPTIONS_SOCKET_SEND_BUFFER_SIZE_BYTES)
  Log.debug("In create_socket_options()")
  try:
    return SocketOptions(*(int(sys_config[key]) for key in option_keys))
  except ValueError as e:
    # couldn't convert to int
    raise ValueError("Invalid value in sys_config: %s" % str(e))
  except KeyError as e:
    # option key was not found
    raise KeyError("Incomplete sys_config: %s" % str(e))
def class_dict_to_specs(mcs, class_dict):
  """Extract ``HeronComponentSpec`` entries from a class ``__dict__``.

  :param class_dict: the ``__dict__`` of a Topology subclass
  :return: dict mapping spec name -> HeronComponentSpec
  :raises ValueError: if two specs end up with the same name
  """
  specs = {}
  for attr_name, candidate in class_dict.items():
    if not isinstance(candidate, HeronComponentSpec):
      continue
    # an unnamed spec takes its attribute name as the specification name
    if candidate.name is None:
      candidate.name = attr_name
    if candidate.name in specs:
      raise ValueError("Duplicate component name: %s" % candidate.name)
    specs[candidate.name] = candidate
  return specs
def class_dict_to_topo_config(mcs, class_dict):
  """Build the topology-wide configuration from a class ``__dict__``.

  Starts from ``mcs.DEFAULT_TOPOLOGY_CONFIG`` and overrides it with the
  sanitized ``config`` dict found in the class, if any. The result is a
  sanitized dict of type <str -> (str|object)>. Note that this
  configuration will still be overriden by component-specific
  configuration at runtime.

  :param class_dict: the ``__dict__`` of a Topology subclass
  :return: sanitized topology-wide configuration dict
  """
  # defaults first, user overrides second
  topo_config = dict(mcs.DEFAULT_TOPOLOGY_CONFIG)
  user_config = class_dict.get('config')
  if isinstance(user_config, dict):
    topo_config.update(mcs._sanitize_config(user_config))
  return topo_config
def init_topology(mcs, classname, class_dict):
  """Initializes a topology protobuf and stashes it in ``class_dict``.

  Reads deployment options from the HERON_OPTIONS environment variable,
  builds the Topology protobuf from the class's specs, and records the
  results (name, id, protobuf, tmp dir, options) back into ``class_dict``
  for the metaclass to expose as class attributes.

  :raises RuntimeError: if the defn temp directory option is missing
  """
  if classname == 'Topology':
    # Base class can't initialize protobuf
    return
  heron_options = TopologyType.get_heron_options_from_env()
  initial_state = heron_options.get("cmdline.topology.initial.state", "RUNNING")
  tmp_directory = heron_options.get("cmdline.topologydefn.tmpdirectory")
  if tmp_directory is None:
    raise RuntimeError("Topology definition temp directory not specified")
  # topology name defaults to the class name; id gets a uuid suffix
  topology_name = heron_options.get("cmdline.topology.name", classname)
  topology_id = topology_name + str(uuid.uuid4())
  # create protobuf
  topology = topology_pb2.Topology()
  topology.id = topology_id
  topology.name = topology_name
  topology.state = topology_pb2.TopologyState.Value(initial_state)
  topology.topology_config.CopyFrom(TopologyType.get_topology_config_protobuf(class_dict))
  TopologyType.add_bolts_and_spouts(topology, class_dict)
  # expose results via class attributes of the topology class being built
  class_dict['topology_name'] = topology_name
  class_dict['topology_id'] = topology_id
  class_dict['protobuf_topology'] = topology
  class_dict['topologydefn_tmpdir'] = tmp_directory
  class_dict['heron_runtime_options'] = heron_options
def get_heron_options_from_env():
  """Retrieve heron options from the ``HERON_OPTIONS`` environment variable.

  Heron options have the following format:

      cmdline.topologydefn.tmpdirectory=/var/folders/tmpdir
      cmdline.topology.initial.state=PAUSED

  In this case, the returned map will contain:

      #!json
      {"cmdline.topologydefn.tmpdirectory": "/var/folders/tmpdir",
       "cmdline.topology.initial.state": "PAUSED"}

  Currently supports the following options natively:

  - `cmdline.topologydefn.tmpdirectory`: (required) the directory to which
    this topology's defn file is written
  - `cmdline.topology.initial.state`: (default: "RUNNING") the initial
    state of the topology
  - `cmdline.topology.name`: (default: class name) topology name on deployment

  :return: map from option key to value
  :raises RuntimeError: if ``HERON_OPTIONS`` is not set
  :raises ValueError: if an entry is not of the form ``key=value``
  """
  raw = os.environ.get("HERON_OPTIONS")
  if raw is None:
    raise RuntimeError("HERON_OPTIONS environment variable not found")
  options = {}
  # "%%%%" is the CLI's escape sequence for a space character
  for entry in raw.replace("%%%%", " ").split(','):
    key, sep, value = entry.partition("=")
    if not sep:
      raise ValueError("Invalid HERON_OPTIONS part %r" % entry)
    options[key] = value
  return options
def add_spec(self, *specs):
  """Add specs to the topology

  :type specs: HeronComponentSpec
  :param specs: specs to add to the topology
  :raises TypeError: if an argument is not a HeronComponentSpec
  :raises ValueError: if a spec is unnamed, named "config", or a duplicate
  """
  for spec in specs:
    if not isinstance(spec, HeronComponentSpec):
      raise TypeError("Argument to add_spec needs to be HeronComponentSpec, given: %s"
                      % str(spec))
    name = spec.name
    if name is None:
      raise ValueError("TopologyBuilder cannot take a spec without name")
    if name == "config":
      # "config" would collide with the topology-wide config attribute
      raise ValueError("config is a reserved name")
    if name in self._specs:
      raise ValueError("Attempting to add duplicate spec name: %r %r" % (name, spec))
    self._specs[name] = spec
def add_spout(self, name, spout_cls, par, config=None, optional_outputs=None):
  """Add a spout to the topology and return its spec.

  :param name: name of the spout
  :param spout_cls: Spout subclass providing a ``spec`` factory
  :param par: parallelism of the spout
  :param config: optional component-specific configuration
  :param optional_outputs: optional additional output declarations
  :return: the HeronComponentSpec created for the spout
  """
  spec = spout_cls.spec(name=name, par=par, config=config,
                        optional_outputs=optional_outputs)
  self.add_spec(spec)
  return spec
def add_bolt(self, name, bolt_cls, par, inputs, config=None, optional_outputs=None):
  """Add a bolt to the topology and return its spec.

  :param name: name of the bolt
  :param bolt_cls: Bolt subclass providing a ``spec`` factory
  :param par: parallelism of the bolt
  :param inputs: input streams/specs this bolt consumes
  :param config: optional component-specific configuration
  :param optional_outputs: optional additional output declarations
  :return: the HeronComponentSpec created for the bolt
  """
  spec = bolt_cls.spec(name=name, par=par, inputs=inputs, config=config,
                       optional_outputs=optional_outputs)
  self.add_spec(spec)
  return spec
def set_config(self, config):
  """Set topology-wide configuration to the topology

  :type config: dict
  :param config: topology-wide config
  :raises TypeError: if ``config`` is not a dict
  """
  if isinstance(config, dict):
    self._topology_config = config
  else:
    raise TypeError("Argument to set_config needs to be dict, given: %s" % str(config))
def build_and_submit(self):
  """Builds the topology and submits to the destination

  Constructs the class dict from this builder's specs and config, creates
  a concrete Topology subclass through the TopologyType metaclass (which
  assembles the protobuf), and writes out the topology definition.
  """
  class_dict = self._construct_topo_class_dict()
  topo_cls = TopologyType(self.topology_name, (Topology,), class_dict)
  topo_cls.write()
def fetch_url_as_json(fetch_url, default_value=None):
  """Fetch the given url and convert the response to json.

  :param fetch_url: URL to fetch
  :param default_value: value to return in case of failure (default: {})
  :return: (via ``tornado.gen.Return``) the 'result' field of the
           response, or ``default_value`` on any error
  """
  # assign empty dict for optional param
  if default_value is None:
    default_value = dict()
  Log.debug("fetching url %s", fetch_url)
  ret = default_value
  # time the duration of the fetch
  start = time.time()
  # fetch the URL asynchronously
  http_response = yield tornado.httpclient.AsyncHTTPClient().fetch(fetch_url)
  # handle http errors, and return if any
  if http_response.error:
    Log.error("Unable to get response from %s. Error %s", fetch_url, http_response.error)
    raise tornado.gen.Return(ret)
  # load response and handle return errors, if any
  response = json.loads(http_response.body)
  # IDIOM FIX: 'x not in y' instead of 'not x in y'
  if 'result' not in response:
    Log.error("Empty response from %s", fetch_url)
    raise tornado.gen.Return(ret)
  # get the response and execution time on server side
  ret = response['result']
  execution = 1000 * response['executiontime']
  # calculate the round-trip time
  end = time.time()
  duration = 1000 * (end - start)
  Log.debug("TIME: url fetch took %.2f ms server time %s", execution, fetch_url)
  Log.debug("TIME: url fetch took %.2f ms round trip %s", duration, fetch_url)
  # convert future to value
  raise tornado.gen.Return(ret)
def create_parser(subparsers):
  """Create and register the 'version' subcommand parser.

  :param subparsers: argparse subparsers object to attach to
  :return: the newly created parser
  """
  parser = subparsers.add_parser(
      'version',
      help='Display version',
      usage="%(prog)s",
      add_help=False)
  args.add_titles(parser)
  parser.set_defaults(subcommand='version')
  return parser
def queries_map():
  """Map from query parameter to query name.

  :return: dict combining both groups of metric queries
  """
  qs = _all_metric_queries()
  # BUG FIX: on Python 3, zip() returns an iterator, so the two zips can
  # no longer be concatenated with "+"; build one dict and update it
  result = dict(zip(qs[0], qs[1]))
  result.update(zip(qs[2], qs[3]))
  return result
def get_clusters():
  """Synced API call to get all cluster names"""
  loop = tornado.ioloop.IOLoop.instance()
  try:
    # pylint: disable=unnecessary-lambda
    return loop.run_sync(lambda: API.get_clusters())
  except Exception:
    # log the full traceback before letting the caller handle the error
    Log.debug(traceback.format_exc())
    raise
def get_logical_plan(cluster, env, topology, role):
  """Synced API call to get logical plans"""
  loop = tornado.ioloop.IOLoop.instance()
  try:
    return loop.run_sync(lambda: API.get_logical_plan(cluster, env, topology, role))
  except Exception:
    # log the full traceback before letting the caller handle the error
    Log.debug(traceback.format_exc())
    raise
def get_topology_info(*args):
  """Synced API call to get topology information"""
  loop = tornado.ioloop.IOLoop.instance()
  try:
    return loop.run_sync(lambda: API.get_topology_info(*args))
  except Exception:
    # log the full traceback before letting the caller handle the error
    Log.debug(traceback.format_exc())
    raise
def get_component_metrics(component, cluster, env, topology, role):
  """Synced API call to get component metrics"""
  all_queries = metric_queries()
  try:
    topo_metrics = get_topology_metrics(cluster, env, topology, component,
                                        [], all_queries, [0, -1], role)
    return topo_metrics["metrics"]
  except Exception:
    # log the full traceback before letting the caller handle the error
    Log.debug(traceback.format_exc())
    raise
def configure(level=logging.INFO, logfile=None):
  """ Configure logger which dumps log on terminal

  :param level: logging level: info, warning, verbose...
  :type level: logging level
  :param logfile: log file name, default to None
  :type logfile: string
  :return: None
  :rtype: None
  """
  # BUG FIX: removing items from the list being iterated skips elements;
  # iterate over a copy so every existing StreamHandler is really removed
  for handler in Log.handlers[:]:
    if isinstance(handler, logging.StreamHandler):
      Log.handlers.remove(handler)
  Log.setLevel(level)
  # if logfile is specified, FileHandler is used
  if logfile is not None:
    log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
    formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
    file_handler = logging.FileHandler(logfile)
    file_handler.setFormatter(formatter)
    Log.addHandler(file_handler)
  # otherwise, use StreamHandler to output to stream (stdout, stderr...)
  else:
    log_format = "[%(asctime)s] %(log_color)s[%(levelname)s]%(reset)s: %(message)s"
    # pylint: disable=redefined-variable-type
    formatter = colorlog.ColoredFormatter(fmt=log_format, datefmt=date_format)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    Log.addHandler(stream_handler)
def init_rotating_logger(level, logfile, max_files, max_bytes):
  """Initializes a rotating logger

  It also makes sure that any StreamHandler is removed, so as to avoid
  stdout/stderr constipation issues

  :param level: logging level for the root logger
  :param logfile: path of the rotating log file
  :param max_files: number of backup files to keep
  :param max_bytes: max size of each log file before rotation
  :return: None
  """
  logging.basicConfig()
  root_logger = logging.getLogger()
  log_format = "[%(asctime)s] [%(levelname)s] %(filename)s: %(message)s"
  root_logger.setLevel(level)
  handler = RotatingFileHandler(logfile, maxBytes=max_bytes, backupCount=max_files)
  handler.setFormatter(logging.Formatter(fmt=log_format, datefmt=date_format))
  root_logger.addHandler(handler)
  # BUG FIX: iterate over a copy -- removing from the list being iterated
  # skips handlers and could leave a StreamHandler attached
  for existing_handler in root_logger.handlers[:]:
    root_logger.debug("Associated handlers - " + str(existing_handler))
    if isinstance(existing_handler, logging.StreamHandler):
      root_logger.debug("Removing StreamHandler: " + str(existing_handler))
      root_logger.handlers.remove(existing_handler)
def set_logging_level(cl_args):
  """simply set verbose level based on command-line args

  :param cl_args: CLI arguments
  :type cl_args: dict
  :return: None
  :rtype: None
  """
  verbose = 'verbose' in cl_args and cl_args['verbose']
  configure(logging.DEBUG if verbose else logging.INFO)
def _get_spout(self):
  """Assemble and return the Spout protobuf message for this spec."""
  proto_spout = topology_pb2.Spout()
  proto_spout.comp.CopyFrom(self._get_base_component())
  # spouts only produce tuples, so only output streams are attached
  self._add_out_streams(proto_spout)
  return proto_spout
def _get_bolt(self):
  """Assemble and return the Bolt protobuf message for this spec."""
  proto_bolt = topology_pb2.Bolt()
  proto_bolt.comp.CopyFrom(self._get_base_component())
  # bolts both consume and produce tuples, so attach both stream kinds
  self._add_in_streams(proto_bolt)
  self._add_out_streams(proto_bolt)
  return proto_bolt
def _get_base_component(self):
  """Build the common Component protobuf shared by spouts and bolts."""
  component = topology_pb2.Component()
  component.name = self.name
  # Python components are referenced by their fully-qualified class path
  component.spec = topology_pb2.ComponentObjectSpec.Value("PYTHON_CLASS_NAME")
  component.class_name = self.python_class_path
  component.config.CopyFrom(self._get_comp_config())
  return component
def _get_comp_config(self):
  """Build the component-specific Config protobuf message.

  ``topology.component.parallelism`` is inserted first and may then be
  overridden by the user-defined component-specific configuration given
  via spec().
  """
  proto_config = topology_pb2.Config()
  # parallelism always goes in first
  parallelism_kv = proto_config.kvs.add()
  parallelism_kv.key = TOPOLOGY_COMPONENT_PARALLELISM
  parallelism_kv.value = str(self.parallelism)
  parallelism_kv.type = topology_pb2.ConfigValueType.Value("STRING_VALUE")
  # then the sanitized user-supplied configuration, if any
  if self.custom_config is not None:
    for conf_key, conf_value in self._sanitize_config(self.custom_config).items():
      kv = proto_config.kvs.add()
      kv.key = conf_key
      if isinstance(conf_value, str):
        kv.value = conf_value
        kv.type = topology_pb2.ConfigValueType.Value("STRING_VALUE")
      else:
        # non-string values need to be serialized before being stored
        kv.serialized_value = default_serializer.serialize(conf_value)
        kv.type = topology_pb2.ConfigValueType.Value("PYTHON_SERIALIZED_VALUE")
  return proto_config
def _sanitize_config(custom_config): """Checks whether ``custom_config`` is sane and returns a sanitized dict <str -> (str|object)> It checks if keys are all strings and sanitizes values of a given dictionary as follows: - If string, number or boolean is given as a value, it is converted to string. For string and number (int, float), it is converted to string by a built-in ``str()`` method. For a boolean value, ``True`` is converted to "true" instead of "True", and ``False`` is converted to "false" instead of "False", in order to keep the consistency with Java configuration. - If neither of the above is given as a value, it is inserted into the sanitized dict as it is. These values will need to be serialized before adding to a protobuf message. """ if not isinstance(custom_config, dict): raise TypeError("Component-specific configuration must be given as a dict type, given: %s" % str(type(custom_config))) sanitized = {} for key, value in custom_config.items(): if not isinstance(key, str): raise TypeError("Key for component-specific configuration must be string, given: %s:%s" % (str(type(key)), str(key))) if isinstance(value, bool): sanitized[key] = "true" if value else "false" elif isinstance(value, (str, int, float)): sanitized[key] = str(value) else: sanitized[key] = value return sanitized
def _add_in_streams(self, bolt):
  """Adds inputs to a given protobuf Bolt message.

  Each sanitized input (a <GlobalStreamId -> Grouping> pair) becomes one
  InputStream entry on the bolt, with grouping-specific fields filled in
  for FIELDS and CUSTOM groupings.
  """
  if self.inputs is None:
    return
  # sanitize inputs and get a map <GlobalStreamId -> Grouping>
  input_dict = self._sanitize_inputs()

  for global_streamid, gtype in input_dict.items():
    in_stream = bolt.inputs.add()
    in_stream.stream.CopyFrom(self._get_stream_id(global_streamid.component_id,
                                                  global_streamid.stream_id))
    if isinstance(gtype, Grouping.FIELDS):
      # it's a field grouping: also record the grouped-on fields
      in_stream.gtype = gtype.gtype
      in_stream.grouping_fields.CopyFrom(self._get_stream_schema(gtype.fields))
    elif isinstance(gtype, Grouping.CUSTOM):
      # it's a custom grouping: ship the serialized Python grouping object
      in_stream.gtype = gtype.gtype
      in_stream.custom_grouping_object = gtype.python_serialized
      in_stream.type = topology_pb2.CustomGroupingObjectType.Value("PYTHON_OBJECT")
    else:
      # plain grouping (SHUFFLE, ALL, ...): the enum value alone suffices
      in_stream.gtype = gtype
def _sanitize_inputs(self):
  """Sanitize input fields and return a map <GlobalStreamId -> Grouping>.

  Accepts either:
  - a dict <HeronComponentSpec|GlobalStreamId -> Grouping>, or
  - a list/tuple of HeronComponentSpec or GlobalStreamId, in which case
    SHUFFLE grouping is used for every input

  :return: dict <GlobalStreamId -> Grouping>; empty when inputs is None
  :raises ValueError: for unsupported keys, inputs, or groupings
  :raises TypeError: if inputs is neither dict, list, tuple, nor None
  """
  ret = {}
  if self.inputs is None:
    # CONSISTENCY FIX: return the empty dict rather than bare None so
    # every return path yields the same type
    return ret
  if isinstance(self.inputs, dict):
    # inputs are dictionary, must be either <HeronComponentSpec -> Grouping> or
    # <GlobalStreamId -> Grouping>
    for key, grouping in self.inputs.items():
      if not Grouping.is_grouping_sane(grouping):
        raise ValueError('A given grouping is not supported')
      if isinstance(key, HeronComponentSpec):
        if key.name is None:
          # should not happen as TopologyType metaclass sets name attribute
          # before calling this method
          raise RuntimeError("In _sanitize_inputs(): HeronComponentSpec doesn't have a name")
        # use default streamid
        global_streamid = GlobalStreamId(key.name, Stream.DEFAULT_STREAM_ID)
        ret[global_streamid] = grouping
      elif isinstance(key, GlobalStreamId):
        ret[key] = grouping
      else:
        raise ValueError("%s is not supported as a key to inputs" % str(key))
  elif isinstance(self.inputs, (list, tuple)):
    # inputs are lists, must be either a list of HeronComponentSpec or
    # GlobalStreamId; SHUFFLE grouping will be used
    for input_obj in self.inputs:
      if isinstance(input_obj, HeronComponentSpec):
        if input_obj.name is None:
          # should not happen as TopologyType metaclass sets name attribute
          # before calling this method
          raise RuntimeError("In _sanitize_inputs(): HeronComponentSpec doesn't have a name")
        global_streamid = GlobalStreamId(input_obj.name, Stream.DEFAULT_STREAM_ID)
        ret[global_streamid] = Grouping.SHUFFLE
      elif isinstance(input_obj, GlobalStreamId):
        ret[input_obj] = Grouping.SHUFFLE
      else:
        raise ValueError("%s is not supported as an input" % str(input_obj))
  else:
    raise TypeError("Inputs must be a list, dict, or None, given: %s" % str(self.inputs))
  return ret
def _add_out_streams(self, spbl):
  """Adds output stream declarations to a protobuf Bolt or Spout message."""
  if self.outputs is None:
    return
  # sanitized map <stream_id -> output fields>
  for stream_id, fields in self._sanitize_outputs().items():
    out_stream = spbl.outputs.add()
    out_stream.stream.CopyFrom(self._get_stream_id(self.name, stream_id))
    out_stream.schema.CopyFrom(self._get_stream_schema(fields))
def _sanitize_outputs(self):
  """Sanitize output fields and return a map <stream_id -> list of fields>.

  :return: dict mapping stream id to its list of output fields; empty
           when outputs is None
  :raises TypeError: if outputs is not a list/tuple of str or Stream
  """
  ret = {}
  if self.outputs is None:
    # CONSISTENCY FIX: return the empty dict rather than bare None so
    # every return path yields the same type
    return ret
  if not isinstance(self.outputs, (list, tuple)):
    raise TypeError("Argument to outputs must be either list or tuple, given: %s"
                    % str(type(self.outputs)))
  for output in self.outputs:
    if not isinstance(output, (str, Stream)):
      raise TypeError("Outputs must be a list of strings or Streams, given: %s" % str(output))
    if isinstance(output, str):
      # a plain string is a field on the default stream
      ret.setdefault(Stream.DEFAULT_STREAM_ID, []).append(output)
    elif output.stream_id == Stream.DEFAULT_STREAM_ID and Stream.DEFAULT_STREAM_ID in ret:
      # merge with default-stream fields already collected
      ret[Stream.DEFAULT_STREAM_ID].extend(output.fields)
    else:
      ret[output.stream_id] = output.fields
  return ret
def get_out_streamids(self):
  """Return the set of output stream ids registered for this component."""
  if self.outputs is None:
    return set()
  if not isinstance(self.outputs, (list, tuple)):
    raise TypeError("Argument to outputs must be either list or tuple, given: %s"
                    % str(type(self.outputs)))
  stream_ids = set()
  for output in self.outputs:
    if not isinstance(output, (str, Stream)):
      raise TypeError("Outputs must be a list of strings or Streams, given: %s" % str(output))
    # plain strings live on the default stream; Stream objects carry their own id
    stream_ids.add(Stream.DEFAULT_STREAM_ID if isinstance(output, str) else output.stream_id)
  return stream_ids
def _get_stream_id(comp_name, stream_id):
  """Build a StreamId protobuf message for the given component/stream pair."""
  sid = topology_pb2.StreamId()
  sid.id = stream_id
  sid.component_name = comp_name
  return sid
def _get_stream_schema(fields):
  """Build a StreamSchema protobuf message from a list of field names."""
  schema = topology_pb2.StreamSchema()
  for field_name in fields:
    schema_key = schema.keys.add()
    schema_key.key = field_name
    # every field is declared with the generic OBJECT type
    schema_key.type = topology_pb2.Type.Value("OBJECT")
  return schema
def component_id(self):
  """Returns component_id of this GlobalStreamId

  Note that if HeronComponentSpec is specified as componentId and its name is not
  yet available (i.e. when ``name`` argument was not given in ``spec()`` method in
  Bolt or Spout), this property returns a message with uuid. However, this is
  provided only for safety with __eq__(), __str__(), and __hash__() methods, and
  not meant to be called explicitly before TopologyType class finally sets the
  name attribute of HeronComponentSpec.
  """
  cid = self._component_id
  if isinstance(cid, HeronComponentSpec):
    if cid.name is not None:
      return cid.name
    # the name may not be set until the TopologyType metaclass assigns it;
    # return a uuid-based placeholder so __eq__/__hash__/__str__ stay safe
    # instead of raising
    return "<No name available for HeronComponentSpec yet, uuid: %s>" % cid.uuid
  if isinstance(cid, str):
    return cid
  raise ValueError("Component Id for this GlobalStreamId is not properly set: <%s:%s>"
                   % (str(type(cid)), str(cid)))
def write_error(self, status_code, **kwargs):
  """Render the error page for a failed request.

  :param status_code: HTTP status code of the failure
  :param kwargs: may contain ``exc_info`` describing the raised exception
  :return: None
  """
  if "exc_info" in kwargs:
    # include the exception value alongside the status code
    error = kwargs["exc_info"][1]
    errormessage = "%s: %s" % (status_code, error)
  else:
    errormessage = "%s" % (status_code)
  self.render("error.html", errormessage=errormessage)