def register_metric(self, name, metric, time_bucket_in_sec):
  """Registers a new metric to this context"""
  collector = self.get_metrics_collector()
  collector.register_metric(name, metric, time_bucket_in_sec)
def get_sources(self, component_id):
  """Returns the declared inputs to specified component

  :return: map <streamId namedtuple (same structure as protobuf msg) -> gtype>,
           or None if not found
  """
  # this is necessary because protobuf message is not hashable
  StreamId = namedtuple('StreamId', 'id, component_name')
  if component_id in self.inputs:
    ret = {}
    for istream in self.inputs.get(component_id):
      key = StreamId(id=istream.stream.id, component_name=istream.stream.component_name)
      ret[key] = istream.gtype
    return ret
  else:
    return None
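The namedtuple-as-key trick works because protobuf messages are unhashable while a namedtuple with the same fields hashes by value. A minimal standalone illustration (the stream and component names here are made up):

from collections import namedtuple

StreamId = namedtuple('StreamId', 'id, component_name')
sources = {StreamId(id='default', component_name='word-spout'): 'SHUFFLE'}
# equal field values hash to the same key, so lookups work as expected
print(StreamId(id='default', component_name='word-spout') in sources)  # True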
def get_component_tasks(self, component_id):
  """Returns the task ids allocated for the given component id"""
  ret = []
  for task_id, comp_id in self.task_to_component_map.items():
    if comp_id == component_id:
      ret.append(task_id)
  return ret
def add_task_hook(self, task_hook):
  """Registers a specified task hook to this context

  :type task_hook: heron.instance.src.python.utils.topology.ITaskHook
  :param task_hook: Implementation of ITaskHook
  """
  if not isinstance(task_hook, ITaskHook):
    raise TypeError("In add_task_hook(): attempt to add non ITaskHook instance, given: %s"
                    % str(type(task_hook)))
  self.task_hooks.append(task_hook)
def get_metrics_collector(self):
  """Returns this context's metrics collector"""
  if self.metrics_collector is None or not isinstance(self.metrics_collector, MetricsCollector):
    raise RuntimeError("Metrics collector is not registered in this context")
  return self.metrics_collector
def invoke_hook_prepare(self):
  """Invokes task hooks after the spout/bolt's initialize() method"""
  for task_hook in self.task_hooks:
    task_hook.prepare(self.get_cluster_config(), self)
def invoke_hook_emit(self, values, stream_id, out_tasks):
  """Invokes task hooks every time a tuple is emitted in spout/bolt

  :type values: list
  :param values: values emitted
  :type stream_id: str
  :param stream_id: stream id into which the tuple is emitted
  :type out_tasks: list
  :param out_tasks: list of custom grouping target task ids
  """
  if len(self.task_hooks) > 0:
    emit_info = EmitInfo(values=values, stream_id=stream_id,
                         task_id=self.get_task_id(), out_tasks=out_tasks)
    for task_hook in self.task_hooks:
      task_hook.emit(emit_info)
def invoke_hook_spout_ack(self, message_id, complete_latency_ns):
  """Invokes task hooks every time the spout acks a tuple

  :type message_id: str
  :param message_id: message id to which an acked tuple was anchored
  :type complete_latency_ns: float
  :param complete_latency_ns: complete latency in nanoseconds
  """
  if len(self.task_hooks) > 0:
    spout_ack_info = SpoutAckInfo(message_id=message_id,
                                  spout_task_id=self.get_task_id(),
                                  complete_latency_ms=complete_latency_ns *
                                  system_constants.NS_TO_MS)
    for task_hook in self.task_hooks:
      task_hook.spout_ack(spout_ack_info)
def invoke_hook_spout_fail(self, message_id, fail_latency_ns):
  """Invokes task hooks every time the spout fails a tuple

  :type message_id: str
  :param message_id: message id to which a failed tuple was anchored
  :type fail_latency_ns: float
  :param fail_latency_ns: fail latency in nanoseconds
  """
  if len(self.task_hooks) > 0:
    spout_fail_info = SpoutFailInfo(message_id=message_id,
                                    spout_task_id=self.get_task_id(),
                                    fail_latency_ms=fail_latency_ns *
                                    system_constants.NS_TO_MS)
    for task_hook in self.task_hooks:
      task_hook.spout_fail(spout_fail_info)
def invoke_hook_bolt_execute(self, heron_tuple, execute_latency_ns):
  """Invokes task hooks every time the bolt processes a tuple

  :type heron_tuple: HeronTuple
  :param heron_tuple: tuple that is executed
  :type execute_latency_ns: float
  :param execute_latency_ns: execute latency in nanoseconds
  """
  if len(self.task_hooks) > 0:
    bolt_execute_info = \
        BoltExecuteInfo(heron_tuple=heron_tuple,
                        executing_task_id=self.get_task_id(),
                        execute_latency_ms=execute_latency_ns * system_constants.NS_TO_MS)
    for task_hook in self.task_hooks:
      task_hook.bolt_execute(bolt_execute_info)
def invoke_hook_bolt_ack(self, heron_tuple, process_latency_ns):
  """Invokes task hooks every time the bolt acks a tuple

  :type heron_tuple: HeronTuple
  :param heron_tuple: tuple that is acked
  :type process_latency_ns: float
  :param process_latency_ns: process latency in nanoseconds
  """
  if len(self.task_hooks) > 0:
    bolt_ack_info = BoltAckInfo(heron_tuple=heron_tuple,
                                acking_task_id=self.get_task_id(),
                                process_latency_ms=process_latency_ns *
                                system_constants.NS_TO_MS)
    for task_hook in self.task_hooks:
      task_hook.bolt_ack(bolt_ack_info)
def invoke_hook_bolt_fail(self, heron_tuple, fail_latency_ns):
  """Invokes task hooks every time the bolt fails a tuple

  :type heron_tuple: HeronTuple
  :param heron_tuple: tuple that is failed
  :type fail_latency_ns: float
  :param fail_latency_ns: fail latency in nanoseconds
  """
  if len(self.task_hooks) > 0:
    bolt_fail_info = BoltFailInfo(heron_tuple=heron_tuple,
                                  failing_task_id=self.get_task_id(),
                                  fail_latency_ms=fail_latency_ns *
                                  system_constants.NS_TO_MS)
    for task_hook in self.task_hooks:
      task_hook.bolt_fail(bolt_fail_info)
def create_parser(subparsers):
  '''Create a subparser for the submit command

  :param subparsers:
  :return:
  '''
  parser = subparsers.add_parser(
      'submit',
      help='Submit a topology',
      usage="%(prog)s [options] cluster/[role]/[env] " +
            "topology-file-name topology-class-name [topology-args]",
      add_help=True
  )

  cli_args.add_titles(parser)
  cli_args.add_cluster_role_env(parser)
  cli_args.add_topology_file(parser)
  cli_args.add_topology_class(parser)
  cli_args.add_config(parser)
  cli_args.add_deactive_deploy(parser)
  cli_args.add_dry_run(parser)
  cli_args.add_extra_launch_classpath(parser)
  cli_args.add_release_yaml_file(parser)
  cli_args.add_service_url(parser)
  cli_args.add_system_property(parser)
  cli_args.add_verbose(parser)

  parser.set_defaults(subcommand='submit')
  return parser
def launch_a_topology(cl_args, tmp_dir, topology_file, topology_defn_file, topology_name):
  '''Launch a topology given the topology jar, its definition file and configurations

  :param cl_args:
  :param tmp_dir:
  :param topology_file:
  :param topology_defn_file:
  :param topology_name:
  :return:
  '''
  # get the normalized path for topology.tar.gz
  topology_pkg_path = config.normalized_class_path(os.path.join(tmp_dir, 'topology.tar.gz'))

  # get the release yaml file
  release_yaml_file = cl_args['release_yaml_file']

  # create a tar package with the cluster configuration and generated config files
  config_path = cl_args['config_path']
  tar_pkg_files = [topology_file, topology_defn_file]
  generated_config_files = [release_yaml_file, cl_args['override_config_file']]

  config.create_tar(topology_pkg_path, tar_pkg_files, config_path, generated_config_files)

  # pass the args to submitter main
  args = [
      "--cluster", cl_args['cluster'],
      "--role", cl_args['role'],
      "--environment", cl_args['environ'],
      "--submit_user", cl_args['submit_user'],
      "--heron_home", config.get_heron_dir(),
      "--config_path", config_path,
      "--override_config_file", cl_args['override_config_file'],
      "--release_file", release_yaml_file,
      "--topology_package", topology_pkg_path,
      "--topology_defn", topology_defn_file,
      "--topology_bin", os.path.basename(topology_file)  # pex/cpp file if pex/cpp specified
  ]

  if Log.getEffectiveLevel() == logging.DEBUG:
    args.append("--verbose")
  if cl_args["dry_run"]:
    args.append("--dry_run")
    if "dry_run_format" in cl_args:
      args += ["--dry_run_format", cl_args["dry_run_format"]]

  lib_jars = config.get_heron_libs(
      jars.scheduler_jars() + jars.uploader_jars() + jars.statemgr_jars() + jars.packing_jars()
  )
  extra_jars = cl_args['extra_launch_classpath'].split(':')

  # invoke the submitter to submit and launch the topology
  main_class = 'org.apache.heron.scheduler.SubmitterMain'
  res = execute.heron_class(
      class_name=main_class,
      lib_jars=lib_jars,
      extra_jars=extra_jars,
      args=args,
      java_defines=[])
  err_ctxt = "Failed to launch topology '%s' %s" % (topology_name, launch_mode_msg(cl_args))
  succ_ctxt = "Successfully launched topology '%s' %s" % (topology_name, launch_mode_msg(cl_args))
  res.add_context(err_ctxt, succ_ctxt)
  return res
def launch_topology_server(cl_args, topology_file, topology_defn_file, topology_name):
  '''Launch a topology given the topology jar, its definition file and configurations

  :param cl_args:
  :param topology_file:
  :param topology_defn_file:
  :param topology_name:
  :return:
  '''
  service_apiurl = cl_args['service_url'] + rest.ROUTE_SIGNATURES['submit'][1]
  service_method = rest.ROUTE_SIGNATURES['submit'][0]
  data = dict(
      name=topology_name,
      cluster=cl_args['cluster'],
      role=cl_args['role'],
      environment=cl_args['environ'],
      user=cl_args['submit_user'],
  )
  Log.info(str(cl_args))
  overrides = dict()
  if 'config_property' in cl_args:
    overrides = config.parse_override_config(cl_args['config_property'])

  if overrides:
    data.update(overrides)

  if cl_args['dry_run']:
    data["dry_run"] = True

  files = dict(
      definition=open(topology_defn_file, 'rb'),
      topology=open(topology_file, 'rb'),
  )

  err_ctxt = "Failed to launch topology '%s' %s" % (topology_name, launch_mode_msg(cl_args))
  succ_ctxt = "Successfully launched topology '%s' %s" % (topology_name, launch_mode_msg(cl_args))

  try:
    r = service_method(service_apiurl, data=data, files=files)
    # compare status codes with ==, not identity
    ok = r.status_code == requests.codes.ok
    created = r.status_code == requests.codes.created
    s = Status.Ok if created or ok else Status.HeronError
    if s is Status.HeronError:
      Log.error(r.json().get('message', "Unknown error from API server %d" % r.status_code))
    elif ok:
      # this case happens when we request a dry_run
      print(r.json().get("response"))
  except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as err:
    Log.error(err)
    return SimpleResult(Status.HeronError, err_ctxt, succ_ctxt)
  return SimpleResult(s, err_ctxt, succ_ctxt)
def launch_topologies(cl_args, topology_file, tmp_dir):
  '''Launch topologies

  :param cl_args:
  :param topology_file:
  :param tmp_dir:
  :return: list(Responses)
  '''
  # the submitter would have written the .defn file to the tmp_dir
  defn_files = glob.glob(tmp_dir + '/*.defn')

  if len(defn_files) == 0:
    return SimpleResult(Status.HeronError, "No topologies found under %s" % tmp_dir)

  results = []
  for defn_file in defn_files:
    # load the topology definition from the file
    topology_defn = topology_pb2.Topology()
    try:
      with open(defn_file, "rb") as handle:
        topology_defn.ParseFromString(handle.read())
    except Exception as e:
      err_context = "Cannot load topology definition '%s': %s" % (defn_file, e)
      return SimpleResult(Status.HeronError, err_context)

    # launch the topology
    Log.info("Launching topology: '%s'%s", topology_defn.name, launch_mode_msg(cl_args))

    # check if we have to do server- or direct-based deployment
    if cl_args['deploy_mode'] == config.SERVER_MODE:
      res = launch_topology_server(
          cl_args, topology_file, defn_file, topology_defn.name)
    else:
      res = launch_a_topology(
          cl_args, tmp_dir, topology_file, defn_file, topology_defn.name)
    results.append(res)
  return results
def submit_fatjar(cl_args, unknown_args, tmp_dir):
  '''
  We use the packer to make a package for the jar and dump it
  to a well-known location. We then run the main method of the class
  with the specified arguments. We pass arguments as an environment
  variable HERON_OPTIONS.

  This will run the jar file with the topology_class_name. The submitter
  inside will write out the topology defn file to a location that
  we specify. Then we write the topology defn file to a well-known
  location. We then write to appropriate places in zookeeper
  and launch the scheduler jobs.

  :param cl_args:
  :param unknown_args:
  :param tmp_dir:
  :return:
  '''
  # execute main of the topology to create the topology definition
  topology_file = cl_args['topology-file-name']
  main_class = cl_args['topology-class-name']

  res = execute.heron_class(
      class_name=main_class,
      lib_jars=config.get_heron_libs(jars.topology_jars()),
      extra_jars=[topology_file],
      args=tuple(unknown_args),
      java_defines=cl_args['topology_main_jvm_property'])

  result.render(res)

  if not result.is_successful(res):
    err_context = ("Failed to create topology definition "
                   "file when executing class '%s' of file '%s'") % (main_class, topology_file)
    res.add_context(err_context)
    return res

  results = launch_topologies(cl_args, topology_file, tmp_dir)
  return results
def submit_tar(cl_args, unknown_args, tmp_dir):
  '''
  Extract and execute the java files inside the tar and then add the
  topology definition file created by running submitTopology.

  We use the packer to make a package for the tar and dump it
  to a well-known location. We then run the main method of the class
  with the specified arguments. We pass arguments as an environment
  variable HERON_OPTIONS.

  This will run the jar file with the topology class name. The submitter
  inside will write out the topology defn file to a location that
  we specify. Then we write the topology defn file to a well-known
  packer location. We then write to appropriate places in zookeeper
  and launch the aurora jobs.

  :param cl_args:
  :param unknown_args:
  :param tmp_dir:
  :return:
  '''
  # execute main of the topology to create the topology definition
  topology_file = cl_args['topology-file-name']
  java_defines = cl_args['topology_main_jvm_property']
  main_class = cl_args['topology-class-name']

  res = execute.heron_tar(main_class, topology_file, tuple(unknown_args), tmp_dir, java_defines)

  result.render(res)

  if not result.is_successful(res):
    err_context = ("Failed to create topology definition "
                   "file when executing class '%s' of file '%s'") % (main_class, topology_file)
    res.add_context(err_context)
    return res

  return launch_topologies(cl_args, topology_file, tmp_dir)
def run(command, parser, cl_args, unknown_args):
  '''
  Submits the topology to the scheduler.

  * Depending on the topology file name extension, we treat the file as a
    fatjar (if the ext is .jar) or a tar file (if the ext is .tar/.tar.gz).
  * We upload the topology file to the packer, update zookeeper and launch
    scheduler jobs representing that topology.
  * You can see your topology in Heron UI.

  :param command:
  :param parser:
  :param cl_args:
  :param unknown_args:
  :return:
  '''
  Log.debug("Submit Args %s", cl_args)

  # get the topology file name
  topology_file = cl_args['topology-file-name']

  if urlparse.urlparse(topology_file).scheme:
    cl_args['topology-file-name'] = download(topology_file, cl_args['cluster'])
    topology_file = cl_args['topology-file-name']
    Log.debug("download uri to local file: %s", topology_file)

  # check to see if the topology file exists
  if not os.path.isfile(topology_file):
    err_context = "Topology file '%s' does not exist" % topology_file
    return SimpleResult(Status.InvocationError, err_context)

  # check if it is a valid file type
  jar_type = topology_file.endswith(".jar")
  tar_type = topology_file.endswith(".tar") or topology_file.endswith(".tar.gz")
  pex_type = topology_file.endswith(".pex")
  cpp_type = topology_file.endswith(".dylib") or topology_file.endswith(".so")
  if not (jar_type or tar_type or pex_type or cpp_type):
    _, ext_name = os.path.splitext(topology_file)
    err_context = "Unknown file type '%s'. Please use .tar " \
                  "or .tar.gz or .jar or .pex or .dylib or .so file" % ext_name
    return SimpleResult(Status.InvocationError, err_context)

  # check if the extra launch classpath is provided and, if so, validate it
  if cl_args['extra_launch_classpath']:
    valid_classpath = classpath.valid_java_classpath(cl_args['extra_launch_classpath'])
    if not valid_classpath:
      err_context = "One of jar or directory in extra launch classpath does not exist: %s" % \
                    cl_args['extra_launch_classpath']
      return SimpleResult(Status.InvocationError, err_context)

  # create a temporary directory for the topology definition file
  tmp_dir = tempfile.mkdtemp()
  opts.cleaned_up_files.append(tmp_dir)

  # if the topology needs to be launched in deactivated state, do so
  if cl_args['deploy_deactivated']:
    initial_state = topology_pb2.TopologyState.Name(topology_pb2.PAUSED)
  else:
    initial_state = topology_pb2.TopologyState.Name(topology_pb2.RUNNING)

  # set the tmp dir and deactivated state in global options
  opts.set_config('cmdline.topologydefn.tmpdirectory', tmp_dir)
  opts.set_config('cmdline.topology.initial.state', initial_state)
  opts.set_config('cmdline.topology.role', cl_args['role'])
  opts.set_config('cmdline.topology.environment', cl_args['environ'])

  # use the CLI release yaml file if the release_yaml_file config is empty
  if not cl_args['release_yaml_file']:
    cl_args['release_yaml_file'] = config.get_heron_release_file()

  # check the extension of the file name to see if it is a tar/jar file
  if jar_type:
    return submit_fatjar(cl_args, unknown_args, tmp_dir)
  elif tar_type:
    return submit_tar(cl_args, unknown_args, tmp_dir)
  elif cpp_type:
    return submit_cpp(cl_args, unknown_args, tmp_dir)
  else:
    return submit_pex(cl_args, unknown_args, tmp_dir)
def get(self):
  """ get method """
  try:
    cluster = self.get_argument_cluster()
    role = self.get_argument_role()
    environ = self.get_argument_environ()
    topology_name = self.get_argument_topology()
    container = self.get_argument(constants.PARAM_CONTAINER)
    path = self.get_argument(constants.PARAM_PATH)
    offset = self.get_argument_offset()
    length = self.get_argument_length()
    topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)

    stmgr_id = "stmgr-" + container
    stmgr = topology_info["physical_plan"]["stmgrs"][stmgr_id]
    host = stmgr["host"]
    shell_port = stmgr["shell_port"]
    file_data_url = "http://%s:%d/filedata/%s?offset=%s&length=%s" % \
                    (host, shell_port, path, offset, length)

    http_client = tornado.httpclient.AsyncHTTPClient()
    response = yield http_client.fetch(file_data_url)
    self.write_success_response(json.loads(response.body))
    self.finish()
  except Exception as e:
    Log.debug(traceback.format_exc())
    self.write_error_response(e)
def setup(self, context):
  """Implements TextFile Generator's setup method"""
  myindex = context.get_partition_index()
  self._files_to_consume = self._files[myindex::context.get_num_partitions()]
  self.logger.info("TextFileSpout files to consume %s" % self._files_to_consume)
  self._lines_to_consume = self._get_next_lines()
  self._emit_count = 0
def add_config(parser):
  """ add config """
  # the default config path
  default_config_path = config.get_heron_conf_dir()

  parser.add_argument(
      '--config-path',
      metavar='(a string; path to cluster config; default: "' + default_config_path + '")',
      default=os.path.join(config.get_heron_dir(), default_config_path))
  return parser
def add_verbose(parser):
  """ add optional verbose argument """
  # note: argparse's type=bool treats any non-empty string as True,
  # so expose this as a flag rather than a typed value
  parser.add_argument(
      '--verbose',
      action='store_true',
      default=False)
  return parser
def add_tracker_url(parser):
  """ add optional tracker_url argument """
  parser.add_argument(
      '--tracker_url',
      metavar='(tracker url; default: "' + DEFAULT_TRACKER_URL + '")',
      type=str,
      default=DEFAULT_TRACKER_URL)
  return parser
def hex_escape(bin_str):
  """ Hex encode a binary string """
  printable = string.ascii_letters + string.digits + string.punctuation + ' '
  return ''.join(ch if ch in printable
                 else r'\x{0:02x}'.format(ord(ch)) for ch in bin_str)
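A quick illustration of the escaping, assuming hex_escape and its string import are in scope: printable characters pass through untouched, anything else becomes a \xNN escape.

print(hex_escape('abc\x00\x7f'))  # -> abc\x00\x7f (escapes rendered literally)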
def make_shell_endpoint(topologyInfo, instance_id):
  """
  Makes the http endpoint for the heron shell
  if the shell port is present, otherwise returns None.
  """
  # Format: container_<id>_<instance_id>
  pplan = topologyInfo["physical_plan"]
  stmgrId = pplan["instances"][instance_id]["stmgrId"]
  host = pplan["stmgrs"][stmgrId]["host"]
  shell_port = pplan["stmgrs"][stmgrId]["shell_port"]
  return "http://%s:%d" % (host, shell_port)
def make_shell_logfiles_url(host, shell_port, _, instance_id=None):
  """
  Make the url for log-files in heron-shell
  from the info stored in stmgr.

  If no instance_id is provided, the link will be to the dir for the whole container.
  If shell port is not present, it returns None.
  """
  if not shell_port:
    return None
  if not instance_id:
    return "http://%s:%d/browse/log-files" % (host, shell_port)
  else:
    return "http://%s:%d/file/log-files/%s.log.0" % (host, shell_port, instance_id)
def make_shell_logfile_data_url(host, shell_port, instance_id, offset, length):
  """
  Make the url for log-file data in heron-shell
  from the info stored in stmgr.
  """
  return "http://%s:%d/filedata/log-files/%s.log.0?offset=%s&length=%s" % \
         (host, shell_port, instance_id, offset, length)
def cygpath(x):
  """
  Returns the Windows form of the input path
  :return: the path in Windows form
  """
  command = ['cygpath', '-wp', x]
  p = subprocess.Popen(command, stdout=subprocess.PIPE)
  output, _ = p.communicate()
  lines = output.split("\n")
  return lines[0]
def get_heron_tracker_dir():
  """
  This will extract heron tracker directory from .pex file.
  :return: root location for heron-tools.
  """
  path = "/".join(os.path.realpath(__file__).split('/')[:-8])
  return normalized_class_path(path)
def parse_config_file(config_file):
  """
  This will parse the config file for the tracker
  :return: the config dict, or None if the file is not found
  """
  expanded_config_file_path = os.path.expanduser(config_file)
  if not os.path.lexists(expanded_config_file_path):
    return None

  configs = {}

  # read the configuration file; safe_load avoids executing arbitrary YAML tags
  with open(expanded_config_file_path, 'r') as f:
    configs = yaml.safe_load(f)

  return configs
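For reference, a sketch of how such a config might parse; the keys and values here are illustrative, not prescriptive:

import yaml

sample = """
statemgrs:
  - type: file
    name: local
    rootpath: ~/.herondata/repository/state/local
"""
print(yaml.safe_load(sample))
# -> {'statemgrs': [{'type': 'file', 'name': 'local', 'rootpath': '~/...'}]}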
def _handle_register_response(self, response):
  """Called when a register response (RegisterInstanceResponse) arrives"""
  if response.status.status != common_pb2.StatusCode.Value("OK"):
    raise RuntimeError("Stream Manager returned a non-OK response for register")

  Log.info("We registered ourselves to the Stream Manager")
  self.is_registered = True
  if response.HasField("pplan"):
    Log.info("Handling assignment message from response")
    self._handle_assignment_message(response.pplan)
  else:
    Log.debug("Received a register response with no pplan")
def _handle_assignment_message(self, pplan):
  """Called when a new NewInstanceAssignmentMessage arrives"""
  Log.debug("In handle_assignment_message() of STStmgrClient, Physical Plan: \n%s", str(pplan))
  self.heron_instance_cls.handle_assignment_msg(pplan)
def decode_packet(packet):
  """Decodes an IncomingPacket object and returns (typename, reqid, serialized message)"""
  if not packet.is_complete:
    raise RuntimeError("In decode_packet(): Packet corrupted")

  data = packet.data

  len_typename = HeronProtocol.unpack_int(data[:4])
  data = data[4:]

  typename = data[:len_typename]
  data = data[len_typename:]

  reqid = REQID.unpack(data[:REQID.REQID_SIZE])
  data = data[REQID.REQID_SIZE:]

  len_msg = HeronProtocol.unpack_int(data[:4])
  data = data[4:]

  serialized_msg = data[:len_msg]

  return typename, reqid, serialized_msg
def create_packet(reqid, message):
  """Creates an OutgoingPacket from a given reqid and message

  :param reqid: REQID object
  :param message: protocol buffer object
  """
  assert message.IsInitialized()
  packet = ''

  # calculate the total size of the packet incl. header
  typename = message.DESCRIPTOR.full_name
  datasize = HeronProtocol.get_size_to_pack_string(typename) + \
             REQID.REQID_SIZE + HeronProtocol.get_size_to_pack_message(message)

  # first write out how much data is there as the header
  packet += HeronProtocol.pack_int(datasize)

  # next write the type string
  packet += HeronProtocol.pack_int(len(typename))
  packet += typename

  # reqid
  packet += reqid.pack()

  # add the proto
  packet += HeronProtocol.pack_int(message.ByteSize())
  packet += message.SerializeToString()
  return OutgoingPacket(packet)
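Read together, create_packet and decode_packet imply a simple length-prefixed wire format. A self-contained sketch of that framing, assuming big-endian 4-byte length fields and a 32-byte REQID (both are assumptions here, not confirmed constants):

import struct

REQID_SIZE = 32  # assumed REQID width in bytes

def frame(typename, reqid, msg):
  # [len(typename)][typename][reqid][len(msg)][msg], prefixed by total size
  body = struct.pack(">i", len(typename)) + typename + reqid \
         + struct.pack(">i", len(msg)) + msg
  return struct.pack(">i", len(body)) + body

def unframe(packet):
  # skip the 4-byte total-size header, then peel fields in the same order
  data = packet[4:]
  n = struct.unpack(">i", data[:4])[0]
  typename, data = data[4:4 + n], data[4 + n:]
  reqid, data = data[:REQID_SIZE], data[REQID_SIZE:]
  m = struct.unpack(">i", data[:4])[0]
  return typename, reqid, data[4:4 + m]

pkt = frame(b"heron.proto.Msg", b"\x00" * REQID_SIZE, b"payload")
assert unframe(pkt) == (b"heron.proto.Msg", b"\x00" * REQID_SIZE, b"payload")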
def send(self, dispatcher):
  """Sends this outgoing packet to the dispatcher's socket"""
  if self.sent_complete:
    return
  sent = dispatcher.send(self.to_send)
  self.to_send = self.to_send[sent:]
def create_packet(header, data):
  """Creates an IncomingPacket object from header and data

  This method is for testing purposes
  """
  packet = IncomingPacket()
  packet.header = header
  packet.data = data

  if len(header) == HeronProtocol.HEADER_SIZE:
    packet.is_header_read = True
    if len(data) == packet.get_datasize():
      packet.is_complete = True

  return packet
def read(self, dispatcher):
  """Reads incoming data from asyncore.dispatcher"""
  try:
    if not self.is_header_read:
      # try reading the header
      to_read = HeronProtocol.HEADER_SIZE - len(self.header)
      self.header += dispatcher.recv(to_read)
      if len(self.header) == HeronProtocol.HEADER_SIZE:
        self.is_header_read = True
      else:
        Log.debug("Header read incomplete; read %d bytes of header" % len(self.header))
        return

    if self.is_header_read and not self.is_complete:
      # try reading the data
      to_read = self.get_datasize() - len(self.data)
      self.data += dispatcher.recv(to_read)
      if len(self.data) == self.get_datasize():
        self.is_complete = True
  except socket.error as e:
    if e.errno == socket.errno.EAGAIN or e.errno == socket.errno.EWOULDBLOCK:
      # try again later -> call continue_read later
      Log.debug("Try again error")
    else:
      # fatal error
      Log.debug("Fatal error when reading IncomingPacket")
      raise RuntimeError("Fatal error occurred in IncomingPacket.read()")
def generate():
  """Generates a random REQID for a request"""
  data_bytes = bytearray(random.getrandbits(8) for i in range(REQID.REQID_SIZE))
  return REQID(data_bytes)
def create_parser(subparsers):
  '''
  :param subparsers:
  :return:
  '''
  parser = subparsers.add_parser(
      'restart',
      help='Restart a topology',
      usage="%(prog)s [options] cluster/[role]/[env] <topology-name> [container-id]",
      add_help=True)

  args.add_titles(parser)
  args.add_cluster_role_env(parser)
  args.add_topology(parser)

  parser.add_argument(
      'container-id',
      nargs='?',
      type=int,
      default=-1,
      help='Identifier of the container to be restarted')

  args.add_config(parser)
  args.add_service_url(parser)
  args.add_verbose(parser)

  parser.set_defaults(subcommand='restart')
  return parser
def run(command, parser, cl_args, unknown_args):
  '''
  :param command:
  :param parser:
  :param cl_args:
  :param unknown_args:
  :return:
  '''
  Log.debug("Restart Args: %s", cl_args)
  container_id = cl_args['container-id']

  if cl_args['deploy_mode'] == config.SERVER_MODE:
    dict_extra_args = {"container_id": str(container_id)}
    return cli_helper.run_server(command, cl_args, "restart topology",
                                 extra_args=dict_extra_args)
  else:
    list_extra_args = ["--container_id", str(container_id)]
    return cli_helper.run_direct(command, cl_args, "restart topology",
                                 extra_args=list_extra_args)
def get_heron_config():
  '''
  Get config opts from the global variable
  :return:
  '''
  opt_list = []
  for (key, value) in config_opts.items():
    opt_list.append('%s=%s' % (key, value))

  all_opts = (','.join(opt_list)).replace(' ', '%%%%')
  return all_opts
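A small sketch of the joining and space-escaping behavior above, using a made-up config_opts dict:

config_opts = {'cmdline.topology.initial.state': 'RUNNING',
               'cmdline.topology.role': 'heron user'}
opt_list = ['%s=%s' % (k, v) for k, v in config_opts.items()]
print((','.join(opt_list)).replace(' ', '%%%%'))
# -> cmdline.topology.initial.state=RUNNING,cmdline.topology.role=heron%%%%user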
def yaml_config_reader(config_path):
  """Reads a yaml config file and returns the auto-typed config dict"""
  if not config_path.endswith(".yaml"):
    raise ValueError("Config file not yaml")

  # safe_load avoids executing arbitrary YAML tags
  with open(config_path, 'r') as f:
    config = yaml.safe_load(f)

  return config
def handle_new_tuple_set_2(self, hts2):
  """Called when a new HeronTupleSet2 arrives

  Converts (assembles) the HeronTupleSet2 (raw byte array) to a HeronTupleSet.
  See more at GitHub PR #1421.

  :param hts2: HeronTupleSet2 type
  """
  if self.my_pplan_helper is None or self.my_instance is None:
    Log.error("Got tuple set when no instance assigned yet")
  else:
    hts = tuple_pb2.HeronTupleSet()
    if hts2.HasField('control'):
      hts.control.CopyFrom(hts2.control)
    else:
      hdts = tuple_pb2.HeronDataTupleSet()
      hdts.stream.CopyFrom(hts2.data.stream)
      try:
        for trunk in hts2.data.tuples:
          added_tuple = hdts.tuples.add()
          added_tuple.ParseFromString(trunk)
      except Exception:
        Log.exception('Failed to deserialize HeronDataTuple')
      hts.data.CopyFrom(hdts)

    self.in_stream.offer(hts)
    if self.my_pplan_helper.is_topology_running():
      self.my_instance.py_class.process_incoming_tuples()
def handle_initiate_stateful_checkpoint(self, ckptmsg):
  """Called when we get an InitiateStatefulCheckpoint message

  :param ckptmsg: InitiateStatefulCheckpoint type
  """
  self.in_stream.offer(ckptmsg)
  if self.my_pplan_helper.is_topology_running():
    self.my_instance.py_class.process_incoming_tuples()
def handle_start_stateful_processing(self, start_msg):
  """Called when we receive a StartInstanceStatefulProcessing message

  :param start_msg: StartInstanceStatefulProcessing type
  """
  Log.info("Received start stateful processing for %s" % start_msg.checkpoint_id)
  self.is_stateful_started = True
  self.start_instance_if_possible()
def handle_restore_instance_state(self, restore_msg):
  """Called when we receive a RestoreInstanceStateRequest message

  :param restore_msg: RestoreInstanceStateRequest type
  """
  Log.info("Restoring instance state to checkpoint %s" % restore_msg.state.checkpoint_id)
  # stop the instance
  if self.is_stateful_started:
    self.my_instance.py_class.stop()
    self.my_instance.py_class.clear_collector()
    self.is_stateful_started = False

  # clear all buffers
  self.in_stream.clear()
  self.out_stream.clear()

  # deserialize the state
  if self.stateful_state is not None:
    self.stateful_state.clear()
  if restore_msg.state.state is not None and restore_msg.state.state:
    try:
      self.stateful_state = self.serializer.deserialize(restore_msg.state.state)
    except Exception as e:
      raise RuntimeError("Could not deserialize state during restore " + str(e))
  else:
    Log.info("The restore request does not have an actual state")
  if self.stateful_state is None:
    self.stateful_state = HashMapState()

  Log.info("Instance restore state deserialized")

  # send the response back
  resp = ckptmgr_pb2.RestoreInstanceStateResponse()
  resp.status.status = common_pb2.StatusCode.Value("OK")
  resp.checkpoint_id = restore_msg.state.checkpoint_id
  self._stmgr_client.send_message(resp)
def send_buffered_messages(self):
  """Sends messages in out_stream to the Stream Manager"""
  while not self.out_stream.is_empty() and self._stmgr_client.is_registered:
    tuple_set = self.out_stream.poll()
    if isinstance(tuple_set, tuple_pb2.HeronTupleSet):
      tuple_set.src_task_id = self.my_pplan_helper.my_task_id
      self.gateway_metrics.update_sent_packet(tuple_set.ByteSize())
    self._stmgr_client.send_message(tuple_set)
def _handle_state_change_msg(self, new_helper):
  """Called when a state change is commanded by the stream manager"""
  assert self.my_pplan_helper is not None
  assert self.my_instance is not None and self.my_instance.py_class is not None

  if self.my_pplan_helper.get_topology_state() != new_helper.get_topology_state():
    # handle state change
    # update the pplan_helper
    self.my_pplan_helper = new_helper
    if new_helper.is_topology_running():
      if not self.is_instance_started:
        self.start_instance_if_possible()
      self.my_instance.py_class.invoke_activate()
    elif new_helper.is_topology_paused():
      self.my_instance.py_class.invoke_deactivate()
    else:
      raise RuntimeError("Unexpected TopologyState update: %s" %
                         new_helper.get_topology_state())
  else:
    Log.info("Topology state remains the same.")
def handle_assignment_msg(self, pplan):
  """Called when a new NewInstanceAssignmentMessage arrives

  Tells this instance to become either a spout or a bolt.

  :param pplan: PhysicalPlan proto
  """
  new_helper = PhysicalPlanHelper(pplan, self.instance.instance_id,
                                  self.topo_pex_file_abs_path)

  if self.my_pplan_helper is not None and \
      (self.my_pplan_helper.my_component_name != new_helper.my_component_name or
       self.my_pplan_helper.my_task_id != new_helper.my_task_id):
    raise RuntimeError("Our assignment has changed. We will die to pick up the new assignment.")

  new_helper.set_topology_context(self.metrics_collector)

  if self.my_pplan_helper is None:
    Log.info("Received a new Physical Plan")
    Log.info("Push the new pplan_helper to Heron Instance")
    self._handle_assignment_msg(new_helper)
  else:
    Log.info("Received a new Physical Plan with the same assignment -- State Change")
    Log.info("Old state: %s, new state: %s.",
             self.my_pplan_helper.get_topology_state(), new_helper.get_topology_state())
    self._handle_state_change_msg(new_helper)
def check_output_schema(self, stream_id, tup):
  """Checks if a given stream_id and tuple matches with the output schema

  :type stream_id: str
  :param stream_id: stream id into which the tuple is sent
  :type tup: list
  :param tup: tuple that is going to be sent
  """
  # do some checking to make sure that the number of fields match what's expected
  size = self._output_schema.get(stream_id, None)
  if size is None:
    raise RuntimeError("%s emitting to stream %s but was not declared in output fields"
                       % (self.my_component_name, stream_id))
  elif size != len(tup):
    raise RuntimeError("Number of fields emitted in stream %s does not match what's expected. "
                       "Expected: %s, Observed: %s" % (stream_id, size, len(tup)))
def get_topology_config(self):
  """Returns the topology config"""
  if self.pplan.topology.HasField("topology_config"):
    return self._get_dict_from_config(self.pplan.topology.topology_config)
  else:
    return {}
def set_topology_context(self, metrics_collector):
  """Sets a new topology context"""
  Log.debug("Setting topology context")
  cluster_config = self.get_topology_config()
  cluster_config.update(self._get_dict_from_config(self.my_component.config))
  task_to_component_map = self._get_task_to_comp_map()

  self.context = TopologyContextImpl(cluster_config, self.pplan.topology,
                                     task_to_component_map, self.my_task_id,
                                     metrics_collector, self.topology_pex_abs_path)
def _get_dict_from_config(topology_config):
  """Converts a Config protobuf message to a python dictionary

  Values are converted according to the rules below:

  - A number string (e.g. "12" or "1.2") is appropriately converted to ``int`` or ``float``
  - A boolean string ("true", "True", "false" or "False") is converted to the built-in
    boolean type (i.e. ``True`` or ``False``)
  - A normal string is inserted into the dict as is
  - A serialized value is deserialized and inserted as the corresponding Python object
  """
  config = {}
  for kv in topology_config.kvs:
    if kv.HasField("value"):
      assert kv.type == topology_pb2.ConfigValueType.Value("STRING_VALUE")
      # value is string
      if PhysicalPlanHelper._is_number(kv.value):
        config[kv.key] = PhysicalPlanHelper._get_number(kv.value)
      elif kv.value.lower() in ("true", "false"):
        config[kv.key] = True if kv.value.lower() == "true" else False
      else:
        config[kv.key] = kv.value
    elif kv.HasField("serialized_value") and \
        kv.type == topology_pb2.ConfigValueType.Value("PYTHON_SERIALIZED_VALUE"):
      # deserialize that
      config[kv.key] = default_serializer.deserialize(kv.serialized_value)
    else:
      assert kv.HasField("type")
      Log.error("Unsupported config <key:value> found: %s, with type: %s"
                % (str(kv), str(kv.type)))
      continue

  return config
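The conversion rules above amount to a small coercion table. A standalone sketch of the same string-coercion logic (a hypothetical helper, not the class method itself):

def coerce(value):
  # mirrors the rules above: int/float strings, boolean strings, else raw string
  try:
    return int(value)
  except ValueError:
    pass
  try:
    return float(value)
  except ValueError:
    pass
  if value.lower() in ("true", "false"):
    return value.lower() == "true"
  return value

print(coerce("12"), coerce("1.2"), coerce("True"), coerce("heron"))
# -> 12 1.2 True heron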
def _setup_custom_grouping(self, topology):
  """Checks whether there are any bolts that consume any of my streams using custom grouping"""
  for i in range(len(topology.bolts)):
    for in_stream in topology.bolts[i].inputs:
      if in_stream.stream.component_name == self.my_component_name and \
          in_stream.gtype == topology_pb2.Grouping.Value("CUSTOM"):
        # this bolt takes my output in custom grouping manner
        if in_stream.type == topology_pb2.CustomGroupingObjectType.Value("PYTHON_OBJECT"):
          custom_grouping_obj = default_serializer.deserialize(in_stream.custom_grouping_object)
          if isinstance(custom_grouping_obj, str):
            pex_loader.load_pex(self.topology_pex_abs_path)
            grouping_cls = \
                pex_loader.import_and_get_class(self.topology_pex_abs_path, custom_grouping_obj)
            custom_grouping_obj = grouping_cls()
          assert isinstance(custom_grouping_obj, ICustomGrouping)
          self.custom_grouper.add(in_stream.stream.id,
                                  self._get_taskids_for_component(topology.bolts[i].comp.name),
                                  custom_grouping_obj,
                                  self.my_component_name)
        elif in_stream.type == topology_pb2.CustomGroupingObjectType.Value("JAVA_OBJECT"):
          raise NotImplementedError("Java-serialized custom grouping is not yet supported "
                                    "for python topology")
        else:
          raise ValueError("Unrecognized custom grouping type found: %s" % str(in_stream.type))
def add(self, stream_id, task_ids, grouping, source_comp_name):
  """Adds the target component

  :type stream_id: str
  :param stream_id: stream id into which tuples are emitted
  :type task_ids: list of str
  :param task_ids: list of task ids to which tuples are emitted
  :type grouping: ICustomStreamGrouping object
  :param grouping: custom grouping to use
  :type source_comp_name: str
  :param source_comp_name: source component name
  """
  if stream_id not in self.targets:
    self.targets[stream_id] = []
  self.targets[stream_id].append(Target(task_ids, grouping, source_comp_name))
def prepare(self, context):
  """Prepares the custom grouping for this component"""
  for stream_id, targets in self.targets.items():
    for target in targets:
      target.prepare(context, stream_id)
def choose_tasks(self, stream_id, values):
  """Chooses tasks for a given stream_id and values and returns a list of target tasks"""
  if stream_id not in self.targets:
    return []

  ret = []
  for target in self.targets[stream_id]:
    ret.extend(target.choose_tasks(values))
  return ret
def prepare(self, context, stream_id):
  """Invokes prepare() of this custom grouping"""
  self.grouping.prepare(context, self.source_comp_name, stream_id, self.task_ids)
def choose_tasks(self, values):
  """Invokes choose_tasks() of this custom grouping"""
  ret = self.grouping.choose_tasks(values)
  if not isinstance(ret, list):
    raise TypeError("Returned object after custom grouping's choose_tasks() "
                    "needs to be a list, given: %s" % str(type(ret)))
  else:
    for i in ret:
      if not isinstance(i, int):
        raise TypeError("Returned object after custom grouping's choose_tasks() "
                        "contained non-integer: %s" % str(i))
      if i not in self.task_ids:
        raise ValueError("Returned object after custom grouping's choose_tasks() contained "
                         "a task id that is not registered: %d" % i)
    return ret
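Given the prepare()/choose_tasks() contract enforced above, a custom grouping implementation might look like the following sketch (the class and its routing rule are hypothetical):

class HashFirstFieldGrouping(object):
  """Routes each tuple to a task chosen by hashing the tuple's first value."""

  def prepare(self, context, component, stream, task_ids):
    # remember the registered target task ids for this stream
    self.task_ids = task_ids

  def choose_tasks(self, values):
    # must return a list of ints drawn from the registered task ids
    return [self.task_ids[hash(values[0]) % len(self.task_ids)]]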
def add_config(parser):
  '''
  :param parser:
  :return:
  '''
  # the default config path
  default_config_path = config.get_heron_conf_dir()

  parser.add_argument(
      '--config-path',
      default=os.path.join(config.get_heron_dir(), default_config_path),
      help='Path to cluster configuration files')

  parser.add_argument(
      '--config-property',
      metavar='PROPERTY=VALUE',
      action='append',
      default=[],
      help='Configuration properties that override default options')
  return parser
def add_dry_run(parser):
  '''
  :param parser:
  :return:
  '''
  default_format = 'table'
  resp_formats = ['raw', 'table', 'colored_table', 'json']
  available_options = ', '.join(['%s' % opt for opt in resp_formats])

  def dry_run_resp_format(value):
    if value not in resp_formats:
      raise argparse.ArgumentTypeError(
          'Invalid dry-run response format: %s. Available formats: %s'
          % (value, available_options))
    return value

  parser.add_argument(
      '--dry-run',
      default=False,
      action='store_true',
      help='Enable dry-run mode. Information about '
           'the command will print but no action will be taken on the topology')

  parser.add_argument(
      '--dry-run-format',
      metavar='DRY_RUN_FORMAT',
      default='colored_table' if sys.stdout.isatty() else 'table',
      type=dry_run_resp_format,
      help='The format of the dry-run output ([%s]; default: colored_table on a terminal, '
           '%s otherwise). Ignored when dry-run mode is not enabled'
           % ('|'.join(resp_formats), default_format))
  return parser
def read_server_mode_cluster_definition(cluster, cl_args):
  '''
  Read the cluster definition for server mode

  :param cluster:
  :param cl_args:
  :return:
  '''
  client_confs = dict()
  client_confs[cluster] = cliconfig.cluster_config(cluster)

  # check if the service-url from the command line is set; if so, override it
  if cl_args.get('service_url', None):
    client_confs[cluster]['service_url'] = cl_args['service_url']

  return client_confs
def check_direct_mode_cluster_definition(cluster, config_path):
  '''
  Check the cluster definition for direct mode

  :param cluster:
  :param config_path:
  :return:
  '''
  config_path = config.get_heron_cluster_conf_dir(cluster, config_path)
  return os.path.isdir(config_path)
def get(self, path):
  ''' get method '''
  if not path:
    path = "."

  if not utils.check_path(path):
    self.write("Only relative paths are allowed")
    self.set_status(403)
    self.finish()
    return

  t = Template(utils.get_asset("browse.html"))
  args = dict(
      path=path,
      listing=utils.get_listing(path),
      format_prefix=utils.format_prefix,
      stat=stat,
      get_stat=utils.get_stat,
      os=os,
      css=utils.get_asset("bootstrap.css")
  )
  self.write(t.generate(**args))
  self.finish()
def format_mode(sres):
  """
  Format a line in the directory listing based on
  the file's type and other attributes.
  """
  mode = sres.st_mode
  # the high bits are the owner's permissions, the low bits everyone else's
  owner = (mode & 0o700) >> 6
  group = (mode & 0o070) >> 3
  other = mode & 0o7

  def stat_type(md):
    ''' stat type '''
    if stat.S_ISDIR(md):
      return 'd'
    elif stat.S_ISSOCK(md):
      return 's'
    else:
      return '-'

  def triple(md):
    ''' triple '''
    return '%c%c%c' % (
        'r' if md & 0b100 else '-',
        'w' if md & 0b010 else '-',
        'x' if md & 0b001 else '-')

  return ''.join([stat_type(mode), triple(owner), triple(group), triple(other)])
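Run against the current directory (assuming format_mode and the stat import above are in scope), this yields an ls-style mode string:

import os

sres = os.stat('.')
print(format_mode(sres))  # e.g. "drwxr-xr-x" for a world-readable directory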
def format_mtime(mtime):
  """
  Format the date associated with a file to be displayed in a directory listing.
  """
  now = datetime.now()
  dt = datetime.fromtimestamp(mtime)
  return '%s %2d %5s' % (
      dt.strftime('%b'),
      dt.day,
      dt.year if dt.year != now.year else dt.strftime('%H:%M'))
def format_prefix(filename, sres):
  """
  Prefix to a filename in the directory listing. This is to make
  the listing similar to the output of "ls -alh".
  """
  try:
    pwent = pwd.getpwuid(sres.st_uid)
    user = pwent.pw_name
  except KeyError:
    user = sres.st_uid

  try:
    grent = grp.getgrgid(sres.st_gid)
    group = grent.gr_name
  except KeyError:
    group = sres.st_gid

  return '%s %3d %10s %10s %10d %s' % (
      format_mode(sres),
      sres.st_nlink,
      user,
      group,
      sres.st_size,
      format_mtime(sres.st_mtime),
  )
def get_listing(path):
  """
  Returns the list of files and directories in a path.
  Prepends a ".." (parent directory link) if the path is not the current dir.
  """
  if path != ".":
    listing = sorted(['..'] + os.listdir(path))
  else:
    listing = sorted(os.listdir(path))
  return listing
def get_stat(path, filename):
  ''' get stat '''
  return os.stat(os.path.join(path, filename))
def read_chunk(filename, offset=-1, length=-1, escape_data=False):
  """
  Read a chunk of a file from an offset, up to the given length.
  """
  try:
    length = int(length)
    offset = int(offset)
  except ValueError:
    return {}

  if not os.path.isfile(filename):
    return {}

  try:
    fstat = os.stat(filename)
  except Exception:
    return {}

  if offset == -1:
    offset = fstat.st_size

  if length == -1:
    length = fstat.st_size - offset

  with open(filename, "r") as fp:
    fp.seek(offset)
    try:
      data = fp.read(length)
    except IOError:
      return {}

  if data:
    data = _escape_data(data) if escape_data else data
    return dict(offset=offset, length=len(data), data=data)

  return dict(offset=offset, length=0)
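A usage sketch (the file name here is hypothetical): read the first kilobyte of a log and print it.

chunk = read_chunk("heron-executor.log", offset=0, length=1024)
print(chunk.get("data", ""))  # empty when the file is missing or unreadable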
def pipe(prev_proc, to_cmd):
  """
  Pipes the output of prev_proc into to_cmd.
  Returns the piped process.
  """
  stdin = None if prev_proc is None else prev_proc.stdout
  process = subprocess.Popen(to_cmd,
                             stdout=subprocess.PIPE,
                             stdin=stdin)
  if prev_proc is not None:
    prev_proc.stdout.close()  # allow prev_proc to receive a SIGPIPE
  return process
def str_cmd(cmd, cwd, env):
  """
  Runs the command and returns its stdout and stderr.
  """
  process = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             cwd=cwd, env=env)
  stdout_builder, stderr_builder = proc.async_stdout_stderr_builder(process)
  process.wait()
  stdout, stderr = stdout_builder.result(), stderr_builder.result()
  return {'command': ' '.join(cmd), 'stderr': stderr, 'stdout': stdout}
def chain(cmd_list):
  """
  Feed the output of one command to the next and return the final output.
  Returns the string output of the chained application of commands.
  """
  command = ' | '.join(map(lambda x: ' '.join(x), cmd_list))
  chained_proc = functools.reduce(pipe, [None] + cmd_list)
  stdout_builder = proc.async_stdout_builder(chained_proc)
  chained_proc.wait()
  return {
      'command': command,
      'stdout': stdout_builder.result()
  }
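For example, chain builds the equivalent of a shell pipeline. Assuming the pipe and proc helpers above are in scope, this runs the equivalent of `ps aux | grep heron`:

out = chain([['ps', 'aux'], ['grep', 'heron']])
print(out['command'])  # -> ps aux | grep heron
print(out['stdout'])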
def create_parser(subparsers): """ create parser """ metrics_parser = subparsers.add_parser( 'metrics', help='Display info of a topology\'s metrics', usage="%(prog)s cluster/[role]/[env] topology-name [options]", add_help=False) args.add_cluster_role_env(metrics_parser) args.add_topology_name(metrics_parser) args.add_verbose(metrics_parser) args.add_tracker_url(metrics_parser) args.add_config(metrics_parser) args.add_component_name(metrics_parser) metrics_parser.set_defaults(subcommand='metrics') containers_parser = subparsers.add_parser( 'containers', help='Display info of a topology\'s containers metrics', usage="%(prog)s cluster/[role]/[env] topology-name [options]", add_help=False) args.add_cluster_role_env(containers_parser) args.add_topology_name(containers_parser) args.add_verbose(containers_parser) args.add_tracker_url(containers_parser) args.add_config(containers_parser) args.add_container_id(containers_parser) containers_parser.set_defaults(subcommand='containers') return subparsers
def parse_topo_loc(cl_args):
  """ parse topology location """
  try:
    topo_loc = cl_args['cluster/[role]/[env]'].split('/')
    topo_name = cl_args['topology-name']
    topo_loc.append(topo_name)
    if len(topo_loc) != 4:
      raise ValueError("Unexpected location format")
    return topo_loc
  except Exception:
    Log.error('Invalid topology location')
    raise
def to_table(metrics):
  """ normalize raw metrics API result to table """
  all_queries = tracker_access.metric_queries()
  m = tracker_access.queries_map()
  # list() keeps this working on Python 3, where dict views are not indexable
  names = list(metrics.values())[0].keys()
  stats = []
  for n in names:
    info = [n]
    for field in all_queries:
      try:
        info.append(str(metrics[field][n]))
      except KeyError:
        pass
    stats.append(info)
  header = ['container id'] + [m[k] for k in all_queries if k in metrics.keys()]
  return stats, header
def run_metrics(command, parser, cl_args, unknown_args):
  """ run metrics subcommand """
  cluster, role, env = cl_args['cluster'], cl_args['role'], cl_args['environ']
  topology = cl_args['topology-name']
  try:
    result = tracker_access.get_topology_info(cluster, env, topology, role)
    spouts = list(result['physical_plan']['spouts'].keys())
    bolts = list(result['physical_plan']['bolts'].keys())
    components = spouts + bolts
    cname = cl_args['component']
    if cname:
      if cname in components:
        components = [cname]
      else:
        Log.error('Unknown component: \'%s\'' % cname)
        return False
  except Exception:
    Log.error("Failed to connect to tracker: '%s'", cl_args["tracker_url"])
    return False

  cresult = []
  for comp in components:
    try:
      metrics = tracker_access.get_component_metrics(comp, cluster, env, topology, role)
    except Exception:
      Log.error("Failed to connect to tracker: '%s'", cl_args["tracker_url"])
      return False
    stat, header = to_table(metrics)
    cresult.append((comp, stat, header))

  for i, (comp, stat, header) in enumerate(cresult):
    if i != 0:
      print('')
    print('\'%s\' metrics:' % comp)
    print(tabulate(stat, headers=header))
  return True
def run_bolts(command, parser, cl_args, unknown_args):
  """ run bolts subcommand """
  cluster, role, env = cl_args['cluster'], cl_args['role'], cl_args['environ']
  topology = cl_args['topology-name']
  try:
    result = tracker_access.get_topology_info(cluster, env, topology, role)
    bolts = list(result['physical_plan']['bolts'].keys())
    bolt_name = cl_args['bolt']
    if bolt_name:
      if bolt_name in bolts:
        bolts = [bolt_name]
      else:
        Log.error('Unknown bolt: \'%s\'' % bolt_name)
        return False
  except Exception:
    Log.error("Failed to connect to tracker: '%s'", cl_args["tracker_url"])
    return False

  bolts_result = []
  for bolt in bolts:
    try:
      metrics = tracker_access.get_component_metrics(bolt, cluster, env, topology, role)
      stat, header = to_table(metrics)
      bolts_result.append((bolt, stat, header))
    except Exception:
      Log.error("Failed to connect to tracker: '%s'", cl_args["tracker_url"])
      return False

  for i, (bolt, stat, header) in enumerate(bolts_result):
    if i != 0:
      print('')
    print('\'%s\' metrics:' % bolt)
    print(tabulate(stat, headers=header))
  return True
def run_containers(command, parser, cl_args, unknown_args):
  """ run containers subcommand """
  cluster, role, env = cl_args['cluster'], cl_args['role'], cl_args['environ']
  topology = cl_args['topology-name']
  container_id = cl_args['id']
  try:
    result = tracker_access.get_topology_info(cluster, env, topology, role)
  except Exception:
    Log.error("Failed to connect to tracker: '%s'", cl_args["tracker_url"])
    return False

  containers = result['physical_plan']['stmgrs']
  all_bolts, all_spouts = set(), set()
  for _, bolts in result['physical_plan']['bolts'].items():
    all_bolts = all_bolts | set(bolts)
  for _, spouts in result['physical_plan']['spouts'].items():
    all_spouts = all_spouts | set(spouts)

  stmgrs = sorted(containers.keys())

  if container_id is not None:
    try:
      normalized_cid = container_id - 1
      if normalized_cid < 0:
        raise ValueError("Container id must be positive")
      stmgrs = [stmgrs[normalized_cid]]
    except (ValueError, IndexError):
      Log.error('Invalid container id: %d' % container_id)
      return False

  table = []
  for sid, name in enumerate(stmgrs):
    cid = sid + 1
    host = containers[name]["host"]
    port = containers[name]["port"]
    pid = containers[name]["pid"]
    instances = containers[name]["instance_ids"]
    bolt_nums = len([instance for instance in instances if instance in all_bolts])
    spout_nums = len([instance for instance in instances if instance in all_spouts])
    table.append([cid, host, port, pid, bolt_nums, spout_nums, len(instances)])

  headers = ["container", "host", "port", "pid", "#bolt", "#spout", "#instance"]
  sys.stdout.flush()
  print(tabulate(table, headers=headers))
  return True
def define_options(address, port, tracker_url, base_url):
  '''
  :param address:
  :param port:
  :param tracker_url:
  :param base_url:
  :return:
  '''
  define("address", default=address)
  define("port", default=port)
  define("tracker_url", default=tracker_url)
  define("base_url", default=base_url)
def main():
  '''
  :return:
  '''
  log.configure(logging.DEBUG)
  tornado.log.enable_pretty_logging()

  # create the parser and parse the arguments
  (parser, child_parser) = args.create_parsers()
  (parsed_args, remaining) = parser.parse_known_args()

  if remaining:
    r = child_parser.parse_args(args=remaining, namespace=parsed_args)
    namespace = vars(r)
    if 'version' in namespace:
      common_config.print_build_info(zipped_pex=True)
    else:
      parser.print_help()
    parser.exit()

  # log additional information
  command_line_args = vars(parsed_args)

  Log.info("Listening at http://%s:%d%s", command_line_args['address'],
           command_line_args['port'], command_line_args['base_url'])
  Log.info("Using tracker url: %s", command_line_args['tracker_url'])

  # pass the options to tornado and start the ui server
  define_options(command_line_args['address'],
                 command_line_args['port'],
                 command_line_args['tracker_url'],
                 command_line_args['base_url'])
  http_server = tornado.httpserver.HTTPServer(Application(command_line_args['base_url']))
  http_server.listen(command_line_args['port'], address=command_line_args['address'])

  # pylint: disable=unused-argument
  # stop Tornado IO loop
  def signal_handler(signum, frame):
    # start a new line after the ^C character because this looks nice
    print('\n', end='')
    Log.debug('SIGINT received. Stopping UI')
    tornado.ioloop.IOLoop.instance().stop()

  # associate SIGINT and SIGTERM with a handler
  signal.signal(signal.SIGINT, signal_handler)
  signal.signal(signal.SIGTERM, signal_handler)

  # start Tornado IO loop
  tornado.ioloop.IOLoop.instance().start()
def spec(cls, name=None, inputs=None, par=1, config=None, optional_outputs=None):
  """Register this bolt to the topology and create a ``HeronComponentSpec``

  This method takes an optional ``outputs`` argument for supporting dynamic
  output field declaration. However, it is recommended that ``outputs`` should
  be declared as an attribute of your ``Bolt`` subclass. Also, some ways of
  declaring inputs are not supported in this implementation; please read the
  documentation below.

  :type name: str
  :param name: Name of this bolt.
  :type inputs: dict or list
  :param inputs: Streams that feed into this Bolt.

                 Four forms of this are acceptable:

                 1. A `dict` mapping from ``HeronComponentSpec`` to ``Grouping``.
                    In this case, the default stream is used.
                 2. A `dict` mapping from ``GlobalStreamId`` to ``Grouping``.
                    This ``GlobalStreamId`` object itself is different from StreamParse,
                    because Heron does not use thrift, although its constructor
                    method is compatible.
                 3. A `list` of ``HeronComponentSpec``. In this case, the default
                    stream with SHUFFLE grouping is used.
                 4. A `list` of ``GlobalStreamId``. In this case, SHUFFLE grouping is used.
  :type par: int
  :param par: Parallelism hint for this bolt.
  :type config: dict
  :param config: Component-specific config settings.
  :type optional_outputs: list of (str or Stream) or tuple of (str or Stream)
  :param optional_outputs: Additional output fields for this bolt. These fields
                           are added to the existing ``outputs`` class attributes
                           of your bolt. This is an optional argument, and exists
                           only for supporting dynamic output field declaration.
  """
  python_class_path = "%s.%s" % (cls.__module__, cls.__name__)

  if hasattr(cls, 'outputs'):
    # avoid modification to cls.outputs
    _outputs = copy.copy(cls.outputs)
  else:
    _outputs = []

  if optional_outputs is not None:
    assert isinstance(optional_outputs, (list, tuple))
    for out in optional_outputs:
      assert isinstance(out, (str, Stream))
      _outputs.append(out)

  return HeronComponentSpec(name, python_class_path, is_spout=False, par=par,
                            inputs=inputs, outputs=_outputs, config=config)
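A wiring sketch for the first input form (a dict from spec to grouping). The spout/bolt classes and the field name are hypothetical, and Grouping is assumed to expose a fields() helper as in heronpy:

word_spout = WordSpout.spec(name="word-spout", par=2)
count_bolt = CountBolt.spec(name="count-bolt",
                            inputs={word_spout: Grouping.fields('word')},
                            par=3)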
def emit(self, tup, stream=Stream.DEFAULT_STREAM_ID,
         anchors=None, direct_task=None, need_task_ids=False):
  """Emits a new tuple from this Bolt

  It is compatible with the StreamParse API.

  :type tup: list or tuple
  :param tup: the new output Tuple to send from this bolt,
              which should contain only serializable data.
  :type stream: str
  :param stream: the ID of the stream to emit this Tuple to.
                 Leave empty to emit to the default stream.
  :type anchors: list
  :param anchors: a list of HeronTuples to which the emitted Tuples should be anchored.
  :type direct_task: int
  :param direct_task: the task to send the Tuple to if performing a direct emit.
  :type need_task_ids: bool
  :param need_task_ids: indicate whether or not you would like the task IDs
                        to which the Tuple was emitted.
  """
  self.delegate.emit(tup, stream, anchors, direct_task, need_task_ids)
def create_parser(subparsers): """ create argument parser """ parser = subparsers.add_parser( 'clusters', help='Display existing clusters', usage="%(prog)s [options]", add_help=True) args.add_verbose(parser) args.add_tracker_url(parser) parser.set_defaults(subcommand='clusters') return subparsers
def run(command, parser, cl_args, unknown_args): """ run command """ try: clusters = tracker_access.get_clusters() except: Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"]) return False print('Available clusters:') for cluster in clusters: print(' %s' % cluster) return True
def get_time_ranges(ranges):
  '''
  :param ranges:
  :return:
  '''
  # get the current time
  now = int(time.time())

  # form the new time slots relative to the current time
  time_slots = dict()
  for key, value in ranges.items():
    time_slots[key] = (now - value[0], now - value[1], value[2])

  return (now, time_slots)
def add_arguments(parser):
  """ add arguments """
  default_config_file = os.path.join(
      utils.get_heron_tracker_conf_dir(), constants.DEFAULT_CONFIG_FILE)

  parser.add_argument(
      '--config-file',
      metavar='(a string; path to config file; default: "' + default_config_file + '")',
      default=default_config_file)

  parser.add_argument(
      '--type',
      metavar='(a string; type of state manager (zookeeper or file, etc.); example: '
              + str(constants.DEFAULT_STATE_MANAGER_TYPE) + ')',
      choices=["file", "zookeeper"])

  parser.add_argument(
      '--name',
      metavar='(a string; name to be used for the state manager; example: '
              + str(constants.DEFAULT_STATE_MANAGER_NAME) + ')')

  parser.add_argument(
      '--rootpath',
      metavar='(a string; where all the states are stored; example: '
              + str(constants.DEFAULT_STATE_MANAGER_ROOTPATH) + ')')

  parser.add_argument(
      '--tunnelhost',
      metavar='(a string; if ssh tunneling needs to be established to connect to it; example: '
              + str(constants.DEFAULT_STATE_MANAGER_TUNNELHOST) + ')')

  parser.add_argument(
      '--hostport',
      metavar='(a string; only used to connect to zk, must be of the form \'host:port\';'
              ' example: ' + str(constants.DEFAULT_STATE_MANAGER_HOSTPORT) + ')')

  parser.add_argument(
      '--port',
      metavar='(an integer; port to listen; default: ' + str(constants.DEFAULT_PORT) + ')',
      type=int,
      default=constants.DEFAULT_PORT)

  parser.add_argument(
      '--verbose',
      action='store_true')

  return parser
def create_parsers():
  """ create argument parser """
  parser = argparse.ArgumentParser(
      epilog='For detailed documentation, go to http://github.com/apache/incubator-heron',
      usage="%(prog)s [options] [help]",
      add_help=False)

  parser = add_titles(parser)
  parser = add_arguments(parser)

  ya_parser = argparse.ArgumentParser(
      parents=[parser],
      formatter_class=SubcommandHelpFormatter,
      add_help=False)

  subparsers = ya_parser.add_subparsers(
      title="Available commands")

  help_parser = subparsers.add_parser(
      'help',
      help='Prints help',
      add_help=False)

  help_parser.set_defaults(help=True)

  subparsers.add_parser(
      'version',
      help='Prints version',
      add_help=True)

  return parser, ya_parser
def main(): """ main """ # create the parser and parse the arguments (parser, _) = create_parsers() (args, remaining) = parser.parse_known_args() if remaining == ['help']: parser.print_help() parser.exit() elif remaining == ['version']: common_config.print_build_info() parser.exit() elif remaining != []: Log.error('Invalid subcommand') sys.exit(1) namespace = vars(args) log.set_logging_level(namespace) # set Tornado global option define_options(namespace['port'], namespace['config_file']) config = Config(create_tracker_config(namespace)) # create Tornado application application = Application(config) # pylint: disable=unused-argument # SIGINT handler: # 1. stop all the running zkstatemanager and filestatemanagers # 2. stop the Tornado IO loop def signal_handler(signum, frame): # start a new line after ^C character because this looks nice print('\n', end='') application.stop() tornado.ioloop.IOLoop.instance().stop() # associate SIGINT and SIGTERM with a handler signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) Log.info("Running on port: %d", namespace['port']) if namespace["config_file"]: Log.info("Using config file: %s", namespace['config_file']) Log.info("Using state manager:\n" + str(config)) http_server = tornado.httpserver.HTTPServer(application) http_server.listen(namespace['port']) tornado.ioloop.IOLoop.instance().start()
def make_tuple(stream, tuple_key, values, roots=None):
  """Creates a HeronTuple

  :param stream: protobuf message ``StreamId``
  :param tuple_key: tuple id
  :param values: a list of values
  :param roots: a list of protobuf message ``RootId``
  """
  component_name = stream.component_name
  stream_id = stream.id
  gen_task = roots[0].taskid if roots is not None and len(roots) > 0 else None
  return HeronTuple(id=str(tuple_key), component=component_name, stream=stream_id,
                    task=gen_task, values=values, creation_time=time.time(), roots=roots)
def make_tick_tuple():
  """Creates a TickTuple"""
  return HeronTuple(id=TupleHelper.TICK_TUPLE_ID, component=TupleHelper.TICK_SOURCE_COMPONENT,
                    stream=TupleHelper.TICK_TUPLE_ID, task=None, values=None,
                    creation_time=time.time(), roots=None)
def make_root_tuple_info(stream_id, tuple_id):
  """Creates a RootTupleInfo"""
  key = random.getrandbits(TupleHelper.MAX_SFIXED64_RAND_BITS)
  return RootTupleInfo(stream_id=stream_id, tuple_id=tuple_id,
                       insertion_time=time.time(), key=key)
def fetch_backpressure(self, cluster, metric, topology, component, instance,
                       timerange, is_max, environ=None):
  '''
  :param cluster:
  :param metric:
  :param topology:
  :param component:
  :param instance:
  :param timerange:
  :param is_max:
  :param environ:
  :return:
  '''
  pass
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of line error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store.  Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if matched:
    if matched.group(1):
      suppressed_line = linenum + 1
    else:
      suppressed_line = linenum
    category = matched.group(2)
    if category in (None, '(*)'):  # => "suppress all"
      _error_suppressions.setdefault(None, set()).add(suppressed_line)
    else:
      if category.startswith('(') and category.endswith(')'):
        category = category[1:-1]
        if category in _ERROR_CATEGORIES:
          _error_suppressions.setdefault(category, set()).add(suppressed_line)
        elif category not in _LEGACY_ERROR_CATEGORIES:
          error(filename, linenum, 'readability/nolint', 5,
                'Unknown NOLINT error category: %s' % category)
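To see what the NOLINT regex above accepts, here is a tiny standalone check (using re directly rather than the cached Search helper defined below):

import re

for line in ['int x;  // NOLINT',
             'int y;  // NOLINT(runtime/int)',
             '// NOLINTNEXTLINE(whitespace/braces)']:
  m = re.search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', line)
  print(m.group(1), m.group(2))
# -> None None / None (runtime/int) / NEXTLINE (whitespace/braces)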
def ProcessGlobalSuppresions(lines):
  """Updates the list of global error suppressions.

  Parses any lint directives in the file that have global effect.

  Args:
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
  """
  for line in lines:
    if _SEARCH_C_FILE.search(line):
      for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
        _global_error_suppressions[category] = True
    if _SEARCH_KERNEL_FILE.search(line):
      for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
        _global_error_suppressions[category] = True
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment or
    global suppression.
  """
  return (_global_error_suppressions.get(category, False) or
          linenum in _error_suppressions.get(category, set()) or
          linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp."""
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
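The shared pattern cache means each regexp is compiled once per process, however many lines it is applied to. A usage sketch, assuming Search and its sre_compile import are in scope and the cache dict is defined at module level:

_regexp_compile_cache = {}  # normally defined once at module level

for line in ['foo 12', 'bar 34']:
  m = Search(r'\d+', line)  # compiled on the first call, reused afterwards
  print(m.group(0))
# -> 12, then 34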