sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def _get_java_env(self):
    """Pass the VCAP services through the environment to the Java submission.

    Extends the inherited environment with a ``VCAP_SERVICES`` variable
    containing the resolved VCAP services as a JSON string.

    Returns:
        dict: Environment variables for the Java submission process.
    """
    env = super(_StreamingAnalyticsSubmitter, self)._get_java_env()
    # Resolve the VCAP services (file path, dict or env var) to a dict.
    vcap = streamsx.rest._get_vcap_services(self._vcap_services)
    env['VCAP_SERVICES'] = json.dumps(vcap)
    return env
Pass the VCAP through the environment to the java submission
entailment
def _get_java_env(self):
    """Set environment variables for the Java submission from the
    streams connection, if one was set.

    Returns:
        dict: Environment variables for the Java submission process.
    """
    env = super(_DistributedSubmitter, self)._get_java_env()
    if self._streams_connection is not None:
        # Need to ensure the environment matches the connection.
        sc = self._streams_connection
        if isinstance(sc._delegator, streamsx.rest_primitives._StreamsRestDelegator):
            # REST-delegated connection: domain/instance ids must not leak in.
            env.pop('STREAMS_DOMAIN_ID', None)
            env.pop('STREAMS_INSTANCE_ID', None)
        else:
            # assumes the first domain is the relevant one — TODO confirm
            env['STREAMS_DOMAIN_ID'] = sc.get_domains()[0].id
        if not ConfigParams.SERVICE_DEFINITION in self._config():
            # No service definition supplied: pass REST URL and credentials.
            env['STREAMS_REST_URL'] = sc.resource_url
            env['STREAMS_USERNAME'] = sc.session.auth[0]
            env['STREAMS_PASSWORD'] = sc.session.auth[1]
    return env
Set env vars from connection if set
entailment
def from_overlays(overlays):
    """Create a `JobConfig` instance from a full job configuration
    overlays object.

    All logical items, such as ``comment`` and ``job_name``, are
    extracted from `overlays`. The remaining information in the single
    job config overlay in ``overlays`` is set as ``raw_overlay``.

    Args:
        overlays(dict): Full job configuration overlays object.

    Returns:
        JobConfig: Instance representing logical view of `overlays`.

    .. versionadded:: 1.9
    """
    jc = JobConfig()
    jc.comment = overlays.get('comment')
    if 'jobConfigOverlays' in overlays:
        # Only the first overlay is considered; others are ignored.
        if len(overlays['jobConfigOverlays']) >= 1:
            # Deep copy so pops below do not mutate the caller's dict.
            jco = copy.deepcopy(overlays['jobConfigOverlays'][0])
            # Now extract the logical information
            if 'jobConfig' in jco:
                _jc = jco['jobConfig']
                jc.job_name = _jc.pop('jobName', None)
                jc.job_group = _jc.pop('jobGroup', None)
                jc.preload = _jc.pop('preloadApplicationBundles', False)
                jc.data_directory = _jc.pop('dataDirectory', None)
                jc.tracing = _jc.pop('tracing', None)
                for sp in _jc.pop('submissionParameters', []):
                    jc.submission_parameters[sp['name']] = sp['value']
                if not _jc:
                    # Everything was logical; drop the now-empty section.
                    del jco['jobConfig']
            if 'deploymentConfig' in jco:
                _dc = jco['deploymentConfig']
                if 'manual' == _dc.get('fusionScheme'):
                    if 'fusionTargetPeCount' in _dc:
                        jc.target_pe_count = _dc.pop('fusionTargetPeCount')
                    if len(_dc) == 1:
                        # Only fusionScheme remains; it is implied by
                        # target_pe_count, so drop the section.
                        del jco['deploymentConfig']
            if jco:
                # Whatever was not extracted stays as the raw overlay.
                jc.raw_overlay = jco
    return jc
Create a `JobConfig` instance from a full job configuration overlays object. All logical items, such as ``comment`` and ``job_name``, are extracted from `overlays`. The remaining information in the single job config overlay in ``overlays`` is set as ``raw_overlay``. Args: overlays(dict): Full job configuration overlays object. Returns: JobConfig: Instance representing logical view of `overlays`. .. versionadded:: 1.9
entailment
def _add_overlays(self, config): """ Add this as a jobConfigOverlays JSON to config. """ if self._comment: config['comment'] = self._comment jco = {} config["jobConfigOverlays"] = [jco] if self._raw_overlay: jco.update(self._raw_overlay) jc = jco.get('jobConfig', {}) if self.job_name is not None: jc["jobName"] = self.job_name if self.job_group is not None: jc["jobGroup"] = self.job_group if self.data_directory is not None: jc["dataDirectory"] = self.data_directory if self.preload: jc['preloadApplicationBundles'] = True if self.tracing is not None: jc['tracing'] = self.tracing if self.submission_parameters: sp = jc.get('submissionParameters', []) for name in self.submission_parameters: sp.append({'name': str(name), 'value': self.submission_parameters[name]}) jc['submissionParameters'] = sp if jc: jco["jobConfig"] = jc if self.target_pe_count is not None and self.target_pe_count >= 1: deployment = jco.get('deploymentConfig', {}) deployment.update({'fusionScheme' : 'manual', 'fusionTargetPeCount' : self.target_pe_count}) jco["deploymentConfig"] = deployment return config
Add this as a jobConfigOverlays JSON to config.
entailment
def job(self):
    """REST binding for the job associated with the submitted build.

    Returns:
        Job: REST binding for the running job, or ``None`` if connection
        information was not available or no job was submitted.
    """
    submitter = self._submitter
    if not (submitter and hasattr(submitter, '_job_access')):
        return None
    return submitter._job_access()
REST binding for the job associated with the submitted build. Returns: Job: REST binding for running job or ``None`` if connection information was not available or no job was submitted.
entailment
def cancel_job_button(self, description=None):
    """Display a button that will cancel the submitted job.

    Used in a Jupyter IPython notebook to provide an interactive
    mechanism to cancel a job submitted from the notebook.

    Once clicked the button is disabled unless the cancel fails.

    A job may be cancelled directly using::

        submission_result = submit(ctx_type, topology, config)
        submission_result.job.cancel()

    Args:
        description(str): Text used as the button description, defaults
            to a value based upon the job name.

    .. warning::
        Behavior when called outside a notebook is undefined.

    .. versionadded:: 1.12
    """
    # No job was submitted; nothing to cancel.
    if not hasattr(self, 'jobId'):
        return
    try:
        # Imported lazily: ipywidgets is only needed inside a notebook.
        import ipywidgets as widgets
        if not description:
            description = 'Cancel job: '
            description += self.name if hasattr(self, 'name') else self.job.name
        button = widgets.Button(description=description,
                                button_style='danger',
                                layout=widgets.Layout(width='40%'))
        out = widgets.Output()
        vb = widgets.VBox([button, out])

        @out.capture(clear_output=True)
        def _cancel_job_click(b):
            # Disable immediately to prevent double-cancel clicks.
            b.disabled = True
            print('Cancelling job: id=' + str(self.job.id) + ' ...\n', flush=True)
            try:
                rc = self.job.cancel()
                out.clear_output()
                if rc:
                    print('Cancelled job: id=' + str(self.job.id) + ' : ' + self.job.name + '\n', flush=True)
                else:
                    print('Job already cancelled: id=' + str(self.job.id) + ' : ' + self.job.name + '\n', flush=True)
            except:
                # Cancel failed: re-enable the button and propagate.
                b.disabled = False
                out.clear_output()
                raise

        button.on_click(_cancel_job_click)
        display(vb)
    except:
        # Deliberate best-effort: outside a notebook (no ipywidgets or
        # no display) this silently does nothing.
        pass
Display a button that will cancel the submitted job. Used in a Jupyter IPython notebook to provide an interactive mechanism to cancel a job submitted from the notebook. Once clicked the button is disabled unless the cancel fails. A job may be cancelled directly using:: submission_result = submit(ctx_type, topology, config) submission_result.job.cancel() Args: description(str): Text used as the button description, defaults to value based upon the job name. .. warning:: Behavior when called outside a notebook is undefined. .. versionadded:: 1.12
entailment
def add_default(self, key: str, value: Optional[str], default_type: type = str) -> None:
    """Register a default value and type for a key.

    :param key: Key
    :param value: *Serialized* default value, i.e. a string or ``None``.
    :param default_type: The type to unserialize values for this key to,
                         defaults to ``str``.
    """
    entry = HierarkeyDefault(value, default_type)
    self.defaults[key] = entry
Adds a default value and a default type for a key. :param key: Key :param value: *Serialized* default value, i.e. a string or ``None``. :param default_type: The type to unserialize values for this key to, defaults to ``str``.
entailment
def add_type(self, type: type, serialize: Callable[[Any], str], unserialize: Callable[[str], Any]) -> None:
    """Register (de)serialization support for a new type.

    :param type: The type to add support for.
    :param serialize: A callable taking an object of type ``type`` and
                      returning a string.
    :param unserialize: A callable taking a string and returning an
                        object of type ``type``.
    """
    entry = HierarkeyType(type=type, serialize=serialize, unserialize=unserialize)
    self.types.append(entry)
Adds serialization support for a new type. :param type: The type to add support for. :param serialize: A callable that takes an object of type ``type`` and returns a string. :param unserialize: A callable that takes a string and returns an object of type ``type``.
entailment
def set_global(self, cache_namespace: str = None) -> type:
    """Decorator. Attaches the global key-value store of this hierarchy
    to an object.

    :param cache_namespace: Optional. A custom namespace used for
        caching. By default this is constructed from the name of the
        class this is applied to and the ``attribute_name`` of this
        ``Hierarkey`` object.
    """
    # Guard against using this as a bare decorator (@set_global instead
    # of @set_global()): the decorated class would arrive here.
    if isinstance(cache_namespace, type):
        raise ImproperlyConfigured('Incorrect decorator usage, you need to use .add_global() instead of .add_global')

    def wrapper(wrapped_class):
        if issubclass(wrapped_class, models.Model):
            raise ImproperlyConfigured('Hierarkey.add_global() can only be invoked on a normal class, not on a Django model.')
        if not issubclass(wrapped_class, GlobalSettingsBase):
            raise ImproperlyConfigured('You should use .add_global() on a class that inherits from GlobalSettingsBase.')
        _cache_namespace = cache_namespace or ('%s_%s' % (wrapped_class.__name__, self.attribute_name))
        attrs = self._create_attrs(wrapped_class)
        model_name = '%s_%sStore' % (wrapped_class.__name__, self.attribute_name.title())
        if getattr(sys.modules[wrapped_class.__module__], model_name, None):
            # Already wrapped
            return wrapped_class
        kv_model = self._create_model(model_name, attrs)

        # The store model takes (and discards) an ``object`` kwarg so it
        # has the same constructor shape as the per-object store models.
        def init(self, *args, object=None, **kwargs):
            super(kv_model, self).__init__(*args, **kwargs)
        setattr(kv_model, '__init__', init)

        # Capture the Hierarkey instance for the property closure below.
        hierarkey = self

        def prop(iself):
            from .proxy import HierarkeyProxy
            # Cache the proxy on the instance so repeated attribute
            # access reuses one proxy object.
            attrname = '_hierarkey_proxy_{}_{}'.format(_cache_namespace, self.attribute_name)
            cached = getattr(iself, attrname, None)
            if not cached:
                cached = HierarkeyProxy._new(iself,
                                             type=kv_model,
                                             hierarkey=hierarkey,
                                             cache_namespace=_cache_namespace)
                setattr(iself, attrname, cached)
            return cached

        # Register the generated model in the wrapped class's module so
        # Django migrations can discover it.
        setattr(sys.modules[wrapped_class.__module__], model_name, kv_model)
        setattr(wrapped_class, '_%s_objects' % self.attribute_name, kv_model.objects)
        setattr(wrapped_class, self.attribute_name, property(prop))
        self.global_class = wrapped_class
        return wrapped_class

    return wrapper
Decorator. Attaches the global key-value store of this hierarchy to an object. :param cache_namespace: Optional. A custom namespace used for caching. By default this is constructed from the name of the class this is applied to and the ``attribute_name`` of this ``Hierarkey`` object.
entailment
def add(self, cache_namespace: str = None, parent_field: str = None) -> type:
    """Decorator. Attaches a key-value store to a Django model.

    :param cache_namespace: Optional. A custom namespace used for
        caching. By default this is constructed from the name of the
        class this is applied to and the ``attribute_name`` of this
        ``Hierarkey`` object.
    :param parent_field: Optional. The name of a field of this model
        that refers to the parent in the hierarchy. This must be a
        ``ForeignKey`` field.
    """
    # Guard against using this as a bare decorator (@add instead of
    # @add()): the decorated class would arrive here.
    if isinstance(cache_namespace, type):
        raise ImproperlyConfigured('Incorrect decorator usage, you need to use .add() instead of .add')

    def wrapper(model):
        if not issubclass(model, models.Model):
            raise ImproperlyConfigured('Hierarkey.add() can only be invoked on a Django model')
        _cache_namespace = cache_namespace or ('%s_%s' % (model.__name__, self.attribute_name))
        attrs = self._create_attrs(model)
        # Per-object store: each row is linked back to its owning object.
        attrs['object'] = models.ForeignKey(model, related_name='_%s_objects' % self.attribute_name, on_delete=models.CASCADE)
        model_name = '%s_%sStore' % (model.__name__, self.attribute_name.title())
        kv_model = self._create_model(model_name, attrs)
        # Register the generated model in the model's module so Django
        # migrations can discover it.
        setattr(sys.modules[model.__module__], model_name, kv_model)

        # Capture the Hierarkey instance for the property closure below.
        hierarkey = self

        def prop(iself):
            from .proxy import HierarkeyProxy
            # Cache the proxy on the instance so repeated attribute
            # access reuses one proxy object.
            attrname = '_hierarkey_proxy_{}_{}'.format(_cache_namespace, self.attribute_name)
            cached = getattr(iself, attrname, None)
            if not cached:
                try:
                    parent = getattr(iself, parent_field) if parent_field else None
                except models.ObjectDoesNotExist:  # pragma: no cover
                    parent = None
                # Fall back to the global settings object as the parent.
                if not parent and hierarkey.global_class:
                    parent = hierarkey.global_class()
                cached = HierarkeyProxy._new(
                    iself,
                    type=kv_model,
                    hierarkey=hierarkey,
                    parent=parent,
                    cache_namespace=_cache_namespace
                )
                setattr(iself, attrname, cached)
            return cached

        setattr(model, self.attribute_name, property(prop))
        return model

    return wrapper
Decorator. Attaches a global key-value store to a Django model. :param cache_namespace: Optional. A custom namespace used for caching. By default this is constructed from the name of the class this is applied to and the ``attribute_name`` of this ``Hierarkey`` object. :param parent_field: Optional. The name of a field of this model that refers to the parent in the hierarchy. This must be a ``ForeignKey`` field.
entailment
def _as_spl_expr(value):
    """Return `value` converted to an SPL expression if needed,
    otherwise return `value` unchanged.
    """
    import streamsx._streams._numpy
    # Already an SPL expression (or expression-like): pass through.
    if hasattr(value, 'spl_json'):
        return value
    # Enums are represented by their member name as an SPL expression.
    if isinstance(value, Enum):
        value = streamsx.spl.op.Expression.expression(value.name)
    # Numpy scalar types get their own conversion, if applicable.
    npcnv = streamsx._streams._numpy.as_spl_expr(value)
    if npcnv is not None:
        return npcnv
    return value
Return value converted to an SPL expression if needed other otherwise value.
entailment
def _unique_id(self, prefix): """ Generate a unique (within the graph) identifer internal to graph generation. """ _id = self._id_gen self._id_gen += 1 return prefix + str(_id)
Generate a unique (within the graph) identifer internal to graph generation.
entailment
def _requested_name(self, name, action=None, func=None): """Create a unique name for an operator or a stream. """ if name is not None: if name in self._used_names: # start at 2 for the "second" one of this name n = 2 while True: pn = name + '_' + str(n) if pn not in self._used_names: self._used_names.add(pn) return pn n += 1 else: self._used_names.add(name) return name if func is not None: if hasattr(func, '__name__'): name = func.__name__ if name == '<lambda>': # Avoid use of <> characters in name # as they are converted to unicode # escapes in SPL identifier name = action + '_lambda' elif hasattr(func, '__class__'): name = func.__class__.__name__ if name is None: if action is not None: name = action else: name = self.name # Recurse once to get unique version of name return self._requested_name(name)
Create a unique name for an operator or a stream.
entailment
def colocate(self, others, why):
    """Colocate this operator with other operators by applying a
    shared colocation tag.
    """
    # Markers are virtual operators; colocation does not apply.
    if isinstance(self, Marker):
        return
    tag = '__spl_' + why + '$' + str(self.index)
    self._colocate_tag(tag)
    for other in others:
        other._colocate_tag(tag)
Colocate this operator with another.
entailment
def main_composite(kind, toolkits=None, name=None):
    """Wrap a main composite invocation as a `Topology`.

    Provides a bridge between an SPL application (main composite) and a
    `Topology`. Create a `Topology` that contains just the invocation of
    the main composite defined by `kind`. The returned `Topology` may be
    used like any other topology instance including job configuration,
    tester or even addition of SPL operator invocations or functional
    transformations.

    .. note:: Since a main composite by definition has no input or
        output ports any functionality added to the topology cannot
        interact directly with its invocation.

    Args:
        kind(str): Kind of the main composite operator invocation.
        toolkits(list[str]): Optional list of toolkits the main
            composite depends on.
        name(str): Invocation name for the main composite. Defaults to
            the unqualified composite name.

    Returns:
        tuple: tuple containing:

        - **Topology**: Topology with main composite invocation.
        - **Invoke**: Invocation of the main composite

    Raises:
        ValueError: `kind` is not a namespace qualified name.

    .. versionadded: 1.11
    """
    if '::' not in kind:
        raise ValueError('Main composite requires a namespace qualified name: ' + str(kind))
    ns, comp_name = kind.rsplit('::', 1)
    ns += '._spl'
    # Bug fix: previously a caller-supplied `name` was unconditionally
    # overwritten by the composite's unqualified name, so the `name`
    # argument had no effect. Honor it when provided; the topology
    # itself keeps the composite's name as before.
    if name is None:
        name = comp_name
    topo = streamsx.topology.topology.Topology(name=comp_name, namespace=ns)
    if toolkits:
        for tk_path in toolkits:
            streamsx.spl.toolkit.add_toolkit(topo, tk_path)
    return topo, Invoke(topo, kind, name=name)
Wrap a main composite invocation as a `Topology`. Provides a bridge between an SPL application (main composite) and a `Topology`. Create a `Topology` that contains just the invocation of the main composite defined by `kind`. The returned `Topology` may be used like any other topology instance including job configuration, tester or even addition of SPL operator invocations or functional transformations. .. note:: Since a main composite by definition has no input or output ports any functionality added to the topology cannot interact directly with its invocation. Args: kind(str): Kind of the main composite operator invocation. toolkits(list[str]): Optional list of toolkits the main composite depends on. name(str): Invocation name for the main composite. Returns: tuple: tuple containing: - **Topology**: Topology with main composite invocation. - **Invoke**: Invocation of the main composite .. versionadded: 1.11
entailment
def attribute(self, stream, name):
    """Expression for an input attribute.

    An input attribute is an attribute on one of the input ports of the
    operator invocation. `stream` must have been used to declare this
    invocation.

    Args:
        stream(Stream): Stream the attribute is from.
        name(str): Name of the attribute.

    Returns:
        Expression: Expression representing the input attribute.

    Raises:
        ValueError: `stream` is not an input of this operator.
    """
    if stream not in self._inputs:
        raise ValueError("Stream is not an input of this operator.")
    # A single input port needs no port qualification.
    if len(self._inputs) == 1:
        return Expression('attribute', name)
    port = self._op().inputPorts[self._inputs.index(stream)]
    return Expression('attribute', port._alias + '.' + name)
Expression for an input attribute. An input attribute is an attribute on one of the input ports of the operator invocation. `stream` must have been used to declare this invocation. Args: stream(Stream): Stream the attribute is from. name(str): Name of the attribute. Returns: Expression: Expression representing the input attribute.
entailment
def output(self, stream, value):
    """SPL output port assignment expression.

    Arguments:
        stream(Stream): Output stream the assignment is for.
        value(str): SPL expression used for an output assignment.
            This can be a string, a constant, or an
            :py:class:`Expression`.

    Returns:
        Expression: Output assignment expression that is valid in the
        context of this operator.

    Raises:
        ValueError: `stream` is not an output of this operator.
    """
    if stream not in self.outputs:
        raise ValueError("Stream is not an output of this operator.")
    expr = self.expression(value)
    expr._stream = stream
    return expr
SPL output port assignment expression. Arguments: stream(Stream): Output stream the assignment is for. value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid as a the context of this operator.
entailment
def output(self, value):
    """SPL output port assignment expression.

    Arguments:
        value(str): SPL expression used for an output assignment.
            This can be a string, a constant, or an
            :py:class:`Expression`.

    Returns:
        Expression: Output assignment expression that is valid in the
        context of this operator.
    """
    # A Source has exactly one output stream; delegate to the base
    # class using it implicitly.
    base = super(Source, self)
    return base.output(self.stream, value)
SPL output port assignment expression. Arguments: value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid as a the context of this operator.
entailment
def attribute(self, name):
    """Expression for an input attribute.

    An input attribute is an attribute on the input port of the
    operator invocation.

    Args:
        name(str): Name of the attribute.

    Returns:
        Expression: Expression representing the input attribute.
    """
    # A Map has exactly one input; delegate using it implicitly.
    sole_input = self._inputs[0]
    return super(Map, self).attribute(sole_input, name)
Expression for an input attribute. An input attribute is an attribute on the input port of the operator invocation. Args: name(str): Name of the attribute. Returns: Expression: Expression representing the input attribute.
entailment
def output(self, value):
    """SPL output port assignment expression.

    Arguments:
        value(str): SPL expression used for an output assignment.
            This can be a string, a constant, or an
            :py:class:`Expression`.

    Returns:
        Expression: Output assignment expression that is valid in the
        context of this operator.
    """
    # A Map has exactly one output stream; delegate to the base class
    # using it implicitly.
    base = super(Map, self)
    return base.output(self.stream, value)
SPL output port assignment expression. Arguments: value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid as a the context of this operator.
entailment
def expression(value):
    """Create an SPL expression.

    Args:
        value: Expression as a string or another `Expression`. If
            `value` is an instance of `Expression` then a new instance
            is returned containing the same type and value.

    Returns:
        Expression: SPL expression from `value`.
    """
    if isinstance(value, Expression):
        # Clone the expression to allow it to be used in multiple
        # contexts.
        return Expression(value._type, value._value)
    if hasattr(value, 'spl_json'):
        parts = value.spl_json()
        return Expression(parts['type'], parts['value'])
    # Anything else is treated as a raw SPL expression.
    return Expression('splexpr', value)
Create an SPL expression. Args: value: Expression as a string or another `Expression`. If value is an instance of `Expression` then a new instance is returned containing the same type and value. Returns: Expression: SPL expression from `value`.
entailment
def spl_json(self):
    """Private method. May be removed at any time."""
    # JSON-serializable form of this expression: its type and value.
    return {"type": self._type, "value": self._value}
Private method. May be removed at any time.
entailment
def _extract_from_toolkit(args):
    """Look at all the modules in opt/python/streams
    (opt/python/streams/*.py) and extract any spl decorated function as
    an operator.
    """
    extractor = _Extractor(args)

    if extractor._cmd_args.verbose:
        print("spl-python-extract:", __version__)
        print("Topology toolkit location:", _topology_tk_dir())

    tk_dir = extractor._tk_dir
    tk_streams = os.path.join(tk_dir, 'opt', 'python', 'streams')
    if not os.path.isdir(tk_streams) or not fnmatch.filter(os.listdir(tk_streams), '*.py'):
        # Nothing to do for Python extraction
        extractor._make_toolkit()
        return

    # Serialize concurrent extractions of the same toolkit with an
    # exclusive file lock.
    lf = os.path.join(tk_streams, '.lockfile')
    with open(lf, 'w') as lfno:
        fcntl.flock(lfno, fcntl.LOCK_EX)
        # Re-extract only if some module is newer than the existing
        # toolkit index (toolkit.xml).
        tk_idx = os.path.join(tk_dir, 'toolkit.xml')
        tk_time = os.path.getmtime(tk_idx) if os.path.exists(tk_idx) else None
        changed = False if tk_time else True
        if tk_time:
            for mf in glob.glob(os.path.join(tk_streams, '*.py')):
                if os.path.getmtime(mf) >= tk_time:
                    changed = True
                    break
        if changed:
            path_items = _setup_path(tk_dir, tk_streams)
            for mf in glob.glob(os.path.join(tk_streams, '*.py')):
                print('Checking ', mf, 'for operators')
                name = inspect.getmodulename(mf)
                # NOTE(review): imp.load_source is deprecated; consider
                # importlib when the supported Python floor allows it.
                dynm = imp.load_source(name, mf)
                streams_python_file = inspect.getsourcefile(dynm)
                # Extract both function-style and class-style operators.
                extractor._process_operators(dynm, name, streams_python_file, inspect.getmembers(dynm, inspect.isfunction))
                extractor._process_operators(dynm, name, streams_python_file, inspect.getmembers(dynm, inspect.isclass))
            langList = extractor._copy_globalization_resources()
            if extractor._cmd_args.verbose:
                print("Available languages for TopologySplpy resource:", langList)
            extractor._setup_info_xml(langList)
            extractor._make_toolkit()
            _reset_path(path_items)
        fcntl.flock(lfno, fcntl.LOCK_UN)
Look at all the modules in opt/python/streams (opt/python/streams/*.py) and extract any spl decorated function as an operator.
entailment
def _copy_globalization_resources(self):
    '''Copy the language resource files for python api functions.

    This function copies the TopologySplpy Resource files from the
    Topology toolkit directory into the impl/nl folder of the project.

    Returns:
        the list with the copied locale strings'''
    rootDir = os.path.join(_topology_tk_dir(), "impl", "nl")
    languageList = []
    for dirName in os.listdir(rootDir):
        srcDir = os.path.join(_topology_tk_dir(), "impl", "nl", dirName)
        # Each locale is a directory; "include" is not a locale.
        if (os.path.isdir(srcDir)) and (dirName != "include"):
            dstDir = os.path.join(self._tk_dir, "impl", "nl", dirName)
            try:
                print("Copy globalization resources " + dirName)
                os.makedirs(dstDir)
            except OSError as e:
                # errno 17 == EEXIST: the destination already exists,
                # which is fine; anything else is a real failure.
                if (e.errno == 17) and (os.path.isdir(dstDir)):
                    if self._cmd_args.verbose:
                        print("Directory", dstDir, "exists")
                else:
                    raise
            srcFile = os.path.join(srcDir, "TopologySplpyResource.xlf")
            if os.path.isfile(srcFile):
                res = shutil.copy2(srcFile, dstDir)
                # Only locales whose resource file was copied count.
                languageList.append(dirName)
                if self._cmd_args.verbose:
                    print("Written: " + res)
    return languageList
Copy the language resource files for python api functions This function copies the TopologySplpy Resource files from Topology toolkit directory into the impl/nl folder of the project. Returns: the list with the copied locale strings
entailment
def _setup_info_xml(self, languageList):
    '''Setup the info.xml file.

    This function prepares or checks the info.xml file in the project
    directory:

    - if the info.xml does not exist in the project directory, it
      copies the template info.xml into the project directory. The
      project name is obtained from the project directory name.
    - if there is an info.xml file, the resource section is inspected.
      If the resource section has no valid message set description for
      the TopologySplpy Resource, the extraction exits with an error.

    Args:
        languageList: locale names whose resources were copied into the
            project (see ``_copy_globalization_resources``).
    '''
    infoXmlFile = os.path.join(self._tk_dir, 'info.xml')
    print('Check info.xml:', infoXmlFile)
    try:
        TopologySplpyResourceMessageSetFound = False
        TopologySplpyResourceLanguages = []
        tree = ET.parse(infoXmlFile)
        root = tree.getroot()
        # Scan the resources section for the TopologySplpyResource
        # message set and collect the locales it declares.
        for resources in root.findall('{http://www.ibm.com/xmlns/prod/streams/spl/toolkitInfo}resources'):
            if self._cmd_args.verbose:
                print('Resource: ', resources.tag)
            for messageSet in resources.findall('{http://www.ibm.com/xmlns/prod/streams/spl/toolkitInfo}messageSet'):
                if self._cmd_args.verbose:
                    print('Message set:', messageSet.tag, messageSet.attrib)
                if 'name' in messageSet.attrib:
                    if messageSet.attrib['name'] == 'TopologySplpyResource':
                        TopologySplpyResourceMessageSetFound = True
                        for lang in messageSet.findall('{http://www.ibm.com/xmlns/prod/streams/spl/toolkitInfo}lang'):
                            language = os.path.dirname(lang.text)
                            TopologySplpyResourceLanguages.append(language)
        if TopologySplpyResourceMessageSetFound:
            # Compare (order-independently) the declared locales with
            # the locales actually copied into the project.
            TopologySplpyResourceLanguages.sort()
            languageList.sort()
            copiedLanguagesSet = set(languageList)
            resourceLanguageSet = set(TopologySplpyResourceLanguages)
            if self._cmd_args.verbose:
                print('copied language resources:\n', languageList)
                print('TopologySplpyResource from info.xml:\n', TopologySplpyResourceLanguages)
            if copiedLanguagesSet == resourceLanguageSet:
                print('Resource section of info.xml verified')
            else:
                # Bug fix: the error string previously started with a
                # stray '"' ("""" opened a triple-quoted string whose
                # first character was a quote).
                errstr = """ERROR: Message set for the "TopologySplpyResource" is incomplete or invalid. Correct the resource section in info.xml file.
Sample info xml:\n""" + _INFO_XML_TEMPLATE
                sys.exit(errstr)
        else:
            errstr = """ERROR: Message set for the "TopologySplpyResource" is missing. Correct the resource section in info.xml file.
Sample info xml:\n""" + _INFO_XML_TEMPLATE
            sys.exit(errstr)
    except FileNotFoundError:
        print("WARNING: File info.xml not found. Creating info.xml from template")
        # Get default project name from project directory.
        # os.path.abspath returns the path without trailing /
        projectRootDir = os.path.abspath(self._tk_dir)
        projectName = os.path.basename(projectRootDir)
        infoXml = _INFO_XML_TEMPLATE.replace('__SPLPY_TOOLKIT_NAME__', projectName)
        # Use a context manager so the file is closed even on error.
        with open(infoXmlFile, 'w') as f:
            f.write(infoXml)
    except SystemExit as e:
        raise e
    except:
        errstr = """ERROR: File info.xml is invalid or not accessible
Sample info xml:\n""" + _INFO_XML_TEMPLATE
        sys.exit(errstr)
Setup the info.xml file This function prepares or checks the info.xml file in the project directory - if the info.xml does not exist in the project directory, it copies the template info.xml into the project directory. The project name is obtained from the project directory name - If there is a info.xml file, the resource section is inspected. If the resource section has no valid message set description for the TopologySplpy Resource a warning message is printed
entailment
def main(args=None):
    """Output information about `streamsx` and the environment.

    Useful for support to get key information for use of `streamsx`
    and Python in IBM Streams.

    Returns:
        int: Exit code, always 0.
    """
    _parse_args(args)
    streamsx._streams._version._mismatch_check('streamsx.topology.context')
    # Prefer the installed pip package, if any.
    srp = pkg_resources.working_set.find(pkg_resources.Requirement.parse('streamsx'))
    if srp is not None:
        srv = srp.parsed_version
        location = srp.location
        spkg = 'package'
    else:
        # Not installed as a package: fall back to the module location,
        # which may be inside the topology toolkit.
        srv = streamsx._streams._version.__version__
        location = os.path.dirname(streamsx._streams._version.__file__)
        location = os.path.dirname(location)
        location = os.path.dirname(location)
        tk_path = (os.path.join('com.ibm.streamsx.topology', 'opt', 'python', 'packages'))
        spkg = 'toolkit' if location.endswith(tk_path) else 'unknown'
    print('streamsx==' + str(srv) + ' (' + spkg + ')')
    print(' location: ' + str(location))
    print('Python version:' + str(sys.version))
    # Environment variables relevant to running streamsx applications.
    print('PYTHONHOME=' + str(os.environ.get('PYTHONHOME', 'unset')))
    print('PYTHONPATH=' + str(os.environ.get('PYTHONPATH', 'unset')))
    print('PYTHONWARNINGS=' + str(os.environ.get('PYTHONWARNINGS', 'unset')))
    print('STREAMS_INSTALL=' + str(os.environ.get('STREAMS_INSTALL', 'unset')))
    print('JAVA_HOME=' + str(os.environ.get('JAVA_HOME', 'unset')))
    return 0
Output information about `streamsx` and the environment. Useful for support to get key information for use of `streamsx` and Python in IBM Streams.
entailment
def operator_driven(drain_timeout=_DEFAULT_DRAIN, reset_timeout=_DEFAULT_RESET, max_consecutive_attempts=_DEFAULT_ATTEMPTS):
    """Define an operator-driven consistent region configuration.

    The source operator triggers drain and checkpoint cycles for the
    region.

    Args:
        drain_timeout: The drain timeout, as either a
            :py:class:`datetime.timedelta` value or the number of
            seconds as a `float`. Defaults to 180 seconds.
        reset_timeout: The reset timeout, as either a
            :py:class:`datetime.timedelta` value or the number of
            seconds as a `float`. Defaults to 180 seconds.
        max_consecutive_attempts(int): The maximum number of
            consecutive attempts to reset the region, between 1 and
            2147483647 inclusive. Defaults to 5.

    Returns:
        ConsistentRegionConfig: the configuration.
    """
    cfg_args = dict(trigger=ConsistentRegionConfig.Trigger.OPERATOR_DRIVEN,
                    drain_timeout=drain_timeout,
                    reset_timeout=reset_timeout,
                    max_consecutive_attempts=max_consecutive_attempts)
    return ConsistentRegionConfig(**cfg_args)
Define an operator-driven consistent region configuration. The source operator triggers drain and checkpoint cycles for the region. Args: drain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. reset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. max_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region. This must be an integer value between 1 and 2147483647, inclusive. If not specified, the default value is 5. Returns: ConsistentRegionConfig: the configuration.
entailment
def periodic(period, drain_timeout=_DEFAULT_DRAIN, reset_timeout=_DEFAULT_RESET, max_consecutive_attempts=_DEFAULT_ATTEMPTS): """Create a periodic consistent region configuration. The IBM Streams runtime will trigger a drain and checkpoint the region periodically at the time interval specified by `period`. Args: period: The trigger period. This may be either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. drain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. reset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. max_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region. This must be an integer value between 1 and 2147483647, inclusive. If not specified, the default value is 5. Returns: ConsistentRegionConfig: the configuration. """ return ConsistentRegionConfig(trigger=ConsistentRegionConfig.Trigger.PERIODIC, period=period, drain_timeout=drain_timeout, reset_timeout=reset_timeout, max_consecutive_attempts=max_consecutive_attempts)
Create a periodic consistent region configuration. The IBM Streams runtime will trigger a drain and checkpoint the region periodically at the time interval specified by `period`. Args: period: The trigger period. This may be either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. drain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. reset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. max_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region. This must be an integer value between 1 and 2147483647, inclusive. If not specified, the default value is 5. Returns: ConsistentRegionConfig: the configuration.
entailment
def _get_timestamp_tuple(ts): """ Internal method to get a timestamp tuple from a value. Handles input being a datetime or a Timestamp. """ if isinstance(ts, datetime.datetime): return Timestamp.from_datetime(ts).tuple() elif isinstance(ts, Timestamp): return ts raise TypeError('Timestamp or datetime.datetime required')
Internal method to get a timestamp tuple from a value. Handles input being a datetime or a Timestamp.
entailment
def from_datetime(dt, machine_id=0): """ Convert a datetime to an SPL `Timestamp`. Args: dt(datetime.datetime): Datetime to be converted. machine_id(int): Machine identifier. Returns: Timestamp: Datetime converted to Timestamp. """ td = dt - Timestamp._EPOCH seconds = td.days * 3600 * 24 seconds += td.seconds return Timestamp(seconds, td.microseconds*1000, machine_id)
Convert a datetime to an SPL `Timestamp`. Args: dt(datetime.datetime): Datetime to be converted. machine_id(int): Machine identifier. Returns: Timestamp: Datetime converted to Timestamp.
entailment
def add_toolkit(topology, location): """Add an SPL toolkit to a topology. Args: topology(Topology): Topology to include toolkit in. location(str): Location of the toolkit directory. """ import streamsx.topology.topology assert isinstance(topology, streamsx.topology.topology.Topology) tkinfo = dict() tkinfo['root'] = os.path.abspath(location) topology.graph._spl_toolkits.append(tkinfo)
Add an SPL toolkit to a topology. Args: topology(Topology): Topology to include toolkit in. location(str): Location of the toolkit directory.
entailment
def add_toolkit_dependency(topology, name, version): """Add a version dependency on an SPL toolkit to a topology. To specify a range of versions for the dependent toolkits, use brackets (``[]``) or parentheses. Use brackets to represent an inclusive range and parentheses to represent an exclusive range. The following examples describe how to specify a dependency on a range of toolkit versions: * ``[1.0.0, 2.0.0]`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both inclusive. * ``[1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 or later, but not including 2.0.0. * ``(1.0.0, 2.0.0]`` represents a dependency on toolkits versions later than 1.0.0 and less than or equal to 2.0.0. * ``(1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both exclusive. Args: topology(Topology): Topology to include toolkit in. name(str): Toolkit name. version(str): Toolkit version dependency. .. seealso:: `Toolkit information model file <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.3.0/com.ibm.streams.dev.doc/doc/toolkitinformationmodelfile.html>`_ .. versionadded:: 1.12 """ import streamsx.topology.topology assert isinstance(topology, streamsx.topology.topology.Topology) tkinfo = dict() tkinfo['name'] = name tkinfo['version'] = version topology.graph._spl_toolkits.append(tkinfo)
Add a version dependency on an SPL toolkit to a topology. To specify a range of versions for the dependent toolkits, use brackets (``[]``) or parentheses. Use brackets to represent an inclusive range and parentheses to represent an exclusive range. The following examples describe how to specify a dependency on a range of toolkit versions: * ``[1.0.0, 2.0.0]`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both inclusive. * ``[1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 or later, but not including 2.0.0. * ``(1.0.0, 2.0.0]`` represents a dependency on toolkits versions later than 1.0.0 and less than or equal to 2.0.0. * ``(1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both exclusive. Args: topology(Topology): Topology to include toolkit in. name(str): Toolkit name. version(str): Toolkit version dependency. .. seealso:: `Toolkit information model file <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.3.0/com.ibm.streams.dev.doc/doc/toolkitinformationmodelfile.html>`_ .. versionadded:: 1.12
entailment
def submit(args=None): """ Performs the submit according to arguments and returns an object describing the result. """ streamsx._streams._version._mismatch_check('streamsx.topology.context') cmd_args = _parse_args(args) if cmd_args.topology is not None: app = _get_topology_app(cmd_args) elif cmd_args.main_composite is not None: app = _get_spl_app(cmd_args) elif cmd_args.bundle is not None: app = _get_bundle(cmd_args) _job_config_args(cmd_args, app) sr = _submit(cmd_args, app) if 'return_code' not in sr: sr['return_code'] = 1; print(sr) return sr
Performs the submit according to arguments and returns an object describing the result.
entailment
def _parse_args(args): """ Argument parsing """ cmd_parser = argparse.ArgumentParser(description='Execute a Streams application using a Streaming Analytics service.') ctx_group = cmd_parser.add_mutually_exclusive_group(required=True) ctx_group.add_argument('--service-name', help='Submit to Streaming Analytics service') ctx_group.add_argument('--create-bundle', action='store_true', help='Create a bundle using a local IBM Streams install. No job submission occurs.') app_group = cmd_parser.add_mutually_exclusive_group(required=True) app_group.add_argument('--topology', help='Topology to call') app_group.add_argument('--main-composite', help='SPL main composite') app_group.add_argument('--bundle', help="Streams application bundle (sab file) to submit to service") bld_group = cmd_parser.add_argument_group('Build options', 'Application build options') bld_group.add_argument('--toolkits', nargs='+', help='SPL toolkit containing the main composite and any other required SPL toolkits.') _define_jco_args(cmd_parser) cmd_args = cmd_parser.parse_args(args) return cmd_args
Argument parsing
entailment
def _define_jco_args(cmd_parser): """ Define job configuration arguments. Returns groups defined, currently one. """ jo_group = cmd_parser.add_argument_group('Job options', 'Job configuration options') jo_group.add_argument('--job-name', help='Job name') jo_group.add_argument('--preload', action='store_true', help='Preload job onto all resources in the instance') jo_group.add_argument('--trace', choices=['error', 'warn', 'info', 'debug', 'trace'], help='Application trace level') jo_group.add_argument('--submission-parameters', '-p', nargs='+', action=_SubmitParamArg, help="Submission parameters as name=value pairs") jo_group.add_argument('--job-config-overlays', help="Path to file containing job configuration overlays JSON. Overrides any job configuration set by the application." , metavar='file') return jo_group,
Define job configuration arguments. Returns groups defined, currently one.
entailment
def _submit_topology(cmd_args, app): """Submit a Python topology to the service. This includes an SPL main composite wrapped in a Python topology. """ cfg = app.cfg if cmd_args.create_bundle: ctxtype = ctx.ContextTypes.BUNDLE elif cmd_args.service_name: cfg[ctx.ConfigParams.FORCE_REMOTE_BUILD] = True cfg[ctx.ConfigParams.SERVICE_NAME] = cmd_args.service_name ctxtype = ctx.ContextTypes.STREAMING_ANALYTICS_SERVICE sr = ctx.submit(ctxtype, app.app, cfg) return sr
Submit a Python topology to the service. This includes an SPL main composite wrapped in a Python topology.
entailment
def _submit_bundle(cmd_args, app): """Submit an existing bundle to the service""" sac = streamsx.rest.StreamingAnalyticsConnection(service_name=cmd_args.service_name) sas = sac.get_streaming_analytics() sr = sas.submit_job(bundle=app.app, job_config=app.cfg[ctx.ConfigParams.JOB_CONFIG]) if 'exception' in sr: rc = 1 elif 'status_code' in sr: try: rc = 0 if int(sr['status_code'] == 200) else 1 except: rc = 1 elif 'id' in sr or 'jobId' in sr: rc = 0 sr['return_code'] = rc return sr
Submit an existing bundle to the service
entailment
def pipe(wrapped): """ Decorator to create an SPL operator from a function. A pipe SPL operator with a single input port and a single output port. For each tuple on the input port the function is called passing the contents of the tuple. SPL attributes from the tuple are passed by position. The value returned from the function results in zero or more tuples being submitted to the operator output port, see :ref:`submit-from-python`. .. deprecated:: 1.8 Recommended to use :py:class:`@spl.map <map>` instead. """ if not inspect.isfunction(wrapped): raise TypeError('A function is required') return _wrapforsplop(_OperatorType.Pipe, wrapped, 'position', False)
Decorator to create an SPL operator from a function. A pipe SPL operator with a single input port and a single output port. For each tuple on the input port the function is called passing the contents of the tuple. SPL attributes from the tuple are passed by position. The value returned from the function results in zero or more tuples being submitted to the operator output port, see :ref:`submit-from-python`. .. deprecated:: 1.8 Recommended to use :py:class:`@spl.map <map>` instead.
entailment
def _define_fixed(wrapped, callable_): """For the callable see how many positional parameters are required""" is_class = inspect.isclass(wrapped) style = callable_._splpy_style if hasattr(callable_, '_splpy_style') else wrapped._splpy_style if style == 'dictionary': return -1 fixed_count = 0 if style == 'tuple': sig = _inspect.signature(callable_) pmds = sig.parameters itpmds = iter(pmds) # Skip 'self' for classes if is_class: next(itpmds) for pn in itpmds: param = pmds[pn] if param.kind == _inspect.Parameter.POSITIONAL_OR_KEYWORD: fixed_count += 1 if param.kind == _inspect.Parameter.VAR_POSITIONAL: # *args fixed_count = -1 break if param.kind == _inspect.Parameter.VAR_KEYWORD: break return fixed_count
For the callable see how many positional parameters are required
entailment
def ignore(wrapped): """ Decorator to ignore a Python function. If a Python callable is decorated with ``@spl.ignore`` then function is ignored by ``spl-python-extract.py``. Args: wrapped: Function that will be ignored. """ @functools.wraps(wrapped) def _ignore(*args, **kwargs): return wrapped(*args, **kwargs) _ignore._splpy_optype = _OperatorType.Ignore _ignore._splpy_file = inspect.getsourcefile(wrapped) return _ignore
Decorator to ignore a Python function. If a Python callable is decorated with ``@spl.ignore`` then function is ignored by ``spl-python-extract.py``. Args: wrapped: Function that will be ignored.
entailment
def sink(wrapped): """Creates an SPL operator with a single input port. A SPL operator with a single input port and no output ports. For each tuple on the input port the decorated function is called passing the contents of the tuple. .. deprecated:: 1.8 Recommended to use :py:class:`@spl.for_each <for_each>` instead. """ if not inspect.isfunction(wrapped): raise TypeError('A function is required') return _wrapforsplop(_OperatorType.Sink, wrapped, 'position', False)
Creates an SPL operator with a single input port. A SPL operator with a single input port and no output ports. For each tuple on the input port the decorated function is called passing the contents of the tuple. .. deprecated:: 1.8 Recommended to use :py:class:`@spl.for_each <for_each>` instead.
entailment
def submit(self, port_id, tuple_): """Submit a tuple to the output port. The value to be submitted (``tuple_``) can be a ``None`` (nothing will be submitted), ``tuple``, ``dict` or ``list`` of those types. For details on how the ``tuple_`` is mapped to an SPL tuple see :ref:`submit-from-python`. Args: port_id: Identifier of the port specified in the ``output_ports`` parameter of the ``@spl.primitive_operator`` decorator. tuple_: Tuple (or tuples) to be submitted to the output port. """ port_index = self._splpy_output_ports[port_id] ec._submit(self, port_index, tuple_)
Submit a tuple to the output port. The value to be submitted (``tuple_``) can be a ``None`` (nothing will be submitted), ``tuple``, ``dict` or ``list`` of those types. For details on how the ``tuple_`` is mapped to an SPL tuple see :ref:`submit-from-python`. Args: port_id: Identifier of the port specified in the ``output_ports`` parameter of the ``@spl.primitive_operator`` decorator. tuple_: Tuple (or tuples) to be submitted to the output port.
entailment
def _splpy_convert_tuple(attributes): """Create a function that converts tuples to be submitted as dict objects into Python tuples with the value by position. Return function handles tuple,dict,list[tuple|dict|None],None """ def _to_tuples(tuple_): if isinstance(tuple_, tuple): return tuple_ if isinstance(tuple_, dict): return tuple(tuple_.get(name, None) for name in attributes) if isinstance(tuple_, list): lt = list() for ev in tuple_: if isinstance(ev, dict): ev = tuple(ev.get(name, None) for name in attributes) lt.append(ev) return lt return tuple_ return _to_tuples
Create a function that converts tuples to be submitted as dict objects into Python tuples with the value by position. Return function handles tuple,dict,list[tuple|dict|None],None
entailment
def _splpy_primitive_input_fns(obj): """Convert the list of class input functions to be instance functions against obj. Used by @spl.primitive_operator SPL cpp template. """ ofns = list() for fn in obj._splpy_input_ports: ofns.append(getattr(obj, fn.__name__)) return ofns
Convert the list of class input functions to be instance functions against obj. Used by @spl.primitive_operator SPL cpp template.
entailment
def _splpy_all_ports_ready(callable_): """Call all_ports_ready for a primitive operator.""" if hasattr(type(callable_), 'all_ports_ready'): try: return callable_.all_ports_ready() except: ei = sys.exc_info() if streamsx._streams._runtime._call_exit(callable_, ei): return None raise e1[1] return None
Call all_ports_ready for a primitive operator.
entailment
def set_trace(a_frame=None): """ Breaks on the line that invoked this function or at given frame. User can then resume execution. To call set_trace() use: .. code-block:: python import ikp3db ; ikp3db.set_trace() :param a_frame: The frame at which to break on. :type a_frame: frame :return: An error message or None is everything went fine. :rtype: str or None """ if not ikpdb: return "Error: IKP3db must be launched before calling ikpd.set_trace()." if a_frame is None: a_frame = sys._getframe().f_back ikpdb._line_tracer(a_frame) return None
Breaks on the line that invoked this function or at given frame. User can then resume execution. To call set_trace() use: .. code-block:: python import ikp3db ; ikp3db.set_trace() :param a_frame: The frame at which to break on. :type a_frame: frame :return: An error message or None is everything went fine. :rtype: str or None
entailment
def post_mortem(trace_back=None, exc_info=None): """ Breaks on a traceback and send all execution information to the debugger client. If the interpreter is handling an exception at this traceback, exception information is sent to _line_tracer() which will transmit it to the debugging client. Caller can also pass an *exc_info* that will be used to extract exception information. If passed exc_info has precedence over traceback. This method is useful for integrating with systems that manage exceptions. Using it, you can setup a developer mode where unhandled exceptions are sent to the developer. Once user resumes execution, control is returned to caller. IKP3db is just used to "pretty" display the execution environment. To call post_mortem() use: .. code-block:: python import ikp3db ... ikp3db.postmortem(any_traceback) :param trace_back: The traceback at which to break on. :type trace_back: traceback :param exc_info: Complete description of the raised Exception as returned by sys.exc_info. :type exc_info: tuple :return: An error message or None is everything went fine. :rtype: str or None """ if not ikpdb: return "Error: IKP3db must be launched before calling ikpd.post_mortem()." if exc_info: trace_back = exc_info[2] elif trace_back and not exc_info: if sys.exc_info()[2] == trace_back: exc_info = sys.exc_info() else: return "missing parameter trace_back or exc_info" pm_traceback = trace_back while pm_traceback.tb_next: pm_traceback = pm_traceback.tb_next ikpdb._line_tracer(pm_traceback.tb_frame, exc_info=exc_info) _logger.g_info("Post mortem processing finished.") return None
Breaks on a traceback and send all execution information to the debugger client. If the interpreter is handling an exception at this traceback, exception information is sent to _line_tracer() which will transmit it to the debugging client. Caller can also pass an *exc_info* that will be used to extract exception information. If passed exc_info has precedence over traceback. This method is useful for integrating with systems that manage exceptions. Using it, you can setup a developer mode where unhandled exceptions are sent to the developer. Once user resumes execution, control is returned to caller. IKP3db is just used to "pretty" display the execution environment. To call post_mortem() use: .. code-block:: python import ikp3db ... ikp3db.postmortem(any_traceback) :param trace_back: The traceback at which to break on. :type trace_back: traceback :param exc_info: Complete description of the raised Exception as returned by sys.exc_info. :type exc_info: tuple :return: An error message or None is everything went fine. :rtype: str or None
entailment
def setup(cls, ikpdb_log_arg): """ activates DEBUG logging level based on the `ikpdb_log_arg` parameter string. `ikpdb_log_arg` corresponds to the `--ikpdb-log` command line argument. `ikpdb_log_arg` is composed of a serie of letters that set the `DEBUG` logging level on the components of the debugger. Here are the letters and the component they activate `DEBUG` logging level on: - n,N: Network - b,B: Breakpoints - e,E: Expression evaluation - x,X: Execution - f,F: Frame - p,P: Path and python path manipulation - g,G: Global debugger By default logging is disabled for all components. Any `ikpdb_log_arg` value different from the letters above (eg: '9') activates `INFO` level logging on all domains. To log, use:: _logger.x_debug("useful information") Where: - `_logger` is a reference to the IKPdbLogger class - `x` is the `Execution` domain - `debug` is the logging level """ if not ikpdb_log_arg: return IKPdbLogger.enabled = True logging_configuration_string = ikpdb_log_arg.lower() for letter in logging_configuration_string: if letter in IKPdbLogger.DOMAINS: IKPdbLogger.DOMAINS[letter] = 10
activates DEBUG logging level based on the `ikpdb_log_arg` parameter string. `ikpdb_log_arg` corresponds to the `--ikpdb-log` command line argument. `ikpdb_log_arg` is composed of a serie of letters that set the `DEBUG` logging level on the components of the debugger. Here are the letters and the component they activate `DEBUG` logging level on: - n,N: Network - b,B: Breakpoints - e,E: Expression evaluation - x,X: Execution - f,F: Frame - p,P: Path and python path manipulation - g,G: Global debugger By default logging is disabled for all components. Any `ikpdb_log_arg` value different from the letters above (eg: '9') activates `INFO` level logging on all domains. To log, use:: _logger.x_debug("useful information") Where: - `_logger` is a reference to the IKPdbLogger class - `x` is the `Execution` domain - `debug` is the logging level
entailment
def send(self, command, _id=None, result={}, frames=[], threads=None, error_messages=[], warning_messages=[], info_messages=[], exception=None): """ Build a message from parameters and send it to debugger. :param command: The command sent to the debugger client. :type command: str :param _id: Unique id of the sent message. Right now, it's always `None` for messages by debugger to client. :type _id: int :param result: Used to send `exit_code` and updated `executionStatus` to debugger client. :type result: dict :param frames: contains the complete stack frames when debugger sends the `programBreak` message. :type frames: list :param error_messages: A list of error messages the debugger client must display to the user. :type error_messages: list of str :param warning_messages: A list of warning messages the debugger client must display to the user. :type warning_messages: list of str :param info_messages: A list of info messages the debugger client must display to the user. :type info_messages: list of str :param exception: If debugger encounter an exception, this dict contains 2 keys: `type` and `info` (the later is the message). :type exception: dict """ with self._connection_lock: payload = { '_id': _id, 'command': command, 'result': result, 'commandExecStatus': 'ok', 'frames': frames, 'info_messages': info_messages, 'warning_messages': warning_messages, 'error_messages': error_messages, 'exception': exception } if threads: payload['threads'] = threads msg = self.encode(payload) if self._connection: msg_bytes = bytearray(msg, 'utf-8') send_bytes_count = self._connection.sendall(msg_bytes) self.log_sent(msg) return send_bytes_count raise IKPdbConnectionError("Connection lost!")
Build a message from parameters and send it to debugger. :param command: The command sent to the debugger client. :type command: str :param _id: Unique id of the sent message. Right now, it's always `None` for messages by debugger to client. :type _id: int :param result: Used to send `exit_code` and updated `executionStatus` to debugger client. :type result: dict :param frames: contains the complete stack frames when debugger sends the `programBreak` message. :type frames: list :param error_messages: A list of error messages the debugger client must display to the user. :type error_messages: list of str :param warning_messages: A list of warning messages the debugger client must display to the user. :type warning_messages: list of str :param info_messages: A list of info messages the debugger client must display to the user. :type info_messages: list of str :param exception: If debugger encounter an exception, this dict contains 2 keys: `type` and `info` (the later is the message). :type exception: dict
entailment
def reply(self, obj, result, command_exec_status='ok', info_messages=[], warning_messages=[], error_messages=[]): """Build a response from a previouslsy received command message, send it and return number of sent bytes. :param result: Used to send back the result of the command execution to the debugger client. :type result: dict See send() above for others parameters definition. """ with self._connection_lock: # TODO: add a parameter to remove args from messages ? if True: del obj['args'] obj['result'] = result obj['commandExecStatus'] = command_exec_status obj['info_messages'] = info_messages obj['warning_messages'] = warning_messages obj['error_messages'] = error_messages msg_str = self.encode(obj) msg_bytes = bytearray(msg_str, 'utf-8') send_bytes_count = self._connection.sendall(msg_bytes) self.log_sent(msg_bytes) return send_bytes_count
Build a response from a previouslsy received command message, send it and return number of sent bytes. :param result: Used to send back the result of the command execution to the debugger client. :type result: dict See send() above for others parameters definition.
entailment
def receive(self, ikpdb): """Waits for a message from the debugger and returns it as a dict. """ # with self._connection_lock: while self._network_loop: _logger.n_debug("Enter socket.recv(%s) with self._received_data = %s", self.SOCKET_BUFFER_SIZE, self._received_data) try: # We may land here with a full packet already in self.received_data # In that case we must not enter recv() if self.SOCKET_BUFFER_SIZE: data = self._connection.recv(self.SOCKET_BUFFER_SIZE) else: data = b'' _logger.n_debug("Socket.recv(%s) => %s", self.SOCKET_BUFFER_SIZE, data) except socket.timeout: _logger.n_debug("socket.timeout witk ikpdb.status=%s", ikpdb.status) if ikpdb.status == 'terminated': _logger.n_debug("breaking IKPdbConnectionHandler.receive() " "network loop as ikpdb state is 'terminated'.") return { 'command': '_InternalQuit', 'args':{} } continue except socket.error as socket_err: if ikpdb.status == 'terminated': return {'command': '_InternalQuit', 'args':{'socket_error_number': socket_err.errno, 'socket_error_str': socket_err.strerror}} continue except Exception as exc: _logger.g_error("Unexecpected Error: '%s' in IKPdbConnectionHandler" ".command_loop.", exc) _logger.g_error(traceback.format_exc()) print("".join(traceback.format_stack())) return { 'command': '_InternalQuit', 'args':{ "error": exc.__class__.__name__, "message": exc.message } } # received data is utf8 encoded self._received_data += data.decode('utf-8') # have we received a MAGIC_CODE try: magic_code_idx = self._received_data.index(self.MAGIC_CODE) except ValueError: continue # Have we received a 'length=' try: length_idx = self._received_data.index(u'length=') except ValueError: continue # extract length content from received data json_length = int(self._received_data[length_idx + 7:magic_code_idx]) message_length = magic_code_idx + len(self.MAGIC_CODE) + json_length if message_length <= len(self._received_data): full_message = self._received_data[:message_length] self._received_data = 
self._received_data[message_length:] if len(self._received_data) > 0: self.SOCKET_BUFFER_SIZE = 0 else: self.SOCKET_BUFFER_SIZE = 4096 break else: self.SOCKET_BUFFER_SIZE = message_length - len(self._received_data) self.log_received(full_message) obj = self.decode(full_message) return obj
Waits for a message from the debugger and returns it as a dict.
entailment
def clear(self): """ Clear a breakpoint by removing it from all lists. """ del IKBreakpoint.breakpoints_by_file_and_line[self.file_name, self.line_number] IKBreakpoint.breakpoints_by_number[self.number] = None IKBreakpoint.breakpoints_files[self.file_name].remove(self.line_number) if len(IKBreakpoint.breakpoints_files[self.file_name]) == 0: del IKBreakpoint.breakpoints_files[self.file_name] IKBreakpoint.update_active_breakpoint_flag()
Clear a breakpoint by removing it from all lists.
entailment
def update_active_breakpoint_flag(cls): """ Checks all breakpoints to find wether at least one is active and update `any_active_breakpoint` accordingly. """ cls.any_active_breakpoint=any([bp.enabled for bp in cls.breakpoints_by_number if bp])
Checks all breakpoints to find wether at least one is active and update `any_active_breakpoint` accordingly.
entailment
def lookup_effective_breakpoint(cls, file_name, line_number, frame): """ Checks if there is an enabled breakpoint at given file_name and line_number. Check breakpoint condition if any. :return: found, enabled and condition verified breakpoint or None :rtype: IKPdbBreakpoint or None """ bp = cls.breakpoints_by_file_and_line.get((file_name, line_number), None) if not bp: return None if not bp.enabled: return None if not bp.condition: return bp try: value = eval(bp.condition, frame.f_globals, frame.f_locals) return bp if value else None except: pass return None
Checks if there is an enabled breakpoint at given file_name and line_number. Check breakpoint condition if any. :return: found, enabled and condition verified breakpoint or None :rtype: IKPdbBreakpoint or None
entailment
def get_breakpoints_list(cls): """:return: a list of all breakpoints. :rtype: a list of dict with this keys: `breakpoint_number`, `bp.number`, `file_name`, `line_number`, `condition`, `enabled`. Warning: IKPDb line numbers are 1 based so line number conversion must be done by clients (eg. inouk.ikpdb for Cloud9) """ breakpoints_list = [] for bp in cls.breakpoints_by_number: if bp: # breakpoint #0 exists and is always None bp_dict = { 'breakpoint_number': bp.number, 'file_name': bp.file_name, 'line_number': bp.line_number, 'condition': bp.condition, 'enabled': bp.enabled, } breakpoints_list.append(bp_dict) return breakpoints_list
:return: a list of all breakpoints. :rtype: a list of dict with this keys: `breakpoint_number`, `bp.number`, `file_name`, `line_number`, `condition`, `enabled`. Warning: IKPDb line numbers are 1 based so line number conversion must be done by clients (eg. inouk.ikpdb for Cloud9)
entailment
def disable_all_breakpoints(cls): """ Disable all breakpoints and udate `active_breakpoint_flag`. """ for bp in cls.breakpoints_by_number: if bp: # breakpoint #0 exists and is always None bp.enabled = False cls.update_active_breakpoint_flag() return
Disable all breakpoints and udate `active_breakpoint_flag`.
entailment
def backup_breakpoints_state(cls): """ Returns the state of all breakpoints in a list that can be used later to restore all breakpoints state""" all_breakpoints_state = [] for bp in cls.breakpoints_by_number: if bp: all_breakpoints_state.append((bp.number, bp.enabled, bp.condition,)) return all_breakpoints_state
Returns the state of all breakpoints in a list that can be used later to restore all breakpoints state
entailment
def restore_breakpoints_state(cls, breakpoints_state_list): """Restore the state of breakpoints given a list provided by backup_breakpoints_state(). If list of breakpoint has changed since backup missing or added breakpoints are ignored. breakpoints_state_list is a list of tuple. Each tuple is of form: (breakpoint_number, enabled, condition) """ for breakpoint_state in breakpoints_state_list: bp = cls.breakpoints_by_number[breakpoint_state[0]] if bp: bp.enabled = breakpoint_state[1] bp.condition = breakpoint_state[2] cls.update_active_breakpoint_flag() return
Restore the state of breakpoints given a list provided by backup_breakpoints_state(). If list of breakpoint has changed since backup missing or added breakpoints are ignored. breakpoints_state_list is a list of tuple. Each tuple is of form: (breakpoint_number, enabled, condition)
entailment
def canonic(self, file_name): """ returns canonical version of a file name. A canonical file name is an absolute, lowercase normalized path to a given file. """ if file_name == "<" + file_name[1:-1] + ">": return file_name c_file_name = self.file_name_cache.get(file_name) if not c_file_name: c_file_name = os.path.abspath(file_name) c_file_name = os.path.normcase(c_file_name) self.file_name_cache[file_name] = c_file_name return c_file_name
returns canonical version of a file name. A canonical file name is an absolute, lowercase normalized path to a given file.
entailment
def normalize_path_in(self, client_file_name): """Translate a (possibly incomplete) file or module name received from debugging client into an absolute file name. """ _logger.p_debug("normalize_path_in(%s) with os.getcwd()=>%s", client_file_name, os.getcwd()) # remove client CWD from file_path if client_file_name.startswith(self._CLIENT_CWD): file_name = client_file_name[len(self._CLIENT_CWD):] else: file_name = client_file_name # Try to find file using it's absolute path if os.path.isabs(file_name) and os.path.exists(file_name): _logger.p_debug(" => found absolute path: '%s'", file_name) return file_name # Can we find the file relatively to launch CWD (useful with buildout) f = os.path.join(self._CWD, file_name) if os.path.exists(f): _logger.p_debug(" => found path relative to self._CWD: '%s'", f) return f # Can we find file relatively to launch script f = os.path.join(sys.path[0], file_name) if os.path.exists(f) and self.canonic(f) == self.mainpyfile: _logger.p_debug(" => found path relative to launch script: '%s'", f) return f # Try as an absolute path after adding .py extension root, ext = os.path.splitext(file_name) if ext == '': f = file_name + '.py' if os.path.isabs(f): _logger.p_debug(" => found absolute path after adding .py extension: '%s'", f) return f # Can we find the file in system path for dir_name in sys.path: while os.path.islink(dir_name): dir_name = os.readlink(dir_name) f = os.path.join(dir_name, file_name) if os.path.exists(f): _logger.p_debug(" => found path in sys.path: '%s'", f) return f return None
Translate a (possibly incomplete) file or module name received from debugging client into an absolute file name.
entailment
def normalize_path_out(self, path): """Normalizes path sent to client :param path: path to normalize :return: normalized path """ if path.startswith(self._CWD): normalized_path = path[len(self._CWD):] else: normalized_path = path # For remote debugging preprend client CWD if self._CLIENT_CWD: normalized_path = os.path.join(self._CLIENT_CWD, normalized_path) _logger.p_debug("normalize_path_out('%s') => %s", path, normalized_path) return normalized_path
Normalizes path sent to client :param path: path to normalize :return: normalized path
entailment
def object_properties_count(self, o): """ returns the number of user browsable properties of an object. """ o_type = type(o) if isinstance(o, (dict, list, tuple, set)): return len(o) elif isinstance(o, (type(None), bool, float, str, int, bytes, types.ModuleType, types.MethodType, types.FunctionType)): return 0 else: # Following lines are used to debug variables members browsing # and counting # if False and str(o_type) == "<class 'socket._socketobject'>": # print "@378" # print dir(o) # print "hasattr(o, '__dict__')=%s" % hasattr(o,'__dict__') # count = 0 # if hasattr(o, '__dict__'): # for m_name, m_value in o.__dict__.iteritems(): # if m_name.startswith('__'): # print " %s=>False" % (m_name,) # continue # if type(m_value) in (types.ModuleType, types.MethodType, types.FunctionType,): # print " %s=>False" % (m_name,) # continue # print " %s=>True" % (m_name,) # count +=1 # print " %s => %s = %s" % (o, count, dir(o),) # else: try: if hasattr(o, '__dict__'): count = len([m_name for m_name, m_value in o.__dict__.items() if not m_name.startswith('__') and not type(m_value) in (types.ModuleType, types.MethodType, types.FunctionType,) ]) else: count = 0 except: # Thank you werkzeug __getattr__ overloading! count = 0 return count
returns the number of user browsable properties of an object.
entailment
def extract_object_properties(self, o, limit_size=False): """Extracts all properties from an object (eg. f_locals, f_globals, user dict, instance ...) and returns them as an array of variables. """ try: prop_str = repr(o)[:512] except: prop_str = "Error while extracting value" _logger.e_debug("extract_object_properties(%s)", prop_str) var_list = [] if isinstance(o, dict): a_var_name = None a_var_value = None for a_var_name in o: a_var_value = o[a_var_name] children_count = self.object_properties_count(a_var_value) v_name, v_value, v_type = self.extract_name_value_type(a_var_name, a_var_value, limit_size=limit_size) a_var_info = { 'id': id(a_var_value), 'name': v_name, 'type': "%s%s" % (v_type, " [%s]" % children_count if children_count else '',), 'value': v_value, 'children_count': children_count, } var_list.append(a_var_info) elif type(o) in (list, tuple, set,): MAX_CHILDREN_TO_RETURN = 256 MAX_CHILDREN_MESSAGE = "Truncated by ikpdb (don't hot change me !)." a_var_name = None a_var_value = None do_truncate = len(o) > MAX_CHILDREN_TO_RETURN for idx, a_var_value in enumerate(o): children_count = self.object_properties_count(a_var_value) v_name, v_value, v_type = self.extract_name_value_type(idx, a_var_value, limit_size=limit_size) var_list.append({ 'id': id(a_var_value), 'name': v_name, 'type': "%s%s" % (v_type, " [%s]" % children_count if children_count else '',), 'value': v_value, 'children_count': children_count, }) if do_truncate and idx==MAX_CHILDREN_TO_RETURN-1: var_list.append({ 'id': None, 'name': str(MAX_CHILDREN_TO_RETURN), 'type': '', 'value': MAX_CHILDREN_MESSAGE, 'children_count': 0, }) break else: a_var_name = None a_var_value = None if hasattr(o, '__dict__'): for a_var_name, a_var_value in o.__dict__.items(): if (not a_var_name.startswith('__') and not type(a_var_value) in (types.ModuleType, types.MethodType, types.FunctionType,)): children_count = self.object_properties_count(a_var_value) v_name, v_value, v_type = 
self.extract_name_value_type(a_var_name, a_var_value, limit_size=limit_size) var_list.append({ 'id': id(a_var_value), 'name': v_name, 'type': "%s%s" % (v_type, " [%s]" % children_count if children_count else '',), 'value': v_value, 'children_count': children_count, }) return var_list
Extracts all properties from an object (eg. f_locals, f_globals, user dict, instance ...) and returns them as an array of variables.
entailment
def extract_name_value_type(self, name, value, limit_size=False): """Extracts value of any object, eventually reduces it's size and returns name, truncated value and type (for str with size appended) """ MAX_STRING_LEN_TO_RETURN = 487 try: t_value = repr(value) except: t_value = "Error while extracting value" # convert all var names to string if isinstance(name, str): r_name = name else: r_name = repr(name) # truncate value to limit data flow between ikpdb and client if len(t_value) > MAX_STRING_LEN_TO_RETURN: r_value = "%s ... (truncated by ikpdb)" % (t_value[:MAX_STRING_LEN_TO_RETURN],) r_name = "%s*" % r_name # add a visual marker to truncated var's name else: r_value = t_value if isinstance(value, str): r_type = "%s [%s]" % (IKPdbRepr(value), len(value),) else: r_type = IKPdbRepr(value) return r_name, r_value, r_type
Extracts value of any object, eventually reduces it's size and returns name, truncated value and type (for str with size appended)
entailment
def dump_frames(self, frame): """ dumps frames chain in a representation suitable for serialization and remote (debugger) client usage. """ current_thread = threading.currentThread() frames = [] frame_browser = frame # Browse the frame chain as far as we can _logger.f_debug("dump_frames(), frame analysis:") spacer = "" while hasattr(frame_browser, 'f_back') and frame_browser.f_back != self.frame_beginning: spacer += "=" _logger.f_debug("%s>frame = %s, frame.f_code = %s, frame.f_back = %s, " "self.frame_beginning = %s", spacer, hex(id(frame_browser)), frame_browser.f_code, hex(id(frame_browser.f_back)), hex(id(self.frame_beginning))) # At root frame, globals == locals so we dump only globals if hasattr(frame_browser.f_back, 'f_back')\ and frame_browser.f_back.f_back != self.frame_beginning: locals_vars_list = self.extract_object_properties(frame_browser.f_locals, limit_size=True) else: locals_vars_list = [] globals_vars_list = self.extract_object_properties(frame_browser.f_globals, limit_size=True) # normalize path sent to debugging client file_path = self.normalize_path_out(frame_browser.f_code.co_filename) frame_name = "%s() [%s]" % (frame_browser.f_code.co_name, current_thread.name,) remote_frame = { 'id': id(frame_browser), 'name': frame_name, 'line_number': frame_browser.f_lineno, # Warning 1 based 'file_path': file_path, 'f_locals': locals_vars_list + globals_vars_list, 'thread': current_thread.ident, 'thread_name': current_thread.name } frames.append(remote_frame) frame_browser = frame_browser.f_back return frames
dumps frames chain in a representation suitable for serialization and remote (debugger) client usage.
entailment
def evaluate(self, frame_id, expression, global_context=False, disable_break=False): """Evaluates 'expression' in the context of the frame identified by 'frame_id' or globally. Breakpoints are disabled depending on 'disable_break' value. Returns a tuple of value and type both as str. Note that - depending on the CGI_ESCAPE_EVALUATE_OUTPUT attribute - value is escaped. """ if disable_break: breakpoints_backup = IKBreakpoint.backup_breakpoints_state() IKBreakpoint.disable_all_breakpoints() if frame_id and not global_context: eval_frame = ctypes.cast(frame_id, ctypes.py_object).value global_vars = eval_frame.f_globals local_vars = eval_frame.f_locals else: global_vars = None local_vars = None try: result = eval(expression, global_vars, local_vars) result_type = IKPdbRepr(result) result_value = repr(result) except SyntaxError: # eval() failed, try with exec to handle statements try: result = exec(expression, global_vars, local_vars) result_type = IKPdbRepr(result) result_value = repr(result) except Exception as e: t, result = sys.exc_info()[:2] if isinstance(t, str): result_type = t else: result_type = str(t.__name__) result_value = "%s: %s" % (result_type, result,) except: t, result = sys.exc_info()[:2] if isinstance(t, str): result_type = t else: result_type = t.__name__ result_value = "%s: %s" % (result_type, result,) if disable_break: IKBreakpoint.restore_breakpoints_state(breakpoints_backup) _logger.e_debug("evaluate(%s) => result_value=%s, result_type=%s, result=%s", expression, result_value, result_type, result) if self.CGI_ESCAPE_EVALUATE_OUTPUT: result_value = cgi.escape(result_value) # We must check that result is json.dump compatible so that it can be sent back to client. 
try: json.dumps(result_value) except: t, result = sys.exc_info()[:2] if isinstance(t, str): result_type = t else: result_type = t.__name__ result_value = "<plaintext>%s: IKP3db is unable to JSON encode result to send it to "\ "debugging client.\n"\ " This typically occurs if you try to print a string that cannot be"\ " decoded to 'UTF-8'.\n"\ " You should be able to evaluate result and inspect it's content"\ " by removing the print statement." % result_type return result_value, result_type
Evaluates 'expression' in the context of the frame identified by 'frame_id' or globally. Breakpoints are disabled depending on 'disable_break' value. Returns a tuple of value and type both as str. Note that - depending on the CGI_ESCAPE_EVALUATE_OUTPUT attribute - value is escaped.
entailment
def let_variable(self, frame_id, var_name, expression_value): """ Let a frame's var with a value by building then eval a let expression with breakoints disabled. """ breakpoints_backup = IKBreakpoint.backup_breakpoints_state() IKBreakpoint.disable_all_breakpoints() let_expression = "%s=%s" % (var_name, expression_value,) eval_frame = ctypes.cast(frame_id, ctypes.py_object).value global_vars = eval_frame.f_globals local_vars = eval_frame.f_locals try: exec(let_expression, global_vars, local_vars) error_message="" except Exception as e: t, result = sys.exc_info()[:2] if isinstance(t, str): result_type = t else: result_type = str(t.__name__) error_message = "%s: %s" % (result_type, result,) IKBreakpoint.restore_breakpoints_state(breakpoints_backup) _logger.e_debug("let_variable(%s) => %s", let_expression, error_message or 'succeed') return error_message
Let a frame's var with a value by building then eval a let expression with breakoints disabled.
entailment
def setup_step_into(self, frame, pure=False): """Setup debugger for a "stepInto" """ self.frame_calling = frame if pure: self.frame_stop = None else: self.frame_stop = frame self.frame_return = None self.frame_suspend = False self.pending_stop = True return
Setup debugger for a "stepInto"
entailment
def setup_step_out(self, frame): """Setup debugger for a "stepOut" """ self.frame_calling = None self.frame_stop = None self.frame_return = frame.f_back self.frame_suspend = False self.pending_stop = True return
Setup debugger for a "stepOut"
entailment
def setup_suspend(self): """Setup debugger to "suspend" execution """ self.frame_calling = None self.frame_stop = None self.frame_return = None self.frame_suspend = True self.pending_stop = True self.enable_tracing() return
Setup debugger to "suspend" execution
entailment
def setup_resume(self): """ Setup debugger to "resume" execution """ self.frame_calling = None self.frame_stop = None self.frame_return = None self.frame_suspend = False self.pending_stop = False if not IKBreakpoint.any_active_breakpoint: self.disable_tracing() return
Setup debugger to "resume" execution
entailment
def should_stop_here(self, frame): """ Called by dispatch function to check wether debugger must stop at this frame. Note that we test 'step into' first to give a chance to 'stepOver' in case user click on 'stepInto' on a 'no call' line. """ # TODO: Optimization => defines a set of modules / names where _tracer # is never registered. This will replace skip #if self.skip and self.is_skipped_module(frame.f_globals.get('__name__')): # return False # step into if self.frame_calling and self.frame_calling==frame.f_back: return True # step over if frame==self.frame_stop: # frame cannot be null return True # step out if frame==self.frame_return: # frame cannot be null return True # suspend if self.frame_suspend: return True return False
Called by dispatch function to check wether debugger must stop at this frame. Note that we test 'step into' first to give a chance to 'stepOver' in case user click on 'stepInto' on a 'no call' line.
entailment
def should_break_here(self, frame): """Check wether there is a breakpoint at this frame.""" # Next line commented out for performance #_logger.b_debug("should_break_here(filename=%s, lineno=%s) with breaks=%s", # frame.f_code.co_filename, # frame.f_lineno, # IKBreakpoint.breakpoints_by_number) c_file_name = self.canonic(frame.f_code.co_filename) if not c_file_name in IKBreakpoint.breakpoints_files: return False bp = IKBreakpoint.lookup_effective_breakpoint(c_file_name, frame.f_lineno, frame) return True if bp else False
Check wether there is a breakpoint at this frame.
entailment
def get_threads(self): """Returns a dict of all threads and indicates thread being debugged. key is thread ident and values thread info. Information from this list can be used to swap thread being debugged. """ thread_list = {} for thread in threading.enumerate(): thread_ident = thread.ident thread_list[thread_ident] = { "ident": thread_ident, "name": thread.name, "is_debugger": thread_ident == self.debugger_thread_ident, "is_debugged": thread_ident == self.debugged_thread_ident } return thread_list
Returns a dict of all threads and indicates thread being debugged. key is thread ident and values thread info. Information from this list can be used to swap thread being debugged.
entailment
def set_debugged_thread(self, target_thread_ident=None): """ Allows to reset or set the thread to debug. """ if target_thread_ident is None: self.debugged_thread_ident = None self.debugged_thread_name = '' return { "result": self.get_threads(), "error": "" } thread_list = self.get_threads() if target_thread_ident not in thread_list: return { "result": None, "error": "No thread with ident:%s." % target_thread_ident } if thread_list[target_thread_ident]['is_debugger']: return { "result": None, "error": "Cannot debug IKPdb tracer (sadly...)." } self.debugged_thread_ident = target_thread_ident self.debugged_thread_name = thread_list[target_thread_ident]['name'] return { "result": self.get_threads(), "error": "" }
Allows to reset or set the thread to debug.
entailment
def _line_tracer(self, frame, exc_info=False): """This function is called when debugger has decided that it must stop or break at this frame.""" # next logging statement commented for performance _logger.f_debug("user_line() with " "threadName=%s, frame=%s, frame.f_code=%s, self.mainpyfile=%s," "self.should_break_here()=%s, self.should_stop_here()=%s\n", threading.currentThread().name, hex(id(frame)), frame.f_code, self.mainpyfile, self.should_break_here(frame), self.should_stop_here(frame)) # next lines allow to focus debugging on only one thread if self.debugged_thread_ident is None: self.debugged_thread_ident = threading.currentThread().ident self.debugged_thread_name = threading.currentThread().name else: if threading.currentThread().ident != self.debugged_thread_ident: return # Acquire Breakpoint Lock before sending break command to remote client self._active_breakpoint_lock.acquire() self.status = 'stopped' frames = self.dump_frames(frame) exception=None warning_messages = [] if exc_info: exception = { 'type': IKPdbRepr(exc_info[1]), 'info': exc_info[1].args[0] } if self.stop_at_first_statement: warning_messages = ["IKP3db stopped so that you can setup some " "breakpoints before 'Resuming' execution."] self.stop_at_first_statement = False remote_client.send('programBreak', frames=frames, threads= self.get_threads(), result={'executionStatus': 'stopped'}, #=self.status warning_messages=warning_messages, exception=exception) # Enter a loop to process commands sent by client while True: command = self._command_q.get() if command['cmd'] == 'resume': self.setup_resume() break elif command['cmd'] == 'stepOver': self.setup_step_over(frame) break elif command['cmd'] == 'stepInto': self.setup_step_into(frame) break elif command['cmd'] == 'stepOut': self.setup_step_out(frame) break elif command['cmd'] == 'evaluate': value, result_type = self.evaluate(command['frame'], command['expression'], command['global'], disable_break=command['disableBreak']) 
remote_client.reply(command['obj'], {'value': value, 'type': result_type}) elif command['cmd'] == 'getProperties': error_messages = [] if command.get('id', False): po_value = ctypes.cast(command['id'], ctypes.py_object).value result={'properties': self.extract_object_properties(po_value) or []} command_exec_status = 'ok' else: result={'properties': self.extract_object_properties(None) or []} command_exec_status = 'ok' _logger.e_debug(" => %s", result) remote_client.reply(command['obj'], result, command_exec_status=command_exec_status, error_messages=error_messages) elif command['cmd'] == 'setVariable': error_messages = [] result = {} command_exec_status = 'ok' # TODO: Rework to use id now that we are in right thread context err_message = self.let_variable(command['frame'], command['name'], command['value']) if err_message: command_exec_status = 'error' msg = "setVariable(%s=%s) failed with error: %s" % (command['name'], command['value'], err_message) error_messages = [msg] _logger.e_error(msg) remote_client.reply(command['obj'], result, command_exec_status=command_exec_status, error_messages=error_messages) elif command['cmd'] == '_InternalQuit': _logger.x_critical("Exiting tracer upon reception of _Internal" "Quit command") raise IKPdbQuit() else: _logger.x_critical("Unknown command: %s received by _line_tracer()" % resume_command) raise IKPdbQuit() self.status = 'running' self._active_breakpoint_lock.release() return
This function is called when debugger has decided that it must stop or break at this frame.
entailment
def dump_tracing_state(self, context): """ A debug tool to dump all threads tracing state """ _logger.x_debug("Dumping all threads Tracing state: (%s)" % context) _logger.x_debug(" self.tracing_enabled=%s" % self.tracing_enabled) _logger.x_debug(" self.execution_started=%s" % self.execution_started) _logger.x_debug(" self.status=%s" % self.status) _logger.x_debug(" self.frame_beginning=%s" % self.frame_beginning) _logger.x_debug(" self.debugger_thread_ident=%s" % self.debugger_thread_ident) if False: for thr in threading.enumerate(): is_current_thread = thr.ident == threading.current_thread().ident _logger.x_debug(" Thread: %s, %s %s" % (thr.name, thr.ident, "<= Current*" if is_current_thread else '')) a_frame = sys._current_frames()[thr.ident] while a_frame: flags = [] if a_frame == self.frame_beginning: flags.append("beginning") if a_frame == inspect.currentframe(): flags.append("current") if flags: flags_str = "**"+",".join(flags) else: flags_str = "" _logger.x_debug(" => %s, %s:%s(%s) | %s %s" % (a_frame, a_frame.f_code.co_filename, a_frame.f_lineno, a_frame.f_code.co_name, a_frame.f_trace, flags_str)) a_frame = a_frame.f_back
A debug tool to dump all threads tracing state
entailment
def enable_tracing(self): """ Enable tracing if it is disabled and debugged program is running, else do nothing. Do this on all threads but the debugger thread. :return: True if tracing has been enabled, False else. """ _logger.x_debug("entering enable_tracing()") # uncomment next line to get debugger tracing info #self.dump_tracing_state("before enable_tracing()") if not self.tracing_enabled and self.execution_started: # Restore or set trace function on all existing frames appart from # debugger threading.settrace(self._tracer) # then enable on all threads to come for thr in threading.enumerate(): if thr.ident != self.debugger_thread_ident: # skip debugger thread a_frame = sys._current_frames()[thr.ident] while a_frame: a_frame.f_trace = self._tracer a_frame = a_frame.f_back iksettrace3._set_trace_on(self._tracer, self.debugger_thread_ident) self.tracing_enabled = True #self.dump_tracing_state("after enable_tracing()") return self.tracing_enabled
Enable tracing if it is disabled and debugged program is running, else do nothing. Do this on all threads but the debugger thread. :return: True if tracing has been enabled, False else.
entailment
def disable_tracing(self): """ Disable tracing if it is disabled and debugged program is running, else do nothing. :return: False if tracing has been disabled, True else. """ _logger.x_debug("disable_tracing()") #self.dump_tracing_state("before disable_tracing()") if self.tracing_enabled and self.execution_started: threading.settrace(None) # don't trace threads to come iksettrace3._set_trace_off() self.tracing_enabled = False #self.dump_tracing_state("after disable_tracing()") return self.tracing_enabled
Disable tracing if it is disabled and debugged program is running, else do nothing. :return: False if tracing has been disabled, True else.
entailment
def set_breakpoint(self, file_name, line_number, condition=None, enabled=True): """ Create a breakpoint, register it in the class's lists and returns a tuple of (error_message, break_number) """ c_file_name = self.canonic(file_name) import linecache line = linecache.getline(c_file_name, line_number) if not line: return "Line %s:%d does not exist." % (c_file_name, line_number), None bp = IKBreakpoint(c_file_name, line_number, condition, enabled) if self.pending_stop or IKBreakpoint.any_active_breakpoint: self.enable_tracing() else: self.disable_tracing() return None, bp.number
Create a breakpoint, register it in the class's lists and returns a tuple of (error_message, break_number)
entailment
def change_breakpoint_state(self, bp_number, enabled, condition=None): """ Change breakpoint status or `condition` expression. :param bp_number: number of breakpoint to change :return: None or an error message (string) """ if not (0 <= bp_number < len(IKBreakpoint.breakpoints_by_number)): return "Found no breakpoint numbered: %s" % bp_number bp = IKBreakpoint.breakpoints_by_number[bp_number] if not bp: return "Found no breakpoint numbered %s" % bp_number _logger.b_debug(" change_breakpoint_state(bp_number=%s, enabled=%s, " "condition=%s) found %s", bp_number, enabled, repr(condition), bp) bp.enabled = enabled bp.condition = condition # update condition for conditional breakpoints IKBreakpoint.update_active_breakpoint_flag() # force flag refresh if self.pending_stop or IKBreakpoint.any_active_breakpoint: self.enable_tracing() else: self.disable_tracing() return None
Change breakpoint status or `condition` expression. :param bp_number: number of breakpoint to change :return: None or an error message (string)
entailment
def clear_breakpoint(self, breakpoint_number): """ Delete a breakpoint identified by it's number. :param breakpoint_number: index of breakpoint to delete :type breakpoint_number: int :return: an error message or None """ if not (0 <= breakpoint_number < len(IKBreakpoint.breakpoints_by_number)): return "Found no breakpoint numbered %s" % breakpoint_number bp = IKBreakpoint.breakpoints_by_number[breakpoint_number] if not bp: return "Found no breakpoint numbered: %s" % breakpoint_number _logger.b_debug(" clear_breakpoint(breakpoint_number=%s) found: %s", breakpoint_number, bp) bp.clear() if self.pending_stop or IKBreakpoint.any_active_breakpoint: self.enable_tracing() else: self.disable_tracing() return None
Delete a breakpoint identified by it's number. :param breakpoint_number: index of breakpoint to delete :type breakpoint_number: int :return: an error message or None
entailment
def _runscript(self, filename): """ Launchs debugged program execution using the execfile() builtin. We reset and setup the __main__ dict to allow the script to run in __main__ namespace. This is required for imports from __main__ to run correctly. Note that this has the effect to wipe IKP3db's vars created at this point. """ import __main__ __main__.__dict__.clear() __main__.__dict__.update({"__name__" : "__main__", "__file__" : filename, "__builtins__": __builtins__,}) self.mainpyfile = self.canonic(filename) #statement = 'execfile(%r)\n' % filename statement = "exec(compile(open('%s').read(), '%s', 'exec'))" % (filename, filename,) globals = __main__.__dict__ locals = globals # When IKP3db sets tracing, a number of call and line events happens # BEFORE debugger even reaches user's code (and the exact sequence of # events depends on python version). So we take special measures to # avoid stopping before we reach the main script (see reset(), # _tracer() and _line_tracer() methods for details). self.reset() self.execution_started = True self.status = 'running' # Turn on limited tracing by setting trace function for # current_thread only. This allow self.frame_beginning to be set at # first tracer "call" invocation. sys.settrace(self._tracer) try: exec(statement, globals, locals) except IKPdbQuit: pass finally: self.status = 'terminated' self.disable_tracing()
Launchs debugged program execution using the execfile() builtin. We reset and setup the __main__ dict to allow the script to run in __main__ namespace. This is required for imports from __main__ to run correctly. Note that this has the effect to wipe IKP3db's vars created at this point.
entailment
def command_loop(self, run_script_event):
    """Debugger command loop: process (protocol) client requests until quit.

    Each message received from the remote client is a dict carrying a
    ``command`` key plus optional ``args``. Replies are sent back through
    ``remote_client``; execution-control commands (resume/step/evaluate/...)
    are forwarded to the tracer thread via ``self._command_q``.

    :param run_script_event: event set to release the main thread and start
        the debugged script when the client sends ``runScript``.
    """
    while True:
        obj = remote_client.receive(self)
        command = obj["command"]  # TODO: ensure we always have a command if receive returns
        args = obj.get('args', {})

        if command == 'getBreakpoints':
            breakpoints_list = IKBreakpoint.get_breakpoints_list()
            remote_client.reply(obj, breakpoints_list)
            _logger.b_debug("getBreakpoints(%s) => %s", args, breakpoints_list)

        elif command == "setBreakpoint":
            # Set a new breakpoint. If the lineno line doesn't exist for the
            # filename passed as argument, return an error message.
            # The filename should be in canonical form, as described in the
            # canonic() method.
            file_name = args['file_name']
            line_number = args['line_number']
            condition = args.get('condition', None)
            enabled = args.get('enabled', True)
            _logger.b_debug("setBreakpoint(file_name=%s, line_number=%s,"
                            " condition=%s, enabled=%s) with CWD=%s",
                            file_name, line_number, condition, enabled,
                            os.getcwd())
            error_messages = []
            result = {}
            # Resolve the client-supplied path to a local canonical path.
            c_file_name = self.normalize_path_in(file_name)
            if not c_file_name:
                err = "Failed to find file '%s'" % file_name
                _logger.g_error("setBreakpoint error: %s", err)
                msg = "IKP3db error: Failed to set a breakpoint at %s:%s "\
                      "(%s)." % (file_name, line_number, err)
                error_messages = [msg]
                command_exec_status = 'error'
            else:
                err, bp_number = self.set_breakpoint(c_file_name,
                                                     line_number,
                                                     condition=condition,
                                                     enabled=enabled)
                if err:
                    _logger.g_error("setBreakpoint error: %s", err)
                    msg = "IKP3db error: Failed to set a breakpoint at %s:%s "\
                          "(%s)." % (file_name, line_number, err,)
                    error_messages = [msg]
                    command_exec_status = 'error'
                else:
                    result = {'breakpoint_number': bp_number}
                    command_exec_status = 'ok'
            remote_client.reply(obj, result,
                                command_exec_status=command_exec_status,
                                error_messages=error_messages)

        elif command == "changeBreakpointState":
            # Allows to:
            # - activate or deactivate breakpoint
            # - set or remove condition
            _logger.b_debug("changeBreakpointState(%s)", args)
            bp_number = args.get('breakpoint_number', None)
            if bp_number is None:
                result = {}
                msg = "changeBreakpointState() error: missing required " \
                      "breakpointNumber parameter."
                _logger.g_error(" "+msg)
                error_messages = [msg]
                command_exec_status = 'error'
            else:
                err = self.change_breakpoint_state(bp_number,
                                                   args.get('enabled', False),
                                                   condition=args.get('condition', ''))
                result = {}
                error_messages = []
                if err:
                    msg = "changeBreakpointState() error: \"%s\"" % err
                    _logger.g_error(" "+msg)
                    error_messages = [msg]
                    command_exec_status = 'error'
                else:
                    command_exec_status = 'ok'
            remote_client.reply(obj, result,
                                command_exec_status=command_exec_status,
                                error_messages=error_messages)
            _logger.b_debug(" command_exec_status => %s", command_exec_status)

        elif command == "clearBreakpoint":
            _logger.b_debug("clearBreakpoint(%s)", args)
            bp_number = args.get('breakpoint_number', None)
            if bp_number is None:
                result = {}
                msg = "IKP3db error: Failed to delete breakpoint (Missing "\
                      "required breakpointNumber parameter)."
                error_messages = [msg]
                command_exec_status = 'error'
            else:
                err = self.clear_breakpoint(args['breakpoint_number'])
                result = {}
                error_messages = []
                if err:
                    msg = "IKP3db error: Failed to delete breakpoint (%s)." % err
                    _logger.g_error(msg)
                    error_messages = [msg]
                    command_exec_status = 'error'
                else:
                    command_exec_status = 'ok'
            remote_client.reply(obj, result,
                                command_exec_status=command_exec_status,
                                error_messages=error_messages)

        elif command == 'runScript':
            #TODO: handle a 'stopAtEntry' arg
            _logger.x_debug("runScript(%s)", args)
            remote_client.reply(obj, {'executionStatus': 'running'})
            # Release the main thread which is waiting to exec the script.
            run_script_event.set()

        elif command == 'suspend':
            _logger.x_debug("suspend(%s)", args)
            # We return a running status which is True at that point. Next
            # programBreak will change status to 'stopped'
            remote_client.reply(obj, {'executionStatus': 'running'})
            self.setup_suspend()

        elif command == 'resume':
            _logger.x_debug("resume(%s)", args)
            remote_client.reply(obj, {'executionStatus': 'running'})
            self._command_q.put({'cmd':'resume'})

        elif command == 'stepOver':  # <=> Pdb n(ext)
            _logger.x_debug("stepOver(%s)", args)
            remote_client.reply(obj, {'executionStatus': 'running'})
            self._command_q.put({'cmd':'stepOver'})

        elif command == 'stepInto':  # <=> Pdb s(tep)
            _logger.x_debug("stepInto(%s)", args)
            remote_client.reply(obj, {'executionStatus': 'running'})
            self._command_q.put({'cmd':'stepInto'})

        elif command == 'stepOut':  # <=> Pdb r(eturn)
            _logger.x_debug("stepOut(%s)", args)
            remote_client.reply(obj, {'executionStatus': 'running'})
            self._command_q.put({'cmd':'stepOut'})

        elif command == 'evaluate':
            _logger.e_debug("evaluate(%s)", args)
            # Evaluation must run on the tracer thread where the frames live.
            if self.tracing_enabled and self.status == 'stopped':
                self._command_q.put({
                    'cmd':'evaluate',
                    'obj': obj,
                    'frame': args['frame'],
                    'expression': args['expression'],
                    'global': args['global'],
                    'disableBreak': args['disableBreak']
                })
                # reply will be done in _tracer() where result is available
            else:
                remote_client.reply(obj, {'value': None, 'type': None})

        elif command == 'getProperties':
            _logger.e_debug("getProperties(%s,%s)", args, obj)
            if self.tracing_enabled and self.status == 'stopped':
                if args.get('id'):
                    self._command_q.put({
                        'cmd':'getProperties',
                        'obj': obj,
                        'id': args['id']
                    })
                    # reply will be done in _tracer() when result is available
                else:
                    result={}
                    command_exec_status = 'error'
                    error_messages = ["IKP3db received getProperties command sent without target variable 'id'."]
                    remote_client.reply(obj, result,
                                        command_exec_status=command_exec_status,
                                        error_messages=error_messages)
            else:
                remote_client.reply(obj, {'value': None, 'type': None})

        elif command == 'setVariable':
            _logger.e_debug("setVariable(%s)", args)
            if self.tracing_enabled and self.status == 'stopped':
                self._command_q.put({
                    'cmd':'setVariable',
                    'obj': obj,
                    'frame': args['frame'],
                    'name': args['name'],  # TODO: Rework plugin to send var's id
                    'value': args['value']
                })
                # reply will be done in _tracer() when result is available
            else:
                remote_client.reply(obj, {'value': None, 'type': None})

        elif command == 'reconnect':
            _logger.n_debug("reconnect(%s)", args)
            remote_client.reply(obj, {'executionStatus': self.status})

        elif command == 'getThreads':
            _logger.x_debug("getThreads(%s)", args)
            threads_list = self.get_threads()
            remote_client.reply(obj, threads_list)

        elif command == 'setDebuggedThread':
            _logger.x_debug("setDebuggedThread(%s)", args)
            ret_val = self.set_debugged_thread(args['ident'])
            if ret_val['error']:
                remote_client.reply(obj,
                                    {},  # result
                                    command_exec_status='error',
                                    error_messages=[ret_val['error']])
            else:
                remote_client.reply(obj, ret_val['result'])

        elif command == '_InternalQuit':
            # '_InternalQuit' is an IKP3db internal message, generated by
            # IKPdbConnectionHandler when a socket.error occured.
            # Usually this occurs when socket has been destroyed as
            # debugged program sys.exit()
            # So we leave the command loop to stop the debugger thread
            # in order to allow debugged program to shutdown correctly.
            # This message must NEVER be send by remote client.
            _logger.e_debug("_InternalQuit(%s)", args)
            self._command_q.put({'cmd':'_InternalQuit'})
            return

        else:  # unrecognized command ; just log and ignored
            _logger.g_critical("Unsupported command '%s' ignored.", command)

        # After each processed command, dump the breakpoint registry state
        # (only when the logger subsystem is enabled).
        if IKPdbLogger.enabled:
            _logger.b_debug("Current breakpoints list [any_active_breakpoint=%s]:",
                            IKBreakpoint.any_active_breakpoint)
            _logger.b_debug(" IKBreakpoint.breakpoints_by_file_and_line:")
            if not IKBreakpoint.breakpoints_by_file_and_line:
                _logger.b_debug(" <empty>")
            for file_line, bp in list(IKBreakpoint.breakpoints_by_file_and_line.items()):
                _logger.b_debug(" %s => #%s, enabled=%s, condition=%s, %s",
                                file_line, bp.number, bp.enabled,
                                repr(bp.condition), bp)
            _logger.b_debug(" IKBreakpoint.breakpoints_files = %s",
                            IKBreakpoint.breakpoints_files)
            _logger.b_debug(" IKBreakpoint.breakpoints_by_number = %s",
                            IKBreakpoint.breakpoints_by_number)
This is the debugger command loop that processes (protocol) client requests.
entailment
def _get_elements(self, url, key, eclass, id=None, name=None):
    """Retrieve child resources and wrap those matching `id` or `name`.

    Args:
        url(str): url of children.
        key(str): key in the returned JSON.
        eclass(subclass type of :py:class:`_ResourceElement`): element class to create instances of.
        id(str, optional): only return resources whose `id` property matches the given `id`
        name(str, optional): only return resources whose `name` property matches the given `name`

    Returns:
        list(_ResourceElement): List of `eclass` instances

    Raises:
        ValueError: both `id` and `name` are specified together
    """
    if id is not None and name is not None:
        raise ValueError("id and name cannot specified together")
    matched = []
    for item in self.rest_client.make_request(url)[key]:
        if _exact_resource(item, id) and _matching_resource(item, name):
            matched.append(eclass(item, self.rest_client))
    return matched
Get elements matching `id` or `name` Args: url(str): url of children. key(str): key in the returned JSON. eclass(subclass type of :py:class:`_ResourceElement`): element class to create instances of. id(str, optional): only return resources whose `id` property matches the given `id` name(str, optional): only return resources whose `name` property matches the given `name` Returns: list(_ResourceElement): List of `eclass` instances Raises: ValueError: both `id` and `name` are specified together
entailment
def _get_element_by_id(self, url, key, eclass, id):
    """Return the single element whose `id` matches.

    Args:
        url(str): url of children.
        key(str): key in the returned JSON.
        eclass(subclass type of :py:class:`_ResourceElement`): element class to create instances of.
        id(str): return resources whose `id` property matches the given `id`

    Returns:
        _ResourceElement: Element of type `eclass` matching the given `id`

    Raises:
        ValueError: No resource matches given `id` or multiple resources matching given `id`
    """
    matches = self._get_elements(url, key, eclass, id=id)
    if len(matches) == 1:
        return matches[0]
    if not matches:
        raise ValueError("No resource matching: {0}".format(id))
    raise ValueError("Multiple resources matching: {0}".format(id))
Get a single element matching an `id` Args: url(str): url of children. key(str): key in the returned JSON. eclass(subclass type of :py:class:`_ResourceElement`): element class to create instances of. id(str): return resources whose `id` property matches the given `id` Returns: _ResourceElement: Element of type `eclass` matching the given `id` Raises: ValueError: No resource matches given `id` or multiple resources matching given `id`
entailment
def get_domain(self):
    """Get the Streams domain for the instance that owns this view.

    Returns:
        Domain: Streams domain for the instance owning this view, or
        ``None`` when no domain reference is present on this element.
    """
    if not hasattr(self, 'domain'):
        return None
    return Domain(self.rest_client.make_request(self.domain), self.rest_client)
Get the Streams domain for the instance that owns this view. Returns: Domain: Streams domain for the instance owning this view.
entailment
def get_instance(self):
    """Get the Streams instance that owns this view.

    Returns:
        Instance: Streams instance owning this view.
    """
    response = self.rest_client.make_request(self.instance)
    return Instance(response, self.rest_client)
Get the Streams instance that owns this view. Returns: Instance: Streams instance owning this view.
entailment
def get_job(self):
    """Get the Streams job that owns this view.

    Returns:
        Job: Streams Job owning this view.
    """
    response = self.rest_client.make_request(self.job)
    return Job(response, self.rest_client)
Get the Streams job that owns this view. Returns: Job: Streams Job owning this view.
entailment
def stop_data_fetch(self):
    """Stop the thread that fetches data from the Streams view server.

    Signals the active fetcher (if any) to stop and drops the reference
    to it. Safe to call when no fetch is in progress.
    """
    fetcher = self._data_fetcher
    if not fetcher:
        return
    fetcher.stop.set()
    self._data_fetcher = None
Stops the thread that fetches data from the Streams view server.
entailment
def start_data_fetch(self):
    """Start a thread that fetches data from the Streams view server.

    Any previously running fetcher is stopped first. Each item in the
    returned `Queue` represents a single tuple on the stream the view is
    attached to.

    Returns:
        queue.Queue: Queue containing view data.

    .. note:: This is a queue of the tuples converted to Python objects,
        it is not a queue of :py:class:`ViewItem` objects.
    """
    self.stop_data_fetch()
    fetcher = _ViewDataFetcher(self, self._tuple_fn)
    self._data_fetcher = fetcher
    threading.Thread(target=fetcher).start()
    return fetcher.items
Starts a thread that fetches data from the Streams view server. Each item in the returned `Queue` represents a single tuple on the stream the view is attached to. Returns: queue.Queue: Queue containing view data. .. note:: This is a queue of the tuples converted to Python objects, it is not a queue of :py:class:`ViewItem` objects.
entailment
def fetch_tuples(self, max_tuples=20, timeout=None):
    """Fetch a number of tuples from this view.

    Fetching of data must have been started with
    :py:meth:`start_data_fetch` before calling this method.

    If ``timeout`` is ``None`` then the returned list will contain
    ``max_tuples`` tuples. Otherwise if the timeout is reached the list
    may contain less than ``max_tuples`` tuples.

    Args:
        max_tuples(int): Maximum number of tuples to fetch.
        timeout(float): Maximum time to wait for ``max_tuples`` tuples.

    Returns:
        list: List of fetched tuples.

    .. versionadded:: 1.12
    """
    collected = []
    if timeout is None:
        # Block indefinitely until max_tuples arrive or fetching stops.
        while len(collected) < max_tuples:
            fetcher = self._data_fetcher
            if not fetcher:
                break
            collected.append(fetcher.items.get())
        return collected

    deadline = time.time() + float(timeout)
    while len(collected) < max_tuples:
        remaining = deadline - time.time()
        if remaining <= 0:
            break
        fetcher = self._data_fetcher
        if not fetcher:
            break
        try:
            collected.append(fetcher.items.get(timeout=remaining))
        except queue.Empty:
            break
    return collected
Fetch a number of tuples from this view. Fetching of data must have been started with :py:meth:`start_data_fetch` before calling this method. If ``timeout`` is ``None`` then the returned list will contain ``max_tuples`` tuples. Otherwise if the timeout is reached the list may contain less than ``max_tuples`` tuples. Args: max_tuples(int): Maximum number of tuples to fetch. timeout(float): Maximum time to wait for ``max_tuples`` tuples. Returns: list: List of fetched tuples. .. versionadded:: 1.12
entailment
def display(self, duration=None, period=2):
    """Display a view within a Jupyter or IPython notebook.

    Provides an easy mechanism to visualize data on a stream using a view.

    Tuples are fetched from the view and displayed in a table within the
    notebook cell using a ``pandas.DataFrame``. The table is continually
    updated with the latest tuples from the view.

    This method calls :py:meth:`start_data_fetch` and will call
    :py:meth:`stop_data_fetch` when completed if `duration` is set.

    Args:
        duration(float): Number of seconds to fetch and display tuples. If ``None`` then the display will be updated until :py:meth:`stop_data_fetch` is called.
        period(float): Maximum update period.

    .. note:: A view is a sampling of data on a stream so tuples that are on the stream may not appear in the view.

    .. note:: Python modules `ipywidgets` and `pandas` must be installed in the notebook environment.

    .. warning:: Behavior when called outside a notebook is undefined.

    .. versionadded:: 1.12
    """
    # Imported lazily so the module does not require ipywidgets outside
    # notebook environments.
    import ipywidgets as widgets
    # Read-only header: view description labelled with the view name, plus
    # a fetch-status indicator widget.
    vn = widgets.Text(value=self.description, description=self.name, disabled=True)
    active = widgets.Valid(value=True, description='Fetching', readout='Stopped')
    out = widgets.Output(layout={'border': '1px solid black'})
    hb = widgets.HBox([vn, active])
    vb = widgets.VBox([hb, out])
    # NOTE(review): relies on the notebook-provided global ``display``
    # being in scope (IPython injects it) -- undefined outside a notebook.
    display(vb)
    # The table refresh runs on a background thread; see self._display().
    self._display_thread = threading.Thread(target=lambda: self._display(out, duration, period, active))
    self._display_thread.start()
Display a view within a Jupyter or IPython notebook. Provides an easy mechanism to visualize data on a stream using a view. Tuples are fetched from the view and displayed in a table within the notebook cell using a ``pandas.DataFrame``. The table is continually updated with the latest tuples from the view. This method calls :py:meth:`start_data_fetch` and will call :py:meth:`stop_data_fetch` when completed if `duration` is set. Args: duration(float): Number of seconds to fetch and display tuples. If ``None`` then the display will be updated until :py:meth:`stop_data_fetch` is called. period(float): Maximum update period. .. note:: A view is a sampling of data on a stream so tuples that are on the stream may not appear in the view. .. note:: Python modules `ipywidgets` and `pandas` must be installed in the notebook environment. .. warning:: Behavior when called outside a notebook is undefined. .. versionadded:: 1.12
entailment
def get_view_items(self):
    """Get a list of :py:class:`ViewItem` elements associated with this view.

    Returns:
        list(ViewItem): List of ViewItem(s) associated with this view.
    """
    view_items = [ViewItem(json_view_items, self.rest_client) for json_view_items
                  in self.rest_client.make_request(self.viewItems)['viewItems']]
    # Lazy %-style arguments: the message is only formatted when debug
    # logging is actually enabled (the original concatenated eagerly).
    logger.debug("Retrieved %s items from view %s", len(view_items), self.name)
    return view_items
Get a list of :py:class:`ViewItem` elements associated with this view. Returns: list(ViewItem): List of ViewItem(s) associated with this view.
entailment
def retrieve_log_trace(self, filename=None, dir=None):
    """Retrieve the application log and trace files of the job and save
    them as a compressed tar file.

    An existing file with the same name will be overwritten.

    Args:
        filename (str): name of the created tar file. Defaults to `job_<id>_<timestamp>.tar.gz` where `id` is the job identifier and `timestamp` is the number of seconds since the Unix epoch, for example ``job_355_1511995995.tar.gz``.
        dir (str): a valid directory in which to save the archive. Defaults to the current directory.

    Returns:
        str: the path to the created tar file, or ``None`` if retrieving a job's logs is not supported in the version of IBM Streams to which the job is submitted.

    .. versionadded:: 1.8
    """
    log_url = getattr(self, "applicationLogTrace", None)
    if log_url is None:
        # Older Streams versions do not expose this REST resource.
        return None
    logger.debug("Retrieving application logs from: " + log_url)
    target = filename if filename else _file_name('job', self.id, '.tar.gz')
    return self.rest_client._retrieve_file(log_url, target, dir,
                                           'application/x-compressed')
Retrieves the application log and trace files of the job and saves them as a compressed tar file. An existing file with the same name will be overwritten. Args: filename (str): name of the created tar file. Defaults to `job_<id>_<timestamp>.tar.gz` where `id` is the job identifier and `timestamp` is the number of seconds since the Unix epoch, for example ``job_355_1511995995.tar.gz``. dir (str): a valid directory in which to save the archive. Defaults to the current directory. Returns: str: the path to the created tar file, or ``None`` if retrieving a job's logs is not supported in the version of IBM Streams to which the job is submitted. .. versionadded:: 1.8
entailment
def get_views(self, name=None):
    """Get the list of :py:class:`~streamsx.rest_primitives.View` elements
    associated with this job.

    Args:
        name(str, optional): Returns view(s) matching `name`. `name` can be a regular expression. If `name` is not supplied, then all views associated with this instance are returned.

    Returns:
        list(streamsx.rest_primitives.View): List of views matching `name`.

    Retrieving a list of views that contain the string "temperatureSensor" could be performed as followed
    Example:
        >>> from streamsx import rest
        >>> sc = rest.StreamingAnalyticsConnection()
        >>> instances = sc.get_instances()
        >>> job = instances[0].get_jobs()[0]
        >>> views = job.get_views(name = "*temperatureSensor*")
    """
    views_url = self.views
    return self._get_elements(views_url, 'views', View, name=name)
Get the list of :py:class:`~streamsx.rest_primitives.View` elements associated with this job. Args: name(str, optional): Returns view(s) matching `name`. `name` can be a regular expression. If `name` is not supplied, then all views associated with this instance are returned. Returns: list(streamsx.rest_primitives.View): List of views matching `name`. Retrieving a list of views that contain the string "temperatureSensor" could be performed as followed Example: >>> from streamsx import rest >>> sc = rest.StreamingAnalyticsConnection() >>> instances = sc.get_instances() >>> job = instances[0].get_jobs()[0] >>> views = job.get_views(name = "*temperatureSensor*")
entailment
def get_operators(self, name=None):
    """Get the list of :py:class:`Operator` elements associated with this job.

    Args:
        name(str): Only return operators matching `name`, where `name` can be a regular expression. If `name` is not supplied, then all operators for this job are returned.

    Returns:
        list(Operator): List of Operator elements associated with this job.

    Retrieving a list of operators whose name contains the string "temperatureSensor" could be performed as followed
    Example:
        >>> from streamsx import rest
        >>> sc = rest.StreamingAnalyticsConnection()
        >>> instances = sc.get_instances()
        >>> job = instances[0].get_jobs()[0]
        >>> operators = job.get_operators(name="*temperatureSensor*")

    .. versionchanged:: 1.9 `name` parameter added.
    """
    operators_url = self.operators
    return self._get_elements(operators_url, 'operators', Operator, name=name)
Get the list of :py:class:`Operator` elements associated with this job. Args: name(str): Only return operators matching `name`, where `name` can be a regular expression. If `name` is not supplied, then all operators for this job are returned. Returns: list(Operator): List of Operator elements associated with this job. Retrieving a list of operators whose name contains the string "temperatureSensor" could be performed as followed Example: >>> from streamsx import rest >>> sc = rest.StreamingAnalyticsConnection() >>> instances = sc.get_instances() >>> job = instances[0].get_jobs()[0] >>> operators = job.get_operators(name="*temperatureSensor*") .. versionchanged:: 1.9 `name` parameter added.
entailment
def cancel(self, force=False):
    """Cancel this job.

    Args:
        force (bool, optional): Forcefully cancel this job.

    Returns:
        bool: True if the job was cancelled, otherwise False if an error occurred.
    """
    delegator = self.rest_client._sc._delegator
    return delegator._cancel_job(self, force)
Cancel this job. Args: force (bool, optional): Forcefully cancel this job. Returns: bool: True if the job was cancelled, otherwise False if an error occurred.
entailment
def get_metrics(self, name=None):
    """Get metrics for this operator.

    Args:
        name(str, optional): Only return metrics matching `name`, where `name` can be a regular expression. If `name` is not supplied, then all metrics for this operator are returned.

    Returns:
        list(Metric): List of matching metrics.

    Retrieving a list of metrics whose name contains the string "temperatureSensor" could be performed as followed
    Example:
        >>> from streamsx import rest
        >>> sc = rest.StreamingAnalyticsConnection()
        >>> instances = sc.get_instances()
        >>> operator = instances[0].get_operators()[0]
        >>> metrics = op.get_metrics(name='*temperatureSensor*')
    """
    metrics_url = self.metrics
    return self._get_elements(metrics_url, 'metrics', Metric, name=name)
Get metrics for this operator. Args: name(str, optional): Only return metrics matching `name`, where `name` can be a regular expression. If `name` is not supplied, then all metrics for this operator are returned. Returns: list(Metric): List of matching metrics. Retrieving a list of metrics whose name contains the string "temperatureSensor" could be performed as followed Example: >>> from streamsx import rest >>> sc = rest.StreamingAnalyticsConnection() >>> instances = sc.get_instances() >>> operator = instances[0].get_operators()[0] >>> metrics = op.get_metrics(name='*temperatureSensor*')
entailment
def get_host(self):
    """Get the resource this operator is currently executing in.

    If the operator is running on an externally managed resource ``None``
    is returned.

    Returns:
        Host: Resource this operator is running on, or ``None``.

    .. versionadded:: 1.9
    """
    host_url = getattr(self, 'host', None)
    if not host_url:
        return None
    return Host(self.rest_client.make_request(host_url), self.rest_client)
Get resource this operator is currently executing in. If the operator is running on an externally managed resource ``None`` is returned. Returns: Host: Resource this operator is running on. .. versionadded:: 1.9
entailment