sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def get_pe(self):
    """Get the Streams processing element this operator is executing in.

    Returns:
        PE: Processing element for this operator.

    .. versionadded:: 1.9
    """
    # self.pe is the REST URL of the processing element resource.
    return PE(self.rest_client.make_request(self.pe), self.rest_client)
def retrieve_trace(self, filename=None, dir=None):
    """Retrieve the application trace files for this PE and save them
    as a plain text file.

    An existing file with the same name will be overwritten.

    Args:
        filename (str): name of the created file. Defaults to
            `pe_<id>_<timestamp>.trace` where `id` is the PE identifier and
            `timestamp` is the number of seconds since the Unix epoch,
            for example ``pe_83_1511995995.trace``.
        dir (str): a valid directory in which to save the file.
            Defaults to the current directory.

    Returns:
        str: the path to the created file, or ``None`` if retrieving a
        PE's trace is not supported in the version of Streams to which
        the job is submitted.

    .. versionadded:: 1.9
    """
    # Older Streams versions do not expose an applicationTrace endpoint.
    if getattr(self, "applicationTrace", None) is None:
        return None
    logger.debug("Retrieving PE trace: " + self.applicationTrace)
    if not filename:
        filename = _file_name('pe', self.id, '.trace')
    return self.rest_client._retrieve_file(self.applicationTrace, filename, dir, 'text/plain')
def retrieve_console_log(self, filename=None, dir=None):
    """Retrieve the application console log (standard out and error)
    files for this PE and save them as a plain text file.

    An existing file with the same name will be overwritten.

    Args:
        filename (str): name of the created file. Defaults to
            `pe_<id>_<timestamp>.stdouterr` where `id` is the PE identifier
            and `timestamp` is the number of seconds since the Unix epoch,
            for example ``pe_83_1511995995.stdouterr``.
        dir (str): a valid directory in which to save the file.
            Defaults to the current directory.

    Returns:
        str: the path to the created file, or ``None`` if retrieving a
        PE's console log is not supported in the version of Streams to
        which the job is submitted.

    .. versionadded:: 1.9
    """
    # Older Streams versions do not expose a consoleLog endpoint.
    if getattr(self, "consoleLog", None) is None:
        return None
    logger.debug("Retrieving PE console log: " + self.consoleLog)
    if not filename:
        filename = _file_name('pe', self.id, '.stdouterr')
    return self.rest_client._retrieve_file(self.consoleLog, filename, dir, 'text/plain')
def get_resource_allocation(self):
    """Get the :py:class:`ResourceAllocation` element instance.

    Returns:
        ResourceAllocation: Resource allocation used to access information
        about the resource where this PE is running, or ``None`` if the
        REST resource does not expose a resource allocation.

    .. versionadded:: 1.9
    """
    if hasattr(self, 'resourceAllocation'):
        return ResourceAllocation(self.rest_client.make_request(self.resourceAllocation), self.rest_client)
    return None
def get_resource(self):
    """Get the :py:class:`Resource` of the resource allocation.

    Returns:
        Resource: Resource for this allocation.

    .. versionadded:: 1.9
    """
    # self.resource is the REST URL of the resource element.
    return Resource(self.rest_client.make_request(self.resource), self.rest_client)
def get_jobs(self, name=None):
    """Retrieve jobs running on this resource in its instance.

    Args:
        name (str, optional): Only return jobs containing property **name**
            that matches `name`. `name` can be a regular expression.
            If `name` is not supplied, then all jobs are returned.

    Returns:
        list(Job): A list of jobs matching the given `name`.

    .. note:: If ``applicationResource`` is `False` an empty list is returned.

    .. versionadded:: 1.9
    """
    # Only application resources run jobs.
    if not self.applicationResource:
        return []
    return self._get_elements(self.jobs, 'jobs', Job, None, name)
def get_operator_output_port(self):
    """Get the output port of this exported stream.

    Returns:
        OperatorOutputPort: Output port of this exported stream.
    """
    # self.operatorOutputPort is the REST URL of the output port resource.
    return OperatorOutputPort(self.rest_client.make_request(self.operatorOutputPort), self.rest_client)
def _as_published_topic(self):
"""This stream as a PublishedTopic if it is published otherwise None
"""
oop = self.get_operator_output_port()
if not hasattr(oop, 'export'):
return
export = oop.export
if export['type'] != 'properties':
return
seen_export_type = False
topic = None
for p in export['properties']:
if p['type'] != 'rstring':
continue
if p['name'] == '__spl_exportType':
if p['values'] == ['"topic"']:
seen_export_type = True
else:
return
if p['name'] == '__spl_topic':
topic = p['values'][0]
if seen_export_type and topic is not None:
schema = None
if hasattr(oop, 'tupleAttributes'):
ta_url = oop.tupleAttributes
ta_resp = self.rest_client.make_request(ta_url)
schema = streamsx.topology.schema.StreamSchema(ta_resp['splType'])
return PublishedTopic(topic[1:-1], schema)
return | This stream as a PublishedTopic if it is published otherwise None | entailment |
def of_service(config):
    """Connect to an IBM Streams service instance running in IBM Cloud Private for Data.

    The instance is specified in `config`. The configuration may be code
    injected from the list of services in a Jupyter notebook running in ICPD
    or manually created. The code that selects a service instance by name is::

        # Two lines are code injected in a Jupyter notebook by selecting the service instance
        from icpd_core import icpd_util
        cfg = icpd_util.get_service_details(name='instanceName')

        instance = Instance.of_service(cfg)

    SSL host verification is disabled by setting
    :py:const:`~streamsx.topology.context.ConfigParams.SSL_VERIFY`
    to ``False`` within `config` before calling this method::

        cfg[ConfigParams.SSL_VERIFY] = False
        instance = Instance.of_service(cfg)

    Args:
        config(dict): Configuration of IBM Streams service instance.

    Returns:
        Instance: Instance representing the IBM Streams service instance.

    Raises:
        ValueError: `config` does not contain a Streams service definition.

    .. note:: Only supported when running within the ICPD cluster,
        for example in a Jupyter notebook within an ICPD project.

    .. versionadded:: 1.12
    """
    service = Instance._find_service_def(config)
    if not service:
        raise ValueError("Streams instance definition not found in configuration")
    endpoint = service['connection_info'].get('serviceRestEndpoint')
    resource_url, name = Instance._root_from_endpoint(endpoint)
    sc = streamsx.rest.StreamsConnection(resource_url=resource_url,
                                         auth=_ICPDAuthHandler(name, service['service_token']))
    if streamsx.topology.context.ConfigParams.SSL_VERIFY in config:
        sc.session.verify = config[streamsx.topology.context.ConfigParams.SSL_VERIFY]
    return sc.get_instance(name)
def get_job(self, id):
    """Retrieve a job matching the given `id`.

    Args:
        id (str): Job `id` to match.

    Returns:
        Job: Job matching the given `id`.

    Raises:
        ValueError: No resource matches given `id` or multiple resources
            match given `id`.
    """
    # Coerce to str so integer job identifiers are accepted.
    return self._get_element_by_id(self.jobs, 'jobs', Job, str(id))
def get_published_topics(self):
    """Get a list of published topics for this instance.

    Streams applications publish streams to a topic that can be subscribed
    to by other applications. This allows a microservice approach where
    publishers and subscribers are independent of each other.

    A published stream has a topic and a schema. It is recommended that a
    topic is only associated with a single schema.

    Streams may be published and subscribed by applications regardless of
    the implementation language. For example a Python application can
    publish a stream of JSON tuples that are subscribed to by SPL and Java
    applications.

    Returns:
        list(PublishedTopic): List of currently published topics.
    """
    published_topics = []
    # A topic can be published multiple times (typically with the same
    # schema) but the returned list contains each (topic, schema) pair
    # only once: the topics being published are returned, not the streams.
    seen_topics = {}
    for es in self.get_exported_streams():
        pt = es._as_published_topic()
        if pt is None:
            continue
        if pt.topic in seen_topics:
            if pt.schema is None or pt.schema in seen_topics[pt.topic]:
                continue
            seen_topics[pt.topic].append(pt.schema)
        else:
            seen_topics[pt.topic] = [pt.schema]
        published_topics.append(pt)
    return published_topics
def get_application_configurations(self, name=None):
    """Retrieve application configurations for this instance.

    Args:
        name (str, optional): Only return application configurations
            containing property **name** that matches `name`. `name` can be
            a regular expression. If `name` is not supplied, then all
            application configurations are returned.

    Returns:
        list(ApplicationConfiguration): A list of application configurations
        matching the given `name`, or ``None`` if this instance does not
        support application configurations.

    .. versionadded 1.12
    """
    # Older Streams versions do not expose applicationConfigurations.
    if not hasattr(self, 'applicationConfigurations'):
        return None
    return self._get_elements(self.applicationConfigurations,
                              'applicationConfigurations',
                              ApplicationConfiguration, None, name)
def create_application_configuration(self, name, properties, description=None):
    """Create an application configuration.

    Args:
        name (str): Name of the application configuration.
        properties (dict): Property values for the configuration.
        description (str, optional): Description of the configuration.

    Returns:
        ApplicationConfiguration: The newly created application configuration.

    Raises:
        NotImplementedError: This instance does not support application
            configurations.

    .. versionadded 1.12
    """
    if not hasattr(self, 'applicationConfigurations'):
        raise NotImplementedError()
    cv = ApplicationConfiguration._props(name, properties, description)
    res = self.rest_client.session.post(self.applicationConfigurations,
                                        headers={'Accept': 'application/json'},
                                        json=cv)
    _handle_http_errors(res)
    return ApplicationConfiguration(res.json(), self.rest_client)
def submit_job(self, bundle, job_config=None):
    """Submit a Streams Application Bundle (sab file) to
    this Streaming Analytics service.

    Args:
        bundle(str): path to a Streams application bundle (sab file)
            containing the application to be submitted.
        job_config(JobConfig): a job configuration overlay.

    Returns:
        dict: JSON response from service containing 'name' field with unique
            job name assigned to submitted job, or, 'error_status' and
            'description' fields if submission was unsuccessful.
    """
    # Submission mechanics differ by service version; the delegator hides that.
    return self._delegator._submit_job(bundle=bundle, job_config=job_config)
def cancel_job(self, job_id=None, job_name=None):
    """Cancel a running job.

    Args:
        job_id (str, optional): Identifier of job to be canceled.
        job_name (str, optional): Name of job to be canceled.

    Returns:
        dict: JSON response for the job cancel operation.
    """
    # Cancellation mechanics differ by service version; the delegator hides that.
    return self._delegator.cancel_job(job_id=job_id, job_name=job_name)
def _get_jobs_url(self):
"""Get & save jobs URL from the status call."""
if self._jobs_url is None:
self.get_instance_status()
if self._jobs_url is None:
raise ValueError("Cannot obtain jobs URL")
return self._jobs_url | Get & save jobs URL from the status call. | entailment |
def cancel_job(self, job_id=None, job_name=None):
    """Cancel a running job.

    Args:
        job_id (str, optional): Identifier of job to be canceled.
        job_name (str, optional): Name of job to be canceled.

    Returns:
        dict: JSON response for the job cancel operation.
    """
    # Only include the identifiers that were actually supplied.
    payload = {}
    if job_name is not None:
        payload['job_name'] = job_name
    if job_id is not None:
        payload['job_id'] = job_id
    jobs_url = self._get_url('jobs_path')
    res = self.rest_client.session.delete(jobs_url, params=payload)
    _handle_http_errors(res)
    return res.json()
def start_instance(self):
    """Start the instance for this Streaming Analytics service.

    Returns:
        dict: JSON response for the instance start operation.
    """
    start_url = self._get_url('start_path')
    # An empty JSON body is required by the service API.
    res = self.rest_client.session.put(start_url, json={})
    _handle_http_errors(res)
    return res.json()
def stop_instance(self):
    """Stop the instance for this Streaming Analytics service.

    Returns:
        dict: JSON response for the instance stop operation.
    """
    stop_url = self._get_url('stop_path')
    # An empty JSON body is required by the service API.
    res = self.rest_client.session.put(stop_url, json={})
    _handle_http_errors(res)
    return res.json()
def get_instance_status(self):
    """Get the status of the instance for this Streaming Analytics service.

    Returns:
        dict: JSON response for the instance status operation.
    """
    status_url = self._get_url('status_path')
    res = self.rest_client.session.get(status_url)
    _handle_http_errors(res)
    return res.json()
def submit_job(self, job_config=None):
    """Submit this Streams Application Bundle (sab file) to
    its associated instance.

    Args:
        job_config(JobConfig): a job configuration overlay.

    Returns:
        Job: Resulting job instance.
    """
    # The delegator returns the new job's identifier; resolve it to a Job.
    job_id = self._delegator._submit_bundle(self, job_config)
    return self._instance.get_job(job_id)
def _cancel_job(self, job, force):
    """Cancel a job using streamtool.

    Args:
        job: Job to cancel.
        force (bool): Forcefully cancel the job.

    Returns:
        bool: ``True`` if the cancel was issued via a local streamtool
        install, ``False`` when no local install is available.
    """
    import streamsx.st as st
    if st._has_local_install:
        return st._cancel_job(job.id, force,
                              domain_id=job.get_instance().get_domain().id,
                              instance_id=job.get_instance().id)
    return False
def update(self, properties=None, description=None):
    """Update this application configuration.

    To create or update a property provide its key-value pair in
    `properties`. To delete a property provide its key with the value
    ``None`` in `properties`.

    Args:
        properties (dict): Property values to be updated. If ``None`` the
            properties are unchanged.
        description (str): Description for the configuration. If ``None``
            the description is unchanged.

    Returns:
        ApplicationConfiguration: self
    """
    cv = ApplicationConfiguration._props(properties=properties, description=description)
    res = self.rest_client.session.patch(self.rest_self,
                                         headers={'Accept': 'application/json',
                                                  'Content-Type': 'application/json'},
                                         json=cv)
    _handle_http_errors(res)
    # Refresh the local JSON representation with the server's response.
    self.json_rep = res.json()
    return self
def delete(self):
    """Delete this application configuration.

    Raises an HTTP error if the delete request is rejected by the service.
    """
    res = self.rest_client.session.delete(self.rest_self)
    _handle_http_errors(res)
def _normalize(schema, allow_none=True):
    """Normalize a schema value to a schema object.

    Accepts an existing ``CommonSchema``/``StreamSchema``, an SPL schema
    string, one of the Python type shortcuts (``object``, ``str``,
    ``json``), or (Python 3) a named tuple with type hints.

    Args:
        schema: Value to normalize.
        allow_none (bool): When ``True`` a ``None`` schema is passed through.

    Returns:
        The normalized schema, or ``None`` when allowed.

    Raises:
        ValueError: `schema` is not a recognized schema definition.
    """
    if allow_none and schema is None:
        return schema
    if isinstance(schema, (CommonSchema, StreamSchema)):
        return schema
    if isinstance(schema, basestring):
        return StreamSchema(schema)
    py_types = {
        _spl_object: CommonSchema.Python,
        _spl_str: CommonSchema.String,
        json: CommonSchema.Json,
    }
    try:
        if schema in py_types:
            return py_types[schema]
    except TypeError:
        # Unhashable values cannot be one of the shortcut keys; fall through
        # to the generic error below instead of raising TypeError.
        pass
    # With Python 3 allow a named tuple with type hints
    # to be used as a schema definition.
    if sys.version_info.major == 3:
        if isinstance(schema, type) and issubclass(schema, tuple):
            # typing.NamedTuple: `_field_types` was removed in Python 3.9;
            # `__annotations__` carries the same field-type information.
            if hasattr(schema, '_fields') and (
                    hasattr(schema, '_field_types')
                    or getattr(schema, '__annotations__', None)):
                return _from_named_tuple(schema)
    raise ValueError("Unknown stream schema type:" + str(schema))
def is_common(schema):
    """Test whether `schema` is a common schema.

    Args:
        schema: Schema to test.

    Returns:
        bool: ``True`` if schema is a common schema, otherwise ``False``.
    """
    if isinstance(schema, StreamSchema):
        return schema.schema() in _SCHEMA_COMMON
    if isinstance(schema, CommonSchema):
        return True
    if isinstance(schema, basestring):
        # Parse the SPL schema string and re-test.
        return is_common(StreamSchema(schema))
    return False
def _set(self, schema):
    """Initialize this schema's state from another schema.

    Args:
        schema: A ``CommonSchema`` or another instance of this class.
    """
    if isinstance(schema, CommonSchema):
        self._spl_type = False
        self._schema = schema.schema()
        self._style = self._default_style()
    else:
        # Copy the internal state of another StreamSchema.
        self._spl_type = schema._spl_type
        self._schema = schema._schema
        self._style = schema._style
def as_tuple(self, named=None):
    """Create a structured schema that will pass stream tuples into callables
    as ``tuple`` instances.

    If this instance represents a common schema then it will be returned
    without modification. Stream tuples with common schemas are always
    passed according to their definition.

    **Passing as tuple**

    When `named` evaluates to ``False`` then each stream tuple will be
    passed as a ``tuple``. For example with a structured schema of
    ``tuple<rstring id, float64 value>`` a value is passed as
    ``('TempSensor', 27.4)`` and access to the first attribute is ``t[0]``
    and the second as ``t[1]`` where ``t`` represents the passed value.

    **Passing as named tuple**

    When `named` is ``True`` or a ``str`` then each stream tuple will be
    passed as a named tuple. For example with a structured schema of
    ``tuple<rstring id, float64 value>`` a value is passed as
    ``('TempSensor', 27.4)`` and access to the first attribute is ``t.id``
    (or ``t[0]``) and the second as ``t.value`` (``t[1]``) where ``t``
    represents the passed value.

    .. warning:: If a schema's attribute name is not a valid Python
        identifier or starts with an underscore then it will be renamed as
        positional name ``_n``. For example, with the schema
        ``tuple<int32 a, int32 def, int32 id>`` the field names are
        ``a``, ``_1``, ``_2``.

    The value of `named` is used as the name of the named tuple class with
    ``StreamTuple`` used when `named` is ``True``.

    It is not guaranteed that the class of the namedtuple is the same for
    all callables processing tuples with the same structured schema, only
    that the tuple is a named tuple with the correct field names.

    Args:
        named: Pass stream tuples as a named tuple. If not set then stream
            tuples are passed as instances of ``tuple``.

    Returns:
        StreamSchema: Schema passing stream tuples as ``tuple`` if allowed.

    .. versionadded:: 1.8
    .. versionadded:: 1.9 Addition of `named` parameter.
    """
    if not named:
        return self._copy(tuple)
    # Accept True (default class name) or a string naming the class.
    if named == True or isinstance(named, basestring):
        return self._copy(self._make_named_tuple(name=named))
    # Any other truthy value falls back to plain tuple passing.
    return self._copy(tuple)
def extend(self, schema):
    """Extend a structured schema by another.

    For example extending ``tuple<rstring id, timestamp ts, float64 value>``
    with ``tuple<float32 score>`` results in
    ``tuple<rstring id, timestamp ts, float64 value, float32 score>``.

    Args:
        schema(StreamSchema): Schema to extend this schema by.

    Returns:
        StreamSchema: New schema that is an extension of this schema.

    Raises:
        TypeError: This schema is a declared SPL type.
    """
    if self._spl_type:
        raise TypeError("Not supported for declared SPL types")
    base = self.schema()
    extends = schema.schema()
    # Drop the trailing '>' of the base and the leading 'tuple<' of the
    # extension, joining the attribute lists with a comma.
    new_schema = base[:-1] + ',' + extends[6:]
    return StreamSchema(new_schema)
def _fnop_style(schema, op, name):
    """Set an operator's parameter representing the passing style of `schema`.

    Args:
        schema: Schema whose style is being described.
        op: Operator whose parameters are updated.
        name (str): Parameter name to set (or remove).
    """
    if is_common(schema):
        # Common schemas have a fixed style: no parameter is needed.
        if name in op.params:
            del op.params[name]
        return
    if _is_pending(schema):
        ntp = 'pending'
    elif schema.style is tuple:
        ntp = 'tuple'
    elif schema.style is _spl_dict:
        ntp = 'dict'
    elif _is_namedtuple(schema.style) and hasattr(schema.style, '_splpy_namedtuple'):
        ntp = 'namedtuple:' + schema.style._splpy_namedtuple
    else:
        # Default style: the operator needs no explicit parameter.
        return
    op.params[name] = ntp
def save(self) -> None:
    """Save all changed values to the database-backed settings store."""
    for name, field in self.fields.items():
        value = self.cleaned_data[name]
        if isinstance(value, UploadedFile):
            # A new file was uploaded: delete the previously stored file.
            fname = self._s.get(name, as_type=File)
            if fname:
                try:
                    default_storage.delete(fname.name)
                except OSError:  # pragma: no cover
                    logger.error('Deleting file %s failed.' % fname.name)
            # Store the new file under a collision-free name.
            newname = default_storage.save(self.get_new_filename(value.name), value)
            value._name = newname
            self._s.set(name, value)
        elif isinstance(value, File):
            # Existing stored file, unchanged: nothing to persist.
            continue
        elif isinstance(field, forms.FileField):
            # File field was cleared: delete the stored file and the setting.
            fname = self._s.get(name, as_type=File)
            if fname:
                try:
                    default_storage.delete(fname.name)
                except OSError:  # pragma: no cover
                    logger.error('Deleting file %s failed.' % fname.name)
            del self._s[name]
        elif value is None:
            del self._s[name]
        elif self._s.get(name, as_type=type(value)) != value:
            # Only write when the value differs from what is stored.
            self._s.set(name, value)
def get_new_filename(self, name: str) -> str:
    """Return the file name to use based on the original uploaded file name.

    By default, the file name is constructed as::

        <model_name>-<attribute_name>/<primary_key>/<original_basename>.<random_nonce>.<extension>

    The random nonce prevents collisions and makes stored URLs unguessable.
    """
    nonce = get_random_string(length=8)
    return '%s-%s/%s/%s.%s.%s' % (
        self.obj._meta.model_name, self.attribute_name,
        self.obj.pk, name, nonce, name.split('.')[-1]
    )
def _stop(sas, cmd_args):
"""Stop the service if no jobs are running unless force is set"""
if not cmd_args.force:
status = sas.get_instance_status()
jobs = int(status['job_count'])
if jobs:
return status
return sas.stop_instance() | Stop the service if no jobs are running unless force is set | entailment |
def main(args=None):
    """Perform an action against a Streaming Analytics service.

    Args:
        args: Command-line arguments; defaults to ``sys.argv`` handling.

    Returns:
        dict: result of the command with ``return_code`` 0 on success, or
        ``return_code`` 1 and an ``error`` entry (``sys.exc_info()``) on
        failure.
    """
    streamsx._streams._version._mismatch_check('streamsx.topology.context')
    try:
        sr = run_cmd(args)
        sr['return_code'] = 0
    # Catch Exception (not bare except) so SystemExit raised by argparse
    # for --help/usage errors still terminates the process normally.
    except Exception:
        sr = {'return_code': 1, 'error': sys.exc_info()}
    return sr
def _parse_args(args):
""" Argument parsing
"""
cmd_parser = argparse.ArgumentParser(description='Control commands for a Streaming Analytics service.')
cmd_parser.add_argument('--service-name', help='Streaming Analytics service name')
cmd_parser.add_argument('--full-response', action='store_true', help='Print the full JSON response.')
subparsers = cmd_parser.add_subparsers(help='Supported commands', dest='subcmd')
parser_start = subparsers.add_parser('start', help='Start the service instance')
parser_status = subparsers.add_parser('status', help='Get the service status.')
parser_stop = subparsers.add_parser('stop', help='Stop the instance for the service.')
parser_stop.add_argument('--force', action='store_true', help='Stop the service even if jobs are running.')
return cmd_parser.parse_args(args) | Argument parsing | entailment |
def _source_info():
"""
Get information from the user's code (two frames up)
to leave breadcrumbs for file, line, class and function.
"""
ofi = inspect.getouterframes(inspect.currentframe())[2]
try:
calling_class = ofi[0].f_locals['self'].__class__
except KeyError:
calling_class = None
# Tuple of file,line,calling_class,function_name
return ofi[1], ofi[2], calling_class, ofi[3] | Get information from the user's code (two frames up)
to leave breadcrumbs for file, line, class and function. | entailment |
def spl_json(self):
    """Internal method.

    Returns the SPL JSON expression referencing this topic's toolkit name.
    """
    return streamsx.spl.op.Expression.expression('com.ibm.streamsx.topology.topic::' + self.name).spl_json()
def source(self, func, name=None):
    """
    Declare a source stream that introduces tuples into the application.

    Typically used to create a stream of tuples from an external source,
    such as a sensor or an external system. Tuples are obtained from an
    iterator created from the passed iterable, or from the iterable
    returned by the passed zero-argument callable. Every tuple from the
    iterator that is not ``None`` is present on the returned stream.
    Each tuple is a Python object and must be picklable so execution of
    the application can be distributed across available resources in the
    Streams instance.

    If the iterator's ``__iter__`` or ``__next__`` block then shutdown,
    checkpointing or consistent region processing may be delayed.
    Having ``__next__`` return ``None`` (no available tuples) or tuples
    to submit allows such processing to proceed. A shutdown
    ``threading.Event`` is available through
    :py:func:`streamsx.ec.shutdown` which becomes set when a shutdown of
    the processing element has been requested; it may be waited on to
    perform a sleep that terminates upon shutdown.

    Exceptions raised by ``func`` or its iterator cause its processing
    element to terminate. A callable ``func`` may suppress exceptions by
    returning a true value from its ``__exit__`` method: suppressing an
    exception from ``__iter__`` leaves the source empty, suppressing one
    from ``__next__`` skips that call's tuples and processing continues.

    Args:
        func(callable): An iterable or a zero-argument callable that returns an iterable of tuples.
        name(str): Name of the stream, defaults to a generated name.

    Returns:
        Stream: A stream whose tuples are the result of the iterable obtained from `func`.
    """
    stream_name = name
    if not callable(func):
        # A plain iterable (routines and other callables pass through
        # unchanged) is wrapped so the runtime can obtain its iterator.
        if stream_name is None:
            stream_name = type(func).__name__
        func = streamsx.topology.runtime._IterableInstance(func)
    src_loc = _SourceLocation(_source_info(), "source")
    stream_name = self.graph._requested_name(stream_name, action='source', func=func)
    # source is always stateful
    src_op = self.graph.addOperator(self.opnamespace+"::Source", func, name=stream_name, sl=src_loc)
    src_op._layout(kind='Source', name=stream_name, orig_name=name)
    out_port = src_op.addOutputPort(name=stream_name)
    return Stream(self, out_port)._make_placeable()
Typically used to create a stream of tuple from an external source,
such as a sensor or reading from an external system.
Tuples are obtained from an iterator obtained from the passed iterable
or callable that returns an iterable.
Each tuple that is not None from the iterator is present on the returned stream.
Each tuple is a Python object and must be picklable to allow execution of the application
to be distributed across available resources in the Streams instance.
If the iterator's ``__iter__`` or ``__next__`` block then shutdown,
checkpointing or consistent region processing may be delayed.
Having ``__next__`` return ``None`` (no available tuples) or tuples
to submit will allow such processing to proceed.
A shutdown ``threading.Event`` is available through
:py:func:`streamsx.ec.shutdown` which becomes set when a shutdown
of the processing element has been requested. This event may be waited
on to perform a sleep that will terminate upon shutdown.
Args:
func(callable): An iterable or a zero-argument callable that returns an iterable of tuples.
name(str): Name of the stream, defaults to a generated name.
Exceptions raised by ``func`` or its iterator will cause
its processing element will terminate.
If ``func`` is a callable object then it may suppress exceptions
by return a true value from its ``__exit__`` method.
Suppressing an exception raised by ``func.__iter__`` causes the
source to be empty, no tuples are submitted to the stream.
Suppressing an exception raised by ``__next__`` on the iterator
results in no tuples being submitted for that call to ``__next__``.
Processing continues with calls to ``__next__`` to fetch subsequent tuples.
Returns:
Stream: A stream whose tuples are the result of the iterable obtained from `func`. | entailment |
def subscribe(self, topic, schema=streamsx.topology.schema.CommonSchema.Python, name=None, connect=None, buffer_capacity=None, buffer_full_policy=None):
    """
    Subscribe to a topic published by other Streams applications.

    A Streams application may publish a stream to allow other Streams
    applications to subscribe to it. A subscriber matches a publisher
    when the topic and schema match.

    By default a stream is subscribed as
    :py:const:`~streamsx.topology.schema.CommonSchema.Python` objects,
    connecting to streams published to the topic by Python Streams
    applications. Structured schemas are subscribed to using an instance
    of :py:class:`StreamSchema`. JSON streams are subscribed to using
    schema :py:const:`~streamsx.topology.schema.CommonSchema.Json`; each
    tuple on the returned stream is a Python dictionary created by
    ``json.loads(tuple)``. String streams are subscribed to using schema
    :py:const:`~streamsx.topology.schema.CommonSchema.String`; each tuple
    is a Python string. Publishers of structured, JSON or string streams
    may have been implemented in any programming language supported by
    Streams.

    Subscribers can ensure they do not slow down matching publishers by
    using a buffered connection with a buffer full policy that drops
    tuples.

    Args:
        topic(str): Topic to subscribe to.
        schema(~streamsx.topology.schema.StreamSchema): schema to subscribe to.
        name(str): Name of the subscribed stream, defaults to a generated name.
        connect(SubscribeConnection): How subscriber will be connected to matching publishers. Defaults to :py:const:`~SubscribeConnection.Direct` connection.
        buffer_capacity(int): Buffer capacity in tuples when `connect` is set to :py:const:`~SubscribeConnection.Buffered`. Defaults to 1000 when `connect` is `Buffered`. Ignored when `connect` is `None` or `Direct`.
        buffer_full_policy(~streamsx.types.CongestionPolicy): Policy when a published tuple arrives and the subscriber's buffer is full. Defaults to `Wait` when `connect` is `Buffered`. Ignored when `connect` is `None` or `Direct`.

    Returns:
        Stream: A stream whose tuples have been published to the topic by other Streams applications.

    .. versionchanged:: 1.9 `connect`, `buffer_capacity` and `buffer_full_policy` parameters added.

    .. seealso:`SubscribeConnection`
    """
    schema = streamsx.topology.schema._normalize(schema)
    op_name = self.graph._requested_name(name, 'subscribe')
    src_loc = _SourceLocation(_source_info(), "subscribe")
    # subscribe is never stateful
    sub_op = self.graph.addOperator(kind="com.ibm.streamsx.topology.topic::Subscribe", sl=src_loc, name=op_name, stateful=False)
    out_port = sub_op.addOutputPort(schema=schema, name=op_name)
    params = {'topic': topic, 'streamType': schema}
    # Direct (or unset) connections take the operator defaults.
    if connect not in (None, SubscribeConnection.Direct):
        params['connect'] = connect
    if buffer_capacity:
        params['bufferCapacity'] = int(buffer_capacity)
    if buffer_full_policy:
        params['bufferFullPolicy'] = buffer_full_policy
    sub_op.setParameters(params)
    sub_op._layout_group('Subscribe', name if name else op_name)
    return Stream(self, out_port)._make_placeable()
A Streams application may publish a stream to allow other
Streams applications to subscribe to it. A subscriber matches a
publisher if the topic and schema match.
By default a stream is subscribed as :py:const:`~streamsx.topology.schema.CommonSchema.Python` objects
which connects to streams published to topic by Python Streams applications.
Structured schemas are subscribed to using an instance of
:py:class:`StreamSchema`. A Streams application publishing
structured schema streams may have been implemented in any
programming language supported by Streams.
JSON streams are subscribed to using schema :py:const:`~streamsx.topology.schema.CommonSchema.Json`.
Each tuple on the returned stream will be a Python dictionary
object created by ``json.loads(tuple)``.
A Streams application publishing JSON streams may have been implemented in any programming language
supported by Streams.
String streams are subscribed to using schema :py:const:`~streamsx.topology.schema.CommonSchema.String`.
Each tuple on the returned stream will be a Python string object.
A Streams application publishing string streams may have been implemented in any programming language
supported by Streams.
Subscribers can ensure they do not slow down matching publishers
by using a buffered connection with a buffer full policy
that drops tuples.
Args:
topic(str): Topic to subscribe to.
schema(~streamsx.topology.schema.StreamSchema): schema to subscribe to.
name(str): Name of the subscribed stream, defaults to a generated name.
connect(SubscribeConnection): How subscriber will be connected to matching publishers. Defaults to :py:const:`~SubscribeConnection.Direct` connection.
buffer_capacity(int): Buffer capacity in tuples when `connect` is set to :py:const:`~SubscribeConnection.Buffered`. Defaults to 1000 when `connect` is `Buffered`. Ignored when `connect` is `None` or `Direct`.
buffer_full_policy(~streamsx.types.CongestionPolicy): Policy when a published tuple arrives and the subscriber's buffer is full. Defaults to `Wait` when `connect` is `Buffered`. Ignored when `connect` is `None` or `Direct`.
Returns:
Stream: A stream whose tuples have been published to the topic by other Streams applications.
.. versionchanged:: 1.9 `connect`, `buffer_capacity` and `buffer_full_policy` parameters added.
.. seealso:`SubscribeConnection` | entailment |
def add_file_dependency(self, path, location):
    """
    Add a file or directory dependency into a Streams application bundle.

    Ensures that the file or directory at `path` on the local system
    will be available at runtime. The file is copied and made available
    relative to the application directory; `location` determines where,
    and must be either ``etc`` or ``opt``. The copy is made during the
    submit call, so the contents of the file or directory must remain
    available until submit returns.

    For example ``add_file_dependency('/tmp/conf.properties', 'etc')``
    makes the contents of the local file `conf.properties` available at
    runtime at `application directory`/etc/conf.properties and returns
    ``etc/conf.properties``. Python callables can determine the
    application directory at runtime with
    :py:func:`~streamsx.ec.get_application_directory`, e.g.
    ``os.path.join(streamsx.ec.get_application_directory(), 'etc', 'conf.properties')``.

    Args:
        path(str): Path of the file on the local system.
        location(str): Location of the file in the bundle relative to the application directory (``'etc'`` or ``'opt'``).

    Returns:
        str: Path relative to application directory that can be joined at runtime with ``get_application_directory``.

    Raises:
        ValueError: If `location` is not ``'etc'`` or ``'opt'``, or `path` is not an existing file or directory.

    .. versionadded:: 1.7
    """
    if location not in {'etc', 'opt'}:
        raise ValueError("location must be 'etc' or 'opt', not: " + str(location))
    if not (os.path.isfile(path) or os.path.isdir(path)):
        raise ValueError("path is not an existing file or directory: " + str(path))
    # Record the absolute path now; the actual copy happens at submit time.
    path = os.path.abspath(path)
    self._files.setdefault(location, []).append(path)
    return location + '/' + os.path.basename(path)
Ensures that the file or directory at `path` on the local system
will be available at runtime.
The file will be copied and made available relative to the
application directory. Location determines where the file
is relative to the application directory. Two values for
location are supported `etc` and `opt`.
The runtime path relative to application directory is returned.
The copy is made during the submit call thus the contents of
the file or directory must remain available until submit returns.
For example calling
``add_file_dependency('/tmp/conf.properties', 'etc')``
will result in contents of the local file `conf.properties`
being available at runtime at the path `application directory`/etc/conf.properties. This call returns ``etc/conf.properties``.
Python callables can determine the application directory at
runtime with :py:func:`~streamsx.ec.get_application_directory`.
For example the path above at runtime is
``os.path.join(streamsx.ec.get_application_directory(), 'etc', 'conf.properties')``
Args:
path(str): Path of the file on the local system.
location(str): Location of the file in the bundle relative to the application directory.
Returns:
str: Path relative to application directory that can be joined at runtime with ``get_application_directory``.
.. versionadded:: 1.7 | entailment |
def add_pip_package(self, requirement):
    """
    Add a Python package dependency for this topology.

    If the package defined by the requirement specifier is not
    pre-installed on the build system then it is installed using `pip`
    (from `pypi.org`) and becomes part of the Streams application bundle
    (`sab` file); if it is already installed it is not added to the
    `sab` file, on the assumption that runtime hosts have the same
    packages installed as the build machines (always true for IBM Cloud
    Private for Data and the Streaming Analytics service on IBM Cloud).

    The project name extracted from the requirement specifier is added
    to :py:attr:`~exclude_packages` so the dependency resolver does not
    add the package again — call this method before the package is used
    in any stream transformation.

    When an application runs with trace level ``info`` the available
    Python packages, including any added by this method, are listed to
    application trace.

    Example::

        topo = Topology()
        # Add dependency on pint package
        # and astral at version 0.8.1
        topo.add_pip_package('pint')
        topo.add_pip_package('astral==0.8.1')

    Args:
        requirement(str): Package requirements specifier.

    .. warning::
        Only supported when using the build service with
        a Streams instance in IBM Cloud Private for Data
        or Streaming Analytics service on IBM Cloud.

    .. note::
        Installing packages through `pip` is preferred to the automatic
        dependency checking performed on local modules, since `pip`
        performs a full install including dependent packages and
        additional files (such as shared libraries) that might be
        missed by dependency discovery.

    .. versionadded:: 1.9
    """
    self._pip_packages.append(str(requirement))
    parsed_req = pkg_resources.Requirement.parse(requirement)
    self.exclude_packages.add(parsed_req.project_name)
If the package defined by the requirement specifier
is not pre-installed on the build system then the
package is installed using `pip` and becomes part
of the Streams application bundle (`sab` file).
The package is expected to be available from `pypi.org`.
If the package is already installed on the build system
then it is not added into the `sab` file.
The assumption is that the runtime hosts for a Streams
instance have the same Python packages installed as the
build machines. This is always true for IBM Cloud
Private for Data and the Streaming Analytics service on IBM Cloud.
The project name extracted from the requirement
specifier is added to :py:attr:`~exclude_packages`
to avoid the package being added by the dependency
resolver. Thus the package should be added before
it is used in any stream transformation.
When an application is run with trace level ``info``
the available Python packages on the running system
are listed to application trace. This includes
any packages added by this method.
Example::
topo = Topology()
# Add dependency on pint package
# and astral at version 0.8.1
topo.add_pip_package('pint')
topo.add_pip_package('astral==0.8.1')
Args:
requirement(str): Package requirements specifier.
.. warning::
Only supported when using the build service with
a Streams instance in IBM Cloud Private for Data
or Streaming Analytics service on IBM Cloud.
.. note::
Installing packages through `pip` is preferred to
the automatic dependency checking performed on local
modules. This is because `pip` will perform a full
install of the package including any dependent packages
and additional files, such as shared libraries, that
might be missed by dependency discovery.
.. versionadded:: 1.9 | entailment |
def create_submission_parameter(self, name, default=None, type_=None):
    """ Create a submission parameter.

    A submission parameter is a handle for a value that is not defined
    until topology submission time, enabling reusable topology bundles.
    Its `name` must be unique within the topology.

    The returned parameter is a `callable`. While constructing the
    topology, prior to submission, invoking it returns ``None``. Within
    the executing topology it returns the actual submission time value
    (or the default value if none was supplied at submission time).

    Submission parameters may be used within functional logic, e.g.::

        threshold = topology.create_submission_parameter('threshold', 100);
        # s is some stream of integers
        s = ...
        s = s.filter(lambda v : v > threshold())

    .. note::
        The parameter (value returned from this method) is only
        supported within a lambda expression or a callable
        that is not a function.

    The default type of a submission parameter's value is a `str`
    (`unicode` on Python 2.7). When a `default` is specified the value
    type matches the type of the default; otherwise the type can be set
    with `type_` (``str``, ``int``, ``float`` or ``bool``).

    Topology submission behavior when a submission parameter lacking a
    default value is created and no value is provided at submission time
    is defined by the underlying topology execution runtime:

        * Submission fails for contexts ``DISTRIBUTED``, ``STANDALONE``, and ``STREAMING_ANALYTICS_SERVICE``.

    Args:
        name(str): Name for submission parameter.
        default: Default parameter when submission parameter is not set.
        type_: Type of parameter value when default is not set. Supported values are `str`, `int`, `float` and `bool`.

    .. versionadded:: 1.9
    """
    if name in self._submission_parameters:
        raise ValueError("Submission parameter {} already defined.".format(name))
    param = streamsx.topology.runtime._SubmissionParam(name, default, type_)
    self._submission_parameters[name] = param
    return param
A submission parameter is a handle for a value that
is not defined until topology submission time. Submission
parameters enable the creation of reusable topology bundles.
A submission parameter has a `name`. The name must be unique
within the topology.
The returned parameter is a `callable`.
Prior to submitting the topology, while constructing the topology,
invoking it returns ``None``.
After the topology is submitted, invoking the parameter
within the executing topology returns the actual submission time value
(or the default value if it was not set at submission time).
Submission parameters may be used within functional logic. e.g.::
threshold = topology.create_submission_parameter('threshold', 100);
# s is some stream of integers
s = ...
s = s.filter(lambda v : v > threshold())
.. note::
The parameter (value returned from this method) is only
supported within a lambda expression or a callable
that is not a function.
The default type of a submission parameter's value is a `str`
(`unicode` on Python 2.7). When a `default` is specified
the type of the value matches the type of the default.
If `default` is not set, then the type can be set with `type_`.
The types supported are ``str``, ``int``, ``float`` and ``bool``.
Topology submission behavior when a submission parameter
lacking a default value is created and a value is not provided at
submission time is defined by the underlying topology execution runtime.
* Submission fails for contexts ``DISTRIBUTED``, ``STANDALONE``, and ``STREAMING_ANALYTICS_SERVICE``.
Args:
name(str): Name for submission parameter.
default: Default parameter when submission parameter is not set.
type_: Type of parameter value when default is not set. Supported values are `str`, `int`, `float` and `bool`.
.. versionadded:: 1.9 | entailment |
def _generate_requirements(self):
"""Generate the info to create requirements.txt in the toookit."""
if not self._pip_packages:
return
reqs = ''
for req in self._pip_packages:
reqs += "{}\n".format(req)
reqs_include = {
'contents': reqs,
'target':'opt/python/streams',
'name': 'requirements.txt'}
if 'opt' not in self._files:
self._files['opt'] = [reqs_include]
else:
self._files['opt'].append(reqs_include) | Generate the info to create requirements.txt in the toookit. | entailment |
def _add_job_control_plane(self):
"""
Add a JobControlPlane operator to the topology, if one has not already
been added. If a JobControlPlane operator has already been added,
this has no effect.
"""
if not self._has_jcp:
jcp = self.graph.addOperator(kind="spl.control::JobControlPlane", name="JobControlPlane")
jcp.viewable = False
self._has_jcp = True | Add a JobControlPlane operator to the topology, if one has not already
been added. If a JobControlPlane operator has already been added,
this has no effect. | entailment |
def aliased_as(self, name):
    """
    Create an alias of this stream.

    Returns a shallow copy of this stream whose alias is `name`. When an
    SPL operator invocation requires an
    :py:class:`~streamsx.spl.op.Expression` against an input port, this
    ensures the expression matches the input port alias regardless of
    the name of the actual stream.

    Example use where the filter expression for a ``Filter`` SPL operator
    uses ``IN`` to access input tuple attribute ``seq``::

        s = ...
        s = s.aliased_as('IN')
        params = {'filter': op.Expression.expression('IN.seq % 4ul == 0ul')}
        f = op.Map('spl.relational::Filter', stream, params = params)

    Args:
        name(str): Name for returned stream.

    Returns:
        Stream: Alias of this stream with ``name`` equal to `name`.

    .. versionadded:: 1.9
    """
    alias_stream = copy.copy(self)
    alias_stream._alias = name
    return alias_stream
Returns an alias of this stream with name `name`.
When invocation of an SPL operator requires an
:py:class:`~streamsx.spl.op.Expression` against
an input port this can be used to ensure expression
matches the input port alias regardless of the name
of the actual stream.
Example use where the filter expression for a ``Filter`` SPL operator
uses ``IN`` to access input tuple attribute ``seq``::
s = ...
s = s.aliased_as('IN')
params = {'filter': op.Expression.expression('IN.seq % 4ul == 0ul')}
f = op.Map('spl.relational::Filter', stream, params = params)
Args:
name(str): Name for returned stream.
Returns:
Stream: Alias of this stream with ``name`` equal to `name`.
.. versionadded:: 1.9 | entailment |
def view(self, buffer_time=10.0, sample_size=10000, name=None, description=None, start=False):
    """
    Defines a view on a stream.

    A view is a continually updated sampled buffer of a stream's tuples.
    Views allow visibility into a stream from external clients such as
    Jupyter Notebooks, the Streams console, Microsoft Excel or REST
    clients. The view created by this method can be used by external
    clients and through the returned
    :py:class:`~streamsx.topology.topology.View` object after the
    topology is submitted; for example a Jupyter Notebook can declare
    and submit an application with views, then use the resultant `View`
    objects to visualize live data within the streams.

    When the stream contains Python objects they are converted to JSON.

    Args:
        buffer_time: Specifies the buffer size to use measured in seconds.
        sample_size: Specifies the number of tuples to sample per second.
        name(str): Name of the view. Name must be unique within the topology. Defaults to a generated name.
        description: Description of the view.
        start(bool): Start buffering data when the job is submitted.
            If `False` then the view starts buffering data when the first
            remote client accesses it to retrieve data.

    Returns:
        streamsx.topology.topology.View: View object which can be used to access the data when the
        topology is submitted.

    .. note:: Views are only supported when submitting to distributed
        contexts including Streaming Analytics service.
    """
    if name is None:
        name = ''.join(random.choice('0123456789abcdef') for _ in range(16))
    if self.oport.schema == streamsx.topology.schema.CommonSchema.Python:
        # Python object streams are viewed through a JSON conversion;
        # cache the converted stream so multiple views share one map.
        if not self._json_stream:
            self._json_stream = self.as_json(force_object=False)._layout(hidden=True)
        view_stream = self._json_stream
        # colocate map operator with stream that is being viewed.
        if self._placeable:
            self._colocate(view_stream, 'view')
    else:
        view_stream = self
    view_config = {
        'name': name,
        'port': view_stream.oport.name,
        'description': description,
        'bufferTime': buffer_time,
        'sampleSize': sample_size}
    if start:
        view_config['activateOption'] = 'automatic'
    view_stream.oport.operator.addViewConfig(view_config)
    new_view = View(name)
    self.topology.graph._views.append(new_view)
    return new_view
A view is a continually updated sampled buffer of a streams's tuples.
Views allow visibility into a stream from external clients such
as Jupyter Notebooks, the Streams console,
`Microsoft Excel <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.0/com.ibm.streams.excel.doc/doc/excel_overview.html>`_ or REST clients.
The view created by this method can be used by external clients
and through the returned :py:class:`~streamsx.topology.topology.View` object after the topology is submitted. For example a Jupyter Notebook can
declare and submit an application with views, and then
use the resultant `View` objects to visualize live data within the streams.
When the stream contains Python objects then they are converted
to JSON.
Args:
buffer_time: Specifies the buffer size to use measured in seconds.
sample_size: Specifies the number of tuples to sample per second.
name(str): Name of the view. Name must be unique within the topology. Defaults to a generated name.
description: Description of the view.
start(bool): Start buffering data when the job is submitted.
If `False` then the view starts buffering data when the first
remote client accesses it to retrieve data.
Returns:
streamsx.topology.topology.View: View object which can be used to access the data when the
topology is submitted.
.. note:: Views are only supported when submitting to distributed
contexts including Streaming Analytics service. | entailment |
def map(self, func=None, name=None, schema=None):
    """
    Maps each tuple from this stream into 0 or 1 stream tuples.

    For each tuple on this stream ``result = func(tuple)`` is called.
    If `result` is not `None` it is submitted as a tuple on the returned
    stream; if `result` is `None` no tuple is submitted.

    By default the submitted tuple is ``result`` without modification,
    giving a stream of picklable Python objects. Setting the `schema`
    parameter changes the type of the stream and modifies each
    ``result`` before submission:

        * ``object`` or :py:const:`~streamsx.topology.schema.CommonSchema.Python` - The default: `result` is submitted.
        * ``str`` type (``unicode`` 2.7) or :py:const:`~streamsx.topology.schema.CommonSchema.String` - A stream of strings: ``str(result)`` is submitted.
        * ``json`` or :py:const:`~streamsx.topology.schema.CommonSchema.Json` - A stream of JSON objects: ``result`` must be convertable to a JSON object using `json` package.
        * :py:const:`~streamsx.topology.schema.StreamSchema` - A structured stream. `result` must be a `dict` or (Python) `tuple`. When a `dict` is returned the outgoing stream tuple attributes are set by name, when a `tuple` is returned stream tuple attributes are set by position.
        * string value - Equivalent to passing ``StreamSchema(schema)``

    If invoking ``func`` for a tuple raises an exception then its
    processing element terminates (and by default automatically
    restarts, though tuples may be lost). A callable ``func`` may
    suppress exceptions by returning a true value from its ``__exit__``
    method, in which case no tuple is submitted for the input tuple
    that caused the exception.

    Args:
        func: A callable that takes a single parameter for the tuple.
            If not supplied then a function equivalent to ``lambda tuple_ : tuple_`` is used.
        name(str): Name of the mapped stream, defaults to a generated name.
        schema(StreamSchema|CommonSchema|str): Schema of the resulting stream.

    Returns:
        Stream: A stream containing tuples mapped by `func`.

    .. versionadded:: 1.7 `schema` argument added to allow conversion to
        a structured stream.
    .. versionadded:: 1.8 Support for submitting `dict` objects as stream tuples to a structured stream (in addition to existing support for `tuple` objects).
    .. versionchanged:: 1.11 `func` is optional.
    """
    if schema is None:
        schema = streamsx.topology.schema.CommonSchema.Python
    if func is None:
        # No function: pass tuples through unchanged under a default name.
        func = streamsx.topology.runtime._identity
        if name is None:
            name = 'identity'
    mapped = self._map(func, schema=schema, name=name)._layout('Map')
    mapped.oport.operator.sl = _SourceLocation(_source_info(), 'map')
    return mapped
For each tuple on this stream ``result = func(tuple)`` is called.
If `result` is not `None` then the result will be submitted
as a tuple on the returned stream. If `result` is `None` then
no tuple submission will occur.
By default the submitted tuple is ``result`` without modification
resulting in a stream of picklable Python objects. Setting the
`schema` parameter changes the type of the stream and
modifies each ``result`` before submission.
* ``object`` or :py:const:`~streamsx.topology.schema.CommonSchema.Python` - The default: `result` is submitted.
* ``str`` type (``unicode`` 2.7) or :py:const:`~streamsx.topology.schema.CommonSchema.String` - A stream of strings: ``str(result)`` is submitted.
* ``json`` or :py:const:`~streamsx.topology.schema.CommonSchema.Json` - A stream of JSON objects: ``result`` must be convertable to a JSON object using `json` package.
* :py:const:`~streamsx.topology.schema.StreamSchema` - A structured stream. `result` must be a `dict` or (Python) `tuple`. When a `dict` is returned the outgoing stream tuple attributes are set by name, when a `tuple` is returned stream tuple attributes are set by position.
* string value - Equivalent to passing ``StreamSchema(schema)``
Args:
func: A callable that takes a single parameter for the tuple.
If not supplied then a function equivalent to ``lambda tuple_ : tuple_`` is used.
name(str): Name of the mapped stream, defaults to a generated name.
schema(StreamSchema|CommonSchema|str): Schema of the resulting stream.
If invoking ``func`` for a tuple on the stream raises an exception
then its processing element will terminate. By default the processing
element will automatically restart though tuples may be lost.
If ``func`` is a callable object then it may suppress exceptions
by return a true value from its ``__exit__`` method. When an
exception is suppressed no tuple is submitted to the mapped
stream corresponding to the input tuple that caused the exception.
Returns:
Stream: A stream containing tuples mapped by `func`.
.. versionadded:: 1.7 `schema` argument added to allow conversion to
a structured stream.
.. versionadded:: 1.8 Support for submitting `dict` objects as stream tuples to a structured stream (in addition to existing support for `tuple` objects).
.. versionchanged:: 1.11 `func` is optional. | entailment |
def flat_map(self, func=None, name=None):
"""
Maps and flattens each tuple from this stream into 0 or more tuples.
For each tuple on this stream ``func(tuple)`` is called.
If the result is not `None` then the result is iterated over
and each value from the iterator that is not `None` is submitted
to the returned stream.
If the result is `None` or an empty iterable then no tuples are submitted to
the returned stream.
Args:
func: A callable that takes a single parameter for the tuple.
If not supplied then a function equivalent to ``lambda tuple_ : tuple_`` is used.
This is suitable when each tuple on this stream is an iterable to be flattened.
name(str): Name of the flattened stream, defaults to a generated name.
If invoking ``func`` for a tuple on the stream raises an exception
then its processing element will terminate. By default the processing
element will automatically restart though tuples may be lost.
If ``func`` is a callable object then it may suppress exceptions
by returning a true value from its ``__exit__`` method. When an
exception is suppressed no tuples are submitted to the flattened
and mapped stream corresponding to the input tuple
that caused the exception.
Returns:
Stream: A Stream containing flattened and mapped tuples.
Raises:
TypeError: if `func` does not return an iterator nor None
.. versionchanged:: 1.11 `func` is optional.
"""
# Default: pass each tuple through unchanged so iterable tuples are flattened as-is.
if func is None:
func = streamsx.topology.runtime._identity
if name is None:
name = 'flatten'
# Capture the caller's source location for tooling/diagnostics.
sl = _SourceLocation(_source_info(), 'flat_map')
_name = self.topology.graph._requested_name(name, action='flat_map', func=func)
# Decide whether ``func`` is to be treated as stateful by the runtime.
stateful = self._determine_statefulness(func)
op = self.topology.graph.addOperator(self.topology.opnamespace+"::FlatMap", func, name=_name, sl=sl, stateful=stateful)
op.addInputPort(outputPort=self.oport)
streamsx.topology.schema.StreamSchema._fnop_style(self.oport.schema, op, 'pyStyle')
oport = op.addOutputPort(name=_name)
return Stream(self.topology, oport)._make_placeable()._layout('FlatMap', name=_name, orig_name=name) | Maps and flatterns each tuple from this stream into 0 or more tuples.
For each tuple on this stream ``func(tuple)`` is called.
If the result is not `None` then the result is iterated over
and each value from the iterator that is not `None` is submitted
to the return stream.
If the result is `None` or an empty iterable then no tuples are submitted to
the returned stream.
Args:
func: A callable that takes a single parameter for the tuple.
If not supplied then a function equivalent to ``lambda tuple_ : tuple_`` is used.
This is suitable when each tuple on this stream is an iterable to be flattened.
name(str): Name of the flattened stream, defaults to a generated name.
If invoking ``func`` for a tuple on the stream raises an exception
then its processing element will terminate. By default the processing
element will automatically restart though tuples may be lost.
If ``func`` is a callable object then it may suppress exceptions
by returning a true value from its ``__exit__`` method. When an
exception is suppressed no tuples are submitted to the flattened
and mapped stream corresponding to the input tuple
that caused the exception.
Returns:
Stream: A Stream containing flattened and mapped tuples.
Raises:
TypeError: if `func` does not return an iterator nor None
.. versionchanged:: 1.11 `func` is optional. | entailment |
def parallel(self, width, routing=Routing.ROUND_ROBIN, func=None, name=None):
"""
Split stream into channels and start a parallel region.
Returns a new stream that will contain the contents of
this stream with tuples distributed across its channels.
The returned stream starts a parallel region where all
downstream transforms are replicated across `width` channels.
A parallel region is terminated by :py:meth:`end_parallel`
or :py:meth:`for_each`.
Any transform (such as :py:meth:`map`, :py:meth:`filter`, etc.) in
a parallel region has a copy of its callable executing
independently in parallel. Channels remain independent
of other channels until the region is terminated.
For example with this topology fragment a parallel region
of width 3 is created::
s = ...
p = s.parallel(3)
p = p.filter(F()).map(M())
e = p.end_parallel()
e.for_each(E())
Tuples from ``p`` (parallelized ``s``) are distributed
across three channels, 0, 1 & 2
and are independently processed by three instances of ``F`` and ``M``.
The tuples that pass the filter ``F`` in channel 0 are then mapped
by the instance of ``M`` in channel 0, and so on for channels 1 and 2.
The channels are combined by ``end_parallel`` and so a single instance
of ``E`` processes all the tuples from channels 0, 1 & 2.
This stream instance (the original) is outside of the parallel region
and so any downstream transforms are executed normally.
Adding this `map` transform would result in tuples
on ``s`` being processed by a single instance of ``N``::
n = s.map(N())
The number of channels is set by `width` which may be an `int` greater
than zero or a submission parameter created by
:py:meth:`Topology.create_submission_parameter`.
With IBM Streams 4.3 or later the number of channels can be
dynamically changed at runtime.
Tuples are routed to channels based upon `routing`, see :py:class:`Routing`.
A parallel region can have multiple termination points, for
example when a stream within the stream has multiple transforms
against it::
s = ...
p = s.parallel(3)
m1p = p.map(M1())
m2p = p.map(M2())
p.for_each(E())
m1 = m1p.end_parallel()
m2 = m2p.end_parallel()
Parallel regions can be nested, for example::
s = ...
m = s.parallel(2).map(MO()).parallel(3).map(MI()).end_parallel().end_parallel()
In this case there will be two instances of ``MO`` (the outer region) and six (2x3) instances of ``MI`` (the inner region).
Streams created by :py:meth:`~Topology.source` or
:py:meth:`~Topology.subscribe` are placed in a parallel region
by :py:meth:`set_parallel`.
Args:
width (int): Degree of parallelism.
routing(Routing): Denotes what type of tuple routing to use.
func: Optional function called when :py:const:`Routing.HASH_PARTITIONED` routing is specified.
The function provides an integer value to be used as the hash that determines
the tuple channel routing.
name (str): The name to display for the parallel region.
Returns:
Stream: A stream for which subsequent transformations will be executed in parallel.
.. seealso:: :py:meth:`set_parallel`, :py:meth:`end_parallel`
"""
_name = name
if _name is None:
_name = self.name + '_parallel'
_name = self.topology.graph._requested_name(_name, action='parallel', func=func)
# Simple case: no hashing required, the $Parallel$ marker routes tuples directly.
if routing is None or routing == Routing.ROUND_ROBIN or routing == Routing.BROADCAST:
op2 = self.topology.graph.addOperator("$Parallel$", name=_name)
# Only record a region name when the caller explicitly supplied one.
if name is not None:
op2.config['regionName'] = _name
op2.addInputPort(outputPort=self.oport)
if routing == Routing.BROADCAST:
oport = op2.addOutputPort(width, schema=self.oport.schema, routing="BROADCAST", name=_name)
else:
oport = op2.addOutputPort(width, schema=self.oport.schema, routing="ROUND_ROBIN", name=_name)
return Stream(self.topology, oport)
elif routing == Routing.HASH_PARTITIONED:
# Without a user hash function: strings partition on the 'string'
# attribute; plain Python objects fall back to the builtin hash().
if (func is None):
if self.oport.schema == streamsx.topology.schema.CommonSchema.String:
keys = ['string']
parallel_input = self.oport
elif self.oport.schema == streamsx.topology.schema.CommonSchema.Python:
func = hash
else:
raise NotImplementedError("HASH_PARTITIONED for schema {0} requires a hash function.".format(self.oport.schema))
# With a hash function (user supplied or hash()): add a hidden
# HashAdder operator that extends the schema with an int64
# '__spl_hash' attribute used as the partition key.
if func is not None:
keys = ['__spl_hash']
stateful = self._determine_statefulness(func)
hash_adder = self.topology.graph.addOperator(self.topology.opnamespace+"::HashAdder", func, stateful=stateful)
hash_adder._op_def['hashAdder'] = True
hash_adder._layout(hidden=True)
hash_schema = self.oport.schema.extend(streamsx.topology.schema.StreamSchema("tuple<int64 __spl_hash>"))
hash_adder.addInputPort(outputPort=self.oport)
streamsx.topology.schema.StreamSchema._fnop_style(self.oport.schema, hash_adder, 'pyStyle')
parallel_input = hash_adder.addOutputPort(schema=hash_schema)
parallel_op = self.topology.graph.addOperator("$Parallel$", name=_name)
if name is not None:
parallel_op.config['regionName'] = _name
parallel_op.addInputPort(outputPort=parallel_input)
parallel_op_port = parallel_op.addOutputPort(oWidth=width, schema=parallel_input.schema, partitioned_keys=keys, routing="HASH_PARTITIONED")
if func is not None:
# use the Functor passthru operator to remove the hash attribute by removing it from output port schema
hrop = self.topology.graph.addPassThruOperator()
hrop._layout(hidden=True)
hrop.addInputPort(outputPort=parallel_op_port)
parallel_op_port = hrop.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, parallel_op_port)
else :
raise TypeError("Invalid routing type supplied to the parallel operator") | Split stream into channels and start a parallel region.
Returns a new stream that will contain the contents of
this stream with tuples distributed across its channels.
The returned stream starts a parallel region where all
downstream transforms are replicated across `width` channels.
A parallel region is terminated by :py:meth:`end_parallel`
or :py:meth:`for_each`.
Any transform (such as :py:meth:`map`, :py:meth:`filter`, etc.) in
a parallel region has a copy of its callable executing
independently in parallel. Channels remain independent
of other channels until the region is terminated.
For example with this topology fragment a parallel region
of width 3 is created::
s = ...
p = s.parallel(3)
p = p.filter(F()).map(M())
e = p.end_parallel()
e.for_each(E())
Tuples from ``p`` (parallelized ``s``) are distributed
across three channels, 0, 1 & 2
and are independently processed by three instances of ``F`` and ``M``.
The tuples that pass the filter ``F`` in channel 0 are then mapped
by the instance of ``M`` in channel 0, and so on for channels 1 and 2.
The channels are combined by ``end_parallel`` and so a single instance
of ``E`` processes all the tuples from channels 0, 1 & 2.
This stream instance (the original) is outside of the parallel region
and so any downstream transforms are executed normally.
Adding this `map` transform would result in tuples
on ``s`` being processed by a single instance of ``N``::
n = s.map(N())
The number of channels is set by `width` which may be an `int` greater
than zero or a submission parameter created by
:py:meth:`Topology.create_submission_parameter`.
With IBM Streams 4.3 or later the number of channels can be
dynamically changed at runtime.
Tuples are routed to channels based upon `routing`, see :py:class:`Routing`.
A parallel region can have multiple termination points, for
example when a stream within the stream has multiple transforms
against it::
s = ...
p = s.parallel(3)
m1p = p.map(M1())
m2p = p.map(M2())
p.for_each(E())
m1 = m1p.end_parallel()
m2 = m2p.end_parallel()
Parallel regions can be nested, for example::
s = ...
m = s.parallel(2).map(MO()).parallel(3).map(MI()).end_parallel().end_parallel()
In this case there will be two instances of ``MO`` (the outer region) and six (2x3) instances of ``MI`` (the inner region).
Streams created by :py:meth:`~Topology.source` or
:py:meth:`~Topology.subscribe` are placed in a parallel region
by :py:meth:`set_parallel`.
Args:
width (int): Degree of parallelism.
routing(Routing): Denotes what type of tuple routing to use.
func: Optional function called when :py:const:`Routing.HASH_PARTITIONED` routing is specified.
The function provides an integer value to be used as the hash that determines
the tuple channel routing.
name (str): The name to display for the parallel region.
Returns:
Stream: A stream for which subsequent transformations will be executed in parallel.
.. seealso:: :py:meth:`set_parallel`, :py:meth:`end_parallel` | entailment |
def end_parallel(self):
"""
Ends a parallel region by merging the channels into a single stream.
Returns:
Stream: Stream for which subsequent transformations are no longer parallelized.
.. seealso:: :py:meth:`set_parallel`, :py:meth:`parallel`
"""
outport = self.oport
# When the immediate upstream is a $Union$ marker, route through a
# pass-through operator before ending the region — presumably the
# $EndParallel$ marker cannot connect directly to another marker
# (TODO confirm against graph.Marker semantics).
if isinstance(self.oport.operator, streamsx.topology.graph.Marker):
if self.oport.operator.kind == "$Union$":
pto = self.topology.graph.addPassThruOperator()
pto.addInputPort(outputPort=self.oport)
outport = pto.addOutputPort(schema=self.oport.schema)
op = self.topology.graph.addOperator("$EndParallel$")
op.addInputPort(outputPort=outport)
# The merged stream keeps the schema of this stream.
oport = op.addOutputPort(schema=self.oport.schema)
endP = Stream(self.topology, oport)
return endP | Ends a parallel region by merging the channels into a single stream.
Returns:
Stream: Stream for which subsequent transformations are no longer parallelized.
.. seealso:: :py:meth:`set_parallel`, :py:meth:`parallel` | entailment |
def set_parallel(self, width, name=None):
"""
Set this source stream to be split into multiple channels
as the start of a parallel region.
Calling ``set_parallel`` on a stream created by
:py:meth:`~Topology.source` results in the stream
having `width` channels, each created by its own instance
of the callable::
s = topo.source(S())
s.set_parallel(3)
f = s.filter(F())
e = f.end_parallel()
Each channel has independent instances of ``S`` and ``F``. Tuples
created by the instance of ``S`` in channel 0 are passed to the
instance of ``F`` in channel 0, and so on for channels 1 and 2.
Callable transforms instances within the channel can use
the runtime functions
:py:func:`~streamsx.ec.channel`,
:py:func:`~streamsx.ec.local_channel`,
:py:func:`~streamsx.ec.max_channels` &
:py:func:`~streamsx.ec.local_max_channels`
to adapt to being invoked in parallel. For example a
source callable can use its channel number to determine
which partition to read from in a partitioned external system.
Calling ``set_parallel`` on a stream created by
:py:meth:`~Topology.subscribe` results in the stream
having `width` channels. Subscribe ensures that the
stream will contain all published tuples matching the
topic subscription and type. A published tuple will appear
on one of the channels though the specific channel is not known
in advance.
A parallel region is terminated by :py:meth:`end_parallel`
or :py:meth:`for_each`.
The number of channels is set by `width` which may be an `int` greater
than zero or a submission parameter created by
:py:meth:`Topology.create_submission_parameter`.
With IBM Streams 4.3 or later the number of channels can be
dynamically changed at runtime.
Parallel regions are started on non-source streams using
:py:meth:`parallel`.
Args:
width: The degree of parallelism for the parallel region.
name(str): Name of the parallel region. Defaults to the name of this stream.
Returns:
Stream: Returns this stream.
.. seealso:: :py:meth:`parallel`, :py:meth:`end_parallel`
.. versionadded:: 1.9
.. versionchanged:: 1.11 `name` parameter added.
"""
# Mark the source operator itself as parallel rather than inserting a
# $Parallel$ marker (contrast with :py:meth:`parallel`).
self.oport.operator.config['parallel'] = True
# width may be an int or a submission parameter; serialize to SPL JSON.
self.oport.operator.config['width'] = streamsx.topology.graph._as_spl_json(width, int)
if name:
# Request a graph-unique version of the caller-supplied region name.
name = self.topology.graph._requested_name(str(name), action='set_parallel')
self.oport.operator.config['regionName'] = name
return self | Set this source stream to be split into multiple channels
as the start of a parallel region.
Calling ``set_parallel`` on a stream created by
:py:meth:`~Topology.source` results in the stream
having `width` channels, each created by its own instance
of the callable::
s = topo.source(S())
s.set_parallel(3)
f = s.filter(F())
e = f.end_parallel()
Each channel has independent instances of ``S`` and ``F``. Tuples
created by the instance of ``S`` in channel 0 are passed to the
instance of ``F`` in channel 0, and so on for channels 1 and 2.
Callable transforms instances within the channel can use
the runtime functions
:py:func:`~streamsx.ec.channel`,
:py:func:`~streamsx.ec.local_channel`,
:py:func:`~streamsx.ec.max_channels` &
:py:func:`~streamsx.ec.local_max_channels`
to adapt to being invoked in parallel. For example a
source callable can use its channel number to determine
which partition to read from in a partitioned external system.
Calling ``set_parallel`` on a stream created by
:py:meth:`~Topology.subscribe` results in the stream
having `width` channels. Subscribe ensures that the
stream will contain all published tuples matching the
topic subscription and type. A published tuple will appear
on one of the channels though the specific channel is not known
in advance.
A parallel region is terminated by :py:meth:`end_parallel`
or :py:meth:`for_each`.
The number of channels is set by `width` which may be an `int` greater
than zero or a submission parameter created by
:py:meth:`Topology.create_submission_parameter`.
With IBM Streams 4.3 or later the number of channels can be
dynamically changed at runtime.
Parallel regions are started on non-source streams using
:py:meth:`parallel`.
Args:
width: The degree of parallelism for the parallel region.
name(str): Name of the parallel region. Defaults to the name of this stream.
Returns:
Stream: Returns this stream.
.. seealso:: :py:meth:`parallel`, :py:meth:`end_parallel`
.. versionadded:: 1.9
.. versionchanged:: 1.11 `name` parameter added. | entailment |
def set_consistent(self, consistent_config):
""" Indicates that the stream is the start of a consistent region.
Args:
consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.
Returns:
Stream: Returns this stream.
.. versionadded:: 1.11
"""
# add job control plane if needed — consistent regions are coordinated
# through it (the call is a no-op if one is already present).
self.topology._add_job_control_plane()
# Record the consistent-region configuration on the source operator.
self.oport.operator.consistent(consistent_config)
return self._make_placeable() | Indicates that the stream is the start of a consistent region.
Args:
consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.
Returns:
Stream: Returns this stream.
.. versionadded:: 1.11 | entailment |
def last(self, size=1):
""" Declares a sliding window containing most recent tuples
on this stream.
The number of tuples maintained in the window is defined by `size`.
If `size` is an `int` then it is the count of tuples in the window.
For example, with ``size=10`` the window always contains the
last (most recent) ten tuples.
If `size` is a `datetime.timedelta` then it is the duration
of the window. With a `timedelta` representing five minutes
then the window contains any tuples that arrived in the last
five minutes.
Args:
size: The size of the window, either an `int` to define the
number of tuples or `datetime.timedelta` to define the
duration of the window.
Raises:
ValueError: if `size` is neither an `int` nor a `datetime.timedelta`.
Examples::
# Create a window against stream s of the last 100 tuples
w = s.last(size=100)
::
# Create a window against stream s of tuples that
# arrived on the stream in the last five minutes
w = s.last(size=datetime.timedelta(minutes=5))
Returns:
Window: Window of the last (most recent) tuples on this stream.
"""
win = Window(self, 'SLIDING')
# timedelta must be checked first: it is not an int, but the order makes
# the time-based intent explicit.
if isinstance(size, datetime.timedelta):
# Time-based eviction: keep tuples younger than the given duration.
win._evict_time(size)
elif isinstance(size, int):
# Count-based eviction: keep the most recent `size` tuples.
win._evict_count(size)
else:
# NOTE(review): raises ValueError for a wrong *type*; callers might
# expect TypeError, but changing it would break existing handlers.
raise ValueError(size)
return win | Declares a slding window containing most recent tuples
on this stream.
The number of tuples maintained in the window is defined by `size`.
If `size` is an `int` then it is the count of tuples in the window.
For example, with ``size=10`` the window always contains the
last (most recent) ten tuples.
If `size` is an `datetime.timedelta` then it is the duration
of the window. With a `timedelta` representing five minutes
then the window contains any tuples that arrived in the last
five minutes.
Args:
size: The size of the window, either an `int` to define the
number of tuples or `datetime.timedelta` to define the
duration of the window.
Examples::
# Create a window against stream s of the last 100 tuples
w = s.last(size=100)
::
# Create a window against stream s of tuples
# arrived on the stream in the last five minutes
w = s.last(size=datetime.timedelta(minutes=5))
Returns:
Window: Window of the last (most recent) tuples on this stream. | entailment |
def union(self, streamSet):
"""
Creates a stream that is a union of this stream and other streams
Args:
streamSet: a set of Stream objects to merge with this stream
Returns:
Stream: Stream containing the tuples of this stream and all streams in `streamSet`.
Raises:
TypeError: if `streamSet` is not a `set`.
"""
if(not isinstance(streamSet,set)) :
raise TypeError("The union operator parameter must be a set object")
# Nothing to merge with: the union is just this stream itself.
if(len(streamSet) == 0):
return self
op = self.topology.graph.addOperator("$Union$")
# This stream plus every stream in the set feeds the union marker.
op.addInputPort(outputPort=self.oport)
for stream in streamSet:
op.addInputPort(outputPort=stream.oport)
# The merged stream takes the schema of this stream.
oport = op.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, oport) | Creates a stream that is a union of this stream and other streams
Args:
streamSet: a set of Stream objects to merge with this stream
Returns:
Stream: | entailment |
def print(self, tag=None, name=None):
"""
Prints each tuple to stdout flushing after each tuple.
If `tag` is not `None` then each tuple has "tag: " prepended
to it before printing.
Args:
tag: A tag to prepend to each tuple.
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
Returns:
streamsx.topology.topology.Sink: Stream termination.
.. versionadded:: 1.6.1 `tag`, `name` parameters.
.. versionchanged:: 1.7
Now returns a :py:class:`Sink` instance.
"""
_name = name
if _name is None:
_name = 'print'
fn = streamsx.topology.functions.print_flush
if tag is not None:
# `tag` is rebound before the lambda is created and never changes
# afterwards, so the closure safely captures the prefixed value.
tag = str(tag) + ': '
fn = lambda v : streamsx.topology.functions.print_flush(tag + str(v))
sp = self.for_each(fn, name=_name)
# Point diagnostics at the print() call site rather than for_each().
sp._op().sl = _SourceLocation(_source_info(), 'print')
return sp | Prints each tuple to stdout flushing after each tuple.
If `tag` is not `None` then each tuple has "tag: " prepended
to it before printing.
Args:
tag: A tag to prepend to each tuple.
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
Returns:
streamsx.topology.topology.Sink: Stream termination.
.. versionadded:: 1.6.1 `tag`, `name` parameters.
.. versionchanged:: 1.7
Now returns a :py:class:`Sink` instance. | entailment |
def publish(self, topic, schema=None, name=None):
"""
Publish this stream on a topic for other Streams applications to subscribe to.
A Streams application may publish a stream to allow other
Streams applications to subscribe to it. A subscriber
matches a publisher if the topic and schema match.
By default a stream is published using its schema.
A stream of :py:const:`Python objects <streamsx.topology.schema.CommonSchema.Python>` can be subscribed to by other Streams Python applications.
If a stream is published setting `schema` to
:py:const:`~streamsx.topology.schema.CommonSchema.Json`
then it is published as a stream of JSON objects.
Other Streams applications may subscribe to it regardless
of their implementation language.
If a stream is published setting `schema` to
:py:const:`~streamsx.topology.schema.CommonSchema.String`
then it is published as strings.
Other Streams applications may subscribe to it regardless
of their implementation language.
Supported values of `schema` are only
:py:const:`~streamsx.topology.schema.CommonSchema.Json`
and
:py:const:`~streamsx.topology.schema.CommonSchema.String`.
Args:
topic(str): Topic to publish this stream to.
schema: Schema to publish. Defaults to the schema of this stream.
name(str): Name of the publish operator, defaults to a generated name.
Returns:
streamsx.topology.topology.Sink: Stream termination.
Raises:
ValueError: if `schema` is neither ``Json`` nor ``String``.
.. versionadded:: 1.6.1 `name` parameter.
.. versionchanged:: 1.7
Now returns a :py:class:`Sink` instance.
"""
sl = _SourceLocation(_source_info(), 'publish')
schema = streamsx.topology.schema._normalize(schema)
# If a different publish schema was requested, first convert the stream
# and then publish the converted stream (recursive call below).
if schema is not None and self.oport.schema.schema() != schema.schema():
# NOTE(review): `nc` is never used — candidate for removal.
nc = None
if schema == streamsx.topology.schema.CommonSchema.Json:
schema_change = self.as_json()
elif schema == streamsx.topology.schema.CommonSchema.String:
schema_change = self.as_string()
else:
raise ValueError(schema)
# Keep the conversion colocated with this stream when placeable.
if self._placeable:
self._colocate(schema_change, 'publish')
sp = schema_change.publish(topic, schema=schema, name=name)
sp._op().sl = sl
return sp
_name = self.topology.graph._requested_name(name, action="publish")
# publish is never stateful
op = self.topology.graph.addOperator("com.ibm.streamsx.topology.topic::Publish", params={'topic': topic}, sl=sl, name=_name, stateful=False)
op.addInputPort(outputPort=self.oport)
op._layout_group('Publish', name if name else _name)
sink = Sink(op)
if self._placeable:
self._colocate(sink, 'publish')
return sink | Publish this stream on a topic for other Streams applications to subscribe to.
A Streams application may publish a stream to allow other
Streams applications to subscribe to it. A subscriber
matches a publisher if the topic and schema match.
By default a stream is published using its schema.
A stream of :py:const:`Python objects <streamsx.topology.schema.CommonSchema.Python>` can be subscribed to by other Streams Python applications.
If a stream is published setting `schema` to
:py:const:`~streamsx.topology.schema.CommonSchema.Json`
then it is published as a stream of JSON objects.
Other Streams applications may subscribe to it regardless
of their implementation language.
If a stream is published setting `schema` to
:py:const:`~streamsx.topology.schema.CommonSchema.String`
then it is published as strings
Other Streams applications may subscribe to it regardless
of their implementation language.
Supported values of `schema` are only
:py:const:`~streamsx.topology.schema.CommonSchema.Json`
and
:py:const:`~streamsx.topology.schema.CommonSchema.String`.
Args:
topic(str): Topic to publish this stream to.
schema: Schema to publish. Defaults to the schema of this stream.
name(str): Name of the publish operator, defaults to a generated name.
Returns:
streamsx.topology.topology.Sink: Stream termination.
.. versionadded:: 1.6.1 `name` parameter.
.. versionchanged:: 1.7
Now returns a :py:class:`Sink` instance. | entailment |
def autonomous(self):
"""
Starts an autonomous region for downstream processing.
By default IBM Streams processing is executed in an autonomous region
where any checkpointing of operator state is autonomous (independent)
of other operators.
This method may be used to end a consistent region by starting an
autonomous region. This may be called even if this stream is in
an autonomous region.
Autonomous is not applicable when a topology is submitted
to a STANDALONE context and will be ignored.
.. versionadded:: 1.6
Returns:
Stream: Stream whose subsequent downstream processing is in an autonomous region.
"""
# Insert a marker; schema and tuples pass through unchanged.
op = self.topology.graph.addOperator("$Autonomous$")
op.addInputPort(outputPort=self.oport)
oport = op.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, oport) | Starts an autonomous region for downstream processing.
By default IBM Streams processing is executed in an autonomous region
where any checkpointing of operator state is autonomous (independent)
of other operators.
This method may be used to end a consistent region by starting an
autonomous region. This may be called even if this stream is in
an autonomous region.
Autonomous is not applicable when a topology is submitted
to a STANDALONE contexts and will be ignored.
.. versionadded:: 1.6
Returns:
Stream: Stream whose subsequent downstream processing is in an autonomous region. | entailment |
def as_string(self, name=None):
"""
Declares a stream converting each tuple on this stream
into a string using `str(tuple)`.
The stream is typed as a :py:const:`string stream <streamsx.topology.schema.CommonSchema.String>`.
If this stream is already typed as a string stream then it will
be returned (with no additional processing against it and `name`
is ignored).
Args:
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
.. versionadded:: 1.6
.. versionadded:: 1.6.1 `name` parameter added.
Returns:
Stream: Stream containing the string representations of tuples on this stream.
"""
# _change_schema returns self unchanged when the schema already matches.
sas = self._change_schema(streamsx.topology.schema.CommonSchema.String, 'as_string', name)._layout('AsString')
sas.oport.operator.sl = _SourceLocation(_source_info(), 'as_string')
return sas | Declares a stream converting each tuple on this stream
into a string using `str(tuple)`.
The stream is typed as a :py:const:`string stream <streamsx.topology.schema.CommonSchema.String>`.
If this stream is already typed as a string stream then it will
be returned (with no additional processing against it and `name`
is ignored).
Args:
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
.. versionadded:: 1.6
.. versionadded:: 1.6.1 `name` parameter added.
Returns:
Stream: Stream containing the string representations of tuples on this stream. | entailment |
def as_json(self, force_object=True, name=None):
"""
Declares a stream converting each tuple on this stream into
a JSON value.
The stream is typed as a :py:const:`JSON stream <streamsx.topology.schema.CommonSchema.Json>`.
Each tuple must be supported by `JSONEncoder`.
If `force_object` is `True` then each tuple that is not a `dict`
will be converted to a JSON object with a single key `payload`
containing the tuple. Thus each object on the stream will
be a JSON object.
If `force_object` is `False` then each tuple is converted to
a JSON value directly using the `json` package.
If this stream is already typed as a JSON stream then it will
be returned (with no additional processing against it and
`force_object` and `name` are ignored).
Args:
force_object(bool): Force conversion of non dicts to JSON objects.
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
.. versionadded:: 1.6.1
Returns:
Stream: Stream containing the JSON representations of tuples on this stream.
"""
# With force_object the runtime wraps non-dict tuples as {"payload": tuple};
# otherwise the default (identity) conversion is used by _change_schema.
func = streamsx.topology.runtime._json_force_object if force_object else None
saj = self._change_schema(streamsx.topology.schema.CommonSchema.Json, 'as_json', name, func)._layout('AsJson')
saj.oport.operator.sl = _SourceLocation(_source_info(), 'as_json')
return saj | Declares a stream converting each tuple on this stream into
a JSON value.
The stream is typed as a :py:const:`JSON stream <streamsx.topology.schema.CommonSchema.Json>`.
Each tuple must be supported by `JSONEncoder`.
If `force_object` is `True` then each tuple that is not a `dict`
will be converted to a JSON object with a single key `payload`
containing the tuple. Thus each object on the stream will
be a JSON object.
If `force_object` is `False` then each tuple is converted to
a JSON value directly using `json` package.
If this stream is already typed as a JSON stream then it will
be returned (with no additional processing against it and
`force_object` and `name` are ignored).
Args:
force_object(bool): Force conversion of non dicts to JSON objects.
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
.. versionadded:: 1.6.1
Returns:
Stream: Stream containing the JSON representations of tuples on this stream. | entailment |
def _change_schema(self, schema, action, name=None, func=None):
"""Internal method to change a schema.

Args:
schema: Target schema for the resulting stream.
action(str): Action name (e.g. ``'as_string'``) used as the default
stream name and for colocation.
name(str): Optional name of the resulting stream; defaults to `action`.
func: Conversion callable applied to each tuple; defaults to the
identity function.

Returns:
Stream: This stream unchanged if it already has `schema`,
otherwise a new mapped stream with the target schema.
"""
# No-op when the schema already matches — callers rely on this.
if self.oport.schema.schema() == schema.schema():
return self
if func is None:
func = streamsx.topology.functions.identity
_name = name
if _name is None:
_name = action
css = self._map(func, schema, name=_name)
# Keep the conversion on the same resource as this stream when placeable.
if self._placeable:
self._colocate(css, action)
return css | Internal method to change a schema. | entailment |
def _initialize_rest(self):
"""Used to initialize the View object on first use.

Lazily resolves the REST view object for this view's name from the
submitted job.

Raises:
ValueError: If the view's topology has not been submitted yet.
"""
if self._submit_context is None:
raise ValueError("View has not been created.")
job = self._submit_context._job_access()
# assumes exactly one view matches this name within the job — TODO confirm
self._view_object = job.get_views(name=self.name)[0] | Used to initialize the View object on first use. | entailment |
def display(self, duration=None, period=2):
"""Display a view within a Jupyter or IPython notebook.
Provides an easy mechanism to visualize data on a stream
using a view.
Tuples are fetched from the view and displayed in a table
within the notebook cell using a ``pandas.DataFrame``.
The table is continually updated with the latest tuples from the view.
This method calls :py:meth:`start_data_fetch` and will call
:py:meth:`stop_data_fetch` when completed if `duration` is set.
Args:
duration(float): Number of seconds to fetch and display tuples. If ``None`` then the display will be updated until :py:meth:`stop_data_fetch` is called.
period(float): Maximum update period.
.. note::
A view is a sampling of data on a stream so tuples that
are on the stream may not appear in the view.
.. note::
Python modules `ipywidgets` and `pandas` must be installed
in the notebook environment.
.. warning::
Behavior when called outside a notebook is undefined.
.. versionadded:: 1.12
"""
# Resolve the REST view object on first use, then delegate rendering to it.
self._initialize_rest()
return self._view_object.display(duration, period) | Display a view within a Jupyter or IPython notebook.
Provides an easy mechanism to visualize data on a stream
using a view.
Tuples are fetched from the view and displayed in a table
within the notebook cell using a ``pandas.DataFrame``.
The table is continually updated with the latest tuples from the view.
This method calls :py:meth:`start_data_fetch` and will call
:py:meth:`stop_data_fetch` when completed if `duration` is set.
Args:
duration(float): Number of seconds to fetch and display tuples. If ``None`` then the display will be updated until :py:meth:`stop_data_fetch` is called.
period(float): Maximum update period.
.. note::
A view is a sampling of data on a stream so tuples that
are on the stream may not appear in the view.
.. note::
Python modules `ipywidgets` and `pandas` must be installed
in the notebook environment.
.. warning::
Behavior when called outside a notebook is undefined.
.. versionadded:: 1.12 | entailment |
def complete(self, stream):
    """Complete the pending stream.
    Any connections made to :py:attr:`stream` are connected to `stream` once
    this method returns.
    Args:
        stream(Stream): Stream that completes the connection.
    """
    # A pending stream may be completed only once.
    assert not self.is_complete()
    # Wire the completing stream's output into the marker operator.
    self._marker.addInputPort(outputPort=stream.oport)
    # Adopt the completing stream's schema for the pending output port.
    self.stream.oport.schema = stream.oport.schema
    # Update the pending schema to the actual schema
    # Any downstream filters that took the reference
    # will be automatically updated to the correct schema
    self._pending_schema._set(self.stream.oport.schema)
    # Mark the operator with the pending stream
    # a start point for graph travesal
    stream.oport.operator._start_op = True
Any connections made to :py:attr:`stream` are connected to `stream` once
this method returns.
Args:
stream(Stream): Stream that completes the connection. | entailment |
def trigger(self, when=1):
    """Declare a window with this window's size and a trigger policy.

    The trigger is defined by `when`: an `int` triggers every `when`
    tuples; a `datetime.timedelta` triggers periodically with that
    period. Without a call to `trigger` a window triggers for every
    inserted tuple (equivalent to ``when=1``).

    Args:
        when: Trigger as a tuple count (`int`) or a period
            (`datetime.timedelta`).

    Returns:
        Window: Window that will be triggered.

    Raises:
        ValueError: `when` is neither an `int` nor a `timedelta`.

    .. warning:: A trigger is only supported for a sliding window
        such as one created by :py:meth:`last`.
    """
    # Build the new configuration, starting from this window's eviction
    # settings, then layer the trigger policy on top.
    cfg = {'evictPolicy': self._config['evictPolicy'],
           'evictConfig': self._config['evictConfig']}
    if cfg['evictPolicy'] == 'TIME':
        cfg['evictTimeUnit'] = 'MILLISECONDS'
    if isinstance(when, datetime.timedelta):
        cfg['triggerPolicy'] = 'TIME'
        cfg['triggerConfig'] = int(when.total_seconds() * 1000.0)
        cfg['triggerTimeUnit'] = 'MILLISECONDS'
    elif isinstance(when, int):
        cfg['triggerPolicy'] = 'COUNT'
        cfg['triggerConfig'] = when
    else:
        raise ValueError(when)
    win = Window(self.stream, self._config['type'])
    win._config.update(cfg)
    return win
When the window is triggered is defined by `when`.
If `when` is an `int` then the window is triggered every
`when` tuples. For example, with ``when=5`` the window
will be triggered every five tuples.
If `when` is an `datetime.timedelta` then it is the period
of the trigger. With a `timedelta` representing one minute
then the window is triggered every minute.
By default, when `trigger` has not been called on a `Window`
it triggers for every tuple inserted into the window
(equivalent to ``when=1``).
Args:
when: The size of the window, either an `int` to define the
number of tuples or `datetime.timedelta` to define the
duration of the window.
Returns:
Window: Window that will be triggered.
.. warning:: A trigger is only supported for a sliding window
such as one created by :py:meth:`last`. | entailment |
def aggregate(self, function, name=None):
    """Aggregates the contents of the window when the window is triggered.

    Upon a window trigger ``function`` is passed a list containing the
    window contents in arrival order; a non-`None` return value is
    submitted to the returned stream, `None` suppresses submission.
    For example a moving average of the last 10 tuples::

        win = s.last(10).trigger(1)
        moving_averages = win.aggregate(lambda tuples: sum(tuples)/len(tuples))

    .. note:: For a tumbling (:py:meth:`~Stream.batch`) window on a finite
        stream a final aggregation is performed on any remaining tuples,
        so ``function`` may receive fewer tuples than a count-based size.

    Args:
        function: The function which aggregates the contents of the window.
        name(str): The name of the returned stream. Defaults to a generated name.

    Returns:
        Stream: A `Stream` of the returned values of the supplied function.

    .. warning::
        In Python 3.5 or later, for structured schemas containing a
        ``blob`` type the blob's ``memoryview`` is released while held in
        the window; copy required blob values beforehand with a
        :py:meth:`map` using ``memoryview.tobytes()``.

    .. versionadded:: 1.8
    .. versionchanged:: 1.11 Support for aggregation of streams with structured schemas.
    """
    out_schema = streamsx.topology.schema.CommonSchema.Python
    src_loc = _SourceLocation(_source_info(), "aggregate")
    op_name = self.topology.graph._requested_name(name, action="aggregate", func=function)
    is_stateful = self.stream._determine_statefulness(function)
    # Declare the Aggregate operator and feed it this window's stream.
    agg_op = self.topology.graph.addOperator(self.topology.opnamespace+"::Aggregate", function, name=op_name, sl=src_loc, stateful=is_stateful)
    agg_op.addInputPort(outputPort=self.stream.oport, window_config=self._config)
    streamsx.topology.schema.StreamSchema._fnop_style(self.stream.oport.schema, agg_op, 'pyStyle')
    out_port = agg_op.addOutputPort(schema=out_schema, name=op_name)
    agg_op._layout(kind='Aggregate', name=op_name, orig_name=name)
    return Stream(self.topology, out_port)._make_placeable()
triggered.
Upon a window trigger, the supplied function is passed a list containing
the contents of the window: ``function(items)``. The order of the window
items in the list are the order in which they were each received by the
window. If the function's return value is not `None` then the result will
be submitted as a tuple on the returned stream. If the return value is
`None` then no tuple submission will occur. For example, a window that
calculates a moving average of the last 10 tuples could be written as follows::
win = s.last(10).trigger(1)
moving_averages = win.aggregate(lambda tuples: sum(tuples)/len(tuples))
.. note:: If a tumbling (:py:meth:`~Stream.batch`) window's stream
is finite then a final aggregation is performed if the
window is not empty. Thus ``function`` may be passed fewer tuples
for a window sized using a count. For example a stream with 105
tuples and a batch size of 25 tuples will perform four aggregations
with 25 tuples each and a final aggregation of 5 tuples.
Args:
function: The function which aggregates the contents of the window
name(str): The name of the returned stream. Defaults to a generated name.
Returns:
Stream: A `Stream` of the returned values of the supplied function.
.. warning::
In Python 3.5 or later if the stream being aggregated has a
structured schema that contains a ``blob`` type then any ``blob``
value will not be maintained in the window. Instead its
``memoryview`` object will have been released. If the ``blob``
value is required then perform a :py:meth:`map` transformation
(without setting ``schema``) copying any required
blob value in the tuple using ``memoryview.tobytes()``.
.. versionadded:: 1.8
.. versionchanged:: 1.11 Support for aggregation of streams with structured schemas. | entailment |
def get_rest_api():
    """Return the root URL for the IBM Streams REST API.

    Queries a local Streams install via ``streamtool geturl --api``.

    Raises:
        ChildProcessError: the ``streamtool`` invocation failed.
    """
    # Only valid when a local Streams install is present.
    assert _has_local_install
    lines = []
    if not _run_st(['geturl', '--api'], lines=lines):
        raise ChildProcessError('streamtool geturl')
    return lines[0]
def _get_package_name(module):
"""
Gets the package name given a module object
Returns:
str: If the module belongs to a package, the package name.
if the module does not belong to a package, None or ''.
"""
try:
# if __package__ is defined, use it
package_name = module.__package__
except AttributeError:
package_name = None
if package_name is None:
# if __path__ is defined, the package name is the module name
package_name = module.__name__
if not hasattr(module, '__path__'):
# if __path__ is not defined, the package name is the
# string before the last "." of the fully-qualified module name
package_name = package_name.rpartition('.')[0]
return package_name | Gets the package name given a module object
Returns:
str: If the module belongs to a package, the package name.
if the module does not belong to a package, None or ''. | entailment |
def _get_module_name(function):
"""
Gets the function's module name
Resolves the __main__ module to an actual module name
Returns:
str: the function's module name
"""
module_name = function.__module__
if module_name == '__main__':
# get the main module object of the function
main_module = inspect.getmodule(function)
# get the module name from __file__ by getting the base name and removing the .py extension
# e.g. test1.py => test1
if hasattr(main_module, '__file__'):
module_name = os.path.splitext(os.path.basename(main_module.__file__))[0]
return module_name | Gets the function's module name
Resolves the __main__ module to an actual module name
Returns:
str: the function's module name | entailment |
def _is_builtin_module(module):
    """Return True when `module` is builtin or part of the standard library."""
    name = module.__name__
    # No __file__ (frozen/builtin) or listed as a compiled-in builtin.
    if not hasattr(module, '__file__') or name in sys.builtin_module_names:
        return True
    if name in _stdlib._STD_LIB_MODULES:
        return True
    path = os.path.abspath(module.__file__)
    # Anything under site-packages is third-party, even inside the prefix.
    if 'site-packages' in path:
        return False
    if path.startswith(_STD_MODULE_DIR):
        return True
    if '.' not in name:
        return False
    # Submodule of a standard-library package counts as standard library.
    return name.split('.', 1)[0] in _stdlib._STD_LIB_MODULES
def _find_dependent_modules(self, module):
    """Return the set of modules `module` depends on.

    Collects directly referenced modules plus the defining modules of
    referenced classes and routines.
    """
    deps = set()
    # Modules referenced directly.
    deps.update(mod for _, mod in inspect.getmembers(module, inspect.ismodule))
    # Defining modules of referenced classes and routines.
    for _, cls in inspect.getmembers(module, inspect.isclass):
        self._add_obj_module(deps, cls)
    for _, rtn in inspect.getmembers(module, inspect.isroutine):
        self._add_obj_module(deps, rtn)
    return deps
classes and routines. | entailment |
def add_dependencies(self, module):
    """
    Adds a module and its dependencies to the list of dependencies.
    Top-level entry point for adding a module and its dependencies.
    """
    # Guard against cycles / repeated work: each module is visited once.
    if module in self._processed_modules:
        return None
    if hasattr(module, "__name__"):
        mn = module.__name__
    else:
        mn = '<unknown>'
    _debug.debug("add_dependencies:module=%s", module)
    # If the module in which the class/function is defined is __main__, don't add it. Just add its dependencies.
    if mn == "__main__":
        self._processed_modules.add(module)
    # add the module as a dependency
    elif not self._add_dependency(module, mn):
        # Excluded (builtin/streamsx/site-packages/filtered): stop here,
        # its dependencies are not scanned either.
        _debug.debug("add_dependencies:not added:module=%s", mn)
        return None
    _debug.debug("add_dependencies:ADDED:module=%s", mn)
    # recursively get the module's imports and add those as dependencies
    for dm in self._find_dependent_modules(module):
        _debug.debug("add_dependencies:adding dependent module %s for %s", dm, mn)
        self.add_dependencies(dm)
Top-level entry point for adding a module and its dependecies. | entailment |
def _include_module(self, module, mn):
    """Decide whether a module should be bundled.

    Consults the topology's ``include_packages`` (checked first, wins)
    and ``exclude_packages``. A package name matches either exactly or
    as a dotted prefix (e.g. ``scipy`` matches ``scipy.linalg``), since
    names like ``scipy.special.specfun`` share a top-level prefix.
    Unmatched modules are included by default.
    """
    if mn in self.topology.include_packages:
        _debug.debug("_include_module:explicit using __include_packages: module=%s", mn)
        return True
    dotted = '.' in mn
    if dotted:
        for pkg in self.topology.include_packages:
            if mn.startswith(pkg + '.'):
                _debug.debug("_include_module:explicit pattern using __include_packages: module=%s pattern=%s", mn, pkg + '.')
                return True
    if mn in self.topology.exclude_packages:
        _debug.debug("_include_module:explicit using __exclude_packages: module=%s", mn)
        return False
    if dotted:
        for pkg in self.topology.exclude_packages:
            if mn.startswith(pkg + '.'):
                _debug.debug("_include_module:explicit pattern using __exclude_packages: module=%s pattern=%s", mn, pkg + '.')
                return False
    _debug.debug("_include_module:including: module=%s", mn)
    return True
included_packages and excluded_packages.
As some packages have the following format:
scipy.special.specfun
scipy.linalg
Where the top-level package name is just a prefix to a longer package name,
we don't want to do a direct comparison. Instead, we want to exclude packages
which are either exactly "<package_name>", or start with "<package_name>". | entailment |
def _add_dependency(self, module, mn):
    """
    Adds a module to the list of dependencies
    without handling the modules dependences.
    Modules in site-packages are excluded from being added into
    the toolkit. This mimics dill.

    Returns True when the module was recorded as a dependency,
    False when it was excluded (streamsx, builtin/stdlib, filtered
    by include/exclude packages, or located in site-packages).
    """
    _debug.debug("_add_dependency:module=%s", mn)
    # streamsx itself is always available at runtime; never bundle it.
    if _is_streamsx_module(module):
        _debug.debug("_add_dependency:streamsx module=%s", mn)
        return False
    # Builtins and the standard library ship with the interpreter.
    if _is_builtin_module(module):
        _debug.debug("_add_dependency:builtin module=%s", mn)
        return False
    if not self._include_module(module, mn):
        #print ("ignoring dependencies for {0} {1}".format(module.__name__, module))
        return False
    package_name = _get_package_name(module)
    top_package_name = module.__name__.split('.')[0]
    if package_name and top_package_name in sys.modules:
        # module is part of a package
        # get the top-level package
        top_package = sys.modules[top_package_name]
        if "__path__" in top_package.__dict__:
            # for regular packages, there is one top-level directory
            # for namespace packages, there can be more than one.
            # they will be merged in the bundle
            seen_non_site_package = False
            for top_package_path in reversed(list(top_package.__path__)):
                top_package_path = os.path.abspath(top_package_path)
                # site-packages portions of a namespace package are skipped;
                # only non-site-packages portions are bundled.
                if 'site-packages' in top_package_path:
                    continue
                seen_non_site_package = True
                self._add_package(top_package_path)
            if not seen_non_site_package:
                _debug.debug("_add_dependency:site-packages path module=%s", mn)
                return False
        elif hasattr(top_package, '__file__'):
            # package that is an individual python file with empty __path__
            if 'site-packages' in top_package.__file__:
                _debug.debug("_add_dependency:site-packages module=%s", mn)
                return False
            self._add_package(os.path.abspath(top_package.__file__))
    elif hasattr(module, '__file__'):
        # individual Python module
        module_path = os.path.abspath(module.__file__)
        if 'site-packages' in module_path:
            _debug.debug("_add_dependency:site-packages module=%s", mn)
            return False
        self._modules.add(module_path)
    # Record the module so it is never re-processed.
    self._processed_modules.add(module)
    return True
without handling the modules dependences.
Modules in site-packages are excluded from being added into
the toolkit. This mimics dill. | entailment |
def freeze(self) -> dict:
    """
    Returns a dictionary of all settings set for this object, including
    any values of its parents or hardcoded defaults.

    Precedence (lowest to highest): hardcoded defaults, parent settings,
    this object's own stored settings.
    """
    merged = {key: self._unserialize(d.value, d.type)
              for key, d in self._h.defaults.items()}
    if self._parent:
        merged.update(getattr(self._parent, self._h.attribute_name).freeze())
    # Own settings win over defaults and parent values.
    merged.update({key: self.get(key) for key in self._cache()})
    return merged
any values of its parents or hardcoded defaults. | entailment |
def get(self, key: str, default=None, as_type: type = None, binary_file=False):
    """
    Get a setting specified by key ``key``. Normally, settings are strings, but
    if you put non-strings into the settings object, you can request unserialization
    by specifying ``as_type``. If the key does not have a hardcoded default type,
    omitting ``as_type`` always will get you a string.

    Lookup order: this object's own settings, then the parent chain, then
    the hardcoded default for the key, then the ``default`` argument.

    If you receive a ``File`` object, it will already be opened. Specify
    ``binary_file`` to have it opened in binary mode.
    """
    # Hardcoded defaults may fix the type when the caller did not.
    if as_type is None and key in self._h.defaults:
        as_type = self._h.defaults[key].type
    cache = self._cache()
    if key in cache:
        value = cache[key]
    else:
        # Not set locally: consult the parent as a raw string, if any.
        value = (getattr(self._parent, self._h.attribute_name).get(key, as_type=str)
                 if self._parent else None)
    if value is None and key in self._h.defaults:
        value = self._h.defaults[key].value
    if value is None and default is not None:
        value = default
    return self._unserialize(value, as_type, binary_file=binary_file)
if you put non-strings into the settings object, you can request unserialization
by specifying ``as_type``. If the key does not have a harcdoded default type,
omitting ``as_type`` always will get you a string.
If the setting with the specified name does not exist on this object, any parent object
up to the global settings layer (if configured) will be queried. If still no value is
found, a default value set in ths source code will be returned if one exists.
If not, the value of the ``default`` argument of this method will be returned instead.
If you receive a ``File`` object, it will already be opened. You can specify the ``binary_file``
flag to indicate that it should be opened in binary mode. | entailment |
def set(self, key: str, value: Any) -> None:
    """
    Stores a setting to the database of its object.
    The write to the database is performed immediately and the external
    cache is flushed; this object's in-memory caches are kept in sync.
    """
    wc = self._write_cache()
    if key in wc:
        setting = wc[key]
    else:
        # First write for this key: create a fresh settings row.
        setting = self._type(object=self._obj, key=key)
    setting.value = self._serialize(value)
    setting.save()
    self._cache()[key] = setting.value
    wc[key] = setting
    self._flush_external_cache()
The write to the database is performed immediately and the cache in the cache backend is flushed.
The cache within this object will be updated correctly. | entailment |
def delete(self, key: str) -> None:
    """
    Deletes a setting from this object's storage.
    The database delete happens immediately, the external cache is
    flushed, and this object's in-memory caches are kept in sync.
    """
    wc = self._write_cache()
    if key in wc:
        wc[key].delete()
        del wc[key]
    cache = self._cache()
    if key in cache:
        del cache[key]
    self._flush_external_cache()
def _get_vcap_services(vcap_services=None):
"""Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If
`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.
Args:
vcap_services (str): Try to parse as a JSON string, otherwise, try open it as a file.
vcap_services (dict): Return the dict as is.
Returns:
dict: A dict representation of the VCAP Services information.
Raises:
ValueError:
* if `vcap_services` nor VCAP_SERVICES environment variable are specified.
* cannot parse `vcap_services` as a JSON string nor as a filename.
"""
vcap_services = vcap_services or os.environ.get('VCAP_SERVICES')
if not vcap_services:
raise ValueError(
"VCAP_SERVICES information must be supplied as a parameter or as environment variable 'VCAP_SERVICES'")
# If it was passed to config as a dict, simply return it
if isinstance(vcap_services, dict):
return vcap_services
try:
# Otherwise, if it's a string, try to load it as json
vcap_services = json.loads(vcap_services)
except json.JSONDecodeError:
# If that doesn't work, attempt to open it as a file path to the json config.
try:
with open(vcap_services) as vcap_json_data:
vcap_services = json.load(vcap_json_data)
except:
raise ValueError("VCAP_SERVICES information is not JSON or a file containing JSON:", vcap_services)
return vcap_services | Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If
`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.
Args:
vcap_services (str): Try to parse as a JSON string, otherwise, try open it as a file.
vcap_services (dict): Return the dict as is.
Returns:
dict: A dict representation of the VCAP Services information.
Raises:
ValueError:
* if `vcap_services` nor VCAP_SERVICES environment variable are specified.
* cannot parse `vcap_services` as a JSON string nor as a filename. | entailment |
def _get_credentials(vcap_services, service_name=None):
"""Retrieves the credentials of the VCAP Service of the specified `service_name`. If
`service_name` is not specified, it takes the information from STREAMING_ANALYTICS_SERVICE_NAME environment
variable.
Args:
vcap_services (dict): A dict representation of the VCAP Services information.
service_name (str): One of the service name stored in `vcap_services`
Returns:
dict: A dict representation of the credentials.
Raises:
ValueError: Cannot find `service_name` in `vcap_services`
"""
service_name = service_name or os.environ.get('STREAMING_ANALYTICS_SERVICE_NAME', None)
# Get the service corresponding to the SERVICE_NAME
services = vcap_services['streaming-analytics']
creds = None
for service in services:
if service['name'] == service_name:
creds = service['credentials']
break
# If no corresponding service is found, error
if creds is None:
raise ValueError("Streaming Analytics service " + str(service_name) + " was not found in VCAP_SERVICES")
return creds | Retrieves the credentials of the VCAP Service of the specified `service_name`. If
`service_name` is not specified, it takes the information from STREAMING_ANALYTICS_SERVICE_NAME environment
variable.
Args:
vcap_services (dict): A dict representation of the VCAP Services information.
service_name (str): One of the service name stored in `vcap_services`
Returns:
dict: A dict representation of the credentials.
Raises:
ValueError: Cannot find `service_name` in `vcap_services` | entailment |
def _get_rest_api_url_from_creds(session, credentials):
"""Retrieves the Streams REST API URL from the provided credentials.
Args:
session (:py:class:`requests.Session`): A Requests session object for making REST calls
credentials (dict): A dict representation of the credentials.
Returns:
str: The remote Streams REST API URL.
"""
resources_url = credentials['rest_url'] + credentials['resources_path']
try:
response_raw = session.get(resources_url, auth=(credentials['userid'], credentials['password']))
response = response_raw.json()
except:
logger.error("Error while retrieving rest REST url from: " + resources_url)
raise
response_raw.raise_for_status()
rest_api_url = response['streams_rest_url'] + '/resources'
return rest_api_url | Retrieves the Streams REST API URL from the provided credentials.
Args:
session (:py:class:`requests.Session`): A Requests session object for making REST calls
credentials (dict): A dict representation of the credentials.
Returns:
str: The remote Streams REST API URL. | entailment |
def _get_iam_rest_api_url_from_creds(rest_client, credentials):
    """Retrieves the Streams REST API URL from the provided credentials using IAM authentication.

    Args:
        rest_client (:py:class:`rest_primitives._IAMStreamsRestClient`): A client for making REST calls using IAM authentication
        credentials (dict): A dict representation of the credentials.

    Returns:
        str: The remote Streams REST API URL.
    """
    response = rest_client.make_request(credentials[_IAMConstants.V2_REST_URL])
    streams_self = response['streams_self']
    # Replace the trailing '/instances...' portion with '/resources'.
    return streams_self[:streams_self.find('/instances')] + '/resources'
Args:
rest_client (:py:class:`rest_primitives._IAMStreamsRestClient`): A client for making REST calls using IAM authentication
credentials (dict): A dict representation of the credentials.
Returns:
str: The remote Streams REST API URL. | entailment |
def resource_url(self):
    """str: Root URL for IBM Streams REST API"""
    # Lazily resolve via streamtool and memoize.
    if not self._resource_url:
        self._resource_url = st.get_rest_api()
    return self._resource_url
def _get_element_by_id(self, resource_name, eclass, id):
"""Get a single element matching an id"""
elements = self._get_elements(resource_name, eclass, id=id)
if not elements:
raise ValueError("No resource matching: {0}".format(id))
if len(elements) == 1:
return elements[0]
raise ValueError("Multiple resources matching: {0}".format(id)) | Get a single element matching an id | entailment |
def get_domains(self):
    """Retrieves available domains.

    Returns:
        :py:obj:`list` of :py:class:`~.rest_primitives.Domain`: List of available domains
    """
    # Domains are fixed per REST API instance, so fetch once and memoize.
    if self._domains is not None:
        return self._domains
    self._domains = self._get_elements('domains', Domain)
    return self._domains
Returns:
:py:obj:`list` of :py:class:`~.rest_primitives.Domain`: List of available domains | entailment |
def get_resources(self):
    """Retrieves a list of all known Streams high-level REST resources.

    Returns:
        :py:obj:`list` of :py:class:`~.rest_primitives.RestResource`: List of all Streams high-level REST resources.
    """
    resources_json = self.rest_client.make_request(self.resource_url)['resources']
    return [RestResource(r, self.rest_client) for r in resources_json]
Returns:
:py:obj:`list` of :py:class:`~.rest_primitives.RestResource`: List of all Streams high-level REST resources. | entailment |
def of_definition(service_def):
    """Create a connection to a Streaming Analytics service.

    `service_def` is either the raw `service credentials` JSON copied from
    the service console's `Service credentials` page, or a JSON object
    (`dict`) of the form ``{ "type": "streaming-analytics",
    "name": "service name", "credentials": {...} }``.

    Args:
        service_def(dict): Definition of the service to connect to.

    Returns:
        StreamingAnalyticsConnection: Connection to defined service.
    """
    return StreamingAnalyticsConnection(
        streamsx.topology.context._vcap_from_service_definition(service_def),
        streamsx.topology.context._name_from_service_definition(service_def))
The single service is defined by `service_def` which can be one of
* The `service credentials` copied from the `Service credentials` page of the service console (not the Streams console). Credentials are provided in JSON format. They contain such as the API key and secret, as well as connection information for the service.
* A JSON object (`dict`) of the form: ``{ "type": "streaming-analytics", "name": "service name", "credentials": {...} }`` with the `service credentials` as the value of the ``credentials`` key.
Args:
service_def(dict): Definition of the service to connect to.
Returns:
StreamingAnalyticsConnection: Connection to defined service. | entailment |
def resource_url(self):
    """str: Root URL for IBM Streams REST API"""
    # Memoize; resolve via IAM or basic-auth credentials as appropriate.
    if not self._resource_url:
        if self._iam:
            self._resource_url = _get_iam_rest_api_url_from_creds(self.rest_client, self.credentials)
        else:
            self._resource_url = _get_rest_api_url_from_creds(self.session, self.credentials)
    return self._resource_url
def is_authenticated(self, request, **kwargs):
    """
    Verify 2-legged oauth request. Parameters accepted as
    values in the "Authorization" header, as a GET request parameter,
    or in a POST body.

    Returns True on success (and sets ``request.user`` plus
    ``request.META['oauth_consumer_key']``), None when no consumer key
    was supplied, and False on any verification error.
    """
    log.info("OAuth20Authentication")
    try:
        # 1) Consumer key from the query string takes precedence.
        key = request.GET.get('oauth_consumer_key')
        if not key:
            # 2) Fall back to the Authorization header; the key is the
            #    text after the first space (e.g. "Bearer <key>").
            for header in ['Authorization', 'HTTP_AUTHORIZATION']:
                auth_header_value = request.META.get(header)
                if auth_header_value:
                    key = auth_header_value.split(' ', 1)[1]
                    break
        if not key and request.method == 'POST':
            # 3) Finally look in a JSON POST body.
            if request.META.get('CONTENT_TYPE') == 'application/json':
                decoded_body = request.body.decode('utf8')
                key = json.loads(decoded_body)['oauth_consumer_key']
        if not key:
            log.info('OAuth20Authentication. No consumer_key found.')
            return None
        """
        If verify_access_token() does not pass, it will raise an error
        """
        token = self.verify_access_token(key, request, **kwargs)
        # If OAuth authentication is successful, set the request user to
        # the token user for authorization
        request.user = token.user
        # If OAuth authentication is successful, set oauth_consumer_key on
        # request in case we need it later
        request.META['oauth_consumer_key'] = key
        return True
    except KeyError:
        # Missing key in header/body lookup: treat as anonymous failure.
        log.exception("Error in OAuth20Authentication.")
        request.user = AnonymousUser()
        return False
    except Exception:
        # Any other verification failure: authentication fails.
        log.exception("Error in OAuth20Authentication.")
        return False
values in the "Authorization" header, as a GET request parameter,
or in a POST body. | entailment |
def check_scope(self, token, request):
    """Return the subset of this handler's required scopes that *token* allows.

    The attribute on ``self`` named after the request's HTTP method (e.g.
    ``self.GET``) declares the required scope(s): ``None`` means always
    allowed, a string is a single space-separated scope spec, and any
    other iterable is a collection of scope specs.

    Args:
        token: access token exposing ``allow_scopes(scopes)``.
        request: HTTP request; ``request.method`` selects the attribute.

    Returns:
        True when no scope is required; otherwise the (possibly empty)
        list of allowed scopes.

    Raises:
        OAuthError: if ``self`` has no attribute for the HTTP method.
        Exception: if the required scopes value is not a string/iterable.
    """
    http_method = request.method
    if not hasattr(self, http_method):
        raise OAuthError("HTTP method is not recognized")
    required_scopes = getattr(self, http_method)
    # A None scope means always allowed.
    if required_scopes is None:
        return True
    # The required scope is either a string or an iterable. If string,
    # check if it is allowed for our access token; otherwise iterate
    # through required_scopes to see which scopes are allowed.
    if isinstance(required_scopes, six.string_types):
        if token.allow_scopes(required_scopes.split()):
            return [required_scopes]
        return []
    allowed_scopes = []
    try:
        for scope in required_scopes:
            if token.allow_scopes(scope.split()):
                allowed_scopes.append(scope)
    except Exception as exc:
        # Was a bare ``except:`` — narrowed so SystemExit/KeyboardInterrupt
        # pass through, and chained so the original traceback (e.g. a
        # TypeError from a non-iterable value) is preserved.
        raise Exception('Invalid required scope values') from exc
    return allowed_scopes
def _is_args_eightbit(*args) -> bool:
"""
Check if input matches type: renderer.eightbit.
"""
if not args:
return False
elif args[0] is 0:
return True
elif isinstance(args[0], int):
return True
else:
return False | Check if input matches type: renderer.eightbit. | entailment |
def as_dict(self) -> Dict[str, str]:
    """
    Export color register as dict.
    """
    # Keep only entries whose value is exactly ``str`` (deliberate
    # ``type`` check rather than isinstance, excluding str subclasses).
    return {key: value for key, value in self.items() if type(value) is str}
def as_namedtuple(self):
    """
    Export color register as namedtuple.
    """
    mapping = self.as_dict()
    # Build a 'ColorRegister' namedtuple type from the exported keys and
    # instantiate it with the matching values.
    register_cls = namedtuple('ColorRegister', mapping.keys())
    return register_cls(*mapping.values())
def _extract_attrs(x, n):
    """Extracts attributes for an image. n is the index where the
    attributes begin. Extracted elements are deleted from the element
    list x. Attrs are returned in pandoc format.

    Falls back to parsing attributes embedded in the image path for
    pandoc < 1.16; re-raises the extraction error otherwise.
    """
    try:
        return extract_attrs(x, n)
    except (ValueError, IndexError):
        # NOTE(review): PANDOCVERSION is compared lexicographically as a
        # string ('1.9' > '1.16'); presumably pandocxnos.init() supplies a
        # comparable form -- confirm before relying on this.
        if PANDOCVERSION < '1.16':
            # Look for attributes attached to the image path, as occurs with
            # image references for pandoc < 1.16 (pandoc-fignos Issue #14).
            # See http://pandoc.org/MANUAL.html#images for the syntax.
            # Note: This code does not handle the "optional title" for
            # image references (search for link_attributes in pandoc's docs).
            assert x[n-1]['t'] == 'Image'
            image = x[n-1]
            s = image['c'][-1][0]
            # '%20%7B' is the url-encoded ' {' that opens an attr string.
            if '%20%7B' in s:
                path = s[:s.index('%20%7B')]
                attrs = unquote(s[s.index('%7B'):])
                image['c'][-1][0] = path  # Remove attr string from the path
                return PandocAttributes(attrs.strip(), 'markdown').to_pandoc()
        raise
def _process_figure(value, fmt):
    """Processes the figure. Returns a dict containing figure properties.

    Side effects: mutates ``value`` (the figure's AST node) in place and
    updates the module globals ``Nreferences``, ``has_unnumbered_figures``,
    ``cursec``, ``references`` and ``unreferenceable``.
    """
    # pylint: disable=global-statement
    global Nreferences  # Global references counter
    global has_unnumbered_figures  # Flags unnumbered figures were found
    global cursec  # Current section
    # Parse the image
    attrs, caption = value[0]['c'][:2]
    # Initialize the return value
    fig = {'is_unnumbered': False,
           'is_unreferenceable': False,
           'is_tagged': False,
           'attrs': attrs}
    # Bail out if the label does not conform
    if not LABEL_PATTERN.match(attrs[0]):
        has_unnumbered_figures = True
        fig['is_unnumbered'] = True
        fig['is_unreferenceable'] = True
        return fig
    # Process unreferenceable figures
    if attrs[0] == 'fig:':  # Make up a unique description
        attrs[0] = attrs[0] + str(uuid.uuid4())
        fig['is_unreferenceable'] = True
        unreferenceable.append(attrs[0])
    # For html, hard-code in the section numbers as tags
    kvs = PandocAttributes(attrs, 'pandoc').kvs
    if numbersections and fmt in ['html', 'html5'] and 'tag' not in kvs:
        # Reset the per-section counter on section change.
        if kvs['secno'] != cursec:
            cursec = kvs['secno']
            Nreferences = 1
        kvs['tag'] = cursec + '.' + str(Nreferences)
        Nreferences += 1
    # Save to the global references tracker
    fig['is_tagged'] = 'tag' in kvs
    if fig['is_tagged']:
        # Remove any surrounding quotes
        if kvs['tag'][0] == '"' and kvs['tag'][-1] == '"':
            kvs['tag'] = kvs['tag'].strip('"')
        elif kvs['tag'][0] == "'" and kvs['tag'][-1] == "'":
            kvs['tag'] = kvs['tag'].strip("'")
        references[attrs[0]] = kvs['tag']
    else:
        # Numbered reference: store the running counter value.
        Nreferences += 1
        references[attrs[0]] = Nreferences
    # Adjust caption depending on the output format
    if fmt in ['latex', 'beamer']:  # Append a \label if this is referenceable
        if not fig['is_unreferenceable']:
            value[0]['c'][1] += [RawInline('tex', r'\label{%s}'%attrs[0])]
    else:  # Hard-code in the caption name and number/tag
        if isinstance(references[attrs[0]], int):  # Numbered reference
            value[0]['c'][1] = [RawInline('html', r'<span>'),
                                Str(captionname), Space(),
                                Str('%d:'%references[attrs[0]]),
                                RawInline('html', r'</span>')] \
                if fmt in ['html', 'html5'] else \
                [Str(captionname), Space(), Str('%d:'%references[attrs[0]])]
            value[0]['c'][1] += [Space()] + list(caption)
        else:  # Tagged reference
            assert isinstance(references[attrs[0]], STRTYPES)
            text = references[attrs[0]]
            if text.startswith('$') and text.endswith('$'):  # Math
                # Protect spaces inside the inline-math tag.
                math = text.replace(' ', r'\ ')[1:-1]
                els = [Math({"t":"InlineMath", "c":[]}, math), Str(':')]
            else:  # Text
                els = [Str(text+':')]
            value[0]['c'][1] = \
                [RawInline('html', r'<span>'), Str(captionname), Space()] + \
                els + [RawInline('html', r'</span>')] \
                if fmt in ['html', 'html5'] else \
                [Str(captionname), Space()] + els
            value[0]['c'][1] += [Space()] + list(caption)
    return fig
def process_figures(key, value, fmt, meta):  # pylint: disable=unused-argument
    """Processes the figures.

    Pandoc filter action: for Para-wrapped images labelled ``fig:...``,
    numbers/tags the figure and returns replacement block(s) suited to the
    output format, or None to leave the element unchanged.
    """
    global has_unnumbered_figures  # pylint: disable=global-statement
    # Process figures wrapped in Para elements
    if key == 'Para' and len(value) == 1 and \
       value[0]['t'] == 'Image' and value[0]['c'][-1][1].startswith('fig:'):
        # Inspect the image
        if len(value[0]['c']) == 2:  # Unattributed, bail out
            has_unnumbered_figures = True
            if fmt == 'latex':
                # Suppress the "Figure N:" prefix for unnumbered figures.
                return [RawBlock('tex', r'\begin{no-prefix-figure-caption}'),
                        Para(value),
                        RawBlock('tex', r'\end{no-prefix-figure-caption}')]
            return None
        # Process the figure
        fig = _process_figure(value, fmt)
        # Context-dependent output
        attrs = fig['attrs']
        if fig['is_unnumbered']:  # Unnumbered is also unreferenceable
            if fmt == 'latex':
                return [
                    RawBlock('tex', r'\begin{no-prefix-figure-caption}'),
                    Para(value),
                    RawBlock('tex', r'\end{no-prefix-figure-caption}')]
        elif fmt in ['latex', 'beamer']:
            key = attrs[0]
            if PANDOCVERSION >= '1.17':
                # Remove id from the image attributes. It is incorrectly
                # handled by pandoc's TeX writer for these versions.
                if LABEL_PATTERN.match(attrs[0]):
                    attrs[0] = ''
            if fig['is_tagged']:  # Code in the tags
                # Temporarily override \thefigure with the tag, then
                # restore it and undo the counter bump afterwards.
                tex = '\n'.join([r'\let\oldthefigure=\thefigure',
                                 r'\renewcommand\thefigure{%s}'%\
                                 references[key]])
                pre = RawBlock('tex', tex)
                tex = '\n'.join([r'\let\thefigure=\oldthefigure',
                                 r'\addtocounter{figure}{-1}'])
                post = RawBlock('tex', tex)
                return [pre, Para(value), post]
        elif fig['is_unreferenceable']:
            attrs[0] = ''  # The label isn't needed any further
        elif PANDOCVERSION < '1.16' and fmt in ('html', 'html5') \
                and LABEL_PATTERN.match(attrs[0]):
            # Insert anchor
            anchor = RawBlock('html', '<a name="%s"></a>'%attrs[0])
            return [anchor, Para(value)]
        elif fmt == 'docx':
            # As per http://officeopenxml.com/WPhyperlink.php
            bookmarkstart = \
                RawBlock('openxml',
                         '<w:bookmarkStart w:id="0" w:name="%s"/>'
                         %attrs[0])
            bookmarkend = \
                RawBlock('openxml', '<w:bookmarkEnd w:id="0"/>')
            return [bookmarkstart, Para(value), bookmarkend]
    return None
def main():
    """Filters the document AST.

    Reads a pandoc JSON document from STDIN, numbers/links the figures in
    two walk passes, optionally injects supporting TeX, and writes the
    modified document to STDOUT.
    """
    # pylint: disable=global-statement
    global PANDOCVERSION
    global Image
    # Get the output format and document
    fmt = args.fmt
    doc = json.loads(STDIN.read())
    # Initialize pandocxnos
    # pylint: disable=too-many-function-args
    PANDOCVERSION = pandocxnos.init(args.pandocversion, doc)
    # Element primitives
    if PANDOCVERSION < '1.16':
        # Older pandoc Image elements carry 2 fields (no attributes).
        Image = elt('Image', 2)
    # Chop up the doc (the JSON layout changed in pandoc 1.18)
    meta = doc['meta'] if PANDOCVERSION >= '1.18' else doc[0]['unMeta']
    blocks = doc['blocks'] if PANDOCVERSION >= '1.18' else doc[1:]
    # Process the metadata variables
    process(meta)
    # First pass: number the figures (older pandoc needs the attrs
    # attached/detached around the walk).
    attach_attrs_image = attach_attrs_factory(Image,
                                              extract_attrs=_extract_attrs)
    detach_attrs_image = detach_attrs_factory(Image)
    insert_secnos = insert_secnos_factory(Image)
    delete_secnos = delete_secnos_factory(Image)
    filters = [insert_secnos, process_figures, delete_secnos] \
        if PANDOCVERSION >= '1.16' else \
        [attach_attrs_image, insert_secnos, process_figures,
         delete_secnos, detach_attrs_image]
    altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
                               filters, blocks)
    # Second pass: resolve the references collected in the first pass.
    process_refs = process_refs_factory(references.keys())
    replace_refs = replace_refs_factory(references,
                                        use_cleveref_default, False,
                                        plusname if not capitalize else
                                        [name.title() for name in plusname],
                                        starname, 'figure')
    altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
                               [repair_refs, process_refs, replace_refs],
                               altered)
    # Insert supporting TeX
    if fmt == 'latex':
        rawblocks = []
        if has_unnumbered_figures:
            rawblocks += [RawBlock('tex', TEX0),
                          RawBlock('tex', TEX1),
                          RawBlock('tex', TEX2)]
        if captionname != 'Figure':
            rawblocks += [RawBlock('tex', TEX3 % captionname)]
        insert_rawblocks = insert_rawblocks_factory(rawblocks)
        altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
                                   [insert_rawblocks], altered)
    # Update the doc
    if PANDOCVERSION >= '1.18':
        doc['blocks'] = altered
    else:
        doc = doc[:1] + altered
    # Dump the results
    json.dump(doc, STDOUT)
    # Flush stdout
    STDOUT.flush()
def i18n_javascript(self, request):
    """
    Displays the i18n JavaScript that the Django admin requires.
    This takes into account the USE_I18N setting. If it's set to False, the
    generated JavaScript will be leaner and faster.
    """
    if settings.USE_I18N:
        from django.views.i18n import javascript_catalog
    else:
        # Serves an empty catalog so the admin JS still loads without i18n.
        from django.views.i18n import null_javascript_catalog as javascript_catalog
    return javascript_catalog(request, packages=['media_tree'])
def file_links(items, opts=None):
    """
    Turns a (optionally nested) list of FileNode objects into a list of
    strings, linking to the associated files.

    Nested lists produce nested result lists (the structure is preserved).
    """
    result = []
    kwargs = get_kwargs_for_file_link(opts)
    for item in items:
        if isinstance(item, FileNode):
            result.append(get_file_link(item, **kwargs))
        else:
            # Recurse into nested lists.
            # NOTE(review): the recursive call passes the already-expanded
            # ``kwargs`` as ``opts``; verify get_kwargs_for_file_link() is
            # idempotent for such input, otherwise ``opts`` should be passed.
            result.append(file_links(item, kwargs))
    return result
def get_results(self, request):
    """
    Temporarily decreases the `level` attribute of all search results in
    order to prevent indentation when displaying them.

    The original levels are kept on ``item.actual_level`` and saving is
    blocked so the tweak cannot be persisted.
    """
    super(MediaTreeChangeList, self).get_results(request)
    try:
        reduce_levels = abs(int(get_request_attr(request, 'reduce_levels', 0)))
    except TypeError:
        # Attribute missing or non-numeric: no level reduction.
        reduce_levels = 0
    is_filtered = self.is_filtered(request)
    if is_filtered or reduce_levels:
        for item in self.result_list:
            # Guard against accidentally persisting the tweaked levels.
            item.prevent_save()
            item.actual_level = item.level
            if is_filtered:
                # Filtered views show a flat list.
                item.reduce_levels = item.level
                item.level = 0
            else:
                item.reduce_levels = reduce_levels
                item.level = max(0, item.level - reduce_levels)
def forwards(self, orm):
    """Re-save every FileNode so model-level save logic runs for each row."""
    nodes = orm.FileNode.objects.all()
    for entry in nodes:
        entry.save()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.