code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def wait_for_available(self, operation_timeout=None):
    """
    Wait for the Console (HMC) this client is connected to, to become
    available. The Console is considered available if the
    :meth:`~zhmcclient.Client.query_api_version` method succeeds.

    Parameters:

      operation_timeout (:term:`number`):
        Timeout in seconds, when waiting for the Console to become
        available. The special value 0 means that no timeout is set.
        `None` means that the default async operation timeout of the
        session is used.

    Raises:

      :exc:`~zhmcclient.OperationTimeout`: The timeout expired while
        waiting for the Console to become available.
    """
    if operation_timeout is None:
        operation_timeout = \
            self.session.retry_timeout_config.operation_timeout
    # A deadline of None means: wait forever.
    deadline = None
    if operation_timeout > 0:
        deadline = time.time() + operation_timeout
    while True:
        try:
            self.query_api_version()
            return  # Console is available
        except Error:
            # The Console is not yet available; keep polling.
            pass
        if deadline is not None and time.time() > deadline:
            raise OperationTimeout(
                "Waiting for Console at {} to become available timed "
                "out (operation timeout: {} s)".
                format(self.session.host, operation_timeout),
                operation_timeout)
        time.sleep(10)  # poll interval between availability checks
3.190202
2.662056
1.198398
def pull_full_properties(self):
    """
    Retrieve the full set of resource properties from the HMC and cache
    them in this object.

    Authorization requirements:

    * Object-access permission to this resource.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    retrieved = self.manager.session.get(self._uri)
    self._properties = dict(retrieved)
    # Remember when the cache was filled, and that it is complete.
    self._properties_timestamp = int(time.time())
    self._full_properties = True
5.344421
5.219699
1.023895
def get_property(self, name):
    """
    Return the value of a resource property.

    If the resource property is not yet cached in this object, the full
    set of resource properties is retrieved and cached, and the lookup
    is attempted again.

    Authorization requirements:

    * Object-access permission to this resource.

    Parameters:

      name (:term:`string`):
        Name of the resource property, using the names defined in the
        respective 'Data model' sections in the :term:`HMC API` book.

    Returns:

      The value of the resource property.

    Raises:

      KeyError: The resource property could not be found (also not in
        the full set of resource properties).
      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    if name in self._properties:
        return self._properties[name]
    if self._full_properties:
        # Full set already cached: the property really does not exist.
        raise KeyError(name)
    self.pull_full_properties()
    return self._properties[name]
3.779234
3.98422
0.948551
def make_client(zhmc, userid=None, password=None):
    """
    Create a `Session` object for the specified HMC and log that on.
    Create a `Client` object using that `Session` object, and return it.

    If no userid and password are specified, and if no previous call to
    this method was made, userid and password are interactively
    inquired. Userid and password are saved in module-global variables
    for future calls to this method.
    """
    global USERID, PASSWORD  # pylint: disable=global-statement
    # Bug fix: six has no top-level 'input' attribute; the portable
    # prompt function is six.moves.input (input() on py3, raw_input()
    # on py2). The original 'six.input(...)' raised AttributeError
    # whenever the userid had to be inquired interactively.
    USERID = userid or USERID or \
        six.moves.input('Enter userid for HMC {}: '.format(zhmc))
    PASSWORD = password or PASSWORD or \
        getpass.getpass('Enter password for {}: '.format(USERID))
    session = zhmcclient.Session(zhmc, USERID, PASSWORD)
    session.logon()
    client = zhmcclient.Client(session)
    print('Established logged-on session with HMC {} using userid {}'.
          format(zhmc, USERID))
    return client
2.859025
2.955585
0.96733
def repr_text(text, indent):
    """
    Return a debug representation of a multi-line text (e.g. the result
    of another repr...() function).
    """
    if text is None:
        return 'None'
    # Indent all lines, then strip the indent from the first line only.
    return _indent(text, amount=indent).lstrip(' ')
6.870003
7.525999
0.912836
def repr_list(_list, indent):
    """
    Return a debug representation of a list or tuple.
    """
    # pprint represents lists and tuples in one row if possible. We want
    # one per row, so we iterate ourselves.
    if _list is None:
        return 'None'
    if isinstance(_list, MutableSequence):
        begin_marker, end_marker = '[', ']'
    elif isinstance(_list, Iterable):
        begin_marker, end_marker = '(', ')'
    else:
        raise TypeError("Object must be an iterable, but is a %s" %
                        type(_list))
    parts = [begin_marker + '\n']
    for item in _list:
        parts.append(_indent('%r,\n' % item, 2))
    parts.append(end_marker)
    text = repr_text(''.join(parts), indent=indent)
    return text.lstrip(' ')
5.350046
5.157012
1.037431
def repr_dict(_dict, indent):
    """
    Return a debug representation of a dict or OrderedDict.
    """
    # pprint represents OrderedDict objects using the tuple init syntax,
    # which is not very readable. Therefore, dictionaries are iterated
    # over.
    if _dict is None:
        return 'None'
    if not isinstance(_dict, Mapping):
        raise TypeError("Object must be a mapping, but is a %s" %
                        type(_dict))
    if isinstance(_dict, OrderedDict):
        # Preserve the dict's own ordering.
        kind = 'ordered'
        keys = six.iterkeys(_dict)
    else:  # dict
        kind = 'sorted'
        keys = sorted(six.iterkeys(_dict))
    ret = '%s {\n' % kind  # non standard syntax for the kind indicator
    for key in keys:
        ret += _indent('%r: %r,\n' % (key, _dict[key]), 2)
    ret += '}'
    return repr_text(ret, indent=indent).lstrip(' ')
3.764752
3.654255
1.030238
def repr_timestamp(timestamp):
    """
    Return a debug representation of an HMC timestamp number.
    """
    if timestamp is None:
        return 'None'
    dt = datetime_from_timestamp(timestamp)
    return "%d (%s)" % (timestamp,
                        dt.strftime('%Y-%m-%d %H:%M:%S.%f %Z'))
3.219038
2.95664
1.088749
def datetime_from_timestamp(ts):
    """
    Convert an :term:`HMC timestamp number <timestamp>` into a
    timezone-aware :class:`~py:datetime.datetime` object for timezone
    UTC.

    The HMC timestamp number must be non-negative; in particular the
    special timestamp value -1 cannot be represented as datetime and
    causes ``ValueError`` to be raised.

    The supported range is bounded below by the UNIX epoch
    (1970-01-01 00:00:00 UTC) and above by platform limits of the
    underlying C time conversion (e.g. year 2038 on some 32-bit
    Pythons, year 3001 on Windows CPython, year 9999 otherwise).

    Parameters:

      ts (:term:`timestamp`): Point in time as an HMC timestamp number.
        Must not be `None`.

    Returns:

      :class:`~py:datetime.datetime`: Timezone-aware datetime in UTC.

    Raises:

      ValueError
    """
    # In Python 2, "None < 0" is allowed and returns True, therefore we
    # do an extra check for None.
    if ts is None:
        raise ValueError("HMC timestamp value must not be None.")
    if ts < 0:
        raise ValueError(
            "Negative HMC timestamp value {} cannot be represented as "
            "datetime.".format(ts))
    # HMC timestamps are milliseconds since the epoch.
    epoch_seconds, millis = divmod(ts, 1000)
    try:
        dt = datetime.fromtimestamp(epoch_seconds, pytz.utc)
    except (ValueError, OSError) as exc:
        raise ValueError(str(exc))
    return dt.replace(microsecond=millis * 1000)
3.68514
3.397171
1.084767
def timestamp_from_datetime(dt):
    """
    Convert a :class:`~py:datetime.datetime` object into an
    :term:`HMC timestamp number <timestamp>`.

    The supported range is bounded below by the UNIX epoch
    (1970-01-01 00:00:00 UTC) and above by
    :attr:`py:datetime.datetime.max` / platform limits.

    Parameters:

      dt (:class:`~py:datetime.datetime`): Point in time. May be
        timezone-aware or timezone-naive; if naive, the UTC timezone is
        assumed. Must not be `None`.

    Returns:

      :term:`timestamp`: Point in time as an HMC timestamp number.

    Raises:

      ValueError
    """
    if dt is None:
        raise ValueError("datetime value must not be None.")
    if dt.tzinfo is None:
        # Timezone-naive input: apply the default (UTC) timezone.
        dt = pytz.utc.localize(dt)
    seconds_since_epoch = (dt - _EPOCH_DT).total_seconds()
    # HMC timestamps are integer milliseconds since the epoch.
    return int(seconds_since_epoch * 1000)
3.435698
4.701482
0.730769
def create(self, properties):
    """
    Create and configure a storage group on the HMC.

    The new storage group will be associated with the CPC identified by
    the `cpc-uri` input property.

    Authorization requirements:

    * Object-access permission to the CPC that will be associated with
      the new storage group.
    * Task permission to the "Configure Storage - System Programmer"
      task.

    Parameters:

      properties (dict): Initial property values, as defined in section
        'Request body contents' in section 'Create Storage Group' in
        the :term:`HMC API` book. The 'cpc-uri' property is required.

    Returns:

      :class:`~zhmcclient.StorageGroup`: The resource object for the
      new storage group, with 'object-uri' set as returned by the HMC
      and the input properties set.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    if properties is None:
        properties = {}
    result = self.session.post(self._base_uri, body=properties)
    # There should not be overlaps, but just in case there are, the
    # returned props should overwrite the input props:
    merged_props = copy.deepcopy(properties)
    merged_props.update(result)
    uri = merged_props[self._uri_prop]
    name = merged_props.get(self._name_prop, None)
    new_storage_group = StorageGroup(self, uri, name, merged_props)
    self._name_uri_cache.update(name, uri)
    return new_storage_group
4.85321
5.410991
0.896917
def storage_volumes(self):
    """
    :class:`~zhmcclient.StorageVolumeManager`: Access to the
    :term:`storage volumes <storage volume>` in this storage group.
    """
    # We do here some lazy loading.
    if not self._storage_volumes:
        self._storage_volumes = StorageVolumeManager(self)
    return self._storage_volumes
6.126968
4.88896
1.253225
def virtual_storage_resources(self):
    """
    :class:`~zhmcclient.VirtualStorageResourceManager`: Access to the
    :term:`virtual storage resources <Virtual Storage Resource>` in
    this storage group.
    """
    # We do here some lazy loading.
    if not self._virtual_storage_resources:
        self._virtual_storage_resources = \
            VirtualStorageResourceManager(self)
    return self._virtual_storage_resources
4.885727
3.938463
1.240516
def cpc(self):
    """
    :class:`~zhmcclient.Cpc`: The :term:`CPC` to which this storage
    group is associated.

    The returned :class:`~zhmcclient.Cpc` has only a minimal set of
    properties populated.
    """
    # We do here some lazy loading.
    if not self._cpc:
        cpc_uri = self.get_property('cpc-uri')
        cpc_mgr = self.manager.console.manager.client.cpcs
        self._cpc = cpc_mgr.resource_object(cpc_uri)
    return self._cpc
5.515237
4.421077
1.247487
def list_attached_partitions(self, name=None, status=None):
    """
    Return the partitions to which this storage group is currently
    attached, optionally filtered by partition name and status.

    Authorization requirements:

    * Object-access permission to this storage group.
    * Task permission to the "Configure Storage - System Programmer"
      task.

    Parameters:

      name (:term:`string`): Filter pattern (regular expression) to
        limit returned partitions to those that have a matching name.
        If `None`, no filtering for the partition name takes place.

      status (:term:`string`): Filter string to limit returned
        partitions to those that have a matching status. The value must
        be a valid partition status property value. If `None`, no
        filtering for the partition status takes place.

    Returns:

      List of :class:`~zhmcclient.Partition` objects representing the
      partitions to which this storage group is currently attached,
      with a minimal set of properties ('object-id', 'name', 'status').

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    query_parms = []
    if name is not None:
        self.manager._append_query_parms(query_parms, 'name', name)
    if status is not None:
        self.manager._append_query_parms(query_parms, 'status', status)
    query_str = '&'.join(query_parms)
    if query_str:
        query_str = '?{}'.format(query_str)
    uri = '{}/operations/get-partitions{}'.format(self.uri, query_str)
    # Resolve the partition manager of the owning CPC before issuing
    # the request, so its minimal Partition objects can be built.
    part_mgr = self.cpc.partitions
    result = self.manager.session.get(uri)
    return [part_mgr.resource_object(props['object-uri'], props)
            for props in result['partitions']]
2.874281
2.757231
1.042452
def add_candidate_adapter_ports(self, ports):
    """
    Add a list of storage adapter ports to this storage group's
    candidate adapter ports list.

    This operation only applies to storage groups of type "fcp". The
    added adapter ports become candidates for use as backing adapters
    when creating virtual storage resources when the storage group is
    attached to a partition; they should have connectivity to the SAN.

    Candidate adapter ports may only be added before the CPC discovers
    a working communications path (indicated by a "verified" status on
    at least one of this storage group's WWPNs); after that point, all
    adapter ports are detected automatically and manual adding is no
    longer possible. Because discovery is automatic, adding candidate
    ports is optional; added ports are validated by the CPC and may or
    may not actually be used.

    Authorization requirements:

    * Object-access permission to this storage group.
    * Object-access permission to the adapter of each specified port.
    * Task permission to the "Configure Storage - System Programmer"
      task.

    Parameters:

      ports (:class:`py:list`): List of :class:`~zhmcclient.Port`
        objects representing the ports to be added. All specified ports
        must not already be members of this storage group's candidate
        adapter ports list.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    port_uris = [port.uri for port in ports]
    self.manager.session.post(
        self.uri + '/operations/add-candidate-adapter-ports',
        body={'adapter-port-uris': port_uris})
4.541371
4.784651
0.949154
def list_candidate_adapter_ports(self, full_properties=False):
    """
    Return the current candidate storage adapter port list of this
    storage group.

    The result reflects the actual list of ports used by the CPC,
    including any changes made during discovery; the source is the
    'candidate-adapter-port-uris' property of the storage group object.

    Parameters:

      full_properties (bool): Controls whether the full set of resource
        properties for each returned candidate storage adapter port is
        retrieved, vs. only a short set.

    Returns:

      List of :class:`~zhmcclient.Port` objects representing the
      current candidate storage adapter ports of this storage group.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    adapter_mgr = self.cpc.adapters
    port_uris = self.get_property('candidate-adapter-port-uris')
    port_list = []
    for port_uri in port_uris or []:
        # Derive the parent adapter URI from the port URI.
        match = re.match(r'^(/api/adapters/[^/]*)/.*', port_uri)
        adapter = adapter_mgr.resource_object(match.group(1))
        port = adapter.ports.resource_object(port_uri)
        if full_properties:
            port.pull_full_properties()
        port_list.append(port)
    return port_list
2.977488
2.697731
1.103701
def create_hipersocket(self, properties):
    """
    Create and configure a HiperSockets Adapter in this CPC.

    Authorization requirements:

    * Object-access permission to the scoping CPC.
    * Task permission to the "Create HiperSockets Adapter" task.

    Parameters:

      properties (dict): Initial property values, as defined in section
        'Request body contents' in section 'Create Hipersocket' in the
        :term:`HMC API` book.

    Returns:

      :class:`~zhmcclient.Adapter`: The resource object for the new
      HiperSockets Adapter, with 'object-uri' set as returned by the
      HMC and the input properties set.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    result = self.session.post(self.cpc.uri + '/adapters',
                               body=properties)
    # There should not be overlaps, but just in case there are, the
    # returned props should overwrite the input props:
    merged_props = copy.deepcopy(properties)
    merged_props.update(result)
    uri = merged_props[self._uri_prop]
    name = merged_props.get(self._name_prop, None)
    new_adapter = Adapter(self, uri, name, merged_props)
    self._name_uri_cache.update(name, uri)
    return new_adapter
5.642114
4.841099
1.165461
def ports(self):
    """
    :class:`~zhmcclient.PortManager`: Access to the
    :term:`Ports <Port>` of this Adapter.
    """
    # We do here some lazy loading.
    if not self._ports:
        family = self.get_property('adapter-family')
        try:
            port_type = self.port_type_by_family[family]
        except KeyError:
            # Adapter family without a known port type.
            port_type = None
        self._ports = PortManager(self, port_type)
    return self._ports
4.896787
3.982337
1.229627
def port_uris_prop(self):
    """
    :term:`string`: Name of the adapter property that specifies the
    adapter port URIs (e.g. 'network-port-uris' for a network adapter),
    or the empty string ('') for adapters without ports.
    """
    if self._port_uris_prop is None:
        family = self.get_property('adapter-family')
        # '' indicates an adapter family without ports.
        self._port_uris_prop = \
            self.port_uris_prop_by_family.get(family, '')
    return self._port_uris_prop
2.911953
2.580349
1.128511
def port_uri_segment(self):
    """
    :term:`string`: Adapter type specific URI segment for adapter port
    URIs (e.g. 'network-ports' for a network adapter), or the empty
    string ('') for adapters without ports.
    """
    if self._port_uri_segment is None:
        family = self.get_property('adapter-family')
        # '' indicates an adapter family without ports.
        self._port_uri_segment = \
            self.port_uri_segment_by_family.get(family, '')
    return self._port_uri_segment
3.329892
2.862792
1.163163
def maximum_crypto_domains(self):
    """
    Integer: The maximum number of crypto domains on this crypto
    adapter, or `None` if this adapter is not a crypto adapter.

    For the supported Crypto Express card types, this equals the
    maximum number of active partitions of the CPC (e.g. 85 for
    z14 / z13, 40 for z14-ZR1 / z13s, with corresponding LinuxONE
    machine generations).

    If the crypto adapter card type is not known, :exc:`ValueError` is
    raised.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
      :exc:`ValueError`: Unknown crypto card type
    """
    if self.get_property('adapter-family') != 'crypto':
        return None
    card_type = self.get_property('detected-card-type')
    if not card_type.startswith('crypto-express-'):
        raise ValueError("Unknown crypto card type: {!r}".
                         format(card_type))
    return self.manager.cpc.maximum_active_partitions
6.797757
4.83476
1.406017
def change_crypto_type(self, crypto_type, zeroize=None):
    """
    Reconfigure a cryptographic adapter to a different crypto type.

    This operation is only supported for cryptographic adapters, and
    the adapter must be varied offline before its crypto type can be
    reconfigured.

    Authorization requirements:

    * Object-access permission to this Adapter.
    * Task permission to the "Adapter Details" task.

    Parameters:

      crypto_type (:term:`string`):
        - ``"accelerator"``: Crypto Express5S Accelerator
        - ``"cca-coprocessor"``: Crypto Express5S CCA Coprocessor
        - ``"ep11-coprocessor"``: Crypto Express5S EP11 Coprocessor

      zeroize (bool): Whether the adapter will be zeroized when it is
        reconfigured to crypto type ``"accelerator"``. `None` means the
        HMC-implemented default of `True` is used.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    request_body = {'crypto-type': crypto_type}
    if zeroize is not None:
        request_body['zeroize'] = zeroize
    uri = self.uri + '/operations/change-crypto-type'
    self.manager.session.post(uri, request_body)
3.5157
3.4146
1.029608
def change_adapter_type(self, adapter_type):
    """
    Reconfigure an adapter from one type to another, or to
    unconfigured.

    Currently, only storage adapters can be reconfigured, and their
    adapter type is the supported storage protocol (FCP vs. FICON).

    Storage adapter instances (i.e. :class:`~zhmcclient.Adapter`
    objects) represent daughter cards on a physical storage card.
    Current storage cards require both daughter cards to be configured
    to the same protocol, so changing the type of the targeted adapter
    also changes the type of the adapter instance representing the
    other daughter card on the same physical card. The related adapter
    instance can be found by matching the first 9 characters (card ID
    and slot ID) of the `card-location` property values.

    The targeted adapter and its related adapter must not already have
    the desired adapter type, must not be attached to any partition,
    and must not have an adapter status of 'exceptions'.

    Authorization requirements:

    * Object-access permission to this Adapter.
    * Task permission to the "Configure Storage - System Programmer"
      task.

    Parameters:

      adapter_type (:term:`string`):
        - ``"fcp"``: FCP (Fibre Channel Protocol)
        - ``"fc"``: FICON (Fibre Connection) protocol
        - ``"not-configured"``: No adapter type configured

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    uri = self.uri + '/operations/change-adapter-type'
    self.manager.session.post(uri, {'type': adapter_type})
6.273591
6.70444
0.935737
def stop(self, wait_for_completion=True, operation_timeout=None,
         status_timeout=None, allow_status_exceptions=False):
    """
    Stop this LPAR, using the HMC operation "Stop Logical Partition".
    The stop operation stops the processors from processing
    instructions.

    This HMC operation has deferred status behavior: after the
    asynchronous job on the HMC completes, it takes a few seconds until
    the LPAR status reaches the desired value. With
    `wait_for_completion=True`, this method repeatedly checks the LPAR
    status after the HMC operation has completed, and waits until the
    status is "operating" (or additionally "exceptions", if
    `allow_status_exceptions` was set).

    Authorization requirements:

    * Object-access permission to the CPC containing this LPAR.
    * Object-access permission to this LPAR.
    * Task permission for the "Stop" task.

    Parameters:

      wait_for_completion (bool): If `True`, wait for completion of the
        asynchronous job and for the status to reach the desired value;
        if `False`, return as soon as the HMC has accepted the request.

      operation_timeout (:term:`number`): Timeout in seconds for
        waiting for completion of the asynchronous job. 0 means no
        timeout; `None` means the default async operation timeout of
        the session is used. On expiry (with
        `wait_for_completion=True`), :exc:`~zhmcclient.OperationTimeout`
        is raised.

      status_timeout (:term:`number`): Timeout in seconds for waiting
        for the desired LPAR status after the HMC operation completed.
        0 means no timeout; `None` means the default async operation
        timeout of the session is used. On expiry (with
        `wait_for_completion=True`), :exc:`~zhmcclient.StatusTimeout`
        is raised.

      allow_status_exceptions (bool): Also accept LPAR status
        "exceptions" as an end status when `wait_for_completion` is
        set.

    Returns:

      `None` if `wait_for_completion` is `True`; otherwise a
      :class:`~zhmcclient.Job` object representing the asynchronously
      executing job on the HMC.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
      :exc:`~zhmcclient.OperationTimeout`: The timeout expired while
        waiting for completion of the operation.
      :exc:`~zhmcclient.StatusTimeout`: The timeout expired while
        waiting for the desired LPAR status.
    """
    result = self.manager.session.post(
        self.uri + '/operations/stop', {},
        wait_for_completion=wait_for_completion,
        operation_timeout=operation_timeout)
    if wait_for_completion:
        acceptable_statuses = ["operating"]
        if allow_status_exceptions:
            acceptable_statuses.append("exceptions")
        self.wait_for_status(acceptable_statuses, status_timeout)
    return result
3.358942
3.218004
1.043797
def _handle_request_exc(exc, retry_timeout_config):
    """
    Handle a :exc:`request.exceptions.RequestException` exception that
    was raised, by re-raising it as the corresponding zhmcclient
    exception.
    """
    # _request_exc_message() is a pure function, so it can be computed
    # once up front.
    message = _request_exc_message(exc)
    config = retry_timeout_config
    if isinstance(exc, requests.exceptions.ConnectTimeout):
        raise ConnectTimeout(message, exc, config.connect_timeout,
                             config.connect_retries)
    if isinstance(exc, requests.exceptions.ReadTimeout):
        raise ReadTimeout(message, exc, config.read_timeout,
                          config.read_retries)
    if isinstance(exc, requests.exceptions.RetryError):
        raise RetriesExceeded(message, exc, config.connect_retries)
    raise ConnectionError(message, exc)
1.771455
1.773634
0.998771
if exc.args: if isinstance(exc.args[0], Exception): org_exc = exc.args[0] if isinstance(org_exc, urllib3.exceptions.MaxRetryError): reason_exc = org_exc.reason message = str(reason_exc) else: message = str(org_exc.args[0]) else: message = str(exc.args[0]) # Eliminate useless object repr at begin of the message m = re.match(r'^(\(<[^>]+>, \'(.*)\'\)|<[^>]+>: (.*))$', message) if m: message = m.group(2) or m.group(3) else: message = "" return message
def _request_exc_message(exc)
Return a reasonable exception message from a :exc:`request.exceptions.RequestException` exception. The approach is to dig deep to the original reason, if the original exception is present, skipping irrelevant exceptions such as `urllib3.exceptions.MaxRetryError`, and eliminating useless object representations such as the connection pool object in `urllib3.exceptions.NewConnectionError`. Parameters: exc (:exc:`~request.exceptions.RequestException`): Exception Returns: string: A reasonable exception message from the specified exception.
3.421068
3.276207
1.044216
if text is None: text_repr = 'None' elif len(text) > max_len: text_repr = repr(text[0:max_len]) + '...' else: text_repr = repr(text) return text_repr
def _text_repr(text, max_len=1000)
Return the input text as a Python string representation (i.e. using repr()) that is limited to a maximum length.
1.978405
1.831084
1.080456
def _result_object(result):
    """
    Return the JSON payload in the HTTP response as a Python dict.

    Parameters:

      result (requests.Response): HTTP response object.

    Raises:

      zhmcclient.ParseError: Error parsing the returned JSON.
    """
    content_type = result.headers.get('content-type', None)
    if content_type is None or content_type.startswith('application/json'):
        # This function is only called when there is content expected.
        # Therefore, a response without content will result in a ParseError.
        try:
            return result.json(object_pairs_hook=OrderedDict)
        except ValueError as exc:
            raise ParseError(
                "JSON parse error in HTTP response: {}. "
                "HTTP request: {} {}. "
                "Response status {}. "
                "Response content-type: {!r}. "
                "Content (max.1000, decoded using {}): {}".
                format(exc.args[0], result.request.method,
                       result.request.url, result.status_code,
                       content_type, result.encoding,
                       _text_repr(result.text, 1000)))
    elif content_type.startswith('text/html'):
        # We are in some error situation. The HMC returns HTML content
        # for some 5xx status codes. We try to deal with it somehow,
        # but we are not going as far as real HTML parsing.
        m = re.search(r'charset=([^;,]+)', content_type)
        if m:
            encoding = m.group(1)  # e.g. RFC "ISO-8859-1"
        else:
            encoding = 'utf-8'
        try:
            html_uni = result.content.decode(encoding)
        except LookupError:
            # Unknown encoding name: fall back to the default decoding.
            html_uni = result.content.decode()
        # We convert to one line to be regexp-friendly.
        html_oneline = html_uni.replace('\r\n', '\\n').replace('\r', '\\n').\
            replace('\n', '\\n')
        # Check for some well-known errors:
        if re.search(r'javax\.servlet\.ServletException: '
                     r'Web Services are not enabled\.', html_oneline):
            html_title = "Console Configuration Error"
            html_details = "Web Services API is not enabled on the HMC."
            html_reason = HTML_REASON_WEB_SERVICES_DISABLED
        else:
            m = re.search(
                r'<title>([^<]*)</title>.*'
                r'<h2>Details:</h2>(.*)(<hr size="1" noshade>)?</body>',
                html_oneline)
            if m:
                html_title = m.group(1)
                # Spend a reasonable effort to make the HTML readable:
                html_details = m.group(2).replace('<p>', '\\n').\
                    replace('<br>', '\\n').replace('\\n\\n', '\\n').strip()
            else:
                html_title = "Console Internal Error"
                html_details = "Response body: {!r}".format(html_uni)
            html_reason = HTML_REASON_OTHER
        message = "{}: {}".format(html_title, html_details)
        # We create a minimal JSON error object (to the extent we use it
        # when processing it):
        result_obj = {
            'http-status': result.status_code,
            'reason': html_reason,
            'message': message,
            'request-uri': result.request.url,
            'request-method': result.request.method,
        }
        return result_obj
    elif content_type.startswith('application/vnd.ibm-z-zmanager-metrics'):
        # Metrics responses are returned as text, not parsed here.
        content_bytes = result.content
        assert isinstance(content_bytes, six.binary_type)
        return content_bytes.decode('utf-8')  # as a unicode object
    else:
        raise ParseError(
            "Unknown content type in HTTP response: {}. "
            "HTTP request: {} {}. "
            "Response status {}. "
            "Response content-type: {!r}. "
            "Content (max.1000, decoded using {}): {}".
            format(content_type, result.request.method,
                   result.request.url, result.status_code, content_type,
                   result.encoding, _text_repr(result.text, 1000)))
3.367682
3.272503
1.029084
def override_with(self, override_config):
    """
    Return a new configuration object that represents the configuration
    from this configuration object acting as a default, and the
    specified configuration object overriding that default.

    Parameters:

      override_config (:class:`~zhmcclient.RetryTimeoutConfig`): The
        configuration object overriding the defaults defined in this
        configuration object.

    Returns:

      :class:`~zhmcclient.RetryTimeoutConfig`: A new configuration
      object representing this configuration object, overridden by the
      specified configuration object.
    """
    merged = RetryTimeoutConfig()
    for attr in RetryTimeoutConfig._attrs:
        # An override value of None means: keep the default from self.
        override_value = getattr(override_config, attr) \
            if override_config else None
        setattr(merged, attr,
                override_value if override_value is not None
                else getattr(self, attr))
    return merged
3.184766
2.785151
1.14348
if self._session_id is None: return False if verify: try: self.get('/api/console', logon_required=True) except ServerAuthError: return False return True
def is_logon(self, verify=False)
Return a boolean indicating whether the session is currently logged on to the HMC. By default, this method checks whether there is a session-id set and considers that sufficient for determining that the session is logged on. The `verify` parameter can be used to verify the validity of a session-id that is already set, by issuing a dummy operation ("Get Console Properties") to the HMC. Parameters: verify (bool): If a session-id is already set, verify its validity.
5.793538
5.919715
0.978685
if self._userid is None: raise ClientAuthError("Userid is not provided.") if self._password is None: if self._get_password: self._password = self._get_password(self._host, self._userid) else: raise ClientAuthError("Password is not provided.") logon_uri = '/api/sessions' logon_body = { 'userid': self._userid, 'password': self._password } self._headers.pop('X-API-Session', None) # Just in case self._session = self._new_session(self.retry_timeout_config) logon_res = self.post(logon_uri, logon_body, logon_required=False) self._session_id = logon_res['api-session'] self._headers['X-API-Session'] = self._session_id
def _do_logon(self)
Log on, unconditionally. This can be used to re-logon. This requires credentials to be provided. Raises: :exc:`~zhmcclient.ClientAuthError` :exc:`~zhmcclient.ServerAuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.HTTPError`
3.319466
3.161635
1.049921
retry = requests.packages.urllib3.Retry( total=None, connect=retry_timeout_config.connect_retries, read=retry_timeout_config.read_retries, method_whitelist=retry_timeout_config.method_whitelist, redirect=retry_timeout_config.max_redirects) session = requests.Session() session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retry)) session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retry)) return session
def _new_session(retry_timeout_config)
Return a new `requests.Session` object.
1.850678
1.745009
1.060555
session_uri = '/api/sessions/this-session' self.delete(session_uri, logon_required=False) self._session_id = None self._session = None self._headers.pop('X-API-Session', None)
def _do_logoff(self)
Log off, unconditionally. Raises: :exc:`~zhmcclient.ServerAuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.HTTPError`
6.125429
7.525701
0.813935
if method == 'POST' and url.endswith('/api/sessions'): # In Python 3 up to 3.5, json.loads() requires unicode strings. if sys.version_info[0] == 3 and sys.version_info[1] in (4, 5) and \ isinstance(content, six.binary_type): content = content.decode('utf-8') # Because zhmcclient has built the request, we are not handling # any JSON parse errors from json.loads(). content_dict = json.loads(content) content_dict['password'] = '********' content = json.dumps(content_dict) if headers and 'X-API-Session' in headers: headers = headers.copy() headers['X-API-Session'] = '********' HMC_LOGGER.debug("Request: %s %s, headers: %r, " "content(max.1000): %.1000r", method, url, headers, content)
def _log_http_request(method, url, headers=None, content=None)
Log the HTTP request of an HMC REST API call, at the debug level. Parameters: method (:term:`string`): HTTP method name in upper case, e.g. 'GET' url (:term:`string`): HTTP URL (base URL and operation URI) headers (iterable): HTTP headers used for the request content (:term:`string`): HTTP body (aka content) used for the request
4.04531
4.230929
0.956128
if method == 'POST' and url.endswith('/api/sessions'): # In Python 3 up to 3.5, json.loads() requires unicode strings. if sys.version_info[0] == 3 and sys.version_info[1] in (4, 5) and \ isinstance(content, six.binary_type): content = content.decode('utf-8') try: content_dict = json.loads(content) except ValueError as exc: content = '"Error: Cannot parse JSON payload of response: ' \ '{}"'.format(exc) else: content_dict['api-session'] = '********' content_dict['session-credential'] = '********' content = json.dumps(content_dict) if headers and 'X-API-Session' in headers: headers = headers.copy() headers['X-API-Session'] = '********' HMC_LOGGER.debug("Respons: %s %s, status: %s, headers: %r, " "content(max.1000): %.1000r", method, url, status, headers, content)
def _log_http_response(method, url, status, headers=None, content=None)
Log the HTTP response of an HMC REST API call, at the debug level. Parameters: method (:term:`string`): HTTP method name in upper case, e.g. 'GET' url (:term:`string`): HTTP URL (base URL and operation URI) status (integer): HTTP status code headers (iterable): HTTP headers returned in the response content (:term:`string`): HTTP body (aka content) returned in the response
3.571208
3.668683
0.973431
if logon_required: self.logon() url = self.base_url + uri self._log_http_request('DELETE', url, headers=self.headers) stats = self.time_stats_keeper.get_stats('delete ' + uri) stats.begin() req = self._session or requests req_timeout = (self.retry_timeout_config.connect_timeout, self.retry_timeout_config.read_timeout) try: result = req.delete(url, headers=self.headers, verify=False, timeout=req_timeout) except requests.exceptions.RequestException as exc: _handle_request_exc(exc, self.retry_timeout_config) finally: stats.end() self._log_http_response('DELETE', url, status=result.status_code, headers=result.headers, content=result.content) if result.status_code in (200, 204): return elif result.status_code == 403: result_object = _result_object(result) reason = result_object.get('reason', None) if reason == 5: # API session token expired: re-logon and retry self._do_logon() self.delete(uri, logon_required) return else: msg = result_object.get('message', None) raise ServerAuthError("HTTP authentication failed: {}". format(msg), HTTPError(result_object)) else: result_object = _result_object(result) raise HTTPError(result_object)
def delete(self, uri, logon_required=True)
Perform the HTTP DELETE method against the resource identified by a URI. A set of standard HTTP headers is automatically part of the request. If the HMC session token is expired, this method re-logs on and retries the operation. Parameters: uri (:term:`string`): Relative URI path of the resource, e.g. "/api/session/{session-id}". This URI is relative to the base URL of the session (see the :attr:`~zhmcclient.Session.base_url` property). Must not be `None`. logon_required (bool): Boolean indicating whether the operation requires that the session is logged on to the HMC. For example, for the logoff operation, it does not make sense to first log on. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.ClientAuthError` :exc:`~zhmcclient.ServerAuthError` :exc:`~zhmcclient.ConnectionError`
3.001078
3.092723
0.970367
job_result_obj = self.session.get(self.uri) job_status = job_result_obj['status'] if job_status == 'complete': self.session.delete(self.uri) op_status_code = job_result_obj['job-status-code'] if op_status_code in (200, 201): op_result_obj = job_result_obj.get('job-results', None) elif op_status_code == 204: # No content op_result_obj = None else: error_result_obj = job_result_obj.get('job-results', None) if not error_result_obj: message = None elif 'message' in error_result_obj: message = error_result_obj['message'] elif 'error' in error_result_obj: message = error_result_obj['error'] else: message = None error_obj = { 'http-status': op_status_code, 'reason': job_result_obj['job-reason-code'], 'message': message, 'request-method': self.op_method, 'request-uri': self.op_uri, } raise HTTPError(error_obj) else: op_result_obj = None return job_status, op_result_obj
def check_for_completion(self)
Check once for completion of the job and return completion status and result if it has completed. If the job completed in error, an :exc:`~zhmcclient.HTTPError` exception is raised. Returns: : A tuple (status, result) with: * status (:term:`string`): Completion status of the job, as returned in the ``status`` field of the response body of the "Query Job Status" HMC operation, as follows: * ``"complete"``: Job completed (successfully). * any other value: Job is not yet complete. * result (:term:`json object` or `None`): `None` for incomplete jobs. For completed jobs, the result of the original asynchronous operation that was performed by the job, from the ``job-results`` field of the response body of the "Query Job Status" HMC operation. That result is a :term:`json object` as described for the asynchronous operation, or `None` if the operation has no result. Raises: :exc:`~zhmcclient.HTTPError`: The job completed in error, or the job status cannot be retrieved, or the job cannot be deleted. :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.ClientAuthError` :exc:`~zhmcclient.ServerAuthError` :exc:`~zhmcclient.ConnectionError`
2.256211
2.166304
1.041503
if operation_timeout is None: operation_timeout = \ self.session.retry_timeout_config.operation_timeout if operation_timeout > 0: start_time = time.time() while True: job_status, op_result_obj = self.check_for_completion() # We give completion of status priority over strictly achieving # the timeout, so we check status first. This may cause a longer # duration of the method than prescribed by the timeout. if job_status == 'complete': return op_result_obj if operation_timeout > 0: current_time = time.time() if current_time > start_time + operation_timeout: raise OperationTimeout( "Waiting for completion of job {} timed out " "(operation timeout: {} s)". format(self.uri, operation_timeout), operation_timeout) time.sleep(1)
def wait_for_completion(self, operation_timeout=None)
Wait for completion of the job, then delete the job on the HMC and return the result of the original asynchronous HMC operation, if it completed successfully. If the job completed in error, an :exc:`~zhmcclient.HTTPError` exception is raised. Parameters: operation_timeout (:term:`number`): Timeout in seconds, when waiting for completion of the job. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires, a :exc:`~zhmcclient.OperationTimeout` is raised. This method gives completion of the job priority over strictly achieving the timeout. This may cause a slightly longer duration of the method than prescribed by the timeout. Returns: :term:`json object` or `None`: The result of the original asynchronous operation that was performed by the job, from the ``job-results`` field of the response body of the "Query Job Status" HMC operation. That result is a :term:`json object` as described for the asynchronous operation, or `None` if the operation has no result. Raises: :exc:`~zhmcclient.HTTPError`: The job completed in error, or the job status cannot be retrieved, or the job cannot be deleted. :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.ClientAuthError` :exc:`~zhmcclient.ServerAuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for job completion.
4.753313
2.946095
1.613428
request = current_request() return request is not None and re.match(r'^django\.', request.__module__)
def supports(self, config, context)
Check whether this is a Django request or not. :param config: honeybadger configuration. :param context: the current context. :return: True if this is a Django request, False otherwise.
15.380205
8.729347
1.761896
request = current_request() payload = { 'url': request.build_absolute_uri(), 'component': request.resolver_match.app_name, 'action': request.resolver_match.func.__name__, 'params': {}, 'session': {}, 'cgi_data': dict(request.META), 'context': context } if hasattr(request, 'session'): payload['session'] = filter_dict(dict(request.session), config.params_filters) payload['params'] = filter_dict(dict(getattr(request, request.method)), config.params_filters) return payload
def generate_payload(self, config, context)
Generate payload by checking Django request object. :param context: current context. :param config: honeybadger configuration. :return: a dict with the generated payload.
3.369612
3.209895
1.049758
logger.debug('Called generic_div({}, {})'.format(a, b)) return a / b
def generic_div(a, b)
Simple function to divide two numbers
4.529906
4.747963
0.954074
try: from flask import request except ImportError: return False else: return bool(request)
def supports(self, config, context)
Check whether we are in a Flask request context. :param config: honeybadger configuration. :param context: the current context. :return: True if we are in a Flask request context, False otherwise.
7.244771
4.302402
1.68389
from flask import current_app, session, request as _request current_view = current_app.view_functions[_request.endpoint] if hasattr(current_view, 'view_class'): component = '.'.join((current_view.__module__, current_view.view_class.__name__)) else: component = current_view.__module__ cgi_data = { k: v for k, v in iteritems(_request.headers) } cgi_data.update({ 'REQUEST_METHOD': _request.method }) payload = { 'url': _request.base_url, 'component': component, 'action': _request.endpoint, 'params': {}, 'session': filter_dict(dict(session), config.params_filters), 'cgi_data': cgi_data, 'context': context } # Add query params params = filter_dict(dict(_request.args), config.params_filters) params.update(filter_dict(dict(_request.form), config.params_filters)) payload['params'] = params return payload
def generate_payload(self, config, context)
Generate payload by checking Flask request object. :param context: current context. :param config: honeybadger configuration. :return: a dict with the generated payload.
2.737763
2.596642
1.054347
from flask import request_tearing_down, got_request_exception self.app = app self.app.logger.info('Initializing Honeybadger') self.report_exceptions = report_exceptions self.reset_context_after_request = reset_context_after_request self._initialize_honeybadger(app.config) # Add hooks if self.report_exceptions: self._register_signal_handler('auto-reporting exceptions', got_request_exception, self._handle_exception) if self.reset_context_after_request: self._register_signal_handler('auto clear context on request end', request_tearing_down, self._reset_context) logger.info('Honeybadger helper installed')
def init_app(self, app, report_exceptions=False, reset_context_after_request=False)
Initialize honeybadger and listen for errors. :param Flask app: the Flask application object. :param bool report_exceptions: whether to automatically report exceptions raised by Flask on requests (i.e. by calling abort) or not. :param bool reset_context_after_request: whether to reset honeybadger context after each request.
3.506585
3.3636
1.04251
from flask import signals if not signals.signals_available: self.app.logger.warn('blinker needs to be installed in order to support %s'.format(description)) self.app.logger.info('Enabling {}'.format(description)) # Weak references won't work if handlers are methods rather than functions. signal.connect(handler, sender=self.app, weak=False)
def _register_signal_handler(self, description, signal, handler)
Registers a handler for the given signal. :param description: a short description of the signal to handle. :param signal: the signal to handle. :param handler: the function to use for handling the signal.
7.95177
8.2471
0.96419
if config.get('DEBUG', False): honeybadger.configure(environment='development') honeybadger_config = {} for key, value in iteritems(config): if key.startswith(self.CONFIG_PREFIX): honeybadger_config[key[len(self.CONFIG_PREFIX):].lower()] = value honeybadger.configure(**honeybadger_config) honeybadger.config.set_12factor_config()
def _initialize_honeybadger(self, config)
Initializes honeybadger using the given config object. :param dict config: a dict or dict-like object that contains honeybadger configuration properties.
2.860828
2.997114
0.954528
honeybadger.notify(exception) if self.reset_context_after_request: self._reset_context()
def _handle_exception(self, sender, exception=None)
Actual code handling the exception and sending it to honeybadger if it's enabled. :param T sender: the object sending the exception event. :param Exception exception: the exception to handle.
8.536892
7.55982
1.129245
if plugin.name not in self._registered: logger.info('Registering plugin %s' % plugin.name) self._registered[plugin.name] = plugin else: logger.warn('Plugin %s already registered' % plugin.name)
def register(self, plugin)
Register the given plugin. Registration order is kept. :param plugin: the plugin to register.
2.228755
2.445664
0.911309
for name, plugin in iteritems(self._registered): if plugin.supports(config, context): logger.debug('Returning payload from plugin %s' % name) return plugin.generate_payload(config, context) logger.debug('No active plugin to generate payload') return { 'context': context }
def generate_payload(self, config=None, context=None)
Generate payload by iterating over registered plugins; the first plugin that supports the current request generates the payload. :param context: current context. :param config: honeybadger configuration. :return: a dict with the generated payload.
4.298316
3.892788
1.104174
a = float(request.GET.get('a', '0')) b = float(request.GET.get('b', '0')) return JsonResponse({'result': a / b})
def buggy_div(request)
A buggy endpoint to perform division between query parameters a and b. It will fail if b is equal to 0 or either a or b are not float. :param request: request object :return:
2.449738
2.603153
0.941066
from .._ascii import DAG from .._echo import echo_via_pager echo_via_pager(str(DAG(graph)))
def ascii(graph)
Format graph as an ASCII art.
14.204096
11.882412
1.195388
import json from pyld import jsonld from renku.models._jsonld import asjsonld output = getattr(jsonld, format)([ asjsonld(action) for action in graph.activities.values() ]) return json.dumps(output, indent=2)
def _jsonld(graph, format, *args, **kwargs)
Return formatted graph in JSON-LD ``format`` function.
6.10049
5.907532
1.032663
import sys from rdflib import ConjunctiveGraph from rdflib.plugin import register, Parser from rdflib.tools.rdf2dot import rdf2dot register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser') g = ConjunctiveGraph().parse( data=_jsonld(graph, 'expand'), format='json-ld', ) g.bind('prov', 'http://www.w3.org/ns/prov#') g.bind('wfdesc', 'http://purl.org/wf4ever/wfdesc#') g.bind('wf', 'http://www.w3.org/2005/01/wf/flow#') g.bind('wfprov', 'http://purl.org/wf4ever/wfprov#') if debug: rdf2dot(g, sys.stdout) return sys.stdout.write('digraph { \n node [ fontname="DejaVu Sans" ] ; \n ') if landscape: sys.stdout.write('rankdir="LR" \n') if simple: _rdf2dot_simple(g, sys.stdout) return _rdf2dot_reduced(g, sys.stdout)
def dot(graph, simple=True, debug=False, landscape=False)
Format graph as a dot file.
2.326614
2.305333
1.009231
from itertools import chain import re path_re = re.compile( r'file:///(?P<type>[a-zA-Z]+)/' r'(?P<commit>\w+)' r'(?P<path>.+)?' ) inputs = g.query( ) outputs = g.query( ) activity_nodes = {} artifact_nodes = {} for source, role, target, comment, in chain(inputs, outputs): # extract the pieces of the process URI src_path = path_re.match(source).groupdict() tgt_path = path_re.match(target).groupdict() # write the edge stream.write( '\t"{src_commit}:{src_path}" -> ' '"{tgt_commit}:{tgt_path}" ' '[label={role}] \n'.format( src_commit=src_path['commit'][:5], src_path=src_path.get('path') or '', tgt_commit=tgt_path['commit'][:5], tgt_path=tgt_path.get('path') or '', role=role ) ) if src_path.get('type') == 'commit': activity_nodes.setdefault(source, {'comment': comment}) artifact_nodes.setdefault(target, {}) if tgt_path.get('type') == 'commit': activity_nodes.setdefault(target, {'comment': comment}) artifact_nodes.setdefault(source, {}) # customize the nodes for node, content in activity_nodes.items(): node_path = path_re.match(node).groupdict() stream.write( '\t"{commit}:{path}" ' '[shape=box label="#{commit}:{path}:{comment}"] \n'.format( comment=content['comment'], commit=node_path['commit'][:5], path=node_path.get('path') or '' ) ) for node, content in artifact_nodes.items(): node_path = path_re.match(node).groupdict() stream.write( '\t"{commit}:{path}" ' '[label="#{commit}:{path}"] \n'.format( commit=node_path['commit'][:5], path=node_path.get('path') or '' ) ) stream.write('}\n')
def _rdf2dot_simple(g, stream)
Create a simple graph of processes and artifacts.
2.368118
2.29395
1.032332
import cgi import collections import rdflib from rdflib.tools.rdf2dot import LABEL_PROPERTIES, NODECOLOR types = collections.defaultdict(set) fields = collections.defaultdict(set) nodes = {} def node(x): return nodes.setdefault(x, 'node{0}'.format(len(nodes))) def label(x, g): for labelProp in LABEL_PROPERTIES: label_ = g.value(x, labelProp) if label_: return label_ try: return g.namespace_manager.compute_qname(x)[2] except Exception: return x def formatliteral(l, g): v = cgi.escape(l) if l.datatype: return '&quot;%s&quot;^^%s' % (v, qname(l.datatype, g)) elif l.language: return '&quot;%s&quot;@%s' % (v, l.language) return '&quot;%s&quot;' % v def qname(x, g): try: q = g.compute_qname(x) return q[0] + ':' + q[2] except Exception: return x def color(p): return 'BLACK' for s, p, o in g: sn = node(s) if p == rdflib.RDFS.label: continue # inject the type predicate into the node itself if p == rdflib.RDF.type: types[sn].add((qname(p, g), cgi.escape(o))) continue if p == rdflib.term.URIRef('http://purl.org/dc/terms/isPartOf'): fields[sn].add((qname(p, g), cgi.escape(o))) continue if p == rdflib.term.URIRef('http://www.w3.org/ns/prov#wasInformedBy'): continue if isinstance(o, (rdflib.URIRef, rdflib.BNode)): on = node(o) opstr = ( '\t%s -> %s [ color=%s, label=< <font point-size="12" ' 'color="#336633">%s</font> > ] ;\n' ) stream.write(opstr % (sn, on, color(p), qname(p, g))) else: fields[sn].add((qname(p, g), formatliteral(o, g))) for u, n in nodes.items(): stream.write(u"# %s %s\n" % (u, n)) f = [ '<tr><td align="left"><b>%s</b></td><td align="left">' '<b>%s</b></td></tr>' % x for x in sorted(types[n]) ] f += [ '<tr><td align="left">%s</td><td align="left">%s</td></tr>' % x for x in sorted(fields[n]) ] opstr = ( '%s [ shape=none, color=%s label=< <table color="#666666"' ' cellborder="0" cellspacing="0" border="1"><tr>' '<td colspan="2" bgcolor="grey"><B>%s</B></td></tr><tr>' '<td href="%s" bgcolor="#eeeeee" colspan="2">' '<font point-size="12" 
color="#6666ff">%s</font></td>' '</tr>%s</table> > ] \n' ) stream.write(opstr % (n, NODECOLOR, label(u, g), u, u, ''.join(f))) stream.write('}\n')
def _rdf2dot_reduced(g, stream)
A reduced dot graph. Adapted from original source: https://rdflib.readthedocs.io/en/stable/_modules/rdflib/tools/rdf2dot.html
2.547671
2.521664
1.010313
from renku.models.provenance.activities import ProcessRun, WorkflowRun for activity in graph.activities.values(): if not isinstance(activity, ProcessRun): continue elif isinstance(activity, WorkflowRun): steps = activity.subprocesses.values() else: steps = [activity] for step in steps: click.echo(' '.join(step.outputs) + ': ' + ' '.join(step.inputs)) tool = step.process click.echo( '\t@' + ' '.join(tool.to_argv()) + ' ' + ' '.join( tool.STD_STREAMS_REPR[key] + ' ' + str(path) for key, path in tool._std_streams().items() ) )
def makefile(graph)
Format graph as Makefile.
4.934762
4.812548
1.025395
from rdflib import ConjunctiveGraph from rdflib.plugin import register, Parser register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser') click.echo( ConjunctiveGraph().parse( data=_jsonld(graph, 'expand'), format='json-ld', ).serialize(format='nt') )
def nt(graph)
Format graph as RDF N-Triples.
3.755394
3.469204
1.082495
import pkg_resources from git.index.fun import hook_path as get_hook_path for hook in HOOKS: hook_path = Path(get_hook_path(hook, client.repo.git_dir)) if hook_path.exists(): if not force: click.echo( 'Hook already exists. Skipping {0}'.format(str(hook_path)), err=True ) continue else: hook_path.unlink() # Make sure the hooks directory exists. hook_path.parent.mkdir(parents=True, exist_ok=True) Path(hook_path).write_bytes( pkg_resources.resource_string( 'renku.data', '{hook}.sh'.format(hook=hook) ) ) hook_path.chmod(hook_path.stat().st_mode | stat.S_IEXEC)
def install(client, force)
Install Git hooks.
2.924202
2.728989
1.071533
from git.index.fun import hook_path as get_hook_path for hook in HOOKS: hook_path = Path(get_hook_path(hook, client.repo.git_dir)) if hook_path.exists(): hook_path.unlink()
def uninstall(client)
Uninstall Git hooks.
4.634419
3.777212
1.226942
if datetime_fmt and isinstance(cell, datetime): return cell.strftime(datetime_fmt) return cell
def format_cell(cell, datetime_fmt=None)
Format a cell.
3.164407
3.055362
1.03569
if isinstance(headers, dict): attrs = headers.keys() # if mapping is not specified keep original names = [ key if value is None else value for key, value in headers.items() ] else: attrs = names = headers table = [( format_cell(cell, datetime_fmt=datetime_fmt) for cell in attrgetter(*attrs)(c) ) for c in collection] return tblte(table, headers=[h.upper() for h in names], **kwargs)
def tabulate(collection, headers, datetime_fmt='%Y-%m-%d %H:%M:%S', **kwargs)
Pretty-print a collection.
5.305233
5.329136
0.995514
output = {} for k, v in value.items(): inst = DatasetFile.from_jsonld(v) output[inst.path] = inst return output
def _convert_dataset_files(value)
Convert dataset files.
5.208471
5.202302
1.001186
from renku.api._git import _expand_directories def fmt_path(path): return str(Path(path).absolute().relative_to(client.path)) files = { fmt_path(source): fmt_path(file_or_dir) for file_or_dir in sources for source in _expand_directories((file_or_dir, )) } # 1. Update dataset metadata files. with progressbar( client.datasets.values(), item_show_func=lambda item: str(item.short_id) if item else '', label='Updating dataset metadata', width=0, ) as bar: for dataset in bar: remove = [] for key, file_ in dataset.files.items(): filepath = fmt_path(file_.full_path) if filepath in files: remove.append(key) if remove: for key in remove: dataset.unlink_file(key) dataset.to_yaml() # 2. Manage .gitattributes for external storage. tracked = tuple( path for path, attr in client.find_attr(*files).items() if attr.get('filter') == 'lfs' ) client.untrack_paths_from_storage(*tracked) existing = client.find_attr(*tracked) if existing: click.echo(WARNING + 'There are custom .gitattributes.\n') if click.confirm( 'Do you want to edit ".gitattributes" now?', default=False ): click.edit(filename=str(client.path / '.gitattributes')) # Finally remove the files. final_sources = list(set(files.values())) if final_sources: run(['git', 'rm', '-rf'] + final_sources, check=True)
def remove(ctx, client, sources)
Remove files and check repository for potential problems.
4.460354
4.406829
1.012146
dependencies = dependencies if dependencies is not None else {} for release in releases: url = release['url'] old_priority = dependencies.get(package, {}).get('priority', 0) for suffix, priority in SUFFIXES.items(): if url.endswith(suffix): if old_priority < priority: sha256 = release['digests']['sha256'] dependencies[package] = { 'package': package, 'url': url, 'sha256': sha256, 'priority': priority, } return dependencies[package]
def find_release(package, releases, dependencies=None)
Return the best release.
2.939902
2.855024
1.029729
# Should not be in ignore paths. if filepath in {'.gitignore', '.gitattributes'}: return False # Ignore everything in .renku ... if filepath.startswith('.renku'): # ... unless it can be a CWL. if can_be_cwl and filepath.endswith('.cwl'): return True return False return True
def _safe_path(filepath, can_be_cwl=False)
Check if the path should be used in output.
5.717729
5.286663
1.081538
from renku.models._tabulate import tabulate click.echo( tabulate( datasets, headers=OrderedDict(( ('short_id', 'id'), ('name', None), ('created', None), ('authors_csv', 'authors'), )), ) )
def tabular(client, datasets)
Format datasets with a tabular output.
5.564604
5.570752
0.998896
from renku.models._json import dumps from renku.models._jsonld import asjsonld data = [ asjsonld( dataset, basedir=os.path.relpath( '.', start=str(dataset.__reference__.parent) ) ) for dataset in datasets ] click.echo(dumps(data, indent=2))
def jsonld(client, datasets)
Format datasets as JSON-LD.
5.814986
5.560704
1.045728
try: submodules = node.submodules if submodules: submodule = ':'.join(submodules) return click.style(submodule, fg='green') + '@' + click.style( node.commit.hexsha[:8], fg='yellow' ) except KeyError: pass return click.style(node.commit.hexsha[:8], fg='yellow')
def _format_sha1(graph, node)
Return formatted text with the submodule information.
3.908655
3.281415
1.191149
if isinstance(value, (list, tuple)): return [ CommandLineBinding(**item) if isinstance(item, dict) else item for item in value ] return shlex.split(value)
def convert_arguments(value)
Convert arguments from various input formats.
4.543941
4.25471
1.067979
ctx.obj = LocalClient( path=path, renku_home=renku_home, use_external_storage=use_external_storage, )
def cli(ctx, path, renku_home, use_external_storage)
Check common Renku commands used in various situations.
2.516641
2.299409
1.094473
graph = Graph(client) outputs = graph.build(revision=revision, can_be_cwl=no_output, paths=paths) outputs = {node for node in outputs if graph.need_update(node)} if not outputs: click.secho( 'All files were generated from the latest inputs.', fg='green' ) sys.exit(0) # Check or extend siblings of outputs. outputs = siblings(graph, outputs) output_paths = {node.path for node in outputs if _safe_path(node.path)} # Get all clean nodes. input_paths = {node.path for node in graph.nodes} - output_paths # Store the generated workflow used for updating paths. import yaml output_file = client.workflow_path / '{0}.cwl'.format(uuid.uuid4().hex) workflow = graph.ascwl( input_paths=input_paths, output_paths=output_paths, outputs=outputs, ) # Make sure all inputs are pulled from a storage. client.pull_paths_from_storage( *(path for _, path in workflow.iter_input_files(client.workflow_path)) ) with output_file.open('w') as f: f.write( yaml.dump( ascwl( workflow, filter=lambda _, x: x is not None, basedir=client.workflow_path, ), default_flow_style=False ) ) from ._cwl import execute execute(client, output_file, output_paths=output_paths)
def update(client, revision, no_output, siblings, paths)
Update existing files by rerunning their outdated workflow.
4.855447
4.744384
1.023409
if final and path: return path if path is None: path = default_config_dir() try: os.makedirs(path) except OSError as e: # pragma: no cover if e.errno != errno.EEXIST: raise return os.path.join(path, 'config.yml')
def config_path(path=None, final=False)
Return config path.
2.662292
2.440294
1.090972
try: with open(config_path(path, final=final), 'r') as configfile: return yaml.safe_load(configfile) or {} except FileNotFoundError: return {}
def read_config(path=None, final=False)
Read Renku configuration.
3.134948
3.028176
1.035259
def write_config(config, path, final=False):
    """Write Renku configuration."""
    target = config_path(path, final=final)
    with open(target, 'w+') as configfile:
        yaml.dump(config, configfile, default_flow_style=False)
2.842604
2.896233
0.981483
def config_load(ctx, param, value):
    """Load the configuration and store it on the click context.

    Used as a click parameter callback: records the config path and the
    parsed configuration in ``ctx.obj`` and returns *value* unchanged.
    (The previous docstring, "Print application config path.", was a
    copy-paste from a neighboring callback and did not match behavior.)
    """
    if ctx.obj is None:
        ctx.obj = {}
    ctx.obj['config_path'] = value
    ctx.obj['config'] = read_config(value)
    return value
2.810399
2.525223
1.112931
def with_config(f):
    """Decorate ``f`` so it is invoked with the configuration.

    The wrapped function receives the application configuration as its
    first positional argument.  When a project configuration directory is
    found (and not disabled via ``ctx.obj['no_project']``), the project
    config is merged in under the ``'project'`` key before the call and
    written back to disk afterwards.
    """
    # keep it.
    @click.pass_context
    def new_func(ctx, *args, **kwargs):
        # Invoked with custom config: pass it straight through untouched.
        if 'config' in kwargs:
            return ctx.invoke(f, *args, **kwargs)

        if ctx.obj is None:
            ctx.obj = {}

        config = ctx.obj['config']
        # Project-level config can be disabled explicitly on the context.
        project_enabled = not ctx.obj.get('no_project', False)
        project_config_path = get_project_config_path()

        if project_enabled and project_config_path:
            project_config = read_config(project_config_path)
            config['project'] = project_config

        result = ctx.invoke(f, config, *args, **kwargs)

        # Persist project-level changes separately from the app config.
        project_config = config.pop('project', None)
        if project_config:
            if not project_config_path:
                raise RuntimeError('Invalid config update')
            write_config(project_config, path=project_config_path)

        write_config(config, path=ctx.obj['config_path'])

        # Restore the in-memory view after writing both files.
        if project_config is not None:
            config['project'] = project_config
        return result

    return update_wrapper(new_func, f)
2.971689
2.910008
1.021196
def print_app_config_path(ctx, param, value):
    """Print application config path."""
    # Skip during completion / resilient parsing, or when flag not given.
    if not value or ctx.resilient_parsing:
        return
    app_config = config_path(os.environ.get('RENKU_CONFIG'))
    click.echo(app_config)
    ctx.exit()
3.569684
3.354895
1.064023
def create_project_config_path(
    path, mode=0o777, parents=False, exist_ok=False
):
    """Create new project configuration folder."""
    # FIXME check default directory mode
    config_dir = Path(path).absolute() / RENKU_HOME
    config_dir.mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
    return str(config_dir)
5.164571
5.209619
0.991353
def get_project_config_path(path=None):
    """Return project configuration folder if exist."""
    candidate = Path(path or '.').absolute().joinpath(RENKU_HOME)
    if candidate.exists() and candidate.is_dir():
        return str(candidate)
    # Not found: fall through to an implicit None for callers that test it.
    return None
4.266126
3.79357
1.124568
def find_project_config_path(path=None):
    """Find project config path by walking up from *path* (or cwd)."""
    start = (Path(path) if path else Path.cwd()).absolute()
    # Check the starting directory first, then every ancestor in order.
    candidates = [start]
    candidates.extend(start.parents)
    for candidate in candidates:
        found = get_project_config_path(candidate)
        if found:
            return found
    return None
2.135514
1.986024
1.075271
def _nodes(output, parent=None):
    """Yield nodes from entities.

    Members of a ``Collection`` are yielded (recursively) before the
    collection node itself; plain entities yield just themselves.
    """
    # NOTE refactor so all outputs behave the same
    entity = getattr(output, 'entity', output)
    if isinstance(entity, Collection):
        for member in entity.members:
            node = member if parent is None else attr.evolve(
                member, parent=parent
            )
            yield from _nodes(node)
    # Both branches of the original ended by yielding the output itself.
    yield output
5.819522
4.789079
1.215165
def mapped(cls, key='id', **kwargs):
    """Create list of instances from a mapping.

    Returns an ``attr.ib`` whose converter turns a CWL-style mapping
    (``{key_value: {...}, ...}``) into a list of *cls* instances, storing
    each mapping key on the instance under *key*.  A plain list is also
    accepted and converted element-wise.
    """
    kwargs.setdefault('metadata', {})
    # Record the mapping key so serialization (``ascwl``) can rebuild
    # the mapping from the list.
    kwargs['metadata']['jsonldPredicate'] = {'mapSubject': key}
    kwargs.setdefault('default', attr.Factory(list))

    def converter(value):
        # Accept either a mapping keyed by *key* or an already-built list.
        if isinstance(value, dict):
            result = []
            for k, v in iteritems(value):
                if not hasattr(cls, 'from_cwl'):
                    # No CWL constructor: keep the raw dict, inject the key.
                    vv = dict(v)
                    vv[key] = k
                else:
                    vv = attr.evolve(cls.from_cwl(v), **{key: k})
                result.append(vv)
        else:
            result = value

        def fix_keys(data):
            # Map serialized names back to attribute names (a trailing
            # underscore is stripped on output, e.g. ``class_`` -> ``class``).
            for a in fields(cls):
                a_name = a.name.rstrip('_')
                if a_name in data:
                    yield a.name, data[a_name]

        # Instantiate any remaining plain dicts; pass through instances.
        return [
            cls(**{kk: vv
                   for kk, vv in fix_keys(v)})
            if not isinstance(v, cls) else v for v in result
        ]

    kwargs['converter'] = converter
    return attr.ib(**kwargs)
3.827036
3.815246
1.00309
def ascwl(
    inst,
    recurse=True,
    filter=None,
    dict_factory=dict,
    retain_collection_types=False,
    basedir=None,
):
    """Return the ``attrs`` attribute values of *inst* as a dict.

    Support ``jsonldPredicate`` in a field metadata for generating
    mappings from lists.

    Adapted from ``attr._funcs``.
    """
    attrs = fields(inst.__class__)
    rv = dict_factory()

    def convert_value(v):
        # Paths are serialized relative to *basedir* when one is given.
        if isinstance(v, Path):
            v = str(v)
            return os.path.relpath(v, str(basedir)) if basedir else v
        return v

    for a in attrs:
        # Skip private dunder-prefixed attributes.
        if a.name.startswith('__'):
            continue

        # Trailing underscore is stripped for output keys (e.g. ``class_``).
        a_name = a.name.rstrip('_')
        v = getattr(inst, a.name)
        if filter is not None and not filter(a, v):
            continue
        if recurse is True:
            if has(v.__class__):
                # Nested attrs instance: serialize recursively.
                rv[a_name] = ascwl(
                    v,
                    recurse=True,
                    filter=filter,
                    dict_factory=dict_factory,
                    basedir=basedir,
                )
            elif isinstance(v, (tuple, list, set)):
                cf = v.__class__ if retain_collection_types is True else list
                rv[a_name] = cf([
                    ascwl(
                        i,
                        recurse=True,
                        filter=filter,
                        dict_factory=dict_factory,
                        basedir=basedir,
                    ) if has(i.__class__) else i for i in v
                ])

                # Fields declared via ``mapped`` carry a mapSubject key:
                # turn the serialized list back into a mapping keyed by it.
                if 'jsonldPredicate' in a.metadata:
                    k = a.metadata['jsonldPredicate'].get('mapSubject')
                    if k:
                        vv = dict_factory()
                        for i in rv[a_name]:
                            kk = i.pop(k)
                            vv[kk] = i
                        rv[a_name] = vv
            elif isinstance(v, dict):
                df = dict_factory
                # NOTE(review): keys go through convert_value but plain
                # values do not — looks intentional, confirm if extending.
                rv[a_name] = df((
                    ascwl(
                        kk,
                        dict_factory=df,
                        basedir=basedir,
                    ) if has(kk.__class__) else convert_value(kk),
                    ascwl(
                        vv,
                        dict_factory=df,
                        basedir=basedir,
                    ) if has(vv.__class__) else vv
                ) for kk, vv in iteritems(v))
            else:
                rv[a_name] = convert_value(v)
        else:
            rv[a_name] = convert_value(v)

    # CWL documents carry their type in a ``class`` field.
    if isinstance(inst, CWLClass):
        rv['class'] = inst.__class__.__name__
    return rv
2.253819
2.207687
1.020896
def from_cwl(cls, data, __reference__=None):
    """Return an instance from CWL data.

    The concrete class is looked up in the registry by the ``class``
    field; remaining keys become constructor arguments.
    """
    target = cls.registry.get(data.get('class', None), cls)
    keywords = {k: v for k, v in iteritems(data) if k != 'class'}
    if __reference__:
        # Construct within the reference context so relative paths resolve.
        with with_reference(__reference__):
            return target(**keywords)
    return target(**keywords)
2.747592
2.638035
1.04153
def from_yaml(cls, path):
    """Return an instance from a YAML file."""
    import yaml

    with path.open(mode='r') as fp:
        data = yaml.safe_load(fp)
    # Pass the file path as reference so nested paths resolve against it.
    return cls.from_cwl(data, __reference__=path)
6.924634
6.707411
1.032385
def template(client, force):
    """Render templated configuration files."""
    import pkg_resources

    # create the templated files
    for tpl_file in CI_TEMPLATES:
        destination = client.path / tpl_file
        with pkg_resources.resource_stream(__name__, tpl_file) as tpl:
            payload = tpl.read()

            # Ask before clobbering an existing file unless forced.
            if not force and destination.exists():
                click.confirm(
                    'Do you want to override "{tpl_file}"'.format(
                        tpl_file=tpl_file
                    ),
                    abort=True,
                )

            with destination.open('wb') as dest:
                dest.write(payload)
3.083673
3.038133
1.014989
def rerun(client, run, job):
    """Re-run existing workflow or tool using CWL runner."""
    from renku.models.provenance import ProcessRun

    # NOTE(review): 'process_commmit' (triple m) looks like a typo, but it
    # must match the client API name — confirm before renaming.
    activity = client.process_commmit()

    if not isinstance(activity, ProcessRun):
        click.secho('No tool was found.', fg='red', file=sys.stderr)
        return

    try:
        args = ['cwl-runner', activity.path]
        if job:
            # Persist the job description to a file cwl-runner can read;
            # delete=False because the runner opens it after we close it.
            job_file = tempfile.NamedTemporaryFile(
                suffix='.yml', dir=os.getcwd(), delete=False
            )
            args.append(job_file.name)
            with job_file as fp:
                # Round-trip through safe_load to validate the YAML input.
                yaml.dump(yaml.safe_load(job), stream=fp, encoding='utf-8')
        if run:
            return call(args, cwd=os.getcwd())
    finally:
        # Always remove the temporary job file we created above.
        if job:
            os.unlink(job_file.name)
3.961304
3.701006
1.070332
def _format_default(client, value):
    """Format default values.

    File defaults are shown as paths relative to the current directory;
    anything else is returned unchanged.
    """
    if not isinstance(value, File):
        return value
    resolved = (client.workflow_path / value.path).resolve()
    return os.path.relpath(str(resolved))
6.071815
6.060686
1.001836
def show_inputs(client, workflow):
    """Show workflow inputs and exit."""
    for input_ in workflow.inputs:
        default = _format_default(client, input_.default)
        click.echo('{id}: {default}'.format(id=input_.id, default=default))
    sys.exit(0)
3.845943
3.659793
1.050864
def edit_inputs(client, workflow):
    """Edit workflow inputs interactively and return the workflow."""
    converters = {
        'int': int,
        'string': str,
        'File': lambda x: File(path=Path(x).resolve()),
    }
    for input_ in workflow.inputs:
        # Unknown CWL types fall back to plain strings.
        cast = converters.get(input_.type, str)
        answer = click.prompt(
            '{0.id} ({0.type})'.format(input_),
            default=_format_default(client, input_.default),
        )
        input_.default = cast(answer)
    return workflow
4.179832
4.002629
1.044271
def rerun(client, revision, roots, siblings, inputs, paths):
    """Recreate files generated by a sequence of ``run`` commands."""
    graph = Graph(client)
    outputs = graph.build(paths=paths, revision=revision)

    # Check or extend siblings of outputs.
    outputs = siblings(graph, outputs)
    output_paths = {node.path for node in outputs}

    # Normalize and check all starting paths.
    roots = {graph.normalize_path(root) for root in roots}
    # Fixed typo in the assertion message ("colides" -> "collides").
    assert not roots & output_paths, '--from collides with output paths'

    # Generate workflow and check inputs.
    # NOTE The workflow creation is done before opening a new file.
    workflow = inputs(
        client,
        graph.ascwl(
            input_paths=roots,
            output_paths=output_paths,
            outputs=outputs,
        )
    )

    # Make sure all inputs are pulled from a storage.
    client.pull_paths_from_storage(
        *(path for _, path in workflow.iter_input_files(client.workflow_path))
    )

    # Store the generated workflow used for updating paths.
    import yaml

    output_file = client.workflow_path / '{0}.cwl'.format(uuid.uuid4().hex)
    with output_file.open('w') as f:
        f.write(
            yaml.dump(
                ascwl(
                    workflow,
                    filter=lambda _, x: x is not None,
                    basedir=client.workflow_path,
                ),
                default_flow_style=False
            )
        )

    # Execute the workflow and relocate all output files.
    from ._cwl import execute
    # FIXME get new output paths for edited tools
    # output_paths = {path for _, path in workflow.iter_output_files()}
    execute(client, output_file, output_paths=output_paths)
6.034076
6.021332
1.002116
def datasets(ctx, client):
    """Migrate dataset metadata.

    Moves pre-0.3.4 dataset metadata files into the new per-identifier
    layout under ``renku_datasets_path``, rewriting file references and
    leaving a link reference behind under ``datasets/<name>``.
    """
    from renku.models._jsonld import asjsonld
    from renku.models.datasets import Dataset
    from renku.models.refs import LinkReference
    from ._checks.location_datasets import _dataset_metadata_pre_0_3_4

    for old_path in _dataset_metadata_pre_0_3_4(client):
        with old_path.open('r') as fp:
            dataset = Dataset.from_jsonld(yaml.safe_load(fp))

        # The dataset name was the directory name under data/.
        name = str(old_path.parent.relative_to(client.path / 'data'))
        new_path = (
            client.renku_datasets_path / dataset.identifier.hex /
            client.METADATA
        )
        new_path.parent.mkdir(parents=True, exist_ok=True)

        # Rewrite file keys so they stay valid relative to the new location.
        dataset = dataset.rename_files(
            lambda key: os.path.
            relpath(str(old_path.parent / key), start=str(new_path.parent))
        )

        with new_path.open('w') as fp:
            yaml.dump(asjsonld(dataset), fp, default_flow_style=False)

        # Only remove the old metadata after the new file is written.
        old_path.unlink()

        LinkReference.create(client=client,
                             name='datasets/' + name).set_reference(new_path)
4.175218
3.997996
1.044328
def path(ctx, paths):
    """DEPRECATED: use 'renku storage pull'."""
    # Deprecated entry point: point users at the replacement and bail out.
    message = 'Use "renku storage pull" instead.'
    click.secho(message, fg='red', err=True)
    ctx.exit(2)
11.028033
4.04521
2.726196
def detect_registry_url(client, auto_login=True):
    """Return a URL of the Docker registry.

    Resolution order: ``registry`` key in the ``[renku]`` Git config
    section, then the remote-specific ``[renku "<remote>"]`` section,
    then a URL derived from the tracked remote (with the ``gitlab.``
    host prefix rewritten to ``registry.``).  When *auto_login* is set
    and the URL embeds credentials, ``docker login`` is attempted.

    :raises errors.ConfigurationError: if no registry URL can be derived.
    :raises errors.AuthenticationError: if ``docker login`` fails.
    """
    repo = client.repo
    config = repo.config_reader()

    # Find registry URL in .git/config
    remote_url = None
    try:
        registry_url = config.get_value('renku', 'registry', None)
    except NoSectionError:
        registry_url = None

    remote_branch = repo.head.reference.tracking_branch()
    if remote_branch is not None:
        remote_name = remote_branch.remote_name
        config_section = 'renku "{remote_name}"'.format(
            remote_name=remote_name
        )
        try:
            # The remote-specific value overrides the generic one.
            registry_url = config.get_value(
                config_section, 'registry', registry_url
            )
        except NoSectionError:
            pass
        remote_url = repo.remotes[remote_name].url

    if registry_url:
        # Look in [renku] and [renku "{remote_name}"] for registry_url key.
        url = GitURL.parse(registry_url)
    elif remote_url:
        # Use URL based on remote configuration.
        url = GitURL.parse(remote_url)

        # Replace gitlab. with registry. unless running on gitlab.com.
        hostname_parts = url.hostname.split('.')
        if len(hostname_parts) > 2 and hostname_parts[0] == 'gitlab':
            hostname_parts = hostname_parts[1:]
        hostname = '.'.join(['registry'] + hostname_parts)
        url = attr.evolve(url, hostname=hostname)
    else:
        raise errors.ConfigurationError(
            'Configure renku.repository_url or Git remote.'
        )

    if auto_login and url.username and url.password:
        try:
            # Password is fed on stdin so it never appears in argv.
            subprocess.run([
                'docker',
                'login',
                url.hostname,
                '-u',
                url.username,
                '--password-stdin',
            ],
                           check=True,
                           input=url.password.encode('utf-8'))
        except subprocess.CalledProcessError:
            raise errors.AuthenticationError(
                'Check configuration of password or token in the registry URL'
            )

    return url
3.151392
3.147972
1.001086
def default(self, obj):
    """Encode more types.

    UUIDs become their hex string, datetimes their ISO representation;
    everything else is delegated to the base encoder.
    """
    if isinstance(obj, UUID):
        return obj.hex
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    return super().default(obj)
2.77099
2.605579
1.063483
def run(client, outputs, no_output, success_codes, isolation, command_line):
    """Tracking work on a specific problem.

    Executes *command_line*, records it as a CWL tool, and appends the
    tool to the workflow storage.  A non-success return code raises
    ``errors.InvalidSuccessCode``.
    """
    working_dir = client.repo.working_dir
    # Map stdin/stdout/stderr redirections onto repository paths.
    mapped_std = _mapped_std_streams(client.candidate_paths)
    factory = CommandLineToolFactory(
        command_line=command_line,
        directory=os.getcwd(),
        working_dir=working_dir,
        successCodes=success_codes,
        **{
            name: os.path.relpath(path, working_dir)
            for name, path in mapped_std.items()
        }
    )
    with client.with_workflow_storage() as wf:
        # ``watch`` detects the tool's outputs when the block exits.
        with factory.watch(
            client, no_output=no_output, outputs=outputs
        ) as tool:
            # Make sure all inputs are pulled from a storage.
            client.pull_paths_from_storage(
                *(
                    path for _, path in
                    tool.iter_input_files(client.workflow_path)
                )
            )

            # Wire the mapped std streams into the subprocess call.
            returncode = call(
                factory.command_line,
                cwd=os.getcwd(),
                **{key: getattr(sys, key)
                   for key in mapped_std.keys()},
            )

            # {0} is the default success set when none was configured.
            if returncode not in (success_codes or {0}):
                raise errors.InvalidSuccessCode(
                    returncode, success_codes=success_codes
                )

            sys.stdout.flush()
            sys.stderr.flush()

        wf.add_step(run=tool)
4.642105
4.558013
1.018449
def log(client, revision, format, no_output, paths):
    """Show logs for a file."""
    graph = Graph(client)

    if not paths:
        # Accept a single revision or a "start..stop" range.
        start, is_range, stop = revision.partition('..')
        if not is_range:
            stop = start
        elif not stop:
            stop = 'HEAD'

        commit = client.repo.rev_parse(stop)
        changes = commit.diff(commit.parents or NULL_TREE)
        paths = (str(client.path / item.a_path) for item in changes)

    # NOTE shall we warn when "not no_output and not paths"?
    graph.build(paths=paths, revision=revision, can_be_cwl=no_output)
    FORMATS[format](graph)
7.5281
7.769716
0.968903
def status(ctx, client, revision, no_output, path):
    """Show a status of the repository.

    Reports outdated generated files, inputs used in multiple versions,
    and deleted inputs; exits with status 1 when anything is outdated.
    """
    graph = Graph(client)
    # TODO filter only
    # NOTE(review): ``paths`` is computed but not used below — presumably
    # the pending filtering mentioned in the TODO; confirm before removing.
    paths = {graph.normalize_path(p) for p in path}
    status = graph.build_status(revision=revision, can_be_cwl=no_output)

    click.echo('On branch {0}'.format(client.repo.active_branch))

    if status['outdated']:
        click.echo(
            'Files generated from newer inputs:\n'
            ' (use "renku log [<file>...]" to see the full lineage)\n'
            ' (use "renku update [<file>...]" to '
            'generate the file from its latest inputs)\n'
        )

        for filepath, stts in sorted(status['outdated'].items()):
            # List only dependencies that are not themselves outdated.
            outdated = (
                ', '.join(
                    '{0}#{1}'.format(
                        click.style(
                            graph._format_path(n.path), fg='blue', bold=True
                        ),
                        _format_sha1(graph, n),
                    ) for n in stts
                    if n.path and n.path not in status['outdated']
                )
            )

            click.echo(
                '\t{0}: {1}'.format(
                    click.style(
                        graph._format_path(filepath), fg='red', bold=True
                    ), outdated
                )
            )

        click.echo()
    else:
        click.secho(
            'All files were generated from the latest inputs.', fg='green'
        )

    if status['multiple-versions']:
        click.echo(
            'Input files used in different versions:\n'
            ' (use "renku log --revision <sha1> <file>" to see a lineage '
            'for the given revision)\n'
        )

        for filepath, files in sorted(status['multiple-versions'].items()):
            # Do not show duplicated commits! (see #387)
            commits = {_format_sha1(graph, key) for key in files}
            click.echo(
                '\t{0}: {1}'.format(
                    click.style(
                        graph._format_path(filepath), fg='blue', bold=True
                    ), ', '.join(commits)
                )
            )

        click.echo()

    if status['deleted']:
        click.echo(
            'Deleted files used to generate outputs:\n'
            ' (use "git show <sha1>:<file>" to see the file content '
            'for the given revision)\n'
        )

        for filepath, node in status['deleted'].items():
            click.echo(
                '\t{0}: {1}'.format(
                    click.style(
                        graph._format_path(filepath), fg='blue', bold=True
                    ), _format_sha1(graph, node)
                )
            )

        click.echo()

    # Non-zero exit signals outdated files to scripts/CI.
    ctx.exit(1 if status['outdated'] else 0)
3.272918
3.292335
0.994102
def validate_name(ctx, param, value):
    """Validate a project name.

    When no name is given, derive one from the target directory's
    basename (trailing path separators stripped).
    """
    if value:
        return value
    directory = ctx.params['directory'].rstrip(os.path.sep)
    return os.path.basename(directory)
4.384735
3.534716
1.240477