code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def close_position(self, repay_only):
    """Close position.

    Args:
        repay_only (bool): Undocumented by cbpro.

    Returns:
        Undocumented response of the POST /position/close call.
    """
    payload = json.dumps({'repay_only': repay_only})
    return self._send_message('post', '/position/close', data=payload)
Close position. Args: repay_only (bool): Undocumented by cbpro. Returns: Undocumented
juraj-google-style
def colored(text: str, color: Optional[str]=None, background: Optional[str]=None, styles: Optional[List[str]]=None) -> str:
    """Return `text` wrapped in ANSI color codes via termcolor.

    If the module-level `termcolor` is falsy (library unavailable), the text
    is returned unchanged.

    Args:
        text: A string that may or may not already contain ANSI codes.
        color: Text color name ('red', 'green', 'yellow', 'blue', 'magenta',
            'cyan', 'white').
        background: Background color name (same values as `color`); passed to
            termcolor as 'on_<background>'.
        styles: Style names ('bold', 'dark', 'underline', 'blink', 'reverse',
            'concealed').

    Returns:
        The text with ANSI color characters embracing it.
    """
    if not termcolor:
        return text
    on_color = 'on_' + background if background else None
    return termcolor.colored(text, color=color, on_color=on_color, attrs=styles)
Returns the colored text with ANSI color characters. Args: text: A string that may or may not already has ANSI color characters. color: A string for text colors. Applicable values are: 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'. background: A string for background colors. Applicable values are: 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'. styles: A list of strings for applying styles on the text. Applicable values are: 'bold', 'dark', 'underline', 'blink', 'reverse', 'concealed'. Returns: A string with ANSI color characters embracing the entire text.
github-repos
def InterpolatePath(path, knowledge_base, users=None, path_args=None, depth=0):
    """Take a string as a path on a client and interpolate with client data.

    Args:
        path: A single string/unicode to be interpolated.
        knowledge_base: An rdf_client.KnowledgeBase object (forwarded to
            GetUserInfo).
        users: A list of string usernames, or None.
        path_args: A dict of additional args to use in interpolation. These
            take precedence over the built-in system variables.
        depth: A counter for recursion depth (recursion stops at 10).

    Returns:
        A single string if users is None (empty string on failed
        interpolation), otherwise a list of strings (one per user whose
        formatters resolved the path).
    """
    # Default system variables; hard-coded systemroot — presumably a Windows
    # fallback when the client did not report one (TODO confirm).
    sys_formatters = {'systemroot': 'c:\\Windows'}
    if path_args:
        sys_formatters.update(path_args)
    if users:
        results = []
        for user in users:
            user = GetUserInfo(knowledge_base, user)
            if user:
                # Per-user formatters from set fields, with system vars
                # overriding on key collision.
                formatters = dict(((x.name, y) for (x, y) in user.ListSetFields()))
                formatters.update(sys_formatters)
                try:
                    results.append(path.format(**formatters))
                except KeyError:
                    # Unknown placeholder for this user: skip silently.
                    pass
        return results
    else:
        try:
            path = path.format(**sys_formatters)
        except KeyError:
            logging.warning('Failed path interpolation on %s', path)
            return ''
    # Placeholders may expand to further placeholders; recurse a bounded
    # number of times (non-user path only, since the user branch returned).
    if (('{' in path) and (depth < 10)):
        path = InterpolatePath(path, knowledge_base=knowledge_base, users=users, path_args=path_args, depth=(depth + 1))
    return path
Take a string as a path on a client and interpolate with client data. Args: path: A single string/unicode to be interpolated. knowledge_base: An rdf_client.KnowledgeBase object. users: A list of string usernames, or None. path_args: A dict of additional args to use in interpolation. These take precedence over any system provided variables. depth: A counter for recursion depth. Returns: A single string if users is None, otherwise a list of strings.
codesearchnet
def get_tensor_shape(self, tensor_name):
    """The tf.TensorShape of a tensor.

    Args:
        tensor_name: string, the name of a tensor in the graph.

    Returns:
        a tf.TensorShape
    """
    tensor = self._name_to_tensor(tensor_name)
    if not isinstance(tensor, mtf.Tensor):
        # Plain TF tensor: its shape is already a tf.TensorShape.
        return tensor.shape
    # Mesh-TensorFlow tensor: convert its shape to a tf.TensorShape.
    return tf.TensorShape(tensor.shape.to_integer_list)
The tf.TensorShape of a tensor. Args: tensor_name: string, the name of a tensor in the graph. Returns: a tf.TensorShape
juraj-google-style
def create(self, data=None, uri=None, timeout=-1, custom_headers=None, force=False):
    """Makes a POST request to create a resource when a request body is required.

    Args:
        data: Additional fields can be passed to create the resource.
        uri: Resource URI; defaults to this resource's base URI.
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.
        custom_headers: Allows setting specific HTTP headers.
        force: If truthy, appends '?force=<value>' to the URI.

    Returns:
        Created resource.
    """
    if not uri:
        uri = self._base_uri
    if force:
        uri += '?force={}'.format(force)
    # Lazy %-style args: the message is only built when DEBUG is enabled,
    # instead of eagerly formatting with % on every call.
    logger.debug('Create (uri = %s, resource = %s)', uri, str(data))
    return self.do_post(uri, data, timeout, custom_headers)
Makes a POST request to create a resource when a request body is required. Args: data: Additional fields can be passed to create the resource. uri: Resouce uri timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. custom_headers: Allows set specific HTTP headers. Returns: Created resource.
juraj-google-style
def ConvertCloudMetadataResponsesToCloudInstance(metadata_responses):
    """Convert CloudMetadataResponses to a CloudInstance proto.

    The client returns a flat set of label/text responses; this fills the
    matching per-provider instance proto on the server side.

    Args:
        metadata_responses: CloudMetadataResponses object from the client.

    Returns:
        CloudInstance object.

    Raises:
        ValueError: if the client passes a bad or unset cloud type.
    """
    if (metadata_responses.instance_type == 'GOOGLE'):
        cloud_instance = GoogleCloudInstance()
        result = CloudInstance(cloud_type='GOOGLE', google=cloud_instance)
    elif (metadata_responses.instance_type == 'AMAZON'):
        cloud_instance = AmazonCloudInstance()
        result = CloudInstance(cloud_type='AMAZON', amazon=cloud_instance)
    else:
        raise ValueError(('Unknown cloud instance type: %s' % metadata_responses.instance_type))
    # Each response's label names a field on the provider-specific proto.
    for cloud_metadata in metadata_responses.responses:
        setattr(cloud_instance, cloud_metadata.label, cloud_metadata.text)
    # Google instances additionally get a derived unique id.
    if (result.cloud_type == 'GOOGLE'):
        cloud_instance.unique_id = MakeGoogleUniqueID(cloud_instance)
    return result
Convert CloudMetadataResponses to CloudInstance proto. Ideally we'd just get the client to fill out a CloudInstance proto, but we need to keep the flexibility of collecting new metadata and creating new fields without a client push. So instead we bring back essentially a dict of results and fill the proto on the server side. Args: metadata_responses: CloudMetadataResponses object from the client. Returns: CloudInstance object Raises: ValueError: if client passes bad or unset cloud type.
codesearchnet
def set_room_alias(self, room_id, room_alias):
    """Map an alias to a room id via the directory API.

    Args:
        room_id (str): The room id.
        room_alias (str): The room's wanted alias name.

    Returns:
        The response of the PUT /directory/room/{alias} call.
    """
    body = {'room_id': room_id}
    path = '/directory/room/{}'.format(quote(room_alias))
    return self._send('PUT', path, content=body)
Set alias to room id Args: room_id (str): The room id. room_alias (str): The room wanted alias name.
codesearchnet
def filter_distributed_callbacks(callbacks_list, model):
    """Filter Callbacks based on the worker context when running multi-worker.

    Args:
        callbacks_list: A list of `Callback` instances.
        model: Keras model instance.

    Returns:
        The list of `Callback` instances that should be run on this worker:
        the full list on the chief, and only callbacks without
        `_chief_worker_only` on non-chief workers.

    Raises:
        ValueError: if the model is not in multi-worker mode.
    """
    if not model._in_multi_worker_mode():
        raise ValueError('filter_distributed_callbacks() should only be called when Keras is in multi worker mode.')
    callbacks_list = callbacks_list or []
    if not [c for c in callbacks_list if isinstance(c, callbacks.ModelCheckpoint)]:
        logging.warning('ModelCheckpoint callback is not provided. Workers will need to restart training if any fails.')
    # NOTE(review): `callbacks_list is None` can never be True here — the
    # `or []` above already replaced None with an empty list.
    if callbacks_list is None or is_current_worker_chief():
        return callbacks_list
    return [callback for callback in callbacks_list if not callback._chief_worker_only]
Filter Callbacks based on the worker context when running multi-worker. Args: callbacks_list: A list of `Callback` instances. model: Keras model instance. Returns: The list of `Callback` instances that should be run on this worker.
github-repos
def _validate_aud(claims, audience=None):
    """Validates that the 'aud' (audience) claim is valid.

    Per RFC 7519, the "aud" value may be a single StringOrURI or a list of
    them; the verifying principal must appear in it, or the JWT must be
    rejected. Use of the claim is optional — a missing 'aud' passes.

    Args:
        claims (dict): The claims dictionary to validate.
        audience (str): The audience that is verifying the token.

    Raises:
        JWTClaimsError: if the claim is malformed or does not contain
            `audience`.
    """
    if ('aud' not in claims):
        return
    audience_claims = claims['aud']
    # Normalize the single-audience form to a list.
    if isinstance(audience_claims, string_types):
        audience_claims = [audience_claims]
    if (not isinstance(audience_claims, list)):
        raise JWTClaimsError('Invalid claim format in token')
    if any(((not isinstance(c, string_types)) for c in audience_claims)):
        raise JWTClaimsError('Invalid claim format in token')
    if (audience not in audience_claims):
        raise JWTClaimsError('Invalid audience')
Validates that the 'aud' claim is valid. The "aud" (audience) claim identifies the recipients that the JWT is intended for. Each principal intended to process the JWT MUST identify itself with a value in the audience claim. If the principal processing the claim does not identify itself with a value in the "aud" claim when this claim is present, then the JWT MUST be rejected. In the general case, the "aud" value is an array of case- sensitive strings, each containing a StringOrURI value. In the special case when the JWT has one audience, the "aud" value MAY be a single case-sensitive string containing a StringOrURI value. The interpretation of audience values is generally application specific. Use of this claim is OPTIONAL. Args: claims (dict): The claims dictionary to validate. audience (str): The audience that is verifying the token.
codesearchnet
def from_string(cls, public_key):
    """Construct a Verifier instance from a public key or certificate string.

    Args:
        public_key (Union[str, bytes]): The public key in PEM format or the
            x509 public key certificate.

    Returns:
        Verifier: The constructed verifier.

    Raises:
        ValueError: If the public key can't be parsed.
    """
    public_key_data = _helpers.to_bytes(public_key)
    if _CERTIFICATE_MARKER in public_key_data:
        # Full x509 certificate: extract its embedded public key.
        cert = cryptography.x509.load_pem_x509_certificate(public_key_data, _BACKEND)
        pubkey = cert.public_key()
    else:
        # Bare PEM public key.
        pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND)
    return cls(pubkey)
Construct an Verifier instance from a public key or public certificate string. Args: public_key (Union[str, bytes]): The public key in PEM format or the x509 public key certificate. Returns: Verifier: The constructed verifier. Raises: ValueError: If the public key can't be parsed.
juraj-google-style
def output(ret, **kwargs):
    """Display the output as a table.

    Args:
        ret: The data to render (decoded via salt.utils.data.decode).
        **kwargs: Options, each falling back to the matching
            ``out.table.*`` key in ``__opts__``:
            nested_indent (int), rows_key, labels_key, title, and the
            TableDisplay options has_header, row_delimiter, delim, justify,
            separate_rows, prefix, suffix, width.

    Returns:
        str: The rendered table.
    """
    # Allow the caller to swap in its own options; mutates the module-level
    # __opts__ used by every subsequent lookup below.
    if ('opts' in kwargs):
        global __opts__
        __opts__ = kwargs.pop('opts')
    base_indent = (kwargs.get('nested_indent', 0) or __opts__.get('out.table.nested_indent', 0))
    rows_key = (kwargs.get('rows_key') or __opts__.get('out.table.rows_key'))
    labels_key = (kwargs.get('labels_key') or __opts__.get('out.table.labels_key'))
    title = (kwargs.get('title') or __opts__.get('out.table.title'))
    class_kvargs = {}
    argks = ('has_header', 'row_delimiter', 'delim', 'justify', 'separate_rows', 'prefix', 'suffix', 'width')
    # Forward only explicitly provided display options to TableDisplay.
    for argk in argks:
        argv = (kwargs.get(argk) or __opts__.get('out.table.{key}'.format(key=argk)))
        if (argv is not None):
            class_kvargs[argk] = argv
    table = TableDisplay(**class_kvargs)
    out = []
    # A title is only emitted when a specific rows_key selects one table.
    if (title and rows_key):
        out.append(table.ustring(base_indent, title, table.WHITE, suffix='\n'))
    return '\n'.join(table.display(salt.utils.data.decode(ret), base_indent, out, rows_key=rows_key, labels_key=labels_key))
Display the output as table. Args: * nested_indent: integer, specify the left alignment. * has_header: boolean specifying if header should be displayed. Default: True. * row_delimiter: character to separate rows. Default: ``_``. * delim: character to separate columns. Default: ``" | "``. * justify: text alignment. Default: ``center``. * separate_rows: boolean specifying if row separator will be displayed between consecutive rows. Default: True. * prefix: character at the beginning of the row. Default: ``"| "``. * suffix: character at the end of the row. Default: ``" |"``. * width: column max width. Default: ``50``. * rows_key: display the rows under a specific key. * labels_key: use the labels under a certain key. Otherwise will try to use the dictionary keys (if any). * title: display title when only one table is selected (using the ``rows_key`` argument).
codesearchnet
def to_matrix(self):
    """Converts a unit-length quaternion to a rotation matrix.

    Returns:
        ndarray: 3x3 rotation matrix (dtype float).
    """
    w, x, y, z = self.normalize().data
    # Precompute the doubled products used by the standard
    # quaternion-to-rotation-matrix formula.
    xx, yy, zz = 2 * x * x, 2 * y * y, 2 * z * z
    xy, xz, yz = 2 * x * y, 2 * x * z, 2 * y * z
    wx, wy, wz = 2 * w * x, 2 * w * y, 2 * w * z
    return np.array(
        [[1 - yy - zz, xy - wz, xz + wy],
         [xy + wz, 1 - xx - zz, yz - wx],
         [xz - wy, yz + wx, 1 - xx - yy]],
        dtype=float)
Converts a unit-length quaternion to a rotation matrix. Returns: ndarray: Rotation matrix.
codesearchnet
def terminate_ec2_instance(client, resource):
    """Terminate an EC2 Instance.

    Args:
        client (:obj:`boto3.session.Session.client`): A boto3 client object.
        resource (:obj:`Resource`): The resource object to terminate.

    Returns:
        `ActionStatus` and a metadata dict: IGNORED with an empty dict when
        the instance is already terminated, otherwise SUCCEED with the
        instance type and public IP.
    """
    instance = EC2Instance.get(resource.id)
    # Already gone — nothing to do.
    if instance.state == 'terminated':
        return ActionStatus.IGNORED, {}
    client.terminate_instances(InstanceIds=[resource.id])
    return ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip}
Terminate an EC2 Instance This function will terminate an EC2 Instance. Args: client (:obj:`boto3.session.Session.client`): A boto3 client object resource (:obj:`Resource`): The resource object to terminate Returns: `ActionStatus`
juraj-google-style
def _copy_and_clean_up_expectation(self, expectation, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True): new_expectation = copy.deepcopy(expectation) if ('success_on_last_run' in new_expectation): del new_expectation['success_on_last_run'] if discard_result_format_kwargs: if ('result_format' in new_expectation['kwargs']): del new_expectation['kwargs']['result_format'] if discard_include_configs_kwargs: if ('include_configs' in new_expectation['kwargs']): del new_expectation['kwargs']['include_configs'] if discard_catch_exceptions_kwargs: if ('catch_exceptions' in new_expectation['kwargs']): del new_expectation['kwargs']['catch_exceptions'] return new_expectation
Returns copy of `expectation` without `success_on_last_run` and other specified key-value pairs removed Returns a copy of specified expectation will not have `success_on_last_run` key-value. The other key-value \ pairs will be removed by default but will remain in the copy if specified. Args: expectation (json): \ The expectation to copy and clean. discard_result_format_kwargs (boolean): \ if True, will remove the kwarg `output_format` key-value pair from the copied expectation. discard_include_configs_kwargs (boolean): if True, will remove the kwarg `include_configs` key-value pair from the copied expectation. discard_catch_exceptions_kwargs (boolean): if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation. Returns: A copy of the provided expectation with `success_on_last_run` and other specified key-value pairs removed
codesearchnet
def __init__(self, subject_hash, hash_information): self.hash_information = hash_information self.subject_hash = subject_hash
Initializes analysis information about a hash. Args: subject_hash (str): hash that the hash_information relates to. hash_information (object): information about the hash. This object will be used by the GenerateLabels method in the HashTaggingAnalysisPlugin to tag events that relate to the hash.
juraj-google-style
def _should_unpack(arg): return type(arg) is tuple
Determines whether the caller needs to unpack the argument from a tuple. Args: arg: argument to check Returns: Indication of whether the caller needs to unpack the argument from a tuple.
github-repos
def get_dim_index(js_dict, dim):
    """Get index from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with columns 'id' and
            'index' holding the dimension's category index data.
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # No explicit index: fall back to the dimension's single label.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])), index=[0], columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index, range(0, len(dim_index)))), index=dim_index, columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(), dim_index.values())), index=dim_index.keys(), columns=['id', 'index'])
        # `sort_index(by=...)` was removed from pandas; `sort_values` is the
        # supported way to sort by a column.
        dim_index = dim_index.sort_values(by='index')
    return dim_index
Get index from a given dimension. Args: js_dict (dict): dictionary containing dataset data and metadata. dim (string): dimension name obtained from JSON file. Returns: dim_index (pandas.DataFrame): DataFrame with index-based dimension data.
codesearchnet
def get_opt_attr(obj_pyxb, attr_str, default_val=None):
    """Get an optional attribute value from a PyXB element.

    Attributes that are optional per the schema but unset on the PyXB object
    are present with value None; those are treated the same as missing.

    Args:
        obj_pyxb: PyXB object.
        attr_str: str, name of an attribute the PyXB object may contain.
        default_val: value to return if the attribute is absent or None.

    Returns:
        Value of the attribute if present and not None, else ``default_val``.
    """
    value = getattr(obj_pyxb, attr_str, default_val)
    if value is None:
        return default_val
    return value
Get an optional attribute value from a PyXB element. The attributes for elements that are optional according to the schema and not set in the PyXB object are present and set to None. PyXB validation will fail if required elements are missing. Args: obj_pyxb: PyXB object attr_str: str Name of an attribute that the PyXB object may contain. default_val: any object Value to return if the attribute is not present. Returns: str : Value of the attribute if present, else ``default_val``.
codesearchnet
def parse_fs_url(fs_url):
    """Parse a Filesystem URL and return a `ParseResult`.

    Arguments:
        fs_url (str): A filesystem URL.

    Returns:
        ~fs.opener.parse.ParseResult: a parse result instance with fields
        (protocol, username, password, resource, params, path).

    Raises:
        ~fs.errors.ParseError: if the FS URL is not valid.
    """
    match = _RE_FS_URL.match(fs_url)
    if (match is None):
        raise ParseError('{!r} is not a fs2 url'.format(fs_url))
    (fs_name, credentials, url1, url2, path) = match.groups()
    if (not credentials):
        # No credentials: the regex captured the resource in its second slot.
        username = None
        password = None
        url = url2
    else:
        (username, _, password) = credentials.partition(':')
        username = unquote(username)
        password = unquote(password)
        url = url1
    # Split an optional query string off the resource.
    (url, has_qs, qs) = url.partition('?')
    resource = unquote(url)
    if has_qs:
        _params = parse_qs(qs, keep_blank_values=True)
        # Keep only the first value of each query parameter.
        params = {k: unquote(v[0]) for (k, v) in six.iteritems(_params)}
    else:
        params = {}
    return ParseResult(fs_name, username, password, resource, params, path)
Parse a Filesystem URL and return a `ParseResult`. Arguments: fs_url (str): A filesystem URL. Returns: ~fs.opener.parse.ParseResult: a parse result instance. Raises: ~fs.errors.ParseError: if the FS URL is not valid.
codesearchnet
def _compute_sequence_length_from_mask(mask, time_major):
    """Calculate the 1-D sequence length tensor from a boolean mask.

    The mask is a 2D boolean tensor of shape [batch, timestep] (or
    [timestep, batch] when time_major=True); masked steps are False. E.g.
    [[True, True, False, False], [True, True, True, False]] yields [2, 3].
    The mask is expected to be right padded.

    Args:
        mask: Boolean tensor with shape [batch, timestep] or
            [timestep, batch] if time_major=True.
        time_major: Boolean, whether the mask is time major or batch major.

    Returns:
        sequence_length: 1D int32 tensor.
    """
    axis = 0 if time_major else 1
    return tf.reduce_sum(tf.cast(mask, tf.int32), axis=axis)
Calculate the sequence length tensor (1-D) based on the masking tensor. The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For any timestep that should be masked, the corresponding field will be False. Consider the following example: a = [[True, True, False, False], [True, True, True, False]] It is a (2, 4) tensor, and the corresponding sequence length result should be 1D tensor with value [2, 3]. Note that the masking tensor must be right padded that could be checked by, e.g., `is_sequence_right_padded()`. Args: mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if time_major=True. time_major: Boolean, which indicates whether the mask is time major or batch major. Returns: sequence_length: 1D int32 tensor.
github-repos
def _checkResponseWriteData(payload, writedata):
    """Check that the write data as given in the response is correct.

    Bytes 2 and 3 (zero based counting) of the payload hold the write data.

    Args:
        * payload (string): The payload.
        * writedata (string): The data to write, length should be 2 bytes.

    Raises:
        TypeError, ValueError
    """
    _checkString(payload, minlength=4, description='payload')
    _checkString(writedata, minlength=2, maxlength=2, description='writedata')
    BYTERANGE_FOR_WRITEDATA = slice(2, 4)
    receivedWritedata = payload[BYTERANGE_FOR_WRITEDATA]
    if (receivedWritedata != writedata):
        raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format(receivedWritedata, writedata, payload))
Check that the write data as given in the response is correct. The bytes 2 and 3 (zero based counting) in the payload holds the write data. Args: * payload (string): The payload * writedata (string): The data to write, length should be 2 bytes. Raises: TypeError, ValueError
codesearchnet
def AddWatchOnly(self, script_hash):
    """Add a watch only address to the wallet.

    Args:
        script_hash (UInt160): a bytearray (len 20) representing the public
            key.

    Note:
        Logs an error and does nothing if the address already exists in the
        wallet's contracts. Duplicates within _watch_only itself are not
        checked.
    """
    if (script_hash in self._contracts):
        logger.error('Address already in contracts')
        return
    self._watch_only.append(script_hash)
Add a watch only address to the wallet. Args: script_hash (UInt160): a bytearray (len 20) representing the public key. Note: Prints a warning to the console if the address already exists in the wallet.
codesearchnet
def view(location, browser=None, new='same', autoraise=True):
    """Open a browser to view the specified location.

    Args:
        location (str): Location to open. If it does not begin with "http"
            it is assumed to be a file path on the local filesystem.
        browser (str or None): which browser to use; None means the system
            default browser.
        new (str): How to open the location: 'same' (current tab), 'tab'
            (new tab in the current window) or 'window' (new window).
        autoraise (bool): Whether to automatically raise the location in a
            new browser window (default: True).

    Returns:
        None

    Raises:
        RuntimeError: if `new` is not one of the accepted values.
    """
    try:
        new = {'same': 0, 'window': 1, 'tab': 2}[new]
    except KeyError:
        raise RuntimeError("invalid 'new' value passed to view: %r, valid values are: 'same', 'window', or 'tab'" % new)
    if location.startswith('http'):
        url = location
    else:
        # Source line was truncated in the dump; rebuild the file URL from
        # the absolute local path.
        import os.path
        url = 'file://' + os.path.abspath(location)
    try:
        controller = get_browser_controller(browser)
        controller.open(url, new=new, autoraise=autoraise)
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception:
        # Best effort: failing to open a browser must not crash the caller.
        # Narrowed from a bare `except:` which is redundant with the clause
        # above and hides even non-Exception exits.
        pass
Open a browser to view the specified location. Args: location (str) : Location to open If location does not begin with "http:" it is assumed to be a file path on the local filesystem. browser (str or None) : what browser to use (default: None) If ``None``, use the system default browser. new (str) : How to open the location. Valid values are: ``'same'`` - open in the current tab ``'tab'`` - open a new tab in the current window ``'window'`` - open in a new window autoraise (bool) : Whether to automatically raise the location in a new browser window (default: True) Returns: None
codesearchnet
def get_key_delivery_url(access_token, ck_id, key_type):
    """Get Media Services Key Delivery URL.

    Args:
        access_token (str): A valid Azure authentication token.
        ck_id (str): A Media Service Content Key ID.
        key_type (str): A Media Service key Type.

    Returns:
        HTTP response. JSON body.
    """
    path = '/ContentKeys'
    # OData-style entity addressing: /ContentKeys('<id>')/GetKeyDeliveryUrl
    full_path = ''.join([path, "('", ck_id, "')", '/GetKeyDeliveryUrl'])
    endpoint = ''.join([ams_rest_endpoint, full_path])
    body = (('{"keyDeliveryType": "' + key_type) + '"}')
    return do_ams_post(endpoint, full_path, body, access_token)
Get Media Services Key Delivery URL. Args: access_token (str): A valid Azure authentication token. ck_id (str): A Media Service Content Key ID. key_type (str): A Media Service key Type. Returns: HTTP response. JSON body.
codesearchnet
def in_same_dir(as_file, target_file):
    """Return an absolute path to a target file that is located in the same
    directory as `as_file`.

    Args:
        as_file: File name (often __file__); its directory path is used.
        target_file: Name of the target file.

    Returns:
        str: absolute path of `target_file` inside `as_file`'s directory.
    """
    directory = os.path.dirname(as_file)
    return os.path.abspath(os.path.join(directory, target_file))
Return an absolute path to a target file that is located in the same directory as as_file Args: as_file: File name (including __file__) Use the directory path of this file target_file: Name of the target file
codesearchnet
def add_report(self, specification_name, report):
    """Add a report under `specification_name` and update the aggregate
    counts of total, failed, errored and successful tests.

    Args:
        specification_name: string representing the specification
            (with ".spec").
        report: the test report to record (provides testsRun, failures,
            errors).
    """
    self._reports[specification_name] = report
    self._total += report.testsRun
    self._failures += len(report.failures)
    self._errors += len(report.errors)
    self._success = self._total - self._failures - self._errors
Adds a given report with the given specification_name as key to the reports list and computes the number of success, failures and errors Args: specification_name: string representing the specification (with ".spec") report: The
codesearchnet
def VerifyStructure(self, parser_mediator, line):
    """Verify that this file is a XChat log file.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        line (str): line from a text file.

    Returns:
        bool: True if the line is in the expected format, False if not.
    """
    try:
        structure = self._HEADER.parseString(line)
    except pyparsing.ParseException:
        logger.debug('Not a XChat log file')
        return False
    _, month, day, hours, minutes, seconds, year = structure.date_time
    # Month is parsed as a name; 0 marks an unrecognized month.
    month = timelib.MONTH_DICT.get(month.lower(), 0)
    time_elements_tuple = (year, month, day, hours, minutes, seconds)
    # Constructing TimeElements validates the date/time combination.
    try:
        dfdatetime_time_elements.TimeElements(
            time_elements_tuple=time_elements_tuple)
    except ValueError:
        logger.debug('Not a XChat log file, invalid date and time: {0!s}'.format(
            structure.date_time))
        return False
    return True
Verify that this file is a XChat log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
juraj-google-style
def scatter_max(self, sparse_delta, use_locking=False, name=None):
    """Updates this variable with the max of `tf.IndexedSlices` and itself.

    Abstract in this class: subclasses must provide the implementation.

    Args:
        sparse_delta: `tf.IndexedSlices` to use as an argument of max with
            this variable.
        use_locking: If `True`, use locking during the operation.
        name: the name of the operation.

    Returns:
        The updated variable.

    Raises:
        NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError
Updates this variable with the max of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of max with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.
github-repos
def update_restore_inputs(self, checkpoint_key, shape_and_slice_spec) -> tuple[List[str], List[str]]:
    """Updates the specs to restore op.

    Override this method if the arguments to the restore op need to be
    updated for resharding. This default passes the request through
    unchanged, as single-element lists.

    Args:
        checkpoint_key: The checkpoint key as requested by the caller.
        shape_and_slice_spec: The shape and slice spec as requested by the
            caller.

    Returns:
        Tuple of (checkpoint keys, specs) the restore op should fetch; the
        lists have equal length.
    """
    keys = [checkpoint_key]
    specs = [shape_and_slice_spec]
    return (keys, specs)
Updates the specs to restore op. Override this method if the arguments to restore op need to be updated as per the resharding required. Args: checkpoint_key: The checkpoint key as requested by the caller shape_and_slice_spec: The shape and slice spec as requested by caller Returns: Tuple of list of checkpoint_keys and specs that the restore op should fetch as per the resharding requirement. The length of checkpoint keys returned by this method will match the length of checkpoint_values that are input to `reshard`.
github-repos
def gzip_uncompress(data, truncated=False):
    """Uncompress gzip data.

    Args:
        data (bytes): The gzip data.
        truncated (bool): If True, the decompressor is not flushed — use for
            incomplete/streaming input.

    Returns:
        bytes: The inflated data.

    Raises:
        zlib.error
    """
    decompressor = SimpleGzipDecompressor()
    inflated_data = decompressor.decompress(data)
    if (not truncated):
        inflated_data += decompressor.flush()
    return inflated_data
Uncompress gzip data. Args: data (bytes): The gzip data. truncated (bool): If True, the decompressor is not flushed. This is a convenience function. Returns: bytes: The inflated data. Raises: zlib.error
codesearchnet
def get_slot_names(self):
    """Return a sorted list of the names of slots created by the `Optimizer`.

    See `get_slot()`.

    Returns:
        A list of strings.
    """
    # sorted() over a dict iterates its keys.
    return sorted(self._slots)
Return a list of the names of slots created by the `Optimizer`. See `get_slot()`. Returns: A list of strings.
github-repos
def __init__(self, build_tree=True): self._target_cache = {} self._item_cache = {} self._contains_cache = {} self._matrix_cache = {} self._graph_cache = {} self._treemap_cache = None self.modules = [] self.packages = [] if build_tree: self.build_tree()
Initialization method. Args: build_tree (bool): whether to immediately build the tree or not.
juraj-google-style
def _update_seek(self, offset, whence):
    """Update seek value.

    Args:
        offset (int): Offset.
        whence (int): One of SEEK_SET, SEEK_CUR, SEEK_END.

    Returns:
        int: New seek position.

    Raises:
        ValueError: on an unsupported whence value.
    """
    # Lock so concurrent seekers see a consistent position.
    with self._seek_lock:
        if (whence == SEEK_SET):
            self._seek = offset
        elif (whence == SEEK_CUR):
            self._seek += offset
        elif (whence == SEEK_END):
            self._seek = (offset + self._size)
        else:
            raise ValueError(('whence value %s unsupported' % whence))
        return self._seek
Update seek value. Args: offset (int): Offset. whence (int): Whence. Returns: int: Seek position.
codesearchnet
def get_image_data(self, ids=None, voxels=None, dense=True):
    """Slices and returns a subset of image data.

    Args:
        ids (list, array): A list or 1D numpy array of study ids to return.
            If None, returns data for all studies.
        voxels (list, array): A list or 1D numpy array of voxel indices
            (i.e., rows) to return. If None, returns data for all voxels.
        dense (bool): Optional boolean. When True (default), convert the
            result to a dense array before returning. When False, keep as
            sparse matrix.

    Returns:
        A 2D numpy array (or sparse matrix when dense=False) with voxels in
        rows and studies in columns.
    """
    if (dense and (ids is None) and (voxels is None)):
        logger.warning('Warning: get_image_data() is being called without specifying a subset of studies or voxels to retrieve. This may result in a very large amount of data (several GB) being read into memory. If you experience any problems, consider returning a sparse matrix by passing dense=False, or pass in a list of ids of voxels to retrieve only a portion of the data.')
    result = self.data
    if (ids is not None):
        # Columns of the studies whose ids were requested.
        idxs = np.where(np.in1d(np.array(self.ids), np.array(ids)))[0]
        # Fixed dump-mangled `result[(:, idxs)]` (invalid syntax) to proper
        # 2D slicing.
        result = result[:, idxs]
    if (voxels is not None):
        result = result[voxels, :]
    return (result.toarray() if dense else result)
Slices and returns a subset of image data. Args: ids (list, array): A list or 1D numpy array of study ids to return. If None, returns data for all studies. voxels (list, array): A list or 1D numpy array of voxel indices (i.e., rows) to return. If None, returns data for all voxels. dense (bool): Optional boolean. When True (default), convert the result to a dense array before returning. When False, keep as sparse matrix. Returns: A 2D numpy array with voxels in rows and studies in columns.
codesearchnet
def plot_scatter_matrix(self, freq=None, title=None, figsize=(10, 10), **kwargs):
    """Wrapper around pandas' scatter_matrix.

    Args:
        * freq (str): Data frequency used for display purposes. Refer to
          pandas docs for valid freq strings.
        * title (str): Title if default not appropriate.
        * figsize ((x, y)): figure size.
        * kwargs: passed to pandas' scatter_matrix method.
    """
    if (title is None):
        title = self._get_default_plot_title(freq, 'Return Scatter Matrix')
    plt.figure()
    ser = self._get_series(freq).to_returns().dropna()
    # NOTE(review): pd.scatter_matrix was removed in pandas 1.0
    # (moved to pandas.plotting.scatter_matrix) — confirm the pinned
    # pandas version supports this call.
    pd.scatter_matrix(ser, figsize=figsize, **kwargs)
    return plt.suptitle(title)
Wrapper around pandas' scatter_matrix. Args: * freq (str): Data frequency used for display purposes. Refer to pandas docs for valid freq strings. * figsize ((x,y)): figure size * title (str): Title if default not appropriate * kwargs: passed to pandas' scatter_matrix method
codesearchnet
def setup(pin, mode, pullup=None, initial=False):
    """Setup pin with mode IN or OUT.

    Args:
        pin (int): pin number to configure.
        mode (str): use either gpio.OUT or gpio.IN (LOW/HIGH are also
            accepted by the validity check below).
        pullup (None): rpio compatibility. If anything but None, raises
            ValueError — sysfs does not support pullups.
        initial (bool, optional): Initial pin value for OUT pins.
            Default is False.

    Raises:
        ValueError: on a non-None pullup or an unrecognized mode.
    """
    if pullup is not None:
        raise ValueError("sysfs does not support pullups")
    if mode not in (IN, OUT, LOW, HIGH):
        raise ValueError(mode)
    log.debug("Setup {0}: {1}".format(pin, mode))
    # Write the direction via the pin's sysfs file handle.
    f = _open[pin].direction
    _write(f, mode)
    # Drive output pins to their initial level (module-level `set`, which
    # shadows the builtin).
    if mode == OUT:
        if initial:
            set(pin, 1)
        else:
            set(pin, 0)
Setup pin with mode IN or OUT. Args: pin (int): mode (str): use either gpio.OUT or gpio.IN pullup (None): rpio compatibility. If anything but None, raises value Error pullup (bool, optional): Initial pin value. Default is False
juraj-google-style
def _ParseComment(self, structure): if (structure[1] == 'Date:'): (self._year, self._month, self._day_of_month, _, _, _) = structure.date_time elif (structure[1] == 'Fields:'): self._ParseFieldsMetadata(structure)
Parses a comment. Args: structure (pyparsing.ParseResults): structure parsed from the log file.
codesearchnet
def merge_lines(top, bot, icod="top"):
    """Merges two lines (top and bot) so overlapping box-drawing glyphs
    combine sensibly.

    Characters are merged column by column; the branch order below encodes
    the precedence rules, with `icod` breaking ties.

    Args:
        top (str): the top line
        bot (str): the bottom line
        icod (top or bot): in case of doubt, which line should have
            priority? Default: "top".

    Returns:
        str: The merge of both lines (column-wise; extra columns of the
        longer line are dropped by zip).
    """
    ret = ""
    for topc, botc in zip(top, bot):
        # Identical glyphs pass through unchanged.
        if topc == botc:
            ret += topc
        elif topc in '┼╪' and botc == " ":
            ret += "│"
        elif topc == " ":
            ret += botc
        elif topc in '┬╥' and botc in " ║│" and icod == "top":
            ret += topc
        elif topc in '┬' and botc == " " and icod == "bot":
            ret += '│'
        elif topc in '╥' and botc == " " and icod == "bot":
            ret += '║'
        elif topc in '┬│' and botc == "═":
            ret += '╪'
        elif topc in '┬│' and botc == "─":
            ret += '┼'
        elif topc in '└┘║│░' and botc == " " and icod == "top":
            ret += topc
        elif topc in '─═' and botc == " " and icod == "top":
            ret += topc
        elif topc in '─═' and botc == " " and icod == "bot":
            ret += botc
        elif topc in "║╥" and botc in "═":
            ret += "╬"
        elif topc in "║╥" and botc in "─":
            ret += "╫"
        elif topc in '╫╬' and botc in " ":
            ret += "║"
        elif topc == '└' and botc == "┌":
            ret += "├"
        elif topc == '┘' and botc == "┐":
            ret += "┤"
        elif botc in "┐┌" and icod == 'top':
            ret += "┬"
        elif topc in "┘└" and botc in "─" and icod == 'top':
            ret += "┴"
        else:
            # No merge rule matched: the bottom glyph wins.
            ret += botc
    return ret
Merges two lines (top and bot) in the way that the overlapping make senses. Args: top (str): the top line bot (str): the bottom line icod (top or bot): in case of doubt, which line should have priority? Default: "top". Returns: str: The merge of both lines.
juraj-google-style
def get_room_id(self, room_alias):
    """Get room id from its alias.

    Args:
        room_alias (str): The room alias name.

    Returns:
        Wanted room's id, or None when the response carries no 'room_id'.
    """
    path = "/directory/room/{}".format(quote(room_alias))
    response = self._send("GET", path)
    return response.get("room_id", None)
Get room id from its alias. Args: room_alias (str): The room alias name. Returns: Wanted room's id.
juraj-google-style
def list(self):
    """Returns a list of the user's gists as GistInfo objects.

    Follows GitHub's RFC-5988 'Link' pagination headers until no 'next'
    relation remains; any request/parse failure ends the walk with the
    gists collected so far.

    Returns:
        a list of GistInfo objects
    """
    # The dump truncated the URL; reconstructed as the GitHub gists endpoint.
    request = requests.Request('GET', 'https://api.github.com/gists')
    link_pattern = re.compile('<([^>]*)>; rel="([^"]*)"')
    gists = []
    while True:
        try:
            # Keep the response object: the original discarded it via
            # `.json()`, so reading `response.headers` below always raised
            # and pagination never advanced past the first page.
            response = self.send(request)
            payload = response.json()
        except Exception:
            break
        for gist in payload:
            try:
                gists.append(GistInfo(gist['id'], gist['public'], gist['description']))
            except KeyError:
                # Malformed entry: skip it.
                continue
        try:
            link = response.headers['link']
            for result in link_pattern.finditer(link):
                url = result.group(1)
                rel = result.group(2)
                if rel == 'next':
                    request.url = url
                    break
            else:
                # No 'next' relation: we are on the last page.
                return gists
        except Exception:
            break
    return gists
Returns a list of the users gists as GistInfo objects Returns: a list of GistInfo objects
codesearchnet
def _ParseUpdateKeyValue(self, parser_mediator, registry_value, key_path):
    """Parses the UpdateKey value.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
        key_path (str): Windows Registry key path.
    """
    if (not registry_value.DataIsString()):
        parser_mediator.ProduceExtractionWarning('unsupported UpdateKey value data type: {0:s}'.format(registry_value.data_type_string))
        return
    date_time_string = registry_value.GetDataAsObject()
    if (not date_time_string):
        parser_mediator.ProduceExtractionWarning('missing UpdateKey value data')
        return
    re_match = self._UPDATE_DATE_TIME_RE.match(date_time_string)
    if (not re_match):
        parser_mediator.ProduceExtractionWarning('unsupported UpdateKey value data: {0!s}'.format(date_time_string))
        return
    (month, day_of_month, year, hours, minutes, seconds, part_of_day) = re_match.groups()
    try:
        year = int(year, 10)
        month = int(month, 10)
        day_of_month = int(day_of_month, 10)
        hours = int(hours, 10)
        minutes = int(minutes, 10)
        seconds = int(seconds, 10)
    except (TypeError, ValueError):
        parser_mediator.ProduceExtractionWarning('invalid UpdateKey date time value: {0!s}'.format(date_time_string))
        return
    # The value uses a 12-hour clock with AM/PM suffix.
    if (part_of_day == 'PM'):
        hours += 12
    time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
    try:
        date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)
        # The stored time has no zone information; treat it as local time.
        date_time.is_local_time = True
    except ValueError:
        parser_mediator.ProduceExtractionWarning('invalid UpdateKey date time value: {0!s}'.format(time_elements_tuple))
        return
    event_data = CCleanerUpdateEventData()
    event_data.key_path = key_path
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_UPDATE, time_zone=parser_mediator.timezone)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses the UpdateKey value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_value (dfwinreg.WinRegistryValue): Windows Registry value. key_path (str): Windows Registry key path.
codesearchnet
def check_docstrings(overwrite: bool=False, check_all: bool=False):
    """Checks that public objects' docstring arguments match their signatures.

    By default only objects defined in files touched by the current git diff
    are checked.

    Args:
        overwrite: whether to fix inconsistencies in place instead of
            reporting them.
        check_all: whether to check all files rather than just the diff.

    Raises:
        ValueError: if any docstring cannot be processed, mismatches its
            signature, or still contains fill-in templates.
    """
    module_diff_files = None
    if not check_all:
        module_diff_files = set()
        repo = Repo(PATH_TO_REPO)
        # Unstaged modifications.
        for modified_file_diff in repo.index.diff(None):
            if modified_file_diff.a_path.startswith('src/transformers'):
                module_diff_files.add(modified_file_diff.a_path)
        # Commits relative to main.
        for modified_file_diff in repo.index.diff(repo.refs.main.commit):
            if modified_file_diff.a_path.startswith('src/transformers'):
                module_diff_files.add(modified_file_diff.a_path)
        if len(module_diff_files) == 0:
            return
        print(' Checking docstrings in the following files:' + '\n - ' + '\n - '.join(module_diff_files))
    failures = []
    hard_failures = []
    to_clean = []
    for name in dir(transformers):
        # Skip private, deliberately-undocumented and explicitly ignored objects.
        if name.startswith('_') or ignore_undocumented(name) or name in OBJECTS_TO_IGNORE:
            continue
        obj = getattr(transformers, name)
        if not callable(obj) or not isinstance(obj, type) or getattr(obj, '__doc__', None) is None:
            continue
        if module_diff_files is not None:
            object_file = find_source_file(getattr(transformers, name))
            object_file_relative_path = 'src/' + str(object_file).split('/src/')[1]
            if object_file_relative_path not in module_diff_files:
                continue
        try:
            result = match_docstring_with_signature(obj)
            if result is not None:
                old_doc, new_doc = result
            else:
                old_doc, new_doc = (None, None)
        except Exception as e:
            print(e)
            hard_failures.append(name)
            continue
        if old_doc != new_doc:
            print('name', name)
            print('old_doc', old_doc)
            print('new_doc', new_doc)
            if overwrite:
                fix_docstring(obj, old_doc, new_doc)
            else:
                failures.append(name)
        elif not overwrite and new_doc is not None and ('<fill_type>' in new_doc or '<fill_docstring>' in new_doc):
            to_clean.append(name)
    error_message = ''
    if len(hard_failures) > 0:
        error_message += 'The argument part of the docstrings of the following objects could not be processed, check they are properly formatted.'
        error_message += '\n' + '\n'.join([f'- {name}' for name in hard_failures])
    if len(failures) > 0:
        error_message += 'The following objects docstrings do not match their signature. Run `make fix-copies` to fix this. In some cases, this error may be raised incorrectly by the docstring checker. If you think this is the case, you can manually check the docstrings and then add the object name to `OBJECTS_TO_IGNORE` in `utils/check_docstrings.py`.'
        error_message += '\n' + '\n'.join([f'- {name}' for name in failures])
    if len(to_clean) > 0:
        error_message += 'The following objects docstrings contain templates you need to fix: search for `<fill_type>` or `<fill_docstring>`.'
        error_message += '\n' + '\n'.join([f'- {name}' for name in to_clean])
    if len(error_message) > 0:
        error_message = 'There was at least one problem when checking docstrings of public objects.\n' + error_message
        raise ValueError(error_message)
Check docstrings of all public objects that are callables and are documented. By default, only checks the diff. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether to fix inconsistencies or not. check_all (`bool`, *optional*, defaults to `False`): Whether to check all files.
github-repos
def update_table(self, table, fields, retry=DEFAULT_RETRY):
    """Patches the listed fields of a table via the API.

    Args:
        table: the table whose listed properties should be sent; when its
            ``etag`` is set, the update is conditional on an ``If-Match``.
        fields: sequence of property names of ``table`` to change.
        retry: retry policy for the API call.

    Returns:
        The table resource returned by the API call.
    """
    resource = table._build_resource(fields)
    # Only send a conditional request when the caller read an ETag.
    headers = None if table.etag is None else {'If-Match': table.etag}
    response = self._call_api(
        retry, method='PATCH', path=table.path, data=resource, headers=headers)
    return Table.from_api_repr(response)
Change some fields of a table. Use ``fields`` to specify which fields to update. At least one field must be provided. If a field is listed in ``fields`` and is ``None`` in ``table``, it will be deleted. If ``table.etag`` is not ``None``, the update will only succeed if the table on the server has the same ETag. Thus reading a table with ``get_table``, changing its fields, and then passing it to ``update_table`` will ensure that the changes will only be saved if no modifications to the table occurred since the read. Args: table (google.cloud.bigquery.table.Table): The table to update. fields (Sequence[str]): The fields of ``table`` to change, spelled as the Table properties (e.g. "friendly_name"). retry (google.api_core.retry.Retry): (Optional) A description of how to retry the API call. Returns: google.cloud.bigquery.table.Table: The table resource returned from the API call.
codesearchnet
def cache_penalty_model(penalty_model, database=None):
    """Stores a penalty model in the sqlite cache.

    Args:
        penalty_model: penalty model to cache; relabelled to canonical
            integer indices first when its graph is not index-labelled.
        database: optional path to the sqlite database file; the default
            cache location is used when omitted.
    """
    if not _is_index_labelled(penalty_model.graph):
        mapping, _inverse = _graph_canonicalization(penalty_model.graph)
        penalty_model = penalty_model.relabel_variables(mapping, inplace=False)
    conn = cache_connect() if database is None else cache_connect(database)
    with conn as cur:
        insert_penalty_model(cur, penalty_model)
    conn.close()
Caching function for penaltymodel_cache. Args: penalty_model (:class:`penaltymodel.PenaltyModel`): Penalty model to be cached. database (str, optional): The path to the desired sqlite database file. If None, will use the default.
codesearchnet
def do_load(self, design, init=False):
    """Loads a design by name, running the child LoadHooks.

    Args:
        design: name of the design json file, without extension; a falsy
            value loads an empty structure.
        init: passed to the LoadHook to tell children whether this load is
            happening at Init time.
    """
    if design:
        filename = self._validated_config_filename(design)
        with open(filename, 'r') as f:
            text = f.read()
        structure = json_decode(text)
    else:
        structure = {}
    # Fall back to the whole structure when the wrapper keys are absent.
    attributes = structure.get('attributes', structure)
    children = structure.get('children', structure)
    (name, mri, x, y, visible) = ([], [], [], [], [])
    for (part_name, d) in attributes.get('layout', {}).items():
        name.append(part_name)
        mri.append('')
        x.append(d['x'])
        y.append(d['y'])
        visible.append(d['visible'])
    self.set_layout(LayoutTable(name, mri, x, y, visible))
    (source, export) = ([], [])
    for (source_name, export_name) in attributes.get('exports', {}).items():
        source.append(source_name)
        export.append(export_name)
    self.exports.set_value(ExportTable(source, export))
    # Only restore the attributes this controller owns.
    our_values = {k: v for (k, v) in attributes.items() if (k in self.our_config_attributes)}
    block = self.block_view()
    block.put_attribute_values(our_values)
    self.run_hooks((LoadHook(p, c, children.get(p.name, {}), init) for (p, c) in self.create_part_contexts(only_visible=False).items()))
    self._mark_clean(design, init)
Load a design name, running the child LoadHooks. Args: design: Name of the design json file, without extension init: Passed to the LoadHook to tell the children if this is being run at Init or not
codesearchnet
def _imputeMissing(X, center=True, unit=True, betaNotUnitVariance=False, betaA=1.0, betaB=1.0):
    """Fills missing SNP values with the column mean and standardizes.

    Args:
        X: scipy array of SNP values; dtype int8 marks missing as -9,
            otherwise missing values are NaN.
        center: mean-center each column (Python path only).
        unit: scale each column to unit variance (Python path only).
        betaNotUnitVariance: use Beta(betaA, betaB) standardization instead
            of unit variance (C-based parser only).
        betaA: Beta shape parameter (C-based parser only).
        betaB: Beta scale parameter (C-based parser only).

    Returns:
        The standardized SNP matrix with scipy.float64 values.
    """
    typeX = X.dtype
    # Missing markers: NaN compares unequal to itself; int8 uses -9.
    if (typeX != SP.int8):
        iNanX = (X != X)
    else:
        iNanX = (X == (- 9))
    if (iNanX.any() or betaNotUnitVariance):
        if cparser:
            print('using C-based imputer')
            if (X.flags['C_CONTIGUOUS'] or (typeX != SP.float32)):
                # The C parser expects Fortran-ordered float32 input.
                X = SP.array(X, order='F', dtype=SP.float32)
                if (typeX == SP.int8):
                    X[iNanX] = SP.nan
                parser.standardize(X, betaNotUnitVariance=betaNotUnitVariance, betaA=betaA, betaB=betaB)
                X = SP.array(X, dtype=SP.float64)
            else:
                parser.standardize(X, betaNotUnitVariance=betaNotUnitVariance, betaA=betaA, betaB=betaB)
                X = SP.array(X, dtype=SP.float64)
        else:
            if betaNotUnitVariance:
                raise NotImplementedError('Beta(betaA,betaB) standardization only in C-based parser, but not found')
            # Per-column count of observed (non-missing) entries.
            nObsX = (~ iNanX).sum(0)
            if (typeX != SP.float64):
                X = SP.array(X, dtype=SP.float64)
            X[iNanX] = 0.0
            sumX = X.sum(0)
            meanX = (sumX / nObsX)
            if center:
                X -= meanX
                # After centering, the imputed mean value is exactly 0.
                X[iNanX] = 0.0
                X_ = X
            else:
                mean = SP.tile(meanX, (X.shape[0], 1))
                X[iNanX] = mean[iNanX]
                X_ = (X - mean)
            if unit:
                stdX = SP.sqrt(((X_ * X_).sum(0) / nObsX))
                # Avoid division by zero for constant columns.
                stdX[(stdX == 0.0)] = 1.0
                X /= stdX
    else:
        # No missing data and no Beta standardization: plain center/scale.
        if (X.dtype != SP.float64):
            X = SP.array(X, dtype=SP.float64)
        if center:
            X -= X.mean(axis=0)
        if unit:
            stdX = X.std(axis=0)
            stdX[(stdX == 0.0)] = 1.0
            X /= stdX
    return X
fill in missing values in the SNP matrix by the mean value optionally center the data and unit-variance it Args: X: scipy.array of SNP values. If dtype=='int8' the missing values are -9, otherwise the missing values are scipy.nan center: Boolean indicator if data should be mean centered Not supported in C-based parser unit: Boolean indicator if data should be normalized to have unit variance Not supported in C-based parser betaNotUnitVariance: use Beta(betaA,betaB) standardization instead of unit variance (only with C-based parser) (default: False) betaA: shape parameter for Beta(betaA,betaB) standardization (only with C-based parser) betaB: scale parameter for Beta(betaA,betaB) standardization (only with C-based parser) Returns: X: scipy.array of standardized SNPs with scipy.float64 values
codesearchnet
def dot_distance(t1, t2, name=None):
    """Negated dot product of t1 and t2, usable as a "distance".

    Args:
        t1: a tensor.
        t2: a tensor of the same size as t1.
        name: optional name for the op.

    Returns:
        The dot distance (negative dot product) between t1 and t2.
    """
    with tf.name_scope(name, 'dot_distance', [t1, t2]) as scope:
        similarity = dot_product(t1, t2, name=scope)
        return -similarity
dot "distance" between t1 and t2. Args: t1: A tensor. t2: A tensor that is the same size as t1. name: Optional name for this op. Returns: The dot distance between t1 and t2.
codesearchnet
def _get_base_converter_args(self):
    """Builds the base flag dict shared by all TFLite converter paths.

    Returns:
        Dict mapping converter argument names to their configured values.
    """
    args = {'input_format': constants.TENSORFLOW_GRAPHDEF, 'allow_custom_ops': self.allow_custom_ops, 'debug_info': self._debug_info, 'target_ops': self.target_spec.supported_ops, 'select_user_tf_ops': self.target_spec.experimental_select_user_tf_ops, 'supported_backends': self.target_spec.experimental_supported_backends, 'unfold_batchmatmul': self.unfold_batchmatmul, 'legalize_custom_tensor_list_ops': self.legalize_custom_tensor_list_ops, 'lower_tensor_list_ops': self._experimental_lower_tensor_list_ops, 'unfold_large_splat_constant': self._experimental_unfold_large_splat_constant, 'default_to_single_batch_in_tensor_list_ops': self._experimental_default_to_single_batch_in_tensor_list_ops, 'tf_quantization_mode': self._experimental_tf_quantization_mode, 'experimental_enable_resource_variables': self.experimental_enable_resource_variables, 'enable_dynamic_update_slice': self._experimental_enable_dynamic_update_slice, 'preserve_assert_op': self._experimental_preserve_assert_op, 'guarantee_all_funcs_one_use': self._experimental_guarantee_all_funcs_one_use, 'allow_all_select_tf_ops': self._experimental_allow_all_select_tf_ops, 'disable_fuse_mul_and_fc': self._experimental_disable_fuse_mul_and_fc, 'quantization_options': self._experimental_quantization_options, 'ir_dump_dir': self.ir_dump_dir, 'ir_dump_pass_regex': self.ir_dump_pass_regex, 'ir_dump_func_regex': self.ir_dump_func_regex, 'enable_timing': self.enable_timing, 'print_ir_before': self.print_ir_before, 'print_ir_after': self.print_ir_after, 'print_ir_module_scope': self.print_ir_module_scope, 'elide_elementsattrs_if_larger': self.elide_elementsattrs_if_larger, 'use_buffer_offset': self._experimental_use_buffer_offset, 'reduce_type_precision': self._experimental_reduce_type_precision, 'use_stablehlo_quantizer': self.experimental_use_stablehlo_quantizer, 'stablehlo_quantizer_config': self.experimental_stablehlo_quantizer_config, 'qdq_conversion_mode': self._experimental_qdq_conversion_mode, 'strict_qdq_mode': self._experimental_strict_qdq, 'disable_per_channel_quantization_for_dense_layers': self._experimental_disable_per_channel_quantization_for_dense_layers, 'enable_composite_direct_lowering': self._experimental_enable_composite_direct_lowering, 'model_origin_framework': self.model_origin_framework, 'canonicalizing_inf_as_min_max_float': self.canonicalizing_inf_as_min_max_float, 'serialize_debug_metadata': self.serialize_debug_metadata, 'unsafe_fuse_dynamic_shaped_broadcast': self._experimental_unsafe_fuse_dynamic_shaped_broadcast}
    # SavedModel inputs carry extra metadata for the converter.
    if self.saved_model_dir:
        args.update({'saved_model_dir': self.saved_model_dir, 'saved_model_version': self._saved_model_version, 'saved_model_tags': self._saved_model_tags, 'saved_model_exported_names': self._saved_model_exported_names})
    if self._experimental_quantization_options:
        logging.warning('Configs from custom methods in experimental_quantization_options may not produce a valid tflite model. Note that currently this option only supports StableHLO path. Setting this option in TFLite path will be a no-op.')
    if self.experimental_use_stablehlo_quantizer:
        self._assign_stablehlo_quantization_config_or_populate_default(args)
    elif self.experimental_stablehlo_quantizer_config is not None:
        raise ValueError('QuantizationConfig should be provided only when experimental_use_stablehlo_quantizer is set to true.')
    return args
Returns the base converter args. Returns: {key str: val}
github-repos
def NewRow(self, value=""):
    """Builds a fresh row attached to this table, with headers populated.

    Args:
        value: initial value assigned to every header column.

    Returns:
        A new row object with ``row`` set to the next index and ``table``
        pointing back at this table.
    """
    fresh = self.row_class()
    fresh.row = self.size + 1
    fresh.table = self
    for column in self._Header():
        fresh[column] = value
    return fresh
Fetches a new, empty row, with headers populated. Args: value: Initial value to set each row entry to. Returns: A Row() object.
juraj-google-style
def fit3d(samples, e_x, e_y, e_z, remove_zeros=False, **kw):
    """Fits a 3D probability distribution with a spline basis.

    Args:
        samples: array of samples from a probability distribution.
        e_x: bin edges along x; consecutive edges define the events.
        e_y: bin edges along y (see e_x).
        e_z: bin edges along z (see e_x).
        remove_zeros: when True, events with no observations are excluded
            from the fit instead of being kept as zero-probability events.
        **kw: forwarded to spline_base3d.

    Returns:
        Tuple of (fitted distribution, raw histogram estimate), both with
        shape (width, height, depth).
    """
    (height, width, depth) = ((len(e_y) - 1), (len(e_x) - 1), (len(e_z) - 1))
    (p_est, _) = np.histogramdd(samples, (e_x, e_y, e_z))
    # Normalise counts to a probability mass function.
    p_est = (p_est / sum(p_est.flat))
    p_est = p_est.flatten()
    if remove_zeros:
        non_zero = (~ (p_est == 0))
    else:
        non_zero = (p_est >= 0)
    basis = spline_base3d(width, height, depth, **kw)
    model = linear_model.BayesianRidge()
    # Fixed: the original text contained invalid index syntax
    # ``basis[(:, non_zero)]`` / ``p_est[(:, np.newaxis)][(non_zero, :)]``;
    # restored to the intended NumPy slicing.
    model.fit(basis[:, non_zero].T, p_est[:, np.newaxis][non_zero, :])
    return (model.predict(basis.T).reshape((width, height, depth)), p_est.reshape((width, height, depth)))
Fits a 3D distribution with splines. Input: samples: Array Array of samples from a probability distribution e_x: Array Edges that define the events in the probability distribution along the x direction. For example, e_x[0] < samples[0] <= e_x[1] picks out all samples that are associated with the first event. e_y: Array See e_x, but for the y direction. remove_zeros: Bool If True, events that are not observed will not be part of the fitting process. If False, those events will be modelled as finfo('float').eps **kw: Arguments that are passed on to spline_base1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Tuple of arrays Sequence of knots that were used for the spline basis (x,y)
codesearchnet
def construct_error_message(driver_id, error_type, message, timestamp):
    """Constructs a serialized ErrorTableData flatbuffer.

    Args:
        driver_id: ID of the driver the error should go to; its binary
            form is stored. A nil ID routes the error to all drivers.
        error_type: string type of the error.
        message: the error message string.
        timestamp: the time of the error.

    Returns:
        The serialized flatbuffer as bytes.
    """
    builder = flatbuffers.Builder(0)
    # Strings must be created before the table is started.
    driver_offset = builder.CreateString(driver_id.binary())
    error_type_offset = builder.CreateString(error_type)
    message_offset = builder.CreateString(message)
    ray.core.generated.ErrorTableData.ErrorTableDataStart(builder)
    ray.core.generated.ErrorTableData.ErrorTableDataAddDriverId(
        builder, driver_offset)
    ray.core.generated.ErrorTableData.ErrorTableDataAddType(
        builder, error_type_offset)
    ray.core.generated.ErrorTableData.ErrorTableDataAddErrorMessage(
        builder, message_offset)
    ray.core.generated.ErrorTableData.ErrorTableDataAddTimestamp(
        builder, timestamp)
    error_data_offset = ray.core.generated.ErrorTableData.ErrorTableDataEnd(
        builder)
    builder.Finish(error_data_offset)
    return bytes(builder.Output())
Construct a serialized ErrorTableData object. Args: driver_id: The ID of the driver that the error should go to. If this is nil, then the error will go to all drivers. error_type: The type of the error. message: The error message. timestamp: The time of the error. Returns: The serialized object.
juraj-google-style
def from_year_month_day(year, month, day, validate=True):
    """Creates a DateTensor from tensors of years, months and days.

    Args:
        year: int32 Tensor; elements must be positive.
        month: int32 Tensor of the same shape; elements in [1, 12].
        day: int32 Tensor of the same shape; elements in [1, 31] and valid
            dates together with the corresponding month/year elements.
        validate: whether to add assertions validating the inputs.

    Returns:
        A DateTensor object.
    """
    year = tf.convert_to_tensor(year, tf.int32)
    month = tf.convert_to_tensor(month, tf.int32)
    day = tf.convert_to_tensor(day, tf.int32)
    control_deps = []
    if validate:
        control_deps.append(tf.debugging.assert_positive(year, message='Year must be positive.'))
        control_deps.append(tf.debugging.assert_greater_equal(month, constants.Month.JANUARY.value, message=f'Month must be >= {constants.Month.JANUARY.value}'))
        # Fixed: this message was a plain (non-f) string and named JANUARY
        # although the bound being checked is DECEMBER.
        control_deps.append(tf.debugging.assert_less_equal(month, constants.Month.DECEMBER.value, message=f'Month must be <= {constants.Month.DECEMBER.value}'))
        control_deps.append(tf.debugging.assert_positive(day, message='Day must be positive.'))
        is_leap = date_utils.is_leap_year(year)
        days_in_months = tf.constant(_DAYS_IN_MONTHS_COMBINED, tf.int32)
        # Leap years index into the second block of 12 month lengths.
        max_days = tf.gather(days_in_months, month + 12 * tf.dtypes.cast(is_leap, np.int32))
        control_deps.append(tf.debugging.assert_less_equal(day, max_days, message='Invalid day-month pairing.'))
        with tf.compat.v1.control_dependencies(control_deps):
            year = tf.identity(year)
            month = tf.identity(month)
            day = tf.identity(day)
    with tf.compat.v1.control_dependencies(control_deps):
        ordinal = date_utils.year_month_day_to_ordinal(year, month, day)
        return DateTensor(ordinal, year, month, day)
Creates DateTensor from tensors of years, months and days. Args: year: Tensor of int32 type. Elements should be positive. month: Tensor of int32 type of same shape as `year`. Elements should be in range `[1, 12]`. day: Tensor of int32 type of same shape as `year`. Elements should be in range `[1, 31]` and represent valid dates together with corresponding elements of `month` and `year` Tensors. validate: Whether to validate the dates. Returns: DateTensor object. #### Example ```python year = tf.constant([2015, 2017], dtype=tf.int32) month = tf.constant([4, 12], dtype=tf.int32) day = tf.constant([15, 30], dtype=tf.int32) date_tensor = tff.datetime.dates_from_year_month_day(year, month, day) ```
github-repos
def write_to_text(pcoll, path: str):
    """Writes a single-field schema'd PCollection to a set of text files.

    Args:
        pcoll: input PCollection; its element type must carry a schema
            with exactly one field, which is stringified per element.
        path: file path prefix for the output shards.

    Raises:
        ValueError: if the element type has no schema or not exactly one
            field.
    """
    try:
        named_fields = schemas.named_fields_from_element_type(pcoll.element_type)
        field_names = [name for name, _ in named_fields]
    except Exception as exn:
        raise ValueError('WriteToText requires an input schema with exactly one field.') from exn
    if len(field_names) != 1:
        raise ValueError('WriteToText requires an input schema with exactly one field, got %s' % field_names)
    sole_field_name, = field_names
    stringify = beam.Map(lambda x: str(getattr(x, sole_field_name)))
    return pcoll | stringify | beam.io.WriteToText(path)
Writes a PCollection to a (set of) text files(s). The input must be a PCollection whose schema has exactly one field. Args: path (str): The file path to write to. The files written will begin with this prefix, followed by a shard identifier.
github-repos
def json(cls, message):
    """Pretty-prints a message as JSON-like output.

    Args:
        cls: the class this helper is bound to (unused).
        message: the message to print; OrderedDict instances (and their
            subclasses) are converted to plain dicts first for cleaner
            output.
    """
    # isinstance instead of ``type(...) is``: idiomatic and also covers
    # OrderedDict subclasses (backward compatible for exact OrderedDicts).
    if isinstance(message, OrderedDict):
        pprint(dict(message))
    else:
        pprint(message)
Print a nice JSON output Args: message: the message to print
juraj-google-style
def SetCTypesForLibrary(libname, fn_table):
    """Loads an ObjC library and applies argument/return type prototypes.

    Args:
        libname: library name string passed to ctypes.util.find_library.
        fn_table: iterable of (function name, [argument types], return
            type) tuples describing the functions to configure.

    Returns:
        The loaded ctypes.CDLL with types set according to fn_table.

    Raises:
        ErrorLibNotFound: if the named library cannot be located.
    """
    libpath = ctypes.util.find_library(libname)
    if not libpath:
        raise ErrorLibNotFound('Library %s not found' % libname)
    lib = ctypes.cdll.LoadLibrary(libpath)
    for (function, args, result) in fn_table:
        prototype = getattr(lib, function)
        prototype.argtypes = args
        prototype.restype = result
    return lib
Set function argument types and return types for an ObjC library. Args: libname: Library name string fn_table: List of (function, [arg types], return types) tuples Returns: ctypes.CDLL with types set according to fn_table Raises: ErrorLibNotFound: Can't find specified lib
codesearchnet
def from_dictionary(cls, options):
    """Builds a PipelineOptions from a dictionary of option/value pairs.

    Args:
        cls: the options class to instantiate with the derived flag list.
        options: mapping of option names to values; True booleans become
            bare flags, False booleans emit their disabling twin when one
            exists, lists are repeated, dicts are JSON-encoded, and None
            values are skipped with a warning.

    Returns:
        An instance of ``cls`` constructed from the equivalent flags.
    """
    flags = []
    for key, value in options.items():
        if isinstance(value, bool):
            if value:
                flags.append('--%s' % key)
            elif key in _FLAG_THAT_SETS_FALSE_VALUE:
                # Some flags default to True; emit their disabling twin.
                disabling_flag = _FLAG_THAT_SETS_FALSE_VALUE[key]
                flags.append('--%s' % disabling_flag)
        elif isinstance(value, list):
            flags.extend('--%s=%s' % (key, item) for item in value)
        elif isinstance(value, dict):
            flags.append('--%s=%s' % (key, json.dumps(value)))
        elif value is None:
            logging.warning('Not setting flag with value None: %s', key)
        else:
            flags.append('--%s=%s' % (key, value))
    return cls(flags)
Returns a PipelineOptions from a dictionary of arguments. Args: options: Dictionary of argument value pairs. Returns: A PipelineOptions object representing the given arguments.
github-repos
def abspath(self, path):
    """Resolves ``path`` to an absolute path.

    A leading "~"/"~user" is expanded first; any path that is still
    relative is then joined onto ``self.base_path`` (which may itself
    contain a "~" that gets expanded).

    Args:
        path: the path to transform to an absolute path.

    Returns:
        The absolute path as a string.
    """
    # Expand a leading "~" before joining: os.path.expanduser() only
    # recognises "~" at the start of a path, so the original order
    # (join first, expand second) silently left "~" unexpanded.
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        path = os.path.expanduser(os.path.join(self.base_path, path))
    return path
Transform the path to an absolute path Args: path (string): The path to transform to an absolute path Returns: string: The absolute path to the file
codesearchnet
def DeregisterHelper(cls, resolver_helper):
    """Removes a registered path specification resolver helper.

    Args:
        cls: class holding the registry of resolver helpers.
        resolver_helper: resolver helper whose type indicator identifies
            the registration to remove.

    Raises:
        KeyError: if no helper is registered for the type indicator.
    """
    type_indicator = resolver_helper.type_indicator
    if type_indicator not in cls._resolver_helpers:
        raise KeyError(
            'Resolver helper object not set for type indicator: {0:s}.'.format(
                type_indicator))
    del cls._resolver_helpers[type_indicator]
Deregisters a path specification resolver helper. Args: resolver_helper (ResolverHelper): resolver helper. Raises: KeyError: if resolver helper object is not set for the corresponding type indicator.
juraj-google-style
def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type, name='restore_slice', preferred_shard=-1):
    """Restores a tensor slice from checkpoint files matching a pattern.

    Example usage:
        RestoreSlice("/foo/bar-?????-of-?????", "w", "10 10 0,2:-", DT_FLOAT)

    Args:
        file_pattern: file pattern used to match a set of checkpoint files.
        tensor_name: name of the tensor to restore.
        shape_and_slice: shape-and-slice spec string of the slice.
        tensor_type: type of the tensor to restore; reduced to its base
            dtype before the op is created.
        name: optional name for the op.
        preferred_shard: optional shard to open first in the checkpoint.

    Returns:
        A tensor of the base dtype of ``tensor_type``.
    """
    base_type = dtypes.as_dtype(tensor_type).base_dtype
    return gen_io_ops.restore_slice(file_pattern, tensor_name, shape_and_slice, base_type, preferred_shard, name=name)
Restore a tensor slice from a set of files with a given pattern. Example usage: RestoreSlice("/foo/bar-?????-of-?????", "w", "10 10 0,2:-", DT_FLOAT) Args: file_pattern: the file pattern used to match a set of checkpoint files. tensor_name: the name of the tensor to restore. shape_and_slice: the shape-and-slice spec of the slice. tensor_type: the type of the tensor to restore. name: string. Optional name for the op. preferred_shard: Int. Optional shard to open first in the checkpoint file. Returns: A tensor of type "tensor_type".
github-repos
def process(self, element):
    """Runs batch prediction for one batch of input lines on a TF graph.

    Args:
        element: list of strings, representing one batch input to the TF
            graph.

    Yields:
        Dicts mapping output aliases to (list-converted) values, or a
        TaggedOutput on the 'errors' tag carrying (message, element).
    """
    import collections
    import apache_beam as beam
    num_in_batch = 0
    try:
        assert self._session is not None
        feed_dict = collections.defaultdict(list)
        # All input lines feed the single input tensor of the graph.
        # ``next(iter(...))`` works on both Python 2 and 3; the original
        # ``.values()[0]`` fails on Python 3 where dict views are not
        # indexable. Hoisted out of the loop as it is loop-invariant.
        input_alias = next(iter(self._input_alias_map.values()))
        for line in element:
            if line.endswith('\n'):
                line = line[:-1]
            feed_dict[input_alias].append(line)
            num_in_batch += 1
        batch_result = self._session.run(fetches=self._tensor_names, feed_dict=feed_dict)
        if num_in_batch > 1:
            # Transpose from per-tensor batches to per-example results.
            for result in zip(*batch_result):
                predictions = {}
                for name, value in zip(self._aliases, result):
                    predictions[name] = (value.tolist() if getattr(value, 'tolist', None) else value)
                yield predictions
        else:
            predictions = {}
            for i in range(len(self._aliases)):
                value = batch_result[i]
                value = (value.tolist() if getattr(value, 'tolist', None) else value)
                predictions[self._aliases[i]] = value
            yield predictions
    except Exception as e:
        yield beam.pvalue.TaggedOutput('errors', (str(e), element))
Run batch prediction on a TF graph. Args: element: list of strings, representing one batch input to the TF graph.
juraj-google-style
def num_lineages_at(self, distance):
    """Counts the lineages of this tree crossing a given distance from the root.

    Args:
        distance: non-negative int or float distance away from the root.

    Returns:
        The number of lineages that exist ``distance`` away from the root.

    Raises:
        TypeError: if ``distance`` is not an int or a float.
        RuntimeError: if ``distance`` is negative.
    """
    if not isinstance(distance, (int, float)):
        raise TypeError("distance must be an int or a float")
    if distance < 0:
        raise RuntimeError("distance cannot be negative")
    depth_of = {}
    pending = deque([self.root])
    count = 0
    # Breadth-first walk; stop descending once a branch crosses the cut.
    while pending:
        node = pending.popleft()
        node_depth = 0 if node.is_root() else depth_of[node.parent]
        if node.edge_length is not None:
            node_depth += node.edge_length
        depth_of[node] = node_depth
        if node_depth < distance:
            pending.extend(node.children)
        elif node.parent is None or depth_of[node.parent] < distance:
            # This edge spans the cut (or the root itself lies past it).
            count += 1
    return count
Returns the number of lineages of this ``Tree`` that exist ``distance`` away from the root Args: ``distance`` (``float``): The distance away from the root Returns: ``int``: The number of lineages that exist ``distance`` away from the root
juraj-google-style
def cellsiter_to_dataframe(cellsiter, args, drop_allna=True):
    """Converts multiple cells into a single pandas DataFrame.

    If ``args`` is empty, all values are included; otherwise the cells
    must have shareable parameters.

    Args:
        cellsiter: mapping from cells names to CellsImpl objects.
        args: sequence of arguments selecting which values to include.
        drop_allna: skip cells whose frame is entirely NaN.

    Returns:
        A DataFrame indexed by the collected parameters (empty DataFrame
        when no cells contribute data).
    """
    from modelx.core.cells import shareable_parameters
    if len(args):
        indexes = shareable_parameters(cellsiter)
    else:
        indexes = get_all_params(cellsiter.values())
    result = None
    for cells in cellsiter.values():
        df = cells_to_dataframe(cells, args)
        if drop_allna and df.isnull().all().all():
            continue
        if df.index.names != [None]:
            if isinstance(df.index, pd.MultiIndex):
                if _pd_ver < (0, 20):
                    # Old pandas: reset_index fails on NaN index levels.
                    df = _reset_naindex(df)
            df = df.reset_index()
        # Add any parameters this frame lacks so outer merges line up.
        missing_params = set(indexes) - set(df)
        for params in missing_params:
            df[params] = np.nan
        if result is None:
            result = df
        else:
            try:
                result = pd.merge(result, df, how="outer")
            except MergeError:
                # No common columns to merge on; concatenate side by side.
                result = pd.concat([result, df], axis=1)
            except ValueError:
                # Dtype mismatch on a shared column: when exactly one side
                # is object-typed, coerce the other side to object too,
                # then retry the merge.
                cols = set(result.columns) & set(df.columns)
                for col in cols:
                    if (
                        len(
                            [
                                str(frame[col].dtype)
                                for frame in (result, df)
                                if str(frame[col].dtype) == "object"
                            ]
                        )
                        == 1
                    ):
                        if str(result[col].dtype) == "object":
                            frame = df
                        else:
                            frame = result
                        frame[[col]] = frame[col].astype("object")
                result = pd.merge(result, df, how="outer")
    if result is None:
        return pd.DataFrame()
    else:
        return result.set_index(indexes) if indexes else result
juraj-google-style
def __init__(self, range_str='', make_token=AlphanumericVersionToken, invalid_bound_error=True):
    """Creates a VersionRange object.

    Args:
        range_str: range string such as "3", "3+<4.5" or "2|6+"; the
            parsed bounds are optimised, so str(self) may differ from
            range_str. None produces a range with no bounds.
        make_token: version token class used by the parser.
        invalid_bound_error (bool): if True, raise on impossible ranges
            such as "3+<2".

    Raises:
        VersionError: on syntax errors or invalid ranges.
    """
    self._str = None
    self.bounds = []
    if range_str is None:
        return
    try:
        parser = _VersionRangeParser(range_str, make_token, invalid_bound_error=invalid_bound_error)
        bounds = parser.bounds
    except ParseException as e:
        raise VersionError("Syntax error in version range '%s': %s" % (range_str, str(e)))
    except VersionError as e:
        raise VersionError("Invalid version range '%s': %s" % (range_str, str(e)))
    if bounds:
        # Merge overlapping/adjacent bounds into a minimal set.
        self.bounds = self._union(bounds)
    else:
        self.bounds.append(_Bound.any)
Create a VersionRange object. Args: range_str: Range string, such as "3", "3+<4.5", "2|6+". The range will be optimised, so the string representation of this instance may not match range_str. For example, "3+<6|4+<8" == "3+<8". make_token: Version token class to use. invalid_bound_error (bool): If True, raise an exception if an impossible range is given, such as '3+<2'.
juraj-google-style
def DeserializeExclusiveData(self, reader):
    """Deserializes the contract-specific (exclusive) transaction fields.

    Args:
        reader (neo.IO.BinaryReader): reader positioned at the exclusive
            data.
    """
    if self.Version > 1:
        # NOTE(review): this only logs and continues; confirm whether an
        # unsupported version should raise a format error instead.
        logger.error("format exception...")
    self.Code = FunctionCode()
    self.Code.Deserialize(reader)
    # NeedStorage was introduced in version 1 of the payload.
    if self.Version >= 1:
        self.NeedStorage = reader.ReadBool()
    else:
        self.NeedStorage = False
    self.Name = reader.ReadVarString()
    self.CodeVersion = reader.ReadVarString()
    self.Author = reader.ReadVarString()
    self.Email = reader.ReadVarString()
    self.Description = reader.ReadVarString()
Deserialize full object. Args: reader (neo.IO.BinaryReader):
juraj-google-style
def is_unitary(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:
    """Determines whether a matrix is approximately unitary.

    A matrix is unitary if it is square and its adjoint is its inverse.

    Args:
        matrix: the matrix to check.
        rtol: per-entry relative tolerance on equality.
        atol: per-entry absolute tolerance on equality.

    Returns:
        Whether the matrix is unitary within the given tolerance.
    """
    rows = matrix.shape[0]
    if rows != matrix.shape[1]:
        return False
    product = matrix @ matrix.conj().T
    return np.allclose(product, np.eye(rows), rtol=rtol, atol=atol)
Determines if a matrix is approximately unitary. A matrix is unitary if it's square and its adjoint is its inverse. Args: matrix: The matrix to check. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the matrix is unitary within the given tolerance.
codesearchnet
def _batch_prepare_for_model(self, batch_ids_pairs: list[Union[PreTokenizedInputPair, tuple[list[int], None]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True, split_special_tokens: bool=False) -> BatchEncoding:
    """Prepares batches of input-id sequences (or pairs) for the model.

    Adds special tokens, truncates overflowing sequences per the given
    strategy, then pads the whole batch in one pass.

    Args:
        batch_ids_pairs: list of tokenized input ids or input ids pairs;
            the remaining arguments follow the standard tokenizer
            semantics.

    Returns:
        A BatchEncoding with per-key lists covering the whole batch.
    """
    batch_outputs = {}
    for first_ids, second_ids in batch_ids_pairs:
        # Padding and the attention mask are deferred to the batch-level
        # self.pad() call below, hence DO_NOT_PAD here.
        outputs = self.prepare_for_model(first_ids, second_ids, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose, split_special_tokens=split_special_tokens)
        for key, value in outputs.items():
            if key not in batch_outputs:
                batch_outputs[key] = []
            batch_outputs[key].append(value)
    batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
    return batch_outputs
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs
github-repos
def stop(self, block=True):
    """Stops all active worker threads and rejects new tasks.

    Args:
        block: when True, wait until every thread has terminated.
    """
    self._stop = True
    self.empty_queue()
    # Wake each active worker with a no-op task so it can observe the
    # stop flag and exit.
    active = self.threads_active()
    for _ in range(active):
        self._queue.put(SetPrio(target=DoNothing))
    if not block:
        return
    self.join()
    self.empty_queue()
Stops all active threads and rejects new tasks to be added Args: block (bool): If True, block until all threads are closed
juraj-google-style
def notify(self, method, params=None):
    """Sends a JSON-RPC notification to the client.

    Args:
        method (str): method name of the notification to send.
        params: optional payload; omitted from the message when None.
    """
    log.debug('Sending notification: %s %s', method, params)
    notification = {'jsonrpc': JSONRPC_VERSION, 'method': method}
    if params is not None:
        notification['params'] = params
    self._consumer(notification)
Send a JSON RPC notification to the client. Args: method (str): The method name of the notification to send params (any): The payload of the notification
codesearchnet
def _process_req_body(self, body):
    """Processes the body of an HTTP request.

    Args:
        body: raw request body string.

    Returns:
        The parsed JSON as a dict when the body is valid JSON; otherwise
        the body parsed as key=value form data (blank values kept).
    """
    try:
        return json.loads(body)
    except ValueError:
        # Not JSON; fall back to urlencoded form parsing.
        return urlparse.parse_qs(body, keep_blank_values=True)
Process the body of the HTTP request. If the body is valid JSON, return the JSON as a dict. Else, convert the key=value format to a dict and return that. Args: body: The body of the HTTP request.
juraj-google-style
def process_document_events(events, use_buffers=True):
    """Serializes document events into a JSON patch string plus buffers.

    Args:
        events: list of events to be translated into patches.
        use_buffers: when True, binary payloads are collected into the
            returned buffer list instead of being inlined in the JSON.

    Returns:
        Tuple of (JSON string, list of optional buffers; empty list when
        buffers are disabled).
    """
    json_events = []
    references = set()
    buffers = ([] if use_buffers else None)
    for event in events:
        # generate() accumulates referenced models and optional buffers.
        json_events.append(event.generate(references, buffers))
    json = {'events': json_events, 'references': references_json(references)}
    return (serialize_json(json), (buffers if use_buffers else []))
Create a JSON string describing a patch to be applied as well as any optional buffers. Args: events : list of events to be translated into patches Returns: str, list : JSON string which can be applied to make the given updates to obj as well as any optional buffers
codesearchnet
def update(self, iterable):
    """Adds an ordered iterable of items, recording ordering edges.

    Args:
        iterable: ordered items; their relative order will be respected
            in the TopoSet (in the absence of cycles).
    """
    # Pair consecutive items (last one padded with the fill sentinel) so
    # each ordering constraint is stored as an edge.
    for pair in pairwise_longest(iterable, fillvalue=_FILL):
        self._edges.append(pair)
    # Invalidate the cached ordering; recomputed on next access.
    self._results = None
Update with an ordered iterable of items. Args: iterable: An ordered iterable of items. The relative order of the items in this iterable will be respected in the TopoSet (in the absence of cycles).
codesearchnet
def __init__(self, max_entity_count=MAX_ENTITY_COUNT, mapreduce_spec=None):
    """Constructor.

    Args:
        max_entity_count: maximum number of entities before a pool is
            flushed to the datastore.
        mapreduce_spec: optional MapperSpec; its params may set
            "force_ops_writes" to force writes on flush.
    """
    self.max_entity_count = max_entity_count
    params = mapreduce_spec.params if mapreduce_spec is not None else {}
    self.force_writes = bool(params.get("force_ops_writes", False))
    # Separate pools for db/ndb puts and deletes, each with its own flush.
    self.puts = _ItemList(max_entity_count, self._flush_puts, repr_function=self._db_repr)
    self.deletes = _ItemList(max_entity_count, self._flush_deletes)
    self.ndb_puts = _ItemList(max_entity_count, self._flush_ndb_puts, repr_function=self._ndb_repr)
    self.ndb_deletes = _ItemList(max_entity_count, self._flush_ndb_deletes)
Constructor. Args: max_entity_count: maximum number of entities before flushing it to db. mapreduce_spec: An optional instance of MapperSpec.
juraj-google-style
def delete_project(self, project):
    """Deletes a project and, recursively, all of its content.

    Args:
        project (str): UUID of the project to delete.

    Returns:
        None

    Raises:
        StorageArgumentException: if ``project`` is not a valid UUID.
    """
    if not is_valid_uuid(project):
        raise StorageArgumentException(
            'Invalid UUID for project: {0}'.format(project))
    endpoint = 'project/{}/'.format(project)
    self._authenticated_request.to_endpoint(endpoint).delete()
Delete a project. It will recursively delete all the content. Args: project (str): The UUID of the project to be deleted. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: 403 StorageNotFoundException: 404 HTTPError: other non-20x error codes
juraj-google-style
def buckets_delete(self, bucket):
    """Issues a DELETE request for a storage bucket.

    Args:
        bucket: the name of the bucket to delete.

    Raises:
        Exception if there is an error performing the operation.
    """
    bucket_path = Api._BUCKET_PATH % bucket
    url = Api._ENDPOINT + bucket_path
    google.datalab.utils.Http.request(
        url, method='DELETE', credentials=self._credentials, raw_response=True)
Issues a request to delete a bucket. Args: bucket: the name of the bucket. Raises: Exception if there is an error performing the operation.
codesearchnet
def _bisect(self, begin, end, listener):
    """Search for the zero-crossing of the watched parameter by bisection.

    Args:
        begin (Orbit): lower bound of the bracketing interval.
        end (Orbit): upper bound of the bracketing interval.
        listener (Listener): callable whose sign change brackets the event.

    Returns:
        Orbit: the orbit at the located event, with ``event`` set from
        ``listener.info``.
    """
    # Half the bracketing interval; iteration stops once it is below
    # the configured bisection tolerance.
    step = (end.date - begin.date) / 2
    while abs(step) >= self._eps_bisect:
        date = begin.date + step
        if self.SPEAKER_MODE == "global":
            orb = self.propagate(date)
        else:
            orb = begin.propagate(date)
        # Same sign on both ends means the crossing is in the upper
        # half: move the lower bound; otherwise tighten the upper bound.
        if listener(begin) * listener(orb) > 0:
            begin = orb
        else:
            end = orb
        step = (end.date - begin.date) / 2
    else:
        # Loop finished without break: annotate the converged orbit.
        end.event = listener.info(end)
    return end
Search for the zero-crossing of the watched parameter by bisection. Args: begin (Orbit): lower bound of the bracketing interval. end (Orbit): upper bound of the bracketing interval. listener (Listener): callable whose sign change brackets the event. Returns: Orbit: the orbit at the detected event, with its event info set.
juraj-google-style
def index(self, ref, columns):
    """Create an index on the columns of a partition.

    Args:
        ref (str): id, vid, name or versioned name of the partition.
        columns (list of str): names of the columns needing indexes.
    """
    from ambry.orm.exc import NotFoundError
    logger.debug('Creating index for partition.\n    ref: {}, columns: {}'.format(ref, columns))
    connection = self._backend._get_connection()
    # Fall back to the raw ref when it does not resolve to a partition.
    try:
        target = self._library.partition(ref)
    except NotFoundError:
        target = ref
    self._backend.index(connection, target, columns)
Create an index on the columns. Args: ref (str): id, vid, name or versioned name of the partition. columns (list of str): names of the columns needed indexes.
juraj-google-style
def update(self, item):
    """Add a collector item, updating or creating its stage entry.

    Args:
        item (CollectorUpdate): event data like stage, timestamp and status.
    """
    stages = self.data.setdefault(item.matrix, [])
    matching = Select(stages).where(
        lambda entry: entry.stage == item.stage).build()
    if matching:
        # Stage already tracked: refresh status and append the event.
        existing = matching[0]
        existing.status = item.status
        existing.add(item.timestamp, item.information)
    else:
        created = CollectorStage(stage=item.stage, status=item.status)
        created.add(item.timestamp, item.information)
        stages.append(created)
Add a collector item. Args: item (CollectorUpdate): event data like stage, timestamp and status.
juraj-google-style
def __init__(self, key_dtype, value_dtype):
    """Construct a table initializer object.

    Args:
        key_dtype: Type of the table keys.
        value_dtype: Type of the table values.
    """
    self._key_dtype, self._value_dtype = key_dtype, value_dtype
Construct a table initializer object. Args: key_dtype: Type of the table keys. value_dtype: Type of the table values.
github-repos
def set(self, key, val):
    """Store ``val`` under ``key`` in the on-disk pickle cache.

    Args:
        key (str): Key for the value.
        val: Value to store.

    Returns:
        None
    """
    self._create_file_if_none_exists()
    with open(self.filename, 'r+b') as file_object:
        cache_pickle = pickle.load(file_object)
        cache_pickle[key] = val
        file_object.seek(0)
        pickle.dump(cache_pickle, file_object)
        # BUG FIX: drop stale trailing bytes when the new pickle is
        # shorter than the previous one; without this the file keeps
        # leftover data past the rewritten stream.
        file_object.truncate()
Sets a value in a key. Args: key (str): Key for the value. val: Value to set. Returns: Retrieved value.
juraj-google-style
def _FindStmtParent(node):
    """Find the nearest ancestor of node that is a statement node.

    Arguments:
        node: node to start from

    Returns:
        Nearest statement ancestor (or node itself, if suitable).
    """
    current = node
    # Walk up the tree until a statement node is reached.
    while pytree_utils.NodeName(current) not in _STATEMENT_NODES:
        current = current.parent
    return current
Find the nearest parent of node that is a statement node. Arguments: node: node to start from Returns: Nearest parent (or node itself, if suitable).
github-repos
def add_mutex_switch(parser, dest, arguments=set(), default=None, single_arg=False, required=False):
    """Adds mutually exclusive switch arguments.

    Args:
        parser: argparse.ArgumentParser (or group) to add arguments to.
        dest: destination attribute name for the parsed value.
        arguments: a dictionary that maps switch name to helper text.
            Use sets to skip help texts.
        default: name of the switch selected when none is given; must be
            one of ``arguments`` when provided.
        single_arg: if True, expose one ``--<dest>`` option with choices
            instead of one ``--<name>`` flag per switch.
        required: whether the user must select one of the switches.

    Returns:
        The mutually exclusive group, or the single argument action when
        ``single_arg`` is True.
    """
    if default is not None:
        assert default in arguments

    if isinstance(arguments, set):
        arguments = {k: None for k in arguments}

    if not single_arg:
        mg = parser.add_mutually_exclusive_group(required=required)
        for name, help_text in arguments.items():
            kwargs = {
                "action": "store_const",
                "dest": dest,
                "const": name,
                "help": help_text
            }
            if default == name:
                kwargs["default"] = name
            mg.add_argument("--{}".format(name), **kwargs)
        return mg
    else:
        kwargs = {
            "dest": dest,
            "type": str,
            "default": default,
            # BUG FIX: ``required`` was previously ignored in
            # single-argument mode.
            "required": required,
            "help": "\n".join("{}: {}".format(k, v) for k, v in arguments.items()),
            "choices": list(arguments.keys())
        }
        return parser.add_argument("--{}".format(dest), **kwargs)
Adds mutually exclusive switch arguments. Args: arguments: a dictionary that maps switch name to helper text. Use sets to skip help texts.
juraj-google-style
def save_def_args_in_temp(self, call_args, def_args, line_number, saved_function_call_index, first_node):
    """Save the arguments of the definition being called into temp nodes.

    Visits call arguments that are themselves calls, chaining their
    return values into the CFG.

    Args:
        call_args(list[ast.Name]): Of the call being made.
        def_args(ast_helper.Arguments): Of the definition being called.
        line_number(int): Of the call being made.
        saved_function_call_index(int): Unique number for each call.
        first_node(EntryOrExitNode or None or RestoreNode): Used to
            connect previous statements to this function.

    Returns:
        args_mapping(dict): Mapping of call argument to definition argument.
        first_node(EntryOrExitNode or None or RestoreNode): Used to
            connect previous statements to this function.
    """
    args_mapping = dict()
    last_return_value_of_nested_call = None
    for (i, call_arg) in enumerate(call_args):
        # Unique temp name, e.g. "temp_3_argname", for this call site.
        def_arg_temp_name = ((('temp_' + str(saved_function_call_index)) + '_') + def_args[i])
        return_value_of_nested_call = None
        if isinstance(call_arg, ast.Call):
            # Nested call: visit it first, then restore its result
            # into the temp variable.
            return_value_of_nested_call = self.visit(call_arg)
            restore_node = RestoreNode(((def_arg_temp_name + ' = ') + return_value_of_nested_call.left_hand_side), def_arg_temp_name, [return_value_of_nested_call.left_hand_side], line_number=line_number, path=self.filenames[(- 1)])
            if (return_value_of_nested_call in self.blackbox_assignments):
                self.blackbox_assignments.add(restore_node)
        else:
            # Plain argument: capture its label and right-hand-side names.
            call_arg_label_visitor = LabelVisitor()
            call_arg_label_visitor.visit(call_arg)
            call_arg_rhs_visitor = RHSVisitor()
            call_arg_rhs_visitor.visit(call_arg)
            restore_node = RestoreNode(((def_arg_temp_name + ' = ') + call_arg_label_visitor.result), def_arg_temp_name, call_arg_rhs_visitor.result, line_number=line_number, path=self.filenames[(- 1)])
        if (not first_node):
            first_node = restore_node
        if isinstance(call_arg, ast.Call):
            # Chain consecutive nested calls together; the first one
            # becomes the inner-most call of the entry node.
            if last_return_value_of_nested_call:
                if isinstance(return_value_of_nested_call, BBorBInode):
                    last_return_value_of_nested_call.connect(return_value_of_nested_call)
                else:
                    last_return_value_of_nested_call.connect(return_value_of_nested_call.first_node)
            elif isinstance(return_value_of_nested_call, BBorBInode):
                first_node.inner_most_call = return_value_of_nested_call
            else:
                first_node.inner_most_call = return_value_of_nested_call.first_node
            last_return_value_of_nested_call = return_value_of_nested_call
        self.connect_if_allowed(self.nodes[(- 1)], restore_node)
        self.nodes.append(restore_node)
        if isinstance(call_arg, ast.Call):
            args_mapping[return_value_of_nested_call.left_hand_side] = def_args[i]
        else:
            args_mapping[def_args[i]] = call_arg_label_visitor.result
    return (args_mapping, first_node)
Save the arguments of the definition being called. Visit the arguments if they're calls. Args: call_args(list[ast.Name]): Of the call being made. def_args(ast_helper.Arguments): Of the definition being called. line_number(int): Of the call being made. saved_function_call_index(int): Unique number for each call. first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function. Returns: args_mapping(dict): A mapping of call argument to definition argument. first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.
codesearchnet
def add_batch_parser(subparsers, parent_parser):
    """Adds argument parsers for the batch subcommands.

    Args:
        subparsers: Add parsers to this subparser object
        parent_parser: The parent argparse.ArgumentParser object
    """
    parser = subparsers.add_parser(
        'batch',
        help='Displays information about batches and submit new batches',
        description='Provides subcommands to display Batch information and '
                    'submit Batches to the validator via the REST API.')
    grand_parsers = parser.add_subparsers(title='subcommands',
                                          dest='subcommand')
    grand_parsers.required = True
    # Register each batch subcommand on the shared subparser group.
    for register in (add_batch_list_parser,
                     add_batch_show_parser,
                     add_batch_status_parser,
                     add_batch_submit_parser):
        register(grand_parsers, parent_parser)
Adds arguments parsers for the batch list, batch show and batch status commands Args: subparsers: Add parsers to this subparser object parent_parser: The parent argparse.ArgumentParser object
codesearchnet
def applicable_decision_points(self, dna_spec: pg.geno.DNASpec, global_state: pg.geno.AttributeDict, step: int) -> List[pg.geno.DecisionPoint]:
    """Returns applicable decision points for this recombinator.

    All decision points in the search space are returned, with the
    subchoices of a multi-choice folded into their parent so the parent
    appears exactly once.

    Args:
        dna_spec: The root DNASpec.
        global_state: An optional keyword argument as the global state.
        step: An optional keyword argument as the current step.

    Returns:
        A list of targeted decision points for point-wise recombination.
    """
    selected = []
    for point in dna_spec.decision_points:
        if isinstance(point, pg.geno.Choices) and point.is_subchoice:
            # Represent the whole multi-choice once, via subchoice 0.
            if point.subchoice_index == 0:
                selected.append(point.parent_spec)
        else:
            selected.append(point)
    return selected
Returns applicable decision points for this recombinator. The default behavior is to return all decision points in the search space, with multi-choice subchoices folded into a single decision point. Subclasses can override this method to select applicable points according to their semantics. Args: dna_spec: The root DNASpec. global_state: An optional keyword argument as the global state. Subclass can omit. step: An optional keyword argument as current step. Subclass can omit. Returns: A list of targeted decision points for point-wise recombination, which will be further filtered by the `where` statement later.
github-repos
def calculate_focus(self, reading):
    """Determine the focal point of the downloaded image.

    Returns:
        focal_point: (x, y) The location of the source in the middle
        observation, in the coordinate system of the current reading.
    """
    # BUG FIX: the original middle_index expression was syntactically
    # truncated (unbalanced parenthesis, missing divisor). Reconstructed
    # as the midpoint of the readings list per the documented intent of
    # using the "middle observation".
    # NOTE(review): assumes floor-half indexing -- confirm against
    # upstream history if available.
    middle_index = len(self.source.get_readings()) // 2
    middle_reading = self.source.get_reading(middle_index)
    return self.convert_source_location(middle_reading, reading)
Determines what the focal point of the downloaded image should be. Returns: focal_point: (x, y) The location of the source in the middle observation, in the coordinate system of the current source reading.
codesearchnet
def ZerosLikeForExit(self, val):
    """Create a zeros_like gradient for a loop Exit node.

    ``val.op`` is an Exit; this must be called in the control flow
    context where gradients() is called.

    Args:
        val: The output tensor of an Exit op.

    Returns:
        A zero tensor of the same shape as val.
    """
    val_shape = val.get_shape()
    forward_ctxt = val.op._get_control_flow_context()
    outer_forward_ctxt = forward_ctxt.outer_context
    if outer_forward_ctxt:
        outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
    outer_grad_state = None
    if outer_forward_ctxt:
        outer_grad_state = self._map.get(outer_forward_ctxt)
    if outer_grad_state:
        # This Exit is nested inside an outer while loop.
        if val_shape.is_fully_defined():
            # Statically known shape: build zeros in the outer grad context.
            outer_grad_state.grad_context.Enter()
            result = array_ops.zeros(val_shape.dims, val.dtype)
            outer_grad_state.grad_context.Exit()
        else:
            # Dynamic shape: record it in the forward pass via an
            # accumulator, then read it back during backprop.
            forward_ctxt.outer_context.Enter()
            shape = array_ops.shape_internal(val, optimize=False)
            forward_ctxt.outer_context.Exit()
            history_shape = outer_grad_state.AddForwardAccumulator(shape)
            outer_grad_ctxt = outer_grad_state.grad_context
            outer_grad_ctxt.Enter()
            real_shape = outer_grad_state.AddBackpropAccumulatedValue(history_shape, shape)
            result = array_ops.zeros(real_shape, val.dtype)
            outer_grad_ctxt.Exit()
    elif val_shape.is_fully_defined():
        result = array_ops.zeros(val_shape.dims, val.dtype)
    else:
        result = array_ops.zeros_like(val, optimize=False)
    return result
Create zeros_like gradient for a loop exit. If the result of a loop variable is not used but is involved in computing the result of some needed loop variable, we create a zero-valued tensor that is fed as gradient for the Exit node of that loop variable. Note that val.op is an Exit, and this method must be called in the control flow context where gradients() is called. Args: val: The output tensor of an Exit op. Returns: A zero tensor of the same shape of val.
github-repos
def stop(self, accountID, **kwargs):
    """Shortcut to create a Stop Order in an Account.

    Args:
        accountID : The ID of the Account
        kwargs : The arguments to create a StopOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    order_request = StopOrderRequest(**kwargs)
    return self.create(accountID, order=order_request)
Shortcut to create a Stop Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a StopOrderRequest Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def _piecewise_learning_rate(step, boundaries, values):
    """Scale learning rate according to the given schedule.

    Multipliers are not cumulative.

    Args:
        step: global step
        boundaries: List of steps to transition on.
        values: Multiplier to apply at each boundary transition.

    Returns:
        Scaled value for the learning rate.
    """
    # Before the first boundary the multiplier is the identity (1.0).
    multipliers = [1.0] + values
    transition_steps = [float(boundary) for boundary in boundaries]
    return tf.train.piecewise_constant(
        step, transition_steps, multipliers, name='piecewise_lr')
Scale learning rate according to the given schedule. Multipliers are not cumulative. Args: step: global step boundaries: List of steps to transition on. values: Multiplier to apply at each boundary transition. Returns: Scaled value for the learning rate.
codesearchnet
def get_json_type(obj):
    """Serializes any object to a JSON-serializable structure.

    Args:
        obj: the object to serialize

    Returns:
        JSON-serializable structure representing `obj`.

    Raises:
        TypeError: if `obj` cannot be serialized.
    """
    # Objects exposing get_config() serialize as class name + config.
    if hasattr(obj, 'get_config'):
        return {'class_name': obj.__class__.__name__, 'config': obj.get_config()}
    # numpy arrays become lists; numpy scalars become Python scalars.
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    # Callables and classes serialize by name only.
    if callable(obj):
        return obj.__name__
    if type(obj).__name__ == type.__name__:
        return obj.__name__
    # TensorFlow shape/dtype objects reduce to plain values.
    if isinstance(obj, tensor_shape.Dimension):
        return obj.value
    if isinstance(obj, tensor_shape.TensorShape):
        return obj.as_list()
    if isinstance(obj, dtypes.DType):
        return obj.name
    if isinstance(obj, collections_abc.Mapping):
        return dict(obj)
    if obj is Ellipsis:
        return {'class_name': '__ellipsis__'}
    # Proxies serialize as the wrapped object.
    if isinstance(obj, wrapt.ObjectProxy):
        return obj.__wrapped__
    raise TypeError(f'Object {obj} is not JSON-serializable. You may implement a `get_config()` method on the class (returning a JSON-serializable dictionary) to make it serializable.')
Serializes any object to a JSON-serializable structure. Args: obj: the object to serialize Returns: JSON-serializable structure representing `obj`. Raises: TypeError: if `obj` cannot be serialized.
github-repos
def db_get(table, record, column, if_exists=False):
    """Gets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the
            record does not exist.

    Returns:
        The column's value, or None when no data was returned.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    """
    cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        cmd.append('--if-exists')
    cmd.extend(['list', table, record])
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    output = _stdout_parse_json(result['stdout'])
    rows = output['data']
    if rows and rows[0]:
        return rows[0][0]
    return None
Gets a column's value for a specific record. Args: table: A string - name of the database table. record: A string - identifier of the record. column: A string - name of the column. if_exists: A boolean - if True, it is not an error if the record does not exist. Returns: The column's value. CLI Example: .. code-block:: bash salt '*' openvswitch.db_get Port br0 vlan_mode
juraj-google-style
def _process_scalar_value(name, parse_fn, var_type, m_dict, values, results_dictionary):
    """Update results_dictionary with a scalar value.

    Handles clauses with a scalar RHS such as "s=5" or "arr[0]=5".
    Mutates results_dictionary.

    Args:
        name: Name of variable in assignment ("s" or "arr").
        parse_fn: Function for parsing the actual value.
        var_type: Type of named variable.
        m_dict: Dictionary constructed from regex parsing, with keys
            'val' (scalar RHS) and 'index' (list index or None).
        values: Full expression being parsed.
        results_dictionary: The dictionary being updated.

    Raises:
        ValueError: If the name has already been used.
    """
    try:
        parsed_value = parse_fn(m_dict['val'])
    except ValueError:
        _parse_fail(name, var_type, m_dict['val'], values)

    if not m_dict['index']:
        # Plain scalar assignment, e.g. "s=5".
        if name in results_dictionary:
            _reuse_fail(name, values)
        results_dictionary[name] = parsed_value
        return

    # Indexed assignment, e.g. "arr[0]=5": values live in a per-name
    # dict keyed by integer index.
    if name not in results_dictionary:
        results_dictionary[name] = {}
    elif not isinstance(results_dictionary.get(name), dict):
        _reuse_fail(name, values)
    index = int(m_dict['index'])
    if index in results_dictionary[name]:
        _reuse_fail('{}[{}]'.format(name, index), values)
    results_dictionary[name][index] = parsed_value
Update results_dictionary with a scalar value. Used to update the results_dictionary to be returned by parse_values when encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) Mutates results_dictionary. Args: name: Name of variable in assignment ("s" or "arr"). parse_fn: Function for parsing the actual value. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) m_dict['index']: List index value (or None) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has already been used.
codesearchnet
def _item_to_document_ref(iterator, item):
    """Convert a Document resource into a document reference.

    Args:
        iterator (google.api_core.page_iterator.GRPCIterator):
            iterator response
        item (dict): document resource
    """
    # The document id is the last segment of the resource path.
    *_, document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)
    return iterator.collection.document(document_id)
Convert Document resource to document ref. Args: iterator (google.api_core.page_iterator.GRPCIterator): iterator response item (dict): document resource
juraj-google-style
def get_cuda_visible_devices():
    """Get the device IDs from the CUDA_VISIBLE_DEVICES environment variable.

    Returns:
        None when CUDA_VISIBLE_DEVICES is unset; otherwise the list of
        integer GPU IDs it names (empty list when set to "").
    """
    raw = os.environ.get('CUDA_VISIBLE_DEVICES')
    if raw is None:
        return None
    if raw == '':
        return []
    return [int(device_id) for device_id in raw.split(',')]
Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable. Returns: if CUDA_VISIBLE_DEVICES is set, this returns a list of integers with the IDs of the GPUs. If it is not set, this returns None.
codesearchnet
def __call__(self, w):
    """Applies the constraint to the input weight variable.

    By default, the input weight variable is not modified. Subclasses
    override this method to implement their own projection function.

    Args:
        w: Input weight variable.

    Returns:
        Projected variable (by default, the unmodified input).
    """
    return w
Applies the constraint to the input weight variable. By default, the input weight variable is not modified. Users should override this method to implement their own projection function. Args: w: Input weight variable. Returns: Projected variable (by default, returns unmodified inputs).
github-repos
def market_exact(self, session, start_time: str, end_time: str) -> Session:
    """Explicitly specify start time and end time.

    Args:
        session: predefined session
        start_time: start time in terms of HHMM string
        end_time: end time in terms of HHMM string

    Returns:
        Session of start_time and end_time
    """
    if session not in self.exch:
        return SessNA
    hours = self.exch[session]
    # Session is intraday when it does not wrap past midnight.
    intraday = hours[0] < hours[-1]

    if start_time:
        begin = param.to_hour(start_time)
        if intraday:
            begin = max(begin, hours[0])
    else:
        begin = hours[0]

    if end_time:
        finish = param.to_hour(end_time)
        if intraday:
            finish = min(finish, hours[-1])
    else:
        finish = hours[-1]

    if intraday and begin > finish:
        return SessNA
    return Session(start_time=begin, end_time=finish)
Explicitly specify start time and end time Args: session: predefined session start_time: start time in terms of HHMM string end_time: end time in terms of HHMM string Returns: Session of start_time and end_time
codesearchnet
def acc_difference(points):
    """Compute the acceleration difference between adjacent points.

    Args:
        points (:obj:`list` of :obj:`Point`)

    Returns:
        :obj:`list`: acceleration differences, with a leading 0 entry.
    """
    deltas = [0]
    deltas.extend(before.acc - after.acc
                  for before, after in pairwise(points))
    return deltas
Computes the acceleration difference between each adjacent point Args: points (:obj:`list` of :obj:`Point`) Returns: :obj:`list`: acceleration differences between adjacent points, with a leading 0 entry
juraj-google-style
def list_street_poi_parking(self, **kwargs):
    """Obtain a list of addresses and POIs by searching from an address.

    Args:
        lang (str): Language code (*es* or *en*).
        address (str): Address in which to perform the search.

    Returns:
        Status boolean and parsed response (list[ParkingPoi]), or
        message string in case of error.
    """
    url_args = {
        'language': util.language_code(kwargs.get('lang')),
        'address': kwargs.get('address', ''),
    }
    result = self.make_request('list_street_poi_parking', url_args)
    if not util.check_result(result):
        return False, result.get('message', 'UNKNOWN ERROR')
    parsed = [emtype.ParkingPoi(**entry)
              for entry in util.response_list(result, 'Data')]
    return True, parsed
Obtain a list of addresses and POIs. This endpoint uses an address to perform the search Args: lang (str): Language code (*es* or *en*). address (str): Address in which to perform the search. Returns: Status boolean and parsed response (list[ParkingPoi]), or message string in case of error.
codesearchnet
def extract_value_from_output(canary, split_offset, kal_out):
    """Return the value parsed from kal output.

    Scans every line of the output; when several lines contain the
    canary, the last one wins.

    Args:
        canary (str): This string must exist in the target line.
        split_offset (int): Split offset for target value in string.
        kal_out (str): Output text from kal.

    Returns:
        str or None: Parsed value, or None when no line matches.
    """
    value = None
    for line in kal_out.splitlines():
        if canary in line:
            value = str(line.split()[split_offset])
    return value
Return value parsed from output. Args: canary(str): This string must exist in the target line. split_offset(int): Split offset for target value in string. kal_out(str): Output text from kal. Returns: str or None: Parsed value, or None when no line matches.
codesearchnet
def transform(self, transform, desc=None):
    """Create a copy of this query, transformed by `transform`.

    Args:
        transform (callable): Callable that takes an iterable of values
            and returns an iterable of transformed values.

    Keyword Args:
        desc (str): A description of the transform, to use in log
            messages. Defaults to the name of the `transform` function.

    Returns:
        Query
    """
    if desc is None:
        desc = u'transform({})'.format(getattr(transform, '__name__', ''))
    new_transforms = self.transforms + [transform]
    new_desc_stack = self.desc_stack + [desc]
    return self.replace(transforms=new_transforms,
                        desc_stack=new_desc_stack)
Create a copy of this query, transformed by `transform`. Args: transform (callable): Callable that takes an iterable of values and returns an iterable of transformed values. Keyword Args: desc (str): A description of the transform, to use in log messages. Defaults to the name of the `transform` function. Returns: Query
juraj-google-style
def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
    """Returns a list of operations applying this gate to each target.

    Args:
        *targets: The qubits to apply this gate to.

    Returns:
        Operations applying this gate to the target qubits.

    Raises:
        ValueError if targets are not instances of Qid.
    """
    operations = []
    for qubit in targets:
        operations.append(self.on(qubit))
    return operations
Returns a list of operations apply this gate to each of the targets. Args: *targets: The qubits to apply this gate to. Returns: Operations applying this gate to the target qubits. Raises: ValueError if targets are not instances of Qid.
codesearchnet