code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def sign(check_request):
    """Obtain a secure hash for an operation in a `CheckRequest`.

    Args:
        check_request (sc_messages.CheckRequest): the request to sign; its
            ``operation`` must be initialized with an operation name and a
            consumer id.

    Returns:
        bytes: an MD5 digest derived from the operation's fields.

    Raises:
        ValueError: if ``check_request`` is not a ``CheckRequest`` or its
            operation is missing required fields.
    """
    if not isinstance(check_request, sc_messages.CheckRequest):
        raise ValueError(u'Invalid request')
    op = check_request.operation
    if op is None or op.operationName is None or op.consumerId is None:
        logging.error(u'Bad %s: not initialized => not signed', check_request)
        raise ValueError(u'check request must be initialized with an operation')
    hasher = hashlib.md5()
    hasher.update(op.operationName.encode('utf-8'))
    hasher.update(b'\x00')
    hasher.update(op.consumerId.encode('utf-8'))
    if op.labels:
        # Labels are folded in via a helper so key ordering is canonicalized.
        signing.add_dict_to_hash(hasher, encoding.MessageToPyValue(op.labels))
    for value_set in op.metricValueSets:
        hasher.update(b'\x00')
        hasher.update(value_set.metricName.encode('utf-8'))
        for mv in value_set.metricValues:
            metric_value.update_hash(hasher, mv)
    hasher.update(b'\x00')
    if op.quotaProperties:
        # NOTE(review): relies on repr() of the message being deterministic
        # for equal contents -- confirm against the message implementation.
        hasher.update(repr(op.quotaProperties).encode('utf-8'))
        hasher.update(b'\x00')
    return hasher.digest()
Obtains a signature for an operation in a `CheckRequest` Args: check_request (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckRequest`): a check request containing an initialized operation Returns: string: a secure hash generated from the operation
codesearchnet
def LineWrap(text, omit_sgr=False):
    """Break lines to fit the screen width, factoring in ANSI/SGR sequences.

    Args:
        text: String to line wrap.
        omit_sgr: Bool, omit counting ANSI/SGR sequences in the length.

    Returns:
        Text with additional line wraps inserted for lines longer than
        the terminal width.
    """

    def _SplitWithSgr(text_line):
        """Split one line at the width, not counting SGR escape sequences."""
        token_list = sgr_re.split(text_line)
        text_line_list = []
        line_length = 0
        for index, token in enumerate(token_list):
            # BUG FIX: the original used `token is ''`, an identity check that
            # only works by CPython string-interning accident; test emptiness
            # by value instead.
            if not token:
                continue
            if sgr_re.match(token):
                # SGR sequences occupy no display columns; pass them through.
                text_line_list.append(token)
                text_line = ''.join(token_list[index + 1:])
            elif line_length + len(token) <= width:
                text_line_list.append(token)
                line_length += len(token)
                text_line = ''.join(token_list[index + 1:])
            else:
                # Token straddles the width: keep what fits, push back the rest.
                text_line_list.append(token[:width - line_length])
                text_line = token[width - line_length:]
                text_line += ''.join(token_list[index + 1:])
                break
        return (''.join(text_line_list), text_line)

    (_, width) = TerminalSize()
    text = str(text)
    text_multiline = []
    for text_line in text.splitlines():
        while ((omit_sgr and len(StripAnsiText(text_line)) > width)
               or len(text_line) > width):
            if not omit_sgr:
                text_multiline.append(text_line[:width])
                text_line = text_line[width:]
            else:
                (multiline_line, text_line) = _SplitWithSgr(text_line)
                text_multiline.append(multiline_line)
        if text_line:
            text_multiline.append(text_line)
    return '\n'.join(text_multiline)
Break line to fit screen width, factoring in ANSI/SGR escape sequences. Args: text: String to line wrap. omit_sgr: Bool, to omit counting ANSI/SGR sequences in the length. Returns: Text with additional line wraps inserted for lines greater than the width.
juraj-google-style
async def _on_state_update(self, state_update):
    """Receive a StateUpdate and fan out to Conversations.

    Args:
        state_update: hangouts_pb2.StateUpdate instance
    """
    notification_type = state_update.WhichOneof('state_update')
    if state_update.HasField('conversation'):
        # Apply the conversation delta first; if it cannot be fetched,
        # discard the whole update rather than dispatching a stale event.
        try:
            await self._handle_conversation_delta(state_update.conversation)
        except exceptions.NetworkError:
            logger.warning(
                'Discarding %s for %s: Failed to fetch conversation',
                notification_type.replace('_', ' '),
                state_update.conversation.conversation_id.id)
            return
    # Route the populated oneof field to its handler; unknown or unset
    # notification types are silently ignored, as before.
    dispatch = {
        'typing_notification': self._handle_set_typing_notification,
        'watermark_notification': self._handle_watermark_notification,
        'event_notification': lambda notif: self._on_event(notif.event),
    }
    handler = dispatch.get(notification_type)
    if handler is not None:
        await handler(getattr(state_update, notification_type))
Receive a StateUpdate and fan out to Conversations. Args: state_update: hangouts_pb2.StateUpdate instance
juraj-google-style
def cmd_ssh_user(tar_aminame, inst_name):
    """Calculate instance login-username based on image-name.

    Args:
        tar_aminame (str): name of the image the instance was created with.
        inst_name (str): name of the instance (fallback when AMI is unknown).

    Returns:
        str: username for ssh based on the AMI name.
    """
    if tar_aminame == "Unknown":
        # No AMI name available; fall back to matching on the instance name.
        tar_aminame = inst_name
    distro_logins = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root",
                     "cento": "centos", "openb": "root"}
    ami_lower = tar_aminame.lower()
    # First distro keyword found in the AMI name wins; default is ec2-user.
    username = next((login for keyword, login in distro_logins.items()
                     if keyword in ami_lower), 'ec2-user')
    debg.dprint("loginuser Calculated: ", username)
    return username
Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name.
juraj-google-style
def get_users(self, capacity=None):
    """Return the organization's users.

    Args:
        capacity (Optional[str]): Filter by capacity eg. member, admin.
            Defaults to None.

    Returns:
        List[User]: Organization's users.
    """
    users = list()
    usersdicts = self.data.get('users')
    if usersdicts is None:
        return users
    for userdata in usersdicts:
        if capacity is not None and userdata['capacity'] != capacity:
            continue
        # Prefer the id; fall back to the name when id is missing.
        user_id = userdata.get('id')
        if user_id is None:
            user_id = userdata['name']
        user = hdx.data.user.User.read_from_hdx(
            user_id, configuration=self.configuration)
        user['capacity'] = userdata['capacity']
        users.append(user)
    return users
Returns the organization's users. Args: capacity (Optional[str]): Filter by capacity eg. member, admin. Defaults to None. Returns: List[User]: Organization's users.
codesearchnet
def to_text_diagram(
        self, *,
        use_unicode_characters: bool = True,
        transpose: bool = False,
        precision: Optional[int] = 3,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT) -> str:
    """Returns text containing a diagram describing the circuit.

    Args:
        use_unicode_characters: Determines if unicode characters are allowed
            (as opposed to ascii-only diagrams).
        transpose: Arranges qubit wires vertically instead of horizontally.
        precision: Number of digits to display in the text diagram.
        qubit_order: Determines how qubits are ordered in the diagram.

    Returns:
        The text diagram.
    """
    diagram = self.to_text_diagram_drawer(
        use_unicode_characters=use_unicode_characters,
        precision=precision,
        qubit_order=qubit_order,
        transpose=transpose)
    # Unicode diagrams pick a crossing character automatically; ascii
    # diagrams use '-' for transposed (vertical) wires and '|' otherwise.
    if use_unicode_characters:
        crossing_char = None
    elif transpose:
        crossing_char = '-'
    else:
        crossing_char = '|'
    return diagram.render(
        crossing_char=crossing_char,
        horizontal_spacing=1 if transpose else 3,
        use_unicode_characters=use_unicode_characters)
Returns text containing a diagram describing the circuit. Args: use_unicode_characters: Determines if unicode characters are allowed (as opposed to ascii-only diagrams). transpose: Arranges qubit wires vertically instead of horizontally. precision: Number of digits to display in text diagram qubit_order: Determines how qubits are ordered in the diagram. Returns: The text diagram.
juraj-google-style
def _PrintStorageInformationAsText(self, storage_reader):
    """Prints information about the store as human-readable text.

    Args:
      storage_reader (StorageReader): storage reader.
    """
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, title='Plaso Storage Information')
    table_view.AddRow(['Filename', os.path.basename(self._storage_file_path)])
    table_view.AddRow(['Format version', storage_reader.format_version])
    table_view.AddRow(
        ['Serialization format', storage_reader.serialization_format])
    table_view.Write(self._output_writer)
    if (storage_reader.storage_type == definitions.STORAGE_TYPE_SESSION):
        # Session stores carry per-session overviews plus aggregate counters.
        self._PrintSessionsOverview(storage_reader)
        self._PrintSessionsDetails(storage_reader)
        storage_counters = self._CalculateStorageCounters(storage_reader)
        if ('parsers' not in storage_counters):
            self._output_writer.Write(
                'Unable to determine number of events generated per parser.\n')
        else:
            self._PrintParsersCounter(storage_counters['parsers'])
        if ('analysis_reports' not in storage_counters):
            self._output_writer.Write(
                'Unable to determine number of reports generated per plugin.\n')
        else:
            self._PrintAnalysisReportCounter(
                storage_counters['analysis_reports'])
        if ('event_labels' not in storage_counters):
            self._output_writer.Write(
                'Unable to determine number of event tags generated per label.\n')
        else:
            self._PrintEventLabelsCounter(storage_counters['event_labels'])
        self._PrintWarningCounters(storage_counters)
        # NOTE(review): original indentation was lost in this copy; it is
        # assumed both detail printers below are verbose-only -- confirm
        # against upstream before relying on this.
        if self._verbose:
            self._PrintWarningsDetails(storage_reader)
            self._PrintAnalysisReportsDetails(storage_reader)
    elif (storage_reader.storage_type == definitions.STORAGE_TYPE_TASK):
        self._PrintTasksInformation(storage_reader)
Prints information about the store as human-readable text. Args: storage_reader (StorageReader): storage reader.
codesearchnet
def PyParseIntCast(string, location, tokens):
    """Cast matched tokens to integers (pyparsing parse action).

    Converts every matched token, positional and named, to int in place;
    tokens that fail to convert are set to 0.

    Args:
        string (str): original string.
        location (int): location in the string where the match was made.
        tokens (pyparsing.ParseResults): extracted tokens, where the string
            to be converted is stored; modified in place.
    """
    for index, token in enumerate(tokens):
        try:
            tokens[index] = int(token)
        except ValueError:
            logger.error(
                'Unable to cast [{0:s}] to an int, setting to 0'.format(token))
            tokens[index] = 0
    # Named results may still reference uncast strings; cast those as well.
    for key in tokens.keys():
        try:
            tokens[key] = int(tokens[key], 10)
        except TypeError:
            # BUG FIX: the value was already cast to int by the loop above;
            # int(x, 10) requires a string and raised an uncaught TypeError.
            tokens[key] = int(tokens[key])
        except ValueError:
            # BUG FIX: the original '{1:d}' format spec itself raised when the
            # uncastable value was a string; use a plain str conversion.
            logger.error(
                'Unable to cast [{0:s} = {1!s}] to an int, setting to 0'.format(
                    key, tokens[key]))
            tokens[key] = 0
Return an integer from a string. This is a pyparsing callback method that converts the matched string into an integer. The method modifies the content of the tokens list and converts them all to an integer value. Args: string (str): original string. location (int): location in the string where the match was made. tokens (list[str]): extracted tokens, where the string to be converted is stored.
codesearchnet
def create_header(cls, request_id=None):
    """Return a message header fragment dict.

    Args:
        request_id (str or None): Message ID of the message this message
            replies to.

    Returns:
        dict: a message header
    """
    # Only include 'reqid' when this message is a reply to another message.
    reply_fields = {} if request_id is None else {'reqid': request_id}
    return dict(msgid=bkserial.make_id(), msgtype=cls.msgtype, **reply_fields)
Return a message header fragment dict. Args: request_id (str or None) : Message ID of the message this message replies to Returns: dict : a message header
codesearchnet
def recipe_bigquery_storage(config, auth_read, bucket, auth_write, path,
                            dataset, table, schema):
    """Move using bucket and path prefix.

    Args:
        auth_read (authentication) - Credentials used for reading data.
        bucket (string) - Google cloud bucket.
        auth_write (authentication) - Credentials used for writing data.
        path (string) - Path prefix to read from, no * required.
        dataset (string) - Existing BigQuery dataset.
        table (string) - Table to create from this query.
        schema (json) - Schema provided in JSON list format or empty list.
    """
    # Assemble the task description separately for readability.
    task = {
        'auth': auth_read,
        'from': {'bucket': bucket, 'path': path},
        'to': {'auth': auth_write, 'dataset': dataset, 'table': table},
        'schema': schema,
    }
    bigquery(config, task)
Move using bucket and path prefix. Args: auth_read (authentication) - Credentials used for reading data. bucket (string) - Google cloud bucket. auth_write (authentication) - Credentials used for writing data. path (string) - Path prefix to read from, no * required. dataset (string) - Existing BigQuery dataset. table (string) - Table to create from this query. schema (json) - Schema provided in JSON list format or empty list.
github-repos
def MeshViewers(shape=(1, 1), titlebar="Mesh Viewers", keepalive=False,
                window_width=1280, window_height=960):
    """Allows subplot-style inspection of primitives in multiple subwindows.

    Args:
        shape: a tuple indicating the number of vertical and horizontal
            windows requested.

    Returns:
        a list of lists of MeshViewer objects: one per window requested.
    """
    # Without OpenGL there is nothing to render; hand back an inert stand-in.
    if not test_for_opengl():
        return Dummy()
    viewer = MeshViewerLocal(shape=shape, titlebar=titlebar, uid=None,
                             keepalive=keepalive, window_width=window_width,
                             window_height=window_height)
    return viewer.get_subwindows()
Allows subplot-style inspection of primitives in multiple subwindows. Args: shape: a tuple indicating the number of vertical and horizontal windows requested Returns: a list of lists of MeshViewer objects: one per window requested.
juraj-google-style
def standard_to_absl(level):
    """Converts an integer level from the standard value to the absl value.

    Args:
        level: int, a Python standard logging level.

    Raises:
        TypeError: Raised when level is not an integer.

    Returns:
        The corresponding integer level for use in absl logging.
    """
    if not isinstance(level, int):
        raise TypeError('Expect an int level, found {}'.format(type(level)))
    # Negative levels are clamped to 0.
    level = max(level, 0)
    if level < STANDARD_DEBUG:
        # Below-DEBUG verbosity maps onto absl's extended debug levels.
        return STANDARD_DEBUG - level + 1
    if level < STANDARD_INFO:
        return ABSL_DEBUG
    if level < STANDARD_WARNING:
        return ABSL_INFO
    if level < STANDARD_ERROR:
        return ABSL_WARNING
    if level < STANDARD_CRITICAL:
        return ABSL_ERROR
    return ABSL_FATAL
Converts an integer level from the standard value to the absl value. Args: level: int, a Python standard logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in absl logging.
codesearchnet
def get_help_datapacks(module_name, server_prefix):
    """Get the help datapacks for a module.

    Args:
        module_name (str): The module to get help data for.
        server_prefix (str): The command prefix for this server.

    Returns:
        datapacks (list): The help datapacks for the module.
    """
    _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    # BUG FIX: the original passed a stray third argument ('_help.json') to
    # format() here, which was silently ignored and made the intent unclear;
    # the module directory is simply <this dir>/../<module_name>.
    module_dir = '{}/../{}'.format(_dir, module_name)
    if not os.path.isdir(module_dir):
        return [('Help', 'No module found called {}'.format(module_name), False)]
    module_help_path = '{}/{}'.format(module_dir, '_help.json')
    if not os.path.isfile(module_help_path):
        return [('Help', '{} does not have a help.json file'.format(module_name), False)]
    return helptools.get_help_datapacks(module_help_path, server_prefix)
Get the help datapacks for a module Args: module_name (str): The module to get help data for server_prefix (str): The command prefix for this server Returns: datapacks (list): The help datapacks for the module
codesearchnet
def export(self, filename, offset=0, length=None):
    """Export the byte array to the specified destination file.

    Args:
        filename (str): destination output file.
        offset (int): byte offset to start from (default: 0).
        length (int or None): number of bytes to write; None means
            everything from ``offset`` to the end of the data.
    """
    self.__validate_offset(filename=filename, offset=offset, length=length)
    if length is None:
        length = len(self.data) - offset
    # Use a context manager so the handle is closed even if write() fails.
    with open(filename, 'w') as handle:
        # BUG FIX: the original wrote self.data[offset:length] when
        # offset > 0, treating `length` as an end index instead of a byte
        # count (and truncating output for the default length).
        handle.write(self.data[offset:offset + length])
Exports byte array to specified destination Args: filename (str): destination to output file offset (int): byte offset (default: 0) length (int): number of bytes to export (default: all bytes from offset to the end)
juraj-google-style
def _compute_diff(left: pg.DNA, right: pg.DNA) -> Tuple[int, int, int]: if left.value == right.value: assert len(left.children) == len(right.children) n = 0 if left.value is None else 1 w = 0 d = 0 for c1, c2 in zip(left.children, right.children): cn, cw, cd = _compute_diff(c1, c2) n += cn w += cw d += cd return (n, w, d) else: nl = len(left.to_numbers()) nr = len(right.to_numbers()) n = max(nl, nr) return (n, 1, n - 1)
Compute different positions in two DNAs. Args: left: the first DNA to compare. right: the right DNA to compare. Returns: A tuple of (N, W, D). 'N' is the total number of components in the larger DNA, 'W' is the number of matching genes with different values, and 'D' is the number of disjoint genes. PyGlove DNAs have no notion of 'E' (i.e. excess genes from the original paper), so we exclude them.
github-repos
def vgg11_bn(pretrained=False, **kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
    """
    if pretrained:
        # Pretrained weights are loaded below, so skip random initialization.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['vgg11_bn'])
        model.load_state_dict(state_dict)
    return model
VGG 11-layer model (configuration "A") with batch normalization Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
juraj-google-style
def refresh(self, refresh_binary=True):
    """Performs GET request and refreshes RDF information for resource.

    Args:
        refresh_binary (bool): also refresh the binary payload when the
            resource is a NonRDFSource.

    Returns:
        None
    """
    updated_self = self.repo.get_resource(self.uri)
    # The repository must report the same resource type we instantiated.
    if not isinstance(self, type(updated_self)):
        raise Exception('Instantiated %s, but repository reports this resource is %s' % (type(updated_self), type(self)))
    if updated_self:
        self.status_code = updated_self.status_code
        self.rdf.data = updated_self.rdf.data
        self.headers = updated_self.headers
        self.exists = updated_self.exists
        # Binary resources have no RDF graph to parse.
        if type(self) != NonRDFSource:
            self._parse_graph()
        # Reset cached version information; it is re-derived on demand.
        self.versions = SimpleNamespace()
        if type(updated_self) == NonRDFSource and refresh_binary:
            self.binary.refresh(updated_self)
        # Optional subclass hook run after a successful refresh.
        if hasattr(self, '_post_refresh'):
            self._post_refresh()
        del updated_self
    else:
        # BUG FIX: the original message contained a %s placeholder but no
        # argument was supplied; log the resource URI.
        logger.debug('resource %s not found, dumping values', self.uri)
        self._empty_resource_attributes()
Performs GET request and refreshes RDF information for resource. Args: None Returns: None
juraj-google-style
def get_data_node(self, path: DataPath) -> Optional[DataNode]:
    """Return the data node addressed by a data path.

    Args:
        path: Data path.

    Returns:
        Data node if found in the schema, or ``None``.

    Raises:
        InvalidSchemaPath: If the schema path is invalid.
    """
    node = self.schema
    # Walk the route one segment at a time, bailing out on the first miss.
    for segment in self.schema_data.path2route(path):
        node = node.get_data_child(*segment)
        if node is None:
            return None
    return node
Return the data node addressed by a data path. Args: path: Data path. Returns: Data node if found in the schema, or ``None``. Raises: InvalidSchemaPath: If the schema path is invalid.
codesearchnet
def convert_to_numpy(x):
    """Convert a tensor to a NumPy array.

    Args:
        x: A tensor.

    Returns:
        A NumPy array.
    """
    # Symbolic tensors carry no backend data; wrap them directly.
    if any_symbolic_tensors((x,)):
        return np.array(x)
    # Concrete tensors are converted by the active backend.
    return backend.convert_to_numpy(x)
Convert a tensor to a NumPy array. Args: x: A tensor. Returns: A NumPy array.
github-repos
def _process_arguments(arguments): if (arguments is None): return '' result = '' for (key, value) in arguments.items(): if (not key.startswith('bokeh-')): result += '&{}={}'.format(quote_plus(str(key)), quote_plus(str(value))) return result
Return user-supplied HTML arguments to add to a Bokeh server URL. Args: arguments (dict[str, object]) : Key/value pairs to add to the URL Returns: str
codesearchnet
def serialize_many_sparse_v2(sp_input, out_type=dtypes.string, name=None):
    """Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.

    The `SparseTensor` must have rank `R` greater than 1; the first dimension
    is treated as the minibatch dimension and elements must be sorted in
    increasing order of it. Serialized objects in each output row have rank
    `R-1`; the minibatch size `N` is taken from `sparse_shape[0]`.

    Args:
        sp_input: The input rank `R` `SparseTensor`.
        out_type: The `dtype` to use for serialization.
        name: A name prefix for the returned tensors (optional).

    Returns:
        A 2-D `Tensor` with `N` rows and `3` columns: serialized indices,
        values, and shape, respectively.

    Raises:
        TypeError: If `sp_input` is not a `SparseTensor`.
    """
    sp_tensor = _convert_to_sparse_tensor(sp_input)
    return gen_sparse_ops.serialize_many_sparse(
        sp_tensor.indices,
        sp_tensor.values,
        sp_tensor.dense_shape,
        name=name,
        out_type=out_type)
Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`. The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of the output `Tensor` will have rank `R-1`. The minibatch size `N` is extracted from `sparse_shape[0]`. Args: sp_input: The input rank `R` `SparseTensor`. out_type: The `dtype` to use for serialization. name: A name prefix for the returned tensors (optional). Returns: A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column represents serialized `SparseTensor`'s indices, values, and shape (respectively). Raises: TypeError: If `sp_input` is not a `SparseTensor`.
github-repos
def __frontend_limit_rules_descriptor(self, api_info):
    """Builds a frontend limit rules descriptor from API info.

    Args:
        api_info: An _ApiInfo object.

    Returns:
        A list of dictionaries with frontend limit rules information, or
        None when no rules are configured.
    """
    if not api_info.frontend_limits.rules:
        return None
    # Maps rule attribute names to their JSON descriptor keys.
    prop_map = (('match', 'match'), ('qps', 'qps'), ('user_qps', 'userQps'),
                ('daily', 'daily'), ('analytics_id', 'analyticsId'))
    rules = []
    for rule in api_info.frontend_limits.rules:
        descriptor = {descname: getattr(rule, propname)
                      for propname, descname in prop_map
                      if getattr(rule, propname) is not None}
        if descriptor:
            rules.append(descriptor)
    return rules
Builds a frontend limit rules descriptor from API info. Args: api_info: An _ApiInfo object. Returns: A list of dictionaries with frontend limit rules information.
codesearchnet
def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True,
             name='Laplace'):
    """Construct Laplace distribution with parameters `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).

    Args:
        loc: Floating point tensor which characterizes the location (center)
            of the distribution.
        scale: Positive floating point tensor which characterizes the spread
            of the distribution.
        validate_args: Python `bool`. When `True`, parameters are checked
            for validity despite possibly degrading runtime performance.
        allow_nan_stats: Python `bool`. When `True`, statistics use "NaN" to
            indicate undefined results; when `False`, an exception is raised.
        name: Python `str` name prefixed to Ops created by this class.

    Raises:
        TypeError: if `loc` and `scale` are of different dtype.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, scale]) as name:
        # Only add the runtime positivity check on `scale` when validation
        # was requested; otherwise the dependency list is empty.
        with ops.control_dependencies(
                [check_ops.assert_positive(scale)] if validate_args else []):
            self._loc = array_ops.identity(loc, name='loc')
            self._scale = array_ops.identity(scale, name='scale')
            # Raises TypeError when loc and scale dtypes disagree.
            check_ops.assert_same_float_dtype([self._loc, self._scale])
        # NOTE(review): indentation was lost in this copy; the super() call
        # is placed inside the name scope but outside the control
        # dependencies, matching token order -- confirm against upstream.
        super(Laplace, self).__init__(
            dtype=self._loc.dtype,
            reparameterization_type=distribution.FULLY_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            parameters=parameters,
            graph_parents=[self._loc, self._scale],
            name=name)
Construct Laplace distribution with parameters `loc` and `scale`. The parameters `loc` and `scale` must be shaped in a way that supports broadcasting (e.g., `loc / scale` is a valid operation). Args: loc: Floating point tensor which characterizes the location (center) of the distribution. scale: Positive floating point tensor which characterizes the spread of the distribution. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: if `loc` and `scale` are of different dtype.
github-repos
def get_filetypes_info(editor_quote='`', flag_leaf=True):
    """Reports available data types.

    Args:
        editor_quote: character to enclose the name of the editor script
            between.
        flag_leaf: see tabulate_filetypes_rest()

    Returns:
        list: list of FileTypeInfo dicts, sorted by description.
    """
    NONE_REPL = ''
    import f311
    infos = []
    for cls in f311.classes_file(flag_leaf):
        editors = cls.editors
        if editors is None:
            editors = NONE_REPL
        else:
            # Quote each editor script name, e.g. `abed.py`.
            editors = ', '.join('{0}{1}{0}'.format(editor_quote, editor)
                                for editor in editors)
        infos.append({
            'description': a99.get_obj_doc0(cls),
            'default_filename': (NONE_REPL if cls.default_filename is None
                                 else cls.default_filename),
            'classname': cls.__name__,
            'editors': editors,
            'class': cls,
            'txtbin': 'text' if cls.flag_txt else 'binary',
        })
    infos.sort(key=lambda info: info['description'])
    return infos
Reports available data types Args: editor_quote: character to enclose the name of the editor script between. flag_leaf: see tabulate_filetypes_rest() Returns: list: list of FileTypeInfo
codesearchnet
def cancel_id(cls, id):
    """Cancels command denoted by this id.

    Args:
        `id`: command id
    """
    conn = Qubole.agent()
    # The API expresses cancellation as a "kill" status update.
    return conn.put(cls.element_path(id), {"status": "kill"})
Cancels command denoted by this id Args: `id`: command id
juraj-google-style
def _Open(self, path_spec, mode='rb'):
    """Opens the file system defined by path specification.

    Args:
        path_spec (PathSpec): path specification.
        mode (Optional[str]): file access mode. The default is 'rb' which
            represents read-only binary.

    Raises:
        AccessError: if the access to open the file was denied.
        IOError: if the file system could not be opened.
        PathSpecError: if the path specification is incorrect.
        ValueError: if the path specification is invalid.
    """
    if not path_spec.HasParent():
        raise errors.PathSpecError(
            'Unsupported path specification without parent.')
    parent_file_object = resolver.Resolver.OpenFileObject(
        path_spec.parent, resolver_context=self._resolver_context)
    archive = cpio.CPIOArchiveFile()
    try:
        archive.Open(parent_file_object)
    except:
        # Close the file object on any failure so it does not leak, then
        # re-raise the original exception unchanged.
        parent_file_object.close()
        raise
    self._file_object = parent_file_object
    self._cpio_archive_file = archive
Opens the file system defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. The default is 'rb' which represents read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def find_resistance(record):
    """Infer the antibiotics resistance of the given record.

    Arguments:
        record (`~Bio.SeqRecord.SeqRecord`): an annotated sequence.

    Raises:
        RuntimeError: when there's not exactly one resistance cassette.
    """
    for feature in record.features:
        feature_labels = set(feature.qualifiers.get("label", []))
        # Intersect the feature's labels with the known cassette names.
        matches = feature_labels.intersection(_ANTIBIOTICS)
        if len(matches) > 1:
            raise RuntimeError("multiple resistance cassettes detected")
        if matches:
            return _ANTIBIOTICS.get(matches.pop())
    raise RuntimeError("could not find the resistance of '{}'".format(record.id))
Infer the antibiotics resistance of the given record. Arguments: record (`~Bio.SeqRecord.SeqRecord`): an annotated sequence. Raises: RuntimeError: when there's not exactly one resistance cassette.
juraj-google-style
def read_from_bigquery(*, table: Optional[str]=None, query: Optional[str]=None,
                       row_restriction: Optional[str]=None,
                       fields: Optional[Iterable[str]]=None):
    """Reads data from BigQuery.

    Exactly one of table or query must be set. If query is set, neither
    row_restriction nor fields should be set.

    Args:
        table (str): The table to read from, specified as `DATASET.TABLE`
            or `PROJECT:DATASET.TABLE`.
        query (str): A query to be used instead of the table argument.
        row_restriction (str): Optional SQL filtering statement, similar to
            a WHERE clause. Aggregates are not supported; max length 1 MB.
        fields (list[str]): Optional names of the fields to read. If empty,
            all fields are read; nested fields select all their sub-fields.
    """
    if query is None:
        # Table mode: the table reference is mandatory.
        assert table is not None
    else:
        # Query mode: must not be combined with table-level options.
        assert table is None and row_restriction is None and fields is None
    return ReadFromBigQuery(
        query=query,
        table=table,
        row_restriction=row_restriction,
        selected_fields=fields,
        method='DIRECT_READ',
        output_type='BEAM_ROW')
Reads data from BigQuery. Exactly one of table or query must be set. If query is set, neither row_restriction nor fields should be set. Args: table (str): The table to read from, specified as `DATASET.TABLE` or `PROJECT:DATASET.TABLE`. query (str): A query to be used instead of the table argument. row_restriction (str): Optional SQL text filtering statement, similar to a WHERE clause in a query. Aggregates are not supported. Restricted to a maximum length for 1 MB. fields (list[str]): Optional List of names of the fields in the table that should be read. If empty, all fields will be read. If the specified field is a nested field, all the sub-fields in the field will be selected. The output field order is unrelated to the order of fields given here.
github-repos
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Windows Shortcut (LNK) file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.
    """
    # Delegate to the shared LNK parsing routine, labeling the data with
    # the file's display name.
    self.ParseFileLNKFile(
        parser_mediator, file_object, parser_mediator.GetDisplayName())
Parses a Windows Shortcut (LNK) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
juraj-google-style
def add(self, *value):
    """Convert *value* and add it to ``self.value``.

    Subclasses must overwrite this method: they are responsible for creating
    whatever single instance they need from ``add(*value)`` and calling
    ``_add()`` to add them to ``self.value``.

    Args:
        *value: the value(s) to be added.
    """
    # Flatten arbitrarily nested input into a single flat list first.
    return self._add(list(flatten(value)), self.value)
convert value and add to self.value Subclass must overwrite this method. Subclass are responsible of creating whatever single instance it need from its ``add(*value)`` and call ``_add()`` to add them to ``self.value`` Args: *value: the value to be added
codesearchnet
def _UploadChunk(self, chunk):
    """Uploads a single chunk to the transfer store flow.

    Args:
        chunk: A chunk to upload.

    Returns:
        A `BlobImageChunkDescriptor` object.
    """
    chunk_size = len(chunk.data)
    blob = _CompressedDataBlob(chunk)
    # Account for the uploaded bytes before sending the reply.
    self._action.ChargeBytesToSession(chunk_size)
    self._action.SendReply(blob, session_id=self._TRANSFER_STORE_SESSION_ID)
    return rdf_client_fs.BlobImageChunkDescriptor(
        digest=hashlib.sha256(chunk.data).digest(),
        offset=chunk.offset,
        length=chunk_size)
Uploads a single chunk to the transfer store flow. Args: chunk: A chunk to upload. Returns: A `BlobImageChunkDescriptor` object.
juraj-google-style
def set_syslog_server(server=None, type="primary"):
    """Set the SYSLOG server on the host.

    Args:
        server (str): The hostname or IP address of the SYSLOG server.
        type (str): The type of SYSLOG server: ``primary`` (default) or
            ``secondary``.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.set_syslog_server foo.bar.com
        salt '*' cimc.set_syslog_server foo.bar.com primary
        salt '*' cimc.set_syslog_server foo.bar.com secondary
    """
    if not server:
        raise salt.exceptions.CommandExecutionError(
            "The SYSLOG server must be specified.")
    if type == "primary":
        dn = "sys/svc-ext/syslog/client-primary"
        # NOTE(review): the XML payload string literal was lost in this copy
        # of the source -- `.format(server)` should be applied to the
        # client-primary configuration template. Restore it from upstream;
        # as written this line is a syntax error.
        inconfig = .format(server)
    elif type == "secondary":
        dn = "sys/svc-ext/syslog/client-secondary"
        # NOTE(review): see above -- the client-secondary payload template
        # is likewise missing.
        inconfig = .format(server)
    else:
        raise salt.exceptions.CommandExecutionError(
            "The SYSLOG type must be either primary or secondary.")
    ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)
    return ret
Set the SYSLOG server on the host. Args: server(str): The hostname or IP address of the SYSLOG server. type(str): Specifies the type of SYSLOG server. This can either be primary (default) or secondary. CLI Example: .. code-block:: bash salt '*' cimc.set_syslog_server foo.bar.com salt '*' cimc.set_syslog_server foo.bar.com primary salt '*' cimc.set_syslog_server foo.bar.com secondary
juraj-google-style
def _build_http_client(cls, session: AppSession):
    """Create the HTTP client.

    Returns:
        Client: An instance of :class:`.http.Client`.
    """
    # Pre-bind the CLI-controlled stream options so the client can create
    # streams without consulting the args again.
    make_stream = functools.partial(
        HTTPStream,
        ignore_length=session.args.ignore_length,
        keep_alive=session.args.http_keep_alive)
    return session.factory.new(
        'HTTPClient',
        connection_pool=session.factory['ConnectionPool'],
        stream_factory=make_stream)
Create the HTTP client. Returns: Client: An instance of :class:`.http.Client`.
codesearchnet
def counter(urn: str, labels: Optional[Dict[str, str]]=None,
            process_wide: bool=False) -> UserMetrics.DelegatingCounter:
    """Obtains or creates a Counter metric.

    Args:
        urn: URN to populate on a MonitoringInfo, when sending to
            RunnerHarness.
        labels: Labels to populate on a MonitoringInfo.
        process_wide: Whether the metric is specific to the current bundle
            or should be calculated for the entire process.

    Returns:
        A Counter object.
    """
    # URN-based metrics carry no namespace/name of their own.
    metric_name = MetricName(namespace=None, name=None, urn=urn, labels=labels)
    return UserMetrics.DelegatingCounter(metric_name, process_wide=process_wide)
Obtains or creates a Counter metric. Args: namespace: A class or string that gives the namespace to a metric name: A string that gives a unique name to a metric urn: URN to populate on a MonitoringInfo, when sending to RunnerHarness. labels: Labels to populate on a MonitoringInfo process_wide: Whether or not the metric is specific to the current bundle or should be calculated for the entire process. Returns: A Counter object.
github-repos
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
    """Batched expert MLP over token groups pre-sorted per expert.

    The inputs are expected to be "sorted" per expert already; the
    projection weights carry an extra leading expert dimension so a single
    batched matmul services all experts at once.

    Args:
        hidden_states (torch.Tensor): (num_experts * tokens_per_expert,
            hidden_size) activations, grouped by expert.

    Returns:
        torch.Tensor: output with the same shape as ``hidden_states``.
    """
    # Regroup the flat token stream as (experts, tokens_per_expert, hidden).
    grouped = hidden_states.view(self.num_experts, -1, self.hidden_size)
    # Fused gate/up projection, then split the two halves apart.
    gate, up = torch.bmm(grouped, self.gate_up_proj).chunk(2, dim=-1)
    projected = torch.bmm(up * self.act_fn(gate), self.down_proj)
    # Flatten back to the (tokens, hidden) layout of the input.
    return projected.view(-1, self.hidden_size)
This should really not be run on a single machine, as we are reaching compute bound: - the inputs are expected to be "sorted" per expert already. - the weights are viewed with another dim, to match num_expert, 1, shape * num_tokens, shape Args: hidden_states (torch.Tensor): (batch_size * token_num, hidden_size) selected_experts (torch.Tensor): (batch_size * token_num, top_k) routing_weights (torch.Tensor): (batch_size * token_num, top_k) Returns: torch.Tensor
github-repos
def _sym_inferred(self, key: str, **kwargs):
    """Override to allow attribute access to scoped contextual values.

    Args:
        key: attribute name.
        **kwargs: Optional keyword arguments for value inference.

    Returns:
        The value of the symbolic attribute, resolved in order: the local
        contextual scope, a global contextual override (when it forces
        attribute override), then the parent implementation.

    Raises:
        AttributeError: If the attribute does not exist.
    """
    if key not in self._sym_attributes:
        raise AttributeError(key)
    # Prefer a value bound in the local contextual scope.
    scoped = pg_utils.contextual.get_scoped_value(
        self._contextual_overrides, key)
    if scoped is not None:
        return scoped.value
    # Otherwise consult the global contextual override, if it forces attrs.
    override = pg_utils.contextual.get_contextual_override(key)
    if override and override.override_attrs:
        return override.value
    return super()._sym_inferred(key, context_override=override, **kwargs)
Override to allow attribute to access scoped value. Args: key: attribute name. **kwargs: Optional keyword arguments for value inference. Returns: The value of the symbolic attribute. If not available, returns the default value. Raises: AttributeError: If the attribute does not exist or contextual attribute is not ready.
github-repos
def list_container_services(access_token, subscription_id, resource_group):
    """List the container services in a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.

    Returns:
        HTTP response. JSON model.
    """
    endpoint = (
        '{}/subscriptions/{}/resourcegroups/{}'
        '/providers/Microsoft.ContainerService/ContainerServices'
        '?api-version={}'
    ).format(get_rm_endpoint(), subscription_id, resource_group, ACS_API)
    return do_get(endpoint, access_token)
List the container services in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON model.
juraj-google-style
def load_manual_sequence(self, seq, ident=None, write_fasta_file=False,
                         outdir=None, set_as_representative=False,
                         force_rewrite=False):
    """Load a manual sequence and optionally set it as representative.

    The sequence is stored in the ``sequences`` attribute.

    Args:
        seq (str, Seq, SeqRecord): Sequence string, Biopython Seq or
            SeqRecord object.
        ident (str): Optional identifier for the sequence, required if seq
            is a string. Also overrides existing IDs in Seq or SeqRecord
            objects if set.
        write_fasta_file (bool): If this sequence should be written out to
            a FASTA file.
        outdir (str): Path to output directory.
        set_as_representative (bool): If this sequence should be set as the
            representative one.
        force_rewrite (bool): If the FASTA file should be overwritten if it
            already exists.

    Returns:
        SeqProp: Sequence that was loaded into the ``sequences`` attribute.
    """
    if write_fasta_file:
        if (not outdir):
            # Fall back to the object's configured sequence directory.
            outdir = self.sequence_dir
            if (not outdir):
                raise ValueError('Output directory must be specified')
        # NOTE(review): when ident is None and seq is a SeqRecord, this path
        # is computed before ident is filled in from seq.id below, yielding
        # a "None.faa" filename -- confirm whether that is intended.
        outfile = op.join(outdir, '{}.faa'.format(ident))
    else:
        outfile = None
    if (isinstance(seq, str) or isinstance(seq, Seq)):
        # Raw sequences carry no metadata, so an explicit id is mandatory.
        if (not ident):
            raise ValueError('ID must be specified if sequence is a string or Seq object')
        manual_sequence = SeqProp(id=ident, seq=seq)
    else:
        # SeqRecord: take the id from the record unless one was supplied.
        if (not ident):
            ident = seq.id
        else:
            seq.id = ident
        manual_sequence = SeqProp(id=ident, seq=seq, name=seq.name,
                                  description=seq.description)
    if write_fasta_file:
        manual_sequence.write_fasta_file(outfile=outfile,
                                         force_rerun=force_rewrite)
    self.sequences.append(manual_sequence)
    if set_as_representative:
        self.representative_sequence = manual_sequence
    return self.sequences.get_by_id(ident)
Load a manual sequence given as a string and optionally set it as the representative sequence. Also store it in the sequences attribute. Args: seq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object ident (str): Optional identifier for the sequence, required if seq is a string. Also will override existing IDs in Seq or SeqRecord objects if set. write_fasta_file (bool): If this sequence should be written out to a FASTA file outdir (str): Path to output directory set_as_representative (bool): If this sequence should be set as the representative one force_rewrite (bool): If the FASTA file should be overwritten if it already exists Returns: SeqProp: Sequence that was loaded into the ``sequences`` attribute
codesearchnet
def is_finite(val_1, val_2=None):
    """Checks if the supplied values are finite.

    Args:
        val_1: A namedtuple instance with the function value and derivative,
            as returned e.g. by value_and_gradients_function evaluations.
        val_2: (Optional) A second such namedtuple instance.

    Returns:
        is_finite: Scalar boolean `Tensor` indicating whether the function
            value and the derivative in `val_1` (and optionally in `val_2`)
            are all finite.
    """
    finite_1 = tf.math.is_finite(val_1.f) & tf.math.is_finite(val_1.df)
    if val_2 is None:
        return finite_1
    finite_2 = tf.math.is_finite(val_2.f) & tf.math.is_finite(val_2.df)
    return finite_1 & finite_2
Checks if the supplied values are finite. Args: val_1: A namedtuple instance with the function value and derivative, as returned e.g. by value_and_gradients_function evaluations. val_2: (Optional) A namedtuple instance with the function value and derivative, as returned e.g. by value_and_gradients_function evaluations. Returns: is_finite: Scalar boolean `Tensor` indicating whether the function value and the derivative in `val_1` (and optionally in `val_2`) are all finite.
codesearchnet
def load_variable(ckpt_dir_or_file, name):
    """Returns the tensor value of the given variable in the checkpoint.

    When the variable name is unknown, use `tf.train.list_variables` to
    inspect all the variable names.

    Args:
        ckpt_dir_or_file: Directory with checkpoints file or path to
            checkpoint.
        name: Name of the variable to return.

    Returns:
        A numpy `ndarray` with a copy of the value of this variable.
    """
    # Strip the ":0" output suffix that variable names carry in a graph.
    if name.endswith(':0'):
        name = name[:-2]
    return load_checkpoint(ckpt_dir_or_file).get_tensor(name)
Returns the tensor value of the given variable in the checkpoint. When the variable name is unknown, you can use `tf.train.list_variables` to inspect all the variable names. Example usage: ```python import tensorflow as tf a = tf.Variable(1.0) b = tf.Variable(2.0) ckpt = tf.train.Checkpoint(var_list={'a': a, 'b': b}) ckpt_path = ckpt.save('tmp-ckpt') var= tf.train.load_variable( ckpt_path, 'var_list/a/.ATTRIBUTES/VARIABLE_VALUE') print(var) # 1.0 ``` Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. name: Name of the variable to return. Returns: A numpy `ndarray` with a copy of the value of this variable.
github-repos
def send(self, data_to_send):
    """Immediately sends the data passed in to :func:`service_endpoint_uri`.

    If the service request fails, the passed in items are pushed back to
    the :func:`queue`.

    Args:
        data_to_send (Array): an array of :class:`contracts.Envelope`
            objects to send to the service.
    """
    request_payload = json.dumps([ a.write() for a in data_to_send ])
    request = HTTPClient.Request(
        self._service_endpoint_uri,
        bytearray(request_payload, 'utf-8'),
        {'Accept': 'application/json',
         'Content-Type' : 'application/json; charset=utf-8'})
    try:
        response = HTTPClient.urlopen(request, timeout=self._timeout)
        status_code = response.getcode()
        if 200 <= status_code < 300:
            # Success: nothing to re-queue.
            return
    except HTTPError as e:
        if e.getcode() == 400:
            # Bad request: re-sending the same payload cannot succeed, so
            # drop it instead of re-queueing. Any other HTTP error falls
            # through to the re-queue below.
            return
    except Exception as e:
        # Best-effort transport: any other failure (connection errors,
        # timeouts) falls through to the re-queue below.
        pass
    # Delivery failed or returned a non-2xx status: push the items back so
    # a later attempt can retry them.
    for data in data_to_send:
        self._queue.put(data)
Immediately sends the data passed in to :func:`service_endpoint_uri`. If the service request fails, the passed in items are pushed back to the :func:`queue`. Args: data_to_send (Array): an array of :class:`contracts.Envelope` objects to send to the service.
juraj-google-style
def get_text_config(self, decoder=False) -> 'PretrainedConfig':
    """Return the sub-config meant for text IO.

    On most models this is the config itself; on composite models it is
    stored under one of a few well-known attribute names.

    Args:
        decoder: If True, only search decoder config attribute names.

    Returns:
        The text config, or `self` when no candidate attribute is set.

    Raises:
        ValueError: If more than one candidate text config is present.
    """
    decoder_possible_text_config_names = ('decoder', 'generator', 'text_config')
    encoder_possible_text_config_names = ('text_encoder',)
    if decoder:
        possible_text_config_names = decoder_possible_text_config_names
    else:
        possible_text_config_names = encoder_possible_text_config_names + decoder_possible_text_config_names
    valid_text_config_names = []
    for text_config_name in possible_text_config_names:
        if hasattr(self, text_config_name):
            text_config = getattr(self, text_config_name, None)
            if text_config is not None:
                valid_text_config_names += [text_config_name]
    if len(valid_text_config_names) > 1:
        # Fixed typo in the error message ("desied" -> "desired").
        raise ValueError(f'Multiple valid text configs were found in the model config: {valid_text_config_names}. In this case, using `get_text_config()` would be ambiguous. Please specify the desired text config directly.')
    elif len(valid_text_config_names) == 1:
        config_to_return = getattr(self, valid_text_config_names[0])
    else:
        config_to_return = self
    return config_to_return
Returns the config that is meant to be used with text IO. On most models, it is the original config instance itself. On specific composite models, it is under a set of valid names. Args: decoder (`Optional[bool]`, *optional*, defaults to `False`): If set to `True`, then only search for decoder config names.
github-repos
def create_list(self, list_json):
    """Build a trolly ``List`` object from its JSON representation.

    Args:
        list_json: dict with at least ``id`` and ``name`` keys.

    Returns:
        The List constructed from `list_json`.
    """
    return trolly.list.List(
        trello_client=self,
        list_id=list_json['id'],
        name=list_json['name'],
        data=list_json,
    )
Create List object from JSON object Returns: List: The list from the given `list_json`.
codesearchnet
def view_edit(name=None):
    """Bottle view: edit an existing page or create a new one.

    Args:
        name: Optional page name; when omitted an empty editor is shown.

    Returns:
        Rendered bottle template, or a 404 response when the named page
        does not exist.
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')

    def render(content):
        # All editor renders share the same template arguments.
        return template(
            'edit', type='edit', name=name, extended_name=None,
            is_repo=check_repo(), history=[], gitref=None,
            today=datetime.datetime.now().strftime('%Y%m%d'),
            content=content)

    if name is None:
        return render('')
    files = glob.glob('{0}.rst'.format(name))
    if not files:
        return abort(404)
    # Context manager guarantees the handle is closed (the original
    # leaked the open file object).
    with open(files[0], 'r') as file_handle:
        return render(file_handle.read())
Edits an existing page or creates a new one. .. note:: this is a bottle view if no page name is given, creates a new page. Keyword Arguments: :name: (str) -- name of the page (OPTIONAL) Returns: bottle response object
codesearchnet
def normalize_expression(self, expression_parts):
    """Rewrite cron expression fields into consistent, predictable forms.

    Mutates `expression_parts` (a 7-element list: seconds, minutes,
    hours, day-of-month, month, day-of-week, year) in place.

    Args:
        expression_parts: the 7 cron expression components.

    Returns:
        None
    """
    # DOM/DOW: '?' is equivalent to '*'.
    expression_parts[3] = expression_parts[3].replace("?", "*")
    expression_parts[5] = expression_parts[5].replace("?", "*")
    # Zero-based fields: '0/x' means 'every x' -> '*/x'.
    for idx in (0, 1, 2):
        if expression_parts[idx].startswith("0/"):
            expression_parts[idx] = expression_parts[idx].replace("0/", "*/")
    # One-based fields: '1/x' -> '*/x'.
    for idx in (3, 4, 5, 6):
        if expression_parts[idx].startswith("1/"):
            expression_parts[idx] = expression_parts[idx].replace("1/", "*/")
    if self._options.day_of_week_start_index_zero is False:
        expression_parts[5] = self.decrease_days_of_week(expression_parts[5])
    if expression_parts[3] == "?":
        expression_parts[3] = "*"
    # Replace named days/months with their numeric equivalents.
    for day_number in self._cron_days:
        expression_parts[5] = expression_parts[5].upper().replace(
            self._cron_days[day_number], str(day_number))
    for month_number in self._cron_months:
        expression_parts[4] = expression_parts[4].upper().replace(
            self._cron_months[month_number], str(month_number))
    if expression_parts[0] == "0":
        expression_parts[0] = ''
    for i in range(len(expression_parts)):
        if expression_parts[i] == "*/1":
            expression_parts[i] = "*"
        # Bare 'start/step' gets an explicit upper bound for month,
        # day-of-week and year fields, e.g. '2/3' -> '2-12/3'.
        if "/" in expression_parts[i] and not any(
                token in expression_parts[i] for token in ['*', '-', ',']):
            step_range_through = {4: "12", 5: "6", 6: "9999"}.get(i)
            if step_range_through is not None:
                pieces = expression_parts[i].split('/')
                expression_parts[i] = "{0}-{1}/{2}".format(
                    pieces[0], step_range_through, pieces[1])
Converts cron expression components into consistent, predictable formats. Args: expression_parts: A 7 part string array, one part for each component of the cron expression Returns: None
juraj-google-style
def execute(command, cwd=os.path.curdir, **options):
    """Run `command` (a shell-style string) and wait for completion.

    Args:
        command: Command line to tokenize and run.
        cwd: Working directory for the child process.
        **options: Extra keyword arguments forwarded to subprocess.Popen.

    Returns:
        Tuple of (process, stdout, stderr).
    """
    child = subprocess.Popen(shlex.split(command), cwd=cwd, **options)
    out, err = child.communicate()
    return child, out, err
Run the system command with optional options. Args: * command: system command. * cwd: current working directory. * options: keyword options passed directly to :func:`subprocess.Popen`. Returns: Opened process, standard output & error.
juraj-google-style
async def upload_artifacts(context, files):
    """Compress (when supported) and upload `files` from the artifact dir.

    Relative paths under ``artifact_dir`` are preserved as the artifact
    target paths.

    Args:
        context: scriptworker context (provides config and credentials).
        files: iterable of relative paths to upload as artifacts.

    Raises:
        Exception: whatever the individual upload tasks raise.
    """
    def to_upload_future(target_path):
        path = os.path.join(context.config['artifact_dir'], target_path)
        (content_type, content_encoding) = compress_artifact_if_supported(path)
        return asyncio.ensure_future(retry_create_artifact(context, path, target_path=target_path, content_type=content_type, content_encoding=content_encoding))
    tasks = list(map(to_upload_future, files))
    # Wait for all uploads, then propagate the first failure (if any).
    (await raise_future_exceptions(tasks))
Compress and upload the requested files from ``artifact_dir``, preserving relative paths. Compression only occurs with files known to be supported. This function expects the directory structure in ``artifact_dir`` to remain the same. So if we want the files in ``public/...``, create an ``artifact_dir/public`` and put the files in there. Args: context (scriptworker.context.Context): the scriptworker context. files (list of str): files that should be uploaded as artifacts Raises: Exception: any exceptions the tasks raise.
codesearchnet
def add_vtep(self, name, vtep, vlan=None):
    """Add a VTEP endpoint to the global (or per-VLAN) flood list.

    Args:
        name: Interface to configure.
        vtep: IP address of the remote VTEP endpoint.
        vlan: Optional VLAN ID; when given, the VTEP joins that VLAN's
            local flood list instead of the global one.

    Returns:
        True if the command completes successfully.
    """
    if vlan:
        command = 'vxlan vlan {} flood vtep add {}'.format(vlan, vtep)
    else:
        command = 'vxlan flood vtep add {}'.format(vtep)
    return self.configure_interface(name, command)
Adds a new VTEP endpoint to the global or local flood list EosVersion: 4.13.7M Args: name (str): The name of the interface to configure vtep (str): The IP address of the remote VTEP endpoint to add vlan (str): The VLAN ID associated with this VTEP. If the VLAN keyword is used, then the VTEP is configured as a local flood endpoint Returns: True if the command completes successfully
juraj-google-style
def _document_path(self): if (self._document_path_internal is None): if (self._client is None): raise ValueError('A document reference requires a `client`.') self._document_path_internal = _get_document_path(self._client, self._path) return self._document_path_internal
Create and cache the full path for this document. Of the form: ``projects/{project_id}/databases/{database_id}/... documents/{document_path}`` Returns: str: The full document path. Raises: ValueError: If the current document reference has no ``client``.
codesearchnet
def get(self, resource):
    """Fetch attributes of the data model object named by `resource`.

    Args:
        resource: resource whose name (and parents) identify the object.

    Returns:
        The resource of the requested type on success.

    Raises:
        requests.HTTPError: on failure.
    """
    return self.service.get(
        resource, self.url_prefix, self.auth, self.session,
        self.session_send_opts)
Get attributes of the data model object named by the given resource. Args: resource (intern.resource.boss.BossResource): resource.name as well as any parents must be identified to succeed. Returns: (intern.resource.boss.BossResource): Returns resource of type requested on success. Raises: requests.HTTPError on failure.
juraj-google-style
def make_tables(grammar, precedence):
    """Generate the LR ACTION and GOTO tables for `grammar`.

    Args:
        grammar: grammar object exposing closures(), terminals,
            nonterminals and start.
        precedence: maps a symbol to its (associativity, level) pair,
            used to resolve shift/reduce conflicts.

    Returns:
        (initial_state, ACTION, GOTO) where
        ACTION[state, lookahead] = (action, ...) and
        GOTO[state, just_reduced] = new_state.
    """
    ACTION = {}
    GOTO = {}
    labels = {}

    def get_label(closure):
        # Assign each closure a stable integer state label on first use.
        if (closure not in labels):
            labels[closure] = len(labels)
        return labels[closure]

    def resolve_shift_reduce(lookahead, s_action, r_action):
        # Prefer the higher-precedence action; on ties, left-assoc reduces.
        (s_assoc, s_level) = precedence[lookahead]
        (r_assoc, r_level) = precedence[r_action[1]]
        if (s_level < r_level):
            return r_action
        elif ((s_level == r_level) and (r_assoc == LEFT)):
            return r_action
        else:
            return s_action

    (initial, closures, goto) = grammar.closures()
    for closure in closures:
        label = get_label(closure)
        for rule in closure:
            (new_action, lookahead) = (None, rule.lookahead)
            if (not rule.at_end):
                # Dot before a terminal with a transition => shift.
                symbol = rule.rhs[rule.pos]
                is_terminal = (symbol in grammar.terminals)
                has_goto = (symbol in goto[closure])
                if (is_terminal and has_goto):
                    next_state = get_label(goto[closure][symbol])
                    (new_action, lookahead) = (('shift', next_state), symbol)
            elif ((rule.production == grammar.start) and rule.at_end):
                new_action = ('accept',)
            elif rule.at_end:
                new_action = ('reduce', rule.production)
            if (new_action is None):
                continue
            prev_action = ACTION.get((label, lookahead))
            if ((prev_action is None) or (prev_action == new_action)):
                ACTION[(label, lookahead)] = new_action
            else:
                # Conflict: resolve shift/reduce via precedence, otherwise fail.
                types = (prev_action[0], new_action[0])
                if (types == ('shift', 'reduce')):
                    chosen = resolve_shift_reduce(lookahead, prev_action, new_action)
                elif (types == ('reduce', 'shift')):
                    chosen = resolve_shift_reduce(lookahead, new_action, prev_action)
                else:
                    raise TableConflictError(prev_action, new_action)
                ACTION[(label, lookahead)] = chosen
        for symbol in grammar.nonterminals:
            if (symbol in goto[closure]):
                GOTO[(label, symbol)] = get_label(goto[closure][symbol])
    return (get_label(initial), ACTION, GOTO)
Generates the ACTION and GOTO tables for the grammar. Returns: action - dict[state][lookahead] = (action, ...) goto - dict[state][just_reduced] = new_state
codesearchnet
def VerifyStructure(self, parser_mediator, line):
    """Verify that `line` looks like a Sophos Anti-Virus log line.

    Args:
        parser_mediator: mediates interactions between parsers and other
            components, such as storage and dfVFS.
        line: a line from a text file.

    Returns:
        bool: True if the line is in the expected format, False if not.
    """
    try:
        structure = self._LOG_LINE.parseString(line)
    except pyparsing.ParseException:
        logger.debug('Not a Sophos Anti-Virus log file')
        return False
    # NOTE(review): this rejects the line only when *neither* character at
    # offset 8 nor offset 15 is a space; confirm whether both positions
    # were meant to be required to be spaces.
    if ' ' not in (line[8], line[15]):
        logger.debug('Not a Sophos Anti-Virus log file')
        return False
    try:
        dfdatetime_time_elements.TimeElements(
            time_elements_tuple=structure.date_time)
    except ValueError:
        logger.debug((
            'Not a Sophos Anti-Virus log file, invalid date and time: '
            '{0!s}').format(structure.date_time))
        return False
    return True
Verify that this file is a Sophos Anti-Virus log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
juraj-google-style
def from_concrete_function(concrete_fn, specialized_flat_specs: Optional[List[tensor_spec.TensorSpec]]=None):
    """Build a compiler-IR generator from a TF concrete function.

    Args:
        concrete_fn: result of ``get_concrete_function``.
        specialized_flat_specs: optional specialized flat tf.TensorSpecs
            for the function arguments.

    Returns:
        Callable generating the HLO (or requested stage) text/bytes.

    Raises:
        ValueError: if the inputs do not have fully static shapes.
    """
    context.ensure_initialized()
    fn_name = concrete_fn.name
    filtered_flat_specs = specialized_flat_specs or list(nest.flatten(concrete_fn.structured_input_signature))
    # Compiler IR can only be produced for fully-defined (static) shapes.
    if not all((s.shape.is_fully_defined() for s in filtered_flat_specs)):
        raise ValueError(f'Only support static input shape but got inputs = {concrete_fn.inputs}')

    def compiler_ir_generator(stage='hlo', device_name=None, platform_name=None):
        if device_name is not None:
            if platform_name is not None:
                raise ValueError('device_name and platform_name cannot be provided at the same time.')
            warnings.warn('device_name is being deprecated. Use platform_name.')
            device_name = maybe_get_device_name(device_name)
        res_bytes = context.context().get_compiler_ir(device_name=device_name, platform_name=platform_name, function_name=fn_name, flat_args=filtered_flat_specs, captured_inputs=concrete_fn.captured_inputs, stage=stage)
        # Serialized stages stay raw bytes; textual stages are decoded.
        if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'):
            return res_bytes
        else:
            return res_bytes.decode('utf-8')
    return compiler_ir_generator
Generate the Compiler Ir from tf concrete function with TensorSpec. Args: concrete_fn: returned by using get_concrete_function. specialized_flat_specs: specialized flat tf.TensorSpecs for function args. Returns: Function callable that generate the HLO text. Raises: ValueError: if concrete_fn is not "compilable" without concrete inputs.
github-repos
def artifact(self, counter, stage, job, stage_counter=1):
    """Instantiate an Artifact helper for this pipeline.

    Args:
        counter: Pipeline counter to fetch the artifact for.
        stage: Stage name.
        job: Job name.
        stage_counter: Stage run counter; defaults to 1.

    Returns:
        gocd.api.artifact.Artifact instance.
    """
    return Artifact(
        self.server, self.name, counter, stage, job, stage_counter)
Helper to instantiate an :class:`gocd.api.artifact.Artifact` object Args: counter (int): The pipeline counter to get the artifact for stage: Stage name job: Job name stage_counter: Defaults to 1 Returns: Artifact: :class:`gocd.api.artifact.Artifact` object
juraj-google-style
def _GetUnifiedDiff(before, after, filename='code'): before = before.splitlines() after = after.splitlines() return '\n'.join(difflib.unified_diff(before, after, filename, filename, '(original)', '(reformatted)', lineterm='')) + '\n'
Get a unified diff of the changes. Arguments: before: (unicode) The original source code. after: (unicode) The reformatted source code. filename: (unicode) The code's filename. Returns: The unified diff text.
github-repos
def flatten(array: _ArrayT, pattern: str) -> tuple[_ArrayT, _Shape]:
    """Flatten the batch dimensions of `array` using an einops pattern.

    The ``...`` in `pattern` is packed into a single leading axis, so a
    ``(b, n, h, w, c)`` array with pattern ``'... h w c'`` becomes
    ``(b * n, h, w, c)``.

    Args:
        array: Array to flatten.
        pattern: Einops pattern describing which axes to keep.

    Returns:
        Tuple of (flattened array, original batch shape).
    """
    array, (batch_shape,) = einops.pack([array], pattern.replace('...', '*'))
    return (array, tuple(batch_shape))
Flatten an array along custom dimensions. Uses `einops` syntax. ```python flat_x, batch_shape = enp.flatten(x, '... h w c') y = enp.unflatten(y, batch_shape, '... h w c') ``` * `x.shape == (h, w, c)` -> `flat_x.shape == (1, h, w, c)` * `x.shape == (b, h, w, c)` -> `flat_x.shape == (b, h, w, c)` * `x.shape == (b, n, h, w, c)` -> `flat_x.shape == (b * n, h, w, c)` Args: array: Array to flatten. pattern: Einops pattern to flatten the array. Returns: Tuple of (flattened array, batch shape).
github-repos
def _update_flags(compiler_flags, remove_flags=()):
    """Update gfortran compiler flags and return a filtered copy.

    Shared flags are always appended; debug vs. optimize flags are chosen
    from environment variables.

    Args:
        compiler_flags: Existing flags for a compiler (appended to in place).
        remove_flags: Extra flags to strip from the result.

    Returns:
        list: flags with additions applied and removals filtered out.
    """
    for shared_flag in GFORTRAN_SHARED_FLAGS:
        if shared_flag not in compiler_flags:
            compiler_flags.append(shared_flag)
    if DEBUG_ENV in os.environ:
        to_add = GFORTRAN_DEBUG_FLAGS
        to_remove = GFORTRAN_OPTIMIZE_FLAGS
    else:
        to_add = GFORTRAN_OPTIMIZE_FLAGS
        if os.environ.get(WHEEL_ENV) is None:
            # Only target the build host's CPU when not building a wheel.
            to_add += (GFORTRAN_NATIVE_FLAG,)
        to_remove = GFORTRAN_DEBUG_FLAGS
    for new_flag in to_add:
        if new_flag not in compiler_flags:
            compiler_flags.append(new_flag)
    return [flag for flag in compiler_flags
            if flag not in to_remove and flag not in remove_flags]
Update a given set of compiler flags. Args: compiler_flags (List[str]): Existing flags associated with a compiler. remove_flags (Optional[Container[str]]): A container of flags to remove that will override any of the defaults. Returns: List[str]: The modified list (i.e. some flags added and some removed).
juraj-google-style
def target_code_to_name(code):
    """Map an int target code back to its target name.

    TARGET_CODES is a 1:1 name -> code mapping, so a reverse lookup is
    performed on an inverted copy.

    Args:
        code: Value from TARGET_CODES.

    Returns:
        The target name corresponding to `code`.
    """
    inverse = {number: name for name, number in TARGET_CODES.items()}
    return inverse[code]
Converts an int target code to a target name Since self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup to get the more readable name. Args: code: Value from self.TARGET_CODES Returns: String target name corresponding to the given code.
juraj-google-style
def delete_customer(self, customer_id):
    """Remove the customer identified by `customer_id` from the system.

    Args:
        customer_id: Identifier of the client to delete.

    Returns:
        The API response from the delete call.
    """
    endpoint = '{}customers/{}'.format(self.url, customer_id)
    return self.client._delete(endpoint, headers=self.get_headers())
Removes a user from the system. Args: customer_id: Identifier of the client to be deleted. Returns:
codesearchnet
def _convert_schemas(mapping, schemas): schemas = deepcopy(schemas) for schema in schemas: for fk in schema.get('foreignKeys', []): resource = fk['reference']['resource'] if (resource != 'self'): if (resource not in mapping): message = 'Not resource "%s" for foreign key "%s"' message = (message % (resource, fk)) raise ValueError(message) fk['reference']['resource'] = mapping[resource] return schemas
Convert schemas to be compatible with storage schemas. Foreign keys related operations. Args: mapping (dict): mapping between resource name and table name schemas (list): schemas Raises: ValueError: if there is no resource for some foreign key in given mapping Returns: list: converted schemas
codesearchnet
def subscribe(self, subject, callback, queue=''):
    """Express interest in `subject` (wildcards partial:*, full:> allowed).

    Messages matching the subject are delivered to `callback`.

    Args:
        subject: subject string.
        callback: callable invoked for each message.
        queue: optional queue group name.

    Returns:
        The newly registered Subscription.
    """
    sub = Subscription(
        sid=self._next_sid,
        subject=subject,
        queue=queue,
        callback=callback,
        # NOTE(review): keyword spelled exactly as the Subscription API
        # expects ("connetion"); do not "fix" without changing Subscription.
        connetion=self,
    )
    self._subscriptions[sub.sid] = sub
    self._send('SUB %s %s %d' % (sub.subject, sub.queue, sub.sid))
    self._next_sid += 1
    return sub
Subscribe will express interest in the given subject. The subject can have wildcards (partial:*, full:>). Messages will be delivered to the associated callback. Args: subject (string): a string with the subject callback (function): callback to be called
juraj-google-style
def to_dict(cls):
    """Return a ``name -> number`` dict for this enumerated class.

    The resulting dict can be used with ``def_num``.
    """
    return {item.name: item.number for item in cls}
Make dictionary version of enumerated class. Dictionary created this way can be used with def_num. Returns: A dict (name) -> number
codesearchnet
def __call__(self, *args: Union[str, 'Image.Image', List['Image.Image'], List[str]], **kwargs: Any) -> List[Any]:
    """Extract features of the input image(s).

    Accepts a single image (URL, local path, or PIL image) or a
    homogeneous batch of them; all work is delegated to the base
    pipeline implementation.
    """
    return super().__call__(*args, **kwargs)
Extract the features of the input(s). Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and the call may block forever. Return: A nested list of `float`: The features computed by the model.
github-repos
def remove(path, dir_fd=None):
    """Remove a file (cloud-storage-aware equivalent of ``os.remove``).

    Args:
        path: Path or URL of the file.
        dir_fd: Directory descriptor; not supported on cloud storage
            objects (accepted for os.remove signature compatibility).

    Raises:
        IsADirectoryError-equivalent: when `path` names a directory or
            a storage locator.
    """
    system = get_instance(path)
    if system.is_locator(path) or path[-1] == '/':
        raise is_a_directory_error("Is a directory: '%s'" % path)
    system.remove(path)
Remove a file. Equivalent to "os.remove" and "os.unlink". Args: path (path-like object): Path or URL. dir_fd: directory descriptors; see the os.remove() description for how it is interpreted. Not supported on cloud storage objects.
codesearchnet
def pop(self, name, defval=None):
    """Remove `name` from the SlabDict and return its value.

    Also removes the backing key from the slab storage.

    Args:
        name: Key to remove.
        defval: Value returned when `name` is absent.

    Returns:
        The stored value, or `defval` when the key was not present.
    """
    value = self.info.pop(name, defval)
    storage_key = self.pref + name.encode('utf8')
    self.slab.pop(storage_key, db=self.db)
    return value
Pop a name from the SlabDict. Args: name (str): The name to remove. defval (obj): The default value to return if the name is not present. Returns: object: The object stored in the SlabDict, or defval if the object was not present.
juraj-google-style
def validate_session(self, token, remote='127.0.0.1', proxy=None): params = {'validationFactors': [{'name': 'remote_address', 'value': remote}]} if proxy: params['validation-factors']['validationFactors'].append({'name': 'X-Forwarded-For', 'value': proxy}) url = (self.rest_url + ('/session/%s' % token)) response = self._post(url, data=json.dumps(params), params={'expand': 'user'}) if (not response.ok): return None return response.json()
Validate a session token. Validate a previously acquired session token against the Crowd server. This may be a token provided by a user from a http cookie or by some other means. Args: token: The session token. remote: The remote address of the user. proxy: Value of X-Forwarded-For server header Returns: dict: A dict mapping of user attributes if the application authentication was successful. See the Crowd documentation for the authoritative list of attributes. None: If authentication failed.
codesearchnet
def parse_relations(
    belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
    """Parse relation tokens out of a BEL string, skipping quoted spans.

    Args:
        belstr: BEL string as one single string (not list of chars).
        char_locs: paren, comma and quote char locations.
        parsed: data structure for parsed functions, relations, nested.
        errors: error messages.

    Returns:
        (parsed, errors)
    """
    quotes = char_locs["quotes"]
    # Every character index that falls inside a quoted region.
    quoted_range = set([i for start, end in quotes.items() for i in range(start, end)])
    for match in relations_pattern_middle.finditer(belstr):
        (start, end) = match.span(1)
        end = end - 1
        if start != end:
            test_range = set(range(start, end))
        else:
            # Bug fix: set(start) on an int raises TypeError; a
            # single-element set was intended.
            test_range = {start}
        # Ignore relation-like text inside quoted strings.
        if test_range.intersection(quoted_range):
            continue
        span_key = (start, end)
        parsed[span_key] = {
            "type": "Relation",
            "name": match.group(1),
            "span": (start, end),
        }
    for match in relations_pattern_end.finditer(belstr):
        (start, end) = match.span(1)
        log.debug(f"Relation-end {match}")
        end = end - 1
        if start != end:
            test_range = set(range(start, end))
        else:
            # Bug fix: same int-to-set correction as above.
            test_range = {start}
        if test_range.intersection(quoted_range):
            continue
        span_key = (start, end)
        parsed[span_key] = {
            "type": "Relation",
            "name": match.group(1),
            "span": (start, end),
        }
    return parsed, errors
Parse relations from BEL string Args: belstr: BEL string as one single string (not list of chars) char_locs: paren, comma and quote char locations parsed: data structure for parsed functions, relations, nested errors: error messages Returns: (parsed, errors):
juraj-google-style
def as_session(name_or_func):
    """Decorator printing start/title/end info around the wrapped call.

    Usable both as ``@as_session`` (title derived from the function name)
    and as ``@as_session('My Title')``.
    """
    if callable(name_or_func):
        # Used without arguments: derive a readable title from the
        # function name (camelCase -> spaced, underscores -> spaces).
        func = name_or_func
        name = func.__name__
        name = "".join([(' ' + x) if x.isupper() else x for x in name])
        name = name.replace('_', ' ')
        return as_session(name)(func)
    else:
        name = name_or_func

        def get_func(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                start()
                title(name)
                result = func(*args, **kwargs)
                end()
                return result
            return wrapper
        return get_func
Print start/title/end info before and after the decorated function call. Args: name_or_func: the session title to show after start, or the decorated function itself when used without arguments (the title is then derived from the function name).
juraj-google-style
def _read_git_tags(default_version=DEFAULT_VERSION, git_command=('git', 'tag')):
    """Find the latest semantic-version git tag.

    Args:
        default_version: version string to fall back to.
        git_command: subprocess command (exposed for testing).

    Returns:
        str: latest version found, or the default.

    Warns:
        exceptions.ProsperDefaultVersionWarning: when no git version is
            found.
    """
    try:
        current_tags = check_output(git_command).splitlines()
    except Exception:
        raise
    if (not current_tags[0]):
        # No tags at all: warn and fall back to the default.
        warnings.warn('Unable to resolve current version', exceptions.ProsperDefaultVersionWarning)
        return default_version
    latest_version = semantic_version.Version(default_version)
    for tag in current_tags:
        # Tags may have a leading 'v' prefix, e.g. 'v1.2.3'.
        tag_str = decode(tag, 'utf-8').replace('v', '')
        try:
            tag_ver = semantic_version.Version(tag_str)
        except Exception:
            # Skip tags that are not valid semantic versions.
            continue
        if (tag_ver > latest_version):
            latest_version = tag_ver
    return str(latest_version)
tries to find current git tag Notes: git_command exposed for testing null case Args: default_version (str): what version to make git_command (:obj:`list`): subprocess command Retruns: str: latest version found, or default Warns: exceptions.ProsperDefaultVersionWarning: git version not found
codesearchnet
def save_pkl(filename=None, times=None):
    """Pickle a Times object (or list/tuple of them), optionally to a file.

    Args:
        filename: dump target; when None the pickled bytes are returned.
        times: object(s) to dump; when None, the current root is used.

    Returns:
        bytes: pickled data, only when no filename is provided.

    Raises:
        TypeError: if `times` is not a Times object or list/tuple of them.
    """
    if times is None:
        # Fall back to the global timing root (collapsed if still running).
        if not f.root.stopped:
            times = collapse.collapse_times()
        else:
            times = f.root.times
    else:
        if isinstance(times, (list, tuple)):
            for t in times:
                if not isinstance(t, Times):
                    raise TypeError("Expected single Times instance or list/tuple of Times instances for param 'times'.")
        elif not isinstance(times, Times):
            raise TypeError("Expected single Times instance or list/tuple of Times instances for param 'times'.")
    if filename is not None:
        with open(str(filename), 'wb') as file:
            pickle.dump(times, file)
    else:
        return pickle.dumps(times)
Serialize and / or save a Times data object using pickle (cPickle). Args: filename (None, optional): Filename to dump to. If not provided, returns serialized object. times (None, optional): object to dump. If none provided, uses current root. Returns: pkl: Pickled Times data object, only if no filename provided. Raises: TypeError: If 'times' is not a Times object or a list or tuple of them.
juraj-google-style
async def receive(self, timeout: float = None) -> Union[Message, None]: if timeout: coro = self.queue.get() try: msg = await asyncio.wait_for(coro, timeout=timeout) except asyncio.TimeoutError: msg = None else: try: msg = self.queue.get_nowait() except asyncio.QueueEmpty: msg = None return msg
Receives a message for this behaviour. If timeout is not None it returns the message or "None" after timeout is done. Args: timeout (float): number of seconds until return Returns: spade.message.Message: a Message or None
juraj-google-style
def transform(self, program: moderngl.Program, buffer: moderngl.Buffer, mode=None, vertices=(- 1), first=0, instances=1):
    """Transform vertices, writing the output into a single buffer.

    Args:
        program: The moderngl.Program to run.
        buffer: Destination moderngl.Buffer for the output.
        mode: Draw mode; defaults to this VAO's configured mode.
        vertices: Number of vertices to transform (-1 means all).
        first: Index of the first vertex to start with.
        instances: Number of instances.
    """
    vao = self.instance(program)
    draw_mode = self.mode if mode is None else mode
    vao.transform(buffer, mode=draw_mode, vertices=vertices,
                  first=first, instances=instances)
Transform vertices. Stores the output in a single buffer. Args: program: The ``moderngl.Program`` buffer: The ``moderngl.buffer`` to store the output Keyword Args: mode: Draw mode (for example ``moderngl.POINTS``) vertices (int): The number of vertices to transform first (int): The index of the first vertex to start with instances (int): The number of instances
codesearchnet
def resolve_lookups(variable, context, provider):
    """Resolve every lookup attached to `variable`.

    Args:
        variable: the Variable whose lookups are being resolved.
        context: stacker context.
        provider: subclass of the base provider.

    Returns:
        dict mapping each Lookup to its resolved value.

    Raises:
        UnknownLookupType: when no handler is registered for a lookup type.
        FailedVariableLookup: when a handler raises any exception.
    """
    resolved = {}
    for lookup in variable.lookups:
        try:
            handler = LOOKUP_HANDLERS[lookup.type]
        except KeyError:
            raise UnknownLookupType(lookup)
        try:
            resolved[lookup] = handler(
                value=lookup.input, context=context, provider=provider)
        except Exception as e:
            # Wrap handler failures with variable/lookup context.
            raise FailedVariableLookup(variable.name, lookup, e)
    return resolved
Resolve a set of lookups. Args: variable (:class:`stacker.variables.Variable`): The variable resolving its lookups. context (:class:`stacker.context.Context`): stacker context provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider Returns: dict: dict of Lookup -> resolved value
juraj-google-style
def reduce_by(self, package_request):
    """Reduce this scope with respect to a package request.

    Returns:
        (_PackageScope, [Reduction]) tuple: a new scope copy with
        reductions applied, `self` if there were no reductions, or None
        if the scope was completely reduced.
    """
    self.solver.reduction_broad_tests_count += 1
    if self.package_request.conflict:
        # Conflict scopes are never reduced.
        return (self, [])
    (new_slice, reductions) = self.variant_slice.reduce_by(package_request)
    if (new_slice is None):
        # Every variant was reduced away.
        self.solver.reductions_count += 1
        if self.pr:
            reqstr = _short_req_str(package_request)
            self.pr('%s was reduced to nothing by %s', self, reqstr)
            self.pr.br()
        return (None, reductions)
    if (new_slice is not self.variant_slice):
        # Some variants were removed; build a copy around the new slice.
        self.solver.reductions_count += 1
        scope = self._copy(new_slice)
        if self.pr:
            reqstr = _short_req_str(package_request)
            self.pr('%s was reduced to %s by %s', self, scope, reqstr)
            self.pr.br()
        return (scope, reductions)
    return (self, [])
Reduce this scope wrt a package request. Returns: A (_PackageScope, [Reduction]) tuple, where the scope is a new scope copy with reductions applied, or self if there were no reductions, or None if the scope was completely reduced.
codesearchnet
def infer_graph(inputs: Optional[Set[EventSetNode]], outputs: Set[EventSetNode]) -> Graph:
    """Extract the nodes in between the output and input nodes.

    If `inputs` is set, fails if `outputs` cannot be computed from them.
    If `inputs` is None, the required set of inputs is inferred.

    Args:
        inputs: Set of available input nodes, or None to infer them.
        outputs: Set of expected output nodes.

    Returns:
        The inferred graph.

    Raises:
        ValueError: If some nodes are required but not provided as input.
    """
    graph = Graph()
    graph.outputs.update(outputs)
    # Backward traversal from the outputs toward their creators.
    pending_nodes: Set[EventSetNode] = outputs.copy()
    done_nodes: Set[EventSetNode] = set()
    missing_nodes: Set[EventSetNode] = set()
    while pending_nodes:
        node = next(iter(pending_nodes))
        pending_nodes.remove(node)
        assert node not in done_nodes
        graph.add_node(node)
        if inputs is not None and node in inputs:
            # Reached a declared input; stop traversing this branch.
            graph.inputs.add(node)
            continue
        if node.creator is None:
            # Source node: an inferred input, or missing if inputs were fixed.
            if inputs is not None:
                missing_nodes.add(node)
            else:
                graph.inputs.add(node)
            continue
        graph.add_operator(node.creator)
        for input_node in node.creator.inputs.values():
            if input_node in done_nodes:
                continue
            pending_nodes.add(input_node)
        for output_node in node.creator.outputs.values():
            graph.add_node(output_node)
    if missing_nodes:
        raise ValueError(f'The following input nodes are required but not provided as input:\n{missing_nodes}')
    # Register samplings and features of every collected node.
    for e in graph.nodes:
        graph.add_sampling(e.sampling_node)
        for f in e.feature_nodes:
            graph.add_feature(f)
    return graph
Extracts the nodes in between the output and input nodes. If inputs is set, fails if outputs cannot be computed from `inputs`. If inputs is not set, infers the required set of inputs. Args: inputs: Set of available input nodes. If None, inputs are inferred. outputs: Set of expected output nodes. Returns: The inferred graph. Raises: ValueError: If there are repeated nodes in the `inputs`; an unexpected type of input is provided; an unnamed node is inferred as input; or some nodes are required but not provided.
github-repos
def CreateAdGroup(client, campaign_id):
    """Create a Showcase-ads AdGroup in the given shopping campaign.

    Args:
        client: an AdWordsClient instance.
        campaign_id: the str ID of a shopping campaign.

    Returns:
        The created AdGroup as a sudsobject.
    """
    import uuid  # local import: top of this file is outside the visible chunk
    ad_group_service = client.GetService('AdGroupService', 'v201809')
    adgroup = {
        'adGroupType': 'SHOPPING_SHOWCASE_ADS',
        'campaignId': campaign_id,
        # The source line was truncated here; a unique suffix keeps ad
        # group names distinct across runs (matches the AdWords examples).
        'name': 'AdGroup #%s' % uuid.uuid4(),
        'biddingStrategyConfiguration': {
            'biddingStrategyType': 'MANUAL_CPC',
            'bids': [{
                'xsi_type': 'CpcBid',
                'bid': {
                    'microAmount': 100000
                }
            }]
        }
    }
    adgroup_operations = {
        'operator': 'ADD',
        'operand': adgroup
    }
    adgroup = ad_group_service.mutate(adgroup_operations)['value'][0]
    print('AdGroup with name "%s" and ID "%s" was added.'
          % (adgroup['name'], adgroup['id']))
    return adgroup
Creates an AdGroup for the given shopping campaign ID. Args: client: an AdWordsClient instance. campaign_id: the str ID of a shopping campaign. Returns: The created AdGroup as a sudsobject.
juraj-google-style
def handle_or_else(self, orelse, test):
    """Handle the orelse part of an if or try node.

    Args:
        orelse(list[Node])
        test(Node)

    Returns:
        The last nodes of the orelse branch.
    """
    if isinstance(orelse[0], ast.If):
        # elif chain: visit it as a nested If and link its test to ours.
        control_flow_node = self.visit(orelse[0])
        control_flow_node.test.label = 'el' + control_flow_node.test.label
        test.connect(control_flow_node.test)
        return control_flow_node.last_nodes
    else:
        # Plain else block: wire the statements in sequence.
        else_connect_statements = self.stmt_star_handler(
            orelse,
            prev_node_to_avoid=self.nodes[-1]
        )
        test.connect(else_connect_statements.first_statement)
        return else_connect_statements.last_statements
Handle the orelse part of an if or try node. Args: orelse(list[Node]) test(Node) Returns: The last nodes of the orelse branch.
juraj-google-style
def _save_model(self, epoch, batch, logs): filepath = self._get_file_path(epoch, batch, logs) try: if self._should_save_model(epoch, batch, logs, filepath): dirname = os.path.dirname(filepath) if dirname and (not file_utils.exists(dirname)): file_utils.makedirs(dirname) if self.save_weights_only: self.model.save_weights(filepath, overwrite=True) else: self.model.save(filepath, overwrite=True) except IsADirectoryError: raise IOError(f'Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {filepath}') except IOError as e: if 'is a directory' in str(e.args[0]).lower(): raise IOError(f'Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: f{filepath}') raise e
Saves the model. Args: epoch: the epoch this iteration is in. batch: the batch this iteration is in. `None` if the `save_freq` is set to `"epoch"`. logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
github-repos
def delete(self):
    """Remove the NTP source configuration entry from the node.

    Returns:
        True if the operation succeeds, otherwise False.
    """
    command = self.command_builder('ntp source', disable=True)
    return self.configure(command)
Delete the NTP source entry from the node. Returns: True if the operation succeeds, otherwise False.
codesearchnet
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Build model inputs by wrapping sequences in special tokens.

    Produces ``prefix + ids_0 [+ ids_1] + suffix``. Pairs are not the
    expected use case but are simply concatenated without a separator.

    Args:
        token_ids_0: IDs of the first sequence.
        token_ids_1: Optional IDs of a second sequence.

    Returns:
        Input IDs with the appropriate special tokens.
    """
    if token_ids_1 is None:
        middle = list(token_ids_0)
    else:
        middle = list(token_ids_0) + list(token_ids_1)
    return self.prefix_tokens + middle + self.suffix_tokens
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An MBART sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def interconnects(self):
    """Return the (lazily created) Interconnects API client."""
    client = self.__interconnects
    if not client:
        client = Interconnects(self.__connection)
        self.__interconnects = client
    return client
Gets the Interconnects API client. Returns: Interconnects:
codesearchnet
def __init__(self, rate, validate_args=False, allow_nan_stats=True, name='Exponential'):
    """Construct an Exponential distribution with parameter `rate`.

    Args:
        rate: Floating point tensor of positive values, equal to 1 / mean.
        validate_args: Python bool; check parameter validity at a
            possible runtime cost.
        allow_nan_stats: Python bool; use NaN for undefined statistics
            instead of raising.
        name: Python str name prefixed to created ops.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[rate]) as name:
        self._rate = ops.convert_to_tensor(rate, name='rate')
        # Exponential(rate) is Gamma with concentration fixed at 1.
        super(Exponential, self).__init__(concentration=array_ops.ones([], dtype=self._rate.dtype), rate=self._rate, allow_nan_stats=allow_nan_stats, validate_args=validate_args, name=name)
    self._parameters = parameters
    self._graph_parents += [self._rate]
Construct Exponential distribution with parameter `rate`. Args: rate: Floating point tensor, equivalent to `1 / mean`. Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class.
github-repos
def max_neighbor(self, in_lon, in_lat, radius=0.05): out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1])) in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T) out_indices = np.indices(out_data.shape[1:]) out_rows = out_indices[0].ravel() out_cols = out_indices[1].ravel() for d in range(self.data.shape[0]): nz_points = np.where((self.data[d] > 0)) if (len(nz_points[0]) > 0): nz_vals = self.data[d][nz_points] nz_rank = np.argsort(nz_vals) original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]], self.lon[nz_points[1][nz_rank]])).T) all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0) for (n, neighbors) in enumerate(all_neighbors): if (len(neighbors) > 0): out_data[(d, out_rows[neighbors], out_cols[neighbors])] = nz_vals[nz_rank][n] return out_data
Finds the largest value within a given radius of a point on the interpolated grid. Args: in_lon: 2D array of longitude values in_lat: 2D array of latitude values radius: radius of influence for largest neighbor search in degrees Returns: Array of interpolated data
codesearchnet
def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        bool: True if the file entry exists.
    """
    store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)
    if store_index is not None:
        # A store exists when its index falls within the volume's stores.
        return 0 <= store_index < self._vshadow_volume.number_of_stores
    # Without a store index, only the virtual root location can exist.
    location = getattr(path_spec, 'location', None)
    return location == self.LOCATION_ROOT if location is not None else False
Determines if a file entry for a path specification exists. Args: path_spec (PathSpec): path specification. Returns: bool: True if the file entry exists.
juraj-google-style
def _create_L_ind(self, L):
    """Convert a label matrix with labels in {0,...,k} to one-hot format.

    Args:
        L: An [n, m] label matrix (dense ndarray or scipy sparse) with
            values in {0, 1, ..., k}, where 0 means abstain.

    Returns:
        L_ind: An [n, m * k] dense np.ndarray with values in {0, 1}.
            Note that no column is allocated for 0 (abstain) labels.
    """
    if issparse(L):
        # Densify so boolean comparison and strided assignment work uniformly.
        L = L.todense()
    L_ind = np.zeros((self.n, self.m * self.k))
    for y in range(1, self.k + 1):
        # Columns (y-1), (y-1)+k, (y-1)+2k, ... hold the indicator for
        # label y across the m label sources. The original line used the
        # invalid subscript `L_ind[(:, ...)]`, which is a syntax error.
        L_ind[:, (y - 1)::self.k] = np.where(L == y, 1, 0)
    return L_ind
Convert a label matrix with labels in 0...k to a one-hot format Args: L: An [n,m] scipy.sparse label matrix with values in {0,1,...,k} Returns: L_ind: An [n,m*k] dense np.ndarray with values in {0,1} Note that no column is required for 0 (abstain) labels.
codesearchnet
def __init__(self, context):
    """Instantiates ProjectorPlugin via TensorBoard core.

    Args:
        context: A base_plugin.TBContext instance.
    """
    self.multiplexer = context.multiplexer
    self.logdir = context.logdir
    self._handlers = None
    self.readers = {}
    self.run_paths = None
    self._configs = {}
    self.old_num_run_paths = None
    self.config_fpaths = None
    # Bounded LRU cache for tensors served to the frontend.
    self.tensor_cache = LRUCache(_TENSOR_CACHE_CAPACITY)
    # Whether the plugin is active; determined lazily on a separate thread
    # (see _thread_for_determining_is_active).
    self._is_active = False
    self._thread_for_determining_is_active = None
    if self.multiplexer:
        self.run_paths = self.multiplexer.RunPaths()
Instantiates ProjectorPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
juraj-google-style
def _get(self, rec_id=None, upstream=None):
    """Fetch a record by its ID, or by its upstream_identifier.

    Args:
        rec_id: The record's primary identifier; takes precedence over
            `upstream` when both are supplied.
        upstream: The record's upstream_identifier.

    Returns:
        dict: The record's JSON payload.

    Raises:
        `pulsarpy.models.RecordNotFound`: A record could not be found.
    """
    if rec_id:
        self.record_url = self.__class__.get_record_url(rec_id)
        self.debug_logger.debug('GET {} record with ID {}: {}'.format(self.__class__.__name__, rec_id, self.record_url))
        # NOTE(review): verify=False disables TLS certificate validation.
        response = requests.get(url=self.record_url, headers=HEADERS, verify=False)
        not_found = (not response.ok) and (response.status_code == requests.codes.NOT_FOUND)
        if not_found:
            msg = "Search for {} record with ID '{}' returned no results.".format(self.__class__.__name__, rec_id)
            raise RecordNotFound(msg)
        self.write_response_html_to_file(response, 'get_bob.html')
        response.raise_for_status()
        return response.json()
    elif upstream:
        # find_by with require=True raises if no record matches.
        rec_json = self.__class__.find_by({'upstream_identifier': upstream}, require=True)
        self.record_url = self.__class__.get_record_url(rec_json['id'])
        return rec_json
Fetches a record by the record's ID or upstream_identifier.

Args:
    rec_id: The record's primary identifier; takes precedence when both
        arguments are supplied.
    upstream: The record's upstream_identifier, used when `rec_id` is not given.

Returns:
    `dict`: The JSON representation of the record.

Raises:
    `pulsarpy.models.RecordNotFound`: A record could not be found.
codesearchnet
def batch(self, timelimit=None):
    """Run the flow in batch mode, returning the job script's exit status.

    Requires a manager.yml file and a batch_adapter adapter.

    Args:
        timelimit: Time limit (int with seconds or string with the slurm
            convention: "days-hours:minutes:seconds"). If None, the default
            from the `batch_adapter` entry of `manager.yml` is used.
    """
    from .launcher import BatchLauncher
    # Place the batch workdir as a sibling of this flow's workdir, with a
    # "_batch" suffix appended to its name.
    parent_parts = self.workdir.split(os.path.sep)[:-1]
    prev_dir = os.path.join(os.path.sep, os.path.join(*parent_parts))
    workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch")
    return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)
Run the flow in batch mode, return exit status of the job script. Requires a manager.yml file and a batch_adapter adapter. Args: timelimit: Time limit (int with seconds or string with time given with the slurm convention: "days-hours:minutes:seconds"). If timelimit is None, the default value specified in the `batch_adapter` entry of `manager.yml` is used.
juraj-google-style
def UpdateTaskAsPendingMerge(self, task):
    """Updates the task manager to reflect the task is ready to be merged.

    Args:
        task (Task): task.

    Raises:
        KeyError: if the task was not queued, processing or abandoned, or
            the task was abandoned and has a retry task.
    """
    with self._lock:
        is_abandoned = (task.identifier in self._tasks_abandoned)
        is_processing = (task.identifier in self._tasks_processing)
        is_queued = (task.identifier in self._tasks_queued)
        if ((not is_queued) and (not is_processing) and (not is_abandoned)):
            raise KeyError('Status of task {0:s} is unknown.'.format(task.identifier))
        # Refuse to merge an abandoned task that was retried, otherwise the
        # retry task and this one could both be merged.
        if (is_abandoned and task.has_retry):
            raise KeyError('Will not merge a task {0:s} with retry task.'.format(task.identifier))
        # Remove the task from whichever state collections currently hold it.
        if is_queued:
            logger.debug('Task {0:s} was queued, now merging.'.format(task.identifier))
            del self._tasks_queued[task.identifier]
        if is_processing:
            logger.debug('Task {0:s} was processing, now merging.'.format(task.identifier))
            del self._tasks_processing[task.identifier]
        if is_abandoned:
            logger.debug('Task {0:s} was abandoned, now merging.'.format(task.identifier))
            del self._tasks_abandoned[task.identifier]
        self._tasks_pending_merge.PushTask(task)
        self.SampleTaskStatus(task, 'pending_merge')
        task.UpdateProcessingTime()
        self._UpdateLatestProcessingTime(task)
Updates the task manager to reflect the task is ready to be merged. Args: task (Task): task. Raises: KeyError: if the task was not queued, processing or abandoned, or the task was abandoned and has a retry task.
codesearchnet
def assertDTypeEqual(self, target, expected_dtype):
    """Assert ndarray data type is equal to expected.

    Args:
        target: The numpy `ndarray`, or anything that can be converted into
            a numpy `ndarray` (including Tensor), or a list of such values.
        expected_dtype: Expected data type.
    """
    target = self._GetNdArray(target)
    # `target` may already be a list of arrays; otherwise wrap the single
    # array so both cases are checked uniformly. The original left `arrays`
    # unassigned when `target` was a list, raising NameError.
    arrays = target if isinstance(target, list) else [target]
    for arr in arrays:
        self.assertEqual(arr.dtype, expected_dtype)
Assert ndarray data type is equal to expected. Args: target: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). expected_dtype: Expected data type.
github-repos
def find(self, query=None, func=None, labels=None, colors=None, pinned=None, archived=None, trashed=False):
    """Find Notes based on the specified criteria.

    Args:
        query (Union[_sre.SRE_Pattern, str, None]): A str or regular
            expression to match against the title and text.
        func (Union[callable, None]): A filter function.
        labels (Union[List[str], None]): A list of label ids or objects to
            match. An empty list matches notes with no labels.
        colors (Union[List[str], None]): A list of colors to match.
        pinned (Union[bool, None]): Whether to match pinned notes.
        archived (Union[bool, None]): Whether to match archived notes.
        trashed (Union[bool, None]): Whether to match trashed notes.

    Return:
        List[gkeepapi.node.TopLevelNode]: Results.
    """
    if labels is not None:
        # Normalize Label objects down to their ids.
        labels = [item.id if isinstance(item, _node.Label) else item for item in labels]

    def _query_ok(note):
        if query is None:
            return True
        if isinstance(query, six.string_types):
            return (query in note.title) or (query in note.text)
        if isinstance(query, Pattern):
            return bool(query.search(note.title) or query.search(note.text))
        return False

    def _labels_ok(note):
        if labels is None:
            return True
        if not labels:
            # Empty list matches only notes without any labels.
            return not note.labels.all()
        return any(note.labels.get(i) is not None for i in labels)

    def _matches(note):
        return (_query_ok(note)
                and (func is None or func(note))
                and _labels_ok(note)
                and (colors is None or note.color in colors)
                and (pinned is None or note.pinned == pinned)
                and (archived is None or note.archived == archived)
                and (trashed is None or note.trashed == trashed))

    return (note for note in self.all() if _matches(note))
Find Notes based on the specified criteria. Args: query (Union[_sre.SRE_Pattern, str, None]): A str or regular expression to match against the title and text. func (Union[callable, None]): A filter function. labels (Union[List[str], None]): A list of label ids or objects to match. An empty list matches notes with no labels. colors (Union[List[str], None]): A list of colors to match. pinned (Union[bool, None]): Whether to match pinned notes. archived (Union[bool, None]): Whether to match archived notes. trashed (Union[bool, None]): Whether to match trashed notes. Return: List[gkeepapi.node.TopLevelNode]: Results.
codesearchnet
def Collect(self, knowledge_base, artifact_definition, searcher, file_system):
    """Collects values using a file artifact definition.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing information.
        artifact_definition (artifacts.ArtifactDefinition): artifact definition.
        searcher (dfvfs.FileSystemSearcher): file system searcher to
            preprocess the file system.
        file_system (dfvfs.FileSystem): file system to be preprocessed.

    Raises:
        PreProcessFail: if the preprocessing fails.
    """
    supported_types = (
        artifact_definitions.TYPE_INDICATOR_FILE,
        artifact_definitions.TYPE_INDICATOR_PATH)
    for source in artifact_definition.sources:
        if source.type_indicator not in supported_types:
            continue
        for path in source.paths:
            # Drop the first segment of the split path — presumably paths
            # start with the separator, leaving an empty leading segment;
            # confirm against the artifact definitions.
            segments = path.split(source.separator)
            find_spec = file_system_searcher.FindSpec(location_glob=segments[1:], case_sensitive=False)
            for match in searcher.Find(find_specs=[find_spec]):
                self._ParsePathSpecification(knowledge_base, searcher, file_system, match, source.separator)
Collects values using a file artifact definition. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. artifact_definition (artifacts.ArtifactDefinition): artifact definition. searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess the file system. file_system (dfvfs.FileSystem): file system to be preprocessed. Raises: PreProcessFail: if the preprocessing fails.
codesearchnet
def belspec_yaml2json(yaml_fn: str, json_fn: str) -> str:
    """Enhance a BEL Specification YAML file and save it as JSON.

    Args:
        yaml_fn: path to the original YAML version of the BEL Spec
        json_fn: path to write the enhanced JSON version of the BEL Spec

    Returns:
        str: version of the BEL Spec
    """
    try:
        # Context manager so the YAML file handle is always closed (the
        # original left an open() handle unclosed).
        with open(yaml_fn, "r") as yaml_f:
            spec_dict = yaml.load(yaml_f.read(), Loader=yaml.SafeLoader)
        spec_dict["admin"] = {}
        spec_dict["admin"]["version_underscored"] = spec_dict["version"].replace(".", "_")
        spec_dict["admin"]["parser_fn"] = yaml_fn.replace(".yaml", "_parser.py")
        add_relations(spec_dict)
        add_functions(spec_dict)
        add_namespaces(spec_dict)
        enhance_function_signatures(spec_dict)
        add_function_signature_help(spec_dict)
        with open(json_fn, "w") as f:
            json.dump(spec_dict, f)
    except Exception as e:
        # The original passed yaml_fn positionally to a format string with a
        # named placeholder, which raised KeyError inside this handler and
        # masked the real error.
        log.error(
            "Warning: BEL Specification {yaml_fn} could not be read. Cannot proceed. Error: {e}".format(
                yaml_fn=yaml_fn, e=e
            )
        )
        sys.exit()
    return spec_dict["version"]
Enhance BEL specification and save as JSON file Load all BEL Specification YAML files and convert to JSON files after enhancing them. Also create a bel_versions.json file with all available BEL versions for fast loading. Args: yaml_fn: original YAML version of BEL Spec json_fn: enhanced JSON version of BEL Spec Returns: str: version of BEL Spec
juraj-google-style
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
    """Applies a function to the contained :meth:`Result.Ok` value.

    Args:
        op: The function to apply to the :meth:`Result.Ok` value.

    Returns:
        A :class:`Result` wrapping the function's result if `self` is an
        :meth:`Result.Ok` value, otherwise `self` unchanged.
    """
    if not self._is_ok:
        # Errors pass through untouched.
        return self
    value = cast(T, self._val)
    return self._type.Ok(op(value))
Applies a function to the contained :meth:`Result.Ok` value. Args: op: The function to apply to the :meth:`Result.Ok` value. Returns: A :class:`Result` with its success value as the function result if `self` is an :meth:`Result.Ok` value, otherwise returns `self`. Examples: >>> Ok(1).map(lambda x: x * 2) Ok(2) >>> Err(1).map(lambda x: x * 2) Err(1)
juraj-google-style
def _sparse_or_dense_matmul_onehot(sparse_or_dense_matrix, col_index):
    """Returns a (dense) column of a Tensor or SparseTensor.

    Args:
        sparse_or_dense_matrix: matrix-shaped, `float` `Tensor` or `SparseTensor`.
        col_index: scalar, `int` `Tensor` representing the index of the
            desired column.

    Returns:
        column: vector-shaped, `float` `Tensor` with the same dtype as
            `sparse_or_dense_matrix`, representing its `col_index`th column.
    """
    if not isinstance(sparse_or_dense_matrix, (tf.SparseTensor, tf.compat.v1.SparseTensorValue)):
        # Dense case: gather the column directly along the last axis.
        return tf.gather(sparse_or_dense_matrix, col_index, axis=-1)
    # Sparse case: slice out a single column, densify, then reshape away the
    # trailing length-1 column dimension.
    shape = _get_shape(sparse_or_dense_matrix)
    batch_shape = shape[:-2]
    num_rows = shape[-2]
    slice_start = tf.concat([tf.zeros_like(batch_shape), [0, col_index]], axis=0)
    slice_size = tf.concat([batch_shape, [num_rows, 1]], axis=0)
    column_slice = tf.sparse.slice(sparse_or_dense_matrix, tf.cast(slice_start, tf.int64), tf.cast(slice_size, tf.int64))
    output_shape = tf.concat([batch_shape, [num_rows]], axis=0)
    return tf.reshape(tf.sparse.to_dense(column_slice), output_shape)
Returns a (dense) column of a Tensor or SparseTensor. Args: sparse_or_dense_matrix: matrix-shaped, `float` `Tensor` or `SparseTensor`. col_index: scalar, `int` `Tensor` representing the index of the desired column. Returns: column: vector-shaped, `float` `Tensor` with the same dtype as `sparse_or_dense_matrix`, representing the `col_index`th column of `sparse_or_dense_matrix`.
juraj-google-style
def filter_with_theta(image, theta, sigma=1.0, filter_size=9): x = np.arange((((- filter_size) g = np.array([np.exp(((- (x ** 2)) / (2 * (sigma ** 2))))]) gp = np.array([((- (x / sigma)) * np.exp(((- (x ** 2)) / (2 * (sigma ** 2)))))]) ix = convolve2d(image, (- gp), mode='same', boundary='fill', fillvalue=0) ix = convolve2d(ix, g.T, mode='same', boundary='fill', fillvalue=0) iy = convolve2d(image, g, mode='same', boundary='fill', fillvalue=0) iy = convolve2d(iy, (- gp.T), mode='same', boundary='fill', fillvalue=0) output = ((np.cos(theta) * ix) + (np.sin(theta) * iy)) gt_filter = np.matmul(g.T, gp) gt_filter = ((np.cos(theta) * gt_filter) + (np.sin(theta) * gt_filter.T)) return (output, gt_filter)
Implements a steerable Gaussian filter. This function can be used to evaluate the first directional derivative of an image, using the method outlined in W. T. Freeman and E. H. Adelson, "The Design and Use of Steerable Filters", IEEE PAMI, 1991. It evaluates the directional derivative of the input image I, oriented at THETA degrees with respect to the image rows. The standard deviation of the Gaussian kernel is given by SIGMA (assumed to be equal to unity by default). Args: image: any input image (only one channel) theta: orientation of filter [0, 2 * pi] sigma (float, optional): standard derivation of Gaussian filter_size (int, optional): filter support Returns: filtered image and the filter
codesearchnet
def random_line_data(chars_per_line=80):
    """Create a line of random ASCII letters.

    Args:
        chars_per_line: An integer that says how many characters to return.

    Returns:
        A string of `chars_per_line` random letters.
    """
    # Import locally so the helper does not depend on the module-level
    # double-underscore aliases (__random/__string) the original used,
    # which would be name-mangled (and break) if this code were moved
    # inside a class.
    import random
    import string
    return ''.join(random.choice(string.ascii_letters) for _ in range(chars_per_line))
Function to create a line of a random string Args: chars_per_line: An integer that says how many characters to return Returns: A String
juraj-google-style
def resolve_image_exif(self, image_url):
    """Get the EXIF metadata about an image URL as :class:`OcrdExif`.

    Results are cached per URL; the image is downloaded and opened only on
    a cache miss.

    Args:
        image_url (string): URL of image.

    Return:
        :class:`OcrdExif`
    """
    # Check the cache first: the original resolved and downloaded the image
    # unconditionally, wasting a download on every cache hit.
    if image_url not in self.image_cache['exif']:
        files = self.mets.find_files(url=image_url)
        if files:
            image_filename = self.download_file(files[0]).local_filename
        else:
            image_filename = self.download_url(image_url)
        self.image_cache['exif'][image_url] = OcrdExif(Image.open(image_filename))
    return self.image_cache['exif'][image_url]
Get the EXIF metadata about an image URL as :class:`OcrdExif` Args: image_url (string) : URL of image Return :class:`OcrdExif`
juraj-google-style
def _convert_tf2_model(flags):
    """Calls function to convert the TensorFlow 2.0 model into a TFLite model.

    Args:
        flags: argparse.Namespace object.

    Raises:
        ValueError: Unsupported file format.
    """
    if flags.saved_model_dir:
        converter = lite.TFLiteConverterV2.from_saved_model(
            flags.saved_model_dir,
            signature_keys=_parse_array(flags.saved_model_signature_key),
            tags=_parse_set(flags.saved_model_tag_set))
    elif flags.keras_model_file:
        model = keras_deps.get_load_model_function()(flags.keras_model_file)
        converter = lite.TFLiteConverterV2.from_keras_model(model)
    else:
        # The original fell through with `converter` undefined, raising
        # NameError instead of the documented ValueError.
        raise ValueError('--saved_model_dir or --keras_model_file must be specified.')
    converter.experimental_new_converter = flags.experimental_new_converter
    if flags.experimental_new_quantizer is not None:
        converter.experimental_new_quantizer = flags.experimental_new_quantizer
    tflite_model = converter.convert()
    with gfile.GFile(flags.output_file, 'wb') as f:
        f.write(tflite_model)
Calls function to convert the TensorFlow 2.0 model into a TFLite model. Args: flags: argparse.Namespace object. Raises: ValueError: Unsupported file format.
github-repos
def parse_response(service, response, search_type):
    """Parse the response to a music service query and return a SearchResult.

    Args:
        service (MusicService): The music service that produced the response.
        response (OrderedDict): The response from the soap client call.
        search_type (str): A string that indicates the search type that the
            response is from.

    Returns:
        SearchResult: A SearchResult object.

    Raises:
        ValueError: if the response has neither a "searchResult" nor a
            "getMetadataResult" key.
    """
    _LOG.debug('Parse response "%s" from service "%s" of type "%s"', response, service, search_type)
    if 'searchResult' in response:
        payload = response['searchResult']
    elif 'getMetadataResult' in response:
        payload = response['getMetadataResult']
    else:
        raise ValueError('"response" should contain either the key '
                         '"searchResult" or "getMetadataResult"')
    search_metadata = {
        'number_returned': payload['count'],
        'total_matches': None,
        'search_type': search_type,
        'update_id': None,
    }
    items = []
    for result_type in ('mediaCollection', 'mediaMetadata'):
        # e.g. 'mediaCollection' -> 'MediaCollection' for class lookup.
        capitalized = result_type[0].upper() + result_type[1:]
        raw_items = payload.get(result_type, [])
        # A single item comes back as a dict rather than a one-element list.
        if isinstance(raw_items, OrderedDict):
            raw_items = [raw_items]
        for raw_item in raw_items:
            item_class = get_class(capitalized + raw_item['itemType'].title())
            items.append(item_class.from_music_service(service, raw_item))
    return SearchResult(items, **search_metadata)
Parse the response to a music service query and return a SearchResult Args: service (MusicService): The music service that produced the response response (OrderedDict): The response from the soap client call search_type (str): A string that indicates the search type that the response is from Returns: SearchResult: A SearchResult object
juraj-google-style