code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _GetNumericProjectId(self): project_id = 'project/numeric-project-id' return self.watcher.GetMetadata(metadata_key=project_id, recursive=False)
Get the numeric project ID for this VM. Returns: string, the numeric project ID if one is found.
codesearchnet
def generate_token(key, user_id, action_id='', when=None):
    """Generates a URL-safe XSRF token for the given user, action, time tuple.

    Args:
        key: secret key to use.
        user_id: the user ID of the authenticated user.
        action_id: a string identifier of the action they requested
            authorization for.
        when: the time in seconds since the epoch at which the user was
            authorized for this action. If not set the current time is used.

    Returns:
        A string XSRF protection token.
    """
    # Bug fix: hmac.new() has no default digest on Python >= 3.8 (it raises
    # TypeError). md5 was the implicit Python 2 default, so keep it explicit
    # to preserve token compatibility.
    import hashlib
    digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'),
                        digestmod=hashlib.md5)
    digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))
    digester.update(DELIMITER)
    digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))
    digester.update(DELIMITER)
    when = _helpers._to_bytes(str(when or int(time.time())), encoding='utf-8')
    digester.update(when)
    digest = digester.digest()
    # The timestamp rides along in the token so it can be validated later.
    token = base64.urlsafe_b64encode(digest + DELIMITER + when)
    return token
Generates a URL-safe token for the given user, action, time tuple. Args: key: secret key to use. user_id: the user ID of the authenticated user. action_id: a string identifier of the action they requested authorization for. when: the time in seconds since the epoch at which the user was authorized for this action. If not set the current time is used. Returns: A string XSRF protection token.
juraj-google-style
def is_multiline_string(self):
    """Test if this string token is a multiline string.

    A multiline string always ends with triple quotes, so for a string
    token this inspects the trailing characters for a triple double or
    triple single quote mark.
    """
    triple_quotes = ('"""', "'''")
    return self.is_string and self.value.endswith(triple_quotes)
Test if this string is a multiline string. Returns: A multiline string always ends with triple quotes, so if it is a string token, inspect the last 3 characters and return True if it is a triple double or triple single quote mark.
github-repos
def log_images(self, name, images, step=None):
    """Log new images for a given name on a given step.

    Args:
        name (str): name of the variable (converted to a valid tensorflow
            summary name).
        images (list): list of ndarrays to visualize.
        step (int): non-negative integer used for visualization.

    Raises:
        TypeError: if `images` is a single string rather than a list.
    """
    # Guard against the common mistake of passing one string instead of a list.
    if isinstance(images, six.string_types):
        raise TypeError(
            '"images" should be a list of ndarrays, got {}'.format(type(images)))
    self._check_step(step)
    summary_name = self._ensure_tf_name(name)
    image_summary = self._image_summary(summary_name, images, step=step)
    self._log_summary(summary_name, image_summary, images, step=step)
Log new images for given name on given step. Args: name (str): name of the variable (it will be converted to a valid tensorflow summary name). images (list): list of images to visualize step (int): non-negative integer used for visualization
codesearchnet
def graphviz_imshow(self, ax=None, figsize=None, dpi=300, fmt='png', **kwargs):
    """Generate the flow graph in the DOT language and plot it with matplotlib.

    Args:
        ax: matplotlib :class:`Axes` or None if a new figure should be created.
        figsize: matplotlib figure size (None to use default).
        dpi: DPI value.
        fmt: Select format for output image.

    Returns:
        matplotlib Figure.
    """
    import os
    graph = self.get_graphviz(**kwargs)
    graph.format = fmt
    graph.attr(dpi=str(dpi))
    fd, tmpname = tempfile.mkstemp()
    # Bug fix: mkstemp returns an open file descriptor that was leaked before.
    os.close(fd)
    path = graph.render(tmpname, view=False, cleanup=True)
    ax, fig, _ = get_ax_fig_plt(ax=ax, figsize=figsize, dpi=dpi)
    import matplotlib.image as mpimg
    # Bug fix: honor the requested `fmt`; the format was hard-coded to 'png'.
    ax.imshow(mpimg.imread(path, format=fmt))
    ax.axis('off')
    return fig
Generate flow graph in the DOT language and plot it with matplotlib. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. figsize: matplotlib figure size (None to use default) dpi: DPI value. fmt: Select format for output image Return: matplotlib Figure
codesearchnet
def _encode_fhir_path_constraint(self, struct_def: _fhir_path_data_types.StructureDataType, fhir_path_expression: str, node_context: expressions.Builder) -> Optional[_BuilderSql]:
    """Returns a Standard SQL translation of `fhir_path_expression`.

    If an error is encountered during encoding, the associated error reporter
    is notified and this method returns `None`.

    Args:
        struct_def: The structure definition the expression originates from.
        fhir_path_expression: The fluent-style dot-delimited ('.') FHIRPath
            expression to encode to Standard SQL.
        node_context: The root builder of the expression; may be another
            FHIRPath expression.

    Returns:
        A Standard SQL encoding of the constraint on success, else `None`.
    """
    # A context identical to the root builder adds no scope; drop it so the
    # expression is parsed relative to the structure root.
    if node_context.get_root_builder().fhir_path == node_context.fhir_path:
        node_context = None
    try:
        new_builder = expressions.from_fhir_path_expression(fhir_path_expression, self._context, struct_def, self._primitive_handler, node_context)
    except Exception as e:
        # NOTE(review): when node_context was reset to None above, this error
        # path formats as "None.<expression>" — confirm that is intended.
        self._error_reporter.report_fhir_path_error(self._abs_path_invocation(node_context), f'{node_context}.{fhir_path_expression}', self._error_message_for_exception(e))
        return None
    return self._encode_fhir_path_builder_constraint(new_builder, node_context)
Returns a Standard SQL translation of the constraint `fhir_path_expression`. If an error is encountered during encoding, the associated error reporter will be notified, and this method will return `None`. Args: struct_def: The Structure definition that the fhir_path_expression originates from. fhir_path_expression: The fluent-style dot-delimited ('.') FHIRPath expression that encodes to Standard SQL. node_context: The root builder of the fhir_path_expression. May be another FHIRPath expression. Returns: A Standard SQL encoding of the constraint `fhir_path_expression` upon successful completion. The SQL will evaluate to a single boolean indicating whether the constraint is satisfied and the builder that created it. May be different from the input builder(s).
github-repos
def MakeSuiteFromCdf(cdf, name=None):
    """Makes a normalized Suite from a Cdf object.

    Args:
        cdf: Cdf object.
        name: string name for the new Suite; defaults to the Cdf's name.

    Returns:
        Suite object.
    """
    if name is None:
        name = cdf.name
    suite = Suite(name=name)
    previous_prob = 0.0
    for value, cumulative_prob in cdf.Items():
        # Differencing consecutive CDF values recovers the probability mass.
        suite.Incr(value, cumulative_prob - previous_prob)
        previous_prob = cumulative_prob
    return suite
Makes a normalized Suite from a Cdf object. Args: cdf: Cdf object name: string name for the new Suite Returns: Suite object
juraj-google-style
def remove_liers(points):
    """Removes obvious noise points by checking time consistency.

    Points whose timestamp is out of order relative to both neighbours are
    dropped; the first and last points are always kept.

    Args:
        points (:obj:`list` of :obj:`Point`): points with a `time` attribute.

    Returns:
        :obj:`list` of :obj:`Point`: the filtered points.
    """
    # Robustness fix: lists shorter than 3 have no interior points to check
    # (the old code crashed on [] and duplicated a single point).
    if len(points) < 3:
        return list(points)
    result = [points[0]]
    # Bug fix: the upper bound was len(points) - 2, which silently skipped
    # the consistency check for the second-to-last point.
    for i in range(1, len(points) - 1):
        prv = points[i - 1]
        crr = points[i]
        nxt = points[i + 1]
        # Keep the point only if its timestamp is monotone w.r.t. neighbours.
        if prv.time <= crr.time and crr.time <= nxt.time:
            result.append(crr)
    result.append(points[-1])
    return result
Removes obvious noise points Checks time consistency, removing points that appear out of order Args: points (:obj:`list` of :obj:`Point`) Returns: :obj:`list` of :obj:`Point`
juraj-google-style
def __init__(self, num_layers: int=1, in_channels: int=3, out_channels: int=64, use_batchnorm: bool=True):
    """Constructs a Conv2DDownsample model.

    Args:
        num_layers (`int`, *optional*, defaults to 1):
            NOTE(review): accepted but not used by this constructor — confirm intent.
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 64):
            The number of conv output channels.
        use_batchnorm (`bool`, *optional*, defaults to `True`):
            Whether to use batchnorm.
    """
    super().__init__()
    # 7x7 stride-2 conv without bias; the (optional) batchnorm provides the shift.
    self.conv = Conv2dSamePadding(in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False)
    self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity()
    self.relu = nn.ReLU()
    self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
Constructs a Conv2DDownsample model. Args: in_channels (`int`, *optional*, defaults to 3): The number of input channels. out_channels (`int`, *optional*, defaults to 64): The number of conv output channels. use_batchnorm (`bool`, *optional*, defaults to `True`): Whether to use batchnorm.
github-repos
def _expect(self, expected, times=50):
    """Wait for the `expected` line, reading at most `times` lines.

    Args:
        expected (str): the exact line to wait for.
        times (int): maximum number of read attempts.

    Raises:
        Exception: if the expected line was not found.
    """
    logger.debug('[%s] Expecting [%s]', self.port, expected)
    empty_reads_left = 10
    remaining = times
    while remaining:
        # Too many consecutive-ish empty reads aborts the wait early.
        if not empty_reads_left:
            break
        line = self._readline()
        if line == expected:
            return
        if not line:
            empty_reads_left -= 1
            time.sleep(0.1)
        remaining -= 1
    raise Exception('failed to find expected string[%s]' % expected)
Find the `expected` line within `times` trials. Args: expected str: the expected string times int: number of trials
juraj-google-style
def get_image_tokens(self, pixel_values: torch.FloatTensor, image_sizes: torch.LongTensor):
    """Tokenizes images into discrete VQGAN codes, then maps them to BPE ids.

    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):
            The (height, width) of each image in the batch.
    """
    vq_tokens_per_image = self.vqmodel.encode(pixel_values, image_sizes)
    bpe_chunks = []
    for image_tokens in vq_tokens_per_image:
        # Map image-token ids into the BPE vocabulary and flatten per image.
        bpe_chunks.append(self.vocabulary_mapping.convert_img2bpe(image_tokens).flatten())
    return torch.cat(bpe_chunks)
Tokenizes images into discrete tokens with VQGAN module. Converts obtained image tokens into BPE tokens and wraps with "boi" and "eoi" special tokens. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`): The sizes of the images in the batch, being (height, width) for each image.
github-repos
def simple_vertex_array(self, program, buffer, *attributes, index_buffer=None, index_element_size=4) -> 'VertexArray':
    """Create a :py:class:`VertexArray` object from a single buffer.

    Args:
        program (Program): The program used when rendering.
        buffer (Buffer): The buffer.
        attributes (list): A list of attribute names.

    Keyword Args:
        index_element_size (int): byte size of each index element, 1, 2 or 4.
        index_buffer (Buffer): An index buffer.

    Returns:
        :py:class:`VertexArray` object
    """
    # A list of buffers belongs to vertex_array(); fail loudly here.
    if type(buffer) is list:
        raise SyntaxError('Change simple_vertex_array to vertex_array')
    buffer_format = detect_format(program, attributes)
    content = [(buffer, buffer_format) + attributes]
    return self.vertex_array(program, content, index_buffer, index_element_size)
Create a :py:class:`VertexArray` object. Args: program (Program): The program used when rendering. buffer (Buffer): The buffer. attributes (list): A list of attribute names. Keyword Args: index_element_size (int): byte size of each index element, 1, 2 or 4. index_buffer (Buffer): An index buffer. Returns: :py:class:`VertexArray` object
juraj-google-style
def _ReverseHostname(self, hostname): if not hostname: return '' if len(hostname) <= 1: return hostname if hostname[-1] == '.': return hostname[::-1][1:] return hostname[::-1][0:]
Reverses the hostname and strips the leading dot. The hostname entry is reversed: moc.elgoog.www. Should be: www.google.com Args: hostname (str): reversed hostname. Returns: str: hostname without a leading dot.
juraj-google-style
def get_meta_references(self, datas):
    """Get the manifest's enabled-references declaration.

    Reads the references rule from the manifest. The rule must define either
    a ``--names`` variable (explicit list of reference names) or an ``--auto``
    variable (enable every available reference); ``--names`` wins when both
    are present.

    Arguments:
        datas (dict): Data where to search for the meta references
            declaration, commonly the fully parsed manifest.

    Returns:
        list: A list of reference names.

    Raises:
        SerializerError: if the rule is missing/empty or defines neither mode.
    """
    rule = datas.get(RULE_META_REFERENCES, {})
    if not rule:
        raise SerializerError(
            "Manifest lacks of '.{}' or is empty".format(RULE_META_REFERENCES))
    if rule.get('names', None):
        names = rule.get('names').split(' ')
    elif rule.get('auto', None):
        names = self.get_available_references(datas)
    else:
        raise SerializerError(
            "'.{}' either require '--names' or '--auto' variable to be "
            "defined".format(RULE_META_REFERENCES))
    for reference_name in names:
        self.validate_rule_name(reference_name)
    return names
Get manifest enabled references declaration This required declaration is read from ``styleguide-metas-references`` rule that require either a ``--names`` or ``--auto`` variable, each one define the mode to enable reference: Manually Using ``--names`` which define a list of names to enable, every other non enabled rule will be ignored. Section name (and so Reference name also) must not contain special character nor ``-`` so they still be valid variable name for almost any languages. For word separator inside name, use ``_``. Automatic Using ``--auto`` variable every reference rules will be enabled. The value of this variable is not important since it is not empty. If both of these variables are defined, the manual enable mode is used. Arguments: datas (dict): Data where to search for meta references declaration. This is commonly the fully parsed manifest. Returns: list: A list of reference names.
codesearchnet
def search(self, queryType, query=None, vendorSpecific=None, **kwargs):
    """Run a search query and parse the reply as a DataONE ObjectList.

    See Also:
        searchResponse()

    Args:
        queryType:
        query:
        vendorSpecific:
        **kwargs:

    Returns:
        The deserialized ObjectList.
    """
    raw_response = self.searchResponse(queryType, query, vendorSpecific, **kwargs)
    return self._read_dataone_type_response(raw_response, 'ObjectList')
See Also: searchResponse() Args: queryType: query: vendorSpecific: **kwargs: Returns:
juraj-google-style
def get(cls, sha1=''):
    """Return details about a given commit.

    Args:
        sha1 (str): The sha1 of the commit to query. If not given, it will
            return the details for the latest commit.

    Returns:
        CommitDetails: Commit details. You can use the instance of the class
            to query the git tree further.
    """
    with conf.within_proj_dir():
        # '||' separates the fields emitted by git show.
        cmd = 'git show -s --format="%H||%an||%ae||%s||%b||%P" {}'.format(
            sha1
        )
        result = shell.run(cmd, capture=True, never_pretend=True).stdout
    sha1, name, email, title, desc, parents = result.split('||')
    return CommitDetails(
        sha1=sha1,
        author=Author(name, email),
        title=title,
        desc=desc,
        parents_sha1=parents.split(),
    )
Return details about a given commit. Args: sha1 (str): The sha1 of the commit to query. If not given, it will return the details for the latest commit. Returns: CommitDetails: Commit details. You can use the instance of the class to query git tree further.
juraj-google-style
def options(self, options=None):
    """Sets or gets the list of acceptable values for the Node.

    Called without an argument, returns the stored options list. Called with
    a list, validates/coerces each element against the Node's type and stores
    the de-duplicated result.

    Arguments:
        options {list} -- A list of valid values (or None to read).

    Raises:
        TypeError, ValueError

    Returns:
        None | list
    """
    # Getter mode.
    if options is None:
        return self._options
    if not isinstance(options, (list, tuple)):
        raise ValueError('__options__')
    # Only these scalar-like types support an options list.
    if self._type not in ['base64', 'date', 'datetime', 'decimal', 'float',
                          'int', 'ip', 'md5', 'price', 'string', 'time',
                          'timestamp', 'uint', 'uuid']:
        raise TypeError('can not set __options__ for ' + self._type)
    lOpts = []
    for i in range(len(options)):
        # Pattern-validated string types: must match the type's regex.
        if self._type in ['base64', 'date', 'datetime', 'ip', 'md5', 'time', 'uuid']:
            if not isinstance(options[i], basestring) \
                    or not _typeToRegex[self._type].match(options[i]):
                raise ValueError('__options__[%d]' % i)
        elif self._type == 'decimal':
            if isinstance(options[i], Decimal):
                pass
            else:
                try:
                    options[i] = Decimal(options[i])
                except ValueError:
                    raise ValueError('__options__[%d]' % i)
        elif self._type == 'float':
            try:
                options[i] = float(options[i])
            except ValueError:
                raise ValueError('__options__[%d]' % i)
        elif self._type in ['int', 'timestamp', 'uint']:
            # Non-int values must be strings parseable in any base (base 0).
            if not isinstance(options[i], (int, long)):
                if not isinstance(options[i], basestring):
                    raise ValueError('__options__[%d]' % i)
                try:
                    options[i] = int(options[i], 0)
                except ValueError:
                    raise ValueError('__options__[%d]' % i)
            # Unsigned types reject negative values.
            if self._type in ['timestamp', 'uint'] and options[i] < 0:
                raise ValueError('__options__[' + str(i) + ']')
        elif self._type == 'price':
            if isinstance(options[i], Decimal):
                pass
            elif not isinstance(options[i], basestring) or not _typeToRegex['price'].match(options[i]):
                raise ValueError('__options__[%d]' % i)
            options[i] = Decimal(options[i])
        elif self._type == 'string':
            if not isinstance(options[i], basestring):
                try:
                    options[i] = str(options[i])
                except ValueError:
                    raise ValueError('__options__[%d]' % i)
        else:
            raise TypeError('can not set __options__ for ' + self._type)
        # Duplicates are warned about (not fatal) and skipped.
        if options[i] in lOpts:
            sys.stderr.write('__options__[' + str(i) + '] is a duplicate')
        else:
            lOpts.append(options[i])
    self._options = lOpts
Options Sets or gets the list of acceptable values for the Node Arguments: options {list} -- A list of valid values Raises: TypeError, ValueError Returns: None | list
juraj-google-style
def logged(level=logging.DEBUG):
    """Useful logging decorator.

    If a method is logged, the beginning and end of the method call will be
    logged at a pre-specified level.

    Args:
        level: Level to log the method at. Defaults to DEBUG.

    Returns:
        The decorator.
    """
    import functools

    def wrap(f):
        _logger = logging.getLogger("{}.{}".format(f.__module__, f.__name__))

        # Bug fix: preserve the wrapped function's metadata (__name__, __doc__).
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            _logger.log(level, "Called at {} with args = {} and kwargs = {}"
                        .format(datetime.datetime.now(), args, kwargs))
            data = f(*args, **kwargs)
            _logger.log(level, "Done at {} with args = {} and kwargs = {}"
                        .format(datetime.datetime.now(), args, kwargs))
            return data
        return wrapped_f
    return wrap
Useful logging decorator. If a method is logged, the beginning and end of the method call will be logged at a pre-specified level. Args: level: Level to log method at. Defaults to DEBUG.
juraj-google-style
def get_table_map() -> t.Dict:
    """Load and return the table map from the dataset-meta.json file.

    Returns:
        dict: table names as keys, their metadata as values.
    """
    fs = GCSFileSystem()
    with fs.open(METADATA_URI) as meta_file:
        return json.load(meta_file)
Load and return the table map from dataset-meta.json file. Returns: dict: Dictionary containing table names as keys and their metadata as values.
github-repos
def transfer(self, data):
    """Transfers data over SPI in 60-byte chunks.

    Arguments:
        data: The data to transfer.

    Returns:
        The data returned by the SPI device.
    """
    settings = self.transfer_settings
    settings.spi_tx_size = len(data)
    self.transfer_settings = settings
    chunk_size = 60
    response = ''
    for offset in range(0, len(data), chunk_size):
        chunk = data[offset:offset + chunk_size]
        response += self.sendCommand(commands.SPITransferCommand(chunk)).data
        time.sleep(0.01)
    # Keep issuing empty transfers until the full reply has been clocked in.
    while len(response) < len(data):
        response += self.sendCommand(commands.SPITransferCommand('')).data
    return response
Transfers data over SPI. Arguments: data: The data to transfer. Returns: The data returned by the SPI device.
juraj-google-style
def write(name, value):
    """Temporarily change or set an environment variable during a function call.

    Args:
        name: The name of the environment variable.
        value: The value to set for the duration of the call.

    Returns:
        The decorator; the wrapped function's return value is passed through.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            existing_env = core.read(name, allow_none=True)
            core.write(name, value)
            try:
                return func(*args, **kwargs)
            finally:
                # Bug fix: restore the previous value even when func raises.
                core.write(name, existing_env)
        return _decorator
    return wrapped
Temporarily change or set the environment variable during the execution of a function. Args: name: The name of the environment variable value: A value to set for the environment variable Returns: The function return value.
codesearchnet
def _upsample_filters(filters, rate): if rate == 1: return filters filters_up = np.transpose(filters, [2, 3, 0, 1]) ker = np.zeros([rate, rate], dtype=np.float32) ker[0, 0] = 1 filters_up = np.kron(filters_up, ker)[:, :, :-(rate - 1), :-(rate - 1)] filters_up = np.transpose(filters_up, [2, 3, 0, 1]) return filters_up
Upsamples the filters by a factor of rate along the spatial dimensions. Args: filters: [h, w, in_depth, out_depth]. Original filters. rate: An int, specifying the upsampling rate. Returns: filters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with h_up = h + (h - 1) * (rate - 1) w_up = w + (w - 1) * (rate - 1) containing (rate - 1) zeros between consecutive filter values along the filters' spatial dimensions.
github-repos
def __init__(self, density_matrix: np.ndarray, measurements: Dict[str, np.ndarray], qubit_map: Dict[ops.Qid, int], dtype: Type[np.number] = np.complex64):
    """DensityMatrixStepResult.

    Args:
        density_matrix: The density matrix at this step. Can be mutated.
        measurements: The measurements for this step of the simulation.
        qubit_map: A map from qid to index used to define the ordering of
            the basis in density_matrix.
        dtype: The numpy dtype for the density matrix.
    """
    super().__init__(measurements)
    # Stored without copying: the caller is allowed to mutate it.
    self._density_matrix = density_matrix
    self._qubit_map = qubit_map
    self._dtype = dtype
DensityMatrixStepResult. Args: density_matrix: The density matrix at this step. Can be mutated. measurements: The measurements for this step of the simulation. qubit_map: A map from qid to index used to define the ordering of the basis in density_matrix. dtype: The numpy dtype for the density matrix.
juraj-google-style
def GetKeyByScriptHash(self, script_hash):
    """Get the KeyPair belonging to the script hash.

    Args:
        script_hash (UInt160): a bytearray (len 20) representing the public key.

    Returns:
        KeyPair: the KeyPair belonging to the public key hash, or None when
            no matching contract exists.
    """
    contract = self.GetContract(script_hash)
    if not contract:
        return None
    return self.GetKey(contract.PublicKeyHash)
Get the KeyPair belonging to the script hash. Args: script_hash (UInt160): a bytearray (len 20) representing the public key. Returns: KeyPair: If successful, the KeyPair belonging to the public key hash, otherwise None
codesearchnet
def ConvertValueForCsv(pql_value):
    """Sanitizes a field value from a Value object to a CSV suitable format.

    Args:
        pql_value: dict, the data for a single field of an entity.

    Returns:
        A CSV-writer-friendly value formatted by Value.Type, or '-' when the
        field is empty or missing.
    """
    # A Value object carries either a scalar 'value' or a list 'values'.
    if 'value' in pql_value:
        field = pql_value['value']
    elif 'values' in pql_value:
        field = pql_value['values']
    else:
        field = None
    if field:
        if isinstance(field, list):
            # Recursively sanitize each element, then quote and comma-join.
            return ','.join(['"%s"' % str(ConvertValueForCsv(single_field)) for single_field in field])
        else:
            class_type = ad_manager.AdManagerClassType(pql_value)
            if class_type == 'TextValue':
                # Double embedded quotes per CSV convention.
                # NOTE(review): .encode('UTF8') yields bytes on Python 3 —
                # confirm the downstream CSV writer expects that.
                return field.replace('"', '""').encode('UTF8')
            elif class_type == 'NumberValue':
                # A '.' in the string marks a float, otherwise an int.
                return float(field) if '.' in field else int(field)
            elif class_type == 'DateTimeValue':
                return ConvertDateTimeToOffset(field)
            elif class_type == 'DateValue':
                return date(int(field['date']['year']), int(field['date']['month']), int(field['date']['day'])).isoformat()
            else:
                return field
    else:
        # Empty/missing fields render as a dash.
        return '-'
Sanitizes a field value from a Value object to a CSV suitable format. Args: pql_value: dict a dictionary containing the data for a single field of an entity. Returns: str a CSV writer friendly value formatted by Value.Type.
juraj-google-style
def __init__(self, prevHash=None, timestamp=None, index=None, consensusData=None, nextConsensus=None, script=None, transactions=None, build_root=False):
    """Create an instance.

    Args:
        prevHash (UInt160):
        timestamp (int): seconds since Unix epoch.
        index (int): block height.
        consensusData (int): uint64.
        nextConsensus (UInt160):
        script (neo.Core.Witness): script used to verify the block.
        transactions (list): of neo.Core.TX.Transaction.Transaction objects.
        build_root (bool): flag indicating whether to rebuild the merkle root.
    """
    super(Block, self).__init__()
    self.Version = 0
    self.PrevHash = prevHash
    self.Timestamp = timestamp
    self.Index = index
    self.ConsensusData = consensusData
    self.NextConsensus = nextConsensus
    self.Script = script
    # Fall back to a fresh list rather than sharing a mutable default.
    if transactions:
        self.Transactions = transactions
    else:
        self.Transactions = []
    if build_root:
        self.RebuildMerkleRoot()
Create an instance. Args: prevHash (UInt160): timestamp (int): seconds since Unix epoch. index (int): block height. consensusData (int): uint64. nextConsensus (UInt160): script (neo.Core.Witness): script used to verify the block. transactions (list): of neo.Core.TX.Transaction.Transaction objects. build_root (bool): flag indicating whether to rebuild the merkle root.
juraj-google-style
def ReadFrom(self, byte_stream):
    """Read values from a byte stream.

    Args:
        byte_stream (bytes): byte stream.

    Returns:
        tuple[object, ...]: values copied from the byte stream.

    Raises:
        IOError: if the byte stream cannot be read.
        OSError: if the byte stream cannot be read.
    """
    try:
        return self._struct.unpack_from(byte_stream)
    except (TypeError, struct.error) as exception:
        # Normalize low-level unpack failures into an I/O error.
        message = 'Unable to read byte stream with error: {0!s}'.format(exception)
        raise IOError(message)
Read values from a byte stream. Args: byte_stream (bytes): byte stream. Returns: tuple[object, ...]: values copies from the byte stream. Raises: IOError: if byte stream cannot be read. OSError: if byte stream cannot be read.
juraj-google-style
def loads(s, single=False):
    """Deserialize MRX string representations.

    Args:
        s (str): a MRX string.
        single (bool): if `True`, only return the first Xmrs object.

    Returns:
        a generator of Xmrs objects (unless *single* is `True`).
    """
    corpus = etree.fromstring(s)
    if single:
        # Only the first <mrs> element is deserialized.
        return _deserialize_mrs(next(corpus))
    return (_deserialize_mrs(mrs_elem) for mrs_elem in corpus)
Deserialize MRX string representations Args: s (str): a MRX string single (bool): if `True`, only return the first Xmrs object Returns: a generator of Xmrs objects (unless *single* is `True`)
juraj-google-style
def get_from(input_file, property_names):
    """Read a geojson file and collect per-feature property-value tuples.

    Args:
        input_file (str): File name.
        property_names: List of strings; each string is a property name.

    Returns:
        List of value tuples, one per feature, ordered like property_names.
    """
    with open(input_file) as geojson_file:
        feature_collection = geojson.load(geojson_file)
    values = []
    for feature in feature_collection['features']:
        properties = feature['properties']
        # Missing properties yield None via dict.get.
        values.append(tuple([properties.get(name) for name in property_names]))
    return values
Reads a geojson and returns a list of value tuples, each value corresponding to a property in property_names. Args: input_file (str): File name. property_names: List of strings; each string is a property name. Returns: List of value tuples.
juraj-google-style
def _deferred_dependencies(self):
    """A dictionary with deferred dependencies.

    Stores restorations for other Trackable objects on which this object may
    eventually depend. May be overridden by sub-classes (e.g. Optimizers use
    conditional dependencies based on the current graph, and so need separate
    management of deferred dependencies too).

    Returns:
        A dictionary mapping from local name to a list of CheckpointPosition
        objects.
    """
    return self._self_unconditional_deferred_dependencies
A dictionary with deferred dependencies. Stores restorations for other Trackable objects on which this object may eventually depend. May be overridden by sub-classes (e.g. Optimizers use conditional dependencies based the current graph, and so need separate management of deferred dependencies too). Returns: A dictionary mapping from local name to a list of CheckpointPosition objects.
github-repos
def predict_features(self, df_features, df_target, idx=0, **kwargs):
    """For one variable, predict its neighbouring nodes.

    Args:
        df_features (pandas.DataFrame): candidate features.
        df_target (pandas.Series): target variable.
        idx (int): (optional) for printing purposes.
        kwargs (dict): additional options for algorithms.

    Returns:
        list: scores of each feature relatively to the target.
    """
    feature_matrix = df_features.values
    target_vector = df_target.values
    # A decision tree's impurity-based importances serve as the scores.
    tree = DecisionTreeRegressor()
    tree.fit(feature_matrix, target_vector)
    return tree.feature_importances_
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
juraj-google-style
def report_conversion_error(self, element_path: str, msg: str) -> None:
    """Reports the given error during FHIR conversion.

    Indicates that the resource does not fully comply with the FHIR
    specification or profile and the field could not be converted to the
    target structure; data may have been lost during the conversion.

    Args:
        element_path: The path to the field where the issue occurred.
        msg: The error message produced.
    """
Reports the given error during FHIR conversion. This indicates that the resource does not fully comply with the FHIR specification or profile, and the field could not be converted to the target structure. Data may have been lost during the conversion. Args: element_path: The path to the field where the issue occurred. msg: The error message produced.
github-repos
def package_in_memory(cls, workflow_name, workflow_files):
    """Generates a workflow package from workflow diagrams.

    Args:
        workflow_name: Name of the workflow.
        workflow_files: Diagram file glob.

    Returns:
        Workflow package contents (string).
    """
    output = StringIO()
    packager = cls(output, workflow_name, meta_data=[])
    packager.add_bpmn_files_by_glob(workflow_files)
    packager.create_package()
    return output.getvalue()
Generates wf packages from workflow diagrams. Args: workflow_name: Name of wf workflow_files: Diagram file. Returns: Workflow package (file like) object
juraj-google-style
def verify_mfa(self, mfa_token):
    """Verify an SMS or TOTP MFA token for this user.

    Args:
        mfa_token (str): An alphanumeric code from either a User's TOTP
            application or sent to them via SMS.

    Returns:
        True if the mfa_token is valid, False otherwise.
    """
    result = self.resource.verify_mfa({'mfa_token': mfa_token})
    # The API may report validity as a boolean or its string form.
    return result['valid'] in (True, 'true')
Verify an SMS or TOTP MFA token for this user. Args: mfa_token (str): An alphanumeric code from either a User's TOTP application or sent to them via SMS. Returns: True if the mfa_token is valid, False otherwise.
codesearchnet
def _load_submissions_from_datastore_dir(self, dir_suffix, id_pattern):
    """Loads the list of submissions from a storage directory.

    Args:
        dir_suffix: suffix of the directory where submissions are stored
            (ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR or DEFENSE_SUBDIR).
        id_pattern: pattern used to generate internal IDs for submissions
            (ATTACK_ID_PATTERN, TARGETED_ATTACK_ID_PATTERN or
            DEFENSE_ID_PATTERN).

    Returns:
        dictionary with all found submissions.
    """
    prefix = os.path.join(self._round_name, dir_suffix)
    blobs = self._storage_client.list_blobs(prefix=prefix)
    submissions = {}
    for idx, blob_path in enumerate(blobs):
        submissions[id_pattern.format(idx)] = SubmissionDescriptor(
            path=blob_path,
            participant_id=participant_from_submission_path(blob_path))
    return submissions
Loads list of submissions from the directory. Args: dir_suffix: suffix of the directory where submissions are stored, one of the folowing constants: ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR or DEFENSE_SUBDIR. id_pattern: pattern which is used to generate (internal) IDs for submissins. One of the following constants: ATTACK_ID_PATTERN, TARGETED_ATTACK_ID_PATTERN or DEFENSE_ID_PATTERN. Returns: dictionary with all found submissions
codesearchnet
def split(x, split_dim, num_or_size_splits, name=None):
    """Like tf.split.

    Args:
        x: a Tensor.
        split_dim: a Dimension in x.shape.dims.
        num_or_size_splits: either an integer dividing split_dim.size or a
            list of integers adding up to split_dim.size.
        name: an optional string.

    Returns:
        a list of Tensors.
    """
    return SplitOperation(x, split_dim, num_or_size_splits, name=name).outputs
Like tf.split. Args: x: a Tensor split_dim: a Dimension in x.shape.dims num_or_size_splits: either an integer dividing split_dim.size or a list of integers adding up to split_dim.size name: an optional string Returns: a list of Tensors.
codesearchnet
def find_code_and_splits(object_name: str, base_path: str, buffer: Optional[dict]=None):
    """Find the code of an object and split it into blocks, with memoization.

    Args:
        object_name (`str`): Name of the object, e.g.
            `transformers.models.bert.modeling_bert.BertAttention`.
        base_path (`str`): Base directory for the search (e.g.
            `TRANSFORMERS_PATH` or `MODEL_TEST_PATH`).
        buffer (`dict`, *optional*): Cache of previous results to speed up
            repeated lookups.

    Returns:
        lines (`List[str]`): Lines of the file defining the object.
        code (`str`): The object's code.
        code_splits (`List[Tuple[str, int, int]]`): `code` split into blocks.
    """
    if buffer is None:
        buffer = {}
    cache_key = (object_name, base_path)
    if cache_key in buffer:
        lines, code, code_splits = buffer[cache_key]
        return (lines, code, code_splits)
    code, (lines, target_start_index, target_end_index) = find_code_in_transformers(
        object_name, base_path=base_path, return_indices=True)
    indent = get_indent(code)
    code_splits = split_code_into_blocks(
        lines, target_start_index, target_end_index, len(indent) + 4, backtrace=True)
    buffer[cache_key] = (lines, code, code_splits)
    return (lines, code, code_splits)
Find the code of an object (specified by `object_name`) and split it into blocks. Args: object_name (`str`): The name of the object, e.g. `transformers.models.bert.modeling_bert.BertAttention` or `tests.models.llama.test_modeling_llama.LlamaModelTest.test_config`. base_path (`str`): The path to the base directory within which the search will be performed. It could be either `TRANSFORMERS_PATH` or `MODEL_TEST_PATH`. buffer (`dict`, *optional*): The buffer used to store the previous results in order to speed up the process. Returns: lines (`List[str]`): The lines of the whole file where the object is defined. code (`str`): The object's code. code_splits (`List[Tuple[str, int, int]]`): `code` splitted into blocks. See `split_code_into_blocks`.
github-repos
def compute_mup_vector(config):
    """Computes the MuP multiplier vector from the model configuration.

    FalconH1 applies a different MuP multiplier to each dimension of the
    hidden states: the vector is partitioned into five contiguous segments
    and each segment is scaled by its configured multiplier.

    Args:
        config: FalconH1Config object.

    Returns:
        torch.Tensor: The computed MuP vector of shape (1, 1, total_width).
    """
    if config.mamba_d_ssm is not None:
        intermediate = config.mamba_d_ssm
    else:
        intermediate = int(config.mamba_expand * config.hidden_size)
    group_state_width = config.mamba_n_groups * config.mamba_d_state
    # Segment widths, in the same order as config.ssm_multipliers.
    widths = [intermediate, intermediate, group_state_width,
              group_state_width, config.mamba_n_heads]
    mup_vector = torch.ones(1, 1, sum(widths))
    start = 0
    for width, multiplier in zip(widths, config.ssm_multipliers):
        mup_vector[:, :, start:start + width] *= multiplier
        start += width
    return mup_vector
Computes the MuP vector based on model configuration. FalconH1 applies different MuP multiplier for each dimension of the hidden states. The MuP vector is partitioned into chunks, and each chunk is multiplied with its corresponding projected dimension. Args: config: FalconH1Config object Returns: torch.Tensor: The computed MuP vector
github-repos
def build_vocab(self, texts, verbose=1, **kwargs):
    """Builds the internal vocabulary and computes various statistics.

    Args:
        texts: The list of text items to encode.
        verbose: The verbosity level for progress. Can be 0, 1, 2.
            (Default value = 1)
        **kwargs: The kwargs for `token_generator`.
    """
    if self.has_vocab:
        logger.warn('Tokenizer already has existing vocabulary. Overriding and building new vocabulary.')
    progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
    count_tracker = utils._CountTracker()
    # Rebuild counts from scratch.
    self._token_counts.clear()
    self._num_texts = len(texts)
    for token_data in self.token_generator(texts, **kwargs):
        # token_generator yields (index..., token); the token is last.
        (indices, token) = (token_data[:(- 1)], token_data[(- 1)])
        count_tracker.update(indices)
        self._token_counts[token] += 1
        # indices[0] is the text index, used to drive the progress bar.
        progbar.update(indices[0])
    self.create_token_indices(self._token_counts.keys())
    count_tracker.finalize()
    self._counts = count_tracker.counts
    progbar.update(len(texts))
Builds the internal vocabulary and computes various statistics. Args: texts: The list of text items to encode. verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1) **kwargs: The kwargs for `token_generator`.
codesearchnet
def collect_human_trajectory(env, device):
    """Use the device (keyboard or SpaceNav 3D mouse) to collect a demonstration.

    The rollout trajectory is saved to files in npz format. Modify the
    DataCollectionWrapper wrapper to add new fields or change data formats.

    Args:
        env: environment to control.
        device (instance of Device class): to receive controls from the device.
    """
    obs = env.reset()
    # Fixed initial arm pose for every demonstration.
    env.set_robot_joint_positions([0, -1.18, 0.00, 2.18, 0.00, 0.57, 1.5708])
    env.viewer.set_camera(camera_id=2)
    env.render()
    is_first = True
    reset = False
    # -1 means "not currently counting down to episode end".
    task_completion_hold_count = -1
    device.start_control()
    while not reset:
        state = device.get_controller_state()
        dpos, rotation, grasp, reset = (
            state["dpos"],
            state["rotation"],
            state["grasp"],
            state["reset"],
        )
        # Convert the absolute device rotation into a delta w.r.t. the hand.
        current = env._right_hand_orn
        drotation = current.T.dot(rotation)
        dquat = T.mat2quat(drotation)
        # Map grasp from [0, 1] style input to the env's expected range.
        grasp = grasp - 1.
        action = np.concatenate([dpos, dquat, [grasp]])
        obs, reward, done, info = env.step(action)
        if is_first:
            is_first = False
            # NOTE(review): on the first step the sim is re-created from its
            # own XML and restored to the pre-step state — presumably so the
            # data collector records a clean model; confirm intent.
            initial_mjstate = env.sim.get_state().flatten()
            xml_str = env.model.get_xml()
            env.reset_from_xml_string(xml_str)
            env.sim.reset()
            env.sim.set_state_from_flattened(initial_mjstate)
            env.sim.forward()
            env.viewer.set_camera(camera_id=2)
            env.render()
        # End the episode once success has been held long enough.
        if task_completion_hold_count == 0:
            break
        if env._check_success():
            if task_completion_hold_count > 0:
                task_completion_hold_count -= 1
            else:
                # Start a 10-step hold-out countdown on first success.
                task_completion_hold_count = 10
        else:
            task_completion_hold_count = -1
    env.close()
Use the device (keyboard or SpaceNav 3D mouse) to collect a demonstration. The rollout trajectory is saved to files in npz format. Modify the DataCollectionWrapper wrapper to add new fields or change data formats. Args: env: environment to control device (instance of Device class): to receive controls from the device
juraj-google-style
def annotate_op(self, op):
    """Annotate a bytecode operation using the data in this code object.

    Arguments:
        op (Op): An :class:`Op` instance.

    Returns:
        AnnotatedOp: An annotated bytecode operation, or `op` unchanged when
            it is a Label.
    """
    # Labels carry no operation data to annotate.
    if isinstance(op, Label):
        return op
    return AnnotatedOp(self, op.name, op.arg)
Takes a bytecode operation (:class:`Op`) and annotates it using the data contained in this code object. Arguments: op(Op): An :class:`Op` instance. Returns: AnnotatedOp: An annotated bytecode operation.
juraj-google-style
def configure(self, cfg, handler, path=""):
    """Start the configuration process for the provided handler.

    Args:
        cfg (dict): config container.
        handler (config.Handler class): config handler to use.
        path (str): current path in the configuration progress.
    """
    # First pass: fill in scalar attributes; flag unfillable mandatory ones.
    for name, attr in handler.attributes():
        if cfg.get(name) is not None:
            continue
        if attr.expected_type not in [list, dict]:
            cfg[name] = self.set(handler, attr, name, path, cfg)
        elif attr.default is None and not hasattr(handler, "configure_%s" % name):
            # No default and no custom configure hook: needs manual action.
            self.action_required.append(("%s.%s: %s" % (path, name, attr.help_text)).strip("."))
    # Second pass: run custom configure_<name> hooks for composite attributes.
    for name, attr in handler.attributes():
        if cfg.get(name) is not None:
            continue
        if hasattr(handler, "configure_%s" % name):
            fn = getattr(handler, "configure_%s" % name)
            fn(self, cfg, "%s.%s" % (path, name))
        # Drop empty list/dict entries so they don't pollute the config.
        if attr.expected_type in [list, dict] and not cfg.get(name):
            try:
                del cfg[name]
            except KeyError:
                pass
Start configuration process for the provided handler Args: cfg (dict): config container handler (config.Handler class): config handler to use path (str): current path in the configuration progress
juraj-google-style
def ValidateDict(self, dict_value, outer_messages):
    """Validate a dictionary value against this message's field specs.

    Checks whether all individual fields of |dict_value| are valid, i.e. all
    required fields exist and the values of fields correspond to their types.

    Args:
        dict_value: Dictionary value to validate.
        outer_messages: Messages visible from the scope of |dict_value|.

    Returns:
        Dictionary value validated.

    Raises:
        NameError: If any required fields are missing.
    """
    valid_dict = {}
    for f in self.fields:
        if f.name in dict_value:
            valid_dict[f.name] = self._ValidateField(f, dict_value[f.name], outer_messages)
        elif not f.optional:
            # Typo fix: error message previously read "Mandatoray".
            raise NameError("Mandatory field missing in message '%s': %s" % (self.name, f.name))
    return valid_dict
Validate a dictionary value. It checks whether all individual fields of |dict_value| are valid, i.e. all required fields exist and the values of fields correspond to their types. Args: dict_value: Dictionary value to validate. outer_messages: Messages visible from the scope of |dict_value|. Returns: Dictionary value validated. Raises: NameError: If any required fields are missed.
github-repos
def non_slot_devices(self, var_list):
    """Device(s) for non-slot variables. DEPRECATED: TF 1.x ONLY.

    Args:
        var_list: The list of variables being optimized, needed with the
            default `tf.distribute.Strategy`.

    Returns:
        A sequence of devices for non-slot variables.

    Raises:
        NotImplementedError: always; must be implemented in descendants.
    """
    raise NotImplementedError('must be implemented in descendants')
Device(s) for non-slot variables. DEPRECATED: TF 1.x ONLY. This method returns non-slot devices where non-slot variables are placed. Users can create non-slot variables on these devices by using a block: ```python with tf.distribute.StrategyExtended.colocate_vars_with(tf.distribute.StrategyExtended.non_slot_devices(...)): ... ``` Args: var_list: The list of variables being optimized, needed with the default `tf.distribute.Strategy`. Returns: A sequence of devices for non-slot variables.
github-repos
def n_choose_k(n, k):
    """Return the number of combinations for n choose k.

    Args:
        n (int): the total number of options.
        k (int): the number of elements.

    Returns:
        int: the binomial coefficient C(n, k); 0 when k > n.
    """
    # Bug fix: the old reduce()-based formula used true division, returning a
    # float, and wrongly returned 0 for n == 0 (C(0, 0) is 1). math.comb is
    # exact integer arithmetic (Python >= 3.8).
    import math
    return math.comb(n, k)
Return the number of combinations for n choose k. Args: n (int): the total number of options. k (int): The number of elements. Returns: int: returns the binomial coefficient
codesearchnet
def __init__(self, config, n_bins=16, mlp_dim=256, min_depth=0.001, max_depth=10):
    """Bin center regressor network. Can be "normed" or "unnormed".

    If "normed", bin centers are bounded on the (min_depth, max_depth)
    interval.

    Args:
        config (`int`): Model configuration.
        n_bins (`int`, *optional*, defaults to 16): Number of bin centers.
        mlp_dim (`int`, *optional*, defaults to 256): Hidden dimension.
        min_depth (`float`, *optional*, defaults to 1e-3): Min depth value.
        max_depth (`float`, *optional*, defaults to 10): Max depth value.
    """
    super().__init__()
    self.in_features = config.bottleneck_features
    self.bin_centers_type = config.bin_centers_type
    self.min_depth = min_depth
    self.max_depth = max_depth
    # Two 1x1 convs: bottleneck features -> mlp_dim -> n_bins.
    self.conv1 = nn.Conv2d(self.in_features, mlp_dim, 1, 1, 0)
    self.act1 = nn.ReLU(inplace=True)
    self.conv2 = nn.Conv2d(mlp_dim, n_bins, 1, 1, 0)
    # Softplus keeps unnormed outputs strictly positive; ReLU for "normed".
    self.act2 = nn.ReLU(inplace=True) if self.bin_centers_type == 'normed' else nn.Softplus()
Bin center regressor network. Can be "normed" or "unnormed". If "normed", bin centers are bounded on the (min_depth, max_depth) interval. Args: config (`int`): Model configuration. n_bins (`int`, *optional*, defaults to 16): Number of bin centers. mlp_dim (`int`, *optional*, defaults to 256): Hidden dimension. min_depth (`float`, *optional*, defaults to 1e-3): Min depth value. max_depth (`float`, *optional*, defaults to 10): Max depth value.
github-repos
def _valuelistToBytestring(valuelist, numberOfRegisters):
    """Convert a list of numerical values to a bytestring.

    Each element is encoded as an unsigned 16-bit integer (two bytes).

    Args:
        * valuelist (list of int): The input list; elements must be in the
          range 0 to 65535.
        * numberOfRegisters (int): The number of registers, for error checking.

    Returns:
        A bytestring (str) of length 2 * numberOfRegisters.

    Raises:
        TypeError, ValueError
    """
    MINVALUE = 0
    MAXVALUE = 65535
    _checkInt(numberOfRegisters, minvalue=1, description='number of registers')
    if not isinstance(valuelist, list):
        raise TypeError('The valuelist parameter must be a list. Given {0!r}.'.format(valuelist))
    for value in valuelist:
        _checkInt(value, minvalue=MINVALUE, maxvalue=MAXVALUE,
                  description='elements in the input value list')
    _checkInt(len(valuelist), minvalue=numberOfRegisters, maxvalue=numberOfRegisters,
              description='length of the list')
    numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters
    # Encode each register as two bytes and join in one pass.
    bytestring = ''.join(_numToTwoByteString(value, signed=False) for value in valuelist)
    assert len(bytestring) == numberOfBytes
    return bytestring
Convert a list of numerical values to a bytestring. Each element is 'unsigned INT16'. Args: * valuelist (list of int): The input list. The elements should be in the range 0 to 65535. * numberOfRegisters (int): The number of registers. For error checking. Returns: A bytestring (str). Length = 2*numberOfRegisters Raises: TypeError, ValueError
juraj-google-style
def __init__(self, channel):
    """Constructor.

    Wires up one unary-unary gRPC stub per CloudRedis v1beta1 method.
    Create/Update/Delete return long-running Operation messages.

    Args:
        channel: A grpc.Channel.
    """
    self.ListInstances = channel.unary_unary(
        "/google.cloud.redis.v1beta1.CloudRedis/ListInstances",
        request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.ListInstancesRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.ListInstancesResponse.FromString,
    )
    self.GetInstance = channel.unary_unary(
        "/google.cloud.redis.v1beta1.CloudRedis/GetInstance",
        request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.GetInstanceRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.Instance.FromString,
    )
    self.CreateInstance = channel.unary_unary(
        "/google.cloud.redis.v1beta1.CloudRedis/CreateInstance",
        request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.CreateInstanceRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
    self.UpdateInstance = channel.unary_unary(
        "/google.cloud.redis.v1beta1.CloudRedis/UpdateInstance",
        request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.UpdateInstanceRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
    self.DeleteInstance = channel.unary_unary(
        "/google.cloud.redis.v1beta1.CloudRedis/DeleteInstance",
        request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.DeleteInstanceRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def start(self, channel):
    """Start this virtual device, including any necessary worker threads.

    Starts the base device first, then every registered tile on the same
    channel.

    Args:
        channel (IOTilePushChannel): the channel with a stream and trace
            routine for streaming and tracing data through a
            VirtualInterface.
    """
    super(TileBasedVirtualDevice, self).start(channel)
    for tile in self._tiles.values():
        tile.start(channel=channel)
Start running this virtual device including any necessary worker threads. Args: channel (IOTilePushChannel): the channel with a stream and trace routine for streaming and tracing data through a VirtualInterface
juraj-google-style
def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False):
    """Return a list of all dirs and files inside the given app directory.

    Can filter the contents to return only dirs or only files.

    Args:
        dir_name: Directory to scan, relative to ``app/``.
        get_dirs: Include directories in the result.
        get_files: Include files in the result.
        hide_ignored: Exclude entries whose names start with an underscore.
    """
    # When neither flag is given, include both dirs and files.
    if get_dirs is None and get_files is None:
        get_dirs = True
        get_files = True
    source_dir = os.path.join(settings.BASE_DIR, 'app', dir_name)
    entries = []
    for entry in os.listdir(source_dir):
        if hide_ignored and entry.startswith('_'):
            continue
        entry_is_dir = os.path.isdir(os.path.join(source_dir, entry))
        if (entry_is_dir and get_dirs) or (not entry_is_dir and get_files):
            entries.append(entry)
    return entries
Return list of all dirs and files inside given dir. Also can filter contents to return only dirs or files. Args: - dir_name: Which directory we need to scan (relative) - get_dirs: Return dirs list - get_files: Return files list - hide_ignored: Exclude files and dirs with initial underscore
juraj-google-style
def delete(self, **options):
    """Permanently delete this blob from Blobstore.

    Issues the delete asynchronously, then blocks until it completes.

    Args:
        **options: Options for create_rpc().
    """
    fut = delete_async(self.key(), **options)
    fut.get_result()
Permanently delete this blob from Blobstore. Args: **options: Options for create_rpc().
juraj-google-style
def __init__(self, particle_kind="bead"):
    """Initialize a Bead with a single particle and two bonding ports.

    Args:
        particle_kind (str): Descriptive name for the Bead particle.
    """
    super(Bead, self).__init__()
    self.add(mb.Particle(name=particle_kind), particle_kind)
    # 'up' and 'down' ports are anchored to the particle and offset along
    # +/- y so neighboring compounds can bond on either side.
    self.add(mb.Port(anchor=self.labels[particle_kind]), 'up')
    self['up'].translate(np.array([0, 0.7, 0]))
    self.add(mb.Port(anchor=self.labels[particle_kind]), 'down')
    self['down'].translate(np.array([0, -0.7, 0]))
Initialize a Bead object. Args: particle_kind (str): Descriptive name for the Bead.
juraj-google-style
def has_member(self, device_object):
    """Return bool whether the group has a device as a member.

    Membership is determined by ID, as names can be shared amongst devices.

    Args:
        device_object (Computer or MobileDevice): Device to look up.

    Returns:
        bool: True if a device with a matching ID is in the group.

    Raises:
        ValueError: If device_object is neither a computer nor a
            mobile_device element.
    """
    if device_object.tag == "computer":
        container_search = "computers/computer"
    elif device_object.tag == "mobile_device":
        container_search = "mobile_devices/mobile_device"
    else:
        raise ValueError("device_object must be a computer or mobile_device")
    # Bug fix: `len(...) is not 0` compared identity, not value, and is a
    # SyntaxWarning on modern Pythons; any() expresses the membership test.
    return any(device.findtext("id") == device_object.id
               for device in self.findall(container_search))
Return bool whether group has a device as a member. Args: device_object (Computer or MobileDevice). Membership is determined by ID, as names can be shared amongst devices.
juraj-google-style
def _generate_fieldnames_if_bai_query(self, node_value, bai_field_variation, query_bai_field_if_dots_in_name):
    """Generate new fieldnames in case of a BAI query.

    Args:
        node_value (six.text_type): The node's value (i.e. author name).
        bai_field_variation (six.text_type): Which field variation to query
            ('search' or 'raw').
        query_bai_field_if_dots_in_name (bool): Whether to query the BAI
            field (in addition to the author's name field) if dots exist in
            the name and the name contains no whitespace.

    Returns:
        list: Fieldnames to query on, in case of a BAI query; None otherwise.

    Raises:
        ValueError: If ``bai_field_variation`` is not 'search' or 'raw'.
    """
    if (bai_field_variation not in (FieldVariations.search, FieldVariations.raw)):
        raise ValueError('Non supported field variation "{}".'.format(bai_field_variation))
    normalized_author_name = normalize_name(node_value).strip('.')
    # Value that already looks like a BAI: query the BAI field only.
    if (ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and ElasticSearchVisitor.BAI_REGEX.match(node_value)):
        return [((ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.') + bai_field_variation)]
    # Dotted, whitespace-free name: query BAI plus the regular author field.
    elif ((not whitespace.search(normalized_author_name)) and query_bai_field_if_dots_in_name and ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and ('.' in normalized_author_name)):
        return ([((ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.') + bai_field_variation)] + force_list(ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author']))
    else:
        # Not a BAI query: the caller falls back to its default fieldnames.
        return None
Generates new fieldnames in case of BAI query. Args: node_value (six.text_type): The node's value (i.e. author name). bai_field_variation (six.text_type): Which field variation to query ('search' or 'raw'). query_bai_field_if_dots_in_name (bool): Whether to query BAI field (in addition to author's name field) if dots exist in the name and name contains no whitespace. Returns: list: Fieldnames to query on, in case of BAI query or None, otherwise. Raises: ValueError, if ``field_variation`` is not one of ('search', 'raw').
codesearchnet
def assert_type(__x, __t) -> None:
    """Run-time no-op backing pytype's static `assert_type` check.

    assert_type is handled internally by pytype at type-checking time; at
    runtime the call must simply do nothing.

    Args:
        __x: The object to make the type assertion about.
        __t: The type we want to assert.
    """
    # Intentionally empty: both arguments are consumed statically by pytype.
Prevent runtime errors from assert_type statements. assert_type is handled internally by pytype at type-checking time; it should do nothing at runtime. Usage example: ``` import pytype_extensions assert_type = pytype_extensions.assert_type x = 3 assert_type(x, int) ``` Args: __x: The object to make the type assertion about. __t: The type we want to assert.
github-repos
def _tensor_product(t1, t2):
    """Compute the outer product of two possibly batched vectors.

    Args:
        t1: A `tf.Tensor` of shape `[..., n]`.
        t2: A `tf.Tensor` of shape `[..., m]`.

    Returns:
        A tensor `r` of shape `[..., n, m]` with matching batch dimensions,
        where `r[..., i, j] = t1[..., i] * t2[..., j]`.
    """
    # Turn t1 into a column ([..., n, 1]) and t2 into a row ([..., 1, m]),
    # then a batched matmul yields the outer product.
    column = tf.expand_dims(t1, axis=-1)
    row = tf.expand_dims(t2, axis=-2)
    return tf.matmul(column, row)
Computes the outer product of two possibly batched vectors. Args: t1: A `tf.Tensor` of shape `[..., n]`. t2: A `tf.Tensor` of shape `[..., m]`. Returns: A tensor of shape `[..., n, m]` with matching batch dimensions, let's call it `r`, whose components are: ```None r[..., i, j] = t1[..., i] * t2[..., j] ```
codesearchnet
def find_matching(self) -> Dict[(TLeft, TRight)]:
    """Find a matching in the bipartite graph.

    Uses the Hopcroft-Karp algorithm via the `hopcroftkarp` package.

    Returns:
        A dictionary mapping each matched left-part node to its right-part
        partner.
    """
    # Build a directed adjacency map; nodes are tagged with LEFT/RIGHT so
    # that identical values on both sides cannot collide.
    directed_graph = {}
    for (left, right) in self._edges:
        tail = (LEFT, left)
        head = (RIGHT, right)
        if (tail not in directed_graph):
            directed_graph[tail] = {head}
        else:
            directed_graph[tail].add(head)
    matching = HopcroftKarp(directed_graph).maximum_matching()
    # The maximum matching contains both directions; keep only left->right
    # edges and strip the LEFT/RIGHT tags.
    return dict(((tail[1], head[1]) for (tail, head) in matching.items() if (tail[0] == LEFT)))
Finds a matching in the bipartite graph. This is done using the Hopcroft-Karp algorithm with an implementation from the `hopcroftkarp` package. Returns: A dictionary where each edge of the matching is represented by a key-value pair with the key being from the left part of the graph and the value from the right part.
codesearchnet
def _GetMember(component, args):
    """Return a subcomponent of `component` by consuming one arg from `args`.

    The first arg is looked up as an attribute name, both verbatim and with
    dashes replaced by underscores.

    Args:
        component: The component from which to get a member.
        args: Args from which to consume in the search for the next component.

    Returns:
        component: The member that was found by consuming an arg.
        consumed_args: The args that were consumed by getting this member.
        remaining_args: The remaining args that haven't been consumed yet.

    Raises:
        FireError: If we cannot consume an argument to get a member.
    """
    arg = args[0]
    members = dir(component)
    for candidate in (arg, arg.replace('-', '_')):
        if candidate in members:
            return getattr(component, candidate), [arg], args[1:]
    raise FireError('Could not consume arg:', arg)
Returns a subcomponent of component by consuming an arg from args. Given a starting component and args, this function gets a member from that component, consuming one arg in the process. Args: component: The component from which to get a member. args: Args from which to consume in the search for the next component. Returns: component: The component that was found by consuming an arg. consumed_args: The args that were consumed by getting this member. remaining_args: The remaining args that haven't been consumed yet. Raises: FireError: If we cannot consume an argument to get a member.
github-repos
def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None, max_window=3000, variant_type='snv'):
    """Load variants for a family into the database.

    Args:
        adapter (loqusdb.plugins.Adapter): initialized plugin
        vcf_obj: iterable of VCF variant records
        case_obj (Case): dict with case information
        skip_case_id (bool): whether to include the case id on variant
            level or not
        gq_treshold (int): genotype quality threshold
        max_window (int): max size for SV windows
        variant_type (str): 'sv' or 'snv'

    Returns:
        nr_inserted (int): number of variants inserted
    """
    # The case stores separate counts for SNVs and SVs; pick the right one
    # so the progress bar length matches the VCF being loaded.
    if variant_type == 'snv':
        nr_variants = case_obj['nr_variants']
    else:
        nr_variants = case_obj['nr_sv_variants']
    nr_inserted = 0
    case_id = case_obj['case_id']
    if skip_case_id:
        case_id = None
    with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar:
        # Lazily build variant documents as the progress bar is consumed.
        variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)
        if variant_type == 'sv':
            # SVs are inserted one at a time so window-based merging can apply.
            for sv_variant in variants:
                if not sv_variant:
                    continue
                adapter.add_structural_variant(variant=sv_variant, max_window=max_window)
                nr_inserted += 1
        if variant_type == 'snv':
            # SNVs are bulk-inserted; the adapter reports the insert count.
            nr_inserted = adapter.add_variants(variants)
    LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
    return nr_inserted
Load variants for a family into the database. Args: adapter (loqusdb.plugins.Adapter): initialized plugin case_obj(Case): dict with case information nr_variants(int) skip_case_id (bool): whether to include the case id on variant level or not gq_treshold(int) max_window(int): Specify the max size for sv windows variant_type(str): 'sv' or 'snv' Returns: nr_inserted(int)
juraj-google-style
def _process_arguments(arguments):
    """Return user-supplied HTML arguments to add to a Bokeh server URL.

    Args:
        arguments (dict[str, object] or None): Key/value pairs to add to
            the URL; keys starting with "bokeh-" are reserved and skipped.

    Returns:
        str: Concatenated "&key=value" pairs, URL-quoted; empty string when
        `arguments` is None.
    """
    if arguments is None:
        return ""
    return "".join(
        "&{}={}".format(quote_plus(str(key)), quote_plus(str(value)))
        for key, value in arguments.items()
        if not key.startswith("bokeh-")
    )
Return user-supplied HTML arguments to add to a Bokeh server URL. Args: arguments (dict[str, object]) : Key/value pairs to add to the URL Returns: str
juraj-google-style
def getFingerprintForExpression(self, body, sparsity=1.0):
    """Resolve an expression to a fingerprint.

    Args:
        body (ExpressionOperation): The JSON-encoded expression to be
            evaluated (required).
        sparsity (float): Sparsify the resulting expression to this
            percentage (optional).

    Returns:
        Fingerprint

    Raises:
        CorticalioException: if the request was not successful.
    """
    return self._expressions.resolveExpression(self._retina, body, sparsity)
Resolve an expression Args: body, ExpressionOperation: The JSON encoded expression to be evaluated (required) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: Fingerprint Raises: CorticalioException: if the request was not successful
juraj-google-style
def ExpandGlobs(path, opts=None):
    """Perform glob expansion on a given path.

    The path can contain regular glob elements (such as `**`, `*`, `?`,
    `[a-z]`). For example, with files `foo`, `bar`, `baz`, expansion of
    `ba?` yields `bar` and `baz`.

    Args:
        path: A path to expand.
        opts: A `PathOpts` object.

    Returns:
        Generator over all possible glob expansions of the given path.

    Raises:
        ValueError: If the given path is empty or relative.
    """
    precondition.AssertType(path, Text)
    if (not path):
        raise ValueError('Path is empty')
    if (not _IsAbsolutePath(path, opts)):
        raise ValueError(("Path '%s' is not absolute" % path))
    if ((opts is not None) and (opts.pathtype == rdf_paths.PathSpec.PathType.REGISTRY)):
        # Registry paths: normalize separators and treat the hive as root.
        (root_dir, tail) = path.replace('\\', '/').lstrip('/').split('/', 1)
        components = list(ParsePath(tail, opts=opts))
    else:
        # Filesystem paths: split off the drive (if any) as the root.
        (drive, tail) = os.path.splitdrive(path)
        root_dir = os.path.join(drive, os.path.sep).upper()
        components = list(ParsePath(tail[1:], opts=opts))
    return _ExpandComponents(root_dir, components)
Performs glob expansion on a given path. Path can contain regular glob elements (such as `**`, `*`, `?`, `[a-z]`). For example, having files `foo`, `bar`, `baz` glob expansion of `ba?` will yield `bar` and `baz`. Args: path: A path to expand. opts: A `PathOpts` object. Returns: Generator over all possible glob expansions of a given path. Raises: ValueError: If given path is empty or relative.
codesearchnet
def subdivide_with(self, branches, join_function, name='mixed'):
    """Branch this pretty tensor and rejoin with an explicit join function.

    Use in a ``with`` statement, e.g. to fork and join with a sum::

        with pt.subdivide_with(2, tf.add_n) as [a, b]:
            a...
            b...

    Args:
        branches: The number of branches.
        join_function: A function to use when rejoining.
        name: A base name for this branch.

    Returns:
        A python context manager for use in a with statement, supplying a
        sequence of tensors with one per branch.

    Raises:
        ValueError: if join_function is None.
    """
    return _subdivide_context(self, branches, join_function, name)
Branches this pretty tensor and uses an explicit join function. This should be used in a with statement, for example to fork and join with a sum: with pt.subdivide_with(2, tf.add_n) as [a, b]: a... b... Args: branches: The number of branches. join_function: A function to use when rejoining. name: A base name for this branch. Returns: A python context manager to use in a with statement that supplies a sequence of tensors with one per branch. Raises: ValueError: if join_function is None.
codesearchnet
def get_callback_task(self, *args, **kwargs):
    """Return a task for calling back this Pipeline.

    Args:
        params: Keyword argument containing a dictionary of key/value pairs
            that will be passed to the callback when it is executed.
        args, kwargs: Passed to the taskqueue.Task constructor. Use these
            arguments to set the task name (for idempotence), etc.

    Returns:
        A taskqueue.Task instance that must be enqueued by the caller.

    Raises:
        UnexpectedPipelineError: If this pipeline is not asynchronous.
    """
    # Bug fix: `async` is a reserved keyword since Python 3.7, so reading
    # the attribute as `self.async` is a SyntaxError; use getattr() instead.
    if not getattr(self, 'async'):
        raise UnexpectedPipelineError(
            'May only call get_callback_task() method for asynchronous pipelines.')
    params = kwargs.get('params', {})
    kwargs['params'] = params
    params['pipeline_id'] = self._pipeline_key.name()
    kwargs['url'] = self.base_path + '/callback'
    kwargs['method'] = 'POST'
    return taskqueue.Task(*args, **kwargs)
Returns a task for calling back this Pipeline. Args: params: Keyword argument containing a dictionary of key/value pairs that will be passed to the callback when it is executed. args, kwargs: Passed to the taskqueue.Task constructor. Use these arguments to set the task name (for idempotence), etc. Returns: A taskqueue.Task instance that must be enqueued by the caller.
juraj-google-style
def do_dock6_flexible(self, ligand_path, force_rerun=False):
    """Dock a ligand to the protein with DOCK6 flexible docking.

    Args:
        ligand_path (str): Path to ligand (mol2 format) to dock to protein.
        force_rerun (bool): If the method should be rerun even if the
            output file exists.
    """
    log.debug('{}: running DOCK6...'.format(self.id))
    ligand_name = os.path.basename(ligand_path).split('.')[0]
    # Input script plus the three output files DOCK6 produces.
    in_name = op.join(self.dock_dir, "{}_{}_flexdock.in".format(self.id, ligand_name))
    out_name = op.join(self.dock_dir, "{}_{}_flexdock.out".format(self.id, ligand_name))
    conformers_out = op.join(self.dock_dir, '{}_{}_flexdock_conformers.mol2'.format(self.id, ligand_name))
    scored_out = op.join(self.dock_dir, '{}_{}_flexdock_scored.mol2'.format(self.id, ligand_name))
    ranked_out = op.join(self.dock_dir, '{}_{}_flexdock_ranked.mol2'.format(self.id, ligand_name))
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=ranked_out):
        with open(in_name, "w") as f:
            # NOTE(review): the DOCK6 input template string literal appears
            # to be missing here (`= .format(...)` is not valid Python) —
            # restore the triple-quoted template; TODO confirm against the
            # original source.
            dock_text = .format(ligand_path, op.basename(self.sphsel_path), op.splitext(op.basename(self.grid_path))[0], op.splitext(op.basename(self.grid_path))[0], self.amb_file, self.flex1_file, self.flex2_file, self.id, ligand_name)
            f.write(dock_text)
        os.chdir(self.dock_dir)
        cmd = "dock6 -i {} -o {} -v".format(in_name, out_name)
        os.system(cmd)
    if ssbio.utils.is_non_zero_file(ranked_out):
        # A non-empty ranked file signals a successful docking run.
        self.dock_flexible_outfile = out_name
        self.dock_flexible_conformers_result = conformers_out
        self.dock_flexible_scored_result = scored_out
        log.debug('{}: successful docking!'.format(self.dock_flexible_outfile))
    else:
        log.error('{}+{}: empty DOCK6 ranked file, execution error (or ligand failed to dock)'.format(self.id, op.basename(ligand_path)))
Dock a ligand to the protein. Args: ligand_path (str): Path to ligand (mol2 format) to dock to protein force_rerun (bool): If method should be rerun even if output file exists
juraj-google-style
def modify_object(self, modification, obj):
    """Modify an object that supports pymatgen's as_dict()/from_dict() API.

    Args:
        modification (dict): Modification must be {action_keyword: settings}.
            E.g., {'_set': {'Hello': 'Universe', 'Bye': 'World'}}.
        obj (object): Object to modify.

    Returns:
        The modified object, rebuilt via from_dict().
    """
    # Round-trip through the dict representation so the generic dict
    # modifier can be applied to arbitrary serializable objects.
    obj_as_dict = obj.as_dict()
    self.modify(modification, obj_as_dict)
    return obj.from_dict(obj_as_dict)
Modify an object that supports pymatgen's as_dict() and from_dict API. Args: modification (dict): Modification must be {action_keyword : settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}} obj (object): Object to modify
juraj-google-style
def series_expand(self, param: Symbol, about, order: int):
    """Expand the matrix expression as a truncated power series in a scalar.

    Args:
        param: Expansion parameter.
        about (.Scalar): Point about which to expand.
        order: Maximum order of expansion >= 0.

    Returns:
        tuple of length (order + 1), where the entries are the expansion
        coefficient matrices.
    """
    s = self.shape
    # Expand each matrix element, then transpose so that coefficients of
    # the same order across all elements are grouped together.
    emats = zip(*[o.series_expand(param, about, order) for o in self.matrix.ravel()])
    return tuple((Matrix(np_array(em).reshape(s)) for em in emats))
Expand the matrix expression as a truncated power series in a scalar parameter. Args: param: Expansion parameter. about (.Scalar): Point about which to expand. order: Maximum order of expansion >= 0 Returns: tuple of length (order+1), where the entries are the expansion coefficients.
juraj-google-style
def load(tiff_filename):
    """Import a TIFF file into a numpy array.

    Args:
        tiff_filename: A string filename of a TIFF datafile.

    Returns:
        A numpy array with data from the TIFF file.

    Raises:
        ValueError: If the file could not be read as a TIFF image.
    """
    tiff_filename = os.path.expanduser(tiff_filename)
    try:
        img = tiff.imread(tiff_filename)
    except Exception as exc:
        # Chain the underlying cause; the previous version also had an
        # unreachable bare `raise` after this statement, now removed.
        raise ValueError("Could not load file {0} for conversion."
                         .format(tiff_filename)) from exc
    return numpy.array(img)
Import a TIFF file into a numpy array. Arguments: tiff_filename: A string filename of a TIFF datafile Returns: A numpy array with data from the TIFF file
juraj-google-style
def Dump(obj):
    """Stringify a Python object into its YAML representation.

    Args:
        obj: A Python object to convert to YAML.

    Returns:
        A YAML representation of the given object, as unicode text.
    """
    serialized = yaml.safe_dump(obj, default_flow_style=False, allow_unicode=True)
    # safe_dump returns bytes on Python 2; normalize to unicode there.
    return serialized.decode("utf-8") if compatibility.PY2 else serialized
Stringifies a Python object into its YAML representation. Args: obj: A Python object to convert to YAML. Returns: A YAML representation of the given object.
juraj-google-style
def Verify(self, public_key):
    """Verify the data in this blob.

    Args:
        public_key: The public key to use for verification.

    Returns:
        True when verification succeeds.

    Raises:
        rdfvalue.DecodeError: If the digest or signature type is
            unsupported, or if the data cannot be verified.
    """
    if self.digest_type != self.HashType.SHA256:
        raise rdfvalue.DecodeError('Unsupported digest.')
    supported_signatures = (self.SignatureType.RSA_PKCS1v15,
                            self.SignatureType.RSA_PSS)
    if self.signature_type not in supported_signatures:
        raise rdfvalue.DecodeError('Unsupported signature type.')
    try:
        public_key.Verify(self.data, self.signature)
    except InvalidSignature as e:
        raise rdfvalue.DecodeError('Could not verify blob. Error: %s' % e)
    return True
Verify the data in this blob. Args: public_key: The public key to use for verification. Returns: True when verification succeeds. Raises: rdfvalue.DecodeError if the data is not suitable verified.
codesearchnet
def add_becs_from_scf_task(self, scf_task, ddk_tolerance, ph_tolerance):
    """Build tasks for computing Born effective charges; add them to the work.

    Args:
        scf_task: ScfTask object.
        ddk_tolerance: dict {"varname": value} with the tolerance used in
            the DDK run. None to use the AbiPy default.
        ph_tolerance: dict {"varname": value} with the tolerance used in
            the phonon run. None to use the AbiPy default.

    Returns:
        (ddk_tasks, bec_tasks)
    """
    if (not isinstance(scf_task, ScfTask)):
        raise TypeError(('task `%s` does not inherit from ScfTask' % scf_task))
    # DDK tasks depend on the SCF wavefunctions.
    multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)
    ddk_tasks = []
    for ddk_inp in multi_ddk:
        ddk_task = self.register_ddk_task(ddk_inp, deps={scf_task: 'WFK'})
        ddk_tasks.append(ddk_task)
    # BEC tasks depend on all DDK outputs plus the SCF wavefunctions.
    bec_deps = {ddk_task: 'DDK' for ddk_task in ddk_tasks}
    bec_deps.update({scf_task: 'WFK'})
    bec_inputs = scf_task.input.make_bec_inputs(tolerance=ph_tolerance)
    bec_tasks = []
    for bec_inp in bec_inputs:
        bec_task = self.register_bec_task(bec_inp, deps=bec_deps)
        bec_tasks.append(bec_task)
    return (ddk_tasks, bec_tasks)
Build tasks for the computation of Born effective charges and add them to the work. Args: scf_task: ScfTask object. ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run. None to use AbiPy default. ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run. None to use AbiPy default. Return: (ddk_tasks, bec_tasks)
codesearchnet
def deconv_output_length(input_length, filter_size, padding, output_padding=None, stride=0, dilation=1):
    """Determines output length of a transposed convolution given input length.

    Args:
        input_length: Integer, or None (in which case None is returned).
        filter_size: Integer.
        padding: one of `"same"`, `"valid"`, `"full"`.
        output_padding: Integer, amount of padding along the output
            dimension. Can be set to `None`, in which case the output
            length is inferred.
        stride: Integer.
        dilation: Integer.

    Returns:
        The output length (integer).
    """
    assert padding in {'same', 'valid', 'full'}
    if input_length is None:
        return None
    # Effective kernel size once dilation is accounted for.
    filter_size = filter_size + (filter_size - 1) * (dilation - 1)
    if output_padding is None:
        # Infer the output length from the padding mode alone.
        if padding == 'valid':
            length = input_length * stride + max(filter_size - stride, 0)
        elif padding == 'full':
            length = input_length * stride - (stride + filter_size - 2)
        elif padding == 'same':
            length = input_length * stride
    else:
        # Explicit output padding: invert the forward convolution formula.
        if padding == 'same':
            # Bug fix: 'same' padding is half the (dilated) kernel size;
            # the previous `pad = filter_size` shrank the output and
            # disagreed with the Keras reference formula.
            pad = filter_size // 2
        elif padding == 'valid':
            pad = 0
        elif padding == 'full':
            pad = filter_size - 1
        length = (input_length - 1) * stride + filter_size - 2 * pad + output_padding
    return length
Determines output length of a transposed convolution given input length. Args: input_length: Integer. filter_size: Integer. padding: one of `"same"`, `"valid"`, `"full"`. output_padding: Integer, amount of padding along the output dimension. Can be set to `None` in which case the output length is inferred. stride: Integer. dilation: Integer. Returns: The output length (integer).
github-repos
def SplitPatch(data):
    """Split a patch into separate pieces for each file.

    Args:
        data: A string containing the output of svn diff.

    Returns:
        A list of 2-tuples (filename, text) where text is the svn diff
        output pertaining to filename.
    """
    patches = []
    current_name = None
    current_lines = []

    def _flush():
        # Emit the accumulated section, if any; preamble before the first
        # file header has no name and is discarded.
        if current_name and current_lines:
            patches.append((current_name, ''.join(current_lines)))

    for line in data.splitlines(True):
        boundary_name = None
        if line.startswith('Index:'):
            boundary_name = line.split(':', 1)[1].strip()
        elif line.startswith('Property changes on:'):
            candidate = to_slash(line.split(':', 1)[1].strip())
            # Property changes for the current file stay in its section.
            if candidate != current_name:
                boundary_name = candidate
        if boundary_name:
            _flush()
            current_name = boundary_name
            current_lines = [line]
            continue
        if current_lines is not None:
            current_lines.append(line)
    _flush()
    return patches
Splits a patch into separate pieces for each file. Args: data: A string containing the output of svn diff. Returns: A list of 2-tuple (filename, text) where text is the svn diff output pertaining to filename.
juraj-google-style
def validate(self):
    """Verify that the value of the BigInteger is valid.

    Raises:
        TypeError: if the value is set but is not of type int or long.
    """
    if self.value is None:
        return
    if not isinstance(self.value, six.integer_types):
        raise TypeError('expected (one of): {0}, observed: {1}'.format(
            six.integer_types, type(self.value)))
Verify that the value of the BigInteger is valid. Raises: TypeError: if the value is not of type int or long
codesearchnet
def autosave(self, index):
    """Autosave a file.

    Do nothing if the `changed_since_autosave` flag is not set or the file
    is newly created (and thus not named by the user). Otherwise, save a
    copy of the file with the name given by `self.get_autosave_filename()`
    and clear the `changed_since_autosave` flag. Errors raised when saving
    are reported via a dialog rather than propagated.

    Args:
        index (int): index into self.stack.data
    """
    finfo = self.stack.data[index]
    document = finfo.editor.document()
    # Skip unchanged documents and files the user has not yet named.
    if ((not document.changed_since_autosave) or finfo.newly_created):
        return
    autosave_filename = self.get_autosave_filename(finfo.filename)
    logger.debug('Autosaving %s to %s', finfo.filename, autosave_filename)
    try:
        self.stack._write_to_file(finfo, autosave_filename)
        document.changed_since_autosave = False
    except EnvironmentError as error:
        # Surface the failure in a (suppressible) dialog instead of raising.
        action = _('Error while autosaving {} to {}').format(finfo.filename, autosave_filename)
        msgbox = AutosaveErrorDialog(action, error)
        msgbox.exec_if_enabled()
Autosave a file. Do nothing if the `changed_since_autosave` flag is not set or the file is newly created (and thus not named by the user). Otherwise, save a copy of the file with the name given by `self.get_autosave_filename()` and clear the `changed_since_autosave` flag. Errors raised when saving are silently ignored. Args: index (int): index into self.stack.data
codesearchnet
class JanusVQVAEOutput(ModelOutput):
    """Base class for Janus VQ-VAE mode model outputs.

    Args:
        decoded_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            Reconstructed pixel values after encoding and decoding the input.
        embedding_loss (`torch.FloatTensor`):
            Embedding loss.
    """

    # Reconstructed pixel values after encoding and decoding the input.
    decoded_pixel_values: Optional[torch.FloatTensor] = None
    # Embedding loss.
    embedding_loss: torch.FloatTensor = None
Base class for Janus VQ-VAE mode model outputs. Args: decoded_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): Reconstructed pixel values after encoding and decoding the input. embedding_loss (`torch.FloatTensor`): Embedding loss.
github-repos
def __copy_extracted(self, path, destination):
    """Copy a file that was already extracted to the destination directory.

    Args:
        path (str): Path of the file to copy, relative to the root of the
            archive.
        destination (str): Directory to extract the archive to.
    """
    unpacked_dir = '{}.unpacked'.format(self.filename)
    # Without the unpacked directory there is nothing to copy from.
    if not os.path.isdir(unpacked_dir):
        LOGGER.warn(
            'Failed to copy extracted file %s, no extracted dir', path
        )
        return
    source_path = os.path.join(unpacked_dir, path)
    if not os.path.exists(source_path):
        LOGGER.warn(
            'Failed to copy extracted file %s, does not exist', path
        )
        return
    shutil.copyfile(source_path, os.path.join(destination, path))
Copies a file that was already extracted to the destination directory. Args: path (str): Relative (to the root of the archive) of the file to copy. destination (str): Directory to extract the archive to.
juraj-google-style
def add_point_feature(self, resnum, feat_type=None, feat_id=None, qualifiers=None):
    """Add a feature to the features list describing a single residue.

    Args:
        resnum (int): Protein sequence residue number.
        feat_type (str, optional): Description of the feature type
            (e.g. 'catalytic residue').
        feat_id (str, optional): ID of the feature type (e.g. 'TM1').
        qualifiers (dict, optional): Extra qualifiers stored on the feature.

    Raises:
        ValueError: If a feature file is associated with the sequence.
    """
    if self.feature_file:
        raise ValueError('Feature file associated with sequence, please remove file association to append additional features.')
    if (not feat_type):
        feat_type = 'Manually added protein sequence single residue feature'
    # Biopython locations are 0-based, end-exclusive; (resnum-1, resnum)
    # spans exactly the single residue `resnum`.
    newfeat = SeqFeature(location=FeatureLocation(ExactPosition((resnum - 1)), ExactPosition(resnum)), type=feat_type, id=feat_id, qualifiers=qualifiers)
    self.features.append(newfeat)
Add a feature to the features list describing a single residue. Args: resnum (int): Protein sequence residue number feat_type (str, optional): Optional description of the feature type (ie. 'catalytic residue') feat_id (str, optional): Optional ID of the feature type (ie. 'TM1')
codesearchnet
def matrix_worker(data):
    """Run one pipeline for a matrix entry (used for parallel execution).

    Args:
        data (dict): parameters for the pipeline (model, options, hooks,
            matrix, pipeline).

    Returns:
        dict: with two fields: success True/False and captured output
        (list of str).
    """
    matrix = data['matrix']
    Logger.get_logger(__name__ + '.worker').info(
        "Processing pipeline for matrix entry '%s'", matrix['name'])
    # Each matrix entry runs with its own environment, tagged with its name.
    env = matrix['env'].copy()
    env.update({'PIPELINE_MATRIX': matrix['name']})
    pipeline = Pipeline(model=data['model'], env=env, options=data['options'])
    pipeline.hooks = data['hooks']
    return pipeline.process(data['pipeline'])
Run pipelines in parallel. Args: data(dict): parameters for the pipeline (model, options, ...). Returns: dict: with two fields: success True/False and captured output (list of str).
juraj-google-style
def validate_user_name(self, user_name, timeout=(- 1)):
    """Verify if a userName is already in use.

    Args:
        user_name: The userName to be verified.
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView, just stops
            waiting for its completion.

    Returns:
        True if the user name is in use, False if it is not.
    """
    uri = ((self.URI + '/validateLoginName/') + user_name)
    return self._client.create_with_zero_body(uri=uri, timeout=timeout)
Verifies if a userName is already in use. Args: user_name: The userName to be verified. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: True if user name is in use, False if it is not.
codesearchnet
def add_status_parser(subparsers, parent_parser):
    """Add the argument parser for the status command.

    Args:
        subparsers: Add parsers to this subparser object.
        parent_parser: The parent argparse.ArgumentParser object.
    """
    status_parser = subparsers.add_parser(
        'status',
        help='Displays information about validator status',
        description="Provides a subcommand to show a validator's status")
    status_subparsers = status_parser.add_subparsers(
        title='subcommands', dest='subcommand')
    status_subparsers.required = True
    add_status_show_parser(status_subparsers, parent_parser)
Adds argument parser for the status command Args: subparsers: Add parsers to this subparser object parent_parser: The parent argparse.ArgumentParser object
codesearchnet
def swo_num_bytes(self):
    """Retrieve the number of bytes in the SWO buffer.

    Args:
        self (JLink): the ``JLink`` instance

    Returns:
        Number of bytes in the SWO buffer.

    Raises:
        JLinkException: on error (negative return from the DLL call).
    """
    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.GET_NUM_BYTES, 0)
    if res < 0:
        raise errors.JLinkException(res)
    return res
Retrieves the number of bytes in the SWO buffer. Args: self (JLink): the ``JLink`` instance Returns: Number of bytes in the SWO buffer. Raises: JLinkException: on error
juraj-google-style
def trim_whitespace(self, text):
    """Remove leading whitespace from each line of a multiline string.

    Args:
        text (string): The text to be unindented.

    Returns:
        string: The unindented block of text.
    """
    return '\n'.join(line.lstrip() for line in text.split('\n'))
Remove leading whitespace from each line of a multiline string Args: text (string): The text to be unindented Returns: string: The unindented block of text
codesearchnet
def to_json(self):
    """Shortcut for the JSON response service data.

    Returns:
        OrderedDict that implements the JSON interface.
    """
    return collections.OrderedDict([
        ('status_code', self.status_code),
        ('status_text', dict(HTTP_CODES).get(self.status_code)),
        ('data', self.data if self.data is not None else {}),
        ('errors', self.errors or []),
    ])
Shortcut for JSON response service data. Returns: Dict that implements the JSON interface.
codesearchnet
def persist_config(run, session, cfg):
    """Persist the configuration as key-value pairs.

    Args:
        run: The run we attach the config to.
        session: The db transaction we belong to.
        cfg: The configuration we want to persist.
    """
    # Imported locally to avoid a circular import at module load time —
    # TODO confirm this is why the import is function-scoped.
    from benchbuild.utils import schema as s
    for cfg_elem in cfg:
        session.add(s.Config(name=cfg_elem, value=cfg[cfg_elem], run_id=run.id))
Persist the configuration in as key-value pairs. Args: run: The run we attach the config to. session: The db transaction we belong to. cfg: The configuration we want to persist.
codesearchnet
def abort(cls, mapreduce_id, **kwargs):
    """Cause a job to abort.

    Writes an ABORT command entity keyed on the job id; the job id is not
    verified as a valid job.

    Args:
        mapreduce_id: The job to abort.
        **kwargs: Passed through to put().
    """
    cls(key_name=('%s:%s' % (mapreduce_id, cls._KEY_NAME)), command=cls.ABORT).put(**kwargs)
Causes a job to abort. Args: mapreduce_id: The job to abort. Not verified as a valid job.
codesearchnet
def get_counter(self, counter_name, default=0):
    """Get the value of the named counter from this job.

    While a job is running, counter values won't be very accurate.

    Args:
        counter_name: name of the counter, as a string.
        default: default value if the counter doesn't exist.

    Returns:
        Value (int) of the named counter.
    """
    # Refresh cached state before reading the counters map.
    self.__update_state()
    return self._state.counters_map.get(counter_name, default)
Get the value of the named counter from this job. When a job is running, counter values won't be very accurate. Args: counter_name: name of the counter in string. default: default value if the counter doesn't exist. Returns: Value in int of the named counter.
juraj-google-style
def create_table_from(self, name, src):
    """Create a new table with the same schema as the source table.

    If the named table already exists, nothing happens.

    Args:
        name (str): The name of the table to create.
        src (str): The name of the source table to duplicate.

    Raises:
        sql.OperationalError: If the source table does not exist.
    """
    query = self.execute("SELECT sql FROM sqlite_master WHERE type='table' and name=?", (src,))
    try:
        cmd = query.fetchone()[0]
    except TypeError:
        raise sql.OperationalError("Cannot copy non-existent table '{0}'".format(src))
    # Bug fix: re.IGNORECASE was previously passed as re.sub's positional
    # `count` argument; it must be supplied via the `flags=` keyword.
    new_cmd = re.sub('(CREATE TABLE) \\w+', '\\1 ' + name, cmd, flags=re.IGNORECASE)
    self.execute(new_cmd)
Create a new table with same schema as the source. If the named table already exists, nothing happens. Arguments: name (str): The name of the table to create. src (str): The name of the source table to duplicate. Raises: sql.OperationalError: If source table does not exist.
codesearchnet
def from_json(cls, json):
    """Deserialize a KeyRanges object from json.

    Args:
        json: a dict of json-compatible fields; must contain a "name" key
            identifying the concrete KeyRanges class.

    Returns:
        a KeyRanges object.

    Raises:
        ValueError: if the json does not name a known KeyRanges class.
    """
    if json["name"] in _KEYRANGES_CLASSES:
        return _KEYRANGES_CLASSES[json["name"]].from_json(json)
    # Bug fix: ValueError was previously given %-style arguments that it
    # never interpolates; format the message explicitly instead.
    raise ValueError("Invalid json %s" % json)
Deserialize from json. Args: json: a dict of json compatible fields. Returns: a KeyRanges object. Raises: ValueError: if the json is invalid.
juraj-google-style
def dockprep(self, force_rerun=False):
    """Prepare a PDB file for docking by converting it to mol2 format.

    Writes a temporary Chimera DockPrep script, runs it headlessly, and
    records the resulting mol2 path on success.

    Args:
        force_rerun (bool): If the method should be rerun even if the
            output file exists.
    """
    log.debug('{}: running dock preparation...'.format(self.id))
    prep_mol2 = op.join(self.dock_dir, '{}_prep.mol2'.format(self.id))
    prep_py = op.join(self.dock_dir, 'prep.py')
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=prep_mol2):
        # Generate the Chimera script that runs DockPrep and writes mol2.
        with open(prep_py, 'w') as f:
            f.write('import chimera\n')
            f.write('from DockPrep import prep\n')
            f.write('models = chimera.openModels.list(modelTypes=[chimera.Molecule])\n')
            f.write('prep(models)\n')
            f.write('from WriteMol2 import writeMol2\n')
            f.write('writeMol2(models, "{}")\n'.format(prep_mol2))
        cmd = 'chimera --nogui {} {}'.format(self.structure_path, prep_py)
        os.system(cmd)
        # Clean up the script and its compiled .pyc sibling.
        os.remove(prep_py)
        os.remove('{}c'.format(prep_py))
    if ssbio.utils.is_non_zero_file(prep_mol2):
        self.dockprep_path = prep_mol2
        log.debug('{}: successful dockprep execution'.format(self.dockprep_path))
    else:
        log.critical('{}: dockprep failed to run on PDB file'.format(self.structure_path))
Prepare a PDB file for docking by first converting it to mol2 format. Args: force_rerun (bool): If method should be rerun even if output file exists
codesearchnet
def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields):
    """Assemble the concatenated metadata dfs together, sorting both indices.

    For example, if horizontally concatenating, the concatenated metadata
    dfs are the column metadata dfs.

    Args:
        concated_meta_dfs (list of pandas dfs)
        remove_all_metadata_fields (bool): if True, drop all metadata
            columns before concatenating.

    Returns:
        all_concated_meta_df_sorted (pandas df)
    """
    if remove_all_metadata_fields:
        for meta_df in concated_meta_dfs:
            meta_df.drop(meta_df.columns, axis=1, inplace=True)
    combined = pd.concat(concated_meta_dfs, axis=0)
    n_rows = combined.shape[0]
    logger.debug("all_concated_meta_df.shape[0]: {}".format(n_rows))
    # Sanity check: concatenation must preserve the total row count.
    assert n_rows == sum(df.shape[0] for df in concated_meta_dfs)
    return combined.sort_index(axis=0).sort_index(axis=1)
Assemble the concatenated metadata dfs together. For example, if horizontally concatenating, the concatenated metadata dfs are the column metadata dfs. Both indices are sorted. Args: concated_meta_dfs (list of pandas dfs) Returns: all_concated_meta_df_sorted (pandas df)
juraj-google-style
def stringify(self, string, phrases, parent=None):
    """Stringify phrases into a styled (ANSI-escaped) string.

    Takes the escaped string and the list of phrases returned by
    self.parse(), replaces the original tagged phrases with styled text,
    and applies the flag combinations determined by the phrase's string
    (for 'always' arguments) or its position. Works recursively to handle
    nested phrases, resetting to the parent phrase's style afterwards.

    Arguments:
        string (str): The escaped string returned by self.parse().
        phrases (list): The list of Phrase objects returned by self.parse().
        parent (Phrase): For recursive calls, the current parent Phrase.

    Returns:
        The finished, beautified string.

    Raises:
        errors.ArgumentError: If a positional argument is out of range, or
            if more positional arguments are requested than were supplied.
    """
    last_tag = 0
    beauty = ''
    for phrase in phrases:
        # Copy the unstyled text between the previous phrase and this one.
        beauty += string[last_tag:phrase.opening]
        if ((phrase.string in self.always) and (not phrase.override)):
            phrase.style = self.always[phrase.string]
        if phrase.arguments:
            # Explicit positional arguments: OR their flags together.
            combination = 0
            for i in phrase.arguments:
                try:
                    combination |= self.positional[i]
                except IndexError:
                    raise errors.ArgumentError("Positional argument '{0}' is out of range!".format(i))
            phrase.style |= combination
        elif ((phrase.string not in self.always) or phrase.increment or phrase.override):
            # Otherwise consume the next positional style in order.
            try:
                combination = self.positional[self.counter]
                if (phrase.increment or (not phrase.override)):
                    self.counter += 1
            except IndexError:
                self.raise_not_enough_arguments(phrase.string)
            phrase.style |= combination
        phrase.style = flags.codify(phrase.style)
        if phrase.nested:
            # Recurse so nested phrases restore this phrase's style.
            phrase.string = self.stringify(phrase.string, phrase.nested, phrase)
        reset = (parent.style if parent else '')
        # Apply this phrase's style, then reset back to the parent's style.
        beauty += '\x1b[{0}m{1}\x1b[0;{2}m'.format(phrase.style, phrase, reset)
        last_tag = (phrase.closing + 1)
    beauty += string[last_tag:]
    return beauty
Stringifies phrases. After parsing of the string via self.parse(), this method takes the escaped string and the list of phrases returned by self.parse() and replaces the original phrases (with tags) with the Phrase-objects in the list and adds the appropriate flag-combinations as determined by the string or the position of the phrase (the string if it's in self.always, i.e. an 'always' argument). This method also works recursively to handle nested phrases (and resetting of parent-phrase styles). Arguments: string (str): The escaped string returned by self.parse(). phrases (list): The list of Phrase-objects returned by self.parse(). parent (Phrase): For recursive calls, the current parent Phrase. Returns: The finished, beautifully beautified string. Raises: errors.ArgumentError: If more positional arguments are requested than were supplied.
codesearchnet
def default_scan(self, region='mainland', expected_num=20, val_thr_num=4, queue_timeout=3, val_timeout=5, out_file='proxies.json', src_files=None):
    """Default scan method, to simplify the usage of the `scan` method.

    Registers the following scan functions before delegating to
    ``self.scan``:

    1. scan_file (one per entry in ``src_files``)
    2. scan_cnproxy (only if region is 'mainland')
    3. scan_free_proxy_list (only if region is 'overseas')
    4. scan_ip84
    5. scan_mimiip

    After scanning, all the proxy info is saved in ``out_file``.

    Args:
        region: Either 'mainland' or 'overseas'.
        expected_num: An integer indicating the expected number of
            proxies; large values may make the scan take a long time.
        val_thr_num: Number of threads used for validating proxies.
        queue_timeout: Timeout (seconds) for getting a candidate proxy
            from the queue.
        val_timeout: Timeout (seconds) when connecting to the test URL
            using a candidate proxy.
        out_file: File name of the output file saving all the proxy info.
        src_files: A file name or a list of file names to scan.
    """
    if expected_num > 30:
        # Logger.warn is deprecated since Python 3.3; use warning().
        self.logger.warning('The more proxy you expect, the more time it will take. It is highly recommended to limit the expected num under 30.')
    proxy_scanner = ProxyScanner()
    # Normalize src_files so a single file name can be passed directly.
    if src_files is None:
        src_files = []
    elif isinstance(src_files, str):
        src_files = [src_files]
    for filename in src_files:
        proxy_scanner.register_func(proxy_scanner.scan_file, {'src_file': filename})
    # Region-specific free-proxy sources.
    if region == 'mainland':
        proxy_scanner.register_func(proxy_scanner.scan_cnproxy, {})
    elif region == 'overseas':
        proxy_scanner.register_func(proxy_scanner.scan_free_proxy_list, {})
    # Sources that cover both regions.
    proxy_scanner.register_func(proxy_scanner.scan_ip84, {'region': region, 'page': 5})
    proxy_scanner.register_func(proxy_scanner.scan_mimiip, {'region': region, 'page': 5})
    self.scan(proxy_scanner, expected_num, val_thr_num, queue_timeout, val_timeout, out_file)
Default scan method, to simplify the usage of the `scan` method. It will register the following scan functions: 1. scan_file 2. scan_cnproxy (if region is mainland) 3. scan_free_proxy_list (if region is overseas) 4. scan_ip84 5. scan_mimiip After scanning, all the proxy info will be saved in out_file. Args: region: Either 'mainland' or 'overseas' expected_num: An integer indicating the expected number of proxies; if this argument is set too high, the scanning process may take a long time to finish. val_thr_num: Number of threads used for validating proxies. queue_timeout: An integer indicating the timeout for getting a candidate proxy from the queue. val_timeout: An integer indicating the timeout when connecting to the test URL using a candidate proxy. out_file: the file name of the output file saving all the proxy info src_files: A list of file names to scan
codesearchnet
def stop(self, block: bool=False, timeout: Optional[float]=None):
    """Signal the background generation thread to stop.

    Args:
        block: Whether to wait for the thread to terminate.
        timeout: Maximum time in seconds to wait for the thread to stop.
    """
    if self._generation_thread is None:
        logger.warning('Manager not started.')
        return
    already_stopping = self.stop_event.is_set()
    if not already_stopping:
        self.stop_event.set()
        logger.info('Stopping continuous batching manager...')
    if block:
        self.join(timeout)
Signal the background thread to stop. Args: block: Whether to wait for the thread to stop timeout: Maximum time to wait for the thread to stop
github-repos
def process(self, element, *args, **kwargs):
    """Decode each element's payload and emit its text together with its id.

    Args:
        element: An element carrying UTF-8 encoded bytes in ``data`` and
            an ``'id'`` entry in its ``attributes`` mapping.

    Yields:
        dict: ``{'text': <decoded string>, 'id': <element id>}``.
    """
    decoded_text = element.data.decode('utf-8')
    element_id = element.attributes['id']
    yield {'text': decoded_text, 'id': element_id}
For each element in the input PCollection, retrieve the id and decode the bytes into string Args: element: The element that is being processed.
github-repos
def read(self, offset, size):
    """Read a byte string from the image object at the specified offset.

    Args:
        offset (int): offset where to start reading.
        size (int): number of bytes to read.

    Returns:
        bytes: data read.
    """
    file_object = self._file_object
    # Position from the start of the file, then read the requested span.
    file_object.seek(offset, os.SEEK_SET)
    return file_object.read(size)
Reads a byte string from the image object at the specified offset. Args: offset (int): offset where to start reading. size (int): number of bytes to read. Returns: bytes: data read.
juraj-google-style
def mix_over_posterior_draws(means, variances):
    """Construct a predictive normal distribution mixing over posterior draws.

    Args:
        means: float `Tensor` of shape
            `[num_posterior_draws, ..., num_timesteps]`.
        variances: float `Tensor` of shape
            `[num_posterior_draws, ..., num_timesteps]`.

    Returns:
        mixture_dist: `tfd.MixtureSameFamily(tfd.Independent(tfd.Normal))`
            instance representing a uniform mixture over the posterior
            samples, with `batch_shape = ...` and
            `event_shape = [num_timesteps]`.
    """
    with tf.compat.v1.name_scope('mix_over_posterior_draws', values=[means, variances]):
        num_draws = dist_util.prefer_static_value(tf.shape(input=means))[0]
        # Move the posterior-draw axis into a components axis so that each
        # draw becomes one mixture component over the timestep axis.
        loc = dist_util.move_dimension(means, 0, -2)
        scale = tf.sqrt(dist_util.move_dimension(variances, 0, -2))
        component_observations = tfd.Independent(
            distribution=tfd.Normal(loc=loc, scale=scale),
            reinterpreted_batch_ndims=1)
        # Zero logits give a uniform mixture over all posterior draws.
        uniform_weights = tfd.Categorical(
            logits=tf.zeros([num_draws], dtype=component_observations.dtype))
        return tfd.MixtureSameFamily(
            mixture_distribution=uniform_weights,
            components_distribution=component_observations)
Construct a predictive normal distribution that mixes over posterior draws. Args: means: float `Tensor` of shape `[num_posterior_draws, ..., num_timesteps]`. variances: float `Tensor` of shape `[num_posterior_draws, ..., num_timesteps]`. Returns: mixture_dist: `tfd.MixtureSameFamily(tfd.Independent(tfd.Normal))` instance representing a uniform mixture over the posterior samples, with `batch_shape = ...` and `event_shape = [num_timesteps]`.
codesearchnet
def _ParseInsserv(self, data):
    """Parse /etc/insserv.conf* entries that define system facilities.

    Full format details are in man 8 insserv, but the basic structure is:
      $variable facility1 facility2
      $second_variable facility3 $variable
    An init script that specifies Required-Start: $second_variable needs
    to be expanded to facility1 facility2 facility3, which is what the
    expansion step below produces.

    Args:
        data: A string of insserv definitions.
    """
    parser = config_file.FieldParser()
    # First field is the facility name; the remainder are its definitions.
    raw = {entry[0]: entry[1:] for entry in parser.ParseEntries(data)}
    # Strip the <...> markers insserv places around facility names.
    facilities = {name.replace('<', '').replace('>', ''): defs
                  for name, defs in raw.items()}
    for name, defs in facilities.items():
        self.insserv[name] = []
        for definition in defs:
            self.insserv[name].extend(self._InsservExpander(facilities, definition))
/etc/insserv.conf* entries define system facilities. Full format details are in man 8 insserv, but the basic structure is: $variable facility1 facility2 $second_variable facility3 $variable Any init script that specifies Required-Start: $second_variable needs to be expanded to facility1 facility2 facility3. Args: data: A string of insserv definitions.
juraj-google-style
def QA_fetch_user(user_cookie, db=None):
    """Fetch all account records belonging to a user.

    Arguments:
        user_cookie (str): the unique cookie id for a user.

    Keyword Arguments:
        db: database to query; defaults to the module-level DATABASE.

    Returns:
        list --- [ACCOUNT] matching account documents, with '_id' excluded.
    """
    # Resolve the default lazily (late binding) so callers can inject a
    # database and DATABASE is only touched when actually needed.
    if db is None:
        db = DATABASE
    # Bug fix: previously queried DATABASE.account directly, silently
    # ignoring the `db` parameter.
    collection = db.account
    return [res for res in collection.find({'user_cookie': user_cookie}, {'_id': 0})]
Get the user's account records. Arguments: user_cookie (str): the unique cookie id for a user. Keyword Arguments: db: database to query. Returns: list --- [ACCOUNT]
codesearchnet
def __init__(self, prev_hash=None, prev_index=None):
    """Create an instance referencing a previous transaction output.

    Args:
        prev_hash (UInt256): hash of the previous output.
        prev_index (int): index of the previous output.
    """
    self.PrevIndex = prev_index
    self.PrevHash = prev_hash
Create an instance. Args: prev_hash (UInt256): hash of the previous output. prev_index (int):
juraj-google-style