code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def traverse_preorder(self, leaves=True, internal=True): for node in self.root.traverse_preorder(leaves=leaves, internal=internal): (yield node)
Perform a preorder traversal of the ``Node`` objects in this ``Tree`` Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
codesearchnet
def leap_days_between(start_date, end_date): def leap_days_since_year_0(date_tensor): year = date_tensor.year() month = date_tensor.month() leap_years_since_0 = year needs_adjustment = is_leap_year(year) & (month <= 2) return leap_years_since_0 - tf.where(needs_adjustment, ...
Calculates number of leap days (29 Feb) between two dates. 'start_date' is included and 'end_date' is excluded from the period. For example, for dates `2019-12-24` and `2024-3-10` the result is 2: there is 29 Feb 2020 and 29 Feb 2024 between 24 Dec 2019 (inclusive) and 10 Mar 2024 (exclusive). If `end_date` is earli...
github-repos
def _QueryHash(self, nsrl_socket, digest): try: query = 'QUERY {0:s}\n'.format(digest).encode('ascii') except UnicodeDecodeError: logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest)) return False response = None try: nsrl_socket.sendall(query) r...
Queries nsrlsvr for a specific hash. Args: nsrl_socket (socket._socketobject): socket of connection to nsrlsvr. digest (str): hash to look up. Returns: bool: True if the hash was found, False if not or None on error.
codesearchnet
def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable): variable_key = _var_key(variable) slot_dict = self._slots.get(variable_key, {}) slot_variable = slot_dict.get(slot_name, None) if slot_variable is None and context.executing_eagerly() and slot_variable_position.is_s...
Restore a slot variable's value, possibly creating it. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these ...
github-repos
def MapByteStream(self, byte_stream, byte_offset=0, **unused_kwargs): return byte_stream[byte_offset:byte_offset + self.byte_size]
Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
juraj-google-style
def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor]=None): pixel_values = pixel_values.type(self.visual.dtype) image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw) return image_embeds
Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The t...
github-repos
def nic_s(msg): tc = typecode(msg) if (tc != 31): raise RuntimeError(('%s: Not a status operation message, expecting TC = 31' % msg)) msgbin = common.hex2bin(msg) nic_s = int(msgbin[75]) return nic_s
Obtain NIC supplement bit, TC=31 message Args: msg (string): 28 bytes hexadecimal message string Returns: int: NICs number (0 or 1)
codesearchnet
def get_identity_broadcaster(cls, nvals, dtype=None): return _GatherLayerBroadcaster(math_ops.range(nvals, dtype=dtype))
Create an identity broadcaster. TODO(martinz): an identity broadcaster can be far more efficient than a generic broadcaster. Add an optimized implementation. Args: nvals: the number of values for the broadcaster. dtype: the dtype of the broadcaster, or None to use the dtype of nvals. Returns: an identity broadcaster ...
github-repos
def get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None): length = (- 1) if (category in ('snv', 'indel', 'cancer')): if (ref_len == alt_len): length = alt_len else: length = abs((ref_len - alt_len)) elif (category == 'sv'): if (svtype == ...
Return the length of a variant Args: alt_len(int) ref_len(int) category(str) svtype(str) svlen(int)
codesearchnet
def load(self, path): path = os.path.expandvars(os.path.expanduser(path)) gdg = cgaddag.gdg_load(path.encode('ascii')) if (not gdg): errno = ctypes.c_int.in_dll(ctypes.pythonapi, 'errno').value raise OSError(errno, os.strerror(errno), path) self.__del__() self.gdg = gdg.contents
Load a GADDAG from file, replacing the words currently in this GADDAG. Args: path: path to saved GADDAG to be loaded.
codesearchnet
def _ValidateCacheEntryHeader(self, cache_entry_header): return ( cache_entry_header.request_size > 0 and cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH and cache_entry_header.major_format_version == 1 and cache_entry_header.last_fetched_time > 0 and cache_en...
Determines whether the values in the cache entry header are valid. Args: cache_entry_header (firefox_cache1_entry_header): cache entry header. Returns: bool: True if the cache entry header is valid.
juraj-google-style
def get_canonical_path(resource_key, pk=None): if resource_key not in resource_map: return None base_path = get_script_prefix() + resource_map[resource_key]['path'] if pk: return '%s/%s/' % (base_path, pk) else: return base_path
Return canonical resource path. Arguments: resource_key - Canonical resource key i.e. Serializer.get_resource_key(). pk - (Optional) Object's primary key for a single-resource URL. Returns: Absolute URL as string.
juraj-google-style
def RemoveObject(self, identifier): if (identifier not in self._values): raise KeyError('Missing cached object for identifier: {0:s}'.format(identifier)) del self._values[identifier]
Removes a cached object based on the identifier. This method ignores the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache.
codesearchnet
def register_actor(name, actor_handle): if (not isinstance(name, str)): raise TypeError('The name argument must be a string.') if (not isinstance(actor_handle, ray.actor.ActorHandle)): raise TypeError('The actor_handle argument must be an ActorHandle object.') actor_name = _calculate_key(nam...
Register a named actor under a string key. Args: name: The name of the named actor. actor_handle: The actor object to be associated with this name
codesearchnet
def label(self, input_grid): marked = self.find_local_maxima(input_grid) marked = np.where((marked >= 0), 1, 0) markers = splabel(marked)[0] return markers
Labels input grid using enhanced watershed algorithm. Args: input_grid (numpy.ndarray): Grid to be labeled. Returns: Array of labeled pixels
codesearchnet
def read_geojson(filename): json_file = open(filename) data = json.load(json_file) json_file.close() times = data["properties"]["times"] main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[]) attribute_data = dict() for feature in data["features"]: for main_name in mai...
Reads a geojson file containing an STObject and initializes a new STObject from the information in the file. Args: filename: Name of the geojson file Returns: an STObject
juraj-google-style
def minute(self, value=None): if (value is not None): try: value = int(value) except ValueError: raise ValueError('value {} need to be of type int for field `minute`'.format(value)) if (value < 0): raise ValueError('value need to be greater or equal 0 for ...
Corresponds to IDD Field `minute` Args: value (int): value for IDD Field `minute` value >= 0 value <= 60 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def AddProcessingOptions(self, argument_group): argument_helper_names = ['temporary_directory', 'zeromq'] if self._CanEnforceProcessMemoryLimit(): argument_helper_names.append('process_resources') helpers_manager.ArgumentHelperManager.AddCommandLineArguments( argument_group, names=argumen...
Adds processing options to the argument group Args: argument_group (argparse._ArgumentGroup): argparse argument group.
juraj-google-style
def load_config(paths=DEFAULT_CONFIG_PATHS): config = Config() for path in paths: if os.path.isfile(path): config.load_pyfile(path) return config
Attempt to load config from paths, in order. Args: paths (List[string]): list of paths to python files Return: Config: loaded config
codesearchnet
def dummy_inputs(self): batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) re...
Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs
github-repos
def configure_profile(msg_type, profile_name, data, auth): with jsonconfig.Config("messages", indent=4) as cfg: write_data(msg_type, profile_name, data, cfg) write_auth(msg_type, profile_name, auth, cfg) print("[+] Configuration entry for <" + profile_name + "> created.") print("[+] Co...
Create the profile entry. Args: :msg_type: (str) message type to create config entry. :profile_name: (str) name of the profile entry :data: (dict) dict values for the 'settings' :auth: (dict) auth parameters
juraj-google-style
def set_api_url(self, api_url="https: old_api_url = self._api_url old_lang = self._lang self._lang = lang.lower() self._api_url = api_url.format(lang=self._lang) try: self._get_site_info() self.__supported_languages = None except MediaWi...
Set the API URL and language Args: api_url (str): API URL to use lang (str): Language of the API URL Raises: :py:func:`mediawiki.exceptions.MediaWikiAPIURLError`: if the \ url is not a valid MediaWiki site
juraj-google-style
def __init__(self, output_mediator): super(JSONOutputModule, self).__init__(output_mediator) self._event_counter = 0
Initializes the output module object. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
juraj-google-style
def fit(self, x_train, y_train, x_valid=None, y_valid=None, epochs=1, batch_size=32, verbose=1, callbacks=None, shuffle=True): p = IndexTransformer(initial_vocab=self.initial_vocab, use_char=self.use_char) p.fit(x_train, y_train) embeddings = filter_embeddings(self.embeddings, p._word_vocab.vocab, self.word...
Fit the model for a fixed number of epochs. Args: x_train: list of training data. y_train: list of training target (label) data. x_valid: list of validation data. y_valid: list of validation target (label) data. batch_size: Integer. Number of samples per gradient update. If unspecified, `batch_size` will default to 32...
codesearchnet
def add(self, other): if not isinstance(other, Chi): other = Chi(other) if self.dim != other.dim: raise QiskitError("other QuantumChannel dimensions are not equal") return Chi(self._data + other.data, self._input_dims, self._output_dims)
Return the QuantumChannel self + other. Args: other (QuantumChannel): a quantum channel. Returns: Chi: the linear addition self + other as a Chi object. Raises: QiskitError: if other is not a QuantumChannel subclass, or has incompatible dimensions.
juraj-google-style
def upload(self, params={}): if (self.upload_token is not None): status = self.check() if (status['status'] != 4): return self.commit() else: self.new_slice() while (self.slice_task_id != 0): self.upload_slice() return self.comm...
start uploading the file until upload is complete or error. This is the main method to used, If you do not care about state of process. Args: params: a dict object describe video info, eg title, tags, description, category. all video params see the doc of prepare_video_params. Returns: return video_id if upload succe...
codesearchnet
def import_object_from_path(path, object): with open(path) as f: return import_object_from_string_code(f.read(), object)
Used to import an object from an absolute path. This function takes an absolute path and imports it as a Python module. It then returns the object with name `object` from the imported module. Args: path (string): Absolute file path of .py file to import object (string): Name of object to extract from imported module
codesearchnet
def _protobuf_value_type(value): if value.HasField('number_value'): return api_pb2.DATA_TYPE_FLOAT64 if value.HasField('string_value'): return api_pb2.DATA_TYPE_STRING if value.HasField('bool_value'): return api_pb2.DATA_TYPE_BOOL return None
Returns the type of the google.protobuf.Value message as an api.DataType. Returns None if the type of 'value' is not one of the types supported in api_pb2.DataType. Args: value: google.protobuf.Value message.
codesearchnet
def log_every_n_seconds(level, msg, n_seconds, *args): should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds) log_if(level, msg, should_log, *args)
Logs 'msg % args' at level 'level' iff 'n_seconds' elapsed since last call. Logs the first call, logs subsequent calls if 'n' seconds have elapsed since the last logging call from the same call site (file + line). Not thread-safe. Args: level: int, the absl logging level at which to log. msg: str, the message to be l...
codesearchnet
def Name(self): name = '' if self.Version: name = self.Version.UserAgent return name
Get the peer name. Returns: str:
codesearchnet
def infer_namespace(ac): namespaces = infer_namespaces(ac) if (not namespaces): return None if (len(namespaces) > 1): raise BioutilsError('Multiple namespaces possible for {}'.format(ac)) return namespaces[0]
Infer the single namespace of the given accession This function is convenience wrapper around infer_namespaces(). Returns: * None if no namespaces are inferred * The (single) namespace if only one namespace is inferred * Raises an exception if more than one namespace is inferred >>> infer_namespace("ENST00000530893.6...
codesearchnet
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_...
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and...
github-repos
def hardware_version(self): version = self._dll.JLINKARM_GetHardwareVersion() major = version / 10000 % 100 minor = version / 100 % 100 return '%d.%02d' % (major, minor)
Returns the hardware version of the connected J-Link as a major.minor string. Args: self (JLink): the ``JLink`` instance Returns: Hardware version string.
juraj-google-style
def __init__(self, directory, loader_factory, path_filter=lambda x: True): if directory is None: raise ValueError('A directory is required') if loader_factory is None: raise ValueError('A loader factory is required') self._directory = directory self._path = None self._loader_factory...
Constructs a new DirectoryWatcher. Args: directory: The directory to load files from. loader_factory: A factory for creating loaders. The factory should take a path and return an object that has a Load method returning an iterator that will yield all events that have not been yielded yet. path_filter: If specified, on...
juraj-google-style
def info(self, **kwargs): path = self._get_series_id_season_number_episode_number_path('info') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the primary information about a TV episode by combination of a season and episode number. Args: language: (optional) ISO 639 code. append_to_response: (optional) Comma separated, any TV series method. Returns: A dict respresentation of the JSON returned from the API.
codesearchnet
def forward(self, evoformer_output_dict, aatype, mask=None, _offload_inference=False): s = evoformer_output_dict['single'] if mask is None: mask = s.new_ones(s.shape[:-1]) s = self.layer_norm_s(s) z = self.layer_norm_z(evoformer_output_dict['pair']) z_reference_list = None if _offload_in...
Args: evoformer_output_dict: Dictionary containing: "single": [*, N_res, C_s] single representation "pair": [*, N_res, N_res, C_z] pair representation aatype: [*, N_res] amino acid indices mask: Optional [*, N_res] sequence mask Returns: A dictionary of outputs
github-repos
def atmospheric_station_pressure(self, value=999999): if value is not None: try: value = int(value) except ValueError: raise ValueError( 'value {} need to be of type int ' 'for field `atmospheric_station_pre...
Corresponds to IDD Field `atmospheric_station_pressure` Args: value (int): value for IDD Field `atmospheric_station_pressure` Unit: Pa value > 31000 value < 120000 Missing value: 999999 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `val...
juraj-google-style
def update_ip_info(self, since_days=10, save=False, force=False): try: last_check = IPInfoCheck.objects.get( ip_address=self.client_ip_address) since_last = datetime.date.today() - last_check.date if since_last <= datetime.timed...
Update the IP info. Args: since_days (int): if checked less than this number of days ago, don't check again (default to 10 days). save (bool): whether to save anyway or not. force (bool): whether to update ip_info to last checked one. Returns: bool: check was run. IPInfo might not have been updated.
juraj-google-style
def _fix_fdef_in_place(fdef, functions, shared_name_suffix, new_gradient_op_types): orig_name = fdef.signature.name contains_unsaved_custom_gradients = False for node_def in fdef.node_def: fix_node_def(node_def, functions, shared_name_suffix) op_type = _get_gradient_op_type(node_def) ...
Fixes a FunctionDef proto to be loaded in current context. In particular, when loading a function library into an eager context, one must rename the functions to avoid conflicts with existent functions. Args: fdef: FunctionDef proto to fix. It is mutated in-place. functions: map from function name to a ConcreteFuncti...
github-repos
def UsesArtifact(self, artifacts): if isinstance(artifacts, string_types): return artifacts in self.artifacts else: return any(True for artifact in artifacts if artifact in self.artifacts)
Determines if the check uses the specified artifact. Args: artifacts: Either a single artifact name, or a list of artifact names Returns: True if the check uses a specific artifact.
juraj-google-style
def _get_account_xml(soco): device = (soco or discovery.any_soco()) log.debug('Fetching account data from %s', device) settings_url = 'http: result = requests.get(settings_url).content log.debug('Account data: %s', result) return result
Fetch the account data from a Sonos device. Args: soco (SoCo): a SoCo instance to query. If soco is `None`, a random device will be used. Returns: str: a byte string containing the account data xml
codesearchnet
def GetTimeOfDay(self): normalized_timestamp = self._GetNormalizedTimestamp() if (normalized_timestamp is None): return (None, None, None) (_, hours, minutes, seconds) = self._GetTimeValues(normalized_timestamp) return (hours, minutes, seconds)
Retrieves the time of day represented by the date and time values. Returns: tuple[int, int, int]: hours, minutes, seconds or (None, None, None) if the date and time values do not represent a time of day.
codesearchnet
def make_sgf(move_history, result_string, ruleset='Chinese', komi=7.5, white_name=PROGRAM_IDENTIFIER, black_name=PROGRAM_IDENTIFIER, comments=[]): boardsize = go.N game_moves = ''.join((translate_sgf_move(*z) for z in itertools.zip_longest(move_history, comments))) result = result_string return SGF_TEMP...
Turn a game into SGF. Doesn't handle handicap games or positions with incomplete history. Args: move_history: iterable of PlayerMoves result_string: "B+R", "W+0.5", etc. comments: iterable of string/None. Will be zipped with move_history.
codesearchnet
def _error_and_gradient(self, x): coords = x.reshape((self.m, self.n)) d = squareform(pdist(coords)) diff = self.D - d error = self._error(diff) gradient = self._gradient(diff, d, coords) return error, gradient.ravel()
Compute the error and the gradient. This is the function optimized by :obj:`scipy.optimize.minimize`. Args: x (`array-like`): [`m` * `n`, ] matrix. Returns: `tuple`: containing: - Error (`float`) - Gradient (`np.array`) [`m`, `n`]
juraj-google-style
def copy_pkg(self, filename, _): basename = os.path.basename(filename) self._copy(filename, os.path.join(self.connection["mount_point"], "Packages", basename))
Copy a package to the repo's Package subdirectory. Args: filename: Path for file to copy. _: Ignored. Used for compatibility with JDS repos.
juraj-google-style
def recipe_twitter(config, auth_read, auth_write, recipe_name, twitter_secret, recipe_slug, twitter_key): dataset(config, {'description': 'Create a dataset where data will be combined and transfored for upload.', 'auth': auth_write, 'dataset': recipe_slug}) sheets(config, {'description': 'Read mapping of hash t...
Adjusts line item settings based on Twitter hashtags and locations specified in a sheet. Args: auth_read (authentication) - Credentials used for reading data. auth_write (authentication) - Credentials used for writing data. recipe_name (string) - Name of sheet where Line Item settings will be read from. twitter_secret...
github-repos
def get(self, name, default=None): option = self._options.get(name, None) if option is None: return default return option.__get__(self)
Fetch an option from the dictionary. Args: name (str): The name of the option. default: The value to return if the name is missing. Returns: any: The value stored by the option. This method resolves the option to its value rather than returning the option object itself. Use the 'options()' method or this object's it...
juraj-google-style
def parse_psqs(psqs_results_file): psqs_results = pd.read_csv(psqs_results_file, sep='\t', header=None) psqs_results['pdb_file'] = psqs_results[0].apply(lambda x: str(x).strip('./').strip('.pdb')) psqs_results = psqs_results.rename(columns = {1:'psqs_local', 2:'psqs_burial', 3:'psqs_contact', 4:...
Parse a PSQS result file and returns a Pandas DataFrame of the results Args: psqs_results_file: Path to psqs results file Returns: Pandas DataFrame: Summary of PSQS results
juraj-google-style
def clear(self, rows=None): rows = tf.range(self._capacity) if rows is None else rows assert rows.shape.ndims == 1 return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
Reset episodes in the memory. Internally, this only sets their lengths to zero. The memory entries will be overridden by future calls to append() or replace(). Args: rows: Episodes to clear, defaults to all. Returns: Operation.
juraj-google-style
def _convert_reward(self, reward): if (not np.isfinite(reward).all()): raise ValueError('Infinite reward encountered.') return np.array(reward, dtype=np.float32)
Convert the reward to 32 bits. Args: reward: Numpy reward. Raises: ValueError: Rewards contain infinite values. Returns: Numpy reward with 32-bit data type.
codesearchnet
def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05): m_projs = np.array([np.dot(site.coords, self.mvec) for site in slab.sites]) mask = ((m_projs - np.amax(m_projs)) >= (- height)) surf_sites = [slab.sites[n] for n in np.where(mask)[0]] if xy_tol: surf_sites = [s for (h, s) in ...
This method finds surface sites by determining which sites are within a threshold value in height from the topmost site in a list of sites Args: site_list (list): list of sites from which to select surface sites height (float): threshold in angstroms of distance from topmost site in slab along the slab c-vector to inc...
codesearchnet
def map_batch_parallel(input_list, batch_size, item_mapper=None, batch_mapper=None, flatten=True, n_jobs=(- 1), **kwargs): if ((item_mapper is None) and (batch_mapper is None)): raise ValueError('You should specify either batch_mapper or item_mapper.') if (batch_mapper is None): batch_mapper = _...
Split the data into batches and process each batch in its own thread. Args: input_list: An input object that has a list-like interface (indexing and slicing). item_mapper: (optional) A function to apply to each item in the batch. batch_mapper: (optional) A function to apply to each batch. Either item_mapper or batch_m...
codesearchnet
def difference(self, second_iterable, selector=identity): if self.closed(): raise ValueError('Attempt to call difference() on a closed Queryable.') if (not is_iterable(second_iterable)): raise TypeError('Cannot compute difference() with second_iterableof non-iterable {0}'.format(str(type(second_...
Returns those elements which are in the source sequence which are not in the second_iterable. This method is equivalent to the Except() LINQ operator, renamed to a valid Python identifier. Note: This method uses deferred execution, but as soon as execution commences the entirety of the second_iterable is consumed; th...
codesearchnet
def transformer_revnet_encoder(encoder_input, encoder_self_attention_bias, hparams, name="encoder"): def f(x, side_input): encoder_self_attention_bias = side_input[0] old_hid_size = hparams.hidden_size h...
A stack of transformer layers. Args: encoder_input: a Tensor encoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensors
juraj-google-style
def right_shift_blockwise(x, query_shape, name=None): with tf.variable_scope( name, default_name="right_shift_blockwise", values=[x]): x_list_shape = x.get_shape().as_list() x_shape = common_layers.shape_list(x) x = tf.expand_dims(x, axis=1) x = pad_to_multiple_2d(x, query_shape) pad...
Right shifts once in every block. Args: x: a tensor of shape [batch, height, width, depth] query_shape: A 2d tuple of ints name: a string Returns: output: a tensor of the same shape as x
juraj-google-style
def __init__(self, variant, building): self.variant = variant self.building = building
Create a package variant. Args: variant (`Variant`): Package variant. building (bool): True if a build is occurring.
juraj-google-style
def key_vals_dict_to_tuple_list(key_vals_dict, fill=float('nan')): tuple_list = [ ] if not key_vals_dict: return tuple_list vlen = max([len(vs) for vs in itertools.chain(*key_vals_dict.values())]) for k, vs in key_vals_dict.items(): try: tuple_list.extend([k + tuple(v) + (fi...
Convert ``key_vals_dict`` to `tuple_list``. Args: key_vals_dict (dict): The first parameter. fill: a value to fill missing data Returns: A list of tuples
juraj-google-style
def to_string(cls, error_code): if error_code == cls.ZONE_NOT_FOUND_ERROR: return 'Zone not found' return super(JLinkReadErrors, cls).to_string(error_code)
Returns the string message for the given ``error_code``. Args: cls (JLinkReadErrors): the ``JLinkReadErrors`` class error_code (int): error code to convert Returns: An error string corresponding to the error code. Raises: ValueError: if the error code is invalid.
juraj-google-style
def DeleteCampaignFeed(client, campaign_feed): campaign_feed_service = client.GetService('CampaignFeedService', 'v201809') operation = {'operand': campaign_feed, 'operator': 'REMOVE'} campaign_feed_service.mutate([operation])
Deletes a campaign feed. Args: client: an AdWordsClient instance. campaign_feed: the campaign feed to delete.
codesearchnet
def open(self, host, port=23): self._telnet_client.open(host, port) config_str = self._telnet_client.cmd('MN?') if config_str.startswith('MN='): config_str = config_str[len('MN='):] self.properties = dict(zip(['model', 'max_freq', 'max_atten'], config_str.split('-', 2))) self.max_atten = flo...
Opens a telnet connection to the desired AttenuatorDevice and queries basic information. Args: host: A valid hostname (IP address or DNS-resolvable name) to an MC-DAT attenuator instrument. port: An optional port number (defaults to telnet default 23)
codesearchnet
def sample(self, count=5, fields=None, sampling=None, use_cache=True, dialect=None, billing_tier=None): return Query.sampling_query(self._sql, self._context, count=count, fields=fields, sampling=sampling, udfs=self._udfs, data_sources=self._data_sources).results(use_cache=use_cache, dialect=dialect, billing_tier=bi...
Retrieves a sampling of rows for the query. Args: count: an optional count of rows to retrieve which is used if a specific sampling is not specified (default 5). fields: the list of fields to sample (default None implies all). sampling: an optional sampling strategy to apply to the table. use_cache: whether to use cac...
codesearchnet
def review_score(self, reviewer, product): return self._g.retrieve_review(reviewer, product).score
Find a review score from a given reviewer to a product. Args: reviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`. product: Product i.e. an instance of :class:`ria.bipartite.Product`. Returns: A review object representing the review from the reviewer to the product.
codesearchnet
def get_request_header(self): if (self._client_id is not None): self._request_header.client_identifier.resource = self._client_id return self._request_header
Return ``request_header`` for use when constructing requests. Returns: Populated request header.
codesearchnet
def fit_texture(layer): x, y = layer x = (x - np.nanmin(x)) / (np.nanmax(x) - np.nanmin(x)) y = (y - np.nanmin(y)) / (np.nanmax(y) - np.nanmin(y)) return x, y
Fits a layer into a texture by scaling each axis to (0, 1). Does not preserve aspect ratio (TODO: make this an option). Args: layer (layer): the layer to scale Returns: texture: A texture.
juraj-google-style
def pnl_search(self, asset_manager_id, pnl_type, business_date, **kwargs): self.logger.info(('Retrieving Pnls - Asset Manager: %s - Business Date: %s' % (asset_manager_id, business_date))) url = ('%s/pnls/%s' % (self.endpoint, asset_manager_id)) search_params = {'pnl_type': pnl_type, 'business_date': busine...
Search pnl records. Args: asset_manager_id (int): id of asset manager owning the pnl records pnl_type (str): either "Position" or "Transaction business_date (date): date of the pnl records to return book_ids (list): book id filter on pnl records asset_ids (list): asset id filter on pnl records transaction_ids (list): ...
codesearchnet
def ParseMany(text): precondition.AssertType(text, Text) if compatibility.PY2: text = text.encode('utf-8') return list(yaml.safe_load_all(text))
Parses many YAML documents into a list of Python objects. Args: text: A YAML source with multiple documents embedded. Returns: A list of Python data structures corresponding to the YAML documents.
codesearchnet
def from_proto(saver_def, import_scope=None): return Saver(saver_def=saver_def, name=import_scope)
Returns a `Saver` object created from `saver_def`. Args: saver_def: a `SaverDef` protocol buffer. import_scope: Optional `string`. Name scope to use. Returns: A `Saver` built from saver_def.
github-repos
def plot_tree3d(ax, tree, diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA): segs = [(s[0][COLS.XYZ], s[1][COLS.XYZ]) for s in iter_segments(tree)] linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth) color = _get_color(color, tree.type) coll...
Generates a figure of the tree in 3d. If the tree contains one single point the plot will be empty \ since no segments can be constructed. Args: ax(matplotlib axes): on what to plot tree(neurom.core.Tree or neurom.core.Neurite): plotted tree diameter_scale(float): Scale factor multiplied with segment diameters before...
codesearchnet
def get_dependencies(self): all_deps = OrderedSet() for (key, _) in list(self.__config.items()): if (key in self.__cli): continue if key.endswith('sources'): all_deps |= self.get_sources(key[:((len('sources') * (- 1)) - 1)]) for (key, _) in list(self.__cli.items()): ...
Retrieve the set of all dependencies for a given configuration. Returns: utils.utils.OrderedSet: The set of all dependencies for the tracked configuration.
codesearchnet
def patch_deepCopy(self, patches): patchesCopy = [] for patch in patches: patchCopy = patch_obj() patchCopy.diffs = patch.diffs[:] patchCopy.start1 = patch.start1 patchCopy.start2 = patch.start2 patchCopy.length1 = patch.length1 patchCopy.length2 = patch.length2 ...
Given an array of patches, return another array that is identical. Args: patches: Array of Patch objects. Returns: Array of Patch objects.
juraj-google-style
def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True, role=settings.DEFAULT_ASSISTANT_ROLE): name = os.path.splitext(os.path.basename(source))[0] yaml_checker.check(source, y) assistant = yaml_assistant.YamlAssistant(name, y, source, superassistant, fully_loaded=fully_loaded, role=role)...
Constructs instance of YamlAssistant loaded from given structure y, loaded from source file source. Args: source: path to assistant source file y: loaded yaml structure superassistant: superassistant of this assistant Returns: YamlAssistant instance constructed from y with source file source Raises: YamlError: if the ...
codesearchnet
def notify(self, new_issues, existing_issues, fixed_issues): if len(new_issues + existing_issues + fixed_issues) > 0: maxlen = max(len(x['properties']['source']) for x in (new_issues + existing_issues + fixed_issues)) + 2 text_tmpl = get_template('domain_hijacking.txt') ...
Send notifications (email, slack, etc.) for any issues that are currently open or have just been closed Args: new_issues (`list` of :obj:`DomainHijackIssue`): List of newly discovered issues existing_issues (`list` of :obj:`DomainHijackIssue`): List of existing open issues fixed_issues (`list` of `dict`): List of fixed...
juraj-google-style
def _GetNumberOfSeconds(self, fat_date_time): day_of_month = (fat_date_time & 0x1f) month = ((fat_date_time >> 5) & 0x0f) year = (fat_date_time >> 9) & 0x7f days_per_month = self._GetDaysPerMonth(year, month) if day_of_month < 1 or day_of_month > days_per_month: raise ValueError('Day of ...
Retrieves the number of seconds from a FAT date time. Args: fat_date_time (int): FAT date time. Returns: int: number of seconds since January 1, 1980 00:00:00. Raises: ValueError: if the month, day of month, hours, minutes or seconds value is out of bounds.
juraj-google-style
def __init__(self, map_name, timestamp_dir, cache_options, automount_mountpoint=None): super(AutomountUpdater, self).__init__(map_name, timestamp_dir, cache_options, automount_mountpoint) self.local_master = False if self.OPT_LOCAL_MASTER in cache_options: if cache_options[self.OPT_LOCAL_MASTER] == ...
Initialize automount-specific updater options. Args: map_name: A string representing the type of the map we are an Updater for. timestamp_dir: A string with the directory containing our timestamp files. cache_options: A dict containing the options for any caches we create. automount_mountpoint: An optional string cont...
github-repos
def get_versions(self): versions_response = self.repo.api.http_request('GET', '%s/fcr:versions' % self.uri) versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers) for version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion...
retrieves all versions of an object, and stores them at self.versions Args: None Returns: None: appends instances
juraj-google-style
def parse_ids(chrom, pos, ref, alt, case_id, variant_type): ids = {} pos = str(pos) ids['simple_id'] = parse_simple_id(chrom, pos, ref, alt) ids['variant_id'] = parse_variant_id(chrom, pos, ref, alt, variant_type) ids['display_name'] = parse_display_name(chrom, pos, ref, alt, variant_type) ...
Construct the necessary ids for a variant Args: chrom(str): Variant chromosome pos(int): Variant position ref(str): Variant reference alt(str): Variant alternative case_id(str): Unique case id variant_type(str): 'clinical' or 'research' Returns: ids(dict): Dictionary with the relevant ids
juraj-google-style
def all(self, data=None, **kwargs):
    """Fetch all Virtual Account entities.

    Args:
        data (dict, optional): Query parameters forwarded to the API call.
            Defaults to an empty dict.
        **kwargs: Extra options passed through to the base implementation.

    Returns:
        Dictionary of Virtual Account data.
    """
    # Fix: the original used a mutable default argument (data={}), which is
    # shared across calls and can leak state between requests.
    if data is None:
        data = {}
    return super(VirtualAccount, self).all(data, **kwargs)
Fetch all Virtual Account entities Returns: Dictionary of Virtual Account data
codesearchnet
def _parse_banners(self): motd_value = login_value = None matches = re.findall('^banner\\s+(login|motd)\\s?$\n(.*?)$\nEOF$\n', self.config, (re.DOTALL | re.M)) for match in matches: if (match[0].strip() == 'motd'): motd_value = match[1] elif (match[0].strip() == 'login'): ...
Parses the global config and returns the value for both motd and login banners. Returns: dict: The configured value for motd and login banners. If the banner is not set it will return a value of None for that key. The returned dict object is intended to be merged into the resource dict
codesearchnet
def _list_like_func(self, func, axis, *args, **kwargs): func_prepared = self._prepare_method( lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs)) ) new_data = self._map_across_full_axis(axis, func_prepared) new_index = ( [f if isin...
Apply list-like function across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
juraj-google-style
def load_settings(path, setttings_only = True): if not os.path.exists(path): print(path) raise AttributeError('Path given does not exist!') tag = '_'.join(os.path.basename(os.path.dirname(os.path.abspath(path) + '/')).split('_')[3:]) search_str = os....
Loads the settings that have been saved with Script.save_b26. Args: path: path to folder saved by Script.save_b26 setttings_only: if True returns only the settings if the .b26 file contains only a single script Returns: a dictionary with the settings
juraj-google-style
def _ParseKey(self, knowledge_base, registry_key, value_name): try: registry_value = registry_key.GetValueByName(value_name) except IOError as exception: raise errors.PreProcessFail(( 'Unable to retrieve Windows Registry key: {0:s} value: {1:s} ' 'with error: {2!s}').format(...
Parses a Windows Registry key for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. value_name (str): name of the Windows Registry value. Raises: PreProcessFail: if the preprocessing fails.
juraj-google-style
def approx_eq(val: Any, other: Any, *, atol: Union[(int, float)]=1e-08) -> bool: approx_eq_getter = getattr(val, '_approx_eq_', None) if (approx_eq_getter is not None): result = approx_eq_getter(other, atol) if (result is not NotImplemented): return result other_approx_eq_getter ...
Approximately compares two objects. If `val` implements SupportsApproxEquality protocol then it is invoked and takes precedence over all other checks: - For primitive numeric types `int` and `float` approximate equality is delegated to math.isclose(). - For complex primitive type the real and imaginary parts are treat...
codesearchnet
def is_http_running_on(port):
    """Check whether something accepts HTTP connections on a local port.

    NOTE(review): this only verifies that a TCP connection to
    127.0.0.1:port succeeds; it does not confirm the listener actually
    speaks HTTP.

    Args:
        port: The port to check.

    Returns:
        True if a connection could be established, False otherwise.
    """
    address = '127.0.0.1:' + str(port)
    try:
        probe = httplib.HTTPConnection(address)
        probe.connect()
        probe.close()
    except Exception:
        # Any failure (refused, timeout, ...) means nothing usable is there.
        return False
    return True
Check if an http server runs on a given port. Args: port: The port to check. Returns: True if it is used by an http server. False otherwise.
juraj-google-style
def stacked_highway_cnn(units: tf.Tensor, n_hidden_list: List, filter_width=3, use_batch_norm=False, use_dilation=False, training_ph=None): for (n_layer, n_hidden) in enumerate(n_hidden_list): input_units = units if (input_units.get_shape().as_list()[(- 1)] != n_hidden): input_units = tf...
Highway convolutional network. Skip connection with gating mechanism. Args: units: a tensorflow tensor with dimensionality [None, n_tokens, n_features] n_hidden_list: list with number of hidden units at the output of each layer filter_width: width of the kernel in tokens use_batch_norm: whether to use batch normalizat...
codesearchnet
def _parse_normalization(normalization): parsed_normalization = None if isinstance(normalization, dict): if (len(normalization.keys()) == 1): items = list(normalization.items())[0] if (len(items) == 2): if (items[1] and isinstance(items[1], dict)): ...
Parse a normalization item. Transform dicts into a tuple containing the normalization options. If a string is found, the actual value is used. Args: normalization: Normalization to parse. Returns: Tuple or string containing the parsed normalization.
codesearchnet
def module_entry(yfile): ytxt = yfile.read() mp = ModuleParser(ytxt) mst = mp.statement() submod = (mst.keyword == 'submodule') import_only = True rev = '' features = [] includes = [] rec = {} for sst in mst.substatements: if ((not rev) and (sst.keyword == 'revision')): ...
Add entry for one file containing YANG module text. Args: yfile (file): File containing a YANG module or submodule.
codesearchnet
def __init__(self, name, display_name='', description='', default=False):
    """Attribute constructor.

    Args:
        name (str): Attribute name.
        display_name (str): Attribute display name.
        description (str): Attribute description.
        default (bool): Whether the attribute is a default attribute of the
            corresponding datasets.
    """
    self._default = default
    self._description = description
    self._display_name = display_name
    self._name = name
Attribute constructor. Args: name (str): Attribute name. display_name (str): Attribute display name. description (str): Attribute description. default (bool): Whether the attribute is a default attribute of the corresponding datasets.
juraj-google-style
def set_window_size(self, width, height, window_handle='current'):
    """Set the width and height of the current window.

    Support: Web(WebView)

    Args:
        width(int): the width in pixels.
        height(int): the height in pixels.
        window_handle(str): Identifier of window_handle,
            default to 'current'.
    """
    params = {
        'width': int(width),
        'height': int(height),
        'window_handle': window_handle,
    }
    self._execute(Command.SET_WINDOW_SIZE, params)
Sets the width and height of the current window. Support: Web(WebView) Args: width(int): the width in pixels. height(int): the height in pixels. window_handle(str): Identifier of window_handle, default to 'current'. Returns: WebDriver Object.
codesearchnet
def id_in_cluster(cluster_spec, task_type, task_id): _validate_cluster_spec(cluster_spec, task_type, task_id) cluster_spec = normalize_cluster_spec(cluster_spec).as_dict() if task_type == 'chief': return 0 if task_type == 'worker': return task_id + len(cluster_spec.get('chief', [])) ...
Returns a unique id for the task in the `task_type`'s cluster. It returns an id ranging from [0, `worker_count(task_type, task_id)`). Note: this function assumes that "evaluate" job is in its own cluster or its own partition of a cluster. Args: cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validat...
github-repos
def _make_actor_method_executor(self, method_name, method, actor_imported): def actor_method_executor(dummy_return_id, actor, *args): self._worker.actor_task_counter += 1 try: if is_class_method(method): method_returns = method(*args) else: me...
Make an executor that wraps a user-defined actor method. The wrapped method updates the worker's internal state and performs any necessary checkpointing operations. Args: method_name (str): The name of the actor method. method (instancemethod): The actor method to wrap. This should be a method defined on the actor cl...
codesearchnet
def read_serializable_array(self, class_name, max_size=sys.maxsize): module = '.'.join(class_name.split('.')[:-1]) class_name = class_name.split('.')[-1] class_attr = getattr(importlib.import_module(module), class_name) length = self.read_var_int(max_size=max_size) items...
Deserialize a stream into the object specific by `class_name`. Args: class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block' max_size (int): (Optional) maximum number of bytes to read. Returns: list: list of `class_name` objects deserialized from the stream.
juraj-google-style
def unserialize_data(data, compression=False, encryption=False): try: if encryption: data = encryption.decrypt(data) except Exception as err: logger.error(('Decryption Error: ' + str(err))) message = False try: if compression: data = binascii.a2b_base6...
Unserializes the packet data and converts it from json format to normal Python datatypes. If you choose to enable encryption and/or compression when serializing data, you MUST enable the same options when unserializing data. Args: data (str): The raw, serialized packet data delivered from the transport protocol. comp...
codesearchnet
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Op...
Returns: Example: ```python >>> from transformers import AutoTokenizer, XLMProphetNetModel >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") >>> model = XLMProphetNetModel.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") >>> input_ids = to...
github-repos
def bootstrap(score_objs, n_boot=1000):
    """Bootstrap-resample score objects and aggregate each resampling.

    Draws `n_boot` resamplings (with replacement) of the input objects and
    sums each resampled set, relying on the objects' `__add__` method.

    Args:
        score_objs: A list of objects supporting `__add__` (e.g.
            DistributedROC or DistributedReliability instances).
        n_boot (int): Number of bootstrap samples to draw.

    Returns:
        Array of `n_boot` aggregated samples.
    """
    sample_shape = (n_boot, len(score_objs))
    resampled = np.random.choice(score_objs, size=sample_shape, replace=True)
    return resampled.sum(axis=1)
Given a set of DistributedROC or DistributedReliability objects, this function performs a bootstrap resampling of the objects and returns n_boot aggregations of them. Args: score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method n_boot (int): Number of bootstrap samp...
codesearchnet
def __init__(self, client_id, client_secret):
    """Constructor.

    Args:
        client_id (str): Client ID provided by Box.
        client_secret (str): Client Secret provided by Box.
    """
    self.client_id = client_id
    self.client_secret = client_secret
    # Dedicated REST helper bound to this application's credentials.
    self.box_request = BoxRestRequest(client_id, client_secret)
Constructor Args: client_id (str): Client ID provided by Box. client_secret (str): Client Secret provided by Box.
juraj-google-style
def delete_resource_view(self, resource_view): if isinstance(resource_view, str): if is_valid_uuid(resource_view) is False: raise HDXError('%s is not a valid resource view id!' % resource_view) resource_view = ResourceView({'id': resource_view}, configur...
Delete a resource view from the resource and HDX Args: resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary Returns: None
juraj-google-style
def remove_vtep(self, name, vtep, vlan=None):
    """Remove a VTEP endpoint from the global or local flood list.

    EosVersion: 4.13.7M

    Args:
        name (str): The name of the interface to configure
        vtep (str): The IP address of the remote VTEP endpoint to remove
        vlan (str): The VLAN ID associated with this VTEP. If given, the
            VTEP is removed from the local flood list for that VLAN;
            otherwise it is removed from the global flood list.

    Returns:
        The result of configuring the interface with the generated command.
    """
    if vlan:
        command = 'vxlan vlan {} flood vtep remove {}'.format(vlan, vtep)
    else:
        command = 'vxlan flood vtep remove {}'.format(vtep)
    return self.configure_interface(name, command)
Removes a VTEP endpoint from the global or local flood list EosVersion: 4.13.7M Args: name (str): The name of the interface to configure vtep (str): The IP address of the remote VTEP endpoint to add vlan (str): The VLAN ID associated with this VTEP. If the VLAN keyword is used, then the VTEP is configured as a local...
juraj-google-style
def load_index(self, filename, reindex=False): self._reset_index() with open(filename, 'r') as fobj: data = json.load(fobj) for (path, file) in data.items(): (ents, domains) = (file['entities'], file['domains']) (root, f) = (dirname(path), basename(path)) if reindex: ...
Load the Layout's index from a plaintext file. Args: filename (str): Path to the plaintext index file. reindex (bool): If True, discards entity values provided in the loaded index and instead re-indexes every file in the loaded index against the entities defined in the config. Default is False, in which case it is ass...
codesearchnet
def delete_snl(self, snl_ids): try: payload = {"ids": json.dumps(snl_ids)} response = self.session.post( "{}/snl/delete".format(self.preamble), data=payload) if response.status_code in [200, 400]: resp = json.loads(response.text, cls=...
Delete earlier submitted SNLs. .. note:: As of now, this MP REST feature is open only to a select group of users. Opening up submissions to all users is being planned for the future. Args: snl_ids: List of SNL ids. Raises: MPRestError
juraj-google-style
def save(self, branch, commit_message, **kwargs):
    """Save the changes made to the file to the server.

    The object is updated to match what the server returns.

    Args:
        branch (str): Branch in which the file will be updated
        commit_message (str): Message to send with the commit
        **kwargs: Extra options to send to the server (e.g. sudo)
    """
    self.branch = branch
    self.commit_message = commit_message
    # The API expects the file path URL-encoded ('/' -> '%2F').
    encoded_path = self.file_path.replace('/', '%2F')
    self.file_path = encoded_path
    super(ProjectFile, self).save(**kwargs)
Save the changes made to the file to the server. The object is updated to match what the server returns. Args: branch (str): Branch in which the file will be updated commit_message (str): Message to send with the commit **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If a...
codesearchnet