code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def keys(self, full_grid=False):
    """Return the keys of the GridSpace.

    Args:
        full_grid (bool, optional): If True (and the space is 2D),
            return the full cross-product of first- and
            second-dimension key values instead of only the keys
            actually present.

    Returns:
        List of keys.
    """
    base_keys = super(GridSpace, self).keys()
    if self.ndims == 1 or not full_grid:
        return base_keys
    # Build the dense grid from the sorted unique values of each dimension.
    first_dim = sorted({key[0] for key in base_keys})
    second_dim = sorted({key[1] for key in base_keys})
    return [(a, b) for a in first_dim for b in second_dim]
Returns the keys of the GridSpace Args: full_grid (bool, optional): Return full cross-product of keys Returns: List of keys
juraj-google-style
def define_batch_env(constructor, num_agents, env_processes):
    """Create environments and apply all desired wrappers.

    Args:
        constructor: Constructor of an OpenAI gym environment.
        num_agents: Number of environments to combine in the batch.
        env_processes: Whether to step environments in external processes.

    Returns:
        In-graph environments object.
    """
    with tf.variable_scope('environments'):
        if env_processes:
            # Each environment runs in its own external process.
            environments = [
                tools.wrappers.ExternalProcess(constructor)
                for _ in range(num_agents)]
        else:
            environments = [constructor() for _ in range(num_agents)]
        # Blocking stepping is only needed when envs run in-process.
        batched = tools.BatchEnv(environments, blocking=not env_processes)
        return tools.InGraphBatchEnv(batched)
Create environments and apply all desired wrappers. Args: constructor: Constructor of an OpenAI gym environment. num_agents: Number of environments to combine in the batch. env_processes: Whether to step environment in external processes. Returns: In-graph environments object.
juraj-google-style
def get_sfa_conjecture(self):
    """Build a symbolic finite automaton (SFA) conjecture.

    Uses the closed and consistent observation table: each access
    string becomes a state, predicate guards become arcs, and
    membership of the empty suffix marks final states.

    Returns:
        SFA: the automaton constructed from the observation table.
    """
    sfa = SFA(self.alphabet)
    sm_vector = self.observation_table.sm_vector
    for state in sm_vector:
        training = self.observation_table.training_data[state]
        for target, predicate in self._get_predicate_guards(state, training):
            assert isinstance(
                predicate,
                SetPredicate), "Invalid type for predicate {}".format(predicate)
            src_id = sm_vector.index(state)
            dst_id = sm_vector.index(target)
            sfa.add_arc(src_id, dst_id, predicate)
    # A state is final iff the table accepts it with the empty suffix.
    for index, state in enumerate(sm_vector):
        sfa.states[index].final = self.observation_table[state, self.epsilon]
    return sfa
Utilize the observation table to construct a symbolic finite automaton (SFA). Each access string in the table becomes a state, predicate guards become arcs, and final states are those accepted with the empty suffix. Args: None Returns: SFA: A symbolic finite automaton built from a closed and consistent observation table.
juraj-google-style
def serialize_training_step(features, model_fn, batch_dim, num_splits): for v in features.values(): mesh = v.mesh graph = v.graph microbatch_dim = Dimension('microbatch', num_splits) smaller_batch_dim = Dimension(batch_dim.name, (batch_dim.size cache = {} def select(t, microbatch_num): return gather(replace_dimensions(t, batch_dim, [smaller_batch_dim, microbatch_dim]), microbatch_num, microbatch_dim) def cond_fn(microbatch_num): return less(microbatch_num, num_splits) def body_fn(microbatch_num): 'Body function for mtf.while_loop.\n\n Args:\n microbatch_num: a mtf Scalar\n Returns:\n a list of mtf Tensors\n ' my_features = {} for (k, v) in six.iteritems(features): my_features[k] = select(v, microbatch_num) outputs = model_fn(my_features) grads = gradients([outputs['loss']], [v.outputs[0] for v in graph.trainable_variables]) output_keys = outputs.keys() cache['output_keys'] = output_keys ret = [] ret.append((microbatch_num + 1)) for t in outputs.values(): if (smaller_batch_dim in t.shape): t = einsum([t, one_hot(microbatch_num, microbatch_dim, dtype=t.dtype)], output_shape=replace_dimensions(t.shape, smaller_batch_dim, [smaller_batch_dim, microbatch_dim])) t = replace_dimensions(t, [smaller_batch_dim, microbatch_dim], batch_dim) ret.append(t) else: ret.append(t) ret.extend(grads) return ret while_out = while_loop(cond_fn, body_fn, [constant(mesh, 0, dtype=tf.int32)], has_accumulators=True) num_outputs = len(cache['output_keys']) combined_outputs = {} for (k, v) in zip(cache['output_keys'], while_out[1:(1 + num_outputs)]): combined_outputs[k] = v combined_grads = while_out[(1 + num_outputs):] return (combined_grads, combined_outputs)
Break the training batch into multiple microbatches. Returns two structures: grads - a list of Tensors corresponding to the gradients on graph.trainable_variables. These are summed across all microbatches outputs - a dictionary of Tensors corresponding to the output dictionary of model_fn. Each value is either summed across all microbatches (if it has no batch-dimension), or concatenated across all microbatches to represent the original batch (if it does have a batch-dimension). Args: features: a dictionary of Tensors, each with a batch_dim dimension model_fn: a function from feature dictionary to output dictionary output_dictionary must contain "loss" batch_dim: a Dimension num_splits: an integer dividing batch_dim.size Returns: grads: a list of Tensors corresponding to the gradients on graph.trainable_variables outputs: dictionary of output Tensors summed across microbatches
codesearchnet
def insert(self, i, species, coords, coords_are_cartesian=False, validate_proximity=False, properties=None):
    """Insert a site into the structure in place.

    Args:
        i (int): Index to insert site at.
        species (species-like): Species of inserted site.
        coords (3x1 array): Coordinates of inserted site.
        coords_are_cartesian (bool): Whether coordinates are cartesian.
            Defaults to False (fractional).
        validate_proximity (bool): Whether to check that the inserted
            site is not too close to an existing site. Defaults to False.
        properties (dict): Properties associated with the site.

    Raises:
        ValueError: if validate_proximity is set and the new site is
            within DISTANCE_TOLERANCE of an existing site.
    """
    if coords_are_cartesian:
        # Convert to fractional coordinates in this lattice first.
        frac_coords = self._lattice.get_fractional_coords(coords)
        new_site = PeriodicSite(species, frac_coords, self._lattice,
                                properties=properties)
    else:
        new_site = PeriodicSite(species, coords, self._lattice,
                                properties=properties)
    if validate_proximity:
        for existing_site in self:
            if existing_site.distance(new_site) < self.DISTANCE_TOLERANCE:
                raise ValueError('New site is too close to an existing site!')
    self._sites.insert(i, new_site)
Insert a site into the structure in place. Args: i (int): Index to insert site species (species-like): Species of inserted site coords (3x1 array): Coordinates of inserted site coords_are_cartesian (bool): Whether coordinates are cartesian. Defaults to False. validate_proximity (bool): Whether to check if inserted site is too close to an existing site. Defaults to False. properties (dict): Properties associated with the site. Raises: ValueError: if validate_proximity is set and the new site is too close to an existing site.
codesearchnet
def write_other_members(self, f, catch_all=False): if catch_all: names = self._members.items() else: names = inspect.getmembers(self._module) leftovers = [] for name, _ in names: if name in self._members and name not in self._documented: leftovers.append(name) if leftovers: print("%s: undocumented members: %d" % (self._title, len(leftovers))) print("\n for name in sorted(leftovers): print(" %s" % name) self._documented.add(name) self._mentioned.add(name) self._write_member_markdown_to_file(f, "
Writes the leftover members to `f`. Args: f: File to write to. catch_all: If true, document all missing symbols from any module. Otherwise, document missing symbols from just this module.
juraj-google-style
def codepointsInNamelist(namFilename, unique_glyphs=False, cache=None):
    """Return the set of codepoints contained in a given Namelist file.

    Replacement for CodepointsInSubset; implements the "#$ include"
    header format.

    Args:
        namFilename: Path to the Namelist file, relative to this module.
        unique_glyphs: Optional, whether to only include glyphs unique
            to the subset.
        cache: Optional cache passed through to readNamelist.

    Returns:
        A set containing the codepoints in the subset.
    """
    # Select which charset readNamelist() computed for us.
    field = 'ownCharset' if unique_glyphs else 'charset'
    internals_dir = os.path.dirname(os.path.abspath(__file__))
    namelist_path = os.path.join(internals_dir, namFilename)
    return readNamelist(namelist_path, unique_glyphs, cache)[field]
Returns the set of codepoints contained in a given Namelist file. This is a replacement for CodepointsInSubset and implements the "#$ include" header format. Args: namFilename: The path to the Namelist file. unique_glyphs: Optional, whether to only include glyphs unique to the subset. Returns: A set containing the glyphs in the subset.
juraj-google-style
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
    """Pads the 2nd and 3rd dimensions of a 4D tensor.

    Args:
        x: Tensor or variable.
        padding: Tuple of 2 tuples, padding pattern.
        data_format: One of `channels_last` or `channels_first`.

    Returns:
        A padded 4D tensor.

    Raises:
        ValueError: if `data_format` is neither `channels_last` nor
            `channels_first`.
    """
    assert len(padding) == 2
    assert len(padding[0]) == 2
    assert len(padding[1]) == 2
    data_format = image_data_format() if data_format is None else data_format
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format: ' + str(data_format))
    # Channels-first pads axes 2 and 3; channels-last pads axes 1 and 2.
    if data_format == 'channels_first':
        pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
    else:
        pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
    return array_ops.pad(x, pattern)
Pads the 2nd and 3rd dimensions of a 4D tensor. Args: x: Tensor or variable. padding: Tuple of 2 tuples, padding pattern. data_format: One of `channels_last` or `channels_first`. Returns: A padded 4D tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`.
github-repos
def _escape_token(token, alphabet): if (not isinstance(token, six.text_type)): raise ValueError(('Expected string type for token, got %s' % type(token))) token = token.replace(u'\\', u'\\\\').replace(u'_', u'\\u') ret = [(c if ((c in alphabet) and (c != u'\n')) else ('\\%d;' % ord(c))) for c in token] return (u''.join(ret) + '_')
Escape away underscores and OOV characters and append '_'. This allows the token to be expressed as the concatenation of a list of subtokens from the vocabulary. The underscore acts as a sentinel which allows us to invertibly concatenate multiple such lists. Args: token: A unicode string to be escaped. alphabet: A set of all characters in the vocabulary's alphabet. Returns: escaped_token: An escaped unicode string. Raises: ValueError: If the provided token is not unicode.
codesearchnet
def create(self, data=None, uri=None, timeout=-1, force=True):
    """Makes a POST request to create a resource when a body is required.

    Args:
        data: Additional fields can be passed to create the resource.
            NOTE: missing keys are filled in-place with default values.
        uri: Resource uri.
        timeout: Timeout in seconds. Wait for task completion by
            default. The timeout does not abort the operation in
            OneView; it just stops waiting for its completion.
        force: Flag to force the operation.

    Returns:
        Created resource.
    """
    if not data:
        data = {}
    # Fill in any missing/falsy fields with the resource defaults.
    for key, value in self._get_default_values().items():
        if not data.get(key):
            data[key] = value
    resource_data = self._helper.create(data, uri, timeout, force=force)
    return self.new(self._connection, resource_data)
Makes a POST request to create a resource when a request body is required. Args: data: Additional fields can be passed to create the resource. uri: Resource uri timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. force: Flag to force the operation Returns: Created resource.
juraj-google-style
def compute_positions(cls, screen_width, line):
    """Compute the relative position of the fields on a given line.

    Args:
        screen_width (int): the width of the screen.
        line (list of Field): the fields on the line.

    Returns:
        list of (int, Field): the (position, field) tuples.

    Raises:
        FormatError: if the line contains more than one flexible field,
            or is too long for the screen size.
    """
    left_edge = 1
    right_edge = screen_width + 1
    flexible = None
    # Fixed fields before the flexible one consume space from the left,
    # those after it from the right.
    for field in line:
        if field.is_flexible():
            if flexible:
                raise FormatError('There can be only one flexible field per line.')
            flexible = field
        elif not flexible:
            left_edge += field.width
        else:
            right_edge -= field.width
    available = right_edge - left_edge
    if available <= 0:
        raise FormatError('Too much data for screen width')
    if flexible:
        if available < 1:
            raise FormatError('Not enough space to display flexible field %s' % flexible.name)
        # The flexible field absorbs all remaining space.
        flexible.width = available
    positions = []
    cursor = 1
    for field in line:
        positions.append((cursor, field))
        cursor += field.width
    logger.debug('Positions are %r', positions)
    return positions
Compute the relative position of the fields on a given line. Args: screen_width (int): the width of the screen line (mpdlcd.display_fields.Field list): the list of fields on the line Returns: ((int, mpdlcd.display_fields.Field) list): the positions of fields, as (position, field) tuples. Raises: FormatError: if the line contains more than one flexible field, or is too long for the screen size.
codesearchnet
def decode_single_feature_from_dict(
        feature_k, feature, tfexample_dict):
    """Decode the given feature from the tfexample_dict.

    Args:
        feature_k (str): Feature key in the tfexample_dict.
        feature (FeatureConnector): Connector used to decode the field.
        tfexample_dict (dict): Dict containing the data to decode.

    Returns:
        decoded_feature: The output of feature.decode_example.
    """
    if feature.serialized_keys:
        # Composite feature: gather each sub-key under 'feature_k/sub_key'.
        data_to_decode = {
            key: tfexample_dict[posixpath.join(feature_k, key)]
            for key in feature.serialized_keys
        }
    else:
        data_to_decode = tfexample_dict[feature_k]
    return feature.decode_example(data_to_decode)
Decode the given feature from the tfexample_dict. Args: feature_k (str): Feature key in the tfexample_dict feature (FeatureConnector): Connector object to use to decode the field tfexample_dict (dict): Dict containing the data to decode. Returns: decoded_feature: The output of the feature.decode_example
juraj-google-style
def ReSpecTh_to_ChemKED(filename_xml, file_author='', file_author_orcid='', *, validate=False):
    """Convert ReSpecTh XML file to ChemKED-compliant dictionary.

    Args:
        filename_xml (str): Name of ReSpecTh XML file to convert.
        file_author (str, optional): Name to append as a file author.
        file_author_orcid (str, optional): ORCID of the file author.
        validate (bool, optional, keyword-only): If True, validate the
            resulting dictionary with ChemKED.

    Returns:
        dict: the ChemKED-compliant property dictionary.

    Raises:
        KeywordError: for physically inconsistent combinations, or if
            an ORCID is given without an author name.
    """
    root = etree.parse(filename_xml).getroot()
    properties = get_file_metadata(root)
    properties['reference'] = get_reference(root)
    existing_detail = properties['reference'].get('detail', '')
    properties['reference']['detail'] = (
        existing_detail + 'Converted from ReSpecTh XML file ' +
        os.path.basename(filename_xml))
    properties.update(get_experiment_kind(root))
    properties['common-properties'] = get_common_properties(root)
    properties['common-properties']['ignition-type'] = get_ignition_type(root)
    properties['datapoints'] = get_datapoints(root)

    common = properties['common-properties']
    apparatus_kind = properties['apparatus']['kind']
    # Pressure rise is meaningless for a rapid compression machine.
    has_pres_rise = ('pressure-rise' in common or
                     any('pressure-rise' in dp for dp in properties['datapoints']))
    if has_pres_rise and apparatus_kind == 'rapid compression machine':
        raise KeywordError('Pressure rise cannot be defined for RCM.')
    # Volume histories are meaningless for a shock tube.
    has_vol_hist = any(
        history.get('type') == 'volume'
        for dp in properties['datapoints']
        for history in dp.get('time-histories', [{}]))
    if has_vol_hist and apparatus_kind == 'shock tube':
        raise KeywordError('Volume history cannot be defined for shock tube.')

    if file_author_orcid and not file_author:
        raise KeywordError('If file_author_orcid is specified, file_author must be as well')
    if file_author:
        author_entry = {'name': file_author}
        if file_author_orcid:
            author_entry['ORCID'] = file_author_orcid
        properties['file-authors'].append(author_entry)

    # Copy every common property into each datapoint.
    for datapoint in properties['datapoints']:
        for prop in common:
            datapoint[prop] = common[prop]

    if validate:
        chemked.ChemKED(dict_input=properties)
    return properties
Convert ReSpecTh XML file to ChemKED-compliant dictionary. Args: filename_xml (`str`): Name of ReSpecTh XML file to be converted. file_author (`str`, optional): Name to override original file author file_author_orcid (`str`, optional): ORCID of file author validate (`bool`, optional, keyword-only): Set to `True` to validate the resulting property dictionary with `ChemKED`. Set to `False` if the file is being loaded and will be validated at some other point before use.
codesearchnet
def add_metric(self, labels, buckets, gsum_value, timestamp=None):
    """Add a metric to the metric family.

    Args:
        labels: A list of label values.
        buckets: A list of (bucket name, value) pairs. The buckets
            must be sorted, and +Inf present.
        gsum_value: The sum value of the metric.
        timestamp: Optional timestamp for the samples.
    """
    for bucket_name, bucket_value in buckets:
        bucket_labels = dict(
            list(zip(self._labelnames, labels)) + [('le', bucket_name)])
        self.samples.append(
            Sample(self.name + '_bucket', bucket_labels, bucket_value, timestamp))
    # +Inf (last bucket) holds the total count.
    self.samples.append(Sample(
        self.name + '_gcount', dict(zip(self._labelnames, labels)),
        buckets[-1][1], timestamp))
    self.samples.append(Sample(
        self.name + '_gsum', dict(zip(self._labelnames, labels)),
        gsum_value, timestamp))
Add a metric to the metric family. Args: labels: A list of label values buckets: A list of pairs of bucket names and values. The buckets must be sorted, and +Inf present. gsum_value: The sum value of the metric. timestamp: Optional timestamp to attach to the samples.
juraj-google-style
def __init__(self, input_queue, output_queue):
    """Initializer.

    Args:
        input_queue: Queue this worker consumes work from.
        output_queue: Queue where this worker puts new work items, if any.
    """
    super(WorkerThread, self).__init__()
    # Daemonize so the interpreter can exit without joining workers.
    self.daemon = True
    self.interrupted = False
    self.polltime = FLAGS.polltime
    self.input_queue = input_queue
    self.output_queue = output_queue
Initializer. Args: input_queue: Queue this worker consumes work from. output_queue: Queue where this worker puts new work items, if any.
juraj-google-style
def list_cert_bindings(site):
    """List certificate bindings for an IIS site.

    .. versionadded:: 2016.11.0

    Args:
        site (str): The IIS site name.

    Returns:
        dict: A dictionary of the binding names and properties; empty
        if the site is unknown or has no certificate bindings.
    """
    cert_bindings = dict()
    sites = list_sites()
    if site not in sites:
        log.warning('Site not found: %s', site)
        return cert_bindings
    site_bindings = sites[site]['bindings']
    # Keep only bindings that actually carry a certificate.
    for binding_name in site_bindings:
        if site_bindings[binding_name]['certificatehash']:
            cert_bindings[binding_name] = site_bindings[binding_name]
    if not cert_bindings:
        log.warning('No certificate bindings found for site: %s', site)
    return cert_bindings
List certificate bindings for an IIS site. .. versionadded:: 2016.11.0 Args: site (str): The IIS site name. Returns: dict: A dictionary of the binding names and properties. CLI Example: .. code-block:: bash salt '*' win_iis.list_cert_bindings site
juraj-google-style
def status(self, targets, jobs=None, remote=None, show_checksums=False):
    """Check status of data items in a cloud-agnostic way.

    Args:
        targets (list): list of targets to check status for.
        jobs (int): number of jobs that can run simultaneously.
        remote (dvc.remote.base.RemoteBase): optional remote to compare
            targets to. By default the remote from the core.remote
            config option is used.
        show_checksums (bool): show checksums instead of file names in
            information messages.
    """
    cloud_remote = self._get_cloud(remote, "status")
    return self.repo.cache.local.status(
        targets,
        jobs=jobs,
        remote=cloud_remote,
        show_checksums=show_checksums,
    )
Check status of data items in a cloud-agnostic way. Args: targets (list): list of targets to check status for. jobs (int): number of jobs that can be running simultaneously. remote (dvc.remote.base.RemoteBase): optional remote to compare targets to. By default remote from core.remote config option is used. show_checksums (bool): show checksums instead of file names in information messages.
juraj-google-style
def enforce_epsilon_and_compute_hash(dataset_batch_dir, adv_dir, output_dir, epsilon):
    """Enforce perturbation size on images, and compute hashes for all images.

    Args:
        dataset_batch_dir: directory with the images of a dataset batch.
        adv_dir: directory with generated adversarial images.
        output_dir: directory where clipped results are written.
        epsilon: maximum allowed per-pixel perturbation.

    Returns:
        dict mapping image ID (filename without extension) to SHA-1 hash.
    """
    image_hashes = {}
    resize_warning = False
    png_names = [f for f in os.listdir(dataset_batch_dir) if f.endswith('.png')]
    for img_name in png_names:
        if not os.path.exists(os.path.join(adv_dir, img_name)):
            logging.warning('Image %s not found in the output', img_name)
            continue
        # int32 so the +/- epsilon arithmetic can't wrap around.
        source = np.array(
            Image.open(os.path.join(dataset_batch_dir, img_name)).convert('RGB'))
        source = source.astype('int32')
        upper_bound = np.clip(source + epsilon, 0, 255).astype('uint8')
        lower_bound = np.clip(source - epsilon, 0, 255).astype('uint8')
        adv_image = Image.open(os.path.join(adv_dir, img_name)).convert('RGB')
        # PIL size is (width, height); numpy shape is (height, width).
        if adv_image.size[::-1] != source.shape[:2]:
            resize_warning = True
            adv_image = adv_image.resize(
                (source.shape[1], source.shape[0]), Image.BICUBIC)
        clipped = np.clip(np.array(adv_image), lower_bound, upper_bound)
        Image.fromarray(clipped).save(os.path.join(output_dir, img_name))
        image_hashes[img_name[:-4]] = hashlib.sha1(
            clipped.view(np.uint8)).hexdigest()
    if resize_warning:
        logging.warning('One or more adversarial images had incorrect size')
    return image_hashes
Enforces size of perturbation on images, and compute hashes for all images. Args: dataset_batch_dir: directory with the images of specific dataset batch adv_dir: directory with generated adversarial images output_dir: directory where to copy result epsilon: size of perturbation Returns: dictionary with mapping form image ID to hash.
codesearchnet
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> torch.FloatTensor:
    """Compute pooled text embeddings from the text tower.

    Returns:
        text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The pooled output of the text model for the given inputs.
    """
    # Fall back to the model config when the flags are not given.
    if output_attentions is None:
        output_attentions = self.config.output_attentions
    if output_hidden_states is None:
        output_hidden_states = self.config.output_hidden_states
    text_outputs: BaseModelOutputWithPooling = self.text_model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
    )
    return text_outputs.pooler_output
Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`Siglip2TextModel`]. Examples: ```python >>> from transformers import AutoTokenizer, AutoModel >>> import torch >>> model = AutoModel.from_pretrained("google/siglip2-base-patch16-224") >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip2-base-patch16-224") >>> # important: make sure to set padding="max_length" as that's how the model was trained >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt") >>> with torch.no_grad(): ... text_features = model.get_text_features(**inputs) ```
github-repos
def _internal_add(self, pattern: Pattern, label, renaming) -> int:
    """Add a new pattern to the matcher.

    Structurally equivalent patterns with different constraints or
    variable names are still distinguished by the matcher.

    Args:
        pattern: The pattern to add.
        label: Label associated with the pattern.
        renaming: Variable renaming to apply.

    Returns:
        The internal id for the pattern.
    """
    index = len(self.patterns)
    # Constraints are stored with the renamed variables applied.
    constraints = [c.with_renamed_vars(renaming)
                   for c in pattern.local_constraints]
    constraint_ids = [self._add_constraint(c, index) for c in constraints]
    self.patterns.append((pattern, label, constraint_ids))
    self.pattern_vars.append(renaming)
    renamed_expression = rename_variables(pattern.expression, renaming)
    stack = [deque([renamed_expression])]
    self._process_pattern_stack(self.root, stack, constraints, index)
    return index
Add a new pattern to the matcher. Equivalent patterns are not added again. However, patterns that are structurally equivalent, but have different constraints or different variable names are distinguished by the matcher. Args: pattern: The pattern to add. Returns: The internal id for the pattern. This is mainly used by the :class:`CommutativeMatcher`.
juraj-google-style
def zip_(*structures, **kwargs):
    """Combine corresponding elements in multiple nested structures to tuples.

    The nested structures can consist of any combination of lists,
    tuples, and dicts. All provided structures must have the same
    nesting.

    Args:
        *structures: Nested structures.
        flatten: Whether to flatten the resulting structure into a
            tuple. Keys of dictionaries will be discarded.

    Returns:
        Nested structure.
    """
    flatten = kwargs.pop('flatten', False)
    assert not kwargs, 'zip() got unexpected keyword arguments.'

    def combine(*elements):
        # A single structure zips to its own elements, not 1-tuples.
        return elements if len(elements) > 1 else elements[0]

    return map(combine, *structures, flatten=flatten)
Combine corresponding elements in multiple nested structure to tuples. The nested structures can consist of any combination of lists, tuples, and dicts. All provided structures must have the same nesting. Args: *structures: Nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure.
juraj-google-style
def _ParseTimezoneOption(self, options):
    """Parses the timezone options.

    Args:
        options (argparse.Namespace): command line arguments.

    Raises:
        BadConfigOption: if the time zone is unknown.
    """
    time_zone_string = self.ParseStringOption(options, 'timezone')
    if not isinstance(time_zone_string, py2to3.STRING_TYPES):
        return
    if time_zone_string.lower() == 'list':
        # The special value "list" requests a timezone listing.
        self.list_timezones = True
    elif time_zone_string:
        try:
            pytz.timezone(time_zone_string)
        except pytz.UnknownTimeZoneError:
            raise errors.BadConfigOption(
                'Unknown time zone: {0:s}'.format(time_zone_string))
        self._preferred_time_zone = time_zone_string
Parses the timezone options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
codesearchnet
def turb45(msg):
    """Turbulence.

    Args:
        msg (String): 28 bytes hexadecimal message string.

    Returns:
        int: Turbulence level. 0=NIL, 1=Light, 2=Moderate, 3=Severe,
        or None when the status bit indicates the field is invalid.
    """
    bits = hex2bin(data(msg))
    # Leading status bit of '0' means the field is not available.
    if bits[0] == '0':
        return None
    return bin2int(bits[1:3])
Turbulence. Args: msg (String): 28 bytes hexadecimal message string Returns: int: Turbulence level. 0=NIL, 1=Light, 2=Moderate, 3=Severe
juraj-google-style
def set_property(self, name, value, update_session=True):
    """Create or set the value of a property.

    Args:
        name (str): Name of the property to create or update.
        value (any): Value of the property; datetimes are stored as
            ISO-formatted strings, anything else as-is.
        update_session (bool): Automatically add the change to the
            SQLAlchemy session. Default: True.

    Returns:
        bool: True if the property was created or updated, False if
        the stored value was already equal.
    """
    # isinstance instead of `type(value) == datetime` so datetime
    # subclasses are serialized too; the original no-op else branch
    # (`value = value`) has been removed.
    if isinstance(value, datetime):
        value = value.isoformat()
    try:
        prop = self.get_property(name)
        if prop.value == value:
            return False
        prop.value = value
    except AttributeError:
        # No existing property (get_property returned None) -> create one.
        prop = ResourceProperty()
        prop.resource_id = self.id
        prop.name = name
        prop.value = value
    if update_session:
        db.session.add(prop)
    return True
Create or set the value of a property. Returns `True` if the property was created or updated, or `False` if there were no changes to the value of the property. Args: name (str): Name of the property to create or update value (any): Value of the property. This can be any type of JSON serializable data update_session (bool): Automatically add the change to the SQLAlchemy session. Default: True Returns: `bool`
codesearchnet
def _on_connection_finished(self, result):
    """Callback invoked when a BLE connection attempt has finished.

    On success, registers the connection and starts probing its
    services; on failure, notifies the caller and decrements the
    in-flight connection counter.

    Args:
        result: raw result to be decoded by self._parse_return.
    """
    success, retval, context = self._parse_return(result)
    conn_id = context['connection_id']
    callback = context['callback']
    if success is False:
        callback(conn_id, self.id, False, 'Timeout opening connection')
        with self.count_lock:
            self.connecting_count -= 1
        return
    handle = retval['handle']
    # Record connection bookkeeping before kicking off service probing.
    context['disconnect_handler'] = self._on_connection_failed
    context['connect_time'] = time.time()
    context['state'] = 'preparing'
    self._connections[handle] = context
    self.probe_services(handle, conn_id, self._probe_services_finished)
Callback when the connection attempt to a BLE device has finished This function if called when a new connection is successfully completed Args: event (BGAPIPacket): Connection event
juraj-google-style
def get_rotation_matrix(axis, angle):
    """Returns the rotation matrix.

    This function returns a matrix for the counterclockwise rotation
    around the given axis. The input angle is in radians.

    Args:
        axis (vector): 3-element rotation axis.
        angle (float): rotation angle in radians.

    Returns:
        Rotation matrix (np.array).

    Raises:
        ValueError: if `axis` is not a 3-vector.
    """
    axis = np.asarray(axis)
    # BUG FIX: the original checked `np.array([1, 1, 1]).shape == (3,)`,
    # a constant that is always true, so malformed axes were never
    # rejected. Validate the actual input instead.
    if axis.shape != (3,):
        raise ValueError('axis.shape has to be 3')
    axis = normalize(axis)
    angle = float(angle)
    return _jit_get_rotation_matrix(axis, angle)
Returns the rotation matrix. This function returns a matrix for the counterclockwise rotation around the given axis. The Input angle is in radians. Args: axis (vector): angle (float): Returns: Rotation matrix (np.array):
juraj-google-style
def project_surface(surface, angle=DEFAULT_ANGLE):
    """Returns the height of the surface when projected at the given angle.

    Args:
        surface (surface): the surface to project.
        angle (float): the angle (degrees) at which to project.

    Returns:
        surface: A projected surface.
    """
    theta = np.radians(angle)
    height, width = surface.shape
    # Linear ramp from 0 to 1 down the rows, replicated across columns.
    ramp = np.tile(np.linspace(0., 1., height), [width, 1]).T
    return ramp * np.cos(theta) + surface * np.sin(theta)
Returns the height of the surface when projected at the given angle. Args: surface (surface): the surface to project angle (float): the angle at which to project the surface Returns: surface: A projected surface.
juraj-google-style
def parse_results_mol2(mol2_outpath): docked_ligands = pd.DataFrame() lines = [line.strip() for line in open(mol2_outpath, 'r')] props = {} for (i, line) in enumerate(lines): if line.startswith(' ligand = line.strip().strip(' line = lines[(i + 1)] props = {} props['Ligand'] = ligand if line.startswith(' splitter = line.strip().strip(' props[splitter[0]] = float(splitter[1]) if line.startswith('@<TRIPOS>MOLECULE'): if props: docked_ligands = docked_ligands.append(props, ignore_index=True) return docked_ligands
Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results. Args: mol2_outpath (str): Path to mol2 output file Returns: DataFrame: Pandas DataFrame of the results
codesearchnet
def set(self, *args, **kwargs):
    """Conveniently set one or more fields at a time.

    Args:
        *args: Optionally set from other objects; available fields from
            each passed object are used in order.
        **kwargs: Set from given key/value pairs (only names defined in
            __slots__ are used).
    """
    if args:
        for source in args:
            if source is not None:
                # Copy every declared field; missing ones become UNSET.
                for name in self.__slots__:
                    self._set(name, getattr(source, name, UNSET))
    for name in kwargs:
        self._set(name, kwargs.get(name, UNSET))
Conveniently set one or more fields at a time. Args: *args: Optionally set from other objects, available fields from the passed object are used in order **kwargs: Set from given key/value pairs (only names defined in __slots__ are used)
codesearchnet
def ParseOptions(cls, options, configuration_object):
    """Parses and validates options.

    Args:
        options (argparse.Namespace): parser options.
        configuration_object (CLITool): object to be configured by the
            argument helper.

    Raises:
        BadConfigObject: when the configuration object is of the wrong
            type.
    """
    if not isinstance(configuration_object, tools.CLITool):
        raise errors.BadConfigObject(
            'Configuration object is not an instance of CLITool')
    storage_file_path = cls._ParseStringOption(options, 'storage_file')
    setattr(configuration_object, '_storage_file_path', storage_file_path)
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
juraj-google-style
def delete_detector(self, detector_id, **kwargs):
    """Remove a detector.

    Args:
        detector_id (string): the ID of the detector.

    Returns:
        The HTTP response (raises on a non-2xx status).
    """
    endpoint = self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id)
    response = self._delete(endpoint, **kwargs)
    response.raise_for_status()
    return response
Remove a detector. Args: detector_id (string): the ID of the detector.
juraj-google-style
def _Open(self, path_spec=None, mode='rb'):
    """Opens the file-like object defined by path specification.

    Args:
        path_spec (PathSpec): path specification.
        mode (Optional[str]): file access mode.

    Raises:
        IOError: if the file-like object could not be opened.
        OSError: if the file-like object could not be opened.
        ValueError: if the path specification is invalid.
    """
    if not path_spec:
        raise ValueError('Missing path specification.')
    data_stream = getattr(path_spec, 'data_stream', None)
    file_system = resolver.Resolver.OpenFileSystem(
        path_spec, resolver_context=self._resolver_context)
    file_entry = file_system.GetFileEntryByPathSpec(path_spec)
    if not file_entry:
        file_system.Close()
        raise IOError('Unable to retrieve file entry.')
    tsk_file = file_entry.GetTSKFile()
    tsk_attribute = None
    # Validate that the underlying pytsk3 objects expose the structure
    # this reader depends on, closing the file system on any failure.
    if getattr(tsk_file, 'info', None) is None:
        file_system.Close()
        raise IOError('Missing attribute info in file (pytsk3.File).')
    if getattr(tsk_file.info, 'meta', None) is None:
        file_system.Close()
        raise IOError(
            'Missing attribute meta in file.info pytsk3.TSK_FS_FILE).')
    if not hasattr(tsk_file.info.meta, 'size'):
        file_system.Close()
        raise IOError(
            'Missing attribute size in file.info.meta (pytsk3.TSK_FS_META).')
    if not hasattr(tsk_file.info.meta, 'type'):
        file_system.Close()
        raise IOError(
            'Missing attribute type in file.info.meta (pytsk3.TSK_FS_META).')
    if data_stream:
        # Search the file's attributes for the named data stream
        # (HFS default/data or NTFS data attribute types).
        for attribute in tsk_file:
            if getattr(attribute, 'info', None) is None:
                continue
            attribute_name = getattr(attribute.info, 'name', None)
            if attribute_name is None:
                attribute_name = ''
            else:
                try:
                    attribute_name = attribute_name.decode('utf8')
                except UnicodeError:
                    # Attributes with undecodable names cannot match.
                    continue
            attribute_type = getattr(attribute.info, 'type', None)
            if attribute_name == data_stream and attribute_type in (
                pytsk3.TSK_FS_ATTR_TYPE_HFS_DEFAULT,
                pytsk3.TSK_FS_ATTR_TYPE_HFS_DATA,
                pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA):
                tsk_attribute = attribute
                break
        if tsk_attribute is None:
            file_system.Close()
            raise IOError('Unable to open data stream: {0:s}.'.format(data_stream))
    if (not tsk_attribute and
        tsk_file.info.meta.type != pytsk3.TSK_FS_META_TYPE_REG):
        file_system.Close()
        raise IOError('Not a regular file.')
    self._current_offset = 0
    self._file_system = file_system
    self._tsk_attribute = tsk_attribute
    self._tsk_file = tsk_file
    # The size comes from the selected data stream when one was opened,
    # otherwise from the file metadata.
    if self._tsk_attribute:
        self._size = self._tsk_attribute.info.size
    else:
        self._size = self._tsk_file.info.meta.size
Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Assigns `tf.IndexedSlices` to this variable.

    Args:
        sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
        use_locking: If `True`, use locking during the operation.
        name: the name of the operation.

    Returns:
        The updated variable.

    Raises:
        NotImplementedError: always; concrete subclasses must override.
    """
    # Abstract method: implemented by concrete variable classes.
    raise NotImplementedError
Assigns `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.
github-repos
def filter_keys_by_dataset_id(did, key_container):
    """Filter the provided key iterable by the provided `DatasetID`.

    Note:
        The `modifiers` attribute of `did` should be `None` to allow
        for **any** modifier in the results.

    Args:
        did (DatasetID): Query parameters to match in `key_container`.
        key_container (iterable): Set, list, tuple, or dict of
            `DatasetID` keys.

    Returns:
        list: keys matching the provided parameters, in no specific
        order.
    """
    keys = iter(key_container)
    for attr in DATASET_KEYS:
        wanted = getattr(did, attr)
        if wanted is None:
            continue
        if attr == 'wavelength':
            # Wavelengths match on a range, not strict equality.
            keys = [k for k in keys
                    if getattr(k, attr) is not None and
                    DatasetID.wavelength_match(getattr(k, attr), wanted)]
        else:
            keys = [k for k in keys
                    if getattr(k, attr) is not None and
                    getattr(k, attr) == wanted]
    return keys
Filer provided key iterable by the provided `DatasetID`. Note: The `modifiers` attribute of `did` should be `None` to allow for **any** modifier in the results. Args: did (DatasetID): Query parameters to match in the `key_container`. key_container (iterable): Set, list, tuple, or dict of `DatasetID` keys. Returns (list): List of keys matching the provided parameters in no specific order.
codesearchnet
def build_rank_score_dict(rank_scores):
    """Build a mapping from family id to rank score.

    Args:
        rank_scores: A list on the form ['1:12', '2:20'].

    Returns:
        dict: family id -> score, e.g. {'1': '12', '2': '20'}.

    Raises:
        SyntaxError: if an entry is malformed (no 'family:score' pair).
    """
    logger = getLogger(__name__)
    logger.debug("Checking rank scores: {0}".format(rank_scores))
    scores = {}
    for family in rank_scores:
        entry = family.split(':')
        try:
            family_id = entry[0]
            logger.debug("Extracting rank score for family:{0}".format(family_id))
            score = entry[1]
            logger.debug("Score:{0}".format(score))
        except Exception as err:
            # Chain the original cause so the malformed entry is debuggable.
            raise SyntaxError("Malformed rank score input") from err
        scores[family_id] = score
    return scores
Take a list with annotated rank scores for each family and returns a dictionary with family_id as key and a list of genetic models as value. Args: rank_scores : A list on the form ['1:12','2:20'] Returns: scores : A dictionary with family id:s as key and scores as value { '1':'12', '2':'20' }
juraj-google-style
def _parse(json_str: str, primitive_cls: Type[Base64Binary], *, separator_stride_cls: Type[SeparatorStride]) -> Base64Binary:
    """Parses json_str into a Base64Binary FHIR primitive protobuf message.

    Args:
        json_str: The raw JSON string to parse.
        primitive_cls: The type of FHIR primitive to parse into.
        separator_stride_cls: The type of Base64BinarySeparatorStride
            extension associated with primitive_cls.

    Returns:
        A FHIR primitive Base64Binary protobuf message.

    Raises:
        fhir_errors.InvalidFhirError: if json_str is not a valid
            base64-encoded string.
    """
    result = primitive_cls()
    stride = json_str.find(' ')
    if stride != -1:
        # Measure the run of spaces that forms the separator, record it
        # as a separator/stride extension, then strip it from the data.
        end = stride
        while end < len(json_str) and json_str[end] == ' ':
            end += 1
        separator = json_str[stride:end]
        stride_extension = cast(Any, separator_stride_cls())
        stride_extension.separator.value = separator
        stride_extension.stride.value = stride
        extensions.add_message_to_extension(
            stride_extension, result.extension.add())
        json_str = json_str.replace(separator, '')
    try:
        result.value = base64.b64decode(json_str, validate=True)
    except binascii.Error as e:
        raise fhir_errors.InvalidFhirError('Invalid base64-encoded string.') from e
    return result
Parses the json_str into a Base64Binary FHIR primitive protobuf message. Args: json_str: The raw JSON string to parse. primitive_cls: The type of FHIR primitive to parse into. separator_stride_cls: The type of Base64BinarySeparatorStride extension associated with primitive_cls. Returns: A FHIR primitive Base64Binary protobuf message. Raises: fhir_errors.InvalidFhirError: In the event that the provided json_str is not a valid base64-encoded string.
github-repos
def ReadFileObject(self, definitions_registry, file_object):
    """Reads data type definitions from a file-like object into the registry.

    Args:
        definitions_registry (DataTypeDefinitionsRegistry): data type
            definitions registry.
        file_object (file): file-like object to read from.

    Raises:
        FormatError: if the definitions values are missing or if the format
            is incorrect.
    """
    last_definition_object = None
    error_location = None
    error_message = None
    try:
        # The file may contain multiple YAML documents; each one describes a
        # single data type definition.
        yaml_generator = yaml.safe_load_all(file_object)
        for yaml_definition in yaml_generator:
            definition_object = self._ReadDefinition(definitions_registry, yaml_definition)
            if (not definition_object):
                error_location = self._GetFormatErrorLocation(yaml_definition, last_definition_object)
                error_message = '{0:s} Missing definition object.'.format(error_location)
                raise errors.FormatError(error_message)
            definitions_registry.RegisterDefinition(definition_object)
            # Remember the last good definition to improve error locations.
            last_definition_object = definition_object
    except errors.DefinitionReaderError as exception:
        error_message = 'in: {0:s} {1:s}'.format((exception.name or '<NAMELESS>'), exception.message)
        raise errors.FormatError(error_message)
    except (yaml.reader.ReaderError, yaml.scanner.ScannerError) as exception:
        # YAML syntax errors are normalized to FormatError as well.
        error_location = self._GetFormatErrorLocation({}, last_definition_object)
        error_message = '{0:s} {1!s}'.format(error_location, exception)
        raise errors.FormatError(error_message)
Reads data type definitions from a file-like object into the registry. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. file_object (file): file-like object to read from. Raises: FormatError: if the definitions values are missing or if the format is incorrect.
codesearchnet
def insert_arguments_into_match_query(compilation_result, arguments):
    """Produce a complete MATCH query by inserting the given arguments.

    Args:
        compilation_result: a CompilationResult from the GraphQL compiler.
        arguments: dict mapping argument name to value for every parameter
            the compiled query expects.

    Returns:
        string, a MATCH query with the sanitized argument data inserted.
    """
    if compilation_result.language != MATCH_LANGUAGE:
        raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))
    argument_types = compilation_result.input_metadata
    sanitized = {}
    for name, value in six.iteritems(arguments):
        # Each value is escaped/converted according to its declared type.
        sanitized[name] = _safe_match_argument(argument_types[name], value)
    return compilation_result.query.format(**sanitized)
Insert the arguments into the compiled MATCH query to form a complete query. Args: compilation_result: a CompilationResult object derived from the GraphQL compiler arguments: dict, mapping argument name to its value, for every parameter the query expects. Returns: string, a MATCH query with inserted argument data
codesearchnet
def requested_packages(self, include_implicit=False):
    """Get the packages in the request.

    Args:
        include_implicit (bool): If True, implicit packages are appended
            to the result.

    Returns:
        List of `PackageRequest` objects.
    """
    if not include_implicit:
        return self._package_requests
    return self._package_requests + self.implicit_packages
Get packages in the request. Args: include_implicit (bool): If True, implicit packages are appended to the result. Returns: List of `PackageRequest` objects.
codesearchnet
def _convert_type(self, t, as_instance=False):
    """Convenience helper for turning a type string into an abstract value.

    Cannot be called twice in one test with the same arguments, since the
    arguments are hashed into a temporary pyi filename.

    NOTE(review): the pyi template string below appears truncated (the
    f-string is not terminated) in this copy of the file -- confirm the
    full template against the original source before editing.

    Args:
        t: The string representation of a type.
        as_instance: Whether to convert as an instance.

    Returns:
        A BaseValue.
    """
    src = textwrap.dedent(f'\n        from typing import Any, Callable, Iterator, Tuple, Type, Union\n        from protocols import Sequence, SupportsLower\n        x = ...
    # Hash the arguments to get a unique filename per (t, as_instance) pair.
    filename = str(hash((t, as_instance)))
    x = self._parse_and_lookup(src, 'x', filename).type
    if as_instance:
        x = abstract_utils.AsInstance(x)
    return self.ctx.convert.constant_to_value(x, {}, self.ctx.root_node)
Convenience function for turning a string into an abstract value. Note that this function cannot be called more than once per test with the same arguments, since we hash the arguments to get a filename for the temporary pyi. Args: t: The string representation of a type. as_instance: Whether to convert as an instance. Returns: A BaseValue.
github-repos
def erfinv(x, name='erfinv'):
    """The inverse function for erf, the error function.

    Args:
        x: `Tensor` of type `float32` or `float64`.
        name: Python string. A name for the operation (default="erfinv").

    Returns:
        `Tensor` with `dtype=x.dtype` containing erfinv(x).

    Raises:
        TypeError: if `x` is not float32 or float64.
    """
    with tf.name_scope(name):
        x = tf.convert_to_tensor(value=x, name='x')
        if (dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]):
            raise TypeError('x.dtype={} is not handled, see docstring for supported types.'.format(dtype_util.name(x.dtype)))
        # erfinv(x) = ndtri((x + 1) / 2) / sqrt(2): map through the
        # standard normal quantile function.
        return (ndtri(((x + 1.0) / 2.0)) / np.sqrt(2.0))
The inverse function for erf, the error function. Args: x: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default="erfinv"). Returns: x: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x` is not floating-type.
codesearchnet
def custom(colors, bins=None, bin_method=BinMethod.quantiles):
    """Create a custom styling scheme.

    Args:
        colors (list of str): Hex values for styling data.
        bins (int, optional): Number of bins to style by. Defaults to the
            number of colors.
        bin_method (str, optional): Classification method, one of the
            :obj:`BinMethod` values. Defaults to quantiles (quantitative
            data only).

    Returns:
        dict: Scheme with ``colors``, ``bins`` and ``bin_method`` keys.
    """
    if bins is None:
        bins = len(colors)
    return {'colors': colors, 'bins': bins, 'bin_method': bin_method}
Create a custom scheme. Args: colors (list of str): List of hex values for styling data bins (int, optional): Number of bins to style by. If not given, the number of colors will be used. bin_method (str, optional): Classification method. One of the values in :obj:`BinMethod`. Defaults to `quantiles`, which only works with quantitative data.
codesearchnet
def matmul(self, matmul_input: core.Tensor) -> Mapping[str, core.Tensor]:
    """Performs a matrix multiplication with the instance's filter.

    Args:
        matmul_input: Input tensor to matmul with `self.matmul_filters`.

    Returns:
        A map of: output key -> matmul result.
    """
    out = math_ops.matmul(matmul_input, self.matmul_filters)
    return {'output': out}
Performs a matrix multiplication. Args: matmul_input: Input tensor to matmul with the filter. Returns: A map of: output key -> output result.
github-repos
def lines_from_stream(f, as_interned=False):
    """Create a list of file lines from a given file stream.

    Args:
        f (io.TextIOWrapper): File stream.
        as_interned (bool): Return "interned" strings (default False).

    Returns:
        list: File lines.
    """
    # The stream is consumed exactly once regardless of interning.
    lines = f.read().splitlines()
    if as_interned:
        return [sys.intern(line) for line in lines]
    return lines
Create a list of file lines from a given file stream. Args: f (io.TextIOWrapper): File stream as_interned (bool): List of "interned" strings (default False) Returns: strings (list): File line list
codesearchnet
def _tensor_name_base(full_tensor_name): if full_tensor_name.startswith('^'): return full_tensor_name[1:] return full_tensor_name.split(':')[0]
Removes the device assignment code from a tensor. e.g. _tensor_name_base("foo:3") => "foo" Args: full_tensor_name: A tensor name that is annotated with a device placement (this is what tensor flow introspection gives). Returns: A name without any device assignment.
github-repos
def _StructPackDecoder(wire_type, format):
    """Return a constructor for a decoder for a fixed-width field.

    Args:
        wire_type: The field's wire type.
        format: The format string to pass to struct.unpack().
    """
    value_size = struct.calcsize(format)
    # Bind the unpack function locally so the inner decoder avoids a
    # module attribute lookup on every call.
    unpack = struct.unpack

    def InnerDecode(buffer, pos):
        end = pos + value_size
        (result,) = unpack(format, buffer[pos:end])
        return (result, end)
    return _SimpleDecoder(wire_type, InnerDecode)
Return a constructor for a decoder for a fixed-width field. Args: wire_type: The field's wire type. format: The format string to pass to struct.unpack().
codesearchnet
def parse_storage_size(storage_size):
    """Parse a storage/memory size expression into a byte count.

    Args:
        storage_size(str): Size expression. The units ``k`` (kibibytes),
            ``m`` (mebibytes) and ``g`` (gibibytes) are supported, so
            ``1g`` equates to 2**30 bytes.

    Returns:
        int: Number of bytes.

    Raises:
        ValueError: If the expression cannot be parsed.
    """
    match = re.fullmatch(r'^([0-9]+(\.[0-9]+)?)([gmk])?$', str(storage_size), re.I)
    if match is None:
        raise ValueError('Invalid partition size: {0}'.format(storage_size))
    number, _, unit = match.groups()
    if unit is None:
        return int(float(number))
    multipliers = {'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3}
    return int(float(number) * multipliers[unit.lower()])
Parses an expression that represents an amount of storage/memory and returns the number of bytes it represents. Args: storage_size(str): Size in bytes. The units ``k`` (kibibytes), ``m`` (mebibytes) and ``g`` (gibibytes) are supported, i.e. a ``partition_size`` of ``1g`` equates :math:`2^{30}` bytes. Returns: int: Number of bytes.
juraj-google-style
def _VerifyRecord(self, pls_record):
    """Verifies a PLS Recall record.

    Args:
        pls_record (pls_recall_record): a PLS Recall record to verify.

    Returns:
        bool: True if this is a valid PLS Recall record, False otherwise.
    """
    # Reject records whose timestamp is more than ~6 years in the future.
    future_timestamp = (timelib.Timestamp.GetNow() + self._SIX_YEARS_IN_MICRO_SECONDS)
    if (pls_record.last_written_time > future_timestamp):
        return False
    # The query must begin with a known PL/SQL keyword.
    (first_word, _, _) = pls_record.query.partition(' ')
    if (first_word.lower() not in self._PLS_KEYWORD):
        return False
    return True
Verifies a PLS Recall record. Args: pls_record (pls_recall_record): a PLS Recall record to verify. Returns: bool: True if this is a valid PLS Recall record, False otherwise.
codesearchnet
def ParseMessagesRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a messages row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    event_data = HangoutsMessageData()
    event_data.sender = self._GetRowValue(query_hash, row, 'full_name')
    event_data.body = self._GetRowValue(query_hash, row, 'text')
    event_data.offset = self._GetRowValue(query_hash, row, '_id')
    event_data.query = query
    event_data.message_status = self._GetRowValue(query_hash, row, 'status')
    event_data.message_type = self._GetRowValue(query_hash, row, 'type')
    # Timestamps are treated as POSIX time in microseconds.
    timestamp = self._GetRowValue(query_hash, row, 'timestamp')
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an Messages row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
class FalconMambaOutput(ModelOutput):
    """Class for the FalconMamba model outputs."""

    # Sequence of hidden states at the output of the last layer, of shape
    # (batch_size, sequence_length, hidden_size).
    last_hidden_state: Optional[torch.FloatTensor] = None
    # Model cache state at the last time step; can be passed back in with
    # the next input_ids to avoid re-providing old input_ids.
    cache_params: Optional[MambaCache] = None
    # Per-layer hidden states (embeddings output plus one entry per layer);
    # only populated when output_hidden_states=True.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
Class for the FALCONMAMBA model outputs. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. cache_params (`MambaCache`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. Includes both the State space model state matrices after the selective scan, and the Convolutional states hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
github-repos
def callback_trigger(msg, arg):
    """Called when the kernel is done scanning; records success or failure.

    Args:
        msg: nl_msg class instance containing the data sent by the kernel.
        arg: mutable integer (ctypes.c_int()) updated with the result:
            1 if the scan was aborted, 0 if new scan results are available.

    Returns:
        An integer, NL_SKIP, telling libnl to stop calling other callbacks
        for this message and proceed with the next kernel message.
    """
    gnlh = genlmsghdr(nlmsg_data(nlmsg_hdr(msg)))
    if (gnlh.cmd == nl80211.NL80211_CMD_SCAN_ABORTED):
        arg.value = 1
    elif (gnlh.cmd == nl80211.NL80211_CMD_NEW_SCAN_RESULTS):
        arg.value = 0
    return libnl.handlers.NL_SKIP
Called when the kernel is done scanning. Only signals if it was successful or if it failed. No other data. Positional arguments: msg -- nl_msg class instance containing the data sent by the kernel. arg -- mutable integer (ctypes.c_int()) to update with results. Returns: An integer, value of NL_SKIP. It tells libnl to stop calling other callbacks for this message and proceed with processing the next kernel message.
codesearchnet
def _UpdateYear(self, mediator, month): if (not self._year_use): self._year_use = mediator.GetEstimatedYear() if (not self._maximum_year): self._maximum_year = mediator.GetLatestYear() if (not self._last_month): self._last_month = month return if (self._last_month > (month + 1)): if (self._year_use != self._maximum_year): self._year_use += 1 self._last_month = month
Updates the year to use for events, based on last observed month. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. month (int): month observed by the parser, where January is 1.
codesearchnet
def positions(self, account: str='') -> List[Position]:
    """List positions for one account, or for all accounts.

    Args:
        account: If specified, filter for this account name; when left
            blank, positions from every account are returned.
    """
    if not account:
        return [
            position
            for per_account in self.wrapper.positions.values()
            for position in per_account.values()
        ]
    return list(self.wrapper.positions[account].values())
List of positions for the given account, or of all accounts if account is left blank. Args: account: If specified, filter for this account name.
codesearchnet
def end_run_group(group, session):
    """End the run_group successfully and persist the change.

    Args:
        group: The run_group we want to complete.
        session: The database transaction we will finish.
    """
    from datetime import datetime
    # Stamp the end time, then mark the group completed, then commit.
    group.end, group.status = datetime.now(), 'completed'
    session.commit()
End the run_group successfully. Args: group: The run_group we want to complete. session: The database transaction we will finish.
juraj-google-style
def port_tag_details(cls, tags):
    """Search tags for port info, returning it.

    Args:
        tags: A list of tags to check.

    Returns:
        None, or (is_source, port, connected_value|disconnected_value)
        where port is one of the Enum entries of Port.
    """
    for tag in tags:
        match = port_tag_re.match(tag)
        if not match:
            continue
        source_sink, port, extra = match.groups()
        is_source = source_sink == 'source'
        # First matching tag wins.
        return is_source, cls(port), extra
Search tags for port info, returning it Args: tags: A list of tags to check Returns: None or (is_source, port, connected_value|disconnected_value) where port is one of the Enum entries of Port
codesearchnet
def to_dataframe(self, view: views.View, limit: Optional[int]=None) -> pandas.DataFrame:
    """Returns a Pandas dataframe of the results, if Pandas is installed.

    Args:
        view: the view that defines the query to run.
        limit: optional limit of the number of items to return.

    Returns:
        pandas.DataFrame: dataframe of the view contents.

    Raises:
        ValueError propagated from the BigQuery client if pandas is not
        installed.
    """
    df = self.run_query(view, limit).result().to_dataframe()
    # Normalize column values against the view's declared return types.
    return runner_utils.clean_dataframe(df, view.get_select_columns_to_return_type())
Returns a Pandas dataframe of the results, if Pandas is installed. Args: view: the view that defines the query to run. limit: optional limit of the number of items to return. Returns: pandas.DataFrame: dataframe of the view contents. Raises: ValueError propagated from the BigQuery client if pandas is not installed.
github-repos
def get_attribute_list(self, uid=None):
    """Send a GetAttributeList request to the server.

    Args:
        uid (string): The ID of the managed object with which the
            retrieved attribute names should be associated.

    Returns:
        GetAttributeListResult: the result of the single batch item.
    """
    batch_item = self._build_get_attribute_list_batch_item(uid)
    request = self._build_request_message(None, [batch_item])
    response = self._send_and_receive_message(request)
    # A single batch item was sent, so only the first result matters.
    return self._process_batch_items(response)[0]
Send a GetAttributeList request to the server. Args: uid (string): The ID of the managed object with which the retrieved attribute names should be associated. Returns: result (GetAttributeListResult): A structure containing the results of the operation.
juraj-google-style
def __init__(self, sizes, scope='mlp-baseline', summary_labels=()):
    """Multi-layer perceptron baseline.

    Args:
        sizes: List of dense layer sizes.
        scope: TensorFlow variable scope name for the baseline network.
        summary_labels: Labels of summaries to record.
    """
    # One dense layer spec per requested size.
    network = [dict(type='dense', size=size) for size in sizes]
    super(MLPBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels)
Multi-layer perceptron baseline. Args: sizes: List of dense layer sizes
juraj-google-style
def get_factors(dividend: int) -> Set[int]: factors_set = set() for i in range(1, int(dividend ** 0.5) + 1): if dividend % i == 0: factors_set.add(i) factors_set.add(dividend return factors_set
Calculate all factors of a given number, i.e. a divisor that leaves no remainder. For example, if dividend=12, it will return {1, 2, 3, 4, 6, 12}. Args: dividend (int): The number to find factors for. Returns: set: A set containing all factors of the number.
github-repos
def session_creator(self, scaffold=None, config=None, checkpoint_dir=None, checkpoint_filename_with_path=None, max_wait_secs=7200):
    """Returns a session creator bound to the correct master target.

    The returned creator is configured with the merged session config and
    will run either init ops (chief) or ready ops (worker) when
    `create_session` is called on it, depending on what the strategy
    reports.

    Args:
        scaffold: A `Scaffold` used for gathering or building supportive
            ops. If not specified a default one is created. It's used to
            finalize the graph.
        config: `ConfigProto` proto used to configure the session.
        checkpoint_dir: A string. Optional path to a directory where to
            restore variables.
        checkpoint_filename_with_path: Full file name path to the
            checkpoint file. Only one of `checkpoint_dir` or
            `checkpoint_filename_with_path` can be specified.
        max_wait_secs: Maximum time to wait for the session to become
            available.

    Returns:
        a descendant of SessionCreator.
    """
    if config:
        # Merge the coordinator's config into a copy of the caller's so the
        # caller's proto is left untouched.
        session_config = copy.deepcopy(config)
        session_config.MergeFrom(self._session_config)
    else:
        session_config = self._session_config
    if not self._strategy or self._strategy.extended.experimental_should_init:
        # Chief path (or no strategy): responsible for initialization.
        logging.info('Creating chief session creator with config: %r', config)
        return monitored_session.ChiefSessionCreator(scaffold, master=self.master_target, config=session_config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path)
    else:
        # Worker path: waits for the chief to initialize variables.
        logging.info('Creating worker session creator with config: %r', config)
        return monitored_session.WorkerSessionCreator(scaffold, master=self.master_target, config=session_config, max_wait_secs=max_wait_secs)
Returns a session creator. The returned session creator will be configured with the correct master target and session configs. It will also run either init ops or ready ops by querying the `strategy` object when `create_session` is called on it. Args: scaffold: A `Scaffold` used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. config: `ConfigProto` proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. checkpoint_filename_with_path: Full file name path to the checkpoint file. Only one of `checkpoint_dir` or `checkpoint_filename_with_path` can be specified. max_wait_secs: Maximum time to wait for the session to become available. Returns: a descendant of SessionCreator.
github-repos
def pprint_value_string(self, value):
    """Pretty print the dimension value and unit.

    Args:
        value: Dimension value to format.

    Returns:
        Formatted dimension value string with unit.
    """
    if self.unit is None:
        unit = ''
    else:
        unit = ' ' + bytes_to_unicode(self.unit)
    formatted = self.pprint_value(value)
    return title_format.format(name=bytes_to_unicode(self.label), val=formatted, unit=unit)
Pretty print the dimension value and unit. Args: value: Dimension value to format Returns: Formatted dimension value string with unit
codesearchnet
def parse_brome_config_from_browser_config(browser_config):
    """Extract brome-specific config entries from a browser config.

    Keys of the form ``'section:option'`` are grouped into nested dicts;
    all other keys are ignored.

    Args:
        browser_config (dict): browser configuration to scan.

    Returns:
        dict: ``{section: {option: value}}`` for every ``section:option``
        key found.
    """
    config = {}
    for key, value in browser_config.items():
        if ':' not in key:
            continue
        section, option = key.split(':')
        config.setdefault(section, {})[option] = value
    return config
Parse the browser config and look for brome specific config Args: browser_config (dict)
codesearchnet
def set_xla_env_flag(flag: str='') -> Callable[[_F], _F]:
    """Decorator factory that sets XLA_FLAGS around a test method.

    The configured flag is prepended to any existing XLA_FLAGS value for
    the duration of the decorated call, then the environment is restored
    to exactly its prior state.

    Example:

      class MyTest(test.TestCase):

        @set_xla_env_flag(flag='--xla_gpu_enable_fast_min_max=false')
        def testFoo(self):
          ...

    Args:
        flag: The xla flag to be set in the XLA_FLAGS env variable.

    Returns:
        A decorator which sets the configured flag in XLA_FLAGS for the
        decorated function.
    """

    def decorator(f: _F) -> _F:

        @functools.wraps(f)
        def decorated(*args, **kwargs):
            saved = os.environ.get('XLA_FLAGS')
            # Prepend our flag; keep any pre-existing flags after it.
            os.environ['XLA_FLAGS'] = flag + ' ' + saved if saved else flag
            try:
                return f(*args, **kwargs)
            finally:
                # Restore the environment exactly as it was.
                if saved is None:
                    del os.environ['XLA_FLAGS']
                else:
                    os.environ['XLA_FLAGS'] = saved
        return decorated
    return decorator
Decorator for setting XLA_FLAGS prior to running a test. This function returns a decorator intended to be applied to test methods in a `tf.test.TestCase` class. Doing so will allow users to set any xla flags exposed via the XLA_FLAGS environment variable, execute the test, then reset the XLA_FLAGS to the state it was in prior to this test. Example: class MyTest(test.TestCase): @set_xla_env_flag(flag='--xla_gpu_enable_fast_min_max=false') def testFoo(self): ... Args: flag: The xla flag to be set in the XLA_FLAGS env variable. Returns: A decorator which sets the configured flag in XLA_FLAGS for the decorated function.
github-repos
def set_datastore_policy(self, func):
    """Set the context datastore policy function.

    Args:
        func: A function that accepts a Key instance as argument and
            returns a bool indicating if it should use the datastore.
            May also be a plain bool (treated as a constant policy) or
            None (falls back to the default policy).
    """
    if isinstance(func, bool):
        # Wrap a constant flag in a callable with the expected signature.
        constant = func

        def func(unused_key, flag=constant):
            return flag
    elif func is None:
        func = self.default_datastore_policy
    self._datastore_policy = func
Set the context datastore policy function. Args: func: A function that accepts a Key instance as argument and returns a bool indicating if it should use the datastore. May be None.
juraj-google-style
def GetBatchJobHelper(self, version=sorted(_SERVICE_MAP.keys())[(- 1)], server=None):
    """Returns a BatchJobHelper to work with the BatchJobService.

    This is a convenience method, functionally identical to calling
    BatchJobHelper(adwords_client, version).

    Args:
        [optional]
        version: A string identifying the AdWords version to connect to;
            defaults to the latest known version.
        server: A string identifying the webserver hosting the AdWords
            API; defaults to the standard endpoint.

    Returns:
        An initialized BatchJobHelper tied to this client.
    """
    server = server or _DEFAULT_ENDPOINT
    request_builder = BatchJobHelper.GetRequestBuilder(self, version=version, server=server)
    response_parser = BatchJobHelper.GetResponseParser()
    return BatchJobHelper(request_builder, response_parser)
Returns a BatchJobHelper to work with the BatchJobService. This is a convenience method. It is functionally identical to calling BatchJobHelper(adwords_client, version). Args: [optional] version: A string identifying the AdWords version to connect to. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. server: A string identifying the webserver hosting the AdWords API. Returns: An initialized BatchJobHelper tied to this client.
codesearchnet
def write_examples(fp, examples):
    """Write a text-format representation of a list of examples.

    The format is csv-like with a simple repeated pattern (kept for
    compatibility with an existing Android consumer, so the emitted bytes
    must not change).

    Args:
        fp: File-like object to write to.
        examples: Example dictionaries with keys "inputs" and "outputs".
    """

    def _dump_tensor(out, name, tensor):
        # One record per tensor: name, dtype, shape, then values.
        out.write('name,%s\n' % name)
        out.write('dtype,%s\n' % tensor.dtype)
        shape_csv = ','.join(map(str, tensor.shape))
        out.write('shape,' + shape_csv + '\n')
        out.write('values,' + format_result(tensor) + '\n')

    fp.write('test_cases,%d\n' % len(examples))
    for example in examples:
        inputs = example['inputs']
        fp.write('inputs,%d\n' % len(inputs))
        for name, value in inputs.items():
            if value is not None:
                _dump_tensor(fp, name, value)
        outputs = example['outputs']
        fp.write('outputs,%d\n' % len(outputs))
        for name, value in outputs.items():
            _dump_tensor(fp, name, value)
Given a list `examples`, write a text format representation. The file format is csv like with a simple repeated pattern. We would ike to use proto here, but we can't yet due to interfacing with the Android team using this format. Args: fp: File-like object to write to. examples: Example dictionary consisting of keys "inputs" and "outputs"
github-repos
def __init__(self, encoding='utf-8'):
    """Initializes an stdin input reader.

    Args:
        encoding (Optional[str]): input encoding.
    """
    super(StdinInputReader, self).__init__(sys.stdin, encoding=encoding)
Initializes an stdin input reader. Args: encoding (Optional[str]): input encoding.
juraj-google-style
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
    """Creates TFGPT2Tokenizer from a pretrained GPT2Tokenizer.

    Args:
        pretrained_model_name_or_path (Union[str, os.PathLike]): Path to
            pretrained model.

    Examples:

    ```python
    from transformers import TFGPT2Tokenizer

    tf_tokenizer = TFGPT2Tokenizer.from_pretrained("openai-community/gpt2")
    ```
    """
    # Load the slow tokenizer first, then wrap it.
    slow_tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
    return cls.from_tokenizer(slow_tokenizer, *init_inputs, **kwargs)
Creates TFGPT2Tokenizer from pretrained GPT2Tokenizer Args: pretrained_model_name_or_path (Union[str, os.PathLike]): Path to pretrained model Examples: ```python from transformers import TFGPT2Tokenizer tf_tokenizer = TFGPT2Tokenizer.from_pretrained("openai-community/gpt2") ```
github-repos
def _get_prefix_length(number1, number2, bits): for i in range(bits): if ((number1 >> i) == (number2 >> i)): return (bits - i) return 0
Get the number of leading bits that are same for two numbers. Args: number1: an integer. number2: another integer. bits: the maximum number of bits to compare. Returns: The number of leading bits that are the same for two numbers.
codesearchnet
def tensor_equals(self, other):
    """The operation invoked by the `Tensor.__eq__` operator.

    Compares two tensors element-wise for equality if they are
    broadcast-compatible, or returns False if they are not (unlike
    `tf.math.equal`, which raises in that case).

    Args:
        self: The left-hand side of the `==` operator.
        other: The right-hand side of the `==` operator.

    Returns:
        The result of the elementwise `==` operation, or `False` if the
        arguments are not broadcast-compatible; in legacy graph-building
        mode, a Python bool identity comparison instead.
    """
    if other is None:
        return False
    g = getattr(self, 'graph', None)
    if tensor_lib.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and (g is None or g.building_function):
        self, other = override_binary_operator.maybe_promote_tensors(self, other)
        # incompatible_shape_error=False makes mismatched shapes yield
        # False instead of raising.
        return gen_math_ops.equal(self, other, incompatible_shape_error=False)
    else:
        # Legacy graph mode keeps identity semantics so Tensors stay
        # usable as dict keys.
        return self is other
The operation invoked by the `Tensor.__eq__` operator. Compares two tensors element-wise for equality if they are broadcast-compatible; or returns False if they are not broadcast-compatible. (Note that this behavior differs from `tf.math.equal`, which raises an exception if the two tensors are not broadcast-compatible.) Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for `Tensor.__eq__` to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and does appear in TensorFlow's generated documentation. Args: self: The left-hand side of the `==` operator. other: The right-hand side of the `==` operator. Returns: The result of the elementwise `==` operation, or `False` if the arguments are not broadcast-compatible.
github-repos
def __getitem__(self, key: InstanceKey) -> "InstanceNode":
    """Return member or entry with the given key.

    Args:
        key: Entry index (for an array) or member name (for an object).

    Raises:
        InstanceValueError: If the receiver's value is a scalar (neither
            an object nor an array).
    """
    if isinstance(self.value, ObjectValue):
        return self._member(key)
    if isinstance(self.value, ArrayValue):
        return self._entry(key)
    raise InstanceValueError(self.json_pointer(), "scalar instance")
Return member or entry with the given key. Args: key: Entry index (for an array) or member name (for an object). Raises: NonexistentInstance: If receiver's value doesn't contain member `name`. InstanceValueError: If the receiver's value is not an object.
juraj-google-style
def print_test_summary(self, executed_tests):
    """Print a summary of the finished test batch.

    Logs totals, total execution time, the list of failed tests, then a
    per-test section (driver id, execution time, result summary, crash
    info) for each executed test.

    Args:
        executed_tests (list): test objects that were run in this batch.
    """
    separator = '---------------------'
    with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session:
        test_batch = session.query(Testbatch).filter(Testbatch.mongo_id == self.test_batch_id).one()
        self.info_log('******* TEST BATCH SUMMARY ********')
        # Overall pass/fail counts for this batch.
        base_query = session.query(Testresult).filter(Testresult.test_batch_id == self.test_batch_id)
        total_test = base_query.count()
        total_test_successful = base_query.filter(Testresult.result == True).count()
        base_query = session.query(Testresult).filter(Testresult.test_batch_id == self.test_batch_id)
        total_test_failed = base_query.filter(Testresult.result == False).count()
        self.info_log(
            'Total_test: %s; Total_test_successful: %s; Total_test_failed: %s'
            % (total_test, total_test_successful, total_test_failed)
        )
        self.info_log(
            "Total execution time: %s"
            % (test_batch.ending_timestamp - test_batch.starting_timestamp)
        )
        self.info_log(separator)
        self.info_log('Failed tests:')
        # Deduplicate failed results by title before logging them.
        failed_test_list = []
        test_results = session.query(Testresult)\
            .filter(Testresult.result == False)\
            .filter(Testresult.test_batch_id == self.test_batch_id).all()
        for test_result in test_results:
            if test_result.title not in failed_test_list:
                failed_test_list.append(test_result.title)
                query = session.query(Test)\
                    .filter(Test.mongo_id == test_result.test_id)
                if query.count():
                    test = query.one()
                    self.info_log(
                        "[%s] %s"
                        % (test.test_id, test.name)
                    )
                else:
                    # Result has no matching Test record; log by title.
                    self.info_log(
                        "[noid] %s"
                        % (test_result.title)
                    )
        if not failed_test_list:
            self.info_log('No test failed!')
        self.info_log(separator)
        # Per-test detail section.
        for test in executed_tests:
            self.info_log(
                '%s %s'
                % (test._name, test.pdriver.get_id())
            )
            test_instance = session.query(Testinstance)\
                .filter(Testinstance.mongo_id == test._test_instance_id)\
                .one()
            try:
                self.info_log(
                    "Test execution time: %s"
                    % (test_instance.ending_timestamp - test_instance.starting_timestamp)
                )
            except TypeError:
                # ending_timestamp may be unset (None) for crashed tests.
                self.info_log("Test execution time exception")
            results = test.get_test_result_summary()
            for result in results:
                self.info_log(result)
            if test._crash_error:
                self.info_log(test._crash_error)
            else:
                self.info_log('No crash!')
            self.info_log(separator)
        self.info_log('Finished')
Print test summary When the test batch is finished a test summary will be printed Args: executed_tests (list)
juraj-google-style
def assign_stream_id_raster(stream_file, subbasin_file, out_stream_file):
    """Assign stream link ID according to subbasin ID.

    Args:
        stream_file: input stream raster file.
        subbasin_file: subbasin raster file. Assumed to share the stream
            raster's grid dimensions -- TODO confirm with callers.
        out_stream_file: output stream raster file.
    """
    stream_raster = RasterUtilClass.read_raster(stream_file)
    stream_data = stream_raster.data
    nrows = stream_raster.nRows
    ncols = stream_raster.nCols
    nodata = stream_raster.noDataValue
    subbain_data = RasterUtilClass.read_raster(subbasin_file).data
    nodata_array = ones((nrows, ncols)) * DEFAULT_NODATA
    # Stream cells take their subbasin's ID; all other cells become nodata.
    newstream_data = where((stream_data > 0) & (stream_data != nodata), subbain_data, nodata_array)
    RasterUtilClass.write_gtiff_file(out_stream_file, nrows, ncols, newstream_data, stream_raster.geotrans, stream_raster.srs, DEFAULT_NODATA, GDT_Int16)
Assign stream link ID according to subbasin ID. Args: stream_file: input stream raster file subbasin_file: subbasin raster file out_stream_file: output stream raster file
juraj-google-style
def get_cqz(self, callsign, timestamp=timestamp_now):
    """Returns CQ Zone of a callsign.

    Args:
        callsign (str): Amateur Radio callsign.
        timestamp (datetime, optional): datetime in UTC
            (tzinfo=pytz.UTC).

    Returns:
        int: the callsign's CQ Zone.

    Raises:
        KeyError: no CQ Zone found for callsign.
    """
    return self.get_all(callsign, timestamp)[const.CQZ]
Returns CQ Zone of a callsign Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: containing the callsign's CQ Zone Raises: KeyError: no CQ Zone found for callsign
juraj-google-style
def insert(self, point, data=None):
    """Insert a new node in the k-d tree.

    Args:
        point (:obj:`tuple` of float or int): Stores the position of the
            node; must have exactly ``self.k`` coordinates.
        data (:obj, optional): The information stored by the node.

    Returns:
        int: The identifier of the new node.

    Example:
        >>> tree = Tree(4, 800)
        >>> point = (3, 7)
        >>> data = {'name': Fresnel, 'label': blue, 'speed': 98.2}
        >>> node_id = tree.insert(point, data)
    """
    assert len(point) == self.k
    if self.size == 0:
        # Empty tree: the root covers the whole (possibly infinite) region.
        if self.region is None:
            self.region = [[-math.inf, math.inf]] * self.k
        axis = 0
        return self.new_node(point, self.region, axis, data)
    # Walk down from the root to find the leaf slot for the new point.
    current_id = 0
    while True:
        parent_node = self.node_list[current_id]
        axis = parent_node.axis
        if point[axis] < parent_node.point[axis]:
            next_id, left = parent_node.left, True
        else:
            next_id, left = parent_node.right, False
        if next_id is None:
            break
        current_id = next_id
    # Copy the parent's region (and the split-axis interval) so mutating
    # the child's bounds does not alias the parent's.
    region = parent_node.region[:]
    region[axis] = parent_node.region[axis][:]
    limit = parent_node.point[axis]
    if left:
        # New node becomes the left child; clamp the upper bound on axis.
        self.node_list[current_id] = parent_node._replace(left=self.size)
        region[axis][1] = limit
    else:
        # New node becomes the right child; clamp the lower bound on axis.
        self.node_list[current_id] = parent_node._replace(right=self.size)
        region[axis][0] = limit
    return self.new_node(point, region, (axis + 1) % self.k, data)
Insert a new node in the tree. Args: point (:obj:`tuple` of float or int): Stores the position of the node. data (:obj, optional): The information stored by the node. Returns: int: The identifier of the new node. Example: >>> tree = Tree(4, 800) >>> point = (3, 7) >>> data = {'name': Fresnel, 'label': blue, 'speed': 98.2} >>> node_id = tree.insert(point, data)
juraj-google-style
def append_to_list(self, key, *value, pipeline=False):
    """Add new elements to the end of the list stored at key.

    Args:
        key (str): Key where the list is stored.
        value: Value(s) to append to the list.
        pipeline (bool): True to queue the push on the active transaction
            block instead of the live connection. Default False.
    """
    # Route the RPUSH through the pipeline or the live connection.
    target = self._pipeline if pipeline else self._db
    target.rpush(key, *value)
Add new element to the end of the list stored at key. Args: key (str): Key where the list is stored value: Value to add to the list pipeline (bool): True, start a transaction block. Default false.
juraj-google-style
async def download_cot(chain):
    """Download the signed chain of trust artifacts.

    One download task is created per link; each fetches the
    chain-of-trust.json artifact (and its detached signature when
    signature verification is enabled) into the link's cot directory.

    Args:
        chain (ChainOfTrust): the chain of trust to add to.

    Raises:
        BaseDownloadError: on failure.
    """
    artifact_tasks = []
    for link in chain.links:
        task_id = link.task_id
        parent_dir = link.cot_dir
        urls = []
        unsigned_url = get_artifact_url(chain.context, task_id, 'public/chain-of-trust.json')
        urls.append(unsigned_url)
        if chain.context.config['verify_cot_signature']:
            # Also fetch the detached signature when verification is on.
            urls.append(get_artifact_url(chain.context, task_id, 'public/chain-of-trust.json.sig'))
        artifact_tasks.append(asyncio.ensure_future(download_artifacts(chain.context, urls, parent_dir=parent_dir, valid_artifact_task_ids=[task_id])))
    # Surface the first failure from any of the concurrent downloads.
    artifacts_paths = (await raise_future_exceptions(artifact_tasks))
    for path in artifacts_paths:
        sha = get_hash(path[0])
        log.debug('{} downloaded; hash is {}'.format(path[0], sha))
Download the signed chain of trust artifacts. Args: chain (ChainOfTrust): the chain of trust to add to. Raises: BaseDownloadError: on failure.
codesearchnet
def __init__(self, config, log): self._config = config self._log = log log.info('Pulsar Search Interface Initialisation')
Constructor. The supplied configuration dictionary must contain all parameters needed to define new user See pulsar_receiver_config.json for an example. Args: config (dict): Dictionary containing JSON configuration file. log: Logger.
juraj-google-style
def get(self, id=None, name=None):
    """Get a task queue by id xor by name.

    Args:
        id (int, optional): The id of the task queue to get.
        name (str, optional): The name of the task queue to get.

    Returns:
        saltant.models.task_queue.TaskQueue: A task queue model instance
        representing the task queue requested.

    Raises:
        ValueError: Neither id nor name were set *or* both were set.
    """
    # Exactly one of id/name must be provided.
    if (id is None) == (name is None):
        raise ValueError('Either id or name must be set (but not both!)')
    if id is not None:
        return super(TaskQueueManager, self).get(id=id)
    return self.list(filters={'name': name})[0]
Get a task queue. Either the id xor the name of the task type must be specified. Args: id (int, optional): The id of the task type to get. name (str, optional): The name of the task type to get. Returns: :class:`saltant.models.task_queue.TaskQueue`: A task queue model instance representing the task queue requested. Raises: ValueError: Neither id nor name were set *or* both id and name were set.
codesearchnet
def unit(x1, x2, block_num, depth, num_layers, dim='2d', bottleneck=True, first_batch_norm=True, stride=1, training=True):
    """Implements bottleneck RevNet unit from authors' RevNet architecture.

    Args:
        x1: [N, H, W, C] tensor of network activations.
        x2: [N, H, W, C] tensor of network activations.
        block_num: integer ID of block.
        depth: First depth in bottleneck residual unit.
        num_layers: Number of layers in the RevNet block.
        dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
        bottleneck: Should a bottleneck layer be used.
        first_batch_norm: Whether to keep the first batch norm layer or
            not. Typically used in the first RevNet block.
        stride: Stride for the residual function.
        training: True for train phase, False for eval phase.

    Returns:
        Two [N, H, W, C] output activation tensors.
    """
    scope_name = ('unit_%d' % block_num)
    if bottleneck:
        # Bottleneck units expand the output depth by 4x.
        depth1 = depth
        depth2 = (depth * 4)
    else:
        depth1 = depth2 = depth
    residual = wrapped_partial(f, depth1=depth1, depth2=depth2, dim=dim, training=training, bottleneck=bottleneck)
    with tf.variable_scope(scope_name):
        downsample = (downsample_bottleneck if bottleneck else downsample_residual)
        # Manual first reversible step so the strided downsampling can be
        # applied to both activation streams.
        with tf.variable_scope('downsampling'):
            with tf.variable_scope('x1'):
                hx1 = downsample(x1, depth2, dim=dim, stride=stride)
                fx2 = residual(x2, stride=stride, first_batch_norm=first_batch_norm)
                x1 = (hx1 + fx2)
            with tf.variable_scope('x2'):
                hx2 = downsample(x2, depth2, dim=dim, stride=stride)
                fx1 = residual(x1)
                x2 = (hx2 + fx1)
        # Remaining layers use the memory-efficient reversible block.
        with tf.variable_scope('full_block'):
            (x1, x2) = tf.contrib.layers.rev_block(x1, x2, residual, residual, num_layers=num_layers)
        return (x1, x2)
Implements bottleneck RevNet unit from authors' RevNet architecture. Args: x1: [N, H, W, C] tensor of network activations. x2: [N, H, W, C] tensor of network activations. block_num: integer ID of block depth: First depth in bottleneck residual unit. num_layers: Number of layers in the RevNet block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. bottleneck: Should a bottleneck layer be used. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the residual function. training: True for train phase, False for eval phase. Returns: Two [N, H, W, C] output activation tensors.
codesearchnet
def remove_config(self, id):
    """Remove a config.

    Args:
        id (string): Full ID of the config to remove.

    Returns (boolean): True if successful.

    Raises:
        :py:class:`docker.errors.NotFound`
            if no config with that ID exists.
    """
    response = self._delete(self._url('/configs/{0}', id))
    # Raises on HTTP error status (e.g. 404 -> NotFound).
    self._raise_for_status(response)
    return True
Remove a config Args: id (string): Full ID of the config to remove Returns (boolean): True if successful Raises: :py:class:`docker.errors.NotFound` if no config with that ID exists
codesearchnet
def group_structures(self, s_list, anonymous=False):
    """Given a list of structures, use fit to group them by structural equality.

    Args:
        s_list ([Structure]): List of structures to be grouped.
        anonymous (bool): Whether to use anonymous mode.

    Returns:
        A list of lists of matched structures.

    Assumption: if s1 == s2 but s1 != s3, then s2 and s3 will be put in
    different groups without comparison.
    """
    if self._subset:
        raise ValueError('allow_subset cannot be used with group_structures')
    original_s_list = list(s_list)
    s_list = self._process_species(s_list)
    if anonymous:
        c_hash = (lambda c: c.anonymized_formula)
    else:
        c_hash = self._comparator.get_hash
    # Hash on composition so only structures sharing a hash are ever
    # compared with the (expensive) fit routine.
    s_hash = (lambda s: c_hash(s[1].composition))
    sorted_s_list = sorted(enumerate(s_list), key=s_hash)
    all_groups = []
    # groupby requires the input to be pre-sorted by the same key.
    for (k, g) in itertools.groupby(sorted_s_list, key=s_hash):
        unmatched = list(g)
        while (len(unmatched) > 0):
            # Take the first remaining structure as reference and collect
            # everything that fits it into one group.
            (i, refs) = unmatched.pop(0)
            matches = [i]
            if anonymous:
                inds = filter((lambda i: self.fit_anonymous(refs, unmatched[i][1])), list(range(len(unmatched))))
            else:
                inds = filter((lambda i: self.fit(refs, unmatched[i][1])), list(range(len(unmatched))))
            inds = list(inds)
            matches.extend([unmatched[i][0] for i in inds])
            unmatched = [unmatched[i] for i in range(len(unmatched)) if (i not in inds)]
            # Report groups in terms of the caller's original structures.
            all_groups.append([original_s_list[i] for i in matches])
    return all_groups
Given a list of structures, use fit to group them by structural equality. Args: s_list ([Structure]): List of structures to be grouped anonymous (bool): Whether to use anonymous mode. Returns: A list of lists of matched structures Assumption: if s1 == s2 but s1 != s3, then s2 and s3 will be put in different groups without comparison.
codesearchnet
def start_listener_thread(self, timeout_ms=30000, exception_handler=None):
    """Start a daemon thread that listens for events in the background.

    Args:
        timeout_ms (int): How long to poll the Home Server for before
            retrying, in milliseconds.
        exception_handler (func(exception)): Optional exception handler
            function which can be used to handle exceptions in the caller
            thread.
    """
    try:
        thread = Thread(target=self.listen_forever,
                        args=(timeout_ms, exception_handler))
        thread.daemon = True
        self.sync_thread = thread
        self.should_listen = True
        thread.start()
    except RuntimeError as e:
        # BUG FIX: previously this read sys.exc_info()[0], which is the
        # exception *type* (logging "<class 'RuntimeError'>"); bind the
        # exception instance so the actual message is logged.
        logger.error('Error: unable to start thread. %s', str(e))
Start a listener thread to listen for events in the background. Args: timeout_ms (int): How long to poll the Home Server for before retrying, in milliseconds. exception_handler (func(exception)): Optional exception handler function which can be used to handle exceptions in the caller thread.
codesearchnet
def from_files(path_dir, dos_spin=1):
    """Get a BoltztrapAnalyzer object from a set of files.

    Args:
        path_dir: directory where the boltztrap files are.
        dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down.

    Returns:
        a BoltztrapAnalyzer object.

    Raises:
        BoltztrapError: in FERMI mode when no Fermi-surface data file exists.
        ValueError: if the run type parsed from the output is not recognized.
    """
    (run_type, warning, efermi, gap, doping_levels) = BoltztrapAnalyzer.parse_outputtrans(path_dir)
    vol = BoltztrapAnalyzer.parse_struct(path_dir)
    intrans = BoltztrapAnalyzer.parse_intrans(path_dir)
    if run_type == 'BOLTZ':
        (dos, pdos) = BoltztrapAnalyzer.parse_transdos(path_dir, efermi, dos_spin=dos_spin, trim_dos=False)
        (mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping,
         seebeck_doping, cond_doping, kappa_doping, hall_doping,
         carrier_conc) = BoltztrapAnalyzer.parse_cond_and_hall(path_dir, doping_levels)
        return BoltztrapAnalyzer(gap, mu_steps, cond, seebeck, kappa, hall,
                                 pn_doping_levels, mu_doping, seebeck_doping,
                                 cond_doping, kappa_doping, hall_doping,
                                 intrans, dos, pdos, carrier_conc, vol, warning)
    elif run_type == 'DOS':
        # HISTO-type DOS output needs trailing zero-padding trimmed.
        trim = (True if (intrans['dos_type'] == 'HISTO') else False)
        (dos, pdos) = BoltztrapAnalyzer.parse_transdos(path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)
        return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos, warning=warning, vol=vol)
    elif run_type == 'BANDS':
        # BUG FIX: the slice expressions were mangled ("[(:, (- 3):)]"), a
        # SyntaxError; restore valid numpy 2-D slicing. Last 3 columns are
        # the k-points, columns 1:-6 the band energies.
        bz_kpoints = np.loadtxt(os.path.join(path_dir, 'boltztrap_band.dat'))[:, -3:]
        bz_bands = np.loadtxt(os.path.join(path_dir, 'boltztrap_band.dat'))[:, 1:-6]
        return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints, warning=warning, vol=vol)
    elif run_type == 'FERMI':
        if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):
            fs_data = read_cube_file(os.path.join(path_dir, 'boltztrap_BZ.cube'))
        elif os.path.exists(os.path.join(path_dir, 'fort.30')):
            fs_data = read_cube_file(os.path.join(path_dir, 'fort.30'))
        else:
            raise BoltztrapError('No data file found for fermi surface')
        return BoltztrapAnalyzer(fermi_surface_data=fs_data)
    else:
        raise ValueError('Run type: {} not recognized!'.format(run_type))
get a BoltztrapAnalyzer object from a set of files Args: path_dir: directory where the boltztrap files are dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down Returns: a BoltztrapAnalyzer object
codesearchnet
def __init__(self, details):
    """Constructor. Initialises the instance.

    Arguments:
        details {dict} -- Details describing the type of value allowed for
            the node. A bare string is accepted as shorthand for
            {"__type__": <string>}.

    Raises:
        KeyError: if '__type__' is missing or not a valid type.
        ValueError: if details is neither a dict nor a string.

    Returns:
        Node
    """
    # Accept a bare string as shorthand for a typed node with no options.
    if isinstance(details, basestring):
        details = {"__type__": details}
    elif not isinstance(details, dict):
        raise ValueError('details')
    if '__type__' not in details or details['__type__'] not in self._VALID_TYPES:
        raise KeyError('__type__')
    self._type = details['__type__']
    del details['__type__']
    self._regex = None
    self._options = None
    self._minimum = None
    self._maximum = None
    # Regex, options, and min/max constraints are mutually exclusive;
    # regex wins over options, which win over min/max.
    if '__regex__' in details:
        self.regex(details['__regex__'])
        del details['__regex__']
    elif '__options__' in details:
        self.options(details['__options__'])
        del details['__options__']
    else:
        bMin = '__minimum__' in details
        bMax = '__maximum__' in details
        if bMin or bMax:
            # BUG FIX: the previous "bMin and details['__minimum__'] or None"
            # idiom silently turned falsy bounds such as 0 into None; use
            # real conditional expressions instead.
            self.minmax(
                details['__minimum__'] if bMin else None,
                details['__maximum__'] if bMax else None
            )
        if bMin:
            del details['__minimum__']
        if bMax:
            del details['__maximum__']
    super(Node, self).__init__(details, 'Node')
Constructor Initialises the instance Arguments: details {dict} -- Details describing the type of value allowed for the node Raises: KeyError ValueError Returns: Node
juraj-google-style
def all_gather(self, input_tensor: core.TensorLike, axis: core.TensorLike, options: Optional[collective_util.Options]=None) -> core.Tensor:
    """All-gather a dense tensor.

    This method must be called inside a tf.function.

    Args:
        input_tensor: a dense tensor. It must have the same rank on all
            replicas, and dimensions other than `axis` need to be the same
            as well.
        axis: 0-D int32 Tensor. Dimension along which to gather. Must be in
            the range [0, rank(value)).
        options: an optional tf.distribute.experimental.CommunicationOptions.
            If provided, it overrides the default options.

    Returns:
        The gathered Tensor.

    Raises:
        RuntimeError: if called in eager mode.
    """
    if context.executing_eagerly():
        raise RuntimeError('all_gather is not supported in eager mode.')
    with ops.device(self._device), ops.control_dependencies([array_ops.identity(input_tensor)]):
        # 1. Transpose so the gather axis becomes axis 0.
        perm_pre = array_ops.concat(([axis], math_ops.range(axis), math_ops.range(axis + 1, array_ops.rank(input_tensor))), axis=0)
        input_tensor_t = array_ops.transpose(input_tensor, perm=perm_pre)
        # 2. Gather every replica's shape to find the largest axis-0 size.
        gathered_shape = self._all_gather(array_ops.expand_dims_v2(array_ops.shape_v2(input_tensor_t), axis=0), options)
        first_dims = gathered_shape[:, 0]
        full_axis_dim = math_ops.reduce_max(first_dims)
        # 3. Pad each replica's tensor to that size so ragged axis-0 sizes
        #    can be all-gathered with a fixed-shape collective.
        padded_input_tensor = _pad_util(input_tensor_t, full_axis_dim)
        gather_padded_out_tensor = self._all_gather(padded_input_tensor, options)
        # 4. Strip padding from each replica's slice and re-concatenate.
        split_tensors = []
        for i in range(self._group_size):
            start_pos = i * full_axis_dim
            split_tensors.append(gather_padded_out_tensor[start_pos:start_pos + first_dims[i]])
        out_tensor_t = array_ops.concat(split_tensors, 0)
        # 5. Transpose back so the gathered axis returns to its position.
        perm_after = array_ops.concat((math_ops.range(1, axis + 1), [0], math_ops.range(axis + 1, array_ops.rank(input_tensor_t))), axis=0)
        return array_ops.transpose(out_tensor_t, perm=perm_after)
All-gather a dense tensor. This method must be called inside a tf.function. Args: input_tensor: a dense tensor. It must have the same rank on all replicas, and dimensions other than `axis` need to be the same as well. axis: 0-D int32 Tensor. Dimension along which to gather. Must be in the range [0, rank(value)). options: an optional tf.distribute.experimental.CommunicationOptions. If provided, it overrides the default options. Returns: The gathered Tensor. Raises: RuntimeError: if called in eager mode.
github-repos
def get_output_info_dict(self, signature=None):
    """Describes the outputs provided by a signature.

    Args:
        signature: A string with the signature to get outputs information
            for. If None, the default signature is used if defined.

    Returns:
        The result of ModuleSpec.get_output_info_dict() for the given
        signature, and the graph variant selected by `tags` when this
        Module was initialized.

    Raises:
        KeyError: if there is no such signature.
    """
    spec = self._spec
    return spec.get_output_info_dict(signature=signature, tags=self._tags)
Describes the outputs provided by a signature. Args: signature: A string with the signature to get outputs information for. If None, the default signature is used if defined. Returns: The result of ModuleSpec.get_output_info_dict() for the given signature, and the graph variant selected by `tags` when this Module was initialized. Raises: KeyError: if there is no such signature.
codesearchnet
def _lookup_namespace(self, symbol, namespace): for namespace_part in symbol.parts: namespace = namespace.get(namespace_part) if namespace is None: break if not isinstance(namespace, dict): return namespace raise Error('%s not found' % symbol.name)
Helper for lookup_symbol that only looks up variables in a namespace. Args: symbol: Symbol namespace: pointer into self.namespaces
juraj-google-style
def channels_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:
    """Retrieve a thread of messages posted to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or
            more replies. e.g. '1234567890.123456'
    """
    params = dict(kwargs, channel=channel, thread_ts=thread_ts)
    return self.api_call('channels.replies', http_verb='GET', params=params)
Retrieve a thread of messages posted to a channel Args: channel (str): The channel id. e.g. 'C1234567890' thread_ts (str): The timestamp of an existing message with 0 or more replies. e.g. '1234567890.123456'
codesearchnet
def remove_function(self, name):
    """Remove a function from the context.

    Once removed, the function cannot be executed anymore.

    Args:
        name: function signature name.
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)
Remove a function from the context. Once removed, the function cannot be executed anymore. Args: name: function signature name.
github-repos
def import_submodules(package: Union[str, ModuleType], base_package_for_relative_import: str = None, recursive: bool = True) -> Dict[str, ModuleType]:
    """Import all submodules of a module, recursively, including subpackages.

    Args:
        package: package (name or actual module).
        base_package_for_relative_import: path to prepend for relative names.
        recursive: import submodules of subpackages too?

    Returns:
        dict: mapping from full module name to module object.
    """
    if isinstance(package, str):
        package = importlib.import_module(package, base_package_for_relative_import)
    modules: Dict[str, ModuleType] = {}
    for _loader, short_name, is_package in pkgutil.walk_packages(package.__path__):
        qualified = package.__name__ + '.' + short_name
        log.debug("importing: {}", qualified)
        modules[qualified] = importlib.import_module(qualified)
        if recursive and is_package:
            modules.update(import_submodules(qualified))
    return modules
Import all submodules of a module, recursively, including subpackages. Args: package: package (name or actual module) base_package_for_relative_import: path to prepend? recursive: import submodules too? Returns: dict: mapping from full module name to module
juraj-google-style
def heightmap_add_fbm(hm: np.ndarray, noise: tcod.noise.Noise, mulx: float, muly: float, addx: float, addy: float, octaves: float, delta: float, scale: float) -> None:
    """Add FBM noise to the heightmap.

    The noise coordinate for each map cell is
    `((x + addx) * mulx / width, (y + addy) * muly / height)`.

    The value added to the heightmap is `delta + noise * scale`.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        noise (Noise): A Noise instance.
        mulx (float): Scaling of each x coordinate.
        muly (float): Scaling of each y coordinate.
        addx (float): Translation of each x coordinate.
        addy (float): Translation of each y coordinate.
        octaves (float): Number of octaves in the FBM sum.
        delta (float): The value added to all heightmap cells.
        scale (float): The noise value is scaled with this parameter.

    .. deprecated:: 8.1
        An equivalent array of noise samples can be taken using a method
        such as :any:`Noise.sample_ogrid`.
    """
    # Pass a NULL pointer to the C library when no Noise instance is given.
    noise = (noise.noise_c if (noise is not None) else ffi.NULL)
    lib.TCOD_heightmap_add_fbm(_heightmap_cdata(hm), noise, mulx, muly, addx, addy, octaves, delta, scale)
Add FBM noise to the heightmap. The noise coordinate for each map cell is `((x + addx) * mulx / width, (y + addy) * muly / height)`. The value added to the heightmap is `delta + noise * scale`. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. noise (Noise): A Noise instance. mulx (float): Scaling of each x coordinate. muly (float): Scaling of each y coordinate. addx (float): Translation of each x coordinate. addy (float): Translation of each y coordinate. octaves (float): Number of octaves in the FBM sum. delta (float): The value added to all heightmap cells. scale (float): The noise value is scaled with this parameter. .. deprecated:: 8.1 An equivalent array of noise samples can be taken using a method such as :any:`Noise.sample_ogrid`.
codesearchnet
def __call__(self, environ, start_response):
    """Central entry point for the TensorBoard application.

    This method handles routing to sub-applications. It does simple routing
    using regular expression matching. This __call__ method conforms to the
    WSGI spec, so that instances of this class are WSGI applications.

    Args:
        environ: See WSGI spec.
        start_response: See WSGI spec.

    Returns:
        A werkzeug Response.
    """
    request = wrappers.Request(environ)
    parsed_url = urlparse.urlparse(request.path)
    clean_path = _clean_path(parsed_url.path, self._path_prefix)
    if clean_path in self.data_applications:
        # Delegate to the registered sub-application for this path.
        return self.data_applications[clean_path](environ, start_response)
    else:
        # FIX: Logger.warn is a deprecated alias; use warning().
        logger.warning('path %s not found, sending 404', clean_path)
        return http_util.Respond(request, 'Not found', 'text/plain', code=404)(
            environ, start_response)
Central entry point for the TensorBoard application. This method handles routing to sub-applications. It does simple routing using regular expression matching. This __call__ method conforms to the WSGI spec, so that instances of this class are WSGI applications. Args: environ: See WSGI spec. start_response: See WSGI spec. Returns: A werkzeug Response.
juraj-google-style
def _my_top_k(x, k):
    """GPU-compatible version of top-k that works for very small constant k.

    Calls argmax repeatedly. tf.nn.top_k is implemented for GPU, but the
    gradient, sparse_to_dense, seems not to be, so if we use tf.nn.top_k,
    then both the top_k and its gradient go on cpu. Once this is not an
    issue, this function becomes obsolete and should be replaced by
    tf.nn.top_k.

    Args:
        x: a 2d Tensor.
        k: a small integer.

    Returns:
        values: a Tensor of shape [batch_size, k]
        indices: a int32 Tensor of shape [batch_size, k]
    """
    if (k > 10):
        # Repeated argmax is only worthwhile for tiny k.
        return tf.nn.top_k(x, k)
    values = []
    indices = []
    depth = tf.shape(x)[1]
    for i in range(k):
        values.append(tf.reduce_max(x, 1))
        argmax = tf.argmax(x, 1)
        indices.append(argmax)
        if ((i + 1) < k):
            # Mask out the max just taken so the next pass finds the runner-up.
            x += tf.one_hot(argmax, depth, (- 1000000000.0))
    return (tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1)))
GPU-compatible version of top-k that works for very small constant k. Calls argmax repeatedly. tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense, seems not to be, so if we use tf.nn.top_k, then both the top_k and its gradient go on cpu. Once this is not an issue, this function becomes obsolete and should be replaced by tf.nn.top_k. Args: x: a 2d Tensor. k: a small integer. Returns: values: a Tensor of shape [batch_size, k] indices: a int32 Tensor of shape [batch_size, k]
codesearchnet
def findSequencesOnDisk(cls, pattern, include_hidden=False, strictPadding=False):
    """Yield the sequences found in the given directory.

    The `pattern` can be a plain directory to scan, or a glob-like filter
    supporting `?` (one char), `*` (any chars) and `{foo,bar}` alternation.
    Exact frame ranges are not considered, and padding characters are
    converted to wildcards (``#`` or ``@``).

    Args:
        pattern (str): directory to scan, or pattern to filter in directory.
        include_hidden (bool): if true, show .hidden files as well.
        strictPadding (bool): if True, ignore files with padding length
            different from pattern.

    Returns:
        list: the matching FileSequence objects.
    """
    _not_hidden = (lambda f: (not f.startswith('.')))
    _match_pattern = None
    _filter_padding = None
    _join = os.path.join
    seq = None
    dirpath = pattern
    if (not os.path.isdir(pattern)):
        # `pattern` is a file pattern: split off the directory and build a
        # regex from the filename portion.
        (dirpath, filepat) = os.path.split(pattern)
        if (not os.path.isdir(dirpath)):
            return []
        seq = cls(filepat)
        patt = seq.basename().replace('.', '\\.')
        if seq.padding():
            patt += '\\d+'
        if seq.extension():
            patt += seq.extension()
        # Expand {a,b,...} alternation groups into regex alternations,
        # editing in reverse so earlier spans stay valid.
        view = bytearray(patt)
        matches = re.finditer('{(.*?)(?:,(.*?))*}', patt)
        for match in reversed(list(matches)):
            (i, j) = match.span()
            view[i:j] = ('(%s)' % '|'.join([m.strip() for m in match.groups()]))
        view = view.replace('*', '.*')
        view = view.replace('?', '.')
        view += '$'
        try:
            _match_pattern = re.compile(str(view)).match
        except re.error:
            msg = 'Invalid file pattern: {}'.format(filepat)
            raise FileSeqException(msg)
        if (seq.padding() and strictPadding):
            _filter_padding = functools.partial(cls._filterByPaddingNum, num=seq.zfill())
    # Only scan the top level of the directory (first os.walk result).
    ret = next(os.walk(dirpath), None)
    files = (ret[(- 1)] if ret else [])
    if (not include_hidden):
        files = ifilter(_not_hidden, files)
    if _match_pattern:
        files = ifilter(_match_pattern, files)
    if _filter_padding:
        files = _filter_padding(files)
    sep = utils._getPathSep(dirpath)
    if (not dirpath.endswith(sep)):
        dirpath += sep
    files = (_join(dirpath, f) for f in files)
    files = list(files)
    seqs = list(FileSequence.yield_sequences_in_list(files))
    if (_filter_padding and seq):
        # strict padding: force each found sequence to the conformed pad.
        pad = cls.conformPadding(seq.padding())
        for s in seqs:
            s.setPadding(pad)
    return seqs
Yield the sequences found in the given directory. Examples: >>> findSequencesOnDisk('/path/to/files') The `pattern` can also specify glob-like shell wildcards including the following: * ``?`` - 1 wildcard character * ``*`` - 1 or more wildcard character * ``{foo,bar}`` - either 'foo' or 'bar' Exact frame ranges are not considered, and padding characters are converted to wildcards (``#`` or ``@``) Examples: >>> findSequencesOnDisk('/path/to/files/image_stereo_{left,right}.#.jpg') >>> findSequencesOnDisk('/path/to/files/imag?_*_{left,right}.@@@.jpg', strictPadding=True) Args: pattern (str): directory to scan, or pattern to filter in directory include_hidden (bool): if true, show .hidden files as well strictPadding (bool): if True, ignore files with padding length different from pattern Returns: list:
codesearchnet
def remove_global_handler(self, event, handler):
    """Removes a global handler function.

    Arguments:
        event -- Event type (a string).
        handler -- Callback function that was registered.

    Returns:
        int: 1 if a matching handler was removed, otherwise 0.
    """
    with self.mutex:
        if event not in self.handlers:
            return 0
        for h in self.handlers[event]:
            if handler == h.callback:
                self.handlers[event].remove(h)
                return 1
        # BUG FIX: previously fell off the loop and implicitly returned
        # None when no callback matched, despite the documented 0.
        return 0
Removes a global handler function. Arguments: event -- Event type (a string). handler -- Callback function. Returns 1 on success, otherwise 0.
juraj-google-style
def execute(self, action):
    """Execute one action in the wrapped environment.

    Args:
        action: Action to apply via the underlying ``env.step``.

    Returns:
        Tuple of (next state, reward, done flag); the info dict returned
        by the environment is discarded.
    """
    state, reward, done, _info = self.env.step(action)
    return (state, reward, done)
Executes action, observes next state and reward. Args: action: Action to execute. Returns: Tuple of (next state, reward, bool indicating terminal).
juraj-google-style
def inplace_update(x, i, v):
    """Applies an inplace update on input x at index i with value v.

    Note that this function is not actually inplace - it allocates a copy
    of x. The utility is not avoiding memory copies but rather specifying a
    sparse update.

    If i is None, x and v must be the same shape. Computes y = x; y = v;
    If i is a scalar, x has a rank 1 higher than v's. Computes
    y = x; y[i, :] = v; Otherwise, x and v must have the same rank.
    Computes y = x; y[i, :] = v;

    Args:
        x: A Tensor.
        i: None, a scalar or a vector.
        v: A Tensor.

    Returns:
        Returns y, which is guaranteed not to be an alias of x.
    """
    # deep_copy provides the non-aliasing guarantee before the alias update.
    return alias_inplace_update(gen_array_ops.deep_copy(x), i, v)
Applies an inplace update on input x at index i with value v. Note that this function is not actually inplace - it allocates a copy of x. The utility is not avoiding memory copies but rather specifying a sparse update. If i is None, x and v must be the same shape. Computes y = x; y = v; If i is a scalar, x has a rank 1 higher than v's. Computes y = x; y[i, :] = v; Otherwise, x and v must have the same rank. Computes y = x; y[i, :] = v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns y, which is guaranteed not to be an alias of x.
github-repos
def generate_name_variations(name):
    """Generate name variations for a given name.

    Args:
        name (six.text_type): The name whose variations are to be generated.

    Returns:
        list: All the name variations for the given name.

    Notes:
        Uses `unidecode` for doing unicode characters transliteration to
        ASCII ones. This was chosen so that we can map both full names of
        authors in HEP records and user's input to the same space and thus
        make exact queries work.
    """
    def _update_name_variations_with_product(set_a, set_b):
        # Cross every variation in set_a with every one in set_b using each
        # allowed separator; transliterate to ASCII and lower-case.
        name_variations.update([
            unidecode((names_variation[0] + separator + names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower()
            for names_variation in product(set_a, set_b)
            for separator in _LASTNAME_NON_LASTNAME_SEPARATORS
        ])

    parsed_name = ParsedName.loads(name)
    if len(parsed_name) == 1:
        # Single token: nothing to permute.
        return [parsed_name.dumps().lower()]
    name_variations = set()
    non_lastnames = [
        non_lastname
        for non_lastname in parsed_name.first_list + parsed_name.suffix_list
        if non_lastname
    ]
    # Bail out when the combinatorial expansion would be too large.
    if len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD or len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD:
        LOGGER.error('Skipping name variations generation - too many names in: "%s"', name)
        return [name]
    non_lastnames_variations = \
        _generate_non_lastnames_variations(non_lastnames)
    lastnames_variations = _generate_lastnames_variations(parsed_name.last_list)
    # Generate both "lastnames first" and "lastnames last" orderings.
    _update_name_variations_with_product(lastnames_variations, non_lastnames_variations)
    _update_name_variations_with_product(non_lastnames_variations, lastnames_variations)
    return list(name_variations)
Generate name variations for a given name. Args: name (six.text_type): The name whose variations are to be generated. Returns: list: All the name variations for the given name. Notes: Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map both full names of authors in HEP records and user's input to the same space and thus make exact queries work.
juraj-google-style
def parseMagnitude(m):
    """Parses a number m into a human-ready string representation.

    For example, crops off floats if they're too accurate.

    Arguments:
        m (float): Floating-point number to be cleaned.

    Returns:
        Human-ready string description of the number.
    """
    m = NumberService().parse(m)

    def toDecimalPrecision(n, k):
        return float(('%.*f' % (k, round(n, k))))
    # Increase precision until the rounded value stops collapsing to zero.
    # NOTE(review): if m is exactly 0 this loop never terminates -- confirm
    # callers cannot pass zero.
    digits = 2
    magnitude = toDecimalPrecision(m, digits)
    while (not magnitude):
        digits += 1
        magnitude = toDecimalPrecision(m, digits)
    if (m < 1.0):
        magnitude = toDecimalPrecision(m, (digits + 1))
    if (int(magnitude) == magnitude):
        magnitude = int(magnitude)
    magString = str(magnitude)
    # Spell out scientific notation and signs for speech-friendly output.
    magString = re.sub('(\\d)e-(\\d+)', '\\g<1> times ten to the negative \\g<2>', magString)
    magString = re.sub('(\\d)e\\+(\\d+)', '\\g<1> times ten to the \\g<2>', magString)
    magString = re.sub('-(\\d+)', 'negative \\g<1>', magString)
    magString = re.sub('\\b0(\\d+)', '\\g<1>', magString)
    return magString
Parses a number m into a human-ready string representation. For example, crops off floats if they're too accurate. Arguments: m (float): Floating-point number to be cleaned. Returns: Human-ready string description of the number.
codesearchnet