code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def get_es_value(obj, def_obj):
    """Populate the elasticsearch 'value' field on ``obj`` and return it.

    Args:
        obj: data object (dict-like) to update in place.
        def_obj: the class instance that has definition values (``es_defs``).

    Returns:
        The same ``obj`` with its ``'value'`` key set.
    """
    def _coerce(item):
        # Dicts contribute their own 'value' entry; everything else is str()'d.
        if isinstance(item, dict):
            return str(item.get('value'))
        return str(item)

    # Prefer the explicitly configured value fields; otherwise use every
    # non-ignored key plus the combined fields.
    if def_obj.es_defs.get('kds_esValue'):
        fields = def_obj.es_defs['kds_esValue'].copy()
    else:
        fields = list(set(obj).difference(__ALL_IGN__))
        fields += __COMBINED__

    candidates = [obj.get(fld) for fld in fields if obj.get(fld)]
    if candidates:
        obj['value'] = candidates[0]
    else:
        # No direct hit: assemble "label: value" pairs from nested dicts.
        obj['value'] = ", ".join(
            "%s: %s" % (item.get('label'), item.get('value'))
            for _, item in obj.items()
            if isinstance(item, dict) and item.get('label'))

    if isinstance(obj['value'], list):
        obj['value'] = ", ".join(_coerce(part) for part in obj['value'])
    else:
        obj['value'] = _coerce(obj['value'])

    # Trim a trailing slash (common on URI-shaped values).
    if str(obj['value']).strip().endswith("/"):
        obj['value'] = str(obj['value']).strip()[:-1].strip()
    if not obj['value']:
        obj['value'] = obj.get('uri', '')
    return obj
Returns the value for an object that goes into the elasticsearch 'value' field args: obj: data object to update def_obj: the class instance that has definition values
juraj-google-style
def dict_itemstr_list(dict_, **dictkw):
    r"""Build a list of human-readable strings, one per dictionary item.

    Args:
        dict_: the dictionary to render.
        **dictkw: formatting options.  ``explicit``: if True uses
            ``dict(key=val, ...)`` format instead of ``{key: val, ...}``.
            Other recognized keys: ``sorted_``, ``key_order``, ``precision``,
            ``kvsep``, ``strkeys``, ``key_order_metric``, ``maxlen``.

    Returns:
        list: a list of human-readable dictionary items.
    """
    import utool as ut
    explicit = dictkw.get('explicit', False)
    dictkw['explicit'] = _rectify_countdown_or_bool(explicit)
    # Default to sorted iteration unless the dict is explicitly ordered.
    dosort = dictkw.get('sorted_', None)
    if dosort is None:
        dosort = True
    if dosort and not isinstance(dict_, collections.OrderedDict):
        key_order = dictkw.get('key_order', None)
        def iteritems(d):
            if key_order is None:
                # Unorderable (mixed-type) keys fall back to insertion order.
                try:
                    return iter(sorted(six.iteritems(d)))
                except TypeError:
                    return six.iteritems(d)
            else:
                # Requested keys first, remaining keys sorted after them.
                unordered_keys = list(d.keys())
                other_keys = sorted(list(set(unordered_keys) - set(key_order)))
                keys = key_order + other_keys
                return ((key, d[key]) for key in keys)
    else:
        iteritems = six.iteritems
    _valstr = _make_valstr(**dictkw)
    precision = dictkw.get('precision', None)
    kvsep = dictkw.get('kvsep', ': ')
    if explicit:
        kvsep = '='
    def make_item_str(key, val):
        # Keys are repr'd unless explicit/strkeys formatting is requested.
        if explicit or dictkw.get('strkeys', False):
            key_str = six.text_type(key)
        else:
            key_str = repr2(key, precision=precision)
        prefix = key_str + kvsep
        val_str = _valstr(val)
        # numpy arrays are stacked horizontally next to the key prefix.
        if util_type.HAVE_NUMPY and isinstance(val, np.ndarray):
            item_str = hz_str(prefix, val_str)
        else:
            item_str = prefix + val_str
        return item_str
    itemstr_list = [make_item_str(key, val) for (key, val) in iteritems(dict_)]
    # Optional re-sorting of the rendered items by string length or value.
    reverse = False
    key_order_metric = dictkw.get('key_order_metric', None)
    if key_order_metric is not None:
        if key_order_metric.startswith('-'):
            key_order_metric = key_order_metric[1:]
            reverse = True
        if key_order_metric == 'strlen':
            metric_list = [len(itemstr) for itemstr in itemstr_list]
            itemstr_list = ut.sortedby(itemstr_list, metric_list, reverse=reverse)
        elif key_order_metric == 'val':
            metric_list = [val for (key, val) in iteritems(dict_)]
            itemstr_list = ut.sortedby(itemstr_list, metric_list, reverse=reverse)
    # Truncate the output when a maximum item count was requested.
    maxlen = dictkw.get('maxlen', None)
    if maxlen is not None and len(itemstr_list) > maxlen:
        itemstr_list = itemstr_list[0:maxlen]
    return itemstr_list
r""" Returns: list: a list of human-readable dictionary items Args: explicit : if True uses dict(key=val,...) format instead of {key:val,...}
juraj-google-style
def get_testcase_io(testcase):
    """Collect a testcase's input variables and extracted output mapping.

    Args:
        testcase: object carrying a parsed ``config`` dict and an
            initialized ``runner`` exposing ``extract_output``.

    Returns:
        dict: ``{"in": variables, "out": output_mapping}``.
    """
    runner = testcase.runner
    config = testcase.config
    in_variables = config.get("variables", {})
    out_mapping = runner.extract_output(config.get("output", []))
    return {"in": in_variables, "out": out_mapping}
get and print testcase input(variables) and output. Args: testcase (unittest.suite.TestSuite): corresponding to one YAML/JSON file, it has been set two attributes: config: parsed config block runner: initialized runner.Runner() with config Returns: dict: input(variables) and output mapping.
juraj-google-style
def line_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):
    """Similarity between two lines.

    Args:
        p1a ([float, float]): x and y coordinates. Line A start.
        p1b ([float, float]): x and y coordinates. Line A end.
        p2a ([float, float]): x and y coordinates. Line B start.
        p2b ([float, float]): x and y coordinates. Line B end.
        T (float): distance threshold forwarded to the distance metric.

    Returns:
        float: between 0 and 1, where 1 is very similar and 0 is
        completely different.
    """
    # Combine a positional similarity with an orientation similarity.
    distance_term = line_distance_similarity(p1a, p1b, p2a, p2b, T=T)
    angle_term = abs(angle_similarity(normalize(line(p1a, p1b)),
                                      normalize(line(p2a, p2b))))
    return distance_term * angle_term
Similarity between two lines Args: p1a ([float, float]): x and y coordinates. Line A start p1b ([float, float]): x and y coordinates. Line A end p2a ([float, float]): x and y coordinates. Line B start p2b ([float, float]): x and y coordinates. Line B end Returns: float: between 0 and 1. Where 1 is very similar and 0 is completely different
juraj-google-style
def register_for_auto_class(cls, auto_class='AutoFeatureExtractor'):
    """Register this class with a given auto class.

    This should only be used for custom feature extractors, as the ones in
    the library are already mapped with `AutoFeatureExtractor`.

    Args:
        auto_class (`str` or `type`, *optional*, defaults to
            `"AutoFeatureExtractor"`): The auto class to register this new
            feature extractor with.
    """
    name = auto_class if isinstance(auto_class, str) else auto_class.__name__
    import transformers.models.auto as auto_module
    if not hasattr(auto_module, name):
        raise ValueError(f'{name} is not a valid auto class.')
    cls._auto_class = name
Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with `AutoFeatureExtractor`. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoFeatureExtractor"`): The auto class to register this new feature extractor with.
github-repos
def __init__(self, id, buckets=None, **kwargs):
    """Create a Song object.

    Args:
        id (str): a song ID.
        buckets (list): strings specifying which buckets to retrieve;
            defaults to no buckets.
    """
    # Normalize a missing/empty bucket list to [] before delegating.
    super(Song, self).__init__(id, buckets or [], **kwargs)
Song class Args: id (str): a song ID Kwargs: buckets (list): A list of strings specifying which buckets to retrieve Returns: A Song object Example: >>> s = song.Song('SOPEXHZ12873FD2AC7', buckets=['song_hotttnesss', 'artist_hotttnesss']) >>> s.song_hotttnesss 0.58602500000000002 >>> s.artist_hotttnesss 0.80329715999999995 >>>
juraj-google-style
def info(self, **kwargs):
    """Get the primary information about a TV episode by combination of a
    season and episode number.

    Args:
        language: (optional) ISO 639 code.
        append_to_response: (optional) Comma separated, any TV series method.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_series_id_season_number_episode_number_path('info')
    payload = self._GET(path, kwargs)
    # Mirror the API response onto this instance's attributes.
    self._set_attrs_to_values(payload)
    return payload
Get the primary information about a TV episode by combination of a season and episode number. Args: language: (optional) ISO 639 code. append_to_response: (optional) Comma separated, any TV series method. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def embed(self, url, format='json', **opt):
    """Get an OEmbedResponse from one of the configured providers for the
    given resource url.

    Args:
        url: The url of the resource to get.
        format: Desired response format ('json' or 'xml').
        **opt: Optional parameters to pass in the url to the provider.

    Returns:
        OEmbedResponse object.

    Raises:
        OEmbedInvalidRequest: if ``format`` is not 'json' or 'xml'.
    """
    if format not in ('json', 'xml'):
        raise OEmbedInvalidRequest('Format must be json or xml')
    opt['format'] = format
    return self._request(url, **opt)
Get an OEmbedResponse from one of the providers configured in this consumer according to the resource url. Args: url: The url of the resource to get. format: Desired response format. **opt: Optional parameters to pass in the url to the provider. Returns: OEmbedResponse object.
juraj-google-style
def __init__(self, learning_rate, cg_max_iterations=20, cg_damping=1e-3, cg_unroll_loop=False, scope='natural-gradient', summary_labels=()):
    """Creates a new natural gradient optimizer instance.

    Args:
        learning_rate: Learning rate, i.e. KL-divergence of distributions
            between optimization steps.
        cg_max_iterations: Conjugate gradient solver max iterations.
        cg_damping: Conjugate gradient solver damping factor.
        cg_unroll_loop: Unroll conjugate gradient loop if true.
        scope: Variable scope name.
        summary_labels: Labels of summaries to record.
    """
    assert learning_rate > 0.0
    self.learning_rate = learning_rate
    # Inner solver used to approximately invert the natural-gradient system.
    self.solver = ConjugateGradient(
        max_iterations=cg_max_iterations,
        damping=cg_damping,
        unroll_loop=cg_unroll_loop,
    )
    super(NaturalGradient, self).__init__(scope=scope, summary_labels=summary_labels)
Creates a new natural gradient optimizer instance. Args: learning_rate: Learning rate, i.e. KL-divergence of distributions between optimization steps. cg_max_iterations: Conjugate gradient solver max iterations. cg_damping: Conjugate gradient solver damping factor. cg_unroll_loop: Unroll conjugate gradient loop if true.
juraj-google-style
def validate_yaml(self, properties):
    """Validate the parsed YAML file for adherence to the ChemKED format.

    Args:
        properties (`dict`): Dictionary created from the parsed YAML file.

    Raises:
        ValueError: If the YAML file cannot be validated; the message
            contains the validator errors.
    """
    validator = OurValidator(schema)
    if not validator.validate(properties):
        # Point out case-sensitivity for enumerated-value failures before
        # raising with the full error dict.
        for key, messages in validator.errors.items():
            if any('unallowed value' in msg for msg in messages):
                print('{key} has an illegal value. Allowed values are {values} and are case sensitive.'.format(key=key, values=schema[key]['allowed']))
        raise ValueError(validator.errors)
Validate the parsed YAML file for adherence to the ChemKED format. Arguments: properties (`dict`): Dictionary created from the parsed YAML file Raises: `ValueError`: If the YAML file cannot be validated, a `ValueError` is raised whose string contains the errors that are present.
codesearchnet
def _add_sample_measure(self, measure_params, num_samples):
    """Generate memory samples from current statevector.

    Args:
        measure_params (list): List of (qubit, cmembit) values for
            measure instructions to sample.
        num_samples (int): The number of memory samples to generate.

    Returns:
        list: A list of memory values in hex format.
    """
    # Unique qubits that are actually measured.
    measured_qubits = list({qubit for (qubit, cmembit) in measure_params})
    num_measured = len(measured_qubits)
    # Axes to marginalize over: every qubit NOT being measured.  The
    # statevector axis order is reversed relative to qubit index, hence
    # the (n - 1 - qubit) mirroring.
    axis = list(range(self._number_of_qubits))
    for qubit in reversed(measured_qubits):
        axis.remove(((self._number_of_qubits - 1) - qubit))
    probabilities = np.reshape(np.sum((np.abs(self._statevector) ** 2), axis=tuple(axis)), (2 ** num_measured))
    # Draw measurement outcomes according to |amplitude|^2.
    samples = self._local_random.choice(range((2 ** num_measured)), num_samples, p=probabilities)
    memory = []
    for sample in samples:
        classical_memory = self._classical_memory
        # Scatter each sampled qubit outcome into its classical memory bit.
        for (count, (qubit, cmembit)) in enumerate(sorted(measure_params)):
            qubit_outcome = int(((sample & (1 << count)) >> count))
            membit = (1 << cmembit)
            classical_memory = ((classical_memory & (~ membit)) | (qubit_outcome << cmembit))
        value = bin(classical_memory)[2:]
        memory.append(hex(int(value, 2)))
    return memory
Generate memory samples from current statevector. Args: measure_params (list): List of (qubit, cmembit) values for measure instructions to sample. num_samples (int): The number of memory samples to generate. Returns: list: A list of memory values in hex format.
codesearchnet
def assert_no_text(self, *args, **kwargs):
    """Asserts that the page or current node doesn't have the given text
    content, ignoring any HTML tags.

    Args:
        *args: Variable length argument list for :class:`TextQuery`.
        **kwargs: Arbitrary keyword arguments for :class:`TextQuery`.

    Returns:
        True

    Raises:
        ExpectationNotMet: If the assertion hasn't succeeded during the
            wait time.
    """
    query = TextQuery(*args, **kwargs)

    # Retry inside the synchronization window so the negative assertion
    # only fails once the configured wait time has elapsed.
    @self.synchronize(wait=query.wait)
    def assert_no_text():
        count = query.resolve_for(self)
        # Fails when matching text is present, or when an exact-count
        # expectation of none is violated.
        if matches_count(count, query.options) and (
                count > 0 or expects_none(query.options)):
            raise ExpectationNotMet(query.negative_failure_message)
        return True

    return assert_no_text()
Asserts that the page or current node doesn't have the given text content, ignoring any HTML tags. Args: *args: Variable length argument list for :class:`TextQuery`. **kwargs: Arbitrary keyword arguments for :class:`TextQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
juraj-google-style
def on_core_metadata_event(self, event):
    """Implementation of the core metadata-carrying Event proto callback.

    Args:
        event: An Event proto that contains core metadata about the
            debugged Session::Run() in its log_message.message field, as
            a JSON string.
    """
    core_metadata = json.loads(event.log_message.message)
    input_names = ','.join(core_metadata['input_names'])
    output_names = ','.join(core_metadata['output_names'])
    target_nodes = ','.join(core_metadata['target_nodes'])
    self._run_key = RunKey(input_names, output_names, target_nodes)
    # If graph defs arrived before this metadata, emit them now keyed by
    # the freshly constructed run key; otherwise record the ordering.
    if not self._graph_defs:
        self._graph_defs_arrive_first = False
    else:
        for device_name in self._graph_defs:
            self._add_graph_def(device_name, self._graph_defs[device_name])
    self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))
    # Block until the client acknowledges receipt of the metadata.
    logger.info('on_core_metadata_event() waiting for client ack (meta)...')
    self._incoming_channel.get()
    logger.info('on_core_metadata_event() client ack received (meta).')
Implementation of the core metadata-carrying Event proto callback. Args: event: An Event proto that contains core metadata about the debugged Session::Run() in its log_message.message field, as a JSON string. See the doc string of debug_data.DebugDumpDir.core_metadata for details.
juraj-google-style
def run_cell(self, cell):
    """Run the Cell code using the IPython globals and locals.

    Args:
        cell (str): Python code to be executed.
    """
    globals = self.ipy_shell.user_global_ns
    locals = self.ipy_shell.user_ns
    globals.update({'__ipy_scope__': None})
    try:
        # Capture the cell's stdout into this instance's buffer.
        with redirect_stdout(self.stdout):
            self.run(cell, globals, locals)
    except:
        # NOTE(review): deliberate bare except — any failure is recorded
        # as a code error; in debug mode it is surfaced as BdbQuit.
        self.code_error = True
        if self.options.debug:
            raise BdbQuit
    finally:
        self.finalize()
Run the Cell code using the IPython globals and locals Args: cell (str): Python code to be executed
codesearchnet
def add_site_property(self, property_name, values):
    """Adds a property to every site.

    Args:
        property_name (str): The name of the property to add.
        values (list): A sequence of values, one per site. Must be same
            length as the number of sites.

    Raises:
        ValueError: if ``values`` does not match the number of sites.
    """
    site_count = len(self.sites)
    if len(values) != site_count:
        raise ValueError("Values must be same length as sites.")
    for index, site in enumerate(self.sites):
        site.properties[property_name] = values[index]
Adds a property to a site. Args: property_name (str): The name of the property to add. values (list): A sequence of values. Must be same length as number of sites.
juraj-google-style
def normalize_genotypes(genotypes):
    """Normalize the genotypes to zero mean and unit variance.

    Args:
        genotypes (Genotypes): The genotypes to normalize (the raw values
            are read from its ``genotypes`` attribute).

    Returns:
        numpy.array: The normalized genotypes.
    """
    values = genotypes.genotypes
    # nan-aware statistics so missing genotypes do not poison the result.
    center = np.nanmean(values)
    spread = np.nanstd(values)
    return (values - center) / spread
Normalize the genotypes. Args: genotypes (Genotypes): The genotypes to normalize. Returns: numpy.array: The normalized genotypes.
codesearchnet
def check(self, namespace, level, explicit=False):
    """Checks if the permset has permission to the specified namespace at
    the specified level.

    Args:
        namespace (str): permissioning namespace.
        level (int): permissioning level (PERM_READ for example).
        explicit (bool): require explicitly set permissions to the
            provided namespace.

    Returns:
        bool: True if any bit of ``level`` is granted.
    """
    granted = self.get_permissions(namespace, explicit=explicit)
    return bool(granted & level)
Checks if the permset has permission to the specified namespace at the specified level Arguments: namespace -- permissioning namespace (str) level -- permissioning level (int) (PERM_READ for example) explicit -- require explicitly set permissions to the provided namespace Returns: bool
codesearchnet
def __init__(self, image_true_sampler):
    """Init.

    Args:
        image_true_sampler: is-a `ImageTrueSampler`.

    Raises:
        TypeError: if the argument is not an `ImageTrueSampler`.
    """
    if not isinstance(image_true_sampler, ImageTrueSampler):
        raise TypeError()
    self.__image_true_sampler = image_true_sampler
Init. Args: image_true_sampler: is-a `ImageTrueSampler`.
juraj-google-style
def put(self, key, value):
    """Stores the object `value` named by `key` in the service.

    Args:
        key: Key naming `value`.
        value: the object to store.
    """
    # Translate the caller's key into the service's key namespace first.
    service_key = self._service_key(key)
    self._service_ops['put'](service_key, value)
Stores the object `value` named by `key` in `service`. Args: key: Key naming `value`. value: the object to store.
juraj-google-style
def _update_general_statistics(a_float, dist):
    """Adds ``a_float`` to the distribution, updating its statistics fields.

    Args:
        a_float (float): a new value.
        dist: the Distribution message being updated in place
            (count, mean, maximum, minimum, sumOfSquaredDeviation).
    """
    if not dist.count:
        # First sample: seed every statistic from the single value.
        dist.count = 1
        dist.maximum = a_float
        dist.minimum = a_float
        dist.mean = a_float
        dist.sumOfSquaredDeviation = 0
    else:
        prior_count = dist.count
        prior_mean = dist.mean
        updated_mean = ((prior_count * prior_mean) + a_float) / (prior_count + 1)
        # Welford-style incremental update of the squared-deviation sum.
        dist.sumOfSquaredDeviation += (a_float - prior_mean) * (a_float - updated_mean)
        dist.count = prior_count + 1
        dist.mean = updated_mean
        dist.maximum = max(a_float, dist.maximum)
        dist.minimum = min(a_float, dist.minimum)
Adds a_float to distribution, updating the statistics fields. Args: a_float (float): a new value dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`): the Distribution being updated
juraj-google-style
def skip_log_prefix(func):
    """Skips reporting the prefix of a given function or name by ABSLLogger.

    Convenience wrapper / decorator for
    `ABSLLogger.register_frame_to_skip`. With a callable, only that
    function is skipped; with a name, all same-named functions in the
    calling file are skipped.

    Args:
        func: Callable function or its name as a string.

    Returns:
        func (the input, unchanged).

    Raises:
        ValueError: The input is callable but has no function code object.
        TypeError: The input is neither callable nor a string.
    """
    if callable(func):
        code_obj = getattr(func, '__code__', None)
        if code_obj is None:
            raise ValueError('Input callable does not have a function code object.')
        location = (code_obj.co_filename, code_obj.co_name, code_obj.co_firstlineno)
    elif isinstance(func, six.string_types):
        # For a bare name, the file to skip is the caller's file.
        location = (get_absl_logger().findCaller()[0], func, None)
    else:
        raise TypeError('Input is neither callable nor a string.')
    ABSLLogger.register_frame_to_skip(*location)
    return func
Skips reporting the prefix of a given function or name by ABSLLogger. This is a convenience wrapper function / decorator for `ABSLLogger.register_frame_to_skip`. If a callable function is provided, only that function will be skipped. If a function name is provided, all functions with the same name in the file that this is called in will be skipped. This can be used as a decorator of the intended function to be skipped. Args: func: Callable function or its name as a string. Returns: func (the input, unchanged). Raises: ValueError: The input is callable but does not have a function code object. TypeError: The input is neither callable nor a string.
codesearchnet
def create_empty_output_dir(output_directory: str, overwrite: bool=True) -> None:
    """Creates the `output_directory`, optionally emptying it first.

    If `output_directory` already exists and `overwrite` is set, its
    contents are recursively deleted. Parent and intermediate directories
    are created as needed.

    Args:
        output_directory: Output directory.
        overwrite: Whether to clean the output directory if it exists.
    """
    if overwrite and file_io.file_exists_v2(output_directory):
        logging.info('Deleting existing output directory: %s .', output_directory)
        file_io.delete_recursively_v2(output_directory)
    file_io.recursive_create_dir_v2(output_directory)
Creates the `output_directory`. If `output_directory` already exists, it recursively deletes all contents inside the directory. Also creates the parent & intermediate directories. Args: output_directory: Output directory. overwrite: Where to clean the output directory if exists.
github-repos
def __init__(self, timestamp, family=None, reverse=False):
    """Create a timestamp rule.

    Args:
        timestamp (int): Epoch time.
        family (str): Package family to apply the rule to.
        reverse (bool): If True, reverse the logic so that packages
            released *after* the timestamp are matched.
    """
    self.timestamp = timestamp
    self.reverse = reverse
    # Family is private; exposed elsewhere if needed.
    self._family = family
Create a timestamp rule. Args: timestamp (int): Epoch time. family (str): Package family to apply the rule to. reverse (bool): If True, reverse the logic so that packages released *after* the timestamp are matched.
juraj-google-style
def expand_value_set_url(self, value_set_url: str) -> value_set_pb2.ValueSet:
    """Expands the value set using a terminology server.

    The terminology service is chosen based on the domain of
    `value_set_url`; a version suffix on the URL, if present, is parsed
    off and forwarded separately.

    Args:
        value_set_url: The url of the value set to expand.

    Raises:
        ValueError: If a terminology service can not be found for
            `value_set_url`.

    Returns:
        The current definition of the value set from the server with its
        expanded codes present.
    """
    # Split an optional version suffix off the canonical URL.
    value_set_url, value_set_version = url_utils.parse_url_version(value_set_url)
    value_set_url, terminology_service_url = _expansion_request_url_for_value_set_url(value_set_url)
    # Per-server credentials, when configured for this terminology server.
    auth = self.auth_per_terminology_server.get(base_url)
    return self._expand_value_set_url_using_service(value_set_url=value_set_url, value_set_version=value_set_version, terminology_service_url=terminology_service_url, auth=auth)
Expands the value set using a terminology server. Requests an expansion of the value set from the appropriate terminology server for the given URL and version if present on the URL. The terminology service is chosen based on the domain of `value_set_url`. Retrieves the current definition of the value set from the terminology service as well as its expansion. Args: value_set_url: The url of the value set to expand. Raises: ValueError: If a terminology service can not be found for `value_set_url`. Returns: The current definition of the value set from the server with its expanded codes present.
github-repos
def verify_oauth2_token(id_token, request, audience=None):
    """Verifies an ID Token issued by Google's OAuth 2.0 authorization server.

    Args:
        id_token (Union[str, bytes]): The encoded token.
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
        audience (str): The audience that this token is intended for. This
            is typically your application's OAuth 2.0 client ID. If None
            then the audience is not verified.

    Returns:
        Mapping[str, Any]: The decoded token.
    """
    # Thin wrapper: delegates to verify_token using Google's OAuth2 certs URL.
    return verify_token(id_token, request, audience=audience, certs_url=_GOOGLE_OAUTH2_CERTS_URL)
Verifies an ID Token issued by Google's OAuth 2.0 authorization server. Args: id_token (Union[str, bytes]): The encoded token. request (google.auth.transport.Request): The object used to make HTTP requests. audience (str): The audience that this token is intended for. This is typically your application's OAuth 2.0 client ID. If None then the audience is not verified. Returns: Mapping[str, Any]: The decoded token.
codesearchnet
def merge(self, other_rel):
    """Ingest another DistributedReliability and add its contents to the
    current object.

    Args:
        other_rel: a DistributedReliability object with matching
            thresholds; mismatched tables are reported and skipped.
    """
    sizes_match = other_rel.thresholds.size == self.thresholds.size
    if sizes_match and np.all(other_rel.thresholds == self.thresholds):
        self.frequencies += other_rel.frequencies
    else:
        # Best-effort behavior: report the mismatch rather than raising.
        print("Input table thresholds do not match.")
Ingest another DistributedReliability and add its contents to the current object. Args: other_rel: a Distributed reliability object.
juraj-google-style
def object(self, key):
    """Retrieves a Storage Object for the specified key in this bucket.

    The object need not exist.

    Args:
        key: the key of the object within the bucket.

    Returns:
        An Object instance representing the specified key.
    """
    # NOTE: method name shadows the `object` builtin inside this class;
    # kept for API compatibility.
    return _object.Object(self._name, key, context=self._context)
Retrieves a Storage Object for the specified key in this bucket. The object need not exist. Args: key: the key of the object within the bucket. Returns: An Object instance representing the specified key.
codesearchnet
def AddRow(self, values):
    """Adds a row of values.

    Args:
        values (list[object]): values; non-text values are converted to
            their string representation.

    Raises:
        ValueError: if the number of values is out of bounds.
    """
    if self._number_of_columns and len(values) != self._number_of_columns:
        raise ValueError('Number of values is out of bounds.')
    # Lazily seed the per-column widths from the header row.
    if not self._column_sizes and self._columns:
        self._column_sizes = [len(column) for column in self._columns]
    row = []
    for column_index, cell in enumerate(values):
        if not isinstance(cell, py2to3.UNICODE_TYPE):
            cell = '{0!s}'.format(cell)
        row.append(cell)
        # Track the widest cell seen per column for later alignment.
        self._column_sizes[column_index] = max(
            self._column_sizes[column_index], len(cell))
    self._rows.append(row)
    if not self._number_of_columns:
        self._number_of_columns = len(row)
Adds a row of values. Args: values (list[object]): values. Raises: ValueError: if the number of values is out of bounds.
juraj-google-style
def success(self, value):
    """The success property setter.

    Args:
        value (bool): the property value; setting the default value drops
            any explicit override.
    """
    is_default = value == self._defaults['success']
    if is_default and 'success' in self._values:
        del self._values['success']
    else:
        self._values['success'] = value
The success property. Args: value (bool). the property value.
juraj-google-style
def vert_quality(script, min_quality=0.0, max_quality=0.05, inclusive=True):
    """Select all the faces and vertexes within the specified vertex
    quality range.

    Args:
        script: the FilterScript object or script filename to write the
            filter to.
        min_quality (float): Minimum acceptable quality value.
        max_quality (float): Maximum acceptable quality value.
        inclusive (bool): If True only the faces with ALL the vertices
            within the specified range are selected. Otherwise any face
            with at least one vertex within the range is selected.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # Slider maxima are set to twice max_quality so the chosen values sit
    # mid-range in the MeshLab UI.
    filter_xml = ''.join([' <filter name="Select by Vertex Quality">\n',
                          ' <Param name="minQ" ',
                          'value="{}" '.format(min_quality),
                          'description="Min Quality" ',
                          'min="0" ',
                          'max="{}" '.format((2 * max_quality)),
                          'type="RichDynamicFloat" ',
                          '/>\n',
                          ' <Param name="maxQ" ',
                          'value="{}" '.format(max_quality),
                          'description="Max Quality" ',
                          'min="0" ',
                          'max="{}" '.format((2 * max_quality)),
                          'type="RichDynamicFloat" ',
                          '/>\n',
                          ' <Param name="Inclusive" ',
                          'value="{}" '.format(str(inclusive).lower()),
                          'description="Inclusive Sel." ',
                          'type="RichBool" ',
                          '/>\n',
                          ' </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
Select all the faces and vertexes within the specified vertex quality range. Args: script: the FilterScript object or script filename to write the filter] to. min_quality (float): Minimum acceptable quality value. max_quality (float): Maximum acceptable quality value. inclusive (bool): If True only the faces with ALL the vertices within the specified range are selected. Otherwise any face with at least one vertex within the range is selected. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
codesearchnet
def dates(self):
    """Returns the dates as computed from the schedule as a DateTensor.

    Constructs the date schedule from the supplied data. For more details
    see the initializer docstring.

    Returns:
        `DateTensor` of rank one more than `start_date` or `end_date`
        (depending on `backward`), representing schedules for each element
        of the input.
    """
    return _gen_periodic_schedule(self._start_date, self._end_date, self._tenor, holiday_calendar=self._holiday_calendar, roll_convention=self._roll_convention, backward=self._backward, end_of_month=self._end_of_month)
Returns the dates as computed from the schedule as a DateTensor. Constructs the date schedule from the supplied data. For more details see the initializer docstring. Returns: `DateTensor` of rank one more than `start_date` or `end_date` (depending on `backwards`), representing schedules for each element of the input.
github-repos
def update_q(self, predicted_q_arr, reward_value_arr, next_max_q_arr):
    """Update Q via the temporal-difference rule.

    Args:
        predicted_q_arr: `np.ndarray` of predicted Q-Values.
        reward_value_arr: `np.ndarray` of reward values.
        next_max_q_arr: `np.ndarray` of maximum Q-Values in next time step.

    Returns:
        `np.ndarray` of real Q-Values.
    """
    # TD error: r + gamma * max_a' Q(s', a') - Q(s, a)
    td_error = reward_value_arr + (self.gamma_value * next_max_q_arr) - predicted_q_arr
    return predicted_q_arr + (self.alpha_value * td_error)
Update Q. Args: predicted_q_arr: `np.ndarray` of predicted Q-Values. reward_value_arr: `np.ndarray` of reward values. next_max_q_arr: `np.ndarray` of maximum Q-Values in next time step. Returns: `np.ndarray` of real Q-Values.
juraj-google-style
def verify_bitcoin(message, signature, address):
    """Verifies a message signed using PrivateKey.sign_bitcoin() or any of
    the bitcoin utils (e.g. bitcoin-cli, bx, etc.)

    Args:
        message (bytes): The message that the signature corresponds to.
        signature (bytes or str): A Base64 encoded signature.
        address (str): Base58Check encoded address.

    Returns:
        bool: True if the signature verified properly, False otherwise.

    Raises:
        ValueError: If no public key can be recovered from the signature.
    """
    magic_sig = base64.b64decode(signature)
    # First byte is the "magic" recovery header; the rest is the signature.
    magic = magic_sig[0]
    sig = Signature.from_bytes(magic_sig[1:])
    # Low two bits encode the recovery id; bit 2 flags a compressed key.
    sig.recovery_id = (magic - 27) & 0x3
    compressed = ((magic - 27) & 0x4) != 0
    # Standard Bitcoin signed-message envelope (0x18 = header length).
    msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message
    msg_hash = hashlib.sha256(msg).digest()
    derived_public_key = PublicKey.from_signature(msg_hash, sig)
    if derived_public_key is None:
        raise ValueError("Could not recover public key from the provided signature.")
    # The recovered key must hash to the same HASH160 as the address.
    ver, h160 = address_to_key_hash(address)
    hash160 = derived_public_key.hash160(compressed)
    if hash160 != h160:
        return False
    return derived_public_key.verify(msg_hash, sig)
Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise.
juraj-google-style
def GetDevicePath(device_handle):
    """Obtains the unique path for the device.

    Args:
        device_handle: reference to the device.

    Returns:
        A unique path for the device, obtained from the IO Registry.
    """
    service = iokit.IOHIDDeviceGetService(device_handle)
    # IORegistryEntryGetPath fills the caller-supplied buffer in place.
    path_buffer = ctypes.create_string_buffer(DEVICE_PATH_BUFFER_SIZE)
    iokit.IORegistryEntryGetPath(service, K_IO_SERVICE_PLANE, path_buffer)
    return path_buffer.value
Obtains the unique path for the device. Args: device_handle: reference to the device Returns: A unique path for the device, obtained from the IO Registry
codesearchnet
def vq_gating(x, num_experts, k, bneck, hparams=None, name="vq_gating"):
    """VQ gating.

    Args:
        x: input Tensor with shape [batch_size, input_size]
        num_experts: an integer
        k: an integer - number of experts per example
        bneck: a bottleneck object
        hparams: optional hparams
        name: an optional string

    Returns:
        gates: a Tensor with shape [batch_size, num_experts]
        extra_loss: scalar bottleneck loss (plus scale loss if enabled)
        centroids: residual centroids if enabled, else None
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        if hparams.use_scales:
            scales = tf.get_variable(
                "scales", [num_experts], tf.float32,
                initializer=tf.ones_initializer())
            scales = tf.nn.softmax(scales)
            hparams.scales = scales
        input_size = x.get_shape().as_list()[-1]
        batch_size = common_layers.shape_list(x)[0]

        if k > 1:
            # Project to k expert choices per example.
            x = tf.layers.dense(x, input_size * k)
            x = tf.reshape(x, [batch_size * k, input_size])
        inputs = tf.expand_dims(x, axis=1)
        inputs = tf.expand_dims(inputs, axis=1)
        # Configure the bottleneck to produce log2(num_experts)-bit codes.
        hparams.z_size = int(math.log(num_experts, 2))
        hparams.hidden_size = input_size
        hparams.top_k = k
        d = bneck.discrete_bottleneck(inputs)
        centroids = None
        exp_discrete = d["discrete"]
        embed_lookup = d["embed"]
        extra_loss = d["loss"]
        if hparams.residual_centroids:
            centroids = embed_lookup(exp_discrete)
        top_k_indices = tf.squeeze(exp_discrete, axis=1)
        tf.summary.histogram("discrete_counts", top_k_indices)
        if k > 1:
            top_k_indices = tf.reshape(top_k_indices, [batch_size, k])
        top_k_gates = tf.ones([batch_size, k])
        # Scatter the k unit gates into a [batch_size, num_experts] tensor.
        gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices,
                                              num_experts)
        count_per_expert = tf.reduce_sum(gates, axis=0)
        if hparams.use_scales:
            scale_loss = tf.reduce_mean(tf.to_float(count_per_expert) * scales)
            extra_loss += scale_loss
        if common_layers.should_generate_summaries():
            tf.summary.histogram("vq_loss", extra_loss)
            # BUG FIX: was tf.summary.historgram (a typo raising
            # AttributeError) and referenced scale_loss even when
            # hparams.use_scales is False (NameError).  The summary is now
            # spelled correctly and guarded on use_scales.
            if hparams.use_scales:
                tf.summary.histogram("scale_loss", scale_loss)
        return gates, extra_loss, centroids
VQ gating. Args: x: input Tensor with shape [batch_size, input_size] num_experts: an integer k: an integer - number of experts per example bneck: a bottleneck object hparams: optional hparams name: an optional string Returns: gates: a Tensor with shape [batch_size, num_experts] load: a Tensor with shape [num_experts]
juraj-google-style
def get_num_filters(layer):
    """Determines the number of filters within the given `layer`.

    Args:
        layer: The keras layer to use.

    Returns:
        Total number of filters within `layer`. For `keras.layers.Dense`
        layer, this is the total number of outputs.
    """
    if K.ndim(layer.output) == 2:
        # Dense-style output: the last axis holds the unit count.
        return K.int_shape(layer.output)[-1]
    # Conv-style output: channel axis depends on the image data format.
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    return K.int_shape(layer.output)[channel_axis]
Determines the number of filters within the given `layer`. Args: layer: The keras layer to use. Returns: Total number of filters within `layer`. For `keras.layers.Dense` layer, this is the total number of outputs.
codesearchnet
def get_sites_in_sphere(self, pt, r, include_index=False, include_image=False):
    """Find all sites within a sphere from the point, including sites in
    other periodic images.

    Args:
        pt (3x1 array): cartesian coordinates of center of sphere.
        r (float): Radius of sphere.
        include_index (bool): Whether the non-supercell site index is
            included in the returned data.
        include_image (bool): Whether the supercell image is included in
            the returned data.

    Returns:
        [(site, dist) ...] — the distance is included since most
        subsequent processing requires it.
    """
    # Wrap fractional coordinates into the unit cell before searching.
    site_fcoords = np.mod(self.frac_coords, 1)
    neighbors = []
    for fcoord, dist, index, image in self._lattice.get_points_in_sphere(site_fcoords, pt, r):
        neighbor_site = PeriodicSite(self[index].species, fcoord, self._lattice,
                                     properties=self[index].properties)
        entry = (neighbor_site, dist) if not include_index else (neighbor_site, dist, index)
        if include_image:
            entry += (image,)
        neighbors.append(entry)
    return neighbors
Find all sites within a sphere from the point. This includes sites in other periodic images. Algorithm: 1. place sphere of radius r in crystal and determine minimum supercell (parallelepiped) which would contain a sphere of radius r. for this we need the projection of a_1 on a unit vector perpendicular to a_2 & a_3 (i.e. the unit vector in the direction b_1) to determine how many a_1's it will take to contain the sphere. Nxmax = r * length_of_b_1 / (2 Pi) 2. keep points falling within r. Args: pt (3x1 array): cartesian coordinates of center of sphere. r (float): Radius of sphere. include_index (bool): Whether the non-supercell site index is included in the returned data include_image (bool): Whether the supercell image is included in the returned data Returns: [(site, dist) ...] since most of the time, subsequent processing requires the distance.
codesearchnet
def __init__(self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False):
    """Initializes a file entry.

    Args:
        resolver_context (Context): resolver context.
        file_system (FileSystem): file system.
        path_spec (PathSpec): path specification.
        is_root (Optional[bool]): True if the file entry is the root file
            entry of the corresponding file system.
        is_virtual (Optional[bool]): True if the file entry is a virtual
            file entry.

    Raises:
        BackEndError: when the gzip file is missing.
    """
    # Resolve the underlying gzip file before initializing the base class.
    gzip_file = resolver.Resolver.OpenFileObject(
        path_spec, resolver_context=resolver_context)
    if not gzip_file:
        raise errors.BackEndError('Missing gzip file.')
    super(GzipFileEntry, self).__init__(
        resolver_context, file_system, path_spec, is_root=is_root,
        is_virtual=is_virtual)
    self._gzip_file = gzip_file
    self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
Initializes a file entry. Args: resolver_context (Context): resolver context. file_system (FileSystem): file system. path_spec (PathSpec): path specification. is_root (Optional[bool]): True if the file entry is the root file entry of the corresponding file system. is_virtual (Optional[bool]): True if the file entry is a virtual file entry. Raises: BackEndError: when the gzip file is missing.
juraj-google-style
def _ConvertDateTimeToOffset(self, date_time_value):
    """Converts the PQL formatted response for a dateTime object.

    Output conforms to ISO 8601 format, e.g. 'YYYY-MM-DDTHH:MM:SSz'.

    Args:
        date_time_value: dict The date time value from the PQL response.

    Returns:
        str: A string representation of the date time value uniform to
        ReportService.
    """
    date_fields = date_time_value['date']
    naive = datetime.datetime(
        int(date_fields['year']), int(date_fields['month']),
        int(date_fields['day']), int(date_time_value['hour']),
        int(date_time_value['minute']), int(date_time_value['second']))
    # The time-zone field name changed capitalization after v201808.
    if self._version > 'v201808':
        time_zone_str = 'timeZoneId'
    else:
        time_zone_str = 'timeZoneID'
    date_time_str = pytz.timezone(
        date_time_value[time_zone_str]).localize(naive).isoformat()
    # Normalize a UTC "+00:00" offset to the compact 'Z' suffix.
    if date_time_str[-5:] == '00:00':
        return date_time_str[:-6] + 'Z'
    return date_time_str
Converts the PQL formatted response for a dateTime object. Output conforms to ISO 8061 format, e.g. 'YYYY-MM-DDTHH:MM:SSz.' Args: date_time_value: dict The date time value from the PQL response. Returns: str: A string representation of the date time value uniform to ReportService.
juraj-google-style
def sia_bipartitions(nodes, node_labels=None):
    """Return all |big_phi| cuts for the given nodes.

    The result depends on :const:`config.CUT_ONE_APPROXIMATION`.

    Args:
        nodes (tuple[int]): The node indices to partition.
        node_labels: optional labels attached to each resulting Cut.

    Returns:
        list[Cut]: All unidirectional partitions.
    """
    if config.CUT_ONE_APPROXIMATION:
        partitions = directed_bipartition_of_one(nodes)
    else:
        # Exhaustive nontrivial bipartitions when not approximating.
        partitions = directed_bipartition(nodes, nontrivial=True)
    return [Cut(severed, intact, node_labels) for severed, intact in partitions]
Return all |big_phi| cuts for the given nodes. This value changes based on :const:`config.CUT_ONE_APPROXIMATION`. Args: nodes (tuple[int]): The node indices to partition. Returns: list[Cut]: All unidirectional partitions.
codesearchnet
def write(name, value):
    """Temporarily change or set the environment variable during the
    execution of a function.

    Args:
        name: The name of the environment variable
        value: A value to set for the environment variable

    Returns:
        The function return value.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            existing_env = core.read(name, allow_none=True)
            core.write(name, value)
            # BUG FIX: restore the previous value even if func raises, so
            # the environment is never left in the temporary state.
            try:
                return func(*args, **kwargs)
            finally:
                core.write(name, existing_env)
        return _decorator
    return wrapped
Temporarily change or set the environment variable during the execution of a function. Args: name: The name of the environment variable value: A value to set for the environment variable Returns: The function return value.
juraj-google-style
def _obtain_health_pills_at_step(self, events_directory, node_names, step):
    """Reads disk to obtain the health pills for a run at a specific step.

    This can be much slower than returning all health pills sampled by the
    event multiplexer; for large graphs at big step values it may take a
    long time.

    Args:
        events_directory: The directory containing events for the desired
            run.
        node_names: A list of node names for which to retrieve health
            pills.
        step: The step to obtain health pills for.

    Returns:
        A dictionary mapping from node name to a list of health pill
        objects.

    Raises:
        IOError: If no files with health pill events could be found.
    """
    pattern = os.path.join(events_directory, _DEBUGGER_EVENTS_GLOB_PATTERN)
    file_paths = glob.glob(pattern)
    if not file_paths:
        raise IOError('No events files found that matches the pattern %r.' % pattern)
    # Process files in lexicographic (chronological) order.
    file_paths.sort()
    mapping = collections.defaultdict(list)
    node_name_set = frozenset(node_names)
    for file_path in file_paths:
        # The processor signals when the requested step has been passed.
        if self._process_health_pill_event(node_name_set, mapping, step, file_path):
            break
    return mapping
Reads disk to obtain the health pills for a run at a specific step. This could be much slower than the alternative path of just returning all health pills sampled by the event multiplexer. It could take tens of minutes to complete this call for large graphs for big step values (in the thousands). Args: events_directory: The directory containing events for the desired run. node_names: A list of node names for which to retrieve health pills. step: The step to obtain health pills for. Returns: A dictionary mapping from node name to a list of health pill objects (see docs for _serve_health_pills_handler for properties of those objects). Raises: IOError: If no files with health pill events could be found.
codesearchnet
def stack1d(*points):
    """Fill out the columns of a matrix with a series of points.

    ``np.hstack()`` would flatten the 1D inputs into another 1D vector and
    ``np.vstack()`` would put them in rows, so the columns are filled
    explicitly.

    Args:
        points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (arrays
            with shape ``(2,)``).

    Returns:
        numpy.ndarray: The array with each point in ``points`` as its
        columns.
    """
    stacked = np.empty((2, len(points)), order="F")
    for column, point in enumerate(points):
        stacked[:, column] = point
    return stacked
Fill out the columns of matrix with a series of points. This is because ``np.hstack()`` will just make another 1D vector out of them and ``np.vstack()`` will put them in the rows. Args: points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e. arrays with shape ``(2,)``. Returns: numpy.ndarray: The array with each point in ``points`` as its columns.
juraj-google-style
def _get_flow_for_token(csrf_token, request):
    """Looks up the flow in session to recover information about requested
    scopes.

    Args:
        csrf_token: The token passed in the callback request that should
            match the one previously generated and stored in the request
            on the initial authorization view.
        request: the request whose session holds the pickled flow.

    Returns:
        The OAuth2 Flow object associated with this flow based on the
        CSRF token, or None when no flow is stored for the token.
    """
    flow_pickle = request.session.get(_FLOW_KEY.format(csrf_token), None)
    return None if flow_pickle is None else jsonpickle.decode(flow_pickle)
Looks up the flow in session to recover information about requested scopes. Args: csrf_token: The token passed in the callback request that should match the one previously generated and stored in the request on the initial authorization view. Returns: The OAuth2 Flow object associated with this flow based on the CSRF token.
juraj-google-style
def __delitem__(self, key):
    """Delete cached file.

    Arguments:
        key: Key.

    Raises:
        KeyError: If file not in cache.
    """
    path = self.keypath(key)
    # Guard clause: a missing file maps to the dict-style KeyError.
    if not fs.exists(path):
        raise KeyError(key)
    fs.rm(path)
Delete cached file. Arguments: key: Key. Raises: KeyError: If file not in cache.
juraj-google-style
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
    """Write the RNGParameters structure encoding to the data stream.

    Args:
        output_buffer (stream): A data stream in which to encode the
            RNGParameters structure data, supporting a write method.
        kmip_version (enum): A KMIPVersion enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.3.

    Raises:
        InvalidField: Raised if the RNG algorithm field is not defined.
        VersionNotSupported: Raised when a KMIP version is provided that
            does not support the RNGParameters structure.
    """
    # RNGParameters exists only from KMIP 1.3 onward.
    if (kmip_version < enums.KMIPVersion.KMIP_1_3):
        raise exceptions.VersionNotSupported('KMIP {} does not support the RNGParameters object.'.format(kmip_version.value))
    # Encode all fields into a local buffer first so the total structure
    # length is known before the header is written.
    local_buffer = BytearrayStream()
    # The RNG algorithm is the only required field.
    if self._rng_algorithm:
        self._rng_algorithm.write(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidField('The RNGParameters structure is missing the RNG algorithm field.')
    # All remaining fields are optional and written only when set.
    if self._cryptographic_algorithm:
        self._cryptographic_algorithm.write(local_buffer, kmip_version=kmip_version)
    if self._cryptographic_length:
        self._cryptographic_length.write(local_buffer, kmip_version=kmip_version)
    if self._hashing_algorithm:
        self._hashing_algorithm.write(local_buffer, kmip_version=kmip_version)
    if self._drbg_algorithm:
        self._drbg_algorithm.write(local_buffer, kmip_version=kmip_version)
    if self._recommended_curve:
        self._recommended_curve.write(local_buffer, kmip_version=kmip_version)
    if self._fips186_variation:
        self._fips186_variation.write(local_buffer, kmip_version=kmip_version)
    if self._prediction_resistance:
        self._prediction_resistance.write(local_buffer, kmip_version=kmip_version)
    self.length = local_buffer.length()
    super(RNGParameters, self).write(output_buffer, kmip_version=kmip_version)
    output_buffer.write(local_buffer.buffer)
Write the RNGParameters structure encoding to the data stream. Args: output_buffer (stream): A data stream in which to encode the RNGParameters structure data, supporting a write method. kmip_version (enum): A KMIPVersion enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.3. Raises: InvalidField: Raised if the RNG algorithm field is not defined. VersionNotSupported: Raised when a KMIP version is provided that does not support the RNGParameters structure.
codesearchnet
def ExamineEvent(self, mediator, event):
    """Analyzes an event, tagging and counting search-engine query URLs.

    Args:
        mediator (AnalysisMediator): mediates interactions between analysis
            plugins and other components, such as storage and dfvfs.
        event (EventObject): event to examine.
    """
    url = getattr(event, 'url', None)
    if not url:
        return

    # Only web-history events are of interest.
    source, _ = formatters_manager.FormattersManager.GetSourceStrings(event)
    if source != 'WEBHIST':
        return

    for engine, url_expression, method_name in self._URL_FILTERS:
        callback_method = getattr(self, method_name, None)
        if not callback_method:
            # BUG FIX: previously logged callback_method, which is always
            # None on this path; log the missing method's name instead.
            logger.warning('Missing method: {0:s}'.format(method_name))
            continue

        match = url_expression.search(url)
        if not match:
            continue

        search_query = callback_method(url)
        if not search_query:
            logger.warning('Missing search query for URL: {0:s}'.format(url))
            continue

        search_query = self._DecodeURL(search_query)
        if not search_query:
            continue

        event_tag = self._CreateEventTag(
            event, self._EVENT_TAG_COMMENT, self._EVENT_TAG_LABELS)
        mediator.ProduceEventTag(event_tag)

        self._counter['{0:s}:{1:s}'.format(engine, search_query)] += 1

        # Record when and from which parser/plugin the search was seen,
        # for the timeline report.
        timestamp = getattr(event, 'timestamp', 0)
        source = getattr(event, 'parser', 'N/A')
        source = getattr(event, 'plugin', source)
        self._search_term_timeline.append(
            SEARCH_OBJECT(timestamp, source, engine, search_query))
Analyzes an event. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
juraj-google-style
def load_json(path):
    """Load a JSON file into an ordered dictionary.

    Args:
        path (str): Path to JSON file.

    Returns:
        OrderedDict: Ordered dictionary containing the loaded JSON file.

    Raises:
        LoadError: If the parsed JSON document is empty.
    """
    with open(path, 'rt') as f:
        # json.load streams from the file object directly instead of
        # materializing the whole file with f.read() first.
        jsondict = json.load(f, object_pairs_hook=OrderedDict)
    if not jsondict:
        raise LoadError('JSON file: %s is empty!' % path)
    return jsondict
Load JSON file into an ordered dictionary Args: path (str): Path to JSON file Returns: OrderedDict: Ordered dictionary containing loaded JSON file
codesearchnet
def Parse(self, raw_data):
    """Run the raw data through the filters and collect what passed.

    Each filter's output is unioned into a result set, so a value matched by
    several filters is reported once. With no filters configured, all raw
    data passes through unchanged.

    Args:
        raw_data: An iterable series of rdf values.

    Returns:
        A list of rdf values that matched at least one filter.
    """
    self.results = set()
    collect = self.results.update
    if self.filters:
        for flt in self.filters:
            collect(flt.Parse(raw_data))
    else:
        collect(raw_data)
    return list(self.results)
Take the data and yield results that passed through the filters. The output of each filter is added to a result set. So long as the filter selects, but does not modify, raw data, the result count will remain accurate. Args: raw_data: An iterable series of rdf values. Returns: A list of rdf values that matched at least one filter.
codesearchnet
def _HasExpectedLineLength(self, file_object):
    """Determines if a file begins with lines of the expected length.

    As the maximum length of valid lines in the DSV file is known, the
    presence of longer lines indicates the file will not parse successfully,
    without reading excessive data from a large file.

    Args:
        file_object (dfvfs.FileIO): file-like object.

    Returns:
        bool: True if the file has lines of the expected length.
    """
    original_file_position = file_object.tell()
    line_reader = self._CreateLineReader(file_object)
    # Sample only the first 20 lines.
    for _ in range(0, 20):
        # Read one byte beyond the maximum so an over-long line is detectable.
        sample_line = line_reader.readline((self._maximum_line_length + 1))
        if (len(sample_line) > self._maximum_line_length):
            file_object.seek(original_file_position)
            return False
    # Restore the caller's file position in every exit path.
    file_object.seek(original_file_position)
    return True
Determines if a file begins with lines of the expected length. As we know the maximum length of valid lines in the DSV file, the presence of lines longer than this indicates that the file will not be parsed successfully, without reading excessive data from a large file. Args: file_object (dfvfs.FileIO): file-like object. Returns: bool: True if the file has lines of the expected length.
codesearchnet
def _from_dict_record(data):
    """Infer a BigQuery table schema from a dictionary.

    Ideally ``data`` is an OrderedDict so field order is preserved, but any
    dict is accepted.

    Args:
        data: The dict to infer a schema from.

    Returns:
        A list of field entries suitable for use in a BigQuery Tables
        resource schema.
    """
    entries = []
    for field_name, field_value in data.items():
        entries.append(Schema._get_field_entry(field_name, field_value))
    return entries
Infer a BigQuery table schema from a dictionary. If the dictionary has entries that are in turn OrderedDicts these will be turned into RECORD types. Ideally this will be an OrderedDict but it is not required. Args: data: The dict to infer a schema from. Returns: A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a BigQuery Tables resource schema.
juraj-google-style
def remove_chars(str_, char_list):
    """Remove every occurrence of each string in ``char_list`` from ``str_``.

    Args:
        str_ (str): Input string.
        char_list (list): Substrings to delete.

    Returns:
        str: The cleaned string.

    Example:
        >>> remove_chars('1, 2, 3, 4', [','])
        '1 2 3 4'
    """
    cleaned = str_
    for unwanted in char_list:
        cleaned = cleaned.replace(unwanted, '')
    return cleaned
removes all chars in char_list from str_ Args: str_ (str): char_list (list): Returns: str: outstr Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> str_ = '1, 2, 3, 4' >>> char_list = [','] >>> result = remove_chars(str_, char_list) >>> print(result) 1 2 3 4
codesearchnet
def has_strategy():
    """Return if there is a current non-default `tf.distribute.Strategy`.

    Returns:
        True if inside a `with strategy.scope():` block (i.e. the current
        strategy is not the default one).
    """
    return get_strategy() is not _get_default_strategy()
Return if there is a current non-default `tf.distribute.Strategy`. ``` assert not tf.distribute.has_strategy() with strategy.scope(): assert tf.distribute.has_strategy() ``` Returns: True if inside a `with strategy.scope():`.
github-repos
def convert_nested_model(weights):
    """Converts weights of layers nested in `Model` or `Sequential`.

    Recursively applies `preprocess_weights_for_loading()` to each sublayer's
    slice of the flat weights list.

    NOTE: this is a closure — `layer`, `original_keras_version` and
    `original_backend` are free variables from the enclosing function.

    Args:
        weights: List of weights values (Numpy arrays), trainable weights
            first, then non-trainable weights.

    Returns:
        A list of weights values (Numpy arrays), trainable first.
    """
    # Split the flat list into the trainable / non-trainable halves.
    trainable_weights = weights[:len(layer.trainable_weights)]
    non_trainable_weights = weights[len(layer.trainable_weights):]
    new_trainable_weights = []
    new_non_trainable_weights = []
    for sublayer in layer.layers:
        num_trainable_weights = len(sublayer.trainable_weights)
        num_non_trainable_weights = len(sublayer.non_trainable_weights)
        if sublayer.weights:
            preprocessed = preprocess_weights_for_loading(layer=sublayer, weights=trainable_weights[:num_trainable_weights] + non_trainable_weights[:num_non_trainable_weights], original_keras_version=original_keras_version, original_backend=original_backend)
            new_trainable_weights.extend(preprocessed[:num_trainable_weights])
            new_non_trainable_weights.extend(preprocessed[num_trainable_weights:])
            # Consume this sublayer's share of the remaining weights.
            trainable_weights = trainable_weights[num_trainable_weights:]
            non_trainable_weights = non_trainable_weights[num_non_trainable_weights:]
    return new_trainable_weights + new_non_trainable_weights
Converts layers nested in `Model` or `Sequential`. This function uses `preprocess_weights_for_loading()` for converting nested layers. Args: weights: List of weights values (Numpy arrays). Returns: A list of weights values (Numpy arrays).
github-repos
def GetMap(self, cache_info, data):
    """Returns a map populated from a cache.

    Args:
        cache_info: file like object containing the cache.
        data: a Map to populate.

    Returns:
        A child of Map containing the cache data.
    """
    for line in cache_info:
        line = line.rstrip('\n')
        # BUG FIX: the comment-character check was truncated in this copy
        # ("line[0] == ' "); restore skipping of blank lines and '#'
        # comment lines.
        if not line or line[0] == '#':
            continue
        entry = self._ReadEntry(line)
        if entry is None:
            self.log.warning('Could not create entry from line %r in cache, skipping', line)
            continue
        if not data.Add(entry):
            self.log.warning('Could not add entry %r read from line %r in cache', entry, line)
    return data
Returns a map from a cache. Args: cache_info: file like object containing the cache. data: a Map to populate. Returns: A child of Map containing the cache data.
github-repos
def _compute_euclidean_distance(cls, inputs, clusters):
    """Computes Euclidean distance between each input and each cluster center.

    Uses the expansion ||x - c||^2 = ||x||^2 - 2*x.c + ||c||^2 so the cross
    term is a single matmul per input shard.

    Args:
        inputs: list of input Tensors.
        clusters: cluster Tensor.

    Returns:
        list of Tensors, where each element corresponds to each element in
        inputs. The value is the distance of each row to all the cluster
        centers.
    """
    output = []
    for inp in inputs:
        # Keep the computation colocated with each input shard.
        with ops.colocate_with(inp, ignore_existing=True):
            squared_distance = math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) - 2 * math_ops.matmul(inp, clusters, transpose_b=True) + array_ops.transpose(math_ops.reduce_sum(math_ops.square(clusters), 1, keepdims=True))
            output.append(squared_distance)
    return output
Computes Euclidean distance between each input and each cluster center. Args: inputs: list of input Tensors. clusters: cluster Tensor. Returns: list of Tensors, where each element corresponds to each element in inputs. The value is the distance of each row to all the cluster centers.
github-repos
def format_map(self, format_string, mapping):
    """Format a string using values taken from a map.

    Args:
        format_string (str): A format string.
        mapping (dict): A map supplying the substitution values.

    Returns:
        The formatted string.

    Raises:
        KeyError: If a referenced key is not provided by the given map.
    """
    # vformat(format_string, args, kwargs) — no positional args are supplied.
    return self.vformat(format_string, None, mapping)
format a string by a map Args: format_string(str): A format string mapping(dict): A map to format the string Returns: A formatted string. Raises: KeyError: if key is not provided by the given map.
codesearchnet
def _get_present_locations(match_traversals):
    """Return the locations and non-optional locations present in the traversals.

    When enumerating the possibilities for optional traversals, the resulting
    match traversals may have sections of the query omitted; those locations
    are absent from `present_locations`. Locations not reached through an
    optional traverse are additionally collected in
    `present_non_optional_locations`.

    Args:
        match_traversals: one possible list of match traversals generated from
            a query containing @optional traversal(s)

    Returns:
        tuple (present_locations, present_non_optional_locations):
        - present_locations: set of all locations present in the given match
          traversals
        - present_non_optional_locations: set of all locations present in the
          match traversals that are not reached through optional traverses.
          Guaranteed to be a subset of present_locations.
    """
    present_locations = set()
    present_non_optional_locations = set()
    for match_traversal in match_traversals:
        for step in match_traversal:
            if step.as_block is not None:
                location_name, _ = step.as_block.location.get_location_name()
                present_locations.add(location_name)
                if isinstance(step.root_block, Traverse) and not step.root_block.optional:
                    present_non_optional_locations.add(location_name)
    # Sanity check; BUG FIX: the message previously read "THis hould".
    if not present_non_optional_locations.issubset(present_locations):
        raise AssertionError(u'present_non_optional_locations {} was not a subset of '
                             u'present_locations {}. This should never happen.'
                             .format(present_non_optional_locations, present_locations))
    return (present_locations, present_non_optional_locations)
Return the set of locations and non-optional locations present in the given match traversals. When enumerating the possibilities for optional traversals, the resulting match traversals may have sections of the query omitted. These locations will not be included in the returned `present_locations`. All of the above locations that are not optional traverse locations will be included in present_non_optional_locations. Args: match_traversals: one possible list of match traversals generated from a query containing @optional traversal(s) Returns: tuple (present_locations, present_non_optional_locations): - present_locations: set of all locations present in the given match traversals - present_non_optional_locations: set of all locations present in the match traversals that are not reached through optional traverses. Guaranteed to be a subset of present_locations.
codesearchnet
def scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Assigns `tf.IndexedSlices` to this variable.

    Args:
        sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
        use_locking: If `True`, use locking during the operation.
        name: the name of the operation.

    Returns:
        The updated variable.

    Raises:
        TypeError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')
    # NOTE(review): use_locking is accepted but not forwarded to the
    # underlying resource_scatter_update op — confirm this is intended.
    return self._lazy_read(gen_resource_variable_ops.resource_scatter_update(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
Assigns `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.
github-repos
def _get_required_params_for_impression(self, experiment, variation_id):
    """Get parameters that are required for the impression event to register.

    Args:
        experiment: Experiment for which impression needs to be recorded.
        variation_id: ID for variation which would be presented to user.

    Returns:
        Dict consisting of decisions and events info for impression event.
    """
    snapshot = {}
    snapshot[self.EventParams.DECISIONS] = [{self.EventParams.EXPERIMENT_ID: experiment.id, self.EventParams.VARIATION_ID: variation_id, self.EventParams.CAMPAIGN_ID: experiment.layerId}]
    # 'campaign_activated' is the fixed event key used for impressions.
    snapshot[self.EventParams.EVENTS] = [{self.EventParams.EVENT_ID: experiment.layerId, self.EventParams.TIME: self._get_time(), self.EventParams.KEY: 'campaign_activated', self.EventParams.UUID: str(uuid.uuid4())}]
    return snapshot
Get parameters that are required for the impression event to register. Args: experiment: Experiment for which impression needs to be recorded. variation_id: ID for variation which would be presented to user. Returns: Dict consisting of decisions and events info for impression event.
codesearchnet
def _build(self, inputs):
    """Assembles the module network and adds it to the graph.

    The internal computation graph is assembled according to the set of
    constraints provided at construction time.

    Args:
        inputs: Tensor containing a batch of transformation parameters.

    Returns:
        A batch of warped grids.

    Raises:
        Error: If the input tensor size is not consistent with the constraints
            passed at construction time.
    """
    input_shape = tf.shape(inputs)
    input_dtype = inputs.dtype.as_numpy_dtype
    batch_size = tf.expand_dims(input_shape[0], 0)
    number_of_params = inputs.get_shape()[1]
    if number_of_params != self._constraints.num_free_params:
        raise base.Error('Input size is not consistent with constraint '
                         'definition: {} parameters expected, {} provided.'
                         .format(self._constraints.num_free_params,
                                 number_of_params))
    # BUG FIX: `self._psi` holds three entries per output dimension, as the
    # `num_output_dimensions + i` and `i + 2 * num_output_dimensions` lookups
    # below require; the `// 3` had been dropped, which would always index
    # out of range.
    num_output_dimensions = len(self._psi) // 3

    def get_input_slice(start, size):
        """Slices `size` columns of `inputs` starting at column `start`."""
        return basic.SliceByDim([1], [start], [size])(inputs)

    warped_grid = []
    var_index_offset = 0
    number_of_points = np.prod(self._output_shape)
    for i in xrange(num_output_dimensions):
        if self._psi[i] is not None:
            # This output dimension depends on the inputs: project the active
            # variables through the stored grid coordinates.
            grid_coord = self._psi[i].astype(input_dtype)
            num_active_vars = self._psi[i].shape[0]
            active_vars = get_input_slice(var_index_offset, num_active_vars)
            warped_coord = tf.matmul(active_vars, grid_coord)
            warped_coord = tf.expand_dims(warped_coord, 1)
            var_index_offset += num_active_vars
            offset = self._psi[num_output_dimensions + i]
            if offset is not None:
                offset = offset.astype(input_dtype)
                tiling_params = tf.concat(
                    [
                        batch_size, tf.constant(
                            1, shape=(1,)), tf.ones_like(offset.shape)
                    ], 0)
                offset = offset.reshape((1, 1) + offset.shape)
                warped_coord += tf.tile(offset, tiling_params)
        else:
            # This output dimension is constant w.r.t. the inputs: tile the
            # precomputed coordinates across the batch.
            warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)
            tiling_params = tf.concat(
                [
                    batch_size, tf.constant(
                        1, shape=(1,)), tf.ones_like(warped_coord.shape)
                ], 0)
            warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)
            warped_coord = tf.tile(warped_coord, tiling_params)
        warped_coord += self._psi[i + 2 * num_output_dimensions]
        warped_coord.set_shape([None, 1, number_of_points])
        warped_grid.append(warped_coord)
    grid_shape = self._output_shape + (1,)
    warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]
    return tf.concat(warped_grid, len(grid_shape))
Assembles the module network and adds it to the graph. The internal computation graph is assembled according to the set of constraints provided at construction time. Args: inputs: Tensor containing a batch of transformation parameters. Returns: A batch of warped grids. Raises: Error: If the input tensor size is not consistent with the constraints passed at construction time.
juraj-google-style
def rename_v2(src, dst, overwrite=False):
    """Rename or move a file / directory.

    Args:
        src: string, pathname for a file.
        dst: string, pathname to which the file needs to be moved.
        overwrite: boolean, if false it's an error for `dst` to be occupied
            by an existing file.

    Raises:
        errors.OpError: If the operation fails.
    """
    _pywrap_file_io.RenameFile(compat.path_to_bytes(src), compat.path_to_bytes(dst), overwrite)
Rename or move a file / directory. Args: src: string, pathname for a file dst: string, pathname to which the file needs to be moved overwrite: boolean, if false it's an error for `dst` to be occupied by an existing file. Raises: errors.OpError: If the operation fails.
github-repos
def _get_session(self):
    """S3 Boto3 Session, created lazily and cached on first access.

    Returns:
        boto3.session.Session: session
    """
    if self._session is None:
        # Session keyword arguments come from the 'session' storage parameter.
        session_kwargs = self._storage_parameters.get('session', dict())
        self._session = _boto3.session.Session(**session_kwargs)
    return self._session
S3 Boto3 Session. Returns: boto3.session.Session: session
codesearchnet
def __init__(self, kw: YangIdentifier, arg: Optional[str], pref: YangIdentifier = None):
    """Initialize the class instance.

    Args:
        kw: Keyword.
        arg: Argument (may be ``None``).
        pref: Keyword prefix (``None`` for built-in statements).
    """
    self.prefix = pref          # keyword prefix, None for built-in statements
    self.keyword = kw           # statement keyword
    self.argument = arg         # statement argument
    self.superstmt = None       # parent statement (not set here)
    self.substatements = []     # substatements (filled in later)
Initialize the class instance. Args: kw: Keyword. arg: Argument (may be ``None``). pref: Keyword prefix (``None`` for built-in statements).
juraj-google-style
def print_colored_columns(printer, rows, padding=2):
    """Like `columnise`, but with colored rows.

    Args:
        printer (`colorize.Printer`): Printer object.
        rows (list): Rows to print; the last entry in each row is the row
            color, or None for no coloring.
        padding (int): Column padding forwarded to `columnise`.
    """
    # Separate each row into its cells and its trailing color entry.
    rows_ = [x[:(- 1)] for x in rows]
    colors = [x[(- 1)] for x in rows]
    for (col, line) in zip(colors, columnise(rows_, padding=padding)):
        printer(line, col)
Like `columnise`, but with colored rows. Args: printer (`colorize.Printer`): Printer object. Note: The last entry in each row is the row color, or None for no coloring.
codesearchnet
def sheets_get(config, auth, sheet_url_or_name):
    """Get sheets definition.

    Args:
        config: see starthinker/util/configuration.py
        auth: user or service
        sheet_url_or_name: one of: URL, document title, or id

    Returns:
        Dictionary with all sheets information from the Rest API, or None if
        the sheet could not be resolved to an id.
    """
    sheet_id = sheets_id(config, auth, sheet_url_or_name)
    if sheet_id:
        return API_Sheets(config, auth).spreadsheets().get(spreadsheetId=sheet_id).execute()
    else:
        return None
Get sheets definition. Args: config - see starthinker/util/configuration.py auth - user or service sheet_url_or_name - one of: URL, document title, or id Returns: Dictionary with all sheets information from Rest API.
github-repos
def export_tensorflow(preprocessor: Union['PreTrainedTokenizer', 'FeatureExtractionMixin'], model: 'TFPreTrainedModel', config: OnnxConfig, opset: int, output: Path, tokenizer: Optional['PreTrainedTokenizer']=None) -> Tuple[List[str], List[str]]:
    """Export a TensorFlow model to an ONNX Intermediate Representation (IR).

    Args:
        preprocessor ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]):
            The preprocessor used for encoding the data.
        model ([`TFPreTrainedModel`]): The model to export.
        config ([`~onnx.config.OnnxConfig`]): The ONNX configuration
            associated with the exported model.
        opset (`int`): The version of the ONNX operator set to use.
        output (`Path`): Path to store the exported ONNX model.
        tokenizer: Deprecated; use `preprocessor` instead.

    Returns:
        `Tuple[List[str], List[str]]`: The model's matched input names and the
        named outputs from the ONNX configuration.
    """
    import onnx
    import tensorflow as tf
    import tf2onnx
    if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
        raise ValueError('You cannot provide both a tokenizer and preprocessor to export the model.')
    if tokenizer is not None:
        warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)
        logger.info('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.')
        preprocessor = tokenizer
    model.config.return_dict = True
    # Apply any configuration overrides the ONNX config requires.
    if config.values_override is not None:
        logger.info(f'Overriding {len(config.values_override)} configuration item(s)')
        for override_config_key, override_config_value in config.values_override.items():
            logger.info(f'\t- {override_config_key} -> {override_config_value}')
            setattr(model.config, override_config_key, override_config_value)
    model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.TENSORFLOW)
    inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
    onnx_outputs = list(config.outputs.keys())
    # Dummy inputs fix only the rank; every dimension is left dynamic (None).
    input_signature = [tf.TensorSpec([None] * tensor.ndim, dtype=tensor.dtype, name=key) for key, tensor in model_inputs.items()]
    onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset)
    onnx.save(onnx_model, output.as_posix())
    config.restore_ops()
    return (matched_inputs, onnx_outputs)
Export a TensorFlow model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]): The preprocessor used for encoding the data. model ([`TFPreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration.
github-repos
def _apply_options(self, token): if (token.is_punct and self.remove_punct): return None if (token.is_stop and self.remove_stop_words): return None if (token.is_digit and self.remove_digits): return None if (token.is_oov and self.exclude_oov): return None if (token.pos_ in self.exclude_pos_tags): return None if (token.ent_type_ in self.exclude_entities): return None if self.lemmatize: return token.lemma_ if self.lower: return token.lower_ return token.orth_
Applies various filtering and processing options on token. Returns: The processed token. None if filtered.
codesearchnet
def print_file_results(file_result):
    """Print the results of validating a file.

    Args:
        file_result: A FileValidationResults instance.
    """
    print_results_header(file_result.filepath, file_result.is_valid)
    for object_result in file_result.object_results:
        if object_result.warnings:
            print_warning_results(object_result, 1)
        if object_result.errors:
            print_schema_results(object_result, 1)
    if file_result.fatal:
        print_fatal_results(file_result.fatal, 1)
Print the results of validating a file. Args: file_result: A FileValidationResults instance.
codesearchnet
def Update(self, attribute=None):
    """Refresh an old attribute.

    Note that refreshing the attribute is asynchronous. It does not change
    anything about the current object — reopen the same URN some time later
    to get fresh data.

    Args:
        attribute: 'CONTAINS' refreshes the content of the directory listing.

    Returns:
        The Flow ID that is pending, or None if the attribute is not
        recognized.
    """
    client_id = self.urn.Split()[0]
    if (attribute == 'CONTAINS'):
        # Listing the directory server-side refreshes the stored contents.
        flow_id = flow.StartAFF4Flow(client_id=client_id, flow_name='ListDirectory', pathspec=self.real_pathspec, notify_to_user=False, token=self.token)
        return flow_id
Refresh an old attribute. Note that refreshing the attribute is asynchronous. It does not change anything about the current object - you need to reopen the same URN some time later to get fresh data. Attributes: CONTAINS - Refresh the content of the directory listing. Args: attribute: An attribute object as listed above. Returns: The Flow ID that is pending Raises: IOError: If there has been an error starting the flow.
codesearchnet
def _get_file_iterator(self, file_obj): file_obj.seek(0) return iter((lambda : file_obj.read(self.read_bs)), '')
For given `file_obj` return iterator, which will read the file in `self.read_bs` chunks. Args: file_obj (file): File-like object. Return: iterator: Iterator reading the file-like object in chunks.
codesearchnet
def reduce_to_best_decode(metrics, reduce_func):
    """Extracts the best decode from the metrics according to reduce_func.

    Args:
        metrics: 3-D numpy array, shape=(num_decodes, num_samples, num_frames)
        reduce_func: callable, np.argmax or np.argmin.

    Returns:
        best_metrics: 2-D numpy array, shape=(num_samples, num_frames).
        best_decode_ind: 1-D numpy array, shape=(num_samples,)
    """
    num_samples = metrics.shape[1]
    # Rank decodes by their mean metric over frames, per sample.
    per_decode_means = metrics.mean(axis=-1)
    best_decode_ind = reduce_func(per_decode_means, axis=0)
    # Fancy indexing selects, for each sample, the frames of its best decode.
    sample_ind = np.arange(num_samples)
    best_metrics = metrics[best_decode_ind, sample_ind, :]
    return best_metrics, best_decode_ind
Extracts the best-decode from the metrics according to reduce_func. Args: metrics: 3-D numpy array, shape=(num_decodes, num_samples, num_frames) reduce_func: callable, np.argmax or np.argmin. Returns: best_metrics: 2-D numpy array, shape=(num_samples, num_frames). best_decode_ind: 1-D numpy array, shape=(num_samples,)
juraj-google-style
def read_bytes(self, length) -> bytes:
    """Read the specified number of bytes from the stream.

    Args:
        length (int): number of bytes to read.

    Returns:
        bytes: up to `length` bytes, as returned by the underlying stream.
    """
    return self.stream.read(length)
Read the specified number of bytes from the stream. Args: length (int): number of bytes to read. Returns: bytes: `length` number of bytes.
juraj-google-style
def calc_update_events(self, asin_to_progress):
    """Calculate the events that bring the current snapshot up to date.

    Generates `AddEvent`s for books not yet in the snapshot and `ReadEvent`s
    for currently-read books whose progress increased.

    Args:
        asin_to_progress: A map of book asins to the integral representation
            of progress used in the current snapshot.

    Returns:
        A list of Event objects that account for the changes detected in
        `asin_to_progress`.
    """
    new_events = []
    # BUG FIX: .iteritems() is Python-2-only; .items() works on both 2 and 3.
    for asin, new_progress in asin_to_progress.items():
        try:
            book_snapshot = self.get_book(asin)
        except KeyError:
            # Unknown book: it was added since the last snapshot.
            new_events.append(AddEvent(asin))
        else:
            if book_snapshot.status == ReadingStatus.CURRENT:
                change = new_progress - book_snapshot.progress
                if change > 0:
                    new_events.append(ReadEvent(asin, change))
    return new_events
Calculate and return an iterable of `KindleEvent`s which, when applied to the current snapshot, result in the current snapshot reflecting the progress state of the `asin_to_progress` mapping. Functionally, this method generates `AddEvent`s and `ReadEvent`s from updated Kindle Library state. Args: asin_to_progress: A map of book asins to the integral representation of progress used in the current snapshot. Returns: A list of Event objects that account for the changes detected in `asin_to_progress`.
codesearchnet
def _validate(cls, message): valid = False if ((('name' in message) and ('value' in message)) or (('id' in message) and ('data' in message))): valid = True return valid
Confirm the validitiy of a given dict as an OpenXC message. Returns: ``True`` if the message contains at least a ``name`` and ``value``.
codesearchnet
def save(self, data: Union[dict, List[dict]]):
    """Save the provided data object in this pipeline data format.

    Subclasses must implement the actual persistence.

    Args:
        data (`dict` or list of `dict`): The data to store.

    Raises:
        NotImplementedError: Always, on this abstract base implementation.
    """
    raise NotImplementedError()
Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. Args: data (`dict` or list of `dict`): The data to store.
github-repos
def transpile(circuits, backend=None, basis_gates=None, coupling_map=None, initial_layout=None, seed_mapper=None, pass_manager=None):
    """Transpile one or more circuits (deprecated wrapper).

    Deprecated: delegates to ``qiskit.compiler.transpile()``.

    Args:
        circuits (QuantumCircuit or list[QuantumCircuit]): circuits to compile
        backend (BaseBackend): a backend to compile for
        basis_gates (list[str]): list of basis gate names supported by the
            target.
        coupling_map (list): coupling map (perhaps custom) to target in mapping
        initial_layout (Layout or dict or list): Initial position of virtual
            qubits on physical qubits.
        seed_mapper (int): random seed for the swap mapper
        pass_manager (PassManager): a pass_manager for the transpiler stages

    Returns:
        QuantumCircuit or list[QuantumCircuit]: transpiled circuit(s).
    """
    warnings.warn('qiskit.transpiler.transpile() has been deprecated and will be removed in the 0.9 release. Use qiskit.compiler.transpile() instead.', DeprecationWarning)
    # seed_mapper maps onto the newer seed_transpiler argument.
    return compiler.transpile(circuits=circuits, backend=backend, basis_gates=basis_gates, coupling_map=coupling_map, initial_layout=initial_layout, seed_transpiler=seed_mapper, pass_manager=pass_manager)
transpile one or more circuits. Args: circuits (QuantumCircuit or list[QuantumCircuit]): circuits to compile backend (BaseBackend): a backend to compile for basis_gates (list[str]): list of basis gate names supported by the target. Default: ['u1','u2','u3','cx','id'] coupling_map (list): coupling map (perhaps custom) to target in mapping initial_layout (Layout or dict or list): Initial position of virtual qubits on physical qubits. The final layout is not guaranteed to be the same, as the transpiler may permute qubits through swaps or other means. seed_mapper (int): random seed for the swap_mapper pass_manager (PassManager): a pass_manager for the transpiler stages Returns: QuantumCircuit or list[QuantumCircuit]: transpiled circuit(s). Raises: TranspilerError: in case of bad inputs to transpiler or errors in passes
codesearchnet
def get_user(self, user_id):
    """Returns a user, given his or her user id.

    Required for a custom authentication backend.

    Args:
        user_id: The user id of the user to fetch.

    Returns:
        User or None if no user with that id exists.
    """
    try:
        return User.objects.get(id=user_id)
    except User.DoesNotExist:
        return None
Returns a user, given his or her user id. Required for a custom authentication backend. Args: user_id The user id of the user to fetch. Returns: User or None
juraj-google-style
class BeamJarExpansionService(JavaJarExpansionService):
    """An expansion service based on a Beam Java Jar file.

    Attempts to use a locally-built copy of the jar based on the gradle
    target, if it exists, otherwise attempts to download and cache the
    released artifact corresponding to this version of Beam from the apache
    maven repository (resolution done by `JavaJarServer.path_to_beam_jar`).
    """

    def __init__(self, gradle_target, extra_args=None, gradle_appendix=None, classpath=None, append_args=None):
        """Initializes the expansion service from a Beam Gradle target.

        Args:
            gradle_target: Beam Gradle target for building an executable jar
                which will be used to start the expansion service.
            extra_args: arguments to be provided when starting up the
                expansion service using the jar file. These arguments replace
                the default arguments.
            gradle_appendix: Gradle appendix of the artifact.
            classpath: Additional dependencies to be added to the classpath.
            append_args: arguments to be provided when starting up the
                expansion service using the jar file. These arguments are
                appended to the default arguments.
        """
        path_to_jar = subprocess_server.JavaJarServer.path_to_beam_jar(gradle_target, gradle_appendix)
        self.gradle_target = gradle_target
        super().__init__(path_to_jar, extra_args, classpath=classpath, append_args=append_args)
An expansion service based on an Beam Java Jar file. Attempts to use a locally-built copy of the jar based on the gradle target, if it exists, otherwise attempts to download and cache the released artifact corresponding to this version of Beam from the apache maven repository. Args: gradle_target: Beam Gradle target for building an executable jar which will be used to start the expansion service. extra_args: arguments to be provided when starting up the expansion service using the jar file. These arguments will replace the default arguments. gradle_appendix: Gradle appendix of the artifact. classpath: Additional dependencies to be added to the classpath. append_args: arguments to be provided when starting up the expansion service using the jar file. These arguments will be appended to the default arguments.
github-repos
def make_client(servers: Sequence[str], *args, **kwargs) -> GMatrixClient:
    """Given a list of possible servers, choose the closest available and
    create a GMatrixClient for it.

    Params:
        servers: list of servers urls, with scheme (http or https).
        Rest of args and kwargs are forwarded to the GMatrixClient
        constructor.

    Returns:
        GMatrixClient instance for one of the available servers.

    Raises:
        TransportError: if the server list is empty or none is reachable.
    """
    if len(servers) > 1:
        # Multiple candidates: order them by measured round-trip time.
        sorted_servers = [
            server_url for (server_url, _) in sort_servers_closest(servers)
        ]
        log.info(
            'Automatically selecting matrix homeserver based on RTT',
            sorted_servers=sorted_servers,
        )
    elif len(servers) == 1:
        sorted_servers = servers
    else:
        raise TransportError('No valid servers list given')

    last_ex = None
    for server_url in sorted_servers:
        server_url: str = server_url
        client = GMatrixClient(server_url, *args, **kwargs)
        try:
            # Cheap unauthenticated endpoint used as a reachability probe.
            client.api._send('GET', '/versions', api_path='/_matrix/client')
        except MatrixError as ex:
            log.warning('Selected server not usable', server_url=server_url, _exception=ex)
            last_ex = ex
        else:
            break
    else:
        # No server in the list responded.
        raise TransportError(
            'Unable to find a reachable Matrix server. Please check your network connectivity.',
        ) from last_ex
    return client
Given a list of possible servers, chooses the closest available and create a GMatrixClient Params: servers: list of servers urls, with scheme (http or https) Rest of args and kwargs are forwarded to GMatrixClient constructor Returns: GMatrixClient instance for one of the available servers
juraj-google-style
def set_logging_levels(remote=None, local=None):
    """Sets the logging levels of the CIMC devices.

    The logging levels must match one of: emergency, alert, critical, error,
    warning, notice, informational, debug.

    .. versionadded:: 2019.2.0

    Args:
        remote(str): The logging level for SYSLOG logs.
        local(str): The logging level for the local device.

    Raises:
        CommandExecutionError: If a given severity option is not valid.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.set_logging_levels remote=error local=notice
    """
    logging_options = ['emergency', 'alert', 'critical', 'error', 'warning',
                       'notice', 'informational', 'debug']
    query = ""

    if remote:
        if remote in logging_options:
            query += ' remoteSeverity="{0}"'.format(remote)
        else:
            raise salt.exceptions.CommandExecutionError("Remote Severity option is not valid.")

    if local:
        if local in logging_options:
            query += ' localSeverity="{0}"'.format(local)
        else:
            raise salt.exceptions.CommandExecutionError("Local Severity option is not valid.")

    dn = "sys/svc-ext/syslog"
    # BUG FIX: the XML payload string literal was missing in this copy
    # (`inconfig = .format(query)` is a syntax error); restored from the
    # upstream salt cimc execution module.
    inconfig = """<commSyslog dn="sys/svc-ext/syslog" {0} />""".format(query)

    ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)

    return ret
Sets the logging levels of the CIMC devices. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. versionadded:: 2019.2.0 Args: remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. CLI Example: .. code-block:: bash salt '*' cimc.set_logging_levels remote=error local=notice
juraj-google-style
def _add_unique_metric_name(self, metric_name, metric_fn, output_index): if len(self.output_names) > 1: if not getattr(metric_fn, '_from_serialized', False): metric_name = '%s_%s' % (self.output_names[output_index], metric_name) j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = '%s_%d' % (base_metric_name, j) j += 1 return metric_name
Makes the metric name unique. If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. Args: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. metric_fn: The Metric object. output_index: The index of the model output for which the metric name is being added. Returns: string, name of the model's unique metric name
github-repos
def _patch_expand_paths(self, settings, name, value): return [self._patch_expand_path(settings, name, item) for item in value]
Apply ``SettingsPostProcessor._patch_expand_path`` to each element in list. Args: settings (dict): Current settings. name (str): Setting name. value (list): List of paths to patch. Returns: list: Patched path list to an absolute path.
codesearchnet
def parse(path):
    """Parse an ``.ensime`` config file from S-expressions.

    Args:
        path (str): Path of an ``.ensime`` file to parse.

    Returns:
        dict: Configuration values with string keys.
    """
    def paired(iterable):
        's -> (s0, s1), (s2, s3), (s4, s5), ...'
        cursor = iter(iterable)
        # zip-ing an iterator with itself yields consecutive pairs.
        return zip(cursor, cursor)

    def unwrap_if_sexp_symbol(datum):
        "Convert Symbol(':key') to ':key' (Symbol isn't hashable for dict keys).\n "
        return (datum.value() if isinstance(datum, sexpdata.Symbol) else datum)

    def sexp2dict(sexps):
        'Transforms a nested list structure from sexpdata to dict.'
        newdict = {}
        # Consecutive (key, value) pairs become dict entries.
        for (key, value) in paired(sexps):
            key = str(unwrap_if_sexp_symbol(key)).lstrip(':')
            if (isinstance(value, list) and value):
                if isinstance(value[0], list):
                    # List of lists: recurse on each element.
                    newdict[key] = [sexp2dict(val) for val in value]
                elif isinstance(value[0], sexpdata.Symbol):
                    # Nested key/value structure: recurse once.
                    newdict[key] = sexp2dict(value)
                else:
                    newdict[key] = value
            else:
                newdict[key] = value
        return newdict

    conf = sexpdata.loads(Util.read_file(path))
    return sexp2dict(conf)
Parse an ``.ensime`` config file from S-expressions. Args: path (str): Path of an ``.ensime`` file to parse. Returns: dict: Configuration values with string keys.
codesearchnet
def auto_convert_string_cell(flagable, cell_str, position, worksheet, flags, units, parens_as_neg=True):
    """Handles the string case of cell and attempts auto-conversion for
    auto_convert_cell.

    Args:
        flagable: Flag-recording helper; receives a flag for each
            interpretation decision made here.
        cell_str (str): Raw cell contents.
        position: Cell position, used when recording flags.
        worksheet: Worksheet identifier, used when recording flags.
        flags: Flag accumulator passed through to `flagable`.
        units: Units context forwarded to the numeric conversion.
        parens_as_neg: Converts numerics surrounded by parens to negative
            values.

    Returns:
        The converted value, or the stripped string when no conversion
        applies.
    """
    conversion = cell_str.strip()
    if re.search(allregex.control_wrapping_regex, cell_str):
        # Cell is wrapped in control characters: unwrap and convert the
        # inner value, optionally treating "(123)" as -123.
        stripped_cell = cell_str.strip()
        mod_cell_str = stripped_cell[1:][:(- 1)].strip()
        neg_mult = False
        if ((stripped_cell[0] == '(') and (stripped_cell[(- 1)] == ')') and re.search(allregex.contains_numerical_regex, mod_cell_str)):
            neg_mult = True
        flagable.flag_change(flags, 'interpreted', position, worksheet, flagable.FLAGS['removed-wrapping'])
        converted_value = auto_convert_cell(flagable, mod_cell_str, position, worksheet, flags, units)
        # Only negate when the unwrapped value actually converted to a number.
        neg_mult = (neg_mult and check_cell_type(converted_value, get_cell_type(0)))
        if (neg_mult and parens_as_neg):
            flagable.flag_change(flags, 'interpreted', position, worksheet, flagable.FLAGS['converted-wrapping-to-neg'])
        return ((- converted_value) if neg_mult else converted_value)
    elif re.search(allregex.contains_numerical_regex, cell_str):
        conversion = auto_convert_numeric_string_cell(flagable, conversion, position, worksheet, flags, units)
    elif re.search(allregex.bool_regex, cell_str):
        # Boolean-looking strings are normalized to 1/0.
        flagable.flag_change(flags, 'interpreted', position, worksheet, flagable.FLAGS['bool-to-int'])
        conversion = (1 if re.search(allregex.true_bool_regex, cell_str) else 0)
    return conversion
Handles the string case of cell and attempts auto-conversion for auto_convert_cell. Args: parens_as_neg: Converts numerics surrounded by parens to negative values
codesearchnet
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
    """Download a Google Drive file and place it in root.

    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the
            id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    import requests
    # Fixed: the endpoint literal had been truncated to `"https:` (an
    # unterminated string). This is the canonical Drive download endpoint.
    url = "https://docs.google.com/uc?export=download"

    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.path.join(root, filename)

    makedir_exist_ok(root)

    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        # Skip the download when a verified copy already exists.
        print('Using downloaded and verified file: ' + fpath)
    else:
        session = requests.Session()
        response = session.get(url, params={'id': file_id}, stream=True)
        token = _get_confirm_token(response)
        if token:
            # Drive returns a confirmation token for files it cannot
            # virus-scan; re-request with the token to get the payload.
            params = {'id': file_id, 'confirm': token}
            response = session.get(url, params=params, stream=True)
        _save_response_content(response, fpath)
Download a Google Drive file and place it in root. Args: file_id (str): id of file to be downloaded root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the id of the file. md5 (str, optional): MD5 checksum of the download. If None, do not check
juraj-google-style
def Copy(self, name=None):
    """Returns a copy of this Cdf.

    Args:
        name: string name for the new Cdf
    """
    # Fall back to this Cdf's own name when none is supplied.
    new_name = self.name if name is None else name
    return Cdf(list(self.xs), list(self.ps), new_name)
Returns a copy of this Cdf. Args: name: string name for the new Cdf
juraj-google-style
def setup(self, file_path_list, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True):
    """Initializes a GRR Hunt file collector.

    Args:
        file_path_list: comma-separated list of file paths.
        reason: justification for GRR access.
        grr_server_url: GRR server URL.
        grr_username: GRR username.
        grr_password: GRR password.
        approvers: comma-separated list of GRR approval recipients.
        verify: boolean, whether to verify the GRR server's x509 certificate.
    """
    super(GRRHuntFileCollector, self).setup(
        reason, grr_server_url, grr_username, grr_password,
        approvers=approvers, verify=verify)
    # Fixed: the empty check previously tested the raw string AFTER parsing,
    # so inputs like "," or "  " silently produced a list of '' paths and no
    # error. Drop empty entries and validate the parsed result instead.
    self.file_path_list = [
        item.strip() for item in file_path_list.strip().split(',')
        if item.strip()]
    if not self.file_path_list:
        self.state.add_error('Files must be specified for hunts', critical=True)
Initializes a GRR Hunt file collector. Args: file_path_list: comma-separated list of file paths. reason: justification for GRR access. grr_server_url: GRR server URL. grr_username: GRR username. grr_password: GRR password. approvers: comma-separated list of GRR approval recipients. verify: boolean, whether to verify the GRR server's x509 certificate.
juraj-google-style
def non_serializable():
    """A non-serializable identity transformation.

    Returns:
        A `Dataset` transformation function, which can be passed to
        `tf.data.Dataset.apply`.
    """
    def _wrap(dataset):
        # Content is unchanged; the wrapper only marks the dataset
        # non-serializable.
        return _NonSerializableDataset(dataset)
    return _wrap
A non-serializable identity transformation. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`.
github-repos
def sym_init_args(self) -> pg_dict.Dict:
    """Returns the symbolic attributes, which double as the `__init__` args.

    Returns:
        A symbolic Dict of evaluated symbolic attributes (any
        ``pg.ContextValue`` entries are resolved).
    """
    attributes = self._sym_attributes
    return attributes
Returns the symbolic attributes which are also the `__init__` args. Returns: A symbolic Dict as evaluated symbolic attributes, meaning that all ``pg.ContextValue`` will be resolved.
github-repos
def edge_length_sum(self, terminal=True, internal=True):
    """Compute the sum of all selected edge lengths in this ``Tree``.

    Args:
        ``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False``

        ``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False``

    Returns:
        ``float``: Sum of all selected edge lengths in this ``Tree``

    Raises:
        ``TypeError``: If ``terminal`` or ``internal`` is not a ``bool``
    """
    if not isinstance(terminal, bool):
        # Fixed: this message previously said "leaves must be a bool",
        # naming a parameter that does not exist.
        raise TypeError("terminal must be a bool")
    if not isinstance(internal, bool):
        raise TypeError("internal must be a bool")
    # Nodes with no edge length (e.g. the root) are skipped entirely.
    return sum(node.edge_length for node in self.traverse_preorder()
               if node.edge_length is not None
               and ((terminal and node.is_leaf())
                    or (internal and not node.is_leaf())))
Compute the sum of all selected edge lengths in this ``Tree`` Args: ``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False`` Returns: ``float``: Sum of all selected edge lengths in this ``Tree``
juraj-google-style
def set_route_name(self, ip_dest, next_hop, **kwargs):
    """Set the route_name value for the specified route.

    Args:
        ip_dest (string): The ip address of the destination in the
            form of A.B.C.D/E
        next_hop (string): The next hop interface or ip address
        **kwargs['next_hop_ip'] (string): The next hop address on
            destination interface
        **kwargs['distance'] (string): Administrative distance for this
            route
        **kwargs['tag'] (string): Route tag
        **kwargs['route_name'] (string): Route name

    Returns:
        True if the operation succeeds, otherwise False.

    Notes:
        Any existing tag value must be included in call to
        set_route_name, otherwise the tag will be reset by the call to
        EOS.
    """
    # All route attributes (route_name included) travel through the
    # shared route setter.
    result = self._set_route(ip_dest, next_hop, **kwargs)
    return result
Set the route_name value for the specified route Args: ip_dest (string): The ip address of the destination in the form of A.B.C.D/E next_hop (string): The next hop interface or ip address **kwargs['next_hop_ip'] (string): The next hop address on destination interface **kwargs['distance'] (string): Administrative distance for this route **kwargs['tag'] (string): Route tag **kwargs['route_name'] (string): Route name Returns: True if the operation succeeds, otherwise False. Notes: Any existing tag value must be included in call to set_route_name, otherwise the tag will be reset by the call to EOS.
codesearchnet
def _get_available_gpus():
    """Get a list of available GPU devices (formatted as strings).

    Returns:
        A list of available GPU devices.
    """
    if ops.executing_eagerly_outside_functions():
        # Eager path: query logical devices directly.
        logical_gpus = config.list_logical_devices('GPU')
        return [device.name for device in logical_gpus]

    global _LOCAL_DEVICES
    if _LOCAL_DEVICES is None:
        # Graph path: listing devices requires a session; cache the result.
        _LOCAL_DEVICES = get_session().list_devices()
    return [device.name for device in _LOCAL_DEVICES
            if device.device_type == 'GPU']
Get a list of available GPU devices (formatted as strings). Returns: A list of available GPU devices.
github-repos
def auto_cast_partition_dtype():
    """Whether incompatible row-partitioning dtypes should be auto-converted.

    If true, then operations that combine RaggedTensors but have different
    row-partitioning tensor dtypes will be automatically cast to a compatible
    dtype (`tf.int64`). If false, then such operations will result in an error.

    Returns:
        `bool`
    """
    # Auto-casting is currently disabled.
    return False
Whether incompatible row-partitioning dtypes should be auto-converted. If true, then operations that combine RaggedTensors but have different row-partitioning tensor dtypes will be automatically cast to a compatible dtype (`tf.int64`). If false, then such operations will result in an error. Returns: `bool`
github-repos
def search(self, **kwargs):
    """Get movies that match the search query string from the API.

    Args:
        q (optional): plain text search query; remember to URI encode
        page_limit (optional): number of search results to show per page,
            default=30
        page (optional): results page number, default=1

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_path('search')
    response = self._GET(endpoint, kwargs)
    # Mirror the response fields onto this instance before returning it.
    self._set_attrs_to_values(response)
    return response
Get movies that match the search query string from the API. Args: q (optional): plain text search query; remember to URI encode page_limit (optional): number of search results to show per page, default=30 page (optional): results page number, default=1 Returns: A dict representation of the JSON returned from the API.
codesearchnet
def get_lock_config(self, device_label):
    """Get lock configuration.

    Args:
        device_label (str): device label of lock
    """
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Cookie': 'vid={}'.format(self._vid)}
    try:
        response = requests.get(
            urls.lockconfig(self._giid, device_label), headers=headers)
    except requests.exceptions.RequestException as ex:
        # Surface transport-level failures as the package's own error type.
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
Get lock configuration Args: device_label (str): device label of lock
juraj-google-style
def matches_function(function: _evaluation.MatchesFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:
    """Generates Spark SQL representing the FHIRPath matches() function.

    Returns `TRUE` if the operand matches the regex given in the parameter.
    If no `pattern` parameter is provided, matches() yields the empty set,
    rendered here as a `NULL` literal. The returned SQL expression is a table
    of cardinality 1 whose value is of `BOOL` type.

    Args:
        function: The FHIRPath AST `MatchesFunction` node (unused; present
            for a uniform handler signature).
        operand_result: The expression which is being evaluated.
        params_result: The parameters passed in to the function.

    Returns:
        A compiled Spark SQL expression.

    Raises:
        ValueError: When the function is called without an operand.
    """
    del function  # Unused; kept for signature compatibility.
    if operand_result is None:
        raise ValueError('matches() cannot be called without an operand.')
    sql_alias = 'matches_'
    sql_data_type = _sql_data_types.Boolean
    if not params_result:
        # No pattern: the empty set, expressed as NULL.
        return _sql_data_types.Select(
            select_part=_sql_data_types.RawExpression(
                'NULL', _sql_alias=sql_alias, _sql_data_type=sql_data_type),
            from_part=None,
            sql_dialect=_sql_data_types.SqlDialect.SPARK)
    # Materialize once so arbitrary Collections (e.g. views) index safely.
    # (Replaces a manual `[param for param in params_result]` copy.)
    params = list(params_result)
    return dataclasses.replace(
        operand_result,
        select_part=_sql_data_types.FunctionCall(
            name='REGEXP',
            params=(operand_result.select_part, params[0]),
            _sql_alias=sql_alias,
            _sql_data_type=sql_data_type))
Generates Spark SQL representing the FHIRPath matches() function. Returns `TRUE` if the operand matches the regex in the given param. This function takes one param (`pattern`) in addition to the operand. If `pattern` is not provided the matches function returns the empty set which in this function translates to NULL. The returned SQL expression is a table of cardinality 1, whose value is of `BOOL` type. By default, `_MatchesFunction` will return `FALSE` if given no operand. Returns an error in the event that the input collection contains multiple items. Args: function: The FHIRPath AST `MatchesFunction` node operand_result: The expression which is being evaluated params_result: The parameter passed in to function Returns: A compiled Spark SQL expression. Raises: ValueError: When the function is called without an operand
github-repos
def _replace_variables_by_constants(converter_data):
    """Replaces variables by constants on a given graph.

    Given a _ConverterData instance with converted variables in its
    tensor_data field, create a new graph where the respective variables are
    replaced with the converted constants.

    Args:
        converter_data: A pre-populated _ConverterData instance.

    Returns:
        The converted graph.
    """
    graph = _GraphDef(converter_data.graph_def)
    # Rewrite each variable node in place as a constant.
    for name, data in converter_data.tensor_data.items():
        graph.nodes[name].convert_variable_to_constant(None, data)
    converted_graph = graph.converted_self().graph_def
    converted_input_indices = {
        tensor.index
        for tensor in converter_data.tensor_data.values()
        if tensor.index is not None
    }
    return converted_graph, converted_input_indices
Replaces variables by constants on a given graph. Given a _ConverterData instance with converted variables in its tensor_data field, create a new graph where the respective variables are replaced with the converted constants. Args: converter_data: A pre-populated _ConverterData instance. Returns: The converted graph.
github-repos
def sharded_filename(self, filename_tensor, shard, num_shards):
    """Append sharding information to a filename.

    Args:
      filename_tensor: A string tensor.
      shard: Integer. The shard for the filename.
      num_shards: An int Tensor for the number of shards.

    Returns:
      A string tensor.
    """
    # Thin wrapper over the generated op.
    return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
Append sharding information to a filename. Args: filename_tensor: A string tensor. shard: Integer. The shard for the filename. num_shards: An int Tensor for the number of shards. Returns: A string tensor.
github-repos
def _FlagIsRegistered(self, flag_obj): flag_dict = self.FlagDict() name = flag_obj.name if flag_dict.get(name, None) == flag_obj: return True short_name = flag_obj.short_name if (short_name is not None and flag_dict.get(short_name, None) == flag_obj): return True return False
Checks whether a Flag object is registered under long name or short name. Args: flag_obj: A Flag object. Returns: A boolean: True iff flag_obj is registered under long name or short name.
juraj-google-style