code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _guessEncoding(self, path):
    """Check the encoding of the csv file at `path` and update the dialog.

    On a successful identification, the encoding combo box of this dialog
    is updated to show the detected encoding.

    NOTE(review): `encoding` is assigned None and never reassigned, so the
    `if encoding is not None:` branch below is dead code — the original
    line-by-line encoding-detection loop appears to be missing from this
    excerpt.  Confirm against the upstream source before relying on this.

    Args:
        path (string): Path to a csv file on the file system.
    """
    # Only operate on existing files with a csv extension.
    if os.path.exists(path) and path.lower().endswith('csv'):
        encoding = None
        if encoding is not None:
            # Normalize e.g. 'utf-8' -> 'utf8' and other '-' -> '_'
            # so the key matches the `_encodings` lookup table.
            if encoding.startswith('utf'):
                encoding = encoding.replace('-', '')
            encoding = encoding.replace('-', '_')
            viewValue = _encodings.get(encoding)
            self._encodingKey = encoding
            # NOTE(review): if `encoding` is not in `_encodings`,
            # `viewValue` is None and `.upper()` would raise — confirm.
            index = self._encodingComboBox.findText(viewValue.upper())
            self._encodingComboBox.setCurrentIndex(index)
Opens a file from the given `path` and checks the file encoding. The file must exist on the file system and end with the extension `.csv`. The file is read line by line until the encoding could be guessed. On a successful identification, the widgets of this dialog will be updated. Args: path (string): Path to a csv file on the file system.
juraj-google-style
def _ExtractContentFromDataStream(self, mediator, file_entry, data_stream_name):
    """Extracts content (events) from a data stream.

    Args:
        mediator (ParserMediator): mediates the interactions between parsers
            and other components, such as storage and abort signals.
        file_entry (dfvfs.FileEntry): file entry to extract its content.
        data_stream_name (str): name of the data stream whose content is to
            be extracted.
    """
    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
    # Profiling is optional; only time the extraction when a profiler is set.
    if self._processing_profiler:
        self._processing_profiler.StartTiming('extracting')
    self._event_extractor.ParseDataStream(mediator, file_entry, data_stream_name)
    if self._processing_profiler:
        self._processing_profiler.StopTiming('extracting')
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
    # Record activity timestamp — presumably consumed by liveness
    # monitoring elsewhere; confirm against the caller.
    self.last_activity_timestamp = time.time()
Extracts content from a data stream. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry to extract its content. data_stream_name (str): name of the data stream whose content is to be extracted.
codesearchnet
def _cmp_rel(self, state, op_name, x, y):
    """Implementation of relational operators CMP_(LT|LE|EQ|NE|GE|GT).

    Compares every pair of bindings of `x` and `y`.  Pairs whose result
    can be statically determined contribute concrete bool bindings;
    undecidable pairs are collected into leftover variables and dispatched
    through the dunder operator (e.g. `__lt__`).

    Args:
        state: Initial FrameState.
        op_name: An operator name, e.g., "EQ".
        x: A variable of the lhs value.
        y: A variable of the rhs value.

    Returns:
        A tuple of the new FrameState and the return variable.
    """
    ret = self.ctx.program.NewVariable()
    leftover_x = self.ctx.program.NewVariable()
    leftover_y = self.ctx.program.NewVariable()
    # Ordering operators (not EQ/NE) get stricter error reporting below.
    op_not_eq = op_name not in ('EQ', 'NE')
    reported = False
    for b1 in x.bindings:
        for b2 in y.bindings:
            op = getattr(slots, op_name)
            try:
                err = False
                val = compare.cmp_rel(self.ctx, op, b1.data, b2.data)
            except compare.CmpTypeError:
                val = None
                # Only report if this binding combination is actually
                # reachable at the current CFG node.
                if state.node.HasCombination([b1, b2]):
                    err = True
                    reported = True
                    self.ctx.errorlog.unsupported_operands(self.frames, op, x, y)
            if val is None:
                if op_not_eq and isinstance(b1.data, abstract.Class) and err:
                    ret.AddBinding(self.ctx.convert.unsolvable, {b1, b2}, state.node)
                elif isinstance(b1.data, abstract.SequenceLength):
                    ret.AddBinding(self.ctx.convert.bool_values[val], {b1, b2}, state.node)
                else:
                    # Defer undecidable comparisons to the dunder call below.
                    leftover_x.PasteBinding(b1, state.node)
                    leftover_y.PasteBinding(b2, state.node)
            else:
                ret.AddBinding(self.ctx.convert.bool_values[val], {b1, b2}, state.node)
    if leftover_x.bindings:
        op = f'__{op_name.lower()}__'
        # Suppress duplicate errors if already reported or partially solved.
        report_errors = op_not_eq and (not bool(ret.bindings)) and (not reported)
        state, leftover_ret = vm_utils.call_binary_operator(state, op, leftover_x, leftover_y, report_errors=report_errors, ctx=self.ctx)
        ret.PasteVariable(leftover_ret, state.node)
    return (state, ret)
Implementation of relational operators CMP_(LT|LE|EQ|NE|GE|GT). Args: state: Initial FrameState. op_name: An operator name, e.g., "EQ". x: A variable of the lhs value. y: A variable of the rhs value. Returns: A tuple of the new FrameState and the return variable.
github-repos
def run(self, host="localhost", port=8000, shutdown_timeout=60.0, **kwargs):
    """Start the service's network interfaces and block until interrupted.

    Fix: the original startup message was garbled
    (`"Running service on http: "Press Ctrl+C to terminate.")` — a broken
    literal that never showed the bind address); it now includes host/port.

    Args:
        host (str): Interface to bind the HTTP server to.
        port (int): The port for the http server.
        shutdown_timeout (float): Kept for interface compatibility.
            NOTE(review): not used in this body — confirm whether
            cleanup() should honor it.
        **kwargs: Kept for interface compatibility; ignored here.
    """
    print("Running service on http://{}:{}. "
          "Press Ctrl+C to terminate.".format(host, port))
    self.config.port = port
    self.config.host = host
    try:
        if self.event_broker:
            self.event_broker.start()
        self.loop.run_until_complete(self.announce())
        http_handler = self.app.make_handler()
        self._http_server = self.loop.create_server(http_handler, host, port)
        self._server_handler = self.loop.run_until_complete(self._http_server)
        self.loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl+C is the expected way to stop; fall through to cleanup.
        pass
    finally:
        try:
            self.cleanup()
        except UnboundLocalError:
            # cleanup() may reference state never created if startup failed.
            pass
        self.loop.close()
This function starts the service's network interfaces. Args: port (int): The port for the http server.
juraj-google-style
def apply_to_operation(self, operation):
    """Attach this sharding annotation to `operation`.

    Args:
        operation: A tf.Operation to add sharding annotation.
    """
    serialized = self._proto.SerializeToString()
    sharding_attr = attr_value_pb2.AttrValue(s=serialized)
    operation._set_attr('_XlaSharding', sharding_attr)
Applies this Sharding attribute to `operation`. Args: operation: A tf.Operation to add sharding annotation.
github-repos
def _flush(self, buffer):
    """Flush the write buffers of the stream if applicable.

    Uploads the buffered content in a single `put_object` call
    (presumably an S3-compatible client — confirm).

    Args:
        buffer (memoryview): Buffer content.
    """
    with _handle_client_error():
        # `_client_kwargs` supplies the destination parameters
        # (e.g. bucket/key) for the client call.
        self._client.put_object(
            Body=buffer.tobytes(), **self._client_kwargs)
Flush the write buffers of the stream if applicable. Args: buffer (memoryview): Buffer content.
juraj-google-style
def diff_halfMatch(self, text1, text2):
    """Do the two texts share a substring which is at least half the length
    of the longer text?  This speedup can produce non-minimal diffs.

    Fix: the dataset extraction truncated three integer divisions
    (`// 4` and `// 2`) mid-expression, leaving syntax errors; restored
    per the reference diff-match-patch implementation.

    Args:
        text1: First string.
        text2: Second string.

    Returns:
        Five element tuple, containing the prefix of text1, the suffix of
        text1, the prefix of text2, the suffix of text2 and the common
        middle.  Or None if there was no match.
    """
    if self.Diff_Timeout <= 0:
        # With unlimited time, don't risk a non-optimal diff.
        return None
    if len(text1) > len(text2):
        (longtext, shorttext) = (text1, text2)
    else:
        (shorttext, longtext) = (text1, text2)
    if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
        return None  # Pointless.

    def diff_halfMatchI(longtext, shorttext, i):
        """Does a substring of shorttext exist within longtext such that
        the substring is at least half the length of longtext?

        Args:
            longtext: Longer string.
            shorttext: Shorter string.
            i: Start index of quarter length substring within longtext.

        Returns:
            Five element tuple (prefix of longtext, suffix of longtext,
            prefix of shorttext, suffix of shorttext, common middle), or
            None if there was no match.
        """
        seed = longtext[i:i + (len(longtext) // 4)]
        best_common = ''
        j = shorttext.find(seed)
        while j != -1:
            prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
            suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
            if len(best_common) < suffixLength + prefixLength:
                best_common = (shorttext[j - suffixLength:j] +
                               shorttext[j:j + prefixLength])
                best_longtext_a = longtext[:i - suffixLength]
                best_longtext_b = longtext[i + prefixLength:]
                best_shorttext_a = shorttext[:j - suffixLength]
                best_shorttext_b = shorttext[j + prefixLength:]
            j = shorttext.find(seed, j + 1)
        if len(best_common) * 2 >= len(longtext):
            return (best_longtext_a, best_longtext_b,
                    best_shorttext_a, best_shorttext_b, best_common)
        return None

    # Check if the second quarter is the seed for a half-match.
    hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
    # Check again based on the third quarter.
    hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
    if not hm1 and not hm2:
        return None
    elif not hm2:
        hm = hm1
    elif not hm1:
        hm = hm2
    else:
        # Both matched.  Select the longest.
        hm = hm1 if len(hm1[4]) > len(hm2[4]) else hm2
    # Re-order so the result always refers to (text1, text2).
    if len(text1) > len(text2):
        (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
    else:
        (text2_a, text2_b, text1_a, text1_b, mid_common) = hm
    return (text1_a, text1_b, text2_a, text2_b, mid_common)
Do the two texts share a substring which is at least half the length of the longer text? This speedup can produce non-minimal diffs. Args: text1: First string. text2: Second string. Returns: Five element Array, containing the prefix of text1, the suffix of text1, the prefix of text2, the suffix of text2 and the common middle. Or None if there was no match.
codesearchnet
def _dump_eager_tensors(self, tensors, op_type, input_tensor_ids, output_tensor_device_ids, graph_id=None):
    """Dump the value of eager tensors.

    The destination of the dumping is determined by the dump_root of the
    currently enabled dumping callback.  The tensors may be transformed
    prior to dumping (e.g., reduced to summary statistics) depending on
    the tensor_debug_mode of the currently enabled dumping callback.

    Args:
        tensors: The EagerTensors whose values are to be dumped, with or
            without value transform.
        op_type: Type of the op that generates the tensors, as a string.
        input_tensor_ids: IDs of the input EagerTensors to the op.
        output_tensor_device_ids: Debugger-generated IDs for the devices on
            which the output tensors are allocated, as a `list` of `int`s.
            Must match `tensors` in length.
        graph_id: ID of the executed graph, applicable only to eager
            execution of a FuncGraph.

    Returns:
        A tfdbg Execution protocol buffer.

    Raises:
        NotImplementedError: for unsupported tensor debug modes.
    """
    tensor_debug_mode = self._tensor_debug_mode
    output_tensor_ids = [t._id for t in tensors]
    assert len(tensors) == len(output_tensor_device_ids)
    if tensor_debug_mode == debug_event_pb2.TensorDebugMode.NO_TENSOR:
        # No tensor values are recorded; only execution metadata.
        return debug_event_pb2.Execution(op_type=op_type, graph_id=graph_id, num_outputs=len(tensors), input_tensor_ids=input_tensor_ids, output_tensor_ids=output_tensor_ids, output_tensor_device_ids=output_tensor_device_ids, tensor_debug_mode=tensor_debug_mode, code_location=self._process_stack_frames())
    elif tensor_debug_mode in (debug_event_pb2.TensorDebugMode.CURT_HEALTH, debug_event_pb2.TensorDebugMode.CONCISE_HEALTH, debug_event_pb2.TensorDebugMode.FULL_HEALTH, debug_event_pb2.TensorDebugMode.SHAPE, debug_event_pb2.TensorDebugMode.FULL_TENSOR):
        execution_proto = debug_event_pb2.Execution(op_type=op_type, num_outputs=len(tensors), graph_id=graph_id, input_tensor_ids=input_tensor_ids, output_tensor_ids=output_tensor_ids, output_tensor_device_ids=output_tensor_device_ids, tensor_debug_mode=tensor_debug_mode, code_location=self._process_stack_frames())
        for tensor in tensors:
            # Only dump tensor content the filter accepts and that can be
            # represented as numpy values.
            if self._should_dump_tensor(op_type, tensor.dtype) and tensor.dtype.is_numpy_compatible:
                if tensor_debug_mode in (debug_event_pb2.TensorDebugMode.CURT_HEALTH, debug_event_pb2.TensorDebugMode.CONCISE_HEALTH, debug_event_pb2.TensorDebugMode.FULL_HEALTH):
                    if tensor.dtype.is_floating:
                        # Health modes summarize floating tensors via the
                        # DebugNumericSummaryV2 op.
                        tensor_proto = _concrete_tensor_to_proto(gen_debug_ops.debug_numeric_summary_v2(tensor, tensor_debug_mode=tensor_debug_mode, output_dtype=dtypes.float64))
                    else:
                        # Non-floating tensors get an empty placeholder proto.
                        tensor_proto = tensor_pb2.TensorProto()
                elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.SHAPE:
                    if tensor.dtype.is_floating or tensor.dtype.is_integer or tensor.dtype.is_bool:
                        tensor_proto = _concrete_tensor_to_proto(gen_debug_ops.debug_numeric_summary_v2(tensor, tensor_debug_mode=tensor_debug_mode, output_dtype=dtypes.float64))
                    else:
                        tensor_proto = tensor_pb2.TensorProto()
                elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR:
                    tensor_proto = _concrete_tensor_to_proto(tensor)
                if tensor_proto:
                    execution_proto.tensor_protos.append(tensor_proto)
        return execution_proto
    else:
        raise NotImplementedError('Tensor instrumentation is not implemented for debug mode %s yet ' % self._tensor_debug_mode)
Dump the value of eager tensors. The destination of the dumping is determined by the dump_root of the currently enabled dumping callback. The tensors may be transformed prior to dumping (e.g., reduced as summary statistics such as minimum, maximum and arithmetic mean). The details of this transformation (if any) depends on the tensor_debug_mode of the currently enabled dumping callback. Args: tensors: The EagerTensors whose values are to be dumped, with or without value transform. op_type: Type of the op that generates the tensors, as a string. input_tensor_ids: IDs of the input EagerTensors to the op. output_tensor_device_ids: Debugged-generated IDs for the devices on which the output tensors are allocated, as a `list` of `int`s. Must match `tensors` in length. graph_id: ID of the executed graph, applicable only to eager execution of a FuncGraph. Returns: A tfdbg Execution protocol buffer.
github-repos
def hostname(hn, ft, si):
    """Check hostname, facter and systemid parsers for fqdn/hostname/domain.

    Preference order: hostname parser, then facter, then systemid.

    Args:
        hn: hostname parser result (has `fqdn`, `hostname`, `domain`).
        ft: facter parser result (same shape as `hn`).
        si (dict-like): systemid parser result, queried for 'profile_name'.

    Returns:
        insights.combiners.hostname.Hostname: A named tuple with `fqdn`,
        `hostname` and `domain` components.

    Raises:
        Exception: If no hostname can be found in any of the three parsers.
    """
    # Fall back to facter when the hostname parser has no fqdn.
    source = ft if (not hn or not hn.fqdn) else hn
    if source and source.fqdn:
        fqdn = source.fqdn
        short_name = source.hostname if source.hostname else fqdn.split('.')[0]
        domain = source.domain if source.domain else '.'.join(fqdn.split('.')[1:])
        return Hostname(fqdn, short_name, domain)
    # Last resort: derive everything from the systemid profile name.
    fqdn = si.get('profile_name') if si else None
    if fqdn:
        parts = fqdn.split('.')
        return Hostname(fqdn, parts[0], '.'.join(parts[1:]))
    raise Exception('Unable to get hostname.')
Check hostname, facter and systemid to get the fqdn, hostname and domain. Prefer hostname to facter and systemid. Returns: insights.combiners.hostname.Hostname: A named tuple with `fqdn`, `hostname` and `domain` components. Raises: Exception: If no hostname can be found in any of the three parsers.
codesearchnet
def apply_enhancement(data, func, exclude=None, separate=False, pass_dask=False):
    """Apply `func` to the provided data, modifying it in place.

    Args:
        data (xarray.DataArray): Data to be modified inplace; must have a
            'bands' coordinate.
        func (callable): Function to be applied to an xarray (or to its
            underlying dask array when `pass_dask` is True).
        exclude (iterable): Bands in the 'bands' dimension to not include
            in the calculations.  Defaults to the alpha band ['A'] when
            present.
        separate (bool): Apply `func` one band at a time. Default is False.
        pass_dask (bool): Pass the underlying dask array instead of the
            xarray.DataArray.

    Returns:
        xarray.DataArray: `data`, modified in place.
    """
    attrs = data.attrs
    bands = data.coords['bands'].values
    if exclude is None:
        # By default leave the alpha band untouched if it exists.
        exclude = ['A'] if 'A' in bands else []
    if separate:
        data_arrs = []
        for idx, band_name in enumerate(bands):
            band_data = data.sel(bands=[band_name])
            if band_name in exclude:
                # Pass excluded bands through unchanged.
                data_arrs.append(band_data)
                continue
            if pass_dask:
                dims = band_data.dims
                coords = band_data.coords
                d_arr = func(band_data.data, index=idx)
                # Rewrap the raw array with the original metadata.
                band_data = xr.DataArray(d_arr, dims=dims, coords=coords)
            else:
                band_data = func(band_data, index=idx)
            data_arrs.append(band_data)
            # NOTE(review): attrs are updated per band, so later bands'
            # attrs overwrite earlier ones — confirm this is intended.
            attrs.update(band_data.attrs)
        data.data = xr.concat(data_arrs, dim='bands').data
        data.attrs = attrs
        return data
    else:
        band_data = data.sel(bands=[b for b in bands if b not in exclude])
        if pass_dask:
            dims = band_data.dims
            coords = band_data.coords
            d_arr = func(band_data.data)
            band_data = xr.DataArray(d_arr, dims=dims, coords=coords)
        else:
            band_data = func(band_data)
        attrs.update(band_data.attrs)
        # Re-attach excluded bands, then restore the original band order.
        new_data = xr.concat([band_data, data.sel(bands=exclude)], dim='bands')
        data.data = new_data.sel(bands=bands).data
        data.attrs = attrs
        return data
Apply `func` to the provided data. Args: data (xarray.DataArray): Data to be modified inplace. func (callable): Function to be applied to an xarray exclude (iterable): Bands in the 'bands' dimension to not include in the calculations. separate (bool): Apply `func` one band at a time. Default is False. pass_dask (bool): Pass the underlying dask array instead of the xarray.DataArray.
juraj-google-style
def execute_on(self, worker):
    """Executes the closure on the given worker.

    Args:
        worker: a `Worker` object.

    Raises:
        ClosureInputError: if any input `RemoteValue` carries an error.
    """
    # Select the per-replica slice of args/kwargs destined for this worker.
    replica_args = _select_worker_slice(worker.worker_index, self._args)
    replica_kwargs = _select_worker_slice(worker.worker_index, self._kwargs)
    # Surface input errors before executing, wrapped as ClosureInputError
    # so callers can distinguish input failures from execution failures.
    e = _get_error_from_remote_values(replica_args) or _get_error_from_remote_values(replica_kwargs)
    if e:
        if not isinstance(e, ClosureInputError):
            e = ClosureInputError(e)
        raise e
    with ops.device(worker.device_name):
        with context.executor_scope(worker.executor):
            with coordinator_context.with_dispatch_context(worker):
                with metric_utils.monitored_timer('closure_execution'):
                    # Resolve RemoteValues to concrete values, then call.
                    output_values = self._function(*nest.map_structure(coordinator_context.maybe_get_remote_value, replica_args), **nest.map_structure(coordinator_context.maybe_get_remote_value, replica_kwargs))
    self.maybe_call_with_output_remote_value(lambda r: r._set_values(output_values))
Executes the closure on the given worker. Args: worker: a `Worker` object.
github-repos
def get_hook(hook_name):
    """Return the content of the specified packaged hook.

    Args:
        hook_name (str): name of the hook resource.

    Returns:
        str: (the content of) the hook.

    Raises:
        HookNotFoundError: if no such resource is packaged with this module.
    """
    if pkg_resources.resource_exists(__name__, hook_name):
        return pkg_resources.resource_string(__name__, hook_name)
    raise HookNotFoundError
Returns the specified hook. Args: hook_name (str) Returns: str - (the content of) the hook Raises: HookNotFoundError
codesearchnet
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often targets are in the top `K` predictions.

    Args:
        y_true: The ground truth values (one-hot encoded).
        y_pred: The prediction values.
        k: (Optional) Number of top elements to look at for computing
            accuracy. Defaults to 5.

    Returns:
        Top K categorical accuracy value, cast to the backend float type.
    """
    true_classes = math_ops.argmax(y_true, axis=-1)
    hits = nn.in_top_k(y_pred, true_classes, k)
    return math_ops.cast(hits, backend.floatx())
Computes how often targets are in the top `K` predictions. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3) >>> assert m.shape == (2,) >>> m.numpy() array([1., 1.], dtype=float32) Args: y_true: The ground truth values. y_pred: The prediction values. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Top K categorical accuracy value.
github-repos
def get_tree(profile, sha, recursive=True):
    """Fetch a git tree from the API.

    Args:
        profile: A profile generated from
            ``simplygithub.authentication.profile``; tells this module the
            ``repo`` to connect to and the ``token`` to connect with.
        sha: The SHA of the tree to fetch.
        recursive: If ``True``, traverse all subtrees all the way down,
            returning every object in the tree at all levels.

    Returns:
        A dict with data about the tree.
    """
    suffix = '?recursive=1' if recursive else ''
    resource = '/trees/' + sha + suffix
    data = api.get_request(profile, resource)
    return prepare(data)
Fetch a tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of the tree to fetch. recursive If ``True``, traverse all subtrees and their subtrees, all the way down. That will return a list of all objects in the tree, all levels deep. Returns: A dict with data about the tree.
codesearchnet
def all_label_values(self, label_list_ids=None):
    """Return a set of all label-values occurring in this utterance.

    Args:
        label_list_ids (list): If not None, only label-values from
            label-lists whose id is contained in this list are considered.

    Returns:
        set: A set of distinct label-values.
    """
    collected = set()
    for label_list in self.label_lists.values():
        # Skip label-lists filtered out by the id whitelist.
        if label_list_ids is not None and label_list.idx not in label_list_ids:
            continue
        collected.update(label_list.label_values())
    return collected
Return a set of all label-values occurring in this utterance. Args: label_list_ids (list): If not None, only label-values from label-lists with an id contained in this list are considered. Returns: :class:`set`: A set of distinct label-values.
juraj-google-style
def write(self, destination, filename, template_name, **kwargs):
    """Render the named template and write the result to a file.

    Args:
        destination (string): the destination location
        filename (string): the filename that will be written
        template_name (string): the name of the template
        kwargs (dict): all attributes that will be passed to the template
    """
    rendered = self.env.get_template(template_name).render(kwargs)
    super(TemplateFileWriter, self).write(
        destination=destination, filename=filename, content=rendered)
Write a file according to the template name Args: destination (string): the destination location filename (string): the filename that will be written template_name (string): the name of the template kwargs (dict): all attribute that will be passed to the template
juraj-google-style
def validate_and_decode(jwt_bu64, cert_obj):
    """Validate the JWT and return its payload as a dict.

    Args:
        jwt_bu64: bytes.  The JWT encoded using a URL-safe flavor of Base64.
        cert_obj: cryptography.Certificate.  Public certificate used for
            signing the JWT (typically the CN cert).

    Returns:
        dict: Values embedded in the JWT.

    Raises:
        JwtException: If validation fails.
    """
    try:
        return jwt.decode(
            jwt_bu64.strip(), cert_obj.public_key(),
            algorithms=['RS256'], verify=True)
    except jwt.InvalidTokenError as exc:
        msg = 'Signature is invalid. error="{}"'.format(str(exc))
        raise JwtException(msg)
Validate the JWT and return as a dict. - JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and returns it as a dict. Args: jwt_bu64: bytes The JWT encoded using a a URL safe flavor of Base64. cert_obj: cryptography.Certificate Public certificate used for signing the JWT (typically the CN cert). Raises: JwtException: If validation fails. Returns: dict: Values embedded in the JWT.
codesearchnet
def get_dimension_indices(self, query):
    """Convert a dimension/category list of dicts into dimension indices.

    Args:
        query (list): dimension/category list of dicts.

    Returns:
        list: list of dimensions' indices.
    """
    dim_ids = self['id'] if self.get('id') else self['dimension']['id']
    indices = []
    for dim_id in dim_ids:
        # Find the (first) query entry that mentions this dimension.
        category = [d.get(dim_id) for d in query if dim_id in d][0]
        indices.append(self.get_dimension_index(dim_id, category))
    return indices
Converts a dimension/category list of dicts into a list of dimensions’ indices. Args: query(list): dimension/category list of dicts. Returns: indices(list): list of dimensions' indices.
juraj-google-style
def _parse_dtype(self, space):
    """Get a tensor dtype from an OpenAI Gym space.

    Args:
        space: Gym space.

    Raises:
        NotImplementedError: For spaces other than Box and Discrete.

    Returns:
        TensorFlow data type.
    """
    if isinstance(space, gym.spaces.Discrete):
        # Discrete spaces are integer indices.
        return tf.int32
    if isinstance(space, gym.spaces.Box):
        # Box spaces are continuous-valued.
        return tf.float32
    raise NotImplementedError()
Get a tensor dtype from a OpenAI Gym space. Args: space: Gym space. Raises: NotImplementedError: For spaces other than Box and Discrete. Returns: TensorFlow data type.
codesearchnet
def _atoms(atoms_string): atoms = {} for split in atoms_string.split(','): sites = split.split('.') el = sites.pop(0) sites = list(map(int, sites)) atoms[el] = np.array(sites) - 1 return atoms
Parse the atom string. Args: atoms_string (str): The atoms to plot, in the form ``"C.1.2.3,"``. Returns: dict: The atomic indices over which to sum the DOS. Formatted as:: {Element: [atom_indices]}. Indices are zero indexed for each atomic species. If an element symbol is included with an empty list, then all sites for that species are considered.
juraj-google-style
def DoesNotContainIgnoreCase(self, value):
    """Sets the type of the WHERE clause as "does not contain ignore case".

    Args:
        value: The value to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(
        value, 'DOES_NOT_CONTAIN_IGNORE_CASE')
    self._awql = condition
    return self._query_builder
Sets the type of the WHERE clause as "does not contain ignore case". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
codesearchnet
def _SparseAddGrad(op: ops.Operation, *grads):
    """The backward operator for the SparseAdd op.

    SparseAdd computes A + B where A, B, and the sum are `SparseTensor`s.
    This takes the upstream gradient w.r.t. the non-empty values of the
    sum and produces gradients w.r.t. the non-empty values of A and B.

    Args:
        op: the SparseAdd op.
        *grads: the incoming gradients, one element per output of `op`.

    Returns:
        Gradient for each of the 6 input tensors of SparseAdd:
        (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh).
        The gradients for the indices, shapes, and the threshold are None.
    """
    grad_wrt_sum_values = grads[1]
    a_indices = op.inputs[0]
    b_indices = op.inputs[3]
    sum_indices = op.outputs[0]
    a_val_grad, b_val_grad = gen_sparse_ops.sparse_add_grad(
        grad_wrt_sum_values, a_indices, b_indices, sum_indices)
    a_val_grad.set_shape(op.inputs[1].get_shape())
    b_val_grad.set_shape(op.inputs[4].get_shape())
    # Only a_values and b_values are differentiable inputs.
    return (None, a_val_grad, None, None, b_val_grad, None, None)
The backward operator for the SparseAdd op. The SparseAdd op calculates A + B, where A, B, and the sum are all represented as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. non-empty values of the sum, and outputs the gradients w.r.t. the non-empty values of A and B. Args: op: the SparseAdd op *grads: the incoming gradients, one element per output of `op` Returns: Gradient for each of the 6 input tensors of SparseAdd: (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh) The gradients for the indices, shapes, and the threshold are None.
github-repos
def all_arguments(cls, function, arguments):
    """Helper for creating `FunctionCall`s with `Arguments`.

    Args:
        function: The value to store for the action function.
        arguments: The values to store for the arguments of the action.
            Can be an `Arguments` object, a `dict`, or an iterable; dicts
            and iterables are unpacked into an `Arguments` object.

    Returns:
        A new `FunctionCall` instance.
    """
    if isinstance(arguments, dict):
        packed = Arguments(**arguments)
    elif isinstance(arguments, Arguments):
        packed = arguments
    else:
        # Any other iterable is unpacked positionally.
        packed = Arguments(*arguments)
    return cls(function, packed)
Helper function for creating `FunctionCall`s with `Arguments`. Args: function: The value to store for the action function. arguments: The values to store for the arguments of the action. Can either be an `Arguments` object, a `dict`, or an iterable. If a `dict` or an iterable is provided, the values will be unpacked into an `Arguments` object. Returns: A new `FunctionCall` instance.
codesearchnet
def GreaterThan(self, value):
    """Sets the type of the WHERE clause as "greater than".

    Args:
        value: The value to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(value, '>')
    self._awql = condition
    return self._query_builder
Sets the type of the WHERE clause as "greater than". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
juraj-google-style
def vals2colors(vals, cmap='GnBu_d', res=100):
    """Maps values to colors.

    Args:
        vals (list or list of lists): values to map to colors.
        cmap (str): seaborn color palette name (default 'GnBu_d').
        res (int): resolution of the color map (default 100).

    Returns:
        list of rgb tuples, one per input value.
    """
    # Flatten one level when the input is a list of lists.
    if any(isinstance(v, list) for v in vals):
        vals = list(itertools.chain(*vals))
    palette = np.array(sns.color_palette(cmap, res))
    # Bucket each value into one of `res` equal-width bins over the range.
    bin_edges = np.linspace(np.min(vals), np.max(vals) + 1, res + 1)
    ranks = np.digitize(vals, bin_edges) - 1
    return [tuple(color) for color in palette[ranks, :]]
Maps values to colors Args: values (list or list of lists) - list of values to map to colors cmap (str) - color map (default is 'husl') res (int) - resolution of the color map (default: 100) Returns: list of rgb tuples
juraj-google-style
def tensor_rank_tensor(self, name='tensor_rank_tensor'):
    """Rank (in the sense of tensors) of matrix corresponding to this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.

    Args:
        name: A name for this `Op`.

    Returns:
        `int32` `Tensor`, determined at runtime.
    """
    with self._name_scope(name):
        return self._tensor_rank_tensor()
Rank (in the sense of tensors) of matrix corresponding to this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`. Args: name: A name for this `Op`. Returns: `int32` `Tensor`, determined at runtime.
github-repos
def Analyze(self, source_path, output_writer):
    """Analyzes the source.

    Args:
        source_path (str): the source path.
        output_writer (StdoutWriter): the output writer.

    Raises:
        RuntimeError: if the source path does not exist, or if the source
            path is not a file or directory, or if the format of or within
            the source file is not supported.
    """
    if not os.path.exists(source_path):
        raise RuntimeError('No such source: {0:s}.'.format(source_path))
    scan_context = source_scanner.SourceScannerContext()
    scan_path_spec = None
    scan_step = 0
    scan_context.OpenSourcePath(source_path)
    while True:
        self._source_scanner.Scan(
            scan_context, auto_recurse=self._auto_recurse,
            scan_path_spec=scan_path_spec)
        if not scan_context.updated:
            # Nothing new discovered; scanning is complete.
            break
        if not self._auto_recurse:
            # In step-by-step mode, show intermediate scan state.
            output_writer.WriteScanContext(scan_context, scan_step=scan_step)
        scan_step += 1
        if scan_context.source_type in [
            definitions.SOURCE_TYPE_DIRECTORY, definitions.SOURCE_TYPE_FILE]:
            # Plain files and directories need no further volume scanning.
            break
        # Ask the user for credentials for any encrypted volumes found.
        for locked_scan_node in scan_context.locked_scan_nodes:
            self._PromptUserForEncryptedVolumeCredential(
                scan_context, locked_scan_node, output_writer)
        if not self._auto_recurse:
            scan_node = scan_context.GetUnscannedScanNode()
            if not scan_node:
                return
            scan_path_spec = scan_node.path_spec
    if self._auto_recurse:
        output_writer.WriteScanContext(scan_context)
Analyzes the source. Args: source_path (str): the source path. output_writer (StdoutWriter): the output writer. Raises: RuntimeError: if the source path does not exists, or if the source path is not a file or directory, or if the format of or within the source file is not supported.
juraj-google-style
def on_element(self, element, window, context):
    """Called when a new element arrives in a window.

    Base implementation is a no-op; subclasses override this hook to
    react to arriving elements.

    Args:
        element: the element being added.
        window: the window to which the element is being added.
        context: a context (e.g. a TriggerContext instance) for managing
            state and setting timers.
    """
    pass
Called when a new element arrives in a window. Args: element: the element being added window: the window to which the element is being added context: a context (e.g. a TriggerContext instance) for managing state and setting timers
github-repos
def get_float_list(self, min_length=_MIN_LENGTH, max_length=_MAX_LENGTH):
    """Consume a float list with given constraints.

    Args:
        min_length: The minimum length of the list.
        max_length: The maximum length of the list.

    Returns:
        Consumed float list based on input bytes and constraints.
    """
    # Pick the list length first, then consume that many floats.
    count = self.get_int(min_length, max_length)
    return self.fdp.ConsumeFloatListInRange(count, _MIN_FLOAT, _MAX_FLOAT)
Consume a float list with given constraints. Args: min_length: The minimum length of the list. max_length: The maximum length of the list. Returns: Consumed float list based on input bytes and constraints.
github-repos
def _generate_enqueue_op(self, inputs, name_prefix, index, device=None, tpu_ordinal=-1):
    """Generate a host-side Op to enqueue a tuple to the infeed queue.

    If device is None the inputs are all required to have the same device
    specification, and the enqueue Op is colocated with inputs[0].
    Otherwise the enqueue Op is placed on 'device'.

    Args:
        inputs: a list of Tensors with the types and shapes of the tuple
            elements.
        name_prefix: the base name for the Op.
        index: the shard index, used to uniquify the Op name.
        device: device to place the Op on, or None if it should be
            colocated with the inputs.
        tpu_ordinal: ordinal of the TPU device on the host to use for
            infeed if device is a CPU device.  Should be set to -1 if
            device is a TPU device.

    Returns:
        An Op corresponding to a shard of infeed enqueued at the host,
        suitable for use within a replicated block.

    Raises:
        ValueError: if device is None and inputs do not all have the same
            device specification.
    """
    full_name = '%s/%d' % (name_prefix, index)
    shapes = [t.shape for t in inputs]
    if device is None:
        # All inputs must share one device so colocation is well-defined.
        devices = [t.device for t in inputs]
        for i in range(1, self.number_of_tuple_elements):
            if devices[0] != devices[i]:
                raise ValueError(f'input devices for shard {index} are {str(devices)}, but should all be the same')
        with ops.colocate_with(inputs[0]):
            return tpu_ops.infeed_enqueue_tuple(inputs=inputs, shapes=shapes, name=full_name, device_ordinal=tpu_ordinal)
    else:
        with ops.device(device):
            return tpu_ops.infeed_enqueue_tuple(inputs=inputs, shapes=shapes, name=full_name, device_ordinal=tpu_ordinal)
Generate a host-side Op to enqueue a tuple to the queue. If device is None the inputs are all required to have the same device specification, and the enqueue Op is colocated with inputs[0]. Otherwise the enqueue Op is placed on 'device'. Args: inputs: a list of Tensors with the types and shapes of the tuple elements. name_prefix: the base name for the Op. index: the shard index, used to uniquify the Op name. device: device to place the Op on, or None if it should be colocated with the inputs. tpu_ordinal: ordinal of the TPU device on the host to use for infeed if device is a CPU device. Should be set to -1 if device is a TPU device. Returns: An Op corresponding to a shard of infeed enqueued at the host, suitable for use within a replicated block. Raises: ValueError: if device is None and inputs do not all have the same device specification.
github-repos
def _swap_where(condition, x, y):
    """Swaps the elements of `x` and `y` based on `condition`.

    Args:
        condition: A `Tensor` of dtype bool.
        x: A `Tensor` with the same shape as `condition`.
        y: A `Tensor` with the same shape and dtype as `x`.

    Returns:
        Two `Tensors` with the same shape as `x` and `y`.
    """
    swapped_first = tf.where(condition, y, x)
    swapped_second = tf.where(condition, x, y)
    return (swapped_first, swapped_second)
Swaps the elements of `x` and `y` based on `condition`. Args: condition: A `Tensor` of dtype bool. x: A `Tensor` with the same shape as `condition`. y: A `Tensor` with the same shape and dtype as `x`. Returns: Two `Tensors` with the same shape as `x` and `y`.
github-repos
def write_config_json(config_file, data):
    """Serializes an object to disk as JSON.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt /
    SystemExit) is narrowed to ``except Exception``; the error dict
    reported the wrong function name ('init_config_json'); the
    ``outfile = None`` / ``del outfile`` / ``gc.collect()`` finally-block
    ritual was a no-op (the ``with`` statement already closes the file)
    and is removed.

    Args:
        config_file (str): The path on disk to save the file.
        data (object): The JSON-serializable object to write.

    Raises:
        ArcRestHelperError: if the file cannot be opened or the data
            cannot be serialized.
    """
    try:
        with open(config_file, 'w') as outfile:
            json.dump(data, outfile)
    except Exception:
        line, filename, synerror = trace()
        raise ArcRestHelperError({'function': 'write_config_json',
                                  'line': line,
                                  'filename': filename,
                                  'synerror': synerror})
Serializes an object to disk. Args: config_file (str): The path on disk to save the file. data (object): The object to serialize.
codesearchnet
def __init__(self, sdat):
    """Initialization of instances.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): the StagyyData
            instance for which a stagnant lid regime was found.

    Attributes:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): the StagyyData
            instance for which a stagnant lid regime was found.
    """
    self.sdat = sdat
    message = 'Stagnant lid regime for {}'.format(sdat)
    super().__init__(message)
Initialization of instances: Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): the StagyyData instance for which a stagnant lid regime was found. Attributes: sdat (:class:`~stagpy.stagyydata.StagyyData`): the StagyyData instance for which a stagnant lid regime was found.
juraj-google-style
def convertDay(self, day, prefix='', weekday=False):
    """Convert a datetime object representing a day into a human-ready
    string that can be read, spoken aloud, etc.

    Args:
        day (datetime.date): A datetime object to be converted into text.
        prefix (str): Optional prefix for the converted string (e.g.
            prefix="in" yields "in March 5"); not applied to the special
            "today"/"tomorrow" results.
        weekday (bool): If True, include the weekday name, e.g.
            "Monday, October 1" rather than "October 1".

    Returns:
        str: "today", "tomorrow", or the formatted day string (ignoring
        any time-related information).
    """
    def _same_day(d1, d2):
        return (d1.day == d2.day and d1.month == d2.month
                and d1.year == d2.year)

    tomorrow = self.now + datetime.timedelta(days=1)
    if _same_day(day, self.now):
        return 'today'
    if _same_day(day, tomorrow):
        return 'tomorrow'
    fmt = '%A, %B %d' if weekday else '%B %d'
    text = day.strftime(fmt)
    # Strip the leading zero of the day-of-month ("March 05" -> "March 5").
    if not int(text[-2]):
        text = text[:-2] + text[-1]
    return prefix + ' ' + text
Convert a datetime object representing a day into a human-ready string that can be read, spoken aloud, etc. Args: day (datetime.date): A datetime object to be converted into text. prefix (str): An optional argument that prefixes the converted string. For example, if prefix="in", you'd receive "in two days", rather than "two days", while the method would still return "tomorrow" (rather than "in tomorrow"). weekday (bool): An optional argument that returns "Monday, Oct. 1" if True, rather than "Oct. 1". Returns: A string representation of the input day, ignoring any time-related information.
codesearchnet
def size(self) -> int:
    """Number of bytes required for this data type.

    Returns:
        int: size in bytes (> 0) of the underlying value, as reported by
        `sizeof` (presumably `ctypes.sizeof` — confirm the import).
    """
    return sizeof(self.value)
Number of byte required for this data type Returns: Integer > 0
github-repos
def get_structure_from_id(self, task_id, final_structure=True):
    """Returns a structure from the database given the task id.

    Args:
        task_id: The task_id to query for.
        final_structure: Whether to obtain the final or initial structure.
            Defaults to True.

    Returns:
        Structure: the structure associated with the task.

    Raises:
        QueryError: if zero or more than one result is found.
    """
    criteria = {'task_id': task_id}
    field = 'output.crystal' if final_structure else 'input.crystal'
    results = tuple(self.query([field], criteria))
    if len(results) > 1:
        raise QueryError('More than one result found for task_id {}!'.format(task_id))
    if len(results) == 0:
        raise QueryError('No structure found for task_id {}!'.format(task_id))
    return Structure.from_dict(results[0][field])
Returns a structure from the database given the task id. Args: task_id: The task_id to query for. final_structure: Whether to obtain the final or initial structure. Defaults to True.
codesearchnet
def reindex(self, kdims=[], force=False):
    """Reindexes object dropping static or supplied kdims.

    Creates a new object with a reordered or reduced set of key
    dimensions.  By default drops all non-varying key dimensions.
    Reducing the number of key dimensions will discard information from
    the keys.  All data values are accessible in the newly created object
    as the new labels must be sufficient to address each value uniquely.

    Args:
        kdims (optional): New list of key dimensions after reindexing.
        force (bool, optional): Whether to drop non-unique items.

    Returns:
        Reindexed object.
    """
    old_kdims = [d.name for d in self.kdims]
    if (not isinstance(kdims, list)):
        kdims = [kdims]
    elif (not len(kdims)):
        # Default: keep only dimensions with more than one unique value.
        kdims = [d for d in old_kdims if (not (len(set(self.dimension_values(d))) == 1))]
    indices = [self.get_dimension_index(el) for el in kdims]
    # Project each existing key tuple onto the retained dimensions.
    keys = [tuple((k[i] for i in indices)) for k in self.data.keys()]
    reindexed_items = OrderedDict(((k, v) for (k, v) in zip(keys, self.data.values())))
    reduced_dims = set([d.name for d in self.kdims]).difference(kdims)
    dimensions = [self.get_dimension(d) for d in kdims if (d not in reduced_dims)]
    if ((len(set(keys)) != len(keys)) and (not force)):
        raise Exception('Given dimension labels not sufficientto address all values uniquely')
    if len(keys):
        # Dropped dimensions become constant dimensions (cdims), pinned
        # to their first observed value.
        cdims = {self.get_dimension(d): self.dimension_values(d)[0] for d in reduced_dims}
    else:
        cdims = {}
    with item_check((indices == sorted(indices))):
        return self.clone(reindexed_items, kdims=dimensions, cdims=cdims)
Reindexes object dropping static or supplied kdims Creates a new object with a reordered or reduced set of key dimensions. By default drops all non-varying key dimensions. Reducing the number of key dimensions will discard information from the keys. All data values are accessible in the newly created object as the new labels must be sufficient to address each value uniquely. Args: kdims (optional): New list of key dimensions after reindexing force (bool, optional): Whether to drop non-unique items Returns: Reindexed object
codesearchnet
def similar_artists(self, artist_id: str) -> List[NameExternalIDPair]:
    """Return similar artists as artist name - external ID pairs.

    Args:
        artist_id (str): The Spotify ID of the artist for whom similar
            artists are requested.

    Returns:
        Zero or more artist name - external ID pairs.

    Raises:
        requests.HTTPError: If an HTTP error occurred during the request.
        SpotifyClientError: If an invalid item is found.
    """
    url = self._API_URL_TEMPLATE.format("artists/{}/related-artists".format(artist_id))
    headers = {"Authorization": "Bearer {}".format(self._token.access_token)}
    response: requests.Response = requests.get(url, headers=headers)
    response.raise_for_status()
    # An empty body means no related artists.
    if not response.text:
        return []
    pairs: List[NameExternalIDPair] = []
    for item in response.json()["artists"]:
        pair = NameExternalIDPair(item["name"], item["id"])
        if pair.name is None or pair.external_id is None:
            raise SpotifyClientError("Name or ID is missing")
        pairs.append(pair)
    return pairs
Returns zero or more similar artists (in the form of artist name - external ID pairs) to the one corresponding to the given artist ID. Arguments: artist_id ([str]): The Spotify ID of the artist for whom similar artists are requested. Returns: Zero or more artist name - external ID pairs. Raises: requests.HTTPError: If an HTTP error occurred during the request. SpotifyClientError: If an invalid item is found.
juraj-google-style
def read_vocab_file(file_path):
    """Reads a vocab file into memory.

    Args:
        file_path: Path to a CSV where each line is "token,example_count".

    Returns:
        Two lists: one for the vocab tokens, and one for the integer
        example counts.
    """
    with file_io.FileIO(file_path, 'r') as f:
        frame = pd.read_csv(f, header=None, names=['vocab', 'count'],
                            dtype=str, na_filter=False)
    vocab = frame['vocab'].tolist()
    example_counts = frame['count'].astype(int).tolist()
    return vocab, example_counts
Reads a vocab file to memeory. Args: file_path: Each line of the vocab is in the form "token,example_count" Returns: Two lists, one for the vocab, and one for just the example counts.
juraj-google-style
def is_profile_of(url: str, message_or_descriptor: annotation_utils.MessageOrDescriptorBase) -> bool:
    """Returns True if message_or_descriptor is a profile of url.

    Args:
        url: The FHIR structure definition URL to compare against.
        message_or_descriptor: The Message or Descriptor to examine.

    Returns:
        True if message_or_descriptor's fhir_profile_base extension list
        contains url.
    """
    options = annotation_utils.get_options(message_or_descriptor)
    return url in options.Extensions[annotations_pb2.fhir_profile_base]
Returns True if message_or_descriptor is a profile of url. Args: url: The FHIR structure definition URL to compare against. message_or_descriptor: The Message or Descriptor to examine. Returns: True if message_or_descriptor's fhir_profile_base extension list contains url.
github-repos
def write_tms_tdi_bits(self, tmsdata, tdidata, return_tdo=False):
    """Write arbitrary TMS and TDI data to the physical JTAG scan chain.

    Args:
        tmsdata (bitarray): bits to send over the TMS line; must be the
            same length as tdidata.
        tdidata (bitarray): bits to send over the TDI line; must be the
            same length as tmsdata.
        return_tdo (bool): if True, read back and return the TDO response.

    Returns:
        bitarray of TDO bits if return_tdo is True, otherwise None.
    """
    self._check_jtag()
    if (len(tmsdata) != len(tdidata)):
        raise Exception('TMSdata and TDIData must be the same length')
    # Keep the software model of the scan chain in sync with the TMS
    # transitions we are about to drive.
    self._update_scanchain(tmsdata)
    count = len(tmsdata)
    t = time()
    # Interleave TMS/TDI bit pairs, then byte-align and reverse for the
    # controller's expected wire format.
    outdata = bitarray([val for pair in zip(tmsdata, tdidata) for val in pair])
    outdata = build_byte_align_buff(outdata).tobytes()[::(- 1)]
    if (self._scanchain and self._scanchain._print_statistics):
        print('TDI/TDI DATA PREP TIME', (time() - t))
    t = time()
    self.bulkCommandDefault((_BMSG_WRITE_TMS_TDI % (return_tdo, count.to_bytes(4, 'little'))))
    self.bulkWriteData(outdata)
    if (self._scanchain and self._scanchain._print_statistics):
        print('TRANSFER TIME', (time() - t))
    t = time()
    tdo_bits = (self._read_tdo(count) if return_tdo else None)
    if (self._scanchain and self._scanchain._print_statistics):
        print('TDO READ TIME', (time() - t))
    # 10 appears to be a transaction-type code for statistics collection;
    # TODO confirm against the controller protocol constants.
    self._get_adv_trans_stats(10, return_tdo)
    return tdo_bits
Command controller to write arbitrary TDI and TMS data to the physical
    scan chain. Optionally return TDO bits sent back from the scan chain.

    Args:
        tmsdata - bits to send over TMS line of scan chain (bitarray)
            must be the same length as tdidata
        tdidata - bits to send over TDI line of scan chain (bitarray)
            must be the same length as tmsdata
        return_tdo (bool) - return the device's bitarray response

    Returns:
        None by default, or the (bitarray) response of the device after
        receiving data, if return_tdo is True.

    Usage:
        >>> from proteusisc import getAttachedControllers, bitarray
        >>> c = getAttachedControllers()[0]
        >>> c.jtag_enable()
        >>> c.write_tms_tdi_bits(bitarray("00001"),
                                 bitarray("11111"), return_tdo=True)
        >>> c.jtag_disable()
codesearchnet
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
    """Dummy inputs to build the network.

    Returns:
        `Dict[str, tf.Tensor]`: a random mel-spectrogram feature tensor
        under the model's main input name, plus a minimal
        `decoder_input_ids` tensor.
    """
    # Feature shape (1, num_mel_bins, 2 * max_source_positions - 1) comes
    # from the model config; the decoder is seeded with two token ids.
    return {self.main_input_name: tf.random.uniform([1, self.config.num_mel_bins, self.config.max_source_positions * 2 - 1], dtype=tf.float32), 'decoder_input_ids': tf.constant([[1, 3]], dtype=tf.int32)}
Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs.
github-repos
def ParseRow(self, parser_mediator, row_offset, row):
    """Parses a line of the log file and produces events.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        row_offset (int): line number of the row.
        row (dict[str, str]): fields of a single row, as specified in COLUMNS.
    """
    time_elements_tuple = self._GetTimeElementsTuple(row['time'])

    try:
        date_time = dfdatetime_time_elements.TimeElements(
            time_elements_tuple=time_elements_tuple)
        # The log stores no time zone; mark as local so the configured zone
        # can be applied downstream.
        date_time.is_local_time = True
    except ValueError:
        parser_mediator.ProduceExtractionWarning(
            'invalid date time value: {0!s}'.format(time_elements_tuple))
        return

    # Every attribute below maps 1:1 onto an identically named CSV column;
    # copy them data-driven instead of one assignment per field.
    field_names = (
        'access', 'action0', 'action1', 'action1_status', 'action2',
        'action2_status', 'address', 'backup_id', 'cat', 'cleaninfo',
        'clientgroup', 'compressed', 'computer', 'definfo', 'defseqnumber',
        'deleteinfo', 'depth', 'description', 'domain_guid', 'domainname',
        'err_code', 'event_data', 'event', 'extra', 'file', 'flags',
        'groupid', 'guid', 'license_expiration_dt', 'license_feature_name',
        'license_feature_ver', 'license_fulfillment_id', 'license_lifecycle',
        'license_seats_delta', 'license_seats', 'license_seats_total',
        'license_serial_num', 'license_start_dt', 'logger', 'login_domain',
        'log_session_guid', 'macaddr', 'new_ext', 'ntdomain', 'parent',
        'quarfwd_status', 'remote_machine_ip', 'remote_machine', 'scanid',
        'snd_status', 'status', 'still_infected', 'time', 'user', 'vbin_id',
        'vbin_session_id', 'virus_id', 'virus', 'virustype')

    event_data = SymantecEventData()
    for name in field_names:
        setattr(event_data, name, row.get(name, None))

    # These two do not follow the 1:1 naming scheme.
    event_data.offset = row_offset
    # NOTE(review): the original code reads the 'version:' column, with a
    # trailing colon; preserved as-is — confirm against COLUMNS.
    event_data.version = row.get('version:', None)

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a line of the log file and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row_offset (int): line number of the row. row (dict[str, str]): fields of a single row, as specified in COLUMNS.
juraj-google-style
def filter_sequences(self, seq_type):
    """Return a DictList of only the specified type from the sequences attribute.

    Args:
        seq_type (SeqProp): Object type to keep.

    Returns:
        DictList: A filtered DictList containing only objects of seq_type.
    """
    matching = (entry for entry in self.sequences if isinstance(entry, seq_type))
    return DictList(matching)
Return a DictList of only specified types in the sequences attribute. Args: seq_type (SeqProp): Object type Returns: DictList: A filtered DictList of specified object type only
juraj-google-style
def QA_fetch_ctp_tick(code, start, end, frequence, format='pd', collections=DATABASE.ctp_tick):
    """Fetch CTP tick data stored in MongoDB (for stored ticks only).

    Args:
        code: instrument ID or list of instrument IDs to query.
        start: start of the time range; converted to a timestamp.
        end: end of the time range; converted to a timestamp.
        frequence: tick type, matched against the stored 'type' field.
        format (str): output format hint (default 'pd').
        collections: MongoDB collection to query (default DATABASE.ctp_tick).

    Returns:
        pandas.DataFrame of tick fields, indexed by datetime.
    """
    code = QA_util_code_tolist(code, auto_fill=False)
    cursor = collections.find({
        'InstrumentID': {'$in': code},
        "time_stamp": {
            "$gte": QA_util_time_stamp(start),
            "$lte": QA_util_time_stamp(end)
        },
        'type': frequence
    }, {"_id": 0}, batch_size=10000)
    # 1.7976931348623157e+308 (DBL_MAX) is CTP's sentinel for "no value";
    # treat it and empty strings as NaN, then drop all-NaN columns.
    hq = pd.DataFrame([data for data in cursor]).replace(1.7976931348623157e+308, numpy.nan).replace('', numpy.nan).dropna(axis=1)
    p1 = hq.loc[:, ['ActionDay', 'AskPrice1', 'AskVolume1', 'AveragePrice', 'BidPrice1', 'BidVolume1', 'HighestPrice', 'InstrumentID', 'LastPrice', 'OpenInterest', 'TradingDay', 'UpdateMillisec', 'UpdateTime', 'Volume']]
    # Build a full datetime from day + time + sub-second part; the '[1:]'
    # strips the leading '0' of the fractional seconds ('0.123456' -> '.123456').
    p1 = p1.assign(datetime=p1.ActionDay.apply(QA_util_date_int2str)+' '+p1.UpdateTime + (p1.UpdateMillisec/1000000).apply(lambda x: str('%.6f' % x)[1:]), code=p1.InstrumentID)
    p1.datetime = pd.to_datetime(p1.datetime)
    return p1.set_index(p1.datetime)
For querying CTP tick data stored in the database only.

        Arguments:
            code -- instrument ID or list of instrument IDs to query.
            start -- start of the time range (converted to a timestamp).
            end -- end of the time range (converted to a timestamp).
            frequence -- tick type, matched against the stored 'type' field.

        Keyword Arguments:
            format {str} -- output format hint (default: {'pd'})
            collections -- MongoDB collection to query (default: {DATABASE.ctp_tick})

        Returns:
            pandas.DataFrame of tick fields, indexed by datetime.
juraj-google-style
def getent(refresh=False):
    """Return the list of all info for all users.

    Args:
        refresh (bool, optional): Refresh the cached user information.
            Useful when used from within a state function. Default is False.

    Returns:
        list: A list of dicts describing every user on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' user.getent
    """
    # Serve from the per-run cache unless a refresh was requested.
    if 'user.getent' in __context__ and not refresh:
        return __context__['user.getent']

    ret = []
    for user in __salt__['user.list_users']():
        info = __salt__['user.info'](user)
        # gid and shell are not reported by user.info on this platform,
        # so they are left empty for interface compatibility.
        ret.append({
            'gid': '',
            'groups': info['groups'],
            'home': info['home'],
            'name': info['name'],
            'passwd': info['passwd'],
            'shell': '',
            'uid': info['uid'],
        })

    __context__['user.getent'] = ret
    return ret
Return the list of all info for all users Args: refresh (bool, optional): Refresh the cached user information. Useful when used from within a state function. Default is False. Returns: dict: A dictionary containing information about all users on the system CLI Example: .. code-block:: bash salt '*' user.getent
juraj-google-style
def _VerifyOneTest(self, pool_func, input_sizes, window, strides, padding, data_format, data_type, expected, use_gpu):
    """Verifies the output values of a 3D pooling function.

    Args:
        pool_func: Function to be called: co.MaxPool, co.AvgPool.
        input_sizes: Input tensor dimensions.
        window: Tuple of kernel dims: planes, rows, cols.
        strides: Tuple of strides for dims: planes, rows, cols.
        padding: Padding type.
        data_format: The data format used to run the pooling operation.
        data_type: The data type used to run the pooling operation.
        expected: An array containing the expected operation outputs.
        use_gpu: Whether to run ops on GPU.
    """
    total_size = 1
    for s in input_sizes:
        total_size *= s
    # Fill the input with 1.0, 2.0, ... so results are deterministic.
    x = [f * 1.0 for f in range(1, total_size + 1)]
    if data_type == dtypes.bfloat16:
        # Scale down to stay within bfloat16's limited precision.
        x = [f * 0.1 for f in x]
        expected = [f * 0.1 for f in expected]
    with self.cached_session(use_gpu=use_gpu):
        t = constant_op.constant(x, shape=input_sizes, dtype=data_type)
        # Add batch and channel dims to window/strides (NDHWC layout).
        window = [1] + list(window) + [1]
        strides = [1] + list(strides) + [1]
        if data_format == 'NCDHW':
            # Convert tensor and params to channels-first for the op, then
            # convert the result back for comparison.
            t = test_util.NHWCToNCHW(t)
            window = test_util.NHWCToNCHW(window)
            strides = test_util.NHWCToNCHW(strides)
        t = pool_func(t, ksize=window, strides=strides, padding=padding, data_format=data_format)
        if data_format == 'NCDHW':
            t = test_util.NCHWToNHWC(t)
        vals = self.evaluate(t)
    actual = vals.flatten()
    # bfloat16 needs a much looser tolerance than float types.
    rtol = atol = 1e-06
    if data_type == dtypes.bfloat16:
        rtol = atol = 0.02
    self.assertAllClose(expected, actual, rtol=rtol, atol=atol)
Verifies the output values of the pooling function. Args: pool_func: Function to be called: co.MaxPool, co.AvgPool. input_sizes: Input tensor dimensions. window: Tuple of kernel dims: planes, rows, cols. strides: Tuple of strides for dims: planes, rows, cols. padding: Padding type. data_format: The data format we use to run the pooling operation. data_type: The data type to use to run the pooling operation. expected: An array containing the expected operation outputs. use_gpu: Whether to run ops on GPU.
github-repos
def __init__(self, service_name, user_name):
    """Constructor.

    Args:
        service_name: string, The name of the service under which the
            credentials are stored.
        user_name: string, The name of the user to store credentials for.
    """
    # The base class takes a lock used to serialize credential access.
    super(Storage, self).__init__(lock=threading.Lock())
    self._service_name = service_name
    self._user_name = user_name
Constructor. Args: service_name: string, The name of the service under which the credentials are stored. user_name: string, The name of the user to store credentials for.
juraj-google-style
def fft2(x):
    """Computes the 2D Fast Fourier Transform along the last two axes of input.

    Args:
        x: Tuple of the real and imaginary parts of the input tensor. Both
            tensors in the tuple should be of floating type.

    Returns:
        A tuple containing two tensors - the real and imaginary parts of the
        output.
    """
    # Defer to the symbolic op when any input is a symbolic tensor,
    # otherwise compute eagerly through the backend.
    if any_symbolic_tensors(x):
        return FFT2().symbolic_call(x)
    return backend.math.fft2(x)
Computes the 2D Fast Fourier Transform along the last two axes of input. Args: x: Tuple of the real and imaginary parts of the input tensor. Both tensors in the tuple should be of floating type. Returns: A tuple containing two tensors - the real and imaginary parts of the output. Example: >>> x = ( ... keras.ops.convert_to_tensor([[1., 2.], [2., 1.]]), ... keras.ops.convert_to_tensor([[0., 1.], [1., 0.]]), ... ) >>> fft2(x) (array([[ 6., 0.], [ 0., -2.]], dtype=float32), array([[ 2., 0.], [ 0., -2.]], dtype=float32))
github-repos
def get_dummies(self, columns, **kwargs):
    """Convert categorical variables to dummy variables for certain columns.

    Args:
        columns: The columns to convert; None means all non-numeric columns.
        **kwargs: forwarded to pandas.get_dummies.

    Returns:
        A new QueryCompiler with the dummy columns appended.
    """
    cls = type(self)
    # Mirror pandas.get_dummies: default to all non-numeric columns.
    if columns is None:
        columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])]
    if len(columns) == 0:
        return self.copy()
    elif not is_list_like(columns):
        columns = [columns]

    def set_columns(df, columns):
        df.columns = columns
        return df

    # Re-apply the real column labels to every partition first, so
    # get_dummies sees proper column names.
    set_cols = self.columns
    columns_applied = self._map_across_full_axis(1, lambda df: set_columns(df, set_cols))
    if len(columns) == len(self.columns):
        # Every column is being converted.
        def get_dummies_builder(df):
            if df is not None:
                if not df.empty:
                    return pandas.get_dummies(df, **kwargs)
                else:
                    return pandas.DataFrame([])

        func = self._prepare_method(lambda df: get_dummies_builder(df))
        new_data = columns_applied.map_across_full_axis(0, func)
        untouched_data = None
    else:
        def get_dummies_builder(df, internal_indices=[]):
            # Bug fix: `df.iloc[(:, internal_indices)]` is a syntax error;
            # positional column selection needs a plain slice.
            return pandas.get_dummies(
                df.iloc[:, internal_indices], columns=None, **kwargs)

        numeric_indices = list(self.columns.get_indexer_for(columns))
        new_data = columns_applied.apply_func_to_select_indices_along_full_axis(
            0, get_dummies_builder, numeric_indices, keep_remaining=False)
        untouched_data = self.drop(columns=columns)
    final_columns = self.compute_index(1, new_data, False)
    if len(columns) != len(self.columns):
        # Re-attach the columns that were not converted.
        new_data = untouched_data.data.concat(1, new_data)
        final_columns = untouched_data.columns.append(pandas.Index(final_columns))
    return cls(new_data, self.index, final_columns)
Convert categorical variables to dummy variables for certain columns. Args: columns: The columns to convert. Returns: A new QueryCompiler.
codesearchnet
def parse(cls: Type[MessageT], uid: int, data: bytes, permanent_flags: Iterable[Flag], internal_date: datetime, expunged: bool = False, **kwargs: Any) -> MessageT:
    """Parse raw MIME-encoded message data into a message object.

    Args:
        uid: The UID of the message.
        data: The raw contents of the message.
        permanent_flags: Permanent flags for the message.
        internal_date: The internal date of the message.
        expunged: True if this message has been expunged from the mailbox.
        **kwargs: forwarded to the class constructor.

    Returns:
        A new instance of cls wrapping the parsed content.
    """
    content = MessageContent.parse(data)
    return cls(uid, permanent_flags, internal_date, expunged, content, **kwargs)
Parse the given raw MIME-encoded email message data into a
    :class:`BaseLoadedMessage` object.

    Args:
        uid: The UID of the message.
        data: The raw contents of the message.
        permanent_flags: Permanent flags for the message.
        internal_date: The internal date of the message.
        expunged: True if this message has been expunged from the mailbox.
juraj-google-style
def gen_permutations(self, index=0, args=None):
    """Recursively iterate over layout.json parameter names to build
    all valid input permutations, and the output variables valid for each.

    TODO: Add indicator values.

    Args:
        index (int, optional): The current index position in the layout
            names list.
        args (list, optional): Defaults to None. The current list of args
            accumulated along this branch of the recursion.
    """
    if (args is None):
        args = []
    try:
        name = self.layout_json_names[index]
        display = self.layout_json_params.get(name, {}).get('display')
        input_type = self.install_json_params().get(name, {}).get('type')
        if self.validate_layout_display(self.input_table, display):
            if (input_type.lower() == 'boolean'):
                # Branch on both boolean values; record each choice in the
                # DB so downstream display clauses can be evaluated.
                for val in [True, False]:
                    args.append({'name': name, 'value': val})
                    self.db_update_record(self.input_table, name, val)
                    self.gen_permutations((index + 1), list(args))
                    # Undo this branch's choice before trying the next value.
                    args.pop()
            elif (input_type.lower() == 'choice'):
                valid_values = self.expand_valid_values(self.install_json_params().get(name, {}).get('validValues', []))
                for val in valid_values:
                    args.append({'name': name, 'value': val})
                    self.db_update_record(self.input_table, name, val)
                    self.gen_permutations((index + 1), list(args))
                    args.pop()
            else:
                # Non-branching input types contribute a single None value.
                args.append({'name': name, 'value': None})
                self.gen_permutations((index + 1), list(args))
        else:
            # Hidden input: skip it and continue with the next name.
            self.gen_permutations((index + 1), list(args))
    except IndexError:
        # Ran past the last layout name: this branch is one complete
        # permutation; record its inputs and the outputs valid for it.
        self._input_permutations.append(args)
        outputs = []
        for o_name in self.install_json_output_variables():
            if (self.layout_json_outputs.get(o_name) is not None):
                display = self.layout_json_outputs.get(o_name, {}).get('display')
                valid = self.validate_layout_display(self.input_table, display)
                if ((display is None) or (not valid)):
                    continue
            for ov in self.install_json_output_variables().get(o_name):
                outputs.append(ov)
        self._output_permutations.append(outputs)
Iterate recursively over layout.json parameter names. TODO: Add indicator values. Args: index (int, optional): The current index position in the layout names list. args (list, optional): Defaults to None. The current list of args.
codesearchnet
def _multi_worker_test(test_method):
    """Decorate test_method so that it runs in each worker.

    Uses `multi_process_runner` to simulate multiple workers. Since this
    function runs in the main process and in all worker processes, the
    decoration behaves differently in each: in the main process it spawns
    subprocesses and runs the test in each of them; in a worker process it
    executes the test like a normal test method.

    Args:
        test_method: a function which must be a test method.

    Returns:
        Decorated `test_method`. Note that the decorated function has
        additional arguments (has_chief, num_workers, num_ps, share_gpu,
        runner).
    """

    def decorator(self, has_chief, num_workers, num_ps, share_gpu, runner, **kwargs):
        # Single-worker configs, already-in-worker execution, and
        # XLA-with-PS configs run the test inline instead of spawning.
        if _num_total_workers(has_chief, num_workers) == 1 or _running_in_worker or (test_util.is_xla_enabled() and num_ps > 0):
            with _multi_worker_session(kwargs):
                test_method(self, **kwargs)
            return
        test_id = self.id()
        if runner:
            # Reuse a shared runner when provided (faster across tests).
            results = runner.run(_test_runner, args=(test_id, _env))
        else:
            # Otherwise spin up a one-off cluster for this test.
            cluster_spec = multi_worker_test_base.create_cluster_spec(has_chief=has_chief, num_workers=num_workers, num_ps=num_ps, has_eval=False)
            ephemeral_runner = multi_process_runner.MultiProcessRunner(_test_runner, cluster_spec, share_gpu=share_gpu, args=(test_id, _env), dependence_on_chief=has_chief)
            ephemeral_runner.start()
            results = ephemeral_runner.join().return_value
        # Surface the first failure; if all workers skipped, skip here too.
        skip_reason = None
        for result in results:
            if result.status == 'failure':
                self.fail(result.message)
                break
            elif result.status == 'skipped':
                skip_reason = result.message
        if skip_reason is not None:
            self.skipTest(skip_reason)
    # Extend the wrapped test's argspec so parameterized frameworks see the
    # extra cluster-shape arguments.
    argspec = tf_inspect.getfullargspec(test_method)
    decorator_args = (argspec.args or []) + ['has_chief', 'num_workers', 'num_ps', 'share_gpu', 'runner']
    decorator_argspec = argspec._replace(args=decorator_args)
    return tf_decorator.make_decorator(test_method, decorator, decorator_argspec=decorator_argspec)
Decorate test_method so that it runs in each worker. We use `multi_process_runner` to simulate multiple workers. Since we run the this function in the main process and all worker processes, this decoration behaves differently in the main process and worker procssses. In the main process, it spawns subprocesses and runs the test on each of them; in a worker process, it executes test in the same way as a normal test, e.g. setUp()/tearDown() are called before/after the test. Args: test_method: a function which must be a test method. Returns: Decorated `test_method`. Note that the decorated function has additional arguments.
github-repos
def expect_end(self):
    """Wait for the running program to finish.

    Returns:
        A tuple of the exit code, as reported by the operating system, and
        the output produced.

    Raises:
        TerminationException: when the process ends with EOF.
        TimeoutException: when waiting for the end times out.
        NestedException: for any other failure while waiting.
    """
    logger.debug("Waiting for termination of '{0}'".format(self.name))
    try:
        # Consume output until EOF, then reap the process.
        self._spawn.expect(pexpect.EOF)
        self._spawn.wait()
        dircontent = str(os.listdir(self.job.working_dir))
        logger.debug(('Working directory after execution: ' + dircontent))
        return (self.get_exitstatus(), self.get_output())
    except pexpect.exceptions.EOF as e:
        logger.debug('Raising termination exception.')
        raise TerminationException(instance=self, real_exception=e, output=self.get_output())
    except pexpect.exceptions.TIMEOUT as e:
        logger.debug('Raising timeout exception.')
        raise TimeoutException(instance=self, real_exception=e, output=self.get_output())
    except Exception as e:
        # Wrap anything unexpected so callers get the captured output too.
        logger.debug('Waiting for expected program end failed.')
        raise NestedException(instance=self, real_exception=e, output=self.get_output())
Wait for the running program to finish. Returns: A tuple with the exit code, as reported by the operating system, and the output produced.
codesearchnet
def plot(self, data, height=1000, render_large_data=False):
    """Plots a detail view of data with a Facets Dive element.

    Args:
        data: a Pandas DataFrame.
        height: the height of the rendered output in pixels.
        render_large_data: allow rendering more than 10000 rows.

    Returns:
        An IPython HTML object embedding the facets-dive visualization.

    Raises:
        ValueError: if data is not a DataFrame, or is too large and
            render_large_data is False.
    """
    import IPython
    if not isinstance(data, pd.DataFrame):
        raise ValueError('Expect a DataFrame.')
    if (len(data) > 10000) and (not render_large_data):
        raise ValueError('Facets dive may not work well with more than 10000 rows. ' +
                         'Reduce data or set "render_large_data" to True.')
    jsonstr = data.to_json(orient='records')
    html_id = 'f' + datalab.utils.commands.Html.next_id()
    # NOTE(review): the template below was truncated in this copy of the
    # file (cut mid-string inside the <script> block); it is reconstructed
    # here to bind the serialized records to the facets-dive element.
    # Confirm against the original source.
    HTML_TEMPLATE = """
    <link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html">
    <facets-dive id="{html_id}" height="{height}"></facets-dive>
    <script>
      var data = {jsonstr};
      document.querySelector("#{html_id}").data = data;
    </script>"""
    html = HTML_TEMPLATE.format(html_id=html_id, jsonstr=jsonstr, height=height)
    return IPython.core.display.HTML(html)
Plots a detail view of data. Args: data: a Pandas dataframe. height: the height of the output.
codesearchnet
def overwrite_view_source(project, dir_path):
    """Replace the top "source" link in a project's built HTML files with a
    link to the documentation's home, which is mkdoc's home.

    Args:
        project (str): project to update.
        dir_path (pathlib.Path): this file's path.
    """
    html_dir = dir_path / project / HTML_LOCATION
    if not html_dir.exists():
        return

    # Only touch files whose suffix contains "html".
    for html_file in [p for p in html_dir.iterdir() if "html" in p.suffix]:
        with open(html_file, "r") as handle:
            lines = handle.readlines()

        # Swap out only the first matching line, then stop scanning.
        for position, line in enumerate(lines):
            if TO_REPLACE_WITH_HOME in line:
                lines[position] = NEW_HOME_LINK
                break

        with open(html_file, "w") as handle:
            handle.writelines(lines)
In the project's index.html built file, replace the top "source" link with a link to the documentation's home, which is mkdoc's home Args: project (str): project to update dir_path (pathlib.Path): this file's path
juraj-google-style
def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None): def _callback(downloaded, total): if (total is 0) or (downloaded == total): return progress = downloaded*100/total sys.stderr.write('\r[{0}] {1}%'.format(' sys.stderr.flush() m = _URI_RE.match(s3_path) bucket_name = m.group(1) bucket = boto_conn.get_bucket(bucket_name) retries = 6 if s3_path.endswith('/') is False: key_name = m.group(2) key_instance = bucket.get_key(key_name) while key_instance is None and retries > 0: retries = retries - 1 log.info("Results file is not available on s3. Retry: " + str(6-retries)) time.sleep(10) key_instance = bucket.get_key(key_name) if key_instance is None: raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.") log.info("Downloading file from %s" % s3_path) if delim is None: try: key_instance.get_contents_to_file(fp) except boto.exception.S3ResponseError as e: if (e.status == 403): log.warn("Access denied while fetching the s3 object. Retrying without specifying the version....") key_instance.open() fp.write(key_instance.read()) key_instance.close() else: raise else: _read_iteratively(key_instance, fp, delim=delim) else: key_prefix = m.group(2) bucket_paths = bucket.list(key_prefix) for one_path in bucket_paths: name = one_path.name if name.endswith('$folder$'): continue log.info("Downloading file from %s" % name) if delim is None: one_path.get_contents_to_file(fp) else: _read_iteratively(one_path, fp, delim=delim)
Downloads the contents of all objects in s3_path into fp Args: `boto_conn`: S3 connection object `s3_path`: S3 path to be downloaded `fp`: The file object where data is to be downloaded
juraj-google-style
def list_windowsfeatures():
    """Instructs Chocolatey to pull a full package list from the Windows
    Features list, via the Deployment Image Servicing and Management tool.

    Returns:
        str: List of Windows Features.

    Raises:
        CommandExecutionError: if chocolatey exits non-zero.

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.list_windowsfeatures
    """
    choc_path = _find_chocolatey(__context__, __salt__)
    cmd = [choc_path, 'list', '--source', 'windowsfeatures']
    result = __salt__['cmd.run_all'](cmd, python_shell=False)
    if (result['retcode'] != 0):
        # Note: the error message reports stdout, where chocolatey writes
        # its failure details.
        raise CommandExecutionError('Running chocolatey failed: {0}'.format(result['stdout']))
    return result['stdout']
Instructs Chocolatey to pull a full package list from the Windows Features list, via the Deployment Image Servicing and Management tool. Returns: str: List of Windows Features CLI Example: .. code-block:: bash salt '*' chocolatey.list_windowsfeatures
codesearchnet
def enter_section(self, section_id):
    """Enters a regular section.

    Regular sections admit exit jumps, which end the section.

    Args:
        section_id: Hashable; the same node that will be used in later
            calls registering exit nodes for this section.
    """
    exits = self.exits
    # A section may only be entered once.
    assert section_id not in exits
    exits[section_id] = set()
Enters a regular section. Regular sections admit exit jumps, which end the section. Args: section_id: Hashable, the same node that will be used in calls to the ast_node arg passed to add_exit_node
github-repos
def is_emulator(self):
    """Whether this device is probably an emulator.

    Returns:
        True if this is probably an emulator.
    """
    # Emulator serial numbers follow a known pattern.
    if EMULATOR_SERIAL_REGEX.match(self.serial):
        return True
    # Otherwise fall back to the build properties.
    info = self.build_info
    if info['build_characteristics'] == 'emulator':
        return True
    return info['hardware'] in ('ranchu', 'goldfish', 'cutf_cvm')
Whether this device is probably an emulator. Returns: True if this is probably an emulator.
github-repos
def _ScanEncryptedVolume(self, scan_context, scan_node):
    """Scans an encrypted volume scan node for volume and file systems.

    Args:
        scan_context (SourceScannerContext): source scanner context.
        scan_node (SourceScanNode): volume scan node.

    Raises:
        SourceScannerError: if the scan node is invalid or there are no
            credentials defined for the format.
    """
    if not scan_node or not scan_node.path_spec:
        raise errors.SourceScannerError('Invalid or missing scan node.')

    credentials = credentials_manager.CredentialsManager.GetCredentials(
        scan_node.path_spec)
    if not credentials:
        raise errors.SourceScannerError('Missing credentials for scan node.')

    # Index the preconfigured credentials by type for quick lookup below.
    credentials_dict = {
        credential_type: credential_data
        for credential_type, credential_data in self._credentials}

    # Try each credential type the volume format supports until one unlocks
    # the volume.
    is_unlocked = False
    for credential_type in credentials.CREDENTIALS:
      credential_data = credentials_dict.get(credential_type, None)
      if not credential_data:
        continue

      is_unlocked = self._source_scanner.Unlock(
          scan_context, scan_node.path_spec, credential_type, credential_data)
      if is_unlocked:
        break

    # Fall back to prompting the user interactively.
    if not is_unlocked:
      is_unlocked = self._PromptUserForEncryptedVolumeCredential(
          scan_context, scan_node, credentials)

    # Only continue scanning inside the volume once it is unlocked.
    if is_unlocked:
      self._source_scanner.Scan(
          scan_context, scan_path_spec=scan_node.path_spec)
Scans an encrypted volume scan node for volume and file systems. Args: scan_context (SourceScannerContext): source scanner context. scan_node (SourceScanNode): volume scan node. Raises: SourceScannerError: if the format of or within the source is not supported, the scan node is invalid or there are no credentials defined for the format.
juraj-google-style
def MethodCalled(self, mock_method):
    """Remove a method call from the group.

    If the method is not in the set, an UnexpectedMethodCallError will be
    raised.

    Args:
        mock_method: a mock method that should be equal to a method in the
            group.

    Returns:
        The mock method from the group.

    Raises:
        UnexpectedMethodCallError: if the mock_method was not in the group.
    """
    # If the method matches a member of this group, record the call and
    # requeue the group so remaining members can still be matched.
    for method in self._methods:
      if method == mock_method:
        self._methods_called.add(mock_method)
        mock_method._call_queue.appendleft(self)
        return self, method

    # Not part of this group: if the group's expectations are already
    # satisfied, move on to the next expected method; otherwise fail.
    if self.IsSatisfied():
      next_method = mock_method._PopNextMethod();
      return next_method, None
    else:
      raise UnexpectedMethodCallError(mock_method, self)
Remove a method call from the group. If the method is not in the set, an UnexpectedMethodCallError will be raised. Args: mock_method: a mock method that should be equal to a method in the group. Returns: The mock method from the group Raises: UnexpectedMethodCallError if the mock_method was not in the group.
juraj-google-style
def add_tile(self, address, tile):
    """Add a tile to handle all RPCs at a given address.

    Args:
        address (int): The address of the tile.
        tile (RPCDispatcher): A tile object that inherits from
            RPCDispatcher.

    Raises:
        ArgumentError: if a tile is already registered at address.
    """
    registered = self._tiles
    # Each address may be claimed by at most one tile.
    if address in registered:
        raise ArgumentError("Tried to add two tiles at the same address",
                            address=address)

    registered[address] = tile
Add a tile to handle all RPCs at a given address. Args: address (int): The address of the tile tile (RPCDispatcher): A tile object that inherits from RPCDispatcher
juraj-google-style
def _RegisterProcess(self, process): if (process is None): raise ValueError('Missing process.') if (process.pid in self._processes_per_pid): raise KeyError('Already managing process: {0!s} (PID: {1:d})'.format(process.name, process.pid)) self._processes_per_pid[process.pid] = process
Registers a process with the engine. Args: process (MultiProcessBaseProcess): process. Raises: KeyError: if the process is already registered with the engine. ValueError: if the process is missing.
codesearchnet
def update_task_ids(self, encoder_vocab_size):
    """Generate task_ids for each problem.

    These ids correspond to the index of the task in the task_list, offset
    by the encoder vocabulary size.

    Args:
        encoder_vocab_size: the size of the vocab which is used to compute
            the index offset.
    """
    for position, task in enumerate(self.task_list):
        task.set_task_id(position + encoder_vocab_size)
        tf.logging.info("Task %d (%s) has id %d." % (position, task.name, task.task_id))
Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset.
juraj-google-style
def _login(self, max_tries=2):
    """Logs in to Kindle Cloud Reader.

    Args:
        max_tries: The maximum number of login attempts that will be made.

    Raises:
        BrowserError: If method called when browser not at a signin URL.
        LoginError: If login unsuccessful after `max_tries` attempts.
    """
    if (not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL)):
        raise BrowserError(('Current url "%s" is not a signin url ("%s")' % (self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL)))
    # Wait for the email input to exist before interacting with the form.
    email_field_loaded = (lambda br: br.find_elements_by_id('ap_email'))
    self._wait().until(email_field_loaded)
    tries = 0
    while (tries < max_tries):
        # (Re)fill both credential fields from scratch on every attempt.
        email_elem = self.find_element_by_id('ap_email')
        email_elem.clear()
        email_elem.send_keys(self._uname)
        pword_elem = self.find_element_by_id('ap_password')
        pword_elem.clear()
        pword_elem.send_keys(self._pword)

        def creds_entered(_):
            'Returns whether the credentials were properly entered.'
            email_ok = (email_elem.get_attribute('value') == self._uname)
            pword_ok = (pword_elem.get_attribute('value') == self._pword)
            return (email_ok and pword_ok)
        kcr_page_loaded = (lambda br: (br.title == u'Kindle Cloud Reader'))
        try:
            # Confirm the fields took the keystrokes, submit, and wait for
            # the reader page; a timeout in any step counts as one failure.
            self._wait(5).until(creds_entered)
            self.find_element_by_id('signInSubmit-input').click()
            self._wait(5).until(kcr_page_loaded)
        except TimeoutException:
            tries += 1
        else:
            return
    raise LoginError
Logs in to Kindle Cloud Reader. Args: max_tries: The maximum number of login attempts that will be made. Raises: BrowserError: If method called when browser not at a signin URL. LoginError: If login unsuccessful after `max_tries` attempts.
codesearchnet
async def make_request(self, redirect=False):
    """Central hub for preparing, sending and receiving the HTTP request.

    Pokes through self's attributes and decides what headers, cookies,
    auth and body to send.

    Args:
        redirect: True when this call is a follow-up to a redirect.

    Returns:
        sock: The socket to be returned to the calling session's pool
            (None when streaming).
        Response: The response object, after any redirects.
    """
    h11_connection = h11.Connection(our_role=h11.CLIENT)
    (self.scheme, self.host, self.path, self.uri_parameters, self.query, _) = urlparse(self.uri)
    if (not redirect):
        # Remember where the request chain started so redirects to another
        # origin can be detected later.
        self.initial_scheme = self.scheme
        self.initial_netloc = self.host
    # Only include an explicit port in the Host header for non-default ports.
    host = (self.host if ((self.port == '80') or (self.port == '443')) else ((self.host.split(':')[0] + ':') + self.port))
    asks_headers = c_i_dict([('Host', host), ('Connection', 'keep-alive'), ('Accept-Encoding', 'gzip, deflate'), ('Accept', '*/*'), ('Content-Length', '0'), ('User-Agent', 'python-asks/2.2.2')])
    if (self.persist_cookies is not None):
        self.cookies.update(self.persist_cookies.get_additional_cookies(self.host, self.path))
    self._build_path()
    body = ''
    # Any of data/files/json implies a request body; json may legitimately
    # be falsy, hence the explicit None check.
    if any((self.data, self.files, (self.json is not None))):
        (content_type, content_len, body) = (await self._formulate_body())
        asks_headers['Content-Type'] = content_type
        asks_headers['Content-Length'] = content_len
    # User-supplied headers override the defaults above.
    if (self.headers is not None):
        asks_headers.update(self.headers)
    if (self.auth is not None):
        asks_headers.update((await self._auth_handler_pre()))
        asks_headers.update((await self._auth_handler_post_get_auth()))
    if self.cookies:
        cookie_str = ''
        for (k, v) in self.cookies.items():
            cookie_str += '{}={}; '.format(k, v)
        # Strip the trailing space of the last "; " separator.
        asks_headers['Cookie'] = cookie_str[:(- 1)]
    if body:
        if (not isinstance(body, bytes)):
            body = bytes(body, self.encoding)
        asks_headers['Content-Length'] = str(len(body))
        req_body = h11.Data(data=body)
    else:
        req_body = None
    req = h11.Request(method=self.method, target=self.path, headers=asks_headers.items())
    response_obj = (await self._request_io(req, req_body, h11_connection))
    if redirect:
        # A cross-origin redirect means this socket cannot be reused.
        if (not ((self.scheme == self.initial_scheme) and (self.host == self.initial_netloc))):
            self.sock._active = False
    if self.streaming:
        return (None, response_obj)
    return (self.sock, response_obj)
Acts as the central hub for preparing requests to be sent, and returning them upon completion. Generally just pokes through self's attribs and makes decisions about what to do. Returns: sock: The socket to be returned to the calling session's pool. Response: The response object, after any redirects. If there were redirects, the redirect responses will be stored in the final response object's `.history`.
codesearchnet
def _readline(self):
    """Read exactly one line from the device, nonblocking.

    Returns:
        str: the next complete line, or None when no full line is
        available yet.
    """
    # More than one buffered entry means at least one complete line exists
    # (the final entry may be a partial line).
    if (len(self.lines) > 1):
        return self.lines.pop(0)
    tail = ''
    if len(self.lines):
        # Keep the trailing partial line so new data can complete it.
        tail = self.lines.pop()
    try:
        tail += self._read()
    except socket.error:
        logging.exception('No new data')
        time.sleep(0.1)
    self.lines += linesepx.split(tail)
    # Re-check after the read: return a line only if one is now complete.
    if (len(self.lines) > 1):
        return self.lines.pop(0)
Read exactly one line from the device, nonblocking. Returns: None on no data
codesearchnet
def split(content: AsyncIterable[_T], *, n: int=2, with_copy: bool=False) -> tuple[AsyncIterable[_T], ...]:
    """Split a stream into `n` identical streams.

    Recommended to be used with processor.context to ensure error
    propagation.

    Args:
        content: content to be split.
        n: number of streams to return.
        with_copy: whether to deep-copy the items of the streams. Copy when
            side effects between streams can happen (e.g. one consumer
            mutates a part in place).

    Returns:
        n streams of content.

    Raises:
        ValueError: if n=0.
    """
    if n == 0:
        raise ValueError('Cannot split a stream in n=0 streams.')
    if n == 1:
        # Nothing to fan out; hand back the original stream.
        return (content,)
    queues = [asyncio.Queue() for _ in range(n)]

    async def enqueue() -> None:
        # Pump every item from the source into each consumer queue.
        async for part in content:
            for queue in queues:
                if with_copy:
                    queue.put_nowait(copy.deepcopy(part))
                else:
                    queue.put_nowait(part)
        # None acts as the end-of-stream sentinel for every consumer.
        for queue in queues:
            queue.put_nowait(None)

    async def dequeue(queue: asyncio.Queue[_T]) -> AsyncIterable[_T]:
        while (part := (await queue.get())) is not None:
            yield part
    # Run the pump as a background task tied to the processor context.
    context.create_task(enqueue())
    return tuple((dequeue(queue) for queue in queues))
Split a stream into `n` identical streams. Recommended to be used with processor.context to ensure error propagation. Args: content: content to be split n: number of streams to return with_copy: whether to copy the items of the streams or not. It is recommended to copy the items when side effects between streams can happen. This is the case when one processor changes a part in place (e.g. update its metadata). As this can be expensive if the items are large and the number of streams is high, the default is to not copy. Consider setting this to True if there is a chance that a part can be modified in place. Returns: n streams of content. Raises: ValueError if n=0
github-repos
def find_overlaps(self, index=False):
    """Find overlaps in a striplog.

    Args:
        index (bool): If True, returns indices of intervals with overlaps
            after them.

    Returns:
        Striplog: A striplog of all the overlaps as intervals.
    """
    # Overlaps are incongruities where one interval extends past the start
    # of the next (operator.gt); the shared helper does the work.
    return self.__find_incongruities(op=operator.gt, index=index)
Find overlaps in a striplog.

        Args:
            index (bool): If True, returns indices of intervals with
                overlaps after them.

        Returns:
            Striplog: A striplog of all the overlaps as intervals.
codesearchnet
def _find_experiment_tag(self):
    """Finds the experiment associated with the metadata.EXPERIMENT_TAG tag.

    Caches the experiment if it was found.

    Returns:
        The experiment or None if no such experiment is found.
    """
    with self._experiment_from_tag_lock:
        # Only scan the multiplexer on the first call; later calls return
        # the cached result.
        if (self._experiment_from_tag is None):
            mapping = self.multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
            for tag_to_content in mapping.values():
                if (metadata.EXPERIMENT_TAG in tag_to_content):
                    self._experiment_from_tag = metadata.parse_experiment_plugin_data(tag_to_content[metadata.EXPERIMENT_TAG])
                    break
    return self._experiment_from_tag
Finds the experiment associated with the metadata.EXPERIMENT_TAG tag.

    Caches the experiment if it was found.

    Returns:
      The experiment or None if no such experiment is found.
codesearchnet
def check_syntax(self, app_path=None):
    """Run syntax validation on each ".py" and ".json" file.

    Python files are parsed with ``ast`` (without executing them); JSON
    files are parsed with ``json.load``. Per-file pass/fail results are
    appended to ``self.validation_data['fileSyntax']`` and any failure
    messages to ``self.validation_data['errors']``.

    Args:
        app_path (str, optional): Defaults to None. The directory containing
            the files to validate; falls back to the current directory.
    """
    app_path = app_path or '.'

    for filename in sorted(os.listdir(app_path)):
        error = None
        status = True
        # BUG FIX: the file must be opened relative to app_path, not the
        # current working directory (listdir(app_path) yields bare names).
        filepath = os.path.join(app_path, filename)
        if filename.endswith('.py'):
            try:
                # Parse without executing to detect syntax errors only.
                with open(filepath, 'rb') as f:
                    ast.parse(f.read(), filename=filename)
            except SyntaxError:
                status = False
                # Keep just the last few traceback lines as a compact summary.
                e = [line.strip() for line in traceback.format_exc().split('\n')[-5:-2]]
                error = ' '.join(e)
        elif filename.endswith('.json'):
            try:
                with open(filepath, 'r') as fh:
                    json.load(fh)
            except ValueError as e:
                # json.JSONDecodeError subclasses ValueError.
                status = False
                error = e
        else:
            # Not a file type we validate.
            continue

        if error:
            self.validation_data['errors'].append(
                'Syntax validation failed for {} ({}).'.format(filename, error)
            )
        self.validation_data['fileSyntax'].append({'filename': filename, 'status': status})
Run syntax on each ".py" and ".json" file. Args: app_path (str, optional): Defaults to None. The path of Python files.
juraj-google-style
def get_session(region, profile=None):
    """Create a boto3 session with an assume-role credential cache.

    Args:
        region (str): The region for the session.
        profile (str): The profile for the session; falls back to the
            module-level default profile when omitted.

    Returns:
        boto3.session.Session: A session whose assume-role provider uses
        the shared credential cache and prompts via ui.getpass.
    """
    if (profile is None):
        logger.debug('No AWS profile explicitly provided. Falling back to default.')
        profile = default_profile
    logger.debug(('Building session using profile "%s" in region "%s"' % (profile, region)))
    session = boto3.Session(region_name=region, profile_name=profile)
    # Reach into botocore's credential resolver to wire the shared
    # assume-role cache and an interactive prompt into this session.
    c = session._session.get_component('credential_provider')
    provider = c.get_provider('assume-role')
    provider.cache = credential_cache
    provider._prompter = ui.getpass
    return session
Creates a boto3 session with a cache Args: region (str): The region for the session profile (str): The profile for the session Returns: :class:`boto3.session.Session`: A boto3 session with credential caching
codesearchnet
def VerifyStructure(self, parser_mediator, line):
    """Verify that this is an apache access log file.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        line (str): line from the text file.

    Returns:
        bool: True if at least one of the known line structures matches.
    """
    # Evaluate every known grammar against the line; the line is accepted
    # when any single grammar matches.
    match_results = [structure.matches(line) for _, structure in self.LINE_STRUCTURES]
    return max(match_results)
Verifies that this is an apache access log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from the text file. Returns: bool: True if this is the correct parser, False otherwise.
juraj-google-style
def receive_data(socket):
    """Receive an answer from the daemon and return the response.

    Args:
        socket (socket.socket): A socket that is connected to the daemon.

    Returns:
        dict or str: The unpickled answer.
    """
    answer = b""
    while True:
        packet = socket.recv(4096)
        if not packet:
            # Peer closed the connection; the message is complete.
            break
        answer += packet

    # SECURITY NOTE(review): pickle.loads on data read from a socket can
    # execute arbitrary code if the peer is untrusted -- acceptable only if
    # the daemon is local and trusted; confirm.
    response = pickle.loads(answer)
    socket.close()
    return response
Receive an answer from the daemon and return the response. Args: socket (socket.socket): A socket that is connected to the daemon. Returns: dict or str: The unpickled answer.
juraj-google-style
def symbolize(flt: float) -> sympy.Symbol:
    """Attempt to convert a real number into a simpler symbolic form.

    First tries to rationalize the value directly; if that fails, tries
    to express it as a rational multiple of pi.

    Returns:
        A sympy Symbol. (Convert to string with str(sym) or to latex with
        sympy.latex(sym))

    Raises:
        ValueError: If the float cannot be simplified.
    """
    try:
        # Plain rational case, e.g. 0.5 -> 1/2.
        return sympy.simplify(rationalize(flt))
    except ValueError:
        # Not a simple rational; try a rational multiple of pi instead.
        return sympy.simplify(rationalize(flt / np.pi)) * sympy.pi
Attempt to convert a real number into a simpler symbolic representation. Returns: A sympy Symbol. (Convert to string with str(sym) or to latex with sympy.latex(sym) Raises: ValueError: If cannot simplify float
codesearchnet
def devices(self):
    """Get the list of device names.

    Returns:
        (`list` of `str`) names of the devices.
    """
    return self._device_names
Get the list of device names. Returns: (`list` of `str`) names of the devices.
github-repos
def _ReadRecord(self, tables, file_object, record_offset, record_type):
    """Reads the record.

    Args:
        tables (dict[int, KeychainDatabaseTable]): tables per identifier.
        file_object (file): file-like object.
        record_offset (int): offset of the record relative to the start of
            the file.
        record_type (int): record type, which should correspond to a
            relation identifier of a table defined in the schema.

    Raises:
        ParseError: if the record cannot be read.
    """
    table = tables.get(record_type, None)
    if (not table):
        # NOTE(review): '0x{0:08}' lacks the 'x' conversion, so this prints
        # zero-padded decimal, not hex -- probably '0x{0:08x}' was intended.
        raise errors.ParseError('Missing table for relation identifier: 0x{0:08}'.format(record_type))

    record_header = self._ReadRecordHeader(file_object, record_offset)
    record = collections.OrderedDict()
    if table.columns:
        # The per-column value offsets follow the 24-byte record header.
        attribute_value_offsets = self._ReadRecordAttributeValueOffset(file_object, (record_offset + 24), len(table.columns))

    file_offset = file_object.tell()
    record_data_offset = (file_offset - record_offset)
    # Remaining record bytes after header and offset table.
    record_data_size = (record_header.data_size - (file_offset - record_offset))
    record_data = file_object.read(record_data_size)

    if (record_header.key_data_size > 0):
        record['_key_'] = record_data[:record_header.key_data_size]

    if table.columns:
        for (index, column) in enumerate(table.columns):
            # Dispatch on the column's attribute data type; columns with no
            # registered read function yield a None value.
            attribute_data_read_function = self._ATTRIBUTE_DATA_READ_FUNCTIONS.get(column.attribute_data_type, None)
            if attribute_data_read_function:
                attribute_data_read_function = getattr(self, attribute_data_read_function, None)
            if (not attribute_data_read_function):
                attribute_value = None
            else:
                attribute_value = attribute_data_read_function(record_data, record_offset, record_data_offset, attribute_value_offsets[index])
            record[column.attribute_name] = attribute_value

    table.records.append(record)
Reads the record. Args: tables (dict[int, KeychainDatabaseTable]): tables per identifier. file_object (file): file-like object. record_offset (int): offset of the record relative to the start of the file. record_type (int): record type, which should correspond to a relation identifier of a table defined in the schema. Raises: ParseError: if the record cannot be read.
codesearchnet
def delete(self, remove_tombstone=True):
    """Delete the resource.

    Args:
        remove_tombstone (bool): If True, will remove tombstone at
            uri/fcr:tombstone when removing resource.

    Returns:
        bool: Always True.
    """
    response = self.repo.api.http_request('DELETE', self.uri)

    # 204 No Content confirms the delete; clear local state to match.
    if (response.status_code == 204):
        self._empty_resource_attributes()

    if remove_tombstone:
        self.repo.api.http_request('DELETE', ('%s/fcr:tombstone' % self.uri))

    return True
Method to delete resources. Args: remove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource. Returns: (bool)
codesearchnet
def get_all_pipelines(app=''):
    """Get a list of all the Pipelines in _app_.

    Args:
        app (str): Name of Spinnaker Application.

    Returns:
        The parsed JSON body (Pipelines) returned by Gate, not the raw
        Response object.

    Raises:
        AssertionError: If the Gate request did not succeed.
    """
    url = '{host}/applications/{app}/pipelineConfigs'.format(host=API_URL, app=app)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)

    # NOTE(review): assert is stripped under `python -O`; an explicit check
    # raising an exception would be more robust -- confirm intent.
    assert response.ok, 'Could not retrieve Pipelines for {0}.'.format(app)

    pipelines = response.json()
    LOG.debug('Pipelines:\n%s', pipelines)

    return pipelines
Get a list of all the Pipelines in _app_. Args: app (str): Name of Spinnaker Application. Returns: requests.models.Response: Response from Gate containing Pipelines.
juraj-google-style
def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None):
    """Create an empty dataframe.

    Args:
        bqstorage_client (Any): Ignored. Added for compatibility with RowIterator.
        dtypes (Any): Ignored. Added for compatibility with RowIterator.
        progress_bar_type (Any): Ignored. Added for compatibility with RowIterator.

    Returns:
        pandas.DataFrame: An empty :class:`~pandas.DataFrame`.

    Raises:
        ValueError: If the optional pandas dependency is not installed.
    """
    # Fail loudly when pandas is missing rather than returning nothing.
    if pandas is None:
        raise ValueError(_NO_PANDAS_ERROR)
    return pandas.DataFrame()
Create an empty dataframe. Args: bqstorage_client (Any): Ignored. Added for compatibility with RowIterator. dtypes (Any): Ignored. Added for compatibility with RowIterator. progress_bar_type (Any): Ignored. Added for compatibility with RowIterator. Returns: pandas.DataFrame: An empty :class:`~pandas.DataFrame`.
codesearchnet
def _do_logon(self):
    """Log on, unconditionally. This can be used to re-logon.

    Requires a userid; the password may be supplied directly or obtained
    lazily via the configured password prompt callback.

    Raises:
        ClientAuthError: If userid or password is not available.
    """
    if (self._userid is None):
        raise ClientAuthError('Userid is not provided.')
    if (self._password is None):
        if self._get_password:
            # Lazily obtain the password via the configured callback.
            self._password = self._get_password(self._host, self._userid)
        else:
            raise ClientAuthError('Password is not provided.')

    logon_uri = '/api/sessions'
    logon_body = {'userid': self._userid, 'password': self._password}

    # Drop any stale session header before establishing a new session.
    self._headers.pop('X-API-Session', None)
    self._session = self._new_session(self.retry_timeout_config)
    logon_res = self.post(logon_uri, logon_body, logon_required=False)
    self._session_id = logon_res['api-session']
    self._headers['X-API-Session'] = self._session_id
Log on, unconditionally. This can be used to re-logon. This requires credentials to be provided. Raises: :exc:`~zhmcclient.ClientAuthError` :exc:`~zhmcclient.ServerAuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.HTTPError`
codesearchnet
def modified_lu(q):
    """Perform a modified LU decomposition of a matrix.

    This takes a matrix q with orthonormal columns, returns L, U, S such
    that q - S = L * U (with S a diagonal sign correction).

    Args:
        q: A two dimensional orthonormal matrix q (a distributed object
            providing ``assemble()``).

    Returns:
        A tuple of a lower triangular matrix L (redistributed via the
        object store), an upper triangular matrix U, and a vector S
        representing a diagonal matrix such that q - S = L * U.
    """
    q = q.assemble()
    m, b = q.shape[0], q.shape[1]
    S = np.zeros(b)

    q_work = np.copy(q)

    for i in range(b):
        # Shift the diagonal entry away from zero by the opposite of its
        # sign, then perform a standard LU elimination step on the column.
        S[i] = -1 * np.sign(q_work[i, i])
        q_work[i, i] -= S[i]
        q_work[(i + 1):m, i] /= q_work[i, i]
        q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i], q_work[i, (i + 1):b])

    # L: unit lower triangular factor; U: upper triangular factor.
    L = np.tril(q_work)
    for i in range(b):
        L[i, i] = 1
    U = np.triu(q_work)[:b, :]

    return ray.get(core.numpy_to_dist.remote(ray.put(L))), U, S
Perform a modified LU decomposition of a matrix. This takes a matrix q with orthonormal columns, returns l, u, s such that q - s = l * u. Args: q: A two dimensional orthonormal matrix q. Returns: A tuple of a lower triangular matrix l, an upper triangular matrix u, and a vector representing a diagonal matrix s such that q - s = l * u.
juraj-google-style
def _convert_pandas_csv_options(pandas_options, columns): _columns = pandas_options.pop('names', columns) header = pandas_options.pop('header', None) pandas_options.pop('encoding', None) if header == 'infer': header_line_number = 0 if not bool(_columns) else None else: header_line_number = header return _columns, header_line_number
Translate `pd.read_csv()` options into `pd.DataFrame()` options, especially for the header. Args: pandas_options (dict): pandas options like {'header': None}. columns (list): list of column names.
juraj-google-style
def bruteVersionStr(self, valu):
    """Brute force the version out of a string.

    Args:
        valu (str): String to attempt to get version information for.

    Notes:
        This first attempts to parse strings using the it:semver
        normalization before attempting to extract version parts out of
        the string.

    Returns:
        int, dict: The system normalized version integer and a subs
        dictionary.

    Raises:
        s_exc.BadTypeValu: If no version parts can be extracted at all.
    """
    try:
        valu, info = self.core.model.type('it:semver').norm(valu)
        subs = info.get('subs')
        return valu, subs
    except s_exc.BadTypeValu:
        # Not valid semver; fall back to best-effort part extraction.
        subs = s_version.parseVersionParts(valu)
        if subs is None:
            raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr', mesg='Unable to brute force version parts out of the string')
        if subs:
            # Repack whatever parts were found into the system version int.
            valu = s_version.packVersion(subs.get('major'), subs.get('minor', 0), subs.get('patch', 0))
        return valu, subs
Brute force the version out of a string. Args: valu (str): String to attempt to get version information for. Notes: This first attempts to parse strings using the it:semver normalization before attempting to extract version parts out of the string. Returns: int, dict: The system normalized version integer and a subs dictionary.
juraj-google-style
def get_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
    """Get mapping of items to their reachable leaves.

    Leaves having inactive relations to other items are omitted.

    Args:
        item_ids (list): items which are taken as roots for the reachability
        language (str): if specified, filter out items which are not
            available in the given language
        forbidden_item_ids: items that must not be traversed or returned
            as leaves

    Returns:
        dict: item id -> set of reachable leaf items
    """
    forbidden_item_ids = (set() if (forbidden_item_ids is None) else set(forbidden_item_ids))
    children = self.get_children_graph(item_ids, language=language, forbidden_item_ids=forbidden_item_ids)
    # Child counts with active=None, i.e. counting inactive relations too.
    counts = self.get_children_counts(active=None)
    if (item_ids is None):
        item_ids = set(children.keys())

    def _get_leaves(item_id):
        leaves = set()

        def __search(item_ids):
            # Expand the frontier one level; anything absent from the
            # children graph is a candidate leaf.
            result = set(flatten([children.get(item_id, []) for item_id in item_ids]))
            new_leaves = {item_id for item_id in result if (item_id not in children.keys())}
            leaves.update(new_leaves)
            return (result - new_leaves)

        # Fixed-point traversal from the root until no new nodes to visit.
        fixed_point(is_zero=(lambda to_visit: (len(to_visit) == 0)), minus=(lambda to_visit, visited: (to_visit - visited)), plus=(lambda visited_x, visited_y: (visited_x | visited_y)), f=__search, x={item_id})

        # Only items with zero children overall count as true leaves.
        leaves = {leaf for leaf in leaves if (counts[leaf] == 0)}
        if (len(leaves) > 0):
            return leaves
        if ((counts[item_id] == 0) and (item_id not in forbidden_item_ids)):
            # The root itself is a leaf.
            return {item_id}
        return set()

    return {item_id: _get_leaves(item_id) for item_id in item_ids}
Get mapping of items to their reachable leaves. Leaves having inactive relations to other items are omitted. Args: item_ids (list): items which are taken as roots for the reachability language (str): if specified, filter out items which are not available in the given language Returns: dict: item id -> list of items (reachable leaves)
codesearchnet
def forward(self, hidden_states: torch.Tensor):
    """Apply RMS normalization followed by a learned per-channel scale.

    Args:
        hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_norm)`):
            Input activations; the last dimension must equal ``self.dim_norm``.

    Returns:
        `torch.Tensor`: Normalized, scaled tensor cast back to the input dtype.

    Raises:
        AssertionError: If the last dimension does not match ``self.dim_norm``.
    """
    if hidden_states.size(-1) != self.dim_norm:
        raise AssertionError('hidden_states.size(-1) != self.dim_norm')
    input_dtype = hidden_states.dtype
    # Compute the RMS statistic in float32 for numerical stability.
    variance = hidden_states.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
    normalized = (hidden_states * torch.rsqrt(variance + self.eps)).to(input_dtype)
    return normalized * self.weight
Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)
github-repos
def update(self, teamId, name=None, **request_parameters):
    """Update details for a team, by ID.

    Args:
        teamId(basestring): The team ID.
        name(basestring): A user-friendly name for the team.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        Team: A Team object with the updated Webex Teams team details.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
    """
    check_type(teamId, basestring, may_be_none=False)
    check_type(name, basestring)

    # Build the request body; presumably dict_from_items_with_values drops
    # entries whose value is None -- confirm against its definition.
    put_data = dict_from_items_with_values(request_parameters, name=name)

    json_data = self._session.put(((API_ENDPOINT + '/') + teamId), json=put_data)
    return self._object_factory(OBJECT_TYPE, json_data)
Update details for a team, by ID. Args: teamId(basestring): The team ID. name(basestring): A user-friendly name for the team. **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: Team: A Team object with the updated Webex Teams team details. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
codesearchnet
def get_structure_from_mp(formula):
    """Convenience method to get a crystal from the Materials Project
    database via the API. Requires PMG_MAPI_KEY to be set.

    Args:
        formula (str): A formula

    Returns:
        (Structure) The lowest energy structure in Materials Project with
        that formula.

    Raises:
        ValueError: If no structure with that formula exists.
    """
    m = MPRester()
    entries = m.get_entries(formula, inc_structure="final")
    if len(entries) == 0:
        raise ValueError("No structure with formula %s in Materials Project!" % formula)
    elif len(entries) > 1:
        # Multiple polymorphs: warn, then fall through to the minimum below.
        warnings.warn("%d structures with formula %s found in Materials " "Project. The lowest energy structure will be returned." % (len(entries), formula))
    # Pick the entry with the lowest energy per atom.
    return min(entries, key=lambda e: e.energy_per_atom).structure
Convenience method to get a crystal from the Materials Project database via the API. Requires PMG_MAPI_KEY to be set. Args: formula (str): A formula Returns: (Structure) The lowest energy structure in Materials Project with that formula.
juraj-google-style
def cancelMktDepth(self, contract: Contract, isSmartDepth=False):
    """Unsubscribe from market depth data.

    Args:
        contract: The exact contract object that was used to subscribe with.
        isSmartDepth: Presumably must match the value used at subscription
            time -- confirm against the subscribe call.
    """
    ticker = self.ticker(contract)
    # endTicker yields the active subscription's request id; a falsy value
    # indicates no active market-depth subscription for this ticker.
    reqId = self.wrapper.endTicker(ticker, 'mktDepth')

    if reqId:
        self.client.cancelMktDepth(reqId, isSmartDepth)
    else:
        self._logger.error(
            f'cancelMktDepth: No reqId found for contract {contract}')
Unsubscribe from market depth data. Args: contract: The exact contract object that was used to subscribe with.
juraj-google-style
def isHostCert(self, name):
    """Check whether a host certificate exists on disk.

    Args:
        name (str): The name of the host keypair.

    Examples:
        Check if the host cert "myhost" exists:

            exists = cdir.isHostCert('myhost')

    Returns:
        bool: True if the certificate is present, False otherwise.
    """
    # Host certs live under the "hosts" subdirectory as <name>.crt files.
    filename = '%s.crt' % name
    path = self._getPathJoin('hosts', filename)
    return os.path.isfile(path)
Checks if a host certificate exists. Args: name (str): The name of the host keypair. Examples: Check if the host cert "myhost" exists: exists = cdir.isHostCert('myhost') Returns: bool: True if the certificate is present, False otherwise.
codesearchnet
def _wrap_and_check_metrics(self, metrics):
    """Handle the saving of metrics.

    Metrics is either a tuple of (value, update_op), or a dict of such
    tuples (or Metric instances). Separates the tuples out and builds a
    dict mapping generated names to tensors.

    Args:
        metrics: Dict of metric results keyed by name. Values may be
            (metric_value, update_op) tuples or `Metric` instances.

    Returns:
        dict of output names to tensors.

    Raises:
        ValueError: if a metric value is not a Tensor, or an update op is
            not a Tensor or Operation.
    """
    if not isinstance(metrics, dict):
        # Single metric: wrap it under the default metrics key.
        metrics = {self.METRICS_NAME: metrics}
    outputs = {}
    for key, value in metrics.items():
        if isinstance(value, tuple):
            metric_val, metric_op = value
        else:
            # Metric-object case: extract its value and single update op.
            metric_val = value.result()
            assert len(value.updates) == 1
            metric_op = value.updates[0]
        key = self._check_output_key(key, self.METRICS_NAME)
        key = self._prefix_key(key, self.METRICS_NAME)

        # Derive distinct output names for the value and the update op.
        val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX
        op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX
        if not isinstance(metric_val, tensor.Tensor):
            raise ValueError('{} output value must be a Tensor; got {}.'.format(key, metric_val))
        if not (tensor_util.is_tf_type(metric_op) or isinstance(metric_op, ops.Operation)):
            raise ValueError('{} update_op must be a Tensor or Operation; got {}.'.format(key, metric_op))
        metric_op_tensor = metric_op
        if not isinstance(metric_op, tensor.Tensor):
            # Wrap a bare Operation in an empty constant that depends on it,
            # so the output dict contains only Tensors.
            with ops.control_dependencies([metric_op]):
                metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')
        outputs[val_name] = metric_val
        outputs[op_name] = metric_op_tensor
    return outputs
Handle the saving of metrics. Metrics is either a tuple of (value, update_op), or a dict of such tuples. Here, we separate out the tuples and create a dict with names to tensors. Args: metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of `Metric` class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. Returns: dict of output_names to tensors Raises: ValueError: if the dict key is not a string, or the metric values or ops are not tensors.
github-repos
def diff_contains_doc_examples(repo: Repo, branching_point: str, filename: str) -> bool:
    """Check whether the diff of a file touches its doc code examples.

    Args:
        repo (`git.Repo`): A git repository (for instance the Transformers repo).
        branching_point (`str`): The commit reference of where to compare for the diff.
        filename (`str`): The filename where we want to know if the diff concerns the code examples.

    Returns:
        `bool`: True when the doc-example portions of the file differ between
        the branching point and the current working tree.
    """
    folder = Path(repo.working_dir)
    with checkout_commit(repo, branching_point):
        # File content at the branching point.
        with open(folder / filename, 'r', encoding='utf-8') as f:
            old_content = f.read()

    # File content in the current working tree.
    with open(folder / filename, 'r', encoding='utf-8') as f:
        new_content = f.read()

    # Strip everything but the doc examples before comparing.
    old_content_clean = keep_doc_examples_only(old_content)
    new_content_clean = keep_doc_examples_only(new_content)

    return old_content_clean != new_content_clean
Check if the diff is only in code examples of the doc in a filename. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). branching_point (`str`): The commit reference of where to compare for the diff. filename (`str`): The filename where we want to know if the diff is only in codes examples. Returns: `bool`: Whether the diff is only in code examples of the doc or not.
github-repos
def __init__(self, message):
    """Create a KeyCompressionTypeNotSupported exception.

    Args:
        message (string): A string containing information about the error.
    """
    # The KMIP result reason is fixed for this error type; only the
    # message varies per instance.
    super(KeyCompressionTypeNotSupported, self).__init__(
        reason=enums.ResultReason.KEY_COMPRESSION_TYPE_NOT_SUPPORTED,
        message=message
    )
Create a KeyCompressionTypeNotSupported exception. Args: message (string): A string containing information about the error.
juraj-google-style
def run_without_time_limit(self, cmd):
    """Runs docker command without time limit.

    Args:
        cmd: list with the command line arguments which are passed to docker
            binary

    Returns:
        how long it took to run submission in seconds

    Raises:
        WorkerError: if error occurred during execution of the submission
    """
    cmd = [DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME] + cmd
    logging.info('Docker command: %s', ' '.join(cmd))
    start_time = time.time()
    retval = subprocess.call(cmd)
    # Wall-clock duration, truncated to whole seconds.
    elapsed_time_sec = int(time.time() - start_time)
    logging.info('Elapsed time of attack: %d', elapsed_time_sec)
    logging.info('Docker retval: %d', retval)
    if retval != 0:
        # Any non-zero docker exit code is treated as a submission failure.
        logging.warning('Docker returned non-zero retval: %d', retval)
        raise WorkerError('Docker returned non-zero retval ' + str(retval))
    return elapsed_time_sec
Runs docker command without time limit. Args: cmd: list with the command line arguments which are passed to docker binary Returns: how long it took to run submission in seconds Raises: WorkerError: if error occurred during execution of the submission
juraj-google-style
def build(cls, local_scheduler=True, **task_params):
    """Instantiate the task and build it with luigi.

    Args:
        local_scheduler (bool): use a local scheduler (True, default) or a
            remote scheduler
        task_params: parameters to pass to the task for instantiation
    """
    luigi.build([cls(**task_params)], local_scheduler=local_scheduler)
Instantiate the task and build it with luigi Args: local_scheduler (bool): use a local scheduler (True, default) or a remote scheduler task_params: parameters to pass to task for instantiation
juraj-google-style
def sg_summary_image(tensor, prefix=None, name=None):
    r"""Register `tensor` to summary report as `image`

    Args:
        tensor: A tensor to log as image
        prefix: A `string`. A prefix to display in the tensor board web UI.
        name: A `string`. A name to display in the tensor board web UI.

    Returns:
        None
    """
    # Derive the display name: "<prefix>/<name or pretty tensor name>".
    prefix = ('' if (prefix is None) else (prefix + '/'))
    name = ((prefix + _pretty_name(tensor)) if (name is None) else (prefix + name))
    # Only register when variables are not being reused -- presumably to
    # avoid duplicate summaries in shared-variable scopes; confirm.
    if (not tf.get_variable_scope().reuse):
        tf.summary.image((name + '-im'), tensor)
r"""Register `tensor` to summary report as `image` Args: tensor: A tensor to log as image prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
codesearchnet
def check_R_package(self, package):
    """Execute a subprocess to check the package's availability.

    Args:
        package (str): Name of the package to be tested.

    Returns:
        bool: `True` if the package is available, `False` otherwise
    """
    # The R template script attempts to import the package; the script's
    # return value is negated here, so a falsy result presumably means a
    # successful import -- confirm against launch_R_script.
    test_package = not bool(launch_R_script("{}/R_templates/test_import.R".format(os.path.dirname(os.path.realpath(__file__))), {"{package}": package}, verbose=True))
    return test_package
Execute a subprocess to check the package's availability. Args: package (str): Name of the package to be tested. Returns: bool: `True` if the package is available, `False` otherwise
juraj-google-style
def load_data_table(table_name, meta_file, meta):
    """Return the contents and metadata of a given table.

    Args:
        table_name(str): Name of the table.
        meta_file(str): Path to the meta.json file.
        meta(dict): Contents of meta.json.

    Returns:
        tuple(pandas.DataFrame, dict): The loaded table and its metadata
        entry; implicitly None when no table matches `table_name`.
    """
    for table in meta['tables']:
        if table['name'] == table_name:
            # Table paths are stored relative to the meta.json directory.
            prefix = os.path.dirname(meta_file)
            relative_path = os.path.join(prefix, meta['path'], table['path'])
            return pd.read_csv(relative_path), table
Return the contents and metadata of a given table. Args: table_name(str): Name of the table. meta_file(str): Path to the meta.json file. meta(dict): Contents of meta.json. Returns: tuple(pandas.DataFrame, dict)
juraj-google-style
def from_list(index, queues):
    """Create a queue using the queue reference from `queues[index]`.

    Args:
        index: An integer scalar tensor that determines the input that gets
            selected.
        queues: A list of `QueueBase` objects.

    Returns:
        A `QueueBase` object.

    Raises:
        TypeError: When `queues` is not a list of `QueueBase` objects, or
            when the data types of `queues` are not all the same.
    """
    if not queues or not isinstance(queues, list) or (not all((isinstance(x, QueueBase) for x in queues))):
        raise TypeError('A list of queues expected')

    # All queues must agree on component dtypes and names.
    dtypes = queues[0].dtypes
    if not all((dtypes == q.dtypes for q in queues[1:])):
        raise TypeError('Queues do not have matching component dtypes.')

    names = queues[0].names
    if not all((names == q.names for q in queues[1:])):
        raise TypeError('Queues do not have matching component names.')

    # Reduce each component's shapes across all queues to a common shape.
    queue_shapes = [q.shapes for q in queues]
    reduced_shapes = [functools.reduce(_shape_common, s) for s in zip(*queue_shapes)]

    # Select the underlying queue resource at runtime by index.
    queue_refs = array_ops_stack.stack([x.queue_ref for x in queues])
    selected_queue = array_ops.gather(queue_refs, index)
    return QueueBase(dtypes=dtypes, shapes=reduced_shapes, names=names, queue_ref=selected_queue)
Create a queue using the queue reference from `queues[index]`. Args: index: An integer scalar tensor that determines the input that gets selected. queues: A list of `QueueBase` objects. Returns: A `QueueBase` object. Raises: TypeError: When `queues` is not a list of `QueueBase` objects, or when the data types of `queues` are not all the same.
github-repos