code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _dict_mapping_to_pb(mapping, proto_type):
    """Convert a plain dict into a protobuf message of the given type.

    Args:
        mapping (dict): Dict representation of the message.
        proto_type (str): Name of the message class in ``trace_pb2``.

    Returns:
        An instance of the named protobuf message populated from ``mapping``.
    """
    # Look the message class up by name on the generated module, then let
    # ParseDict populate it in place.
    message = getattr(trace_pb2, proto_type)()
    ParseDict(mapping, message)
    return message
Convert a dict to protobuf. Args: mapping (dict): A dict that needs to be converted to protobuf. proto_type (str): The type of the Protobuf. Returns: An instance of the specified protobuf.
codesearchnet
def get_many(self, type: Type[T], query: Mapping[(str, Any)], context: PipelineContext=None) -> Iterable[T]:
    """Issue a query that requests multiple objects from the data source.

    Abstract hook: this base implementation does nothing; subclasses are
    expected to override it and return the requested objects.

    Args:
        type: The type of the objects being requested.
        query: The query being requested (contains a request for multiple
            objects).
        context: The context for the extraction (mutable). Defaults to None.

    Returns:
        The requested objects (from an overriding implementation).
    """
    pass
Gets a query from the data source, which contains a request for multiple objects. Args: query: The query being requested (contains a request for multiple objects). context: The context for the extraction (mutable). Returns: The requested objects.
codesearchnet
def _apply_colocation_attr_map(colocation_attr_map, absolute_import_scope):
    """Rewrites colocation constraints in the current default graph.

    Nodes under `absolute_import_scope` get their "_class" attr lists
    rewritten according to `colocation_attr_map`; the node's device is
    updated from the first rewritten colocation target that has one.

    Args:
        colocation_attr_map: as returned by _build_colocation_attr_map.
        absolute_import_scope: as for fix_colocation_after_import.

    Raises:
        ValueError: if rewriting runs into an inconsistent value in
            `colocation_attr_map`.
    """
    graph = tf_v1.get_default_graph()
    for op in graph.get_operations():
        # Only touch ops inside the imported module's scope.
        if (not op.name.startswith((absolute_import_scope + '/'))):
            continue
        try:
            class_values = op.get_attr('_class')
        except ValueError:
            # Op has no "_class" attr; nothing to rewrite.
            continue
        new_attr_value = tf_v1.AttrValue()
        new_coloc_groups = []
        for class_value in class_values:
            # Entries of the form b'loc:@...' are colocation groups; other
            # entries are preserved unchanged.
            if class_value.startswith(tf.compat.as_bytes('loc:@')):
                if (class_value not in colocation_attr_map):
                    rewritten_class_value = [class_value]
                else:
                    rewritten_class_value = colocation_attr_map[class_value].GetConsistentValueOrRaise('Failed to rewrite colocation constraints while applying hub.Module:\nThe module graph contains a node {op!r} that has a colocation constraint {class_value!r} with ambiguous rewriting {old_value!r} vs {new_value!r} because {old_reason} and {new_reason}, respectively.\nTo fix, avoid publishing a module with inputs comprising multiple outputs of one op that is referenced in tf.colocate_with(...) constraints on other ops.', {'op': op.name, 'class_value': class_value})
                new_coloc_groups.extend(rewritten_class_value)
            else:
                new_attr_value.list.s.append(class_value)
        # Deduplicate and sort the rewritten groups for a stable attr value.
        new_coloc_groups = sorted(set(new_coloc_groups))
        new_attr_value.list.s.extend(new_coloc_groups)
        op._set_attr('_class', new_attr_value)
        if new_coloc_groups:
            # Inherit the device from the first colocation target that
            # actually has one assigned.
            new_coloc_device = ''
            for new_coloc_group in new_coloc_groups:
                assert new_coloc_group.startswith(tf.compat.as_bytes('loc:@'))
                # Strip the b'loc:@' prefix (5 bytes) to get the op name.
                new_coloc_target_op = graph.get_operation_by_name(tf.compat.as_str_any(new_coloc_group[5:]))
                new_coloc_device = new_coloc_target_op.device
                if new_coloc_device:
                    break
            op._set_device(new_coloc_device)
Rewrites colocation constraints in the current default graph. Nodes in `absolute_import_scope` get their "_class" attr lists rewritten according to `colocation_attr_map`: each entry that matches a key gets replaced by the associated values (with deduplication). The node's device is updated accordingly. Args: colocation_attr_map: as returned by _build_colocation_attr_map. absolute_import_scope: as for fix_colocation_after_import. Raises: ValueError: if rewriting runs into an inconsistent value in `colocation_attr_map`.
codesearchnet
def load(self, languages=None):
    """Loads the Duckling corpus. Languages can be specified, defaults to all.

    Args:
        languages: Optional list of languages to load, e.g.
            [Duckling.ENGLISH, Duckling.FRENCH] or supported ISO 639-1
            codes (e.g. ["en", "fr"]).
    """
    # NOTE: the original signature used a mutable default (languages=[]),
    # which is shared across calls; None is the safe equivalent and is
    # treated identically by the truthiness check below.
    duckling_load = self.clojure.var("duckling.core", "load!")
    clojure_hashmap = self.clojure.var("clojure.core", "hash-map")
    clojure_list = self.clojure.var("clojure.core", "list")
    if languages:
        iso_languages = [Language.convert_to_iso(lang) for lang in languages]
        # Build the equivalent of {:languages (list ...)} on the Clojure side.
        duckling_load.invoke(
            clojure_hashmap.invoke(
                self.clojure.read(':languages'),
                clojure_list.invoke(*iso_languages)
            )
        )
    else:
        duckling_load.invoke()
    self._is_loaded = True
Loads the Duckling corpus. Languages can be specified, defaults to all. Args: languages: Optional parameter to specify languages, e.g. [Duckling.ENGLISH, Duckling.FRENCH] or supported ISO 639-1 Codes (e.g. ["en", "fr"])
juraj-google-style
def main(event_loop=None):
    """Scriptworker entry point: get everything set up, then enter the main loop.

    Args:
        event_loop (asyncio.BaseEventLoop, optional): the event loop to use.
            If None, use ``asyncio.get_event_loop()``. Defaults to None.
    """
    context, credentials = get_context_from_cmdln(sys.argv[1:])
    log.info("Scriptworker starting up at {} UTC".format(arrow.utcnow().format()))
    cleanup(context)
    context.event_loop = event_loop or asyncio.get_event_loop()
    done = False

    async def _handle_sigterm():
        # Graceful shutdown: flip the loop flag and cancel in-flight tasks.
        log.info("SIGTERM received; shutting down")
        nonlocal done
        done = True
        if context.running_tasks is not None:
            await context.running_tasks.cancel()

    # Schedule the async handler when SIGTERM arrives; the lambda is needed
    # because add_signal_handler takes a plain callable, not a coroutine.
    context.event_loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.ensure_future(_handle_sigterm()))
    while not done:
        try:
            context.event_loop.run_until_complete(async_main(context, credentials))
        except Exception:
            # Fatal: log with traceback and let the exception propagate.
            log.critical("Fatal exception", exc_info=1)
            raise
Scriptworker entry point: get everything set up, then enter the main loop. Args: event_loop (asyncio.BaseEventLoop, optional): the event loop to use. If None, use ``asyncio.get_event_loop()``. Defaults to None.
juraj-google-style
def GetMessage(self, log_source, lcid, message_identifier):
    """Retrieves a specific message for a specific Event Log source.

    Args:
        log_source (str): Event Log source.
        lcid (int): language code identifier (LCID).
        message_identifier (int): message identifier.

    Returns:
        str: message string or None if not available.
    """
    event_log_provider_key = self._GetEventLogProviderKey(log_source)
    if not event_log_provider_key:
        return None

    generator = self._GetMessageFileKeys(event_log_provider_key)
    if not generator:
        return None

    # Try each message file in turn until one yields the message.
    message_string = None
    for message_file_key in generator:
        message_string = self._GetMessage(
            message_file_key, lcid, message_identifier)
        if message_string:
            break

    # Only reformat when a message was actually found; the original code
    # passed None into _ReformatMessageString when every lookup failed.
    if message_string and self._string_format == 'wrc':
        message_string = self._ReformatMessageString(message_string)

    return message_string
Retrieves a specific message for a specific Event Log source. Args: log_source (str): Event Log source. lcid (int): language code identifier (LCID). message_identifier (int): message identifier. Returns: str: message string or None if not available.
juraj-google-style
def split(x, axis=0):
    """Split arrays at the specified axis.

    Returns a number of :obj:`~nnabla.Variable` s equal to the size of the
    given axis (i.e. ``x.shape[axis]``).

    Args:
        x (~nnabla.Variable): N-D array.
        axis (int): Axis along which to split. Defaults to 0.

    Returns:
        A :obj:`tuple` of :obj:`~nnabla.Variable` s.

    See Also:
        :func:`nnabla.function_bases.split`.
    """
    from .function_bases import split as split_base
    n_outputs = x.shape[axis]
    return split_base(x, axis, n_outputs)
Split arrays at the specified axis. It returns a number corresponding the size of the given axis (i.e ``x.shape[axis]``) of :obj:`~nnabla.Variable` s. Args: x(~nnabla.Variable): N-D array axis(int): Axis Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s See Also: :func:`nnabla.function_bases.split`.
juraj-google-style
def resolve_label_conflict(mapping, old_labels=None, new_labels=None):
    """Resolve a self-labeling conflict by creating an intermediate labeling.

    Args:
        mapping (dict): A dict mapping the current variable labels to new
            ones.
        old_labels (set, optional, default=None): The keys of mapping. Can
            be passed in for performance reasons. These are not checked.
        new_labels (set, optional, default=None): The values of mapping. Can
            be passed in for performance reasons. These are not checked.

    Returns:
        tuple: A 2-tuple containing:

            dict: A map from the keys of mapping to an intermediate labeling.

            dict: A map from the intermediate labeling to the values of
            mapping.
    """
    # NOTE: the original used six's iteritems/itervalues; builtin dict views
    # are the Python 3 equivalent and drop the six dependency.
    if old_labels is None:
        old_labels = set(mapping)
    if new_labels is None:
        new_labels = set(mapping.values())

    # Fresh intermediate labels start above anything the mapping could hold.
    counter = itertools.count(2 * len(mapping))

    old_to_intermediate = {}
    intermediate_to_new = {}

    for old, new in mapping.items():
        if old == new:
            continue
        if old in new_labels or new in old_labels:
            # Conflicting relabel: route through a fresh intermediate label
            # that collides with neither side.
            lbl = next(counter)
            while lbl in new_labels or lbl in old_labels:
                lbl = next(counter)
            old_to_intermediate[old] = lbl
            intermediate_to_new[lbl] = new
        else:
            # No conflict: relabel directly in the first pass.
            old_to_intermediate[old] = new

    return old_to_intermediate, intermediate_to_new
Resolve a self-labeling conflict by creating an intermediate labeling. Args: mapping (dict): A dict mapping the current variable labels to new ones. old_labels (set, optional, default=None): The keys of mapping. Can be passed in for performance reasons. These are not checked. new_labels (set, optional, default=None): The values of mapping. Can be passed in for performance reasons. These are not checked. Returns: tuple: A 2-tuple containing: dict: A map from the keys of mapping to an intermediate labeling dict: A map from the intermediate labeling to the values of mapping.
codesearchnet
def needkwargs(*argnames):
    """Function decorator which checks that the decorated function is called
    with a set of required kwargs.

    Args:
        *argnames: String keyword argument names.

    Raises:
        ValueError: If a required kwarg is missing in the decorated
            function call.
    """
    import functools

    required = set(argnames)

    def decorator(func):
        # functools.wraps preserves the wrapped function's name/docstring,
        # which the original decorator lost.
        @functools.wraps(func)
        def inner(*args, **kwargs):
            missing = required - set(kwargs)
            if missing:
                raise ValueError("%s kwargs are missing." % list(missing))
            return func(*args, **kwargs)
        return inner
    return decorator
Function decorator which checks that the decorated function is called with a set of required kwargs. Args: *argnames: String keyword argument names. Raises: ValueError: If a required kwarg is missing in the decorated function call.
juraj-google-style
def from_json(cls, data):
    """Create a Wind Condition from a dictionary.

    Args:
        data: dict of the form
            {"wind_speed": float, "wind_direction": float,
            "rain": bool, "snow_on_ground": bool};
            only "wind_speed" is required.

    Returns:
        A new instance built from the dictionary values.
    """
    assert 'wind_speed' in data, 'Required key "wind_speed" is missing!'
    optional_keys = {'wind_direction': 0, 'rain': False, 'snow_on_ground': False}
    # Merge defaults without mutating the caller's dict (the original
    # implementation wrote the defaults back into ``data``).
    values = {**optional_keys, **data}
    return cls(values['wind_speed'], values['wind_direction'],
               values['rain'], values['snow_on_ground'])
Create a Wind Condition from a dictionary. Args: data = { "wind_speed": float, "wind_direction": float, "rain": bool, "snow_on_ground": bool}
juraj-google-style
def __init__(self, recognizer: IRecognizer, node: yaml.Node) -> None:
    """Create an UnknownNode wrapping a particular mapping node.

    The member functions will act on the contained node.

    Args:
        recognizer: Recognizer used to inspect the wrapped node.
        node: The node to operate on.
    """
    self.yaml_node = node
    self.__recognizer = recognizer
Create an UnknownNode for a particular mapping node. The member functions will act on the contained node. Args: node: The node to operate on.
juraj-google-style
def from_string(string):
    """Reads a Kpoints object from a KPOINTS string.

    Handles the VASP KPOINTS variants: fully automatic ("a"), gamma/
    Monkhorst-Pack automatic grids ("g"/"m"), automatic-by-generation
    (num_kpts <= 0), line mode ("l"), and explicit k-point lists
    (optionally followed by a tetrahedra section).

    Args:
        string (str): KPOINTS string.

    Returns:
        Kpoints object
    """
    lines = [line.strip() for line in string.splitlines()]
    comment = lines[0]
    num_kpts = int(lines[1].split()[0].strip())
    # Only the first character of the style line is significant.
    style = lines[2].lower()[0]

    # Fully automatic KPOINTS: single subdivision number on line 4.
    if style == "a":
        return Kpoints.automatic(int(lines[3]))

    coord_pattern = re.compile(r'^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+'
                               r'([\d+.\-Ee]+)')

    # Automatic gamma-centered or Monkhorst-Pack grid.
    if style == "g" or style == "m":
        kpts = [int(i) for i in lines[3].split()]
        kpts_shift = (0, 0, 0)
        # Optional shift line; ignore it if it fails to parse as floats.
        if len(lines) > 4 and coord_pattern.match(lines[4]):
            try:
                kpts_shift = [float(i) for i in lines[4].split()]
            except ValueError:
                pass
        return Kpoints.gamma_automatic(kpts, kpts_shift) if style == "g" \
            else Kpoints.monkhorst_automatic(kpts, kpts_shift)

    # Automatic kpoints with basis: three basis vectors plus a shift.
    if num_kpts <= 0:
        style = Kpoints.supported_modes.Cartesian if style in "ck" \
            else Kpoints.supported_modes.Reciprocal
        kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
        kpts_shift = [float(i) for i in lines[6].split()]
        return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
                       kpts=kpts, kpts_shift=kpts_shift)

    # Line mode: k-points with optional "! label" annotations.
    if style == "l":
        coord_type = "Cartesian" if lines[3].lower()[0] in "ck" \
            else "Reciprocal"
        style = Kpoints.supported_modes.Line_mode
        kpts = []
        labels = []
        patt = re.compile(r'([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)'
                          r'\s*!*\s*(.*)')
        for i in range(4, len(lines)):
            line = lines[i]
            m = patt.match(line)
            if m:
                kpts.append([float(m.group(1)), float(m.group(2)),
                             float(m.group(3))])
                labels.append(m.group(4).strip())
        return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
                       kpts=kpts, coord_type=coord_type, labels=labels)

    # Explicit k-point list: coordinates, weight, and an optional label.
    style = Kpoints.supported_modes.Cartesian if style in "ck" \
        else Kpoints.supported_modes.Reciprocal
    kpts = []
    kpts_weights = []
    labels = []
    tet_number = 0
    tet_weight = 0
    tet_connections = None
    for i in range(3, 3 + num_kpts):
        toks = lines[i].split()
        kpts.append([float(j) for j in toks[0:3]])
        kpts_weights.append(float(toks[3]))
        if len(toks) > 4:
            labels.append(toks[4])
        else:
            labels.append(None)
    try:
        # Optional tetrahedra section starts with a line beginning with "t".
        if lines[3 + num_kpts].strip().lower()[0] == "t":
            toks = lines[4 + num_kpts].split()
            tet_number = int(toks[0])
            tet_weight = float(toks[1])
            tet_connections = []
            for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
                toks = lines[i].split()
                tet_connections.append((int(toks[0]),
                                        [int(toks[j]) for j in range(1, 5)]))
    except IndexError:
        # No tetrahedra section present; keep the defaults.
        pass
    return Kpoints(comment=comment, num_kpts=num_kpts,
                   style=Kpoints.supported_modes[str(style)],
                   kpts=kpts, kpts_weights=kpts_weights,
                   tet_number=tet_number, tet_weight=tet_weight,
                   tet_connections=tet_connections, labels=labels)
Reads a Kpoints object from a KPOINTS string. Args: string (str): KPOINTS string. Returns: Kpoints object
juraj-google-style
def testEmptyTensors(self, drop_remainder):
    """Tests rebatching of datasets whose elements are empty tensors.

    Args:
        drop_remainder: whether to drop the remainder when rebatching.
    """
    new_batch_size = 4
    dataset = dataset_ops.Dataset.range(8)
    # Every element becomes an empty tensor of shape (5, 0).
    dataset = dataset.map(lambda x: array_ops.reshape((), (5, 0)))
    dataset = dataset.batch(2)
    rebatched_dataset = dataset.rebatch(batch_size=new_batch_size,
                                        drop_remainder=drop_remainder)
    # 8 source elements regrouped into full batches of new_batch_size.
    # NOTE(review): the original source was truncated at this expression;
    # the batch count is reconstructed as 8 // new_batch_size -- confirm
    # against the upstream TensorFlow test.
    expected_output = [array_ops.reshape((), (new_batch_size, 5, 0))
                       for _ in range(8 // new_batch_size)]
    self.assertDatasetProduces(rebatched_dataset, expected_output)
Tests empty tensors case. Args: drop_remainder: whether to drop the remainder. The implementation of rebatch might move the input data. This test ensures the empty buffer is handled correctly.
github-repos
def splitdrive(path):
    """Split the path into a pair (drive, tail).

    Drive is either a mount point or the empty string. On systems which do
    not use drive specifications, drive will always be the empty string.
    In all cases, drive + tail will be the same as path. Equivalent to
    "os.path.splitdrive".

    Args:
        path (path-like object): Path or URL.

    Returns:
        tuple of str: drive, tail.
    """
    relative = get_instance(path).relpath(path)
    drive = path.rsplit(relative, 1)[0]
    # NOTE(review): the original source was corrupted at this comparison;
    # the '//' literal is reconstructed (a drive ending in '//' is a bare
    # scheme prefix such as 's3://') -- confirm against upstream.
    if drive and not drive[-2:] == '//':
        relative = '/' + relative
        drive = drive.rstrip('/')
    return drive, relative
Split the path into a pair (drive, tail) where drive is either a mount point or the empty string. On systems which do not use drive specifications, drive will always be the empty string. In all cases, drive + tail will be the same as path. Equivalent to "os.path.splitdrive". Args: path (path-like object): Path or URL. Returns: tuple of str: drive, tail.
juraj-google-style
def storage_systems(self):
    """Gets the StorageSystems API client.

    Returns:
        StorageSystems: lazily constructed, cached client instance.
    """
    # Build the client on first access only; later calls reuse the cache.
    if not self.__storage_systems:
        self.__storage_systems = StorageSystems(self.__connection)
    return self.__storage_systems
Gets the StorageSystems API client. Returns: StorageSystems:
codesearchnet
def get_file_link(self, file_key):
    """Gets a link to a file.

    Args:
        file_key: key for the file.

    Returns:
        (status code, ?) -- in principle; see the note below.
    """
    # NOTE: this endpoint is marked unimplemented; the call below raises,
    # so the request-building code that follows is currently unreachable.
    self._raise_unimplemented_error()
    uri = '/'.join([self.api_uri, self.files_suffix, file_key, self.file_link_suffix, ])
    return self._req('get', uri)
Gets a link to a file. Args: file_key: key identifying the file. Returns: the result of the GET request (status code, response). Note: this endpoint currently raises as unimplemented.
juraj-google-style
def from_json(cls, raw):
    """Helper to construct an annotation from a dict.

    Args:
        raw (dict): Raw annotation representation.

    Returns:
        An Annotation object, or None for unknown annotation types.
    """
    # Dispatch on the discriminating key present in the raw dict.
    annotation_class = None
    if 'webLink' in raw:
        annotation_class = WebLink
    elif 'topicCategory' in raw:
        annotation_class = Category
    elif 'taskAssist' in raw:
        annotation_class = TaskAssist
    elif 'context' in raw:
        annotation_class = Context

    if annotation_class is None:
        logger.warning('Unknown annotation type: %s', raw.keys())
        return None

    annotation = annotation_class()
    annotation.load(raw)
    return annotation
Helper to construct an annotation from a dict. Args: raw (dict): Raw annotation representation. Returns: Node: An Annotation object or None.
juraj-google-style
def _reduction_a_cell(ip, p, filters, block_id=None):
    """Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).

    Args:
        ip: Input tensor `x`.
        p: Input tensor `p` (output of the previous cell).
        filters: Number of output filters.
        block_id: String block_id used to build layer names.

    Returns:
        A tuple (cell output tensor, `ip`).
    """
    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
    with backend.name_scope(f'reduction_A_block_{block_id}'):
        # Adjust p so its spatial/channel shape is compatible with ip.
        p = _adjust_block(p, ip, filters, block_id)

        # 1x1 conv stem: relu -> conv -> batch norm.
        h = layers.Activation('relu')(ip)
        h = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name=f'reduction_conv_1_{block_id}', use_bias=False, kernel_initializer='he_normal')(h)
        h = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'reduction_bn_1_{block_id}')(h)
        # Padded copy of h used by the stride-2 pooling branches.
        h3 = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(h, 3), name=f'reduction_pad_1_{block_id}')(h)

        with backend.name_scope('block_1'):
            x1_1 = _separable_conv_block(h, filters, (5, 5), strides=(2, 2), block_id=f'reduction_left1_{block_id}')
            x1_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id=f'reduction_right1_{block_id}')
            x1 = layers.add([x1_1, x1_2], name=f'reduction_add_1_{block_id}')
        with backend.name_scope('block_2'):
            x2_1 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name=f'reduction_left2_{block_id}')(h3)
            x2_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id=f'reduction_right1_{block_id}'.replace('right1', 'right2') if False else f'reduction_right2_{block_id}')
            x2 = layers.add([x2_1, x2_2], name=f'reduction_add_2_{block_id}')
        with backend.name_scope('block_3'):
            x3_1 = layers.AveragePooling2D((3, 3), strides=(2, 2), padding='valid', name=f'reduction_left3_{block_id}')(h3)
            x3_2 = _separable_conv_block(p, filters, (5, 5), strides=(2, 2), block_id=f'reduction_right3_{block_id}')
            x3 = layers.add([x3_1, x3_2], name=f'reduction_add3_{block_id}')
        with backend.name_scope('block_4'):
            x4 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same', name=f'reduction_left4_{block_id}')(x1)
            x4 = layers.add([x2, x4])
        with backend.name_scope('block_5'):
            # NOTE(review): 'reduction_left4' is reused here as a block_id
            # (already used by block_4's pooling layer above); this appears
            # to match the upstream Keras NASNet naming -- confirm upstream.
            x5_1 = _separable_conv_block(x1, filters, (3, 3), block_id=f'reduction_left4_{block_id}')
            x5_2 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name=f'reduction_right5_{block_id}')(h3)
            x5 = layers.add([x5_1, x5_2], name=f'reduction_add4_{block_id}')

        # Concatenate the four branch outputs along the channel axis.
        x = layers.concatenate([x2, x3, x4, x5], axis=channel_dim, name=f'reduction_concat_{block_id}')
        return (x, ip)
Adds a Reduction cell for NASNet-A (Fig. 4 in the paper). Args: ip: Input tensor `x` p: Input tensor `p` filters: Number of output filters block_id: String block_id Returns: A Keras tensor
github-repos
def ModuleHelp(self, module):
    """Describe the key flags of a module.

    Args:
        module: A module object or a module name (a string).

    Returns:
        string describing the key flags of a module.
    """
    lines = []
    self.__RenderOurModuleKeyFlags(module, lines)
    return '\n'.join(lines)
Describe the key flags of a module. Args: module: A module object or a module name (a string). Returns: string describing the key flags of a module.
juraj-google-style
def get_learning_rate(self, iter):
    """Get learning rate with exponential decay based on current iteration,
    scaled linearly during the warmup phase.

    Args:
        iter (int): Current iteration (starting with 0).

    Returns:
        float: Learning rate.
    """
    base_lr = self.scheduler.get_learning_rate(iter)
    if iter >= self.warmup_iter:
        return base_lr
    # Warmup: ramp linearly from 1/warmup_iter of the base rate up to it.
    return base_lr * (iter + 1) / self.warmup_iter
Get learning rate with exponential decay based on current iteration. Args: iter (int): Current iteration (starting with 0). Returns: float: Learning rate
juraj-google-style
def __init__(self, wrapped, exit_callback):
    """Constructor.

    Args:
        wrapped (Future): the original Future object (to wrap)
        exit_callback: the exit callback to call at the end of the block
    """
    Future.__init__(self)
    # Mirror completion of the wrapped future onto this one.
    wrapped.add_done_callback(self._done_callback)
    self._wrapped = wrapped
    self._exit_callback = exit_callback
Constructor. Args: wrapped (Future): the original Future object (to wrap) exit_callback: the exit callback to call at the end of the block
juraj-google-style
def PatternMatch(regex):
    """Compute the score of a text by determining if a pattern matches.

    Example:
        >>> fitness = PatternMatch("flag{.*}")
        >>> fitness("flag{example}")
        0
        >>> fitness("junk")
        -1

    Args:
        regex (str): regular expression string to use as a pattern

    Returns:
        A scoring callable mapping text to 0 (match) or -1 (no match).
    """
    pattern = re.compile(regex)

    def score(text):
        # 0 when the pattern occurs anywhere in the text, -1 otherwise.
        return -1 if pattern.search(text) is None else 0

    return score
Compute the score of a text by determining if a pattern matches. Example: >>> fitness = PatternMatch("flag{.*}") >>> fitness("flag{example}") 0 >>> fitness("junk") -1 Args: regex (str): regular expression string to use as a pattern
codesearchnet
def get_learning_rate(self, iter):
    """Get learning rate with polynomial decay based on current iteration.

    Args:
        iter (int): current iteration (starting with 0).

    Returns:
        float: Learning rate.
    """
    # Fraction of the schedule completed so far, in [0, 1].
    progress = (iter * 1.0) / self.max_iter
    decay = (1.0 - progress) ** self.power
    return self.init_lr * decay
Get learning rate with polymomial decay based on current iteration. Args: iter (int): current iteration (starting with 0). Returns: float: Learning rate
codesearchnet
def add_event(self, event):
    """Adds an event to the event file.

    Args:
        event: An `Event` protocol buffer.
    """
    # Warn (rather than fail) if the underlying writer was already closed,
    # then forward the event.
    self._warn_if_event_writer_is_closed()
    self.event_writer.add_event(event)
Adds an event to the event file. Args: event: An `Event` protocol buffer.
github-repos
def __init__(self, config_manager, backend):
    """Initializes an instance of the DiscoveryService.

    Args:
        config_manager: An instance of ApiConfigManager.
        backend: An _ApiServer instance for API config generation.
    """
    self._backend = backend
    self._config_manager = config_manager
Initializes an instance of the DiscoveryService. Args: config_manager: An instance of ApiConfigManager. backend: An _ApiServer instance for API config generation.
juraj-google-style
def MultiHeadedAttention(  # pylint: disable=invalid-name
    feature_depth, num_heads=8, dropout=0.0, mode='train'):
    """Transformer-style multi-headed attention.

    Accepts inputs of the form (x, mask) and constructs (q, k, v) from x.

    Args:
        feature_depth: int: depth of embedding
        num_heads: int: number of attention heads
        dropout: float: dropout rate
        mode: str: 'train' or 'eval'

    Returns:
        Multi-headed self-attention layer.
    """
    return combinators.Serial(
        # Branch(num_branches=3) presumably fans x out into the three copies
        # that become (q, k, v), with Identity passing the mask through --
        # confirm against the combinators module.
        combinators.Parallel(
            combinators.Branch(num_branches=3),
            combinators.Identity()
        ),
        MultiHeadedAttentionQKV(
            feature_depth, num_heads=num_heads, dropout=dropout, mode=mode),
    )
Transformer-style multi-headed attention. Accepts inputs of the form (x, mask) and constructs (q, k, v) from x. Args: feature_depth: int: depth of embedding num_heads: int: number of attention heads dropout: float: dropout rate mode: str: 'train' or 'eval' Returns: Multi-headed self-attention layer.
juraj-google-style
def __init__(self, failfast=False, save_tests=False, report_template=None,
             report_dir=None, log_level="INFO", log_file=None):
    """initialize HttpRunner.

    Args:
        failfast (bool): stop the test run on the first error or failure.
        save_tests (bool): save loaded/parsed tests to JSON file.
        report_template (str): report template file path, template should
            be in Jinja2 format.
        report_dir (str): html report save directory.
        log_level (str): logging level.
        log_file (str): log file path.
    """
    self.exception_stage = "initialize HttpRunner()"
    # HtmlTestResult renders the per-test data used by the HTML report.
    self.unittest_runner = unittest.TextTestRunner(
        failfast=failfast, resultclass=report.HtmlTestResult)
    self.test_loader = unittest.TestLoader()
    self.save_tests = save_tests
    self.report_template = report_template
    self.report_dir = report_dir
    self._summary = None
    if log_file:
        logger.setup_logger(log_level, log_file)
initialize HttpRunner. Args: failfast (bool): stop the test run on the first error or failure. save_tests (bool): save loaded/parsed tests to JSON file. report_template (str): report template file path, template should be in Jinja2 format. report_dir (str): html report save directory. log_level (str): logging level. log_file (str): log file path.
juraj-google-style
def set_all_curriculums_to_lesson_num(self, lesson_num):
    """Sets all the curriculums in this meta curriculum to a specified
    lesson number.

    Args:
        lesson_num (int): The lesson number which all the curriculums will
            be set to.
    """
    # Iterate values directly; the keys (brain names) are not needed.
    for curriculum in self.brains_to_curriculums.values():
        curriculum.lesson_num = lesson_num
Sets all the curriculums in this meta curriculum to a specified lesson number. Args: lesson_num (int): The lesson number which all the curriculums will be set to.
juraj-google-style
def getContextsForTerm(self, term, getFingerprint=None, startIndex=0, maxResults=5):
    """Get the contexts for a given term.

    Args:
        term, str: A term in the retina (required)
        getFingerprint, bool: Configure if the fingerprint should be
            returned as part of the results (optional)
        startIndex, int: The start-index for pagination (optional)
        maxResults, int: Max results per page (optional)

    Returns:
        list of Context

    Raises:
        CorticalioException: if the request was not successful
    """
    # Delegate to the terms API client, scoped to this retina.
    return self._terms.getContextsForTerm(
        self._retina, term, getFingerprint, startIndex, maxResults)
Get the contexts for a given term Args: term, str: A term in the retina (required) getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) startIndex, int: The start-index for pagination (optional) maxResults, int: Max results per page (optional) Returns: list of Context Raises: CorticalioException: if the request was not successful
juraj-google-style
def add_column(self, column_name, column_values):
    """Adds a named column of metadata values.

    Args:
        column_name: Name of the column.
        column_values: 1D array/list/iterable holding the column values. Must
            be of length `num_points`. The i-th value corresponds to the i-th
            point.

    Raises:
        ValueError: If `column_values` is not 1D array, or of length
            `num_points`, or the `name` is already used.
    """
    # Guard the nested-list check against an empty list, which previously
    # raised IndexError instead of a descriptive ValueError.
    if (isinstance(column_values, list) and column_values
            and isinstance(column_values[0], list)):
        raise ValueError('"column_values" must be a flat list, but we detected '
                         'that its first entry is a list')

    if isinstance(column_values, np.ndarray) and column_values.ndim != 1:
        raise ValueError('"column_values" should be of rank 1, '
                         'but is of rank %d' % column_values.ndim)
    if len(column_values) != self.num_points:
        raise ValueError('"column_values" should be of length %d, but is of '
                         'length %d' % (self.num_points, len(column_values)))
    if column_name in self.name_to_values:
        raise ValueError('The column name "%s" is already used' % column_name)

    self.column_names.append(column_name)
    self.name_to_values[column_name] = column_values
Adds a named column of metadata values. Args: column_name: Name of the column. column_values: 1D array/list/iterable holding the column values. Must be of length `num_points`. The i-th value corresponds to the i-th point. Raises: ValueError: If `column_values` is not 1D array, or of length `num_points`, or the `name` is already used.
juraj-google-style
def export(
        self,
        input_ids: Optional[torch.Tensor]=None,
        cache_position: Optional[torch.Tensor]=None,
        dynamic_shapes: Optional[dict]=None,
        strict: Optional[bool]=None) -> torch.export.ExportedProgram:
    """Export the wrapped module using `torch.export`.

    Args:
        input_ids (`Optional[torch.Tensor]`):
            Tensor representing current input token id to the module. If not
            provided, a default tensor will be used.
        cache_position (`Optional[torch.Tensor]`):
            Tensor representing current input position in the cache. If not
            provided, a default tensor will be used.
        dynamic_shapes (`Optional[dict]`):
            Dynamic shapes to use for export if specified.
        strict (`Optional[bool]`):
            Flag to instruct `torch.export` to use `torchdynamo`; defaults
            to True when not given.
    """
    # Register an SDPA attention variant whose mask construction avoids
    # vmap -- presumably because vmap is not traceable by torch.export;
    # confirm against transformers' masking utilities.
    ALL_MASK_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', sdpa_mask_without_vmap)
    ALL_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', ALL_ATTENTION_FUNCTIONS['sdpa'])
    self.model.model.config._attn_implementation = 'sdpa_without_vmap'
    # Fall back to minimal single-token example inputs when none are given.
    example_input_ids = input_ids if input_ids is not None else torch.tensor([[1]], dtype=torch.long)
    example_cache_position = cache_position if cache_position is not None else torch.tensor([0], dtype=torch.long)
    exported_program = torch.export.export(
        self.model,
        args=(example_input_ids, example_cache_position),
        kwargs={},
        dynamic_shapes=dynamic_shapes,
        strict=strict if strict is not None else True)
    return exported_program
Export the wrapped module using `torch.export`. Args: input_ids (`Optional[torch.Tensor]`): Tensor representing current input token id to the module. If not provided, a default tensor will be used. cache_position (`Optional[torch.Tensor]`): Tensor representing current input position in the cache. If not provided, a default tensor will be used. dynamic_shapes (`Optional[dict]`): Dynamic shapes to use for export if specified. strict(`Optional[bool]`): Flag to instruct `torch.export` to use `torchdynamo`.
github-repos
def start_upsert(ini_data):
    """Helper function to facilitate a CloudFormation stack upsert.

    Args:
        ini_data: the dictionary of info needed to run the upsert.

    Exit:
        0 - good
        1 - bad
    """
    stack_driver = CloudStackUtility(ini_data)
    poll_stack = not ini_data.get('no_poll', False)

    if not stack_driver.upsert():
        logging.error('start of stack create/update did not go well.')
        sys.exit(1)

    logging.info('stack create/update was started successfully.')
    if not poll_stack:
        return

    # Best-effort construction of the stack inspection tool; failures here
    # must not abort polling.
    stack_tool = None
    try:
        profile = ini_data.get('environment', {}).get('profile')
        if profile:
            boto3_session = boto3.session.Session(profile_name=profile)
        else:
            boto3_session = boto3.session.Session()
        region = ini_data['environment']['region']
        stack_name = ini_data['environment']['stack_name']
        cf_client = stack_driver.get_cloud_formation_client()
        if not cf_client:
            cf_client = boto3_session.client('cloudformation', region_name=region)
        # NOTE: the original had a duplicated assignment here
        # (stack_tool = stack_tool = StackTool(...)); fixed.
        stack_tool = StackTool(stack_name, region, cf_client)
    except Exception as wtf:
        logging.warning('there was a problems creating stack tool: {}'.format(wtf))

    if stack_driver.poll_stack():
        try:
            logging.info('stack create/update was finished successfully.')
            stack_tool.print_stack_info()
        except Exception as wtf:
            logging.warning('there was a problems printing stack info: {}'.format(wtf))
        sys.exit(0)
    else:
        try:
            logging.error('stack create/update was did not go well.')
            stack_tool.print_stack_events()
        except Exception as wtf:
            logging.warning('there was a problems printing stack events: {}'.format(wtf))
        sys.exit(1)
Helper function to facilitate a stack upsert. Args: ini_data - the dictionary of info needed to run the upsert Exit: 0 - good 1 - bad
juraj-google-style
def HasBalance(self, assetId):
    """Flag indicating if the asset has a balance.

    Args:
        assetId (UInt256):

    Returns:
        bool: True if a balance is present. False otherwise.
    """
    # Direct dict membership replaces the original manual loop that
    # compared each key with == (same semantics, O(1) instead of O(n)).
    return assetId in self.Balances
Flag indicating if the asset has a balance. Args: assetId (UInt256): Returns: bool: True if a balance is present. False otherwise.
juraj-google-style
def show(self, *args, **kwargs):
    """Shows the pourbaix plot.

    Args:
        *args: args to get_pourbaix_plot
        **kwargs: kwargs to get_pourbaix_plot

    Returns:
        None
    """
    # Build the plot, then hand control to the plotting backend.
    self.get_pourbaix_plot(*args, **kwargs).show()
Shows the pourbaix plot Args: *args: args to get_pourbaix_plot **kwargs: kwargs to get_pourbaix_plot Returns: None
juraj-google-style
def _generate_bucket_value(self, bucketing_id):
    """Helper function to generate bucket value in half-closed interval
    [0, MAX_TRAFFIC_VALUE).

    Args:
        bucketing_id: ID for bucketing.

    Returns:
        Bucket value corresponding to the provided bucketing ID.
    """
    # Map the 32-bit hash onto [0, 1), then scale into the traffic range.
    hash_code = self._generate_unsigned_hash_code_32_bit(bucketing_id)
    ratio = float(hash_code) / MAX_HASH_VALUE
    return math.floor(ratio * MAX_TRAFFIC_VALUE)
Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE). Args: bucketing_id: ID for bucketing. Returns: Bucket value corresponding to the provided bucketing ID.
codesearchnet
def define_lattice_from_file(self, filename, cell_lengths):
    """Set up the simulation lattice from a file containing site data.

    Uses `init_lattice.lattice_from_sites_file`, which defines the site
    file spec.

    Args:
        filename (Str): sites file filename.
        cell_lengths (List(x,y,z)): cell lengths for the simulation cell.

    Returns:
        None
    """
    self.lattice = init_lattice.lattice_from_sites_file(
        filename, cell_lengths=cell_lengths)
Set up the simulation lattice from a file containing site data. Uses `init_lattice.lattice_from_sites_file`, which defines the site file spec. Args: filename (Str): sites file filename. cell_lengths (List(x,y,z)): cell lengths for the simulation cell. Returns: None
juraj-google-style
def settings_view_for_block(block_wrapper, settings_view_factory):
    """Returns the settings view for an arbitrary block.

    Args:
        block_wrapper (BlockWrapper): The block for which a settings
            view is to be returned
        settings_view_factory (SettingsViewFactory): The settings
            view factory used to create the SettingsView object

    Returns:
        SettingsView object associated with the block
    """
    if block_wrapper is None:
        # No block available (e.g. before genesis): use the default view.
        state_root_hash = None
    else:
        state_root_hash = block_wrapper.state_root_hash
    return settings_view_factory.create_settings_view(state_root_hash)
Returns the settings view for an arbitrary block. Args: block_wrapper (BlockWrapper): The block for which a settings view is to be returned settings_view_factory (SettingsViewFactory): The settings view factory used to create the SettingsView object Returns: SettingsView object associated with the block
juraj-google-style
def contains(self, name):
    """Checks if the specified bucket exists.

    Args:
        name: the name of the bucket to lookup.

    Returns:
        True if the bucket exists; False otherwise.

    Raises:
        Exception if there was an error requesting information about the
        bucket.
    """
    try:
        self._api.buckets_get(name)
    except google.datalab.utils.RequestException as e:
        if e.status == 404:
            return False
        # Bare raise keeps the original traceback; `raise e` reset it.
        raise
    # NOTE: the original also had `except Exception as e: raise e`, a
    # no-op handler that only truncated tracebacks; removed.
    return True
Checks if the specified bucket exists. Args: name: the name of the bucket to lookup. Returns: True if the bucket exists; False otherwise. Raises: Exception if there was an error requesting information about the bucket.
juraj-google-style
def minimum_image_dr(self, r1, r2, cutoff=None):
    """Calculate the shortest distance between two points in the cell,
    accounting for periodic boundary conditions.

    Args:
        r1 (np.array): fractional coordinates of point r1.
        r2 (np.array): fractional coordinates of point r2.
        cutoff (:obj:`float`, optional): if set, return zero if the minimum
            distance is greater than `cutoff`. Defaults to None.

    Returns:
        (float): The distance between r1 and r2.
    """
    # Shift r2 into the periodic image nearest r1, then measure that
    # displacement from the origin.
    delta = self.minimum_image(r1, r2)
    return self.dr(np.zeros(3), delta, cutoff)
Calculate the shortest distance between two points in the cell, accounting for periodic boundary conditions. Args: r1 (np.array): fractional coordinates of point r1. r2 (np.array): fractional coordinates of point r2. cutoff (:obj: `float`, optional): if set, return zero if the minimum distance is greater than `cutoff`. Defaults to None. Returns: (float): The distance between r1 and r2.
codesearchnet
def __init__(self, ps_tasks, ps_device, worker_device, merge_devices, ps_ops, ps_strategy):
    """Create a new `_ReplicaDeviceChooser`.

    Args:
        ps_tasks: Number of tasks in the `ps` job.
        ps_device: String. Name of the `ps` job.
        worker_device: String. Name of the `worker` job.
        merge_devices: Boolean. Set to True to allow merging of device
            specs.
        ps_ops: List of strings representing `Operation` types that need to
            be placed on `ps` devices.
        ps_strategy: A callable invoked for every ps `Operation` (i.e.
            matched by `ps_ops`), that takes the `Operation` and returns the
            ps task index to use.
    """
    self._ps_tasks = ps_tasks
    self._ps_device = ps_device
    self._worker_device = worker_device
    self._merge_devices = merge_devices
    self._ps_ops = ps_ops
    self._ps_strategy = ps_strategy
Create a new `_ReplicaDeviceChooser`. Args: ps_tasks: Number of tasks in the `ps` job. ps_device: String. Name of the `ps` job. worker_device: String. Name of the `worker` job. merge_devices: Boolean. Set to True to allow merging of device specs. ps_ops: List of strings representing `Operation` types that need to be placed on `ps` devices. ps_strategy: A callable invoked for every ps `Operation` (i.e. matched by `ps_ops`), that takes the `Operation` and returns the ps task index to use.
github-repos
def __init__(self, chunks: typing.List[str], separator: str):
    """Initializes the parser.

    Args:
        chunks (List[str]): The chunks to resolve.
        separator (str): The separator string.
    """
    HTMLParser.__init__(self)
    # Join chunks with the module-level SEP marker -- presumably so chunk
    # boundaries survive the HTML parse; confirm against SEP's definition.
    self.chunks_joined = SEP.join(chunks)
    self.separator = separator
    # to_skip / scan_index: parser state, updated as the HTML is fed in --
    # presumably "skip current element content" and "position within
    # chunks_joined"; confirm against the handler methods.
    self.to_skip = False
    self.scan_index = 0
    # LIFO stack of the currently-open elements.
    self.element_stack: queue.LifoQueue[ElementState] = queue.LifoQueue()
Initializes the parser. Args: chunks (List[str]): The chunks to resolve. separator (str): The separator string.
github-repos
def inspect_secret(self, id):
    """Retrieve secret metadata.

    Args:
        id (string): Full ID of the secret to inspect.

    Returns (dict): A dictionary of metadata

    Raises:
        :py:class:`docker.errors.NotFound`
            if no secret with that ID exists
    """
    endpoint = self._url('/secrets/{0}', id)
    response = self._get(endpoint)
    return self._result(response, True)
Retrieve secret metadata Args: id (string): Full ID of the secret to remove Returns (dict): A dictionary of metadata Raises: :py:class:`docker.errors.NotFound` if no secret with that ID exists
codesearchnet
def add_minute(self, minute):
    """Create a new DateTime after the minutes are added.

    Args:
        minute: An integer value for minutes.

    Returns:
        A new instance offset by ``minute`` minutes.
    """
    # Work in minute-of-year space, then rebuild via the named constructor.
    new_moy = self.moy + int(minute)
    return self.__class__.from_moy(new_moy)
Create a new DateTime after the minutes are added. Args: minute: An integer value for minutes.
juraj-google-style
def set_agent(self, agent):
    """Links behaviour with its owner agent.

    Args:
        agent (spade.agent.Agent): the agent who owns the behaviour
    """
    self.agent = agent
    # asyncio.Queue's explicit ``loop`` argument was deprecated in Python
    # 3.8 and removed in 3.10; the queue now binds to the running loop
    # lazily, so the original ``loop=self.agent.loop`` is dropped.
    self.queue = asyncio.Queue()
    self.presence = agent.presence
    self.web = agent.web
Links behaviour with its owner agent Args: agent (spade.agent.Agent): the agent who owns the behaviour
codesearchnet
def _build(self, ids):
    """Lookup embeddings.

    Looks up an embedding vector for each value in `ids`. All ids must be
    within [0, vocab_size), else an `InvalidArgumentError` is raised at
    runtime.

    Args:
        ids: Tensor of dtype int64.

    Returns:
        Tensor of tf.shape(ids) + [embedding_dim] and dtype float32.
    """
    if self._existing_vocab is None:
        # No pre-trained vocab supplied: create a trainable embedding
        # matrix, defaulting to random-normal init if none was given.
        if self.EMBEDDINGS not in self._initializers:
            self._initializers[self.EMBEDDINGS] = tf.initializers.random_normal()
        self._embeddings = tf.get_variable(
            "embeddings",
            shape=[self._vocab_size, self._embed_dim],
            dtype=tf.float32,
            initializer=self._initializers[self.EMBEDDINGS],
            partitioner=self._partitioners.get(self.EMBEDDINGS, None),
            regularizer=self._regularizers.get(self.EMBEDDINGS, None),
            trainable=self._trainable)
    else:
        # Pre-trained vocab supplied: initialize the variable from it
        # directly (shape is inferred from the initializer tensor).
        self._embeddings = tf.get_variable(
            "embeddings",
            dtype=tf.float32,
            initializer=self._existing_vocab,
            regularizer=self._regularizers.get(self.EMBEDDINGS, None),
            trainable=self._trainable)

    if self._densify_gradients:
        # Route through util.convert_gradient_to_tensor -- presumably to
        # force dense (non-IndexedSlices) gradients; confirm in util docs.
        embeddings = util.convert_gradient_to_tensor(self._embeddings)
    else:
        embeddings = self._embeddings

    return tf.nn.embedding_lookup(embeddings, ids, name="embedding_lookup")
Lookup embeddings. Looks up an embedding vector for each value in `ids`. All ids must be within [0, vocab_size), else an `InvalidArgumentError` is raised at runtime. Args: ids: Tensor of dtype int64. Returns: Tensor of tf.shape(ids) + [embedding_dim] and dtype float32.
juraj-google-style
def console_wait_for_keypress(flush: bool) -> Key:
    """Block until the user presses a key, then returns a new Key.

    Args:
        flush bool: If True then the event queue is cleared before waiting
            for the next event.

    Returns:
        Key: A new Key instance.

    .. deprecated:: 9.3
        Use the :any:`tcod.event.wait` function to wait for events.
    """
    pressed = Key()
    # The wrapper fills pressed.key_p in place with the next keypress.
    lib.TCOD_console_wait_for_keypress_wrapper(pressed.key_p, flush)
    return pressed
Block until the user presses a key, then returns a new Key. Args: flush bool: If True then the event queue is cleared before waiting for the next event. Returns: Key: A new Key instance. .. deprecated:: 9.3 Use the :any:`tcod.event.wait` function to wait for events.
juraj-google-style
def attribute(
    self, main_type, sub_type, unique_id, attribute_id, action='GET', owner=None, params=None
):
    """Build and send a request for a single attribute of a resource.

    Args:
        owner: Owner name added to the query parameters, if given.
        main_type: Primary API type (e.g. groups, indicators).
        sub_type: Sub type of the resource, if any.
        unique_id: ID of the resource holding the attribute.
        attribute_id: ID of the attribute itself.
        action: One of GET or DELETE (case-insensitive).
        params: Extra query parameters.

    Return:
        The session response, or None for unsupported actions.
    """
    params = params or {}
    if owner:
        params['owner'] = owner
    action = action.upper()

    # Sub-typed resources nest one level deeper in the URL.
    if sub_type:
        url = '/v2/{}/{}/{}/attributes/{}'.format(main_type, sub_type, unique_id, attribute_id)
    else:
        url = '/v2/{}/{}/attributes/{}'.format(main_type, unique_id, attribute_id)

    if action == 'GET':
        return self.tcex.session.get(url, params=params)
    if action == 'DELETE':
        return self.tcex.session.delete(url, params=params)
    # Unsupported actions fall through to None, matching the original API.
    return None
Args: owner: main_type: sub_type: unique_id: attribute_id: action: params: Return:
juraj-google-style
def get(self, **params):
    """Performs a get request to the biomart service.

    Args:
        **params (dict of str: any): Arbitrary keyword arguments, which are
            added as parameters to the get request to biomart.

    Returns:
        requests.models.Response: Response from biomart for the request.
    """
    if self._use_cache:
        response = requests.get(self.url, params=params)
    else:
        # Bypass the requests_cache layer for uncached requests.
        with requests_cache.disabled():
            response = requests.get(self.url, params=params)
    response.raise_for_status()
    return response
Performs get request to the biomart service. Args: **params (dict of str: any): Arbitrary keyword arguments, which are added as parameters to the get request to biomart. Returns: requests.models.Response: Response from biomart for the request.
juraj-google-style
def get_source(label, source_type, **kwargs):
    """Get a config source based on type and keyword args.

    This is meant to be used internally by the spec via ``add_source``.

    Args:
        label (str): The label for this source.
        source_type: The type of source. See ``yapconf.SUPPORTED_SOURCES``.

    Keyword Args:
        The keyword arguments are based on the source_type. Please see the
        documentation of the individual sources for a detailed list of all
        possible arguments.

    Returns:
        yapconf.sources.ConfigSource: A valid config source which can be
        used for generating an override.

    Raises:
        YapconfSourceError: If there is some kind of error with this source
            definition.
    """
    if (source_type not in yapconf.ALL_SUPPORTED_SOURCES):
        raise YapconfSourceError(('Invalid source type %s. Supported types are %s.' % (source_type, yapconf.ALL_SUPPORTED_SOURCES)))
    if (source_type not in yapconf.SUPPORTED_SOURCES):
        raise YapconfSourceError(('Unsupported source type "%s". If you want to use this type, you will need to install the correct client for it (try `pip install yapconf[%s]. Currently supported types are %s. All supported types are %s' % (source_type, source_type, yapconf.SUPPORTED_SOURCES, yapconf.ALL_SUPPORTED_SOURCES)))

    if (source_type == 'dict'):
        return DictConfigSource(label, data=kwargs.get('data'))
    elif (source_type == 'json'):
        return JsonConfigSource(label, **kwargs)
    elif (source_type == 'yaml'):
        # pop() fetches and removes in one step (the original did a get()
        # followed by a guarded pop()), so remaining kwargs pass through.
        filename = kwargs.pop('filename', None)
        return YamlConfigSource(label, filename, **kwargs)
    elif (source_type == 'environment'):
        return EnvironmentConfigSource(label)
    elif (source_type == 'etcd'):
        return EtcdConfigSource(label, kwargs.get('client'), kwargs.get('key', '/'))
    elif (source_type == 'kubernetes'):
        name = kwargs.pop('name', None)
        client = kwargs.pop('client', None)
        return KubernetesConfigSource(label, client, name, **kwargs)
    else:
        raise NotImplementedError(('No implementation for source type %s' % source_type))
Get a config source based on type and keyword args. This is meant to be used internally by the spec via ``add_source``. Args: label (str): The label for this source. source_type: The type of source. See ``yapconf.SUPPORTED_SOURCES`` Keyword Args: The keyword arguments are based on the source_type. Please see the documentation of the individual sources for a detailed list of all possible arguments. Returns (yapconf.sources.ConfigSource): A valid config source which can be used for generating an override. Raises: YapconfSourceError: If there is some kind of error with this source definition.
codesearchnet
def forward(self, logits, labels):
    """Compute every configured loss for the predicted time span.

    Args:
        logits (`torch.FloatTensor`): The output logits of the head module;
            columns 0 and 1 hold the normalized start/end positions.
        labels (`List[torch.FloatTensor]`): List of tensors
            ``[start, end, duration]`` with start time, end time of the
            video corresponding to the text, and the video duration.

    Returns:
        dict: Mapping from loss name to its computed value.
    """
    duration, start_time, end_time = labels
    # Scale the normalized logits back to absolute timestamps.
    spans = torch.mul(logits, duration)
    pred_start = spans[:, 0].float()
    pred_end = spans[:, 1].float()

    losses_dict = {}
    for name in self.losses:
        losses_dict[name] = self.loss_map[name](
            start_time, end_time, pred_start, pred_end, duration
        )
    return losses_dict
This performs the loss computation. Args: logits (`torch.FloatTensor`): The output logits of head module. labels (`List[torch.FloatTensor]`): List of tensors ([start, end, duration]), which contains start time, end time of the video corresponding to the text, and also the duration.
github-repos
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Cycle through a list of values a specified number of times.

    Args:
        variables: The input specification; must be a dict containing a
            ``times`` key alongside the variables to repeat.
        parent: The variable for which the values are being generated.

    Yields:
        A list of dictionaries mapping the parent to each value, repeated
        ``times`` times.

    Raises:
        ValueError: If ``variables`` is not a dict or lacks ``times``.
    """
    if not isinstance(variables, dict):
        raise ValueError(f'The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}')
    if not variables.get('times'):
        raise ValueError(f'times is a required keyword for the repeat iterator.')
    # NOTE: popping 'times' mutates the caller's dict, matching the
    # original `del variables["times"]` behaviour.
    repeat_count = int(variables.pop('times'))
    yield list(variable_matrix(variables, parent, 'product')) * repeat_count
Cycle through a list of values a specified number of times.

Args:
    variables: The input variables describing the values to repeat; must
        include a ``times`` key.
    parent: The variable for which the values are being generated.

Yields:
    A list of dictionaries mapping the parent to each value, repeated the
    requested number of times.
codesearchnet
def _add_tag(self, tag):
    """Add a tag to this object's data.

    Args:
        tag (str): Tag to add.

    Returns:
        bool: True if the tag was added, False if it was already present.
    """
    existing = self.data.get('tags', None)
    if existing:
        # Tags are stored as dicts of the form {'name': <tag>}.
        if any(entry['name'] == tag for entry in existing):
            return False
    else:
        existing = []
    existing.append({'name': tag})
    self.data['tags'] = existing
    return True
Add a tag Args: tag (str): Tag to add Returns: bool: True if tag added or False if tag already present
juraj-google-style
def check_tweet(tweet, validation_checking=False):
    """Ensure a tweet is valid and determine its payload format.

    Args:
        tweet (dict/Tweet): The tweet payload.
        validation_checking (bool): Check for valid key structure in a
            tweet.

    Returns:
        bool: True for original format, False for activity-streams format.

    Raises:
        NotATweetError: If the payload has no ``id`` key.
    """
    if "id" not in tweet:
        raise NotATweetError("This text has no 'id' key")

    original_format = is_original_format(tweet)
    # Dispatch to the checker matching the detected payload format.
    checker = (
        _check_original_format_tweet
        if original_format
        else _check_activity_streams_tweet
    )
    checker(tweet, validation_checking=validation_checking)
    return original_format
Ensures a tweet is valid and determines the type of format for the tweet. Args: tweet (dict/Tweet): the tweet payload validation_checking (bool): check for valid key structure in a tweet.
juraj-google-style
def bulk_get_or_create(self, data_list):
    """Get or create records in bulk.

    Each key of ``data_list`` is a record lookup key; each value holds the
    kwargs used to construct the model when the record does not exist.
    All missing records are created with a single ``bulk_create`` call.

    Args:
        data_list (dict): Mapping of record key -> model constructor kwargs.

    Returns:
        The refreshed record lookup mapping.
    """
    items_to_create = {}
    # dict keys are unique, so each record key is inspected exactly once;
    # the original's `record_key not in items_to_create` guard was
    # redundant and has been dropped.
    for record_key, record_config in data_list.items():
        if not self.get_instance(record_key):
            items_to_create[record_key] = self.model_cls(**record_config)

    if items_to_create:
        self.model_cls.objects.bulk_create(items_to_create.values())
        # Refresh the lookup so the newly created rows become visible.
        self.set_record_lookup(True)

    return self.record_lookup
Get or create records in bulk.

``data_list`` maps each record key to the data used to get or create it.
We generate the query and set all the record keys based on the passed-in
queryset, then loop over each item in ``data_list``, which already carries
the keys -- there is no need to regenerate them, which should save a lot
of time. Values are used instead of whole objects because they are much
faster to work with.

Args:
    data_list: Mapping of record key to record configuration.

Returns:
    The record lookup mapping.
juraj-google-style
def eq(self, other, axis="columns", level=None):
    """Check element-wise that this is equal to other.

    Args:
        other: A DataFrame or Series or scalar to compare to.
        axis: The axis to perform the eq over.
        level: The MultiIndex level to apply eq over.

    Returns:
        A new DataFrame filled with booleans.
    """
    # Delegate to the shared binary-operator dispatcher.
    result = self._binary_op("eq", other, axis=axis, level=level)
    return result
Checks element-wise that this is equal to other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the eq over. level: The Multilevel index level to apply eq over. Returns: A new DataFrame filled with Booleans.
juraj-google-style
def peek_with_kwargs(init, args=()):
    """Make datatypes passing keyworded arguments to the constructor.

    This is a factory function; returns the actual `peek` routine.

    Args:
        init (callable): Type constructor.
        args (iterable): Arguments NOT to be keyworded; order does matter.

    Returns:
        callable: Deserializer (`peek` routine).

    All the peeked attributes that are not referenced in `args` are passed
    to `init` as keyword arguments.
    """
    # Freeze the sequence: avoids the shared-mutable-default pitfall the
    # original `args=[]` had, and shields the closure from later mutation
    # of the caller's list.
    positional = tuple(args)

    def peek(store, container, _stack=None):
        posargs = [
            store.peek(attr, container, _stack=_stack) for attr in positional
        ]
        kwargs = {
            attr: store.peek(attr, container, _stack=_stack)
            for attr in container
            if attr not in positional
        }
        return init(*posargs, **kwargs)

    return peek
Make datatypes passing keyworded arguments to the constructor. This is a factory function; returns the actual `peek` routine. Arguments: init (callable): type constructor. args (iterable): arguments NOT to be keyworded; order does matter. Returns: callable: deserializer (`peek` routine). All the peeked attributes that are not referenced in `args` are passed to `init` as keyworded arguments.
juraj-google-style
def get_catalog_courses(self, catalog_id):
    """Return the courses included in a single course catalog by ID.

    Args:
        catalog_id (int): The catalog ID we want to retrieve.

    Returns:
        list: Courses of the catalog in question.
    """
    endpoint = self.CATALOGS_COURSES_ENDPOINT.format(catalog_id)
    # Fall back to an empty list when the catalog has no courses.
    return self._load_data(endpoint, default=[])
Return the courses included in a single course catalog by ID. Args: catalog_id (int): The catalog ID we want to retrieve. Returns: list: Courses of the catalog in question
codesearchnet
def delete_variants(adapter, vcf_obj, case_obj, case_id=None):
    """Delete variants for a case in the database.

    Args:
        adapter (loqusdb.plugins.Adapter): Database adapter.
        vcf_obj (iterable(dict)): Iterable of raw VCF variants.
        case_obj (dict): Case information; provides 'case_id' fallback.
        case_id (str): Optional explicit case identifier.

    Returns:
        nr_deleted (int): Number of deleted variants.
    """
    case_id = case_id or case_obj['case_id']
    nr_deleted = 0
    # NOTE(review): start_deleting is recorded but never used below --
    # presumably intended for a total-time log line that was dropped.
    start_deleting = datetime.now()
    chrom_time = datetime.now()
    current_chrom = None
    new_chrom = None
    for variant in vcf_obj:
        formated_variant = build_variant(
            variant=variant,
            case_obj=case_obj,
            case_id=case_id,
        )
        # Skip variants that could not be formatted.
        if not formated_variant:
            continue
        new_chrom = formated_variant.get('chrom')
        adapter.delete_variant(formated_variant)
        nr_deleted += 1
        # First deleted variant: initialize per-chromosome tracking.
        if not current_chrom:
            LOG.info("Start deleting chromosome {}".format(new_chrom))
            current_chrom = new_chrom
            chrom_time = datetime.now()
            continue
        # Chromosome boundary crossed: log timing for the finished one.
        if new_chrom != current_chrom:
            LOG.info("Chromosome {0} done".format(current_chrom))
            LOG.info("Time to delete chromosome {0}: {1}".format(
                current_chrom, datetime.now()-chrom_time))
            LOG.info("Start deleting chromosome {0}".format(new_chrom))
            current_chrom = new_chrom
    return nr_deleted
Delete variants for a case in the database Args: adapter(loqusdb.plugins.Adapter) vcf_obj(iterable(dict)) ind_positions(dict) case_id(str) Returns: nr_deleted (int): Number of deleted variants
juraj-google-style
def _parse_getprop_output(self, output):
    """Parse the raw output of `adb shell getprop` into a dictionary.

    Args:
        output: byte str, the raw output of the `adb shell getprop` call.

    Returns:
        dict, name-value pairs of the properties.
    """
    text = output.decode('utf-8', errors='ignore').replace('\r\n', '\n')
    properties = {}
    # Each property is printed as "[name]: [value]". Values may span
    # multiple lines, so entries are split on the closing bracket plus
    # newline rather than on bare newlines.
    for entry in text.split(']\n'):
        if not entry:
            continue
        try:
            raw_name, raw_value = entry.split(': ', 1)
        except ValueError:
            logging.debug('Failed to parse adb getprop line %s', entry)
            continue
        prop_name = raw_name.strip()[1:-1]
        if raw_value.startswith('['):
            raw_value = raw_value[1:]
        properties[prop_name] = raw_value
    return properties
Parses the raw output of `adb shell getprop` into a dictionary. Args: output: byte str, the raw output of the `adb shell getprop` call. Returns: dict, name-value pairs of the properties.
github-repos
def __eq__(self, other):
    """Two channels are the same if they are of the same type, and have
    the same index.

    Args:
        other (Channel): Other channel.

    Returns:
        bool: Whether self and other are equal.
    """
    # Strict type comparison: subclasses never compare equal.
    if type(self) is type(other) and self._index == other._index:
        return True
    return False
Two channels are the same if they are of the same type, and have the same index. Args: other (Channel): other Channel Returns: bool: are self and other equal.
juraj-google-style
def register(self, name, namespace):
    """Register a new namespace with the Configuration object.

    Args:
        name (str): The name of the section/namespace.
        namespace (namespace.Namespace): The Namespace object to store.

    Raises:
        TypeError: If the namespace is not a Namespace object.
        ValueError: If the namespace is already registered.
    """
    # Duplicate check comes first, matching the original behaviour.
    if name in self._NAMESPACES:
        raise ValueError(f"Namespace {name} already exists.")
    if not isinstance(namespace, ns.Namespace):
        raise TypeError("Namespaces must be of type Namespace.")
    self._NAMESPACES[name] = namespace
Register a new namespace with the Configuration object. Args: name (str): The name of the section/namespace. namespace (namespace.Namespace): The Namespace object to store. Raises: TypeError: If the namespace is not a Namespace object. ValueError: If the namespace is already registered.
juraj-google-style
def register_for_auto_class(cls, auto_class='FlaxAutoModel'):
    """Register this class with a given auto class.

    This should only be used for custom models as the ones in the library
    are already mapped with an auto class.

    Args:
        auto_class (`str` or `type`, *optional*, defaults to `"FlaxAutoModel"`):
            The auto class to register this new model with.
    """
    # Accept either the class object itself or its name.
    name = auto_class if isinstance(auto_class, str) else auto_class.__name__
    import transformers.models.auto as auto_module
    if not hasattr(auto_module, name):
        raise ValueError(f'{name} is not a valid auto class.')
    cls._auto_class = name
Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. Args: auto_class (`str` or `type`, *optional*, defaults to `"FlaxAutoModel"`): The auto class to register this new model with.
github-repos
def kmip_version(self, value):
    """Set the KMIP version for the client.

    Args:
        value (KMIPVersion): A KMIPVersion enumeration.

    Raises:
        ValueError: If value is not a KMIPVersion enumeration.

    Example:
        >>> client.kmip_version = enums.KMIPVersion.KMIP_1_1
    """
    # Guard clause: reject anything that is not a KMIPVersion member.
    if not isinstance(value, enums.KMIPVersion):
        raise ValueError("KMIP version must be a KMIPVersion enumeration")
    self._kmip_version = value
Set the KMIP version for the client. Args: value (KMIPVersion): A KMIPVersion enumeration Return: None Raises: ValueError: if value is not a KMIPVersion enumeration Example: >>> client.kmip_version = enums.KMIPVersion.KMIP_1_1 >>>
juraj-google-style
def __init__(self, amount):
    """CapitalFlow constructor.

    Args:
        amount (float): Amount to adjust by.
    """
    super(CapitalFlow, self).__init__()
    # Coerce to float so downstream arithmetic is always on floats.
    self.amount = float(amount)
CapitalFlow constructor. Args: * amount (float): Amount to adjust by
juraj-google-style
def _remove_boring_lines(text):
    """Remove lines that do not start with a letter or a quote.

    From inspecting the data, this seems to leave in most prose and remove
    most weird stuff.

    Args:
        text: a string

    Returns:
        a string
    """
    lines = text.split("\n")
    # Bug fix: the original class "[a-zA-z...]" also matched the ASCII
    # characters between 'Z' and 'a' ([, \, ], ^, _, `); "A-Z" is intended.
    filtered = [line for line in lines if re.match(r"[a-zA-Z\"']", line)]
    return "\n".join(filtered)
Remove lines that do not start with a letter or a quote. From inspecting the data, this seems to leave in most prose and remove most weird stuff. Args: text: a string Returns: a string
juraj-google-style
def payments(self, virtual_account_id, data={}, **kwargs):
    """Fetch payments for a Virtual Account Id.

    Args:
        virtual_account_id: Id for which Virtual Account objects have to
            be retrieved.
        data: Optional query data forwarded with the request.

    Returns:
        Payment dict for the given Virtual Account Id.
    """
    # NOTE: the mutable default `data={}` is kept for API compatibility;
    # it is only read here, never mutated.
    url = f'{self.base_url}/{virtual_account_id}/payments'
    return self.get_url(url, data, **kwargs)
Fetch Payment for Virtual Account Id Args: virtual_account_id : Id for which Virtual Account objects has to be retrieved Returns: Payment dict for given Virtual Account Id
codesearchnet
def _get_flat_core_sizes(cores):
    """Obtain the flattened output sizes of a list of cores.

    Args:
        cores: List of cores to get the shapes from.

    Returns:
        List of lists that, for each core, contains the list of its output
        dimensions.
    """
    # One inner list per core, each entry a shape expressed as a plain list.
    return [
        [tf.TensorShape(size).as_list() for size in nest.flatten(core.output_size)]
        for core in cores
    ]
Obtains the list flattened output sizes of a list of cores. Args: cores: list of cores to get the shapes from. Returns: List of lists that, for each core, contains the list of its output dimensions.
codesearchnet
def get_2d_local_memory(x, query_shape, memory_flange):
    """Stitches together the local 2d memory blocks.

    Args:
        x: a [batch, height, width, depth] tensor.
        query_shape: 2-d integer list of query shape.
        memory_flange: 2-d integer list of memory flanges.

    Returns:
        x: A [batch, num_h_blocks, num_w_blocks,
        query_shape[0]+2*memory_flange[0],
        query_shape[1]+2*memory_flange[1]] tensor.
    """
    (_, height, width, depth_x) = common_layers.shape_list(x)
    # Center blocks come straight from the unpadded input.
    x_center_blocks = _extract_blocks(x, query_shape[0], query_shape[1])
    # Zero-pad by the flange on both spatial axes so edge blocks have
    # full-size neighborhoods.
    paddings = [[0, 0], [memory_flange[0], memory_flange[0]], [memory_flange[1], memory_flange[1]], [0, 0]]
    padded_x = tf.pad(x, paddings)
    padded_x.set_shape([None, (height + (2 * memory_flange[0])), (width + (2 * memory_flange[1])), depth_x])
    x_outer_memory_blocks = _extract_blocks(padded_x, memory_flange[0], memory_flange[1])
    # Left/right neighbor strips.
    (x_left_blocks, x_right_blocks) = _get_left_right_blocks(x_outer_memory_blocks)
    # Transposing height/width lets the same left/right helper produce the
    # top/bottom strips; transpose back afterwards.
    t_hw_block = (lambda x: tf.transpose(x, [0, 2, 1, 4, 3, 5]))
    (x_top_center_blocks, x_bottom_center_blocks) = map(t_hw_block, _get_left_right_blocks(t_hw_block(x_outer_memory_blocks)))
    # Corner pieces: split along width, then split those along height
    # (again via a transpose trick).
    (x_left_corner_blocks, x_right_corner_blocks) = _split_along_width(x_outer_memory_blocks)
    t_hw = (lambda x: tf.transpose(x, [0, 2, 1, 3, 4, 5]))
    (x_top_left_corner_blocks, x_bottom_left_corner_blocks) = map(t_hw, _split_along_width(t_hw(x_left_corner_blocks)))
    (x_top_right_corner_blocks, x_bottom_right_corner_blocks) = map(t_hw, _split_along_width(t_hw(x_right_corner_blocks)))
    # Assemble the 3x3 neighborhood: concat each row along width (axis=4),
    # then stack the rows along height (axis=3).
    x_top_memory = tf.concat([x_top_left_corner_blocks, x_top_center_blocks, x_top_right_corner_blocks], axis=4)
    x_middle_memory = tf.concat([x_left_blocks, x_center_blocks, x_right_blocks], axis=4)
    x_bottom_memory = tf.concat([x_bottom_left_corner_blocks, x_bottom_center_blocks, x_bottom_right_corner_blocks], axis=4)
    x = tf.concat([x_top_memory, x_middle_memory, x_bottom_memory], axis=3)
    return x
Stitches together the local 2d memory blocks. Args: x: a [batch, height, width, depth tensor] query_shape: 2-d integer list of query shape memory_flange: 2-d integer list of memory flanges Returns: x: A [batch, num_h_blocks, num_w_blocks, query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]] tensor.
codesearchnet
def _CreateLineReader(self, file_object):
    """Creates an object that reads lines from a text file.

    The line reader is advanced to the beginning of the DSV content,
    skipping any header lines.

    Args:
        file_object (dfvfs.FileIO): file-like object.

    Returns:
        TextFile|BinaryLineReader: an object that implements an iterator
        over lines in a text file.

    Raises:
        UnicodeDecodeError: if the file cannot be read with the specified
        encoding.
    """
    # Python 3 uses a decoding text reader; Python 2 reads raw bytes. The
    # two classes expose their buffer-size constant under different names.
    if py2to3.PY_3:
        line_reader = text_file.TextFile(file_object, encoding=self._encoding, end_of_line=self._end_of_line)
        maximum_read_buffer_size = line_reader._MAXIMUM_READ_BUFFER_SIZE
    else:
        line_reader = line_reader_file.BinaryLineReader(file_object, end_of_line=self._end_of_line)
        maximum_read_buffer_size = line_reader.MAXIMUM_READ_BUFFER_SIZE
    # Clamp the line length so a single readline cannot exceed the
    # reader's internal buffer.
    if (self._maximum_line_length > maximum_read_buffer_size):
        self._maximum_line_length = (maximum_read_buffer_size - 1)
    # Consume and discard the header lines.
    for _ in range(0, self.NUMBER_OF_HEADER_LINES):
        line_reader.readline(self._maximum_line_length)
    return line_reader
Creates an object that reads lines from a text file. The line reader is advanced to the beginning of the DSV content, skipping any header lines. Args: file_object (dfvfs.FileIO): file-like object. Returns: TextFile|BinaryLineReader: an object that implements an iterator over lines in a text file. Raises: UnicodeDecodeError: if the file cannot be read with the specified encoding.
codesearchnet
def walk(self, walk_func):
    """Walk each node of the graph in reverse topological order.

    This can be used to perform a set of operations, where the next
    operation depends on the previous operation. Walking happens serially
    and is not parallelized.

    Args:
        walk_func (:class:`types.FunctionType`): The function to be called
            on each node of the graph.
    """
    ordering = self.topological_sort()
    # Reverse so dependencies are visited before their dependents.
    ordering.reverse()
    for node in ordering:
        walk_func(node)
Walks each node of the graph in reverse topological order. This can be used to perform a set of operations, where the next operation depends on the previous operation. It's important to note that walking happens serially, and is not paralellized. Args: walk_func (:class:`types.FunctionType`): The function to be called on each node of the graph.
codesearchnet
def intent(method):
    """Help object methods handle MatrixRequestError.

    Args:
        method (function): Object method to be wrapped. The method's
            object must have a ``_handle_request_exception`` method that
            deals with specific status codes and errcodes.

    Returns:
        function: The wrapped method, which retries once after the
        request exception has been handled.
    """
    def wrapper(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except exceptions.MatrixError as error:
            original = error.original_exception
            if not isinstance(original, matrix_client.errors.MatrixRequestError):
                raise
            # Let the object react (e.g. refresh state), then retry once.
            self._handle_request_exception(error)
            return method(self, *args, **kwargs)
    return wrapper
Helps object methods handle MatrixRequestError. Args: method(function): Object method to be wrapped Method's object must have _handle_request_exception method that deals with specific status codes and errcodes.
codesearchnet
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Build model inputs from a sequence or a pair of sequences for
    sequence classification tasks by concatenating and adding special
    tokens. An ALBERT sequence has the following format:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: list of [input IDs](../glossary#input-ids) with the
        appropriate special tokens.
    """
    prefix = [self.cls_token_id]
    separator = [self.sep_token_id]
    sequence = prefix + token_ids_0 + separator
    if token_ids_1 is not None:
        sequence += token_ids_1 + separator
    return sequence
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ALBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def open(self, file_path, flags, mode=None, dir_fd=None):
    """Return the file descriptor for a FakeFile.

    Args:
        file_path: the path to the file.
        flags: low-level bits to indicate io operation.
        mode: bits to define default permissions. Note: only basic modes
            are supported, OS-specific modes are ignored.
        dir_fd: If not `None`, the file descriptor of a directory, with
            `file_path` being relative to this directory.

    Returns:
        A file descriptor.

    Raises:
        IOError: if the path cannot be found.
        ValueError: if invalid mode is given.
        NotImplementedError: if `os.O_EXCL` is used without `os.O_CREAT`.
    """
    file_path = self._path_with_dir_fd(file_path, self.open, dir_fd)
    if (mode is None):
        if self.filesystem.is_windows_fs:
            # 438 == 0o666: Windows default permissions.
            mode = 438
        else:
            # 511 == 0o777, masked by the process umask.
            mode = (511 & (~ self._umask()))
    # Decode the flag bits into the internal open-mode description.
    open_modes = _OpenModes(must_exist=(not (flags & os.O_CREAT)), can_read=(not (flags & os.O_WRONLY)), can_write=(flags & (os.O_RDWR | os.O_WRONLY)), truncate=(flags & os.O_TRUNC), append=(flags & os.O_APPEND), must_not_exist=(flags & os.O_EXCL))
    if (open_modes.must_not_exist and open_modes.must_exist):
        raise NotImplementedError('O_EXCL without O_CREAT mode is not supported')
    if ((not self.filesystem.is_windows_fs) and self.filesystem.exists(file_path)):
        obj = self.filesystem.resolve(file_path)
        if isinstance(obj, FakeDirectory):
            # Opening a directory is only allowed read-only on POSIX
            # (macOS additionally requires O_CREAT absence semantics).
            if (((not open_modes.must_exist) and (not self.filesystem.is_macos)) or open_modes.can_write):
                self.filesystem.raise_os_error(errno.EISDIR, file_path)
            dir_wrapper = FakeDirWrapper(obj, file_path, self.filesystem)
            file_des = self.filesystem._add_open_file(dir_wrapper)
            dir_wrapper.filedes = file_des
            return file_des
    str_flags = 'b'
    delete_on_close = False
    # O_TEMPORARY is Windows-only; honor it where the os module has it.
    if hasattr(os, 'O_TEMPORARY'):
        delete_on_close = ((flags & os.O_TEMPORARY) == os.O_TEMPORARY)
    fake_file = FakeFileOpen(self.filesystem, delete_on_close=delete_on_close, raw_io=True)(file_path, str_flags, open_modes=open_modes)
    # Do not chmod the null device.
    if (fake_file.file_object != self.filesystem.dev_null):
        self.chmod(file_path, mode)
    return fake_file.fileno()
Return the file descriptor for a FakeFile. Args: file_path: the path to the file flags: low-level bits to indicate io operation mode: bits to define default permissions Note: only basic modes are supported, OS-specific modes are ignored dir_fd: If not `None`, the file descriptor of a directory, with `file_path` being relative to this directory. New in Python 3.3. Returns: A file descriptor. Raises: IOError: if the path cannot be found ValueError: if invalid mode is given NotImplementedError: if `os.O_EXCL` is used without `os.O_CREAT`
codesearchnet
def get_agent_settings():
    """Determine the value of the SNMP sysContact, sysLocation, and
    sysServices settings.

    Returns:
        dict: A dictionary of the agent settings.

    CLI Example:

    .. code-block:: bash

        salt '*' win_snmp.get_agent_settings
    """
    ret = dict()
    # Sort service types by descending bitmask value (name breaks ties) so
    # the greedy bitmask decomposition below peels off the largest bit first.
    sorted_types = sorted(_SERVICE_TYPES.items(), key=(lambda x: ((- x[1]), x[0])))
    ret['services'] = list()
    ret['contact'] = __utils__['reg.read_value'](_HKEY, _AGENT_KEY, 'sysContact')['vdata']
    ret['location'] = __utils__['reg.read_value'](_HKEY, _AGENT_KEY, 'sysLocation')['vdata']
    current_bitmask = __utils__['reg.read_value'](_HKEY, _AGENT_KEY, 'sysServices')['vdata']
    if (current_bitmask == 0):
        # A zero bitmask maps to the last (smallest-valued) sentinel entry.
        ret['services'].append(sorted_types[(- 1)][0])
    else:
        for (service, bitmask) in sorted_types:
            if ((current_bitmask is not None) and (current_bitmask > 0)):
                # Subtract this service's bit; keep it only if it fits.
                remaining_bitmask = (current_bitmask - bitmask)
                if (remaining_bitmask >= 0):
                    current_bitmask = remaining_bitmask
                    ret['services'].append(service)
            else:
                break
    ret['services'] = sorted(ret['services'])
    return ret
Determine the value of the SNMP sysContact, sysLocation, and sysServices settings. Returns: dict: A dictionary of the agent settings. CLI Example: .. code-block:: bash salt '*' win_snmp.get_agent_settings
codesearchnet
def get_screenshot_as_png(obj, driver=None, timeout=5, **kwargs):
    """Get a screenshot of a ``LayoutDOM`` object.

    Args:
        obj (LayoutDOM or Document): a Layout (Row/Column), Plot or Widget
            object or Document to export.
        driver (selenium.webdriver): a selenium webdriver instance to use
            to export the image.
        timeout (int): the maximum amount of time to wait for
            initialization. It will be used as a timeout for loading Bokeh,
            then when waiting for the layout to be rendered.

    Returns:
        cropped_image (PIL.Image.Image): a pillow image loaded from PNG.

    .. warning::
        Responsive sizing_modes may generate layouts with unexpected size
        and aspect ratios. It is recommended to use the default ``fixed``
        sizing mode.
    """
    Image = import_required('PIL.Image', ('To use bokeh.io.export_png you need pillow ' + '("conda install pillow" or "pip install pillow")'))
    with _tmp_html() as tmp:
        html = get_layout_html(obj, **kwargs)
        with io.open(tmp.path, mode='w', encoding='utf-8') as file:
            file.write(decode_utf8(html))
        web_driver = (driver if (driver is not None) else webdriver_control.get())
        # NOTE(review): this statement was truncated in the source; restored
        # to load the temporary HTML file via a file:// URL.
        web_driver.get('file:///' + tmp.path)
        web_driver.maximize_window()
        web_driver.execute_script("document.body.style.width = '100%';")
        wait_until_render_complete(web_driver, timeout)
        png = web_driver.get_screenshot_as_png()
        # Measure the layout's bounding box so the screenshot can be cropped.
        b_rect = web_driver.execute_script(_BOUNDING_RECT_SCRIPT)
    image = Image.open(io.BytesIO(png))
    cropped_image = _crop_image(image, **b_rect)
    return cropped_image
Get a screenshot of a ``LayoutDOM`` object. Args: obj (LayoutDOM or Document) : a Layout (Row/Column), Plot or Widget object or Document to export. driver (selenium.webdriver) : a selenium webdriver instance to use to export the image. timeout (int) : the maximum amount of time to wait for initialization. It will be used as a timeout for loading Bokeh, then when waiting for the layout to be rendered. Returns: cropped_image (PIL.Image.Image) : a pillow image loaded from PNG. .. warning:: Responsive sizing_modes may generate layouts with unexpected size and aspect ratios. It is recommended to use the default ``fixed`` sizing mode.
codesearchnet
def cardinal(self, to):
    """Return the number of dependencies of this package to the given node.

    Args:
        to (Package/Module): Target node.

    Returns:
        int: Number of dependencies.
    """
    total = 0
    # Aggregate the cardinality reported by every submodule.
    for module in self.submodules:
        total += module.cardinal(to)
    return total
Return the number of dependencies of this package to the given node. Args: to (Package/Module): target node. Returns: int: number of dependencies.
juraj-google-style
def __init__(self, identifier=None, session_identifier=None):
    """Initializes a task completion attribute container.

    Args:
        identifier (Optional[str]): unique identifier of the task.
            The identifier should match that of the corresponding task
            start information.
        session_identifier (Optional[str]): identifier of the session the
            task is part of.
    """
    super(TaskCompletion, self).__init__()
    self.aborted = False
    self.identifier = identifier
    self.session_identifier = session_identifier
    # The timestamp is filled in later, when completion is recorded.
    self.timestamp = None
Initializes a task completion attribute container. Args: identifier (Optional[str]): unique identifier of the task. The identifier should match that of the corresponding task start information. session_identifier (Optional[str]): identifier of the session the task is part of.
juraj-google-style
def __init__(self, gl, parent=None):
    """REST manager constructor.

    Args:
        gl (Gitlab): :class:`~gitlab.Gitlab` connection to use to make
            requests.
        parent: REST object to which the manager is attached.
    """
    self.gitlab = gl
    self._parent = parent
    # Cache the computed URL path once, at construction time.
    self._computed_path = self._compute_path()
REST manager constructor. Args: gl (Gitlab): :class:`~gitlab.Gitlab` connection to use to make requests. parent: REST object to which the manager is attached.
juraj-google-style
def _read_mode_acopt(self, size, kind):
    """Read an Alternate Checksum Request (CHKSUM-REQ) option.

    Structure of TCP CHKSUM-REQ [RFC 1146][RFC 6247]:
        +----------+----------+----------+
        | Kind=14  | Length=3 |  chksum  |
        +----------+----------+----------+

    Args:
        size: int, length of option.
        kind: int, 14 (Alt-Chksum Request).

    Returns:
        dict: Extracted option with keys ``kind``, ``length`` and ``ac``
        (the checksum algorithm name).
    """
    algo_code = self._read_unpack(size)
    # Translate the numeric code into the algorithm name (or None).
    return dict(
        kind=kind,
        length=size,
        ac=chksum_opt.get(algo_code),
    )
Read Alternate Checksum Request option. Positional arguments: size - int, length of option kind - int, 14 (Alt-Chksum Request) Returns: * dict -- extracted Alternate Checksum Request (CHKSUM-REQ) option Structure of TCP CHKSUM-REQ [RFC 1146][RFC 6247]: +----------+----------+----------+ | Kind=14 | Length=3 | chksum | +----------+----------+----------+ Octets Bits Name Description 0 0 tcp.chksumreq.kind Kind (14) 1 8 tcp.chksumreq.length Length (3) 2 16 tcp.chksumreq.ac Checksum Algorithm
codesearchnet
def _WriteData(self, target, entry):
    """Write a PasswdMapEntry to the target cache.

    Args:
        target: A file-like object.
        entry: A PasswdMapEntry.

    Returns:
        Number of bytes written to the target.
    """
    fields = (entry.name, entry.passwd, entry.uid, entry.gid,
              entry.gecos, entry.dir, entry.shell)
    line = '%s:%s:%d:%d:%s:%s:%s' % fields
    # The trailing newline is written but not part of `line`, so add one
    # to the reported byte count.
    target.write(line.encode() + b'\n')
    return len(line) + 1
Write a PasswdMapEntry to the target cache. Args: target: A file-like object. entry: A PasswdMapEntry. Returns: Number of bytes written to the target.
github-repos
def add_cohp(self, label, cohp):
    """Add a COHP for plotting.

    Args:
        label: Label for the COHP. Must be unique.
        cohp: COHP object.
    """
    if self.zero_at_efermi:
        # Shift energies so the Fermi level sits at zero.
        energies = cohp.energies - cohp.efermi
    else:
        energies = cohp.energies
    self._cohps[label] = {
        'energies': energies,
        'COHP': cohp.get_cohp(),
        'ICOHP': cohp.get_icohp(),
        'efermi': cohp.efermi,
    }
Adds a COHP for plotting. Args: label: Label for the COHP. Must be unique. cohp: COHP object.
codesearchnet
def _use_prototype(self, spec, prototypes):
    """Populates the given spec with the values of its declared prototype.

    Args:
        spec (dict): Spec to update; its 'based-on' entry is consumed.
        prototypes (dict): Configuration spec containing the prototypes.
            NOTE(review): this parameter is currently unused -- the code
            treats spec['based-on'] itself as the prototype mapping instead
            of looking the name up in `prototypes`. Confirm this is the
            intended behavior.

    Returns:
        dict: Updated spec.
    """
    prototype = spec['based-on']
    del spec['based-on']
    # Copy only attributes the spec does not already define; deepcopy
    # prevents shared mutable state between spec and prototype.
    for attr in prototype:
        if (attr not in spec):
            spec[attr] = copy.deepcopy(prototype[attr])
    return spec
Populates the given spec with the values of it's declared prototype Args: spec (dict): spec to update prototypes (dict): Configuration spec containing the prototypes Returns: dict: updated spec
codesearchnet
def inverse_guass(self, mu: float, sigma: float) -> float:
    """Return a random number from libtcod's inverted Gaussian generator.

    Args:
        mu (float): The median returned value.
        sigma (float): The standard deviation.

    Returns:
        float: A random float.
    """
    # NOTE: the misspelling "guass" is kept for backwards compatibility.
    sample = lib.TCOD_random_get_gaussian_double_inv(self.random_c, mu, sigma)
    return float(sample)
Return a random Gaussian number using the Box-Muller transform. Args: mu (float): The median returned value. sigma (float): The standard deviation. Returns: float: A random float.
codesearchnet
def set_who(voevent, date=None, author_ivorn=None):
    """Sets the minimal 'Who' attributes: date of authoring, AuthorIVORN.

    Args:
        voevent (:class:`Voevent`): Root node of a VOEvent etree.
        date (datetime.datetime): Date of authoring. NB microseconds are
            ignored, as per the VOEvent spec.
        author_ivorn (str): Short author identifier, e.g.
            ``voevent.4pisky.org/ALARRM``. Note that the prefix ``ivo://``
            will be prepended internally.
    """
    if author_ivorn is not None:
        # NOTE(review): this statement was truncated in the source;
        # restored to prepend the 'ivo://' scheme to the short identifier.
        voevent.Who.AuthorIVORN = ''.join(('ivo://', author_ivorn))
    if date is not None:
        # Drop microseconds before serializing, per the VOEvent spec.
        voevent.Who.Date = date.replace(microsecond=0).isoformat()
Sets the minimal 'Who' attributes: date of authoring, AuthorIVORN. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. date(datetime.datetime): Date of authoring. NB Microseconds are ignored, as per the VOEvent spec. author_ivorn(str): Short author identifier, e.g. ``voevent.4pisky.org/ALARRM``. Note that the prefix ``ivo://`` will be prepended internally.
juraj-google-style
def plot(self, pts_per_edge, color=None, ax=None, with_nodes=False):
    """Plot the current surface.

    Args:
        pts_per_edge (int): Number of points to plot per edge.
        color (Optional[Tuple[float, float, float]]): Color as RGB profile.
        ax (Optional[matplotlib.artist.Artist]): matplotlib axis object
            to add plot to.
        with_nodes (Optional[bool]): Determines if the control points
            should be added to the plot. Off by default.

    Returns:
        matplotlib.artist.Artist: The axis containing the plot. This may
        be a newly created axis.

    Raises:
        NotImplementedError: If the surface's dimension is not ``2``.
    """
    if (self._dimension != 2):
        raise NotImplementedError('2D is the only supported dimension', 'Current dimension', self._dimension)
    # Create a fresh axis when the caller did not supply one.
    if (ax is None):
        ax = _plot_helpers.new_axis()
    _plot_helpers.add_patch(ax, color, pts_per_edge, *self._get_edges())
    if with_nodes:
        # Overlay the control points as black dots.
        ax.plot(self._nodes[(0, :)], self._nodes[(1, :)], color='black', marker='o', linestyle='None')
    return ax
Plot the current surface. Args: pts_per_edge (int): Number of points to plot per edge. color (Optional[Tuple[float, float, float]]): Color as RGB profile. ax (Optional[matplotlib.artist.Artist]): matplotlib axis object to add plot to. with_nodes (Optional[bool]): Determines if the control points should be added to the plot. Off by default. Returns: matplotlib.artist.Artist: The axis containing the plot. This may be a newly created axis. Raises: NotImplementedError: If the surface's dimension is not ``2``.
codesearchnet
def rgbline(x, y, red, green, blue, alpha=1, linestyles='solid', linewidth=2.5):
    """Get a RGB coloured line for plotting.

    Args:
        x (list): x-axis data.
        y (list): y-axis data (can be multidimensional array).
        red (list): Red data (must have same shape as ``y``).
        green (list): Green data (must have same shape as ``y``).
        blue (list): Blue data (must have same shape as ``y``).
        alpha (:obj:`list` or :obj:`int`, optional): Alpha (transparency)
            data (must have same shape as ``y`` or be a scalar).
        linestyles (:obj:`str`, optional): Linestyle for plot. Options
            are ``"solid"`` or ``"dotted"``.
        linewidth (float): Width of the plotted line.

    Returns:
        matplotlib.collections.LineCollection: The coloured line segments.
    """
    y = np.array(y)
    if len(y.shape) == 1:
        # Promote a single series to the multi-series layout.
        y = np.array([y])
        red = np.array([red])
        green = np.array([green])
        blue = np.array([blue])
        alpha = np.array([alpha])
    elif isinstance(alpha, (int, float)):
        # Broadcast a scalar alpha across all series. The original only
        # accepted int here, so float alphas crashed in the zip below.
        alpha = [alpha] * len(y)

    seg = []
    colours = []
    for yy, rr, gg, bb, aa in zip(y, red, green, blue, alpha):
        pts = np.array([x, yy]).T.reshape(-1, 1, 2)
        seg.extend(np.concatenate([pts[:-1], pts[1:]], axis=1))

        # Colour of each segment is the mean of its two endpoint colours.
        nseg = len(x) - 1
        r = [0.5 * (rr[i] + rr[i + 1]) for i in range(nseg)]
        g = [0.5 * (gg[i] + gg[i + 1]) for i in range(nseg)]
        b = [0.5 * (bb[i] + bb[i + 1]) for i in range(nseg)]
        # np.float was removed in NumPy 1.24; the builtin float is the
        # equivalent dtype.
        a = np.ones(nseg, float) * aa
        colours.extend(list(zip(r, g, b, a)))

    lc = LineCollection(seg, colors=colours, rasterized=True,
                        linewidth=linewidth, linestyles=linestyles)
    return lc
Get a RGB coloured line for plotting. Args: x (list): x-axis data. y (list): y-axis data (can be multidimensional array). red (list): Red data (must have same shape as ``y``). green (list): Green data (must have same shape as ``y``). blue (list): blue data (must have same shape as ``y``). alpha (:obj:`list` or :obj:`int`, optional): Alpha (transparency) data (must have same shape as ``y`` or be an :obj:`int`). linestyles (:obj:`str`, optional): Linestyle for plot. Options are ``"solid"`` or ``"dotted"``.
codesearchnet
def remove(self, annotation):
    """Remove an annotation.

    Args:
        annotation (gkeepapi.node.Annotation): An Annotation object.
    """
    key = annotation.id
    if key in self._annotations:
        del self._annotations[key]
        # Flag the container as modified so it gets re-synced.
        self._dirty = True
Removes an annotation. Args: annotation (gkeepapi.node.Annotation): An Annotation object. Returns: gkeepapi.node.Annotation: The Annotation.
juraj-google-style
def get_path(self, key, rel_to_cwd=False, rel_to_conf=False):
    """Retrieve a path from the config, resolving it against the
    invocation directory or the configuration file directory, depending on
    whether it was passed through the command-line or the configuration
    file.

    Args:
        key (str): The key to look up the path with.
        rel_to_cwd (bool): Return the path relative to the invocation
            directory.
        rel_to_conf (bool): Return the path relative to the configuration
            file directory.

    Returns:
        str: The path, or `None` if the stored value is not a string.
    """
    # Command-line values take precedence over configuration-file values.
    if key in self.__cli:
        path = self.__cli[key]
        from_conf = False
    else:
        path = self.__config.get(key)
        from_conf = True
    if not isinstance(path, str):
        return None

    res = self.__abspath(path, from_conf)
    if rel_to_cwd:
        return os.path.relpath(res, self.__invoke_dir)
    if rel_to_conf:
        return os.path.relpath(res, self.__conf_dir)
    # Reuse the absolute path computed above instead of resolving it a
    # second time (the original called self.__abspath twice).
    return res
Retrieve a path from the config, resolving it against the invokation directory or the configuration file directory, depending on whether it was passed through the command-line or the configuration file. Args: key: str, the key to lookup the path with Returns: str: The path, or `None`
codesearchnet
def ProcessListDirectory(self, responses):
    """Processes the results of the ListDirectory client action.

    Args:
        responses: a flow Responses object.

    Raises:
        flow.FlowError: If the client action failed.
    """
    if not responses.success:
        raise flow.FlowError('Unable to list directory.')

    # Create all AFF4 objects within one mutation pool.
    with data_store.DB.GetMutationPool() as pool:
        for response in responses:
            stat_entry = rdf_client_fs.StatEntry(response)
            filesystem.CreateAFF4Object(
                stat_entry, self.client_urn, pool, token=self.token)
            self.SendReply(stat_entry)
Processes the results of the ListDirectory client action. Args: responses: a flow Responses object.
codesearchnet
def chmod(target):
    """Recursively set permissions to 0600 for files and 0700 for folders.

    It's ok unless we need something more specific.

    Args:
        target (str): Root file or folder.

    Raises:
        ValueError: If target is neither a file nor a directory.
    """
    assert isinstance(target, str)
    assert os.path.exists(target)

    file_mode = stat.S_IRUSR | stat.S_IWUSR
    folder_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR

    # Clear any immutable flag first, otherwise chmod would fail.
    remove_immutable_attribute(target)

    if os.path.isfile(target):
        os.chmod(target, file_mode)
    elif os.path.isdir(target):
        os.chmod(target, folder_mode)
        # Normalize every entry below the root.
        for root, dirs, files in os.walk(target):
            for name in dirs:
                os.chmod(os.path.join(root, name), folder_mode)
            for name in files:
                os.chmod(os.path.join(root, name), file_mode)
    else:
        raise ValueError('Unsupported file type: {}'.format(target))
Recursively set the chmod for files to 0600 and 0700 for folders. It's ok unless we need something more specific. Args: target (str): Root file or folder
codesearchnet
class Identity(Initializer):
    """Initializer that generates the identity matrix.

    Only usable for generating 2D matrices.

    Examples:

    >>> # Standalone usage:
    >>> initializer = Identity()
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = Identity()
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        gain: Multiplicative factor to apply to the identity matrix.
    """

    def __init__(self, gain=1.0):
        self.gain = gain

    def __call__(self, shape, dtype=None):
        """Return a gain-scaled identity matrix of the requested shape."""
        if len(shape) != 2:
            raise ValueError(f'Identity matrix initializer can only be used for 2D matrices. Received: shape={shape} of rank {len(shape)}.')
        resolved_dtype = standardize_dtype(dtype)
        identity = ops.eye(*shape, dtype=resolved_dtype)
        return self.gain * identity
Initializer that generates the identity matrix. Only usable for generating 2D matrices. Examples: >>> # Standalone usage: >>> initializer = Identity() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = Identity() >>> layer = Dense(3, kernel_initializer=initializer) Args: gain: Multiplicative factor to apply to the identity matrix.
github-repos
def _SetAllFieldTypes(self, package, desc_proto, scope):
    """Sets all the descriptor's fields's types.

    This method also sets the containing types on any extensions.

    Args:
        package: The current package of desc_proto.
        desc_proto: The message descriptor to update.
        scope: Enclosing scope of available types.
    """
    package = _PrefixWithDot(package)
    main_desc = self._GetTypeFromScope(package, desc_proto.name, scope)
    # Build the fully-qualified package prefix for nested messages.
    if (package == '.'):
        nested_package = _PrefixWithDot(desc_proto.name)
    else:
        nested_package = '.'.join([package, desc_proto.name])
    # Resolve each field's type in place.
    for (field_proto, field_desc) in zip(desc_proto.field, main_desc.fields):
        self._SetFieldType(field_proto, field_desc, nested_package, scope)
    # Extensions additionally need their containing type resolved.
    for (extension_proto, extension_desc) in zip(desc_proto.extension, main_desc.extensions):
        extension_desc.containing_type = self._GetTypeFromScope(nested_package, extension_proto.extendee, scope)
        self._SetFieldType(extension_proto, extension_desc, nested_package, scope)
    # Recurse into nested message types.
    for nested_type in desc_proto.nested_type:
        self._SetAllFieldTypes(nested_package, nested_type, scope)
Sets all the descriptor's fields's types. This method also sets the containing types on any extensions. Args: package: The current package of desc_proto. desc_proto: The message descriptor to update. scope: Enclosing scope of available types.
codesearchnet
def get_block_details(self, block_ids):
    """Get details of scheduling or processing blocks.

    Args:
        block_ids (list): List of block IDs (a single scalar ID is also
            accepted).

    Yields:
        dict: The details of each requested block.
    """
    # Accept a single scalar ID as well as a list of IDs.
    if not hasattr(block_ids, "__iter__"):
        block_ids = [block_ids]
    for block_id in block_ids:
        block_key = self._db.get_block(block_id)[0]
        details = self._db.get_all_field_value(block_key)
        for field in details:
            # Stored lists/dicts come back as strings; convert them back
            # to Python objects.
            for marker in ['[', '{']:
                if marker in details[field]:
                    details[field] = ast.literal_eval(
                        str(details[field]))
        yield details
Get details of scheduling or processing block Args: block_ids (list): List of block IDs
juraj-google-style
def _compute_args(self, data=dict(), **kwargs):
    """Compute the arguments.

    Try to import attributes from data. Otherwise compute kwargs
    arguments.

    Args:
        data: A dict of attribute values.
        kwargs: Extra attribute overrides.
    """
    # Seed every known attribute with its configured default value.
    for name, remote_attribute in self._attributes.items():
        default = BambouConfig.get_default_attribute_value(
            self.__class__, name, remote_attribute.attribute_type)
        setattr(self, name, default)

    if len(data) > 0:
        self.from_dict(data)

    # Keyword overrides apply only to attributes that already exist.
    for key, value in kwargs.items():
        if hasattr(self, key):
            setattr(self, key, value)
Compute the arguments Try to import attributes from data. Otherwise compute kwargs arguments. Args: data: a dict() kwargs: a list of arguments
codesearchnet
def SplitIntoNormalAndControl(self, buf):
    """Returns a list of (normal_string, control_sequence) tuples from buf.

    Args:
        buf: The input string containing one or more control sequences
            interspersed with normal strings.

    Returns:
        A list of (normal_string, control_sequence) tuples.
    """
    # Without a CSI marker (or with empty input) there is nothing to split.
    if not self._csi or not buf:
        return [(buf, '')]
    pairs = []
    pos = 0
    total = len(buf)
    while pos < total:
        start = buf.find(self._csi, pos)
        if start < 0:
            # No further control sequences; the remainder is all normal text.
            pairs.append((buf[pos:], ''))
            break
        end = start + self.GetControlSequenceLen(buf[start:])
        pairs.append((buf[pos:start], buf[start:end]))
        pos = end
    return pairs
Returns a list of (normal_string, control_sequence) tuples from buf. Args: buf: The input string containing one or more control sequences interspersed with normal strings. Returns: A list of (normal_string, control_sequence) tuples.
github-repos
def sort_request(request: Dict[str, Any]) -> OrderedDict:
    """Sort a JSON-RPC request dict into canonical member order.

    This has no effect other than making the request nicer to read.

    >>> json.dumps(sort_request(
    ...     {'id': 2, 'params': [2, 3], 'method': 'add', 'jsonrpc': '2.0'}))
    '{"jsonrpc": "2.0", "method": "add", "params": [2, 3], "id": 2}'

    Args:
        request: JSON-RPC request in dict format.

    Returns:
        OrderedDict with the standard members first; any non-standard
        members keep their relative order after them (previously any
        unknown key raised ValueError via list.index).
    """
    sort_order = ["jsonrpc", "method", "params", "id"]

    def position(item):
        key = item[0]
        # Unknown keys sort after all standard ones (sorted() is stable,
        # so their relative order is preserved).
        return sort_order.index(key) if key in sort_order else len(sort_order)

    return OrderedDict(sorted(request.items(), key=position))
Sort a JSON-RPC request dict. This has no effect other than making the request nicer to read. >>> json.dumps(sort_request( ... {'id': 2, 'params': [2, 3], 'method': 'add', 'jsonrpc': '2.0'})) '{"jsonrpc": "2.0", "method": "add", "params": [2, 3], "id": 2}' Args: request: JSON-RPC request in dict format.
juraj-google-style
def DeserializeFromDB(buffer):
    """Deserialize a full SpentCoinState object from raw DB bytes.

    Args:
        buffer (bytes, bytearray, BytesIO): (Optional) data to create
            the stream from.

    Returns:
        SpentCoinState: the deserialized state object.
    """
    m = StreamManager.GetStream(buffer)
    try:
        reader = BinaryReader(m)
        spentcoin = SpentCoinState()
        spentcoin.Deserialize(reader)
    finally:
        # Always return the pooled stream, even if Deserialize raises;
        # previously a parse error leaked the stream.
        StreamManager.ReleaseStream(m)
    return spentcoin
Deserialize full object. Args: buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from. Returns: SpentCoinState:
juraj-google-style
def __init__(self, timestamp: int, rank: int = 0):
    """Create a reorderer.

    Args:
        timestamp (int): Epoch time of timestamp. Packages before this
            time are preferred.
        rank (int): If non-zero, allow version changes at this rank or
            above past the timestamp.
    """
    self.timestamp = timestamp
    self.rank = rank
Create a reorderer. Args: timestamp (int): Epoch time of timestamp. Packages before this time are preferred. rank (int): If non-zero, allow version changes at this rank or above past the timestamp.
juraj-google-style
def JoinPath(stem="", *parts):
    """A sane version of os.path.join.

    The intention here is to append the stem to the path. The standard
    module removes the path if the stem begins with a /.

    Args:
        stem: The stem to join to.
        *parts: parts of the path to join. The first arg is always the
            root and directory traversal is not allowed.

    Returns:
        a normalized path.
    """
    parts = [SmartUnicode(path) for path in parts]
    # NOTE(review): the original line was garbled mid-call; reconstructed
    # as collapsing doubled separators after concatenation.
    result = (stem + NormalizePath(u"/".join(parts))).replace("//", "/")
    result = result.rstrip("/")
    # An input that normalizes to nothing still yields the root path.
    return result or "/"
A sane version of os.path.join. The intention here is to append the stem to the path. The standard module removes the path if the stem begins with a /. Args: stem: The stem to join to. *parts: parts of the path to join. The first arg is always the root and directory traversal is not allowed. Returns: a normalized path.
juraj-google-style
def __init__(self, path):
    """Initializes a storage reader.

    Args:
        path (str): path to the input file.
    """
    super(StorageFileReader, self).__init__()
    # Path is stored; the storage file itself is opened lazily elsewhere.
    self._path = path
    self._storage_file = None
Initializes a storage reader. Args: path (str): path to the input file.
juraj-google-style