code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def bbox2distance(points, bbox, max_num_bins, reg_scale, up, eps=0.1):
    """Convert bounding box coordinates to distances from a reference point.

    Args:
        points: (n, 4) tensor [x, y, w, h] where (x, y) is the box center.
        bbox: (n, 4) tensor of bounding boxes in "xyxy" format.
        max_num_bins (float): maximum bin value, or None to skip clamping.
        reg_scale (float): controls the curvature of W(n); only its
            magnitude is used.
        up: controls the upper bounds of W(n); forwarded to translate_gt.
        eps (float): small value keeping targets strictly below max_num_bins.

    Returns:
        tuple: (flattened detached distances, detached right weights,
        detached left weights).
    """
    reg_scale = abs(reg_scale)
    half_scale = 0.5 * reg_scale
    # Tiny epsilon in each denominator guards against division by zero.
    w_denom = points[..., 2] / reg_scale + 1e-16
    h_denom = points[..., 3] / reg_scale + 1e-16
    left = (points[:, 0] - bbox[:, 0]) / w_denom - half_scale
    top = (points[:, 1] - bbox[:, 1]) / h_denom - half_scale
    right = (bbox[:, 2] - points[:, 0]) / w_denom - half_scale
    bottom = (bbox[:, 3] - points[:, 1]) / h_denom - half_scale
    four_lens = torch.stack([left, top, right, bottom], -1)
    four_lens, weight_right, weight_left = translate_gt(four_lens, max_num_bins, reg_scale, up)
    if max_num_bins is not None:
        four_lens = four_lens.clamp(min=0, max=max_num_bins - eps)
    return (four_lens.reshape(-1).detach(), weight_right.detach(), weight_left.detach())
Converts bounding box coordinates to distances from a reference point. Args: points (Tensor): (n, 4) [x, y, w, h], where (x, y) is the center. bbox (Tensor): (n, 4) bounding boxes in "xyxy" format. max_num_bins (float): Maximum bin value. reg_scale (float): Controlling curvature of W(n). up (Tensor): Controlling upper bounds of W(n). eps (float): Small value to ensure target < max_num_bins. Returns: Tensor: Decoded distances.
github-repos
def set_help_intro(self, help_intro):
    """Set an introductory message prepended to the "help" command output.

    Args:
        help_intro: (RichTextLines) rich text lines appended to the
            beginning of the output of the command "help".
    """
    self._help_intro = help_intro
Set an introductory message to help output. Args: help_intro: (RichTextLines) Rich text lines appended to the beginning of the output of the command "help", as introductory information.
github-repos
def register(self, cmd: Type[Command]) -> None:
    """Register a new IMAP command, keyed by its command name.

    Args:
        cmd: The new command type; stored in ``self.commands`` under
            ``cmd.command``.
    """
    self.commands[cmd.command] = cmd
Register a new IMAP command. Args: cmd: The new command type.
juraj-google-style
def qry_create(options):
    """Create an EC2 query string and a display title from parsed args.

    Args:
        options (object): contains args and data from the parser.

    Returns:
        tuple: (qry_string, param_str) where qry_string is the query used
        against the AWS EC2 client and param_str is the title displayed
        before the list.
    """
    qry_string = ""
    param_str = ""
    filt_end = ""
    filt_st = "Filters=["
    param_str_default = "All"
    if options.id:
        qry_string += "InstanceIds=['%s']" % options.id
        param_str += "id: '%s'" % options.id
        param_str_default = ""
    if options.instname:
        qry_string, param_str = qry_helper(bool(options.id), qry_string, param_str)
        filt_end = "]"
        param_str_default = ""
        qry_string += filt_st + "{'Name': 'tag:Name', 'Values': ['%s']}" % options.instname
        param_str += "name: '%s'" % options.instname
    if options.inst_state:
        qry_helper_args = (bool(options.id), qry_string, param_str, bool(options.instname), filt_st)
        qry_string, param_str = qry_helper(*qry_helper_args)
        qry_string += "{'Name': 'instance-state-name','Values': ['%s']}" % options.inst_state
        param_str += "state: '%s'" % options.inst_state
        filt_end = "]"
        param_str_default = ""
    qry_string += filt_end
    param_str += param_str_default
    debg.dprintx("\nQuery String")
    debg.dprintx(qry_string, True)
    debg.dprint("param_str: ", param_str)
    return qry_string, param_str
Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list.
juraj-google-style
def realtime(widget, url_name=None, url_regex=None, time_interval=None):
    """Register a widget as real-time and return it.

    Args:
        widget (Widget): the widget to register and return as real-time.
        url_name (str): the URL name to call to get updated content.
        url_regex (regex): the URL regex to be matched.
        time_interval (int): the interval of refreshment in milliseconds.

    Returns:
        Widget: the "real-timed" widget.

    Raises:
        AttributeError: if the widget lacks ``get_updated_content``.
        ValueError: if ``get_updated_content`` is not callable, or the URL
            name/regex is already taken by another real-time widget.
    """
    if not hasattr(widget, 'get_updated_content'):
        raise AttributeError('Widget %s must implement get_updated_content method.' % widget)
    elif not callable(widget.get_updated_content):
        raise ValueError('get_updated_content in widget %s is not callable' % widget)

    if url_name is None:
        if getattr(widget, 'url_name', None) is not None:
            url_name = widget.url_name
        else:
            url_name = widget.__class__.__name__
    if url_name in [w.url_name for w in REALTIME_WIDGETS]:
        raise ValueError('URL name %s is already used by another real time widget.' % url_name)

    if url_regex is None:
        if getattr(widget, 'url_regex', None) is not None:
            url_regex = widget.url_regex
        else:
            # Derive a stable regex fragment from the URL name's hash.
            url_regex = sha256(url_name.encode('utf-8'))
            url_regex = url_regex.hexdigest()[:32]
            url_regex = 'realtime/' + url_regex
    if url_regex in [w.url_regex for w in REALTIME_WIDGETS]:
        raise ValueError('URL regex %s is already used by another real time widget.' % url_regex)

    if time_interval is None:
        if getattr(widget, 'time_interval', None) is not None:
            time_interval = widget.time_interval
        else:
            time_interval = app_settings.default_time_interval

    from django.views.generic import View
    from braces.views import AjaxResponseMixin, JSONResponseMixin

    class PartialResponse(JSONResponseMixin, AjaxResponseMixin, View):
        # Ajax view serving the widget's refreshed content as JSON.
        def get_data(self):
            return widget.get_updated_content()

        def get(self, request, *args, **kwargs):
            return self.get_ajax(request, *args, **kwargs)

        def get_ajax(self, request, *args, **kwargs):
            return self.render_json_response(self.get_data())

    PartialResponse.url_name = url_name
    PartialResponse.url_regex = url_regex
    PartialResponse.time_interval = time_interval
    REALTIME_WIDGETS.append(PartialResponse)

    # Only back-fill attributes the widget did not already define.
    if not hasattr(widget, 'url_name'):
        widget.url_name = url_name
    if not hasattr(widget, 'url_regex'):
        widget.url_regex = url_regex
    if not hasattr(widget, 'time_interval'):
        widget.time_interval = time_interval
    return widget
Return a widget as real-time. Args: widget (Widget): the widget to register and return as real-time. url_name (str): the URL name to call to get updated content. url_regex (regex): the URL regex to be matched. time_interval (int): the interval of refreshment in milliseconds. Returns: Widget: the "real-timed" widget.
codesearchnet
def remove(self, force=False):
    """Remove this node from the swarm.

    Args:
        force (bool): Force removal of an active node. Default: ``False``.

    Returns:
        ``True`` if the request was successful.

    Raises:
        :py:class:`docker.errors.NotFound`: if the node is not in the swarm.
        :py:class:`docker.errors.APIError`: if the server returns an error.
    """
    return self.client.api.remove_node(self.id, force=force)
Remove this node from the swarm. Args: force (bool): Force remove an active node. Default: `False` Returns: `True` if the request was successful. Raises: :py:class:`docker.errors.NotFound` If the node doesn't exist in the swarm. :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def extend_validators(raw_validators, override_validators):
    """Extend ``raw_validators`` with ``override_validators``.

    Validators in ``override_validators`` merge with and override those in
    ``raw_validators`` (matched by the mapping key produced by
    ``_convert_validators_to_mapping``).

    Args:
        raw_validators (list): base validators.
        override_validators (list): validators taking precedence.

    Returns:
        list: the extended validators.
    """
    # Guard clauses: with one side empty, the other wins outright.
    if not raw_validators:
        return override_validators
    if not override_validators:
        return raw_validators
    merged = _convert_validators_to_mapping(raw_validators)
    merged.update(_convert_validators_to_mapping(override_validators))
    return list(merged.values())
extend raw_validators with override_validators. override_validators will merge and override raw_validators. Args: raw_validators (dict): override_validators (dict): Returns: list: extended validators Examples: >>> raw_validators = [{'eq': ['v1', 200]}, {"check": "s2", "expect": 16, "comparator": "len_eq"}] >>> override_validators = [{"check": "v1", "expect": 201}, {'len_eq': ['s3', 12]}] >>> extend_validators(raw_validators, override_validators) [ {"check": "v1", "expect": 201, "comparator": "eq"}, {"check": "s2", "expect": 16, "comparator": "len_eq"}, {"check": "s3", "expect": 12, "comparator": "len_eq"} ]
codesearchnet
def create_issue(self, data, params=None):
    """Create an issue or a sub-task from a JSON representation.

    The request body may carry ``update`` or ``fields``; settable fields
    can be discovered via the ``/rest/api/2/issue/createmeta`` resource.
    Creating a sub-task additionally requires a sub-task ``issueType`` and
    a ``parent`` field referencing the parent issue.

    Args:
        data: JSON payload for the issue.
        params: optional query parameters.

    Returns:
        The response of the POST to the ``issue`` endpoint.
    """
    return self._post(self.API_URL + 'issue', data=data, params=params)
Creates an issue or a sub-task from a JSON representation. You can provide two parameters in request's body: update or fields. The fields, that can be set on an issue create operation, can be determined using the /rest/api/2/issue/createmeta resource. If a particular field is not configured to appear on the issue's Create screen, then it will not be returned in the createmeta response. A field validation error will occur if such field is submitted in request. Creating a sub-task is similar to creating an issue with the following differences: issueType field must be set to a sub-task issue type (use /issue/createmeta to find sub-task issue types), and You must provide a parent field with the ID or key of the parent issue. Args: data: params: Returns:
codesearchnet
def fragmentate(self, give_only_index=False, use_lookup=None):
    """Get the non-bonded (disconnected) parts of the molecule.

    Args:
        give_only_index (bool): If ``True`` a set of indices is returned
            for each fragment; otherwise new Cartesian instances.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. Defaults to
            ``settings['defaults']['use_lookup']``.

    Returns:
        list: sets of indices or new Cartesian instances, one per fragment.
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']
    fragments = []
    remaining = set(self.index)
    self.get_bonds(use_lookup=use_lookup)
    while remaining:
        # Flood-fill one connected component starting from any atom left.
        component = self.get_coordination_sphere(
            remaining.pop(), use_lookup=True, n_sphere=float('inf'),
            only_surface=False, give_only_index=True)
        remaining = remaining - component
        if give_only_index:
            fragments.append(component)
        else:
            fragment = self.loc[component]
            fragment._metadata['bond_dict'] = fragment.restrict_bond_dict(self._metadata['bond_dict'])
            try:
                fragment._metadata['val_bond_dict'] = fragment.restrict_bond_dict(self._metadata['val_bond_dict'])
            except KeyError:
                # val_bond_dict is optional metadata; skip when absent.
                pass
            fragments.append(fragment)
    return fragments
Get the indices of non-bonded parts in the molecule. Args: give_only_index (bool): If ``True`` a set of indices is returned. Otherwise a new Cartesian instance. use_lookup (bool): Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: list: A list of sets of indices or new Cartesian instances.
codesearchnet
def list_tasks(target=None):
    """Return all the projects and tasks in the `acorn` database directory.

    Args:
        target (str): directory to list the projects for. Defaults to the
            configured database directory.

    Returns:
        dict: keys are project names; values are lists of tasks associated
        with the project (from files named ``<project>.<task>.json``).
    """
    from os import path
    from glob import glob
    if target is None:
        target = _dbdir()
    # Glob inside `target` directly instead of chdir-ing into it. The
    # previous implementation changed the process working directory and
    # would never restore it if an error occurred before the final chdir.
    result = {}
    for filepath in glob(path.join(target, '*.*.json')):
        project, task = path.basename(filepath).split('.')[0:2]
        result.setdefault(project, []).append(task)
    return result
Returns a list of all the projects and tasks available in the `acorn` database directory. Args: target (str): directory to list the projects for. Defaults to the configured database directory. Returns: dict: keys are project names; values are lists of tasks associated with the project.
codesearchnet
def deep_update(d, u):
    """Deeply update dictionary ``d`` with ``u``; list values concatenate.

    Nested mappings are merged recursively; list values are extended with
    the elements of ``u`` not already present; all other values are
    overwritten.

    Args:
        d (dict): first dictionary, updated in place.
        u (dict): second dictionary used to extend the first one.

    Returns:
        dict: the merged dictionary (same object as ``d``).
    """
    for key, value in u.items():
        if isinstance(value, Mapping):
            d[key] = deep_update(d.get(key, {}), value)
        elif isinstance(value, list):
            current = d.get(key, [])
            d[key] = current + [item for item in value if item not in current]
        else:
            d[key] = value
    return d
Deeply updates a dictionary. List values are concatenated. Args: d (dict): First dictionary which will be updated u (dict): Second dictionary use to extend the first one Returns: dict: The merge dictionary
codesearchnet
def map(self, map_fn, desc=None):
    """Return a copy of this query with values mapped through ``map_fn``.

    Args:
        map_fn (callable): takes a single argument and returns a new value.

    Keyword Args:
        desc (str): description of the mapping transform for log messages;
            defaults to the name of the map function.

    Returns:
        Query
    """
    label = desc if desc is not None else getattr(map_fn, '__name__', '')
    label = u'map({})'.format(label)
    return self.transform(lambda values: (map_fn(v) for v in values), desc=label)
Return a copy of this query, with the values mapped through `map_fn`. Args: map_fn (callable): A callable that takes a single argument and returns a new value. Keyword Args: desc (str): A description of the mapping transform, for use in log message. Defaults to the name of the map function. Returns: Query
juraj-google-style
def is50(msg):
    """Check if a message is likely BDS code 5,0 (track and turn report).

    Args:
        msg (str): 28-character hexadecimal message string.

    Returns:
        bool: True when all status and plausibility checks pass.
    """
    if allzeros(msg):
        return False
    d = hex2bin(data(msg))
    # Each (status bit, field start, field end) triple must be internally
    # consistent for a valid BDS 5,0 frame.
    for bits in ((1, 3, 11), (12, 13, 23), (24, 25, 34), (35, 36, 45), (46, 47, 56)):
        if wrongstatus(d, *bits):
            return False
    roll = roll50(msg)
    if roll is not None and abs(roll) > 60:
        return False
    gs = gs50(msg)
    if gs is not None and gs > 600:
        return False
    tas = tas50(msg)
    if tas is not None and tas > 500:
        return False
    # Ground speed and true airspeed should not diverge wildly.
    if gs is not None and tas is not None and abs(tas - gs) > 200:
        return False
    return True
Check if a message is likely to be BDS code 5,0 (Track and turn report) Args: msg (String): 28 bytes hexadecimal message string Returns: bool: True or False
juraj-google-style
def _ProcessFileEntryDataStream(self, mediator, file_entry, data_stream):
    """Processes a specific data stream of a file entry.

    Args:
        mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and abort signals.
        file_entry (dfvfs.FileEntry): file entry containing the data stream.
        data_stream (dfvfs.DataStream): data stream or None if the file
            entry has no data stream.
    """
    display_name = mediator.GetDisplayName()
    data_stream_name = getattr(data_stream, 'name', '') or ''
    logger.debug(
        '[ProcessFileEntryDataStream] processing data stream: "{0:s}" of file entry: {1:s}'.format(
            data_stream_name, display_name))
    mediator.ClearEventAttributes()
    if data_stream and self._analyzers:
        self._AnalyzeDataStream(mediator, file_entry, data_stream.name)
    self._ExtractMetadataFromFileEntry(mediator, file_entry, data_stream)
    # Entries without a data stream carry only metadata.
    if not data_stream:
        return
    if self._CanSkipContentExtraction(file_entry):
        display_name = mediator.GetDisplayName()
        logger.debug('Skipping content extraction of: {0:s}'.format(display_name))
        self.processing_status = definitions.STATUS_INDICATOR_IDLE
        return
    path_spec = copy.deepcopy(file_entry.path_spec)
    if data_stream and not data_stream.IsDefault():
        path_spec.data_stream = data_stream.name
    archive_types = []
    compressed_stream_types = []
    if self._process_compressed_streams:
        compressed_stream_types = self._GetCompressedStreamTypes(mediator, path_spec)
    if not compressed_stream_types:
        archive_types = self._GetArchiveTypes(mediator, path_spec)
    if archive_types:
        if self._process_archives:
            self._ProcessArchiveTypes(mediator, path_spec, archive_types)
        if dfvfs_definitions.TYPE_INDICATOR_ZIP in archive_types:
            # ZIP files are also processed as regular files (e.g. docx).
            self._ExtractContentFromDataStream(mediator, file_entry, data_stream.name)
    elif compressed_stream_types:
        self._ProcessCompressedStreamTypes(mediator, path_spec, compressed_stream_types)
    else:
        self._ExtractContentFromDataStream(mediator, file_entry, data_stream.name)
Processes a specific data stream of a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry containing the data stream. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream.
codesearchnet
def run_suite_class(argv=None):
    """Executes tests in the test suite.

    Args:
        argv: A list that is then parsed as CLI args. If None, defaults to
            sys.argv.
    """
    cli_args = _parse_cli_args(argv)
    suite_class = _find_suite_class()
    if cli_args.list_tests:
        _print_test_names_for_suite(suite_class)
        sys.exit(0)
    test_configs = config_parser.load_test_config_file(cli_args.config, cli_args.test_bed)
    config_count = len(test_configs)
    if config_count != 1:
        logging.error('Expect exactly one test config, found %d', config_count)
    config = test_configs[0]
    runner = test_runner.TestRunner(log_dir=config.log_path, testbed_name=config.testbed_name)
    suite = suite_class(runner, config)
    test_selector = _parse_raw_test_selector(cli_args.tests)
    suite.set_test_selector(test_selector)
    suite_record = SuiteInfoRecord(suite_class_name=suite_class.__name__)
    console_level = logging.DEBUG if cli_args.verbose else logging.INFO
    ok = False
    with runner.mobly_logger(console_level=console_level) as log_path:
        try:
            suite.setup_suite(config.copy())
            try:
                suite_record.suite_begin()
                runner.run()
                ok = runner.results.is_all_pass
                # Fixed: removed a stray debug `print(ok)` that leaked the
                # pass/fail flag to stdout on every run.
            except signals.TestAbortAll:
                pass
        finally:
            suite.teardown_suite()
            suite_record.suite_end()
        suite_record.suite_run_display_name = suite.get_suite_run_display_name()
        suite_record.extras = suite.get_suite_info().copy()
        _dump_suite_info(suite_record, log_path)
    if not ok:
        sys.exit(1)
Executes tests in the test suite. Args: argv: A list that is then parsed as CLI args. If None, defaults to sys.argv.
github-repos
def trainable_variables(self):
    """A sequence of trainable variables accessed by this FuncGraph.

    Note that functions keep only weak references to variables. Calling
    the function after a variable it accesses has been deleted is an error.

    Returns:
        Sequence (tuple) of trainable variables for this func graph.
    """
    return tuple(v for v in self.variables if v.trainable)
A sequence of trainable variables accessed by this FuncGraph. Note that functions keep only weak references to variables. Calling the function after a variable it accesses has been deleted is an error. Returns: Sequence of trainable variables for this func graph.
github-repos
def hwvtep_add_loopback_interface(self, **kwargs):
    """Add a loopback interface to the overlay-gateway.

    Args:
        name (str): gateway name.
        int_id (int): loopback interface id.

    Returns:
        Return value of the configured ``_callback``.
    """
    gateway_name = kwargs.pop('name')
    loopback_id = kwargs.pop('int_id')
    builder = getattr(self._brocade_tunnels,
                      'overlay_gateway_ip_interface_loopback_loopback_id')
    config = builder(name=gateway_name, loopback_id=loopback_id)
    return self._callback(config)
Add loopback interface to the overlay-gateway Args: name (str): gateway-name int_id (int): loopback inteface id callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
juraj-google-style
def build_losses(self, logits_real, logits_fake):
    """Build standard GAN loss, setting ``self.g_loss`` and ``self.d_loss``.

    D and G play the two-player minimax game
    min_G max_D V(D, G) = E_{x ~ p_data}[log D(x)]
                        + E_{z ~ p_fake}[log(1 - D(G(z)))].

    Args:
        logits_real (tf.Tensor): discriminator logits from real samples.
        logits_fake (tf.Tensor): discriminator logits from fake samples
            produced by the generator.
    """
    with tf.name_scope("GAN_loss"):
        score_real = tf.sigmoid(logits_real)
        score_fake = tf.sigmoid(logits_fake)
        tf.summary.histogram('score-real', score_real)
        tf.summary.histogram('score-fake', score_fake)

        with tf.name_scope("discrim"):
            # Discriminator: real samples labeled 1, fake samples labeled 0.
            d_loss_pos = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_real, labels=tf.ones_like(logits_real)),
                name='loss_real')
            d_loss_neg = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake, labels=tf.zeros_like(logits_fake)),
                name='loss_fake')
            d_pos_acc = tf.reduce_mean(
                tf.cast(score_real > 0.5, tf.float32), name='accuracy_real')
            d_neg_acc = tf.reduce_mean(
                tf.cast(score_fake < 0.5, tf.float32), name='accuracy_fake')
            d_accuracy = tf.add(.5 * d_pos_acc, .5 * d_neg_acc, name='accuracy')
            self.d_loss = tf.add(.5 * d_loss_pos, .5 * d_loss_neg, name='loss')

        with tf.name_scope("gen"):
            # Generator tries to make the discriminator label fakes as real.
            self.g_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake, labels=tf.ones_like(logits_fake)),
                name='loss')
            g_accuracy = tf.reduce_mean(
                tf.cast(score_fake > 0.5, tf.float32), name='accuracy')

        add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)
Build standard GAN loss and set `self.g_loss` and `self.d_loss`. D and G play two-player minimax game with value function V(G,D) min_G max _D V(D, G) = IE_{x ~ p_data} [log D(x)] + IE_{z ~ p_fake} [log (1 - D(G(z)))] Args: logits_real (tf.Tensor): discrim logits from real samples logits_fake (tf.Tensor): discrim logits from fake samples produced by generator
juraj-google-style
def ScanForStorageMediaImage(self, source_path_spec):
    """Scans the path specification for a supported storage media image.

    Args:
        source_path_spec (PathSpec): source path specification.

    Returns:
        PathSpec: storage media image path specification or None if no
        supported storage media image type was found.

    Raises:
        BackEndError: if the source cannot be scanned or more than one
        storage media image type is found.
    """
    try:
        type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(
            source_path_spec, resolver_context=self._resolver_context)
    except RuntimeError as exception:
        raise errors.BackEndError(
            'Unable to process source path specification with error: {0!s}'.format(exception))

    if not type_indicators:
        # No image type detected; fall back to probing for a RAW image.
        file_system = resolver.Resolver.OpenFileSystem(
            source_path_spec, resolver_context=self._resolver_context)
        raw_path_spec = path_spec_factory.Factory.NewPathSpec(
            definitions.TYPE_INDICATOR_RAW, parent=source_path_spec)
        try:
            glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec)
        except errors.PathSpecError:
            glob_results = None
        file_system.Close()
        if not glob_results:
            return None
        return raw_path_spec

    if len(type_indicators) > 1:
        raise errors.BackEndError(
            'Unsupported source found more than one storage media image types.')
    return path_spec_factory.Factory.NewPathSpec(
        type_indicators[0], parent=source_path_spec)
Scans the path specification for a supported storage media image format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: storage media image path specification or None if no supported storage media image type was found. Raises: BackEndError: if the source cannot be scanned or more than one storage media image type is found.
codesearchnet
def _GetStringValue(self, data_dict, name, default_value=None):
    """Retrieves a specific string value from the data dict.

    Args:
        data_dict (dict[str, list[str]]): values per name.
        name (str): name of the value to retrieve.
        default_value (Optional[object]): value to return if the name has
            no value set in data_dict.

    Returns:
        str: values joined by ", "; values containing a comma are quoted.
        Note the list in data_dict is modified in place when quoting.
    """
    values = data_dict.get(name, None)
    if not values:
        return default_value
    for index, value in enumerate(values):
        if ',' in value:
            # Quote values that contain the separator themselves.
            values[index] = '"{0:s}"'.format(value)
    return ', '.join(values)
Retrieves a specific string value from the data dict. Args: data_dict (dict[str, list[str]): values per name. name (str): name of the value to retrieve. default_value (Optional[object]): value to return if the name has no value set in data_dict. Returns: str: value represented as a string.
juraj-google-style
def _CallMethod(self, srvc, method_descriptor, rpc_controller, request, callback):
    """Calls the method described by a given method descriptor.

    Args:
        srvc: Instance of the service for which this method is called.
        method_descriptor: Descriptor that represents the method to call.
        rpc_controller: RPC controller to use for this method's execution.
        request: Request protocol message.
        callback: A callback to invoke after the method has completed.
    """
    if method_descriptor.containing_service != self.descriptor:
        raise RuntimeError(
            'CallMethod() given method descriptor for wrong service type.')
    bound_method = getattr(srvc, method_descriptor.name)
    return bound_method(rpc_controller, request, callback)
Calls the method described by a given method descriptor. Args: srvc: Instance of the service for which this method is called. method_descriptor: Descriptor that represent the method to call. rpc_controller: RPC controller to use for this method's execution. request: Request protocol message. callback: A callback to invoke after the method has completed.
juraj-google-style
def __init__(self, domain_postfix='_domain'):
    """Initialize the ReverseDNS class.

    Args:
        domain_postfix: the string to be appended to the ip fields
            (e.g. IP.src -> IP.src_domain).
    """
    super(ReverseDNS, self).__init__()
    self.domain_postfix = domain_postfix
    # Cache reverse-DNS lookups; timeout=600 is presumably seconds — TODO confirm.
    self.ip_lookup_cache = cache.Cache(timeout=600)
    self.output_stream = self.process_for_rdns()
Initialize ReverseDNS Class Args: domain_postfix: the string to be appended to the ip fields (e.g. IP.src -> IP.src_domain)
juraj-google-style
def add(self, distinguished_name, object_class, attributes):
    """Add an object to LDAP.

    Args:
        distinguished_name: the DN of the LDAP record to be added.
        object_class: the objectClass of the record; a list of length >= 1.
        attributes: a dictionary of LDAP attributes to add.
    """
    self.conn.add(distinguished_name, object_class, attributes)
Add object to LDAP. Args: distinguished_name: the DN of the LDAP record to be added object_class: The objectClass of the record to be added. This is a list of length >= 1. attributes: a dictionary of LDAP attributes to add See ldap_tools.api.group.API#__ldap_attr
juraj-google-style
def set_reprompt_text(self, text):
    """Set the response reprompt output speech to plain-text type.

    Args:
        text (str): response speech used when type is 'PlainText'.
            Cannot exceed 8,000 characters.
    """
    speech = self.response.reprompt.outputSpeech
    speech.type = 'PlainText'
    speech.text = text
Set response reprompt output speech as plain text type. Args: text: str. Response speech used when type is 'PlainText'. Cannot exceed 8,000 characters.
juraj-google-style
def __register_methods(self, parsed_config):
    """Register all methods from the given api config file.

    Methods are stored in a map from method_name to rosyMethod, the name
    of the ProtoRPC method to be called on the backend. If no rosyMethod
    was specified the value will be None.

    Args:
        parsed_config: The JSON object with the API configuration being
            added.
    """
    methods = parsed_config.get('methods')
    if not methods:
        return
    # Fixed: dict.iteritems() does not exist on Python 3; items() works on
    # both Python 2 and 3.
    for method_name, method in methods.items():
        self.__api_methods[method_name] = method.get('rosyMethod')
Register all methods from the given api config file. Methods are stored in a map from method_name to rosyMethod, the name of the ProtoRPC method to be called on the backend. If no rosyMethod was specified the value will be None. Args: parsed_config: The JSON object with the API configuration being added.
codesearchnet
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
    """Check for rvalue references.

    Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        nesting_state: A NestingState instance which maintains information
            about the current stack of nested blocks being parsed.
        error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # Prefer a && preceded by a non-space; fall back to && followed by one.
    match = Match(r'^(.*\S)&&', line)
    if not match:
        match = Match(r'(.*)&&\S', line)
    if (not match) or ('(&&)' in line) or Search(r'\boperator\s*$', match.group(1)):
        return
    typenames = GetTemplateArgs(clean_lines, linenum)
    and_pos = len(match.group(1))
    if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
        if not IsRValueAllowed(clean_lines, linenum, typenames):
            error(filename, linenum, 'build/c++11', 3,
                  'RValue references are an unapproved C++ feature.')
    else:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around &&')
Check for rvalue references. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
codesearchnet
def save_lines(lines, filename):
    """Save an array of lines to a file, newline-separated.

    Args:
        lines: An array of strings saved as individual lines.
        filename: Path to the output file.
    """
    joined = '\n'.join(lines)
    with open(filename, 'w', encoding='utf-8') as handle:
        handle.write(joined)
Save an array of lines to a file. Args: lines: An array of strings that will be saved as individual lines. filename: Path to the output file.
juraj-google-style
def _get_memory_contents(self):
    """Run the scheduler to determine memory contents at every step.

    The result is computed once and cached on the instance.

    Returns:
        list of frozenset of strings, where the ith entry describes the
        tensors in memory when executing operation i (where schedule[i]
        is an index into GetAllOperationNames()).
    """
    if self._memory_contents is None:
        schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg)
        self._memory_contents = self._graph.compute_memory_contents_under_schedule(schedule)
    return self._memory_contents
Runs the scheduler to determine memory contents at every point in time. Returns: a list of frozenset of strings, where the ith entry describes the tensors in memory when executing operation i (where schedule[i] is an index into GetAllOperationNames()).
codesearchnet
def issuperset(self, other):
    """Check if the contents of `self` is a superset of `other`.

    Args:
        other (:class:`FrameSet`): value converted to a FrameSet first.

    Returns:
        bool, or :class:`NotImplemented` if `other` fails to convert to
        a :class:`FrameSet`.
    """
    converted = self._cast_to_frameset(other)
    if converted is NotImplemented:
        return NotImplemented
    return self.items >= converted.items
Check if the contents of `self` is a superset of the contents of `other.` Args: other (:class:`FrameSet`): Returns: bool: :class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
juraj-google-style
def insert(self, lines=None):
    """Insert lines into the editor.

    Note:
        To insert before the first line use key 0 (or
        :func:`~exa.core.editor.Editor.preappend`); to insert after the
        last line use :func:`~exa.core.editor.Editor.append`.

    Args:
        lines (dict): Dictionary of lines of form (lineno, string) pairs.
            ``None`` (the default) is treated as a no-op.
    """
    if lines is None:
        # Fixed: the previous implementation raised AttributeError when
        # called with the default argument; treat "no lines" as a no-op.
        return
    for offset, (key, line) in enumerate(lines.items()):
        # Each prior insertion shifts subsequent target positions by one.
        position = key + offset
        self._lines = self._lines[:position] + [line] + self._lines[position:]
Insert lines into the editor. Note: To insert before the first line, use :func:`~exa.core.editor.Editor.preappend` (or key 0); to insert after the last line use :func:`~exa.core.editor.Editor.append`. Args: lines (dict): Dictionary of lines of form (lineno, string) pairs
juraj-google-style
def compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, q_filter_width=1, kv_filter_width=1, q_padding='VALID', kv_padding='VALID', vars_3d_num_heads=0, layer_collection=None):
    """Computes query, key and value.

    Args:
        query_antecedent: a Tensor with shape [batch, length_q, channels].
        memory_antecedent: a Tensor with shape [batch, length_m, channels];
            defaults to ``query_antecedent`` (self-attention) when None.
        total_key_depth: an integer.
        total_value_depth: an integer.
        q_filter_width: An integer specifying how wide the query should be.
        kv_filter_width: An integer specifying how wide keys/values should be.
        q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID.
        kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID.
        vars_3d_num_heads: an optional integer (if we want 3d variables).
        layer_collection: A tensorflow_kfac.LayerCollection. Only used by
            the KFAC optimizer. Default is None.

    Returns:
        q, k, v : [batch, length, depth] tensors.
    """
    if memory_antecedent is None:
        memory_antecedent = query_antecedent

    def component(antecedent, depth, filter_width, padding, name):
        # Shared plumbing for the q/k/v projections.
        return compute_attention_component(
            antecedent, depth, filter_width, padding, name,
            vars_3d_num_heads=vars_3d_num_heads,
            layer_collection=layer_collection)

    q = component(query_antecedent, total_key_depth, q_filter_width, q_padding, 'q')
    k = component(memory_antecedent, total_key_depth, kv_filter_width, kv_padding, 'k')
    v = component(memory_antecedent, total_value_depth, kv_filter_width, kv_padding, 'v')
    return q, k, v
Computes query, key and value. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] total_key_depth: an integer total_value_depth: an integer q_filter_width: An integer specifying how wide you want the query to be. kv_filter_width: An integer specifying how wide you want the keys and values to be. q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. vars_3d_num_heads: an optional (if we want to use 3d variables) layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. Returns: q, k, v : [batch, length, depth] tensors
codesearchnet
def get_all_function_definitions(base_most_function):
    """Obtain all function definitions given a base-most function.

    This includes the provided function, plus any overrides of that
    function in derived contracts (matched by full name).

    Args:
        base_most_function: the base-most function definition.

    Returns:
        list: the provided function and any overriding functions.
    """
    target_name = base_most_function.full_name
    overrides = []
    for derived in base_most_function.contract.derived_contracts:
        for candidate in derived.functions:
            if candidate.full_name == target_name:
                overrides.append(candidate)
    return [base_most_function] + overrides
Obtains all function definitions given a base-most function. This includes the provided function, plus any overrides of that function. Returns: (list): Returns any the provided function and any overriding functions defined for it.
codesearchnet
def _simple_name(distribution):
    """Infer the original name passed into a distribution constructor.

    Distributions typically follow `with.name_scope(name) as name:` then
    `super(name=name)`, so we reverse the name-scope transformation to
    recover the user-visible name kwarg: strip the trailing scope slash
    and any `_N` uniquifying suffix (e.g. 'x_2/' -> 'x').

    Args:
        distribution: a tfd.Distribution instance.

    Returns:
        simple_name: the original name passed into the Distribution.
    """
    simple_name = distribution.name
    if simple_name.endswith('/'):
        # Drop the trailing '/' of a name scope and keep the last segment.
        simple_name = simple_name.split('/')[-2]
    parts = simple_name.split('_')
    if parts[-1].isdigit():
        # Remove the '_N' suffix added by scope uniquification.
        simple_name = '_'.join(parts[:-1])
    return simple_name
Infer the original name passed into a distribution constructor. Distributions typically follow the pattern of with.name_scope(name) as name: super(name=name) so we attempt to reverse the name-scope transformation to allow addressing of RVs by the distribution's original, user-visible name kwarg. Args: distribution: a tfd.Distribution instance. Returns: simple_name: the original name passed into the Distribution. #### Example ``` d1 = tfd.Normal(0., 1., name='x') # d1.name = 'x/' d2 = tfd.Normal(0., 1., name='x') # d2.name = 'x_2/' _simple_name(d2) # returns 'x' ```
codesearchnet
class TFXGLMModel(TFXGLMPreTrainedModel):
    """Bare XGLM transformer decoder outputting raw hidden states.

    Transformer decoder consisting of *config.num_layers* layers. Each
    layer is a [`TFXGLMDecoderLayer`].

    Args:
        config: XGLMConfig
        embed_tokens: [TFSharedEmbeddings]: output embedding
    """

    def __init__(self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs: Any, **kwargs: Any) -> None:
        super().__init__(config, *inputs, **kwargs)
        self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name='model')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
        encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
        **kwargs: Any,
    ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
        # NOTE(review): `position_ids` is accepted but not forwarded to the
        # main layer — confirm against the upstream implementation.
        return self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'model', None) is not None:
            with tf.name_scope(self.model.name):
                self.model.build(None)
Transformer decoder consisting of *config.num_layers* layers. Each layer is a [`TFXGLMDecoderLayer`] Args: config: XGLMConfig embed_tokens: [TFSharedEmbeddings]: output embedding
github-repos
def _manage_location(attr):
    """Build a managed property interface for a location attribute.

    Args:
        attr (str): Property's name.

    Returns:
        property: getter reads ``self._<attr>``; setter delegates to
        ``self._set_location(attr, value)``.
    """
    def _get(self):
        return getattr(self, '_%s' % attr)

    def _set(self, value):
        self._set_location(attr, value)

    return property(_get, _set)
Build managed property interface. Args: attr (str): Property's name Returns: property: Managed property interface
juraj-google-style
def compute_files(user1, user2, file_list, dir_pre, start_num):
    """Compute the smatch f-score over a list of AMR files from two users.

    Args:
        user1: first user (sub-directory) name.
        user2: second user (sub-directory) name.
        file_list: file basenames to compare (".txt" is appended).
        dir_pre: directory prefix under which the user directories live.
        start_num: number of smatch restarts; kept for interface
            compatibility but unused here (smatch defaults apply).

    Returns:
        str: f-score formatted to two decimals, or -1.00 (float) if an
        expected file does not exist.
    """
    match_total = 0
    test_total = 0
    gold_total = 0
    for fi in file_list:
        file1 = dir_pre + user1 + "/" + fi + ".txt"
        file2 = dir_pre + user2 + "/" + fi + ".txt"
        if not os.path.exists(file1):
            print("*********Error: ", file1, "does not exist*********", file=ERROR_LOG)
            return -1.00
        if not os.path.exists(file2):
            print("*********Error: ", file2, "does not exist*********", file=ERROR_LOG)
            return -1.00
        try:
            # Context managers close the handles; the original leaked both
            # file objects on every iteration.
            with open(file1, "r") as file1_h, open(file2, "r") as file2_h:
                cur_amr1 = smatch.get_amr_line(file1_h)
                cur_amr2 = smatch.get_amr_line(file2_h)
        except IOError:
            print("Cannot open the files", file1, file2, file=ERROR_LOG)
            break
        if cur_amr1 == "":
            print("AMR 1 is empty", file=ERROR_LOG)
            continue
        if cur_amr2 == "":
            print("AMR 2 is empty", file=ERROR_LOG)
            continue
        amr1 = amr.AMR.parse_AMR_line(cur_amr1)
        amr2 = amr.AMR.parse_AMR_line(cur_amr2)
        # Rename nodes so the two AMRs cannot share variable names.
        test_label = "a"
        gold_label = "b"
        amr1.rename_node(test_label)
        amr2.rename_node(gold_label)
        (test_inst, test_rel1, test_rel2) = amr1.get_triples()
        (gold_inst, gold_rel1, gold_rel2) = amr2.get_triples()
        if verbose:
            print("Instance triples of file 1:", len(test_inst), file=DEBUG_LOG)
            print(test_inst, file=DEBUG_LOG)
            print("Attribute triples of file 1:", len(test_rel1), file=DEBUG_LOG)
            print(test_rel1, file=DEBUG_LOG)
            print("Relation triples of file 1:", len(test_rel2), file=DEBUG_LOG)
            print(test_rel2, file=DEBUG_LOG)
            print("Instance triples of file 2:", len(gold_inst), file=DEBUG_LOG)
            print(gold_inst, file=DEBUG_LOG)
            print("Attribute triples of file 2:", len(gold_rel1), file=DEBUG_LOG)
            print(gold_rel1, file=DEBUG_LOG)
            print("Relation triples of file 2:", len(gold_rel2), file=DEBUG_LOG)
            print(gold_rel2, file=DEBUG_LOG)
        (best_match, best_match_num) = smatch.get_best_match(test_inst, test_rel1, test_rel2,
                                                            gold_inst, gold_rel1, gold_rel2,
                                                            test_label, gold_label)
        if verbose:
            print("best match number", best_match_num, file=DEBUG_LOG)
            print("Best Match:", smatch.print_alignment(best_match, test_inst, gold_inst), file=DEBUG_LOG)
        match_total += best_match_num
        test_total += (len(test_inst) + len(test_rel1) + len(test_rel2))
        gold_total += (len(gold_inst) + len(gold_rel1) + len(gold_rel2))
        # The memoization table must be cleared between AMR pairs.
        smatch.match_triple_dict.clear()
    (precision, recall, f_score) = smatch.compute_f(match_total, test_total, gold_total)
    return "%.2f" % f_score
Compute the smatch scores for a file list between two users Args: user1: user 1 name user2: user 2 name file_list: file list dir_pre: the file location prefix start_num: the number of restarts in smatch Returns: smatch f score.
juraj-google-style
def matches(self, spec):
    """Whether `spec` applies to this object.

    `spec` may be a predicate function (called with this object), a type
    (isinstance check), or a 'type[.group[.label]]' string or tuple where
    '*' or None wildcards a component.
    """
    # Callables that are not classes act as predicates.
    if (callable(spec) and (not isinstance(spec, type))):
        return spec(self)
    elif isinstance(spec, type):
        return isinstance(self, spec)
    specification = (self.__class__.__name__, self.group, self.label)
    split_spec = (tuple(spec.split('.')) if (not isinstance(spec, tuple)) else spec)
    # nocompare marks wildcard components ('*' or None) to skip.
    (split_spec, nocompare) = zip(*(((None, True) if ((s == '*') or (s is None)) else (s, False)) for s in split_spec))
    if all(nocompare):
        return True
    match_fn = itemgetter(*(idx for (idx, nc) in enumerate(nocompare) if (not nc)))
    self_spec = match_fn(split_spec)
    # First try matching against the raw identifiers.
    unescaped_match = (match_fn(specification[:len(split_spec)]) == self_spec)
    if unescaped_match:
        return True
    # Fall back to the sanitized identifiers (type, group, label).
    sanitizers = [util.sanitize_identifier, util.group_sanitizer, util.label_sanitizer]
    identifier_specification = tuple((fn(ident, escape=False) for (ident, fn) in zip(specification, sanitizers)))
    identifier_match = (match_fn(identifier_specification[:len(split_spec)]) == self_spec)
    return identifier_match
Whether the spec applies to this object. Args: spec: A function, spec or type to check for a match * A 'type[[.group].label]' string which is compared against the type, group and label of this object * A function which is given the object and returns a boolean. * An object type matched using isinstance. Returns: bool: Whether the spec matched this object.
codesearchnet
def split_input(cls, mapper_spec):
    """Returns a list of input readers, covering each blob with shards.

    Bug fix: `blob_chunk_size` was set to the whole blob size, so every
    shard after the first started at or beyond the end of the blob; each
    blob is now divided into `shards_per_blob` equal byte ranges (the last
    range absorbs the remainder).

    Args:
        mapper_spec: The mapper specification to split from. Must contain
            a 'blob_keys' parameter with one or more blob keys.

    Returns:
        A list of BlobstoreLineInputReaders corresponding to the shards.
    """
    params = _get_params(mapper_spec)
    blob_keys = params[cls.BLOB_KEYS_PARAM]
    if isinstance(blob_keys, basestring):
        blob_keys = blob_keys.split(",")
    blob_sizes = {}
    for blob_key in blob_keys:
        blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
        blob_sizes[blob_key] = blob_info.size
    shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
    shards_per_blob = shard_count
    if shards_per_blob == 0:
        shards_per_blob = 1
    chunks = []
    for blob_key, blob_size in blob_sizes.items():
        # Divide each blob into equal byte ranges, one per shard.
        blob_chunk_size = blob_size
        for i in xrange(shards_per_blob - 1):
            chunks.append(BlobstoreLineInputReader.from_json(
                {cls.BLOB_KEY_PARAM: blob_key,
                 cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,
                 cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
        # Last shard runs to the true end of the blob.
        chunks.append(BlobstoreLineInputReader.from_json(
            {cls.BLOB_KEY_PARAM: blob_key,
             cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),
             cls.END_POSITION_PARAM: blob_size}))
    return chunks
Returns a list of shard_count input_spec_shards for input_spec. Args: mapper_spec: The mapper specification to split from. Must contain 'blob_keys' parameter with one or more blob keys. Returns: A list of BlobstoreInputReaders corresponding to the specified shards.
juraj-google-style
def SetInputSourceConfiguration(self, configuration):
    """Sets the input source configuration settings.

    Args:
        configuration (InputSourceConfiguration): input source configuration;
            only its mount_path is consumed here.
    """
    path = configuration.mount_path
    # Drop exactly one trailing separator so later joins stay consistent.
    if path and path.endswith(os.sep):
        path = path[:-len(os.sep)]
    self._mount_path = path
Sets the input source configuration settings. Args: configuration (InputSourceConfiguration): input source configuration.
juraj-google-style
def _use_widgets(objs):
    """Whether a collection of Bokeh objects contains any Widget.

    Args:
        objs (seq[Model or Document]):

    Returns:
        bool
    """
    from ..models.widgets import Widget

    def _is_widget(obj):
        return isinstance(obj, Widget)

    return _any(objs, _is_widget)
Whether a collection of Bokeh objects contains any Widget

    Args:
        objs (seq[Model or Document]) :

    Returns:
        bool
codesearchnet
def limit_weights(weights, limit=0.1):
    """Cap every weight at `limit`, redistributing the excess proportionally
    among the weights below the cap; recurses until no weight exceeds it.

    ex: weights {a: 0.7, b: 0.2, c: 0.1} with limit=0.5 ->
        {a: 0.5, b: ~0.333, c: ~0.167}

    Args:
        weights (dict or Series): weights that sum to 1.
        limit (float): maximum allowed weight (1/limit <= len(weights)).

    Returns:
        Series: adjusted weights.

    Raises:
        ValueError: if the limit is infeasible or weights do not sum to 1.
    """
    if 1.0 / limit > len(weights):
        raise ValueError('invalid limit -> 1 / limit must be <= len(weights)')

    if isinstance(weights, dict):
        weights = pd.Series(weights)

    if np.round(weights.sum(), 1) != 1.0:
        raise ValueError('Expecting weights (that sum to 1) - sum is %s' % weights.sum())

    res = np.round(weights.copy(), 4)
    over = res > limit
    under = res < limit
    # Total amount clipped off the over-limit weights.
    excess = (res[over] - limit).sum()
    boosted = res[under]
    boosted = boosted + (boosted / boosted.sum()) * excess
    res[over] = limit
    res[under] = boosted
    # Redistribution may itself push a weight over the cap; recurse.
    if (res > limit).any():
        return limit_weights(res, limit=limit)
    return res
Limits weights and redistributes the excess amount proportionally.

    ex:
        - weights are {a: 0.7, b: 0.2, c: 0.1}
        - call with limit=0.5
        - the excess 0.2 in a is distributed to b and c proportionally.
        - result is {a: 0.5, b: 0.33, c: 0.167}

    Args:
        * weights (Series): A series describing the weights
        * limit (float): Maximum weight allowed
codesearchnet
def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
    """Rasterize a GeoJSON (Multi)Polygon feature into a binary numpy mask.

    Fixes: the column slices were written as invalid syntax
    (`outline[(:, 0)]`), and `np.fromstring` (removed for binary input)
    is replaced with `np.frombuffer`.

    Args:
        feature (pygeoj.Feature): polygon feature to draw
        shape (tuple(int, int)): (rows, cols) of the target 2D array
        lat_idx (func): maps latitude -> (fractional) row index
        lon_idx (func): maps longitude -> (fractional) column index

    Returns:
        np.array: mask, background 0.0, polygon interior 1.0

    Raises:
        ValueError: for geometry types other than Polygon/MultiPolygon.
    """
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from matplotlib import patches
    import numpy as np
    if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
        raise ValueError('Cannot handle feature of type ' + feature.geometry.type)
    dpi = 100
    fig = plt.figure(frameon=False, dpi=dpi)
    fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
    ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
    ax.set_axis_off()
    ax.set_xlim([0, shape[1]])
    ax.set_ylim([0, shape[0]])
    fig.add_axes(ax)
    if feature.geometry.type == 'Polygon':
        coords = [feature.geometry.coordinates]
    else:
        coords = feature.geometry.coordinates
    for poly_coords in coords:
        for i, outline in enumerate(poly_coords):
            # Ring 0 is the exterior ring (foreground after inversion);
            # subsequent rings are holes.
            value = 0.0 if i == 0 else 1.0
            outline = np.array(outline)
            xs = lon_idx(outline[:, 0])
            ys = lat_idx(outline[:, 1])
            poly = patches.Polygon(list(zip(xs, ys)), facecolor=(value, value, value), edgecolor='none', antialiased=True)
            ax.add_patch(poly)
    fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    # One channel is enough: the drawing is grayscale.
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
    assert data.shape[0] == shape[0]
    assert data.shape[1] == shape[1]
    data = 1.0 - data.astype(float) / 255.0
    # Flip vertically: image rows grow downward, map rows grow upward.
    data = data[::-1, :]
    plt.close('all')
    return data
Convert a GeoJSON polygon feature to a numpy array Args: feature (pygeoj.Feature): polygon feature to draw shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in lat_idx (func): function converting a latitude to the (fractional) row index in the map lon_idx (func): function converting a longitude to the (fractional) column index in the map Returns: np.array: mask, background is zero, foreground is one
codesearchnet
def build(self, text, matrix, skim_depth=10, d_weights=False):
    """Add edges for each term's top-N KDE-similar siblings.

    Args:
        text (Text): The source text instance.
        matrix (Matrix): An indexed term matrix.
        skim_depth (int): number of siblings per term.
        d_weights (bool): if True, store (1 - weight) as edge weight
            (treat similarities as distances).
    """
    # NOTE(review): `matrix.keys` is passed uncalled -- presumably an
    # iterable attribute on Matrix, not the dict method; confirm upstream.
    for anchor in bar(matrix.keys):
        n1 = text.unstem(anchor)
        pairs = matrix.anchored_pairs(anchor).items()
        # Skim off the top `skim_depth` pairs for this anchor.
        for (term, weight) in list(pairs)[:skim_depth]:
            if d_weights:
                weight = (1 - weight)
            n2 = text.unstem(term)
            self.graph.add_edge(n1, n2, weight=float(weight))
1. For each term in the passed matrix, score its KDE similarity with all other indexed terms. 2. With the ordered stack of similarities in hand, skim off the top X pairs and add them as edges. Args: text (Text): The source text instance. matrix (Matrix): An indexed term matrix. skim_depth (int): The number of siblings for each term. d_weights (bool): If true, give "close" words low edge weights.
codesearchnet
def start(self):
    """Fetch the rtm ws_host and user information.

    Returns:
        None if the request failed or had no result, else a dict with
        "user" and "ws_host" keys.
    """
    resp = self.post('start')
    if resp.is_fail() or 'result' not in resp.data:
        return None
    result = resp.data['result']
    return {'user': result['user'], 'ws_host': result['ws_host']}
Gets the rtm ws_host and user information Returns: None if request failed, else a dict containing "user"(User) and "ws_host"
codesearchnet
def isna(obj):
    """Detect missing values for an array-like object.

    Args:
        obj: Object to check for null or missing values.

    Returns:
        bool or array-like of bool
    """
    if isinstance(obj, BasePandasDataset):
        # Defer to the dataset's own element-wise isna.
        return obj.isna()
    else:
        return pandas.isna(obj)
Detect missing values for an array-like object. Args: obj: Object to check for null or missing values. Returns: bool or array-like of bool
juraj-google-style
def set_tag(self, key, value, update_session=True):
    """Create or update the tag `key` with `value`.

    Args:
        key (str): Key of the tag
        value (str): Value of the tag
        update_session (bool): Add the change to the SQLAlchemy session.

    Returns:
        bool: True if a tag was created or changed, False if it already
        had this value.
    """
    existing_tags = {x.key: x for x in self.tags}
    if key in existing_tags:
        tag = existing_tags[key]
        if tag.value == value:
            # Nothing to do -- the tag already has this value.
            return False
        tag.value = value
    else:
        tag = Tag()
        tag.resource_id = self.id
        tag.key = key
        tag.value = value
        self.tags.append(tag)
    if update_session:
        db.session.add(tag)
    return True
Create or set the value of the tag with `key` to `value`. Returns `True` if the tag was created or updated or `False` if there were no changes to be made. Args: key (str): Key of the tag value (str): Value of the tag update_session (bool): Automatically add the change to the SQLAlchemy session. Default: True Returns: `bool`
juraj-google-style
def to_grayscale(img):
    """Convert a PIL image to a float grayscale array plus optional alpha.

    Fix: `numpy.float` was an alias for the builtin `float` and has been
    removed in NumPy >= 1.24; use `float` directly.

    Args:
        img (PIL.Image): PIL Image object.

    Returns:
        (gray, alpha): numpy float arrays; alpha is None when the image
        has no 'A' band.
    """
    gray = numpy.asarray(ImageOps.grayscale(img)).astype(float)
    imbands = img.getbands()
    alpha = None
    if 'A' in imbands:
        # The alpha channel is always the last band when present.
        alpha = numpy.asarray(img.split()[-1]).astype(float)
    return gray, alpha
Convert PIL image to numpy grayscale array and numpy alpha array. Args: img (PIL.Image): PIL Image object. Returns: (gray, alpha): both numpy arrays.
juraj-google-style
def match_exists(self, field, required=True, new_group=False):
    """Require `field` to exist (any value) via a wildcard match.

    Args:
        field (str): dot-namespaced field name, e.g. "mdf.source_name".
        required (bool): combine with AND (True) or OR (False).
        new_group (bool): start a new parenthetical group.

    Returns:
        SearchHelper: self, for chaining.
    """
    return self.match_field(field, '*', required=required, new_group=new_group)
Require a field to exist in the results. Matches will have some value in ``field``. Arguments: field (str): The field to check. The field must be namespaced according to Elasticsearch rules using the dot syntax. For example, ``"mdf.source_name"`` is the ``source_name`` field of the ``mdf`` dictionary. required (bool): If ``True``, will add term with ``AND``. If ``False``, will use ``OR``. **Default:** ``True``. new_group (bool): If ``True``, will separate the term into a new parenthetical group. If ``False``, will not. **Default:** ``False``. Returns: SearchHelper: Self
codesearchnet
def get(quantity, min_type=EventType.firstevent, max_type=EventType.lastevent):
    """Return and remove up to `quantity` events from the front of the queue.

    Only events whose type falls within [min_type, max_type] are returned.

    Args:
        quantity (int): maximum number of events to return.
        min_type (int): minimum event type value.
        max_type (int): maximum event type value.

    Returns:
        List[Event]: events removed from the front of the queue.
    """
    return _peep(quantity, lib.SDL_GETEVENT, min_type, max_type)
Return events at the front of the event queue, within the specified minimum and maximum type, and remove them from the queue. Args: quantity (int): The maximum number of events to return. min_type (int): The minimum value for the event type of the returned events. max_type (int): The maximum value for the event type of the returned events. Returns: List[Event]: Events from the front of the event queue. Raises: SDLError: If there was an error retrieving the events.
codesearchnet
def Install(self, apk_path, destination_dir='', replace_existing=True, grant_permissions=False, timeout_ms=None, transfer_progress_callback=None):
    """Push an apk to the device and install it via `pm install`.

    Args:
        apk_path: Local path to apk to install.
        destination_dir: Optional destination directory; defaults to
            /data/local/tmp/. Use /system/app/ for persistent applications.
        replace_existing: whether to replace an existing application (-r).
        grant_permissions: if True, grant all permissions declared in the
            app's manifest (-g).
        timeout_ms: Expected timeout for pushing and installing.
        transfer_progress_callback: callback receiving filename,
            bytes_written and total_bytes during the APK transfer.

    Returns:
        The `pm install` output.
    """
    if not destination_dir:
        destination_dir = '/data/local/tmp/'
    basename = os.path.basename(apk_path)
    destination_path = posixpath.join(destination_dir, basename)
    self.Push(apk_path, destination_path, timeout_ms=timeout_ms, progress_callback=transfer_progress_callback)
    cmd = ['pm install']
    if grant_permissions:
        cmd.append('-g')
    if replace_existing:
        cmd.append('-r')
    cmd.append('"{}"'.format(destination_path))
    ret = self.Shell(' '.join(cmd), timeout_ms=timeout_ms)
    # Best-effort cleanup of the pushed apk; the result is intentionally
    # ignored (it was previously bound to an unused local).
    rm_cmd = ['rm', destination_path]
    self.Shell(' '.join(rm_cmd), timeout_ms=timeout_ms)
    return ret
Install an apk to the device. Doesn't support verifier file, instead allows destination directory to be overridden. Args: apk_path: Local path to apk to install. destination_dir: Optional destination directory. Use /system/app/ for persistent applications. replace_existing: whether to replace existing application grant_permissions: If True, grant all permissions to the app specified in its manifest timeout_ms: Expected timeout for pushing and installing. transfer_progress_callback: callback method that accepts filename, bytes_written and total_bytes of APK transfer Returns: The pm install output.
codesearchnet
def from_sample_rate(sample_rate, n_bands, always_even=False):
    """Build a LinearScale whose upper bound is the sample rate's nyquist.

    Args:
        sample_rate (SamplingRate): provides the nyquist frequency used as
            the upper bound of the scale.
        n_bands (int): number of evenly-spaced frequency bands.
        always_even (bool): forwarded to LinearScale.
    """
    fb = FrequencyBand(0, sample_rate.nyquist)
    return LinearScale(fb, n_bands, always_even=always_even)
Return a :class:`~zounds.spectral.LinearScale` instance whose upper frequency bound is informed by the nyquist frequency of the sample rate. Args: sample_rate (SamplingRate): the sample rate whose nyquist frequency will serve as the upper frequency bound of this scale n_bands (int): the number of evenly-spaced frequency bands
juraj-google-style
def _tower_loss(images, labels, num_classes, scope, reuse_variables=None):
    """Calculate the total loss for a single tower of the ImageNet model.

    Args:
        images: 4D image batch tensor for this tower.
        labels: 1-D integer label tensor.
        num_classes: number of classes.
        scope: unique tower prefix, e.g. 'tower_0'.
        reuse_variables: passed to the variable scope's `reuse`.

    Returns:
        0-D tensor containing the total loss for this tower's batch.
    """
    # Only restore the final logits layer when not fine-tuning.
    restore_logits = (not FLAGS.fine_tune)
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        logits = inception.inference(images, num_classes, for_training=True, restore_logits=restore_logits, scope=scope)
    split_batch_size = images.get_shape().as_list()[0]
    inception.loss(logits, labels, batch_size=split_batch_size)
    # Only this tower's losses; regularization losses are graph-wide.
    losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_loss = tf.add_n((losses + regularization_losses), name='total_loss')
    # Track exponential moving averages of the raw losses for summaries.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply((losses + [total_loss]))
    for l in (losses + [total_loss]):
        # Strip the tower prefix so summaries from all towers share names.
        loss_name = re.sub(('%s_[0-9]*/' % inception.TOWER_NAME), '', l.op.name)
        tf.summary.scalar((loss_name + ' (raw)'), l)
        tf.summary.scalar(loss_name, loss_averages.average(l))
    # Force the averaging update to run before the loss is consumed.
    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    return total_loss
Calculate the total loss on a single tower running the ImageNet model. We perform 'batch splitting'. This means that we cut up a batch across multiple GPU's. For instance, if the batch size = 32 and num_gpus = 2, then each tower will operate on an batch of 16 images. Args: images: Images. 4D tensor of size [batch_size, FLAGS.image_size, FLAGS.image_size, 3]. labels: 1-D integer Tensor of [batch_size]. num_classes: number of classes scope: unique prefix string identifying the ImageNet tower, e.g. 'tower_0'. Returns: Tensor of shape [] containing the total loss for a batch of data
codesearchnet
def alltoall(self, x, mesh_axis, split_axis, concat_axis):
    """Grouped alltoall along one mesh axis.

    Args:
        x: a LaidOutTensor
        mesh_axis: integer mesh axis along which to group
        split_axis: integer Tensor axis along which to split
        concat_axis: integer Tensor axis along which to concatenate

    Returns:
        a LaidOutTensor
    """
    return self._collective_with_groups(x, [mesh_axis], functools.partial(alltoall_ring, split_axis=split_axis, concat_axis=concat_axis))
Grouped alltoall. Args: x: a LaidOutTensor mesh_axis: an integer the mesh axis along which to group split_axis: an integer (the Tensor axis along which to split) concat_axis: an integer (the Tensor axis along which to concatenate) Returns: a LaidOutTensor
codesearchnet
def _extract_units(self, obj, value): if isinstance(value, dict): if ('units' in value): value = copy(value) units = value.pop('units', None) if units: self.units_prop.__set__(obj, units) return value
Internal helper for dealing with units associated units properties when setting values on |UnitsSpec| properties. When ``value`` is a dict, this function may mutate the value of the associated units property. Args: obj (HasProps) : instance to update units spec property value for value (obj) : new value to set for the property Returns: copy of ``value``, with 'units' key and value removed when applicable
codesearchnet
def sg_summary_gradient(tensor, gradient, prefix=None, name=None):
    r"""Register `gradient` statistics for `tensor` in the summary report.

    Fix: the body started with a bare `r` expression (leftover of a
    stripped raw docstring) which raised NameError at call time.

    Args:
        tensor: A `Tensor` whose gradient is being logged.
        gradient: A gradient `Tensor` to log.
        prefix: A `string` prefix for the tensorboard display name.
        name: A `string` display name; derived from `tensor` if None.

    Returns:
        None
    """
    prefix = '' if prefix is None else prefix + '/'
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    # Log the mean absolute gradient as a scalar, and its distribution.
    _scalar(name + '/grad', tf.reduce_mean(tf.abs(gradient)))
    _histogram(name + '/grad-h', tf.abs(gradient))
r"""Register `tensor` to summary report as `gradient` Args: tensor: A `Tensor` to log as gradient gradient: A 0-D `Tensor`. A gradient to log prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
juraj-google-style
def teleport(self, location=None, rotation=None):
    """Teleport the agent to a specific location and/or rotation.

    Args:
        location (np.ndarray, optional): three-element target world
            coordinate in meters; None keeps the current location.
        rotation (np.ndarray, optional): three-element target rotation;
            None keeps the current rotation.

    Returns:
        None
    """
    # Bit 0 signals a location update, bit 1 a rotation update.
    flags = 0
    if location is not None:
        flags |= 1
        np.copyto(self._teleport_buffer, location)
    if rotation is not None:
        flags |= 2
        np.copyto(self._rotation_buffer, rotation)
    self._teleport_bool_buffer[0] = flags
Teleports the agent to a specific location, with a specific rotation. Args: location (np.ndarray, optional): An array with three elements specifying the target world coordinate in meters. If None, keeps the current location. Defaults to None. rotation (np.ndarray, optional): An array with three elements specifying the target rotation of the agent. If None, keeps the current rotation. Defaults to None. Returns: None
codesearchnet
def load_sst(path=None, url='https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
    """Download (if needed) and load the Stanford Sentiment Treebank.

    Fix: the `url` default was a truncated string literal ("'http:"), a
    syntax error; restored the canonical SST download URL (the docstring
    says it "defaults to nlp.stanford.edu address").

    Args:
        path (str): directory where the corpus is downloaded/imported from;
            defaults to ~/stanford_sentiment_treebank/.
        url (str): where the corpus should be downloaded from.

    Returns:
        dict: keys 'train', 'dev' and 'test' mapping to lists of
        LabeledTrees.
    """
    if path is None:
        path = os.path.expanduser('~/stanford_sentiment_treebank/')
    makedirs(path, exist_ok=True)
    fnames = download_sst(path, url)
    return {key: import_tree_corpus(value) for key, value in fnames.items()}
Download and read in the Stanford Sentiment Treebank dataset into a dictionary with a 'train', 'dev', and 'test' keys. The dictionary keys point to lists of LabeledTrees. Arguments: ---------- path : str, (optional defaults to ~/stanford_sentiment_treebank), directory where the corpus should be downloaded (and imported from). url : str, where the corpus should be downloaded from (defaults to nlp.stanford.edu address). Returns: -------- dict : loaded dataset
codesearchnet
def unfold_tensor(tensor, max_seq_len):
    """Split a (N, T, D) tensor into non-overlapping time windows.

    For T longer than max_seq_len the result has shape
    (N * T // max_seq_len, max_seq_len, D); F.unfold drops any remainder
    when T is not a multiple of max_seq_len.

    Fix: the final `.view` was applied to the non-contiguous permuted
    tensor, which raises a stride-compatibility RuntimeError; `.reshape`
    copies when needed and is otherwise equivalent.

    Args:
        tensor: input of shape (N, T, D).
        max_seq_len: window length along the time axis.

    Returns:
        Tensor of shape (N * T // max_seq_len, max_seq_len, D).
    """
    _, _, D = tensor.shape
    tensor = tensor.transpose(-1, -2)
    # Treat D as channels and T as width so F.unfold extracts time windows.
    tensor = F.unfold(tensor[..., None, :], kernel_size=(1, max_seq_len), stride=(1, max_seq_len))
    new_bsz, _, slen = tensor.shape
    tensor = tensor.view(new_bsz, -1, max_seq_len, slen)
    tensor = tensor.permute(0, 3, 2, 1)
    tensor = tensor.reshape(-1, max_seq_len, D).contiguous()
    return tensor
For a given tensor with shape (N, T, D), if the sequence length T is longer than
    max_seq_len, this function unfolds it into a (N * T', max_seq_len, D) tensor,
    where T' is T // max_seq_len.

    Args:
        tensor: input tensor of shape (N, T, D)
github-repos
def mark_flag_as_required(flag_name, flag_values=FLAGS):
    """Ensure the flag is not None at app.run() time.

    Registers a validator that fails for None but passes for any other
    value, including falsy ones such as False, 0 or ''.

    Args:
        flag_name: string, name of the flag
        flag_values: FlagValues registry holding the flag

    Raises:
        AttributeError: if flag_name is not a registered flag name.
    """
    if (flag_values[flag_name].default is not None):
        # A non-None default makes the validator vacuously pass even when
        # the flag is omitted on the command line.
        warnings.warn(('Flag %s has a non-None default value; therefore, mark_flag_as_required will pass even if flag is not specified in the command line!' % flag_name))
    register_validator(flag_name, (lambda value: (value is not None)), message=('Flag --%s must be specified.' % flag_name), flag_values=flag_values)
Ensures that flag is not None during program execution. Registers a flag validator, which will follow usual validator rules. Important note: validator will pass for any non-None value, such as False, 0 (zero), '' (empty string) and so on. It is recommended to call this method like this: if __name__ == '__main__': gflags.mark_flag_as_required('your_flag_name') app.run() Because validation happens at app.run() we want to ensure required-ness is enforced at that time. However, you generally do not want to force users who import your code to have additional required flags for their own binaries or tests. Args: flag_name: string, name of the flag flag_values: FlagValues Raises: AttributeError: if flag_name is not registered as a valid flag name.
codesearchnet
def owned_by(self, owner, also_check_group=False):
    """Check whether the specified user owns the file.

    Args:
        owner (str): user (or group) name to check ownership against.
        also_check_group (bool): if True, `owner` must match both the user
            owner and the group owner; if False, only the user owner.

    Returns:
        bool: True when ownership matches.
    """
    user_match = self.owner == owner
    if not also_check_group:
        return user_match
    return user_match and self.group == owner
Checks if the specified user or user and group own the file. Args: owner (str): the user (or group) name for which we ask about ownership also_check_group (bool): if set to True, both user owner and group owner checked if set to False, only user owner checked Returns: bool: True if owner of the file is the specified owner
juraj-google-style
class JsonPipelineDataFormat(PipelineDataFormat):
    """Pipeline data format backed by a JSON file.

    The whole input file is loaded eagerly at construction time; iteration
    yields one value (or dict of values) per entry.
    """

    def __init__(self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False):
        super().__init__(output_path, input_path, column, overwrite=overwrite)
        # Load all entries up front; __iter__ walks this in-memory list.
        with open(input_path, 'r') as f:
            self._entries = json.load(f)

    def __iter__(self):
        for entry in self._entries:
            if self.is_multi_columns:
                # self.column holds (key, column-name) pairs in this mode.
                yield {k: entry[c] for k, c in self.column}
            else:
                yield entry[self.column[0]]

    def save(self, data: dict):
        """Write `data` to the configured output path as JSON."""
        with open(self.output_path, 'w') as f:
            json.dump(data, f)
Support for pipelines using JSON file format. Args: output_path (`str`): Where to save the outgoing data. input_path (`str`): Where to look for the input data. column (`str`): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`.
github-repos
def GetWindowsEventMessage(self, log_source, message_identifier):
    """Retrieves the message string for a Windows Event Log source.

    Args:
        log_source (str): Event Log source, such as "Application Error".
        message_identifier (int): message identifier.

    Returns:
        str: message string or None if not available.
    """
    database_reader = self._GetWinevtRcDatabaseReader()
    if (not database_reader):
        return None
    if (self._lcid != self.DEFAULT_LCID):
        # Prefer the configured language; fall back to the default below.
        # NOTE(review): compares self._lcid but queries with self.lcid --
        # presumably an attribute/property pair; confirm on the class.
        message_string = database_reader.GetMessage(log_source, self.lcid, message_identifier)
        if message_string:
            return message_string
    return database_reader.GetMessage(log_source, self.DEFAULT_LCID, message_identifier)
Retrieves the message string for a specific Windows Event Log source. Args: log_source (str): Event Log source, such as "Application Error". message_identifier (int): message identifier. Returns: str: message string or None if not available.
codesearchnet
def assert_matches_stdout(actual, expected_stdout, normalize_fn=lambda elem: elem, label=''):
    """Asserts a PCollection of strings matches the expected stdout elements.

    Args:
        actual (beam.PCollection): A PCollection of stdout lines.
        expected_stdout (List[str]): expected stdout elements, one line per
            element.
        normalize_fn (Function[any]): normalizes elements before comparison
            (e.g. sorting lists).
        label (str): optional label to keep transform names unique.
    """
    def stdout_to_python_object(elem_str):
        # Lines that parse as Python literals are compared as objects;
        # anything else is compared as the raw string.
        try:
            elem = ast.literal_eval(elem_str)
        except (SyntaxError, ValueError):
            elem = elem_str
        return normalize_fn(elem)

    actual = actual | label >> beam.Map(stdout_to_python_object)
    expected = list(map(stdout_to_python_object, expected_stdout))
    assert_that(actual, equal_to(expected), 'assert ' + label)
Asserts a PCollection of strings matches the expected stdout elements. Args: actual (beam.PCollection): A PCollection. expected (List[str]): A list of stdout elements, one line per element. normalize_fn (Function[any]): A function to normalize elements before comparing them. Can be used to sort lists before comparing. label (str): [optional] Label to make transform names unique.
github-repos
async def _auth_plain(self, username, password):
    """Performs an authentication attempt using the PLAIN mechanism.

    Args:
        username (str): Identifier of the user trying to authenticate.
        password (str): Password for the user.

    Raises:
        SMTPAuthenticationError: If the authentication attempt fails.

    Returns:
        (int, str): (code, message) 2-tuple with the server response.
    """
    mechanism = 'PLAIN'
    # NUL-separated "authzid\0authcid\0password" with an empty authzid.
    credentials = '\x00{}\x00{}'.format(username, password)
    encoded_credentials = SMTP.b64enc(credentials)
    try:
        # 235 = authenticated; 503 = already authenticated.
        (code, message) = (await self.do_cmd('AUTH', mechanism, encoded_credentials, success=(235, 503)))
    except SMTPCommandFailedError as e:
        raise SMTPAuthenticationError(e.code, e.message, mechanism)
    return (code, message)
Performs an authentication attempt using the PLAIN mechanism. Protocol: 1. Format the username and password in a suitable way ; 2. The formatted string is base64-encoded ; 3. The string 'AUTH PLAIN' and a space character are prepended to the base64-encoded username and password and sent to the server ; 4. If the server replies with a 235 return code, user is authenticated. Args: username (str): Identifier of the user trying to authenticate. password (str): Password for the user. Raises: ConnectionResetError: If the connection with the server is unexpectedely lost. SMTPAuthenticationError: If the authentication attempt fails. Returns: (int, str): A (code, message) 2-tuple containing the server response.
codesearchnet
def VerifyStructure(self, parser_mediator, lines):
    """Verifies whether content corresponds to an SCCM log file.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        lines (str): one or more lines from the text file.

    Returns:
        bool: True if this is the correct parser, False otherwise.
    """
    # NOTE(review): `.match` here is presumably the literal-string
    # attribute of a pyparsing element (making this a substring test),
    # not a regex method -- confirm against _PARSING_COMPONENTS.
    match = self._PARSING_COMPONENTS['msg_left_delimiter'].match
    return match in lines
Verifies whether content corresponds to an SCCM log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. lines (str): one or more lines from the text file. Returns: bool: True if this is the correct parser, False otherwise.
juraj-google-style
def load_ui_wrapper(uifile, base_instance=None):
    """Load a Qt Designer .ui file and return the user interface instance.

    Args:
        uifile (str): Absolute path to .ui file
        base_instance (QWidget): The widget into which UI widgets are loaded

    Returns:
        The loaded UI (binding-specific); implicitly None when
        __binding__ names neither PySide nor PyQt.
    """
    if 'PySide' in __binding__:
        return pyside_load_ui(uifile, base_instance)
    elif 'PyQt' in __binding__:
        # uic lives inside the binding package (e.g. PyQt5.uic).
        uic = __import__(__binding__ + ".uic").uic
        return uic.loadUi(uifile, base_instance)
Load a Qt Designer .ui file and returns an instance of the user interface Args: uifile (str): Absolute path to .ui file base_instance (QWidget): The widget into which UI widgets are loaded Returns: function: pyside_load_ui or uic.loadUi
juraj-google-style
def __call__(self, *args, **kwargs):
    """Call the wrapped function, retrying on retryable exceptions.

    Kwargs consumed here (not passed through to the wrapped function):
        retry_timedelta: total time budget for retries.
        num_retries: maximum number of retry iterations.
        retry_sleep_base: initial backoff sleep in seconds.
        check_retry_fn: predicate deciding if an exception is retryable.
    """
    retry_timedelta = kwargs.pop('retry_timedelta', self._retry_timedelta)
    if retry_timedelta is None:
        # Effectively unbounded time budget.
        retry_timedelta = datetime.timedelta(days=1000000)
    num_retries = kwargs.pop('num_retries', self._num_retries)
    if num_retries is None:
        num_retries = 1000000
    if os.environ.get('WANDB_TEST'):
        # Never retry under test to keep runs deterministic.
        num_retries = 0
    sleep_base = kwargs.pop('retry_sleep_base', 1)
    check_retry_fn = kwargs.pop('check_retry_fn', self._check_retry_fn)
    first = True
    sleep = sleep_base
    start_time = datetime.datetime.now()
    now = start_time
    self._num_iter = 0
    while True:
        try:
            result = self._call_fn(*args, **kwargs)
            if not first:
                wandb.termlog('{} resolved after {}, resuming normal operation.'.format(
                    self._error_prefix, datetime.datetime.now() - start_time))
            return result
        except self._retryable_exceptions as e:
            if not check_retry_fn(e):
                raise
            # Give up once either budget (time or iterations) is exhausted.
            if (datetime.datetime.now() - start_time >= retry_timedelta
                    or self._num_iter >= num_retries):
                raise
            if self._num_iter == 2:
                # Log the full traceback once, early in the retry loop.
                logger.exception('Retry attempt failed:')
                wandb.termlog(
                    '{} ({}), entering retry loop. See {} for full traceback.'.format(
                        self._error_prefix, e.__class__.__name__, util.get_log_file_path()))
            if wandb.env.is_debug():
                traceback.print_exc()
            first = False
            # Exponential backoff with up to 25% jitter, capped.
            time.sleep(sleep + random.random() * 0.25 * sleep)
            sleep *= 2
            if sleep > self.MAX_SLEEP_SECONDS:
                sleep = self.MAX_SLEEP_SECONDS
            now = datetime.datetime.now()
            self._num_iter += 1
Call the wrapped function, with retries. Args: retry_timedelta (kwarg): amount of time to retry before giving up. sleep_base (kwarg): amount of time to sleep upon first failure, all other sleeps are derived from this one.
juraj-google-style
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # One stub method per RPC; (de)serializers come from the generated
    # protobuf modules.
    self.DeployStorageSecret = channel.unary_unary(
        '/deploy.API/DeployStorageSecret',
        request_serializer=client_dot_deploy_dot_deploy__pb2.DeployStorageSecretRequest.SerializeToString,
        response_deserializer=client_dot_deploy_dot_deploy__pb2.DeployStorageSecretResponse.FromString,
        )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None:
    """Add `variable_name` -> `replacement`, merging with any existing value.

    Merging succeeds when the two replacements are equivalent, or when the
    existing unordered (Multiset) replacement is equivalent to the new
    ordered one (in which case the ordered form wins).

    Raises:
        ValueError: if the replacement conflicts with the existing one.
    """
    if (variable_name not in self):
        # Store unordered replacements as fresh Multiset copies.
        self[variable_name] = (replacement.copy() if isinstance(replacement, Multiset) else replacement)
    else:
        existing_value = self[variable_name]
        if isinstance(existing_value, tuple):
            if isinstance(replacement, Multiset):
                # An ordered value only merges with an equivalent multiset.
                if (Multiset(existing_value) != replacement):
                    raise ValueError
            elif (replacement != existing_value):
                raise ValueError
        elif isinstance(existing_value, Multiset):
            if (not isinstance(replacement, (tuple, list, Multiset))):
                raise ValueError
            compare_value = Multiset(replacement)
            if (existing_value == compare_value):
                if (not isinstance(replacement, Multiset)):
                    # Upgrade to the equivalent ordered replacement.
                    self[variable_name] = replacement
            else:
                raise ValueError
        elif (replacement != existing_value):
            raise ValueError
Try to add the variable with its replacement to the substitution.

    This considers an existing replacement and will only succeed if the new replacement
    can be merged with the old replacement. Merging can occur if the two replacements
    are equivalent. Replacements can also be merged if the old replacement for the
    variable_name was unordered (i.e. a :class:`~.Multiset`) and the new one is an
    equivalent ordered version of it:

    >>> subst = Substitution({'x': Multiset(['a', 'b'])})
    >>> subst.try_add_variable('x', ('a', 'b'))
    >>> print(subst)
    {x ↦ (a, b)}

    Args:
        variable_name:
            The name of the variable to add.
        replacement:
            The replacement for the variable.

    Raises:
        ValueError:
            if the variable cannot be merged because it conflicts with the existing
            substitution for the variable_name.
codesearchnet
def fetch_github_pull_request(destination_directory: str, repository: github_repository.GithubRepository, pull_request_number: int, verbose: bool) -> prepared_env.PreparedEnv:
    """Fetch a github pull request into a fresh local git repo for testing.

    Args:
        destination_directory: location to fetch the contents into.
        repository: the github repository the commit lives under.
        pull_request_number: id of the pull request to clone.
        verbose: when set, more progress output is produced.

    Returns:
        PreparedEnv with the commit ids of the content to test/compare.
    """
    branch = 'pull/{}/head'.format(pull_request_number)
    os.chdir(destination_directory)
    print('chdir', destination_directory, file=sys.stderr)
    shell_tools.run_cmd('git', 'init', (None if verbose else '--quiet'), out=sys.stderr)
    # Fetch both the PR branch and master for comparison.
    result = _git_fetch_for_comparison(remote=repository.as_remote(), actual_branch=branch, compare_branch='master', verbose=verbose)
    # Pin the comparison point as a branch, then check out the PR commit.
    shell_tools.run_cmd('git', 'branch', (None if verbose else '--quiet'), 'compare_commit', result.compare_commit_id, log_run_to_stderr=verbose)
    shell_tools.run_cmd('git', 'checkout', (None if verbose else '--quiet'), '-b', 'actual_commit', result.actual_commit_id, log_run_to_stderr=verbose)
    return prepared_env.PreparedEnv(github_repo=repository, actual_commit_id=result.actual_commit_id, compare_commit_id=result.compare_commit_id, destination_directory=destination_directory, virtual_env_path=None)
Uses content from github to create a dir for testing and comparisons. Args: destination_directory: The location to fetch the contents into. repository: The github repository that the commit lives under. pull_request_number: The id of the pull request to clone. If None, then the master branch is cloned instead. verbose: When set, more progress output is produced. Returns: Commit ids corresponding to content to test/compare.
codesearchnet
def pool_function(args):
    """Verify a single email address over SMTP (thread-pool worker).

    Fix: `except Exception, e` is Python-2-only syntax (a SyntaxError on
    Python 3); replaced with `except Exception as e`, which is valid on
    Python 2.6+ as well.

    Args:
        args: the email address to verify.

    Returns:
        dict with "platform", "status" and "data" keys; "data" is the
        i3visio entity on success, or an empty dict on failure.
    """
    is_valid = True
    try:
        checker = emailahoy.VerifyEmail()
        status, message = checker.verify_email_smtp(args, from_host='gmail.com', from_email='sample@gmail.com')
        # 250 is the SMTP success code for the RCPT check.
        if status == 250:
            print("\t[*] Verification of '{}' status: {}. Details:\n\t\t{}".format(general.success(args), general.success("SUCCESS ({})".format(str(status))), message.replace('\n', '\n\t\t')))
            is_valid = True
        else:
            print("\t[*] Verification of '{}' status: {}. Details:\n\t\t{}".format(general.error(args), general.error("FAILED ({})".format(str(status))), message.replace('\n', '\n\t\t')))
            is_valid = False
    except Exception as e:
        print(general.warning("WARNING. An error was found when performing the search. You can omit this message.\n" + str(e)))
        is_valid = False
    aux = {}
    aux["type"] = "i3visio.profile"
    aux["value"] = "Email - " + args
    aux["attributes"] = general.expandEntitiesFromEmail(args)
    platform = aux["attributes"][2]["value"].title()
    aux["attributes"].append({
        "type": "i3visio.platform",
        "value": platform,
        "attributes": []
    })
    if is_valid:
        return {"platform": platform, "status": "DONE", "data": aux}
    else:
        return {"platform": platform, "status": "DONE", "data": {}}
A wrapper for being able to launch all the threads. We will use python-emailahoy library for the verification. Args: ----- args: reception of the parameters for getPageWrapper as a tuple. Returns: -------- A dictionary representing whether the verification was ended successfully. The format is as follows: ``` {"platform": "str(domain["value"])", "status": "DONE", "data": aux} ```
juraj-google-style
def create_config(cnf_file, uid, overwrite):
    """Create the configuration file and its directory, with correct
    ownership and permissions.

    Behaviour:
      - When ``cnf_file`` does not exist, it is created from CLEAN_CONFIG.
      - When it exists and ``overwrite`` is set, a one-time backup
        (``<cnf_file>_``) is kept and the content is reset to CLEAN_CONFIG.
      - Otherwise the existing content is read and rewritten through
        ``update_configuration()``.

    Args:
        cnf_file (str): Path to the configuration file.
        uid (int): User ID - will be used for chown.
        overwrite (bool): Overwrite the configuration with CLEAN_CONFIG.
    """
    conf = None

    # Make sure the configuration directory exists and is owned by `uid`.
    if not os.path.exists(settings.DEB_CONF_PATH):
        # 0o… octal literals are valid on Python 2.6+ and Python 3,
        # unlike the legacy 0755 form which is a syntax error on py3.
        os.makedirs(settings.DEB_CONF_PATH, 0o755)
        os.chown(settings.DEB_CONF_PATH, uid, -1)

    if not os.path.exists(cnf_file):
        conf = CLEAN_CONFIG
    elif overwrite:
        # Keep a single backup of the original file before clobbering it.
        backup_name = cnf_file + "_"
        if not os.path.exists(backup_name):
            shutil.copyfile(cnf_file, backup_name)
            os.chown(backup_name, uid, -1)
        conf = CLEAN_CONFIG
    else:
        with open(cnf_file) as f:
            conf = f.read()

    with open(cnf_file, "w") as f:
        f.write(update_configuration(conf))
    os.chown(cnf_file, uid, -1)
    os.chmod(cnf_file, 0o644)

    # On non-Debian systems, also expose the file under the Debian path.
    symlink = settings.DEB_CONF_PATH + settings.CONF_FILE
    if not settings.is_deb_system() and not os.path.exists(symlink):
        os.symlink(cnf_file, symlink)
        os.chown(symlink, uid, -1)
        os.chmod(symlink, 0o644)
Creates configuration file and the directory where it should be stored and set correct permissions. Args: cnf_file (str): Path to the configuration file. uid (int): User ID - will be used for chown. overwrite (bool): Overwrite the configuration with :attr:`CLEAN_CONFIG`.
juraj-google-style
def parse_arguments(argv):
    """Parse the command-line arguments for the write-to-pubsub pipeline.

    Args:
        argv: Raw command-line arguments.

    Returns:
        argparse.Namespace carrying the recognised options
        (``mode`` and ``project``); unknown arguments are ignored.
    """
    parser = argparse.ArgumentParser(description='write-to-pubsub')
    parser.add_argument(
        '-m',
        '--mode',
        help='Mode to run pipeline in.',
        choices=['local', 'cloud'],
        default='local',
    )
    parser.add_argument(
        '-p',
        '--project',
        help='GCP project to run pipeline on.',
        default=cfg.PROJECT_ID,
    )
    known_args, _ = parser.parse_known_args(args=argv)
    return known_args
Parses the arguments passed to the command line and returns them as an object Args: argv: The arguments passed to the command line. Returns: The arguments that are being passed in.
github-repos
def get_permissions(self, namespace, explicit=False):
    """Return the permission flags for the specified namespace.

    Args:
        namespace: permissioning namespace (str or Namespace)
        explicit: require explicitly set permissions on the provided
            namespace

    Returns:
        int: permissioning flags
    """
    # Accept raw strings by coercing them into Namespace objects.
    ns = namespace if isinstance(namespace, Namespace) else Namespace(namespace)
    perms, _ = self._check(ns.keys, self.index, explicit=explicit)
    return perms
Returns the permissions level for the specified namespace Arguments: namespace -- permissioning namespace (str) explicit -- require explicitly set permissions to the provided namespace Returns: int -- permissioning flags
codesearchnet
def nhs_check_digit(ninedigits: Union[(str, List[Union[(str, int)]])]) -> int:
    """Calculate the check digit for the first nine digits of an NHS number.

    Method: multiply each digit by its weighting (NHS_DIGIT_WEIGHTINGS),
    sum, take the remainder mod 11, and subtract from 11; a result of 11
    maps to 0, while 10 marks an invalid number.

    Args:
        ninedigits: string or list of nine digits

    Returns:
        the check digit (an int in 0-10)

    Raises:
        ValueError: if the input is not exactly nine digit characters
    """
    digits = [str(d) for d in ninedigits]
    if len(digits) != 9 or not all(d.isdigit() for d in digits):
        raise ValueError('bad string to nhs_check_digit')
    weighted_sum = sum(int(d) * w
                       for d, w in zip(digits, NHS_DIGIT_WEIGHTINGS))
    check_digit = 11 - (weighted_sum % 11)
    return 0 if check_digit == 11 else check_digit
Calculates an NHS number check digit. Args: ninedigits: string or list Returns: check digit Method: 1. Multiply each of the first nine digits by the corresponding digit weighting (see :const:`NHS_DIGIT_WEIGHTINGS`). 2. Sum the results. 3. Take remainder after division by 11. 4. Subtract the remainder from 11 5. If this is 11, use 0 instead If it's 10, the number is invalid If it doesn't match the actual check digit, the number is invalid
codesearchnet
def add_user(self, group, username):
    """Add a user to the specified LDAP group.

    Args:
        group: Name of group to update
        username: Username of user to add

    Raises:
        ldap_tools.exceptions.InvalidResult: the group lookup produced an
            invalid result; re-raised with its chained context suppressed.
    """
    # Validate that the group exists before issuing the modify request.
    try:
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:
        raise err from None

    change = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}
    self.client.modify(self.__distinguished_name(group), change)
Add a user to the specified LDAP group. Args: group: Name of group to update username: Username of user to add Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info.
juraj-google-style
def _register_bounds_validator_if_needed(parser, name, flag_values):
    """Enforces lower and upper bounds for numeric flags.

    Args:
        parser: NumericParser (either FloatParser or IntegerParser), provides
            lower and upper bounds, and help text to display.
        name: str, name of the flag
        flag_values: FlagValues.
    """
    # Nothing to enforce when the parser is unbounded on both sides.
    if parser.lower_bound is None and parser.upper_bound is None:
        return

    def checker(value):
        # An unset flag (None) is always acceptable.
        if value is not None and parser.is_outside_bounds(value):
            raise _exceptions.ValidationError(
                '%s is not %s' % (value, parser.syntactic_help))
        return True

    _validators.register_validator(name, checker, flag_values=flag_values)
Enforces lower and upper bounds for numeric flags. Args: parser: NumericParser (either FloatParser or IntegerParser), provides lower and upper bounds, and help text to display. name: str, name of the flag flag_values: FlagValues.
juraj-google-style
def get_id(page):
    """Extract the id from a page.

    Args:
        page: a string containing an ``<id>…</id>`` element

    Returns:
        the id as an integer
    """
    open_tag, close_tag = '<id>', '</id>'
    begin = page.find(open_tag)
    end = page.find(close_tag)
    # Every page is expected to carry an id element.
    assert begin != -1
    assert end != -1
    return int(page[begin + len(open_tag):end])
Extract the id from a page. Args: page: a string Returns: an integer
codesearchnet
def to_jdbc_url(self) -> str:
    """Convert options to a properly formatted JDBC URL.

    Delegates to ``_build_jdbc_url`` with the Cloud SQL Postgres socket
    factory and the ``postgresql`` JDBC scheme.

    Returns:
        JDBC URL string configured with all options.
    """
    return self._build_jdbc_url(socketFactory='com.google.cloud.sql.postgres.SocketFactory', database_type='postgresql')
Convert options to a properly formatted JDBC URL. Returns: JDBC URL string configured with all options.
github-repos
def md(cls, data, force_field, temperature, nsteps, other_settings=None):
    """Build a simple MD run from the ``md.txt`` template.

    Args:
        data (LammpsData or str): Data file as a LammpsData instance or
            path to an existing data file.
        force_field (str): Combined force field related cmds. For example,
            'pair_style eam\npair_coeff * * Cu_u3.eam'.
        temperature (float): Simulation temperature.
        nsteps (int): No. of steps to run.
        other_settings (dict): other settings to be filled into
            placeholders.
    """
    with open(os.path.join(cls.template_dir, 'md.txt')) as f:
        template = f.read()
    # Start from the caller's settings (copied, so we never mutate their
    # dict) and overlay the MD-specific placeholders.
    settings = dict(other_settings) if other_settings is not None else {}
    settings.update({'force_field': force_field,
                     'temperature': temperature,
                     'nsteps': nsteps})
    return cls(script_template=template, settings=settings,
               data=data, script_filename='in.md')
Example for a simple MD run based on template md.txt. Args: data (LammpsData or str): Data file as a LammpsData instance or path to an existing data file. force_field (str): Combined force field related cmds. For example, 'pair_style eam\npair_coeff * * Cu_u3.eam'. temperature (float): Simulation temperature. nsteps (int): No. of steps to run. other_settings (dict): other settings to be filled into placeholders.
codesearchnet
def _list_objects(self, client_kwargs, max_request_entries):
    """Lists objects.

    Args:
        client_kwargs (dict): Client arguments.
        max_request_entries (int): If specified, maximum entries returned
            by request.

    Yields:
        tuple: object name str, object header dict, directory bool
    """
    # Merge the max-entries limit into the client call arguments.
    client_kwargs = self._update_listing_client_kwargs(
        client_kwargs, max_request_entries)

    # Translate Azure SDK errors into this library's exception types.
    with _handle_azure_exception():
        for obj in self.client.list_directories_and_files(**client_kwargs):
            # Third element flags whether the entry is a directory.
            yield (obj.name, self._model_to_dict(obj), isinstance(obj, _Directory))
Lists objects. args: client_kwargs (dict): Client arguments. max_request_entries (int): If specified, maximum entries returned by request. Returns: generator of tuple: object name str, object header dict, directory bool
juraj-google-style
def __init__(self, channel):
    """Constructor.

    Registers the etcd Cluster service RPCs (member add / remove /
    update / list) as unary-unary callables on this stub.

    Args:
        channel: A grpc.Channel.
    """
    self.MemberAdd = channel.unary_unary(
        '/etcdserverpb.Cluster/MemberAdd',
        request_serializer=rpc__pb2.MemberAddRequest.SerializeToString,
        response_deserializer=rpc__pb2.MemberAddResponse.FromString,
        )
    self.MemberRemove = channel.unary_unary(
        '/etcdserverpb.Cluster/MemberRemove',
        request_serializer=rpc__pb2.MemberRemoveRequest.SerializeToString,
        response_deserializer=rpc__pb2.MemberRemoveResponse.FromString,
        )
    self.MemberUpdate = channel.unary_unary(
        '/etcdserverpb.Cluster/MemberUpdate',
        request_serializer=rpc__pb2.MemberUpdateRequest.SerializeToString,
        response_deserializer=rpc__pb2.MemberUpdateResponse.FromString,
        )
    self.MemberList = channel.unary_unary(
        '/etcdserverpb.Cluster/MemberList',
        request_serializer=rpc__pb2.MemberListRequest.SerializeToString,
        response_deserializer=rpc__pb2.MemberListResponse.FromString,
        )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def Copy(self, name=None):
    """Return a shallow copy of this object.

    ``d`` is shallow-copied as well, so the copy gets its own mapping but
    shares the contained values.  Use copy.deepcopy on the whole object
    if a deep copy of ``d`` is needed.

    Args:
        name: string name for the new object; defaults to this object's
            current name.
    """
    duplicate = copy.copy(self)
    duplicate.d = copy.copy(self.d)
    if name is None:
        duplicate.name = self.name
    else:
        duplicate.name = name
    return duplicate
Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist
codesearchnet
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
    """Subtracts a value from this variable.

    Args:
        delta: A `Tensor`. The value to subtract from this variable.
        use_locking: If `True`, use locking during the operation.
            (Accepted for API compatibility; not forwarded to the op here.)
        name: The name to use for the operation.
        read_value: A `bool`. Whether to read and return the new value of
            the variable or not.

    Returns:
        If `read_value` is `True`, the new value of the variable after the
        assignment has completed.  Otherwise the assign op itself.
    """
    # Build the op in the variable's graph and thread it through the
    # variable's assignment-dependency bookkeeping.
    with _handle_graph(self.handle), self._assign_dependencies():
        assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name)
        if read_value:
            # _lazy_read defers materialising the updated value.
            return self._lazy_read(assign_sub_op)
    return assign_sub_op
Subtracts a value from this variable. Args: delta: A `Tensor`. The value to subtract from this variable. use_locking: If `True`, use locking during the operation. name: The name to use for the operation. read_value: A `bool`. Whether to read and return the new value of the variable or not. Returns: If `read_value` is `True`, this method will return the new value of the variable after the assignment has completed. Otherwise, when in graph mode it will return the `Operation` that does the assignment, and when in eager mode it will return `None`.
github-repos
def locate_file(start_path, file_name):
    """Locate ``file_name`` by searching upward from ``start_path``.

    The search begins in ``start_path`` (or its directory when it is a
    file) and recurses into parent directories, stopping once the current
    working directory or the filesystem root has been examined.

    Args:
        start_path (str): start locating path, maybe file path or
            directory path

    Returns:
        str: located absolute file path.

    Raises:
        exceptions.FileNotFound: invalid start path, or file not found.
    """
    if os.path.isfile(start_path):
        current_dir = os.path.dirname(start_path)
    elif os.path.isdir(start_path):
        current_dir = start_path
    else:
        raise exceptions.FileNotFound('invalid path: {}'.format(start_path))

    candidate = os.path.join(current_dir, file_name)
    if os.path.isfile(candidate):
        return os.path.abspath(candidate)

    # Stop once we've checked the working directory or the root.
    if os.path.abspath(current_dir) in [os.getcwd(), os.path.abspath(os.sep)]:
        raise exceptions.FileNotFound('{} not found in {}'.format(file_name, start_path))

    return locate_file(os.path.dirname(current_dir), file_name)
locate filename and return absolute file path. searching will be recursive upward until current working directory. Args: start_path (str): start locating path, maybe file path or directory path Returns: str: located file path. None if file not found. Raises: exceptions.FileNotFound: If failed to locate file.
codesearchnet
def as_text(bytes_or_text, encoding='utf-8'):
    """Converts any string-like python input types to unicode.

    Uses utf-8 encoding for text by default.

    Args:
        bytes_or_text: A `bytes` or `str` object.
        encoding: A string indicating the charset for decoding.

    Returns:
        A `str` object.

    Raises:
        TypeError: If `bytes_or_text` is not a binary or unicode string.
    """
    # Normalise the encoding name first so a bad name fails early,
    # even for str input.
    canonical_encoding = codecs.lookup(encoding).name
    if isinstance(bytes_or_text, bytes):
        return bytes_or_text.decode(canonical_encoding)
    if isinstance(bytes_or_text, str):
        return bytes_or_text
    raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)
Converts any string-like python input types to unicode. Returns the input as a unicode string. Uses utf-8 encoding for text by default. Args: bytes_or_text: A `bytes`, `str`, or `unicode` object. encoding: A string indicating the charset for decoding unicode. Returns: A `unicode` (Python 2) or `str` (Python 3) object. Raises: TypeError: If `bytes_or_text` is not a binary or unicode string.
github-repos
def set_precision(predictions, labels, weights_fn=common_layers.weights_nonzero):
    """Precision of set predictions.

    Args:
        predictions : A Tensor of scores of shape [batch, nlabels].
        labels: A Tensor of int32s giving true set elements,
            of shape [batch, seq_length].
        weights_fn: A function to weight the elements.

    Returns:
        hits: A Tensor of shape [batch, nlabels].
        weights: The weights tensor returned by ``weights_fn``.
            NOTE(review): computed on the squeezed [batch, seq_length]
            labels, not on [batch, nlabels] - confirm intended shape.
    """
    with tf.variable_scope('set_precision', values=[predictions, labels]):
        # Drop the two trailing singleton dims: [batch, seq, 1, 1] -> [batch, seq].
        labels = tf.squeeze(labels, [2, 3])
        weights = weights_fn(labels)
        # Convert the label id sequence into a multi-hot membership vector:
        # 1 wherever a label id occurs anywhere in the sequence.
        labels = tf.one_hot(labels, predictions.shape[(- 1)])
        labels = tf.reduce_max(labels, axis=1)
        labels = tf.cast(labels, tf.bool)
        return (tf.to_float(tf.equal(labels, predictions)), weights)
Precision of set predictions. Args: predictions : A Tensor of scores of shape [batch, nlabels]. labels: A Tensor of int32s giving true set elements, of shape [batch, seq_length]. weights_fn: A function to weight the elements. Returns: hits: A Tensor of shape [batch, nlabels]. weights: A Tensor of shape [batch, nlabels].
codesearchnet
def parse_aggregate_report_file(_input, nameservers=None, dns_timeout=2.0, parallel=False):
    """Parses a file at the given path, a file-like object, or bytes as an
    aggregate DMARC report.

    Args:
        _input: A path to a file, a file-like object, or bytes
        nameservers (list): A list of one or more nameservers to use
        dns_timeout (float): Sets the DNS timeout in seconds
        parallel (bool): Parallel processing

    Returns:
        OrderedDict: The parsed DMARC aggregate report
    """
    # Extract the XML payload from whatever container was supplied,
    # then delegate the actual parsing to the XML parser.
    xml = extract_xml(_input)
    return parse_aggregate_report_xml(xml, nameservers=nameservers, timeout=dns_timeout, parallel=parallel)
Parses a file at the given path, a file-like object, or bytes as an aggregate DMARC report Args: _input: A path to a file, a file-like object, or bytes nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) dns_timeout (float): Sets the DNS timeout in seconds parallel (bool): Parallel processing Returns: OrderedDict: The parsed DMARC aggregate report
juraj-google-style
def List(device, device_path):
    """Yield an ``ls -l``-style directory listing, one line per entry.

    Args:
        device: device object exposing a ``List(path)`` method whose
            entries carry ``filename``, ``size``, ``mode`` and ``mtime``.
        device_path: Directory to list.

    Yields:
        str: one formatted text line (newline-terminated) per entry,
        sorted by filename.
    """
    files = device.List(device_path)
    if not files:
        # Guard: max() below raises ValueError on an empty listing.
        return
    files.sort(key=lambda x: x.filename)
    # Column widths for right-aligned sizes and left-aligned names.
    maxname = max(len(f.filename) for f in files)
    maxsize = max(len(str(f.size)) for f in files)
    for f in files:
        # Render a unix-style permission string from the mode bits.
        mode = (
            ('d' if stat.S_ISDIR(f.mode) else '-') +
            ('r' if f.mode & stat.S_IRUSR else '-') +
            ('w' if f.mode & stat.S_IWUSR else '-') +
            ('x' if f.mode & stat.S_IXUSR else '-') +
            ('r' if f.mode & stat.S_IRGRP else '-') +
            ('w' if f.mode & stat.S_IWGRP else '-') +
            ('x' if f.mode & stat.S_IXGRP else '-') +
            ('r' if f.mode & stat.S_IROTH else '-') +
            ('w' if f.mode & stat.S_IWOTH else '-') +
            ('x' if f.mode & stat.S_IXOTH else '-'))
        t = time.gmtime(f.mtime)
        yield '%s %*d %04d-%02d-%02d %02d:%02d:%02d %-*s\n' % (
            mode, maxsize, f.size,
            t.tm_year, t.tm_mon, t.tm_mday,
            t.tm_hour, t.tm_min, t.tm_sec,
            maxname, f.filename)
Prints a directory listing. Args: device_path: Directory to list.
juraj-google-style
def make_query(self, ns):
    """Make a query of entities within this range.

    Query options are not supported. They should be specified when the
    query is run.

    Args:
        ns: namespace of this query.

    Returns:
        a db.Query or ndb.Query, depends on the model class's type.
    """
    if issubclass(self.model_class, db.Model):
        # Old-style db model: each filter is a "property operator" string
        # plus a value, applied in place on the query.
        query = db.Query(self.model_class, namespace=ns)
        for f in self.filters:
            query.filter("%s %s" % (f[0], f[1]), f[2])
    else:
        # ndb model: filters become FilterNode(prop, op, value) objects
        # and each .filter() returns a new query.
        query = self.model_class.query(namespace=ns)
        for f in self.filters:
            query = query.filter(ndb.FilterNode(*f))
    return query
Make a query of entities within this range. Query options are not supported. They should be specified when the query is run. Args: ns: namespace of this query. Returns: a db.Query or ndb.Query, depends on the model class's type.
juraj-google-style
def load_terms(fo: IO, metadata: dict, forceupdate: bool):
    """Load terms into Elasticsearch and ArangoDB.

    Forceupdate will create a new index in Elasticsearch regardless of
    whether an index with the resource version already exists.

    Args:
        fo: file obj - terminology file
        metadata: dict containing the metadata for terminology
        forceupdate: force full update - e.g. don't leave Elasticsearch
            indexes alone if their version ID matches
    """
    version = metadata['metadata']['version']

    # --- Elasticsearch: index the terminology documents ---------------
    with timy.Timer('Load Terms') as timer:
        es = bel.db.elasticsearch.get_client()
        # Compact the version timestamp into a digits-only index suffix.
        es_version = version.replace('T', '').replace('-', '').replace(':', '')
        index_prefix = f"terms_{metadata['metadata']['namespace'].lower()}"
        index_name = f'{index_prefix}_{es_version}'
        if (not elasticsearch.index_exists(es, index_name)):
            elasticsearch.create_terms_index(es, index_name)
        elif forceupdate:
            # Same version already indexed - build an alternate index.
            index_name += '_alt'
            elasticsearch.create_terms_index(es, index_name)
        else:
            # Index for this version already exists and no force: no-op.
            return
        terms_iterator = terms_iterator_for_elasticsearch(fo, index_name)
        elasticsearch.bulk_load_docs(es, terms_iterator)
        # Drop older indexes for this namespace and repoint the alias at
        # the freshly loaded index.
        index_names = elasticsearch.get_all_index_names(es)
        for name in index_names:
            if ((name != index_name) and (index_prefix in name)):
                elasticsearch.delete_index(es, name)
        elasticsearch.add_index_alias(es, index_name, terms_alias)
        log.info('Load namespace terms', elapsed=timer.elapsed, namespace=metadata['metadata']['namespace'])

    # --- ArangoDB: load equivalence nodes/edges -----------------------
    with timy.Timer('Load Term Equivalences') as timer:
        arango_client = arangodb.get_client()
        belns_db = arangodb.get_belns_handle(arango_client)
        arangodb.batch_load_docs(belns_db, terms_iterator_for_arangodb(fo, version), on_duplicate='update')
        log.info('Loaded namespace equivalences', elapsed=timer.elapsed, namespace=metadata['metadata']['namespace'])

    # NOTE(review): the two assignments below bind the bare name `f` -
    # this looks like f-string AQL queries whose bodies were lost in
    # transcription; as written this raises NameError unless a
    # module-level `f` exists.  TODO: restore from the original source.
    remove_old_equivalence_edges = f
    remove_old_equivalence_nodes = f
    arangodb.aql_query(belns_db, remove_old_equivalence_edges)
    arangodb.aql_query(belns_db, remove_old_equivalence_nodes)

    # Record namespace metadata: insert, or replace if already present.
    metadata['_key'] = f"Namespace_{metadata['metadata']['namespace']}"
    try:
        belns_db.collection(arangodb.belns_metadata_name).insert(metadata)
    except ArangoError as ae:
        belns_db.collection(arangodb.belns_metadata_name).replace(metadata)
Load terms into Elasticsearch and ArangoDB Forceupdate will create a new index in Elasticsearch regardless of whether an index with the resource version already exists. Args: fo: file obj - terminology file metadata: dict containing the metadata for terminology forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
codesearchnet
def __make_id(receiver):
    """Generate an identifier for a callable signal receiver.

    This is used when disconnecting receivers, where we need to correctly
    establish equivalence between the input receiver and the receivers
    assigned to a signal.

    Args:
        receiver: A callable object.

    Returns:
        An identifier for the receiver.
    """
    if not __is_bound_method(receiver):
        return id(receiver)
    # Identify bound methods by (function, instance) so a freshly created
    # bound-method wrapper for the same pair still matches.
    return (id(receiver.__func__), id(receiver.__self__))
Generate an identifier for a callable signal receiver. This is used when disconnecting receivers, where we need to correctly establish equivalence between the input receiver and the receivers assigned to a signal. Args: receiver: A callable object. Returns: An identifier for the receiver.
juraj-google-style
def keypoint_rot90(keypoint, factor, rows, cols, **params):
    """Rotates a keypoint by 90 degrees CCW (see np.rot90)

    Args:
        keypoint (tuple): A tuple (x, y, angle, scale).
        factor (int): Number of CCW rotations. Must be in range [0;3]
            See np.rot90.
        rows (int): Image rows.
        cols (int): Image cols.

    Raises:
        ValueError: if ``factor`` is outside [0;3].
    """
    if not 0 <= factor <= 3:
        raise ValueError('Parameter n must be in range [0;3]')
    x, y, angle, scale = keypoint
    if factor == 1:
        return [y, (cols - 1) - x, angle - math.pi / 2, scale]
    if factor == 2:
        return [(cols - 1) - x, (rows - 1) - y, angle - math.pi, scale]
    if factor == 3:
        return [(rows - 1) - y, x, angle + math.pi / 2, scale]
    # factor == 0: no rotation, return the keypoint untouched.
    return keypoint
Rotates a keypoint by 90 degrees CCW (see np.rot90) Args: keypoint (tuple): A tuple (x, y, angle, scale). factor (int): Number of CCW rotations. Must be in range [0;3] See np.rot90. rows (int): Image rows. cols (int): Image cols.
juraj-google-style
def handle_error(program_name, cmd, log=None):
    """Interactively handle a failed subprocess run.

    Prints diagnostics for the failed command, then asks the user whether
    to retry, continue, or exit (optionally deleting TEMP3D files/log).

    Args:
        program_name (str): name of the subprocess program
        cmd (str): the command line that failed
        log (str): optional path to the program's log file

    Returns:
        break_now (bool): True if the caller should stop its retry loop
            (user chose 'c'), False to retry ('r').  The exit choices
            call sys.exit(1) and do not return.
    """
    print('\nHouston, we have a problem.', ('\n%s did not finish successfully. Review the log' % program_name), 'file and the input file(s) to see what went wrong.')
    print(('%s command: "%s"' % (program_name, cmd)))
    if (log is not None):
        print(('log: "%s"' % log))
    print('Where do we go from here?')
    print((' r - retry running %s (probably after' % program_name), "you've fixed any problems with the input files)")
    print(' c - continue on with the script (probably after', "you've manually re-run and generated the desired", 'output file(s)')
    print(' x - exit, keeping the TEMP3D files and log')
    print(' xd - exit, deleting the TEMP3D files and log')
    # NOTE(review): the loop body runs exactly once - any unrecognised
    # answer is coerced to 'x' before the unconditional break.
    while True:
        choice = input('Select r, c, x (default), or xd: ')
        if (choice not in ('r', 'c', 'x', 'xd')):
            choice = 'x'
        break
    if (choice == 'x'):
        print('Exiting ...')
        sys.exit(1)
    elif (choice == 'xd'):
        print('Deleting TEMP3D* and log files and exiting ...')
        util.delete_all('TEMP3D*')
        if (log is not None):
            os.remove(log)
        sys.exit(1)
    elif (choice == 'c'):
        print('Continuing on ...')
        break_now = True
    elif (choice == 'r'):
        print(('Retrying %s cmd ...' % program_name))
        break_now = False
    return break_now
Subprocess program error handling Args: program_name (str): name of the subprocess program Returns: break_now (bool): indicate whether calling program should break out of loop
codesearchnet
def _get_contexts_for_squash(self, batch_signature):
    """Starting with the batch referenced by batch_signature, iterate back
    through the batches and for each valid batch collect the context_id.

    At the end remove contexts for txns that are other txn's predecessors.

    Args:
        batch_signature (str): The batch to start from, moving back
            through the batches in the scheduler

    Returns:
        (list): Context ids that haven't been previous base contexts.
    """
    batch = self._batches_by_id[batch_signature].batch
    index = self._batches.index(batch)
    contexts = []
    txns_added_predecessors = []
    # Walk batches from `index` back toward the beginning of the list.
    for b in self._batches[index::(- 1)]:
        batch_is_valid = True
        contexts_from_batch = []
        # Walk this batch's transactions in reverse order.
        for txn in b.transactions[::(- 1)]:
            result = self._txn_results[txn.header_signature]
            if (not result.is_valid):
                # One invalid txn invalidates the whole batch; none of
                # its contexts are collected.
                batch_is_valid = False
                break
            else:
                txn_id = txn.header_signature
                # Skip contexts for txns already recorded as predecessors
                # of later txns.
                # NOTE(review): `txns_added_predecessors` accumulates
                # predecessor *collections* (self._txn_predecessors[...])
                # while membership is tested against a txn id - confirm
                # the intended containment semantics.
                if (txn_id not in txns_added_predecessors):
                    txns_added_predecessors.append(self._txn_predecessors[txn_id])
                    contexts_from_batch.append(result.context_id)
        if batch_is_valid:
            contexts.extend(contexts_from_batch)
    return contexts
Starting with the batch referenced by batch_signature, iterate back through the batches and for each valid batch collect the context_id. At the end remove contexts for txns that are other txn's predecessors. Args: batch_signature (str): The batch to start from, moving back through the batches in the scheduler Returns: (list): Context ids that haven't been previous base contexts.
codesearchnet
def RegisterPlugin(cls, plugin_class):
    """Registers a plugin class.

    The plugin classes are identified based on their lower case name.

    Args:
        plugin_class (type): class of the plugin.

    Raises:
        KeyError: if plugin class is already set for the corresponding
            name.
    """
    plugin_name = plugin_class.NAME.lower()
    # Refuse duplicate registrations for the same (case-folded) name.
    if plugin_name in cls._plugin_classes:
        raise KeyError(
            'Plugin class already set for name: {0:s}.'.format(
                plugin_class.NAME))
    cls._plugin_classes[plugin_name] = plugin_class
Registers a plugin class. The plugin classes are identified based on their lower case name. Args: plugin_class (type): class of the plugin. Raises: KeyError: if plugin class is already set for the corresponding name.
juraj-google-style
def cmd_startstop(options):
    """Start or Stop the specified instance.

    Finds instances that match args and the instance-state expected by
    the command.  Then, the target instance is determined, the action is
    performed on the instance, and the return information is displayed.

    Args:
        options (object): contains args and data from parser.
    """
    # The command only applies to instances in the opposite state:
    # you can only start stopped instances, and stop running ones.
    statelu = {'start': 'stopped', 'stop': 'running'}
    options.inst_state = statelu[options.command]
    debg.dprint('toggle set state: ', options.inst_state)
    (i_info, param_str) = gather_data(options)
    (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)
    response = awsc.startstop(tar_inst, options.command)
    # AWS nests the result under a command-specific top-level key.
    responselu = {'start': 'StartingInstances', 'stop': 'StoppingInstances'}
    filt = responselu[options.command]
    resp = {}
    state_term = ('CurrentState', 'PreviousState')
    for (i, j) in enumerate(state_term):
        resp[i] = response['{0}'.format(filt)][0]['{0}'.format(j)]['Name']
    print('Current State: {}{}{} - Previous State: {}{}{}\n'.format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM))
Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the return information is displayed. Args: options (object): contains args and data from parser.
codesearchnet
def exclude(self, scheduled_operation: ScheduledOperation) -> bool:
    """Omits a scheduled operation from the schedule, if present.

    Args:
        scheduled_operation: The operation to try to remove.

    Returns:
        True if the operation was present and is now removed, False if
        it was already not present.
    """
    try:
        self.scheduled_operations.remove(scheduled_operation)
    except ValueError:
        # list.remove raises ValueError when the operation is absent.
        return False
    return True
Omits a scheduled operation from the schedule, if present. Args: scheduled_operation: The operation to try to remove. Returns: True if the operation was present and is now removed, False if it was already not present.
codesearchnet
def secure_channel(target, credentials, options=None, *, loop=None, executor=None, standalone_pool_for_streaming=False):
    """Creates a secure Channel to a server.

    Args:
        target: The server address.
        credentials: A ChannelCredentials instance.
        options: An optional list of key-value pairs (channel args in gRPC
            runtime) to configure the channel.
        loop: Optional event loop passed through to the wrapping Channel.
        executor: Optional executor passed through to the wrapping Channel.
        standalone_pool_for_streaming: Whether streaming calls should use
            their own pool (forwarded to the wrapping Channel).

    Returns:
        A Channel object wrapping the underlying grpc secure channel.
    """
    return Channel(_grpc.secure_channel(target, credentials, options), loop, executor, standalone_pool_for_streaming)
Creates a secure Channel to a server. Args: target: The server address. credentials: A ChannelCredentials instance. options: An optional list of key-value pairs (channel args in gRPC runtime) to configure the channel. Returns: A Channel object.
codesearchnet
def Execute(self, http, sleep_between_polls=5, max_retries=5, max_batch_size=None, batch_request_callback=None):
    """Execute all of the requests in the batch.

    Args:
        http: httplib2.Http object for use in the request.
        sleep_between_polls: Integer number of seconds to sleep between
            polls.
        max_retries: Max retries. Any requests that have not succeeded by
            this number of retries simply report the last response or
            exception, whatever it happened to be.
        max_batch_size: int, if specified requests will be split in
            batches of given size.
        batch_request_callback: function of (http_response, exception)
            passed to BatchHttpRequest which will be run on any given
            results.

    Returns:
        List of ApiCalls.
    """
    requests = [request for request in self.api_requests if (not request.terminal_state)]
    batch_size = (max_batch_size or len(requests))
    for attempt in range(max_retries):
        if attempt:
            # Sleep between retry rounds, but not before the first one.
            time.sleep(sleep_between_polls)
        # Issue the outstanding requests in chunks of `batch_size`.
        for i in range(0, len(requests), batch_size):
            batch_http_request = BatchHttpRequest(batch_url=self.batch_url, callback=batch_request_callback, response_encoding=self.response_encoding)
            for request in itertools.islice(requests, i, (i + batch_size)):
                batch_http_request.Add(request.http_request, request.HandleResponse)
            batch_http_request.Execute(http)
            if hasattr(http.request, 'credentials'):
                # If any request in this chunk failed authorization,
                # refresh credentials so the next round can succeed.
                if any((request.authorization_failed for request in itertools.islice(requests, i, (i + batch_size)))):
                    http.request.credentials.refresh(http)
        # Keep only the requests that still need retrying.
        requests = [request for request in self.api_requests if (not request.terminal_state)]
        if (not requests):
            break
    return self.api_requests
Execute all of the requests in the batch. Args: http: httplib2.Http object for use in the request. sleep_between_polls: Integer number of seconds to sleep between polls. max_retries: Max retries. Any requests that have not succeeded by this number of retries simply report the last response or exception, whatever it happened to be. max_batch_size: int, if specified requests will be split in batches of given size. batch_request_callback: function of (http_response, exception) passed to BatchHttpRequest which will be run on any given results. Returns: List of ApiCalls.
codesearchnet