code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def activate_vcenter(self, **kwargs):
    """Activate (or deactivate) a vCenter on the switch.

    Args:
        name (str): Name of an established vCenter.
        activate (bool): Activate the vCenter when True. The deactivation
            path was an empty ``else: pass`` in the original and remains a
            no-op that returns None.

    Returns:
        Return value of ``self._callback`` for the generated config, or
        None when ``activate`` is False.
    """
    name = kwargs.pop('name')
    activate = kwargs.pop('activate', True)
    if not activate:
        # NOTE(review): deactivation is not implemented upstream; kept as a
        # no-op to preserve behavior.
        return None
    vcenter_args = dict(id=name)
    # Look up the request builder on the generated bindings object.
    vcenter_attr = getattr(self._brocade_vswitch, 'vcenter_activate')
    config = vcenter_attr(**vcenter_args)
    output = self._callback(config)
    # Fixed: original used the Python 2 `print output` statement.
    print(output)
    return output
Activate vCenter on the switch Args: name: (str) : Name of an established vCenter activate (bool) : Activates the vCenter if activate=True else deactivates it callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
juraj-google-style
def _split_result_for_readers(axis, num_splits, df):
    """Split a freshly-read DataFrame into smaller DataFrames.

    Args:
        axis: Axis to split over.
        num_splits: Number of splits to create.
        df: The DataFrame after it has been read.

    Returns:
        A list of pandas DataFrames (a scalar result is wrapped in a list).
    """
    parts = split_result_of_axis_func_pandas(axis, num_splits, df)
    # Normalize a single-result return to a one-element list.
    return parts if isinstance(parts, list) else [parts]
Splits the DataFrame read into smaller DataFrames and handles all edge cases. Args: axis: Which axis to split over. num_splits: The number of splits to create. df: The DataFrame after it has been read. Returns: A list of pandas DataFrames.
juraj-google-style
def add_arguments(self, parser):
    """Register the info-command arguments on ``parser``.

    Args:
        self (InfoCommand): the ``InfoCommand`` instance.
        parser (argparse.ArgumentParser): the parser to add the arguments to.

    Returns:
        Whatever ``add_common_arguments`` returns for this parser.
    """
    flags = [
        ('-p', '--product', 'print the production information'),
        ('-j', '--jtag', 'print the JTAG pin status'),
    ]
    for short_opt, long_opt, help_text in flags:
        parser.add_argument(short_opt, long_opt, action='store_true',
                            help=help_text)
    return self.add_common_arguments(parser, False)
Adds the information commands to the parser. Args: self (InfoCommand): the ``InfoCommand`` instance parser (argparse.ArgumentParser): the parser to add the arguments to Returns: ``None``
juraj-google-style
def parse(self, filename):
    """Parse a sitemap file into a :class:`Sitemap`.

    Args:
        filename: str, the path to the sitemap file.

    Returns:
        Sitemap: the generated sitemap.
    """
    with io.open(filename, 'r', encoding='utf-8') as _:
        lines = _.readlines()
    all_source_files = set()
    source_map = {}
    lineno = 0
    root = None
    index = None
    cur_level = -1
    parent_queue = []
    for line in lines:
        try:
            level, line = dedent(line)
            # NOTE(review): these string literals were mangled by comment
            # stripping in the source dump; reconstructed as '#' (comment
            # line) and '\\#' (escaped literal '#') -- confirm upstream.
            if line.startswith('#'):
                lineno += 1
                continue
            elif line.startswith('\\#'):
                line = line[1:]
        except IndentError as exc:
            error('bad-indent', 'Invalid indentation', filename=filename,
                  lineno=lineno, column=exc.column)
        if not line:
            lineno += 1
            continue
        source_file = dequote(line)
        if not source_file:
            lineno += 1
            continue
        if source_file in all_source_files:
            error('sitemap-duplicate', 'Filename listed twice',
                  filename=filename, lineno=lineno, column=level * 8 + 1)
        all_source_files.add(source_file)
        source_map[source_file] = (lineno, level * 8 + 1)
        page = OrderedDict()
        if root is not None and level == 0:
            error('sitemap-error', 'Sitemaps only support one root',
                  filename=filename, lineno=lineno, column=0)
        if root is None:
            root = page
            index = source_file
        else:
            # Pop back up to the parent matching the new indentation level.
            lvl_diff = cur_level - level
            while lvl_diff >= 0:
                parent_queue.pop()
                lvl_diff -= 1
            parent_queue[-1][source_file] = page
        parent_queue.append(page)
        cur_level = level
        lineno += 1
    return Sitemap(root, filename, index, source_map)
Parse a sitemap file. Args: filename: str, the path to the sitemap file. Returns: Sitemap: the generated sitemap.
juraj-google-style
def color_set_hsv(c: Color, h: float, s: float, v: float) -> None:
    """Set a color in place from hue, saturation and value.

    Does not return a new Color; ``c`` is modified in place.

    Args:
        c (Union[Color, List[Any]]): A Color instance, or a list of any kind.
        h (float): Hue, from 0 to 360.
        s (float): Saturation, from 0 to 1.
        v (float): Value, from 0 to 1.
    """
    converted = ffi.new("TCOD_color_t*")
    lib.TCOD_color_set_HSV(converted, h, s, v)
    # Write the resulting RGB channels back into the caller's object.
    c[:] = converted.r, converted.g, converted.b
Set a color using: hue, saturation, and value parameters. Does not return a new Color. ``c`` is modified inplace. Args: c (Union[Color, List[Any]]): A Color instance, or a list of any kind. h (float): Hue, from 0 to 360. s (float): Saturation, from 0 to 1. v (float): Value, from 0 to 1.
juraj-google-style
def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]]=None, prepend_batch_axis: bool=False):
    """Convert the inner content to tensors of the requested framework.

    Args:
        tensor_type (`str` or [`~utils.TensorType`], *optional*):
            The type of tensors to use. If `str`, should be one of the
            values of the enum [`~utils.TensorType`]. If `None`, no
            modification is done and `self` is returned unchanged.
        prepend_batch_axis (`bool`, *optional*, defaults to `False`):
            Whether or not to add the batch dimension during the conversion.

    Returns:
        `self`, with values converted in place.

    Raises:
        ImportError: if the requested framework is not installed.
        ValueError: if a value cannot be converted to a tensor.
    """
    if tensor_type is None:
        return self
    if not isinstance(tensor_type, TensorType):
        tensor_type = TensorType(tensor_type)
    # Select the framework-specific constructor and tensor predicate.
    if tensor_type == TensorType.TENSORFLOW:
        if not is_tf_available():
            raise ImportError('Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')
        import tensorflow as tf
        as_tensor = tf.constant
        is_tensor = tf.is_tensor
    elif tensor_type == TensorType.PYTORCH:
        if not is_torch_available():
            raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
        import torch
        is_tensor = torch.is_tensor

        def as_tensor(value, dtype=None):
            if isinstance(value, list) and isinstance(value[0], np.ndarray):
                return torch.from_numpy(np.array(value))
            return torch.tensor(value)
    elif tensor_type == TensorType.JAX:
        if not is_flax_available():
            raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
        import jax.numpy as jnp
        as_tensor = jnp.array
        is_tensor = is_jax_tensor
    elif tensor_type == TensorType.MLX:
        if not is_mlx_available():
            raise ImportError('Unable to convert output to MLX tensors format, MLX is not installed.')
        import mlx.core as mx
        as_tensor = mx.array

        def is_tensor(obj):
            return isinstance(obj, mx.array)
    else:

        def as_tensor(value, dtype=None):
            if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)):
                value_lens = [len(val) for val in value]
                if len(set(value_lens)) > 1 and dtype is None:
                    # Ragged rows: fall back to an object array.
                    value = as_tensor([np.asarray(val) for val in value], dtype=object)
            return np.asarray(value, dtype=dtype)
        is_tensor = is_numpy_array
    for key, value in self.items():
        try:
            if prepend_batch_axis:
                value = [value]
            if not is_tensor(value):
                tensor = as_tensor(value)
                self[key] = tensor
        except Exception as e:
            if key == 'overflowing_tokens':
                # Fixed: this message was split mid-literal in the source
                # dump; rejoined into a single string.
                raise ValueError('Unable to create tensor returning overflowing tokens of different lengths. Please see if a fast version of this tokenizer is available to have this feature available.') from e
            raise ValueError(f"Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is expected).") from e
    return self
Convert the inner content to tensors. Args: tensor_type (`str` or [`~utils.TensorType`], *optional*): The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If `None`, no modification is done. prepend_batch_axis (`int`, *optional*, defaults to `False`): Whether or not to add the batch dimension during the conversion.
github-repos
def getStreamNetworkAsGeoJson(self, session, withNodes=True):
    """Retrieve the stream network geometry in GeoJSON format.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
            object bound to a PostGIS enabled database.
        withNodes (bool, optional): Include node features as well.
            Defaults to True.

    Returns:
        str: GeoJSON FeatureCollection string.
    """
    features_list = []
    for link in self.streamLinks:
        link_geoJson = link.getAsGeoJson(session)
        if link_geoJson:
            # Fixed: reuse the already-fetched GeoJSON instead of issuing a
            # second identical getAsGeoJson query for each link.
            link_geometry = json.loads(link_geoJson)
            link_properties = {'link_number': link.linkNumber,
                               'type': link.type,
                               'num_elements': link.numElements,
                               'dx': link.dx,
                               'erode': link.erode,
                               'subsurface': link.subsurface}
            features_list.append({'type': 'Feature',
                                  'geometry': link_geometry,
                                  'properties': link_properties,
                                  'id': link.id})
            if withNodes:
                for node in link.nodes:
                    node_geoJson = node.getAsGeoJson(session)
                    if node_geoJson:
                        node_geometry = json.loads(node_geoJson)
                        node_properties = {'link_number': link.linkNumber,
                                           'node_number': node.nodeNumber,
                                           'elevation': node.elevation}
                        features_list.append({'type': 'Feature',
                                              'geometry': node_geometry,
                                              'properties': node_properties,
                                              'id': node.id})
    feature_collection = {'type': 'FeatureCollection',
                          'features': features_list}
    return json.dumps(feature_collection)
Retrieve the stream network geometry in GeoJSON format. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database withNodes (bool, optional): Include nodes. Defaults to True. Returns: str: GeoJSON string.
codesearchnet
def parse_mapreduce_yaml(contents):
    """Parses mapreduce.yaml file contents.

    Args:
        contents: mapreduce.yaml file contents.

    Returns:
        MapReduceYaml object with all the data from the original file.

    Raises:
        errors.BadYamlError: when contents is not a valid mapreduce.yaml file.
        errors.MultipleDocumentsInMrYaml: when multiple YAML documents are
            present.
    """
    try:
        builder = yaml_object.ObjectBuilder(MapReduceYaml)
        handler = yaml_builder.BuilderHandler(builder)
        listener = yaml_listener.EventListener(handler)
        listener.Parse(contents)
        mr_info = handler.GetResults()
    # Fixed: Python 2 `except (...), e` syntax is a SyntaxError on Python 3.
    except (ValueError, yaml_errors.EventError) as e:
        raise errors.BadYamlError(e)
    if len(mr_info) < 1:
        raise errors.BadYamlError("No configs found in mapreduce.yaml")
    if len(mr_info) > 1:
        raise errors.MultipleDocumentsInMrYaml("Found %d YAML documents" %
                                               len(mr_info))
    jobs = mr_info[0]
    job_names = set(j.name for j in jobs.mapreduce)
    if len(jobs.mapreduce) != len(job_names):
        raise errors.BadYamlError(
            "Overlapping mapreduce names; names must be unique")
    return jobs
Parses mapreduce.yaml file contents. Args: contents: mapreduce.yaml file contents. Returns: MapReduceYaml object with all the data from original file. Raises: errors.BadYamlError: when contents is not a valid mapreduce.yaml file.
juraj-google-style
def approve(self, peer_jid):
    """Approve a subscription request from ``peer_jid``.

    Args:
        peer_jid (str): the JID to approve.
    """
    bare_jid = aioxmpp.JID.fromstr(peer_jid).bare()
    self.roster.approve(bare_jid)
Approve a subscription request from jid Args: peer_jid (str): the JID to approve
juraj-google-style
def memory_read16(self, addr, num_halfwords, zone=None):
    """Reads memory from the target system in units of 16-bits.

    Args:
        self (JLink): the ``JLink`` instance.
        addr (int): start address to read from.
        num_halfwords (int): number of half words to read.
        zone (str): memory zone to read from.

    Returns:
        List of halfwords read from the target system.

    Raises:
        JLinkException: if memory could not be read.
    """
    # Delegate to the generic reader with a fixed 16-bit unit size.
    return self.memory_read(addr, num_halfwords, zone=zone, nbits=16)
Reads memory from the target system in units of 16-bits. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_halfwords (int): number of half words to read zone (str): memory zone to read from Returns: List of halfwords read from the target system. Raises: JLinkException: if memory could not be read
codesearchnet
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
    """Check if a line has a redundant "override" on a "final" declaration.

    Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    declarator_end = line.rfind(')')
    if declarator_end >= 0:
        fragment = line[declarator_end:]
    elif linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
        # Declarator closed on the previous line; specifiers may be here.
        fragment = line
    else:
        return
    if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
        error(filename, linenum, 'readability/inheritance', 4,
              ('"override" is redundant since function is '
               'already declared as "final"'))
Check if line contains a redundant "override" or "final" virt-specifier. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
juraj-google-style
def resorted(values):
    """Sort values, but put number-like entries after the words.

    Keeps outputs diff-compatible with Aleph::

        >>> resorted(["b", "1", "a"])
        ['a', 'b', '1']

    Args:
        values (iterable): any iterable object/list/tuple/whatever.

    Returns:
        list of sorted values, with numbers placed after words.
    """
    if not values:
        return values
    ordered = sorted(values)
    # Index of the first entry that does not start with a digit.
    split_at = next(
        (pos for pos, val in enumerate(ordered)
         if val and not val[0].isdigit()),
        None,
    )
    if split_at is None:
        return ordered
    return ordered[split_at:] + ordered[:split_at]
Sort values, but put numbers after alphabetically sorted words. This function is here to make outputs diff-compatible with Aleph. Example:: >>> sorted(["b", "1", "a"]) ['1', 'a', 'b'] >>> resorted(["b", "1", "a"]) ['a', 'b', '1'] Args: values (iterable): any iterable object/list/tuple/whatever. Returns: list of sorted values, but with numbers after words
codesearchnet
def from_index_amount(cls, idx, amount):
    """Build a symmetric strain by perturbing one index of the zero tensor.

    Like ``Deformation.from_index_amount``, except it generates a strain
    from the zero 3x3 tensor or zero Voigt vector with ``amount`` placed at
    the index location. Ensures a symmetric strain.

    Args:
        idx (tuple or integer): index to be perturbed, in Voigt (int) or
            full-tensor (2-tuple) notation.
        amount (float): amount to perturb the selected index.

    Raises:
        ValueError: if ``idx`` is neither an integer nor a 2-tuple.
    """
    dims = np.array(idx).ndim
    if dims == 0:
        # Voigt notation: single integer index into a length-6 vector.
        voigt = np.zeros(6)
        voigt[idx] = amount
        return cls.from_voigt(voigt)
    if dims == 1:
        # Full-tensor notation: set both symmetric entries.
        full = np.zeros((3, 3))
        for perm in itertools.permutations(idx):
            full[perm] = amount
        return cls(full)
    raise ValueError("Index must either be 2-tuple or integer "
                     "corresponding to full-tensor or voigt index")
Like Deformation.from_index_amount, except generates a strain from the zero 3x3 tensor or voigt vector with the amount specified in the index location. Ensures symmetric strain. Args: idx (tuple or integer): index to be perturbed, can be voigt or full-tensor notation amount (float): amount to perturb selected index
juraj-google-style
def _rows_event_to_dict(e, stream):
    """Convert a RowsEvent to (rows, meta).

    Args:
        e (pymysqlreplication.row_event.RowsEvent): the event.
        stream (pymysqlreplication.BinLogStreamReader): the stream that
            yielded the event.

    Returns:
        tuple: (list of row dicts, metadata dict).
    """
    if isinstance(e.primary_key, (list, tuple)):
        pk_cols = e.primary_key
    else:
        pk_cols = (e.primary_key,)
    if isinstance(e, row_event.UpdateRowsEvent):
        sig, action, row_converter = (signals.rows_updated, 'update',
                                      _convert_update_row)
    elif isinstance(e, row_event.WriteRowsEvent):
        sig, action, row_converter = (signals.rows_inserted, 'insert',
                                      _convert_write_row)
    elif isinstance(e, row_event.DeleteRowsEvent):
        sig, action, row_converter = (signals.rows_deleted, 'delete',
                                      _convert_write_row)
    else:
        assert False, 'Invalid binlog event'
    meta = {
        'time': e.timestamp,
        'log_pos': stream.log_pos,
        'log_file': stream.log_file,
        'schema': e.schema,
        'table': e.table,
        'action': action,
    }
    rows = [row_converter(row) for row in e.rows]
    for row in rows:
        # Attach the primary-key subset of each row's values.
        row['keys'] = {col: row['values'][col] for col in pk_cols}
    return (rows, meta)
Convert RowsEvent to a dict Args: e (pymysqlreplication.row_event.RowsEvent): the event stream (pymysqlreplication.BinLogStreamReader): the stream that yields event Returns: dict: event's data as a dict
codesearchnet
def get_item(env, name, default=None):
    """Look up a dotted name in nested dictionaries and/or modules.

    Args:
        env: the environment (dictionary or module) to use for the lookup.
        name: the name to look up, in dotted notation.
        default: the value to return if the name is not found.

    Returns:
        The result of looking up the name, if found; else the default.
    """
    current = env
    for part in name.split('.'):
        if isinstance(current, dict) and part in current:
            current = current[part]
        elif isinstance(current, types.ModuleType) and part in current.__dict__:
            current = current.__dict__[part]
        else:
            return default
    return current
Get an item from a dictionary, handling nested lookups with dotted notation. Args: env: the environment (dictionary) to use to look up the name. name: the name to look up, in dotted notation. default: the value to return if the name if not found. Returns: The result of looking up the name, if found; else the default.
codesearchnet
def export_as_file(self, filepath, hyperparameters):
    """Write an importable Python file defining the tuned base learner.

    The generated module contains the base learner source, a
    ``set_params`` call applying ``hyperparameters``, and the
    ``meta_feature_generator`` name.

    Args:
        filepath (str, unicode): File path to save the file in (``.py`` is
            appended when missing).
        hyperparameters (dict): Dictionary to use for ``set_params``.
    """
    if not filepath.endswith('.py'):
        filepath += '.py'
    sections = [
        self.source,
        '\n\nbase_learner.set_params(**{})\n'.format(hyperparameters),
        '\nmeta_feature_generator = "{}"\n'.format(self.meta_feature_generator),
    ]
    with open(filepath, 'wb') as handle:
        handle.write(''.join(sections).encode('utf8'))
Generates a Python file with the importable base learner set to ``hyperparameters`` This function generates a Python file in the specified file path that contains the base learner as an importable variable stored in ``base_learner``. The base learner will be set to the appropriate hyperparameters through ``set_params``. Args: filepath (str, unicode): File path to save file in hyperparameters (dict): Dictionary to use for ``set_params``
codesearchnet
def attention_bias_same_segment(query_segment_id, memory_segment_id):
    """Create a bias tensor to be added to attention logits.

    Positions with the same segment_ids can see each other; all other
    pairs receive a large negative bias.

    Args:
        query_segment_id: a float `Tensor` with shape [batch, query_length].
        memory_segment_id: a float `Tensor` with shape [batch, memory_length].

    Returns:
        a `Tensor` with shape [batch, 1, query_length, memory_length].
    """
    # 1.0 where the query and memory positions are in different segments.
    different_segment = tf.to_float(
        tf.not_equal(tf.expand_dims(query_segment_id, 2),
                     tf.expand_dims(memory_segment_id, 1)))
    bias = different_segment * large_compatible_negative(
        memory_segment_id.dtype)
    return tf.expand_dims(bias, axis=1)
Create a bias tensor to be added to attention logits. Positions with the same segment_ids can see each other. Args: query_segment_id: a float `Tensor` with shape [batch, query_length]. memory_segment_id: a float `Tensor` with shape [batch, memory_length]. Returns: a `Tensor` with shape [batch, 1, query_length, memory_length].
juraj-google-style
def goto(self, rules, symbol):
    """Compute the next closure for ``rules`` after consuming ``symbol``.

    Args:
        rules: an iterable of DottedRules.
        symbol: a string denoting the symbol we've just seen.

    Returns:
        frozenset of DottedRules.
    """
    advanced = set()
    for rule in rules:
        # Advance the dot over rules whose next expected symbol matches.
        if not rule.at_end and rule.rhs[rule.pos] == symbol:
            advanced.add(rule.move_dot())
    return self.closure(advanced)
Computes the next closure for rules based on the symbol we got. Args: rules - an iterable of DottedRules symbol - a string denoting the symbol we've just seen Returns: frozenset of DottedRules
codesearchnet
def wrap_cc(filepath, compiler, project, python=sys.executable,
            detect_project=False):
    """Substitute a compiler with a script that hides CFLAGS & LDFLAGS.

    Generates a wrapper script at ``filepath`` and returns a complete
    plumbum command for it.

    Args:
        filepath (str): Path to the wrapper script.
        compiler (benchbuild.utils.cmd): Real compiler command we should
            call in the script.
        project (benchbuild.project.Project): The project this compiler
            will be for.
        python (str): Path to the python interpreter we should use.
        detect_project: Should we enable project detection or not.

    Returns (benchbuild.utils.cmd):
        Command of the new compiler we can call.
    """
    template = __create_jinja_env().get_template('run_compiler.py.inc')
    cc_fname = local.path(filepath).with_suffix('.benchbuild.cc', depth=0)
    # Persist the real compiler and the project so the wrapper can load them.
    cc_f = persist(compiler, filename=cc_fname)
    project_file = persist(project, suffix='.project')
    with open(filepath, 'w') as wrapper:
        wrapper.write(template.render(
            cc_f=cc_f,
            project_file=project_file,
            python=python,
            detect_project=detect_project))
    chmod('+x', filepath)
    LOG.debug('Placed wrapper in: %s for compiler %s',
              local.path(filepath), str(compiler))
    LOG.debug('Placed project in: %s', local.path(project_file))
    LOG.debug('Placed compiler command in: %s', local.path(cc_f))
    return local[filepath]
Substitute a compiler with a script that hides CFLAGS & LDFLAGS. This will generate a wrapper script in the current directory and return a complete plumbum command to it. Args: filepath (str): Path to the wrapper script. compiler (benchbuild.utils.cmd): Real compiler command we should call in the script. project (benchbuild.project.Project): The project this compiler will be for. python (str): Path to the python interpreter we should use. detect_project: Should we enable project detection or not. Returns (benchbuild.utils.cmd): Command of the new compiler we can call.
codesearchnet
def dump(self, out_path, header=True):
    """Save this list of changes as a csv file at ``out_path``.

    The output is a CSV with 4 columns: timestamp, tile address, property,
    string_value. A single header row starts the output unless
    ``header=False`` is passed.

    Args:
        out_path (str): The path where we should save our current list of
            changes.
        header (bool): Whether we should include a header row in the csv
            file. Defaults to True.
    """
    # Python 2 csv wants binary mode; Python 3 wants text mode.
    mode = 'wb' if sys.version_info[0] < 3 else 'w'
    with open(out_path, mode) as outfile:
        writer = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL)
        if header:
            writer.writerow(['Timestamp', 'Tile Address',
                             'Property Name', 'Value'])
        writer.writerows(
            [entry.time, entry.tile, entry.property, entry.string_value]
            for entry in self.changes)
Save this list of changes as a csv file at out_path. The format of the output file will be a CSV with 4 columns: timestamp, tile address, property, string_value There will be a single header row starting the CSV output unless header=False is passed. Args: out_path (str): The path where we should save our current list of changes. header (bool): Whether we should include a header row in the csv file. Defaults to True.
codesearchnet
def _follow_leafref(
        self, xpath: "Expr", init: "TerminalNode") -> Optional["DataNode"]:
    """Return the data node referred to by a leafref path.

    Args:
        xpath: XPath expression compiled from a leafref path.
        init: initial context node (supplies the default namespace).

    Returns:
        The referenced data node, or ``None`` when it cannot be resolved.
    """
    if isinstance(xpath, LocationPath):
        # Resolve the left part first, then continue from that node.
        left_node = self._follow_leafref(xpath.left, init)
        if left_node is None:
            return None
        return left_node._follow_leafref(xpath.right, init)
    if isinstance(xpath, Step):
        if xpath.axis == Axis.parent:
            return self.data_parent()
        if xpath.axis == Axis.child:
            if isinstance(self, InternalNode) and xpath.qname:
                # Fill in the initial node's namespace when unqualified.
                if xpath.qname[1]:
                    qname = xpath.qname
                else:
                    qname = (xpath.qname[0], init.ns)
                return self.get_data_child(*qname)
    elif isinstance(xpath, Root):
        return self.schema_root()
    return None
Return the data node referred to by a leafref path. Args: xpath: XPath expression compiled from a leafref path. init: initial context node
juraj-google-style
def start_task(self, task_type, task_id):
    """Start a server given ``task_type`` and ``task_id``.

    Args:
        task_type: the type of the task, such as "worker".
        task_id: the id of the task, such as 1.

    Raises:
        ValueError: if the server already exists (its previous start/finish
            events have not both completed).
    """
    assert self._mpr
    started = self._start_events[task_type][task_id].is_set()
    finished = self._finish_events[task_type][task_id].is_set()
    if not started or not finished:
        raise ValueError(
            'The task %s:%d is still alive. You cannot start another one.'
            % (task_type, task_id))
    # Fresh events for the new incarnation of this task.
    self._start_events[task_type][task_id] = self._mpr_manager.Event()
    self._finish_events[task_type][task_id] = self._mpr_manager.Event()
    self._mpr.start_single_process(task_type=task_type, task_id=task_id)
    # Block until the process signals that it has started.
    self._start_events[task_type][task_id].wait()
Starts a server given task_type and task_id. Args: task_type: the type of the task such as "worker". task_id: the id the task such as 1. Raises: ValueError: if the server already exists.
github-repos
def serialize(self) -> dict:
    """Serialize the message for sending to the Slack API.

    Returns:
        dict: shallow copy of the message with ``attachments``
        JSON-encoded, as the Slack API expects.
    """
    payload = dict(self)
    if 'attachments' in self:
        # Slack expects attachments as a JSON-encoded string.
        payload['attachments'] = json.dumps(self['attachments'])
    return payload
Serialize the message for sending to slack API Returns: serialized message
codesearchnet
def _get_or_load_domain(self, domain):
    """Return an existing Domain, or create and register a new one.

    Args:
        domain (str, dict): one of:
            - the name of an already-registered Domain (fails if none
              exists and the string is not a readable path)
            - a path to a Domain configuration (JSON) file
            - a dict of configuration information

    Returns:
        The registered Domain instance.

    Raises:
        ValueError: if a string input is neither a known Domain name nor
            an existing file path.
    """
    if isinstance(domain, six.string_types):
        if domain in self.domains:
            return self.domains[domain]
        if exists(domain):
            with open(domain, 'r') as fobj:
                domain = json.load(fobj)
        else:
            raise ValueError("No domain could be found/loaded from input "
                             "'{}'; value must be either the name of an "
                             "existing Domain, or a valid path to a "
                             "configuration file.".format(domain))
    name = domain['name']
    if name in self.domains:
        # Duplicate registration: warn and return the existing config.
        warnings.warn("Domain with name '{}' already exists; returning "
                      "existing Domain configuration.".format(name))
        return self.domains[name]
    entities = domain.get('entities', [])
    domain = Domain(domain)
    for entity in entities:
        self.add_entity(domain=domain, **entity)
    self.domains[name] = domain
    return self.domains[name]
Return a domain if one already exists, or create a new one if not. Args: domain (str, dict): Can be one of: - The name of the Domain to return (fails if none exists) - A path to the Domain configuration file - A dictionary containing configuration information
juraj-google-style
def add_properties(props, mol):
    """Apply CTAB property blocks (CHG/RAD/ISO) to the molecule.

    Resets charge, multiplicity and mass on every atom, then applies the
    given property entries. Alters the molecule object directly.

    Returns:
        None
    """
    if not props:
        return
    # Reset to defaults before applying explicit properties.
    for _, atom in mol.atoms_iter():
        atom.charge = 0
        atom.multi = 1
        atom.mass = None
    for prop in props.get('CHG', []):
        mol.atom(prop[0]).charge = prop[1]
    for prop in props.get('RAD', []):
        mol.atom(prop[0]).multi = prop[1]
    for prop in props.get('ISO', []):
        mol.atom(prop[0]).mass = prop[1]
apply properties to the molecule object Returns: None (alter molecule object directly)
codesearchnet
def result(self):
    """Compute the current metric value.

    Returns:
        A scalar tensor, or a dictionary of scalar tensors.

    Raises:
        NotImplementedError: subclasses must override this method.
    """
    raise NotImplementedError
Compute the current metric value. Returns: A scalar tensor, or a dictionary of scalar tensors.
github-repos
def validate_with_tags(self, tags, confidence):
    """Validate whether ``tags`` contain the entities this intent requires.

    Args:
        tags (list): Tags and Entities used for validation.
        confidence (float): confidence of the tag set.

    Returns:
        (intent, used_tags): the intent result dict and the tags consumed
        by it. On failure to meet required entities, the intent carries
        confidence 0.0 and the tag list is empty.
    """
    result = {'intent_type': self.name}
    intent_confidence = 0.0
    local_tags = tags[:]
    used_tags = []
    for require_type, attribute_name in self.requires:
        # NOTE(review): this rebinds `confidence`, shadowing the parameter;
        # preserved as-is since the final score computation depends on it.
        required_tag, canonical_form, confidence = find_first_tag(
            local_tags, require_type)
        if not required_tag:
            result['confidence'] = 0.0
            return result, []
        result[attribute_name] = canonical_form
        if required_tag in local_tags:
            local_tags.remove(required_tag)
        used_tags.append(required_tag)
        intent_confidence += confidence
    if len(self.at_least_one) > 0:
        best_resolution = resolve_one_of(tags, self.at_least_one)
        if not best_resolution:
            result['confidence'] = 0.0
            return result, []
        for key in best_resolution:
            result[key] = best_resolution[key][0].get('key')
        intent_confidence += 1.0
        used_tags.append(best_resolution)
        if best_resolution in local_tags:
            local_tags.remove(best_resolution)
    for optional_type, attribute_name in self.optional:
        optional_tag, canonical_form, conf = find_first_tag(
            local_tags, optional_type)
        if not optional_tag or attribute_name in result:
            continue
        result[attribute_name] = canonical_form
        if optional_tag in local_tags:
            local_tags.remove(optional_tag)
        used_tags.append(optional_tag)
        intent_confidence += 1.0
    total_confidence = intent_confidence / len(tags) * confidence
    target_client, canonical_form, confidence = find_first_tag(
        local_tags, CLIENT_ENTITY_NAME)
    result['target'] = target_client.get('key') if target_client else None
    result['confidence'] = total_confidence
    return result, used_tags
Validate whether tags have the required entities for this intent to fire Args: tags(list): Tags and Entities used for validation confidence(float): ? Returns: intent, tags: Returns intent and tags used by the intent; on failure to meet required entities, returns intent with confidence of 0.0 and an empty list for tags.
juraj-google-style
def is_same_file(path1, path2):
    """Return True if path1 is the same file as path2.

    ``os.path.samefile`` raises when either file does not exist, so both
    paths are checked for existence first.

    Args:
        path1: str or path-like.
        path2: str or path-like.

    Returns:
        bool: True if the same file, False if not.
    """
    # Fixed: coerce to bool so falsy inputs ('' / None) yield False rather
    # than leaking the input value, matching the documented return type.
    return bool(path1 and path2
                and os.path.isfile(path1) and os.path.isfile(path2)
                and os.path.samefile(path1, path2))
Return True if path1 is the same file as path2. The reason for this dance is that samefile throws if either file doesn't exist. Args: path1: str or path-like. path2: str or path-like. Returns: bool. True if the same file, False if not.
codesearchnet
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    logging_pb2 = google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2
    empty_pb2 = google_dot_protobuf_dot_empty__pb2
    # (full method path, request serializer, response deserializer)
    rpcs = [
        ("/google.logging.v2.LoggingServiceV2/DeleteLog",
         logging_pb2.DeleteLogRequest.SerializeToString,
         empty_pb2.Empty.FromString),
        ("/google.logging.v2.LoggingServiceV2/WriteLogEntries",
         logging_pb2.WriteLogEntriesRequest.SerializeToString,
         logging_pb2.WriteLogEntriesResponse.FromString),
        ("/google.logging.v2.LoggingServiceV2/ListLogEntries",
         logging_pb2.ListLogEntriesRequest.SerializeToString,
         logging_pb2.ListLogEntriesResponse.FromString),
        ("/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors",
         logging_pb2.ListMonitoredResourceDescriptorsRequest.SerializeToString,
         logging_pb2.ListMonitoredResourceDescriptorsResponse.FromString),
        ("/google.logging.v2.LoggingServiceV2/ListLogs",
         logging_pb2.ListLogsRequest.SerializeToString,
         logging_pb2.ListLogsResponse.FromString),
    ]
    for path, serializer, deserializer in rpcs:
        stub = channel.unary_unary(
            path,
            request_serializer=serializer,
            response_deserializer=deserializer,
        )
        # Attribute name is the method's short name (e.g. "DeleteLog").
        setattr(self, path.rsplit('/', 1)[-1], stub)
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def reset(self, entries_to_reset):
    """Reset the given entries in the memory.

    Args:
        entries_to_reset: a 1D tensor of entry indices.

    Returns:
        the grouped reset op.
    """
    num_updates = tf.size(entries_to_reset)
    # Zero out the stored values for each entry being reset.
    zero_vals = tf.tile(
        tf.expand_dims(tf.fill([self.memory_size, self.val_depth], .0), 0),
        [num_updates, 1, 1])
    update_vals = tf.scatter_update(
        self.mem_vals, entries_to_reset, zero_vals)
    # Zero out the corresponding mean logits as well.
    zero_logits = tf.tile(
        tf.expand_dims(tf.fill([self.memory_size], .0), 0),
        [num_updates, 1])
    update_logits = tf.scatter_update(
        self.mean_logits, entries_to_reset, zero_logits)
    return tf.group([update_vals, update_logits])
Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op.
juraj-google-style
def _PopulateQuantilesHistogram(self, hist, nums): if not nums: return num_quantile_buckets = 10 quantiles_to_get = [ x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1) ] quantiles = np.percentile(nums, quantiles_to_get) hist.type = self.histogram_proto.QUANTILES quantiles_sample_count = float(len(nums)) / num_quantile_buckets for low, high in zip(quantiles, quantiles[1:]): hist.buckets.add( low_value=low, high_value=high, sample_count=quantiles_sample_count)
Fills in the histogram with quantile information from the provided array. Args: hist: A Histogram proto message to fill in. nums: A list of numbers to create a quantiles histogram from.
juraj-google-style
def device_configuration(self, pending=False, use_included=False):
    """Get a specific device configuration.

    A device has at most one loaded and one pending configuration; this
    returns the one selected by ``pending``.

    Keyword Args:
        pending (bool): Fetch the pending configuration instead of the
            loaded one.
        use_included (bool): Use included resources in this device
            configuration.

    Returns:
        The requested loaded or pending configuration, or None if no
        matching device configuration is found.
    """
    for config in self.device_configurations(use_included=use_included):
        # pending=False selects a loaded config; pending=True an unloaded one.
        if config.is_loaded() is not pending:
            return config
    return None
Get a specific device configuration. A device can have at most one loaded and one pending device configuration. This returns that device_configuration based on a given flag. Keyword Args: pending(bool): Fetch the pending configuration or return the loaded one. use_included(bool): Use included resources in this device configuration. Returns: The requested loaded or pending configuration or None if no device configuration is found.
codesearchnet
def readline(self, size=(- 1)):
    """Read one line delimited by '\\n' from the file.

    A trailing newline character is kept in the string. It may be absent
    when a file ends with an incomplete line. If the size argument is
    non-negative, it specifies the maximum string size (counting the
    newline) to return. A negative size is the same as unspecified. An
    empty string is returned only when EOF is encountered immediately.

    Args:
        size: Maximum number of bytes to read. If not specified, readline
            stops only on '\\n' or EOF.

    Returns:
        The data read, as a string.

    Raises:
        IOError: When this buffer is closed.
    """
    self._check_open()
    if ((size == 0) or (not self._remaining())):
        return ''
    data_list = []
    # Keep consuming whole buffers until one contains a newline, the size
    # budget is exhausted, or the file runs out of data.
    newline_offset = self._buffer.find_newline(size)
    while (newline_offset < 0):
        data = self._buffer.read(size)
        size -= len(data)
        self._offset += len(data)
        data_list.append(data)
        if ((size == 0) or (not self._remaining())):
            return ''.join(data_list)
        # Swap in the prefetched buffer and kick off the next prefetch.
        self._buffer.reset(self._buffer_future.get_result())
        self._request_next_buffer()
        newline_offset = self._buffer.find_newline(size)
    # Consume through (and including) the newline character.
    data = self._buffer.read_to_offset((newline_offset + 1))
    self._offset += len(data)
    data_list.append(data)
    return ''.join(data_list)
Read one line delimited by '\n' from the file. A trailing newline character is kept in the string. It may be absent when a file ends with an incomplete line. If the size argument is non-negative, it specifies the maximum string size (counting the newline) to return. A negative size is the same as unspecified. Empty string is returned only when EOF is encountered immediately. Args: size: Maximum number of bytes to read. If not specified, readline stops only on '\n' or EOF. Returns: The data read as a string. Raises: IOError: When this buffer is closed.
codesearchnet
def parse_args(test: ArgList=None) -> argparse.Namespace:
    """Parse commandline arguments.

    Args:
        test (typing.Optional[typing.List[str]], optional): Commandline
            args for testing. Defaults to None.

    Returns:
        argparse.Namespace: Parsed data of args.
    """
    def formatter(prog):
        # Wide help output sized to the current terminal.
        return BudouxHelpFormatter(
            prog,
            **{'width': shutil.get_terminal_size(fallback=(120, 50)).columns,
               'max_help_position': 30})

    parser = argparse.ArgumentParser(
        prog='budoux',
        formatter_class=formatter,
        description=textwrap.dedent(' BudouX is the successor to Budou,\n the machine learning powered line break organizer tool.'),
        epilog='\n- '.join(
            ['supported languages of `-l`, `--lang`:', *langs.keys()]))
    parser.add_argument('text', metavar='TXT', nargs='?', type=str,
                        help='text')
    parser.add_argument('-H', '--html', action='store_true', help='HTML mode')
    model_select_group = parser.add_mutually_exclusive_group()
    model_select_group.add_argument(
        '-m', '--model', metavar='JSON', type=check_file,
        default=check_lang('ja'), help='custom model file path')
    model_select_group.add_argument(
        '-l', '--lang', metavar='LANG', type=check_lang,
        help='language of custom model')
    parser.add_argument('-s', '--sep', metavar='STR', type=str, default='\n',
                        help='output phrase separator in TEXT mode')
    parser.add_argument('-d', '--delim', metavar='STR', type=str,
                        default='---',
                        help='output sentence delimiter in TEXT mode')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s {}'.format(budoux.__version__))
    if test is None:
        return parser.parse_args()
    return parser.parse_args(test)
Parse commandline arguments. Args: test (typing.Optional[typing.List[str]], optional): Commandline args for testing. Defaults to None. Returns: argparse.Namespace: Parsed data of args.
github-repos
def VisitTypeDeclUnit(self, node):
    """Add star imports to the ast.

    Args:
        node: A pytd.TypeDeclUnit instance.

    Returns:
        The pytd.TypeDeclUnit instance, with star imports added.

    Raises:
        KeyError: If a duplicate __getattr__ definition is found during
            import.
    """
    if not self._star_imports:
        return node
    prefix = self._ModulePrefix()
    # Names of the star-import aliases themselves; these are replaced by
    # the expanded members below.
    star_import_names = set()
    for module in self._star_imports:
        if module.startswith(prefix):
            star_import_names.add(module + '.*')
        star_import_names.add(prefix + module + '.*')
    new_aliases = []
    new_getattrs = set()
    for module in self._star_imports:
        aliases, getattrs = self._ImportAll(module)
        new_aliases.extend(aliases)
        new_getattrs.update(getattrs)
    new_aliases = self._DiscardExistingNames(node, new_aliases)
    new_getattrs = self._DiscardExistingNames(node, new_getattrs)
    new_aliases = self._HandleDuplicates(new_aliases)
    if len(new_getattrs) > 1:
        raise KeyError('Multiple __getattr__ definitions')
    kept_aliases = tuple(
        alias for alias in node.aliases
        if alias.name not in star_import_names)
    return node.Replace(
        functions=node.functions + tuple(new_getattrs),
        aliases=kept_aliases + tuple(new_aliases))
Add star imports to the ast. Args: node: A pytd.TypeDeclUnit instance. Returns: The pytd.TypeDeclUnit instance, with star imports added. Raises: KeyError: If a duplicate member is found during import.
github-repos
def threshold(self) -> float:
    """Return the fixed cutoff threshold value.

    Returns:
        float: The fixed threshold value.
    """
    return self._cutoff
Returns the fixed cutoff threshold value. Returns: float: The fixed threshold value.
github-repos
def check_lang(lang: str) -> Path:
    """Resolve a language code to its builtin model path.

    Args:
        lang (str): language code (e.g.: 'ja').

    Raises:
        argparse.ArgumentTypeError: Raised when no model exists for the
            given language.

    Returns:
        The model path.
    """
    if lang not in langs:
        raise argparse.ArgumentTypeError(
            f"'{lang}' does not exist in builtin models. "
            f"(supported languages: {list(langs.keys())})")
    return langs[lang]
Check if given language exists or not. Args: lang (str): language code (e.g.: 'ja') Raises: argparse.ArgumentTypeError: Raise if no model for given language exists. Returns: The model path.
github-repos
def inference_q(self, next_action_arr):
    """Infer Q-Values for a batch of candidate actions.

    Maintains a rolling window of the last ``seq_len`` observations and
    feeds it to the LSTM model.

    Args:
        next_action_arr: `np.ndarray` of action, shape (batch, ...).

    Returns:
        `np.ndarray` of Q-Values, shape (batch, 1).
    """
    flat_q = next_action_arr.reshape((next_action_arr.shape[0], -1))
    self.__q_arr_list.append(flat_q)
    # Trim old frames, then pad by repeating the newest frame so the
    # window is always exactly `seq_len` long.
    while len(self.__q_arr_list) > self.__seq_len:
        self.__q_arr_list = self.__q_arr_list[1:]
    while len(self.__q_arr_list) < self.__seq_len:
        self.__q_arr_list.append(self.__q_arr_list[-1])
    stacked = np.array(self.__q_arr_list)      # (seq, batch, feat)
    stacked = stacked.transpose((1, 0, 2))     # (batch, seq, feat)
    q_out = self.__lstm_model.inference(stacked)
    # Return only the final time step as a (batch, 1) column.
    return q_out[:, -1].reshape((q_out.shape[0], 1))
Inference Q-Value. Args: next_action_arr: `np.ndarray` of action. Returns: `np.ndarray` of Q-Values.
juraj-google-style
def _dataset_load_from_hdx(self, id_or_name): if (not self._load_from_hdx('dataset', id_or_name)): return False self._dataset_create_resources() return True
Loads the dataset given by either id or name from HDX Args: id_or_name (str): Either id or name of dataset Returns: bool: True if loaded, False if not
codesearchnet
def set_axis(self, labels, axis=0, inplace=None): if is_scalar(labels): warnings.warn( 'set_axis now takes "labels" as first argument, and ' '"axis" as named parameter. The old form, with "axis" as ' 'first parameter and "labels" as second, is still supported ' "but will be deprecated in a future version of pandas.", FutureWarning, stacklevel=2, ) labels, axis = axis, labels if inplace is None: warnings.warn( "set_axis currently defaults to operating inplace.\nThis " "will change in a future version of pandas, use " "inplace=True to avoid this warning.", FutureWarning, stacklevel=2, ) inplace = True if inplace: setattr(self, pandas.DataFrame()._get_axis_name(axis), labels) else: obj = self.copy() obj.set_axis(labels, axis=axis, inplace=True) return obj
Assign desired index to given axis. Args: labels (pandas.Index or list-like): The Index to assign. axis (string or int): The axis to reassign. inplace (bool): Whether to make these modifications inplace. Returns: If inplace is False, returns a new DataFrame, otherwise None.
juraj-google-style
def from_config(cls, config, custom_objects=None): if 'learning_rate' in config: if isinstance(config['learning_rate'], dict): config['learning_rate'] = serialization_lib.deserialize_keras_object(config['learning_rate'], custom_objects=custom_objects) return cls(**config)
Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional user-defined Python objects needed to recreate this optimizer. Returns: An optimizer instance.
github-repos
def _is_src_field_auto_convertible(src_field, dest_proto_fields_by_name) -> bool: if src_field.name not in dest_proto_fields_by_name: return False dest_field = dest_proto_fields_by_name[src_field.name] if dest_field.label != src_field.label or src_field.type != dest_field.type: return False if _is_map_field(src_field): src_fields_by_name = src_field.message_type.fields_by_name dest_fields_by_name = dest_field.message_type.fields_by_name if not _is_src_field_auto_convertible(src_fields_by_name['key'], dest_fields_by_name) or not _is_src_field_auto_convertible(src_fields_by_name['value'], dest_fields_by_name): return False elif src_field.type == descriptor.FieldDescriptor.TYPE_MESSAGE: if _is_any_field(src_field) and _is_any_field(dest_field): return True if _is_any_field(src_field): return False if _is_any_field(dest_field): return True if src_field.message_type != dest_field.message_type: return False return True
Checks if the src_field can be auto-converted. There must be a field in dest_proto with same name and type as the src_field to auto convert src_field. Args: src_field: the field to check if it's auto-convertible. dest_proto_fields_by_name: field name to field dict for dest_proto. Returns: bool: True if the src_field is auto-convertible.
github-repos
def __init__(self, config): self.name = config['name'] self.config = config self.entities = {} self.files = [] self.include = listify(self.config.get('include', [])) self.exclude = listify(self.config.get('exclude', [])) if self.include and self.exclude: raise ValueError("The 'include' and 'exclude' arguments cannot " "both be set. Please pass at most one of these " "for domain '%s'." % self.name) self.path_patterns = listify(config.get('default_path_patterns', []))
A set of rules that applies to one or more directories within a Layout. Args: name (str): The name of the Domain. config (dict): The configuration dictionary that defines the entities and paths for the current domain.
juraj-google-style
def print_probabilities(state: State, ndigits: int = 4,
                        file: TextIO = None) -> None:
    """Pretty print state probabilities.

    Probabilities that round to 0.0 are skipped.

    Args:
        state: The quantum state whose measurement probabilities are printed.
        ndigits: Number of digits of accuracy.
        file: Output stream (defaults to stdout).
    """
    # Fix: the original rebound `prob` (the full probabilities array) inside
    # its own loop; use a separate name for the array to avoid shadowing.
    probs = bk.evaluate(state.probabilities())
    for index, prob in np.ndenumerate(probs):
        prob = round(prob, ndigits)
        if prob == 0.0:
            continue
        # Render the multi-index as a ket label, e.g. (0, 1, 1) -> "011".
        ket = ''.join(str(n) for n in index)
        print(ket, ':', prob, file=file)
Pretty print state probabilities. Args: state: The quantum state whose measurement probabilities are printed. ndigits: Number of digits of accuracy. file: Output stream (defaults to stdout).
codesearchnet
class PerceiverEmbeddingDecoder(nn.Module):
    """Module to decode embeddings (for masked language modeling).

    Args:
        config ([`PerceiverConfig`]): Model configuration.
    """

    def __init__(self, config: PerceiverConfig) -> None:
        super().__init__()
        self.config = config
        self.vocab_size = config.vocab_size
        # Per-token output bias over the vocabulary.
        self.bias = nn.Parameter(torch.zeros(self.vocab_size))

    def forward(self, hidden_states: torch.Tensor, embedding_layer: torch.Tensor) -> torch.Tensor:
        """Project hidden states onto the vocabulary.

        Args:
            hidden_states: Tensor of shape (batch, seq_len, d_model).
            embedding_layer: Embedding module whose (transposed) weight matrix
                is used for tied-weight decoding.

        Returns:
            Logits of shape (batch, seq_len, vocab_size).
        """
        batch_size, seq_len, d_model = hidden_states.shape
        # Tied-weight decoding: multiply by the transposed embedding matrix.
        output = torch.matmul(hidden_states.reshape([-1, d_model]), embedding_layer.weight.transpose(0, 1))
        output = output + self.bias
        return output.reshape([batch_size, seq_len, self.vocab_size])
Module to decode embeddings (for masked language modeling). Args: config ([`PerceiverConfig`]): Model configuration.
github-repos
def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False): if not rename_model_to: rename_model_to = self.model_to_use new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to)) if self.structure_path: if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path): custom_clean = CleanPDB() my_pdb = StructureIO(self.structure_path) new_model_path = my_pdb.write_pdb(custom_selection=custom_clean, custom_name=rename_model_to, out_dir=copy_to_dir, force_rerun=force_rerun) self.load_structure_path(structure_path=new_model_path, file_type='pdb') dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to)) if not op.exists(dest_itasser_dir): os.mkdir(dest_itasser_dir) for attr in self._attrs_to_copy: old_file_path = getattr(self, attr) new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path): shutil.copy2(old_file_path, new_file_path) log.debug('{}: copied from {}'.format(new_file_path, old_file_path)) else: log.debug('{}: file already exists'.format(new_file_path)) setattr(self, attr, new_file_path)
Copy the raw information from I-TASSER modeling to a new folder. Copies all files in the list _attrs_to_copy. Args: copy_to_dir (str): Directory to copy the minimal set of results per sequence. rename_model_to (str): New file name (without extension) force_rerun (bool): If existing models and results should be overwritten.
juraj-google-style
def get_access_token(tenant_id, application_id, application_secret): context = adal.AuthenticationContext((get_auth_endpoint() + tenant_id), api_version=None) token_response = context.acquire_token_with_client_credentials(get_resource_endpoint(), application_id, application_secret) return token_response.get('accessToken')
get an Azure access token using the adal library. Args: tenant_id (str): Tenant id of the user's account. application_id (str): Application id of a Service Principal account. application_secret (str): Application secret (password) of the Service Principal account. Returns: An Azure authentication token string.
codesearchnet
def __init__(self, **kwargs): try: self.nap_time = int(os.environ.get('CSU_POLL_INTERVAL', 30)) except Exception: self.nap_time = 15 self._stack_name = kwargs.get('Stack') self._verbose = kwargs.get('Verbose', False) if not self._stack_name: logging.error('no stack name given, exiting') raise SystemError if not self._init_boto3_clients(kwargs.get('Profile'), kwargs.get('Region')): logging.error('client initialization failed, exiting') raise SystemError
Initialize the utility and set up the state needed to do the work. Args: kwargs (dict): Stack (str, required): name of the CloudFormation stack. Verbose (bool): enable verbose output. Profile (str): AWS credentials profile used to create boto3 clients. Region (str): AWS region used to create boto3 clients. Raises: SystemError: if no stack name is given or if boto3 client initialization fails.
juraj-google-style
def GetMetadataAttribute(self, attribute_name): table_name = 'metadata' has_table = self._database_file.HasTable(table_name) if (not has_table): return None column_names = ['value'] condition = 'name == "{0:s}"'.format(attribute_name) values = list(self._database_file.GetValues([table_name], column_names, condition)) number_of_values = len(values) if (number_of_values == 0): return None if (number_of_values == 1): return values[0]['value'] raise RuntimeError('More than one value found in database.')
Retrieves the metadata attribute. Args: attribute_name (str): name of the metadata attribute. Returns: str: the metadata attribute or None. Raises: RuntimeError: if more than one value is found in the database.
codesearchnet
def _FormatInode(self, event): inode = event.inode if inode is None: if hasattr(event, 'pathspec') and hasattr(event.pathspec, 'image_inode'): inode = event.pathspec.image_inode if inode is None: inode = '-' return inode
Formats the inode. Args: event (EventObject): event. Returns: str: inode field.
juraj-google-style
def __init__(self, checkpointer_impl, root=None, **kwargs): if root: trackable_root = root() if isinstance(root, weakref.ref) else root kwargs['root'] = trackable_root trackable_root._maybe_initialize_trackable() if checkpointer_impl is None: raise AttributeError('checkpointer_impl cannot be None for AsyncCheckpointHelper.') self._checkpointer_impl = checkpointer_impl self._checkpoint_items = kwargs self._checkpoint = None self.checkpointer() self._checkpoint_options = None self._initialized = False self._original_nodes = None self._object_map = None self._tpu_embedding_objects = None self._saveable_trackables = None self._default_device = device_util.current() or 'CPU:0' self._default_device = device_util.canonicalize(self._default_device) self._save_file_prefix = None self._use_checkpoint_save = False self._async_save_thread = None self._queue = queue.Queue(maxsize=1) atexit.register(self._join_async_save_thread) self._async_error = None global _END_TIME_OF_LAST_ASYNC_WRITE with _END_TIME_OF_LAST_ASYNC_WRITE_LOCK: if _END_TIME_OF_LAST_ASYNC_WRITE is None: _END_TIME_OF_LAST_ASYNC_WRITE = time.time()
Initialize AsyncCheckpoint. Args: checkpointer_impl: The Checkpoint class to power the AsyncCheckpoint. root: The root object to checkpoint. `root` may be a trackable object or `WeakRef` of a trackable object. **kwargs: The keyword arguments representing the checkpointed variables. Raises: AttributeError: when checkpointer_impl is None.
github-repos
def has_all_nonzero_section_lengths(neuron, threshold=0.0): bad_ids = [s.id for s in _nf.iter_sections(neuron.neurites) if (section_length(s.points) <= threshold)] return CheckResult((len(bad_ids) == 0), bad_ids)
Check presence of neuron sections with length not above threshold Arguments: neuron(Neuron): The neuron object to test threshold(float): value above which a section length is considered to be non-zero Returns: CheckResult with result including list of ids of bad sections
codesearchnet
def wait_key(keys=None): if is_a_tty(): if keys: if not isinstance(keys, tuple): keys = (keys,) while True: key = _getch() if key in keys: return key else: return _getch()
Waits for a keypress at the console and returns it. "Where's the any key?" Arguments: keys - if passed, wait for this specific key, e.g. ESC. may be a tuple. Returns: char or ESC - depending on key hit. None - immediately under i/o redirection, not an interactive tty.
juraj-google-style
def is_in_path(program):
    """Check if a program is in the system ``PATH``.

    Checks if a given program is in the user's ``PATH`` or not.

    Args:
        program (str): The program to try to find in ``PATH``.

    Returns:
        bool: Is the program in ``PATH``?
    """
    if sys.version_info.major == 2:
        # Python 2 has no os.get_exec_path(); split PATH manually.
        path = os.getenv('PATH')
        path = path.split(';') if os.name == 'nt' else path.split(':')
    else:
        path = os.get_exec_path()
    for directory in path:
        # Skip PATH entries that do not exist or are not directories.
        if os.path.isdir(directory) and program in os.listdir(directory):
            return True
    # Bug fix: the original fell through and implicitly returned None; return
    # an explicit False (still falsy, so callers are unaffected).
    return False
Check if a program is in the system ``PATH``. Checks if a given program is in the user's ``PATH`` or not. Args: program (str): The program to try to find in ``PATH``. Returns: bool: Is the program in ``PATH``?
juraj-google-style
def generate_parsers(config, paths):
    """Generate parser for all `paths`.

    Args:
        config (dict): Original configuration dictionary used to get matches
            for unittests. See :mod:`~harvester.autoparser.conf_reader` for
            details.
        paths (dict): Output from :func:`.select_best_paths`.

    Returns:
        str: Python code containing all parsers for `paths`.
    """
    # NOTE(review): the extracted source had the string literals stripped from
    # several `output` assignments (`output = ` and bare `output += "` lines);
    # the header/separator strings below are a best-effort reconstruction --
    # verify against the original module.
    output = ""

    # Inline helper functions from the project so the generated module is
    # self-contained.
    output += inspect.getsource(conf_reader._get_source) + "\n\n"
    output += inspect.getsource(utils._get_encoding) + "\n\n"
    output += inspect.getsource(utils.handle_encodnig) + "\n\n"
    output += inspect.getsource(utils.is_equal_tag) + "\n\n"
    output += inspect.getsource(utils.has_neigh) + "\n\n"
    output += "\n"

    # Emit one parser function per named path.
    for name, path in paths.items():
        path = path[0]
        required = config[0]["vars"][name].get("required", False)
        notfoundmsg = config[0]["vars"][name].get("notfoundmsg", "")
        output += _generate_parser(name, path, required, notfoundmsg)

    output += "\n"
    output += _unittest_template(config)
    output += "\n"
    output += "if __name__ == '__main__':\n"
    output += IND + "test_parsers()"
    return output
Generate parser for all `paths`. Args: config (dict): Original configuration dictionary used to get matches for unittests. See :mod:`~harvester.autoparser.conf_reader` for details. paths (dict): Output from :func:`.select_best_paths`. Returns: str: Python code containing all parsers for `paths`.
juraj-google-style
def send_cmd(cmd, args, ret): from dvc.daemon import daemon if (not Analytics._is_enabled(cmd)): return analytics = Analytics() analytics.collect_cmd(args, ret) daemon(['analytics', analytics.dump()])
Collect and send analytics for a CLI command. Args: cmd: name of the CLI command; used to check whether analytics collection is enabled. args (list): parsed args for the CLI command. ret (int): return value of the CLI command.
codesearchnet
def copy(self, destination): destination_uri = self.repo.parse_uri(destination) response = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination': destination_uri.toPython()}) if (response.status_code == 201): return destination_uri else: raise Exception(('HTTP %s, could not move resource %s to %s' % (response.status_code, self.uri, destination_uri)))
Method to copy resource to another location Args: destination (rdflib.term.URIRef, str): URI location to move resource Returns: (Resource) new, moved instance of resource
codesearchnet
def display(self, *amplExpressions): exprs = list(map(str, amplExpressions)) lock_and_call((lambda : self._impl.displayLst(exprs, len(exprs))), self._lock)
Writes on the current OutputHandler the outcome of the AMPL statement. .. code-block:: ampl display e1, e2, .., en; where e1, ..., en are the strings passed to the procedure. Args: amplExpressions: Expressions to be evaluated.
codesearchnet
def execute_script(self, script, *args): return self._execute(Command.EXECUTE_SCRIPT, {'script': script, 'args': list(args)})
Execute JavaScript Synchronously in current context. Support: Web(WebView) Args: script: The JavaScript to execute. *args: Arguments for your JavaScript. Returns: Returns the return value of the function.
codesearchnet
def get_field(self, field, default=None): metadata = self._op.get('metadata') value = None if field == 'internal-id': value = self._op['name'] elif field == 'job-id': value = metadata['labels'].get('job-id') elif field == 'job-name': value = metadata['labels'].get('job-name') elif field == 'task-id': value = metadata['labels'].get('task-id') elif field == 'task-attempt': value = metadata['labels'].get('task-attempt') elif field == 'user-id': value = metadata['labels'].get('user-id') elif field == 'dsub-version': value = metadata['labels'].get('dsub-version') elif field == 'task-status': value = self._operation_status() elif field == 'logging': value = metadata['request']['pipelineArgs']['logging']['gcsPath'] elif field == 'envs': value = self._get_operation_input_field_values(metadata, False) elif field == 'labels': value = { k: v for k, v in metadata['labels'].items() if k not in job_model.RESERVED_LABELS } elif field == 'inputs': value = self._get_operation_input_field_values(metadata, True) elif field == 'outputs': value = self._get_operation_output_field_values(metadata) elif field == 'mounts': value = None elif field == 'create-time': value = google_base.parse_rfc3339_utc_string(metadata['createTime']) elif field == 'start-time': start_events = [ e for e in metadata.get('events', []) if e['description'] == 'start' ] if start_events: value = google_base.parse_rfc3339_utc_string( start_events[-1]['startTime']) elif field == 'end-time': if 'endTime' in metadata: value = google_base.parse_rfc3339_utc_string(metadata['endTime']) elif field == 'status': value = self._operation_status() elif field in ['status-message', 'status-detail']: status, last_update = self._operation_status_message() value = status elif field == 'last-update': status, last_update = self._operation_status_message() value = last_update elif field == 'provider': return _PROVIDER_NAME elif field == 'provider-attributes': gce_data = metadata.get('runtimeMetadata', {}).get('computeEngine', {}) if 
'machineType' in gce_data: machine_type = gce_data.get('machineType').rpartition('/')[2] else: machine_type = None instance_name = gce_data.get('instanceName') instance_zone = gce_data.get('zone') value = { 'machine-type': machine_type, 'instance-name': instance_name, 'zone': instance_zone, } elif field == 'events': events = metadata.get('events', []) value = [] for event in events: event_value = { 'name': event.get('description', ''), 'start-time': google_base.parse_rfc3339_utc_string(event['startTime']) } if 'endTime' in event: event_value['end-time'] = google_base.parse_rfc3339_utc_string( event['endTime']) value.append(event_value) elif field in [ 'user-project', 'script-name', 'script', 'input-recursives', 'output-recursives' ]: value = None else: raise ValueError('Unsupported field: "%s"' % field) return value if value else default
Returns a value from the operation for a specific set of field names. Args: field: a dsub-specific job metadata key default: default value to return if field does not exist or is empty. Returns: A text string for the field or a list for 'inputs'. Raises: ValueError: if the field label is not supported by the operation
juraj-google-style
def get_graph(self, item_ids, language=None): def _related(item_ids): if (item_ids is None): items = Item.objects.filter(active=True).prefetch_related('parents', 'children') else: item_ids = [ii for iis in item_ids.values() for ii in iis] items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents', 'children') return {item.id: sorted([_item.id for rel in [item.parents.all(), item.children.all()] for _item in rel]) for item in items} if (item_ids is None): return self._reachable_graph(None, _related, language=language) else: graph = self.get_graph(None, language) return self._subset_graph(graph, item_ids)
Get a subgraph of items reachable from the given set of items through any relation. Args: item_ids (list): items which are taken as roots for the reachability language (str): if specified, filter out items which are not available in the given language Returns: dict: item id -> list of items (parent items), root items are referenced by None key
codesearchnet
def iter_predict_proba(self, X, include_init=False):
    """Returns the predicted probabilities for ``X`` at every stage of the
    boosting procedure.

    Args:
        X (array-like or sparse matrix of shape (n_samples, n_features)): The
            input samples. Sparse matrices are accepted only if they are
            supported by the weak model.
        include_init (bool, default=False): If ``True`` then the prediction
            from ``init_estimator`` will also be returned.

    Returns:
        iterator of arrays of shape (n_samples, n_classes) containing the
        predicted probabilities at each stage.
    """
    utils.validation.check_is_fitted(self, 'init_estimator_')
    X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None,
                          force_all_finite=False)
    # Reusable output buffer; note the same array object is yielded at every
    # stage, so callers must copy it if they want to keep intermediate stages.
    probas = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float64)
    for y_pred in super().iter_predict(X, include_init=include_init):
        if len(self.classes_) == 2:
            # Binary case: single raw score column -> sigmoid + complement.
            # Fix: the original used the invalid subscripts `probas[(:, 1)]`
            # (an extraction artifact); restore NumPy slice indexing.
            probas[:, 1] = sigmoid(y_pred[:, 0])
            probas[:, 0] = 1.0 - probas[:, 1]
        else:
            probas[:] = softmax(y_pred)
        yield probas
Returns the predicted probabilities for ``X`` at every stage of the boosting procedure. Arguments: X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples. Sparse matrices are accepted only if they are supported by the weak model. include_init (bool, default=False): If ``True`` then the prediction from ``init_estimator`` will also be returned. Returns: iterator of arrays of shape (n_samples, n_classes) containing the predicted probabilities at each stage
codesearchnet
def _parse_doc(doc):
    """Parse a docstring.

    Parse a docstring and extract three components; headline, description,
    and map of arguments to help texts.

    Args:
        doc: docstring.

    Returns:
        a dictionary.
    """
    lines = doc.split("\n")

    # Leading free-text lines, up to the first keyword section.
    descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))
    headline = descriptions[0]
    if len(descriptions) >= 3:
        body = textwrap.dedent("\n".join(descriptions[2:]))
        description = "{0}\n\n{1}".format(lines[0], body)
    else:
        description = lines[0]

    # Lines of the argument section, up to the next keyword section.
    after_args = itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)
    args = list(itertools.takewhile(_checker(_KEYWORDS_OTHERS), after_args))

    argmap = {}
    for pair in args[1:]:
        key, *rest = [v.strip() for v in pair.split(":")]
        if rest:
            argmap[key] = ":".join(rest)

    return dict(headline=headline, description=description, args=argmap)
Parse a docstring. Parse a docstring and extract three components; headline, description, and map of arguments to help texts. Args: doc: docstring. Returns: a dictionary.
juraj-google-style
def __init__(self, filter_string=None, context=None): self._client = _utils.make_client(context) self._filter_string = filter_string self._descriptors = None
Initializes the ResourceDescriptors based on the specified filters. Args: filter_string: An optional filter expression describing the resource descriptors to be returned. context: An optional Context object to use instead of the global default.
juraj-google-style
def add_observer(self, callback):
    """Add an observer to this event.

    Args:
        callback: A function or coroutine callback to call when the event
            is fired.

    Raises:
        ValueError: If the callback has already been added.
    """
    already_registered = callback in self._observers
    if already_registered:
        raise ValueError(
            '{} is already an observer of {}'.format(callback, self))
    self._observers.append(callback)
Add an observer to this event. Args: callback: A function or coroutine callback to call when the event is fired. Raises: ValueError: If the callback has already been added.
codesearchnet
def calc_intent(self, query):
    """Tests all the intents against the query and returns match data of the
    best intent

    Args:
        query (str): Input sentence to test against intents

    Returns:
        MatchData: Best intent match
    """
    matches = self.calc_intents(query)
    if not matches:
        return MatchData('', '')
    # Among all matches tied for the highest confidence, prefer the one with
    # the smallest total length of matched strings.
    top_conf = max(m.conf for m in matches)
    tied = (m for m in matches if m.conf == top_conf)
    return min(tied, key=lambda m: sum(map(len, m.matches.values())))
Tests all the intents against the query and returns match data of the best intent Args: query (str): Input sentence to test against intents Returns: MatchData: Best intent match
codesearchnet
def _DeserializeResponse(self, payload): (status_line, payload) = payload.split('\n', 1) (_, status, _) = status_line.split(' ', 2) parser = email_parser.Parser() msg = parser.parsestr(payload) info = dict(msg) info['status'] = status content = msg.get_payload() return http_wrapper.Response(info, content, self.__batch_url)
Convert string into Response and content. Args: payload: Header and body string to be deserialized. Returns: A Response object
codesearchnet
def compiler_ir_generator(stage='hlo', device_name=None, platform_name=None): if device_name is not None: if platform_name is not None: raise ValueError('device_name and platform_name cannot be provided at the same time.') warnings.warn('device_name is being deprecated. Use platform_name.') device_name = compiler_ir.maybe_get_device_name(device_name) res_bytes = context.context().get_compiler_ir(device_name=device_name, platform_name=platform_name, function_name=fn_name, flat_args=list(filtered_flat_args), captured_inputs=concrete_fn.captured_inputs, stage=stage) if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'): return res_bytes else: return res_bytes.decode('utf-8')
Gets the compiler IR bytes. Args: stage: The exported stage for the given function. device_name: The name of the device with the form as "/job:localhost/replica:0/task:0/device:CPU:0", "/device:TPU:0" etc. When this is used, actual device is used for getting the compiler IR. platform_name: The name of the platform, e.g. "TPU". See the comment in `get_compiler_ir` in `context.py`. Returns: The compiler IR bytes.
github-repos
def _get_bounding_box(self, box: 'torch.Tensor') -> Dict[str, int]: if self.framework != 'pt': raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.') xmin, ymin, xmax, ymax = box.int().tolist() bbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax} return bbox
Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... } Args: box (`torch.Tensor`): Tensor containing the coordinates in corners format. Returns: bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
github-repos
def dot(
    self,
    coords_a: Vector3Like,
    coords_b: Vector3Like,
    frac_coords: bool = False,
) -> np.ndarray:
    """Compute the scalar product of vector(s).

    Args:
        coords_a, coords_b: Array-like objects with the coordinates.
        frac_coords (bool): Boolean stating whether the vector corresponds to
            fractional or cartesian coordinates.

    Returns:
        one-dimensional `numpy` array.

    Raises:
        ValueError: If the two coordinate arrays have different lengths.
        TypeError: If either array is complex.
    """
    coords_a, coords_b = (
        np.reshape(coords_a, (-1, 3)),
        np.reshape(coords_b, (-1, 3)),
    )
    if len(coords_a) != len(coords_b):
        # Bug fix: the original raised ValueError with an empty message.
        raise ValueError(
            "Coordinates must have the same length: "
            "{} != {}".format(len(coords_a), len(coords_b))
        )
    if np.iscomplexobj(coords_a) or np.iscomplexobj(coords_b):
        raise TypeError("Complex array!")
    if not frac_coords:
        cart_a, cart_b = coords_a, coords_b
    else:
        cart_a = np.reshape(
            [self.get_cartesian_coords(vec) for vec in coords_a], (-1, 3)
        )
        cart_b = np.reshape(
            [self.get_cartesian_coords(vec) for vec in coords_b], (-1, 3)
        )
    # Use np.dot explicitly instead of relying on an ambient
    # `from numpy import dot`, which this method's own name shadows.
    return np.array([np.dot(a, b) for a, b in zip(cart_a, cart_b)])
Compute the scalar product of vector(s). Args: coords_a, coords_b: Array-like objects with the coordinates. frac_coords (bool): Boolean stating whether the vector corresponds to fractional or cartesian coordinates. Returns: one-dimensional `numpy` array.
juraj-google-style
def reset(self):
    """Calls `reset` on all our Preprocessor objects.

    Returns:
        A list of tensors to be fetched.
    """
    # A preprocessor may return None instead of a list; treat that as empty.
    all_fetches = []
    for proc in self.preprocessors:
        result = proc.reset()
        if result:
            all_fetches.extend(result)
    return all_fetches
Calls `reset` on all our Preprocessor objects. Returns: A list of tensors to be fetched.
codesearchnet
def run(self, dag): num_dag_qubits = sum([qreg.size for qreg in dag.qregs.values()]) if num_dag_qubits > self.coupling_map.size(): raise TranspilerError('Number of qubits greater than device.') self.property_set['layout'] = Layout.generate_trivial_layout(*dag.qregs.values())
Pick a layout by assigning n circuit qubits to device qubits 0, .., n-1. Args: dag (DAGCircuit): DAG to find layout for. Raises: TranspilerError: if dag wider than self.coupling_map
juraj-google-style
def __init__(self, enum_class): import enum if not issubclass(enum_class, enum.Enum): raise TypeError('{} is not a subclass of Enum.'.format(enum_class)) if not enum_class.__members__: raise ValueError('enum_class cannot be empty, but "{}" is empty.' .format(enum_class)) super(EnumClassParser, self).__init__() self.enum_class = enum_class
Initializes EnumParser. Args: enum_class: class, the Enum class with all possible flag values. Raises: TypeError: When enum_class is not a subclass of Enum. ValueError: When enum_class is empty.
juraj-google-style
def http_exception(channel, title): gui = ui_embed.UI(channel, 'Too much help', '{} is too helpful! Try trimming some of the help messages.'.format(title), modulename=modulename) return gui
Creates an embed UI containing the 'too long' error message Args: channel (discord.Channel): The Discord channel to bind the embed to title (str): The title of the embed Returns: ui (ui_embed.UI): The embed UI object
codesearchnet
def delete_attachment(cls, session, attachment): return super(Conversations, cls).delete( session, attachment, endpoint_override='/attachments/%s.json' % attachment.id, out_type=Attachment, )
Delete an attachment. Args: session (requests.sessions.Session): Authenticated session. attachment (helpscout.models.Attachment): The attachment to be deleted. Returns: NoneType: Nothing.
juraj-google-style
def convert_ini(config_dict): config_lines = [] for (env, configs) in sorted(config_dict.items()): for (resource, app_properties) in sorted(configs.items()): try: for (app_property, value) in sorted(app_properties.items()): variable = '{env}_{resource}_{app_property}'.format(env=env, resource=resource, app_property=app_property).upper() if isinstance(value, (dict, DeepChainMap)): safe_value = "'{0}'".format(json.dumps(dict(value))) else: safe_value = json.dumps(value) line = '{variable}={value}'.format(variable=variable, value=safe_value) LOG.debug('INI line: %s', line) config_lines.append(line) except AttributeError: resource = resource.upper() app_properties = "'{}'".format(json.dumps(app_properties)) line = '{0}={1}'.format(resource, app_properties) LOG.debug('INI line: %s', line) config_lines.append(line) return config_lines
Convert _config_dict_ into a list of INI formatted strings. Args: config_dict (dict): Configuration dictionary to be flattened. Returns: (list) Lines to be written to a file in the format of KEY1_KEY2=value.
codesearchnet
def _det_large_enough_mask(x, det_bounds):
    """Returns whether the input matches the given determinant limit.

    Args:
      x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
      det_bounds: A floating-point `Tensor` that must broadcast to shape
        `[B1, ..., Bn]`, giving the desired lower bound on the
        determinants in `x`.

    Returns:
      mask: A floating-point `Tensor` of shape [B1, ..., Bn].  Each
        scalar is 1 if the corresponding matrix had determinant above
        the corresponding bound, otherwise 0.
    """
    dets = tf.linalg.det(x)
    return tf.cast(dets > det_bounds, dtype=x.dtype)
Returns whether the input matches the given determinant limit. Args: x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`. det_bounds: A floating-point `Tensor` that must broadcast to shape `[B1, ..., Bn]`, giving the desired lower bound on the determinants in `x`. Returns: mask: A floating-point `Tensor` of shape [B1, ..., Bn]. Each scalar is 1 if the corresponding matrix had determinant above the corresponding bound, otherwise 0.
codesearchnet
def delete(self, uri):
    """Delete a Fedora Object from the repository.

    Args:
        uri (str): URI of the Fedora Object.

    Returns:
        bool: True when the DELETE request succeeded, False on HTTP error.
    """
    try:
        self.connect(uri, method='DELETE')
    except urllib.error.HTTPError:
        return False
    return True
Method deletes a Fedora Object in the repository Args: uri(str): URI of Fedora Object
codesearchnet
def get_latest_score_for_submission(submission_uuid, read_replica=False): try: submission_model = _get_submission_model(submission_uuid, read_replica) score_qs = Score.objects.filter( submission__uuid=submission_model.uuid ).order_by("-id").select_related("submission") if read_replica: score_qs = _use_read_replica(score_qs) score = score_qs[0] if score.is_hidden(): return None except (IndexError, Submission.DoesNotExist): return None return ScoreSerializer(score).data
Retrieve the latest score for a particular submission. Args: submission_uuid (str): The UUID of the submission to retrieve. Kwargs: read_replica (bool): If true, attempt to use the read replica database. If no read replica is available, use the default database. Returns: dict: The serialized score model, or None if no score is available.
juraj-google-style
def add_embedded_campaign(self, id, collection, campaign, confidence, analyst, date, description): if (type(id) is not ObjectId): id = ObjectId(id) obj = getattr(self.db, collection) result = obj.find({'_id': id, 'campaign.name': campaign}) if (result.count() > 0): return else: log.debug('Adding campaign to set: {}'.format(campaign)) campaign_obj = {'analyst': analyst, 'confidence': confidence, 'date': date, 'description': description, 'name': campaign} result = obj.update({'_id': id}, {'$push': {'campaign': campaign_obj}}) return result
Adds an embedded campaign to the TLO. Args: id: the CRITs object id of the TLO collection: The db collection. See main class documentation. campaign: The campaign to assign. confidence: The campaign confidence analyst: The analyst making the assignment date: The date of the assignment description: A description Returns: The resulting mongo object
codesearchnet
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
    """
    Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
    special tokens using the tokenizer `prepare_for_model` method.

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the token list is already formatted with special tokens for the model.

    Returns:
        `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0,
            token_ids_1=token_ids_1,
            already_has_special_tokens=True,
        )
    # One special token before and after each sequence; sequence tokens are 0.
    mask = [1]
    mask.extend(0 for _ in token_ids_0)
    mask.append(1)
    if token_ids_1 is not None:
        mask.extend(0 for _ in token_ids_1)
        mask.append(1)
    return mask
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
def allele_clusters(dists, t=0.025):
    """Flat clusters from distance matrix

    Args:
        dists (numpy.array): pdist distance matrix (condensed form)
        t (float): fcluster (tree cutting) distance threshold

    Returns:
        dict of lists: cluster number to list of indices of distances in cluster
    """
    # Bug fix: the threshold parameter `t` was previously ignored and the
    # value 0.025 was hard-coded in the fcluster call.
    clusters = fcluster(linkage(dists), t, criterion='distance')
    cluster_idx = defaultdict(list)
    for idx, cl in enumerate(clusters):
        cluster_idx[cl].append(idx)
    return cluster_idx
Flat clusters from distance matrix Args: dists (numpy.array): pdist distance matrix t (float): fcluster (tree cutting) distance threshold Returns: dict of lists: cluster number to list of indices of distances in cluster
codesearchnet
def count_params(self):
    """Count the total number of scalars composing the layer's weights.

    Returns:
        An integer count.

    Raises:
        ValueError: if the layer isn't yet built (in which case its
            weights aren't yet defined).
    """
    if not self.built:
        if getattr(self, '_is_graph_network', False):
            # Functional/graph networks know their input structure, so the
            # layer can be built on demand from ``self.inputs``.
            with tf_utils.maybe_init_scope(self):
                self._maybe_build(self.inputs)
        else:
            # Subclassed layers have no known input shape; the caller must
            # build explicitly before the weights exist.
            raise ValueError('You tried to call `count_params` on ' + self.name + ", but the layer isn't built. You can build it manually via: `" + self.name + '.build(batch_input_shape)`.')
    return layer_utils.count_params(self.weights)
Count the total number of scalars composing the weights. Returns: An integer count. Raises: ValueError: if the layer isn't yet built (in which case its weights aren't yet defined).
github-repos
def set_target(self, target: EventDispatcherBase) -> None:
    """Record the event dispatcher that dispatches this event.

    Intended to be called once by the dispatching ``EventDispatcherBase``.

    Args:
        target (EventDispatcherBase): The dispatcher that will dispatch
            this event.

    Raises:
        PermissionError: If the target has already been set.
        TypeError: If ``target`` is not an ``EventDispatcherBase`` instance.
    """
    if self._target is not None:
        raise PermissionError("The target property already has a valid value.")
    if isinstance(target, EventDispatcherBase):
        self._target = target
    else:
        raise TypeError("Invalid target type: {}".format(target))
This method should be called by the event dispatcher that dispatches this event to set its target property. Args: target (EventDispatcherBase): The event dispatcher that will dispatch this event. Raises: PermissionError: If the target property of the event has already been set. TypeError: If `target` is not an `EventDispatcherBase` instance.
juraj-google-style
def shapeplot(h, ax, sections=None, order='pre', cvals=None, clim=None, cmap=cm.YlOrBr_r, legend=True, **kwargs):
    """Plot a 3D shapeplot of a NEURON morphology.

    Args:
        h: hoc object used to interface with NEURON.
        ax: matplotlib axis for plotting.
            NOTE(review): ``ax`` and ``legend`` are currently unused; plotting
            goes through ``plt.plot`` — confirm against callers.
        sections: list of ``h.Section()`` objects to plot. If None, sections
            are collected via pre-order traversal ('pre') or ``h.allsec()``.
        order: 'pre' for pre-order traversal, anything else for ``h.allsec()``.
        cvals: per-segment values (or explicit color specs) mapped onto
            ``cmap``; useful for voltage/calcium shading.
        clim: [min, max] color limits; derived from ``cvals`` when None.
        cmap: matplotlib colormap for scalar ``cvals``.
        **kwargs: forwarded to matplotlib's ``plot``.

    Returns:
        list of matplotlib line objects making up the shapeplot.
    """
    if sections is None:
        if order == 'pre':
            sections = allsec_preorder(h)
        else:
            sections = list(h.allsec())
    # Derive color limits from the data when not supplied.
    if cvals is not None and clim is None:
        clim = [np.nanmin(cvals), np.nanmax(cvals)]
    lines = []
    i = 0  # running segment index across all sections, aligned with cvals
    all_diams = [get_section_diams(h, sec) for sec in sections]
    for isec, sec in enumerate(sections):
        xyz = get_section_path(h, sec)
        seg_paths = interpolate_jagged(xyz, sec.nseg)
        diams = all_diams[isec]
        for j, path in enumerate(seg_paths):
            # Bug fix: the slices were mangled as ``path[(:, 0)]`` (invalid
            # syntax); restored to ``path[:, 0]`` etc.
            line, = plt.plot(path[:, 0], path[:, 1], path[:, 2], '-k', **kwargs)
            try:
                line.set_linewidth(diams[j])
            except Exception:
                # Fewer diameter samples than segments; keep default width.
                # (Was a bare ``except:``; narrowed to Exception.)
                pass
            if cvals is not None:
                if isinstance(cvals[i], numbers.Number):
                    try:
                        col = cmap(int(((cvals[i] - clim[0]) * 255) / (clim[1] - clim[0])))
                    except Exception:
                        # Degenerate clim (e.g. min == max) or NaN value.
                        col = cmap(0)
                else:
                    # Non-numeric entries are taken as explicit color specs.
                    col = cvals[i]
                line.set_color(col)
            lines.append(line)
            i += 1
    return lines
Plots a 3D shapeplot Args: h = hocObject to interface with neuron ax = matplotlib axis for plotting sections = list of h.Section() objects to be plotted order = { None= use h.allsec() to get sections 'pre'= pre-order traversal of morphology } cvals = list/array with values mapped to color by cmap; useful for displaying voltage, calcium or some other state variable across the shapeplot. **kwargs passes on to matplotlib (e.g. color='r' for red lines) Returns: lines = list of line objects making up shapeplot
codesearchnet
def setDocumentedBy(self, documented_pid, documenting_pid):
    """Assert a CiTO ``isDocumentedBy`` triple between two Science Objects.

    Adds: ``documented_pid cito:isDocumentedBy documenting_pid``.

    Args:
        documented_pid: str PID of the object that is documented.
        documenting_pid: str PID of the object that documents it.
    """
    self._check_initialized()
    subject_id = self.getObjectByPid(documented_pid)
    object_id = self.getObjectByPid(documenting_pid)
    self.add((subject_id, CITO.isDocumentedBy, object_id))
Add a CiTO, the Citation Typing Ontology, triple asserting that ``documented_pid`` isDocumentedBy ``documenting_pid``. Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid`` Args: documented_pid: str PID of a Science Object that is documented by ``documenting_pid``. documenting_pid: str PID of a Science Object that documents ``documented_pid``.
juraj-google-style
def screenshot(self, filename=None, scale=1.0, method=None):
    """Take a device screenshot.

    Args:
        filename (str): optional, save image into this filename.
        scale (float): scale factor applied to the capture.
        method (str): one of 'minicap', 'screencap'; defaults to
            ``self._screenshot_method``.

    Returns:
        PIL.Image: the captured screenshot.

    Raises:
        RuntimeError: if ``method`` is not a supported capture method.
    """
    image = None
    method = method or self._screenshot_method
    if method == 'minicap':
        try:
            image = self._adb_minicap(scale)
        except Exception as e:
            # minicap may be missing or broken on this device: remember the
            # working method and retry once via screencap.
            # (logger.warn is a deprecated alias; use warning().)
            logger.warning('use minicap failed, fallback to screencap. error detail: %s', e)
            self._screenshot_method = 'screencap'
            return self.screenshot(filename=filename, scale=scale)
    elif method == 'screencap':
        image = self._adb_screencap(scale)
    else:
        raise RuntimeError('No such method(%s)' % method)
    if filename:
        image.save(filename)
    return image
Take device screenshot

Args:
    - filename(string): optional, save into filename
    - scale(float): scale size
    - method(string): one of minicap,screencap

Return:
    PIL.Image
codesearchnet
def validate_word(self, word):
    """Return True if ``word`` divides exhaustively into known IPA segments.

    Repeatedly strips the longest prefix matched by ``self.seg_regex``;
    succeeds only if the whole string is consumed. An empty string is
    trivially valid.

    Args:
        word (unicode): input word as a Unicode IPA string.

    Returns:
        bool: True if every character belongs to some matched segment.
    """
    remainder = word
    while remainder:
        m = self.seg_regex.match(remainder)
        if m is None:
            return False
        remainder = remainder[m.end():]
    return True
Returns True if `word` consists exhaustively of valid IPA segments Args: word (unicode): input word as Unicode IPA string Returns: bool: True if `word` can be divided exhaustively into IPA segments that exist in the database
juraj-google-style
def to_query(self, fields=None):
    """Return a Query selecting ``fields`` from this Table.

    Args:
        fields: fields to return. None selects all fields ('*'); a list of
            names is joined with commas; a string is injected as-is after
            SELECT.

    Returns:
        A Query object over the table's records.
    """
    from . import _query
    if fields is None:
        field_list = '*'
    elif isinstance(fields, list):
        field_list = ','.join(fields)
    else:
        field_list = fields
    sql = 'SELECT %s FROM %s' % (field_list, self._repr_sql_())
    return _query.Query(sql, context=self._context)
Return a Query for this Table. Args: fields: the fields to return. If None, all fields will be returned. This can be a string which will be injected into the Query after SELECT, or a list of field names. Returns: A Query object that will return the specified fields from the records in the Table.
codesearchnet
def make_val_and_grad_fn(value_fn):
    """Decorate ``value_fn`` to return both its value and its gradient.

    The decorated function accepts the same input point and returns a pair
    of Tensors ``(value, gradient)`` computed via ``value_and_gradient``,
    which is the shape expected by the optimizer methods.

    Args:
        value_fn: A python function to decorate.

    Returns:
        The decorated function.
    """
    @functools.wraps(value_fn)
    def wrapped(x):
        return value_and_gradient(value_fn, x)
    return wrapped
Function decorator to compute both function value and gradient. For example: ``` @tff.math.make_val_and_grad_fn def quadratic(x): return tf.reduce_sum(scales * (x - minimum) ** 2, axis=-1) ``` Turns `quadratic` into a function that accepts a point as a `Tensor` as input and returns a tuple of two `Tensor`s with the value and the gradient of the defined quadratic function evaluated at the input point. This is useful for constructing functions to optimize with tff.math.optimizer methods. Args: value_fn: A python function to decorate. Returns: The decorated function.
github-repos
def _get_client(self):
    """Build the OSS2 auth client from the stored parameters.

    Selection rules: a ``security_token`` key means STS credentials;
    any other non-empty parameters mean plain key auth; empty parameters
    mean anonymous access.

    Returns:
        oss2.Auth, oss2.StsAuth or oss2.AnonymousAuth: client.
    """
    params = self._storage_parameters
    if 'security_token' in params:
        auth_class = _oss.StsAuth
    elif params:
        auth_class = _oss.Auth
    else:
        auth_class = _oss.AnonymousAuth
    return auth_class(**params)
OSS2 Auth client Returns: oss2.Auth or oss2.StsAuth: client
codesearchnet
def symbolic_tensor_id(self, graph_id, op_name, output_slot):
    """Look up the ID of a symbolic tensor.

    Args:
        graph_id: ID of the immediately-enclosing graph.
        op_name: Name of the op.
        output_slot: Output slot as an int.

    Returns:
        The ID of the symbolic tensor as an int.
    """
    graph = self._graph_by_id[graph_id]
    return graph.get_tensor_id(op_name, output_slot)
Get the ID of a symbolic tensor. Args: graph_id: The ID of the immediately-enclosing graph. op_name: Name of the op. output_slot: Output slot as an int. Returns: The ID of the symbolic tensor as an int.
github-repos
def get_num_days_required(offset, period='d', perc_required=0.90):
    """Estimate how many periods of data are required over a lookback window.

    Converts the calendar span of ``offset`` into an approximate count of
    trading periods and scales by the fraction of "good" days required.

    Args:
        offset (DateOffset): lookback period.
        period (str): 'd' (daily), 'm' (monthly, ~20 trading days) or
            'y' (yearly, ~252 trading days).
        perc_required (float): fraction of the expected observations required.

    Returns:
        float: required number of periods.

    Raises:
        NotImplementedError: if ``period`` is not one of d, m, y.
    """
    anchor = pd.to_datetime('2010-01-01')
    span = anchor - (anchor - offset)
    # ~69% of calendar days are trading days (rough business-day fraction).
    trading_days = span.days * 0.69
    periods_per_unit = {'d': 1.0, 'm': 20.0, 'y': 252.0}
    if period not in periods_per_unit:
        raise NotImplementedError(
            'period not supported. Supported periods are d, m, y')
    return (trading_days / periods_per_unit[period]) * perc_required
Estimates the number of days required to assume that data is OK. Helper function used to determine if there are enough "good" data days over a given period. Args: * offset (DateOffset): Offset (lookback) period. * period (str): Period string. * perc_required (float): percentage of number of days expected required.
juraj-google-style
def write_fasta_file(self, outfile, force_rerun=False):
    """Write the protein sequence to a FASTA file and point ``seq`` at it.

    Args:
        outfile (str): Path of the FASTA file to write.
        force_rerun (bool): Overwrite ``outfile`` if it already exists.
    """
    # Skip the write entirely when the file exists and no rerun is forced.
    if not ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        return
    SeqIO.write(self, outfile, 'fasta')
    # Future sequence loads now come straight from this file.
    self.sequence_path = outfile
Write a FASTA file for the protein sequence, ``seq`` will now load directly from this file. Args: outfile (str): Path to new FASTA file to be written to force_rerun (bool): If an existing file should be overwritten
codesearchnet
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
    """Build the key-to-transformation map for the TELEX input method.

    Args:
        w_shorthand (bool): allow a stand-alone ``w`` to be interpreted
            as an ư. Defaults to True.
        brackets_shorthand (bool): allow ``][`` as shorthand for ươ.
            Defaults to True.

    Returns:
        dict: definition dictionary suitable for process_key().
    """
    telex = {
        'a': 'a^',
        'o': 'o^',
        'e': 'e^',
        'w': ['u*', 'o*', 'a+'],
        'd': 'd-',
        'f': '\\',
        's': '/',
        'r': '?',
        'x': '~',
        'j': '.',
    }
    if w_shorthand:
        telex['w'] = telex['w'] + ['<ư']
    if brackets_shorthand:
        telex[']'] = '<ư'
        telex['['] = '<ơ'
        telex['}'] = '<Ư'
        telex['{'] = '<Ơ'
    return telex
Create a definition dictionary for the TELEX input method Args: w_shorthand (optional): allow a stand-alone w to be interpreted as an ư. Default to True. brackets_shorthand (optional, True): allow typing ][ as shorthand for ươ. Default to True. Returns a dictionary to be passed into process_key().
juraj-google-style
def supported_device(self, index=0):
    """Get information about the supported device at ``index``.

    Args:
        self (JLink): the ``JLink`` instance.
        index (int): index of the device whose information to get.

    Returns:
        A ``JLinkDeviceInfo`` describing the requested device.

    Raises:
        ValueError: if index is less than 0 or >= supported device count.
    """
    if not util.is_natural(index) or index >= self.num_supported_devices():
        raise ValueError('Invalid index.')
    info = structs.JLinkDeviceInfo()
    # Populates ``info`` in place; the C return code was previously bound to
    # an unused local, so it is now deliberately discarded.
    self._dll.JLINKARM_DEVICE_GetInfo(index, ctypes.byref(info))
    return info
Gets the device at the given ``index``. Args: self (JLink): the ``JLink`` instance index (int): the index of the device whose information to get Returns: A ``JLinkDeviceInfo`` describing the requested device. Raises: ValueError: if index is less than 0 or >= supported device count.
juraj-google-style
def parse_example_tensor(examples, train_config, keep_target):
    """Decode CSV example strings into a dict of column tensors.

    Args:
        examples: string tensor of CSV records.
        train_config: training config holding 'csv_header', 'csv_defaults'
            and 'target_column'.
        keep_target: if True the target column is expected in the input and
            returned in the features dict; otherwise it is dropped from the
            header before decoding.

    Returns:
        Dict of feature name to tensor (each with a trailing axis of size 1).
    """
    if keep_target:
        csv_header = train_config['csv_header']
    else:
        target = train_config['target_column']
        csv_header = [name for name in train_config['csv_header'] if name != target]
    record_defaults = [[train_config['csv_defaults'][name]] for name in csv_header]
    columns = tf.decode_csv(examples, record_defaults, name='csv_to_tensors')
    # decode_csv yields rank-1 tensors; add a feature axis of size 1.
    columns = [tf.expand_dims(col, axis=1) for col in columns]
    return dict(zip(csv_header, columns))
Read the csv files. Args: examples: string tensor train_config: training config keep_target: if true, the target column is expected to exist and it is returned in the features dict. Returns: Dict of feature_name to tensor. Target feature is in the dict.
codesearchnet
def simplify_countryname(cls, country):
    """Simplify a country name by stripping descriptive text.

    Removes trailing clauses after ',' or ':', parenthesised text, and any
    words listed in the class's simplification/abbreviation tables
    (e.g. DEMOCRATIC, REPUBLIC OF).

    Args:
        country (str): Country name to simplify.

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and the
        list of words removed from the original name.
    """
    countryupper = country.upper()
    # Keep the full word list of the original name; the simplified name is
    # removed from it at the end so only the discarded words remain.
    words = get_words_in_sentence(countryupper)
    # Drop everything after a comma or colon (descriptive clauses).
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Strip parenthesised text, shortest match first.
    regex = re.compile('\(.+?\)')
    countryupper = regex.sub('', countryupper)
    # Deep copy so the class-level simplification list is never mutated.
    remove = copy.deepcopy(cls.simplifications)
    # Abbreviations are removed literally; their expansions are queued for
    # whole-word removal below.
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    # Remove all queued words in one case-insensitive whole-word pass.
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    # If several words survive, keep only the first as the simplified name.
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc. Args: country (str): Country name to simplify Returns: Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
juraj-google-style
def assign_add(self, variable, value):
    """Add ``value`` to ``variable`` in place.

    Intended for use in optimizers instead of calling
    ``variable.assign_add(value)`` directly, so backends can override this
    hook with backend-specific optimizations.

    Args:
        variable: The variable to update.
        value: The value to add to the variable.
    """
    variable.assign_add(value)
Add a value to a variable. This should be used in optimizers instead of `variable.assign_add(value)` to support backend specific optimizations. Note that the variable can be a model variable or an optimizer variable; it can be a backend native variable or a Keras variable. Args: variable: The variable to update. value: The value to add to the variable.
github-repos
def specific_file_rst_filename(self, source_filename: str) -> str:
    """Map a source filename to its autodoc RST filename.

    The RST path mirrors the source path relative to the highest code
    directory, rooted at the autodoc RST root, with ``.rst`` appended to
    the full basename (so e.g. ``thing.h`` and ``thing.cpp`` cannot
    collide).

    Args:
        source_filename: source filename within the current project.

    Returns:
        RST filename.
    """
    rel_path = relative_filename_within_dir(source_filename, self.highest_code_dir)
    rst_name = basename(source_filename) + EXT_RST
    rst_filename = join(self.autodoc_rst_root_dir, dirname(rel_path), rst_name)
    log.debug('Source {!r} -> RST {!r}', source_filename, rst_filename)
    return rst_filename
Gets the RST filename corresponding to a source filename.
See the help for the constructor for more details.

Args:
    source_filename: source filename within current project

Returns:
    RST filename

Note in particular: the way we structure the directories means that we won't get clashes between files with identical names in two different directories. However, we must also incorporate the original source filename, in particular for C++ where ``thing.h`` and ``thing.cpp`` must not generate the same RST filename. So we just add ``.rst``.
codesearchnet