code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def port(self, value):
    """Set the connection's port property and mirror it into the XML.

    Args:
        value: New port value (string), or None to unset the port.

    Returns:
        Nothing.
    """
    self._port = value
    if value is None:
        # None means "unset": drop the attribute if it is present.
        self._connectionXML.attrib.pop('port', None)
    else:
        self._connectionXML.set('port', value)
Set the connection's port property. Args: value: New port value. String. Returns: Nothing.
codesearchnet
def rotoreflection(axis, angle, origin=(0, 0, 0)):
    """Returns a roto-reflection symmetry operation.

    Args:
        axis (3x1 array): Axis of rotation / mirror normal.
        angle (float): Angle in degrees.
        origin (3x1 array): Point left invariant by the roto-reflection.
            Defaults to (0, 0, 0).

    Returns:
        SymmOp: the combined rotation-then-reflection operation.
    """
    rotation = SymmOp.from_origin_axis_angle(origin, axis, angle)
    mirror = SymmOp.reflection(axis, origin)
    combined = np.dot(rotation.affine_matrix, mirror.affine_matrix)
    return SymmOp(combined)
Returns a roto-reflection symmetry operation Args: axis (3x1 array): Axis of rotation / mirror normal angle (float): Angle in degrees origin (3x1 array): Point left invariant by roto-reflection. Defaults to (0, 0, 0). Return: Roto-reflection operation
juraj-google-style
def get_controller(self, path):
    """Return the controller that handles the given path.

    Args:
        path: requested path, e.g. '/blog/post_view/15'.

    Returns:
        The registered controller, or None when no route matches.
    """
    segments = path.lstrip('/').split('/', 2)
    if len(segments) >= 2:
        # Two-segment routes take precedence; no fallback if unregistered.
        return self._routes.get(segments[0] + '/' + segments[1])
    # Single segment (or empty path, which maps to the index route).
    return self._routes.get(segments[0] or 'index')
Return the controller that handles the given path. Args: - path: requested path, like: /blog/post_view/15
juraj-google-style
def get_session(self, username, password, remote='127.0.0.1', proxy=None):
    """Create a user session on the Crowd server.

    Args:
        username: The account username.
        password: The account password.
        remote: The remote address of the user; can be used to create
            multiple concurrent sessions (the calling host may need to be
            a trusted proxy in Crowd).
        proxy: Value of the X-Forwarded-For server header, if any.

    Returns:
        dict: user attributes on success, or None if authentication failed.
    """
    factors = [{'name': 'remote_address', 'value': remote}]
    if proxy:
        factors.append({'name': 'X-Forwarded-For', 'value': proxy})
    params = {
        'username': username,
        'password': password,
        'validation-factors': {'validationFactors': factors},
    }
    response = self._post(self.rest_url + '/session',
                          data=json.dumps(params),
                          params={'expand': 'user'})
    if not response.ok:
        return None
    return response.json()
Create a session for a user. Attempts to create a user session on the Crowd server. Args: username: The account username. password: The account password. remote: The remote address of the user. This can be used to create multiple concurrent sessions for a user. The host you run this program on may need to be configured in Crowd as a trusted proxy for this to work. proxy: Value of X-Forwarded-For server header. Returns: dict: A dict mapping of user attributes if the application authentication was successful. See the Crowd documentation for the authoritative list of attributes. None: If authentication failed.
codesearchnet
def to_dict(self) -> Dict[str, Any]:
    """Serializes this instance to a Python dictionary.

    Returns:
        `Dict[str, Any]`: all attributes of this configuration instance,
        minus transient bookkeeping fields, plus the library version.
    """
    output = copy.deepcopy(self.__dict__)
    # These keys are internal bookkeeping and must not be serialized.
    for transient_key in ('_commit_hash', '_original_object_hash', 'compile_config'):
        output.pop(transient_key, None)
    output['transformers_version'] = __version__
    self.dict_torch_dtype_to_str(output)
    return output
Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
github-repos
def _initial_guess(self):
    """Quadratic fit of energies vs. volumes to seed the EOS parameters.

    Fits E(V) = a*V**2 + b*V + c and derives the equilibrium values
    analytically from the parabola. Also stores [a, b, c] on
    ``self.eos_params``.

    Returns:
        tuple: (e0, b0, b1, v0) — minimum energy, bulk modulus (in the
        fit's native units), its pressure derivative (fixed at 4), and
        the equilibrium volume.

    Raises:
        EOSError: if the parabola's minimum lies outside the range of the
            input volumes.
    """
    a, b, c = np.polyfit(self.volumes, self.energies, 2)
    self.eos_params = [a, b, c]

    v0 = -b / (2 * a)
    e0 = a * v0 ** 2 + b * v0 + c
    b0 = 2 * a * v0
    b1 = 4  # common approximation for the pressure derivative of B

    vmin, vmax = min(self.volumes), max(self.volumes)
    # BUG FIX: the original condition `(not (vmin < v0)) and (v0 < vmax)`
    # only fired when v0 <= vmin; the sanity check must fire whenever the
    # parabola minimum falls anywhere outside the sampled volume range.
    if not vmin < v0 < vmax:
        raise EOSError('The minimum volume of a fitted parabola is not in the input volumes\n.')
    return (e0, b0, b1, v0)
Quadratic fit to get an initial guess for the parameters. Returns: tuple: (e0, b0, b1, v0)
codesearchnet
def _timesfm_masked_mean_std(inputs: torch.Tensor, padding: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Calculates mean and standard deviation of `inputs` across axis 1.

    Values where `padding` is 1 are excluded. The statistics come from the
    first patch (along axis 1) that has at least three non-padded values.
    NOTE(review): the upstream docstring says "more than three", but the
    code tests `>= 3` — confirm the intended threshold.

    Args:
        inputs: A PyTorch tensor of shape [b, n, p].
        padding: A PyTorch tensor of shape [b, n, p] with values 0 or 1.

    Returns:
        A tuple (masked_mean, masked_std), one value per batch element.
    """
    def _get_patch_index(arr: torch.Tensor):
        # Index of the first patch whose valid-count reaches 3; falls back
        # to the last patch when no patch reaches the threshold (argmax of
        # an all-zero row is 0, so the row_sum check is required).
        indices = torch.argmax((arr >= 3).to(torch.int32), dim=1)
        row_sum = (arr >= 3).to(torch.int32).sum(dim=1)
        return torch.where(row_sum == 0, arr.shape[1] - 1, indices)
    # Number of non-padded values per patch: [b, n].
    pad_sum = torch.sum(1 - padding, dim=2)
    patch_indices = _get_patch_index(pad_sum)
    bidxs = torch.arange(inputs.shape[0])
    # Gather the chosen patch (and its padding) for every batch element.
    arr = inputs[bidxs, patch_indices, :]
    pad = padding[bidxs, patch_indices, :]
    mask = 1 - pad
    num_valid_elements = torch.sum(mask, dim=1)
    # Guard against division by zero for fully padded patches.
    num_valid_elements = torch.where(num_valid_elements == 0, torch.tensor(1, dtype=num_valid_elements.dtype, device=num_valid_elements.device), num_valid_elements)
    masked_sum = torch.sum(arr * mask, dim=1)
    masked_squared_sum = torch.sum((arr * mask) ** 2, dim=1)
    masked_mean = masked_sum / num_valid_elements
    # Var = E[x^2] - E[x]^2 over the valid entries only.
    masked_var = masked_squared_sum / num_valid_elements - masked_mean ** 2
    # Clamp tiny negative variances caused by floating-point error.
    masked_var = torch.where(masked_var < 0.0, torch.tensor(0.0, dtype=masked_var.dtype, device=masked_var.device), masked_var)
    masked_std = torch.sqrt(masked_var)
    return (masked_mean, masked_std)
Calculates mean and standard deviation of `inputs` across axis 1. It excludes values where `padding` is 1. Args: inputs: A PyTorch tensor of shape [b, n, p]. padding: A PyTorch tensor of shape [b, n, p] with values 0 or 1. Returns: A tuple containing the mean and standard deviation. We return the statistics of the first patch with more than three non-padded values.
github-repos
def summary(self, line_length=None, positions=None, print_fn=None):
    """Prints a string summary of the network.

    Args:
        line_length: Total length of printed lines.
        positions: Relative or absolute positions of log elements in each
            line. If not provided, defaults to `[.33, .55, .67, 1.]`.
        print_fn: Print function to use; defaults to `print`. Called on
            each line of the summary.

    Raises:
        ValueError: if called before the model is built.
    """
    if self.built:
        layer_utils.print_summary(self, line_length=line_length, positions=positions, print_fn=print_fn)
        return
    raise ValueError('This model has not yet been built. Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.')
Prints a string summary of the network. Args: line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`. print_fn: Print function to use. Defaults to `print`. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. Raises: ValueError: if `summary()` is called before the model is built.
github-repos
def BasenamePath(self, path):
    """Determines the basename of the path.

    Args:
        path (str): path.

    Returns:
        str: basename of the path.
    """
    separator = self.PATH_SEPARATOR
    # A trailing separator would otherwise yield an empty basename.
    trimmed = path[:-1] if path.endswith(separator) else path
    return trimmed.rpartition(separator)[2]
Determines the basename of the path. Args: path (str): path. Returns: str: basename of the path.
juraj-google-style
def __init__(
    self, name, data_type_definition, aliases=None, data_type=None,
    description=None, urls=None):
    """Initializes a sequence data type definition.

    Args:
        name (str): name.
        data_type_definition (DataTypeDefinition): sequence element data
            type definition.
        aliases (Optional[list[str]]): aliases.
        data_type (Optional[str]): name of the sequence element data type.
        description (Optional[str]): description.
        urls (Optional[list[str]]): URLs.
    """
    super(ElementSequenceDataTypeDefinition, self).__init__(
        name, aliases=aliases, description=description, urls=urls)
    # Inherit the element's byte order; fall back to native when the
    # element definition does not define one.
    self.byte_order = getattr(
        data_type_definition, 'byte_order', definitions.BYTE_ORDER_NATIVE)
    # Size/count attributes start unset; presumably populated later by the
    # definitions reader — verify against the caller.
    self.elements_data_size = None
    self.elements_data_size_expression = None
    self.element_data_type = data_type
    self.element_data_type_definition = data_type_definition
    self.elements_terminator = None
    self.number_of_elements = None
    self.number_of_elements_expression = None
Initializes a sequence data type definition. Args: name (str): name. data_type_definition (DataTypeDefinition): sequence element data type definition. aliases (Optional[list[str]]): aliases. data_type (Optional[str]): name of the sequence element data type. description (Optional[str]): description. urls (Optional[list[str]]): URLs.
juraj-google-style
def cancel(self, workflow_id):
    """Cancels a running workflow.

    Args:
        workflow_id (str): Workflow id.

    Returns:
        Nothing. Raises via `raise_for_status` on an HTTP error.
    """
    self.logger.debug('Canceling workflow: ' + workflow_id)
    url = '%(wf_url)s/%(wf_id)s/cancel' % {
        'wf_url': self.workflows_url,
        'wf_id': workflow_id,
    }
    response = self.gbdx_connection.post(url, data='')
    response.raise_for_status()
Cancels a running workflow. Args: workflow_id (str): Workflow id. Returns: Nothing
juraj-google-style
def register_thread(self, thread):
    """Register a thread so it can later be joined.

    Args:
        thread: A Python thread to join.
    """
    self._lock.acquire()
    try:
        self._registered_threads.add(thread)
    finally:
        self._lock.release()
Register a thread to join. Args: thread: A Python thread to join.
github-repos
def list_changes(self):
    """Return a list of modified records.

    Only applicable for attached tables.

    Returns:
        A list of `(row_index, record)` tuples of modified records.

    Raises:
        ItsdbError: when called on a detached table.
    """
    if not self.is_attached():
        raise ItsdbError('changes are not tracked for detached tables.')
    # A non-None slot in _records marks a modified row.
    return [
        (index, self[index])
        for index, modified in enumerate(self._records)
        if modified is not None
    ]
Return a list of modified records. This is only applicable for attached tables. Returns: A list of `(row_index, record)` tuples of modified records Raises: :class:`delphin.exceptions.ItsdbError`: when called on a detached table
codesearchnet
def lsfiles(root='.', **kwargs):
    """Return only files from a directory listing.

    Arguments:
        root (str): Path to directory, relative or absolute.
        **kwargs: Additional arguments passed through to ls().

    Returns:
        list of str: file paths.

    Raises:
        OSError: If the root directory does not exist.
    """
    entries = ls(root=root, **kwargs)
    # When root is itself a file, ls() already returned just that file.
    if isfile(root):
        return entries
    return [entry for entry in entries if isfile(path(root, entry))]
Return only files from a directory listing. Arguments: root (str): Path to directory. Can be relative or absolute. **kwargs: Any additional arguments to be passed to ls(). Returns: list of str: A list of file paths. Raises: OSError: If root directory does not exist.
codesearchnet
def from_json_stat(datasets, naming='label', value='value'):
    """Decode JSON-stat formatted data into pandas.DataFrame objects.

    Deprecated; kept for backwards compatibility.

    Args:
        datasets (OrderedDict, list): data in JSON-stat format, previously
            deserialized by json.load()/json.loads(). Both list and
            OrderedDict inputs are accepted.
        naming (string, optional): dimension naming, 'label' or 'id'.
            Defaults to 'label'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        list: list of pandas.DataFrame with the imported data.
    """
    # NOTE(review): "ofDataset" in this user-visible message is a typo in
    # the original; left byte-identical here since it is runtime output.
    warnings.warn("Shouldn't use this function anymore! Now use read() methods ofDataset, Collection or Dimension.", DeprecationWarning)
    check_input(naming)
    results = []
    if (type(datasets) is list):
        # A list of collections: each element maps names to datasets.
        for (idx, element) in enumerate(datasets):
            for dataset in element:
                js_dict = datasets[idx][dataset]
                results.append(generate_df(js_dict, naming, value))
    elif (isinstance(datasets, OrderedDict) or (type(datasets) is dict) or isinstance(datasets, Dataset)):
        if ('class' in datasets):
            # JSON-stat v2: the object itself may be a single dataset.
            if (datasets['class'] == 'dataset'):
                js_dict = datasets
                results.append(generate_df(js_dict, naming, value))
        else:
            # JSON-stat v1: a mapping of dataset name -> dataset.
            for dataset in datasets:
                js_dict = datasets[dataset]
                results.append(generate_df(js_dict, naming, value))
    return results
Decode JSON-stat formatted data into pandas.DataFrame objects. Args: datasets (OrderedDict, list): data in JSON-stat format, previously deserialized to a Python object by json.load() or json.loads(), for example. Both list and OrderedDict are accepted as inputs. naming (string, optional): dimension naming. Possible values: 'label' or 'id'. Defaults to 'label'. value (string, optional): name of the value column. Defaults to 'value'. Returns: results (list): list of pandas.DataFrame with imported data.
codesearchnet
def text_set_fields(text, variables):
    """Replace {field} placeholders in text with values from variables.

    Missing fields default to empty strings.

    Args:
        text (string): A paragraph possibly containing {field} entries.
        variables (dict): keys mapping to fields, values to substitute.

    Returns:
        The text with all fields replaced, or the (normalized) original
        text if formatting fails.
    """
    normalized = RE_TEXT_FIELD.sub('{\\1}', text)
    try:
        return normalized.format_map(defaultdict(str, variables))
    except ValueError:
        # Malformed format spec: return the text unformatted.
        return normalized
Replaces fields in text with values from recipe. Fields in text are just are {field}, where field is a name of the variable. Missing fields default to blanks. Args: text (string) A paragraph possible containing {field} entries variables: (dict) The keys mapping to field, and values to replace Returns: A string with all values replaced. Or if an error occurs, original text.
github-repos
def _get_kind_name(param_type, is_list):
    """Returns the field name given a parameter type and is_list.

    Improvement: drops the dependency on the third-party `six` shim —
    on Python 3, `six.integer_types == (int,)` and
    `six.string_types == (str,)`, so the builtin types are exact
    equivalents.

    Args:
        param_type: Data type of the hparam.
        is_list: Whether this is a list.

    Returns:
        A string representation of the field name.

    Raises:
        ValueError: If the parameter type is not recognized.
    """
    # bool must be tested before int because bool subclasses int.
    if issubclass(param_type, bool):
        typename = 'bool'
    elif issubclass(param_type, int):
        typename = 'int64'
    elif issubclass(param_type, (str, bytes)):
        typename = 'bytes'
    elif issubclass(param_type, float):
        typename = 'float'
    else:
        raise ValueError('Unsupported parameter type: %s' % str(param_type))
    suffix = 'list' if is_list else 'value'
    return '_'.join([typename, suffix])
Returns the field name given parameter type and is_list. Args: param_type: Data type of the hparam. is_list: Whether this is a list. Returns: A string representation of the field name. Raises: ValueError: If parameter type is not recognized.
juraj-google-style
def is_traceback_filtering_enabled():
    """Check if traceback filtering is enabled.

    Returns:
        Boolean, `True` if traceback filtering is enabled (also the
        default when the flag has never been set), `False` otherwise.
    """
    return global_state.get_global_attribute('traceback_filtering', True)
Check if traceback filtering is enabled. Raw Keras tracebacks (also known as stack traces) involve many internal frames, which can be challenging to read through, while not being actionable for end users. By default, Keras filters internal frames in most exceptions that it raises, to keep traceback short, readable, and focused on what's actionable for you (your own code). See also `keras.config.enable_traceback_filtering()` and `keras.config.disable_traceback_filtering()`. If you have previously disabled traceback filtering via `keras.config.disable_traceback_filtering()`, you can re-enable it via `keras.config.enable_traceback_filtering()`. Returns: Boolean, `True` if traceback filtering is enabled, and `False` otherwise.
github-repos
def record_data(self, content):
    """Record an entry in the test summary file.

    Adds a new USER_DATA entry to the summary file; a timestamp field is
    added when absent, for ease of parsing later.

    Args:
        content: dict, the data to add to the summary file.
    """
    if 'timestamp' not in content:
        # Build a new dict so the caller's content is never mutated.
        content = dict(content, timestamp=utils.get_current_epoch_time())
    self.summary_writer.dump(content, records.TestSummaryEntryType.USER_DATA)
Record an entry in test summary file. Sometimes additional data need to be recorded in summary file for debugging or post-test analysis. Each call adds a new entry to the summary file, with no guarantee of its position among the summary file entries. The content should be a dict. If absent, timestamp field is added for ease of parsing later. Args: content: dict, the data to add to summary file.
github-repos
def get_assistants(cls, superassistants):
    """Return assistants that are subassistants of the given superassistants.

    Args:
        superassistants: list of superassistant objects whose
            subassistants should be collected.

    Returns:
        list of YamlAssistant instances, in superassistant order.
    """
    assistants_by_name = cls.load_all_assistants(superassistants)
    return [
        assistant
        for parent in superassistants
        for assistant in assistants_by_name[parent.name]
    ]
Returns a list of assistants that are subassistants of the given superassistants. Args: superassistants: list of superassistant objects whose subassistants should be collected Returns: list of YamlAssistant instances belonging to the given superassistants
juraj-google-style
def _bns_task_id(job: str) -> Union[int, str]:
    """Tries to extract an integer task ID from a job name.

    For example, for `job` = '/.../tpu_worker/0:port_name', returns 0.

    Args:
        job: A job name to extract the task ID from.

    Returns:
        The task ID on success, or the original job name on failure.
    """
    tail = job.rsplit('/', 1)[-1]
    candidate = tail.partition(':')[0]
    try:
        return int(candidate)
    except ValueError:
        return job
Tries to extract an integer task ID from a job name. For example, for `job` = '/.../tpu_worker/0:port_name', return 0. Args: job: A job name to extract task ID from. Returns: The task ID on success, or the original job name on failure.
github-repos
def fn(x: str, y: Optional[list[Union[str, int]]], z: tuple[Union[str, int], str]=(42, 'hello')) -> tuple[int, str]:
    # Intentionally empty: this stub exists only to exercise signature and
    # docstring parsing in tests; do not add behavior.
    pass
Test function with multiple args, and docstring args that we have to strip out. Args: x: The first input. It's got a big multiline description and also contains (choices: ["a", "b", "c"]) y: The second input. It's a big list with a single-line description. z: The third input. It's some kind of tuple with a default arg. Returns: The output. The return description is also a big multiline description that spans multiple lines.
github-repos
def return_resource_name(self, record, resource_type):
    """Strip the trailing AWS domain from a DNS record.

    E.g. 'bucketname.s3.amazonaws.com' returns 'bucketname'.

    Args:
        record (str): DNS record.
        resource_type: AWS resource type (e.g. 's3').

    Returns:
        str: the resource name for known resource types; the unmodified
        record for unknown resource types or on parse failure.
    """
    try:
        if resource_type == 's3':
            # Match the '.s3[.-region].amazonaws.com' suffix (raw string
            # to avoid invalid-escape warnings; pattern is unchanged).
            regex = re.compile(r'.*(\.(?:s3-|s3){1}(?:.*)?\.amazonaws\.com)')
            return record.replace(regex.match(record).group(1), '')
    except Exception as e:
        self.log.error('Unable to parse DNS record {} for resource type {}/{}'.format(record, resource_type, e))
    # BUG FIX: previously only the exception path returned the record, so
    # unhandled resource types silently fell through and returned None.
    return record
Removes the trailing AWS domain from a DNS record to return the resource name e.g bucketname.s3.amazonaws.com will return bucketname Args: record (str): DNS record resource_type: AWS Resource type (i.e. S3 Bucket, Elastic Beanstalk, etc..)
juraj-google-style
def constant(interval=1):
    """Generator for constant intervals.

    Args:
        interval: A constant value to yield forever, or an iterable of
            values to yield once each.
    """
    try:
        source = iter(interval)
    except TypeError:
        # Not iterable: treat as a scalar and repeat it indefinitely.
        source = itertools.repeat(interval)
    yield from source
Generator for constant intervals. Args: interval: A constant value to yield or an iterable of such values.
codesearchnet
def validate(self, message, schema_name):
    """Validate a message against a named schema.

    Args:
        message (dict): Loaded JSON of a pulled Google PubSub message.
        schema_name (str): Name of the schema to validate ``message``
            against; looked up in ``self.schemas``.

    Raises:
        InvalidMessageError: if the message is invalid against the given
            schema, or if the schema name cannot be found.
    """
    err = None
    try:
        jsonschema.validate(message, self.schemas[schema_name])
    except KeyError:
        # BUG FIX: the original read `msg = f` — a truncated f-string that
        # raised NameError instead of reporting the missing schema.
        msg = f'Schema "{schema_name}" was not found in configured schemas.'
        err = {'msg': msg}
    except jsonschema.ValidationError as e:
        msg = f'Given message was not valid against the schema "{schema_name}": {e.message}'
        err = {'msg': msg}
    if err:
        logging.error(**err)
        raise exceptions.InvalidMessageError(err['msg'])
Validate a message given a schema. Args: message (dict): Loaded JSON of pulled message from Google PubSub. schema_name (str): Name of schema to validate ``message`` against. ``schema_name`` will be used to look up schema from :py:attr:`.MessageValidator.schemas` dict Raises: InvalidMessageError: if message is invalid against the given schema. InvalidMessageError: if given schema name can not be found.
codesearchnet
def _save_env(env):
    """Saves one environment.

    Writes the env dict as YAML to a file named "env" inside its result
    directory; silently does nothing when that directory does not exist.

    Args:
        env (dict): the env dict to save; must contain "resultdir".
    """
    result_dir = env["resultdir"]
    if os.path.isdir(result_dir):
        with open(os.path.join(result_dir, "env"), "w") as handle:
            yaml.dump(env, handle)
Saves one environment. Args: env (dict): the env dict to save.
juraj-google-style
def __init__(self, data_type_definition):
    """Initializes a boolean data type map.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.

    Raises:
        FormatError: if the data type map cannot be determined from the
            data type definition (neither a True nor a False value set).
    """
    has_false = data_type_definition.false_value is not None
    has_true = data_type_definition.true_value is not None
    if not (has_false or has_true):
        raise errors.FormatError(
            'Boolean data type has no True or False values.')
    super(BooleanMap, self).__init__(data_type_definition)
Initializes a boolean data type map. Args: data_type_definition (DataTypeDefinition): data type definition. Raises: FormatError: if the data type map cannot be determined from the data type definition.
juraj-google-style
def rename(self, source_file_names, destination_file_names):
    """Rename the files at the source list to the destination list.

    Source and destination lists must be of the same size.

    Args:
        source_file_names: List of file paths to be moved.
        destination_file_names: List of destination paths for the files.

    Raises:
        BeamIOError: if any of the rename operations fail.
    """
    if len(source_file_names) != len(destination_file_names):
        raise BeamIOError('Unable to rename unequal number of sources and destinations')
    pairs = list(zip(source_file_names, destination_file_names))
    outcomes = s3io.S3IO(options=self._options).rename_files(pairs)
    failures = {}
    for src, dest, error in outcomes:
        if error is not None:
            failures[(src, dest)] = error
    if failures:
        raise BeamIOError('Rename operation failed', failures)
Rename the files at the source list to the destination list. Source and destination lists should be of the same size. Args: source_file_names: List of file paths that need to be moved destination_file_names: List of destination_file_names for the files Raises: ``BeamIOError``: if any of the rename operations fail
github-repos
def get_plot(self, structure, two_theta_range=(0, 90), annotate_peaks=True, ax=None, with_labels=True, fontsize=16):
    """Returns the diffraction plot as a matplotlib.pyplot.

    Args:
        structure: Input structure.
        two_theta_range ([float of length 2]): Range of two_thetas in
            degrees. Defaults to (0, 90). Set to None for all beams
            within the limiting sphere of radius 2 / wavelength.
        annotate_peaks: Whether to annotate peaks with plane information.
        ax: matplotlib Axes, or None to create a new figure.
        with_labels: True to add x/y axis labels.
        fontsize: (int) fontsize for peak labels.

    Returns:
        (matplotlib.pyplot)
    """
    if ax is None:
        from pymatgen.util.plotting import pretty_plot
        plt = pretty_plot(16, 10)
        ax = plt.gca()
    else:
        import matplotlib.pyplot as plt
    xrd = self.get_pattern(structure, two_theta_range=two_theta_range)
    for two_theta, intensity, hkls, d_hkl in zip(xrd.x, xrd.y, xrd.hkls, xrd.d_hkls):
        if two_theta_range[0] <= two_theta <= two_theta_range[1]:
            # FIX: removed stray debug `print(hkls)` that wrote to stdout
            # for every plotted peak.
            label = ', '.join([str(hkl['hkl']) for hkl in hkls])
            ax.plot([two_theta, two_theta], [0, intensity], color='k', linewidth=3, label=label)
            if annotate_peaks:
                ax.annotate(label, xy=[two_theta, intensity], xytext=[two_theta, intensity], fontsize=fontsize)
    if with_labels:
        ax.set_xlabel('$2\\theta$ ($^\\circ$)')
        ax.set_ylabel('Intensities (scaled)')
    if hasattr(ax, 'tight_layout'):
        ax.tight_layout()
    return plt
Returns the diffraction plot as a matplotlib.pyplot. Args: structure: Input structure two_theta_range ([float of length 2]): Tuple for range of two_thetas to calculate in degrees. Defaults to (0, 90). Set to None if you want all diffracted beams within the limiting sphere of radius 2 / wavelength. annotate_peaks: Whether to annotate the peaks with plane information. ax: matplotlib :class:`Axes` or None if a new figure should be created. with_labels: True to add xlabels and ylabels to the plot. fontsize: (int) fontsize for peak labels. Returns: (matplotlib.pyplot)
codesearchnet
class LlavaFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
    # Extra keyword arguments accepted by the Llava fast image processor.
    # do_pad: whether to pad the image to a square based on the longest edge.
    do_pad: Optional[bool]
Args: do_pad (`bool`, *optional*): Whether to pad the image to a square based on the longest edge.
github-repos
def infer_module(filename, pythonpath):
    """Convert a filename to a module relative to pythonpath.

    Deduces the module name from the pythonpath and the filename; this is
    only possible when the filename starts with a pythonpath entry, and
    falls back to an empty path prefix otherwise.

    Args:
        filename: The filename of a Python file, e.g. "foo/bar/baz.py".
        pythonpath: The path Python uses to search for modules.

    Returns:
        A Module object.
    """
    for entry in filter(bool, pythonpath):
        # Normalize so prefix matching is on whole path components.
        prefix = entry if entry.endswith(path_utils.sep) else entry + path_utils.sep
        if filename.startswith(prefix):
            relative = filename[len(prefix):]
            return Module(prefix, relative, path_to_module_name(relative))
    return Module('', filename, path_to_module_name(filename))
Convert a filename to a module relative to pythonpath. This method tries to deduce the module name from the pythonpath and the filename. This will not always be possible. (It depends on the filename starting with an entry in the pythonpath.) Args: filename: The filename of a Python file. E.g. "foo/bar/baz.py". pythonpath: The path Python uses to search for modules. Returns: A Module object.
github-repos
def checkpoint_exists(checkpoint_prefix):
    """Checks whether a V1 or V2 checkpoint exists with the given prefix.

    Thin public wrapper around `checkpoint_exists_internal`, which accounts
    for the naming difference between V1 and V2 checkpoint formats.

    Args:
        checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2
            taking priority (typically the result of `Saver.save()` or
            `tf.train.latest_checkpoint()`).

    Returns:
        A bool, True if a checkpoint referred to by `checkpoint_prefix`
        exists.
    """
    return checkpoint_exists_internal(checkpoint_prefix)
Checks whether a V1 or V2 checkpoint exists with the specified prefix. This is the recommended way to check if a checkpoint exists, since it takes into account the naming difference between V1 and V2 formats. Args: checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. Returns: A bool, true if a checkpoint referred to by `checkpoint_prefix` exists.
github-repos
def _get_version(self, root):
    """Return the version of the root element passed in.

    Args:
        root (etree.Element)

    Returns:
        distutils.StrictVersion

    Raises:
        UnknownVersionError: when no version information is found on the
            root element.
    """
    version = self.get_version(root)
    if not version:
        raise UnknownVersionError(
            "Unable to determine the version of the input document. No "
            "version information found on the root element."
        )
    return StrictVersion(version)
Return the version of the root element passed in. Args: root (etree.Element) Returns: distutils.StrictVersion Raises: UnknownVersionError
juraj-google-style
def __init__(self, request, response):
    """Constructs a QueueItem instance.

    Args:
        request (:class:`nyawc.http.Request`): The Request object.
        response (:class:`nyawc.http.Response`): The Response object
            (empty object when initialized).
    """
    # New items always start out queued and not yet decomposed.
    self.status = QueueItem.STATUS_QUEUED
    self.decomposed = False
    # Presumably lazy caches filled in elsewhere in the class — verify.
    self.__response_soup = None
    self.__index_hash = None
    self.request = request
    self.response = response
Constructs a QueueItem instance. Args: request (:class:`nyawc.http.Request`): The Request object. response (:class:`nyawc.http.Response`): The Response object (empty object when initialized).
juraj-google-style
def validate(self, corpus):
    """Perform the validation on the given corpus.

    Args:
        corpus (Corpus): The corpus to test/validate.

    Returns:
        InvalidUtterancesResult: Validation result.
    """
    invalid_utterances = {}
    for utterance in corpus.utterances.values():
        if self.label_list_idx not in utterance.label_lists.keys():
            invalid_utterances[utterance.idx] = 'No label-list {}'.format(self.label_list_idx)
            continue
        ll = utterance.label_lists[self.label_list_idx]
        if len(ll) < self.min_number_of_labels:
            invalid_utterances[utterance.idx] = 'Only {} labels'.format(len(ll))
    passed = not invalid_utterances
    info = {
        'Min. number of labels': str(self.min_number_of_labels),
        'Label-List ID': self.label_list_idx,
    }
    return base.InvalidUtterancesResult(passed, invalid_utterances, name=self.name(), info=info)
Perform the validation on the given corpus. Args: corpus (Corpus): The corpus to test/validate. Returns: InvalidUtterancesResult: Validation result.
juraj-google-style
def show_app(app, state, notebook_url, port=0, **kw):
    """Embed a Bokeh server application in a Jupyter Notebook output cell.

    Args:
        app (Application or callable): A Bokeh Application to embed inline.
        state (State): ** Unused **
        notebook_url (str or callable): The URL of the notebook server
            running the embedded app. When a callable, it receives the
            server port (or None to produce the origin URL) and must
            return the corresponding URL.
        port (int): Port for the embedded server; 0 (the default) picks a
            random dynamic port.

    Any additional keyword arguments are passed to the Server.

    Returns:
        None
    """
    logging.basicConfig()
    from tornado.ioloop import IOLoop
    from ..server.server import Server
    loop = IOLoop.current()
    # The websocket origin must be known before the server exists, so a
    # callable notebook_url is first invoked with None.
    if callable(notebook_url):
        origin = notebook_url(None)
    else:
        origin = _origin_url(notebook_url)
    server = Server({'/': app}, io_loop=loop, port=port, allow_websocket_origin=[origin], **kw)
    # Track the server by uuid so the notebook can manage it later.
    server_id = uuid4().hex
    curstate().uuid_to_server[server_id] = server
    server.start()
    # Now the actual port is known, build the full server URL.
    if callable(notebook_url):
        url = notebook_url(server.port)
    else:
        url = _server_url(notebook_url, server.port)
    logging.debug(('Server URL is %s' % url))
    logging.debug(('Origin URL is %s' % origin))
    from ..embed import server_document
    script = server_document(url, resources=None)
    publish_display_data({HTML_MIME_TYPE: script, EXEC_MIME_TYPE: ''}, metadata={EXEC_MIME_TYPE: {'server_id': server_id}})
Embed a Bokeh server application in a Jupyter Notebook output cell. Args: app (Application or callable) : A Bokeh Application to embed inline in a Jupyter notebook. state (State) : ** Unused ** notebook_url (str or callable) : The URL of the notebook server that is running the embedded app. If ``notebook_url`` is a string, the value string is parsed to construct the origin and full server URLs. If notebook_url is a callable, it must accept one parameter, which will be the server port, or None. If passed a port, the callable must generate the server URL, otherwise if passed None, it must generate the origin URL for the server. port (int) : A port for the embedded server will listen on. By default the port is 0, which results in the server listening on a random dynamic port. Any additional keyword arguments are passed to :class:`~bokeh.server.Server` (added in version 1.1) Returns: None
codesearchnet
def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
    """Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.

    Thin V1 wrapper delegating to `serialize_sparse_v2` (note the argument
    order difference: v2 takes `out_type` before `name`).

    Args:
        sp_input: The input `SparseTensor`.
        name: A name prefix for the returned tensors (optional).
        out_type: The `dtype` to use for serialization.

    Returns:
        A 3-vector (1-D `Tensor`) with the serialized `SparseTensor`'s
        indices, values, and shape (respectively).

    Raises:
        TypeError: If `sp_input` is not a `SparseTensor`.
    """
    return serialize_sparse_v2(sp_input, out_type, name)
Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object. Args: sp_input: The input `SparseTensor`. name: A name prefix for the returned tensors (optional). out_type: The `dtype` to use for serialization. Returns: A 3-vector (1-D `Tensor`), with each column representing the serialized `SparseTensor`'s indices, values, and shape (respectively). Raises: TypeError: If `sp_input` is not a `SparseTensor`.
github-repos
def load(self, cfgstr=None):
    """Loads the cached data for the given (or default) config string.

    Args:
        cfgstr: optional config string identifying the cache entry;
            rectified via `_rectify_cfgstr` when None.

    Returns:
        The unpickled cached data.

    Raises:
        IOError: if the data cannot be loaded — cache disabled (errno 3),
            cache miss (errno 2), or a corrupted/unreadable cache file.
    """
    from six.moves import cPickle as pickle
    cfgstr = self._rectify_cfgstr(cfgstr)
    dpath = self.dpath
    fname = self.fname
    verbose = self.verbose
    if (not self.enabled):
        if (verbose > 1):
            self.log('[cacher] ... cache disabled: fname={}'.format(self.fname))
        raise IOError(3, 'Cache Loading Is Disabled')
    fpath = self.get_fpath(cfgstr=cfgstr)
    if (not exists(fpath)):
        if (verbose > 2):
            self.log('[cacher] ... cache does not exist: dpath={} fname={} cfgstr={}'.format(basename(dpath), fname, cfgstr))
        raise IOError(2, ('No such file or directory: %r' % (fpath,)))
    elif (verbose > 3):
        self.log('[cacher] ... cache exists: dpath={} fname={} cfgstr={}'.format(basename(dpath), fname, cfgstr))
    try:
        with open(fpath, 'rb') as file_:
            data = pickle.load(file_)
    except Exception as ex:
        # Any failure while unpickling is treated as possible corruption.
        if (verbose > 0):
            self.log(('CORRUPTED? fpath = %s' % (fpath,)))
        if (verbose > 1):
            self.log('[cacher] ... CORRUPTED? dpath={} cfgstr={}'.format(basename(dpath), cfgstr))
        if isinstance(ex, (EOFError, IOError, ImportError)):
            # Known load-failure types are normalized to IOError so
            # callers can treat them like a cache miss.
            raise IOError(str(ex))
        else:
            if (verbose > 1):
                self.log('[cacher] ... unknown reason for exception')
            raise
    else:
        if (self.verbose > 2):
            self.log('[cacher] ... {} cache hit'.format(self.fname))
        elif (verbose > 1):
            self.log('[cacher] ... cache hit')
    return data
Loads the data Raises: IOError - if the data is unable to be loaded. This could be due to a cache miss or because the cache is disabled. Example: >>> from ubelt.util_cache import * # NOQA >>> # Setting the cacher as enabled=False turns it off >>> cacher = Cacher('test_disabled_load', '', enabled=True) >>> cacher.save('data') >>> assert cacher.load() == 'data' >>> cacher.enabled = False >>> assert cacher.tryload() is None
codesearchnet
def to_subquery(self) -> StandardSqlExpression:
    """Renders the expression as a subquery.

    Builds a SELECT statement (with no FROM clause) around this expression
    and returns it wrapped as a SubQuery. Expressions that already render
    a SELECT (such as Select and UnionExpression) should override this to
    avoid the extra nesting.

    Returns:
        A SubQuery expression for this expression.
    """
    return SubQuery(Select(select_part=self, from_part=None))
Renders the expression as a subquery. Builds a SELECT statement for the expression and returns it as a subquery. Expressions which already render a SELECT (such as the Select and UnionExpression classes) should overide this to remove the extra SELECT. Returns: A SubQuery expression for this expression.
github-repos
def remove_observer(self, callback):
    """Remove an observer from this event.

    Args:
        callback: A function or coroutine callback to remove.

    Raises:
        ValueError: If the callback is not an observer of this event.
    """
    # Membership is checked first because the observer container's own
    # remove() error (ValueError/KeyError) would not carry this message.
    if callback in self._observers:
        self._observers.remove(callback)
        return
    raise ValueError('{} is not an observer of {}'.format(callback, self))
Remove an observer from this event. Args: callback: A function or coroutine callback to remove from this event. Raises: ValueError: If the callback is not an observer of this event.
codesearchnet
def find_structure(self, filename_or_structure):
    """Finds matching structures on the Materials Project site.

    Args:
        filename_or_structure: filename or Structure object.

    Returns:
        A list of matching structure identifiers.

    Raises:
        MPRestError: on invalid input, an API-reported error, or any
            underlying failure (all exceptions are re-wrapped).
    """
    try:
        if isinstance(filename_or_structure, str):
            s = Structure.from_file(filename_or_structure)
        elif isinstance(filename_or_structure, Structure):
            s = filename_or_structure
        else:
            raise MPRestError('Provide filename or Structure object.')
        payload = {'structure': json.dumps(s.as_dict(), cls=MontyEncoder)}
        response = self.session.post('{}/find_structure'.format(self.preamble), data=payload)
        # 400 responses still carry a structured error body, so both are
        # parsed before deciding success/failure.
        if (response.status_code in [200, 400]):
            resp = json.loads(response.text, cls=MontyDecoder)
            if resp['valid_response']:
                return resp['response']
            else:
                raise MPRestError(resp['error'])
        raise MPRestError('REST error with status code {} and error {}'.format(response.status_code, response.text))
    except Exception as ex:
        # Normalize every failure mode to MPRestError for callers.
        raise MPRestError(str(ex))
Finds matching structures on the Materials Project site. Args: filename_or_structure: filename or Structure object Returns: A list of matching structures. Raises: MPRestError
codesearchnet
def _try_run_local_init_op(self, sess: session.Session) -> Tuple[bool, Optional[str]]:
    """Tries to run _local_init_op, if not None, and is ready for local init.

    Args:
        sess: A `Session`.

    Returns:
        A tuple (is_successful, msg): is_successful is True if
        _local_init_op is None or was run; False otherwise. msg is a
        string with the reason the model was not ready for local init.
    """
    if self._local_init_op is not None:
        is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)
        if is_ready_for_local_init:
            logging.info('Running local_init_op.')
            sess.run(self._local_init_op, feed_dict=self._local_init_feed_dict, options=self._local_init_run_options)
            logging.info('Done running local_init_op.')
            return (True, None)
        else:
            return (False, msg)
    # No local init op configured: trivially successful.
    return (True, None)
Tries to run _local_init_op, if not None, and is ready for local init. Args: sess: A `Session`. Returns: A tuple (is_successful, msg), where is_successful is True if _local_init_op is None, or we ran _local_init_op, and False otherwise; and msg is a `String` with the reason why the model was not ready to run local init.
github-repos
def load_data(path):
    """Loads the data that has been saved with Script.save.

    Args:
        path: path to a folder saved by Script.save, or the raw_data
            folder within it.

    Returns:
        dict of the form {data_set_name: data}, where data is either a
        dict of column-name -> numpy array (named columns) or a squeezed
        2-D numpy array (all-numeric headers).

    Raises:
        AttributeError: if the path does not exist or contains no csv
            files. (Kept for backward compatibility with existing callers.)
    """
    if not os.path.exists(path):
        # FIX: removed leftover debug `print(path)` before the raise.
        raise AttributeError('Path given does not exist!')
    data = {}
    if 'raw_data' in os.listdir(path):
        path = os.path.join(path, 'raw_data' + '/')
        data_files = os.listdir(path)
    else:
        data_files = glob.glob(os.path.join(path, '*.csv'))
    if not data_files:
        raise AttributeError('Could not find data files in {:s}'.format(path))
    for data_file in data_files:
        # Name is the text after the last '-', minus the '.csv' extension.
        data_name = data_file.split('-')[-1][0:-4]
        frame = pd.read_csv(os.path.join(path, data_file))
        column_headers = list(frame.columns.values)
        if sum(int(h.isdigit()) for h in column_headers) != len(column_headers):
            # Named columns: keep per-column arrays.
            # FIX: DataFrame.as_matrix() was removed in pandas 0.25; use
            # to_numpy(), which is the documented replacement.
            data[data_name] = {h: frame[h].to_numpy() for h in column_headers}
        else:
            # All-numeric headers: treat as a plain matrix.
            data[data_name] = np.squeeze(frame.to_numpy())
    return data
loads the data that has been save with Script.save. Args: path: path to folder saved by Script.save or raw_data folder within Returns: a dictionary with the data of form data = {param_1_name: param_1_data, ...}
juraj-google-style
def from_text_vision_configs(cls, text_config: SiglipTextConfig, vision_config: SiglipVisionConfig, **kwargs):
    """Instantiate a [`SiglipConfig`] (or a derived class) from siglip text
    model configuration and siglip vision model configuration.

    Both sub-configs are converted to plain dicts before being passed to
    the constructor.

    Returns:
        [`SiglipConfig`]: An instance of a configuration object.
    """
    return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
Instantiate a [`SiglipConfig`] (or a derived class) from siglip text model configuration and siglip vision model configuration. Returns: [`SiglipConfig`]: An instance of a configuration object
github-repos
def oauth_access(self, *, client_id: str, client_secret: str, code: str, **kwargs) -> SlackResponse:
    """Exchanges a temporary OAuth verifier code for an access token.

    Args:
        client_id (str): Issued when you created your application.
        client_secret (str): Issued when you created your application.
        code (str): The code param returned via the OAuth callback.
    """
    payload = dict(kwargs, client_id=client_id, client_secret=client_secret, code=code)
    return self.api_call('oauth.access', data=payload)
Exchanges a temporary OAuth verifier code for an access token. Args: client_id (str): Issued when you created your application. e.g. '4b39e9-752c4' client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1' code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'
codesearchnet
def duplicate_verts(script):
    """Merge vertices with identical coordinates into a single vertex.

    Args:
        script: the FilterScript object or script filename to write the
            filter to.

    Layer stack:
        No impacts.

    MeshLab versions:
        2016.12 and 1.3.4BETA (filter name differs between the two).
    """
    if script.ml_version == '1.3.4BETA':
        filter_name = 'Remove Duplicated Vertex'
    else:
        filter_name = 'Remove Duplicate Vertices'
    util.write_filter(script, ' <filter name="%s"/>\n' % filter_name)
    return None
"Check for every vertex on the mesh: if there are two vertices with the same coordinates they are merged into a single one. Args: script: the FilterScript object or script filename to write the filter to. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
juraj-google-style
def ParseLocalEntryRow(
    self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):
    """Parses a local entry row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
        cache (Optional[SQLiteCache]): cache.
        database (Optional[SQLiteDatabase]): database.
    """
    query_hash = hash(query)
    inode_number = self._GetRowValue(query_hash, row, 'inode_number')
    local_path = self.GetLocalPath(inode_number, cache, database)
    event_data = GoogleDriveSnapshotLocalEntryEventData()
    event_data.path = local_path
    event_data.query = query
    event_data.size = self._GetRowValue(query_hash, row, 'size')
    # The 'modified' column holds a POSIX timestamp.
    timestamp = self._GetRowValue(query_hash, row, 'modified')
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a local entry row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. cache (Optional[SQLiteCache]): cache. database (Optional[SQLiteDatabase]): database.
juraj-google-style
def _num_slices_in_dimension(self, axis): if not isinstance(axis, int): raise TypeError('axis must be an integer') if axis < 0: rank = self.rank if rank is None: raise ValueError("You can't use negative values if the rank is undefined") axis = axis + rank if axis == 0: return self._dimension(0) if axis <= self.num_row_partitions: return self.row_partitions[axis - 1].nvals() remainder = axis - (self.num_row_partitions - 1) return _reduce_prod_patch(self.inner_shape[:remainder])
The total size of a dimension (like nvals). Effectively, this is self[:axis+1]._num_elements() Example: shape = DynamicRaggedShape._from_inner_shape([2, 3, 4]) shape._num_slices_in_dimension(0) = 2 shape._num_slices_in_dimension(1) = 6 shape._num_slices_in_dimension(2) = 24 shape._num_slices_in_dimension(-1) = 24 shape._num_slices_in_dimension(-2) = 6 shape._num_slices_in_dimension(-2) = 2 Args: axis: the last axis to include in the number of elements. If negative, then axis = axis + rank. Returns: The number of elements in the shape.
github-repos
def command_runner(shell_command, force_rerun_flag, outfile_checker, cwd=None, silent=False): program_and_args = shlex.split(shell_command) if (not program_exists(program_and_args[0])): raise OSError('{}: program not installed'.format(program_and_args[0])) if cwd: outfile_checker = op.join(cwd, op.basename(outfile_checker)) if force_rerun(flag=force_rerun_flag, outfile=outfile_checker): if silent: command = subprocess.Popen(program_and_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd) (out, err) = command.communicate() ret = command.returncode else: for path in execute(cmd=program_and_args, cwd=cwd): print(path, end='') log.debug('{}: Ran program, output to {}'.format(program_and_args[0], outfile_checker)) else: log.debug('{}: Output already exists'.format(outfile_checker))
Run a shell command with subprocess, with additional options to check if output file exists and printing stdout. Args: shell_command (str): Command as it would be formatted in the command-line (ie. "program -i test.in -o test.out"). force_rerun_flag: If the program should be rerun even if the output file exists. outfile_checker (str): Name out the output file which may have been generated. This does not specify what the outfile will be, that should be done in the program's args or predetermined. cwd (str): Path to working directory where command will be executed. silent (bool): If program STDOUT should be printed to the current shell. Returns: bool: If the program ran successfully.
codesearchnet
def is_published(self): citeable = (('publication_info' in self.record) and is_citeable(self.record['publication_info'])) submitted = (('dois' in self.record) and any((('journal_title' in el) for el in force_list(self.record.get('publication_info'))))) return (citeable or submitted)
Return True if a record is published. We say that a record is published if it is citeable, which means that it has enough information in a ``publication_info``, or if we know its DOI and a ``journal_title``, which means it is in press. Returns: bool: whether the record is published. Examples: >>> record = { ... 'dois': [ ... {'value': '10.1016/0029-5582(61)90469-2'}, ... ], ... 'publication_info': [ ... {'journal_title': 'Nucl.Phys.'}, ... ], ... } >>> LiteratureReader(record).is_published True
codesearchnet
def restore_state(self, state): super(EmulatedPeripheralTile, self).restore_state(state) self.debug_mode = state.get('debug_mode', False) self.run_level = state.get('run_level', None) if state.get('app_started', False): self._hosted_app_running.set()
Restore the current state of this emulated object. Args: state (dict): A previously dumped state produced by dump_state.
juraj-google-style
def _init_project_service(self, version): project_cfg = self._load_config_section(CONFIG_PROJECT_SECTION) self._token_project = project_cfg[CONFIG_TOKEN] proto = project_cfg[CONFIG_PROTOCOL] host = project_cfg[CONFIG_HOST] self._project = ProjectService(host, version) self._project.base_protocol = proto self._project.set_auth(self._token_project)
Method to initialize the Project Service from the config data Args: version (string): Version of Boss API to use. Returns: None Raises: (KeyError): if given invalid version.
codesearchnet
def get_course_video_ids_with_youtube_profile(course_ids=None, offset=None, limit=None): course_videos = CourseVideo.objects.select_related('video').prefetch_related('video__encoded_videos', 'video__encoded_videos__profile').filter(video__encoded_videos__profile__profile_name='youtube').order_by('id').distinct() if course_ids: course_videos = course_videos.filter(course_id__in=course_ids) course_videos = course_videos.values_list('course_id', 'video__edx_video_id') if ((limit is not None) and (offset is not None)): course_videos = course_videos[offset:(offset + limit)] course_videos_with_yt_profile = [] for (course_id, edx_video_id) in course_videos: yt_profile = EncodedVideo.objects.filter(video__edx_video_id=edx_video_id, profile__profile_name='youtube').first() if yt_profile: course_videos_with_yt_profile.append((course_id, edx_video_id, yt_profile.url)) return course_videos_with_yt_profile
Returns a list that contains all the course ids and video ids with the youtube profile Args: course_ids (list): valid course ids limit (int): batch records limit offset (int): an offset for selecting a batch Returns: (list): Tuples of course_id, edx_video_id and youtube video url
codesearchnet
def train(total_loss, global_step): num_batches_per_epoch = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size) decay_steps = int((num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)) lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.summary.scalar('learning_rate', lr) loss_averages_op = _add_loss_summaries(total_loss) with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) for (grad, var) in grads: if (grad is not None): tf.summary.histogram((var.op.name + '/gradients'), grad) variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op
Train CIFAR-10 model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training.
codesearchnet
def signatures(self, transaction): if (not self.multi_wallet): raise DecryptionError('This wallet must be unlocked with wallet.unlock(passphrase)') return self.multi_wallet.signatures(transaction)
Sign a transaction. Args: transaction (coinop.Transaction) Returns: A list of signature dicts of the form [ {'primary': 'base58signaturestring'}, ... ]
codesearchnet
def __setitem__(self, key, value): with self._condition: if key not in self._processors: proc_iterator = self._proc_iter_class() proc_iterator.add_processor(value) self._processors[key] = proc_iterator else: self._processors[key].add_processor(value) if value.connection_id not in self._identities: self._identities[value.connection_id] = [key] else: self._identities[value.connection_id].append(key) self._condition.notify_all()
Either create a new ProcessorIterator, if none exists for a ProcessorType, or add the Processor to the ProcessorIterator. Args: key (ProcessorType): The type of transactions this transaction processor can handle. value (Processor): Information about the transaction processor.
juraj-google-style
def CreateSourceType(cls, type_indicator, attributes): if type_indicator not in cls._source_type_classes: raise errors.FormatError( 'Unsupported type indicator: {0:s}.'.format(type_indicator)) return cls._source_type_classes[type_indicator](**attributes)
Creates a source type. Args: type_indicator (str): source type indicator. attributes (dict[str, object]): source type attributes. Returns: SourceType: a source type. Raises: FormatError: if the type indicator is not set or unsupported, or if required attributes are missing.
juraj-google-style
def get_compute_usage(access_token, subscription_id, location): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.compute/locations/', location, '/usages?api-version=', COMP_API]) return do_get(endpoint, access_token)
List compute usage and limits for a location. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. Returns: HTTP response. JSON body of Compute usage and limits data.
juraj-google-style
def get_data_xlsx(file_name, file_contents=None, on_demand=False): return get_data_xls(file_name, file_contents=file_contents, on_demand=on_demand)
Loads the new excel format files. Old format files will automatically get loaded as well. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy.
juraj-google-style
def count(self, files=False): return (len(self.files) if files else len(self.unique()))
Returns a count of unique values or files. Args: files (bool): When True, counts all files mapped to the Entity. When False, counts all unique values. Returns: an int.
codesearchnet
def list_vdirs(site, app=_DEFAULT_APP): ret = dict() ps_cmd = ['Get-WebVirtualDirectory', '-Site', "'{0}'".format(site), '-Application', "'{0}'".format(app), '|', "Select-Object PhysicalPath, @{ Name = 'name';", "Expression = { $_.path.Split('/')[-1] } }"] cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True) try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) except ValueError: raise CommandExecutionError('Unable to parse return data as Json.') for item in items: ret[item['name']] = {'sourcepath': item['physicalPath']} if (not ret): log.warning('No vdirs found in output: %s', cmd_ret) return ret
Get all configured IIS virtual directories for the specified site, or for the combination of site and application. Args: site (str): The IIS site name. app (str): The IIS application. Returns: dict: A dictionary of the virtual directory names and properties. CLI Example: .. code-block:: bash salt '*' win_iis.list_vdirs site
codesearchnet
def by_name(name): devices = discover(all_households=True) for device in (devices or []): if (device.player_name == name): return device return None
Return a device by name. Args: name (str): The name of the device to return. Returns: :class:`~.SoCo`: The first device encountered among all zone with the given player name. If none are found `None` is returned.
codesearchnet
def match_shortname(self, name, filled_args=None): filled_count = 0 if filled_args is not None: filled_count = len(filled_args) possible = [x for x in self.arg_names[filled_count:] if x.startswith(name)] if len(possible) == 0: raise ArgumentError("Could not convert short-name full parameter name, none could be found", short_name=name, parameters=self.arg_names) elif len(possible) > 1: raise ArgumentError("Short-name is ambiguous, could match multiple keyword parameters", short_name=name, possible_matches=possible) return possible[0]
Try to convert a prefix into a parameter name. If the result could be ambiguous or there is no matching parameter, throw an ArgumentError Args: name (str): A prefix for a parameter name filled_args (list): A list of filled positional arguments that will be removed from consideration. Returns: str: The full matching parameter name
juraj-google-style
def record_ttft_metric(self, created_time: float, request_id: str) -> None: if not _has_opentelemetry: return ttft_ms = (time.time() - created_time) * 1000.0 try: self.ttft_histogram.record(ttft_ms) logger.debug(f'Recorded TTFT for request {request_id}: {ttft_ms:.2f}ms') except Exception as e: logger.warning(f'Failed to record TTFT metric: {e}')
Record Time to First Token (TTFT). Args: created_time: The time the request was created request_id: The ID of the request
github-repos
def write(self, string): x, y = self._normalizeCursor(*self._cursor) width, height = self.get_size() wrapper = _textwrap.TextWrapper(initial_indent=(' '*x), width=width) writeLines = [] for line in string.split('\n'): if line: writeLines += wrapper.wrap(line) wrapper.initial_indent = '' else: writeLines.append([]) for line in writeLines: x, y = self._normalizeCursor(x, y) self.draw_str(x, y, line[x:], self._fg, self._bg) y += 1 x = 0 y -= 1 self._cursor = (x, y)
This method mimics basic file-like behaviour. Because of this method you can replace sys.stdout or sys.stderr with a :any:`Console` or :any:`Window` instance. This is a convoluted process and behaviour seen now can be excepted to change on later versions. Args: string (Text): The text to write out. .. seealso:: :any:`set_colors`, :any:`set_mode`, :any:`Window`
juraj-google-style
def meas_gate(self, circuit, qreg, op): if (self.meas_fun is None): pass else: self.meas_fun(circuit, qreg, op)
Add measurement gates to a circuit. Args: circuit (QuantumCircuit): circuit to add measurement to. qreg (tuple(QuantumRegister,int)): quantum register being measured. op (str): the basis label for the measurement.
codesearchnet
def auth_user_id(self, value): if value == self._defaults['ai.user.authUserId'] and 'ai.user.authUserId' in self._values: del self._values['ai.user.authUserId'] else: self._values['ai.user.authUserId'] = value
The auth_user_id property. Args: value (string). the property value.
juraj-google-style
def github_belspec_files(spec_dir, force: bool = False): if not force: dtnow = datetime.datetime.utcnow() delta = datetime.timedelta(1) yesterday = dtnow - delta for fn in glob.glob(f"{spec_dir}/bel*yaml"): if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday: log.info("Skipping BEL Specification update - specs less than 1 day old") return repo_url = "https: params = {} github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "") if github_access_token: params = {"access_token": github_access_token} r = requests.get(repo_url, params=params) if r.status_code == 200: results = r.json() for f in results: url = f["download_url"] fn = os.path.basename(url) if "yaml" not in fn and "yml" in fn: fn = fn.replace("yml", "yaml") r = requests.get(url, params=params, allow_redirects=True) if r.status_code == 200: open(f"{spec_dir}/{fn}", "wb").write(r.content) else: sys.exit( f"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}" ) else: sys.exit( f"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}" )
Get belspec files from Github repo Args: spec_dir: directory to store the BEL Specification and derived files force: force update of BEL Specifications from Github - skipped if local files less than 1 day old
juraj-google-style
def DNN(input_shape, dense_layers, output_layer=[1, 'sigmoid'], optimizer='adam', loss='binary_crossentropy'): inputs = Input(shape=input_shape) dense = inputs for i, d in enumerate(dense_layers): dense = Dense(d, activation='relu')(dense) dense = BatchNormalization()(dense) dense = Dropout(0.3)(dense) output = Dense(output_layer[0], activation=output_layer[1])(dense) model = Model(inputs=inputs, outputs=output) model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy']) return model
Summary Args: input_shape (list): The shape of the input layer targets (int): Number of targets dense_layers (list): Dense layer descriptor [fully_connected] optimizer (str or object optional): Keras optimizer as string or keras optimizer Returns: TYPE: model, build_arguments
juraj-google-style
def _PromptUserForAPFSVolumeIdentifiers( self, volume_system, volume_identifiers): print_header = True while True: if print_header: self._PrintAPFSVolumeIdentifiersOverview( volume_system, volume_identifiers) print_header = False lines = self._textwrapper.wrap(self._USER_PROMPT_APFS) self._output_writer.Write('\n'.join(lines)) self._output_writer.Write('\n\nVolume identifiers: ') try: selected_volumes = self._ReadSelectedVolumes( volume_system, prefix='apfs') if (not selected_volumes or not set(selected_volumes).difference(volume_identifiers)): break except ValueError: pass self._output_writer.Write('\n') lines = self._textwrapper.wrap( 'Unsupported volume identifier(s), please try again or abort with ' 'Ctrl^C.') self._output_writer.Write('\n'.join(lines)) self._output_writer.Write('\n\n') return selected_volumes
Prompts the user to provide APFS volume identifiers. Args: volume_system (dfvfs.APFSVolumeSystem): volume system. volume_identifiers (list[str]): volume identifiers including prefix. Returns: list[str]: selected volume identifiers including prefix or None.
juraj-google-style
def combine_slices(self, slices, tensor_shape, device=None): if (tensor_shape.ndims == 0): return slices[0] ret = slices[:] tensor_layout = self.tensor_layout(tensor_shape) for (mesh_dim, tensor_axis) in zip(self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)): slice_size = (len(ret) if (tensor_axis is None): ret = ret[:slice_size] else: if device: devices = ([device] * slice_size) else: devices = [ret[i].device for i in xrange(slice_size)] concat_inputs = [] for i in xrange(slice_size): concat_inputs.append([ret[(i + (slice_size * j))] for j in xrange(mesh_dim.size)]) ret = parallel(devices, tf.concat, concat_inputs, axis=([tensor_axis] * len(devices))) assert (len(ret) == 1) return ret[0]
Turns a set of slices into a single tensor. Args: slices: list of tf.Tensor with length self.size. tensor_shape: Shape. device: optional str. If absent, we use the devices of the slices. Returns: tf.Tensor.
codesearchnet
def chimera_anticluster(m, n=None, t=4, multiplier=3.0, cls=BinaryQuadraticModel, subgraph=None, seed=None): if (seed is None): seed = numpy.random.randint((2 ** 32), dtype=np.uint32) r = numpy.random.RandomState(seed) m = int(m) if (n is None): n = m else: n = int(n) t = int(t) ldata = np.zeros((((m * n) * t) * 2)) if (m and n and t): (inrow, incol) = zip(*_iter_chimera_tile_edges(m, n, t)) if ((m > 1) or (n > 1)): (outrow, outcol) = zip(*_iter_chimera_intertile_edges(m, n, t)) else: outrow = outcol = tuple() qdata = r.choice(((- 1.0), 1.0), size=(len(inrow) + len(outrow))) qdata[len(inrow):] *= multiplier irow = (inrow + outrow) icol = (incol + outcol) else: irow = icol = qdata = tuple() bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), 0.0, SPIN) if (subgraph is not None): (nodes, edges) = subgraph subbqm = cls.empty(SPIN) try: subbqm.add_variables_from(((v, bqm.linear[v]) for v in nodes)) except KeyError: msg = "given 'subgraph' contains nodes not in Chimera({}, {}, {})".format(m, n, t) raise ValueError(msg) try: subbqm.add_interactions_from(((u, v, bqm.adj[u][v]) for (u, v) in edges)) except KeyError: msg = "given 'subgraph' contains edges not in Chimera({}, {}, {})".format(m, n, t) raise ValueError(msg) bqm = subbqm return bqm
Generate an anticluster problem on a Chimera lattice. An anticluster problem has weak interactions within a tile and strong interactions between tiles. Args: m (int): Number of rows in the Chimera lattice. n (int, optional, default=m): Number of columns in the Chimera lattice. t (int, optional, default=t): Size of the shore within each Chimera tile. multiplier (number, optional, default=3.0): Strength of the intertile edges. cls (class, optional, default=:class:`.BinaryQuadraticModel`): Binary quadratic model class to build from. subgraph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`): A subgraph of a Chimera(m, n, t) graph to build the anticluster problem on. seed (int, optional, default=None): Random seed. Returns: :obj:`.BinaryQuadraticModel`: spin-valued binary quadratic model.
codesearchnet
def from_coffeescript(cls, code, args={}): compiled = nodejs_compile(code, lang='coffeescript', file='???') if ('error' in compiled): raise CompilationError(compiled.error) return cls(code=compiled.code, args=args)
Create a CustomJSHover instance from a CoffeeScript snippet. The function bodies are translated to JavaScript functions using node and therefore require return statements. The ``code`` snippet namespace will contain the variable ``value`` (the untransformed value) at render time as well as ``special_vars`` and ``format`` as described in the class description. Example: .. code-block:: coffeescript formatter = CustomJSHover.from_coffeescript("return value + " total") Args: code (str) : A coffeescript snippet to transform a single ``value`` value Returns: CustomJSHover
codesearchnet
def _infer_device_name(self, device_name, node_name): if device_name is None: if node_name in self._node_devices: if len(self._node_devices[node_name]) == 1: return list(self._node_devices[node_name])[0] else: raise ValueError("There are multiple (%d) devices with nodes named '%s' but device_name is not specified." % (len(self._node_devices[node_name]), node_name)) else: raise ValueError("None of the %d device(s) has a node named '%s'." % (len(self._device_names), node_name)) else: return device_name
Infer the device name given node name. If device_name is provided (i.e., not None), it'll be simply returned right away. Args: device_name: (str or None) name of the device. If None, will try to infer the device name by looking at the available nodes. node_name: (str) name of the node. Returns: (str) Inferred name of the device, if available. Raises: ValueError: If the node name does not exist on any of the available devices or if there are multiple devices that contain the node with the given name.
github-repos
def find_mip(self, direction, mechanism, purview): if not purview: return _null_ria(direction, mechanism, purview) repertoire = self.repertoire(direction, mechanism, purview) def _mip(phi, partition, partitioned_repertoire): return RepertoireIrreducibilityAnalysis( phi=phi, direction=direction, mechanism=mechanism, purview=purview, partition=partition, repertoire=repertoire, partitioned_repertoire=partitioned_repertoire, node_labels=self.node_labels ) if (direction == Direction.CAUSE and np.all(repertoire == 0)): return _mip(0, None, None) mip = _null_ria(direction, mechanism, purview, phi=float('inf')) for partition in mip_partitions(mechanism, purview, self.node_labels): phi, partitioned_repertoire = self.evaluate_partition( direction, mechanism, purview, partition, repertoire=repertoire) if phi == 0: return _mip(0.0, partition, partitioned_repertoire) if phi < mip.phi: mip = _mip(phi, partition, partitioned_repertoire) return mip
Return the minimum information partition for a mechanism over a purview. Args: direction (Direction): |CAUSE| or |EFFECT|. mechanism (tuple[int]): The nodes in the mechanism. purview (tuple[int]): The nodes in the purview. Returns: RepertoireIrreducibilityAnalysis: The irreducibility analysis for the mininum-information partition in one temporal direction.
juraj-google-style
def _lookup_key_parse(table_keys): regex_matcher = '\\[([^\\]]+)]' valid_dynamodb_datatypes = ['M', 'S', 'N', 'L'] clean_table_keys = [] new_keys = [] for key in table_keys: match = re.search(regex_matcher, key) if match: if (match.group(1) in valid_dynamodb_datatypes): match_val = str(match.group(1)) key = key.replace(match.group(0), '') new_keys.append({match_val: key}) clean_table_keys.append(key) else: raise ValueError('Stacker does not support looking up the datatype: {}'.format(str(match.group(1)))) else: new_keys.append({'S': key}) clean_table_keys.append(key) key_dict = {} key_dict['new_keys'] = new_keys key_dict['clean_table_keys'] = clean_table_keys return key_dict
Return the order in which the stacks should be executed. Args: dependencies (dict): a dictionary where each key should be the fully qualified name of a stack whose value is an array of fully qualified stack names that the stack depends on. This is used to generate the order in which the stacks should be executed. Returns: dict: includes a dict of lookup types with data types ('new_keys') and a list of the lookups with without ('clean_table_keys')
codesearchnet
class EmbeddingTypeAdapter(Generic[EmbeddingTypeAdapterInputT, EmbeddingTypeAdapterOutputT]): input_fn: Callable[[Sequence[EmbeddingTypeAdapterInputT]], List[str]] output_fn: Callable[[Sequence[EmbeddingTypeAdapterInputT], Sequence[Any]], List[EmbeddingTypeAdapterOutputT]] def __reduce__(self): return (self.__class__, (self.input_fn, self.output_fn))
Adapts input types to text for embedding and converts output embeddings. Args: input_fn: Function to extract text for embedding from input type output_fn: Function to create output type from input and embeddings
github-repos
def _get_pprof_proto(self, profile_datum_generator): pprof_profile = profile_pb2.Profile() samples = Samples(self._string_table) for datum in profile_datum_generator: if not datum.traceback: continue stack_frame = datum.traceback[-1] after_apply_op = False location_ids = [] for stack_frame_index in reversed(range(len(datum.traceback) - 1)): prev_stack_frame = stack_frame stack_frame = datum.traceback[stack_frame_index] prev_file_path = prev_stack_frame[0] prev_function = prev_stack_frame[2] prev_function_start_line = -1 curr_file_path = stack_frame[0] curr_line_number = stack_frame[1] if not after_apply_op: if prev_function == 'apply_op': after_apply_op = True continue location_index = self._locations.index_of(curr_file_path, curr_line_number, prev_function, prev_file_path, prev_function_start_line) location_ids.append(location_index) samples.add(datum, location_ids) sample_type_description = 'count' sample_type = pprof_profile.sample_type.add() sample_type.type = self._string_table.index_of(sample_type_description) sample_type.unit = self._string_table.index_of('count') sample_type_description = 'all_time' sample_type = pprof_profile.sample_type.add() sample_type.type = self._string_table.index_of(sample_type_description) sample_type.unit = self._string_table.index_of('nanoseconds') sample_type_description = 'op_time' sample_type = pprof_profile.sample_type.add() sample_type.type = self._string_table.index_of(sample_type_description) sample_type.unit = self._string_table.index_of('nanoseconds') pprof_profile.string_table.extend(self._string_table.string_table()) pprof_profile.sample.extend(samples.get_sample_protos()) pprof_profile.function.extend(self._functions.function_protos()) pprof_profile.location.extend(self._locations.location_protos()) return pprof_profile
Returns profile data in pprof proto format. Args: profile_datum_generator: Generator outputting `ProfileDatum` objects. Returns: A proto in pprof format.
github-repos
def _op_expand(n_bits, func=None, broadcastable=None): if func is None: return functools.partial(_op_expand, n_bits, broadcastable=broadcastable) @functools.wraps(func) def wrapper(self, *args): params = args[0:-n_bits] if len(args) > n_bits else tuple() rargs = args[-n_bits:] if broadcastable is None: blist = [True] * len(rargs) else: blist = broadcastable if not all([_is_bit(arg) for arg in rargs]): rarg_size = [1] * n_bits for iarg, arg in enumerate(rargs): if isinstance(arg, Register): rarg_size[iarg] = len(arg) elif isinstance(arg, list) and all([_is_bit(bit) for bit in arg]): rarg_size[iarg] = len(arg) elif _is_bit(arg): rarg_size[iarg] = 1 else: raise QiskitError('operation arguments must be qubits/cbits') broadcast_size = max(rarg_size) expanded_rargs = [] for arg, broadcast in zip(rargs, blist): if isinstance(arg, Register): arg = [(arg, i) for i in range(len(arg))] elif isinstance(arg, tuple): arg = [arg] if isinstance(arg, list) and len(arg) == 1 and broadcast: arg = arg * broadcast_size if len(arg) != broadcast_size: raise QiskitError('register size error') expanded_rargs.append(arg) rargs = expanded_rargs if all([isinstance(arg, list) for arg in rargs]): if all(rargs): instructions = InstructionSet() for irargs in zip(*rargs): instructions.add(func(self, *params, *irargs), [i for i in irargs if isinstance(i[0], QuantumRegister)], [i for i in irargs if isinstance(i[0], ClassicalRegister)]) return instructions else: raise QiskitError('empty control or target argument') return func(self, *params, *rargs) return wrapper
Decorator for expanding an operation across a whole register or register subset. Args: n_bits (int): the number of register bit arguments the decorated function takes func (function): used for decorators with keyword args broadcastable (list(bool)): list of bool for which register args can be broadcast from 1 bit to the max size of the rest of the args. Defaults to all True if not specified. Return: type: partial function object
juraj-google-style
def render_head_repr(expr: Any, sub_render=None, key_sub_render=None) -> str: head_repr_fmt = '{head}({args}{kwargs})' if (sub_render is None): sub_render = render_head_repr if (key_sub_render is None): key_sub_render = sub_render if isinstance(expr.__class__, Singleton): return repr(expr) if isinstance(expr, Expression): args = expr.args keys = expr.minimal_kwargs.keys() kwargs = '' if (len(keys) > 0): kwargs = ', '.join([('%s=%s' % (key, key_sub_render(expr.kwargs[key]))) for key in keys]) if (len(args) > 0): kwargs = (', ' + kwargs) return head_repr_fmt.format(head=expr.__class__.__name__, args=', '.join([sub_render(arg) for arg in args]), kwargs=kwargs) elif isinstance(expr, (tuple, list)): delims = (('(', ')') if isinstance(expr, tuple) else ('[', ']')) if (len(expr) == 1): delims = (delims[0], (',' + delims[1])) return ((delims[0] + ', '.join([render_head_repr(v, sub_render=sub_render, key_sub_render=key_sub_render) for v in expr])) + delims[1]) else: return sympy_srepr(expr)
Render a textual representation of `expr` using Positional and keyword arguments are recursively rendered using `sub_render`, which defaults to `render_head_repr` by default. If desired, a different renderer may be used for keyword arguments by giving `key_sub_renderer` Raises: AttributeError: if `expr` is not an instance of :class:`Expression`, or more specifically, if `expr` does not have `args` and `kwargs` (respectively `minimal_kwargs`) properties
codesearchnet
def getmtime(self, path=None, client_kwargs=None, header=None): return self._getmtime_from_header( self.head(path, client_kwargs, header))
Return the time of last access of path. Args: path (str): File path or URL. client_kwargs (dict): Client arguments. header (dict): Object header. Returns: float: The number of seconds since the epoch (see the time module).
juraj-google-style
def clean_email_or_username(self): email_or_username = self.cleaned_data[self.Fields.EMAIL_OR_USERNAME].strip() if (not email_or_username): return email_or_username email = email_or_username__to__email(email_or_username) bulk_entry = (len(split_usernames_and_emails(email)) > 1) if bulk_entry: for email in split_usernames_and_emails(email): validate_email_to_link(email, None, ValidationMessages.INVALID_EMAIL_OR_USERNAME, ignore_existing=True) email = email_or_username else: validate_email_to_link(email, email_or_username, ValidationMessages.INVALID_EMAIL_OR_USERNAME, ignore_existing=True) return email
Clean email form field Returns: str: the cleaned value, converted to an email address (or an empty string)
codesearchnet
def run(self, for_time=None):
    """Run the simulation.

    Args:
        for_time (float, optional): When set, jump until the lattice
            clock reaches this time; otherwise perform the configured
            number of jumps. Defaults to None.

    Returns:
        None
    """
    self.for_time = for_time
    # Fail fast if the simulation was never initialised.
    try:
        self.is_initialised()
    except AttributeError:
        raise
    # Optional equilibration phase whose statistics are discarded.
    if self.number_of_equilibration_jumps > 0:
        for _ in range(self.number_of_equilibration_jumps):
            self.lattice.jump()
        self.reset()
    if self.for_time:
        # Time-driven run: count how many jumps fit into the window.
        self.number_of_jumps = 0
        while self.lattice.time < self.for_time:
            self.lattice.jump()
            self.number_of_jumps += 1
    else:
        # Jump-count-driven run.
        for _ in range(self.number_of_jumps):
            self.lattice.jump()
    self.has_run = True
Run the simulation. Args: for_time (:obj:Float, optional): If `for_time` is set, then run the simulation until a set amount of time has passed. Otherwise, run the simulation for a set number of jumps. Defaults to None. Returns: None
juraj-google-style
def call(self, input_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, training: bool=False) -> tf.Tensor:
    """Apply word + token-type embeddings, layer norm and dropout.

    Exactly one of `input_ids` or `inputs_embeds` must be provided.

    Returns:
        final_embeddings (`tf.Tensor`): output embedding tensor.
    """
    assert not (input_ids is None and inputs_embeds is None)
    if input_ids is not None:
        # Guard against out-of-vocabulary ids before the gather.
        check_embeddings_within_bounds(input_ids, self.config.vocab_size)
        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
    # Shape without the trailing hidden dimension.
    input_shape = shape_list(inputs_embeds)[:-1]
    if token_type_ids is None:
        # Default segment id 0 for every position.
        token_type_ids = tf.fill(dims=input_shape, value=0)
    token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
    final_embeddings = inputs_embeds + token_type_embeds
    final_embeddings = self.LayerNorm(inputs=final_embeddings)
    # Dropout is only active when training=True.
    final_embeddings = self.dropout(inputs=final_embeddings, training=training)
    return final_embeddings
Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor.
github-repos
def claim(self, unclaimed_file_readers):
    """Let each registered caller claim the files it recognizes.

    Args:
        unclaimed_file_readers: File readers not yet claimed by any
            caller (usually all files in the input dir).

    Returns:
        tuple: (remaining unclaimed file readers, claimed VcfReaders).
        Leftover unclaimed readers may indicate stray files in the
        input dir.
    """
    claimed = []
    remaining = unclaimed_file_readers
    for caller in self._callers:
        remaining, translated = caller.claim(remaining)
        claimed.extend(translated)
    return remaining, claimed
Allows each caller to claim incoming files as they are recognized. Args: unclaimed_file_readers: Usually, all files in the input dir. Returns: A tuple of unclaimed file readers and claimed VcfReaders. The presence of any unclaimed file readers could indicate stray files in the input dir.
juraj-google-style
def en(item):
    """Serialize a compatible python object with msgpack.

    Args:
        item (obj): The object to serialize.

    Notes:
        Strings are encoded as utf8 with
        ``unicode_errors='surrogatepass'`` so that malformed input
        strings can still be encoded.

    Returns:
        bytes: The serialized bytes in msgpack format.
    """
    if pakr is not None:
        try:
            return pakr.pack(item)
        except Exception:
            # Leave the shared packer in a clean state before propagating.
            pakr.reset()
            raise
    # Fallback when no reusable Packer is available.
    return msgpack.packb(item, use_bin_type=True, unicode_errors='surrogatepass')
Use msgpack to serialize a compatible python object. Args: item (obj): The object to serialize Notes: String objects are encoded using utf8 encoding. In order to handle potentially malformed input, ``unicode_errors='surrogatepass'`` is set to allow encoding bad input strings. Returns: bytes: The serialized bytes in msgpack format.
juraj-google-style
def GetSoapXMLForComplexType(self, type_name, value):
    """Return an XML string representing a SOAP complex type.

    Args:
        type_name: Name of the type; the namespace override prefix is
            applied here.
        value: A python dictionary to hydrate the type instance with.

    Returns:
        A string containing the SOAP XML for the type.
    """
    qualified_name = '{%s}%s' % (self._namespace_override, type_name)
    element = self.schema.get_element(qualified_name)
    result_element = self._element_maker(element.qname.localname)
    # Hydrate an instance of the type and render it into the element.
    element.type.render(result_element, element(**value))
    return lxml.etree.tostring(result_element).strip()
Return an XML string representing a SOAP complex type. Args: type_name: The name of the type with namespace prefix if necessary. value: A python dictionary to hydrate the type instance with. Returns: A string containing the SOAP XML for the type.
juraj-google-style
def screenshot(self):
    """Take a screenshot through the session.

    Returns:
        PIL.Image: The decoded screenshot image.
    """
    from PIL import Image
    # The endpoint returns the PNG payload as base64 text.
    encoded = self.http.get('/screenshot').value
    return Image.open(io.BytesIO(base64.b64decode(encoded)))
Take screenshot with session check Returns: PIL.Image
codesearchnet
def do_post(self, uri, resource, timeout, custom_headers):
    """POST a resource and wait for the resulting task, if any.

    Args:
        uri: URI of the resource.
        resource: Resource data to post.
        timeout: Timeout for the task wait, in seconds.
        custom_headers: Extra HTTP headers for the request.

    Returns:
        The task monitor result when the POST produced a task,
        otherwise the returned entity.
    """
    self.validate_resource_uri(uri)
    task, entity = self._connection.post(uri, resource, custom_headers=custom_headers)
    if task:
        return self._task_monitor.wait_for_task(task, timeout)
    return entity
Helps to make post requests. Args: uri: URI of the resource. resource: Resource data to post. timeout: Time out for the request in seconds. cutom_headers: Allows to add custom http headers. Returns: Retunrs Task object.
codesearchnet
def get_ut_layer(x, hparams, ffn_unit, attention_unit, pad_remover=None):
    """Provide the step function used in universal transformer steps.

    Args:
        x: input
        hparams: model hyper-parameters
        ffn_unit: feed-forward unit
        attention_unit: multi-head attention unit
        pad_remover: to mask out padding in convolutional layers
            (efficiency).

    Returns:
        (ut_function, ut_initializer): the per-step function and the
        initial (state, state, memory/transform) triple it iterates on.

    Raises:
        ValueError: Unknown recurrence type
    """
    if (hparams.recurrence_type == 'basic'):
        # Basic recurrence carries the state three times (no extra memory).
        ut_initializer = (x, x, x)
        ut_function = functools.partial(universal_transformer_basic, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit)
    elif (hparams.recurrence_type == 'highway'):
        ut_initializer = (x, x, x)
        ut_function = functools.partial(universal_transformer_highway, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover)
    elif (hparams.recurrence_type == 'skip'):
        ut_initializer = (x, x, x)
        ut_function = functools.partial(universal_transformer_skip, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover)
    elif (hparams.recurrence_type == 'dwa'):
        # Depth-wise attention keeps one memory slot per recurrence step
        # (plus one for the input), seeded with x in slot 0.
        memory_size = (hparams.num_rec_steps + 1)
        memory_empty = tf.zeros(([memory_size] + common_layers.shape_list(x)))
        memory = fill_memory_slot(memory_empty, x, 0)
        ut_initializer = (x, x, memory)
        ut_function = functools.partial(universal_transformer_depthwise_attention, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit)
    elif (hparams.recurrence_type == 'gru'):
        ut_initializer = (x, x, x)
        ut_function = functools.partial(universal_transformer_with_gru_as_transition_function, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover)
    elif (hparams.recurrence_type == 'lstm'):
        # LSTM transition needs a zero-initialized cell-state memory.
        memory = tf.zeros(common_layers.shape_list(x))
        ut_initializer = (x, x, memory)
        ut_function = functools.partial(universal_transformer_with_lstm_as_transition_function, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover)
    else:
        raise ValueError(('Unknown recurrence type: %s' % hparams.recurrence_type))
    return (ut_function, ut_initializer)
Provides the function that is used in universal transforemr steps. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: ut_function and the ut_initializer Raises: ValueError: Unknown recurrence type
codesearchnet
def patch_on_member(src: symbolic.Symbolic, cls: Union[Type[Any], Tuple[Type[Any], ...]], name: str, value: Any=None, value_fn: Optional[Callable[[Any], Any]]=None, skip_notification: Optional[bool]=None) -> Any:
    """Recursively patch values that are the requested member of classes.

    Example::

        d = pg.Dict(a=A(x=1), b=2)
        print(pg.patching.patch_on_member(d, A, 'x', 2)  # {a=A(x=2), b=2}

    Args:
        src: symbolic value to patch.
        cls: The class (or classes) the member belongs to.
        name: Member name.
        value: New value for fields that satisfy the condition.
        value_fn: Callable that produces the new value from the old one.
            If not None, `value` must be None.
        skip_notification: If True, `on_change` events are suppressed for
            this operation; if None, behavior follows
            `pg.notify_on_rebind`.

    Returns:
        `src` after being patched.
    """
    def _is_target_member(k, v, p):
        # Match values whose parent is an instance of `cls` and whose key
        # within that parent equals `name`.
        return isinstance(p, cls) and k.key == name

    return _conditional_patch(src, _is_target_member, value, value_fn, skip_notification)
Recursively patch values that are the requested member of classes. Example:: d = pg.Dict(a=A(x=1), b=2) print(pg.patching.patch_on_member(d, A, 'x', 2) # {a=A(x=2), b=4} Args: src: symbolic value to patch. cls: In which class the member belongs to. name: Member name. value: New value for field that satisfy `condition`. value_fn: Callable object that produces new value based on old value. If not None, `value` must be None. skip_notification: If True, `on_change` event will not be triggered for this operation. If None, the behavior is decided by `pg.notify_on_rebind`. Please see `symbolic.Symbolic.rebind` for details. Returns: `src` after being patched.
github-repos
def read_dimvalue(self, dimname, path="/", default=NO_DEFAULT):
    """Return the value (length) of a dimension.

    Args:
        dimname: Name of the dimension.
        path: Path to the group.
        default: Returned when `dimname` is not present, unless it is
            `NO_DEFAULT`, in which case the error is re-raised.
    """
    try:
        first_dim = self._read_dimensions(dimname, path=path)[0]
        return len(first_dim)
    except self.Error:
        # No such dimension: fall back to the caller's default if given.
        if default is NO_DEFAULT:
            raise
        return default
Returns the value of a dimension. Args: dimname: Name of the variable path: path to the group. default: return `default` if `dimname` is not present and `default` is not `NO_DEFAULT` else raise self.Error.
juraj-google-style
def get_num_filters(layer):
    """Determine the number of filters within the given `layer`.

    Args:
        layer: The keras layer to use.

    Returns:
        Total number of filters within `layer`. For a
        `keras.layers.Dense` layer, this is the total number of outputs.
    """
    output_shape = K.int_shape(layer.output)
    # 2-D output (batch, units): the last axis holds the unit count.
    if K.ndim(layer.output) == 2:
        return output_shape[-1]
    # Otherwise the channel axis position depends on the data format.
    if K.image_data_format() == 'channels_first':
        return output_shape[1]
    return output_shape[-1]
Determines the number of filters within the given `layer`. Args: layer: The keras layer to use. Returns: Total number of filters within `layer`. For `keras.layers.Dense` layer, this is the total number of outputs.
juraj-google-style
def get_element_dt(self, el_name, tz=None, el_idx=0):
    """Return the text of the selected element as a ``datetime.datetime``.

    The element text must be an ISO 8601 formatted datetime.

    Args:
        el_name (str): Name of element to use.
        tz (datetime.tzinfo): Timezone applied when the parsed value is
            naive; ignored when it already carries one. ``None`` keeps
            naive values naive.
        el_idx (int): Index of the element among same-named siblings.

    Returns:
        datetime.datetime
    """
    element = self.get_element_by_name(el_name, el_idx)
    return iso8601.parse_date(element.text, tz)
Return the text of the selected element as a ``datetime.datetime`` object. The element text must be a ISO8601 formatted datetime Args: el_name : str Name of element to use. tz : datetime.tzinfo Timezone in which to return the datetime. - Without a timezone, other contextual information is required in order to determine the exact represented time. - If dt has timezone: The ``tz`` parameter is ignored. - If dt is naive (without timezone): The timezone is set to ``tz``. - ``tz=None``: Prevent naive dt from being set to a timezone. Without a timezone, other contextual information is required in order to determine the exact represented time. - ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC. el_idx : int Index of element to use in the event that there are multiple sibling elements with the same name. Returns: datetime.datetime
codesearchnet
def read_nanopubs(fn: str) -> Iterable[Mapping[(str, Any)]]:
    """Read a file and generate nanopubs.

    The format is sniffed from the filename: ``-`` or ``*jsonl*`` is
    read as JSON Lines, ``*json*`` as a JSON array, ``*ya?ml*`` as YAML;
    ``*.gz`` files are transparently gunzipped.

    Args:
        fn (str): filename to read nanopubs from

    Returns:
        Generator[Mapping[str, Any]]: generator of nanopubs
    """
    (jsonl_flag, json_flag, yaml_flag) = (False, False, False)
    if ((fn == '-') or ('jsonl' in fn)):
        jsonl_flag = True
    elif ('json' in fn):
        json_flag = True
    elif re.search('ya?ml', fn):
        yaml_flag = True
    else:
        # NOTE(review): inside a generator this `return {}` just stops
        # iteration; the {} is only visible via StopIteration.value.
        log.error('Do not recognize nanopub file format - neither json nor jsonl format.')
        return {}
    try:
        if re.search('gz$', fn):
            f = gzip.open(fn, 'rt')
        else:
            try:
                f = click.open_file(fn, mode='rt')
            except Exception as e:
                # NOTE(review): quit() terminates the interpreter from
                # library code — confirm this is intended behavior.
                log.info(f'Can not open file {fn} Error: {e}')
                quit()
        if jsonl_flag:
            # One nanopub per line.
            for line in f:
                (yield json.loads(line))
        elif json_flag:
            nanopubs = json.load(f)
            for nanopub in nanopubs:
                (yield nanopub)
        elif yaml_flag:
            nanopubs = yaml.load(f, Loader=yaml.SafeLoader)
            for nanopub in nanopubs:
                (yield nanopub)
    except Exception as e:
        # Broad catch: any read/parse failure is logged and iteration ends.
        log.error(f'Could not open file: {fn}')
Read file and generate nanopubs If filename has *.gz, will read as a gzip file If filename has *.jsonl*, will parsed as a JSONLines file IF filename has *.json*, will be parsed as a JSON file If filename has *.yaml* or *.yml*, will be parsed as a YAML file Args: filename (str): filename to read nanopubs from Returns: Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format
codesearchnet
def updateGroup(self, group, vendorSpecific=None):
    """Update a group and parse the response as a boolean.

    See Also: updateGroupResponse()

    Args:
        group: Group to update.
        vendorSpecific: Optional vendor specific extensions.

    Returns:
        Result of parsing the response as a boolean.
    """
    raw_response = self.updateGroupResponse(group, vendorSpecific)
    return self._read_boolean_response(raw_response)
Update a group and report whether the operation succeeded.

See Also: updateGroupResponse()

Args:
    group: Group to update.
    vendorSpecific: Optional vendor specific extensions.

Returns:
    Result of parsing the response as a boolean.
juraj-google-style
def get_showcases(self):
    """Get any showcases the dataset is in.

    Returns:
        List[Showcase]: list of showcases (empty if none are associated)
    """
    assoc_result, showcase_dicts = self._read_from_hdx(
        'showcase', self.data['id'], fieldname='package_id',
        action=hdx.data.showcase.Showcase.actions()['list_showcases'])
    if not assoc_result:
        return list()
    # Wrap each raw dict in a Showcase bound to this configuration.
    return [hdx.data.showcase.Showcase(showcase_dict, configuration=self.configuration)
            for showcase_dict in showcase_dicts]
Get any showcases the dataset is in Returns: List[Showcase]: list of showcases
codesearchnet
def has_file_with_suffix(self, suffixes):
    """Find out if the archive contains a file with one of the suffixes.

    A member matches when its file extension is one of `suffixes`, or —
    failing that — when a suffix appears as a path component
    (``'<suffix>/'``) of the member name.

    Args:
        suffixes: list of suffixes, or a single suffix, to look for

    Returns:
        True if at least one member matches at least one suffix,
        False otherwise (including when the archive is not open)
    """
    if not isinstance(suffixes, list):
        suffixes = [suffixes]
    if self.handle:
        for member in self.handle.getmembers():
            if os.path.splitext(member.name)[1] in suffixes:
                return True
            else:
                # Fix: this fallback check must run per member — in the
                # original it was attached to `if self.handle`, where
                # `member` was unbound and raised NameError.
                for suffix in suffixes:
                    if '{0}/'.format(suffix) in member.name:
                        return True
    return False
Finds out if there is a file with one of suffixes in the archive. Args: suffixes: list of suffixes or single suffix to look for Returns: True if there is at least one file with at least one given suffix in the archive, False otherwise (or archive can't be opened)
juraj-google-style
def all(self, data=None, **kwargs):
    """Fetch all plan entities.

    Args:
        data: Optional query parameters forwarded to the API call.
            Defaults to None, treated as an empty query (replaces the
            original mutable-default `data={}`).

    Returns:
        Dictionary of plan data
    """
    return super(Plan, self).all({} if data is None else data, **kwargs)
Fetch all plan entities Returns: Dictionary of plan data
codesearchnet
def _ParseProcessingOptions(self, options):
    """Parses the processing options.

    Args:
        options (argparse.Namespace): command line arguments.

    Raises:
        BadConfigOption: if the options are invalid.
    """
    # Default to multi-process mode unless explicitly disabled.
    self._single_process_mode = getattr(options, 'single_process', False)

    argument_helper_names = [
        'process_resources', 'temporary_directory', 'workers', 'zeromq']
    # Delegate per-option parsing to the registered argument helpers.
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=argument_helper_names)
Parses the processing options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
juraj-google-style