code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _scale_size(size, scale): w, h = size return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)
Rescale a size by a ratio. Args: size (tuple): w, h. scale (float): Scaling factor. Returns: tuple[int]: scaled size.
juraj-google-style
def interpolations_to_summary(sample_ind, interpolations, first_frame, last_frame, hparams, decode_hp):
    """Convert interpolated frames into a list of tf summary values.

    The summaries consist of the interpolated frames as a gif summary and,
    when ``decode_hp.save_frames`` is set, image summaries for the first and
    last frames.

    Args:
        sample_ind: int, index of the sample (used in the summary tag).
        interpolations: Numpy array of interpolated frames.
        first_frame: Numpy array, a single frame (HWC).
        last_frame: Numpy array, a single frame (HWC).
        hparams: HParams, train hparams.
        decode_hp: HParams, decode hparams.

    Returns:
        List of tf Summary values.
    """
    parent_tag = 'sample_%d' % sample_ind
    target_shape = [hparams.batch_size, decode_hp.num_interp] + hparams.problem.frame_shape
    interpolations = np.reshape(interpolations, target_shape)
    interp_tag = '%s/interp/%s' % (parent_tag, decode_hp.channel_interp)
    if decode_hp.channel_interp == 'ranked':
        interp_tag = '%s/rank_%d' % (interp_tag, decode_hp.rank_interp)
    summaries, _ = common_video.py_gif_summary(
        interp_tag, interpolations, return_summary_value=True,
        max_outputs=decode_hp.max_display_outputs,
        fps=decode_hp.frames_per_second)
    if decode_hp.save_frames:
        summaries.append(
            image_utils.image_to_tf_summary_value(first_frame, '%s/first' % parent_tag))
        summaries.append(
            image_utils.image_to_tf_summary_value(last_frame, '%s/last' % parent_tag))
    return summaries
Converts interpolated frames into tf summaries. The summaries consists of: 1. Image summary corresponding to the first frame. 2. Image summary corresponding to the last frame. 3. The interpolated frames as a gif summary. Args: sample_ind: int interpolations: Numpy array, shape=(num_interp, H, W, 3) first_frame: Numpy array, shape=(HWC) last_frame: Numpy array, shape=(HWC) hparams: HParams, train hparams decode_hp: HParams, decode hparams Returns: summaries: list of tf Summary Values.
codesearchnet
def merge_sketches(outdir, sketch_paths):
    """Merge new Mash sketches with the current Mash sketch file.

    Args:
        outdir (str): Output directory to write the merged Mash sketch file.
        sketch_paths (list of str): Mash sketch file paths for input fastas.

    Returns:
        str: Output path of the merged Mash sketch file.
    """
    merge_sketch_path = os.path.join(outdir, 'sistr.msh')
    args = ['mash', 'paste', merge_sketch_path]
    args.extend(sketch_paths)
    # The existing reference sketch is always merged in last.
    args.append(MASH_SKETCH_FILE)
    logging.info('Running Mash paste with command: %s', ' '.join(args))
    proc = Popen(args)
    proc.wait()
    assert os.path.exists(merge_sketch_path), 'Merged sketch was not created at {}'.format(merge_sketch_path)
    return merge_sketch_path
Merge new Mash sketches with current Mash sketches Args: outdir (str): output directory to write merged Mash sketch file sketch_paths (list of str): Mash sketch file paths for input fasta files Returns: str: output path for Mash sketch file with new and old sketches
codesearchnet
def traverse_by(self, fixers, traversal):
    """Traverse an AST, applying a set of fixers to each node.

    Helper for refactor_tree().

    Args:
        fixers: a mapping from node type to a list of fixer instances.
        traversal: a generator that yields AST nodes.

    Returns:
        None
    """
    if not fixers:
        return
    for node in traversal:
        for fixer in fixers[node.type]:
            match_result = fixer.match(node)
            if not match_result:
                continue
            replacement = fixer.transform(node, match_result)
            if replacement is not None:
                node.replace(replacement)
                # Subsequent fixers operate on the replacement node.
                node = replacement
Traverse an AST, applying a set of fixers to each node. This is a helper method for refactor_tree(). Args: fixers: a list of fixer instances. traversal: a generator that yields AST nodes. Returns: None
codesearchnet
def get_report_zip(results):
    """Create a zip archive of parsed report output.

    Args:
        results (OrderedDict): The parsed results.

    Returns:
        bytes: The bytes of the zip file.
    """
    def add_subdir(root_path, subdir):
        # Recursively add the files below root_path/subdir to the archive.
        subdir_path = os.path.join(root_path, subdir)
        for walk_root, walk_dirs, walk_files in os.walk(subdir_path):
            for fname in walk_files:
                fpath = os.path.join(root_path, subdir, fname)
                if os.path.isfile(fpath):
                    # NOTE(review): relpath(start=<file path>) looks like the
                    # arguments may be swapped; preserved as-is — confirm.
                    rel = os.path.relpath(walk_root, fpath)
                    zip_file.write(fpath, os.path.join(rel, fname))
            for nested in walk_dirs:
                add_subdir(subdir_path, nested)

    storage = BytesIO()
    tmp_dir = tempfile.mkdtemp()
    try:
        save_output(results, tmp_dir)
        with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file:
            for root, dirs, files in os.walk(tmp_dir):
                for fname in files:
                    fpath = os.path.join(root, fname)
                    if os.path.isfile(fpath):
                        arcname = os.path.join(os.path.relpath(root, tmp_dir), fname)
                        zip_file.write(fpath, arcname)
                for directory in dirs:
                    dpath = os.path.join(root, directory)
                    if os.path.isdir(dpath):
                        zip_file.write(dpath, directory)
                        add_subdir(root, directory)
    finally:
        # Always remove the scratch directory, even if zipping fails.
        shutil.rmtree(tmp_dir)
    return storage.getvalue()
Creates a zip file of parsed report output Args: results (OrderedDict): The parsed results Returns: bytes: zip file bytes
juraj-google-style
def set_floatx(value):
    """Set the default float dtype.

    Note: setting this to ``"float16"`` for training is not recommended, as
    it can cause numeric stability issues; prefer mixed precision instead.

    Args:
        value: One of ``'bfloat16'``, ``'float16'``, ``'float32'``,
            ``'float64'``.

    Raises:
        ValueError: If ``value`` is not an accepted dtype name.
    """
    global _FLOATX
    accepted_dtypes = {'bfloat16', 'float16', 'float32', 'float64'}
    if value in accepted_dtypes:
        _FLOATX = str(value)
    else:
        raise ValueError(f'Unknown `floatx` value: {value}. Expected one of {accepted_dtypes}')
Set the default float dtype. Note: It is not recommended to set this to `"float16"` for training, as this will likely cause numeric stability issues. Instead, use mixed precision, which leverages a mix of `float16` and `float32`; it can be configured by calling `keras.mixed_precision.set_dtype_policy('mixed_float16')`. Args: value: String; `'bfloat16'`, `'float16'`, `'float32'`, or `'float64'`. Examples: >>> keras.config.floatx() 'float32' >>> keras.config.set_floatx('float64') >>> keras.config.floatx() 'float64' >>> # Set it back to float32 >>> keras.config.set_floatx('float32') Raises: ValueError: In case of invalid value.
github-repos
def get_model_schema_and_features(model_dir):
    """Get a local model's schema and features config.

    Args:
        model_dir: Local or GCS path of a model.

    Returns:
        A tuple of schema (list) and features config (dict).
    """
    assets_dir = os.path.join(model_dir, 'assets.extra')
    schema = json.loads(
        file_io.read_file_to_string(os.path.join(assets_dir, 'schema.json')))
    features_config = json.loads(
        file_io.read_file_to_string(os.path.join(assets_dir, 'features.json')))
    return schema, features_config
Get a local model's schema and features config. Args: model_dir: local or GCS path of a model. Returns: A tuple of schema (list) and features config (dict).
codesearchnet
def _get_static_ndims(x, expect_static=False, expect_ndims=None, expect_ndims_no_more_than=None, expect_ndims_at_least=None): ndims = x.shape.ndims if (ndims is None): shape_const = tf.get_static_value(tf.shape(input=x)) if (shape_const is not None): ndims = shape_const.ndim if (ndims is None): if expect_static: raise ValueError(('Expected argument `x` to have statically defined `ndims`. Found: ' % x)) return if (expect_ndims is not None): ndims_message = ('Expected argument `x` to have ndims %s. Found tensor %s' % (expect_ndims, x)) if (ndims != expect_ndims): raise ValueError(ndims_message) if (expect_ndims_at_least is not None): ndims_at_least_message = ('Expected argument `x` to have ndims >= %d. Found tensor %s' % (expect_ndims_at_least, x)) if (ndims < expect_ndims_at_least): raise ValueError(ndims_at_least_message) if (expect_ndims_no_more_than is not None): ndims_no_more_than_message = ('Expected argument `x` to have ndims <= %d. Found tensor %s' % (expect_ndims_no_more_than, x)) if (ndims > expect_ndims_no_more_than): raise ValueError(ndims_no_more_than_message) return ndims
Get static number of dimensions and assert that some expectations are met. This function returns the number of dimensions 'ndims' of x, as a Python int. The optional expect arguments are used to check the ndims of x, but this is only done if the static ndims of x is not None. Args: x: A Tensor. expect_static: Expect `x` to have statically defined `ndims`. expect_ndims: Optional Python integer. If provided, assert that x has number of dimensions equal to this. expect_ndims_no_more_than: Optional Python integer. If provided, assert that x has no more than this many dimensions. expect_ndims_at_least: Optional Python integer. If provided, assert that x has at least this many dimensions. Returns: ndims: A Python integer. Raises: ValueError: If any of the expectations above are violated.
codesearchnet
def get_proposed_feature(project):
    """Get the proposed feature.

    The path of the proposed feature is determined by diffing the project
    against a comparison branch, such as master. The feature is then imported
    from that path and returned.

    Args:
        project (ballet.project.Project): project info.

    Raises:
        ballet.exc.BalletError: more than one feature collected.
    """
    collected_changes = ChangeCollector(project).collect_changes()
    try:
        importer, _, _ = one_or_raise(collected_changes.new_feature_info)
    except ValueError:
        raise BalletError('Too many features collected')
    # Import the module containing the single new feature and extract it.
    module = importer()
    return _get_contrib_feature_from_module(module)
Get the proposed feature The path of the proposed feature is determined by diffing the project against a comparison branch, such as master. The feature is then imported from that path and returned. Args: project (ballet.project.Project): project info Raises: ballet.exc.BalletError: more than one feature collected
codesearchnet
def WrapCFTypeInPython(self, obj):
    """Package a CoreFoundation object in a Python wrapper.

    Args:
        obj: The CoreFoundation object.

    Returns:
        One of CFBoolean, CFNumber, CFString, CFDictionary, CFArray.

    Raises:
        TypeError: If the type is not supported.
    """
    # Each entry pairs a CF type-id accessor with its Python wrapper class.
    dispatch = (
        (self.dll.CFBooleanGetTypeID, CFBoolean),
        (self.dll.CFNumberGetTypeID, CFNumber),
        (self.dll.CFStringGetTypeID, CFString),
        (self.dll.CFDictionaryGetTypeID, CFDictionary),
        (self.dll.CFArrayGetTypeID, CFArray),
    )
    obj_type = self.dll.CFGetTypeID(obj)
    for get_type_id, wrapper in dispatch:
        if obj_type == get_type_id():
            return wrapper(obj)
    raise TypeError('Unknown type for object: {0}'.format(obj))
Package a CoreFoundation object in a Python wrapper. Args: obj: The CoreFoundation object. Returns: One of CFBoolean, CFNumber, CFString, CFDictionary, CFArray. Raises: TypeError: If the type is not supported.
codesearchnet
def get_structures(self, primitive=True):
    """Return the list of structures parsed from the CIF file.

    Args:
        primitive (bool): Set to False to return conventional unit cells.
            Defaults to True.

    Returns:
        List of Structures.

    Raises:
        ValueError: If the CIF file yields no structures at all.
    """
    structures = []
    for data_block in self._cif.data.values():
        try:
            struct = self._get_structure(data_block, primitive)
            if struct:
                structures.append(struct)
        except (KeyError, ValueError) as exc:
            # Record the problem but keep parsing the remaining blocks.
            self.errors.append(str(exc))
            warnings.warn(str(exc))
    if self.errors:
        warnings.warn("Issues encountered while parsing CIF:")
        for error in self.errors:
            warnings.warn(error)
    if not structures:
        raise ValueError("Invalid cif file with no structures!")
    return structures
Return list of structures in CIF file. primitive boolean sets whether a conventional cell structure or primitive cell structure is returned. Args: primitive (bool): Set to False to return conventional unit cells. Defaults to True. With magnetic CIF files, will return primitive magnetic cell which may be larger than nuclear primitive cell. Returns: List of Structures.
juraj-google-style
def _GetOutputModulesInformation(self):
    """Retrieves the output modules information.

    Returns:
        list[tuple[str, str]]: pairs of output module names and descriptions.
    """
    return [
        (name, output_class.DESCRIPTION)
        for name, output_class in output_manager.OutputManager.GetOutputClasses()]
Retrieves the output modules information. Returns: list[tuple[str, str]]: pairs of output module names and descriptions.
codesearchnet
def _convert_schemas(mapping, schemas): schemas = deepcopy(schemas) for schema in schemas: for fk in schema.get('foreignKeys', []): resource = fk['reference']['resource'] if resource != 'self': if resource not in mapping: message = 'Not resource "%s" for foreign key "%s"' message = message % (resource, fk) raise ValueError(message) fk['reference']['resource'] = mapping[resource] return schemas
Convert schemas to be compatible with storage schemas. Foreign keys related operations. Args: mapping (dict): mapping between resource name and table name schemas (list): schemas Raises: ValueError: if there is no resource for some foreign key in given mapping Returns: list: converted schemas
juraj-google-style
def get_child(self, injection_site_fn, binding):
    """Creates a child injection context.

    A "child" injection context is a context for a binding used to inject
    something into the current binding's provided value.

    Args:
        injection_site_fn: the child function being injected into.
        binding: a Binding.

    Returns:
        a new _InjectionContext.
    """
    child_scope_id = binding.scope_id
    extended_stack = self._binding_stack + [binding]
    # A binding already on the stack means a dependency cycle.
    if binding in self._binding_stack:
        raise errors.CyclicInjectionError(extended_stack)
    if not self._is_scope_usable_from_scope_fn(child_scope_id, self._scope_id):
        raise errors.BadDependencyScopeError(
            self.get_injection_site_desc(), self._scope_id, child_scope_id,
            binding.binding_key)
    return _InjectionContext(
        injection_site_fn, extended_stack, child_scope_id,
        self._is_scope_usable_from_scope_fn)
Creates a child injection context. A "child" injection context is a context for a binding used to inject something into the current binding's provided value. Args: injection_site_fn: the child function being injected into binding: a Binding Returns: a new _InjectionContext
juraj-google-style
def get_user(self, user):
    """Get a user's data (first and last name, email, etc).

    Args:
        user (string): User name.

    Returns:
        (dictionary): User's data encoded in a dictionary.

    Raises:
        requests.HTTPError on failure.
    """
    # Delegate straight to the underlying service client.
    svc = self.service
    return svc.get_user(
        user, self.url_prefix, self.auth, self.session,
        self.session_send_opts)
Get user's data (first and last name, email, etc). Args: user (string): User name. Returns: (dictionary): User's data encoded in a dictionary. Raises: requests.HTTPError on failure.
juraj-google-style
def GetMap(self, cache_filename=None):
    """Returns the map from the cache.

    Args:
        cache_filename: Alternative cache file to read, optional; defaults
            to self.GetCacheFilename().

    Returns:
        A child of Map containing the cache data, or self.data unchanged
        when the cache file does not exist.
    """
    data = self.data
    if cache_filename is None:
        cache_filename = self.GetCacheFilename()
    self.log.debug('Opening %r for reading existing cache', cache_filename)
    if not os.path.exists(cache_filename):
        self.log.warning('Cache file does not exist, using an empty map instead')
    else:
        # Bug fix: the original leaked the file handle; close it
        # deterministically with a context manager.
        with open(cache_filename) as cache_file:
            data = self.map_parser.GetMap(cache_file, data)
    return data
Returns the map from the cache. Args: cache_filename: alternative file to read, optional. Returns: A child of Map containing the cache data. Raises: CacheNotFound: The cache file we expected to read from does not exist.
github-repos
def make_dict_observable(matrix_observable):
    """Convert an observable in matrix form to dictionary form.

    Takes a diagonal observable as a matrix and converts it to a dictionary
    keyed by basis-state bitstrings. Also handles a plain ordered list of
    the diagonal elements.

    Args:
        matrix_observable (list): The observable to convert; a matrix or an
            ordered list of observed values.

    Returns:
        Dict: bitstring keys mapped to the observed value for that state.
    """
    observable = np.array(matrix_observable)
    num_states = len(observable)
    num_bits = int(np.ceil(np.log2(num_states)))
    formatter = '0{}b'.format(num_bits)
    if observable.ndim == 2:
        # Only the diagonal of a matrix observable is meaningful here.
        observable = observable.diagonal()
    return {format(state_no, formatter): observable[state_no]
            for state_no in range(num_states)}
Convert an observable in matrix form to dictionary form. Takes in a diagonal observable as a matrix and converts it to a dictionary form. Can also handle a list sorted of the diagonal elements. Args: matrix_observable (list): The observable to be converted to dictionary form. Can be a matrix or just an ordered list of observed values Returns: Dict: A dictionary with all observable states as keys, and corresponding values being the observed value for that state
codesearchnet
def run_without_tensor_float_32(description):
    """Execute a test with TensorFloat-32 disabled.

    TensorFloat-32 matmuls typically cause assertAllClose-style checks to
    fail with default tolerances, so tests can opt out via this decorator.

    Args:
        description: A description used for documentation purposes,
            describing why the test requires TensorFloat-32 to be disabled.

    Returns:
        Decorator which runs a test with TensorFloat-32 disabled.
    """
    def decorator(f):
        @functools.wraps(f)
        def decorated(self, *args, **kwargs):
            previous = config.tensor_float_32_execution_enabled()
            try:
                config.enable_tensor_float_32_execution(False)
                f(self, *args, **kwargs)
            finally:
                # Restore the caller's TF32 setting no matter what.
                config.enable_tensor_float_32_execution(previous)
        return decorated
    return decorator
Execute test with TensorFloat-32 disabled. While almost every real-world deep learning model runs fine with TensorFloat-32, many tests use assertAllClose or similar methods. TensorFloat-32 matmuls typically will cause such methods to fail with the default tolerances. Args: description: A description used for documentation purposes, describing why the test requires TensorFloat-32 to be disabled. Returns: Decorator which runs a test with TensorFloat-32 disabled.
github-repos
def make_module_spec(vocabulary_file, vocab_size, embeddings_dim, num_oov_buckets, preprocess_text):
    """Makes a module spec that performs token-to-embedding lookups.

    Input of this module is a 1-D list of string tokens. For T input tokens
    and an M-dimensional embedding table, the lookup result is a [T, M]
    shaped Tensor.

    Args:
        vocabulary_file: Text file where each line is a key in the vocabulary.
        vocab_size: The number of tokens contained in the vocabulary.
        embeddings_dim: The embedding dimension.
        num_oov_buckets: The number of out-of-vocabulary buckets.
        preprocess_text: Whether to preprocess the input tensor by removing
            punctuation and splitting on spaces.

    Returns:
        A module spec object used for constructing a TF-Hub module.
    """

    def module_fn():
        """Spec function for a token embedding module."""
        tokens = tf.placeholder(shape=[None], dtype=tf.string, name='tokens')
        embeddings_var = tf.get_variable(
            initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
            name=EMBEDDINGS_VAR_NAME,
            dtype=tf.float32)
        lookup_table = tf.contrib.lookup.index_table_from_file(
            vocabulary_file=vocabulary_file,
            num_oov_buckets=num_oov_buckets)
        ids = lookup_table.lookup(tokens)
        embedded = tf.nn.embedding_lookup(params=embeddings_var, ids=ids)
        hub.add_signature('default', {'tokens': tokens}, {'default': embedded})

    def module_fn_with_preprocessing():
        """Spec function for a full-text embedding module with preprocessing."""
        sentences = tf.placeholder(shape=[None], dtype=tf.string, name='sentences')
        # Strip punctuation, then split on spaces.
        normalized = tf.regex_replace(input=sentences, pattern='\\pP', rewrite='')
        tokens = tf.string_split(normalized, ' ')
        embeddings_var = tf.get_variable(
            initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
            name=EMBEDDINGS_VAR_NAME,
            dtype=tf.float32)
        lookup_table = tf.contrib.lookup.index_table_from_file(
            vocabulary_file=vocabulary_file,
            num_oov_buckets=num_oov_buckets)
        sparse_ids = tf.SparseTensor(
            indices=tokens.indices,
            values=lookup_table.lookup(tokens.values),
            dense_shape=tokens.dense_shape)
        # Empty sentences yield empty rows; fill them with the id of the
        # empty-string token so the sparse lookup below stays well-defined.
        sparse_ids, _ = tf.sparse_fill_empty_rows(
            sparse_ids, lookup_table.lookup(tf.constant('')))
        sparse_ids = tf.sparse_reset_shape(sparse_ids)
        embedded = tf.nn.embedding_lookup_sparse(
            params=embeddings_var,
            sp_ids=sparse_ids,
            sp_weights=None,
            combiner='sqrtn')
        hub.add_signature('default', {'sentences': sentences}, {'default': embedded})

    if preprocess_text:
        return hub.create_module_spec(module_fn_with_preprocessing)
    return hub.create_module_spec(module_fn)
Makes a module spec to simply perform token to embedding lookups. Input of this module is a 1-D list of string tokens. For T tokens input and an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor. Args: vocabulary_file: Text file where each line is a key in the vocabulary. vocab_size: The number of tokens contained in the vocabulary. embeddings_dim: The embedding dimension. num_oov_buckets: The number of out-of-vocabulary buckets. preprocess_text: Whether to preprocess the input tensor by removing punctuation and splitting on spaces. Returns: A module spec object used for constructing a TF-Hub module.
codesearchnet
def get_nets_jpnic(self, response):
    """Parse network blocks from jpnic whois data.

    Args:
        response (:obj:`str`): The response from the jpnic server.

    Returns:
        list of dict: Mapping of networks with start and end positions,
        each with keys 'cidr', 'range', 'start', 'end'.
    """
    nets = []
    pattern = r'^.*?(\[Network Number\])[^\S\n]+.+?>(?P<val>.+?)</A>$'
    for match in re.finditer(pattern, response, re.MULTILINE):
        try:
            net = copy.deepcopy(BASE_NET)
            block = ip_network(match.group(2))
            # Older ipaddr-style objects expose .ip/.broadcast instead of
            # .network_address/.broadcast_address.
            try:
                start_addr = block.network_address
            except AttributeError:
                start_addr = block.ip
            try:
                end_addr = block.broadcast_address
            except AttributeError:
                end_addr = block.broadcast
            net['range'] = '{0} - {1}'.format(start_addr + 1, end_addr)
            net['cidr'] = ip_network(match.group(2).strip()).__str__()
            net['start'] = match.start()
            net['end'] = match.end()
            nets.append(net)
        except (ValueError, TypeError):
            # Skip entries that are not parseable as networks.
            pass
    return nets
The function for parsing network blocks from jpnic whois data. Args: response (:obj:`str`): The response from the jpnic server. Returns: list of dict: Mapping of networks with start and end positions. :: [{ 'cidr' (str) - The network routing block 'start' (int) - The starting point of the network 'end' (int) - The endpoint point of the network }]
codesearchnet
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Create a token-type-id mask for a sequence-pair classification task.

    The first sequence (plus its special tokens) is marked with 0s and the
    optional second sequence (plus its special tokens) with 1s. If
    ``token_ids_1`` is None, only the 0s portion is returned.

    Args:
        token_ids_0 (`List[int]`): List of ids.
        token_ids_1 (`List[int]`, *optional*): Optional second list of IDs
            for sequence pairs.

    Returns:
        `List[int]`: List of token type IDs according to the given
        sequence(s).
    """
    prefix = [self.bos_token_id] if self.add_bos_token else []
    suffix = [self.eos_token_id] if self.add_eos_token else []
    mask = [0] * len(prefix + token_ids_0 + suffix)
    if token_ids_1 is not None:
        mask = mask + [1] * len(prefix + token_ids_1 + suffix)
    return mask
Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
github-repos
def upload_from_url_sync(cls, url, timeout=30, interval=0.3, until_ready=False, store=None, filename=None):
    """Upload a file from the given URL and block until done.

    Args:
        - url (str): URL of the file to upload.
        - timeout (Optional[int]): Seconds to wait for a successful upload.
            Defaults to 30.
        - interval (Optional[float]): Interval between upload status checks.
            Defaults to 0.3.
        - until_ready (Optional[bool]): Whether to wait until the file is
            available via CDN. Defaults to False.
        - store (Optional[bool]): Whether the file should be automatically
            stored upon upload (None means use project settings).
        - filename (Optional[str]): Name of the uploaded file; when omitted
            it is derived from response headers or the source URL.

    Returns:
        ``File`` instance.

    Raises:
        ``TimeoutError`` if the file wasn't uploaded in time.
    """
    pending = cls.upload_from_url(url, store, filename)
    return pending.wait(timeout=timeout, interval=interval, until_ready=until_ready)
Uploads file from given url and returns ``File`` instance. Args: - url (str): URL of file to upload to - store (Optional[bool]): Should the file be automatically stored upon upload. Defaults to None. - False - do not store file - True - store file (can result in error if autostore is disabled for project) - None - use project settings - filename (Optional[str]): Name of the uploaded file. If this not specified the filename will be obtained from response headers or source URL. Defaults to None. - timeout (Optional[int]): seconds to wait for successful upload. Defaults to 30. - interval (Optional[float]): interval between upload status checks. Defaults to 0.3. - until_ready (Optional[bool]): should we wait until file is available via CDN. Defaults to False. Returns: ``File`` instance Raises: ``TimeoutError`` if file wasn't uploaded in time
codesearchnet
def get_fragment(self, list_of_indextuples, give_only_index=False, use_lookup=None):
    """Get the indices of the atoms in a fragment.

    ``list_of_indextuples`` contains all bondings from the molecule to the
    fragment; ``[(1, 3), (2, 4)]`` means the fragment is connected over two
    bonds, from atom 1 (molecule) to atom 3 (fragment) and from atom 2 to
    atom 4.

    Args:
        list_of_indextuples (list): (molecule_index, fragment_index) pairs.
        give_only_index (bool): If ``True`` a set of indices is returned,
            otherwise a new Cartesian instance.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. Default comes from
            ``settings['defaults']['use_lookup']``.

    Returns:
        A set of indices or a new Cartesian instance.
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']
    # Molecule-side atoms are excluded so the search stays in the fragment.
    molecule_side = [pair[0] for pair in list_of_indextuples]
    anchor = list_of_indextuples[0][1]
    fragment_index = self.get_coordination_sphere(
        anchor, exclude=set(molecule_side), n_sphere=float('inf'),
        only_surface=False, give_only_index=True, use_lookup=use_lookup)
    if give_only_index:
        return fragment_index
    return self.loc[fragment_index, :]
Get the indices of the atoms in a fragment. The list_of_indextuples contains all bondings from the molecule to the fragment. ``[(1,3), (2,4)]`` means for example that the fragment is connected over two bonds. The first bond is from atom 1 in the molecule to atom 3 in the fragment. The second bond is from atom 2 in the molecule to atom 4 in the fragment. Args: list_of_indextuples (list): give_only_index (bool): If ``True`` a set of indices is returned. Otherwise a new Cartesian instance. use_lookup (bool): Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: A set of indices or a new Cartesian instance.
codesearchnet
def absent(name, bridge=None):
    """Ensure that the named port does not exist, deleting it if necessary.

    If ``bridge`` is not set, the port is removed from whatever bridge
    contains it.

    Args:
        name: The name of the port.
        bridge: The name of the bridge.
    """
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    bridge_exists = False
    if bridge:
        bridge_exists = __salt__['openvswitch.bridge_exists'](bridge)
        port_list = __salt__['openvswitch.port_list'](bridge) if bridge_exists else ()
    else:
        # Without a bridge we cannot enumerate ports; assume the port exists.
        port_list = [name]
    comments = {
        'comment_bridge_notexists': 'Bridge {0} does not exist.'.format(bridge),
        'comment_port_notexists': 'Port {0} does not exist on bridge {1}.'.format(name, bridge),
        'comment_port_deleted': 'Port {0} deleted.'.format(name),
        'comment_port_notdeleted': 'Unable to delete port {0}.'.format(name),
        'changes_port_deleted': {name: {
            'old': 'Port named {0} may exist.'.format(name),
            'new': 'Deleted port {0}.'.format(name),
        }},
    }
    if __opts__['test']:
        # Dry-run: report what would happen without touching the system.
        if bridge and not bridge_exists:
            ret['result'] = None
            ret['comment'] = comments['comment_bridge_notexists']
        elif name not in port_list:
            ret['result'] = True
            ret['comment'] = comments['comment_port_notexists']
        else:
            ret['result'] = None
            ret['comment'] = comments['comment_port_deleted']
        return ret
    if bridge and not bridge_exists:
        ret['result'] = False
        ret['comment'] = comments['comment_bridge_notexists']
    elif name not in port_list:
        ret['result'] = True
        ret['comment'] = comments['comment_port_notexists']
    else:
        port_remove = __salt__['openvswitch.port_remove'](
            br=bridge if bridge else None, port=name)
        if port_remove:
            ret['result'] = True
            ret['comment'] = comments['comment_port_deleted']
            ret['changes'] = comments['changes_port_deleted']
        else:
            ret['result'] = False
            ret['comment'] = comments['comment_port_notdeleted']
    return ret
Ensures that the named port does not exist on the bridge, deleting it if present. If bridge is not set, the port is removed from whatever bridge contains it. Args: name: The name of the port. bridge: The name of the bridge.
codesearchnet
def highlight(__text: str, *, lexer: str='diff', formatter: str='terminal') -> str:
    """Syntax-highlight text using ``pygments``.

    Returns the text untouched when colour output is not enabled (stdout is
    not a terminal).

    Args:
        __text: Text to highlight.
        lexer: Pygments lexer name to use.
        formatter: Pygments formatter name to use.

    Returns:
        Syntax highlighted output, when possible.
    """
    if not sys.stdout.isatty():
        # No terminal attached: leave the text unmodified.
        return __text
    return pyg_highlight(
        __text, get_lexer_by_name(lexer), get_formatter_by_name(formatter))
Highlight text highlighted using ``pygments``. Returns text untouched if colour output is not enabled. See also: :pypi:`Pygments` Args: __text: Text to highlight lexer: Jinja lexer to use formatter: Jinja formatter to use Returns: Syntax highlighted output, when possible
codesearchnet
def build_backward_pass_step(get_transition_matrix_for_timestep):
    """Build a callable that performs one step of backward smoothing.

    Args:
        get_transition_matrix_for_timestep: callable taking a timestep as an
            integer `Tensor` argument, and returning a `LinearOperator` of
            shape `[latent_size, latent_size]`.

    Returns:
        backward_pass_step: a callable that updates a BackwardPassState from
        timestep `t` to `t-1`.
    """
    def backward_pass_step(state, filtered_parameters):
        """Run a single step of backward smoothing."""
        filtered_mean, filtered_cov, predicted_mean, predicted_cov = filtered_parameters
        transition_matrix = get_transition_matrix_for_timestep(state.timestep)
        posterior_mean, posterior_cov = backward_smoothing_update(
            filtered_mean, filtered_cov, predicted_mean, predicted_cov,
            state.backward_mean, state.backward_cov, transition_matrix)
        # Step backwards in time.
        return BackwardPassState(
            backward_mean=posterior_mean,
            backward_cov=posterior_cov,
            timestep=state.timestep - 1)
    return backward_pass_step
Build a callable that perform one step for backward smoothing. Args: get_transition_matrix_for_timestep: callable taking a timestep as an integer `Tensor` argument, and returning a `LinearOperator` of shape `[latent_size, latent_size]`. Returns: backward_pass_step: a callable that updates a BackwardPassState from timestep `t` to `t-1`.
codesearchnet
def create(self, resource, timeout=-1):
    """Creates a Golden Image resource from the deployed OS Volume.

    Args:
        resource (dict): Object to create.
        timeout: Timeout in seconds. Waits for task completion by default;
            the timeout does not abort the operation in OneView, it only
            stops waiting for its completion.

    Returns:
        dict: Golden Image created.
    """
    # Start from the defaults, letting the caller's values take precedence.
    payload = self.__default_values.copy()
    payload.update(resource)
    return self._client.create(payload, timeout=timeout)
Creates a Golden Image resource from the deployed OS Volume as per the attributes specified. Args: resource (dict): Object to create. timeout: Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation in OneView, it just stops waiting for its completion. Returns: dict: Golden Image created.
juraj-google-style
def get_urls_for_profiles(edx_video_id, profiles):
    """Return a dict mapping each requested profile to its URL.

    If the video or a profile is not found, the corresponding URL is None.

    Args:
        edx_video_id (str): id of the video.
        profiles (list): list of profiles to search for.

    Returns:
        (dict): profile-to-url pairs.
    """
    profiles_to_urls = dict.fromkeys(profiles)
    try:
        video_info = get_video_info(edx_video_id)
    except ValVideoNotFoundError:
        # Unknown video: every requested profile maps to None.
        return profiles_to_urls
    for encoded in video_info['encoded_videos']:
        if encoded['profile'] in profiles_to_urls:
            profiles_to_urls[encoded['profile']] = encoded['url']
    return profiles_to_urls
Returns a dict mapping profiles to URLs. If the profiles or video is not found, urls will be blank. Args: edx_video_id (str): id of the video profiles (list): list of profiles we want to search for Returns: (dict): A dict containing the profile to url pair
codesearchnet
def wrap_or_copy(cls, func, **options):
    """Return a new PhaseDescriptor from the given function or instance.

    A new copy is returned so a phase can be reused with different options,
    plugs, measurements, etc.

    Args:
        func: A phase function or PhaseDescriptor instance.
        **options: Options to update on the result.

    Raises:
        PhaseWrapError: if func is an openhtf.PhaseGroup.

    Returns:
        A new PhaseDescriptor object.
    """
    if isinstance(func, openhtf.PhaseGroup):
        raise PhaseWrapError(
            'Cannot wrap PhaseGroup <%s> as a phase.' % (func.name or 'Unnamed'))
    if isinstance(func, cls):
        descriptor = mutablerecords.CopyRecord(func)
    else:
        descriptor = cls(func)
    descriptor.options.update(**options)
    return descriptor
Return a new PhaseDescriptor from the given function or instance. We want to return a new copy so that you can reuse a phase with different options, plugs, measurements, etc. Args: func: A phase function or PhaseDescriptor instance. **options: Options to update on the result. Raises: PhaseWrapError: if func is a openhtf.PhaseGroup. Returns: A new PhaseDescriptor object.
codesearchnet
def csv_row_to_transaction(index, row, source_encoding='latin1', date_format='%d-%m-%Y', thousand_sep='.', decimal_sep=','):
    """Parse a row of strings into a ``Transaction`` object.

    Args:
        index: Index of this row in the original CSV file; used for sorting
            ``Transaction``s by order of appearance.
        row: Strings for [transfer_date, posted_date, message, money_amount,
            money_total].
        source_encoding: Encoding used to decode strings to UTF-8.
        date_format: Format of dates in this row.
        thousand_sep: Thousand separator in money amounts.
        decimal_sep: Decimal separator in money amounts.

    Returns:
        A ``Transaction`` object.
    """
    # NOTE(review): date_format, thousand_sep and decimal_sep are accepted
    # but not used here — the Parse helpers are called without them; confirm.
    xfer, posted, message, amount, total = row
    return Transaction(
        index,
        Parse.date(xfer),
        Parse.date(posted),
        Parse.to_utf8(message, source_encoding),
        Parse.money(amount),
        Parse.money(total))
Parses a row of strings to a ``Transaction`` object. Args: index: The index of this row in the original CSV file. Used for sorting ``Transaction``s by their order of appearance. row: The row containing strings for [transfer_date, posted_date, message, money_amount, money_total]. source_encoding: The encoding that will be used to decode strings to UTF-8. date_format: The format of dates in this row. thousand_sep: The thousand separator in money amounts. decimal_sep: The decimal separator in money amounts. Returns: A ``Transaction`` object.
codesearchnet
def from_bytes(b):
    """Extract the r and s components from a byte string.

    Args:
        b (bytes): A 64-byte long string. The first 32 bytes are the r
            component; the second 32 bytes are the s component.

    Returns:
        Signature: A Signature object.

    Raises:
        ValueError: If the signature is an incorrect length.
    """
    if len(b) != 64:
        raise ValueError("from_bytes: Signature length != 64.")
    # Both components are big-endian unsigned integers.
    r_component = int.from_bytes(b[:32], 'big')
    s_component = int.from_bytes(b[32:], 'big')
    return Signature(r_component, s_component)
Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length
juraj-google-style
def decode_offset_response(cls, response):
    """Decode an OffsetResponse into OffsetResponsePayloads.

    Arguments:
        response: OffsetResponse.

    Returns:
        list of OffsetResponsePayloads.
    """
    payloads = []
    for topic, partitions in response.topics:
        for partition, error, offsets in partitions:
            payloads.append(kafka.structs.OffsetResponsePayload(
                topic, partition, error, tuple(offsets)))
    return payloads
Decode OffsetResponse into OffsetResponsePayloads Arguments: response: OffsetResponse Returns: list of OffsetResponsePayloads
juraj-google-style
def resolve(node, source, context_filepath, context_lineno, context_col_offset):
    """Add origin information to an AST, based on the source it was loaded from.

    This allows mapping original source line numbers to generated code. The
    AST may be part of a larger context; the extra arguments indicate the
    location of the node in that original context.

    Args:
        node: gast.AST, the AST to annotate.
        source: Text, the source code representing node.
        context_filepath: Text.
        context_lineno: int.
        context_col_offset: int.
    """
    reader = io.StringIO(source)
    comments_map = {}
    try:
        # Collect the comment text (sans leading '#') for each source row.
        for tok_type, tok_string, (srow, _), _, _ in tokenize.generate_tokens(reader.readline):
            if tok_type == tokenize.COMMENT:
                comments_map[srow] = tok_string.strip()[1:].strip()
    except tokenize.TokenError:
        if isinstance(node, gast.Lambda):
            # Lambda sources often do not tokenize cleanly in isolation.
            pass
        else:
            raise
    source_lines = source.split('\n')
    visitor = OriginResolver(
        node, source_lines, comments_map,
        context_lineno, context_col_offset, context_filepath)
    visitor.visit(node)
Adds origin information to an AST, based on the source it was loaded from. This allows us to map the original source code line numbers to generated source code. Note: the AST may be a part of a larger context (e.g. a function is part of a module that may contain other things). However, this function does not assume the source argument contains the entire context, nor that it contains only code corresponding to node itself. However, it assumes that node was parsed from the given source code. For this reason, two extra arguments are required, and they indicate the location of the node in the original context. Args: node: gast.AST, the AST to annotate. source: Text, the source code representing node. context_filepath: Text context_lineno: int context_col_offset: int
github-repos
def default_update_stack(self, fqn, template, old_parameters, parameters, tags, stack_policy=None, **kwargs):
    """Update a CloudFormation stack in default mode.

    Args:
        fqn (str): Fully qualified name of the CloudFormation stack.
        template: Template object to use when updating the stack.
        old_parameters (list): Parameter list of the existing stack
            (unused here; kept for interface compatibility).
        parameters (list): Parameter list to apply to the stack.
        tags (list): Tags to apply to the stack.
        stack_policy: Template object representing a stack policy.

    Raises:
        exceptions.StackDidNotChange: when CloudFormation reports there is
            nothing to update.
    """
    logger.debug('Using default provider mode for %s.', fqn)
    args = generate_cloudformation_args(
        fqn, parameters, tags, template,
        service_role=self.service_role,
        stack_policy=stack_policy)
    try:
        self.cloudformation.update_stack(**args)
    except botocore.exceptions.ClientError as e:
        if 'No updates are to be performed.' in str(e):
            logger.debug('Stack %s did not change, not updating.', fqn)
            raise exceptions.StackDidNotChange
        if e.response['Error']['Message'] == 'TemplateURL must reference a valid S3 object to which you have access.':
            # Retry by uploading the template to S3 first.
            s3_fallback(fqn, template, parameters, tags,
                        self.cloudformation.update_stack, self.service_role)
        else:
            raise
Update a Cloudformation stack in default mode. Args: fqn (str): The fully qualified name of the Cloudformation stack. template (:class:`stacker.providers.base.Template`): A Template object to use when updating the stack. old_parameters (list): A list of dictionaries that defines the parameter list on the existing Cloudformation stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy.
codesearchnet
def add_filter(self, table, cols, condition):
    """Add a filter applied when reading rows from *table*.

    Rows in *table* will be filtered by filter_rows().

    Args:
        table: The table the filter applies to.
        cols: The columns in *table* to filter on; None means all.
        condition: The filter function.
    """
    if table is not None and table not in self.relations:
        raise ItsdbError('Cannot add filter; table "{}" is not defined '
                         'by the relations file.'
                         .format(table))
    # A None column list is normalized to the [None] wildcard.
    key_cols = [None] if cols is None else cols
    self.filters[table].append((key_cols, condition))
Add a filter. When reading *table*, rows in *table* will be filtered by filter_rows(). Args: table: The table the filter applies to. cols: The columns in *table* to filter on. condition: The filter function.
juraj-google-style
def _GetGradReduced(output_grad, output_subs, input_subs, input_shape, reduced_label_set):
    """Returns the gradient wrt input for a unary einsum with reductions.

    Args:
        output_grad: The gradient wrt the output of a unary einsum operation.
        output_subs: The output subscript. (E.g. `ac` for equation `abc->ac`).
        input_subs: The input subscript. (E.g. `abc` for equation `abc->ac`).
        input_shape: A `Tensor` representing the shape of the input operand.
        reduced_label_set: The set of axis labels appearing in `input_subs`
            but not in `output_subs`.
    """
    reduced_subs, reduced_dims, reduced_axes = _GetReducedSubscripts(reduced_label_set, input_shape, input_subs)
    # True if any label repeats within the input or output subscripts.
    has_repeated_labels = len(set(input_subs)) + len(set(output_subs)) < len(input_subs) + len(output_subs)
    input_subs_without_reduced_labels = ''.join([s for s in input_subs if s not in reduced_label_set])
    if not has_repeated_labels and input_subs_without_reduced_labels == output_subs:
        # Pure reduction (no repeats, no transpose): the gradient is simply
        # the output gradient broadcast back over the reduced axes.
        reduced_shape = math_ops.reduced_shape(input_shape, ops.convert_to_tensor(reduced_axes))
        return array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), input_shape)
    # General case: expand the output gradient across the reduced dimensions
    # and let einsum map it back into the input operand's layout.
    grad_shape_with_reduced_labels = array_ops.concat([reduced_dims, array_ops.shape(output_grad)], axis=0)
    reduced_shape = array_ops.concat([array_ops.ones(len(reduced_label_set), dtype=dtypes.int32), array_ops.shape(output_grad)], axis=0)
    broadcasted_grad = array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), grad_shape_with_reduced_labels)
    return gen_linalg_ops.einsum([broadcasted_grad], '{}->{}'.format(reduced_subs + output_subs, input_subs))
Returns the gradient wrt input for a unary einsum with reductions. Args: output_grad: The gradient wrt the output of a unary einsum operation. output_subs: The output subscript. (E.g. `ac` for equation `abc->ac`). input_subs: The input subscript. (E.g. `abc` for equation `abc->ac`). input_shape: A `Tensor` representing the shape of the input operand. reduced_label_set: The set of axis labels appearing in `input_subs` but not in `output_subs`.
github-repos
def _parse_exchange_token_response(content):
    """Parses response of an exchange token request.

    Most providers return JSON but some (e.g. Facebook) return a
    url-encoded string, so a fallback parser is used.

    Args:
        content: The body of a response (bytes or text).

    Returns:
        Content as a dictionary object. Note that the dict could be empty,
        i.e. {}. That basically indicates a failure.
    """
    resp = {}
    content = _helpers._from_bytes(content)
    try:
        resp = json.loads(content)
    except Exception:
        # Not JSON; assume a url-encoded body instead.
        resp = _helpers.parse_unique_urlencoded(content)
    if (resp and ('expires' in resp)):
        # Normalize the non-standard 'expires' key to OAuth2's 'expires_in'.
        resp['expires_in'] = resp.pop('expires')
    return resp
Parses response of an exchange token request. Most providers return JSON but some (e.g. Facebook) return a url-encoded string. Args: content: The body of a response Returns: Content as a dictionary object. Note that the dict could be empty, i.e. {}. That basically indicates a failure.
codesearchnet
def add(self, index, value):
    """Add a value to the series, flushing the averaged buffer periodically.

    Args:
        index (int): Index of the value.
        value (float): Value to record.
    """
    self.buf.append(value)
    elapsed = index - self.flush_at
    if elapsed < self.interval:
        return
    # Enough samples accumulated: emit the buffered mean and reset.
    averaged = np.mean(self.buf)
    if self.verbose:
        logger.info("iter={} {{{}}}={}".format(index, self.name, averaged))
    if self.fd is not None:
        print("{} {:g}".format(index, averaged), file=self.fd)
    self.flush_at = index
    self.buf = []
Add a value to the series. Args: index (int): Index. value (float): Value.
juraj-google-style
def download(branch=None, build=True, installdir="MalmoPlatform"):
    """Download Malmo from github and build (by default) the Minecraft Mod.

    Args:
        branch: optional branch to clone; defaults to the release version
            (malmo_version).
        build: build the Mod unless build arg is given as False.
        installdir: the install dir name. Defaults to MalmoPlatform.

    Returns:
        The path for the Malmo Minecraft mod (whatever setup() returns).
    """
    if branch is None:
        branch = malmo_version
    # NOTE(review): the clone URL literal below is truncated in this
    # extract ("https:" only) -- restore the full repository URL before use.
    subprocess.check_call(["git", "clone", "-b", branch, "https:
    return setup(build=build, installdir=installdir)
Download Malmo from github and build (by default) the Minecraft Mod. Example usage: import malmoenv.bootstrap; malmoenv.bootstrap.download() Args: branch: optional branch to clone. TODO Default is release version. build: build the Mod unless build arg is given as False. installdir: the install dir name. Defaults to MalmoPlatform. Returns: The path for the Malmo Minecraft mod.
juraj-google-style
def bandpass_filter(data: FLOATS_TYPE, sampling_freq_hz: float, lower_freq_hz: float, upper_freq_hz: float, numtaps: int) -> FLOATS_TYPE:
    """Apply an FIR band-pass filter to the data.

    Args:
        data: time series of the data
        sampling_freq_hz: sampling frequency, in Hz (or other consistent units)
        lower_freq_hz: filter cutoff lower frequency, same units
        upper_freq_hz: filter cutoff upper frequency, same units
        numtaps: number of filter taps (= filter order + 1)

    Returns:
        filtered data
    """
    # Express the cutoffs relative to the sampling rate (see
    # normalized_frequency for the exact convention).
    f1 = normalized_frequency(lower_freq_hz, sampling_freq_hz)
    f2 = normalized_frequency(upper_freq_hz, sampling_freq_hz)
    # pass_zero=False makes firwin design a band-pass rather than low-pass.
    coeffs = firwin(numtaps=numtaps, cutoff=[f1, f2], pass_zero=False)
    filtered_data = lfilter(b=coeffs, a=1.0, x=data)
    return filtered_data
Apply a band-pass filter to the data. Args: data: time series of the data sampling_freq_hz: sampling frequency :math:`f_s`, in Hz (or other consistent units) lower_freq_hz: filter cutoff lower frequency in Hz (or other consistent units) upper_freq_hz: filter cutoff upper frequency in Hz (or other consistent units) numtaps: number of filter taps Returns: filtered data Note: number of filter taps = filter order + 1
codesearchnet
def image_load(filename: str) -> tcod.image.Image:
    """Load an image file into an Image instance and return it.

    Args:
        filename (AnyStr): Path to a .bmp or .png image file.
    """
    # ffi.gc ties the lifetime of the C image to the returned Python object.
    return tcod.image.Image._from_cdata(ffi.gc(lib.TCOD_image_load(_bytes(filename)), lib.TCOD_image_delete))
Load an image file into an Image instance and return it. Args: filename (AnyStr): Path to a .bmp or .png image file.
codesearchnet
def read_worker_creds(key='credentials'):
    """Get credentials from CREDS_FILES or the environment.

    This looks at the CREDS_FILES in order, and falls back to the
    environment.

    Args:
        key (str, optional): each CREDS_FILE is a json dict. This key's
            value contains the credentials. Defaults to 'credentials'.

    Returns:
        dict: the credentials found. None if no credentials found.
    """
    for path in CREDS_FILES:
        if (not os.path.exists(path)):
            continue
        contents = load_json_or_yaml(path, is_path=True, exception=None)
        if contents.get(key):
            return contents[key]
    else:
        # for/else: reached only when no file returned a value above.
        # Fall back to credentials assembled from environment variables.
        if ((key == 'credentials') and os.environ.get('TASKCLUSTER_ACCESS_TOKEN') and os.environ.get('TASKCLUSTER_CLIENT_ID')):
            credentials = {'accessToken': os.environ['TASKCLUSTER_ACCESS_TOKEN'], 'clientId': os.environ['TASKCLUSTER_CLIENT_ID']}
            if os.environ.get('TASKCLUSTER_CERTIFICATE'):
                credentials['certificate'] = os.environ['TASKCLUSTER_CERTIFICATE']
            return credentials
Get credentials from CREDS_FILES or the environment. This looks at the CREDS_FILES in order, and falls back to the environment. Args: key (str, optional): each CREDS_FILE is a json dict. This key's value contains the credentials. Defaults to 'credentials'. Returns: dict: the credentials found. None if no credentials found.
codesearchnet
def encrypt_block(self, plainText):
    """Encrypt a 16-byte block of data.

    Args:
        plainText (str): exactly BLOCK_SIZE (16) bytes of data.

    Returns:
        16-byte str of ciphertext.

    Raises:
        TypeError: if this CamCrypt object has not been initialized.
        ValueError: if `plainText` is not BLOCK_SIZE bytes.
    """
    if (not self.initialized):
        raise TypeError('CamCrypt object has not been initialized')
    if (len(plainText) != BLOCK_SIZE):
        raise ValueError(('plainText must be %d bytes long (received %d bytes)' % (BLOCK_SIZE, len(plainText))))
    # Output buffer that the C routine fills with the ciphertext.
    cipher = ctypes.create_string_buffer(BLOCK_SIZE)
    self.encblock(self.bitlen, plainText, self.keytable, cipher)
    return cipher.raw
Encrypt a 16-byte block of data. NOTE: This function was formerly called `encrypt`, but was changed when support for encrypting arbitrary-length strings was added. Args: plainText (str): 16-byte data. Returns: 16-byte str. Raises: TypeError if CamCrypt object has not been initialized. ValueError if `plainText` is not BLOCK_SIZE (i.e. 16) bytes.
codesearchnet
def on_change(self, attr, *callbacks):
    """Add callbacks on this object to trigger when ``attr`` changes.

    Args:
        attr (str): an attribute name on this object
        *callbacks (callable): one or more callback functions to register

    Returns:
        None

    Raises:
        ValueError: if no callback was supplied.
    """
    if len(callbacks) == 0:
        raise ValueError("on_change takes an attribute name and one or more callbacks, got only one parameter")
    _callbacks = self._callbacks.setdefault(attr, [])
    for callback in callbacks:
        # Skip callbacks already registered for this attribute.
        if callback in _callbacks:
            continue
        # Validate that the callback accepts (attr, old, new) before adding.
        _check_callback(callback, ('attr', 'old', 'new'))
        _callbacks.append(callback)
Add a callback on this object to trigger when ``attr`` changes. Args: attr (str) : an attribute name on this object callback (callable) : a callback function to register Returns: None
juraj-google-style
def push_datapackage(descriptor, backend, **backend_options):
    """Push Data Package to storage.

    All parameters should be used as keyword arguments.

    Args:
        descriptor (str): path to descriptor
        backend (str): backend name like `sql` or `bigquery`
        backend_options (dict): backend options mentioned in backend docs

    Returns:
        The backend storage instance the data was written to.
    """
    warnings.warn(
        'Functions "push/pull_datapackage" are deprecated. '
        'Please use "Package" class',
        UserWarning)

    # Collect tables, schemas, and row generators from tabular resources.
    tables = []
    schemas = []
    datamap = {}
    mapping = {}
    model = Package(descriptor)
    plugin = import_module('jsontableschema.plugins.%s' % backend)
    storage = plugin.Storage(**backend_options)
    for resource in model.resources:
        if not resource.tabular:
            continue
        name = resource.descriptor.get('name', None)
        table = _convert_path(resource.descriptor['path'], name)
        schema = resource.descriptor['schema']
        data = resource.table.iter(keyed=True)

        # Yield rows as tuples ordered by the schema's field order.
        def values(schema, data):
            for item in data:
                row = []
                for field in schema['fields']:
                    row.append(item.get(field['name'], None))
                yield tuple(row)

        tables.append(table)
        schemas.append(schema)
        datamap[table] = values(schema, data)
        if name is not None:
            mapping[name] = table
    schemas = _convert_schemas(mapping, schemas)

    # Recreate the target buckets, then write the collected rows.
    for table in tables:
        if table in storage.buckets:
            storage.delete(table)
    storage.create(tables, schemas)
    for table in storage.buckets:
        if table in datamap:
            storage.write(table, datamap[table])

    return storage
Push Data Package to storage. All parameters should be used as keyword arguments. Args: descriptor (str): path to descriptor backend (str): backend name like `sql` or `bigquery` backend_options (dict): backend options mentioned in backend docs
juraj-google-style
def List(device, device_path):
    """Yields the lines of a long-format ('ls -l'-style) directory listing.

    Args:
        device: device object exposing a List(path) API returning file
            entries with filename/size/mode/mtime attributes.
        device_path: Directory to list.

    Yields:
        str: one formatted line per file, newline-terminated.
    """
    files = device.List(device_path)
    files.sort(key=(lambda x: x.filename))
    # Column widths used to align names and sizes across all rows.
    maxname = max((len(f.filename) for f in files))
    maxsize = max((len(str(f.size)) for f in files))
    for f in files:
        # Build the 'drwxrwxrwx'-style permission string, one flag at a time.
        mode = (((((((((('d' if stat.S_ISDIR(f.mode) else '-') + ('r' if (f.mode & stat.S_IRUSR) else '-')) + ('w' if (f.mode & stat.S_IWUSR) else '-')) + ('x' if (f.mode & stat.S_IXUSR) else '-')) + ('r' if (f.mode & stat.S_IRGRP) else '-')) + ('w' if (f.mode & stat.S_IWGRP) else '-')) + ('x' if (f.mode & stat.S_IXGRP) else '-')) + ('r' if (f.mode & stat.S_IROTH) else '-')) + ('w' if (f.mode & stat.S_IWOTH) else '-')) + ('x' if (f.mode & stat.S_IXOTH) else '-'))
        t = time.gmtime(f.mtime)
        (yield ('%s %*d %04d-%02d-%02d %02d:%02d:%02d %-*s\n' % (mode, maxsize, f.size, t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, maxname, f.filename)))
Prints a directory listing.

    Args:
      device: The device whose filesystem is listed.
      device_path: Directory to list.
codesearchnet
def shuffle(self, func, lengths, **kwargs):
    """Shuffle the order of the data in this axis based on `lengths`.

    Extends `BaseFrameAxisPartition.shuffle`.

    Args:
        func: The function to apply before splitting.
        lengths: The list of partition lengths to split the result into.

    Returns:
        A list of RemotePartition objects split by `lengths`.
    """
    num_splits = len(lengths)
    # Request a manual partition using the precomputed lengths.
    kwargs["manual_partition"] = True
    kwargs["_lengths"] = lengths
    args = [self.axis, func, num_splits, kwargs, False]
    args.extend(self.list_of_blocks)
    return self._wrap_partitions(self.deploy_axis_func(*args))
Shuffle the order of the data in this axis based on the `lengths`. Extends `BaseFrameAxisPartition.shuffle`. Args: func: The function to apply before splitting. lengths: The list of partition lengths to split the result into. Returns: A list of RemotePartition objects split by `lengths`.
juraj-google-style
def create_config(config_path="scriptworker.yaml"):
    """Create a config from DEFAULT_CONFIG, arguments, and config file.

    Then validate it and freeze it.

    Args:
        config_path (str, optional): the path to the config file.
            Defaults to "scriptworker.yaml".

    Returns:
        tuple: (config frozendict, credentials dict)

    Raises:
        SystemExit: on a missing config file or an invalid resulting
            config.
    """
    if not os.path.exists(config_path):
        print("{} doesn't exist! Exiting...".format(config_path), file=sys.stderr)
        sys.exit(1)
    with open(config_path, "r", encoding="utf-8") as fh:
        secrets = safe_load(fh)
    config = dict(deepcopy(DEFAULT_CONFIG))
    if not secrets.get("credentials"):
        # No credentials in the config file: fall back to creds files /
        # environment variables.
        secrets['credentials'] = read_worker_creds()
    config.update(secrets)
    apply_product_config(config)
    messages = check_config(config, config_path)
    if messages:
        print('\n'.join(messages), file=sys.stderr)
        print("Exiting...", file=sys.stderr)
        sys.exit(1)
    credentials = get_frozen_copy(secrets['credentials'])
    # Keep credentials separate from the returned config.
    del(config['credentials'])
    config = get_frozen_copy(config)
    return config, credentials
Create a config from DEFAULT_CONFIG, arguments, and config file. Then validate it and freeze it. Args: config_path (str, optional): the path to the config file. Defaults to "scriptworker.yaml" Returns: tuple: (config frozendict, credentials dict) Raises: SystemExit: on failure
juraj-google-style
def FormatProblem(self, d=None):
    """Return a text string describing the problem.

    Args:
        d: optional mapping used to interpolate the class's ERROR_TEXT
            template; when falsy, GetDictToFormat() supplies the values.
    """
    values = d or self.GetDictToFormat()
    base_text = self.__class__.ERROR_TEXT % values
    if ('reason' in values) and values['reason']:
        # Append the free-form reason on its own line when present.
        return '%s\n%s' % (base_text, values['reason'])
    return base_text
Return a text string describing the problem. Args: d: map returned by GetDictToFormat with with formatting added
juraj-google-style
def get_electron_number(self, charge=0):
    """Return the number of electrons in the molecule.

    Args:
        charge (int): Net charge, subtracted from the summed atomic
            numbers.

    Returns:
        int: number of electrons.
    """
    atomic_numbers = constants.elements['atomic_number'].to_dict()
    total = sum(atomic_numbers[symbol] for symbol in self['atom'])
    return total - charge
Return the number of electrons. Args: charge (int): Charge of the molecule. Returns: int:
juraj-google-style
def _build(self, *args): net = args if not self._layers: if len(args) == 1: return args[0] else: return args for layer in self._layers: if isinstance(net, tuple): net = layer(*net) else: net = layer(net) return net
Connects the Sequential module into the graph. Args: *args: A tuple of inputs, to be unpacked as the arguments to the first layer. Returns: The output value of the last layer.
juraj-google-style
def IsErrorSuppressedByNolint(category, linenum):
    """Returns true if the specified error category is suppressed on this line.

    Consults the global error_suppressions map populated by
    ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.

    Args:
        category: str, the category of the error.
        linenum: int, the current line number.

    Returns:
        bool, True iff the error should be suppressed due to a NOLINT
        comment or global suppression.
    """
    if _global_error_suppressions.get(category, False):
        return True
    if linenum in _error_suppressions.get(category, set()):
        return True
    # A None key holds line suppressions that apply to every category.
    return linenum in _error_suppressions.get(None, set())
Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment or global suppression.
codesearchnet
def load_hpo_bulk(self, hpo_bulk):
    """Insert a batch of HPO term documents.

    Args:
        hpo_bulk (list(scout.models.HpoTerm)): documents to insert.

    Returns:
        result: pymongo bulk-write result.

    Raises:
        IntegrityError: on duplicate keys or other bulk-write failures.
    """
    LOG.debug("Loading hpo bulk")
    try:
        result = self.hpo_term_collection.insert_many(hpo_bulk)
    except (DuplicateKeyError, BulkWriteError) as err:
        # Re-raise pymongo failures as the adapter's own integrity error.
        raise IntegrityError(err)
    return result
Add a hpo object Arguments: hpo_bulk(list(scout.models.HpoTerm)) Returns: result: pymongo bulkwrite result
juraj-google-style
def get_h_product(self, vector, dtype=None):
    """Function that provides matrix product interface with PSD matrix.

    Args:
        vector: the vector to be multiplied with matrix H
        dtype: optional output dtype; defaults to self.nn_dtype.

    Returns:
        result_product: Matrix product of H and vector
    """
    if dtype is None:
        dtype = self.nn_dtype
    # Compute in the network's dtype; cast back to the requested dtype at
    # the end.
    beta = tf.cast(vector, self.nn_dtype)
    h_beta_rows = []
    for i in range(self.nn_params.num_hidden_layers):
        # gamma/delta are the slices of beta for layers i and i+1
        # (self.dual_index holds the per-layer offsets).
        gamma = beta[self.dual_index[i]:self.dual_index[i + 1]]
        delta = beta[self.dual_index[i + 1]:self.dual_index[i + 2]]
        if i == 0:
            h_beta_rows.append(
                tf.multiply(2 * self.lambda_lu[i], gamma) -
                self.nn_params.forward_pass(
                    tf.multiply(self.lambda_quad[i + 1], delta),
                    i, is_transpose=True))
        else:
            # Accumulate into the row started by the previous iteration.
            h_beta_rows[i] = (h_beta_rows[i] +
                              tf.multiply(self.lambda_quad[i] + self.lambda_lu[i], gamma) -
                              self.nn_params.forward_pass(
                                  tf.multiply(self.lambda_quad[i+1], delta),
                                  i, is_transpose=True))
        new_row = (
            tf.multiply(self.lambda_quad[i + 1] + self.lambda_lu[i + 1], delta) -
            tf.multiply(self.lambda_quad[i + 1], self.nn_params.forward_pass(gamma, i)))
        h_beta_rows.append(new_row)
    # Final row gets an extra diagonal contribution from the last layer's
    # multipliers (delta here is the slice from the last loop iteration).
    h_beta_rows[self.nn_params.num_hidden_layers] = (
        h_beta_rows[self.nn_params.num_hidden_layers] +
        tf.multiply((self.lambda_quad[self.nn_params.num_hidden_layers] +
                     self.lambda_lu[self.nn_params.num_hidden_layers]),
                    delta))
    h_beta = tf.concat(h_beta_rows, axis=0)
    return tf.cast(h_beta, dtype)
Function that provides matrix product interface with PSD matrix. Args: vector: the vector to be multiplied with matrix H Returns: result_product: Matrix product of H and vector
juraj-google-style
def exists(self, url: str) -> bool:
    """Checks existence of url in HDFS.

    Args:
        url: String in the form hdfs://...

    Returns:
        True if url exists as a file or directory in HDFS.
    """
    # Only the path component of the URL matters for the check.
    return self._exists(self._parse_url(url)[1])
Checks existence of url in HDFS. Args: url: String in the form hdfs://... Returns: True if url exists as a file or directory in HDFS.
github-repos
def animation(frame_function: types.FrameFunction) -> types.Animation:
    """Turn a FrameFunction into an Animation.

    Args:
        frame_function: A function that returns a FrameGenerator.

    Returns:
        an Animation decorator function.
    """
    animation_ = core.Animation(frame_function)

    @functools.wraps(frame_function)
    def wrapper(*args, **kwargs):
        # Delegate calls to the Animation while preserving the wrapped
        # function's metadata.
        return animation_(*args, **kwargs)
    return wrapper
Turn a FrameFunction into an Animation. Args: frame_function: A function that returns a FrameGenerator. Returns: an Animation decorator function.
juraj-google-style
def save_dataframes(self, outdir, prefix='df_'):
    """Save all attributes starting with *prefix* as CSVs in a directory.

    Args:
        outdir (str): Path to output directory
        prefix (str): Prefix that dataframe attributes start with

    Raises:
        TypeError: if a matching attribute is not a pandas DataFrame.
    """
    dfs = list(filter(lambda x: x.startswith(prefix), dir(self)))
    counter = 0
    for df in dfs:
        outpath = ssbio.utils.outfile_maker(inname=df, outext='.csv', outdir=outdir)
        my_df = getattr(self, df)
        if not isinstance(my_df, pd.DataFrame):
            raise TypeError('{}: object is not a Pandas DataFrame'.format(df))

        # Skip empty frames rather than writing empty files.
        if my_df.empty:
            log.debug('{}: empty dataframe, not saving'.format(df))
        else:
            my_df.to_csv(outpath)
            log.debug('{}: saved dataframe'.format(outpath))
            counter += 1
    log.debug('Saved {} dataframes at {}'.format(counter, outdir))
Save all attributes that start with "df" into a specified directory. Args: outdir (str): Path to output directory prefix (str): Prefix that dataframe attributes start with
juraj-google-style
def from_files(cls, secrets=None, storage=None, scopes=None, no_webserver=False):
    """Return a spreadsheet collection making OAuth 2.0 credentials.

    Args:
        secrets (str): location of secrets file
        storage (str): location of storage file
        scopes: scope URL(s) or ``'read'`` or ``'write'``
        no_webserver (bool): URL/code prompt instead of webbrowser auth

    Returns:
        Sheets: new Sheets instance with OAuth 2.0 credentials
    """
    creds = oauth2.get_credentials(scopes, secrets, storage, no_webserver)
    return cls(creds)
Return a spreadsheet collection making OAauth 2.0 credentials. Args: secrets (str): location of secrets file (default: ``%r``) storage (str): location of storage file (default: ``%r``) scopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``) no_webserver (bool): URL/code prompt instead of webbrowser auth Returns: Sheets: new Sheets instance with OAauth 2.0 credentials
codesearchnet
def camelcase(string):
    """Convert string into camel case.

    Args:
        string: String to convert.

    Returns:
        string: Camel case string.
    """
    # Drop a single leading separator (-, _ or .), if any.
    stripped = re.sub(r"^[\-_\.]", '', str(string))
    if not stripped:
        return stripped

    def _capitalize(matched):
        return uppercase(matched.group(1))

    # Uppercase each letter that follows a separator, dropping the separator.
    tail = re.sub(r"[\-_\.\s]([a-z])", _capitalize, stripped[1:])
    return lowercase(stripped[0]) + tail
Convert string into camel case. Args: string: String to convert. Returns: string: Camel case string.
juraj-google-style
def comments_2(self, value=None):
    """Corresponds to IDD Field `comments_2`.

    Args:
        value (str): value for IDD Field `comments_2`. If `value` is
            None it is stored unchecked as a missing value.

    Raises:
        ValueError: if `value` cannot be converted to str or contains
            a comma.
    """
    if value is None:
        self._comments_2 = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str for field `comments_2`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma for field `comments_2`')
    self._comments_2 = value
Corresponds to IDD Field `comments_2` Args: value (str): value for IDD Field `comments_2` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def __init__(self, job_config: JobConfig, job: Dict, engine: Engine) -> None:
    """A job submitted to the engine.

    Args:
        job_config: The JobConfig used to create the job.
        job: A full Job Dict.
        engine: Engine connected to the job.
    """
    self.job_config = job_config
    self._job = job
    self._engine = engine
    # job['name'] is of the form '<program resource name>/jobs/<job id>'.
    self.job_resource_name = job['name']
    self.program_resource_name = self.job_resource_name.split('/jobs')[0]
    # Results are not fetched here; presumably populated later on demand.
    self._results = None
A job submitted to the engine. Args: job_config: The JobConfig used to create the job. job: A full Job Dict. engine: Engine connected to the job.
juraj-google-style
def image_to_tf_summary_value(image, tag):
    """Converts a NumPy image to a tf.Summary.Value object.

    Args:
        image: 3-D NumPy array (height, width, channels).
        tag: name for tf.Summary.Value for display in tensorboard.

    Returns:
        image_summary: A tf.Summary.Value object.
    """
    curr_image = np.asarray(image, dtype=np.uint8)
    (height, width, n_channels) = curr_image.shape
    # Single-channel images are encoded without the trailing channel axis.
    if (n_channels == 1):
        curr_image = np.reshape(curr_image, [height, width])
    # Encode the pixel data as PNG via matplotlib.
    s = io.BytesIO()
    matplotlib_pyplot().imsave(s, curr_image, format='png')
    img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=height, width=width, colorspace=n_channels)
    return tf.Summary.Value(tag=tag, image=img_sum)
Converts a NumPy image to a tf.Summary.Value object. Args: image: 3-D NumPy array. tag: name for tf.Summary.Value for display in tensorboard. Returns: image_summary: A tf.Summary.Value object.
codesearchnet
def compute_covariance(L_aug, Y, k, p):
    """Given label matrix L_aug and labels Y, compute the covariance.

    Args:
        L_aug: (np.array {0,1}) [n, d] The augmented (indicator) label matrix
        Y: (np.array int) [n] The true labels in {1,...,k}
        k: (int) Cardinality
        p: (np.array float) [k] The class balance
    """
    n, d = L_aug.shape  # d unused; unpacking also checks L_aug is 2-D
    assert Y.shape[0] == n
    mu = compute_mu(L_aug, Y, k, p)
    second_moment = (L_aug.T @ L_aug) / n
    return second_moment - mu @ np.diag(p) @ mu.T
Given label matrix L_aug and labels Y, compute the covariance. Args: L: (np.array {0,1}) [n, d] The augmented (indicator) label matrix Y: (np.array int) [n] The true labels in {1,...,k} k: (int) Cardinality p: (np.array float) [k] The class balance
juraj-google-style
def kill(self, exit_code: Any = None):
    """Stops the behaviour.

    Args:
        exit_code (object, optional): the exit code of the behaviour
            (Default value = None)
    """
    message = 'Killing behavior {0} with exit code: {1}'.format(self, exit_code)
    self._force_kill.set()
    if exit_code is not None:
        self._exit_code = exit_code
    logger.info(message)
Stops the behaviour Args: exit_code (object, optional): the exit code of the behaviour (Default value = None)
codesearchnet
def title_of_design_condition(self, value=None):
    """Corresponds to IDD Field `title_of_design_condition`.

    Args:
        value (str): value for IDD Field `title_of_design_condition`.
            If `value` is None it is stored unchecked as a missing value.

    Raises:
        ValueError: if `value` cannot be converted to str or contains
            a comma.
    """
    if value is None:
        self._title_of_design_condition = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str for field `title_of_design_condition`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma for field `title_of_design_condition`')
    self._title_of_design_condition = value
Corresponds to IDD Field `title_of_design_condition` Args: value (str): value for IDD Field `title_of_design_condition` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def _sync_content_metadata(self, serialized_data, http_method):
    """Synchronize content metadata using the Degreed course content API.

    Args:
        serialized_data: JSON-encoded object containing content metadata.
        http_method: Name of the HTTP method to use; dispatched to the
            matching private helper (e.g. 'post' -> self._post).

    Raises:
        ClientError: If the Degreed API request fails or returns a
            status code >= 400.
    """
    try:
        # Dispatch to the private helper named after the HTTP method.
        status_code, response_body = getattr(self, '_' + http_method)(
            urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path),
            serialized_data,
            self.CONTENT_PROVIDER_SCOPE
        )
    except requests.exceptions.RequestException as exc:
        # Wrap transport-level failures in the client's own error type.
        raise ClientError(
            'DegreedAPIClient request failed: {error} {message}'.format(
                error=exc.__class__.__name__,
                message=str(exc)
            )
        )
    if status_code >= 400:
        raise ClientError(
            'DegreedAPIClient request failed with status {status_code}: {message}'.format(
                status_code=status_code,
                message=response_body
            )
        )
Synchronize content metadata using the Degreed course content API. Args: serialized_data: JSON-encoded object containing content metadata. http_method: The HTTP method to use for the API request. Raises: ClientError: If Degreed API request fails.
juraj-google-style
def _generate_legacy_type_checks(types=()):
    """Generate newer-style type checks out of JSON-type-name-to-type mappings.

    Arguments:
        types (dict): A mapping of type names to their Python types

    Returns:
        A dictionary of definitions to pass to `TypeChecker`
    """
    types = dict(types)

    def gen_type_check(pytypes):
        pytypes = _utils.flatten(pytypes)

        def type_check(checker, instance):
            # bool is a subclass of int in Python, so booleans would
            # otherwise pass an int check; only accept them when bool is
            # explicitly among the allowed Python types.
            if isinstance(instance, bool):
                if (bool not in pytypes):
                    return False
            return isinstance(instance, pytypes)
        return type_check
    definitions = {}
    for (typename, pytypes) in iteritems(types):
        definitions[typename] = gen_type_check(pytypes)
    return definitions
Generate newer-style type checks out of JSON-type-name-to-type mappings. Arguments: types (dict): A mapping of type names to their Python types Returns: A dictionary of definitions to pass to `TypeChecker`
codesearchnet
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    """Create a token-type-id mask for a sequence or sequence pair.

    BARTPho does not make use of token type ids, therefore a list of
    zeros (one per position of the built sequence) is returned.

    Args:
        token_ids_0 (`List[int]`): List of IDs.
        token_ids_1 (`List[int]`, *optional*): Optional second list of
            IDs for sequence pairs.

    Returns:
        `List[int]`: List of zeros.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        sequence = cls + token_ids_0 + sep
    else:
        sequence = cls + token_ids_0 + sep + sep + token_ids_1 + sep
    return [0] * len(sequence)
Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
def _call_method_from_namespace(obj, method_name, namespace):
    """Call the method, retrieved from obj, with arguments from a namespace.

    Args:
        obj: any kind of object
        method_name: method to be called
        namespace: an argparse.Namespace object containing parsed command
            line arguments

    Returns:
        The method's return value; for "__init__", a new instance.
    """
    method = getattr(obj, method_name)
    method_parser = method.parser
    arg_names = _get_args_name_from_parser(method_parser)
    # __init__ must be invoked through the class itself so that a new
    # instance gets constructed.
    if method_name == "__init__":
        return _call(obj, arg_names, namespace)
    return _call(method, arg_names, namespace)
Call the method, retrieved from obj, with the correct arguments via the namespace Args: obj: any kind of object method_name: method to be called namespace: an argparse.Namespace object containing parsed command line arguments
juraj-google-style
def keras_model_summary(name, data, step=None):
    """Writes a Keras model as JSON to as a Summary.

    Writing the Keras model configuration allows the TensorBoard graph
    plugin to render a conceptual graph, as opposed to graph of ops. In
    case the model fails to serialize as JSON, it ignores and returns
    False.

    Args:
        name: A name for this summary.
        data: A Keras Model to write.
        step: Explicit `int64`-castable monotonic step value for this
            summary. If omitted, defaults to
            `tf.summary.experimental.get_step()`, which must not be None.

    Returns:
        True on success, or False if no summary was written.
    """
    summary_metadata = summary_pb2.SummaryMetadata()
    # Identify the payload for the TensorBoard graph plugin.
    summary_metadata.plugin_data.plugin_name = 'graph_keras_model'
    summary_metadata.plugin_data.content = b'1'
    try:
        json_string = data.to_json()
    except Exception as exc:
        # Serialization is best-effort: warn and bail out instead of failing.
        logging.warning('Model failed to serialize as JSON. Ignoring... %s', exc)
        return False
    with summary_ops_v2.summary_scope(name, 'graph_keras_model', [data, step]) as (tag, _):
        # Place the string constant on CPU to avoid device placement issues.
        with ops.device('cpu:0'):
            tensor = constant_op.constant(json_string, dtype=dtypes.string)
            return summary_ops_v2.write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
Writes a Keras model as JSON to as a Summary. Writing the Keras model configuration allows the TensorBoard graph plugin to render a conceptual graph, as opposed to graph of ops. In case the model fails to serialize as JSON, it ignores and returns False. Args: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A Keras Model to write. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. Returns: True on success, or False if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
github-repos
def _create_handler(self, config): if config is None: raise ValueError('No handler config to create handler from.') if 'name' not in config: raise ValueError('Handler name is required.') handler_name = config['name'] module_name = handler_name.rsplit('.', 1)[0] class_name = handler_name.rsplit('.', 1)[-1] module = import_module(module_name) handler_class = getattr(module, class_name) instance = handler_class(**config) return instance
Creates a handler from its config. Params: config: handler config Returns: handler instance
juraj-google-style
def create_object(self, obj_type, payload, return_fields=None):
    """Create an Infoblox object of type 'obj_type'.

    Args:
        obj_type (str): Infoblox object type, e.g. 'network', 'range', etc.
        payload (dict): Payload with data to send
        return_fields (list): List of fields to be returned

    Returns:
        The object reference of the newly created object

    Raises:
        InfobloxException
    """
    self._validate_obj_type_or_die(obj_type)
    query_params = self._build_query_params(return_fields=return_fields)
    url = self._construct_url(obj_type, query_params)
    opts = self._get_request_options(data=payload)
    self._log_request('post', url, opts)
    # When a session cookie is present, drop basic auth for this request.
    if self.session.cookies:
        self.session.auth = None
    r = self.session.post(url, **opts)
    self._validate_authorized(r)
    if (r.status_code != requests.codes.CREATED):
        response = utils.safe_json_load(r.content)
        already_assigned = 'is assigned to another network view'
        # Distinguish "member already assigned" from generic creation failures.
        if (response and (already_assigned in response.get('text'))):
            exception = ib_ex.InfobloxMemberAlreadyAssigned
        else:
            exception = ib_ex.InfobloxCannotCreateObject
        raise exception(response=response, obj_type=obj_type, content=r.content, args=payload, code=r.status_code)
    return self._parse_reply(r)
Create an Infoblox object of type 'obj_type' Args: obj_type (str): Infoblox object type, e.g. 'network', 'range', etc. payload (dict): Payload with data to send return_fields (list): List of fields to be returned Returns: The object reference of the newly create object Raises: InfobloxException
codesearchnet
def add(clss, func, deprecated_name):
    """Add the deprecated version of a member function to the given class.

    Gives a deprecation warning on usage (via Deprecator), then delegates
    to the actual function.

    Args:
        clss: the class where the deprecated function is to be added
        func: the actual function that is called by the deprecated version
        deprecated_name: the deprecated name of the function
    """
    @Deprecator(func.__name__, deprecated_name)
    def _old_function(*args, **kwargs):
        return func(*args, **kwargs)
    setattr(clss, deprecated_name, _old_function)
Add the deprecated version of a member function to the given class. Gives a deprecation warning on usage. Args: clss: the class where the deprecated function is to be added func: the actual function that is called by the deprecated version deprecated_name: the deprecated name of the function
juraj-google-style
def _serve_image_metadata(self, request):
    """Given a tag and run, serve a list of metadata for images.

    Note that the images themselves are not sent; the response contains
    URLs to them. The frontend should treat these URLs as opaque and
    should not try to parse or generate them itself, as the format may
    change.

    Args:
        request: A werkzeug.wrappers.Request object with 'tag', 'run'
            and optional 'sample' query parameters.

    Returns:
        A werkzeug Response application with an 'application/json' body.
    """
    tag = request.args.get('tag')
    run = request.args.get('run')
    sample = int(request.args.get('sample', 0))
    response = self._image_response_for_run(run, tag, sample)
    return http_util.Respond(request, response, 'application/json')
Given a tag and list of runs, serve a list of metadata for images. Note that the images themselves are not sent; instead, we respond with URLs to the images. The frontend should treat these URLs as opaque and should not try to parse information about them or generate them itself, as the format may change. Args: request: A werkzeug.wrappers.Request object. Returns: A werkzeug.Response application.
codesearchnet
def learn(self, initial_state_key, limit=1000, game_n=1):
    """Multi-Agent Learning. Override.

    Runs `game_n` games. Within each game, every agent in
    `self.q_learning_list` takes a turn acting and updating its Q-values
    until `limit` steps are reached or every agent's end flag is set.

    Args:
        initial_state_key: Initial state.
        limit: Limit of the number of learning steps per game.
        game_n: The number of games.
    """
    end_flag_list = [False] * len(self.q_learning_list)
    for game in range(game_n):
        state_key = copy.copy(initial_state_key)
        self.t = 1
        while self.t <= limit:
            for i in range(len(self.q_learning_list)):
                # Record visited (agent, state) pairs on the final game only.
                if game + 1 == game_n:
                    self.state_key_list.append((i, copy.copy(state_key)))
                self.q_learning_list[i].t = self.t
                next_action_list = self.q_learning_list[i].extract_possible_actions(state_key)
                if len(next_action_list):
                    action_key = self.q_learning_list[i].select_action(
                        state_key=state_key,
                        next_action_list=next_action_list
                    )
                    reward_value = self.q_learning_list[i].observe_reward_value(state_key, action_key)

                    if self.q_learning_list[i].check_the_end_flag(state_key) is True:
                        end_flag_list[i] = True

                    next_state_key = self.q_learning_list[i].update_state(
                        state_key=state_key,
                        action_key=action_key
                    )

                    next_next_action_list = self.q_learning_list[i].extract_possible_actions(next_state_key)
                    if len(next_next_action_list):
                        next_action_key = self.q_learning_list[i].predict_next_action(
                            next_state_key,
                            next_next_action_list
                        )
                        next_max_q = self.q_learning_list[i].extract_q_df(next_state_key, next_action_key)

                        # Q-learning backup using the successor's predicted Q.
                        self.q_learning_list[i].update_q(
                            state_key=state_key,
                            action_key=action_key,
                            reward_value=reward_value,
                            next_max_q=next_max_q
                        )

                    state_key = next_state_key

            self.t += 1
            self.q_learning_list[i].t = self.t
            # Stop the game once every agent has signalled its end flag.
            if False not in end_flag_list:
                break
Multi-Agent Learning. Override. Args: initial_state_key: Initial state. limit: Limit of the number of learning. game_n: The number of games.
juraj-google-style
def color_scale_HSV(c: Color, scoef: float, vcoef: float) -> None:
    """Scale a color's saturation and value.

    Does not return a new Color. ``c`` is modified inplace.

    Args:
        c (Union[Color, List[int]]): A Color instance, or an [r, g, b] list.
        scoef (float): Saturation multiplier, from 0 to 1. Use 1 to keep
            current saturation.
        vcoef (float): Value multiplier, from 0 to 1. Use 1 to keep
            current value.
    """
    # Copy into a C struct, let libtcod do the scaling, then write back.
    color_p = ffi.new("TCOD_color_t*")
    color_p.r, color_p.g, color_p.b = c.r, c.g, c.b
    lib.TCOD_color_scale_HSV(color_p, scoef, vcoef)
    c[:] = color_p.r, color_p.g, color_p.b
Scale a color's saturation and value. Does not return a new Color. ``c`` is modified inplace. Args: c (Union[Color, List[int]]): A Color instance, or an [r, g, b] list. scoef (float): Saturation multiplier, from 0 to 1. Use 1 to keep current saturation. vcoef (float): Value multiplier, from 0 to 1. Use 1 to keep current value.
juraj-google-style
def download_to_tempfile(url, file_name=None, extension=None):
    """Downloads a URL's contents to a tempfile.

    This is useful for testing downloads. It will download the contents
    of a URL to a tempfile, which you then can open and use to validate
    the downloaded contents.

    Args:
        url (str): URL of the contents to download.

    Kwargs:
        file_name (str): Name of file.
        extension (str): Extension to use.

    Returns:
        str - path to the temp file.
    """
    if not file_name:
        file_name = generate_timestamped_string("wtf_temp_file")

    if extension:
        file_path = temp_path(file_name + extension)
    else:
        # Preserve any extension already present on file_name.
        # (Explicit match check replaces the previous bare try/except.)
        match = re.search(u"\\.\\w+$", file_name)
        ext = match.group(0) if match else ""
        file_path = temp_path(file_name + ext)

    web_file = urllib.urlopen(url)
    try:
        # Write in binary mode: the payload may be non-text (images,
        # archives, ...); text mode would corrupt it.
        local_file = open(file_path, 'wb')
        try:
            local_file.write(web_file.read())
        finally:
            local_file.close()
    finally:
        web_file.close()

    return file_path
Downloads a URL contents to a tempfile. This is useful for testing downloads. It will download the contents of a URL to a tempfile, which you then can open and use to validate the downloaded contents. Args: url (str) : URL of the contents to download. Kwargs: file_name (str): Name of file. extension (str): Extension to use. Return: str - Returns path to the temp file.
juraj-google-style
def _hdu_on_disk(self, hdulist_index):
    """Write one HDU to a temporary FITS file and return its filename.

    IRAF routines such as daophot need input on disk.

    Args:
        hdulist_index: index of the HDU within self.hdulist to write out.

    Returns:
        filename: str -- the name of the file containing the FITS data.
    """
    if (self._tempfile is None):
        # Lazily create one temp file and reuse its path on later calls.
        self._tempfile = tempfile.NamedTemporaryFile(mode='r+b', suffix='.fits')
    self.hdulist[hdulist_index].writeto(self._tempfile.name)
    return self._tempfile.name
IRAF routines such as daophot need input on disk. Returns: filename: str The name of the file containing the FITS data.
codesearchnet
def _CTCLossGrad(op, grad_loss, _):
    """The derivative provided by CTC Loss.

    Args:
        op: the CTCLoss op.
        grad_loss: The backprop for cost.

    Returns:
        The CTC Loss gradient (delegated to _CTCLossGradImpl).
    """
    return _CTCLossGradImpl(op, grad_loss, _)
The derivative provided by CTC Loss. Args: op: the CTCLoss op. grad_loss: The backprop for cost. Returns: The CTC Loss gradient.
github-repos
def detect_framebuffer(self, glo=None) -> 'Framebuffer':
    """Detect framebuffer.

    Args:
        glo (int): Framebuffer object.

    Returns:
        :py:class:`Framebuffer` object wrapping the detected framebuffer.
    """
    # Bypass __init__; the wrapper fields are filled from the detected
    # GL state instead.
    res = Framebuffer.__new__(Framebuffer)
    res.mglo, res._size, res._samples, res._glo = self.mglo.detect_framebuffer(glo)
    res._color_attachments = None
    res._depth_attachment = None
    res.ctx = self
    res.extra = None
    return res
Detect framebuffer. Args: glo (int): Framebuffer object. Returns: :py:class:`Framebuffer` object
juraj-google-style
def area_of_a_triangle_in_cartesian_space(a, b, c):
    """Returns the area of a triangle defined by three points in
    Cartesian space.

    Args:
        a (np.array): Cartesian coordinates of point A.
        b (np.array): Cartesian coordinates of point B.
        c (np.array): Cartesian coordinates of point C.

    Returns:
        (float): the area of the triangle.
    """
    # Half the magnitude of the cross product of two edge vectors.
    edge_ab = b - a
    edge_ac = c - a
    return 0.5 * np.linalg.norm(np.cross(edge_ab, edge_ac))
Returns the area of a triangle defined by three points in Cartesian space. Args: a (np.array): Cartesian coordinates of point A. b (np.array): Cartesian coordinates of point B. c (np.array): Cartesian coordinates of point C. Returns: (float): the area of the triangle.
juraj-google-style
def sleep(sleep_microseconds):
    """Sleeps for `sleep_microseconds` before producing each input element.

    Args:
        sleep_microseconds: The number of microseconds to sleep before
            producing an input element.

    Returns:
        A `Dataset` transformation function, which can be passed to
        `tf.data.Dataset.apply`.
    """
    def _apply_fn(dataset):
        return _SleepDataset(dataset, sleep_microseconds)
    return _apply_fn
Sleeps for `sleep_microseconds` before producing each input element. Args: sleep_microseconds: The number of microseconds to sleep before producing an input element. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`.
github-repos
def make_ar_transition_matrix(coefficients):
    """Build transition matrix for an autoregressive StateSpaceModel.

    When applied to a vector of previous values, this matrix computes the
    expected new value (summing the previous states according to the
    autoregressive coefficients) in the top dimension of the state space,
    and moves all previous values down by one dimension, 'forgetting' the
    final (least recent) value:

    ```
    ar_matrix = [ coefs[0], coefs[1], ..., coefs[order]
                  1.,       0 ,      ..., 0.
                  0.,       1.,      ..., 0.
                  ...
                  0.,       0.,  ..., 1.,  0. ]
    ```

    Args:
        coefficients: float `Tensor` of shape
            `concat([batch_shape, [order]])`.

    Returns:
        ar_matrix: float `Tensor` with shape
            `concat([batch_shape, [order, order]])`.
    """
    # Top row: the AR coefficients themselves.
    top_row = tf.expand_dims(coefficients, (- 2))
    coef_shape = dist_util.prefer_static_shape(coefficients)
    (batch_shape, order) = (coef_shape[:(- 1)], coef_shape[(- 1)])
    # Remaining rows: an (order-1) identity padded with a zero column,
    # which shifts the state vector down by one slot.
    remaining_rows = tf.concat([tf.eye((order - 1), dtype=coefficients.dtype, batch_shape=batch_shape), tf.zeros(tf.concat([batch_shape, ((order - 1), 1)], axis=0), dtype=coefficients.dtype)], axis=(- 1))
    ar_matrix = tf.concat([top_row, remaining_rows], axis=(- 2))
    return ar_matrix
Build transition matrix for an autoregressive StateSpaceModel. When applied to a vector of previous values, this matrix computes the expected new value (summing the previous states according to the autoregressive coefficients) in the top dimension of the state space, and moves all previous values down by one dimension, 'forgetting' the final (least recent) value. That is, it looks like this: ``` ar_matrix = [ coefs[0], coefs[1], ..., coefs[order] 1., 0 , ..., 0. 0., 1., ..., 0. ... 0., 0., ..., 1., 0. ] ``` Args: coefficients: float `Tensor` of shape `concat([batch_shape, [order]])`. Returns: ar_matrix: float `Tensor` with shape `concat([batch_shape, [order, order]])`.
codesearchnet
def remove(self, email):
    """Remove a Collaborator.

    Args:
        email (str): Collaborator email address.
    """
    if email not in self._collaborators:
        return
    if self._collaborators[email] == ShareRequestValue.Add:
        # A pending "add" that was never applied can simply be dropped.
        del self._collaborators[email]
    else:
        # Otherwise mark the existing collaborator for removal.
        self._collaborators[email] = ShareRequestValue.Remove
    self._dirty = True
Remove a Collaborator. Args: email (str): Collaborator email address.
juraj-google-style
def _transform_col(self, x, i):
    """Encode one categorical column as a sparse one-hot matrix.

    Args:
        x (pandas.Series): a categorical column to encode.
        i (int): column index.

    Returns:
        scipy.sparse.coo_matrix: sparse one-hot encoding of the column, or
        ``None`` if no row has a non-zero label.
    """
    labels = self.label_encoder._transform_col(x, i)
    label_max = self.label_encoder.label_maxes[i]
    # Rows with label 0 are "unknown" and contribute no dummy column.
    mask = labels > 0
    rows = np.arange(len(labels))[mask]
    cols = labels[mask] - 1
    if len(rows) == 0:
        return None
    return sparse.coo_matrix((np.ones_like(rows), (rows, cols)),
                             shape=(x.shape[0], label_max))
Encode one categorical column into sparse matrix with one-hot-encoding. Args: x (pandas.Series): a categorical column to encode i (int): column index Returns: X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical variable into dummy variables
juraj-google-style
def get_etexts(feature_name, value):
    """Looks up all the texts that have meta-data matching some criterion.

    Args:
        feature_name (str): The meta-data on which to select the texts.
        value (str): The value of the meta-data on which to filter the texts.

    Returns:
        frozenset: The set of all the Project Gutenberg text identifiers
        that match the provided query.

    Raises:
        UnsupportedFeature: If there is no MetadataExtractor registered that
            can extract meta-data for the given feature name.
    """
    extractor = MetadataExtractor.get(feature_name)
    return frozenset(extractor.get_etexts(value))
Looks up all the texts that have meta-data matching some criterion. Arguments: feature_name (str): The meta-data on which to select the texts. value (str): The value of the meta-data on which to filter the texts. Returns: frozenset: The set of all the Project Gutenberg text identifiers that match the provided query. Raises: UnsupportedFeature: If there is no MetadataExtractor registered that can extract meta-data for the given feature name.
codesearchnet
def put(self, name, base):
    """Add a Base (or sub-class) to the BaseRef by name.

    Args:
        name (str): The name/iden of the Base.
        base (Base): The Base instance.

    Returns:
        None
    """
    async def _on_base_fini():
        # Only drop the entry if it still refers to this base; a newer
        # base may have been registered under the same name since.
        current = self.base_by_name.get(name)
        if current is base:
            self.base_by_name.pop(name, None)

    base.onfini(_on_base_fini)
    self.base_by_name[name] = base
Add a Base (or sub-class) to the BaseRef by name. Args: name (str): The name/iden of the Base base (Base): The Base instance Returns: (None)
juraj-google-style
def __init__(self, atomic_fn: atomic_function.AtomicFunction, shared_func_graph=True):
    """Initialize a `ConcreteFunction`.

    Args:
      atomic_fn: Inference atomic function to form basis of forward pass.
      shared_func_graph: If False, the ConcreteFunction takes ownership of
        `atomic_fn.graph` and will break reference cycles when it is
        deleted. This makes the FuncGraph inoperable.
    """
    # Argument metadata; left unset here (presumably populated elsewhere —
    # NOTE(review): not visible in this block, confirm against the class).
    self._arg_keywords = None
    self._num_positional_args = None
    self._func_graph = atomic_fn.graph
    # Both eager and deferred captures feed the call as extra inputs.
    self._captured_inputs = self._func_graph.external_captures + self._func_graph.deferred_external_captures
    self._function_type = atomic_fn.function_type
    self._output_shapes = tuple((output.shape for output in self._func_graph.outputs))
    self._attrs = attributes_lib.parse_func_attrs(atomic_fn.attributes or {})
    # When the graph is shared, we must not destroy it on deletion; the
    # garbage collector is only created when we own the graph.
    if shared_func_graph:
        self._garbage_collector = None
    else:
        self._garbage_collector = ConcreteFunctionGarbageCollector(atomic_fn.graph)
    # Gradient machinery: delayed-rewrite functions back the default
    # (no-tape) path; the tape-function caches start empty.
    self._delayed_rewrite_functions = _DelayedRewriteGradientFunctions(atomic_fn, self._garbage_collector)
    self._first_order_tape_functions = {}
    self._higher_order_tape_functions = {}
    self._inference_function = self._delayed_rewrite_functions.forward()
Initialize a `ConcreteFunction`. Args: atomic_fn: Inference atomic function to form basis of forward pass. shared_func_graph: If False, the ConcreteFunction takes ownership of `func_graph` and will break reference cycles when it is deleted. This makes the FuncGraph inoperable. Raises: ValueError: If number of input_placeholders is not equal to the number of function inputs.
github-repos
def trainable_variables(self):
    """Sequence of trainable variables owned by this module and its submodules.

    Note: this method uses reflection to find variables on the current
    instance and submodules. For performance reasons you may wish to cache
    the result of calling this method if you don't expect the return value
    to change.

    Returns:
        A sequence of variables for the current module (sorted by attribute
        name) followed by variables from all submodules recursively
        (breadth first).
    """
    matches = self._flatten(predicate=_is_trainable_variable,
                            expand_composites=True)
    return tuple(matches)
Sequence of trainable variables owned by this module and its submodules. Note: this method uses reflection to find variables on the current instance and submodules. For performance reasons you may wish to cache the result of calling this method if you don't expect the return value to change. Returns: A sequence of variables for the current module (sorted by attribute name) followed by variables from all submodules recursively (breadth first).
github-repos
def get_config(self):
    """Returns the configuration of the initializer as a JSON-serializable dict.

    Returns:
        A JSON-serializable Python dict; empty because this initializer
        takes no arguments.
    """
    # No configurable state to serialize.
    return dict()
Returns the configuration of the initializer as a JSON-serializable dict. Returns: A JSON-serializable Python dict.
github-repos
def __init__(self):
    """Initializes the ``JLinkBreakpointInfo`` instance.

    Sets the size of the structure, as the J-Link API expects the struct
    to report its own size.

    Returns:
        ``None``
    """
    super(JLinkBreakpointInfo, self).__init__()
    struct_size = ctypes.sizeof(self)
    self.SizeOfStruct = struct_size
Initializes the ``JLinkBreakpointInfo`` instance. Sets the size of the structure. Args: self (JLinkBreakpointInfo): the ``JLinkBreakpointInfo`` instance Returns: ``None``
juraj-google-style
def cardinal(self, to):
    """Return the number of dependencies of this module to the given node.

    Args:
        to (Package/Module): the target node.

    Returns:
        int: number of dependencies.
    """
    count = 0
    for dep in self.dependencies:
        # Only internal dependencies that land inside the target count.
        if not dep.external and dep.target in to:
            count += 1
    return count
Return the number of dependencies of this module to the given node. Args: to (Package/Module): the target node. Returns: int: number of dependencies.
codesearchnet
def check(self, digest):
    """Check the integrity of the file with the given digest.

    Args:
        digest: digest of the file to check.

    Returns:
        bool: True if the file is not corrupted, False otherwise.
    """
    path = self.get_file_path(digest)
    if self._calc_digest(path) == digest:
        return True
    # Recomputed digest does not match: the stored file is corrupted.
    self.logger.warning("found corrupted file: '{0}'".format(path))
    return False
Check the integrity of the file with the given digest Args: digest -- digest of the file to check Returns: True if the file is not corrupted
juraj-google-style
def label_sequential_regions(inlist):
    """Input a list of labeled tuples and return a dictionary of sequentially labeled regions.

    Uses only the standard library (the original implementation pulled in
    ``more_itertools`` and pandas for what is a simple run-splitting task).

    Args:
        inlist (list): A list of tuples with the first element representing
            the index and the second the index label.

    Returns:
        dict: Dictionary of labeled regions, mapping names like ``'O1'``,
        ``'M2'`` to the list of indices in each consecutive run.

    Examples:
        >>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')])
        {'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]}
    """
    from itertools import groupby

    # Gather the indices for each label, preserving first-seen label order
    # (dicts preserve insertion order).
    indices_by_label = {}
    for index, label in inlist:
        indices_by_label.setdefault(label, []).append(index)

    labeled = {}
    for label, indices in indices_by_label.items():
        # Consecutive integers have a constant (value - position) offset,
        # so grouping on that offset splits the list into sequential runs.
        runs = groupby(enumerate(indices), key=lambda pair: pair[1] - pair[0])
        for run_number, (_, run) in enumerate(runs, start=1):
            labeled['{}{}'.format(label, run_number)] = [value for _, value in run]
    return labeled
Input a list of labeled tuples and return a dictionary of sequentially labeled regions. Args: inlist (list): A list of tuples with the first number representing the index and the second the index label. Returns: dict: Dictionary of labeled regions. Examples: >>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')]) {'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]}
juraj-google-style
def autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose):
    """Perform AD on a single function and return the AST.

    Args:
        See `grad`.

    Returns:
        node: The AST of a module containing the adjoint and primal function
            definitions.
        required: A list of non-built in functions that this function called,
            and of which the primals and adjoints need to be made available
            in order for the returned function to run.

    Raises:
        ValueError: If `mode` is not 'forward' or 'reverse'.
    """
    # Fail fast on an invalid mode; the original fell through and raised a
    # confusing NameError on the unbound `required` after doing all the
    # transformation work below.
    if mode not in ('forward', 'reverse'):
        raise ValueError("Unknown mode: '%s'. Expected 'forward' or 'reverse'." % mode)
    node = annotate.resolve_calls(func)
    node = desugar.explicit_loop_indexes(node)
    fence.validate(node, inspect.getsource(func))
    # Convert to A-normal form so every intermediate value is named.
    node = anf_.anf(node)
    if verbose >= 2:
        print('ANF')
        print(quoting.to_source(node))
    if mode == 'reverse':
        node, required, stack = reverse_ad.reverse_ad(node.body[0], wrt, preserve_result, check_dims)
        if verbose >= 2:
            print('RAW')
            print(quoting.to_source(node))
        # Split motion keeps separate primal/adjoint functions sharing a
        # stack; joint motion merges them into one function.
        if motion == 'split':
            node = reverse_ad.split(node, stack)
        else:
            node = reverse_ad.joint(node)
        if verbose >= 2:
            print('MOTION')
            print(quoting.to_source(node))
    else:
        node, required = forward_ad.forward_ad(node.body[0], wrt, preserve_result, check_dims)
    return node, required
Perform AD on a single function and return the AST. Args: See `grad`. Returns: node: The AST of a module containing the adjoint and primal function definitions. required: A list of non-built in functions that this function called, and of which the primals and adjoints need to be made available in order for the returned function to run.
juraj-google-style
def _initialize(self, args, kwds, add_initializers_to=None):
    """Initializes, on the first call.

    Creates two tracing configurations, one that allows creation of
    variables and one that doesn't. Additionally runs a trace under the
    variable-creating configuration.

    Args:
      args: Arguments to the underlying python callable.
      kwds: Keyword arguments to the python callable.
      add_initializers_to: Where to collect variable initializers, if not
        None.
    """
    created_variables = []

    def variable_capturing_scope(next_creator, **kwds):
        """Creates UnliftedInitializerVariables and saves references to them."""
        enable_variable_lifting = kwds.get('experimental_enable_variable_lifting')
        if enable_variable_lifting is None:
            enable_variable_lifting = True
        if not enable_variable_lifting:
            return next_creator(**kwds)
        v = UnliftedInitializerVariable(add_initializers_to=add_initializers_to, **kwds)
        # Weak refs so captured variables can still be garbage collected.
        created_variables.append(weakref.ref(v))
        return v

    self._created_variables = created_variables
    self._variable_creation_config = self._generate_scoped_tracing_options(variable_capturing_scope, tracing_compilation.ScopeType.VARIABLE_CREATION)
    # Force the definition of the function for these arguments.
    self._concrete_variable_creation_fn = tracing_compilation.trace_function(args, kwds, self._variable_creation_config)

    def invalid_creator_scope(*unused_args, **unused_kwds):
        """Disables variable creation."""
        # This string was truncated mid-URL in the original copy, leaving an
        # unterminated literal; reconstructed from the upstream message.
        raise ValueError('tf.function only supports singleton tf.Variables created on the first call. Make sure the tf.Variable is only created once or created outside tf.function. See https://www.tensorflow.org/guide/function#creating_tfvariables for more information.')

    self._no_variable_creation_config = self._generate_scoped_tracing_options(invalid_creator_scope, tracing_compilation.ScopeType.NO_VARIABLE_CREATION)
Initializes, on the first call. Creates two `Function`s, one that will allow creation of variables and one that won't. Additionally runs a trace for the `Function` that allows creation of variables. Args: args: Arguments to the underlying python callable. kwds: Keyword arguments to the python callable. add_initializers_to: Where to collect variable initializers, if not None.
github-repos
def generate(self, batch_size, length, samples=1, fix_static=False, fix_dynamic=False):
    """Generate new sequences.

    Args:
        batch_size: Number of sequences to generate.
        length: Number of timesteps to generate for each sequence.
        samples: Number of samples to draw from the latent distributions.
        fix_static: Whether to share the same random sample of the static
            latent variable `f` from its prior across all examples.
        fix_dynamic: Whether to share the same random sample of the dynamic
            latent variable `z_{1:T}` from its prior across all examples.

    Returns:
        A batched Independent distribution wrapping a set of Normal
        distributions over the pixels of the generated sequences.
    """
    static_sample, _ = self.sample_static_prior(samples, batch_size, fix_static)
    dynamic_sample, _ = self.sample_dynamic_prior(samples, batch_size, length, fix_dynamic)
    # Decode the sampled latents into a distribution over pixels.
    return self.decoder((dynamic_sample, static_sample))
Generate new sequences. Args: batch_size: Number of sequences to generate. length: Number of timesteps to generate for each sequence. samples: Number of samples to draw from the latent distributions. fix_static: Boolean for whether or not to share the same random sample of the static latent variable `f` from its prior across all examples. fix_dynamic: Boolean for whether or not to share the same random sample of the dynamic latent variable `z_{1:T}` from its prior across all examples. Returns: A batched Independent distribution wrapping a set of Normal distributions over the pixels of the generated sequences, where the Independent distribution has event shape [height, width, channels], batch shape [samples, batch_size, timesteps], and sample shape [sample_shape, samples, batch_size, timesteps, height, width, channels].
codesearchnet
def atol_for_validation(self) -> float:
    """What absolute tolerance value to use during model conversion validation.

    Returns:
        float: absolute tolerance value.
    """
    tolerance = 1e-05
    return tolerance
What absolute tolerance value to use during model conversion validation. Returns: Float absolute tolerance value.
github-repos
def parse_statement(self):
    """Parse a single statement.

    Returns:
        Either a `BindingStatement`, `ImportStatement`, `IncludeStatement`,
        or `None` if no more statements can be parsed (EOF reached).
    """
    self._skip_whitespace_and_comments()
    # End of input: no statement to parse.
    if (self._current_token.kind == tokenize.ENDMARKER):
        return None
    stmt_loc = self._current_location(ignore_char_num=True)
    # This selector is either a binding key ("a.b.c.arg") or one of the
    # keywords 'import'/'include'; disambiguated by the following token.
    binding_key_or_keyword = self._parse_selector()
    statement = None
    if (self._current_token.value != '='):
        # No '=' follows, so this must be an import or include statement.
        if (binding_key_or_keyword == 'import'):
            module = self._parse_selector(scoped=False)
            statement = ImportStatement(module, stmt_loc)
        elif (binding_key_or_keyword == 'include'):
            str_loc = self._current_location()
            (success, filename) = self._maybe_parse_basic_type()
            # 'include' requires a string literal file path.
            if ((not success) or (not isinstance(filename, str))):
                self._raise_syntax_error('Expected file path as string.', str_loc)
            statement = IncludeStatement(filename, stmt_loc)
        else:
            self._raise_syntax_error("Expected '='.")
    else:
        # Binding statement: consume '=' and parse the bound value.
        self._advance_one_token()
        value = self.parse_value()
        (scope, selector, arg_name) = parse_binding_key(binding_key_or_keyword)
        statement = BindingStatement(scope, selector, arg_name, value, stmt_loc)
    assert statement, 'Internal parsing error.'
    # Each statement must end at a newline or the end of input.
    if ((self._current_token.kind != tokenize.NEWLINE) and (self._current_token.kind != tokenize.ENDMARKER)):
        self._raise_syntax_error('Expected newline.')
    elif (self._current_token.kind == tokenize.NEWLINE):
        self._advance_one_token()
    return statement
Parse a single statement. Returns: Either a `BindingStatement`, `ImportStatement`, `IncludeStatement`, or `None` if no more statements can be parsed (EOF reached).
codesearchnet
def predict(self, x, *args, **kwargs):
    """Generic predict method that dispatches to a more specific subfunction.

    Depending on the type of ``x`` and of ``*args``, one of the following
    is executed, in priority order:

    1. If ``args[0]`` is a ``networkx.(Di)Graph``, ``self.orient_graph``.
    2. Otherwise, if ``args[0]`` exists, ``self.predict_proba``.
    3. If ``x`` is a ``pandas.DataFrame``, ``self.predict_dataset``.
    4. If ``x`` is a ``pandas.Series``, ``self.predict_proba``.

    Args:
        x (numpy.array or pandas.DataFrame or pandas.Series): First
            variable or dataset.
        args (numpy.array or networkx.Graph): graph or second variable.

    Returns:
        pandas.Dataframe or networkx.Digraph: predictions output
    """
    if args:
        # Exact-type check is deliberate: nx.DiGraph subclasses nx.Graph,
        # and other Graph subclasses should fall through to predict_proba.
        if type(args[0]) in (nx.Graph, nx.DiGraph):
            return self.orient_graph(x, *args, **kwargs)
        return self.predict_proba(x, *args, **kwargs)
    if type(x) == DataFrame:
        return self.predict_dataset(x, *args, **kwargs)
    if type(x) == Series:
        return self.predict_proba(x.iloc[0], x.iloc[1], *args, **kwargs)
Generic predict method that chooses the most suitable subfunction to execute. Depending on the type of `x` and of `*args`, this function proceeds to execute different functions in the following priority order: 1. If ``args[0]`` is a ``networkx.(Di)Graph``, then ``self.orient_graph`` is executed. 2. If ``args[0]`` exists, then ``self.predict_proba`` is executed. 3. If ``x`` is a ``pandas.DataFrame``, then ``self.predict_dataset`` is executed. 4. If ``x`` is a ``pandas.Series``, then ``self.predict_proba`` is executed. Args: x (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset. args (numpy.array or networkx.Graph): graph or second variable. Returns: pandas.Dataframe or networkx.Digraph: predictions output
codesearchnet