code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def IsRaised(self, matching=None, containing=None): class IsRaisedContext(_EmptySubject): def __init__(self, actual, matching=None, containing=None): super(IsRaisedContext, self).__init__(actual) self._matching = matching self._containing = containing def __enter__(self): return self @asserts_truth def __exit__(self, exc_type, exc, exc_tb): if exc: if issubclass(exc_type, self._actual): if self._matching is not None: AssertThat(exc).HasMessageThat().ContainsMatch(self._matching) if self._containing is not None: AssertThat(exc).HasMessageThat().Contains(self._containing) else: self._FailWithSubject('should have been raised, but caught <{0!r}>'.format(exc)) else: self._Resolve() self._FailWithSubject('should have been raised, but was not') return True return IsRaisedContext(self._actual, matching=matching, containing=containing)
Asserts that an exception matching this subject is raised. The raised exception must be the same type as (or a subclass of) this subject's. None, one, or both of matching= and containing= may be specified. Args: matching: string or regex object. If present, the raised exception's "message" attribute must contain this value, as a regular expression. containing: string. If present, the raised exception's "message" attribute must contain this literal string value. Returns: A context within which an expected exception may be raised and tested.
github-repos
def process_output(meta_file, outfile_name, code_links): doc_str = ' doc_str += 'Generated by [py2md](https: doc_str += strftime("%Y-%m-%d %H:%M:%S ") + '\n\n' if len(meta_file['modules']) > 1: doc_str += " chapter_num = 1 for meta_doc in meta_file['modules']: chapter_name = meta_doc['summary_comment'] chapter_link = chapter_name.lstrip().replace('.', '').replace(' ', '-').lower() doc_str += str(chapter_num) + \ '. [' + chapter_name + ']( chapter_num += 1 for meta_doc in meta_file['modules']: doc_str += ' doc_str += '[source file](' + meta_doc['source_file'] + ')' + '\n' for function_info in meta_doc['functions']: doc_str += ' doc_str += function_info['definition'] + '\n\n' if 'comments' in function_info: doc_str += '```\n' + function_info['comments'] + '\n```\n\n' print('Writing file: ' + outfile_name) out_file = open(outfile_name, 'w') out_file.write(doc_str) out_file.close()
Create a markdown format documentation file. Args: meta_file (dict): Dictionary with documentation metadata. outfile_name (str): Markdown file to write to.
juraj-google-style
def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) cookie_name = self._GetRowValue(query_hash, row, 'name') cookie_value = self._GetRowValue(query_hash, row, 'value') path = self._GetRowValue(query_hash, row, 'path') hostname = self._GetRowValue(query_hash, row, 'domain') if hostname.startswith('.'): hostname = hostname[1:] secure = self._GetRowValue(query_hash, row, 'secure') secure = secure != 0 if secure: scheme = 'https' else: scheme = 'http' url = '{0:s}: event_data = WebViewCookieEventData() event_data.cookie_name = cookie_name event_data.data = cookie_value event_data.host = hostname event_data.offset = self._GetRowValue(query_hash, row, '_id') event_data.path = path event_data.query = query event_data.secure = secure event_data.url = url timestamp = self._GetRowValue(query_hash, row, 'expires') if timestamp: date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp) else: date_time = dfdatetime_semantic_time.SemanticTime('Infinity') event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data) for cookie_plugin in self._cookie_plugins: try: cookie_plugin.UpdateChainAndProcess( parser_mediator, cookie_name=cookie_name, cookie_data=cookie_value, url=url) except errors.WrongPlugin: pass
Parses a row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def closestsites(struct_blk, struct_def, pos): blk_close_sites = struct_blk.get_sites_in_sphere(pos, 5, include_index=True) blk_close_sites.sort(key=lambda x: x[1]) def_close_sites = struct_def.get_sites_in_sphere(pos, 5, include_index=True) def_close_sites.sort(key=lambda x: x[1]) return blk_close_sites[0], def_close_sites[0]
Returns closest site to the input position for both bulk and defect structures Args: struct_blk: Bulk structure struct_def: Defect structure pos: Position Return: (site object, dist, index)
juraj-google-style
def __init__(self, action_type=None, length=None): super().__init__() self.action_type = action_type self.length = length
Create an ActionHeader with the optional parameters below. Args: action_type (~pyof.v0x01.common.action.ActionType): The type of the action. length (int): Length of action, including this header.
juraj-google-style
def __get_path_parameters(self, path): path_parameters_by_segment = {} for format_var_name in re.findall(_PATH_VARIABLE_PATTERN, path): first_segment = format_var_name.split('.', 1)[0] matches = path_parameters_by_segment.setdefault(first_segment, []) matches.append(format_var_name) return path_parameters_by_segment
Parses path paremeters from a URI path and organizes them by parameter. Some of the parameters may correspond to message fields, and so will be represented as segments corresponding to each subfield; e.g. first.second if the field "second" in the message field "first" is pulled from the path. The resulting dictionary uses the first segments as keys and each key has as value the list of full parameter values with first segment equal to the key. If the match path parameter is null, that part of the path template is ignored; this occurs if '{}' is used in a template. Args: path: String; a URI path, potentially with some parameters. Returns: A dictionary with strings as keys and list of strings as values.
codesearchnet
def make_analogous_scheme(self, angle=30, mode='ryb'): (h, s, l) = self.__hsl if (mode == 'ryb'): h = rgb_to_ryb(h) h += 360 h1 = ((h - angle) % 360) h2 = ((h + angle) % 360) if (mode == 'ryb'): h1 = ryb_to_rgb(h1) h2 = ryb_to_rgb(h2) return (Color((h1, s, l), 'hsl', self.__a, self.__wref), Color((h2, s, l), 'hsl', self.__a, self.__wref))
Return two colors analogous to this one. Args: :angle: The angle between the hues of the created colors and this one. :mode: Select which color wheel to use for the generation (ryb/rgb). Returns: A tuple of grapefruit.Colors analogous to this one. >>> c1 = Color.from_hsl(30, 1, 0.5) >>> c2, c3 = c1.make_analogous_scheme(angle=60, mode='rgb') >>> c2.hsl (330.0, 1.0, 0.5) >>> c3.hsl (90.0, 1.0, 0.5) >>> c2, c3 = c1.make_analogous_scheme(angle=10, mode='rgb') >>> c2.hsl (20.0, 1.0, 0.5) >>> c3.hsl (40.0, 1.0, 0.5)
codesearchnet
def get_branches(self): branches = [] for (age, level) in enumerate(self.nodes): branches.append([]) for (n, node) in enumerate(level): if (age == 0): p_node = Node(self.pos[:2]) else: p_node = self._get_node_parent((age - 1), n) branches[age].append((p_node.get_tuple() + node.get_tuple())) return branches
Get the tree branches as list. Returns: list: A 2d-list holding the grown branches coordinates as tupel for every age. Example: [ [(10, 40, 90, 30)], [(90, 30, 100, 40), (90, 30, 300, 60)], [(100, 40, 120, 70), (100, 40, 150, 90), ...], ... ]
codesearchnet
def heightmap_add_hm(hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray) -> None: hm3[:] = (hm1[:] + hm2[:])
Add two heightmaps together and stores the result in ``hm3``. Args: hm1 (numpy.ndarray): The first heightmap. hm2 (numpy.ndarray): The second heightmap to add to the first. hm3 (numpy.ndarray): A destination heightmap to store the result. .. deprecated:: 2.0 Do ``hm3[:] = hm1[:] + hm2[:]`` instead.
codesearchnet
def create_blocking_connection(host): return pika.BlockingConnection(amqpdaemon.getConParams(settings.get_amqp_settings()[host.lower()]['vhost']))
Return properly created blocking connection. Args: host (str): Host as it is defined in :func:`.get_amqp_settings`. Uses :func:`edeposit.amqp.amqpdaemon.getConParams`.
codesearchnet
def adapt(self, all_ouputs: AllOutputs) -> DataPacket: adapted = {} for name, recipe in self.adapting_recipes.items(): adapted[name] = self._construct(all_ouputs, recipe) return adapted
Adapt inputs for the transformer included in the step. Args: all_ouputs: Dict of outputs from parent steps. The keys should match the names of these steps and the values should be their respective outputs. Returns: Dictionary with the same keys as `adapting_recipes` and values constructed according to the respective recipes.
juraj-google-style
def calculate(cls, order_id, shipping=None, refund_line_items=None): data = {} if shipping: data['shipping'] = shipping data['refund_line_items'] = refund_line_items or [] body = {'refund': data} resource = cls.post( "calculate", order_id=order_id, body=json.dumps(body).encode() ) return cls( cls.format.decode(resource.body), prefix_options={'order_id': order_id} )
Calculates refund transactions based on line items and shipping. When you want to create a refund, you should first use the calculate endpoint to generate accurate refund transactions. Args: order_id: Order ID for which the Refund has to created. shipping: Specify how much shipping to refund. refund_line_items: A list of line item IDs and quantities to refund. Returns: Unsaved refund record
juraj-google-style
def wrap_module(module, names: Optional[Sequence[str]]=None, where: Optional[Callable[[Type['ClassWrapper']], bool]]=None, export_to: Optional[types.ModuleType]=None, **kwargs): wrapper_classes = [] module_name = export_to.__name__ if export_to else None origin_cls_to_wrap_cls = {} for symbol_name in names or dir(module): s = getattr(module, symbol_name) if inspect.isclass(s) and (not where or where(s)): if s in origin_cls_to_wrap_cls: wrapper_class = origin_cls_to_wrap_cls[s] else: wrapper_class = wrap(s, module_name=module_name, **kwargs) origin_cls_to_wrap_cls[s] = wrapper_class wrapper_classes.append(wrapper_class) if export_to: setattr(export_to, symbol_name, wrapper_class) return wrapper_classes
Wrap classes from a module. For example, users can wrap all subclasses of `xxx.Base` under module `xxx`:: import xxx pg.wrap_module( xxx, where=lambda c: isinstance(c, xxx.Base)) Args: module: A container that contains classes to wrap. names: An optional list of class names. If not provided, all classes under `module` will be considered candidates. where: An optional filter function in signature (user_class) -> bool. Only the classes under `module` with True return value will be wrapped. export_to: An optional module to export the wrapper classes. **kwargs: Keyword arguments passed to `wrap` Returns: Wrapper classes.
github-repos
def getTextlength(text, fontname='helv', fontsize=11, encoding=0): fontname = fontname.lower() basename = Base14_fontdict.get(fontname, None) glyphs = None if (basename == 'Symbol'): glyphs = symbol_glyphs if (basename == 'ZapfDingbats'): glyphs = zapf_glyphs if (glyphs is not None): w = sum([(glyphs[ord(c)][1] if (ord(c) < 256) else glyphs[183][1]) for c in text]) return (w * fontsize) if (fontname in Base14_fontdict.keys()): return TOOLS.measure_string(text, Base14_fontdict[fontname], fontsize, encoding) if (fontname in ['china-t', 'china-s', 'china-ts', 'china-ss', 'japan', 'japan-s', 'korea', 'korea-s']): return (len(text) * fontsize) raise ValueError(("Font '%s' is unsupported" % fontname))
Calculate length of a string for a given built-in font. Args: fontname: name of the font. fontsize: size of font in points. encoding: encoding to use (0=Latin, 1=Greek, 2=Cyrillic). Returns: (float) length of text.
codesearchnet
def stream_file(self, url, folder=None, filename=None, overwrite=False): path = self.get_path_for_url(url, folder, filename, overwrite) f = None try: f = open(path, 'wb') for chunk in self.response.iter_content(chunk_size=10240): if chunk: f.write(chunk) f.flush() return f.name except Exception as e: raisefrom(DownloadError, ('Download of %s failed in retrieval of stream!' % url), e) finally: if f: f.close()
Stream file from url and store in provided folder or temporary folder if no folder supplied. Must call setup method first. Args: url (str): URL to download filename (Optional[str]): Filename to use for downloaded file. Defaults to None (derive from the url). folder (Optional[str]): Folder to download it to. Defaults to None (temporary folder). overwrite (bool): Whether to overwrite existing file. Defaults to False. Returns: str: Path of downloaded file
codesearchnet
def first_timestamp(self, event_key=None): if event_key is None: timestamps = [self._trackers[key].first_timestamp for key in self._trackers] return min(timestamp for timestamp in timestamps if timestamp >= 0) else: return self._trackers[event_key].first_timestamp
Obtain the first timestamp. Args: event_key: the type key of the sought events (e.g., constants.NAN_KEY). If None, includes all event type keys. Returns: First (earliest) timestamp of all the events of the given type (or all event types if event_key is None).
juraj-google-style
def create(self): logging.info('Compiling under python %s...', sys.version) logging.info('Making parfile [%s]...', self.output_filename) remove_if_present(self.output_filename) logging.debug('Compiling file list from [%s]', self.manifest_filename) manifest = manifest_parser.parse(self.manifest_filename) stored_resources = self.scan_manifest(manifest) temp_parfile = self.create_temp_parfile() try: logging.debug('Writing parfile to temp file [%s]...', temp_parfile.name) self.write_bootstrap(temp_parfile) self.write_zip_data(temp_parfile, stored_resources) temp_parfile.close() self.create_final_from_temp(temp_parfile.name) finally: remove_if_present(temp_parfile.name) logging.info('Success!')
Create a .par file on disk Raises: Error, IOError, SystemError
github-repos
def noise_set_type(n: tcod.noise.Noise, typ: int) -> None: n.algorithm = typ
Set a Noise objects default noise algorithm. Args: typ (int): Any NOISE_* constant.
juraj-google-style
def check_timer(self, timer_name): if timer_name in self._timers: elapsed = datetime.datetime.now() - self._timers[timer_name] print('%s: %d.%d' % (timer_name, elapsed.seconds, (elapsed.microseconds - elapsed.seconds * 60 * 1000000) / 1000)) else: print('timer %s not defined' % timer_name)
Checks and prints the elapsed time of a given timer. Args: timer_name: Name of the timer to check and print, it must have been initialized with start_timer.
github-repos
def __init__(self, unique_identifier=None, revocation_reason=None, compromise_occurrence_date=None): super(RevokeRequestPayload, self).__init__( tag=enums.Tags.REQUEST_PAYLOAD) self.unique_identifier = unique_identifier self.compromise_occurrence_date = compromise_occurrence_date self.revocation_reason = revocation_reason if self.revocation_reason is None: self.revocation_reason = objects.RevocationReason() self.validate()
Construct a RevokeRequestPayload object. Args: unique_identifier (UniqueIdentifier): The UUID of a managed cryptographic object. revocation_reason (RevocationReason): The reason why the object was revoked. compromise_occurrence_date (DateTime): the datetime when the object was first believed to be compromised.
juraj-google-style
def get_shared_files_from_shake(self, shake_id=None, before=None, after=None): if (before and after): raise Exception('You cannot specify both before and after keys') endpoint = '/api/shakes' if shake_id: endpoint += '/{0}'.format(shake_id) if before: endpoint += '/before/{0}'.format(before) elif after: endpoint += '/after/{0}'.format(after) data = self._make_request(verb='GET', endpoint=endpoint) return [SharedFile.NewFromJSON(f) for f in data['sharedfiles']]
Returns a list of SharedFile objects from a particular shake. Args: shake_id (int): Shake from which to get a list of SharedFiles before (str): get 10 SharedFile objects before (but not including) the SharedFile given by `before` for the given Shake. after (str): get 10 SharedFile objects after (but not including) the SharedFile give by `after' for the given Shake. Returns: List (list) of SharedFiles.
codesearchnet
def hpo_terms(store, query = None, limit = None): hpo_phenotypes = {} if limit: limit=int(limit) hpo_phenotypes['phenotypes'] = list(store.hpo_terms(text=query, limit=limit)) return hpo_phenotypes
Retrieves a list of HPO terms from scout database Args: store (obj): an adapter to the scout database query (str): the term to search in the database limit (str): the number of desired results Returns: hpo_phenotypes (dict): the complete list of HPO objects stored in scout
juraj-google-style
def value_to_single_key_strokes(value): result = [] if isinstance(value, Integral): value = str(value) for v in value: if isinstance(v, Keys): result.append(v.value) elif isinstance(v, Integral): result.append(str(v)) else: result.append(v) return result
Convert value to a list of key strokes >>> value_to_single_key_strokes(123) ['1', '2', '3'] >>> value_to_single_key_strokes('123') ['1', '2', '3'] >>> value_to_single_key_strokes([1, 2, 3]) ['1', '2', '3'] >>> value_to_single_key_strokes(['1', '2', '3']) ['1', '2', '3'] Args: value(int|str|list) Returns: A list of string.
juraj-google-style
def _is_propertyable( names, attrs, annotations, attr, ): return ( attr in annotations and not attr.startswith("_") and not attr.isupper() and "__{}".format(attr) not in names and not isinstance(getattr(attrs, attr, None), types.MethodType) )
Determine if an attribute can be replaced with a property. Args: names: The complete list of all attribute names for the class. attrs: The attribute dict returned by __prepare__. annotations: A mapping of all defined annotations for the class. attr: The attribute to test. Returns: True if the attribute can be replaced with a property; else False.
juraj-google-style
def calc_track_errors(model_tracks, obs_tracks, track_pairings): columns = ['obs_track_id', 'translation_error_x', 'translation_error_y', 'start_time_difference', 'end_time_difference'] track_errors = pd.DataFrame(index=list(range(len(model_tracks))), columns=columns) for (p, pair) in enumerate(track_pairings): model_track = model_tracks[pair[0]] if (type(pair[1]) in [int, np.int64]): obs_track = obs_tracks[pair[1]] else: obs_track = obs_tracks[pair[1][0]] model_com = model_track.center_of_mass(model_track.start_time) obs_com = obs_track.center_of_mass(obs_track.start_time) track_errors.loc[(pair[0], 'obs_track_id')] = (pair[1] if (type(pair[1]) in [int, np.int64]) else pair[1][0]) track_errors.loc[(pair[0], 'translation_error_x')] = (model_com[0] - obs_com[0]) track_errors.loc[(pair[0], 'translation_error_y')] = (model_com[1] - obs_com[1]) track_errors.loc[(pair[0], 'start_time_difference')] = (model_track.start_time - obs_track.start_time) track_errors.loc[(pair[0], 'end_time_difference')] = (model_track.end_time - obs_track.end_time) return track_errors
Calculates spatial and temporal translation errors between matched forecast and observed tracks. Args: model_tracks: List of model track STObjects obs_tracks: List of observed track STObjects track_pairings: List of tuples pairing forecast and observed tracks. Returns: pandas DataFrame containing different track errors
codesearchnet
def dump(self, destination, with_defaults=False): if isinstance(destination, six.string_types): with open(destination, 'w', encoding='utf-8') as f: self._rw.dump_config_to_file(self._config, f, with_defaults=with_defaults) else: self._rw.dump_config_to_file(self._config, destination, with_defaults=with_defaults)
Write configuration values to the specified destination. Args: destination: with_defaults (bool): if ``True``, values of items with no custom values will be included in the output if they have a default value set.
codesearchnet
def _get_proxy_info(self, _=None): (target_host, target_port, target_path) = self._endpoint_to_target(self._endpoint) sock = None if target_path: sock = self._ssh_tunnel.forward_unix(path=target_path) else: sock = self._ssh_tunnel.forward_tcp(target_host, port=target_port) return SSHTunnelProxyInfo(sock=sock)
Generate a ProxyInfo class from a connected SSH transport Args: _ (None): Ignored. This is just here as the ProxyInfo spec requires it. Returns: SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH
codesearchnet
def run_makeblastdb(infile, dbtype, outdir=''): (og_dir, name, ext) = utils.split_folder_and_path(infile) if (not outdir): outdir = og_dir outfile_basename = op.join(outdir, name) if (dbtype == 'nucl'): outext = ['.nhr', '.nin', '.nsq'] elif (dbtype == 'prot'): outext = ['.phr', '.pin', '.psq'] else: raise ValueError('dbtype must be "nucl" or "prot"') outfile_all = [(outfile_basename + x) for x in outext] db_made = True for f in outfile_all: if (not op.exists(f)): db_made = False if db_made: log.debug('BLAST database already exists at {}'.format(outfile_basename)) return outfile_all else: retval = subprocess.call('makeblastdb -in {} -dbtype {} -out {}'.format(infile, dbtype, outfile_basename), shell=True) if (retval == 0): log.debug('Made BLAST database at {}'.format(outfile_basename)) return outfile_all else: log.error('Error running makeblastdb, exit code {}'.format(retval))
Make the BLAST database for a genome file. Args: infile (str): path to genome FASTA file dbtype (str): "nucl" or "prot" - what format your genome files are in outdir (str): path to directory to output database files (default is original folder) Returns: Paths to BLAST databases.
codesearchnet
def trimpath(attributes): if ('pathdepth' in attributes): if (attributes['pathdepth'] != 'full'): pathelements = [] remainder = attributes['file'] limit = int(attributes['pathdepth']) while ((len(pathelements) < limit) and remainder): (remainder, pe) = os.path.split(remainder) pathelements.insert(0, pe) return os.path.join(*pathelements) return attributes['file'] return os.path.basename(attributes['file'])
Simplifies the given path. If pathdepth is in attributes, the last pathdepth elements will be returned. If pathdepth is "full", the full path will be returned. Otherwise the filename only will be returned. Args: attributes: The element attributes. Returns: The trimmed path.
codesearchnet
def dirhash(self, path, **dirhash_opts): path = fs.path(path) last_modified = time.ctime(max( max(os.path.getmtime(os.path.join(root, file)) for file in files) for root,_,files in os.walk(path))) db = sqlite3.connect(self.path) c = db.cursor() c.execute("SELECT date, hash FROM dirhashcache WHERE path=?", (path,)) cached = c.fetchone() if cached: cached_date, cached_hash = cached if cached_date == last_modified: dirhash = cached_hash else: dirhash = checksumdir.dirhash(path, self.hash, **dirhash_opts) c.execute("UPDATE dirhashcache SET date=?, hash=? WHERE path=?", (last_modified, dirhash, path)) db.commit() else: dirhash = checksumdir.dirhash(path, self.hash, **dirhash_opts) c.execute("INSERT INTO dirhashcache VALUES (?,?,?)", (path, last_modified, dirhash)) db.commit() db.close() return dirhash
Compute the hash of a directory. Arguments: path: Directory. **dirhash_opts: Additional options to checksumdir.dirhash(). Returns: str: Checksum of directory.
juraj-google-style
def scaled_dot_product_attention_simple(q, k, v, bias, name=None): with tf.variable_scope(name, default_name='scaled_dot_product_attention_simple'): scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2])) logits = tf.matmul((q * scalar), k, transpose_b=True) if (bias is not None): logits += bias weights = tf.nn.softmax(logits, name='attention_weights') if common_layers.should_generate_summaries(): tf.summary.image('attention', tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1) return tf.matmul(weights, v)
Scaled dot-product attention. One head. One spatial dimension. Args: q: a Tensor with shape [batch, length_q, depth_k] k: a Tensor with shape [batch, length_kv, depth_k] v: a Tensor with shape [batch, length_kv, depth_v] bias: optional Tensor broadcastable to [batch, length_q, length_kv] name: an optional string Returns: A Tensor.
codesearchnet
def is_process_running(process_name): is_running = False if os.path.isfile('/usr/bin/pgrep'): dev_null = open(os.devnull, 'wb') returncode = subprocess.call(['/usr/bin/pgrep', process_name], stdout=dev_null) is_running = bool((returncode == 0)) return is_running
Check if a process with the given name is running. Args: (str): Process name, e.g. "Sublime Text" Returns: (bool): True if the process is running
codesearchnet
def _ensure_safe(self): if not self._safe_to_run(): raise RuntimeError('There is at least 1 reference to internal data\n in the interpreter in the form of a numpy array or slice. Be sure to\n only hold the function returned from tensor() if you are using raw\n data access.')
Makes sure no numpy arrays pointing to internal buffers are active. This should be called from any function that will call a function on _interpreter that may reallocate memory e.g. invoke(), ... Raises: RuntimeError: If there exist numpy objects pointing to internal memory then we throw.
github-repos
def all_genes(self, build='37'): LOG.info('Fetching all genes') return self.hgnc_collection.find({'build': build}).sort('chromosome', 1)
Fetch all hgnc genes Returns: result()
codesearchnet
def Var(poly, dist=None, **kws): if isinstance(poly, distributions.Dist): x = polynomials.variable(len(poly)) (poly, dist) = (x, poly) else: poly = polynomials.Poly(poly) dim = len(dist) if (poly.dim < dim): polynomials.setdim(poly, dim) shape = poly.shape poly = polynomials.flatten(poly) keys = poly.keys N = len(keys) A = poly.A keys1 = numpy.array(keys).T if (dim == 1): keys1 = keys1[0] keys2 = sum(numpy.meshgrid(keys, keys)) else: keys2 = numpy.empty((dim, N, N)) for i in range(N): for j in range(N): keys2[(:, i, j)] = (keys1[(:, i)] + keys1[(:, j)]) m1 = numpy.outer(*([dist.mom(keys1, **kws)] * 2)) m2 = dist.mom(keys2, **kws) mom = (m2 - m1) out = numpy.zeros(poly.shape) for i in range(N): a = A[keys[i]] out += ((a * a) * mom[(i, i)]) for j in range((i + 1), N): b = A[keys[j]] out += (((2 * a) * b) * mom[(i, j)]) out = out.reshape(shape) return out
Element by element 2nd order statistics. Args: poly (Poly, Dist): Input to take variance on. dist (Dist): Defines the space the variance is taken on. It is ignored if ``poly`` is a distribution. Returns: (numpy.ndarray): Element for element variance along ``poly``, where ``variation.shape == poly.shape``. Examples: >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2)) >>> print(chaospy.Var(dist)) [1. 4.] >>> x, y = chaospy.variable(2) >>> poly = chaospy.Poly([1, x, y, 10*x*y]) >>> print(chaospy.Var(poly, dist)) [ 0. 1. 4. 800.]
codesearchnet
def filter_iqr(array, lower, upper): upper, lower = iqr(array, upper, lower) new = list(array) for x in new[:]: if x < lower or x > upper: new.remove(x) return new
Return elements which falls within specified interquartile range. Arguments: array (list): Sequence of numbers. lower (float): Lower bound for IQR, in range 0 <= lower <= 1. upper (float): Upper bound for IQR, in range 0 <= upper <= 1. Returns: list: Copy of original list, with elements outside of IQR removed.
juraj-google-style
def _module_info_to_proto(module_info, export_scope=None): def strip_name_scope(name_scope): return ops.strip_name_scope(name_scope, export_scope) def process_leafs(value): return strip_name_scope(_graph_element_to_path(value)) module_info_def = module_pb2.SonnetModule(module_name=module_info.module_name, scope_name=strip_name_scope(module_info.scope_name), class_name=module_info.class_name) for connected_subgraph in module_info.connected_subgraphs: connected_subgraph_info_def = module_info_def.connected_subgraphs.add() connected_subgraph_info_def.name_scope = strip_name_scope(connected_subgraph.name_scope) _nested_to_proto(connected_subgraph.inputs, connected_subgraph_info_def.inputs, process_leafs, set()) _nested_to_proto(connected_subgraph.outputs, connected_subgraph_info_def.outputs, process_leafs, set()) return module_info_def
Serializes `module_into`. Args: module_info: An instance of `ModuleInfo`. export_scope: Optional `string`. Name scope to remove. Returns: An instance of `module_pb2.SonnetModule`.
codesearchnet
def exponential(data): data = np.hstack(([0.0], np.array(data))) cumm = np.cumsum(data) def cost(s, t): return -1*(t-s) * (np.log(t-s) - np.log(cumm[t] - cumm[s])) return cost
Creates a segment cost function for a time series with a exponential distribution with changing mean Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment
juraj-google-style
class Hinge(reduction_metrics.MeanMetricWrapper): def __init__(self, name='hinge', dtype=None): super().__init__(fn=hinge, name=name, dtype=dtype) self._direction = 'down' def get_config(self): return {'name': self.name, 'dtype': self.dtype}
Computes the hinge metric between `y_true` and `y_pred`. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Examples: >>> m = keras.metrics.Hinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result() 1.3 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result() 1.1
github-repos
def ones_like(tensor, dtype=None, name=None, optimize=True): return ones_like_impl(tensor, dtype, name, optimize)
Creates a tensor with all elements set to 1. See also `tf.ones`. Given a single tensor (`tensor`), this operation returns a tensor of the same type and shape as `tensor` with all elements set to 1. Optionally, you can specify a new type (`dtype`) for the returned tensor. For example: ```python tensor = tf.constant([[1, 2, 3], [4, 5, 6]]) tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]] ``` Args: tensor: A `Tensor`. dtype: A type for the returned `Tensor`. Must be `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128` or `bool`. name: A name for the operation (optional). optimize: if true, attempt to statically determine the shape of 'tensor' and encode it as a constant. Returns: A `Tensor` with all elements set to 1.
github-repos
def _indexed_case_verify_and_canonicalize_args(branch_fns, default, branch_index):
    """Verifies input arguments for the case function.

    Args:
        branch_fns: Dict or list of pairs of an `int` and a callable which
            returns a list of tensors, or a plain list/tuple of callables.
        default: Optional callable that returns a list of tensors.
        branch_index: int `Tensor` selecting the branch to run.

    Raises:
        TypeError: If `branch_fns` is not a list/tuple/dict, contains
            malformed entries, or any fn (or `default`) is not callable.
        ValueError: If `branch_fns` is empty or keys are not a contiguous
            range starting at 0.

    Returns:
        Validated list of callables, one per branch (default appended last).
    """
    if not isinstance(branch_index, tensor.Tensor):
        raise TypeError("'branch_index' must be a Tensor, got {}".format(type(branch_index)))
    if not branch_index.dtype.is_integer:
        raise TypeError("'branch_index' must be an integer Tensor, got {}".format(branch_index.dtype))
    if not branch_fns:
        raise ValueError("Must provide at least one item in 'branch_fns'")
    if not isinstance(branch_fns, (list, tuple, dict)):
        raise TypeError("'branch_fns' must be a list, tuple, or dict")
    if isinstance(branch_fns, dict):
        branch_fns = branch_fns.items()
    # A bare sequence of callables is treated as branches 0..n-1 in order.
    if all((callable(fn) for fn in branch_fns)):
        branch_fns = list(enumerate(branch_fns))
    for key_fn_pair in branch_fns:
        if not isinstance(key_fn_pair, tuple) or len(key_fn_pair) != 2:
            raise TypeError(f"Each entry in 'branch_fns' must be a 2-tuple. Received {key_fn_pair}.")
        key, branch_fn = key_fn_pair
        if not isinstance(key, int):
            raise TypeError('key must be a Python `int`, got {}'.format(type(key)))
        if not callable(branch_fn):
            raise TypeError('fn for key {} must be callable.'.format(key))
    keys = [p[0] for p in branch_fns]
    # Keys must be exactly {0, 1, ..., n-1} with no duplicates.
    if min(keys) < 0 or max(keys) >= len(keys) or len(set(keys)) != len(keys):
        raise ValueError('branch indices (keys) must form contiguous range of [0 to {}) but found {{{}}}'.format(len(keys), ','.join(map(str, sorted(keys)))))
    # Sort by key so actions[i] corresponds to branch i.
    actions = [p[1] for p in sorted(branch_fns)]
    if default is not None:
        actions.append(default)
    return actions
Verifies input arguments for the case function. Args: branch_fns: Dict or list of pairs of an `int` and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. branch_index: Optional int `Tensor`, which selects for the corresponding pred_fn_pair. Raises: TypeError: If `branch_fns` is not a list/dictionary. TypeError: If `branch_fns` is a list but does not contain 2-tuples or callables. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable. Returns: branch_fns: validated list of callables for each branch (default last).
github-repos
def edit_miz(infile: str, outfile: str=None, metar: typing.Union[(str, Metar)]=None, time: str=None, min_wind: int=0, max_wind: int=40) -> str:
    """Edit a MIZ file, setting the weather (from a METAR) and/or mission time.

    Args:
        infile: source file
        outfile: output file (defaults to editing the source file in place)
        metar: metar string, ICAO code, or Metar object to apply
        time: time string to apply (YYYYMMDDHHMMSS)
        min_wind: minimum wind
        max_wind: maximum wind

    Returns:
        An error message string, or '' on success.
    """
    if (outfile is None):
        LOGGER.debug('editing in place: %s', infile)
        outfile = infile
    else:
        LOGGER.debug('editing miz file: %s -> %s', infile, outfile)
    mission_weather = mission_time = None
    if metar:
        (error, metar) = emiz.weather.custom_metar.CustomMetar.get_metar(metar)
        if error:
            return error
        mission_weather = emiz.weather.mission_weather.MissionWeather(metar, min_wind=min_wind, max_wind=max_wind)
    if time:
        try:
            mission_time = MissionTime.from_string(time)
        except ValueError:
            return f'badly formatted time string: {time}'
    # Nothing requested: bail out rather than rewriting the file for no reason.
    if ((not mission_weather) and (not mission_time)):
        return 'nothing to do!'
    with Miz(infile) as miz:
        if mission_weather:
            LOGGER.debug('applying MissionWeather')
            if (not mission_weather.apply_to_miz(miz)):
                return 'error while applying METAR to mission'
        if mission_time:
            LOGGER.debug('applying MissionTime')
            if (not mission_time.apply_to_miz(miz)):
                return 'error while setting time on mission'
        try:
            # Re-zip the edited mission into the output path.
            miz.zip(outfile)
            return ''
        except OSError:
            return f'permission error: cannot edit "{outfile}"; maybe it is in use ?'
Edit an opened MIZ file and sets the time and date and the weather Args: infile: source file outfile: output file (will default to source file) metar: metar string, ICAO or object to apply time: time string to apply (YYYYMMDDHHMMSS) min_wind: minimum wind max_wind: maximum wind Returns: String containing error
codesearchnet
def save(self, path='speech'):
    """Write the downloaded audio data to a file and return its path.

    The extension for the requested audio format is appended when *path*
    does not already carry it. Absolute paths are allowed.

    Args:
        path (optional): destination path, defaults to "speech".

    Returns:
        The path of the saved file (with extension).

    Raises:
        Exception: if no audio data has been fetched yet.
    """
    if self._data is None:
        raise Exception("There's nothing to save")
    extension = '.' + self.__params['format']
    # Append the format extension unless the path already ends with it.
    if os.path.splitext(path)[1] != extension:
        path = path + extension
    with open(path, 'wb') as output:
        for chunk in self._data:
            output.write(chunk)
    return path
Save data in file. Args: path (optional): A path to save file. Defaults to "speech". File extension is optional. Absolute path is allowed. Returns: The path to the saved file.
codesearchnet
def save(self):
    """Save the alarm to the Sonos system.

    Creates the alarm on the device on first save, then updates it in place
    on subsequent saves.

    Raises:
        ~soco.exceptions.SoCoUPnPException: if the alarm cannot be created
            because there is already an alarm for this room at the
            specified time.
    """
    # Argument list in the order the AlarmClock UPnP service expects.
    args = [('StartLocalTime', self.start_time.strftime(TIME_FORMAT)), ('Duration', ('' if (self.duration is None) else self.duration.strftime(TIME_FORMAT))), ('Recurrence', self.recurrence), ('Enabled', ('1' if self.enabled else '0')), ('RoomUUID', self.zone.uid), ('ProgramURI', ('x-rincon-buzzer:0' if (self.program_uri is None) else self.program_uri)), ('ProgramMetaData', self.program_metadata), ('PlayMode', self.play_mode), ('Volume', self.volume), ('IncludeLinkedZones', ('1' if self.include_linked_zones else '0'))]
    if (self._alarm_id is None):
        # First save: create the alarm and record its assigned id.
        response = self.zone.alarmClock.CreateAlarm(args)
        self._alarm_id = response['AssignedID']
        Alarm._all_alarms[self._alarm_id] = self
    else:
        # Already exists: update in place, keyed by the stored id.
        args.insert(0, ('ID', self._alarm_id))
        self.zone.alarmClock.UpdateAlarm(args)
Save the alarm to the Sonos system. Raises: ~soco.exceptions.SoCoUPnPException: if the alarm cannot be created because there is already an alarm for this room at the specified time.
codesearchnet
def contextmanager(target):
    """A tf_decorator-aware wrapper for `contextlib.contextmanager`.

    Usage is identical to `contextlib.contextmanager`.

    Args:
        target: A callable to be wrapped in a contextmanager.

    Returns:
        A callable that can be used inside of a `with` statement.
    """
    context_manager = _contextlib.contextmanager(target)
    # Wrap so TF's introspection utilities can see through to `target`.
    return tf_decorator.make_decorator(target, context_manager, 'contextmanager')
A tf_decorator-aware wrapper for `contextlib.contextmanager`. Usage is identical to `contextlib.contextmanager`. Args: target: A callable to be wrapped in a contextmanager. Returns: A callable that can be used inside of a `with` statement.
github-repos
def _full_axis_reduce_along_select_indices(self, func, axis, index):
    """Reduce along select indices using a function that needs the full axis.

    Args:
        func: Callable that reduces the dimension of the object and requires
            full knowledge of the entire axis.
        axis: 0 for columns and 1 for rows.
        index: Labels selecting which columns/rows to reduce over.

    Returns:
        Result of applying `func` over the selected positions.
    """
    # Translate the requested labels into positional indices on the
    # appropriate axis.
    old_index = self.index if axis else self.columns
    numeric_indices = [i for i, name in enumerate(old_index) if name in index]
    result = self.data.apply_func_to_select_indices_along_full_axis(
        axis, func, numeric_indices
    )
    return result
Reduce Manger along select indices using function that needs full axis. Args: func: Callable that reduces the dimension of the object and requires full knowledge of the entire axis. axis: 0 for columns and 1 for rows. Defaults to 0. index: Index of the resulting QueryCompiler. Returns: A new QueryCompiler object with index or BaseFrameManager object.
juraj-google-style
def __request_start(self, queue_item):
    """Execute the request in the given queue item on a worker thread.

    Consults the `request_before_start` callback first; its returned action
    can stop the crawl, skip this item, or let it proceed.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The request/response pair to
            scrape.
    """
    try:
        action = self.__options.callbacks.request_before_start(self.queue, queue_item)
    except Exception as e:
        # A broken user callback must not kill the crawler; treat as
        # "no action" and continue.
        action = None
        print(e)
        print(traceback.format_exc())
    if (action == CrawlerActions.DO_STOP_CRAWLING):
        self.__should_stop = True
    if (action == CrawlerActions.DO_SKIP_TO_NEXT):
        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)
        self.__should_spawn_new_requests = True
    if ((action == CrawlerActions.DO_CONTINUE_CRAWLING) or (action is None)):
        # Mark in-progress and hand off to a daemon worker thread.
        self.queue.move(queue_item, QueueItem.STATUS_IN_PROGRESS)
        thread = CrawlerThread(self.__request_finish, self.__lock, self.__options, queue_item)
        self.__threads[queue_item.get_hash()] = thread
        thread.daemon = True
        thread.start()
Execute the request in given queue item. Args: queue_item (:class:`nyawc.QueueItem`): The request/response pair to scrape.
codesearchnet
def bleu_score(predictions, labels, **unused_kwargs):
    """BLEU score computation between labels and predictions.

    An approximate BLEU scoring method since word pieces are not glued and
    ids are not decoded/tokenized. No beam search is performed.

    Args:
        predictions: tensor, model predictions (logits over vocabulary).
        labels: tensor, gold output ids.

    Returns:
        (bleu, weight): the approximate BLEU score and a constant weight 1.0.
    """
    # Greedy decode: take the argmax token id at each position.
    outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
    outputs = tf.squeeze(outputs, axis=[-1, -2])
    labels = tf.squeeze(labels, axis=[-1, -2])
    # compute_bleu runs in Python; wrap it as a TF op.
    bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)
    return bleu, tf.constant(1.0)
BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: bleu: int, approx bleu score
juraj-google-style
async def get_match(self, m_id, force_update=False) -> Match:
    """Get a single match by id.

    |methcoro|

    Args:
        m_id: match id
        force_update (default=False): True to force an update to the
            Challonge API

    Returns:
        Match

    Raises:
        APIException
    """
    found_m = self._find_match(m_id)
    if (force_update or (found_m is None)):
        # Refresh the local match cache from the API, then retry the lookup.
        (await self.get_matches())
        found_m = self._find_match(m_id)
    return found_m
get a single match by id |methcoro| Args: m_id: match id force_update (default=False): True to force an update to the Challonge API Returns: Match Raises: APIException
codesearchnet
def shell(commands, splitlines=False, ignore_errors=False):
    """Subprocess based implementation of pyinfra/api/ssh.py's
    ``run_shell_command``.

    Args:
        commands (string, list): command or list of commands to execute
        splitlines (bool): optionally have the output split by lines
        ignore_errors (bool): ignore errors when executing these commands
    """
    # Accept a single command string for convenience.
    if isinstance(commands, six.string_types):
        commands = [commands]
    all_stdout = []
    print_output = (
        pseudo_state.print_output
        if pseudo_state.isset()
        else False
    )
    for command in commands:
        print_prefix = 'localhost: '
        if print_output:
            print('{0}>>> {1}'.format(print_prefix, command))
        # stderr is merged into stdout so the buffer captures both streams.
        process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT)
        stdout = read_buffer(
            process.stdout,
            print_output=print_output,
            print_func=lambda line: '{0}{1}'.format(print_prefix, line),
        )
        result = process.wait()
        process.stdout.close()
        if result > 0 and not ignore_errors:
            raise PyinfraError(
                'Local command failed: {0}\n{1}'.format(command, stdout),
            )
        all_stdout.extend(stdout)
    if not splitlines:
        return '\n'.join(all_stdout)
    return all_stdout
Subprocess based implementation of pyinfra/api/ssh.py's ``run_shell_command``. Args: commands (string, list): command or list of commands to execute spltlines (bool): optionally have the output split by lines ignore_errors (bool): ignore errors when executing these commands
juraj-google-style
def _add_arg_java(self, key, value, mask=False): if isinstance(value, bool): value = int(value) self._data[key] = value self._args.append('{}{}={}'.format('-D', key, value)) self._args_quoted.append(self.quote('{}{}={}'.format('-D', key, value))) if mask: value = 'x' * len(str(value)) self._args_masked.append('{}{}={}'.format('-D', key, value))
Add CLI Arg formatted specifically for Java. Args: key (string): The CLI Args key (e.g., --name). value (string): The CLI Args value (e.g., bob). mask (boolean, default:False): Indicates whether no mask value.
juraj-google-style
def load(fh, single=False):
    """Deserialize DMRX from a file (handle or filename).

    Args:
        fh (str, file): input filename or file object
        single: if `True`, only return the first read Xmrs object

    Returns:
        a generator of Xmrs objects (unless *single* is `True`)
    """
    ms = deserialize(fh)
    # Either hand back the lazy generator or just its first element.
    return next(ms) if single else ms
Deserialize DMRX from a file (handle or filename) Args: fh (str, file): input filename or file object single: if `True`, only return the first read Xmrs object Returns: a generator of Xmrs objects (unless the *single* option is `True`)
juraj-google-style
def call(self, context: tf.Tensor, latents: tf.Tensor) -> tf.Tensor:
    """Runs Perceiver cross-attention from latents over (context + latents).

    Args:
        context (`tf.Tensor`): Tensor of shape `[bsz, seq, embed_dim]`
            representing long-form context to resample.
        latents (`tf.Tensor`): Tensor of shape `[bsz, n_latents, embed_dim]`
            representing fixed length latents to compress to.

    Returns:
        `tf.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` of
        attention over latents with cross-attention from context.
    """
    context = self.context_layer_norm(context)
    latents = self.latents_layer_norm(latents)
    batch_size, seq_length, embed_dim = shape_list(context)
    # Queries come from the latents only; keys/values attend over the
    # concatenation of context and latents.
    q = self.q_proj(latents)
    k = self.k_proj(tf.concat([context, latents], axis=-2))
    v = self.v_proj(tf.concat([context, latents], axis=-2))
    # Split heads: (bsz, seq, n_heads, head_dim) -> (bsz, n_heads, seq, head_dim).
    q, k, v = [tf.transpose(tf.reshape(x, (batch_size, x.shape[1], self.n_heads, self.head_dim)), perm=[0, 2, 1, 3]) for x in (q, k, v)]
    if self.qk_layer_norms:
        q = self.q_layer_norm(q)
        k = self.k_layer_norm(k)
    scores = tf.einsum('... i d, ... j d -> ... i j', q * self.qk_scale, k)
    # Subtract the row max for numerically stable softmax.
    stabilized_scores = scores - tf.reduce_max(scores, axis=-1, keepdims=True)
    attn = tf.nn.softmax(stabilized_scores, axis=-1)
    resampled = tf.einsum('... i j, ... j d -> ... i d', attn, v)
    # Merge heads back and project to the output embedding dimension.
    return self.output_proj(tf.reshape(tf.transpose(resampled, perm=[0, 2, 1, 3]), (batch_size, -1, self.n_heads * self.head_dim)))
Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension! Args: context (`tf.Tensor`): Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample. latents (`tf.Tensor`): Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to. Returns: `tf.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross from context.
github-repos
def cleave_sequence(input_layer, unroll=None):
    """Cleaves a tensor into a sequence; this is the inverse of squash.

    Recurrent methods unroll across an array of Tensors with each one being
    a timestep. This cleaves the first dim so that each is an array of
    Tensors. It is the inverse of squash_sequence.

    Args:
        input_layer: The input layer.
        unroll: The number of time steps.

    Returns:
        A PrettyTensor containing an array of tensors.

    Raises:
        ValueError: If unroll is not specified and it has no default or it
            is <= 0, or if the split dimension is not divisible by unroll.
    """
    if (unroll is None):
        raise ValueError('You must set unroll either here or in the defaults.')
    shape = input_layer.shape
    if ((shape[0] is not None) and ((shape[0] % unroll) != 0)):
        raise ValueError(('Must divide the split dimension evenly: %d mod %d != 0' % (shape[0], unroll)))
    if (unroll <= 0):
        raise ValueError(('Unroll must be > 0: %s' % unroll))
    elif (unroll == 1):
        # Degenerate case: a single timestep needs no split op.
        splits = [input_layer.tensor]
    else:
        splits = tf.split(value=input_layer.tensor, num_or_size_splits=unroll, axis=0)
    result = input_layer.with_sequence(splits)
    # 'unroll' has been consumed; drop it so it does not leak into defaults.
    defaults = result.defaults
    if ('unroll' in defaults):
        del defaults['unroll']
    return result
Cleaves a tensor into a sequence, this is the inverse of squash. Recurrent methods unroll across an array of Tensors with each one being a timestep. This cleaves the first dim so that each it is an array of Tensors. It is the inverse of squash_sequence. Args: input_layer: The input layer. unroll: The number of time steps. Returns: A PrettyTensor containing an array of tensors. Raises: ValueError: If unroll is not specified and it has no default or it is <= 0.
codesearchnet
def print_generic_type(self, t: types.BaseValue) -> str:
    """Returns a string of the generic type of t.

    For example, if t is `[0]`, then this method returns "list[int]".

    Args:
        t: An abstract value.
    """
Returns a string of the generic type of t. For example, if t is `[0]`, then this method returns "list[int]". Args: t: An abstract value.
github-repos
def astimezone(self, tzinfo):
    """Returns a version of this timestamp converted to the given timezone.

    Args:
        tzinfo: Either a datetime.tzinfo object or a string (which will be
            looked up in pytz).

    Returns:
        A datetime_tz object in the given timezone.
    """
    # Only valid on aware timestamps.
    assert (self.tzinfo is not None)
    tzinfo = _tzinfome(tzinfo)
    d = self.asdatetime(naive=False).astimezone(tzinfo)
    # Re-wrap in this class so the result keeps the datetime_tz behavior.
    return type(self)(d)
Returns a version of this timestamp converted to the given timezone. Args: tzinfo: Either a datetime.tzinfo object or a string (which will be looked up in pytz. Returns: A datetime_tz object in the given timezone.
codesearchnet
def listChecksumAlgorithms(self, vendorSpecific=None):
    """Return the parsed ChecksumAlgorithmList from the node.

    See Also: listChecksumAlgorithmsResponse()

    Args:
        vendorSpecific: optional vendor-specific headers for the request.

    Returns:
        The deserialized ChecksumAlgorithmList DataONE type.
    """
    raw_response = self.listChecksumAlgorithmsResponse(vendorSpecific)
    return self._read_dataone_type_response(raw_response, 'ChecksumAlgorithmList')
See Also: listChecksumAlgorithmsResponse() Args: vendorSpecific: Returns:
juraj-google-style
def chunk_sequence(sequence, chunk_length=200, padding_value=0):
    """Split a nested dict of sequence tensors into a batch of chunks.

    This function does not expect a batch of sequences, but a single
    sequence. A `length` key is added if it did not exist already.

    Args:
        sequence: Nested dict of tensors with time dimension.
        chunk_length: Size of chunks the sequence will be split into.
        padding_value: Value used for padding the last chunk after the
            sequence.

    Returns:
        Nested dict of sequence tensors with chunk dimension.
    """
    if 'length' in sequence:
        length = sequence.pop('length')
    else:
        length = tf.shape(tools.nested.flatten(sequence)[0])[0]
    # Bug fix: number of chunks is ceil(length / chunk_length); the previous
    # `length - 1` made padding_length nonsensical for any real sequence.
    num_chunks = (length - 1) // chunk_length + 1
    padding_length = chunk_length * num_chunks - length
    # Pad the tail with `padding_value` so the sequence divides evenly.
    padded = tools.nested.map(
        lambda tensor: tf.concat([
            tensor, 0 * tensor[:padding_length] + padding_value], 0),
        sequence)
    chunks = tools.nested.map(
        lambda tensor: tf.reshape(
            tensor, [num_chunks, chunk_length] + tensor.shape[1:].as_list()),
        padded)
    # All chunks are full except the last, which excludes the padding.
    chunks['length'] = tf.concat([
        chunk_length * tf.ones((num_chunks - 1,), dtype=tf.int32),
        [chunk_length - padding_length]], 0)
    return chunks
Split a nested dict of sequence tensors into a batch of chunks. This function does not expect a batch of sequences, but a single sequence. A `length` key is added if it did not exist already. Args: sequence: Nested dict of tensors with time dimension. chunk_length: Size of chunks the sequence will be split into. padding_value: Value used for padding the last chunk after the sequence. Returns: Nested dict of sequence tensors with chunk dimension.
juraj-google-style
def get_sessions(self, app_path=None):
    """Gets all currently active sessions for applications.

    Args:
        app_path (str, optional): The configured application path for the
            application to return sessions for. If None, return active
            sessions for all applications. (default: None)

    Returns:
        list[ServerSession]
    """
    if app_path is not None:
        return self._tornado.get_sessions(app_path)
    # No specific app requested: aggregate across every configured path.
    sessions = []
    for path in self._tornado.app_paths:
        sessions.extend(self._tornado.get_sessions(path))
    return sessions
Gets all currently active sessions for applications. Args: app_path (str, optional) : The configured application path for the application to return sessions for. If None, return active sessions for all applications. (default: None) Returns: list[ServerSession]
codesearchnet
def submit(self, job):
    """Submit job to the engine.

    Builds a provisioned Docker image for the job, creates and starts the
    container, and records the resulting ids on the job.

    Args:
        job (pyccc.job.Job): Job to submit.
    """
    self._check_job(job)
    if (job.workingdir is None):
        job.workingdir = self.default_wdir
    # Bake the job's inputs into a new image layered on job.image.
    job.imageid = du.create_provisioned_image(self.client, job.image, job.workingdir, job.inputs)
    container_args = self._generate_container_args(job)
    job.rundata.container = self.client.create_container(job.imageid, **container_args)
    self.client.start(job.rundata.container)
    # The container id doubles as the engine-level job id.
    job.rundata.containerid = job.rundata.container['Id']
    job.jobid = job.rundata.containerid
Submit job to the engine Args: job (pyccc.job.Job): Job to submit
codesearchnet
def _flag_value_as_int_list(self, wanted_flag_name): int_list = [] found, flag_value = self.get_flag_value(wanted_flag_name) if found and flag_value: try: integer_values = flag_value.split(',') int_list = [int(int_val) for int_val in integer_values] except ValueError: logging.warning('Cannot convert %s to int for flag %s', int_list, wanted_flag_name) return int_list
Returns the integer list of a TensorTracer flag. Args: wanted_flag_name: the name of the flag we are looking for. Returns: the value of the flag. Raises: RuntimeError: If supposedly deadcode is reached.
github-repos
def saveable_objects_from_trackable(obj, tf1_saver=False):
    """Returns a SaveableObject factory dict from a Trackable.

    Args:
        obj: A `Trackable`.
        tf1_saver: Boolean, whether this is being called from a TF1 Saver
            (`tf.compat.v1.train.Saver`). When True, the SaveableObject is
            generated from `obj`'s legacy `_gather_saveables_for_checkpoint`.
            When saving with TF2, `Trackable._serialize_to_tensors` is
            preferred.

    Returns:
        A dict mapping attribute names to SaveableObject factories
        (callables that produce a SaveableObject).
    """
    # Python state (e.g. numpy RNG) serializes via its own callbacks.
    if isinstance(obj, python_state.PythonState):
        return {python_state.PYTHON_STATE: functools.partial(_PythonStringStateSaveable, state_callback=obj.serialize, restore_callback=obj.deserialize)}
    if tf1_saver:
        saveable_factories = obj._gather_saveables_for_checkpoint()
        if saveable_factories:
            return saveable_factories
    if trackable_has_serialize_to_tensor(obj):

        def create_saveable(name='', call_with_mapped_captures=None):
            # Build SaveSpecs from the tensors the object reports.
            save_fn = obj._serialize_to_tensors
            if call_with_mapped_captures and isinstance(save_fn, core.ConcreteFunction):
                tensor_dict = call_with_mapped_captures(save_fn, [])
            else:
                tensor_dict = save_fn()
            specs = []
            local_names = []
            for tensor_name, maybe_tensor in tensor_dict.items():
                local_names.append(tensor_name)
                # Normalize bare tensors to {slice_spec: tensor} form.
                if not isinstance(maybe_tensor, dict):
                    maybe_tensor = {'': maybe_tensor}
                spec_name = name + trackable_utils.escape_local_name(tensor_name)
                for slice_spec, tensor in maybe_tensor.items():
                    if isinstance(tensor, saveable_object.SaveSpec):
                        # Reuse a pre-built spec, just fixing up its naming.
                        spec = tensor
                        spec.name = spec_name
                        spec.slice_spec = slice_spec
                    else:
                        spec = saveable_object.SaveSpec(tensor, slice_spec, spec_name)
                    specs.append(spec)
            return TrackableSaveable(obj=obj, specs=specs, name=name, local_names=local_names, prefix=saveable_compat.get_saveable_name(obj) or '', call_with_mapped_captures=call_with_mapped_captures)
        return {trackable_utils.SERIALIZE_TO_TENSORS_NAME: create_saveable}
    else:
        # Legacy path for objects without _serialize_to_tensors.
        return obj._gather_saveables_for_checkpoint()
Returns SaveableObject factory dict from a Trackable. Args: obj: A `Trackable` tf1_saver: Boolean, whether this is being called from a TF1 Saver ( `tf.compat.v1.train.Saver`). When this is True, the SaveableObject will be generated from `obj`'s legacy `_gather_saveables_for_checkpoint` fn. When saving with TF2, `Trackable._serialize_from_tensors` is preferred. Returns: A dict mapping attribute names to SaveableObject factories (callables that produce a SaveableObject).
github-repos
def validate_format(self, **kwargs):
    """Call ConfigParser to validate this config.

    Renders the current config to a string and re-parses it; a malformed
    config makes `read_string` raise.

    Args:
        kwargs: overrides passed to :class:`configparser.ConfigParser`.
    """
    # Mirror this object's parser settings unless explicitly overridden.
    args = dict(dict_type=self._dict, allow_no_value=self._allow_no_value, inline_comment_prefixes=self._inline_comment_prefixes, strict=self._strict, empty_lines_in_values=self._empty_lines_in_values)
    args.update(kwargs)
    parser = ConfigParser(**args)
    updated_cfg = str(self)
    # Parse purely for validation; the parsed result is discarded.
    parser.read_string(updated_cfg)
Call ConfigParser to validate config Args: kwargs: are passed to :class:`configparser.ConfigParser`
codesearchnet
def generate_main(self, main_filename, boilerplate_contents):
    """Generate the contents of the __main__.py file.

    Takes the module that is the main entry point and inserts boilerplate
    to invoke import helper code at the marker matched by
    `_boilerplate_insertion_regex`.

    Returns:
        A StoredResource holding the rewritten __main__.py bytes.
    """
    # latin-1 round-trips every byte, so arbitrary source bytes survive.
    with io.open(main_filename, 'rt', encoding='latin-1') as main_file:
        original_content = main_file.read()
    match = re.match(_boilerplate_insertion_regex, original_content)
    assert match, original_content
    # The regex must cover the whole file: before + after == everything.
    assert len(match.group('before')) + len(match.group('after')) == len(original_content), (match, original_content)
    new_content = match.group('before') + boilerplate_contents + match.group('after')
    encoded_content = new_content.encode('latin-1')
    return stored_resource.StoredContent('__main__.py', self.timestamp_tuple, encoded_content)
Generate the contents of the __main__.py file We take the module that is specified as the main entry point, and insert some boilerplate to invoke import helper code. Returns: A StoredResource
github-repos
def build_data(data_path, size, dataset):
    """Creates the dataset pipeline for CIFAR data.

    Args:
        data_path: Filename glob for cifar10/cifar100 data.
        size: The number of images per batch.
        dataset: 'cifar10' or 'cifar100'.

    Returns:
        The next-element op of a one-shot iterator yielding (image, label).
    """
    image_size = 32
    if (dataset == 'cifar10'):
        label_bytes = 1
        label_offset = 0
    elif (dataset == 'cifar100'):
        label_bytes = 1
        # cifar100 records carry a coarse label byte before the fine label.
        label_offset = 1
    depth = 3
    image_bytes = ((image_size * image_size) * depth)
    record_bytes = ((label_bytes + label_offset) + image_bytes)

    def load_transform(value):
        # Convert a raw fixed-length record into an (image, label) pair.
        record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])
        label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32)
        # Records store images depth-major; transpose to HWC.
        depth_major = tf.reshape(tf.slice(record, [label_bytes], [image_bytes]), [depth, image_size, image_size])
        image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
        return (image, label)
    data_files = tf.gfile.Glob(data_path)
    data = tf.contrib.data.FixedLengthRecordDataset(data_files, record_bytes=record_bytes)
    data = data.map(load_transform)
    data = data.batch(size)
    iterator = data.make_one_shot_iterator()
    return iterator.get_next()
Creates the queue and preprocessing operations for the dataset. Args: data_path: Filename for cifar10 data. size: The number of images in the dataset. dataset: The dataset we are using. Returns: queue: A Tensorflow queue for extracting the images and labels.
codesearchnet
def get_text_features(self, input_ids=None, attention_mask=None, position_ids=None, token_type_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None):
    """Compute projected text embeddings.

    Returns:
        text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to
            the pooled output of the text model.
    """
    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
    # Index 1 is the pooled output of the text encoder.
    pooled_output = text_outputs[1]
    text_features = self.text_projection(pooled_output)
    return text_features
Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`]. Examples: ```python >>> from transformers import VisionTextDualEncoderModel, AutoTokenizer >>> model = VisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian") >>> tokenizer = AutoTokenizer.from_pretrained("clip-italian/clip-italian") >>> inputs = tokenizer(["una foto di un gatto", "una foto di un cane"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```
github-repos
def check(self, url: str) -> Optional[dict]:
    """Return cached data for *url*, or None if absent or expired.

    Data is not fetched again when it has expired.

    Args:
        url: url to check expiration on.

    Returns:
        The cached value's payload, possibly None.
    """
    entry = self.data.get(url)
    if not entry:
        return None
    # Expiration may invalidate the entry, yielding None.
    entry = self._check_expiration(url, entry)
    return entry.data if entry else None
Check if data for a url has expired. Data is not fetched again if it has expired. Args: url: url to check expiration on Returns: value of the data, possibly None
juraj-google-style
def versions(self):
    """Gets the Version API client.

    Returns:
        Version: lazily constructed on first access, then cached.
    """
    if (not self.__versions):
        self.__versions = Versions(self.__connection)
    return self.__versions
Gets the Version API client. Returns: Version:
codesearchnet
def _get_flow_for_token(csrf_token, request):
    """Looks up the flow in session to recover requested-scope information.

    Args:
        csrf_token: The token passed in the callback request that should
            match the one previously generated and stored in the session on
            the initial authorization view.
        request: The request whose session holds the pickled flow.

    Returns:
        The OAuth2 Flow object associated with this CSRF token, or None
        when no flow was stored for it.
    """
    session_key = _FLOW_KEY.format(csrf_token)
    flow_pickle = request.session.get(session_key, None)
    if flow_pickle is None:
        return None
    return jsonpickle.decode(flow_pickle)
Looks up the flow in session to recover information about requested scopes. Args: csrf_token: The token passed in the callback request that should match the one previously generated and stored in the request on the initial authorization view. Returns: The OAuth2 Flow object associated with this flow based on the CSRF token.
codesearchnet
def deprecate_entity(
    self,
    ilx_id: str,
    note = None,
) -> None:
    """Tags a term in InterLex to warn that it is no longer used.

    There isn't a proper way to delete a term, so it is marked deprecated
    via annotations (to be interpreted downstream in mysql/ttl loads).

    Args:
        ilx_id: id of the term to be deprecated.
        note: optional editor note attached as a second annotation.

    Example:
        deprecate_entity('ilx_0101431')
    """
    # Resolve the term's internal id and current version from a search.
    term_id, term_version = [(d['id'], d['version']) for d in self.ilxSearches([ilx_id], crawl=True, _print=False).values()][0]
    # annotation_tid 306375 marks the "deprecated" annotation type.
    annotations = [{
        'tid': term_id,
        'annotation_tid': '306375',
        'value': 'True',
        'term_version': term_version,
        'annotation_term_version': '1',
    }]
    if note:
        # annotation_tid 306378 carries the free-text editor note.
        editor_note = {
            'tid': term_id,
            'annotation_tid': '306378',
            'value': note,
            'term_version': term_version,
            'annotation_term_version': '1',
        }
        annotations.append(editor_note)
    self.addAnnotations(annotations, crawl=True, _print=False)
    print(annotations)
Tagged term in interlex to warn this term is no longer used There isn't an proper way to delete a term and so we have to mark it so I can extrapolate that in mysql/ttl loads. Args: term_id: id of the term of which to be deprecated term_version: version of the term of which to be deprecated Example: deprecateTerm('ilx_0101431', '6')
juraj-google-style
def remove_model_config_classes_from_config_check(model_config_classes):
    """Remove deprecated model config classes from check_config_attributes.py.

    Args:
        model_config_classes (List[str]): The model config classes to remove,
            e.g. ["BertConfig", "DistilBertConfig"].
    """
    filename = REPO_PATH / 'utils/check_config_attributes.py'
    with open(filename, 'r') as f:
        check_config_attributes = f.read()
    # Track whether we are inside the SPECIAL_CASES_TO_ALLOW structure and
    # whether we are skipping a multi-line list entry.
    in_special_cases_to_allow = False
    in_indent = False
    new_file_lines = []
    for line in check_config_attributes.split('\n'):
        indent = get_line_indent(line)
        if line.strip() == 'SPECIAL_CASES_TO_ALLOW = {' or line.strip() == 'SPECIAL_CASES_TO_ALLOW.update(':
            in_special_cases_to_allow = True
        elif in_special_cases_to_allow and indent == 0 and (line.strip() in ('}', ')')):
            in_special_cases_to_allow = False
        if in_indent:
            # Currently dropping a multi-line list value; stop at its close.
            if line.strip().endswith((']', '],')):
                in_indent = False
            continue
        if in_special_cases_to_allow and any((model_config_class in line for model_config_class in model_config_classes)):
            # Also drop the comment lines immediately preceding the entry.
            while new_file_lines[-1].strip().startswith('#'):
                new_file_lines.pop()
            if line.strip().endswith('['):
                # Entry's value spans multiple lines; skip until it closes.
                in_indent = True
            continue
        elif any((model_config_class in line for model_config_class in model_config_classes)):
            continue
        new_file_lines.append(line)
    with open(filename, 'w') as f:
        f.write('\n'.join(new_file_lines))
Remove the deprecated model config classes from the check_config_attributes.py file Args: model_config_classes (List[str]): The model config classes to remove e.g. ["BertConfig", "DistilBertConfig"]
github-repos
def get_roles(client):
    """Returns a list of all the roles for an account.

    Follows the IAM pagination markers until the listing is exhausted.

    Args:
        client (:obj:`boto3.session.Session`): A boto3 IAM client.

    Returns:
        :obj:`list` of `dict`
    """
    roles = []
    marker = None
    while True:
        if marker:
            response = client.list_roles(Marker=marker)
        else:
            response = client.list_roles()
        roles.extend(response['Roles'])
        if not response['IsTruncated']:
            return roles
        marker = response['Marker']
Returns a list of all the roles for an account. Returns a list containing all the roles for the account. Args: client (:obj:`boto3.session.Session`): A boto3 Session object Returns: :obj:`list` of `dict`
codesearchnet
def scalar(self, tag, value, step=None):
    """Saves scalar value.

    Args:
        tag: str: label for this data.
        value: int/float: number to log.
        step: int: training step; defaults to the writer's current step.
    """
    value = float(onp.array(value))
    if step is None:
        step = self._step
    else:
        # An explicit step also advances the writer's notion of "current".
        self._step = step
    summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
    self.add_summary(summary, step)
Saves scalar value. Args: tag: str: label for this data value: int/float: number to log step: int: training step
juraj-google-style
def _Completion(self, match): word = str(match.group())[2:(- 2)] return (('(' + '('.join(word)) + (')?' * len(word)))
r"""Replaces double square brackets with variable length completion. Completion cannot be mixed with regexp matching or '\' characters i.e. '[[(\n)]] would become (\(n)?)?.' Args: match: A regex Match() object. Returns: String of the format '(a(b(c(d)?)?)?)?'.
codesearchnet
def copy_modified_gene(self, modified_gene, ignore_model_attributes=True):
    """Copy attributes of a Gene object over to this Gene, given that the
    modified gene has the same ID.

    Args:
        modified_gene (Gene, GenePro): Gene with modified attributes that
            you want to copy over.
        ignore_model_attributes (bool): If True, skip attributes related to
            metabolic models (model/reaction/functional).
    """
    # Model-bound attributes to skip when ignore_model_attributes is True.
    model_attrs = ['_model', '_reaction', '_functional', 'model', 'reaction', 'functional']
    for attr in dir(modified_gene):
        if attr.startswith('__'):
            continue
        # Skip properties defined on this object's class.
        if isinstance(getattr(type(self), attr, None), property):
            continue
        # Bug fix: inspect the *source* object's attribute -- checking
        # `self` raised AttributeError for attributes self lacked.
        if callable(getattr(modified_gene, attr)):
            continue
        # Bug fix: the original `attr not in ignore and ignore_model_attributes`
        # copied nothing at all when the flag was False.
        if ignore_model_attributes and attr in model_attrs:
            continue
        setattr(self, attr, getattr(modified_gene, attr))
Copy attributes of a Gene object over to this Gene, given that the modified gene has the same ID. Args: modified_gene (Gene, GenePro): Gene with modified attributes that you want to copy over. ignore_model_attributes (bool): If you want to ignore copying over attributes related to metabolic models.
juraj-google-style
def update_device_info(self, device_id, display_name):
    """Update the display name of a device.

    Args:
        device_id (str): The device ID of the device to update.
        display_name (str): New display name for the device.
    """
    payload = {"display_name": display_name}
    return self._send("PUT", "/devices/%s" % device_id, content=payload)
Update the display name of a device. Args: device_id (str): The device ID of the device to update. display_name (str): New display name for the device.
juraj-google-style
def get_url(profile, resource):
    """Get the URL for a resource.

    Args:
        profile: A profile generated from
            ``simplygithub.authentication.profile``; supplies the ``repo``
            to connect to.
        resource: The part of a Github API URL that comes after
            ``.../:repo/git``, e.g. ``/commits``.

    Returns:
        The full URL for the specified resource under the given profile.
    """
    repo = profile['repo']
    return GITHUB_API_BASE_URL + 'repos/' + repo + '/git' + resource
Get the URL for a resource. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. resource The part of a Github API URL that comes after ``.../:repo/git``. For instance, for ``.../:repo/git/commits``, it's ``/commits``. Returns: The full URL for the specified resource under the specified profile.
codesearchnet
def run_display_app_output(self, out):
    """Print any App output, unless quiet mode is requested.

    Args:
        out (bytes): One or more lines of output messages (utf-8 encoded).
    """
    # Respect quiet settings from both the profile and CLI args.
    if ((not self.profile.get('quiet')) and (not self.args.quiet)):
        print('App Output:')
        for o in out.decode('utf-8').split('\n'):
            print('  {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, o))
            self.log.debug('[tcrun] App output: {}'.format(o))
Print any App output. Args: out (str): One or more lines of output messages.
codesearchnet
def destroy_s3_event(app, env, region):
    """Destroy S3 event.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment.
        region (str): AWS region.

    Returns:
        bool: True upon successful completion.
    """
    generated = get_details(app=app, env=env)
    bucket = generated.s3_app_bucket()
    session = boto3.Session(profile_name=env, region_name=region)
    s3_client = session.client('s3')
    # An empty notification configuration removes all existing triggers.
    config = {}
    s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=config)
    LOG.debug('Deleted Lambda S3 notification')
    return True
Destroy S3 event. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: bool: True upon successful completion.
codesearchnet
def watch(self, selector, callback):
    """Call a function whenever a stream changes.

    Args:
        selector (DataStreamSelector): The selector to watch. If this is
            None, it is treated as a wildcard selector that matches every
            stream.
        callback (callable): The function to call when a new reading is
            pushed. Called as: callback(stream, value).
    """
    # Register under the selector, creating its callback set on first use.
    self._monitors.setdefault(selector, set()).add(callback)
Call a function whenever a stream changes. Args: selector (DataStreamSelector): The selector to watch. If this is None, it is treated as a wildcard selector that matches every stream. callback (callable): The function to call when a new reading is pushed. Callback is called as: callback(stream, value)
juraj-google-style
def get_plan(self, plan_code):
    """Check all the information of a subscription plan for this merchant.

    Args:
        plan_code: Plan's identification code for the merchant.

    Returns:
        The API response for the plan lookup.
    """
    endpoint = self.url + 'plans/{}'.format(plan_code)
    return self.client._get(endpoint, headers=self.get_headers())
Check all the information of a plan for subscriptions associated with the merchant. Args: plan_code: Plan’s identification code for the merchant. Returns:
juraj-google-style
def _wake_up_timer(self, kill_event):
    """Internal. This is the function that the timer thread executes.

    Waits on an event so that the thread can make a quick exit when
    close() is called.

    Args:
        kill_event (threading.Event): Event to wait on.
    """
    while True:
        prev = self._wake_up_time
        # Block until either the kill event fires or the deadline passes.
        time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
        if time_to_die:
            return
        if prev == self._wake_up_time:
            # Deadline unchanged while sleeping: the timer legitimately fired.
            self.make_callback(kind='timer')
        else:
            # Deadline moved while sleeping; loop and wait for the new one.
            print("Sleeping a bit more")
Internal. This is the function that the thread will execute. waits on an event so that the thread can make a quick exit when close() is called Args: - kill_event (threading.Event) : Event to wait on
juraj-google-style
async def _handle_watermark_notification(self, watermark_notification):
    """Receive WatermarkNotification and update the conversation.

    Args:
        watermark_notification: hangouts_pb2.WatermarkNotification instance
    """
    conv_id = watermark_notification.conversation_id.id
    res = parsers.parse_watermark_notification(watermark_notification)
    # Fire the client-level event before the per-conversation event.
    (await self.on_watermark_notification.fire(res))
    try:
        conv = (await self._get_or_fetch_conversation(conv_id))
    except exceptions.NetworkError:
        # Best-effort: the client-level event already fired; just log.
        logger.warning('Failed to fetch conversation for watermark notification: %s', conv_id)
    else:
        (await conv.on_watermark_notification.fire(res))
Receive WatermarkNotification and update the conversation. Args: watermark_notification: hangouts_pb2.WatermarkNotification instance
codesearchnet
def initialize_particle(rng, domain, fitness_function):
    """Initializes a particle within a domain.

    Args:
        rng: numpy.random.RandomState: The random number generator.
        domain: cipy.problems.core.Domain: The domain of the problem.
        fitness_function: callable evaluating a position's fitness.

    Returns:
        cipy.algorithms.pso.Particle: A new, fully initialized particle.
    """
    # Sample a uniform starting position and evaluate it once; the initial
    # position doubles as the particle's personal best.
    start = rng.uniform(domain.lower, domain.upper, domain.dimension)
    start_fitness = fitness_function(start)
    return Particle(position=start,
                    velocity=np.zeros(domain.dimension),
                    fitness=start_fitness,
                    best_fitness=start_fitness,
                    best_position=start)
Initializes a particle within a domain. Args: rng: numpy.random.RandomState: The random number generator. domain: cipy.problems.core.Domain: The domain of the problem. Returns: cipy.algorithms.pso.Particle: A new, fully initialized particle.
juraj-google-style
def consume(generator):
    """Helper function to consume a synchronous or asynchronous generator.

    Arguments:
        generator (generator|asyncgenerator): generator to consume.

    Returns:
        list
    """
    # Synchronous iterator: drain it eagerly.
    if hasattr(generator, '__next__'):
        return list(generator)
    if not PY_35:
        raise RuntimeError(
            'paco: asynchronous iterator protocol not supported')
    # Async iterator: pull items via __anext__ until exhaustion. Note this
    # function is itself a generator/coroutine because of `yield from`.
    buf = []
    while True:
        try:
            buf.append((yield from generator.__anext__()))
        except StopAsyncIteration:
            break
    return buf
Helper function to consume a synchronous or asynchronous generator. Arguments: generator (generator|asyncgenerator): generator to consume. Returns: list
juraj-google-style
def airborne_position_with_ref(msg, lat_ref, lon_ref):
    """Decode airborne position from one message plus a nearby reference.

    The reference (previous fix, ground station, airport, ...) must be
    within 180 NM of the true position.

    Args:
        msg (string): even message (28 bytes hexadecimal string)
        lat_ref: previous known latitude
        lon_ref: previous known longitude

    Returns:
        (float, float): (latitude, longitude) of the aircraft, rounded to
        5 decimal places.
    """
    bits = common.hex2bin(msg)[32:]
    lat_cpr = common.bin2int(bits[22:39]) / 131072.0
    lon_cpr = common.bin2int(bits[39:56]) / 131072.0
    odd = int(bits[21])

    # Latitude zone size depends on the odd/even frame flag.
    zone_lat = 360.0 / 59 if odd else 360.0 / 60
    lat_index = common.floor(lat_ref / zone_lat) + common.floor(
        0.5 + (lat_ref % zone_lat) / zone_lat - lat_cpr)
    lat = zone_lat * (lat_index + lat_cpr)

    # Longitude zone size derived from the latitude via the NL lookup.
    nl = common.cprNL(lat) - odd
    zone_lon = 360.0 / nl if nl > 0 else 360.0
    lon_index = common.floor(lon_ref / zone_lon) + common.floor(
        0.5 + (lon_ref % zone_lon) / zone_lon - lon_cpr)
    lon = zone_lon * (lon_index + lon_cpr)

    return round(lat, 5), round(lon, 5)
Decode airborne position with only one message, knowing reference nearby location, such as previously calculated location, ground station, or airport location, etc. The reference position shall be with in 180NM of the true position. Args: msg (string): even message (28 bytes hexadecimal string) lat_ref: previous known latitude lon_ref: previous known longitude Returns: (float, float): (latitude, longitude) of the aircraft
codesearchnet
def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.

    Args:
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        pos: position just after the suspected template argument.

    Returns:
        True if (linenum, pos) is inside template arguments.
    """
    while (linenum < clean_lines.NumLines()):
        # Find the earliest character that might indicate a template argument.
        line = clean_lines.elided[linenum]
        match = Match('^[^{};=\\[\\]\\.<>]*(.)', line[pos:])
        if (not match):
            # No interesting character on this line; advance to the next one.
            linenum += 1
            pos = 0
            continue
        token = match.group(1)
        pos += len(match.group(0))
        # Braces and semicolons do not look like a template argument list.
        if (token in ('{', '}', ';')): return False
        # These look like a template argument list:
        #   <class T>, <class T = default>, <class T[]>, <class T...>
        if (token in ('>', '=', '[', ']', '.')): return True
        # Any token other than '<': move past it and keep scanning.
        if (token != '<'):
            pos += 1
            if (pos >= len(line)):
                linenum += 1
                pos = 0
            continue
        # Found a '<': skip its matching '>' before continuing the scan.
        (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, (pos - 1))
        if (end_pos < 0):
            # Unmatched '<' -- either a syntax error or not a template list.
            return False
        linenum = end_line
        pos = end_pos
    return False
Check if current position is inside template argument list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: position just after the suspected template argument. Returns: True if (linenum, pos) is inside template arguments.
codesearchnet
def upsert(self):
    """Create or update the Cloud Formation stack, injecting parameters.

    Returns:
        True if the stack create/update is started successfully, else
        False if initialization or the AWS call fails.

    Exits:
        sys.exit(1) if template analysis fails; sys.exit(0) after a
        dryrun change-set description -- there is no point continuing
        after either.
    """
    required_parameters = []
    self._stackParameters = []

    try:
        self._initialize_upsert()
    except Exception:
        # Initialization failure (config/template setup) aborts early.
        return False

    try:
        # Collect the parameter names the template requires vs. what we have.
        available_parameters = self._parameters.keys()
        for parameter_name in self._template.get('Parameters', {}):
            required_parameters.append(str(parameter_name))
        logging.info((' required parameters: ' + str(required_parameters)))
        logging.info(('available parameters: ' + str(available_parameters)))
        parameters = []
        for required_parameter in required_parameters:
            parameter = {}
            parameter['ParameterKey'] = str(required_parameter)
            required_parameter = str(required_parameter)
            # Fall back to a lower-cased key when the exact key is absent;
            # a KeyError here is caught by the outer handler below.
            if (required_parameter in self._parameters):
                parameter['ParameterValue'] = self._parameters[required_parameter]
            else:
                parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
            parameters.append(parameter)
        if (not self._analyze_stuff()):
            sys.exit(1)
        if self._config.get('dryrun', False):
            # Dryrun: generate and describe a change set, then exit cleanly.
            logging.info('Generating change set')
            set_id = self._generate_change_set(parameters)
            if set_id:
                self._describe_change_set(set_id)
            logging.info('This was a dryrun')
            sys.exit(0)
        self._tags.append({'Key': 'CODE_VERSION_SD', 'Value': self._config.get('codeVersion')})
        self._tags.append({'Key': 'ANSWER', 'Value': str(42)})
        if self._updateStack:
            stack = self._cloudFormation.update_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4()))
            logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
        else:
            stack = self._cloudFormation.create_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4()))
            logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
    except Exception as x:
        # Any AWS/parameter failure is logged (with traceback when verbose)
        # and reported as False rather than raised.
        if self._verbose:
            logging.error(x, exc_info=True)
        else:
            logging.error(x, exc_info=False)
        return False
    return True
The main event of the utility. Create or update a Cloud Formation stack, injecting properties where needed. Args: None Returns: True if the stack create/update is started successfully, else False if the start goes off in the weeds. Exits: If the user asked for a dry run, the utility exits here with code 0. There is no point continuing after that point.
codesearchnet
def filter(self, scored_list):
    """Filter scored sentences, keeping those above mean + 0.5 * std.

    Args:
        scored_list: A list of (sentence_index, score) tuples.

    Returns:
        The list of (sentence_index, score) tuples whose score exceeds
        the threshold.
    """
    scores = [score for _, score in scored_list]
    if scores:
        threshold = np.mean(scores) + 0.5 * np.std(scores)
    else:
        threshold = 0.0
    return [pair for pair in scored_list if pair[1] > threshold]
Filter scores by mean plus half a standard deviation. Args: scored_list: The list of (index, score) tuples. Returns: The list of filtered results.
juraj-google-style
def stop_batch_gradient(cls, x: 'TensorFluent', stop_batch: tf.Tensor) -> 'TensorFluent':
    """Return a copy of ``x`` with gradients conditionally stopped per batch.

    Args:
        x: The input fluent.
        stop_batch: A boolean tf.Tensor with shape=(batch_size, ...)

    Returns:
        A TensorFluent whose gradient is blocked wherever ``stop_batch``
        is True.
    """
    blocked = tf.stop_gradient(x.tensor)
    mixed = tf.where(stop_batch, blocked, x.tensor)
    return TensorFluent(mixed, x.scope.as_list(), x.batch)
Returns a copy of the inputs fluent with stop_gradient applied at batch level. Args: x: The input fluent. stop_batch: A boolean tf.Tensor with shape=(batch_size, ...) Returns: A TensorFluent that conditionally stops backpropagation of gradient computations.
codesearchnet
def _store_checkpoint(self, sess, saver, global_step):
    """Store a checkpoint if a log directory was provided to the constructor.

    The directory will be created if needed.

    Args:
        sess: Session containing variables to store.
        saver: Saver used for checkpointing.
        global_step: Step number of the checkpoint name.
    """
    # No-op unless both a log directory and a saver are available.
    if self._logdir and saver:
        tf.gfile.MakeDirs(self._logdir)
        saver.save(sess, os.path.join(self._logdir, 'model.ckpt'), global_step)
Store a checkpoint if a log directory was provided to the constructor. The directory will be created if needed. Args: sess: Session containing variables to store. saver: Saver used for checkpointing. global_step: Step number of the checkpoint name.
juraj-google-style
def load_lines(filename):
    """Load a UTF-8 text file as a list of lines.

    Args:
        filename: Path to the input file.

    Returns:
        A list of strings, one per line, with trailing newlines removed
        (other whitespace is preserved).
    """
    with open(filename, 'r', encoding='utf-8') as f:
        # Iterate the file object directly instead of materializing
        # readlines() first -- same result without the intermediate list.
        return [line.rstrip('\n') for line in f]
Load a text file as an array of lines. Args: filename: Path to the input file. Returns: An array of strings, each representing an individual line.
codesearchnet
def expire_key(self, key):
    """Expire ``key``: delete it from the dict and invoke the callback, if any.

    The value is looked up in ``self.base_dict`` before deletion so it can
    be handed to the callback.

    Args:
        key: The ``TimedDict`` key to expire.
    """
    expired_value = self.base_dict[key]
    del self[key]
    if self.callback is None:
        return
    self.callback(key, expired_value, *self.callback_args, **self.callback_kwargs)
Expire the key, delete the value, and call the callback function if one is specified. Args: key: The ``TimedDict`` key
codesearchnet
def __call__(self, request: beam.Row, *args, **kwargs):
    """Read a row from GCP BigTable and return (request, response) as a tuple.

    Args:
        request: the input `beam.Row` to enrich.

    Returns:
        Tuple of the original request and a `beam.Row` keyed by column
        family, then column qualifier.

    Raises:
        KeyError: if the configured row-key field is missing from the request.
        NotFound: if the BigTable table does not exist.
        ValueError: if no row matches and exception_level is RAISE.
    """
    response_dict: dict[str, Any] = {}
    row_key_str: str = ''
    try:
        # Derive the row key either from a user-supplied function or from
        # a named field of the request row.
        if self._row_key_fn:
            row_key = self._row_key_fn(request)
        else:
            request_dict = request._asdict()
            row_key_str = str(request_dict[self._row_key])
            row_key = row_key_str.encode(self._encoding)
        row = self._table.read_row(row_key, filter_=self._row_filter)
        if row:
            # Flatten cells into {column_family: {qualifier: value(s)}}.
            for cf_id, cf_v in row.cells.items():
                response_dict[cf_id] = {}
                for col_id, col_v in cf_v.items():
                    if self._include_timestamp:
                        # Keep every cell version as (value, timestamp) pairs.
                        response_dict[cf_id][col_id.decode(self._encoding)] = [(v.value.decode(self._encoding), v.timestamp) for v in col_v]
                    else:
                        # Only the latest cell version is returned.
                        response_dict[cf_id][col_id.decode(self._encoding)] = col_v[0].value.decode(self._encoding)
        elif self._exception_level == ExceptionLevel.WARN:
            _LOGGER.warning('no matching row found for row_key: %s with row_filter: %s' % (row_key_str, self._row_filter))
        elif self._exception_level == ExceptionLevel.RAISE:
            raise ValueError('no matching row found for row_key: %s with row_filter=%s' % (row_key_str, self._row_filter))
    except KeyError:
        # The request row lacks the configured key field.
        raise KeyError('row_key %s not found in input PCollection.' % row_key_str)
    except NotFound:
        raise NotFound('GCP BigTable cluster `%s:%s:%s` not found.' % (self._project_id, self._instance_id, self._table_id))
    except Exception as e:
        raise e
    return (request, beam.Row(**response_dict))
Reads a row from the GCP BigTable and returns a `Tuple` of request and response. Args: request: the input `beam.Row` to enrich.
github-repos
def update_utxoset(self, transaction):
    """Update the UTXO set given ``transaction``.

    Removes the outputs that the given ``transaction`` spends, and adds
    the outputs that it creates.

    Args:
        transaction (:obj:`~bigchaindb.models.Transaction`): A new
            transaction incoming into the system for which the UTXO set
            needs to be updated.
    """
    # list(...) instead of an identity comprehension -- same result,
    # clearer intent.
    spent_outputs = list(transaction.spent_outputs)
    if spent_outputs:
        self.delete_unspent_outputs(*spent_outputs)
    self.store_unspent_outputs(
        *[utxo._asdict() for utxo in transaction.unspent_outputs])
Update the UTXO set given ``transaction``. That is, remove the outputs that the given ``transaction`` spends, and add the outputs that the given ``transaction`` creates. Args: transaction (:obj:`~bigchaindb.models.Transaction`): A new transaction incoming into the system for which the UTXO set needs to be updated.
codesearchnet
def respond(self, prompt_id, response):
    """Respond to the prompt with the given ID.

    If there is no active prompt or the given ID doesn't match the active
    prompt, do nothing.

    Args:
        prompt_id: A string uniquely identifying the prompt.
        response: A string response to the given prompt.

    Returns:
        True if the prompt with the given ID was active, otherwise False.
    """
    _LOG.debug('Responding to prompt (%s): "%s"', prompt_id, response)
    with self._cond:
        # Ignore stale responses: only the currently active prompt counts.
        if (not (self._prompt and (self._prompt.id == prompt_id))):
            return False
        self._response = response
        self.last_response = (prompt_id, response)
        self.remove_prompt()
        # notify_all() is the non-deprecated spelling of notifyAll().
        self._cond.notify_all()
        return True
Respond to the prompt with the given ID. If there is no active prompt or the given ID doesn't match the active prompt, do nothing. Args: prompt_id: A string uniquely identifying the prompt. response: A string response to the given prompt. Returns: True if the prompt with the given ID was active, otherwise False.
codesearchnet
def parse_mmcif_header(infile):
    """Parse a few important header fields from an mmCIF file, with some
    manual curation of ligands.

    For full access to the mmCIF file use Biopython's MMCIF2Dict directly.

    Args:
        infile: Path to mmCIF file

    Returns:
        dict: Dictionary of parsed header fields (any of: pdb_title,
        description, date, experimental_method, resolution, chemicals,
        taxonomy_name). Missing fields are simply omitted; an unparseable
        file yields an empty dict.
    """
    from Bio.PDB.MMCIF2Dict import MMCIF2Dict

    newdict = {}
    try:
        mmdict = MMCIF2Dict(infile)
    except ValueError as e:
        # Unparseable file -- log and return an empty result rather than raise.
        log.exception(e)
        return newdict

    # Manual curation: drop water and standard peptide-linking residues from
    # the ligand list.
    chemical_ids_exclude = ['HOH']
    chemical_types_exclude = ['l-peptide linking', 'peptide linking']

    if '_struct.title' in mmdict:
        newdict['pdb_title'] = mmdict['_struct.title']
    else:
        log.debug('{}: No title field'.format(infile))

    if '_struct.pdbx_descriptor' in mmdict:
        newdict['description'] = mmdict['_struct.pdbx_descriptor']
    else:
        log.debug('{}: no description field'.format(infile))

    # Prefer the deposition date; fall back to the legacy revision date.
    if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict:
        newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']
    elif '_database_PDB_rev.date' in mmdict:
        newdict['date'] = mmdict['_database_PDB_rev.date']
    else:
        log.debug('{}: no date field'.format(infile))

    if '_exptl.method' in mmdict:
        newdict['experimental_method'] = mmdict['_exptl.method']
    else:
        log.debug('{}: no experimental method field'.format(infile))

    if '_refine.ls_d_res_high' in mmdict:
        # Resolution may be a single value or a list of values; narrow the
        # previously bare excepts to conversion/lookup failures only.
        try:
            if isinstance(mmdict['_refine.ls_d_res_high'], list):
                newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]
            else:
                newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])
        except (KeyError, TypeError, ValueError):
            # Fall back to the EM reconstruction resolution if present.
            try:
                newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])
            except (KeyError, TypeError, ValueError):
                log.debug('{}: no resolution field'.format(infile))
    else:
        log.debug('{}: no resolution field'.format(infile))

    if '_chem_comp.id' in mmdict:
        chemicals_filtered = ssbio.utils.filter_list_by_indices(
            mmdict['_chem_comp.id'],
            ssbio.utils.not_find(mmdict['_chem_comp.type'], chemical_types_exclude, case_sensitive=False))
        chemicals_filtered = ssbio.utils.filter_list(
            chemicals_filtered, chemical_ids_exclude, case_sensitive=True)
        newdict['chemicals'] = chemicals_filtered
    else:
        log.debug('{}: no chemical composition field'.format(infile))

    if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict:
        newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']
    else:
        log.debug('{}: no organism field'.format(infile))

    return newdict
Parse a couple important fields from the mmCIF file format with some manual curation of ligands. If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython. Args: infile: Path to mmCIF file Returns: dict: Dictionary of parsed header
codesearchnet
def encode_tf(self, s):
    """Encode a tf.Scalar string to a tf.Tensor.

    Useful for on-the-fly tokenization.

    Args:
        s: a tf.Scalar with dtype tf.string

    Returns:
        a 1d tf.Tensor with dtype tf.int32
    """
    token_ids = subword_text_encoder_ops.subword_text_encoder_encode(
        s, self._filepath)
    # Drop the final id (presumably an EOS-style marker appended by the
    # op -- TODO confirm against the op's documentation).
    return token_ids[:-1]
Encode a tf.Scalar string to a tf.Tensor. This will be necessary for on-the-fly tokenization. Args: s: a tf.Scalar with dtype tf.string Returns: a 1d tf.Tensor with dtype tf.int32
codesearchnet
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):
    """Run self-attention followed by the feed-forward sub-layer.

    Args:
        hidden_states (`torch.Tensor`): input of shape `(batch, seq_len, dim_model)`.
        attention_mask (`torch.Tensor`): mask of shape `(batch, seq_len, seq_len)`.
        position_bias (`torch.Tensor`, *optional*): positional bias of shape
            `(num_heads, seq_len, seq_len)`.
        output_attentions (`bool`, *optional*): whether to return attention tensors.
        past_key_values (`Tuple[torch.Tensor, torch.Tensor]`, *optional*):
            cached key/value projection states.
        use_cache (`bool`, *optional*): if `True`, key/value states are
            returned to speed up decoding.

    Returns:
        Tuple of (hidden_states, attn_weights, current_key_value).
    """
    attn_out = self.self_att(
        hidden_states,
        attention_mask=attention_mask,
        position_bias=position_bias,
        output_attentions=output_attentions,
        past_key_values=past_key_values,
        use_cache=use_cache)
    # The attention sub-layer returns its output plus attention weights
    # and the (possibly updated) key/value cache.
    hidden_states, attn_weights, current_key_value = attn_out
    hidden_states = self.ffn(hidden_states)
    return (hidden_states, attn_weights, current_key_value)
Args: hidden_states (`torch.Tensor`): Input to the layer of shape `(batch, seq_len, dim_model)` attention_mask (`torch.Tensor`): Avoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)` position_bias (`torch.Tensor`): Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Tuple[torch.Tensor, torch.Tensor])`, *optional*): Cached past key and value projection states use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).
github-repos
def attached_dependencies(self):
    """Return the list of dependencies to be saved in the checkpoint.

    These dependencies are not tracked by root, but are in the checkpoint.
    Defined when the user creates a Checkpoint with both root and kwargs
    set.

    Returns:
        A list of TrackableReferences.
    """
    deps = self._attached_dependencies
    return deps
Returns list of dependencies that should be saved in the checkpoint. These dependencies are not tracked by root, but are in the checkpoint. This is defined when the user creates a Checkpoint with both root and kwargs set. Returns: A list of TrackableReferences.
github-repos