content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from typing import List
from functools import reduce
def product(li: List[int]) -> int:
    """Calculates the product of all the numbers in a list.

    An empty list returns the multiplicative identity, 1, instead of the
    TypeError that ``reduce`` without an initializer would raise.

    doctests:
    >>> product([2, 1, 4])
    8
    >>> product([3, 1, 4, 2, 5, 8])
    960
    >>> product([])
    1
    """
    # Initializer 1 makes the empty-list case well defined.
    return reduce(lambda a, b: a * b, li, 1)
def get_etl_pipeline_ids(client):
"""Return a dict mapping pipeline ids to their names, filtering on ETL pipelines."""
paginator = client.get_paginator("list_pipelines")
response_iterator = paginator.paginate()
filtered_iterator = response_iterator.search("pipelineIdList[?contains(@.name, 'ETL') == `tru... | 10dcd1d933ed8adabd75740a55d567cf786fffbb | 9,051 |
import os
import contextlib
import wave
def skip_long_utterance(wav_file, cut_off_len = 15):
""" from "Text-Free Image-to-Speech Synthesis Using Learned Segmental Units" Appendix A:
When computing duration statistics, we exclude utterances longer than 15s for SpokenCOCO...
"""
if os.path.isl... | 9f2d81f30aaa0e5325b6ec88a116810f4c6c7ae9 | 9,052 |
import os
def GetUniqueName(path, name):
"""Make a file name that will be unique in case a file of the
same name already exists at that path.
@param path: Root path to folder of files destination
@param name: desired file name base
@return: string
"""
tmpname = os.path.join(path, name)
... | c22b5d897fcec291f4e1e69fae8d85a9601ba15e | 9,053 |
def get_clusters_from_file(path, ref_cluster_names=[]):
"""Get cluster names, labels, and cells from cluster file or metadata file
"""
clusters = {}
with open(path) as f:
lines = f.readlines()
headers = [line.strip().split('\t') for line in lines[:2]]
names = headers[0]
types = hea... | cabf14c72b0467b0f2b15e3c0b8c8bd1846e92b5 | 9,055 |
def _tracking(fcn):
    """Decorator that marks the owning object as changed on every call.

    Sets ``self.changed = True`` before delegating, and forwards all
    positional *and* keyword arguments (the original dropped kwargs).
    """
    import functools

    @functools.wraps(fcn)  # preserve the wrapped method's name/docstring
    def new_fcn(self, *args, **kwargs):
        # Flag first so the change is recorded even if fcn raises mid-way.
        self.changed = True
        return fcn(self, *args, **kwargs)

    return new_fcn
import sys
def caller_module(level=2, sys=sys):
    """Return the module object of the caller ``level`` frames up the stack.

    Taken from Pyramid Web Framework - ``pyramid.path.caller_module``.
    """
    frame_globals = sys._getframe(level).f_globals
    name = frame_globals.get('__name__') or '__main__'
    return sys.modules[name]
import re
def javacode_to_tokens(code: str):
    """Tokenise Java source at the method level (javadoc-free input).

    Returns a tuple of ([code-tokens], code-string).
    """
    tokens = re.findall(r"\w+(?:'\w+)*|[^\w\s]", code)
    return (tokens, code)
from typing import Optional
from typing import List
def _convert_names(
names, max_levels: Optional[int] = None, err_msg: Optional[str] = None
) -> List[str]:
"""Helper function that converts arguments of index, columns, values to list.
Also performs check on number of levels. If it exceeds `max_levels`,... | d67fb93b039306e7dac973abffe1e08089993c0d | 9,059 |
def is_MC(parcels):
    """
    Dummy (0/1 indicator) for Maricopa County.
    """
    in_maricopa = parcels.county == 'MC'
    return in_maricopa.astype(int)
def objScale(obj, factor):
    """
    Object scaling function: given an object with ``get_size()`` and a scale
    factor, return a list with each dimension divided by the factor
    (truncated to int).
    """
    return [int(dim / float(factor)) for dim in obj.get_size()]
def preprocess_encoder_input(arr):
"""
Simple method to handle the complex MFCC coefs that are produced during preprocessing. This means:
1. (For now), discarding one of the channels of the MFCC coefs
2. Collapsing any empty dimensions
:param arr: the array of MFCC coefficients.
"""
return a... | ccd754783377e9fe257e423f9099d6dbef21d11b | 9,062 |
def determineWinCardIndex(pile):
"""
@param list pile: pile of cards in center
@return int: index of winning card -
highest index of leading suit, or if there are spades, highest spades index
"""
bestCard = (pile[0], 0)
for i in range(1, len(pile)):
bestCardSuit = bestCard[0].ind... | c105ade59b79482de0f2d3e6b4d25c4353bb1dcf | 9,063 |
def greyList(n):
"""
生成格雷编码序列
参考:https://www.jb51.net/article/133575.htm
:param n: 长度
:return: 范围 2 ** n的格雷序列
"""
def get_grace(list_grace, n):
if n == 1:
return list_grace
list_before, list_after = [], []
for i in range(len(list_grace)):
list... | eab1f00ec2cdd62fbbffbb78a2c69c6fe9177e66 | 9,065 |
def hbb_to_kaa(hessian):
    """
    Unit conversion on an input Hessian matrix, from Hartrees/Bohr/Bohr
    to kcal/mol/Angstrom/Angstrom.
    """
    hartree_to_kcal_mol = 627.509474
    bohr_to_angstrom = 0.529177
    # Same operation order as the canonical form: scale, then divide.
    return (hessian * hartree_to_kcal_mol) / (bohr_to_angstrom ** 2)
def coloring(color, text):
"""Print a text in a specified color"""
color_sequences = {
'default': '\033[0m',
'black': '\033[30m',
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'purple': '\033[35m',
'lightblue': '... | 3953d72329a01453f52fd099bb20624c7661aa87 | 9,067 |
import token
import requests
def get_webhook():
"""
Getting installed webhooks
:return: installed webhooks
"""
header = {'Content-Type': 'application/json;charset=utf-8'}
request_address = 'https://api.ok.ru/graph/me/subscriptions?access_token={token}'.format(token=token)
response = reques... | 345a331514d276b336803d2fed182d7591eb8cc6 | 9,069 |
import math
def haversine(rad):
    """
    Return the haversine function of an angle given in radians:
    hav(x) = (1 - cos(x)) / 2.
    """
    return 0.5 * (1 - math.cos(rad))
def find_break_edges(ptree):
""" Find edges which to remove from the graph for the original tree behind this ptree.
==> edges between adjac
"""
ret = set()
if len(ptree.insert_descendants) > 0:
lca = ptree.insert_descendants[0]
for lca_child in lca:
ret.add((lca.nodei... | db9ff3ae36dba799d6f479f83e775b41d7bce3df | 9,071 |
def get_celsius(temperature_in_fahrenheit):
    """
    Returns the temperature in Celsius of the given Fahrenheit temperature.
    For example, this function returns 100.0 when given 212.
    Type hints:
    :type temperature_in_fahrenheit: float
    """
    # Standard conversion: C = (F - 32) * 5/9.
    return (temperature_in_fahrenheit - 32) * (5 / 9)
import bz2
import json
import codecs
def json_exporter(data, filepath, compress=True):
"""Export a file to JSON. Compressed with ``bz2`` is ``compress``.
Returns the filepath of the JSON file. Returned filepath is not necessarily ``filepath``, if ``compress`` is ``True``."""
if compress:
filepath... | dcdb9026b302c3bec6b6a7215cee0498a8655a61 | 9,073 |
import math
def _realroots_quadratic(a1, a0):
    """gives the real roots of x**2 + a1 * x + a0 = 0"""
    discriminant = a1 * a1 - 4 * a0
    if discriminant < 0:
        return []  # complex roots only
    sqrt_d = math.sqrt(discriminant)
    return [0.5 * (-a1 + sqrt_d), 0.5 * (-a1 - sqrt_d)]
def gunning_fog_index(n_words, n_polysyllable_words, n_sents):
    """https://en.wikipedia.org/wiki/Gunning_fog_index"""
    avg_sentence_length = n_words / n_sents
    pct_polysyllables = 100 * (n_polysyllable_words / n_words)
    return 0.4 * (avg_sentence_length + pct_polysyllables)
def _generate_windows_body(hooks):
"""Generate Windows specific functions.
At the moment it implements load_impls_from_library, class destructor, and an utility function
to convert from utf8 to wide-strings so we can use the wide family of windows
functions that accept unicode.
"""
# generate d... | 27597f8556cdb4383179245a423a45a72324e2ae | 9,076 |
import requests
import json
import logging
def last_failed(url, job_type):
"""Return last failed job for a specified job type."""
# query
query = {
"query": {
"bool": {
"must": [
{
"terms": {
"s... | 567e5ef7afaa460e7fb91e09fa74d2d011cb2cdb | 9,078 |
def fix_spaces_inside_quotes(text, quote='``'):
"""
>>> test = '''\
:meth:`update` accepte soit un autre objet dictionnaire ou un iterable de\
paires clé / valeur (comme tuples ou d'autres iterables de longueur deux).\
Si les arguments de mots clés sont spécifiés, le dictionnaire est alors mis\
... | cafb4dd7d15c4ab1a2cd252352d33b9aa20e4bca | 9,079 |
def toGoatLatin(S):
    """Convert a sentence to "Goat Latin".

    Vowel-initial words get "ma" appended; consonant-initial words move the
    first letter to the end before "ma". Every word additionally gets one
    'a' per its 1-based position.

    :type S: str
    :rtype: str
    """
    transformed = []
    for position, word in enumerate(S.split(), start=1):
        if word[0].lower() in "aeiou":
            goat = word + "ma" + "a" * position
        else:
            goat = word[1:] + word[0] + "ma" + "a" * position
        transformed.append(goat)
    return " ".join(transformed)
def __prepare_line(string, dir_source, replace_string):
    """
    Prepare the line before it is being written into the content file.

    Replaces occurrences of *dir_source* with *replace_string* when the
    latter is given; otherwise returns the line unchanged.
    """
    # `is not None` is the correct identity check; `not x == None` relies
    # on __eq__ and is non-idiomatic.
    if replace_string is not None:
        string = string.replace(dir_source, replace_string)
    return string
def find_closest_raster(return_period,aoi_col='AoI_RP{}y_unique',depth_col='RP{}_max_flood_depth'):
"""
Find the closest AoI and Flood raster column name for given return period
Arguments:
*return_period* (float): Return period of the flood for which to find the nearest inundation raster
*a... | 177041afc9a52d4942ab4095b7383cfc8e17652b | 9,083 |
def _is_bn_diff_doctypes(dyad):
"""Check if a dyad is between two different doctypes.
Args:
dyad (tuple): two-item tuple where each item is a dict which represents a document
Returns:
ind (bool): True if the dyad is between two different doctypes
"""
if dyad[0]["doctype"] != dyad[... | 2480cbca808164b2fec14fd13808cf5ebfb0dcc3 | 9,084 |
import requests
def upload2ipfs(file_path: str) -> str:
"""Upload to ipfs using local port.
IPFS node must be running locally. Run:
$ ipfs daemon
Args:
nft_path (str): path to metadata file
"""
with open(file_path, "rb") as f:
nft_binary = f.read()
ipfs_endpoint = "http://... | 4fdb53d4d6d61b1784673be6d84cb7ba41fc84a5 | 9,085 |
def GetCloudBasePath():
    """Returns the folder within the Makani bucket where all databases live."""
    bucket_path = 'gs://gcp-public-data-makani-deps/deps/turbsim_databases'
    return bucket_path
def line(char='-', length=48):
    """Generate a string consisting of *char* repeated *length* times."""
    # Sequence repetition replaces the needless join-over-comprehension.
    return char * length
import logging
def group_by_size(input_tensors, bytes_per_pack):
"""Groups `input_tensors` into chunks of `bytes_per_pack`.
The method preserves the original order of `input_tensors`. The grouping is
best effort, each pack could have more or less bytes than `bytes_per_pack`.
It only groups values with known ... | 9ab5805898678b1541f116e5ef5ae1b9a1c42791 | 9,088 |
def adapters(text):
    """
    Parse lines of text into a list of adapters (represented by their joltage),
    supplemented by the outlet (0) and your device (maximum + 3).
    """
    joltages = sorted(int(value) for value in text.splitlines())
    # The outlet rates 0; the device is always 3 above the highest adapter.
    return [0] + joltages + [joltages[-1] + 3]
def assign_bonds_to_groups(tors, group):
"""
|
**Description:** Make a group for each torsion bond
and keep track of how many members
Finally it returns the biggest group.
**Input:**
- Tors: atoms with torsions
- Group: Atoms grouped by proximity
**Output:**
- output: li... | 8147c016efe435f46b587bc86e6fab713375bb70 | 9,090 |
def is_right_censored(lc, frange):
    """ Returns true if the light curve is cutoff on the right. """
    last_index = len(lc['t0']) - 1
    return last_index in frange
def check_data(func):
"""Decorator function for checking possible exceptions during extraction.
Args:
func (obj): function used in try-except block
Except:
(str) : in case of exception assigns '-' for the missing data.
"""
def inner(line):
try:
return func(line)... | b9dad9ff8adbee9f8307c4c61fc2d5e1918092e2 | 9,094 |
def is_empty_line(line: str) -> bool:
    """Checks whether a line is empty (whitespace-only)."""
    # str.strip() already removes '\n' and '\t'; the original chained
    # strips were redundant.
    return not line.strip()
def to_host_list(value):
    """Split a space separated list of FQDNs into a list of hostnames."""
    hosts = value.split()
    return hosts
def _is_tarfile(filename):
    """Returns true if 'filename' is a TAR file (.tar, .tar.gz or .tgz)."""
    # str.endswith accepts a tuple of suffixes — one call, no `or` chain.
    return filename.endswith((".tar", ".tar.gz", ".tgz"))
from typing import Optional
import asyncio
import contextlib
from typing import cast
def get_running_loop() -> Optional[asyncio.AbstractEventLoop]:
"""Check if an event loop is already running."""
with contextlib.suppress(RuntimeError):
if hasattr(asyncio, "get_running_loop"):
return cast(... | 48750bc03be5d8cd17da20a3ca01d02149d471f5 | 9,099 |
def distance_diff_catl(ra, dist, gap):
"""
Computes the necessary distance between catalogues
Parameters
-----------
ra: float
1st distance
dist: float
2nd distance
Returns
-----------
dist_diff: float
amount of distance necessary between mocks
"""
... | 2a523d1c9c132dc8fcb65bd8d633bf24fcf46f42 | 9,100 |
def parse_token(filehandle, token):
"""Iterates through filehandle until token found. If value found after
token, returns it."""
for line in filehandle:
line = line.strip()
if line.startswith(token):
if len(line) > len(token):
return line.rsplit('\t', 1)[1]
... | 9f65ec378b33903250173aaa3f97cd058de13d2b | 9,101 |
def short_information(title, index=0):
"""
Takes in track information and returns everything as a short formatted String.
Args:
title (str): track title string
index (str): optional track number string
Returns:
A short formatted string of all track information.
"""
if ... | 6754af1f2327eb5d9f37f4d25aa4f808d4793553 | 9,104 |
def add_css_file_extension(name):
    """
    Return *name* with the CSS file extension appended.

    :return: name with '.css' appended at the end
    :rType: string
    """
    extended = '%s.css' % name
    return extended
def _get_position(a, n):
    """ returns position of substring :n: as "start", "end" or "middle" """
    idx = a.index(n)
    if idx == 0:
        label = "start"
    elif idx + len(n) == len(a):
        label = "end"
    else:
        label = "middle"
    return (label, idx)
def previous(values, elements, scope=None, strict=True):
"""Return closest previous (index, elem) of values withing scope.
Assumption:
values and elements are sorted
"""
# Init iterator on elements
elem_indexes = enumerate(elements)
index, elem = next(elem_indexes)
try:
nin... | 36b83dc2665539a3a9cb8b50419cd15410a8969c | 9,107 |
def slice(from_index, to_index, list_or_string):
    """Return the elements of *list_or_string* from from_index (inclusive)
    to to_index (exclusive), via the subject's own slicing behaviour.

    NOTE: the name intentionally mirrors Ramda's ``slice`` and shadows the
    ``slice`` builtin.
    """
    selection = list_or_string[from_index:to_index]
    return selection
def coding_strand_to_rna(strand):
    """returns the coding strand to the rna strand (T --> U)"""
    uppercase = strand.upper()
    return uppercase.replace("T", "U")
def graph_to_entities_json(g):
"""
Converts the given graph to entities JSON.
:param g: a graph
:return: an array of JSON
"""
entities = []
for u, v in g.edges():
entity = {
"Device": "",
"IP": "",
"Identity": "",
"Location": "",
... | ef790764c9e6ff4f652c41a5af1d5da3e4d98733 | 9,111 |
def recode_mark(mark, mark_shouldbe, no_mark="XX"):
"""A little helper function to remap clips to standard values that can
then be parsed.
Replaces BP with LPRP so ADBP becomes ADLPRP.
Arguments:
- `mark`: A mark string returned by the glfc database.
- `mark_shouldbe`: a dictionary mapping va... | ea31126d8b3d6e519a1f376f4ef58bfdbc24914a | 9,112 |
import os
def remove_extension_from_filename(filename: str) -> str:
    """
    Return a filename without its extension
    """
    root, _ext = os.path.splitext(filename)
    return root
import argparse
import os
def parse_command_line():
"""Parse the command-line options."""
formatter_class = argparse.ArgumentDefaultsHelpFormatter
description = 'Clang-format: Allow CHKERRQ to be on same line.'
parser = argparse.ArgumentParser(description=description,
... | 9f49b12e49bfe21caa7e179fadfb35cee7be35c5 | 9,114 |
from typing import Iterable
from typing import Any
def getWriteOutColour(
    colour: Iterable[Any], convertType: type = int, multiplier: int = 255
) -> list[Any]:
    """Scale each channel of *colour* by *multiplier* and cast with *convertType*."""
    scaled = []
    for channel in colour:
        scaled.append(convertType(channel * multiplier))
    return scaled
def add_frame_div_parent(cell_info):
"""
Adds the frame a cells parent divides on to cell info.
Args:
cell_info (dict): dict that maps cells to cell info
Returns:
dict: cell info with added frame_div_parent
"""
new_info = cell_info.copy()
for info in new_info.values():
... | 585feeaaf2a353ea2481cda41d547a004ecb8adc | 9,117 |
import inspect
def get_instances_of(cls, context):
"""从 context 中获取所有类型为 cls 的实例"""
if type(context) is not dict:
names = dir(context)
context = {k: getattr(context, k) for k in names}
objects = []
for name, value in context.items():
value_type = type(value)
if inspect... | f95eae2039f2b8b2bcfb09adbd09e24abb6dba48 | 9,120 |
def purpleair_us_corr(df, param):
"""US-Wide Correction equation of Barkjohn et al. 2021 for PurpleAir PA-II
sensors.
Publication Link:
https://amt.copernicus.org/articles/14/4617/2021/
Args:
df (pandas dataframe):
Dataframe with PurpleAir PA-II concentration values for PM2... | 9a61af20cc6178de099a31f38215044da0eb0bc2 | 9,122 |
def validate_metadata(metadata, parameters):
"""validate metatdata.
Ensure metadata references parameter workload_context,
and that it is a string.
Return error message string or None if no errors.
"""
for value in metadata.values():
if isinstance(value, dict):
if "get_param"... | 177a1133bacd9e7560be9604cd03542eaf5944ff | 9,123 |
import torch
def quaternions_to_so3_matrix(q):
"""Normalises q and maps to group matrix."""
q = q / q.norm(p=2, dim=-1, keepdim=True)
r, i, j, k = q[..., 0], q[..., 1], q[..., 2], q[..., 3]
return torch.stack(
[
r * r - i * i - j * j + k * k,
2 * (r * i + j * k),
... | 7b48bc7176a462497e64671fe8a204a9942c301c | 9,125 |
import os
def get_cache_path(split):
    """Build the cache file path for the given mini-ImageNet split."""
    base_dir = os.path.dirname(__file__)
    cache_name = "../../../data/mini-imagenet/mini-imagenet-cache-" + split + ".pkl"
    return os.path.join(base_dir, cache_name)
def flux(component):
"""Determine flux in every channel
Parameters
----------
component: `scarlet.Component` or array
Component to analyze or its hyperspectral model
"""
if hasattr(component, "get_model"):
model = component.get_model()
else:
model = component
re... | b95b0aa926ee2cc2c78e90c425b47f04bc0a4d4c | 9,127 |
import _ast
def BinOpMap(operator):
"""Maps operator strings for binary operations to their _ast node."""
op_dict = {
'+': _ast.Add,
'-': _ast.Sub,
'*': _ast.Mult,
'**': _ast.Pow,
'/': _ast.Div,
'//': _ast.FloorDiv,
'%': _ast.Mod,
'<<': _ast.LShift,
'>>': _ast... | 0b332b1043b31b123daf8812e6f2ecb4e3974f19 | 9,128 |
def is_current_game_state(handler_input, state):
    """Check whether the session's game state equals *state*."""
    session = handler_input.attributes_manager.session_attributes
    return session['game_state'] == state
def extract_title_from_text(text: str) -> str:
    """Extract and return the title line from a Markdown text.

    Returns the first line of the original text, minus any header markup
    ('#' and following spaces) at the start of the line.
    """
    first_line, _sep, _rest = text.partition('\n')
    return first_line.lstrip('# ')
def _all_pairs(i, contextsize, arrlen):
"""
i: index in the array
contextsize: size of the context around i
arrlen: length of the array
Returns iterator for index-tuples near i in a list of size s with
context size @contextsize. Context of k around i-th index means all
substrings/subarrays... | 7234e7b092e60c74d4f1c0af44a469c25cc34dc9 | 9,132 |
def percentformat(x, pos):
    """
    Generic percent formatter, just adds a percent sign.
    Precision decreases as the magnitude grows.
    """
    if x == 0:
        return "0%"
    if x < 0.1:
        fmt = '%4.3f'
    elif x < 1:
        fmt = '%3.2f'
    elif x < 5:
        fmt = '%2.1f'
    else:
        fmt = '%1.0f'
    return (fmt % x) + "%"
def bufferParser(readbuffer, burst=16):
""" Parse concatenated frames from a burst
"""
out = b''
offset = 1
while len(readbuffer) > 0:
length = readbuffer[2]
if readbuffer[4] == offset:
out += readbuffer[5:3+length]
offset += 1
readbuffer = readbuffe... | a3a7eb312f9e9c0e9a2183960074ebd1e9925025 | 9,136 |
def iterative_topological_sort(graph, start):
"""doesn't return some nodes"""
seen = set()
stack = [] # path variable is gone, stack and order are new
order = [] # order will be in reverse order at first
queue = [start]
while queue:
node = queue.pop()
if node not in seen:
... | a0654b1b1ce93818f01a5e6347243fabd1c0d23c | 9,137 |
def _helper(length_diff, linked_list):
    """Helper function for longer linked list: skip the first
    abs(length_diff) elements."""
    offset = abs(length_diff)
    return linked_list[offset:]
def account(account):
    """Pass-through fixture: re-scopes the account fixture so it is
    evaluated after the mailhog_delete_all fixture."""
    return account
def create_grouped_word_list(words, group_span_indices, join_string):
"""Group together words with join_string string and return updated token list."""
adjusted_words = []
curr_group = []
group_idx = 0
curr_group_start_idx, curr_group_end_idx = group_span_indices[group_idx]
for i, token in enume... | efd8c4a2e02b9704e6a3f8647ccb0d31be18551c | 9,141 |
def sec_title(default_str: str) -> str:
    """Reads in a section title, falling back to *default_str* when the
    user enters nothing."""
    prompt = ('What would you like to title this section? '
              + '(default is ' + default_str + ')\n')
    name = input(prompt)
    return name if name else default_str
def e_timeToString(dateString):
"""
input: string
output: string
description: format dateString to yyyymmddHHMM
example: Wed Aug 29 07:23:03 CST 2018 ->> 201808290723
"""
# define month list for get digital
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
... | 1a0c3f014bbd95a9da0eb767e1ce219cb0c70195 | 9,145 |
def tachycardic_detector(patient_age, patient_heart_rate):
"""
Determines if patient is tachycardic based on their age and heart rate
Args:
patient_age: integer extracted from patient database entry
patient_heart_rate: integer posted to patient database entry
Returns:
tachycardic... | 595bf87d913cd94b9f4aa089a3f1cf32f342ccbf | 9,146 |
import argparse
def parse_args():
"""
parsing and configuration
:return: parse_args
"""
desc = "TensorFlow implementation of fast-style-GAN"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--module', type=str, default='test',
help='Module to... | fa256927a5b1c0e4cb34b341b7960617f8d238d1 | 9,147 |
import pickle
def save_to_pkl(pkl, obj):
    """Save experiment resource *obj* to the file *pkl*; return *obj*."""
    with open(pkl, 'wb') as handle:
        pickle.dump(obj, handle)
    return obj
import torch
def gelu_fast(x):
    """Faster approximate (tanh) form of the GELU activation function."""
    inner = x * 0.7978845608 * (1.0 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + torch.tanh(inner))
def projects(request):
    """Display projects: context processor returning an empty context."""
    return dict()
import argparse
from pathlib import Path
def get_args():
""" function to parse command line arguments
Returns:
_type_: parsed arguments
"""
parser = argparse.ArgumentParser()
model_group = parser.add_mutually_exclusive_group(required=True)
model_group.add_argument("--ckpt-path", type... | 15c8b0926ba43c6caa509b725778fb63533e001b | 9,153 |
def diff_field(field1, field2):
    """returns true if field1 == field2"""
    fields_match = field1 == field2
    return fields_match
import random
def example_classifier(
task_info,
mode="demo",
default_split_prob={
"train": 0.9,
"dev": 0.01,
"test": 0.09,
},
):
"""
This will return the split this data belongs to.
"""
if mode == "demo" or mode == "all":
if random.random() < default_s... | 51aa25630158a4c295df85afc8684be59aca9d25 | 9,155 |
def islist(item):
    """Check if an item is a list - not just a sequence.

    Args:
        item (mixed): The item to check as a list.
    Returns:
        result (bool): True if the item is a list, False if not.
    """
    result = isinstance(item, list)
    return result
def group_parameters(model_params_dict_expanded):
"""Groups the parameters to be estimates
in flat dictionary structure"""
model_params_dict_flat = dict()
model_params_dict_flat["gamma_0s"] = list(
model_params_dict_expanded["const_wage_eq"].values()
)
model_params_dict_flat["gamma_1s... | deb566114d1b40610bf6e1e814e85b1d8d3e3351 | 9,157 |
import pandas
def read_static_info(static_tracks_file):
"""
This method reads the static info file from highD data.
:param static_tracks_file: the input path for the static csv file.
:return: the static dictionary - the key is the track_id and the value is the corresponding data for this track
""... | 295757466640f90b0d3f95dd1d68aab0c90b329b | 9,158 |
import os
def get_file_extension(fname):
""" Returns the extension from a filepath string ignoring the '.' character """
return os.path.splitext(fname)[-1][1:] | 44c751df76fe34d2df81cc98a2c140556ddfbcf3 | 9,159 |
import os
def check_extension(fname, extension = ".csv"):
"""
Checks whether the fname includes an extension.
Adds an extension if none exists.
fname - the name of the file to check.
extension - the extension to append if necessary.
>> Default: ".csv".
"""
root, ending = os.path.spl... | 05bb018453101d0017be4dade0bc9199e67e7dfb | 9,160 |
def tshirt_code(tshirt_string):
""" convert tshirt size strings into a code for us"""
if not tshirt_string:
return ""
tshirt_code = ""
if tshirt_string[0] == "f":
tshirt_code += "0"
tshirt_string = tshirt_string[1:]
else:
tshirt_code += "1"
size_code = {"s": "1"... | f66d908528c6caa47ca878e4115eec00c52e3046 | 9,161 |
import time
import calendar
def dates_to_epoch(d):
"""Recursively converts all dict values that are struct_times to Unix timestamps."""
for key, value in d.iteritems():
if hasattr(value, 'iteritems'):
d[key] = dates_to_epoch(value)
elif type(value) is time.struct_time:
... | 6a0a9a8f1a1636376973e65c4d3b4ff8a3603d3d | 9,162 |
from typing import Dict
from typing import Any
import logging
def build_log_config(level: str) -> Dict[str, Any]:
"""Build a log config from a level."""
return {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"basic": {"format": "%(asctime)s %(name)s %(lev... | e20a419ee6c69f6fa0eefbd51e5542349b1a1e8b | 9,163 |
def i2n(i):
    """Convert a dotted-quad IPv4 string to its integer representation."""
    octets = [int(part) for part in i.split('.')]
    value = octets[0] << 24
    value |= octets[1] << 16
    value |= octets[2] << 8
    value |= octets[3]
    return value
def count_increases(report):
    """Count adjacent pairs in *report* where the later value is larger.

    >>> count_increases([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
    7
    """
    return sum(1 for prev, curr in zip(report, report[1:]) if prev < curr)
from typing import Iterable
from typing import Optional
def effective_sample_size(
weights: Iterable[float],
total_weight: Optional[float] = None,
) -> float:
"""Computes the "effective sample size" of the given weights
This value represents how "healthy" the underlying samples are. The lower
thi... | 6915abd0484dc4b08b47c1c88b6e19e2af5dd1c4 | 9,168 |
def split_pair_occurrence(input_str):
    """
    Q9HD36.A79T (x11) → (Q9HD36.A79T, 11)

    Strings without an '(xN)' suffix get an occurrence count of 1.
    """
    if '(x' not in input_str:
        return input_str, 1
    pair, raw_count = (part.strip() for part in input_str.split())
    # raw_count looks like '(x11)': drop '(x' and ')' and parse the digits.
    return pair, int(raw_count[2:-1])
def find_joins(df, ids, downstream_col="downstream", upstream_col="upstream", expand=0):
"""Find the joins for a given segment id in a joins table.
Parameters
----------
df : DataFrame
data frame containing the joins
ids : list-like
ids to lookup in upstream or downstream columns
... | 39f16985ddd8e79338e520e56ba6ee793558d03f | 9,170 |
import math
def max_crossing_subarray(given_array, start_index, mid_index, end_index):
"""Function To Calculate The Mid Crossing Sub Array Sum"""
max_left_sum = - math.inf # Used For Sentinel Value
max_right_sum = - math.inf # Used For Sentinel Value
cross_start = None # Just used for variable pr... | 1106b063b652e0d0d475f5b0979a138f4c48113b | 9,172 |
import re
def camel_2_snake_case(word):
    """Convert CamelCase (acronym-aware) to snake_case.

    >>> camel_2_snake_case("HTTPResponseCodeXYZ")
    'http_response_code_xyz'

    From https://stackoverflow.com/a/1176023/548792
    """
    underscored = re.sub(r"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))", r"_\1", word)
    return underscored.lower()
from datetime import datetime
def now_str(time=False):
    """Return string to be used as time-stamp.

    Date only by default; pass ``time=True`` to append _HH:MM:SS.
    """
    fmt = "%Y-%m-%d_%H:%M:%S" if time else "%Y-%m-%d"
    return datetime.now().strftime(fmt)
def from_file(path: str) -> set:
    """
    Read conditions from a file. Each line contains a separate condition.

    Trailing newlines are stripped, so an identical condition appearing on
    the (possibly newline-less) last line and on an earlier line is stored
    once rather than as two distinct entries.

    :param path: Path to file.
    :return: Read conditions.
    """
    conditions = set()
    with open(path) as f:
        for line in f:
            conditions.add(line.rstrip('\n'))
    return conditions
def getLeastReplaggedCommons():
"""
Returns the name of the least replagged Commons replica among s1, s2 and s3
"""
return "commonswiki-p.rrdb.toolserver.org"
# broken:
#return urllib.urlopen("http://toolserver.org/~eusebius/leastreplag").readline() | 5726416f7a1cbb09f51de81d967009005a06af1b | 9,178 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.