content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import math
def order_of_magnitude(x):
"""Calculate the decimal order of magnitude.
Parameters
----------
x : float
Number of which to calculate the decimal order of magnitude.
Returns
-------
int
Order of magnitude of `x`.
"""
return int(math.floor(math.log10(abs(x)))) if x else 0 | 53c0fcfbdb993e2a1c584d416e79c59112f0ceba | 50,192 |
def bits_to_number(bits):
"""convert the binary representation to the original positive number"""
res = 0
for x in bits:
res = res * 2 + x
return res | 9280170a3bfbad88363cd886384a2253e83d5db9 | 50,193 |
import subprocess
def init_RNAfold(version, temperature, paramFile=""):
"""
Initialization RNAfold listener
"""
p2p = ""
t = "-T " + str(temperature)
P = ""
if paramFile != "":
P = "-P " + paramFile
if version == 185:
p2p = "/home/rk/Software/ViennaRNA/ViennaRNA-1.8.5/Progs/RNAfold"
p = subprocess.Popen(
([p2p, "--noPS", "-d 2", t, P]),
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
return p
elif version == 213:
p2p = "RNAfold"
p = subprocess.Popen(
([p2p, "--noPS", "-d 2", t, P]),
# shell = True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
return p
else:
exit(0) | 044cefe90acc150c41e3535c8842e14f1add5867 | 50,194 |
from typing import Any
from pathlib import Path
import json
def read_json_file(path: str) -> Any:
"""Read result from file
Args:
path (str): path of result file
Returns:
[Any]: content of the file
"""
file_path = Path(path)
if not file_path.is_file():
raise RuntimeError(f'Invalid file to read {path}')
with open(file_path, 'r') as f:
return json.load(f) | cab88d63721faa9e94704abfeb885d7f6eceaa63 | 50,195 |
def smart_resize(image, max_size=2000):
"""
Resize image to `max_size` using it's bigger size (either width or height).
This will set wide image width to `max_size` and adjust height accordingly.
:param image: Pillow.Image object
:param max_size: maximum value of width or height in pixels.
:rtype Pillow.Image:
"""
if image.width >= image.height:
new_width = max_size
new_height = int(new_width / image.width * image.height)
else:
new_height = max_size
new_width = int(new_height / image.height * image.width)
return image.resize((new_width, new_height)) | eb9f8e6a0407eacecce17b12b850544c42009cf3 | 50,196 |
from typing import FrozenSet
from typing import List
def get_weights(solution: FrozenSet[int], weights: List[int]):
"""
Calculates and returns total weight of given knapsack solution
:param solution: Knapsack solution consisting of packed items
:param weights: profit of items
:return: Total weight of given knapsack solution
"""
return sum(weights[item - 1] for item in solution) | fcd76946a4dd269324bf4e438b83d3f52afb3582 | 50,197 |
def frame_sub_json_object(dataframe,index_column,value):
""" Essentially the same as the sub_json_object function, but can take an
arbitrary dataframe, rather than a wfpd dataframe.
Takes a dataframe and filters it on a provided value on its index_column.
The index column is then removed, effectively providing a table
of related data. This table is then returned as a dataframe.
Useful function for transforming pandas JSON friendly structures.
Parameters
----------
source : string
The name of the wfpd dataframe (aka Excel sheet name)
index_column : string
The name of the column on the wfpd dataframe to filter on
value : string
The value to filter on
"""
dataframe = dataframe.loc[dataframe[index_column] == value]
dataframe = dataframe.drop(index_column,1)
return dataframe | 9a619ad200f2c062b738ac406fe2e9e350eaa447 | 50,198 |
def est_centroid_spectrum(pow_spec, freq):
"""Estimate the centroid of the power spectrum"""
return (pow_spec * freq).sum() / pow_spec.sum() | f92ea45c7835031d03fdc84292c1e4f35a27ccec | 50,199 |
import json
def get_model_data_by_id(model, id, return_data_dict):
"""
通过id获取一条数据
:param model:
:param id:
:param return_data_dict: 返回的data字段列表
:return:
"""
records = model.objects.filter(id=id)
if not records:
return None
# 查询成功
record = records[0]
return_data = {}
if return_data_dict and isinstance(return_data_dict, dict):
if return_data_dict.get("remove_return"):
return_data_dict.pop("remove_return")
return return_data_dict
for k, v in return_data_dict.items():
if isinstance(v, str):
try:
obj_field = getattr(record, v)
except:
return_data[k] = v
else:
if obj_field != None:
return_data[k] = obj_field
else:
return_data[k] = v
elif isinstance(v, dict):
return_dict = {}
return_list = []
for kk, vv in v.items():
obj_field = getattr(record, kk)
if obj_field:
try:
obj_fields = obj_field.all()
for obj_field_a in obj_fields:
dict_a = {}
for kkk, vvv in vv.items():
value = getattr(obj_field_a, vvv)
if kkk.endswith("&json"):
dict_a[kkk[:-5]] = json.loads(value) if value else None
else:
dict_a[kkk] = value
return_list.append(dict_a)
if return_list:
return_data[k] = return_list
except:
for kkk, vvv in vv.items():
if kkk.endswith("&json"):
value = getattr(obj_field, vvv)
return_dict[kkk[:-5]] = json.loads(value) if value else None
else:
return_dict[kkk] = getattr(obj_field, vvv)
if return_dict:
return_data[k] = return_dict
return return_data
return records | e98d1169c1340552f5f17f6db8bb51e2212cfd12 | 50,200 |
def get_index_for_dude(dude):
"""
:param dude: splitwise user object
:return: index in row/column in payment matrix for the incoming dude
"""
if dude.first_name == "Alex":
return 0
elif dude.first_name == "Daniel":
return 1
elif dude.first_name == "Patrick":
return 2
elif dude.first_name == "maany":
return 3 | e9f7597217cd6c68af60be3d7190125c6770470e | 50,201 |
def read_samples(fn):
"""Read samples from the header of a GFF file.
Args:
*fn(str)*: GFF file to read.
Returns:
*(list)*: character list with sample names.
"""
with open(fn) as inh:
for line in inh:
if line.startswith("## COLDATA"):
return line.strip().split(": ")[1].strip().split(",")
raise ValueError("%s doesn't contain COLDATA header." % fn) | e48972fecdbf63abc1ba9398fa95126739dcc324 | 50,202 |
def unpack_int(buffer, ptr, length):
""" Unpack an int of specified length from the buffer and advance the pointer """
return (
int.from_bytes(buffer[ptr:(ptr+length)], 'little', signed=False),
ptr + length
) | 18afc5db9e212b982bea2839ea7bb3600ce27649 | 50,203 |
def make_print_msg_specific(
integration_rate, starting_msg, err_type):
"""
Function is used to make the print message more specific
by replacing 'generic' phrases with phrases that more
completely explain the data quality issue at play.
:param
integration_rate (bool): determines if the data quality
metric to be printed is an 'integration rate' rather
than a problem with data quality. This warrants a
change in how the message will be printed.
starting_msg (str): the message to build off that
will ultimately be displayed
err_type (str): indicates the type of error metric that is
being reported. Used to change what is printed so it is
more appropriate.
:return:
starting_msg (str): the message with the data quality issue
that now has a more specific indicator for the
problem at hand
"""
if integration_rate:
# only one issue; make first informative
starting_msg = starting_msg.replace(
'of data)^', 'of expected concepts are not '
'successfully integrated)')
# series of issues; make first informative
starting_msg = starting_msg.replace(
'of data),^', 'of expected concepts are not '
'successfully integrated),')
# do not make non-first messages overly long
starting_msg = starting_msg.replace(
'of data', ' of concepts not integrated')
elif err_type in ['concept']:
starting_msg = starting_msg.replace(
'of data)^', 'of concept_ids are not '
'properly mapped)')
starting_msg = starting_msg.replace(
'of data),^', 'of concept_ids are not '
'properly mapped),')
starting_msg = starting_msg.replace(
'of data', 'of concept_ids')
elif err_type in ['drug_routes']:
starting_msg = starting_msg.replace(
'of data)^', 'of route_concept_ids '
'are not properly populated)'
)
starting_msg = starting_msg.replace(
'of data),^', 'of route_concept_ids '
'are not properly populated),'
)
starting_msg = starting_msg.replace(
'of data', 'of drugs'
)
elif err_type in ['end_before_begin']:
starting_msg = starting_msg.replace(
'of data)^', 'of end dates precede '
'start dates')
starting_msg = starting_msg.replace(
'of data),^', 'of end dates precede '
'start dates')
elif err_type in ['drug_success']:
starting_msg = starting_msg.replace(
'of data)^', 'of drug ingredients '
'are properly populated)'
)
starting_msg = starting_msg.replace(
'of data),^', 'of drug ingredients '
'are properly populated),'
)
starting_msg = starting_msg.replace(
'of data', 'of drugs'
)
elif err_type in ['sites_measurement']:
starting_msg = starting_msg.replace(
'of data)^', 'of measurement concepts '
'are properly populated)'
)
starting_msg = starting_msg.replace(
'of data),^', 'of measurement concepts '
'are properly populated),'
)
starting_msg = starting_msg.replace(
'of data', 'of measurements'
)
# get rid of lingering underscores
starting_msg = starting_msg.replace('^', '')
return starting_msg | 7d9945cb0efb7d6d7bd541beff10af98d51f4314 | 50,205 |
def solveMeFirst(a,b):
"""
Solve a and b.
Args:
a: (array): write your description
b: (array): write your description
"""
# Hint: Type return a+b below
return(a+b) | 93c42bbdbd726a12fab59309a2a2af8caf06b832 | 50,206 |
def count(pipeObj):
"""Count number of passes (pass-managers excluded) in pipeline object."""
cnt = 0
for c in pipeObj:
if c[0]:
cnt += count(c[1])
else:
cnt += 1
return cnt | fcb28dcf4e8cb50d57988c1aab852ae9e633d9a9 | 50,207 |
import requests
def _download_index(category, index_url):
"""
Download the index.
:param category: suffixed category, e.g. 'filters', 'templates'
:param index_url: url to the index. Default: 'https://raw.githubusercontent.com/pandoc-extras/packages/master/<category>.yaml'
:return: the content of the index, which is in YAML
"""
if index_url is None:
index_url = 'https://raw.githubusercontent.com/pandoc-extras/packages/master/{}.yaml'
url = index_url.format(category)
r = requests.get(url)
if r.status_code != 200:
raise IOError("Cannot download index, error {}".format(r.status_code))
return r.text | 190a4b39f962cae43d281fa91d1614cbebaa681a | 50,209 |
import os
def create_dir_when_none(dir_name):
"""Check if a directory exist or create one.
return: bool."""
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
return False
else:
return True | 699aedc1507d8956691164f28b4f0a0b608c40be | 50,210 |
def stats_table(df):
"""Gera tabela com resumo dos dados estatíticos"""
df.loc['IQR'] = df.loc['75%'] - df.loc['25%']
df = df.T.drop(['Acertos parte A'])
df = df.drop(['count'], axis=1)
df = df.reindex(['Pontos - Objetiva',
'Q17',
'Q18',
'Pontos - Discursiva',
'Pontuação final'])
df['mean'] = round(df['mean'], 2)
df['std'] = round(df['std'], 2)
return df | fa6f2f389c09651cd569cb136aeece133f767236 | 50,212 |
import base64
def base64type_convertor(b64str):
"""a base 64 decoded string"""
return base64.b64decode(b64str.encode()).decode("utf-8") | 4e9d4bdfd2a64076ced7860a5ce1fe90b11f83b5 | 50,213 |
import math
def round_mult(val, multiple, direction='round'):
"""Rounds :val: to the nearest :multiple:. The argument :direction: should be either 'round', 'up', or 'down'."""
round_func = {'round': round, 'up': math.ceil, 'down': math.floor}
return round_func[direction](val / multiple) * multiple | 53ba98f1c8a4c623c8831e831b21ff689483f58a | 50,215 |
from typing import Optional
from typing import Tuple
import decimal
def _parse_interval(hours: str, minutes: Optional[str],
seconds: Optional[str]) -> Tuple[int, int, int, int, int]:
"""Return (hours, minutes, seconds, milliseconds, microseconds) from
the specified timestamp parts, where the input arguments 'hours' and
'minutes' are formatted as integers, while the input argument 'seconds' may
be formatted as either an integer or a float.
"""
seconds_dec = decimal.Decimal(seconds or 0)
return_seconds = int(seconds_dec)
fractional = seconds_dec - return_seconds
milliseconds_dec = fractional * 1000
milliseconds = int(milliseconds_dec)
microseconds = int((milliseconds_dec - milliseconds) * 1000)
return (int(hours), int(minutes or 0), return_seconds, milliseconds,
microseconds) | 371641534ae7e8f5c573b83f190b61f9f7c0b54e | 50,216 |
def apply_reactions_to_rapid_tests(
date,
states,
params,
rapid_test_reaction_models,
contacts,
seed,
):
"""Apply reactions to rapid_tests."""
if rapid_test_reaction_models:
for model in rapid_test_reaction_models.values():
loc = model.get("loc", params.index)
func = model["model"]
if model["start"] <= date <= model["end"]:
contacts = func(
contacts=contacts,
states=states,
params=params.loc[loc],
seed=next(seed),
)
return contacts | b3a5c055d30d5d9f4e08032f9cf4eaa25a94f4c8 | 50,217 |
def extractKmers(X, k=12):
"""Extract the kmers of length k of X."""
# Length of the sequences
len_seq = len(X[0])
# Initialisation of the set of kmers
k_mers = set()
# Loop over the sequences of X
for x_i in X:
# Loop over the sequence
for l in range(len_seq - k + 1):
# Extract k_mer of x_i
k_mer_i = x_i[l:(l + k)]
# Update k_mers
k_mers.update([k_mer_i])
return list(k_mers) | 368f6853109e511e80e85b22e063566baef481ba | 50,218 |
import argparse
def parse_arguments(args_to_parse):
""" Parse CLI arguments """
descr = 'Visualise via t-SNE'
parser = argparse.ArgumentParser(description=descr)
general = parser.add_argument_group('General settings')
general.add_argument(
'name', type=str, help="The name of the experimental directory - used for saving visualisations."
)
general.add_argument(
'--load-dir',
type=str,
required=True,
help="The name of the directory from which to load the pre-processed data (excluding `results`)",
)
general.add_argument(
'--data-type',
type=str,
default='',
choices=['vector', 'doc2vec', 'fasttext'],
help="The name of the data to load from",
)
return parser.parse_args(args_to_parse) | 165de765c796772c1f022e5b555147573528a505 | 50,219 |
def mapDataTypesCP(valueFormatUrl):
""" Convert meta data descriptor to numerical descriptor
:param valueFormatUrl = full url to the description
example: https://meta.icos-cp.eu/ontologies/cpmeta/float32
return: numerical descriptor to build schema to send a post
request for retrieving binary data.
"""
dictionary = {
'float32': 'FLOAT',
'float64': 'DOUBLE',
'bmpChar': 'CHAR',
'etcDate': 'INT',
'iso8601date':'INT',
'iso8601timeOfDay':'INT',
'iso8601dateTime': 'DOUBLE',
'isoLikeLocalDateTime' : 'DOUBLE',
'etcLocalDateTime': 'DOUBLE',
'int32':'INT',
'string':'STRING'
}
return dictionary.get(valueFormatUrl.split('/')[-1], False) | 5428a9156e1792f2fa5f47b61415fcece6345a36 | 50,221 |
def parse_options(l):
"""
:param l: str
:return: [opt1, opt2,... ]
"""
if '[' not in l:
return []
options_start = l.index('[') + 1
options_end = l.index(']')
return [o.strip() for o in l[options_start:options_end].split(',')] | 1e05249f62cd4e654d884963e0e0cfe2d3b66aed | 50,222 |
def get_main_entity_from_question(question_object):
"""
Retrieve the main Freebase entity linked in the url field
:param question_object: A question encoded as a Json object
:return: A list of answers as strings
>>> get_main_entity_from_question({"url": "http://www.freebase.com/view/en/natalie_portman", "targetValue": "(list (description \\"Padm\u00e9 Amidala\\"))", "utterance": "what character did natalie portman play in star wars?"})
(['Natalie', 'Portman'], 'URL')
>>> get_main_entity_from_question({"url": "http://www.freebase.com/view/en/j_j_thomson"})
(['J', 'J', 'Thomson'], 'URL')
>>> get_main_entity_from_question({"targetValue": "(list (description Abduction) (description Eclipse) (description \\"Valentine's Day\\") (description \\"New Moon\\"))"})
()
>>> get_main_entity_from_question({"url": "http://www.freebase.com/view/en/j_j_thomson"})
(['J', 'J', 'Thomson'], 'URL')
"""
url = question_object.get('url')
if url:
if "http://www.freebase.com/view/en/" not in url:
return [w.title() for w in url.split()], 'URL'
entity_tokens = url.replace("http://www.freebase.com/view/en/", "").split("_")
return [w.title() for w in entity_tokens], 'URL'
return () | 23b449fa5c1f370248dd1b28d751a4a4f6553fac | 50,223 |
def get_band_index(band_name):
"""Get the write-index value for a Sentinel-2 image band
For bands 1 through 8, we return the band number. For 8A,
we return 9. For bands above 8A, we add one to the band
number.
Args:
band_name (str): the name of the band, e.g. "nir - 8A"
Return:
int
"""
name, num = band_name.split(' - ')
if num.lower() == '8a':
return 9
elif int(num) > 8:
return int(num) + 1
else:
return int(num) | 16197e5303d259b3502cf20255cd68c514215e3c | 50,224 |
import math
def area_triangulo(s1: float, s2: float, s3: float) -> float:
""" Área de un triángulo
Parámetros:
s1 (float): Longitud de uno de los lados del triángulo
s2 (float): Longitud de uno de los lados del triángulo
s3 (float): Longitud de uno de los lados del triángulo
Retorno:
float: El área del triángulo redondeado con una cifra decimal.
"""
s = (s1 + s2 + s3) / 2
area = math.sqrt(s * (s - s1) * (s - s2) * (s - s3))
return round(area, 1) | 432108ca3ecb238e40c82159aa8b47390b6a85a3 | 50,225 |
import math
def formatter(filename):
"""
Formats the file to fit the search form parameters
"""
formattedFilename = filename.replace('.txt','') + 'formatted.txt'
filetype = input('Press 1 if the file is in coordinate form or 2 if the file is in decimal form: ')
if filetype == '1':
with open(filename) as locations:
for location in locations:
if ':' in location:
fields = location.split(' ')
RaDec = fields[0] + ' ' + fields[1]
RaDec = RaDec.replace(':',' ')
with open(formattedFilename, "a+") as formattedFile:
formattedFile.write(RaDec+'\n')
formattedFile.close()
elif 'J' in location:
fields = location.split(' ')
for fieldNumber in range(len(fields)):
if 'J' in fields[fieldNumber]:
coordinates = fields[fieldNumber]
coordinates = coordinates.replace('J','')
newcoords = [coordinates[i:i+2] for i in range(0, len(coordinates), 2)]
Ra = newcoords[0] + ' ' + newcoords[1] + ' ' + newcoords[2] + newcoords[3] + newcoords[4][0]
Dec = newcoords[4][1] + newcoords[5] + ' ' + newcoords[6] + ' ' + newcoords[7] + newcoords[8]
RaDec = Ra + ' ' + Dec
with open(formattedFilename, "a+") as formattedFile:
formattedFile.write(RaDec+'\n')
formattedFile.close()
else:
with open(formattedFilename, "a+") as formattedFile:
formattedFile.write(location)
formattedFile.close()
if filetype == '2':
i = 0
with open(filename) as locations:
for location in locations:
fields = location.split(' ') #this is a tab, not 3 spacings
#the decimals have to be converted to floats and then
#back to strings
hours = float(fields[0])/15
minutes = hours%1 * 60
seconds = minutes%1 * 60
hh = str(math.floor(hours))
if len(hh) == 1:
hh = '0'+hh
mm = str(math.floor(minutes))
if len(mm) == 1:
mm = '0'+mm
ss = str(seconds)[0:5]
if len(str(math.floor(seconds))) == 1:
ss = '0'+ss
Ra = hh + ' ' + mm + ' ' + ss
degrees = float(fields[1])
minutes = degrees%1 * 60
seconds = minutes%1 * 60
dd = str(math.floor(degrees))
if len(dd) == 1:
dd = '0'+dd
mm = str(math.floor(minutes))
if len(mm) == 1:
mm = '0'+mm
ss = str(seconds)[0:5]
if len(str(math.floor(seconds))) == 1:
ss = '0'+ss
Dec = dd + ' ' + mm + ' ' + ss
RaDec = Ra + ' ' + Dec
with open(formattedFilename, "a+") as formattedFile:
formattedFile.write(RaDec+'\n')
formattedFile.close()
return formattedFilename | d0fe2ffc29d4cf4f83411603eac3cefb02dcb560 | 50,226 |
def box_to_coco(box):
"""
convert to COCO Bounding box format => Takes a bounding box in [y_min,x_min,y_max,x_max] format and returns[x,y,s,r]
format, where x,y is the centre of the box and s is the scale/area and r is the aspect ratio "width / height"
"""
height = box[2] - box[0]
width = box[3] - box[1]
x = box[1] + width / 2.
y = box[0] + height / 2.
scale = width * height # scale is just area
ratio = width / float(height)
# return np.array([x, y, scale, ratio]).reshape((4, 1))
# return np.c_[x, y, scale, ratio]
return [x, y, scale, ratio] | 10a4dd00b7b27f2d5f882c6e888cf324b6b1da2a | 50,230 |
def length_more_than_one(x):
"""creating a function for checking the length of word is greater than 1"""
if(len(x[0])>1):
return(x) | fe5894d5ca73876fa44b7e7325c2f0fcc17e492a | 50,231 |
def getInterestedRange(message_context):
"""Return a (start, end) pair of character index for the match in a MessageContext."""
if not message_context.match:
# whole line
return (0, len(message_context.line))
return (message_context.match.start(), message_context.match.end()) | f173a09a7281bb79f20e7658932d1c7f4e5ddd43 | 50,232 |
def fix_image_ch(img):
"""Fix image channel so that it locates last in shape."""
if img.shape[0] <= 3:
return img.transpose(1, 2, 0)
return img | 6e426d1334c79602c308ffc671eacd9302350cd3 | 50,233 |
from typing import Optional
def hex_to_bytearray(hex_str: str) -> Optional[bytearray]:
"""Convert hexstring (starting with 0x) to bytearray."""
return bytearray.fromhex(hex_str[2:]) if hex_str is not None else None | a8a5bcbe0eb7cc009ffe07bdbe719b473d831862 | 50,234 |
import math
def to_geographic(lng, lat):
"""
web mercator to geographic
:param lng:
:param lat:
:return:
"""
if abs(lng) < 180 and abs(lat) < 90:
return
if abs(lng) > 20037508.3427892 or abs(lat) > 20037508.3427892:
return
x = lng
y = lat
num3 = x / 6378137.0
num4 = num3 * 57.295779513082323
num5 = math.floor((num4 + 180.0) / 360.0)
num6 = num4 - (num5 * 360.0)
num7 = 1.5707963267948966 - (2.0 * math.atan(math.exp((-1.0 * y) / 6378137.0)))
return round(num6, 6), round(num7 * 57.295779513082323, 6) | f4fc050f0b5f64dbeac8da06ffef40a042478660 | 50,236 |
def signed_desired(config):
"""
:param config: The cli config.
:return: Returns True if any of the arches specified on the command line require signing.
"""
for a, mode in config.arch:
if mode == 'signed':
return True
if mode != 'unsigned':
raise IOError(f'Unexpected signing mode for arch {a} (must be signed or unsigned): {mode}') | ab343a375d6f7c99432ed358a40b7eacae009521 | 50,238 |
def reindent(s, numSpaces=4, no_empty_lines=False):
""" Return string s reindented by `numSpaces` spaces
Args:
s (string): string to reindent
numSpaces (int): number of spaces to shift to the right
no_empty_lines (bool): if True remove empty lines
Returns:
reindented string
"""
if no_empty_lines:
lines = [numSpaces * " " + line if line
else line for line in s.splitlines()]
else:
lines = [numSpaces * " " + line for line in s.splitlines()]
return "\n".join(lines) | f9a20f1cc51df3108551050d0afe76ff8fa17e1b | 50,239 |
def diff(list1, list2):
"""Return diff b/w lists in a dictionary
About:
Because 0 == False and 1 == True, diff may not work as wanted with
list mixing booleans and integers.
"""
s1 = set(list1)
s2 = set(list2)
common = s1.intersection(s2)
return {
'common': common,
'minus': s1 - common,
'plus': s2 - common,
} | 8e48920ba2dfdd09fdce5a45d6751b650af5c90b | 50,242 |
def insere(texto, posicao, texto_inserir):
"""
Insira na posição especificada o novo texto.
"""
texto.insert(posicao, texto_inserir)
return texto[:posicao] + texto_inserir[:posicao + 2] + texto[posicao:] | 2014c48d1c5816435db09003fc7c062efa1766d1 | 50,243 |
import typing
def splitPath(s3Path: str) -> typing.Tuple[str, str]:
"""Split a full S3 path into its bucket and key components"""
if not s3Path.startswith('s3://'):
raise ValueError('s3Path must begin with "s3://"')
s3Path = s3Path[5:]
bucketName, key = s3Path.split('/', 1)
return bucketName, key | 9d5f84bb7f267f39463d636ed942229e873bfbe8 | 50,244 |
def _branch_correct(tip_distances, i, j, i_sum, j_sum):
"""Calculates weighted unifrac branch length correction.
tip_distances must be 0 except for tips.
"""
result = tip_distances.ravel()*((i/float(i_sum))+(j/float(j_sum)))
return result.sum() | 833ce7106e47e0a911e4f2b34efe879210ad6c8a | 50,245 |
def _id_to_str_util(doc):
"""Used to resolve the following error:
TypeError: Object of type ObjectId is not JSON serializable
:param doc:
:return:
"""
doc['_id'] = str(doc['_id'])
return doc | d28b30598cf858e5825b28734997171edc08d9d8 | 50,246 |
import argparse
import sys
def initializeParser():
"""
Helper method returns a parser for us to parsing command line argument
Return:
- `argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(description='Sort attack list from travian builder')
parser.add_argument(
'inputList',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin
)
return parser | bea70d7fe79ea63441c3bb4c5b6c4259710209cf | 50,247 |
import ast
def total_tags(x) -> int:
"""Sum the total number of tags within a list of cuisines"""
x_list=ast.literal_eval(str(x))
total_tags=0
for el in x_list:
total_tags=total_tags+el[1]
return total_tags | cfe58dc23660a4f8b7b6c351de45d3d60fcf2c0e | 50,248 |
def read_dictionary(filepath):
""" Reads the word list provided in the challenge and returns it in the
form of a list of words.
"""
with open(filepath, 'r') as fil:
words = fil.read().splitlines()
return words | 152a5972f45228cde7dafab32394018928aa640a | 50,249 |
import sys
def augment_filename(filename):
""" Automatically name output JSON file, if not given in arguments """
new_file = filename.rstrip('.xml') + '.json'
if '.xml' in new_file:
sys.stderr.write("Given file is not in XML format. ")
else:
return new_file | 90c5b45333659be79731d22bb08d3eecda1c6d6e | 50,251 |
def agent_status_init(log, workspace, agent_status, field_names):
"""
Init status of a agent
"""
# pylint: disable=unused-argument
return agent_status.basc_init_fields(log, field_names) | 7388a5270d1e0631bc89ba1a766488bd335e0ae1 | 50,253 |
def _serialize(obj):
"""Return a json-serializable form of object."""
return [obj.__class__.__name__, vars(obj)] | bc446e4bd2008207bcb086e5a429b52a1a8ca73a | 50,255 |
import os
import csv
def load_historical_data():
"""Load historical weather data."""
with open(os.path.dirname(os.path.realpath(__file__)) + '/../data/1711054.csv') as f:
reader = csv.reader(f)
next(reader, None)
historical_data = []
for row in reader:
historical_data.append(row)
return historical_data | 76229ed98757b14daf8c04d3897314bc432c0e82 | 50,256 |
import os
def _convert_gif_to_mp4(input_filename):
""" Converts file to mp4 """
output_filename = input_filename[:-3] + "mp4"
os.system(f"ffmpeg -y -hide_banner -loglevel error -i {input_filename} -movflags faststart -pix_fmt yuv420p -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {output_filename}")
if (os.path.exists(output_filename) == True):
os.remove(input_filename)
return output_filename
else:
return None | f0bf172f5e1aacfed488852b1f06fdc71eea3fea | 50,257 |
import six
def make_int(value, missing=-1):
"""Convert string value to long, '' to missing"""
if isinstance(value, six.string_types):
if not value.strip():
return missing
elif value is None:
return missing
return int(value) | d9e7fd310ec392ffe4169ca006afe864716e4fc1 | 50,260 |
def import_data(file):
"""
This function imports the data into a list form a file name passed as an argument.
The file should only the data seperated by a space.(or change the delimiter as required in split)
"""
data = []
f = open(str(file), 'r')
for line in f:
current = line.split() #enter your own delimiter like ","
for j in range(0,len(current)):
#current[j] = int(current[j])
current[j] = current[j]
data.append(current)
#print 'finished importing data'
#print data
return data | a5c8414d7ffad018e07bd45d46c3b7d0822e822d | 50,262 |
def all_tasks(loop=None):
"""Return a set of all tasks for the loop."""
# We could do this, but we will not.
return {} | 6a13ab25bf745689fca412a720f081d55913bd22 | 50,263 |
def ReadStringFromFile(file_name):
"""Writes text to a file, raising an RuntimeError on failure."""
try:
with open(file_name) as f:
return f.read()
except IOError:
raise RuntimeError('Error reading file [%s]' % file_name) | 9efe972fb5fc42e68bef293067cfe5a177f53d95 | 50,264 |
def firstOccurenceInStr(aList, aString):
""" Return the first element in aList that is contained in the
string aString.
"""
for elem in aList:
if elem in aString:
return elem
else:
return None | 6f188333111b7efd835519c0f36279aff56632bc | 50,265 |
def to_html(keyword_rank, now):
"""
:param keyword_rank: 메일 내용에 추가할 dataframe
:param now: 현재시각
:return: html 포맷으로 된 내용
"""
content = '<html>\n<body>\n'
content += "<h3>"+str("무신사 스토어 {} 검색어 랭킹".format(now))+"</h3>\n"
content += "<table>\n<tr>\n<th>순위</th>\n<th>키워드</th>\n<th colspan=\"2\">상승/감소</th>\n</tr>\n"
for i in range(len(keyword_rank)):
content += "<tr>\n"
content += "<td>" + str(keyword_rank.index[i]) + "</td>\n"
content += "<td>" + str(keyword_rank['Item'].iloc[i]) + "</td>\n"
content += "<td>" + str(keyword_rank['Status'].iloc[i]) + "</td>\n"
content += "<td>" + str(keyword_rank['Change'].iloc[i]) + "</td>\n"
content += "</tr>\n"
content += "</table>\n</body>\n</html>"
return content | 71553d8ec73c82be83598a3f4513940de9fb7e3d | 50,266 |
def load_variable_units_dict():
"""
Load dictionary containing variable units - after scale factors have been applied.
"""
variable_units_dict = {'FSNTOA+LWCF': r'W m$^{-2}$',
'SWCF_d1': r'W m$^{-2}$',
'LWCF': r'W m$^{-2}$',
'FSNTOA-FSNTOA_d1': r'W m$^{-2}$',
'FSNTOAC_d1': r'W m$^{-2}$',
'BURDENSO4': r'mg(SO$_4$) m$^{-2}$',
'BURDENPOM': r'mg m$^{-2}$',
'BURDENBC': r'mg m$^{-2}$',
'TGCLDIWP': r'g m$^{-2}$',
'TGCLDLWP': r'g m$^{-2}$',
'CCN3_ml24': r'cm$^{-3}$',
'CDNUMC': r'×$10^6$ cm$^{-2}$',
'AEROD_v': None}
return variable_units_dict | b46d6be7358dadb9ffc2285b6858ac35ccd9e65b | 50,267 |
import os
def find_file_recursive(file_name, root_directory):
"""
Finds the given file in the given root directory and its children
:param file_name: Name of the file to find
:param root_directory: Root folder of recursion
:return: The path to the found file, or None
"""
for root_dirs_files in os.walk(root_directory, followlinks=True):
if file_name in root_dirs_files[2]:
return os.path.abspath(os.path.join(root_dirs_files[0], file_name))
# File not found
return None | 7484a07cef99752740212a7c136de04ee8ac98ff | 50,268 |
def get_keys_hint(shortcuts):
"""Generate hint text to be used in tooltips from a list of QShortcut
@note: It's convention to put this between parentheses in single-line
tooltips, but it's left to be done by the caller since that may not
be the case in longer tooltips and this can also be used after \\t to
display more than one shortcut hint in a QMenu entry.
"""
return ', '.join(x.key().toString() for x in shortcuts) | 122abe213ea6259a1762c86595eb3f6608b4f6b5 | 50,270 |
def get_object_preds(g,id,predlist):
""" get a dict of values for a list of PREDICATES"""
labels = {}
for lp in predlist:
for lab in g.objects(predicate=lp, subject=id):
labels[str(lp)] = lab
return labels | 71a9a0be6d42e6ff2565d93bf6ca191bdef272b6 | 50,272 |
import torch
def get_sleep_loss(generative_model, guide, num_samples=1):
"""Returns:
loss: scalar that we call .backward() on and step the optimizer.
"""
latent, obs = generative_model.sample_latent_and_obs(num_samples=num_samples)
return -torch.mean(guide.get_log_prob(latent, obs)) | 63e0cd287ce6e5d4cfcd47aa0d49747d1c98b8b1 | 50,274 |
def selection_sort(array):
""" Selection sort implementation
Arguments:
- array : (int[]) array of int to sort
Returns:
- array : (int[]) sorted numbers
"""
for slot in range(len(array)-1,0,-1):
maxpos = 0
for index in range(slot+1):
if array[index] > array[maxpos]:
maxpos = index
temp = array[slot]
array[slot] = array[maxpos]
array[maxpos] = temp
return array | 02efcbf26e01c36177f05143c332de85a1823ac5 | 50,275 |
import subprocess
def push(local_path, device_path):
"""Move the file at the given local path to the path on the device."""
return subprocess.check_output(['adb', 'push', local_path, device_path],
stderr=subprocess.STDOUT).strip() | c832c914ba9010a7b11ac15e4414421138dd68c4 | 50,276 |
def table_string(column_headers, column_values):
    """Format table data as a markdown pipe_tables table.

    Args:
        column_headers: list of header strings, one per column.
        column_values: list of columns, each a list of cell values.

    Returns:
        Multiline string: header row, left-alignment rule, data rows.
    """
    n_col = len(column_headers)
    # Stringify every cell up front so column widths can be measured.
    str_columns = [[str(cell) for cell in column_values[k]] for k in range(n_col)]
    # A column is as wide as its widest cell or its header.
    widths = [max(len(column_headers[k]), max(len(s) for s in str_columns[k]))
              for k in range(n_col)]
    # Left-alignment rule line, e.g. |:-----|:----|
    rule = '|' + '|'.join(':' + '-' * (w + 1) for w in widths) + '|\n'
    # Row template with one left-padded positional field per column.
    fields = [' {%d:<%d} ' % (k, widths[k]) for k in range(n_col)]
    row_fmt = '|' + '|'.join(fields) + '|\n'
    out = row_fmt.format(*column_headers) + rule
    for row in zip(*str_columns):
        out += row_fmt.format(*row)
    return out
from typing import List
from typing import Optional
def _get_array_num_elements(array_dims: List[int],
index_depth: Optional[int] = None):
"""Returns the number of elements in a (nested) array.
Returns the number of elements in a (nested) array with dimensions
'array_dims'. If the array is indexed 'index_depth' times. If 'index_depth'
is not specified, the maximum number of possible indices is assumed.
Args:
array_dims: Array dimensions.
index_depth: Depth of index.
Returns:
The number of elements in a (nested) array with dimensions 'array_dims'.
"""
if index_depth is None:
index_depth = len(array_dims) - 1
elem_count = 1
for idx in range(len(array_dims) - index_depth, len(array_dims)):
elem_count = elem_count * array_dims[idx]
return elem_count | f363598442145c0c31c03e5c6918e0db6dee1d19 | 50,278 |
from sympy.ntheory.continued_fraction import continued_fraction_convergents, continued_fraction_iterator
from sympy.core.numbers import Rational
def wieners_attack(e: int, n: int):
    """Recover the RSA private exponent d via Wiener's attack.

    Works when d is small (roughly d < n**0.25): d then appears as the
    denominator of one of the continued-fraction convergents of e/n.

    :param e: RSA public exponent
    :param n: RSA modulus
    :return: the private exponent d
    :raises ValueError: if no convergent yields a working d
    """
    #pylint: disable=import-outside-toplevel
    #lazy imports to avoid slowing down initial load
    # Known test plaintext: if (c**d)**e == c (mod n), then d inverts e
    # and is the private exponent.
    c = 2
    convergents = continued_fraction_convergents(continued_fraction_iterator(Rational(e, n)))
    for convergent in convergents:
        # NOTE(review): sympy Rational exposes `denominator` as a property
        # in recent releases; calling it as a method here may need to be
        # `convergent.q` / `convergent.denominator` — confirm against the
        # pinned sympy version.
        d = convergent.denominator()
        p = pow(c, d, n)
        if pow(p, e, n) == c:
            return d
    raise ValueError(f"Couldn't find d for {e=} and {n=}") | e120b6b48ec9d16779ea109edd43e249508d1d8e | 50,279 |
def format(target, input):
    """Apply printf-style interpolation to every value of a mapping.

    Args:
        target: mapping whose values are %-format strings.
        input: value (or tuple/dict) substituted into each format string.

    Returns:
        A new dictionary with the same keys and the interpolated values.
    """
    # dict.iteritems() was Python-2 only and raises AttributeError on
    # Python 3; .items() behaves identically on both.
    return {key: value % input for key, value in target.items()}
def _format_http_text(method, url, headers, body):
"""
print http head and body for request or response
For examples: _format_http_text('', title, response.headers, response.text)
"""
result = method + ' ' + url + '\n'
if headers is not None:
for key, value in headers.items():
result = result + key + ': ' + value + '\n'
result = result + body
return result | 0b039b5a3a21707cd07fbb19b6bdc887741d55a4 | 50,281 |
def neighbours(guest, plan):
    """Return the guest's two adjacent neighbours in the seating plan.

    The plan is circular: guests at one end of the plan are considered
    to be sitting next to the guest at the opposite end.
    """
    pos = plan.index(guest)
    # plan[pos - 1] naturally wraps for pos == 0; modulo wraps the right side.
    return (plan[pos - 1], plan[(pos + 1) % len(plan)])
def edit_distance(word1: str, word2: str) -> int:
    """Return the Levenshtein edit distance between two words.

    Classic dynamic programme: memo[i][j] is the distance between the
    first i characters of word1 and the first j characters of word2.
    """
    memo = [[0 for _ in range(len(word2) + 1)] for _ in range(len(word1) + 1)]
    # Transforming a prefix to/from the empty string costs its length.
    for i in range(len(word1) + 1):
        memo[i][0] = i
    for j in range(len(word2) + 1):
        memo[0][j] = j
    for i in range(1, len(word1) + 1):
        for j in range(1, len(word2) + 1):
            if word1[i - 1] == word2[j - 1]:
                memo[i][j] = memo[i - 1][j - 1]
                continue
            # 1 + cheapest of substitute, delete, insert. (The original
            # guarded on i > 0 and j > 0 here, but both loops start at 1,
            # so those guards were always true and have been removed.)
            memo[i][j] = min(memo[i - 1][j - 1], memo[i - 1][j], memo[i][j - 1]) + 1
    return memo[-1][-1]
import textwrap
import re
import ast
def _NormalizeCode(code):
  """Make sure that the code snippet is compilable.

  A snippet lifted out of a larger file (a lone clause, decorator, or
  statement) often cannot be parsed on its own; this pads it with minimal
  scaffolding (`pass` bodies, dummy enclosing statements) so it parses as
  a standalone module.

  Args:
    code: the raw snippet string.

  Returns:
    The normalized snippet, ending in a newline.
  """
  code = textwrap.dedent(code.lstrip('\n')).lstrip()
  # Split the code to lines and get rid of all leading full-comment lines as
  # they can mess up the normalization attempt.
  lines = code.split('\n')
  i = 0
  for i, line in enumerate(lines):
    line = line.strip()
    if line and not line.startswith('#'):
      break
  code = '\n'.join(lines[i:]) + '\n'
  # Block-opening statements need at least one indented statement in the body.
  if re.match(r'(if|while|for|with|def|class|async|await)\b', code):
    code += '\n  pass'
  elif re.match(r'(elif|else)\b', code):
    try:
      # First try attaching the clause to a dummy multi-line `if`; verify
      # with ast.parse before committing.
      try_code = 'if True:\n  pass\n' + code + '\n  pass'
      ast.parse(
          textwrap.dedent(try_code.lstrip('\n')).lstrip(), '<string>', 'exec')
      code = try_code
    except SyntaxError:
      # The assumption here is that the code is on a single line.
      code = 'if True: pass\n' + code
  elif code.startswith('@'):
    # A bare decorator needs a function to decorate.
    code += '\ndef _():\n  pass'
  elif re.match(r'try\b', code):
    code += '\n  pass\nexcept:\n  pass'
  elif re.match(r'(except|finally)\b', code):
    code = 'try:\n  pass\n' + code + '\n  pass'
  elif re.match(r'(return|yield)\b', code):
    # return/yield are only legal inside a function body.
    code = 'def _():\n  ' + code
  elif re.match(r'(continue|break)\b', code):
    # continue/break are only legal inside a loop body.
    code = 'while True:\n  ' + code
  elif re.match(r'print\b', code):
    # Lets Python-2-style `print x` statements parse as a function call.
    code = 'from __future__ import print_function\n' + code
  return code + '\n' | 97794f5f5a80ef9da7f0001a407c700fe5fd0d1e | 50,287 |
def get_popular_user_agents():
    """Retrieve most popular user agent strings.

    Currently a stub that returns an empty string.
    Can look at https://techblog.willshouse.com/2012/01/03/most-common-user-agents/
    """
    return ''
from typing import Any
import importlib
def dynamic_import_class(module_name: str, class_name: str) -> Any:
    """
    Dynamically imports a class from a given module

    Args:
        module_name (str): the module to dynamically load
        class_name (str): the class to dynamically load

    Returns:
        Any: the class from the module specified, or None if the module or
        the attribute could not be loaded (an error is printed instead)
    """
    # Import the module first and bail out early on failure; the original
    # fell through to getattr(None, class_name), which raised and printed
    # a second, misleading error.
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        print("module not found: " + module_name)
        return None
    # Pull the requested attribute off the successfully imported module.
    try:
        return getattr(module, class_name)
    except Exception as e:
        print(e)
        return None
def parse_relative_day_value(relative_day: str) -> int:
    """
    Parses a relative month value such as "前月" (last month).

    Note: despite the parameter name, the accepted values are months —
    前月 (-1), 今月 (0), 来月 (+1). The old docstring claimed days ("昨日"),
    which did not match the implementation.

    :param relative_day: The relative month string to parse
    :return: An integer offset, for example -1 for the previous month
    :raises ValueError: if the input is not a recognised relative month
    """
    offsets = {"前月": -1, "今月": 0, "来月": 1}
    try:
        return offsets[relative_day]
    except KeyError:
        raise ValueError(f"Could not parse the input as a relative month: {relative_day}") from None
def ContinuePlaying():
    """Ask the user if they would like to play again; return their answer."""
    prompt = "Do you want to play again? (yes/no) "
    # answer: raw text typed by the user; re-prompt until it is yes/no.
    answer = input(prompt)
    while answer.upper() not in ("YES", "NO"):
        print("Please enter yes or no.")
        answer = input(prompt)
    return answer
import torch
def cos_sim(x, y, epsilon=0.01):
    """
    Cosine similarity between the last dimensions of two tensors.

    epsilon is added to the norm product to avoid division by zero.
    """
    dot = x @ y.transpose(-1, -2)
    x_norm = x.norm(dim=-1).unsqueeze(-1)
    y_norm = y.norm(dim=-1).unsqueeze(-1)
    return dot / (x_norm @ y_norm.transpose(-1, -2) + epsilon)
def get_url_by_format(config):
    """Pick the service URL matching the configured output format."""
    # types: Config -> string
    return config.sviewer_url if config.format == 'gff3' else config.entrez_url
import subprocess
import sys
def command(c, input = None, check_err = True):
    """Run a command via `subprocess` and return its stdout as text.

    If `input` is given it is fed to the child's stdin. When the child
    produced stderr output and `check_err` is true, the error is printed
    and False is returned instead.
    """
    popen_kwargs = dict(stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        universal_newlines=True)
    if input is None:
        p = subprocess.Popen(c, **popen_kwargs)
        out, err = p.communicate()
    else:
        p = subprocess.Popen(c, stdin=subprocess.PIPE, **popen_kwargs)
        # Feed input into subprocess
        out, err = p.communicate(input=input)
    sys.stdout.flush()
    if not err or not check_err:
        return out
    print("Command {0!s} resulted in the error :".format(
        str(c[0])))
    print(err)
    print(out)
    sys.stdout.flush()
    return False
def get_count(self):
    """Return self's "count" entry, falling back to a default of 1."""
    count = self.get("count", 1)
    return count
from typing import List
def suffixes(word) -> List[str]:
    """Return every non-empty proper suffix of word, longest first."""
    # Start positions 1..len-1 exclude both the full word and the empty suffix.
    return [word[start:] for start in range(1, len(word))]
def _compute_preferred_numer_of_labels(
available_space: int, vertical_direction: bool
) -> int:
"""
Compute an estimate for the preferred number of labels.
"""
# For horizontal direction (x axis)
preferred_number_of_labels = int(available_space / 15)
if vertical_direction:
# for y axis
preferred_number_of_labels = int(available_space / 5)
return max(2, min(20, preferred_number_of_labels)) | 4209498eda4fe8b35535ec05ad6d368ea6dba736 | 50,302 |
from typing import Dict
from typing import List
def make_data_lists(img_pth_to_cap: Dict[str, str], image_paths: List[str]):
    """Make parallel lists of image paths and their captions.

    Each image path is repeated once per caption it has, so the two
    returned lists line up index-for-index.

    Args:
        img_pth_to_cap: Dictionary of image paths to caption lists
        image_paths: List of image paths

    Returns:
        img_list: List of image paths (one entry per caption)
        cap: List of captions
    """
    captions = []
    paths = []
    for path in image_paths:
        path_captions = img_pth_to_cap[path]
        captions += path_captions
        paths += [path] * len(path_captions)
    return paths, captions
def get_log(user_hash: str) -> dict:
    """Return the server-log lines recorded for the given user.

    :param user_hash: hash identifying a user (must be 64 characters)
    :type user_hash: str
    :return: dict with keys success / error_msg / log. see protocol
    :rtype: dict
    """
    # A valid hash is exactly 64 characters; reject anything else up front.
    if len(user_hash) != 64:
        return {'success': False, 'error_msg': 'Invalid Hash', 'log': ''}
    prefix = 'INFO:root:' + user_hash + ' - '
    matching = []
    with open("server.log", 'rt') as f:
        for line in f:
            # Keep only this user's CALLED entries, minus the log prefix.
            if user_hash in line and 'CALLED' in line:
                matching.append(line.replace(prefix, ''))
    return {'success': True, 'error_msg': '', 'log': ''.join(matching)}
def format_table(table, titles=True):
    """
    Render a 2d list as text with equal-width, pipe-separated columns.

    :param titles: if true, the first row in the table is taken as headers,
        adding a dashed separator on the second line
    """
    def fmt(cell, width=1):
        # bools must go through !s or they would format as ints (True -> 1)
        if isinstance(cell, bool):
            return '{!s:{width}}'.format(cell, width=width)
        try:
            return '{:{width}}'.format(cell, width=width)
        except TypeError:
            # NoneType, dict, ... don't accept a ':width' spec directly
            return '{!s:{width}}'.format(cell, width=width)
    widths = [max(len(fmt(cell)) for cell in column) for column in zip(*table)]
    txt = ''
    for idx, row in enumerate(table):
        if titles and idx == 1:
            txt += '-'.join('-' * width for width in widths) + '\n'
        txt += '|'.join(fmt(cell, width=width) for cell, width in zip(row, widths))
        txt += '\n'
    return txt
def response_to_json(response):
    """Decode an API response body as JSON.

    Args:
        response (requests.response): response from an API request

    Returns:
        dict: the response payload parsed from JSON
    """
    return response.json()
def divide(s):
    """
    Turn one --chain argument entry into a tuple.

    E.g.
        ABCD.pdb:A --> ('ABCD.pdb', 'A')
        ABCD.pdb   --> ('ABCD.pdb',)

    :param s: string with one of the entries for the --chain argument
    :type s: str
    """
    # Does not work... find out why
    # if not os.path.exists(r[0]):
    #     raise argparse.ArgumentTypeError('Specified PDB file does not exist')
    parts = s.split(':')
    return tuple(parts)
def display(*args, listreturn=1):
    """
    Print a numbered menu built from list/tuple pairs/groups and return
    the item paired with the user's selection.

    Args:
        *args (list/tuple): pairs/groups whose element [0] is the text
            shown on the console and whose element [listreturn] is what
            is returned when that entry is chosen,
            e.g. ('display name', function_to_call)
        listreturn (int, optional): index within the chosen pair/group of
            the item to return. Defaults to 1.

    Returns:
        item/function: element [listreturn] of the selected pair/group.
    """
    # Show the options numbered from 1.
    for number, option in enumerate(args, 1):
        print(f' [{number}]: {option[0]}')
    # Keep prompting until the input is a number within range (and not 0).
    while True:
        sel = input('\n Selection: ')
        if sel.isdigit() and 0 < int(sel) <= len(args):
            return args[int(sel) - 1][listreturn]
        print('\n Please choose from available selections.')
import six
def normalize_column_name(name):
    """Validate and truncate a string for use as a column name.

    Column names can be 63 *bytes* max in PostgreSQL; the name is trimmed
    to fit and rejected if empty or containing '.' or '-'.
    """
    if not isinstance(name, six.string_types):
        raise ValueError('%r is not a valid column name.' % name)
    # limit to 63 characters first, then shrink further until the UTF-8
    # encoding also fits.
    name = name.strip()[:63]
    if isinstance(name, six.text_type):
        while len(name.encode('utf-8')) >= 64:
            name = name[:-1]
    if not name or '.' in name or '-' in name:
        raise ValueError('%r is not a valid column name.' % name)
    return name
def parse_subdir_file(path_to_subdir_file):
    """Parse a .subdir.txt file into a list of its lines.

    Reading stops at the first blank line (or end of file).

    @retval: list with each element containing a stripped line from the
        .subdir.txt file.
    """
    entries = []
    with open(path_to_subdir_file, 'r') as f:
        for raw in f:
            stripped = raw.strip()
            if not stripped:
                break
            entries.append(stripped)
    return entries
import json
import pprint
def pformat(value):
    """
    Pretty-format the given object: try JSON first and fall back to
    pprint.pformat() (JSON dumps are nicer than pprint.pformat() ;)
    """
    try:
        return json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False)
    except TypeError:
        # Fallback for values that are not serializable with JSON:
        return pprint.pformat(value, width=120)
def get_branch(branch_dict, n_hash_chars=7, hash_prefix=':'):
    """
    Extract the branch name from branch meta data, falling back to a
    prefixed, truncated commit hash when HEAD is not on a branch.

    Arguments
    ---------
    branch_dict: dict
        branch meta data dictionary
    n_hash_chars: int
        number of hash characters to print if falling back to a Git hash
    hash_prefix: str
        string to prepend to a hash to disambiguate from a branch name

    Returns
    -------
    str
        the branch name, or hash_prefix plus the truncated commit hash
    """
    head = branch_dict['branch.head']
    # A detached HEAD is reported in parentheses, e.g. "(detached)".
    if head.startswith('('):
        return hash_prefix + branch_dict['branch.oid'][:n_hash_chars]
    return head
def r_3p2():
    """Solution to exercise R-3.2.

    Algorithms A and B execute 8n*logn and 2n^2 operations respectively;
    determine n_0 such that A is better than B for n >= n_0.

    Solution: 8n*log2(n) <= 2n^2 reduces to 4*log2(n) <= n. Since
    log2(16) = 4, equality holds at n = 16 (4*4 == 16), so algorithm A
    is better than B once n_0 >= 16.
    """
    return 16
import os
def get_df_from_db(spark_session, database: str, schema: str, table: str):
    """
    Load all of the data in the specified table into a PySpark dataframe
    over JDBC. Connection details come from the POSTGRES_USERNAME /
    POSTGRES_PASSWORD / POSTGRES_IP environment variables.

    :param spark_session: Existing SparkSession object
    :param database: str, name of database
    :param schema: str, name of database schema
    :param table: str, name of table
    :return: DataFrame
    """
    jdbc_url = f"jdbc:postgresql://{os.environ['POSTGRES_IP']}/{database}"
    connection_properties = {
        "driver": "org.postgresql.Driver",
        "user": os.environ['POSTGRES_USERNAME'],
        "password": os.environ['POSTGRES_PASSWORD'],
        "currentSchema": schema,
    }
    return spark_session.read.jdbc(
        url=jdbc_url,
        properties=connection_properties,
        table=table,
    )
def sort(x):
    """Return the sample x as a new list in increasing order."""
    ordered = list(x)
    ordered.sort()
    return ordered
import os
def img_file_path(instance, filename):
    """Generate the upload path for a new student logo image.

    The file keeps its extension but is renamed to _<instance id>, and is
    placed under uploads/<lowercased model name>/.
    """
    extension = filename.split('.')[-1]  # text after the final dot
    new_name = f'_{instance.id}.{extension}'
    directory = f'uploads/{instance._meta.model.__name__.lower()}/'
    return os.path.join(directory, new_name)
def _get_tweet_id(a_tweet_it):
"""Obtain id of a tweet from an iterator.
Args:
a_tweet_it (iterator): iterator over XML elements
Returns:
(str):
id of the next tweet returned by iterator
Raises:
StopIteration
"""
return a_tweet_it.next().get("id") | 58c2ad5d3e75da7442a65c817ced1f3d595c338b | 50,324 |
def convert_seconds(seconds: float) -> str:
    """
    Convert time in seconds to days:hours:minutes:seconds.milliseconds
    with leading 0s removed.

    Parameters
    ----------
    seconds : float
        Number of seconds to be converted.

    Returns
    -------
    str
        Converted time.
    """
    millis = int(seconds * 1000)
    days, millis = divmod(millis, 86400000)
    hours, millis = divmod(millis, 3600000)
    minutes, millis = divmod(millis, 60000)
    secs, millis = divmod(millis, 1000)
    full = f'{days}:{hours:02d}:{minutes:02d}:{secs:02d}.{millis:03d}'
    # Strip the leading zero fields and their separators in one pass.
    trimmed = full.lstrip('0:')
    # A sub-second value strips down to ".mmm"; restore the leading zero.
    if trimmed.startswith('.'):
        trimmed = '0' + trimmed
    return trimmed
import binascii
import os
def _get_serial() -> bytes:
"""Generates a serial number for the self-signed SSL.
See Also:
- This function is not called, but it is here only as a just in case measure to insert serial number manually.
- Serial Number is a unique identifier assigned by the CA which issued the certificate.
Returns:
bytes:
Encoded serial number for the certificate.
"""
serial_hex = binascii.hexlify(os.urandom(18)).decode().upper()
return " ".join(serial_hex[i:i + 2] for i in range(0, len(serial_hex), 2)).encode('UTF-8') | e967fc398aa67f2344295717b2d80158ed1adc6b | 50,326 |
def task_docs_jshat():
    """Docs - build jshat documentation"""
    return {
        'actions': ['yarn run --silent docs'],
        'task_dep': ['jshat_deps'],
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.