| content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def _clean_boto3_metadata(boto3_metadata: dict) -> dict:
    """Strip noisy bookkeeping keys (in place) from a boto3 metadata dict.

    Arguments:
        boto3_metadata (dict): raw metadata, typically found in
            resource.meta.data; may be None, in which case an empty
            dict is returned.
    """
    boto3_metadata = boto3_metadata or {}
    # pop() with a default is a no-op when the key is absent.
    for unwanted in ("ResponseMetadata",):
        boto3_metadata.pop(unwanted, None)
    return boto3_metadata
import re
import pickle
def predict(text):
    """Predict whether a sentence is sarcastic: returns the model's 0/1 label."""
    # Keep only letters; everything else becomes a space.
    cleaned = re.sub('[^a-zA-Z]', ' ', text)
    samples = [cleaned]
    with open('tfidf.pkl', 'rb') as handle:
        vectorizer = pickle.load(handle)
    with open('model.pkl', 'rb') as handle:
        model = pickle.load(handle)
    features = vectorizer.transform(samples).toarray()
    return int(model.predict(features)[0])
import os
import subprocess
def command_available(command, print_error=True):
    """
    See if a command exists by trying to call it with --help.

    Returns True when the call could be spawned, False (optionally with a
    hint printed) when the executable is missing.
    """
    with open(os.devnull, 'w') as devnull:
        try:
            subprocess.call([command, '--help'], stdout=devnull, stderr=devnull)
        except OSError:
            # Spawning failed, i.e. the executable is not installed.
            if print_error:
                print('You must install {0}.'.format(command))
            return False
    return True
def format_serial(serial_int):
    """Format the volume serial number as a string.

    Args:
        serial_int (int): The integer representing the volume serial number
    Returns:
        (str|None): The string representation xxxx-xxxx, or None when the
        input is 0 or None.
    """
    # Covers both the explicit 0 case and None.
    if not serial_int:
        return None
    # Bug fix: hex(x)[2:-1] assumed Python 2's trailing 'L'; on Python 3 it
    # chopped off the last hex digit.  format() has no prefix/suffix at all.
    serial_str = format(serial_int, 'x').zfill(8)
    return serial_str[:4] + '-' + serial_str[4:]
def log(message: str,
        to_console: bool = True,
        newline: bool = True,
        to_server: bool = True) -> None:
    """log(message: str, to_console: bool = True, newline: bool = True,
    to_server: bool = True) -> None

    Category: General Utility Functions

    Log a message through the platform's default mechanism (stdout on mac,
    the android log on android, and so on).  Messages also reach the in-game
    console unless 'to_console' is False, and are forwarded to the
    master-server for issue analysis unless 'to_server' is False.

    Python's standard print() is wired to call this (with default values),
    so in most cases plain print() suffices.
    """
    # Documentation stub: the real implementation lives in the engine binary.
    return None
def _mapProperties(reqPropDefs, properties, cwr):
    """ Converts the given properties.

    Args:
        reqPropDefs: mapping of property id -> property definition
        properties: mapping of property id -> raw value (may be None)
        cwr: factory exposing createPropertyFromDefinition / createProperty
    Returns:
        list of created property objects (empty when properties is None)
    """
    mappedProperties = list()
    # Bug fix: dict.iteritems() is Python-2-only and raises AttributeError on
    # Python 3; items() works on both.  Also PEP8: `is not None`.
    if properties is not None:
        for propId, value in properties.items():
            if propId in reqPropDefs:
                propDef = reqPropDefs[propId]
                prop = cwr.createPropertyFromDefinition(propDef, value)
            else:
                prop = cwr.createProperty(propId, value)
            mappedProperties.append(prop)
    return mappedProperties
def parse_quarantines(_code, details_div):
    """
    Collect inspection & quarantine category labels (original docstring:
    检验检疫类别), skipping blank or "none" entries.
    """
    cells = details_div[4].table.tbody.find_all('td', class_='td-label')
    quarantines = []
    for cell in cells:
        label = cell.text
        # '无' means "none"; skip it and empty labels.
        if label not in ('无', ''):
            quarantines.append(label)
    return quarantines
import sys
import select
def poll():
    """
    Return True when stdin has data ready to read (non-blocking check).
    """
    # A zero timeout makes select() return immediately.
    readable, _, _ = select.select([sys.stdin.buffer], [], [], 0)
    return sys.stdin.buffer in readable
def create_model_data(data):
    """
    Restrict the input DataFrame to the model's feature columns.

    data: pandas DataFrame
    Return: pandas DataFrame containing only the expected columns
    """
    feature_columns = ['Open', 'High', 'Low', 'Close', 'Volume',
                       'Trades', 'Change', 'CloseOffHigh', 'Volatility']
    data = data[feature_columns]
    print(data)  # debug output preserved from the original
    return data
def getMid(p1, p2):
    """Return the midpoint of two points.

    :param p1: first point as an (x, y) pair
    :param p2: second point as an (x, y) pair
    :return: midpoint as an (x, y) tuple
    """
    mid_x = (p1[0] + p2[0]) / 2
    mid_y = (p1[1] + p2[1]) / 2
    return (mid_x, mid_y)
def fatorial(num=0, show=False):
    """
    Compute the factorial of a number.

    :param num: the number whose factorial is computed
    :param show: when True, print the expanded calculation; when False, stay silent
    :return: the computed factorial
    """
    resultado = 1
    for valor in range(num, 0, -1):
        if show:
            print(valor, end='')
            # Print a multiplication sign between factors, '=' before the result.
            if valor > 1:
                print(" X ", end='')
            else:
                print(' = ', end='')
        resultado *= valor
    return resultado
def epi_params(echo_spacing, acc_factor, enc_dir, enc_dir_alt, epi_factor):
    """
    Build EPI parameter dicts for both phase-encoding directions.

    :param echo_spacing: echo spacing value
    :param acc_factor: acceleration factor
    :param enc_dir: primary phase-encoding direction
    :param enc_dir_alt: alternate phase-encoding direction
    :param epi_factor: EPI factor
    :return: (primary_params, alternate_params) tuple of dicts
    """
    def build(direction):
        # Identical settings apart from the encoding direction.
        return dict(echospacing=echo_spacing, acc_factor=acc_factor,
                    enc_dir=direction, epi_factor=epi_factor)
    return build(enc_dir), build(enc_dir_alt)
def parseDict(AST):
    """
    Recursively convert an AST node into a plain dict of its symbol
    and children.
    """
    children = [parseDict(node) for node in AST.child if AST.child]
    return {'symbol': AST.symbol, 'child': children}
def _audiocomp(P: dict) -> list:
    """Return ffmpeg arguments selecting AAC audio at 160k / 44.1 kHz."""
    codec = ['-c:a', 'aac']
    bitrate = ['-b:a', '160k']
    sample_rate = ['-ar', '44100']
    return codec + bitrate + sample_rate
def json_export_occurences(self, request):
    """ Returns the occurrences as JSON.

    This is used for the senantis.dir.eventsportlet.

    Query parameters:
        cat1 -- repeatable tag filter values
        cat2 -- repeatable location filter values
        max  -- optional cap on the number of occurrences (digits only)

    Returns a list of dicts with 'start' (ISO timestamp), 'title' and 'url'.
    """
    @request.after
    def cors(response):
        # Open CORS: this endpoint serves a public JSON feed to the portlet.
        response.headers.add('Access-Control-Allow-Origin', '*')
    query = self.for_filter(
        tags=request.params.getall('cat1'),
        locations=request.params.getall('cat2')
    ).query()
    limit = request.params.get('max')
    # Only apply 'max' when it is a plain non-negative integer string.
    if limit and limit.isdigit():
        query = query.limit(int(limit))
    return [
        {
            'start': occurrence.start.isoformat(),
            'title': occurrence.title,
            'url': request.link(occurrence),
        } for occurrence in query
    ]
def GetOriginFromDataUnit(data_unit):
    """ Return a shortened origin string from the data unit

    E.g. 'fb' for Facebook, 'tw' for Twitter; any other origin is
    returned lowercased and unchanged.

    Returns: shortened origin (string)
    """
    origin = data_unit.get('origin', '').lower()
    # Known origins map to their short codes; others pass through.
    return {'facebook': 'fb', 'twitter': 'tw'}.get(origin, origin)
import collections
def nested_ddict():
    """
    Defaultdict with arbitrary number of levels.

    Missing keys create another nested defaultdict, so d[a][b][c]
    works at any depth without explicit initialization.
    """
    return collections.defaultdict(nested_ddict)
import os
def _rel_abs_path(current_path, base_path):
    """
    Return the value of the current path, relative to the base path, but
    resolving paths absolutely first. This helps when walking a nested
    directory structure and want to get the subtree relative to the original
    path.

    Bug fix: the absolute path was computed into an unused temporary while
    relpath was still fed the raw inputs; both paths are now resolved.
    """
    return os.path.relpath(os.path.abspath(current_path),
                           os.path.abspath(base_path))
from datetime import datetime
def path_replace_ymd(path, ymd):
    """
    Substitute date placeholders in *path*.

    %YYYY, %MM, %DD and %JJJ are replaced with the year, month, day and
    day-of-year derived from *ymd*.

    :param path: path template containing the placeholders
    :param ymd: date string in 'yyyymmdd' form (e.g. 20180101)
    """
    date = datetime.strptime(ymd, '%Y%m%d')
    replacements = {
        '%YYYY': date.strftime('%Y'),
        '%MM': date.strftime('%m'),
        '%DD': date.strftime('%d'),
        '%JJJ': date.strftime('%j'),
    }
    for token, value in replacements.items():
        path = path.replace(token, value)
    return path
def applescript_escape(string):
    """Escape backslashes and double quotes so *string* is AppleScript-safe."""
    # Backslashes must be doubled first, otherwise the quote escapes
    # would themselves get re-escaped.
    escaped = string.replace('\\', '\\\\')
    return escaped.replace('"', '\\"')
import operator
def _mkl(dictionary: dict, keys: tuple) -> tuple:  # type: ignore[type-arg]
    """Multi-key lookup.

    NOTE: with a single key, operator.itemgetter returns the bare value
    rather than a 1-tuple (itemgetter semantics).
    """
    getter = operator.itemgetter(*keys)
    return getter(dictionary)
def _to_list(obj):
    """Wrap *obj* in a list unless it already is one."""
    return obj if isinstance(obj, list) else [obj]
import json
def read_gallery_config(gallery_path):
    """
    Load the gallery configuration stored as JSON.

    :param gallery_path: path to the JSON file
    :return: dict containing the gallery config, or an empty list when
        the file cannot be opened (legacy fallback value preserved)
    """
    try:
        with open(gallery_path, "r") as handle:
            return json.load(handle)
    except OSError:
        return []
import numpy
def create_3D_tank(x1, y1, z1, x2, y2, z2, dx):
    """ Generate an open rectangular tank (five walls, no top).

    Parameters:
    -----------
    x1,y1,z1,x2,y2,z2 : Opposite corners of the box
    dx : The spacing to use

    Returns:
    --------
    x, y, z : 1-D numpy arrays of the deduplicated wall points

    Refactor: the five near-identical wall loops are folded into one
    helper; points are accumulated directly into a set.
    """
    def wall(a1, a2, b1, b2, const, order):
        # Grid one wall; `order` maps (u, v, w=const) onto the (x, y, z) axes.
        u, v = numpy.mgrid[a1:a2 + dx / 2:dx, b1:b2 + dx / 2:dx]
        u = u.ravel()
        v = v.ravel()
        w = numpy.ones_like(u) * const
        axes = {'u': u, 'v': v, 'w': w}
        return zip(axes[order[0]], axes[order[1]], axes[order[2]])

    points = set()
    points.update(wall(x1, x2, y1, y2, z1, 'uvw'))   # base: X-Y plane at z1
    points.update(wall(x1, x2, z1, z2, y1, 'uwv'))   # front: X-Z plane at y1
    points.update(wall(y1, y2, z1, z2, x1, 'wuv'))   # side: Y-Z plane at x1
    points.update(wall(x1, x2, z1, z2, y2, 'uwv'))   # back: X-Z plane at y2
    points.update(wall(y1, y2, z1, z2, x2, 'wuv'))   # side: Y-Z plane at x2
    x = numpy.array([p[0] for p in points])
    y = numpy.array([p[1] for p in points])
    z = numpy.array([p[2] for p in points])
    return x, y, z
def find_disks(disk_type, nodes, partition_name):
    """
    return a list of disk that are not used by storage pool
    or has a different type as the one required for this cluster

    Args:
        disk_type: required disk type name; disks whose .type.name differs
            are skipped
        nodes: iterable of node objects exposing .name and .disks.list()
        partition_name: filesystem-label prefix that marks partitions
            belonging to this cluster
    Returns:
        dict mapping node name -> list of candidate disk objects; disks whose
        partitions carry a matching label are placed at the front of the list
    """
    available_disks = {}
    def check_partition(disk):
        # True when any filesystem on any partition has a label with the
        # cluster's prefix (falls through to None == falsy otherwise).
        for partition in disk.partitions:
            for filesystem in partition.filesystems:
                if filesystem['label'].startswith(partition_name):
                    return True
    for node in nodes:
        # Every node gets an entry, even when no disk qualifies.
        available_disks.setdefault(node.name, [])
        for disk in node.disks.list():
            # skip disks of wrong type
            if disk.type.name != disk_type:
                continue
            # skip devices which have filesystems on the device
            if len(disk.filesystems) > 0:
                continue
            # include devices which have partitions
            if len(disk.partitions) == 0:
                available_disks[node.name].append(disk)
            else:
                if check_partition(disk):
                    # devices that have partitions with correct label will be in the beginning
                    available_disks[node.name].insert(0, disk)
    return available_disks
import re
def _parse_parameters(parameters):
    """ Parses parameters string and returns a dict of overrides.

    This function assumes that parameters string is in the form of
    'key1="value1" key2="value2"'.  Quoted values may contain spaces.

    Args:
        parameters (str): A string in the form of 'key="value" key="value"'.
    Returns:
        dict: key/value pairs parsed from the parameters string.
    Raises:
        ValueError: if the parameters string is malformed.
    """
    if not re.match(r'^(\w+)="([^=]+)"(\s{1}(\w+)="([^=]+)")*$', parameters):
        raise ValueError
    # Replace only the separator spaces (those outside quotes) with a '$$'
    # token; spaces inside quoted values are kept verbatim.
    outside_quotes = True
    pieces = []
    for ch in parameters:
        if ch == '"':
            outside_quotes = not outside_quotes
        elif ch == ' ' and outside_quotes:
            pieces.append('$$')
        else:
            pieces.append(ch)
    result = {}
    for token in ''.join(pieces).split('$$'):
        # separate key/values (values cannot contain '=' per the regex above)
        key_value = token.split('=')
        result[key_value[0]] = key_value[1]
    return result
def __get_owning_account_from_arn(arn):
    """
    Extract the owning AWS account id (the 5th ':'-separated field)
    from an ARN; returns None when no ARN is supplied.
    """
    if arn is None:
        return None
    return arn.split(':')[4]
def patch_chunkparser_extract_phrase(mocker):
    """Patch both the ChunkParser class, and its extract_phrase method, but only return the latter.

    :param mocker: pytest-mock style fixture used to install the patch
    :return: the mock standing in for ChunkParser().extract_phrase
    """
    patch_chunkparser = mocker.patch("src.make_feedback_tool_data.make_data_for_feedback_tool.ChunkParser")
    return patch_chunkparser.return_value.extract_phrase
import re
def extract_values_with_regex(filepath, regex, names):
    """
    Scan *filepath* line by line and collect regex capture groups.

    Parameters
    ----------
    filepath: str
        Path of the text file to scan.
    regex: str
        Pattern (compiled case-insensitively); its capture groups must
        correspond one-to-one with *names*.
    names: list[str]

    Returns
    -------
    dict[str, list[str]]
        For each name, the values captured on every matching line.
    """
    re_comp = re.compile(regex, re.I)
    values = {name: [] for name in names}
    # Bug fix: the file handle was previously opened inline and never closed.
    with open(filepath) as handle:
        for line in handle:
            line = line.strip()
            match = re_comp.findall(line)
            if len(match) > 0:
                match = match[0]
                # A single capture group yields a bare string; normalise to tuple.
                if not isinstance(match, tuple):
                    match = (match,)
                assert len(match) == len(names)
                for index in range(len(names)):
                    values[names[index]].append(match[index])
    return values
def execute_cypher_query(driver, query, params=None):
    """ Given `neo4j.Driver` instance and `query` str, execute `query` via
    the `driver` in a session, returning the `neo4j.BoltStatementResult` object
    that results.

    Args:
        driver (neo4j.Driver): instance of database driver
        query (str): Cypher query to execute
        params (dict): Neo4j parameters that are substituted into `query`
    Returns:
        (neo4j.BoltStatementResult): the result object

    NOTE(review): the session closes when this function returns, and neo4j
    results are typically consumed lazily -- callers may need to materialise
    the result while the session is open; confirm against the driver version.
    """
    with driver.session() as session:
        result = session.run(query, parameters=params)
        return result
def classify_whitespaces(string, tabwidth):
    """ calculate the leading whitespace's raw (characters) and effective
    (columns, with tabs expanded to *tabwidth*) length """
    raw = 0
    effective = 0
    for ch in string:
        if ch == " ":
            raw += 1
            effective += 1
        elif ch == "\t":
            # A tab advances to the next multiple of tabwidth.
            raw += 1
            effective = (effective // tabwidth + 1) * tabwidth
        else:
            break
    return raw, effective
import re
def exclude_filter(excl_filter, paths):
    """
    Matches a set of paths against an exclude filter.

    param: excl_filter: The regex filter to match.
    param: paths: The paths to match against the filter.
    returns: The set of paths which do NOT match the filter.
    """
    return {p for p in paths if re.search(excl_filter, p) is None}
def q6(vector, n):
    """
    Reverse the input vector in chunks of size n.

    Args:
        vector (1xd): The array to be reversed
        n (int): chunk size
    Returns:
        Array: the chunk-reversed array (the input is left untouched)
    """
    reordered = []
    remaining = list(vector)
    while remaining:
        # Move the trailing chunk (in its original order) to the output.
        reordered.extend(remaining[-n:])
        remaining = remaining[:-n]
    return reordered
def _is_empty(text: str) -> bool:
    """
    Determine if a cell is empty.

    Keyword arguments:
    text -- the text to check

    Returns: True if the cell is exactly the empty string, False otherwise
    (whitespace-only text is NOT considered empty).
    """
    return text == ""
def kl_div(P, Q):
    """
    Compute the actual row-wise KL divergence KL(P||Q)
    (torch.nn.functional.kl_div returns different values).

    :param P: discrete distributions, one per row
    :param Q: discrete distributions, one per row
    :return: per-row KL divergence
    """
    ratio = P / Q
    return (P * ratio.log()).sum(1)
import argparse
def read_arguments():
    """Parses the arguments from the stdin and returns a namespace object."""
    parser = argparse.ArgumentParser()
    for flag, description in (('--f1', 'Path the first file to read'),
                              ('--f2', 'Path the second file to read')):
        parser.add_argument(flag, type=str, help=description)
    return parser.parse_args()
import re
def remove_trailing_slashes(filepath: str) -> str:
    """
    Removes a single leading and/or trailing slash from a path.

    Note: despite the name, a leading slash is stripped as well.

    Examples:
        remove_trailing_slashes("/my/path/") == "my/path"
        remove_trailing_slashes("my/path/") == "my/path"
        remove_trailing_slashes("/path/to/myfile.pdf") == "path/to/myfile.pdf"
    """
    if filepath.startswith('/'):
        filepath = filepath[1:]
    if filepath.endswith('/'):
        filepath = filepath[:-1]
    return filepath
def snake_to_camel(name):
    """Returns the camelCase form of the given snake_cased input"""
    first, *rest = name.split('_')
    return first + ''.join(part.title() for part in rest)
def det_dist(coord, coord_2):
    """
    Determine the euclidean distance between two points.
    """
    dx = coord_2[0] - coord[0]
    dy = coord_2[1] - coord[1]
    return (dx * dx + dy * dy) ** 0.5
def predictions_from_model(model, preprocessor, new_data_raw):
    """
    Transform raw rows with *preprocessor* and return *model*'s predictions.

    Re-raises any failure after logging which model was involved.
    """
    try:
        features = preprocessor.transform(new_data_raw)
        return model.predict(features)
    except Exception as err:
        print('error computing model predictions using model', model)
        raise err
def izipcols(df, cols, index=False):
    """Return an iterator over rows of a pandas DataFrame as tuples.

    (Much faster than DataFrame.rows(), which instantiates Series objects.)

    Args:
        df: DataFrame
        cols: list of column names
        index: if True, include the index as the first tuple element (default False)
    """
    series = [df[name] for name in cols]
    if index:
        series.insert(0, df.index)
    return zip(*series)
def rm_brs(line):
    """Replace all runs of whitespace (line breaks, tabs, spaces) with single spaces."""  # noqa: DAR101,DAR201
    return ' '.join(line.split())
def convert_lineage_to_ranks(df):
    """
    Given a lineage summary dataframe, extract the taxID at each taxonomic
    rank into its own column, then ensure every species-rank row carries a
    species-level taxID.

    Raises AssertionError when a species-rank row already obtained a
    species-level taxID from its lineage (flagged as a contradiction that
    needs manual verification, with details printed first).

    Refactor: the eight near-identical `apply` lines are driven from one
    column->rank table.
    """
    def ext_rank(lineage_dict, rank):
        # Full lineage information is stored as a list of dictionaries;
        # return the TaxId of the first entry at the requested rank, or None.
        for entry in lineage_dict:
            if entry["Rank"] == rank:
                return entry["TaxId"]
        return None

    df = df.reset_index(drop=True)
    rank_columns = {
        "superkingdom": "superkingdom",
        "phylum": "phylum",
        "class": "class",
        "order": "order",
        "family": "family",
        "genus": "genus",
        "species_group": "species group",  # column name differs from the rank label
        "species": "species",
    }
    for column, rank in rank_columns.items():
        df[column] = df["Lineage"].apply(ext_rank, args=(rank,))
    contradict_indices = []
    for i in range(len(df)):
        if df.loc[i, "Rank"] == "species":
            if df.loc[i, "species"] is None:
                # Species-rank record without a species entry in its lineage:
                # its own taxID is the species-level taxID.
                df.loc[i, "species"] = df.loc[i, "taxID"]
            else:
                contradict_indices.append(i)
    if len(contradict_indices) >= 1:
        print("Potential contradictory record(s) found while converting the original lineage dataframe into a new " +
              "dataframe where taxIDs for different taxonomic rank are stored in separate columns:")
        for item in contradict_indices:
            print("Manual verification is required for ", df.loc[item, "Scientific_Name"], " (taxID: ",
                  df.loc[item, "taxID"], ") in terms of its species-level taxID.", sep="")
    assert len(contradict_indices) == 0, ("Incorrect full lineage information retrieved for some taxIDs. See above " +
                                          "printed information for details.")
    return df
def multi_prot_grounds_fix(match):
    """
    Return True when the sentence containing *match* quotes the statutory
    list of protected grounds verbatim (i.e. the judge is quoting the law),
    so that such matches can be filtered out to improve accuracy.
    """
    prohibited_str = 'race, religion, nationality, membership in a particular social group, or political opinion.'
    return prohibited_str in match.sent.text
def scale_unit_interval(mat, eps=1e-8):
    """Scales all values in `mat` to be between 0 and 1 (input is not modified)."""
    scaled = mat.copy()
    # In-place ops keep the original dtype semantics of the copy.
    scaled -= scaled.min()
    scaled *= 1.0 / (scaled.max() + eps)
    return scaled
def calc_hilo(min_val, max_val, df, cols_to_test):
    """ Return lowest and highest values from min_val and max_val if present, or calculate from df.

    A None bound is replaced by the min/max over the given columns, but
    only when df has rows; otherwise the (None) bound passes through.
    """
    have_rows = len(df.index) > 0
    if max_val is None and have_rows:
        highest_possible_score = max(max(df[col]) for col in cols_to_test)
    else:
        highest_possible_score = max_val
    if min_val is None and have_rows:
        lowest_possible_score = min(min(df[col]) for col in cols_to_test)
    else:
        lowest_possible_score = min_val
    return lowest_possible_score, highest_possible_score
def captured_article_ok(save_option, saved, post_option, posted):
    """
    Given four boolean variables, decide whether the article counts as captured.

    save_option: Was the code required to save the article?
    saved: Did the code save the article?
    post_option: Was the code required to post the article?
    posted: Did the code post the article?
    """
    # Captured when: nothing was requested; or only saving was requested and
    # happened; or only posting was requested and happened; or both happened.
    return ((save_option == False and post_option == False)
            or (saved == True and post_option == False)
            or (posted == True and save_option == False)
            or (saved == True and posted == True))
import typing
def split_name(value: str) -> typing.Tuple[typing.Optional[str], str]:
    """Take a postgres ident and return the proper namespace & tag value.

    'ns.tag' -> ('ns', 'tag'); a value without a '.' -> (None, value).
    """
    namespace, separator, tag = value.partition('.')
    if not separator:
        return None, value
    return namespace, tag
def check_completer(completer_obj):
    """Helper function to run completer and parse the results as set of strings.

    Returns a factory: call it with a command line (plus optional prefix) to
    obtain the completion values produced by *completer_obj*.
    """
    completer = completer_obj
    def _factory(
        line: str, prefix: "None|str" = "", send_original=False, complete_fn=None
    ):
        """
        Parameters
        ----------
        line
            the command line to complete
        prefix
            appended to *line* (space separated) unless None
        send_original
            if True, return the original result from the completer (e.g. RichCompletion instances ...)
        complete_fn
            if given, use that to get the completions
        Returns
        -------
        completions as set of string if not send
        """
        if prefix is not None:
            line += " " + prefix
        if complete_fn is None:
            completions, _ = completer.complete_line(line)
        else:
            # Build a completion context and hand it to the custom function.
            ctx = completer_obj.parse(line)
            out = complete_fn(ctx)
            # complete_fn may return (completions, extra) or just completions.
            if isinstance(out, tuple):
                completions = out[0]
            else:
                completions = out
        # Normalise to plain stripped strings for easy set comparison.
        values = {getattr(i, "value", i).strip() for i in completions}
        if send_original:
            # just return the bare completions without appended-space for easier assertions
            return values, completions
        return values
    return _factory
def str_to_dict(s, join_symbol="\n", split_symbol=":"):
    """
    Convert a parameter string into a dict.

    e.g. for "a=b&c=d", join_symbol is "&" and split_symbol is "=".

    :param s: the source string
    :param join_symbol: separator between entries
    :param split_symbol: separator between key and value
    :return: dict of stripped key/value pairs
    """
    data = dict()
    for entry in s.split(join_symbol):
        entry = entry.strip()
        if not entry:
            continue
        # Split on the first separator only, so values may contain it too.
        key, value = entry.split(split_symbol, 1)
        data[key.strip()] = value.strip()
    return data
def dist(source, target):
    """Levenshtein edit distance between *source* and *target* (rolling rows).

    Bug fix: the row loop previously started at 0, which overwrote the
    initialised base row while comparing every source character against
    target[-1]; that produced wrong distances (e.g. dist("ab", "ba")
    returned 1 instead of 2) and raised IndexError for an empty target.
    """
    cols = len(source)
    dp = [[0] * (cols + 1) for _ in range(2)]
    # Base row: distance from each source prefix to the empty target.
    for j in range(cols + 1):
        dp[0][j] = j
    for i in range(1, len(target) + 1):
        cur, prev = dp[i % 2], dp[(i - 1) % 2]
        cur[0] = i  # empty source prefix -> i insertions
        for j in range(1, cols + 1):
            if source[j - 1] == target[i - 1]:
                cur[j] = prev[j - 1]
            else:
                cur[j] = 1 + min(prev[j], cur[j - 1], prev[j - 1])
    return dp[len(target) % 2][cols]
def compare_motif_nterm(peptide, motif):
    """N-term position specific motif match ('x' is a wildcard); returns 1/0."""
    for position, wanted in enumerate(motif):
        if wanted != 'x' and peptide[position] != wanted:
            return 0
    return 1
from typing import Any
def monkeypatch(obj: Any, attr: str, new: Any) -> Any:
    """Temporarily replace a method with a new function.

    The previously set method is passed as the first argument to the new function.

    NOTE(review): this is a generator function (it yields); it looks intended
    for use as a context manager, which would require wrapping it with
    contextlib.contextmanager -- confirm at the call sites.
    """
    def patched(*args: Any, **kwargs: Any) -> Any:
        # Delegate to the replacement, handing it the original implementation.
        return new(old, *args, **kwargs)
    old = getattr(obj, attr)
    try:
        setattr(obj, attr, patched)
        yield
    finally:
        # Always restore the original attribute, even if the body raises.
        setattr(obj, attr, old)
def gen_full_movement_rewards(full_states, movement_rewards_partial, n):
    """Expand partial-state movement rewards to every full state.

    Each full state is reduced to its first n elements plus the n elements
    just before its last n, and the partial table is looked up with that key.
    """
    return {
        state: movement_rewards_partial[state[:n] + state[-n - n:-n]]
        for state in full_states
    }
def stringToBool(s):
    """
    Convert a string (True/true/1) to bool
    s -- string/int value
    return -- True/False
    """
    # Membership uses ==, so the int 1 also matches here.
    return s in ("True", "true", "1", 1)
def layer(lines, num_overlaps, comb=' '):
    """
    make front-padded overlapping sentences

    Pads the front with 'PAD' markers and joins each sliding window of
    *num_overlaps* lines with *comb*.
    """
    if num_overlaps < 1:
        raise Exception('num_overlaps must be >= 1')
    padded = ['PAD'] * min(num_overlaps - 1, len(lines))
    windows = [comb.join(lines[start:start + num_overlaps])
               for start in range(len(lines) - num_overlaps + 1)]
    return padded + windows
import subprocess
def _diff_files(file1, file2):
    """diff two files using linux `diff` (diff exits nonzero when files differ,
    so the output is recovered from the raised error in that case)"""
    try:
        output = subprocess.check_output(["diff", file1, file2])
    except subprocess.CalledProcessError as err:
        output = err.output
    return output.decode("utf8")
import math
def calculate_y_matrix(rs, xs, bc, tau, shift):
"""
Compute the y matrix from various branch properties
Parameters
----------
rs : float
Branch resistance
xs : float
Branch reactance
bc : float
Branch charging susceptance
tau : float
Branch transformer tap ratio
shift : float
Branch transformer phase shift
Returns
-------
list : list of floats representing the y matrix
[Y(ifr,vfr), Y(ifr,vfj), Y(ifr,vtr), Y(ifr,vtj),
Y(ifj,vfr), Y(ifj,vfj), Y(ifj,vtr), Y(ifj,vtj),
Y(itr,vfr), Y(itr,vfj), Y(itr,vtr), Y(itr,vtj),
Y(itj,vfr), Y(itj,vfj), Y(itj,vtr), Y(itj,vtj)]
"""
bc = bc/2
tr = tau * math.cos(math.radians(shift))
tj = tau * math.sin(math.radians(shift))
mag = rs**2 + xs**2
a = rs/(tau**2*mag) # c1
b = (1/tau**2) * (xs/mag - bc) # c2
c = (-rs*tr - xs*tj)/(tau**2 * mag) # c3
d = (rs*tj - xs*tr)/(tau**2 * mag) # c4
e = -b # -c2
f = a # c1
g = -d # -c4
h = c # c3
i = (xs*tj - rs*tr)/(tau**2 * mag) # c7
j = (-rs*tj - xs*tr)/(tau**2 * mag) # c8
k = rs/mag # c5
l = xs/mag - bc # c6
m = -j # -c8
n = i # c7
o = -l # -c6
p = k # c5
# y = [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p]
y_dict = {}
y_dict[('ifr', 'vfr')] = a
y_dict[('ifr', 'vfj')] = b
y_dict[('ifr', 'vtr')] = c
y_dict[('ifr', 'vtj')] = d
y_dict[('ifj', 'vfr')] = e
y_dict[('ifj', 'vfj')] = f
y_dict[('ifj', 'vtr')] = g
y_dict[('ifj', 'vtj')] = h
y_dict[('itr', 'vfr')] = i
y_dict[('itr', 'vfj')] = j
y_dict[('itr', 'vtr')] = k
y_dict[('itr', 'vtj')] = l
y_dict[('itj', 'vfr')] = m
y_dict[('itj', 'vfj')] = n
y_dict[('itj', 'vtr')] = o
y_dict[('itj', 'vtj')] = p
return y_dict | 58bb17be7314982013b73ef8cdbee93388746011 | 40,752 |
import random
def make_random_ints_no_dups(num, lower_bound, upper_bound):
"""
Generate a list containing num random ints between
lower_bound and upper_bound. upper_bound is an open bound.
The result list cannot contain duplicates.
"""
result = []
rng = random.Random()
for i in range(num):
while True:
candidate = rng.randrange(lower_bound, upper_bound)
if candidate not in result:
break
result.append(candidate)
return result | e8854c1054b99828551a155b1f9f62e1fbd4c0cc | 40,754 |
def taxon_file(taxon_id, page_no=1):
    """Build the taxon file name (pages beyond the first carry a suffix)."""
    if page_no > 1:
        return f'taxon_id_{taxon_id}_{page_no}.html'
    return f'taxon_id_{taxon_id}.html'
def max_length(down: list, right: list) -> int:
    """Finds the maximum weight of a right/down path given edge weights.

    :param down: a matrix of down-edge weights (down[row][col])
    :type down: list (of lists (of ints))
    :param right: a matrix of right-edge weights (right[row][col])
    :type right: list (of lists (of ints))
    :returns: the maximal path weight through the grid
    :rtype: int
    """
    width, height = len(down[0]), len(right)
    best = [[0] * width for _ in range(height)]
    # Along the top edge only rightward moves are possible.
    for col in range(1, width):
        best[0][col] = best[0][col - 1] + right[0][col - 1]
    # Along the left edge only downward moves are possible.
    for row in range(1, height):
        best[row][0] = best[row - 1][0] + down[row - 1][0]
    for row in range(1, height):
        for col in range(1, width):
            best[row][col] = max(best[row - 1][col] + down[row - 1][col],
                                 best[row][col - 1] + right[row][col - 1])
    return best[height - 1][width - 1]
def delete_profile(db, user_id, profile_id):
    """Deletes a profile for the given user.

    Args:
        db (object): The db object
        user_id (int): The id of the user.
        profile_id (int): The id of the profile to delete.
    Returns:
        True if a record was deleted, False otherwise
    """
    db.execute('''DELETE FROM profile
               WHERE user_id=? and id=?''',
               (user_id, profile_id))
    return db.rows_affected != 0
import copy
def generate_keyed_value_combinations(args):
"""
From this:
args = {"attr1": ["a", "b", "c"], "attr2": ["1", "2"], "attr3": ["A"]}
To this:
[
{u'attr1': u'a', u'attr2': u'1', u'attr3': u'A'},
{u'attr1': u'b', u'attr2': u'1', u'attr3': u'A'},
{u'attr1': u'c', u'attr2': u'1', u'attr3': u'A'},
{u'attr1': u'a', u'attr2': u'2', u'attr3': u'A'},
{u'attr1': u'b', u'attr2': u'2', u'attr3': u'A'},
{u'attr1': u'c', u'attr2': u'2', u'attr3': u'A'}
]
"""
# Return empty list if empty
if not args:
return []
# Turn `args` into a list of lists of key-value tuples:
# [
# [(u'attr2', u'1'), (u'attr2', u'2')],
# [(u'attr3', u'A')],
# [(u'attr1', u'a'), (u'attr1', u'b'), (u'attr1', u'c')]
# ]
key_value_lists = [[(key, val) for val in args[key]] for key in args.keys()]
# Store the first, but as objects
# [{u'attr2': u'1'}, {u'attr2': u'2'}]
results = key_value_lists.pop(0)
results = [{d[0]: d[1]} for d in results]
# Iterate the remaining
# Take the next list to fuse with existing results
for l in key_value_lists:
new_results = []
for res in results:
for key_val in l:
# create a new clone of object in result
obj = copy.deepcopy(res)
# to be used with every incoming new value
obj[key_val[0]] = key_val[1]
# and pushed into new_results
new_results.append(obj)
results = new_results
return results | 5a07d45e93ce5ca308fb87fd76c43050a2c154ae | 40,761 |
import os
def is_geckodriver() -> bool:
    """
    Checks if a geckodriver executable can be found via the $PATH.
    @rtype: bool
    """
    # shutil.which is cross-platform: the previous hand-rolled lookup split
    # $PATH on ':' and joined with '/', which only works on POSIX, and it
    # did not check that the file is executable.
    import shutil
    return shutil.which("geckodriver") is not None
import os
def get_pathext(default_pathext=None):
    """
    Returns $PATHEXT from the environment, falling back to the supplied
    default (or the built-in .COM/.EXE/.BAT/.CMD list).
    """
    if default_pathext is None:
        default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
    return os.environ.get('PATHEXT', default_pathext)
def validate_mask(mask):
    """Check if the netmask is valid
    return mask as string in the range [0, 32] or False if not valid
    """
    if not mask:
        return False
    stripped = mask.strip()
    # isdigit() already rejects signs, so only the upper bound can fail.
    if stripped.isdigit() and 0 <= int(stripped) <= 32:
        return stripped
    return False
import typing
import os
def read_path(path: typing.Optional[str]) -> typing.Optional[str]:
    """Expand a leading ``~`` in *path*; a ``None`` path passes through."""
    return None if path is None else os.path.expanduser(path)
def parse_request(event):
    """
    Parse an API Gateway event and extract the caller identifiers.

    Expects the event's ``queryStringParameters`` dict to contain the
    ``user_id`` and ``org_id`` key/value pairs.

    :param event: API Gateway proxy event
    :return: a dict containing ``user_id`` and ``org_id`` (None when absent)
    """
    # API Gateway sends an explicit None (not a missing key) when no query
    # string is present, so `or {}` is needed in addition to the default.
    query_params = event.get("queryStringParameters") or {}
    return {
        "user_id": query_params.get('user_id', None),
        "org_id": query_params.get('org_id', None),
    }
def b_task(request):
    """Fixture-style helper: hand back the parametrized value (an id)."""
    param_value = request.param
    return param_value
import re
def strip_trailing_whitespace(content):
    """Remove runs of spaces that appear immediately before a newline.

    Different versions of the json lib disagree about trailing
    whitespace, so normalise it away before comparing output.
    """
    pattern = re.compile(r' +\n')
    return pattern.sub('\n', content)
import asyncio
def async_testable(foo):
    """Wrap an async function so it can be called (and tested) synchronously.

    :param coro foo: The async function to test.
    :return: a sync wrapper that runs *foo* to completion and returns its
        result; metadata (``__name__`` etc.) is preserved via functools.wraps.
    """
    import functools

    @functools.wraps(foo)
    def test_inner(*args, **kwargs):
        # asyncio.run creates and tears down a fresh event loop per call,
        # which works in any thread and avoids the deprecated
        # get_event_loop() behaviour outside a running loop.
        return asyncio.run(foo(*args, **kwargs))

    return test_inner
def tally_by_taxon(nexus_obj):
    """
    Count, per taxon, which sites are coded with each state (i.e. taxon 1
    has three sites coded as "A" and 1 coded as "G").

    :param nexus_obj: A `NexusReader` instance
    :type nexus_obj: NexusReader

    :return: dict mapping taxon -> {state: [site labels]}

    :raises AssertionError: if nexus_obj is not a nexus
    :raises NexusFormatException: if nexus_obj does not have a `data` block

    e.g. {
        'taxon1': {'state1': ['site1', 'site2'], 'state0': ['site3'], }
        'taxon2': {'state1': ['site2'], 'state0': ['site1', 'site3'], }
    }
    """
    counts = {}
    for taxon, sites in nexus_obj.data:
        counts[taxon] = per_state = {}
        for index, state in enumerate(sites):
            # Fall back to the numeric position when no charlabel exists.
            site_label = nexus_obj.data.charlabels.get(index, index)
            per_state.setdefault(state, []).append(site_label)
    return counts
def compute_parent_nodes(root_node, stage, n_nodes):
    """Compute the parent node ids of the specified stage.

    Starting from *root_node*, each successive stage expands every node
    into *n_nodes* children named '<parent>.<i>' (i in 1..n_nodes).
    """
    frontier = [root_node]
    for _ in range(stage - 1):
        frontier = [
            '{}.{}'.format(parent, child_index)
            for parent in frontier
            for child_index in range(1, n_nodes + 1)
        ]
    return frontier
def _translate_message(message):
"""Translate the Message model to a dict."""
return {
'id': message['id'],
'project_id': message['project_id'],
'request_id': message['request_id'],
'resource_type': message['resource_type'],
'resource_uuid': message.get('resource_uuid'),
'event_id': message['event_id'],
'message_level': message['message_level'],
'created_at': message['created_at'],
'expires_at': message.get('expires_at'),
} | d8ce8fc82441352e9a2a3ce0334e4afbad3679e8 | 40,779 |
from typing import List
import os
import glob
import time
def sort_files_by_date(
    dir_name: str,
    reverse: bool = False,
    recursive: bool = False,
    verbose: bool = False,
) -> List[str]:
    """List files in a folder, sorted by date last modified.

    :param dir_name: Directory to search for files
    :param reverse: Last modified shown at bottom if reverse=False (default), else top
    :param recursive: Also search subdirectories for files
    :param verbose: If the modified files and dates should be displayed

    :return: List of the file paths in the order specified

    $ python sort_files.py
    03/14/2022 :: 13:38:27 --> models/epoch=1-step=1072.ckpt
    03/14/2022 :: 13:39:33 --> models/epoch=3-step=1071.ckpt
    03/14/2022 :: 13:41:30 --> models/epoch=3-step=1071_1.ckpt
    """
    dir_name = dir_name if dir_name.endswith("/") else f"{dir_name}/"
    # glob only descends into subdirectories when the pattern contains "**"
    # AND recursive=True; the previous "*" pattern ignored the flag entirely.
    pattern = dir_name + ("**/*" if recursive else "*")
    # Keep regular files only (directories also match the glob).
    list_of_files = filter(
        os.path.isfile, glob.glob(pattern, recursive=recursive)
    )
    files = sorted(list_of_files, key=os.path.getmtime, reverse=reverse)
    if verbose:
        for file_path in files:
            timestamp_str = time.strftime(
                "%m/%d/%Y :: %H:%M:%S", time.gmtime(os.path.getmtime(file_path))
            )
            print(timestamp_str, " -->", file_path)
    return files
import requests
def get_suning_html(url):
    """
    Fetch and decode the page source of the given URL.

    :param url: target URL to download
    :return: the decoded page source (str)
    """
    # A plain GET without custom headers is sufficient for this endpoint.
    response = requests.get(url)
    return response.content.decode()
def clean_error(err):
    """
    Take stderr bytes returned from MicroPython and attempt to create a
    non-verbose error message.

    Falls back to the full decoded text when the traceback cannot be
    split, and to a generic message when *err* is empty/None.
    """
    if not err:
        return "There was an error."
    decoded = err.decode("utf-8")
    try:
        # The second-to-last CRLF-separated line carries the error summary.
        return decoded.split("\r\n")[-2]
    except Exception:
        return decoded
def deep_index(lst, w):
    """Return the index of the first element of *lst* containing *w*.

    Helper for user_attack to find the corresponding item in weapon_dict.
    Raises IndexError when no element contains *w*.
    """
    matches = [position for position, grouping in enumerate(lst) if w in grouping]
    return matches[0]
def hello_world():
    """Return the original greeting string."""
    greeting = "Hello"
    return greeting
def set_state(S, action, P_no):
    """
    Return a copy of board state *S* with player *P_no*'s stone placed
    at position *action*.

    [Inputs]
        S is numpy array.
    [Returns]
        Sa is a copy of S with the new action applied; S is unmodified.
    """
    new_state = S.copy()
    new_state[action] = P_no
    return new_state
import inspect
def function_has_return(func):
    """Caution: this will return True if the function contains the word 'return'"""
    source_lines, _ = inspect.getsourcelines(func)
    for text in source_lines:
        if "return" in text:
            return True
    return False
def set_to_list(setstring, delimiter="|", affix="|"):
    """Turn a delimited "set string" back into a list of its members.

    A string equal to the bare affix represents the empty set.
    """
    if setstring == affix:
        return []
    return setstring.strip(affix).split(delimiter)
def binary_search(lo, hi, p, e):
    """Bisection search for x in [lo, hi] such that condition 'e' holds.

    p(lo, hi) decides when the interval is small enough to stop;
    e(x) determines when to keep the left half.
    """
    while not p(lo, hi):
        mid = (lo + hi) / 2
        if e(mid):
            hi = mid
        else:
            lo = mid
    return (lo + hi) / 2
from typing import List
from typing import Tuple
def get_msa_lengths(list_msa: List[List[Tuple[str, str]]], nseq: int) -> List[int]:
    """Get the alignment length of each MSA in a list.

    All MSA must have at least nseq sequences.

    Args:
        list_msa: list of MSAs; an MSA is a list of (name, sequence) tuples.
        nseq: required number of sequences per MSA.

    Returns:
        List[int]: the (maximum) sequence length of each MSA.

    Raises:
        ValueError: if any MSA holds a number of sequences different from nseq.
    """
    # Lengths of every sequence, grouped per MSA.
    per_msa_lengths = [
        [len(sequence) for _, sequence in msa] for msa in list_msa
    ]
    n_different_seq = sum(
        1 for seq_lengths in per_msa_lengths if len(seq_lengths) != nseq
    )
    if n_different_seq > 0:
        msg = (
            f"Find {n_different_seq} files with less than {nseq} sequences in the msa. "
            f"All msa files must have at least {nseq} sequences. "
            f"Use `from biotransformers.utils.msa_utils.msa_to_remove` to get the file to remove."
        )
        raise ValueError(msg)
    return [max(seq_lengths) for seq_lengths in per_msa_lengths]
import os
import re
def get_cmor_output_files(input_path, start_year, end_year):
    """
    Return a list of CMORized output files covering start_year..end_year.

    Parameters:
        input_path (str): the directory to look in
        start_year (int): the first year of climos to add to the list
        end_year (int): the last year
    Returns:
        cmor_list (list): matching cmor files, or None when input_path
        does not exist
    """
    if not os.path.exists(input_path):
        return None
    # Files are named ..._YYYY01-YYYY12.nc; match the requested span.
    pattern = r'_{start:04d}01-{end:04d}12\.nc'.format(
        start=start_year, end=end_year)
    matches = []
    for root, _dirs, files in os.walk(input_path):
        matches.extend(
            os.path.join(root, name)
            for name in files
            if re.search(pattern, name)
        )
    return matches
def inSameFamily(desc1, status_entry1, desc2, status_entry2):
    """Given two descriptors and their status entries, report whether the
    two nodes list each other as members of the same family."""
    print1 = status_entry1.fingerprint
    print2 = status_entry2.fingerprint
    fam1 = set(member.strip(u'$') for member in desc1.family)
    fam2 = set(member.strip(u'$') for member in desc2.family)
    # True only if both nodes list each other
    return (print1 in fam2) and (print2 in fam1)
from pathlib import Path
def check_arg_output_dir(output_dir: str) -> bool:
    """Ensure the output directory exists, creating it (and any missing
    parents) when needed.

    :param output_dir: the output directory
    :return: True once the directory exists
    """
    target = Path(output_dir)
    if not target.is_dir():
        target.mkdir(parents=True, exist_ok=True)
    return True
def kWh2therms(x):
    """Convert kilowatt-hours to therms (1 therm = 29.3 kWh)."""
    kwh_per_therm = 29.3
    return x / kwh_per_therm
import functools
def required_ploidy(n, return_val):
    """
    Decorator factory for methods on GenotypeArrays: the wrapped method
    only runs when self.variant.ploidy equals *n*; otherwise
    *return_val* is returned unchanged.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            instance = args[0]
            if instance.variant.ploidy == n:
                return func(*args, **kwargs)
            return return_val
        return wrapper
    return decorator
def _short_mac(mac: str) -> str:
"""Get the short mac address from the full mac."""
return mac.replace(":", "").upper()[-6:] | f044dec93f0a635c3fc3245355137905c9a1e053 | 40,800 |
def bin_evals (opts, evals):
    """Bin eigenvalues into opts.nbin+1 energy bins and return a list of
    [energy, N] pairs, where N is the number of eigenvalues in the bin
    centered around that energy."""
    ## they should be sorted but let's make sure
    ordered = sorted(evals)
    emin = ordered[0]
    emax = ordered[-1]
    width = (emax - emin) / opts.nbin
    histogram = []
    for index in range(opts.nbin + 1):
        center = emin + index * width
        left = center - 0.5 * width
        right = center + 0.5 * width
        hits = sum(1 for value in ordered if left <= value <= right)
        histogram.append([center, hits])
    ## check that total sum is number of eigenvalues
    total = sum(entry[1] for entry in histogram)
    if total != len(evals):
        raise Exception("Inconsistent integrated DOS and number of eigenvalues: {0} vs {1}".format(total, len(evals)))
    return histogram
import os
def print_directory_contents(sPath):
    """
    This function takes the name of a directory
    and prints out the paths of files within that
    directory as well as any files contained in
    contained directories.

    This function is similar to os.walk. Please don't
    use os.walk in your answer. We are interested in your
    ability to work with nested structures.
    """
    for entry in os.listdir(sPath):
        full_path = os.path.join(sPath, entry)
        if os.path.isfile(full_path):
            print("File: " + full_path + "\n")
        else:
            # Recurse into subdirectories.
            print("I'm on " + str(entry) + " now.")
            print_directory_contents(full_path)
    return 0
def requirements():
    """Load the requirements file from the current directory."""
    with open('requirements.txt') as handle:
        contents = handle.read()
    return contents
import os
def get_file_size(file):
    """
    Return a human-readable size string for the given file.

    :param file: path to the file
    :return: the size formatted as B / KB / MB / GB
    """
    size = os.stat(file).st_size
    one_kb = 1024
    one_mb = one_kb * 1024
    one_gb = one_mb * 1024
    if size >= one_gb:
        return "{:.2f} GB".format(size / one_gb)
    if size >= one_mb:
        return "{:.2f} MB".format(size / one_mb)
    if size >= one_kb:
        return "{:.2f} KB".format(size / one_kb)
    return "{:d} B".format(size)
def size_of_shape(x):
    """
    Return the size of the leading dimension of a grid.

    Parameters:
        x (np.array): Grid to get size from

    Returns:
        int: size of the first dimension of the grid passed in
    """
    dims = x.shape
    return dims[0]
def split_wins_and_losses(df, separator):
    """
    Split each splittable column by the given separator into new
    '<col>_win' / '<col>_loss' columns (values taken from the first row),
    deleting the original column.

    args:
        df: a data frame
        separator: separator to split by
    returns: the df with split columns
    """
    # Snapshot the column labels first: adding/deleting columns while
    # iterating over the DataFrame itself mutates the index being walked.
    for col in list(df.columns):
        first_row_parts = df[col].str.split(separator)[0]
        if len(first_row_parts) > 1:
            df[col + '_win'] = first_row_parts[0]
            df[col + '_loss'] = first_row_parts[1]
            del df[col]
    return df
from pathlib import Path
def get_datasets_in_path(path: Path):
    """
    Return the names of all datasets with stored information under *path*.

    Used to check which datasets already have features extracted; each
    dataset is represented by a '<name>.json' file.
    """
    return {json_file.stem for json_file in path.glob('*.json')}
def replace_characters(main_string, chars, new_string):
    """
    Replace every character of *chars* found in *main_string* with *new_string*.

    Parameters:
        main_string (str): The string for which you want to make the replacement
        chars (str): The characters that you want to replace
        new_string (str): The new string that will replace the previous characters

    Return:
        The original string, but with the new characters now incorporated.
    """
    # str.replace is already a no-op when the character is absent, so no
    # membership test (or blanket try/except) is needed.
    for char in chars:
        main_string = main_string.replace(char, new_string)
    return main_string
def get_index_action(index_name, document_type, document):
    """Generate a bulk-index action for a given document.

    :param index_name: Elasticsearch index to use
    :type index_name: str
    :param document_type: Elasticsearch document type to use
    :type document_type: str
    :param document: Document to be indexed
    :type document: dict
    :return: Action to be passed in a bulk request
    :rtype: dict
    """
    action = dict(_index=index_name, _type=document_type, _source=document)
    # Use the same _id field in elasticsearch as in the database table
    if '_id' in document:
        action['_id'] = document['_id']
    return action
def get_api_url(repo_url):
    """Return the GitHub API URL for the repository.

    Handles SSH remotes ("git@github.com:owner/repo.git") and
    http/https remotes ("https://github.com/owner/repo[.git]").
    Returns None for unrecognised URL schemes.
    """
    # If the remote url is set for SSH keys
    if repo_url[:3] == "git":
        path = repo_url[repo_url.index(":") + 1:]
    # If the remote url is set for http/https
    elif repo_url[:4] == "http":
        path = repo_url[repo_url.index(".com/") + 5:]
    else:
        return None
    # Strip the ".git" suffix in full (the old code cut only 3 chars off
    # SSH urls, leaving a trailing dot) and only when actually present.
    if path.endswith(".git"):
        path = path[:-4]
    return "https://api.github.com/repos/" + path
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.