content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def unify_str_list(strlist):
    """Return the unique strings from ``strlist`` in sorted order.

    :param strlist: iterable of strings
    :return: sorted list with duplicates removed
    """
    unique_strings = set(strlist)
    return sorted(unique_strings)
|
adb3a3752654bca6a9db7aa0206c41128697a8fc
| 22,770
|
import datetime
def myconverter(o):
    """JSON serializer hook for objects the default encoder cannot handle.

    Use as ``json.dumps(data, default=myconverter)``.

    :param o: the unserializable object handed over by the encoder
    :return: string representation for ``datetime.datetime`` values
    :raises TypeError: for any other type, per the ``default`` contract
    """
    if isinstance(o, datetime.datetime):
        return str(o)
    # Falling through and implicitly returning None would silently encode any
    # unknown object as JSON null; the documented contract for a "default"
    # hook is to raise TypeError so the caller notices.
    raise TypeError(
        "Object of type {} is not JSON serializable".format(type(o).__name__))
|
773d278925db88b0d45151a5c4326bcf8bfb8f9f
| 22,771
|
def get_header(cursor):
    """Return the column names for a DB cursor's result set.

    Any ``table.`` prefix in a column name is stripped, keeping only the
    part after the first dot.
    """
    names = []
    for column in cursor.description:
        name = column[0]
        if "." in name:
            name = name[name.find(".") + 1:]
        names.append(name)
    return names
|
fa8d510fe769be4e34064e51744fdccdf3d33339
| 22,772
|
import os
import itertools
def format_in_columns(elements, total_width=None, sep=" ", indent=" ", min_height=10):
    """Arrange strings into left-justified columns for terminal display.

    :param elements: strings to lay out (must be non-empty)
    :param total_width: target line width; defaults to the terminal width,
        falling back to 80 when not attached to a terminal
    :param sep: separator between columns
    :param indent: prefix for every row
    :param min_height: minimum number of rows per column

    >>> print(format_in_columns([str(i) for i in range(100)], 50))
     0 10 20 30 40 50 60 70 80 90
     1 11 21 31 41 51 61 71 81 91
     2 12 22 32 42 52 62 72 82 92
     3 13 23 33 43 53 63 73 83 93
     4 14 24 34 44 54 64 74 84 94
     5 15 25 35 45 55 65 75 85 95
     6 16 26 36 46 56 66 76 86 96
     7 17 27 37 47 57 67 77 87 97
     8 18 28 38 48 58 68 78 88 98
     9 19 29 39 49 59 69 79 89 99
    """
    if not total_width:
        try:
            total_width, _ = os.get_terminal_size()
        except OSError:  # was a bare except; get_terminal_size raises OSError
            total_width = 80
    widest = min(max(len(k) for k in elements), total_width)
    # how many columns fit in the requested width (at least one)
    n_columns = max((total_width - len(indent)) // (widest + len(sep)), 1)
    height = max(min_height, (len(elements) // n_columns) + 1)
    # slice the flat element list into consecutive columns of `height` items
    columns = [[elem for (_, elem) in group]
               for _, group in itertools.groupby(enumerate(elements),
                                                 lambda p: p[0] // height)]
    rows = itertools.zip_longest(*columns)
    # cap each column's width so a single long entry cannot blow the line
    col_max = total_width - len(sep) * (len(columns) - 1)
    column_lens = [min(max(map(len, column)), col_max) for column in columns]
    return '\n'.join(
        indent + sep.join((cell or "").ljust(column_lens[col])
                          for col, cell in enumerate(row))
        for row in rows)
|
bc28bd57a1c399505782a8bceb752aa3967d3858
| 22,773
|
import os
def get_arp():
    """Run the ``arp`` command and return its records as a list of strings.

    :return: list of ARP table lines with the header and the trailing
        empty line removed
    """
    # Read the command output directly instead of round-tripping through a
    # predictable path in /tmp (world-writable temp paths are a classic
    # symlink/clobber hazard, and the file was never cleaned up).
    with os.popen('arp') as pipe:
        arp = pipe.read()
    arp_array = arp.split('\n')
    arp_array.pop(0)   # drop the column-header line
    arp_array.pop(-1)  # drop the empty string after the final newline
    return arp_array
|
d825c7052c70c78b2cd44223e73460f07c156caf
| 22,775
|
import time
def gm_timestamp():
    """Generate a timestamp in microseconds.

    :return: current Unix time as an integer number of microseconds
    """
    # The original truncated time.time() to whole seconds BEFORE scaling
    # (int(time.time()) * 1000000), so every timestamp was a multiple of
    # 1e6 and sub-second resolution was lost. Scale first, then truncate.
    return int(time.time() * 1000000)
|
0273d31a8eec17cd149d2b1b8087beb376f77f17
| 22,776
|
def _extract_trans_diff(data):
"""Get difference of trained and untrained stitching matrices"""
before = data['before']
after = data['after']
w = after['w'] - before['w']
b = after['b'] - before['b']
return {'w': w, 'b': b}
|
76286867f4d2ad31e64a1f71079175f589ee0d2f
| 22,778
|
def get_other_player(player):
    """Return the opposing player's symbol ("X" for "O", "O" otherwise)."""
    if player == "O":
        return "X"
    return "O"
|
d3626922d2097f115511e067ded461a69a6aded8
| 22,779
|
def sep_nondummies(data):
    """Split columns into nominal and continuous groups by cardinality.

    A column with more than 10 distinct non-null values is treated as
    continuous; one with 3-10 distinct values is nominal (needs dummifying);
    binary columns are dropped from both groups.

    Args:
        data: DataFrame containing the dataset
    Returns:
        list ``[nominal, continuous]`` of column-name lists
    """
    nominal, continuous = [], []
    for column in data.columns:
        cardinality = data[column].dropna().nunique()
        if cardinality > 10:
            continuous.append(column)
        elif cardinality > 2:
            nominal.append(column)
    return [nominal, continuous]
|
f666a12c009dc44eb11335fbe2d3e398bd5415c8
| 22,780
|
def PortToTag(switch, port):
    """Return the metrics tag for ``port`` on ``switch``."""
    template = 'switches.%s.%d'
    return template % (switch, port)
|
9c5fc26f021d20b2c14838274efada3ea2a513b9
| 22,781
|
def _compose_attribute_file(attributes):
    """Make the contents of an osg attributes file.

    Renders ``attributes`` (a mapping of variable name -> value, where a
    value may be None, a scalar, or a list) into a /bin/sh script that
    assigns each variable and then exports it.
    """
    def islist(var):
        # small helper so the list checks below read cleanly
        return isinstance(var, list)
    variable_string = ""
    export_string = ""
    # keep a list of array variables
    array_vars = {}
    keys = sorted(attributes.keys())
    for key in keys:
        value = attributes[key]
        if value is None:
            # record the absence as a comment instead of emitting an empty var
            variable_string += "# " + key + " is undefined\n"
            continue
        # Special case for SOFTWARE-1567 (let user explicitly unset OSG_APP)
        if key == 'OSG_APP' and (value == 'UNSET' or (islist(value) and 'UNSET' in value)):
            variable_string += 'unset OSG_APP\n'
        elif islist(value):
            # one assignment line per list element (shell array-style entries)
            for item in value:
                variable_string += '%s="%s"\n' % (key, item)
        else:
            variable_string += '%s="%s"\n' % (key, value)
        if len(key.split('[')) > 1:
            # key looks like name[index]: export the bare array name once
            real_key = key.split('[')[0]
            if real_key not in array_vars:
                export_string += "export %s\n" % key.split('[')[0]
                array_vars[real_key] = ""
        else:
            # 'OSG_APP' is a special case for SOFTWARE-1567
            if value is not None and not (key == 'OSG_APP' and value == 'UNSET'):
                export_string += "export %s\n" % key
    # assemble the final script; the template lines are intentionally at
    # column 0 because they become the literal file contents
    file_contents = """\
#!/bin/sh
#---------- This file automatically generated by osg-configure
#---------- This is periodically overwritten. DO NOT HAND EDIT
#---------- Instead, write any environment variable customizations into
#---------- the config.ini [Local Settings] section, as documented here:
#---------- https://opensciencegrid.github.io/docs/other/configuration-with-osg-configure/#local-settings
#--- variables -----
%s
#--- export variables -----
%s
""" % (variable_string, export_string)
    return file_contents
|
4528094683a29f0bda81599667cb85a97a7d21bc
| 22,784
|
from typing import Optional
from typing import Dict
from typing import Any
import json
def input_dictionary_to_parameter(input_dict: Optional[Dict[str, Any]]) -> str:
    """Convert a JSON input dict to a quote-escaped parameter string.

    YAML component definitions have no keyword for applying quote escapes,
    so the JSON argument's quotes are escaped here by double-encoding.

    Args:
        input_dict: The input json dictionary.
    Returns:
        The encoded string used for parameter ('' for empty/None input).
    """
    if not input_dict:
        return ''
    serialized = json.dumps(input_dict)
    quoted = json.dumps(serialized)
    # drop the surrounding quotes added by the second encoding: "foo" -> foo
    return quoted[1:-1]
|
e9470b5050e5260f38b5423d25fb47d197fdbe56
| 22,786
|
def valid_int(param, allow_zero=False, allow_negative=False):
    """Validate that ``param`` is an integer-like number and return it.

    :param param: value to validate
    :param allow_zero: when False, zero is rejected
    :param allow_negative: when False, negative values are rejected
    :raises TypeError: if ``param`` does not support numeric addition
    :raises ValueError: if ``param`` violates the zero/negative constraints
    """
    probe = param
    try:
        # deliberately permissive duck-typed check: anything addable passes
        probe += 1
    except TypeError:
        raise TypeError(
            "Expected integer but found argument of type '{}'".format(type(param)))
    if param < 0 and not allow_negative:
        raise ValueError("Expected nonnegative number but got '{}'".format(param))
    if param == 0 and not allow_zero:
        raise ValueError("Expected nonzero number but got '{}'".format(param))
    return param
|
e18f6ea4954ad1828ce942239d27219670b7e344
| 22,787
|
def version_to_string(version, parts=3):
    """Convert an n-part version number encoded as a hexadecimal value to a
    dotted string.

    :param version: version encoded as 0xMMmmpp (one byte per part)
    :param parts: how many parts to render (1, 2 or 3)
    :return: the dotted version string
    """
    shifts = [16]
    if parts > 1:
        shifts.append(8)
    if parts > 2:
        shifts.append(0)
    return '.'.join(str((version >> shift) & 0xff) for shift in shifts)
|
c766a2efdede43149d2592fbf3cbea1f6f279b4a
| 22,788
|
def make_html_table():
    """ Returns a string of html table data.

    The returned document demonstrates two table features: a header cell
    spanning two columns (colspan) and a header cell spanning two rows
    (rowspan). The markup is a fixed literal and takes no inputs.
    """
    html = """
    <html>
    <h1> Simple HTML </h1>
    <body>
    <h4>Cell that spans two columns:</h4>
    <table border="1">
    <tr>
    <th>Name</th>
    <th colspan="2">Telephone</th>
    </tr>
    <tr>
    <td>Tony Stark</td>
    <td>555 77 854</td>
    <td>555 77 855</td>
    </tr>
    </table>
    <h4>Cell that spans two rows:</h4>
    <table border="1">
    <tr>
    <th>First Name:</th>
    <td>Pepper Potts</td>
    </tr>
    <tr>
    <th rowspan="2">Telephone:</th>
    <td>555 77 854</td>
    </tr>
    <tr>
    <td>555 77 855</td>
    </tr>
    </table>
    </body>
    </html>
    """
    return html
|
92b0b82160ee7f0fff6a66c7bb9d5852642e956b
| 22,790
|
from pathlib import Path
from typing import Tuple
def create_alignment_job_directory_structure(output_directory: Path) -> Tuple[Path, Path, Path]:
    """Create directory structure for a tilt-series alignment job.

    :param output_directory: root directory for the job
    :return: the (stacks, external, tilt_series) subdirectory paths
    """
    created = []
    for subdir_name in ('stacks', 'external', 'tilt_series'):
        subdir = output_directory / subdir_name
        subdir.mkdir(parents=True, exist_ok=True)
        created.append(subdir)
    stacks_directory, external_directory, metadata_directory = created
    return stacks_directory, external_directory, metadata_directory
|
4b5f4b2ad1854dda0b269289942b6baa42e4ade1
| 22,791
|
import struct
import pickle
def indexed_pickler_load_keys(path):
"""
Return the list of keys in the indexed pickle
"""
with open(path, "rb") as f:
index_len = struct.unpack("Q", f.read(8))[0]
return pickle.loads(f.read(index_len)).keys()
|
e813568941929ba444aa5aa7c94578129c68a166
| 22,794
|
import os
import logging
def _CheckIsUploadable(filename, local_dir):
"""Check whether the file is uploadable.
Args:
filename: The name of the file.
local_dir: The local directory where the file resides.
Returns:
is_valid: True if the file should be uploaded.
full_local_path: The full local path of the file to be uploaded.
"""
full_local_path = os.path.join(local_dir, filename)
if not os.path.exists(full_local_path):
logging.error('Local file at %s does not exist!', full_local_path)
return False, full_local_path
elif os.path.islink(full_local_path):
logging.info('Skip symlink %s.', full_local_path)
return False, full_local_path
elif os.path.isdir(full_local_path):
logging.info('Skip directory %s.', full_local_path)
return False, full_local_path
else:
return True, full_local_path
|
e30cde253992aa4fc5111943663c805f6123e8f3
| 22,795
|
import re
def convert_to_filename(sample_name):
    """Convert ``sample_name`` to a valid filename.

    Leading/trailing whitespace is removed, internal spaces become
    underscores, and anything that is not alphanumeric, a dash, an
    underscore or a unicode word character is dropped.
    """
    underscored = sample_name.strip().replace(" ", "_")
    return re.sub(r"(?u)[^-\w]", "", underscored)
|
71fe3f2dee9f633087358f43a930820bc49d0460
| 22,796
|
import math
def get_rgba_from_triplet(incolour: list, alpha=1, as_string=False):
    """Convert a 0-1 colour triplet to RGB integers or a Plotly rgba string.

    When ``as_string`` is True returns 'rgba(r,g,b,a)'; otherwise returns
    the list of 3 integer RGB values. A 4th entry in ``incolour`` is ignored.
    E.g. [0.9677975592919913, 0.44127456009157356, 0.5358103155058701] -> 'rgba(246,112,136,1)'
    """
    assert (
        3 <= len(incolour) <= 4
    ), "`incolour` must be a list of 3 or 4 values; ignores 4th entry"
    rgb = [max(0, int(math.floor(channel * 255))) for channel in list(incolour)[:3]]
    if not as_string:
        return rgb
    return f"rgba({rgb[0]},{rgb[1]},{rgb[2]},{float(alpha)})"
|
958b8df4ca428f583a0c94691afb86fdb18c91a4
| 22,797
|
def build_factorized(factorized: dict) -> int:
    """Build an integer from its factorized form (reverse of factorization).

    :param factorized: mapping of prime factor -> exponent
    :return: the reconstructed integer
    """
    num = 1
    for base, exponent in factorized.items():
        num *= base ** exponent
    return int(num)
|
73db0c09db2c7644f4f95c76dbe21b64f54d7641
| 22,798
|
import re
def check_hgt(value):
    """Validate a height string: a number followed by either cm or in.

    - If cm, the number must be at least 150 and at most 193.
    - If in, the number must be at least 59 and at most 76.
    """
    pattern = r"^([0-9]{2,3})(cm|in)$"
    found = re.match(pattern, value)
    if not found:
        return False
    num, unit = found.groups()
    height = int(num)
    if unit == "cm":
        return 150 <= height <= 193
    return 59 <= height <= 76
|
0832fc4bb1039f997618abaf4c67cc15fe5678b9
| 22,799
|
def stability_selection_to_threshold(stability_selection, n_boots):
    """Convert a user-provided stability selection value to a threshold.

    The threshold is the number of bootstraps a feature must appear in to
    guarantee placement in the selection profile.

    Parameters
    ----------
    stability_selection : int or float
        If int, the bootstrap count directly. If float, the proportion of
        ``n_boots`` (must lie between 0 and 1).
    n_boots : int
        The number of bootstraps that will be used for selection.
    """
    # float must be tested first: a proportion gets scaled, a count does not
    if isinstance(stability_selection, float):
        selection_threshold = int(stability_selection * n_boots)
    elif isinstance(stability_selection, int):
        selection_threshold = stability_selection
    else:
        raise ValueError("Stability selection must be a valid float or int.")
    # the resulting count must land inside [1, n_boots]
    if not 1 <= selection_threshold <= n_boots:
        raise ValueError("Stability selection thresholds must be within "
                         "the correct bounds.")
    return selection_threshold
|
890951185ab98ee50a2a08199a90f180710153f0
| 22,800
|
import argparse
def parse_cmd_line_args():
    """Parse command line arguments.

    :return: (data_dir, results_dir) as given on the command line
    """
    parser = argparse.ArgumentParser(
        description='Fetch and format published indices')
    parser.add_argument('data_dir', help='location to save raw data to')
    parser.add_argument(
        'results_dir', help='location to save formatted data to')
    parsed = parser.parse_args()
    return parsed.data_dir, parsed.results_dir
|
bef820add7d88e2bae63847dfdcd9f3add11db7a
| 22,801
|
def loadAsteroidsElements(asteroids_file):
    """ Loads the elements of asteroids in the MPC format.

    Each record yields [name, q, e, incl, peri, node], with the orbital
    elements read from fixed column positions of the MPC line format.
    Records without a parseable perihelion distance are skipped.
    """
    asteroids_list = []
    with open(asteroids_file) as f:
        for line_no, line in enumerate(f):
            # the first two lines are headers
            if line_no < 2:
                continue
            name = ' '.join(line[:38].strip().split())
            try:
                q = float(line[39:45])
            except ValueError:
                # no perihelion distance -> not a data record
                continue
            e = float(line[107:112])
            incl = float(line[101:106])
            peri = float(line[89:94])
            node = float(line[95:100])
            asteroids_list.append([name, q, e, incl, peri, node])
    return asteroids_list
|
a91dc1ba7cd70628b264d947a76031c2fcc70aca
| 22,802
|
from typing import List
def get_available_executors() -> List[str]:
    """Return the executor names accepted by `run_notebook` and
    `run_notebooks`.
    """
    executors = [
        'simple_executor',
        'pandas_miner',
        'plotly_miner',
        'mpl_seaborn_viz_miner',
    ]
    return executors
|
6370c7c0df2792a6d30affc4594b1f1bbe7d657b
| 22,803
|
import re
def convert_numerical(s):
    """ try to convert a string to a numerical value
    :param str s: input string
    :return: int, float, or the original value when not numeric
    >>> convert_numerical('-1')
    -1
    >>> convert_numerical('-2.0')
    -2.0
    >>> convert_numerical('.1')
    0.1
    >>> convert_numerical('-0.')
    -0.0
    >>> convert_numerical('abc58')
    'abc58'
    >>> convert_numerical('1a2')
    '1a2'
    """
    re_int = re.compile(r"^[-]?\d+$")
    # The dot must be escaped: with a bare "." the patterns matched strings
    # like "1a2", and float("1a2") then raised ValueError.
    re_float1 = re.compile(r"^[-]?\d+\.\d*$")
    re_float2 = re.compile(r"^[-]?\d*\.\d+$")
    if re_int.match(str(s)) is not None:
        return int(s)
    elif re_float1.match(str(s)) is not None:
        return float(s)
    elif re_float2.match(str(s)) is not None:
        return float(s)
    else:
        return s
|
1c97a7f2d3ebc72277736cfe9f1e979d76374232
| 22,804
|
def Entry(fileName):
    """Receive a file name and return its contents as a list of strings.

    :param fileName: path of the file to read
    :return: list of lines (each keeping its trailing newline)
    """
    # The original returned the open file object itself, leaking the handle
    # and contradicting its own docstring. Reading the lines inside a
    # context manager closes the file and actually returns a list; callers
    # that iterated the handle line-by-line keep working.
    with open(fileName, 'r') as f:
        return f.readlines()
|
ec2629a45e5d5f94f89115fd88125ce7c197c9dd
| 22,805
|
from typing import Tuple
import argparse
def parse_args() -> Tuple[str, int, int]:
    """Parse the tracert command line.

    :return: (destination, max_ttl, timeout)
    """
    parser = argparse.ArgumentParser(description="Tracert program")
    parser.add_argument("destination", type=str, help="Destination IPv4 address")
    parser.add_argument("--max-ttl", "-m", type=int, help="Max TTL (Max hops)", default=30)
    parser.add_argument("--timeout", type=int, help="Timeout (in seconds)", default=2)
    namespace = parser.parse_args()
    return namespace.destination, namespace.max_ttl, namespace.timeout
|
ab732eb3f9956e81b2789c572cd94c8888296c56
| 22,806
|
def truncate_string(string, truncation, message=''):
    """Truncate a string to a given length, optionally appending a message
    explaining the truncation.

    :param string: A string
    :param truncation: An int, or None to disable truncation
    :param message: A message, e.g. '...<truncated>'
    :return: A new string no longer than truncation
    """
    if truncation is None:
        return string
    assert isinstance(truncation, int)
    if len(string) <= truncation:
        return string
    # reserve room for the message inside the length budget
    return string[:truncation - len(message)] + message
|
45cce7bf6dec02c0a6fac2cf22da8f217a717948
| 22,807
|
def _(x):
"""Identity function for string extraction."""
return x
|
ba44c55c38d0957374f759f84d17344fd7b53c7b
| 22,809
|
import json
def fetch_dummy_response():
    """Load the canned service-request fixture used to mock
    ``requests.Response``."""
    fixture_path = './TestData/service_request.json'
    with open(fixture_path, encoding='utf-8') as handle:
        return json.load(handle)
|
d26c0abd478b6825032a5302acd7d8cb8e5390e0
| 22,811
|
def meta_data_add(filename, parsefile, kaasmatrix):
    """Create a metadata file to add via the ``biom add-metadata`` command.

    For every KO id in ``parsefile`` a tab-separated line of its pathway
    strings (joined by '|') is written; KO ids present in ``kaasmatrix``
    but absent from ``parsefile`` get the value 'null'.

    :param filename: output metadata file path
    :param parsefile: parsed KEGG pathway file (tab-separated, '#' comments)
    :param kaasmatrix: KAAS matrix file (tab-separated, '#' comments)
    :return: the (closed) output file object, as before
    """
    # NOTE: the original opened files with mode 'rU', which was removed in
    # Python 3.11, and re-read parsefile once per KO id (O(n^2)). This
    # version parses each input exactly once with context managers.
    pathways = {}
    with open(parsefile) as infile:
        for line in infile:
            if line.startswith("#"):
                continue
            fields = line.strip().split("\t")
            # collect each occurrence's pathway columns, ';'-joined
            pathways.setdefault(fields[0], []).append(";".join(fields[2:]))
    with open(kaasmatrix) as infile:
        kaas_ids = [line.strip().split("\t")[0]
                    for line in infile if not line.startswith("#")]
    outfile = open(filename, 'w')
    outfile.write("#OTUID" + "\t" + "KEGG_Pathways" + "\n")
    # iterate the deduplicated ids as the original did (set order)
    for ko_id in list(set(pathways)):
        outfile.write(ko_id + "\t" + "|".join(pathways[ko_id]) + "\n")
    for ko_id in kaas_ids:
        if ko_id not in pathways:
            outfile.write(ko_id + "\t" + "null" + "\n")
    outfile.close()
    return outfile
|
8993bf2b2c4ed4c065c83ebe9e75063e4e3d24aa
| 22,812
|
def take(l, n):
    """Return the first n elements from iterable l.

    Raises StopIteration if l yields fewer than n items (matching the
    original behaviour).
    """
    source = iter(l)
    return [next(source) for _ in range(n)]
|
d487d58350db09f3c9b135042e1ddf8a4e79fb09
| 22,813
|
import string
import itertools
import zipfile
def bruteforce(nbcharmax, zipf):
    """If the password hasn't been found yet, the function switches to bruteforce.

    Tries every combination of letters, digits and punctuation of length
    1 up to nbcharmax - 1 as the archive password.

    :param nbcharmax: upper bound (exclusive) on the password length tried
    :param zipf: path to the encrypted zip archive
    :return: True if a password opened the archive, False otherwise
    """
    alphabet = string.ascii_letters + string.digits + string.punctuation
    # NOTE(review): range(1, nbcharmax) never tries length nbcharmax itself —
    # confirm whether the bound is meant to be inclusive.
    for pass_len in range(1, nbcharmax):
        for j in itertools.product(alphabet, repeat=pass_len):
            password = ''.join(j).encode('utf-8')
            try:
                # a wrong password makes extractall raise RuntimeError;
                # reaching the print means the archive opened
                # NOTE(review): a new ZipFile is opened per attempt and never
                # explicitly closed — presumably acceptable here, verify.
                zipfile.ZipFile(zipf).extractall(pwd=password)
                print('Password found')
                return True
            except RuntimeError:
                pass
    return False
|
43b84b967172cf52115b8302f4ab5a58a2202a99
| 22,815
|
from typing import Union
def cpf_format(cpf: Union[int, str, float]) -> str:
    """Format a CPF number.

    Args:
        cpf (Union[int, str, float]): CPF
    Returns:
        str: Formatted CPF "***.***.***-**", or '' when no digits are found
    """
    try:
        if type(cpf) == float:
            # drop the fractional part before extracting digits
            cpf = int(cpf)
        digits = ''.join(filter(str.isdigit, str(cpf)))
        normalized = str(int(digits)).zfill(11)
        return f'{normalized[:3]}.{normalized[3:6]}.{normalized[6:9]}-{normalized[9:]}'
    except ValueError:
        return ''
|
596de669ca3305976ec9f45a5a3023ceb406c937
| 22,816
|
def contains(value, arg):
    """Case-insensitively check whether a substring occurs inside a string.

    Args:
        value (str): The string
        arg (str): The substring
    Returns:
        bool: True if string contains substring, False otherwise
    """
    return arg.upper() in value.upper()
|
7da05cf849c3e0bce88d039852175eee6b35c221
| 22,817
|
import click
def common_cv_options(f):
    """CV-specific CLI options.

    Decorator that attaches the shared cross-validation options
    (--folds, --replicates, --threads, --phenotype) to a click command.

    :param f: the click command function to decorate
    :return: the decorated function
    """
    # each click.option call wraps f; application order determines the
    # order the options are registered in
    f = click.option('--folds', type=int, default=5, help='Number of folds in CV.')(f)
    f = click.option('--replicates', type=int, default=10, help='Number of replicates for CV.')(f)
    f = click.option('--threads', type=int, default=1, help='Number of threads to use.')(f)
    # phenotype is the only required option; its path must already exist
    f = click.option('--phenotype', type=click.Path(exists=True),
                     required=True, help='Phenotype file path.')(f)
    return f
|
12a0e348b8da5288026dfb50fe7e5a32c445ceab
| 22,818
|
import sys
import shlex
def make_pip_command(args: str, split: bool=True, disable_version_check: bool=True):
    """
    Construct a call to python's pip
    :param args: arguments to pass to the command
    :param split: whether to split the result into a list or not using shlex
    :param disable_version_check: if True, tell pip to disable its version check
    :return: command line in string or list format
    """
    disable = '--disable-pip-version-check' if disable_version_check else ''
    cmd_line = '{} -m pip {} {}'.format(sys.executable, args, disable)
    return shlex.split(cmd_line) if split else cmd_line
|
cf517ba33a6166a22cce7f2143fa6e0de6e8fb86
| 22,819
|
from pathlib import Path
from typing import Dict
from typing import Any
import json
def load_json(path: Path) -> Dict[str, Any]:
    """Parse the JSON file at ``path`` and return the resulting dictionary."""
    handle = path.open()
    try:
        return json.load(handle)
    finally:
        handle.close()
|
c63a9ea869335d57096b1cdf2903db8b84be85d9
| 22,820
|
def _show_consistency(field, message, is_consistent):
"""
If `is_consistent` is false,
make `field` RED with tooltip caption `message` & return 1.
Otherwise reset colour and tooltip caption of `field` to
default values, and return 0.
"""
if is_consistent is True:
field.setStyleSheet("")
field.setToolTip("")
return 0
else:
field.setStyleSheet("QLineEdit { background-color : red;}")
field.setToolTip(message)
return 1
|
9276761a629d85005bc499071b8a6ae00bdf23dc
| 22,824
|
def sqlite3_quote_name(name):
    """Quote `name` as a SQL identifier, e.g. a table or column name.
    Do NOT use this for strings, e.g. inserting data into a table.
    Use query parameters instead.
    """
    # double any embedded quotes, then wrap; quoting unconditionally is safer
    # than deciding when it could be omitted
    escaped = name.replace('"', '""')
    return '"{}"'.format(escaped)
|
0ea601e8e4fe4c7b33fb4c32364f6e7d01110bf0
| 22,825
|
import math
def ceil_log2(num):
    """ Return integer ceil value of log2(num).

    :param num: positive number (converted to int first)
    :raises ValueError: if num is not positive after int conversion
    """
    # math.log(num, 2) computes in floating point, so exact powers of two can
    # come out as e.g. 29.000000000000004 and ceil to the wrong value.
    # (n - 1).bit_length() is the exact integer equivalent.
    num = int(num)
    if num <= 0:
        raise ValueError("math domain error")
    return (num - 1).bit_length()
|
36e0538f332a82377b869e428b130bd2c1045071
| 22,826
|
def prior_transform(self, unit_coords, priors, prior_args=None):
    """An example of one way to use the `Prior` objects below to go from unit
    cube to parameter space, for nested sampling. This takes and returns a
    list instead of an array, to accomodate possible vector parameters. Thus
    one will need something like ``theta_array=np.concatenate(*theta)``
    :param unit_coords:
        Coordinates on the unit prior hyper-cube. Iterable.
    :param priors:
        A list of `Prior` objects, iterable of same length as `unit_coords`.
    :param prior_args: (optional)
        A list of dictionaries of prior function keyword arguments.
    :returns theta:
        A list of parameter values corresponding to the given coordinates on
        the prior unit hypercube.
    """
    # prior_args defaulted to a mutable list ([]), which is shared across
    # calls; use the None-sentinel idiom instead (behaviour unchanged).
    if prior_args is None:
        prior_args = []
    theta = []
    for i, (u, p) in enumerate(zip(unit_coords, priors)):
        func = p.unit_transform
        try:
            kwargs = prior_args[i]
        except IndexError:
            # fewer kwarg dicts than priors: fall back to no kwargs
            kwargs = {}
        theta.append(func(u, **kwargs))
    return theta
|
83dfe735336043a912e0795de19ced39c00aca40
| 22,827
|
def json_marshal(data):
    """Serializes arbitrary data to JSON.
    Args:
        data: any object
    Returns:
        JSON string representing the data
    """
    # The original compared type(data) against STRINGS ("dict", "int", ...),
    # which is Starlark semantics; in Python type() returns a type object, so
    # every branch was dead and everything fell through to "unknown type".
    # bool must be tested before int because isinstance(True, int) is True.
    if isinstance(data, bool):
        return "true" if data else "false"
    if isinstance(data, (dict, list)):
        return str(data).replace(": True", ": true").replace(": False", ": false").replace(": None", ": false")
    if isinstance(data, int):
        return str(data)
    if isinstance(data, str):
        return "\"" + data + "\""
    # Bazel Label objects are matched by type name to avoid an import
    if type(data).__name__ == "Label":
        return "\"//{}:{}\"".format(data.package, data.name)
    return "unknown type {}: {}".format(type(data), data)
|
eb9738fb57cfa936c31e7dbd7f2ccdbfd75fba52
| 22,828
|
def map_format(*values, pattern):
    """Apply the format ``pattern`` to the given positional values."""
    formatted = pattern.format(*values)
    return formatted
|
101c07ac0b709ecec49978bc0ac3ccf0fbbc978f
| 22,829
|
def format_song_list(header, songs, footer, *, cnt=1):
    """Render the numbered management list of songs.

    :param header: text placed before the entries
    :param songs: objects exposing ``format_menu(position)``
    :param footer: text appended after the (rstripped) entries
    :param cnt: starting position number
    """
    body = header
    for position, song in enumerate(songs, start=cnt):
        body += song.format_menu(position)
    return body.rstrip() + footer
|
be62a0e157841b4292d7e061f90c06bb10557490
| 22,830
|
def delta_cross_entropy_softmax(outputs, labels):
    """Derivative of cross entropy loss (C) w.r.t. weighted sum of inputs (Z).

    NOTE: `outputs` is modified in place and the same tensor is returned.

    Arguments:
        outputs (torch.Tensor) - predicted probabilities. Size (Batch_size, N_out)
        labels (torch.Tensor) - truth labels for the batch. Size (Batch_size)
    Returns:
        the gradient tensor (aliasing `outputs`)
    """
    batch_size = outputs.size()[0]
    # subtract 1 at each sample's true-class probability: softmax - onehot
    outputs[range(batch_size), labels] -= 1
    return outputs
|
df22f8d0f0824b465a0dc490dd289803365b7b23
| 22,831
|
import inspect
def object_repr(instance, exclude=None):
"""Format class instance repr() string."""
exclude = exclude or []
def _format_value(value):
if isinstance(value, str):
return "'{}'".format(value)
if inspect.isfunction(value):
return '{}()'.format(value.__name__)
return repr(value)
return '{}({})'.format(
instance.__class__.__name__,
', '.join(['{}={}'.format(k, _format_value(getattr(instance, k)))
for k in sorted(instance.__dict__.keys())
if not k.startswith('_') and k not in exclude]))
|
dd29e0a354d6840185bc369ab77c932e1f1e5f68
| 22,832
|
def is_control_gate(op_type: str) -> bool:
    """Get whether a gate type includes a control (q)bit."""
    controlled_types = frozenset((
        "CX", "CY", "CZ", "CH",
        "CRx", "CRy", "CRz",
        "CU1", "CU3",
        "CV", "CVdg",
        "CSx", "CSXdg",
        "CSWAP",
        "CnRy", "CnX", "CCX",
        "Control", "QControlBox", "Conditional",
    ))
    return op_type in controlled_types
|
a26fa5175fbfa48495e72d7f60548bccb093acf5
| 22,834
|
def mocked_json():
    """Mocked version of the json method for the Response class."""
    def _fake_json(*args, **kwargs):  # pylint:disable=unused-argument, missing-docstring
        # always answer with an empty payload, whatever the arguments
        return {}
    yield _fake_json
|
509f90200057cc83e7a8cf85de18823873997f8a
| 22,835
|
import argparse
def parse_vu(vu_str: str) -> dict:
    """Parse a Radiance view string into a dictionary.

    Args:
        vu_str: view parameters as a string
    Returns:
        A view dictionary containing only the options that were present
    """
    tokens = vu_str.strip().split()
    vparser = argparse.ArgumentParser()
    vparser.add_argument('-v', action='store', dest='vt')
    # vector options take three floats
    for flag in ('-vp', '-vd', '-vu'):
        vparser.add_argument(flag, nargs=3, type=float)
    # scalar float options
    for flag in ('-vv', '-vh', '-vo', '-va', '-vs', '-vl'):
        vparser.add_argument(flag, type=float)
    # image dimensions
    for flag in ('-x', '-y'):
        vparser.add_argument(flag, type=int)
    vparser.add_argument('-vf', type=str)
    known, _ = vparser.parse_known_args(tokens)
    view_dict = vars(known)
    if view_dict['vt'] is not None:
        # '-vtv' parses as -v with value 'tv'; keep only the type letter
        view_dict['vt'] = view_dict['vt'][-1]
    return {key: val for key, val in view_dict.items() if val is not None}
|
af7668684a40e5bbdb62f19904b034b153bc138a
| 22,836
|
import os
def GetCleanEnvironment():
    """Restore environment to pre-virtualenv activate.
    If we are already running inside a virtualenv and we shell out to another
    virtualenv this wont work because the new activate script will add the second
    virtualenv to the path _after_ the current virtualenv. We therefore need to
    restore the environment to what it was before the current virtualenv was
    running before we launch another one.
    The code below is essentially implementing the virtualenv deactivate script.
    Returns:
      A clean environment.
    """
    env = os.environ.copy()
    # put back the values activate stashed away, dropping the stash keys
    for saved_key, live_key in (("_OLD_VIRTUAL_PROMPT", "PROMPT"),
                                ("_OLD_VIRTUAL_PATH", "PATH")):
        saved_value = env.pop(saved_key, None)
        if saved_value:
            env[live_key] = saved_value
    return env
|
27aaf5bdb1305a6e71bed594f93109e66fde4337
| 22,837
|
def get_version(_app_dir: str) -> str:
    """Get the application version.

    Awaiting implementation: takes an application directory path intended
    to determine its version. Previously tied to package.json (Node.JS);
    either make it language-independent or support multiple languages.
    Currently always returns the placeholder version.
    """
    placeholder = "0.0.0"
    return placeholder
|
86a4c39c6ad17b51f0ab720167e0690e0b53bd79
| 22,838
|
import requests
def get_value(sensor_id):
    """Fetch the latest reading for ``sensor_id`` from the Luftdaten API.

    :return: the newest sensor value as float, or None when the API
        returns an empty payload
    """
    url = f"https://api.luftdaten.info/v1/sensor/{sensor_id}/"
    response = requests.get(url, headers={"Host": "api.luftdaten.info"})
    payload = response.json()
    if not payload:
        return None
    latest = payload[-1]["sensordatavalues"][0]["value"]
    return float(latest)
|
4029c10af9eed0a5b74ea59b03c0d26e5fca4b62
| 22,840
|
import math
def EvalExponentialPdf(x, lam):
    """Computes the exponential PDF.
    x: value
    lam: parameter lambda in events per unit time
    returns: float probability density
    """
    density = math.exp(-lam * x) * lam
    return density
|
b94799601e5a99398ee3371729ff4dc087cecbcc
| 22,841
|
def subtract(x, y):
    """Subtract x from y and return the difference."""
    difference = y - x
    return difference
|
92443fd4a9b6cddc77bf0ee7f9910721a3d0680c
| 22,842
|
import requests
def clarify_tcp_udp_service(service_member, asa):
    """
    Function to clarify if a TcpUdpObject is either TCP or UDP
    :param service_member: The ASA object member of a group
    :param asa: The ASA where the object is found
    :return: Returns either 'tcpportobject' or 'udpportobject'
    """
    # follow the member's self-referential link to fetch the full object
    url = service_member['refLink']
    headers = {
        'Content-Type': 'application/json',
        'User-agent': 'REST API Agent',
        'X-Auth-Token': asa.token
    }
    # NOTE(review): verify=False disables TLS certificate checking — presumably
    # the ASA uses a self-signed cert, but confirm this is intentional.
    response = requests.request("GET", url, headers=headers, verify=False).json()
    # 'value' looks like "tcp/..." or "udp/..."; the protocol prefix plus
    # "portobject" yields the object-type string
    protocol_value = response['value'].split("/")[0] + "portobject"
    return protocol_value
|
cccf4b6978ba1b26178cff8c08543d35451c7fa9
| 22,843
|
from uuid import uuid4
def _1_add_profile_uuid(config):
"""Add the required values for a new default profile.
* PROFILE_UUID
The profile uuid will be used as a general purpose identifier for the profile, in
for example the RabbitMQ message queues and exchanges.
"""
for profile in config.get('profiles', {}).values():
profile['PROFILE_UUID'] = uuid4().hex
return config
|
fdadc728cc762bbe3c38a2ee94ef50571da1d64b
| 22,844
|
def unescaper(msg):
    """Undo the escape sequences in a received message.

    A 0x5c byte marks an escape: it is dropped and the following byte is
    decoded as ``0x5c ^ byte ^ 0xa3``.
    """
    decoded = []
    pending_escape = False
    for byte in msg:
        if byte == 0x5c:
            pending_escape = True
            continue
        if pending_escape:
            byte = 0x5c ^ byte ^ 0xa3
            pending_escape = False
        decoded.append(byte)
    return bytes(decoded)
|
b3328c7271cc0e97bbd7128d044fb572849d2863
| 22,845
|
import re
def parse_must_reads(date, msg_body):
    """ Returns a list of article information
    title, subtitle, author, publication, minutes (reading time)

    Scans the numbered entries 1..10 in the message body after removing
    markdown-style links, trying two slightly different line patterns
    per entry.
    """
    # NOTE(review): `date` and `must_reads` are never used below — the regex
    # search runs on the full msg_body, not the slice after 'The must-reads'.
    # Confirm whether `text` was meant to be built from `must_reads`.
    must_reads = msg_body[msg_body.index('The must-reads'):]
    # strip inline (http...) links so they don't interfere with the patterns
    text = re.sub(r'\(https?:\S+.*\)', '', msg_body, flags=re.MULTILINE)
    articles = []
    for i in range(1, 11):
        try:
            # primary pattern: "<n> title\r\nsubtitle (author \r\n"
            articles.append(list(re.findall('\n' + str(i) + ' (.*)\r\n(.*) \((.*) \r\n', text, re.MULTILINE)[0]))
        except:
            try:
                # fallback: same pattern without the space before the paren
                articles.append(list(re.findall('\n' + str(i) + ' (.*)\r\n(.*)\((.*) \r\n', text, re.MULTILINE)[0]))
            except:
                # entry missing or malformed: skip it (bare excepts kept as-is)
                continue
    return articles
|
fc4f598bf277f29db5530898379391790f43d642
| 22,847
|
def service_model(service):
    """Produce the serializable model dict for a service."""
    server = service.server
    proc = service.proc
    model = {
        'name': service.name,
        'admin': service.admin,
        'url': service.url,
        # no server registered -> empty prefix
        'prefix': server.base_url if server else '',
        'command': service.command,
        # not running -> pid 0
        'pid': proc.pid if proc else 0,
        'info': service.info,
    }
    return model
|
2443a073efdc19dd514f71eb22abcf3d7c6f983d
| 22,848
|
from pathlib import Path
def scanfolder_glob(folder):
    """Recursively scan ``folder`` for mp3 files.

    :param folder: root folder of mp3 files
    :return: lazy iterable of matching ``Path`` objects
    """
    return Path(folder).glob('**/*.mp3')
|
184a771b386be2734bbddb528836538bfe936002
| 22,850
|
def _strip(text):
"""Normalize expected strings to allow more readable definition."""
return text.lstrip('\n').rstrip(' ')
|
2adfb31289204d1e3f3cac5c13ebcb26bdd9a7a2
| 22,851
|
import re
def _get_base_key(key):
"""Extracts the base key from the provided key.
Earth Engine exports `TFRecords` containing each data variable with its
corresponding variable name. In the case of time sequences, the name of the
data variable is of the form `variable_1, variable_2, ..., variable_n`,
where `variable` is the name of the variable, and n the number of elements
in the time sequence. Extracting the base key ensures that each step of the
time sequence goes through the same normalization steps.
The base key obeys the following naming pattern: `([a-zA-Z]+)`
For instance, for an input key `variable_1`, this function returns `variable`.
For an input key `variable`, this function simply returns `variable`.
Args:
key: Input key.
Returns:
The corresponding base key.
Raises:
ValueError when `key` does not match the expected pattern.
"""
match = re.fullmatch(r'([a-zA-Z]+)', key)
if match:
return match.group(1)
raise ValueError(
f'The provided key does not match the expected pattern: {key}')
|
be919ec7c038eac4bbfebc779cb1e05ef068dabb
| 22,854
|
import os
import subprocess
def run_command(command, hide_output=False):
    """
    Run *command* through the shell and return its exit code.

    :param command: shell command string (executed with shell=True, so only
        pass trusted input)
    :param hide_output: when True, discard the command's stdout and stderr
    :return: the process return code (int)
    """
    if hide_output:
        # BUG FIX: the original opened os.devnull and never closed it,
        # leaking a file handle per call. subprocess.DEVNULL needs no cleanup.
        return subprocess.call(
            command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
            shell=True)
    return subprocess.call(command, shell=True)
|
fd8099673dda3135a3c3e2a514e65eba20efdc08
| 22,856
|
import click
from pathlib import Path
def _cbk_opt_inventory(ctx: click.Context, param, value):
    """Click option callback: load the inventory file at *value* and return
    its contents as a list of lines; abort the command on any read error."""
    try:
        contents = Path(value).read_text()
    except Exception as exc:
        ctx.fail(f"Unable to load inventory file '{value}': {str(exc)}")
    else:
        return contents.splitlines()
|
5706a426bf1052bd85da8f7a9da7fd806f7c0100
| 22,857
|
def parse_split_conf(conf):
    """Parse a comma-separated config value into a list of stripped strings.

    :param conf: raw config string, e.g. ``"a, b ,c"``
    :return: list of stripped items, or ``[]`` when *conf* is blank
    """
    # BUG FIX (cleanup): the original looped `conf[conf.index(item)] = item`,
    # a pure no-op (and index() would misfire on duplicate items).
    if not conf.strip():
        return []
    return [item.strip() for item in conf.split(',')]
|
c3248c0817f1e24a16fb43b42cbe2f3a4d64ea6e
| 22,858
|
def rename(new_name):
    """Identity rename: return the proposed name unchanged."""
    result = new_name
    return result
|
248408734e725de407dbda7d68de03b0ef12e2e1
| 22,860
|
def _get_leaf(node):
"""
Gets innermost definition of Typedef or Struct-/UnionMember.
Other nodes pass through.
"""
while getattr(node, 'definition', None):
node = node.definition
return node
|
a1430722e4efaeb433c371046814556cc93e786b
| 22,861
|
import pkg_resources
def pkg_req(text):
    """
    :param str|None text: Text to parse
    :return pkg_resources.Requirement|None: Corresponding parsed requirement, if valid
    """
    if not text:
        return None
    try:
        # BUG FIX: Requirement.parse() is the documented constructor;
        # calling Requirement(text) directly raises on older setuptools,
        # which made this function silently return None for every input.
        return pkg_resources.Requirement.parse(text)
    except Exception:
        # Deliberate best-effort: any unparsable requirement yields None.
        return None
|
6cebc9258937b6fd6a598801b3cf74a819dc6d64
| 22,863
|
def marc21_to_is_part_of(self, key, value):
    """Get is_part_of.
    is_part_of: [773$t repetitive]
    Returns subfield ``t`` unless the record already carries ``is_part_of``.
    """
    already_present = self.get('is_part_of', None)
    if already_present:
        return None
    return value.get('t')
|
d754746387fda30364b3967dfe6d5ee6a8a9fd69
| 22,864
|
def mph2m_s(mph):
    """Convert a speed from miles per hour to metres per second."""
    MPH_TO_MS = 0.44704  # exact definition: 1 mile = 1609.344 m, 1 h = 3600 s
    return MPH_TO_MS * mph
|
87cfc8f9e572919389d7ddca281942968a38b35e
| 22,867
|
def backends_mapping(custom_backend):
    """
    Create 2 separate backends mapped by gateway path:
      * "/echo"   -> backend built from "echo"
      * "/quotes" -> backend built from "quotes"
    """
    mapping = {}
    for path, backend_name in (("/echo", "echo"), ("/quotes", "quotes")):
        mapping[path] = custom_backend(backend_name)
    return mapping
|
0619e81f10b805a53e482f76341c708c4f16d3c5
| 22,868
|
def convertir_lettre_index(lettre):
    """Convert a column letter (A-D) into its 0-based index, or -1 when the
    letter is not one of A, B, C, D (an always-invalid coordinate)."""
    for index, candidate in enumerate("ABCD"):
        if candidate == lettre:
            return index
    return -1
|
8152e7d710dc94b7b34898dc3dbd2f20904ecf78
| 22,869
|
def elide_filename(filename: str, length: int) -> str:
    """Shorten *filename* to at most *length* characters by cutting the middle.

    Removing the middle (instead of the end) preserves file extensions, and
    plain ASCII dots ("...") are used instead of U+2026 so the result is safe
    for any filesystem encoding. Path separators are not handled.

    Args:
        filename: The filename to elide.
        length: Maximum length of the result; must be at least 3.
    Returns:
        The (possibly elided) filename.
    Raises:
        ValueError: If *length* is smaller than 3.
    """
    ellipsis = '...'
    if length < len(ellipsis):
        raise ValueError('length must be greater or equal to 3')
    if len(filename) <= length:
        return filename
    keep = length - len(ellipsis)
    head = keep // 2
    tail = keep - head
    if tail:
        return filename[:head] + ellipsis + filename[-tail:]
    return filename[:head] + ellipsis
|
0bec1ad17075d61954c2053732e5df7354dcc008
| 22,871
|
def is_std_ref(string):
    """
    Return True when the string contains a standard reference marker
    (Prop., C.N., Post. or Def.).
    """
    markers = ('Prop.', 'C.N.', 'Post.', 'Def.')
    return any(marker in string for marker in markers)
|
2f8d374e104267a6fb78fdd5376e159a95ca04ff
| 22,873
|
import os
def pathExists(path):
    """Return True when *path* exists on disk, whether file or directory."""
    return os.path.exists(path)
|
693dcfc574723d3e5f156039ac13d58871a09953
| 22,874
|
import re
def fix_csv_contents(csv_contents, line_format_str):
    """Drop malformed rows from the raw database .csv text.

    Every data row (the header line is exempt) must match *line_format_str*
    from its start; non-matching rows — e.g. a missing or deformed time
    entry — are reported and removed.

    Args:
        csv_contents (str): Contents of the preprocessed .csv file
        line_format_str (str): Regex each data row has to match
    Returns:
        str: Contents of the postprocessed .csv file, newline-terminated
    """
    row_pattern = re.compile(line_format_str)
    lines = csv_contents.split("\n")
    # A trailing newline leaves one empty final element — discard it.
    if len(lines[-1]) == 0:
        lines = lines[0:-1]
    kept = lines[:1]  # header row always survives
    for csv_line in lines[1:]:
        if row_pattern.match(csv_line) is None:
            print(
                "    Ignoring this row due to incorrect format: '"
                + csv_line
                + "'"
            )
        else:
            kept.append(csv_line)
    return "\n".join(kept) + "\n"
|
65fe4df9529a63356039e992830385c7afbeec11
| 22,875
|
def crud_url_name(model, action, prefix=None):
    """
    Build the url name '<prefix><app_label>_<modelname>_<action>' for *model*.
    """
    head = "" if prefix is None else prefix
    app = model._meta.app_label
    model_name = model.__name__.lower()
    return f"{head}{app}_{model_name}_{action}"
|
8b22a371e089c512dcaa0100e26851c1bfdc88cd
| 22,876
|
import logging
def split_list(source_list, splitter_algo: str, splitter_args, randomizer):
    """
    Remove a subset of items from *source_list* (which is MODIFIED in place)
    and return them, chosen by the named splitting algorithm.
    :param source_list: The source list to split. It is MODIFIED.
    :param splitter_algo: Name of the splitting algorithm
    :param splitter_args: Arguments for the algorithm (e.g. {'fraction': ...})
    :param randomizer: Randomizer supplying .sample() when needed
    :return: The elements removed from the source list.
    """
    removed = []
    if splitter_algo == 'randomFraction':
        take = round(len(source_list) * float(splitter_args['fraction']))
        picked = randomizer.sample(range(0, len(source_list)), take)
        # Delete from the highest index down so earlier deletions
        # don't shift the remaining positions.
        for idx in sorted(picked, reverse=True):
            removed.append(source_list.pop(idx))
    else:
        logging.error(f"Unknown validation algorithm '{splitter_algo}', not splitting list.")
    return removed
|
35151342826ad76d714ec2615ba9f1fa7a98d126
| 22,877
|
def geoIsPointInPoly(loc, poly):
    """
    Determine if a point is inside a polygon. Points that are along the perimeter of the polygon (including vertices) are considered to be "inside".
    Parameters
    ----------
    loc: list
        The coordinate of the point, in [lat, lon] format
    poly: list of lists
        The polygon to check if the point is inside, in [[lat, lon], [lat, lon], ..., [lat, lon]] format
    Returns
    -------
    boolean
        The point is inside the polygon or not
    """
    # Any point that is literally one of the polygon's listed vertices counts
    # as inside without running the ray cast.
    # NOTE(review): non-vertex points lying exactly on an edge are not
    # special-cased here, despite the docstring's perimeter claim — confirm.
    if (loc in poly):
        return True
    # Ray casting: shoot a horizontal ray from (x, y) and toggle `inside`
    # each time it crosses a polygon edge. lon is x, lat is y.
    x = loc[1]
    y = loc[0]
    inside = False
    # j trails i so each iteration examines edge (poly[j] -> poly[i]),
    # starting with the closing edge (last vertex -> first vertex).
    j = len(poly) - 1
    for i in range(0,len(poly)):
        # Check if pt is in interior:
        xi = poly[i][1]
        yi = poly[i][0]
        xj = poly[j][1]
        yj = poly[j][0]
        # Does this edge straddle the ray's latitude?
        intersect = (yi > y) != (yj > y)
        if (intersect):
            # If so, does the crossing point lie to the right of (x, y)?
            intersect = (x < (xj - xi) * (y - yi) / float(yj - yi) + xi)
        if (intersect):
            inside = not inside
        j = i
    return inside
|
b1544f55499e6233e40c7f48eadb88b193527d8d
| 22,878
|
def q2mat(q):
    """
    Build the 3x3 left rotation matrix for a normalized quaternion.
    Parameters
        q: The normalized quaternion, scalar part first (list of 4 numbers)
    Returns
        u: The rotation matrix (2-dimensional list)
    """
    w, x, y, z = q[0], q[1], q[2], q[3]
    return [
        [w * w + x * x - y * y - z * z,
         2.0 * (x * y - w * z),
         2.0 * (x * z + w * y)],
        [2.0 * (y * x + w * z),
         w * w - x * x + y * y - z * z,
         2.0 * (y * z - w * x)],
        [2.0 * (z * x - w * y),
         2.0 * (z * y + w * x),
         w * w - x * x - y * y + z * z],
    ]
|
8f79c260eec3388c3b2d6a209c9445a91e87b0e9
| 22,879
|
def obj_to_dict(obj):
    """
    Snapshot an opaque object's public attributes into a plain dict.

    Names starting with an underscore (private and dunder) are skipped, so
    the result is safe to hand to a JSON-based serializer across the
    app/task barrier.

    Args:
        obj: An opaque object
    Returns:
        dict: Mapping of public attribute name to its current value.
    """
    public_names = [name for name in dir(obj) if not name.startswith('_')]
    return {name: getattr(obj, name) for name in public_names}
|
0c7a8d758357dcd7f33b0351004fe29354d0134e
| 22,881
|
def selected_list(element):
    """Return the selected indexes of an element dict: the explicit
    'selected' list when present, otherwise the comma-separated 'value'
    string parsed into ints."""
    try:
        return element['selected']
    except KeyError:
        pass
    return [int(part.strip()) for part in element['value'].split(',')]
|
c1fb376825a62e11fcb7501f29055f454c1fd99d
| 22,885
|
def update_with(l, r, default_value, f):
    """
    In-place unionWith: merge *r* into *l* with combiner *f*, seeding every
    missing key with a fresh shallow copy of *default_value*. Returns *l*.
    """
    for key, incoming in r.items():
        existing = l[key] if key in l else default_value[:]
        l[key] = f(existing, incoming)
    return l
|
facae66dd00bc47fe5ec51d2701dd1b254a30f5e
| 22,887
|
import time
def _parse_images(browser):
    """Parses and returns all found images on a webpage"""
    # Scroll until all the images have been found, which is
    # useful for infinite scroll pages
    # Repeatedly scroll to the bottom; stop once a scroll no longer
    # surfaces additional <img> elements.
    prev_elems = 0
    # NOTE(review): find_elements_by_tag_name was removed in Selenium 4 —
    # confirm the pinned selenium version, or migrate to
    # find_elements(By.TAG_NAME, "img").
    post_elems = browser.find_elements_by_tag_name("img")
    while len(post_elems) > prev_elems:
        prev_elems = len(post_elems)
        browser.execute_script(
            "window.scrollTo(0, document.body.scrollHeight);"
        )
        # Give lazily-loaded content a moment to render before recounting.
        time.sleep(0.5)
        post_elems = browser.find_elements_by_tag_name("img")
    # Deduplicate the src URLs; sorting makes the result deterministic.
    images = [elem.get_attribute('src') for elem in post_elems]
    images = list(set(images))
    images.sort()
    return images
|
57dd8f156537805e86b4d4d400374efa3cd4c448
| 22,888
|
def repeatedString(s, n):
    """Count occurrences of 'a' in the first *n* characters of *s* repeated
    infinitely (n may be as large as 1,000,000,000,000).

    Handles all three cases from the original note:
      * len(s) < n  — whole repetitions plus a partial prefix
      * len(s) > n  — only a prefix of s is used
      * len(s) == n — exactly one copy of s (BUG FIX: previously fell through
        a bare `return` and yielded None)

    :param s: the base string
    :param n: number of characters of the infinite repetition to consider
    :return: the number of 'a' characters in those n characters
    """
    # Robustness: an empty base string contains no 'a's and would otherwise
    # divide by zero below.
    if not s:
        return 0
    num_a = s.count('a')
    if len(s) < n:
        whole, remainder = divmod(n, len(s))
        # Full repetitions contribute num_a each; the leftover prefix is
        # counted separately (contributes 0 when remainder == 0).
        return num_a * whole + s[:remainder].count('a')
    elif len(s) > n:
        return s[:n].count('a')
    # len(s) == n: exactly one full copy of s.
    return num_a
|
f5962cd07ed1ecc6512edbc411dc1b2d034896ea
| 22,890
|
def zf_bin(n, width):
    """Render *n* in binary, zero-padded and truncated to its *width* low bits.
    >>> zf_bin(1, 8)
    '00000001'
    >>> zf_bin(255, 8)
    '11111111'
    >>> zf_bin(256, 8)
    '00000000'
    """
    digits = bin(n)[2:].zfill(width)
    return digits[-width:]
|
db8f25d7590207187a96be4d266ad57a10959995
| 22,892
|
def pipeline(Gst):
    """Make sure test pipeline is correct and test env setup"""
    # Minimal source -> queue -> sink chain; presumably a test fixture that
    # receives the initialized GStreamer `Gst` module — confirm with callers.
    SIMPLE_PIPELINE_DESCRIPTION = """videotestsrc ! queue ! fakesink"""
    pipeline = Gst.parse_launch(SIMPLE_PIPELINE_DESCRIPTION)
    # Fails fast when parse_launch did not yield a usable Pipeline object.
    assert isinstance(pipeline, Gst.Pipeline)
    return pipeline
|
84b97744e0d237c6a305cc6bf9ad5ab02fee15d5
| 22,894
|
def check_inputs(input1, input2):
    """
    Validate that input1 is a list containing only numbers, that input2 is
    a number, and that input2 occurs in input1; raise TypeError otherwise.
    >>> check_inputs([1, 2.0, 3.0, 4], 4)
    'Input validated'
    >>> check_inputs([], 1)
    Traceback (most recent call last):
    ...
    TypeError: input2 not in input1
    >>> check_inputs(1, 1)
    Traceback (most recent call last):
    ...
    TypeError: input1 is not the correct type
    >>> check_inputs([1, 2, 'hi'], 4)
    Traceback (most recent call last):
    ...
    TypeError: The element at index 2 is not numeric
    >>> check_inputs([1.0, 2.0, 3.0], 'hello')
    Traceback (most recent call last):
    ...
    TypeError: input2 is not the correct type
    """
    if not isinstance(input1, list):
        raise TypeError('input1 is not the correct type')
    # Report the first offending element, matching the message format above.
    for position, element in enumerate(input1):
        if not isinstance(element, (int, float)):
            raise TypeError('The element at index ' + str(position) + ' is not numeric')
    # Exact type check (not isinstance) so e.g. bool is rejected, as before.
    if type(input2) not in (float, int):
        raise TypeError('input2 is not the correct type')
    if input2 not in input1:
        raise TypeError('input2 not in input1')
    return 'Input validated'
|
34a2b93b9c2d72d33a1241ef014fa6cbeec265c7
| 22,895
|
def get_regularizer(regularizer, is_bias=False):
    """
    Build the caffe regularizer settings from a DLMDL declaration.
    :param regularizer: DLMDL weight/bias regularizer declaration (dict)
    :param is_bias: True when building the bias regularizer
    :return: ({'decay_mult': decay}, regularizer type) tuple
    """
    def lookup(key, default=None):
        """Fetch *key* from the declaration, falling back to *default*
        when the key is absent (or stored as None)."""
        found = regularizer.get(key)
        return default if found is None else found

    # Regularizer type defaults to L2 when not declared.
    reg_type = lookup('type', default='L2')
    decay_source = 'bias' if is_bias else 'weight'
    decay_mult = float(lookup(decay_source, default=0.0))
    return {'decay_mult': decay_mult}, reg_type
|
4e10d7bd8ec416c2293a2cb4e5c1dc3b309ab6b5
| 22,896
|
import os
import base64
def deserialize_file(file_info, dst_path):
    """
    Write one base64-encoded response file into the destination folder,
    creating the folder when needed.
    :param file_info: Response file object exposing ``name`` and base64 ``data``
    :param dst_path: Destination folder path
    :return: Path to the file written to disk
    """
    if not os.path.exists(dst_path):
        os.makedirs(dst_path)
    target = os.path.join(dst_path, file_info.name)
    with open(target, 'w+b') as out:
        out.write(base64.b64decode(file_info.data))
    print('File saved %s' % file_info.name)
    return target
|
ba24c607109e515f36dc21f14fdbf2a7ffc896e4
| 22,899
|
def correct_info(item):
"""
it should be return first match
>>> correct_info({'state': 'open', 'number': 29604, 'title': '[3.9] [docs] Add missing word'})
{'state': 'open', 'number': 29604, 'title': '[3.9] [docs] Add missing word', 'closed_at': None}
it should be return second match
>>> correct_info({'state': 'closed', 'number': 30604, 'title': 'foo', 'closed_at': '2021-11-18T00:26:13Z'})
{'state': 'closed', 'number': 30604, 'title': 'foo', 'closed_at': '2021-11-18T00:26:13Z'}
it should be return last match
>>> correct_info({'state': 'wip', 'number': 40604, 'title': 'bar', 'closed_at': None})
{'state': 'unknown', 'number': None, 'title': None, 'closed_at': None}
"""
match item:
case {'state': 'open', 'number': number, 'title': title}:
return {'state': 'open', 'number': number, 'title': title, 'closed_at': None}
case {'state': 'closed', 'number': number, 'title': title, 'closed_at': closed_at}:
return {'state': 'closed', 'number': number, 'title': title, 'closed_at': closed_at}
case _:
return {'state': 'unknown', 'number': None, 'title': None, 'closed_at': None}
|
accd2ed7b37c4e46e61f552642923dfcbd11378c
| 22,902
|
import struct
def format_oath_code(response: bytes, digits: int = 6) -> str:
    """Extract a zero-padded OATH code from an HMAC hash response using
    dynamic truncation (low nibble of the last byte selects the offset)."""
    offset = response[-1] & 0x0F
    (word,) = struct.unpack_from(">I", response, offset)
    code = (word & 0x7FFFFFFF) % (10 ** digits)
    return str(code).zfill(digits)
|
0601f2bcdae071ed989eefda67534096976ffb78
| 22,903
|
def select_candidates(candidate_data, number):
    """
    Pick the *number* candidate rows with the lowest predicted bandgap.
    """
    ranked = candidate_data.sort_values('bandgap_pred', ascending=True)
    return ranked.head(number)
|
d83d6520276f9d67fc913927c79f6d4e24c9b48b
| 22,904
|
def details(client, tld_name):
    """
    Get TLD details via the client's domain API.
    """
    tld_lookup = client.domain.tld
    return tld_lookup(tld_name=tld_name)
|
c6aa9463880d7e84d63c775fa596a765633f816d
| 22,905
|
def welcome():
    """List all available api routes."""
    # Returns a plain-HTML index of the API's routes. The f-strings below
    # contain no placeholders; adjacent literals concatenate into one string,
    # so the <br/> tags are the only line separation the browser sees.
    return (
        f"<b>Available Routes</b><br/>"
        f"<br/>"
        f"List of the dataset's most recent year's precipitation by date:<br/>"
        f"/api/v1.0/precipitation<br/>"
        f"<br/>"
        f"<br/>"
        f"List of all stations which took measurements:<br/>"
        f"/api/v1.0/stations<br/>"
        f"<br/>"
        f"<br/>"
        f"List of the dataset's most recent year's temperature by date:<br/>"
        f"/api/v1.0/tobs<br/>"
        f"<br/>"
        f"<br/>"
        f"List of the dataset's content, based on a user-supplied start date:<br/>"
        f"/api/v1.0/[start]"
        f"<br/>"
        f"<br/>"
        f"List of the dataset's content, based on a user-supplied start and end date:<br/>"
        f"/api/v1.0/[start]/[end]"
    )
|
35464de0910d6b1eace6114cc51f70a28a51a6ed
| 22,907
|
def translate_macros(macro_dic: dict, data: str) -> str:
    """Expand macros in a string.

    :param macro_dic: mapping of macro_name -> macro_value
    :param data: text in which every macro_name occurrence is replaced
    :return: the text with all macros substituted
    """
    expanded = data
    for name, value in macro_dic.items():
        expanded = expanded.replace(name, value)
    return expanded
|
dff25b05229477db2ce2de5eae98585642e13d12
| 22,908
|
from typing import Iterable
def base_typed(obj):
    """Recursive reflection method to convert any object property into a
    comparable form.

    Base scalars, callables, and numpy scalars pass through; iterables are
    converted element-wise (numpy containers become plain lists); dicts and
    arbitrary objects become plain dicts of converted values.
    """
    BASE_TYPES = [str, int, float, bool, type(None)]
    T = type(obj)
    from_numpy = T.__module__ == "numpy"
    if (
        T in BASE_TYPES
        or callable(obj)
        or (from_numpy and not isinstance(obj, Iterable))
    ):
        return obj
    # BUG FIX: dict is Iterable, so it must be handled before the generic
    # Iterable branch — otherwise iteration yields only the keys and
    # `dict(list_of_keys)` raises ValueError.
    if isinstance(obj, dict):
        return {k: base_typed(v) for k, v in obj.items()}
    if isinstance(obj, Iterable):
        base_items = [base_typed(item) for item in obj]
        return base_items if from_numpy else T(base_items)
    # Arbitrary object: compare by its instance attributes.
    return {k: base_typed(v) for k, v in obj.__dict__.items()}
|
2e06ec4fe6871ab606c3de1b5c3564036d98871c
| 22,909
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.