content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
from pathlib import Path
from typing import Generator
def collect_input_files(input_directory_path: Path) -> Generator[Path, None, None]:
    """
    Lazily yield every entry inside the input directory tree.

    Note: ``glob('**/*')`` yields subdirectories as well as files, and the
    result is a generator, not a list.

    :param input_directory_path: Full path to the input directory.
    :return: Generator over the full path of every entry (files and
        directories) inside the input folder, recursively.
    """
    return input_directory_path.glob('**/*')
|
06e2ed94c155c1759e70c96af99054a93147b089
| 236,350
|
def elm2ind(el, m):
    """
    Convert (el, m) spherical harmonics indices to a single index
    in the `omegacomplex` array.

    Parameters
    ----------
    el: int or ndarray of ints
    m: int or ndarray of ints

    Returns
    -------
    ind: int
    """
    # el*el + el + m, factored for readability
    return el * (el + 1) + m
|
29c3b694d04ff715068dfea97dc7153fca16b950
| 426,345
|
def _list_to_index_dict(lst):
"""Create dictionary mapping list items to their indices in the list."""
return {item: n for n, item in enumerate(lst)}
|
38d8b9a963ebdd40b8f14b3052937e3450853fab
| 630,097
|
def second_arg(x, y, *args, **kw):
    """Return the second positional argument unchanged, ignoring the rest."""
    return y
|
ed06a4ee018f0de029c6a0be32d54a4ebaa0d50c
| 599,821
|
def hasnested(container, *chain):
    """Checks if chain contains in container.

    Parameters
    ----------
    container: `collections.abc.Container`
    chain: tuple
        List of keywords.

    Returns
    -------
    out: bool
        True if `chain[0]` in container and `chain[1]` in `container[chain[0]]` etc.
        or if chain is empty, else False.
    """
    # Bug fix: an empty chain previously raised IndexError at chain[0],
    # although the docstring promises True for that case.
    if not chain:
        return True
    key, rest = chain[0], chain[1:]
    if key in container:
        return hasnested(container[key], *rest) if rest else True
    return False
|
3903a0b6bdcb3906f6d47385e3f8df0f2279aabd
| 343,422
|
from datetime import datetime
def get_ages(pt_dob, examdates):
    """
    Calculate the patient's age at each exam date.

    :param pt_dob: string - Date of birth of patient ("/%Y" format)
    :param examdates: list - Exam dates ("%m/%d/%Y" format)
    :return: list - Age (years, rounded to 1 decimal) at each exam date
    """
    dob = datetime.strptime(pt_dob, "/%Y")
    return [
        round((datetime.strptime(exam, "%m/%d/%Y") - dob).days / 365.25, 1)
        for exam in examdates
    ]
|
9390c737576dbf3eb9b94bccc5bd7a1e24938df2
| 569,490
|
def synergy_rows_list(ws, signal_names):
    """
    Collect the rows of column A whose cell value is one of the signals.

    Params
    - ws: openpyxl worksheet object
    - signal_names: names given to the signals by the user, e.g. "CFP"
    Returns
    - rows: list of tuples, each containing ('signal', signal_row_position)
    """
    matches = []
    for cell in ws['A']:
        if cell.value in signal_names:
            matches.append((cell.value, cell.row))
    return matches
|
69f117a50e0682f50a6b0d84f235910cd657e223
| 667,486
|
def gari(b2, b3, b4, b8):
    """
    Green Atmospherically Resistant Vegetation Index \
    (Gitelson, Kaufman and Merzlyak, 1996).

    .. math:: GARI = b8 - (b3 - (b2 - b4)) / b8 + (b3 - (b2 - b4))

    :param b2: Blue.
    :type b2: numpy.ndarray or float
    :param b3: Green.
    :type b3: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b8: NIR.
    :type b8: numpy.ndarray or float
    :returns GARI: Index value

    .. Tip::
        Gitelson, A. A., Kaufman, Y. J., & Merzlyak, M. N. (1996). Use of a \
        green channel in remote sensing of global vegetation from EOS-MODIS. \
        Remote Sensing of Environment, 58(3), 289–298. \
        doi:10.1016/s0034-4257(96)00072-7
    """
    # Shared sub-expression of the published formula.
    green_term = b3 - (b2 - b4)
    return b8 - green_term / b8 + green_term
|
96007b96e452694f792b29331b1de36465a870e4
| 466,419
|
def make_table(data, col_names):
    """Render rows of strings as an RST grid table.

    Code for this RST-formatted table generator comes from
    http://stackoverflow.com/a/11350643
    """
    n_cols = len(data[0])
    assert n_cols == len(col_names)
    # Each column is as wide as its widest cell or its header name.
    widths = [
        max(max(len(row[i]) for row in data), len(col_names[i]))
        for i in range(n_cols)
    ]
    fmt = ' '.join('{:<%d}' % w for w in widths)
    divider = fmt.format(*('=' * w for w in widths))
    header = fmt.format(*col_names)
    body = '\n'.join(fmt.format(*row) for row in data)
    return '\n'.join((divider, header, divider, body, divider))
|
cb0667fa4b3d2fe36c6ffa364f82de4a3b06f211
| 455,853
|
import typing
def _fixed_slices(psize: int, dlim: int, overlap: int, start: int = 0) -> typing.Tuple[slice, ...]:
    """
    Creates fixed slicing of a single axis. Only last patch exceeds dlim.
    Args
        psize: size of patch
        dlim: size of data
        overlap: overlap between patches
        start: where to start patches, by default 0
    Returns
        typing.Tuple[slice, ...]: ordered slices for a single axis
    """
    upper_limit = 0
    lower_limit = start
    idx = 0
    crops = []
    while upper_limit < dlim:
        # Every patch after the first one starts `overlap` elements before
        # the end of the previous patch.
        if idx != 0:
            lower_limit = lower_limit - overlap
        upper_limit = lower_limit + psize
        crops.append(slice(lower_limit, upper_limit))
        lower_limit = upper_limit
        idx += 1
    return tuple(crops)
|
edb9735f95cb6e82e8d2bf61c63e72cd21106cae
| 441,353
|
def write_csv(tmp_path):
    """
    Write a report1.csv into the given temporary folder for the
    purposes of testing, and return its path.
    """
    rows = (
        'NFLX,3,99.66,319,998.1,957,-41.1,-0.041\r\n',
        'XRX,40,33.94,30,1357.6,1200,-157.6,-0.116\r\n',
    )
    target = tmp_path / 'report1.csv'
    with open(target, 'w', newline='') as handle:
        handle.writelines(rows)
    return target
|
e96c55bc3a575b3ef165d9032ef2285669150aa5
| 449,806
|
def sort_words(string_in):
    """
    :param string_in:
    :return list_string:
    Takes a string and returns a sorted list of the string's contents
    If the input string is empty it returns an empty list
    """
    if string_in == '':
        return []
    # The original passed maxsplit equal to the number of separators, which
    # is the same as no limit, and wrapped the result (already a list) in
    # list() before sorting — both redundant.  split(' ') is kept (rather
    # than split()) so runs of spaces still produce empty-string tokens.
    return sorted(string_in.split(' '))
|
6ac6068d2cc89a9c2ff229caff77698a3a81b8dd
| 94,316
|
import json
def load_json_from_file(file_path):
    """Loads and returns json data from the file at file_path.

    arguments:
        file_path: a path of the json file to load.
    """
    with open(file_path, "rb") as handle:
        return json.load(handle)
|
6b9e82054a6198ec3186e4da927ea2b1f2835795
| 217,609
|
def get_device_name(revision):
    """
    Returns the device name based on the info byte array values returned by atcab_info
    """
    known_devices = {
        0x10: 'ATECC108A',
        0x50: 'ATECC508A',
        0x60: 'ATECC608A',
        0x00: 'ATSHA204A',
        0x02: 'ATSHA204A',
    }
    # The third info byte identifies the device family.
    device_byte = revision[2]
    if device_byte in known_devices:
        return known_devices[device_byte]
    return 'UNKNOWN'
|
a3f18da729894eef80aef2616516e33bdf5c87aa
| 486,923
|
import base64
import hashlib
def sha256_encode(message):
    """
    Encode a message with SHA256 as base64
    """
    digest = hashlib.sha256(message.encode()).digest()
    return base64.b64encode(digest).decode()
|
5890c610b1b8eef5048fb524a3041391fd88994d
| 589,959
|
def pack_float64(float_num):
    """
    Package 'std_msgs/Float64' message.

    Wraps the value in the dict shape expected by the message type.
    """
    return {"data": float_num}
|
9a39206caefa89b7a1cab67837ddd0e4ace26137
| 611,324
|
from typing import List
def contains_negative(span: List[int], offset: int) -> bool:
    """
    Checks if a recombined span contains negative values after the
    offset is subtracted from each index.
    """
    shift = int(offset)
    return any(idx - shift < 0 for idx in span)
|
c9f77c40ff1ba7d1cc7cee366e614c749a53f030
| 568,297
|
def _prod(op1, op2):
"""Product of two operators, allowing for one of them to be None."""
if op1 is None or op2 is None:
return None
else:
return op1 * op2
|
2a1048b995b680b43d67d8f3c237fc42f5d16bc6
| 667,627
|
import yaml
def read_yaml_file(filename):
    """Read yaml file and return python object

    Parameters
    ----------
    filename: `file`
        yaml file to open and parse

    Returns
    -------
    yaml `dict`
    """
    # The previous try/except block only re-raised the caught exception
    # unchanged, so it added nothing and has been removed.
    with open(filename, 'r') as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
|
ab285c6bb0d184bec054b33e1dab94b17d8afaad
| 261,930
|
def prop_all(printer, ast):
    """Prints an all property "A ..."."""
    return 'A{}'.format(printer.ast_to_string(ast["prop"]))
|
0a03f04d955251918a1ea7dcf3576f30cb790656
| 696,568
|
def extract_mocha_summary(lines):
    """
    Parse mocha summary lines into pass/fail counts.

    Example mocha summary lines (both lines can be missing if no tests passed/failed):
        ✔ 3 tests completed
        ✖ 1 test failed
    """
    passed = failed = 0
    for line in lines:
        if not line:
            continue
        marker = line[0]
        if marker == '✔':
            passed = int(line.split()[1])
        elif marker == '✖':
            failed = int(line.split()[1])
    return {
        'total_tests': passed + failed,
        'passed_tests': passed,
        'failed_tests': failed
    }
|
53d671c3d18f0421cbb512835a84f71a9588f947
| 702,727
|
import re
def get_suggested_nickname(user):
    """Returns the suggested Account.nickname based on a user.nickname"""
    # Mirror the original `user and user.nickname() or ''` semantics:
    # a falsy user OR a falsy nickname both yield the empty string.
    base = user.nickname() if user else ''
    return re.sub('@.*', '', base or '')
|
5c66f348132ee3f25ade66a5de17120ac837cf6b
| 215,831
|
import random
def rand_suffix(length=7):
    """Generate a random suffix of `length` chars"""
    charset = '0123456789abcdefghijklmnopqrstuvwxyz'
    picked = []
    for _ in range(length):
        picked.append(random.choice(charset))
    return ''.join(picked)
|
7af309ccb9e2ba477d491209e441208f9dd8bcc8
| 637,580
|
def _hdf5_yt_array_lite(fh, field):
"""
Read an hdf5 dataset. If that dataset has a "units" attribute,
return that as well, but do not cast as a unyt_array.
"""
units = ""
if "units" in fh[field].attrs:
units = fh[field].attrs["units"]
if units == "dimensionless": units = ""
return (fh[field][()], units)
|
c03f5f8e5f0c62cb5d0ef18b9aee0ed629a47000
| 583,399
|
def _extract_pipeline_of_pvalueish(pvalueish):
"""Extracts the pipeline that the given pvalueish belongs to."""
if isinstance(pvalueish, tuple):
pvalue = pvalueish[0]
elif isinstance(pvalueish, dict):
pvalue = next(iter(pvalueish.values()))
else:
pvalue = pvalueish
if hasattr(pvalue, 'pipeline'):
return pvalue.pipeline
return None
|
9d9c7e1f007d9986feeb428047a6ae4a15fc4cae
| 74,569
|
def decode_channel_parameters(channel):
    """Decode a channel object's parameters into human-readable format.

    Mutates and returns the given channel mapping.
    """
    channel_types = {
        1: 'device',
        5: 'static',
        6: 'user input',
        7: 'system'
    }
    io_options = {
        0: 'readonly',
        1: 'readwrite'
    }
    datatype_options = {
        1: "float",
        2: 'string',
        3: 'integer',
        4: 'boolean',
        5: 'datetime',
        6: 'timespan',
        7: 'file',
        8: 'latlng'
    }
    translations = (
        ('channelType', channel_types),
        ('io', io_options),
        ('dataType', datatype_options),
    )
    for key, table in translations:
        channel[key] = table[channel[key]]
    return channel
|
ea976cbf4872bf8a2fd8de3bea05d8d6bf8b019c
| 505,850
|
import re
def coerceColType(colType):
    """
    Given a colType, coerce to closest native SQLite type and return that, otherwise raise a ValueError.

    Parameters:
        colType (str): column type from DBML specification.
    Returns:
        str: valid SQLite column type.
    """
    colType = colType.upper()
    # Native types map to themselves; every alias maps to its native type.
    # The alias groups are disjoint, so lookup order does not matter.
    alias_groups = {
        'NULL': ('NULL', 'NONE', 'NIL'),
        'INTEGER': ('INTEGER', 'BOOL', 'BOOLEAN', 'INT', 'TINYINT', 'SMALLINT',
                    'MEDIUMINT', 'LONGINT', 'BIGINT', 'YEAR'),
        'REAL': ('REAL', 'FLOAT', 'DOUBLE', 'DECIMAL', 'NUMERIC'),
        'TEXT': ('TEXT', 'STR', 'DATE', 'DATETIME', 'TIMESTAMP', 'TIME',
                 'VARCHAR', 'TINYTEXT', 'SMALLTEXT', 'MEDIUMTEXT', 'LONGTEXT'),
        'BLOB': ('BLOB', 'TINYBLOB', 'SMALLBLOB', 'MEDIUMBLOB', 'LONGBLOB',
                 'BYTE', 'BYTES', 'UUID'),
    }
    for native, aliases in alias_groups.items():
        if colType in aliases:
            return native
    # Sized VARCHARs, e.g. VARCHAR(255), also coerce to TEXT.
    if re.search(r'VARCHAR\([0-9]+\)', colType):
        return 'TEXT'
    raise ValueError(f'Could not figure out how to coerce "{colType}" to valid SQLite type.')
|
acff9fac321b26b1a3591eb81f2ce24997a130fd
| 672,832
|
def verify_setup_command(cmd):
    """
    Verify the setup command.

    Currently a stub: every command is reported as valid.

    :param cmd: command string to be verified (string).
    :return: pilot error code (int), diagnostics (string).
    """
    return 0, ""
|
1e5cba884047e4a13d3c08749e62ab7a34f8d932
| 571,557
|
def all_same_length(seqs):
    """
    Ensure all strings are of same length and
    determine what that length is
    """
    expected = len(seqs[0])
    assert all(len(seq) == expected for seq in seqs)
    return expected
|
2c80197fd076604a9340a9ee890ce5820102d563
| 416,733
|
def get_type_from_path(path):
    """Get the "file type" from a path.

    This is just the last bit of text following the last ".", by definition.
    A path without any "." is returned unchanged.
    """
    # rpartition yields ('', '', path) when there is no '.', so the whole
    # path comes back — same as split('.')[-1].
    return path.rpartition('.')[2]
|
ecf59c13bc5bd5dc3a1749de0ff3ff01f40ad037
| 71,064
|
def reverse_dic(dic):
    """Reverses a dictionary

    This assumes the dictionary is injective (later duplicates win otherwise).

    Arguments:
        dic {dict} -- Dictionary to reverse
    Returns:
        dict -- Reversed dictionary
    """
    return {value: key for key, value in dic.items()}
|
a99cf80eb30424016b76b3916cedd09d7dabdba9
| 459,985
|
def mean(dictionary: dict):
    """
    Author: SW
    Replace each iterable value in the dictionary with its mean, in place.

    :param dictionary: dict: an input dictionary of iterables
    :return: dict: the same dictionary with the mean of all values
    """
    for key in dictionary:
        values = dictionary[key]
        dictionary[key] = sum(values) / len(values)
    return dictionary
|
99ac3444f360860ad567b1c0dbdcdd7bb942db6a
| 448,528
|
from typing import Union
def compose_tweet_action(tweet_id: Union[str, int], action: str):
    """:class:`str`: Make a link that lets you interact a tweet with certain actions.

    Parameters
    ------------
    tweet_id: Union[:class:`str`, :class:`int`]
        The tweet id you want to compose.
    action: str
        The action of a link: 'retweet', 'like', or 'reply'.

    Raises
    ------
    TypeError
        If *action* is not one of the supported actions.

    .. versionadded: 1.3.5
    """
    if action.lower() not in ("retweet", "like", "reply"):
        # Bug fix: the TypeError was previously *returned* instead of
        # raised, so callers silently got an exception object as the link.
        raise TypeError("Action must be either 'retweet', 'like', or 'reply'")
    return (
        f"https://twitter.com/intent/{action}?tweet_id={tweet_id}"
        if action != "reply"
        else f"https://twitter.com/intent/tweet?in_reply_to={tweet_id}"
    )
|
671301d5e3029e578eaa320ce5d5f68b0e1195b0
| 521,347
|
def lcfa_pattern(tmp_path):
    """Fixture to mock pattern for LCFA files."""
    filename = ("lcfa-fake-{year}{month}{day}{hour}{minute}{second}-"
                "{end_hour}{end_minute}{end_second}.nc")
    return str(tmp_path / "lcfa-fake" / filename)
|
6a2ca379692fc61f6a17ff3198a7cbe2a09b1181
| 663,574
|
def convert_spanstring(span_string):
    """
    converts a span of tokens (str, e.g. 'word_88..word_91')
    into a list of token IDs (e.g. ['word_88', 'word_89', 'word_90', 'word_91']

    Note: Please don't use this function directly, use spanstring2tokens()
    instead, which checks for non-existing tokens!

    Examples
    --------
    >>> convert_spanstring('word_1')
    ['word_1']
    >>> convert_spanstring('word_2,word_3')
    ['word_2', 'word_3']
    >>> convert_spanstring('word_7..word_11')
    ['word_7', 'word_8', 'word_9', 'word_10', 'word_11']
    >>> convert_spanstring('word_2,word_3,word_7..word_9')
    ['word_2', 'word_3', 'word_7', 'word_8', 'word_9']
    >>> convert_spanstring('word_7..word_9,word_15,word_17..word_19')
    ['word_7', 'word_8', 'word_9', 'word_15', 'word_17', 'word_18', 'word_19']
    """
    prefix_err = "All tokens must share the same prefix: {0} vs. {1}"
    tokens = []
    # An empty/None span string yields an empty token list.
    if not span_string:
        return tokens
    spans = span_string.split(',')
    for span in spans:
        # Each comma-separated piece is either a single token or a
        # 'start..end' range.
        span_elements = span.split('..')
        if len(span_elements) == 1:
            tokens.append(span_elements[0])
        elif len(span_elements) == 2:
            start, end = span_elements
            start_prefix, start_id_str = start.split('_')
            end_prefix, end_id_str = end.split('_')
            assert start_prefix == end_prefix, prefix_err.format(
                start_prefix, end_prefix)
            # Expand the inclusive numeric range into individual token IDs.
            tokens.extend("{0}_{1}".format(start_prefix, token_id)
                          for token_id in range(int(start_id_str),
                                                int(end_id_str)+1))
        else:
            raise ValueError("Can't parse span '{}'".format(span_string))
    # Final consistency pass: every token must be 'prefix_number' and all
    # tokens must share the prefix of the first one.
    first_prefix = tokens[0].split('_')[0]
    for token in tokens:
        token_parts = token.split('_')
        assert len(token_parts) == 2, \
            "All token IDs must use the format prefix + '_' + number"
        assert token_parts[0] == first_prefix, prefix_err.format(
            token_parts[0], first_prefix)
    return tokens
|
db49e1e9ddba1810e43b11bb573ab0f5bc50556e
| 293,837
|
def attribute_ac(M):
    """Compute assortativity for attribute matrix M.

    Parameters
    ----------
    M : numpy.ndarray
        2D ndarray representing the attribute mixing matrix.

    Notes
    -----
    This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)),
    where e is the joint probability distribution (mixing matrix)
    of the specified attribute.

    References
    ----------
    .. [1] M. E. J. Newman, Mixing patterns in networks,
       Physical Review E, 67 026126, 2003
    """
    # Normalize to a joint probability distribution if needed.
    total = M.sum()
    if total != 1.0:
        M = M / total
    sq_sum = (M @ M).sum()
    trace = M.trace()
    return (trace - sq_sum) / (1 - sq_sum)
|
f728a7540566fa47d63ad0d4f07fdaa7509e9699
| 502,094
|
def payor_section_seven_expenses(responses, derived):
    """ Return the monthly cost of section 7 expense for the identified payor """
    payor = derived['child_support_payor']
    if payor == 'Claimant 1':
        return derived['claimant_1_share']
    if payor == 'Claimant 2':
        return derived['claimant_2_share']
    # Any other payor (e.g. 'both') gets the full amount.
    return derived['total_section_seven_expenses']
|
aeff365fbcaabe01d28189a6000490df1f7ecf2a
| 194,605
|
def _sequence_of_strings(s_or_l):
"""if the argument is a string, wraps a tuple around it.
Otherwise, returns the argument untouched
"""
if isinstance(s_or_l, type('')):
return (s_or_l,)
else:
return s_or_l
|
3b96b31e88c3304b0ac22e2eb8ac13c31c767938
| 145,851
|
def splinter_remote_url(request):
    """Remote webdriver url.

    :return: URL of remote webdriver.
    """
    option = request.config.option
    return option.splinter_remote_url
|
17bf9bf3ebd7296a2305fe9edeb7168fbca7db10
| 705,102
|
import uuid
import json
def _create_uuid(filename):
"""Create a uuid for the device."""
with open(filename, "w") as fptr:
new_uuid = uuid.uuid4()
fptr.write(json.dumps({"nexia_uuid": str(new_uuid)}))
return new_uuid
|
26ab24f444377b9f2c63b569af1b3924099f1a84
| 76,236
|
def keynum(msg):
    """Returns the key number of a note on, off, or aftertouch message."""
    # Byte 1 of the MIDI message carries the key number.
    key_number = msg[1]
    return key_number
|
a0a3605a7e8ef5ea7d8192f79d7c2d0d884692c3
| 337,844
|
def Q3(x):
    """Return the third quartile (0.75 quantile) of *x*."""
    third_quartile = x.quantile(0.75)
    return third_quartile
|
a8794e0187a9376d777fbf482d16e6e15010bb8e
| 343,697
|
def is_identical(root1, root2):
    """
    Determine if Two Trees are Identical.

    Two trees are identical when both are empty, or when their root data
    match and both subtrees are recursively identical.
    """
    # Both trees empty: identical.
    if root1 is None and root2 is None:
        return True
    # Exactly one empty: not identical (the old code reached this via an
    # implicit fall-through; now it is explicit).
    if root1 is None or root2 is None:
        return False
    # Idiom fix (`!= None` → `is None` checks) and short-circuiting: the
    # subtree comparisons are skipped as soon as one check fails.
    return (root1.data == root2.data
            and is_identical(root1.left, root2.left)
            and is_identical(root1.right, root2.right))
|
cd6301ab2720e9b7407c55537ffbaf36661f8ea3
| 221,736
|
def set_default_action(args, subparser):
    """
    Look at the args passed and determine if there is a subparse action set for
    it. If there is, then return the normal set of args. If NOT then append the
    default 'install' action and return it.

    This is primarily to get around a shortcoming in python2 :-|

    Args:
        args: list, of the args passed to the script
    Returns:
        list
    """
    passed = list(args)
    if not any(action in passed for action in subparser.choices):
        passed.append('install')
    return passed
|
3768b1ccfee94623cbfa4397641886c41c977239
| 218,708
|
def _translate_time(time):
    """
    Translate time from format:
        Feb 8 01:44:59 EST 2013
    to format:
        2013/03/06-00:17:54

    :param time: whitespace-separated string of exactly five parts:
        month-abbreviation, day, HH:MM:SS, timezone, year.  The timezone
        part is validated for presence only and dropped from the output.
    :return: string in "YYYY/MM/DD-HH:MM:SS" form.
    :raises Exception: if any part is missing or malformed.
    """
    # get all parts
    time_parts = time.split()
    # do we have the correct number of parts?
    if len(time_parts) != 5:
        raise Exception("Unexpected number of parts in time: " + time)
    # validate month
    months = {"Jan":"01", "Feb":"02", "Mar":"03", "Apr": "04",
              "May":"05", "Jun":"06", "Jul":"07", "Aug":"08",
              "Sep":"09", "Oct":"10", "Nov":"11", "Dec":"12"}
    if time_parts[0] in months:
        time_parts[0] = months.get(time_parts[0])
    else:
        raise Exception("Unexpected format when translating month " +
                        time_parts[0] + " of time: " + time)
    # time_parts[1] should be a number representing the day.
    try:
        int(time_parts[1])
    except ValueError:
        raise Exception("Unexpected format when translating day " +
                        time_parts[1] + " of time: " + time)
    else:
        # if day is less than 10 prepend a 0.
        if int(time_parts[1]) < 10:
            time_parts[1] = "0" + time_parts[1]
    # validate hour:minute:second
    hour_minute_second = time_parts[2].split(":")
    if (len(hour_minute_second) != 3 or not
            hour_minute_second[0].isdigit() or not
            hour_minute_second[1].isdigit() or not
            hour_minute_second[2].isdigit()):
        raise Exception("Unexpected format when translating "
                        "hour:minute:second " + time_parts[2] +
                        " of time: " + time)
    # time_parts[4] should be a number representing the year.
    try:
        int(time_parts[4])
    except ValueError:
        raise Exception("Unexpected format when translating year " +
                        time_parts[4] + " of time: " + time)
    # Reassemble as year/month/day-HH:MM:SS; the timezone (time_parts[3])
    # is intentionally discarded.
    return (time_parts[4] + "/" + time_parts[0] + "/" + time_parts[1] +
            "-" + time_parts[2])
|
410ba083a32867bebed4d34ba3d04fafdd95fcbd
| 263,862
|
import json
def _parse_json(schema_path):
"""Parse the schema, return JSON"""
with open(schema_path) as stream:
data = json.load(stream)
return data
|
a7422046751de640c105589dd1b4d54911d45a40
| 195,632
|
def get_image_uri(aws_cli, account, repository):
    """
    Finds the repository URI for a given ECR resource.

    Returns None if no repository with that name exists.
    """
    repos = aws_cli.resources[account]['ecr']
    return next(
        (repo['repositoryUri'] for repo in repos
         if repo['repositoryName'] == repository),
        None,
    )
|
e4a301e1ef39d19eee474431a00c029eba3f94ee
| 403,938
|
import random
def estimate_occ_per_ap(prob_dist=(0.405, 0.345, 0.125, 0.092, 0.033)):
    """
    Randomly generates a number of occupants between 1 and 5

    Parameters
    ----------
    prob_dist : sequence (of floats), optional
        Defines probability distribution of occupants per apartment
        Default: (0.405, 0.345, 0.125, 0.092, 0.033)
        Based on data of Statistisches Bundesamt (2012)
        https://www.destatis.de/DE/ZahlenFakten/Indikatoren/LangeReihen/Bevoelkerung/lrbev05.html;jsessionid=4AACC10D2225591EC88C40EDEFB5EDAC.cae2

    Returns
    -------
    nb_occ : int
        Number of occupants within one apartment
    """
    # The default is now an (immutable) tuple, avoiding the mutable-default
    # pitfall; it is read-only, so list callers are unaffected.
    # Generate random float between 0 and 1 (0 and 1 included!)
    rand_val = random.randint(0, 100000) / 100000
    # Walk the cumulative distribution of the first four categories;
    # everything beyond falls into the 5-occupant bucket, exactly as the
    # original chained elif comparisons did.
    cumulative = 0.0
    for nb_occ, prob in enumerate(prob_dist[:4], start=1):
        cumulative += prob
        if rand_val < cumulative:
            return nb_occ
    return 5
|
68e778f73e96822ebf3e47e4d9d290da8ca96e53
| 615,217
|
def update_custom_certs(
    self,
    alias: str,
    key: str,
    expiration: str,
    issuer: str,
    issued_to: str,
    last_timestamp: str = "",
) -> bool:
    """Add new or update existing custom CA certificate

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - customCerts
          - POST
          - /customCert

    :param alias: Alias used to identify the CA Certificate
    :type alias: str
    :param key: The CA Certificate
    :type key: str
    :param expiration: CA Certificate's expiration date
    :type expiration: str
    :param issuer: CA Certificate's issuer
    :type issuer: str
    :param issued_to: CA Certificate's issued to
    :type issued_to: str
    :param last_timestamp: The timestamp of the last time the CA
        Certificate was updated in Orchestrator
    :type last_timestamp: str, optional
    :return: Returns True/False based on successful call
    :rtype: bool
    """
    payload = {
        "alias": alias,
        "key": key,
        "lastUpdatedTimestamp": last_timestamp,
        "expiration": expiration,
        "issuer": issuer,
        "issuedTo": issued_to,
    }
    return self._post(
        "/customCert",
        data=payload,
        expected_status=[204],
        return_type="bool",
    )
|
fa53b4e30559bbeda5d484541eb44af5df4b508c
| 445,145
|
def name(name):
    """Give a command a alternate name."""
    def _decorator(func):
        # Stash the alternate name on the function object itself.
        func._cmd_name = name
        return func
    return _decorator
|
f03f80f1022b87380ba2a15b2ff783885b722556
| 651,646
|
import re
def _except(txt, pattern):
"""
Show only text that does not match a pattern.
"""
rgx = "^.*({pattern}).*$".format(pattern=pattern)
unmatched = [
line for line in txt.splitlines() if not re.search(rgx, line, re.I)
]
return "\n".join(unmatched)
|
537e6631c09040e172f8309d171715d3c0794602
| 647,727
|
import re
def did_parse(did):
    """
    Parse a DID into it's parts.

    :param did: Asset did, str.
    :return: Python dictionary with the method and the id.
    """
    if not isinstance(did, str):
        raise TypeError(f'Expecting DID of string type, got {did} of {type(did)} type')
    match = re.match('^did:([a-z0-9]+):([a-zA-Z0-9-.]+)(.*)', did)
    if match is None:
        raise ValueError(f'DID {did} does not seem to be valid.')
    method, identifier = match.group(1), match.group(2)
    return {'method': method, 'id': identifier}
|
a0ed14d68aac933ead173b53ba26a80c1e6c83fd
| 35,905
|
def _LDMODULEVERSION(target, source, env, for_signature):
"""
Return "." + version if it's set, otherwise just a blank
"""
value = env.subst('$LDMODULEVERSION', target=target, source=source)
# print("_has_LDMODULEVERSION:%s"%value)
if value:
return "."+value
else:
return ""
|
9b3bb78e65014f73be483d67572d5a70a3efef75
| 576,598
|
def humanize_size(size):
"""
Return the file size as a nice, readable string.
"""
for limit, suffix in ((1024**3, 'GiB'), (1024**2, 'MiB'), (1024, 'KiB'), (1, 'B')):
hsize = float(size) / limit
if hsize > 0.5:
return '%.2f %s' % (hsize, suffix)
|
59d20601903af3ef36667a843c5f2f4c6c331166
| 507,959
|
import random
def sample_with_replacement(population, k):
    """ Sample <k> items from iterable <population> with replacement """
    size = len(population)
    items = list(population)
    picks = []
    for _ in range(k):
        # Uniform index in [0, size); same draw as the original.
        picks.append(items[int(size * random.random())])
    return picks
|
4114a49faae9981dc2f850db4465ec1ccdc7101c
| 31,381
|
def _is_numeric(obj):
"""Return True if obj is a number, otherwise False.
>>> _is_numeric(2.5)
True
>>> _is_numeric('spam')
False
"""
try:
obj + 0
except TypeError:
return False
else:
return True
|
8122eea635fd5ed9b2d0e42bda284631cc6cd07b
| 17,249
|
import re
def headers_to_strings(headers, titled_key=False):
    """Convert HTTP headers to multi-line string.

    When *titled_key* is true, keys are title-cased and AWS Credential /
    Signature values are redacted; otherwise keys and values are untouched.
    """
    lines = []
    for key, value in headers.items():
        if titled_key:
            # Match the original order: Signature first, then Credential.
            shown = re.sub(r"Signature=([0-9a-f]+)", "Signature=*REDACTED*", value)
            shown = re.sub(r"Credential=([^/]+)", "Credential=*REDACTED*", shown)
            lines.append("{0}: {1}".format(key.title(), shown))
        else:
            lines.append("{0}: {1}".format(key, value))
    return "\n".join(lines)
|
8dd64c582c4c6abcb13525406455caf5c4e38547
| 81,769
|
import torch
import glob
def lazily_load_dataset(corpus_type, opt):
    """
    Dataset generator. Don't do extra stuff here, like printing,
    because they will be postponed to the first loading time.

    Args:
        corpus_type: 'train' or 'valid'
    Returns:
        A list of dataset, the dataset(s) are lazily loaded.
    """
    assert corpus_type in ["train", "valid"]

    def _load(pt_file):
        dataset = torch.load(pt_file)
        print('Loading %s dataset from %s, number of examples: %d' %
              (corpus_type, pt_file, len(dataset)))
        return dataset

    # Sharded datasets match '<data>.<type>.<index>.pt'; sort by file name
    # so shards load in increasing index order.
    shard_files = sorted(glob.glob(opt.data + '.' + corpus_type + '.[0-9]*.pt'))
    if not shard_files:
        # Only one onmt.io.*Dataset, simple!
        shard_files = [opt.data + '.' + corpus_type + '.pt']
    for pt_file in shard_files:
        yield _load(pt_file)
|
ccdee7cebbe293225fd708192ef5ce79b454fe99
| 302,940
|
def countIncreasingNumbers(numbers: list[int]) -> int:
    """Count how many entries in the list are greater than their predecessor."""
    return sum(
        1 for previous, current in zip(numbers, numbers[1:])
        if current > previous
    )
|
7818d3b7db74d62a08aede1fbf1efab3ed8da3a3
| 128,650
|
from typing import Any
def hasattr_path(data: Any, path: str) -> bool:
    """Tests if attrib string in dot-notation is present in data."""
    current = data
    for part in path.split("."):
        try:
            current = getattr(current, part)
        except AttributeError:
            return False
    return True
|
ce503eee52d366a56f6c2b10cb41396ee74322f4
| 263,579
|
def get_unique_id(home_id: str, value_id: str) -> str:
    """Get unique ID from home ID and value ID."""
    return "{}.{}".format(home_id, value_id)
|
06793047048165e27d280e89e0e59e1fa5718fc1
| 369,146
|
def needs_lchuv_workaround(color):
    """
    Check if LCHuv has high chroma and no lightness.

    LCHuv will have such values all in gamut and create weird graphs.
    This is just due to how LCHuv algorithm handles high chroma with and zero lightness,
    it all gets treated as black which is in gamut for almost any color.
    """
    if not color.space().startswith('lchuv'):
        return False
    if color.l != 0:
        return False
    return not color.normalize().is_nan('hue')
|
34aa2787bd47f060a51774040a9d42e38413194d
| 312,515
|
def startswith(x, lst):
    """ Select longest prefix that matches x from provided list(lst)

    :param x: input string
    :param lst: prefixes to compare with
    :return: longest prefix that matches input string if available otherwise None
    """
    matches = [prefix for prefix in lst if x.startswith(prefix)]
    if not matches:
        return None
    # max() returns the first of equally-long prefixes, matching the
    # original strict-< comparison.
    return max(matches, key=len)
|
bbffef79e3f3c03fb5e41564310df143eef5484d
| 657,524
|
def VersionString(versionTuple):
    """ (x,y,z .. n) -> 'x.y.z...n' """
    return '.'.join(map(str, versionTuple))
|
564951e2535a4bd5c1da976a6af86dfbb8d12fba
| 286,212
|
from datetime import datetime
def get_entry_date(entry):
    """
    Get the last modified date from the file metadata.
    """
    mtime = entry.stat().st_mtime
    return datetime.fromtimestamp(mtime)
|
c14973154b9814faffe07dd62d9119543bb72a8c
| 53,828
|
def steamIdFormula(z, v=0x0110000100000000, y=1):
    """Formula for converting Steam ID to Steam Community ID

    From https://developer.valvesoftware.com/wiki/SteamID

    Args:
        v (int, optional) : account type, defaults to user: 0x0110000100000000
        y (int, optional) : account universe, defaults to public: 1
        z (int) : account id
    Returns:
        Steam Community ID (int)
    """
    community_id = z * 2 + y + v
    return community_id
|
e018e9f604772dce4b4366ac01621d76d3c666cb
| 217,794
|
def all_subclasses(cls):
    """Recursively retrieve all subclasses of the specified class"""
    direct = cls.__subclasses__()
    result = list(direct)
    for subclass in direct:
        result.extend(all_subclasses(subclass))
    return result
|
d720cf1f739c9591b407c46e6ad5b6a9a8be8700
| 116,958
|
def _FormatLogContent(name, content):
"""Helper function for printing out complete log content."""
return '\n'.join((
'----------- Start of %s log -----------\n' % name,
content,
'----------- End of %s log -----------\n' % name,
))
|
23b0f7808f2af63d6aebaa2055fc009d36ff100d
| 81,901
|
def make_Hamiltonian(pauli_list):
    """Creates a matrix operator out of a list of Paulis.

    Args:
        pauli_list : list of list [coeff, Pauli]
    Returns:
        A matrix representing pauli_list
    """
    # sum() starts at 0 and accumulates coeff * matrix, exactly like the
    # original explicit loop.
    return sum(coeff * pauli.to_matrix() for coeff, pauli in pauli_list)
|
3162acc167864b8afe4348d10df32f9c8c31a9c6
| 432,222
|
def average_list(list):
    """
    Returns average of all values in list, rounded to 2 decimal places.

    :param list: list of values to be averaged
    :return: average of values in list as a float
    """
    # NOTE(review): the parameter shadows the builtin ``list``; kept as-is
    # for backward compatibility with keyword callers.
    # The manual accumulation loop (with a local that shadowed ``sum``)
    # is replaced by the builtin sum(); the 2-decimal formatting is kept
    # byte-for-byte so results are unchanged.
    return float("{:.2f}".format(sum(list) / len(list)))
|
5e6c71917aef2d8e1bc570a060443a793a04920f
| 425,089
|
def combine_dicts(*post_dicts, **post_vars):
    """Combine positional parameters (dictionaries) and individual
    variables specified by keyword into a single parameter dict.

    Later dicts (and finally the keyword variables) override earlier keys.
    """
    combined = {}
    for mapping in post_dicts:
        combined.update(mapping)
    combined.update(post_vars)
    return combined
|
0f25e33300a6eae1e9b2dd879fcb76f7a880e5e8
| 511,057
|
import pickle
def save_obj(obj, file):
    """
    Dump an object with pickle.

    Be aware that pickle is version dependent,
    i.e. objects dumped in Py3 cannot be loaded with Py2.

    :param obj: object to serialize
    :param file: target file path
    :return: True on success, False otherwise (best-effort contract)
    """
    try:
        with open(file, "wb") as f:
            pickle.dump(obj, f)
        print("Object saved to {}".format(file))
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the best-effort False return is kept.
        print("Error: Object not saved...")
        return False
|
b20c17fbfc3113282e7f125bb8a3861d555ca72a
| 303,863
|
def shorten(s: str, max_len: int = 60) -> str:
    """Truncate a long string, appending '...' if a change is made."""
    # Off-by-one fix: a string of exactly max_len characters already fits
    # and is returned unchanged (the old strict `<` needlessly truncated it).
    if len(s) <= max_len:
        return s
    return s[:max_len-3] + '...'
|
8bafe69253e12a67fdb4c476a2d4b55f6ad4d2af
| 13,302
|
import csv
def _read_file_to_dict(path):
"""
Load the problems and the corresponding labels from the *.txt file.
:param path: The full path to the file to read
:return: The dictionary with the problem names as keys and the true class labels as values
"""
label_dict = {}
with open(path, 'r', encoding='utf-8-sig') as truth_file:
truth = csv.reader(truth_file, delimiter=' ')
for problem in truth:
label_dict[problem[0]] = problem[1]
return label_dict
|
83bd3b04afc995176dc4dfefb9863b9f1ba09888
| 703,216
|
def select_rois(cur, exp_id):
    """Selects ROIs from the given experiment.

    Args:
        cur (:obj:`sqlite3.Cursor): Connection's cursor.
        exp_id (int): ID of the experiment.
    Returns:
        List[:obj:`sqlite3.Row`]: Sequence of all ROIs for the experiment.
    """
    query = ("SELECT id, experiment_id, series, "
             "offset_x, offset_y, offset_z, size_x, size_y, "
             "size_z FROM rois WHERE experiment_id = ?")
    cur.execute(query, (exp_id, ))
    return cur.fetchall()
|
d00f883314a28fa5d1fb3b4e2618b6900780b8b8
| 453,763
|
def execute(conn, sql_stmt):
    """Execute the given sql statement and return True on success

    Args:
        conn: a database connection, this function will leave it open
        sql_stmt: the sql statement to execute
    Returns:
        True on success and False on failure
    """
    # MaxCompute connections report success through the instance object.
    if conn.driver == "maxcompute":
        return conn.execute_sql(sql_stmt).is_successful()
    cursor = conn.cursor()
    try:
        cursor.execute(sql_stmt)
        conn.commit()
    except:  # noqa: E722
        return False
    finally:
        cursor.close()
    return True
|
3c0d3f15903e328ac7a906f5b3678626f15d67fe
| 436,605
|
def corr_shape(ncorr, corr_shape):
    """
    Returns the shape of the correlations, given
    ``ncorr`` and the type of correlation shape requested

    Parameters
    ----------
    ncorr : integer
        Number of correlations
    corr_shape : {'flat', 'matrix'}
        Shape of output correlations

    Returns
    -------
    tuple
        Shape tuple describing the correlation dimensions

        * If ``flat`` returns :code:`(ncorr,)`
        * If ``matrix`` returns
            * :code:`(1,)` if :code:`ncorr == 1`
            * :code:`(2,)` if :code:`ncorr == 2`
            * :code:`(2,2)` if :code:`ncorr == 4`
    """
    if corr_shape == "flat":
        return (ncorr,)
    if corr_shape == "matrix":
        # Lookup table replaces the if/elif ladder of the original.
        shape = {1: (1,), 2: (2,), 4: (2, 2)}.get(ncorr)
        if shape is None:
            raise ValueError("ncorr not in (1, 2, 4)")
        return shape
    raise ValueError("corr_shape must be 'flat' or 'matrix'")
|
2841e16cf8ff5c96c7e00da061e6e45cd10ba177
| 229,667
|
def int_to_decimal_str(integer):
    """
    Helper to convert integers (representing cents) into decimal currency
    string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT
    ERRORS ARE NO FUN IN FINANCIAL SYSTEMS.
    @param integer The amount in cents (may be negative)
    @return string The amount in currency with full stop decimal separator
    """
    # Handle the sign separately: the naive slicing of str(-5) used to
    # produce the garbled "0.-5" instead of "-0.05".
    sign = "-" if integer < 0 else ""
    digits = str(abs(integer))
    if len(digits) <= 2:
        return sign + "0." + digits.zfill(2)
    return sign + digits[:-2] + "." + digits[-2:]
|
55ec88780cb03739440d3a8c4f7529f4e90e8f95
| 451,760
|
from typing import Optional
from typing import Type
def is_type_unknown(type_: Optional[Type]) -> bool:
    """Is the type of this variable unknown?

    A type is considered unknown exactly when no type object was
    recorded for it, i.e. when ``type_`` is ``None``.
    """
    if type_ is None:
        return True
    return False
|
d951e5362da8ec6239cf574e443a0c054abd1e70
| 68,779
|
def getChanceAgreement(l1, l2):
    """
    Returns p_e, the probability of chance agreement:
    (1/N^2) * sum(n_k1 * n_k2) for rater1, rater2 and k categories
    (two in this case, 0 or 1), for two binary lists L1 and L2.
    """
    assert len(l1) == len(l2)
    # Sum the per-category count products in one pass.
    total = sum(l1.count(category) * l2.count(category) for category in (0, 1))
    return (1 / float(len(l1) ** 2)) * total
|
a4d11255ab8607c62a140b4bbd145b73b1e18524
| 696,684
|
def get_gene_to_uniprot_list(file_path):
    """
    Args:
        file_path for the 'gene_to_uniprot_list.txt'.
    Returns:
        A dict mapping gene code to UniProt protein entry.
        example: {'TSPAN6': ['O43657']}
    """
    gene_to_uniprot_list = {}
    with open(file_path, 'r') as file:
        for line in file:
            # line example: 'TSPAN6: O43657\n'
            line_split = line.rstrip('\n').split(': ')
            if len(line_split) < 2:
                # Skip blank/malformed lines; the trailing empty string that
                # read().split('\n') produced used to raise an IndexError here.
                continue
            gene_to_uniprot_list[line_split[0]] = line_split[1].split(' ')
    return gene_to_uniprot_list
|
bec21994d275342bd76c021ed0ff61f27491914a
| 135,420
|
def clean_borders(plate_image, epsilon):
    """
    Zero out the pixels close to the borders of the plate image, in place.

    :param plate_image: plate image (gray scaled)
    :param epsilon: width of the cleaning zone around the borders (epsilon_h, epsilon_w)
    :return: cleaned plate image (the same array, modified in place)
    """
    rows, cols = plate_image.shape[0], plate_image.shape[1]
    eps_h, eps_w = epsilon[0], epsilon[1]
    # Top and bottom bands.
    plate_image[:eps_h, :] = 0
    plate_image[rows - eps_h:rows, :] = 0
    # Left and right bands.
    plate_image[:, :eps_w] = 0
    plate_image[:, cols - eps_w:cols] = 0
    return plate_image
|
75e4102cae0c51b540cd984ac6d834686d295d49
| 317,865
|
def get_requires_for_build_wheel(config_settings=None):
    """Returns a list of requirements for building, as strings.

    No extra requirements are needed here, so the list is always empty.
    """
    return list()
|
12e139b2052d7f52ea92a04703f118bd0ae436f8
| 652,797
|
def extract_ints_from_str(str):
    """
    Extract all integers from a string.

    Parameters
    ----------
    str : str
        The input string.

    Returns
    -------
    ints : list
        List of integers in `str`.

    Examples
    --------
    >>> extract_ints_from_str("I have 2 apples and 4 pears")
    [2, 4]
    >>> extract_ints_from_str("I have 0.5 apples and 4 pears")
    [4]
    >>> extract_ints_from_str("I have no apples and no pears")
    []
    """
    # Keep only whitespace-separated tokens made entirely of digits.
    digit_tokens = filter(lambda token: token.isdigit(), str.split())
    return list(map(int, digit_tokens))
|
9e08883aa05503b2c6c6fea5faa214206a698cd9
| 543,023
|
def grubler(L, J1, J2):
    """
    Calculates the DOF of a linkage using Grübler-Kutzbach criterion

    Parameters
    ----------
    L : int
        Number of links
    J1 : int
        1-DOF pairs
    J2 : int
        2-DOF pairs

    Returns
    -------
    M : int
        Degrees of freedom
    """
    # One link is the fixed ground link, hence (L - 1) mobile bodies.
    mobile_bodies = L - 1
    return 3 * mobile_bodies - 2 * J1 - J2
|
e432c2214c0cb9b0455b27b5e5742c518c90a3b5
| 124,918
|
import hashlib
def bin_dbl_sha256(s):
    """
    Perform a double SHA256 operation on the input.

    Args:
        s(bytes or str): message to hash. A ``str`` is UTF-8 encoded first
            (hashlib only accepts bytes; the old docstring claimed str was
            accepted, but passing one raised TypeError).
    Returns:
        bytes: hash.
    """
    if isinstance(s, str):
        s = s.encode('utf-8')
    return hashlib.sha256(hashlib.sha256(s).digest()).digest()
|
f4062fadc7450764c6f537eebdefba49bc952990
| 568,052
|
def create_hive_table_from_hbase_table(hive_connection, table_hive, table_hbase, key, columns, id_task=None):
    """
    Creates a hive table linked to an hbase table
    :param hive_connection: The connection to hive
    :param table_hive: the hive table to be created
    :param table_hbase: the hbase table with the data
    :param key: a list of tuples (hbase_key_name, type) of the hbase key
    :param columns: a list of tuples (hive_column_name, type, "<hbase_column_family>:<hbase_column_name>") of the hbase columns
    :param id_task: The id of the task to generate unique tables
    :return: the name of the hive table
    """
    # Suffix the table name with the task id so concurrent tasks get distinct
    # tables. NOTE(review): assumes id_task is a str — confirm with callers.
    if id_task:
        table_hive = table_hive + '_' + id_task
    # External table backed by the HBaseStorageHandler: the HBase row key maps
    # to a Hive struct built from `key`, and each entry of `columns` maps a
    # Hive column to an "<family>:<qualifier>" HBase column.
    sentence = "CREATE EXTERNAL TABLE IF NOT EXISTS \
            {table_hive}( key struct<{hive_key}>, {hive_columns} ) \
            ROW FORMAT DELIMITED \
            COLLECTION ITEMS TERMINATED BY '~' \
            STORED BY \
            'org.apache.hadoop.hive.hbase.HBaseStorageHandler' \
            WITH SERDEPROPERTIES \
            ('hbase.columns.mapping' = ':key, {hbase_columns}') \
            TBLPROPERTIES \
            ('hbase.table.name' = '{table_hbase}')"
    # Fill in the template: struct fields as "name:type", Hive columns as
    # "name type", and the raw HBase mapping strings from columns[i][2].
    sentence = sentence.format(table_hive=table_hive,
                               hive_key=",".join(["{}:{}".format(k[0],k[1]) for k in key]),
                               hive_columns=",".join(["{} {}".format(c[0],c[1]) for c in columns]),
                               hbase_columns=",".join([c[2] for c in columns]),
                               table_hbase=table_hbase)
    # Surface any driver error with the table name for easier debugging.
    try:
        hive_connection.execute(sentence)
    except Exception as e:
        raise Exception('Failed to create HIVE table {}: {}'.format(table_hive, e))
    return table_hive
|
b984f1d2aa92a0600ab0a49e52075352ee956c12
| 268,201
|
def convert_to_filename(filename,
                        remove_file_extension=True):
    """
    Attempts to make the filename string valid.

    Args:
        filename: string
            String of a potential filename.
        remove_file_extension: bool
            Removes everything after the first found value of "." found in the
            string if set to true.
    Returns:
        A string that is valid for saving.
    """
    if remove_file_extension:
        filename = filename.split(".")[0]
    # Keep alphanumeric or plain-ASCII characters, but never ':'.
    kept = (ch for ch in str(filename)
            if (ch.isalnum() or ch.isascii()) and ch != ":")
    return "".join(kept)
|
7fbeb98e143c23e296d0c6a5cf253e7a24a5767d
| 312,728
|
def df_drop_duplicated_index(df):
    """Drop all duplicated indices in a dataframe or series.

    Keeps the first occurrence of each index label.  Boolean-mask indexing
    already returns a new object, so the original's up-front ``copy()`` was
    redundant and has been removed; the input is never modified.
    """
    return df[~df.index.duplicated(keep='first')]
|
aa9666d098d7f07687603cfafe4b9fd0a595e51e
| 619,327
|
def _prepend_comments_to_lines(content: str, num_symbols: int = 2) -> str:
"""Generate new string with one or more hashtags prepended to each line."""
prepend_str = f"{'#' * num_symbols} "
return "\n".join(f"{prepend_str}{line}" for line in content.splitlines())
|
4645d72ed95beb91bc3d09231af61d698c66b784
| 128,716
|
def crop(x, area):
    """
    * inputs:
        - x (torch.Tensor, required)
                A torch tensor of shape (N, C, H, W) is assumed.
        - area (sequence, required)
                A sequence of length 2 ((X, Y), (W, H)) is assumed.
                sequence[0] (X, Y) is the left corner of an area to be cropped.
                sequence[1] (W, H) is its width and height.
    * returns:
            A torch tensor of shape (N, C, H, W) cropped in the specified area.
    """
    (xmin, ymin), (w, h) = area
    xmax = xmin + w
    ymax = ymin + h
    return x[:, :, ymin:ymax, xmin:xmax]
|
822b51b53744f0f6004343f67bf1624449a06ada
| 665,393
|
import math
def _calculate_label_rotation(startx, starty, endx, endy):
"""
Calculates the appropriate rotation angle for a label on an arrow (matches line, is between -90 and 90 degrees)
:param startx: start of arrow (x)
:param starty: start of arrow (y)
:param endx: end of arrow (x)
:param endy: end of arrow (y)
:return: rotation angle.
"""
return math.degrees(math.atan((endy - starty)/(endx - startx)))
|
45a4928102f040847e99c6983ccbff59aa2c841f
| 665,505
|
import collections
def GetChildPids(processes, pid):
    """Returns all child processes of |pid| from the given |processes| list.

    Args:
        processes: A tuple of (pid, ppid, state) as generated by ps.
        pid: The pid for which to get children.
    Returns:
        A list of child pids (transitively), zombies excluded.
    """
    children_of = collections.defaultdict(list)
    for child_pid, parent_pid, state in processes:
        if 'Z' in state:
            continue  # Ignore zombie processes
        children_of[int(parent_pid)].append(int(child_pid))
    # Depth-first walk (pop from the end) mirroring the original ordering.
    descendants = []
    stack = [pid]
    while stack:
        parent = stack.pop()
        kids = children_of.get(parent)
        if kids:
            stack.extend(kids)
            descendants.extend(kids)
    return descendants
|
531710343f2e767768285c22065a1c3e21003c31
| 536,157
|
import math
def age_band_5_years(age: int) -> str:
    """
    Place age into appropriate 5 year band

    This function takes the age supplied as an argument and returns a string
    representing the relevant 5 year banding.

    Parameters
    ----------
    age : int
        Age of the person

    Returns
    -------
    out : str
        The 5 year age band

    Examples
    --------
    >>> age_band_5_years(3)
    '0-4'
    >>> age_band_5_years(None)
    'Age not known'
    >>> age_band_5_years(95)
    '90 and over'
    """
    # Guard clauses replace the nested if/elif ladder of the original.
    if age is None:
        return "Age not known"
    if age < 0:
        raise ValueError("The age input: {} is too low.".format(age))
    if age >= 150:
        raise ValueError("The age input: {} is too large.".format(age))
    if age >= 90:
        return "90 and over"
    # int(math.floor(...)) keeps the original behaviour for float inputs.
    lowerbound = 5 * int(math.floor(age / 5))
    return "{}-{}".format(lowerbound, lowerbound + 4)
|
4f3a3bc324842283142c7f44655699e1e53822fe
| 373,332
|
def day_of_year(dt):
    """
    Calculates the day of year for a numpy.array of numpy.datetime64
    date-time objects.

    Parameters
    ----------
    dt : numpy.datetime64
        Objects of type datetime64 representing the date for which the day of
        the year will be calculated.

    Returns
    -------
    int
        An integer between 1 and 366 representing the day of year for a
        given date.
    """
    # Truncate to year precision to get Jan 1st, then count whole days.
    year_start = dt.astype("datetime64[Y]")
    days_since_jan1 = (dt - year_start).astype("timedelta64[D]")
    return (days_since_jan1 + 1).astype(int)
|
6717ff9b29826fe19312f688b2c5605b4b986d16
| 479,795
|
def linear_search(List, item):
    """
    Find the element in list using linear search

    Returns the index of the first occurrence of ``item`` in ``List``,
    or -1 if it is not present.

    >>> from pydsa.searching.linear_search import linear_search
    >>> List = [3, 4, 6, 8, 12, 15, 26]
    >>> item = 6
    >>> linear_search(List, item)
    2
    """
    # enumerate replaces the manual len()/range() index bookkeeping.
    for index, value in enumerate(List):
        if value == item:
            return index
    return -1
|
3fddff364a118c8cd40b472733ea0fc0ec6f048d
| 404,227
|
import re
def pod2OptionList(pod):
    """
    Return option names found in POD snippet. Option names are recognized in
    `=item B<option>' constructs.
    @param pod: Snippet in POD format to be analyzed.
    @type pod: str
    @return: All option names contained in POD snippet as a list.
    @rtype: [ str, ..., ]
    """
    # Raw string: "\s" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python). Compiled once, outside the loop.
    item_re = re.compile(r"^=item\s*B<(-[^>]+)>")
    result = []
    for line in pod.split("\n"):
        found = item_re.search(line)
        if found:
            result.append(found.group(1))
    return result
|
8cea80a6a9f58f645676eefdb7bdba2c9f6f4c50
| 253,778
|
def split_thousands(s):
    """
    Splits a number on thousands.

    >>> split_thousands(1000012)
    "1'000'012"
    """
    # None means "no number": report zero.
    if s is None:
        return "0"
    # Strings are parsed as numbers first.
    if isinstance(s, str):
        s = float(s)
    # Whole-valued floats render as integers (no trailing ".0").
    if isinstance(s, float) and s.is_integer():
        s = int(s)
    # Built-in grouping uses commas; swap them for single quotes.
    return "{:,}".format(s).replace(',', "'")
|
68b9db1c5d2d5e6a50aae25f273a4dc31711483f
| 321,566
|
def compute_missing_stats(dat):
    """Computes summary of missing values in the dataset"""
    mask = dat.isnull()
    n_missing = int(mask.sum().sum())
    n_cells = dat.shape[0] * dat.shape[1]
    stats = {
        "total": {
            "num_missing": n_missing,
            "num_missing_pct": float(n_missing / n_cells),
        }
    }
    if n_missing > 0:
        # Per-row counts (axis=1 is equivalent to the original's .T.sum()).
        stats["rows"] = mask.sum(axis=1).to_dict()
        stats["columns"] = mask.sum(axis=0).to_dict()
    return stats
|
291812f7cc2c2b22728d895bb64d237a08f0f601
| 408,715
|
from typing import Iterable
from typing import Any
def nget(dictionary: dict, keys: Iterable, default: Any = None) -> Any:
    """nget - nested get call to easily retrieve nested information with a single call and set a default

    Ex.
        nget(dict, ['key1', 'key2', ..], default)

    Walks ``keys`` in order, descending one dict level per key.  Returns
    ``default`` as soon as a key is absent, and raises KeyError when an
    intermediate value is not a dict.

    (The old docstring also advertised ``nget(dict, key1, key2, .., default)``,
    a varargs form this signature has never supported; it has been removed.)
    """
    for key in keys:
        if not isinstance(dictionary, dict):
            raise KeyError(f'About to attempt retrieval of {key} on improper type of {type(dictionary)}')
        # Idiomatic membership test; the original's ``.keys()`` was redundant.
        if key not in dictionary:
            return default
        dictionary = dictionary[key]
    return dictionary
|
69d7f27b74ecb5f300a9028cafcaf5c92c482bef
| 203,868
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.