content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def ignore_transitive_dependency(name):
    """
    Return True if @name should not be included in the Steam Runtime
    tarball or directly depended on by the metapackage, even though
    packages in the Steam Runtime might have dependencies on it.
    """
    # Set-literal membership: constant-time lookup, same package list.
    return name in {
        # Must be provided by host system
        'libc6',
        'libegl-mesa0',
        'libegl1-mesa',
        'libegl1-mesa-drivers',
        'libgl1-mesa-dri',
        'libgl1-mesa-glx',
        'libgles1-mesa',
        'libgles2-mesa',
        'libglx-mesa0',
        'mesa-opencl-icd',
        'mesa-va-drivers',
        'mesa-vdpau-drivers',
        'mesa-vulkan-drivers',
        # Assumed to be provided by host system if needed
        'ca-certificates',
        'fontconfig',
        'fontconfig-config',
        'gconf2-common',
        'iso-codes',
        'libasound2-data',
        'libatk1.0-data',
        'libavahi-common-data',
        'libdb5.1',
        'libdconf0',
        'libdrm-intel1',
        'libdrm-radeon1',
        'libdrm-nouveau1a',
        'libdrm2',
        'libglapi-mesa',
        'libllvm3.0',
        'libopenal-data',
        'libthai-data',
        'libthai0',
        'libtxc-dxtn-s2tc0',
        'passwd',
        'shared-mime-info',
        'sound-theme-freedesktop',
        'x11-common',
        # Non-essential: only contains localizations
        'libgdk-pixbuf2.0-common',
        'libjson-glib-1.0-common',
        # Depended on by packages that are present for historical
        # reasons
        'libcggl',
        'libstdc++6-4.6-dev',
        'zenity-common',
        # Only exists for packaging/dependency purposes
        'debconf',
        'libjpeg8',  # transitions to libjpeg-turbo8
        'multiarch-support',
        # Used for development in Steam Runtime, but not in
        # chroots/containers that satisfy dependencies
        'dummygl-dev',
    }
|
90835936cfa86b451139df717095ac4bbcd6ca7e
| 71,404
|
import re
def parse_roll_syntax(die_info: str):
    """Parse a dice roll of format '2d10' into a tuple, e.g. (2, 10).

    :param die_info: roll specification; every run of digits is extracted
    :return: tuple of the integers found in the string (empty if none)
    """
    # The previous version returned a lazy ``map`` object, contradicting
    # the documented tuple return; materialize it so callers can index,
    # len(), or compare the result.
    return tuple(int(number) for number in re.findall(r'\d+', die_info))
|
e18b67aa5560538e3fcab7fa8624683f48a67bc5
| 71,406
|
def filter_headline(df, non_disaggregation_columns):
    """Given a dataframe filter it down to just the headline data.

    A row is "headline" when every disaggregation column is missing.
    In the case of multiple units all headline rows per unit are kept.
    """
    keep_cols = [c for c in non_disaggregation_columns if c in df.columns]
    # Mask of rows whose disaggregation columns are entirely NaN.
    is_headline = df.drop(keep_cols, axis=1).isnull().all(axis=1)
    return df.filter(keep_cols, axis=1)[is_headline]
|
3e2697a9802a6d5493ea8b81366ca5bf591d17e4
| 71,409
|
def reste(a, b):
    """
    Return the non-negative remainder of a divided by b.

    a and b are integers, b non-zero; the result is always in
    the range [0, abs(b)).
    """
    remainder = a % b
    # Python's % follows the sign of b; shift negative results up.
    return remainder if remainder >= 0 else remainder + abs(b)
|
95389c47d0402ac6cd0ed8dead2dcd5e3412b055
| 71,412
|
def get_absolute_path(path):
    """
    Resolve a path to its absolute form, expanding a leading '~'.
    :param pathlib.Path path: Path object
    :return: Absolute path
    :rtype: pathlib.Path
    """
    expanded = path.expanduser()
    return expanded.resolve()
|
8e7f0668f1491a0b898751ad2377ad02c08e5f25
| 71,414
|
import math
def bf_wheel(n):
    """
    Bruteforce trial factorization with successive integers as candidate
    factors, using a wheel mod 6 (candidates 6k-1, 6k+1) to skip
    multiples of 2 and 3.

    Fixes over the previous version:
    - ``return n > 1`` inside a generator produced no values for n in
      (2, 3); now 2 and 3 are yielded like any other prime.
    - the "perfect square" pre-pass looped forever for n == 1
      (``n //= 1.0`` never changes n) and could yield composite or float
      "factors" (e.g. 4.0 for n=16); it was redundant anyway and is gone.
    - the upper bound is now checked as ``d*d <= n`` so the last prime
      factor is never missed after n shrinks.

    Arguments:
        n (int) - the number to factor
    Yields:
        int - prime factors of n in non-decreasing order
    Examples:
        >>> list(bf_wheel(20))
        [2, 2, 5]
    """
    if n <= 1:
        return
    # Strip the wheel's prime basis first.
    for p in (2, 3):
        while n % p == 0:
            yield p
            n //= p
    # Remaining factors are of the form 6k +/- 1.
    d = 5
    while d * d <= n:
        for candidate in (d, d + 2):
            while n % candidate == 0:
                yield candidate
                n //= candidate
        d += 6
    # Whatever is left is a single prime factor.
    if n > 1:
        yield n
|
eff52e8c94c5179bc78b70597ebe0a6fa0f287cf
| 71,417
|
from typing import List
def __et_elements_to_string(elements: List[str]) -> str:
    """
    Join already-serialized element strings with newlines.

    The previous docstring claimed the input was a list of ET.Element,
    but the annotation and the implementation both operate on strings;
    the redundant generator expression is dropped as well.

    :param elements: serialized XML element strings
    :return: the strings joined by '\\n'
    """
    return '\n'.join(elements)
|
a668a4e4412dc73b885e504ad9368535d300a9ff
| 71,419
|
def append_to_get(context, replace=True, **kwargs):
"""
Adds/deletes arguments to the current GET value
and returns a querystring containing it.
@argument replace: If true, any existing argument
named in kwargs will have their value overridden.
If false, kwargs are appended only.
@argument kwargs: key-val pairs to add, with a value of
None for deletion. (Use "None" if you don't want that)
"""
updated = context['request'].GET.copy()
if not replace:
# duplicates will be appended to the query
updated.update(kwargs)
else:
# duplicates will be replaced
for arg, val in kwargs.items():
updated[arg] = val
# if the kwarg is None delete it instead
for arg, val in kwargs.items():
if val is None:
updated.pop(arg)
if updated:
return "?" + updated.urlencode()
else:
return ""
|
6cd1d4aea2cab30f7ddcddd9a08fa384a4182c59
| 71,421
|
def listing_stopwords(filename):
    """Return a list of all the stopwords in the given file.

    Each line of the file is expected to hold one word; surrounding
    whitespace (including the newline) is stripped.

    Fixes over the previous version: the file is opened once with a
    context manager (the old code opened it twice and leaked nothing
    only by luck), the bare ``except`` that converted *every* error
    (e.g. PermissionError) into FileNotFoundError is gone — ``open``
    already raises FileNotFoundError for a missing file — and the
    builtin name ``list`` is no longer shadowed.

    :param filename: path to the stopwords file
    :return: list of stripped lines
    :raises OSError: if the file cannot be opened
    """
    with open(filename, 'r') as handle:
        return [line.strip() for line in handle]
|
496eae2244bfd77e8aa1fc57dec208f2a961955b
| 71,427
|
def get_distinct_values(column, session):
    """
    Return a list of the distinct values in a table column,
    skipping NULL rows.
    """
    query = session.query(column).distinct()
    # Each row is a one-element tuple; drop the all-NULL row.
    return [row[0] for row in query if row != (None,)]
|
8b3802c442daaf9d90d5975217b50d245d6871a8
| 71,428
|
def join_lemmas(doc):
    """Return the document's lemmas joined with each token's own whitespace."""
    pieces = [token.lemma_ + token.whitespace_ for token in doc]
    return "".join(pieces)
|
87e6f5b1d826dd9d97519dfe105d9020260d8626
| 71,434
|
def reverse(text):
    """Return *text* with its characters in reverse order."""
    return ''.join(reversed(text))
|
994a75b9d2e5ea9315cd8b9f526eafe4074aa214
| 71,436
|
import re
def canonical_name(artist):
    """Normalize artist names across spelling variations."""
    # Lowercase, collapse runs of spaces, and drop a leading '((' marker.
    name = re.sub(r' {2,}', ' ', artist.lower())
    if name.startswith('(('):
        name = name[2:]
    # Known aliases -> canonical spelling.
    aliases = {
        'rem': 'r. e. m.',
        'r.e.m.': 'r. e. m.',
        'kt tunstall': 'k. t. tunstall',
        'k t tunstall': 'k. t. tunstall',
        'k.t. tunstall': 'k. t. tunstall',
        'kd lang': 'k. d. lang',
        'k d lang': 'k. d. lang',
        'k.d. lang': 'k. d. lang',
        'st vincent': 'st. vincent',
    }
    return aliases.get(name, name)
|
ed00188ccd1c57ad4cce19e7155e5e5a19048728
| 71,437
|
import re
def strip_comments(text: str) -> str:
    """Remove XML comments from a string.

    .. note::
        Lines that held comments are not deleted; only the text from
        "<!--" to "-->" (possibly spanning lines) is removed.

    :param text:
        XML text to strip comments from.
    :return:
        The given `text` without XML comments.
    """
    # DOTALL lets '.' cross newlines so multi-line comments are caught.
    return re.sub(r"<!--.*?-->", "", text, flags=re.DOTALL)
|
6c0a9bc36264a47ecd96d17316a4f031cdfec6d5
| 71,443
|
import re
def parseval(val):
    """Parse a value that could be int, float, % or contain a memory unit.

    Returns None for "-", int for plain integers, float for decimals
    (optionally with an exponent), the percentage scaled by 100 for
    "N%" values, a byte count for "N[KMGT]" values, and the original
    string when nothing matches.
    """
    if val == "-":
        return None
    if re.match(r"\d+$", val):
        return int(val)
    # The '.' was previously unescaped, so it matched ANY character and
    # e.g. "1x5" reached float() and raised ValueError; the exponent
    # sign is now optional so "1e5" parses as a float too.
    if re.match(r"\d+(\.\d+)?([eE][+-]?\d+)?$", val):
        return float(val)
    if re.match(r"\d+(\.\d+)?%$", val):
        # NOTE(review): multiplies by 100 (so "50%" -> 5000.0); scaling
        # preserved as-is — confirm the intended factor with callers.
        return 100 * float(val[:-1])
    if re.match(r"\d+(\.\d+)?[KMGT]$", val):
        e = {"K": 1, "M": 2, "G": 3, "T": 4}[val[-1]]
        return int(float(val[:-1]) * 1024 ** e)
    return val
|
0c91a520d275331a4c3e41fcaf3dd6ad1128ba30
| 71,444
|
import unittest
def skip_unless(condition, reason):  # pragma: no cover
    """Skip tests unless a condition holds.

    Provides the basic behaviour of ``unittest.skipUnless`` on Pythons
    that predate it (< 2.7); delegates to the real thing when present.

    :param bool condition: If ``False``, the test will be skipped
    :param str reason: the reason for skipping the test
    :rtype: callable
    :returns: decorator that hides tests unless condition is ``True``
    """
    if hasattr(unittest, "skipUnless"):
        return unittest.skipUnless(condition, reason)
    # Fallback: identity decorator when the condition holds,
    # otherwise swallow the decorated test entirely.
    if condition:
        return lambda cls: cls
    return lambda cls: None
|
eb4854a206e5dd3396484fc8c02701052fd39981
| 71,446
|
import re
def rootcint_emitter(target, source, env):
    """
    With ROOT >= 6, rootcling generates a <dict>_rdict.pcm mapping file
    in addition to the dictionary source file. Add this "side effect" artifact
    to the list of targets so that SCons can automatically keep track of it.
    """
    # Parse the major version robustly: the previous code looked only at
    # the first character of ROOTVERS, which would misread a future
    # two-digit major (e.g. "10.0" -> 1).
    try:
        major = int(str(env.get('ROOTVERS', '0')).split('.')[0])
    except ValueError:
        major = 0
    if major >= 6:
        if env['PCMNAME']:
            target.append(env['PCMNAME'] + '_rdict.pcm')
        else:
            # Default PCM file name that rootcling generates without -s <pcmname>
            target.append(re.sub(r'\.cxx\Z', '_rdict.pcm', str(target[0])))
    return target, source
|
5a64157614f686c3754022a68f1c7d6fb6a1cfae
| 71,447
|
def map_sequence(seq, sequence_map, unk_item_id):
    """Transform a splitted sequence of items into another sequence of
    ids according to the rules encoded in sequence_map, substituting
    unk_item_id for unknown items.

    seq: iterable
    sequence_map: dict
    unk_item_id: int
    """
    return [sequence_map.get(item, unk_item_id) for item in seq]
|
1206740cc3b2e810168a4a24dffbd0acc870ce46
| 71,448
|
def create_extra_var_str(vars_dict):
    """
    Encode variables into an --extra-vars argument string.

    :param vars_dict: mapping of variable names to values
    :return: a string that can be added to the ansible-playbook exe,
             or '' when the mapping is empty
    """
    if not vars_dict:
        return ''
    pairs = ','.join('"{}":"{}"'.format(k, v) for k, v in vars_dict.items())
    return "--extra-vars='{" + pairs + "}'"
|
af00a41492a7ea2e3eaed32a8581002fe16672f7
| 71,451
|
def kwargs_to_variable_assignment(kwargs: dict, value_representation=repr,
                                  assignment_operator: str = ' = ',
                                  statement_separator: str = '\n',
                                  statement_per_line: bool = False) -> str:
    """
    Convert a dictionary into a string with assignments
    Each assignment is constructed based on:
    key assignment_operator value_representation(value) statement_separator,
    where key and value are the key and value of the dictionary.
    Moreover one can separate the assignment statements by new lines.
    Parameters
    ----------
    kwargs : dict
    assignment_operator: str, optional:
        Assignment operator (" = " in python)
    value_representation: str, optional
        How to represent the value in the assignments (repr function in python)
    statement_separator : str, optional:
        Statement separator (new line in python)
    statement_per_line: bool, optional
        Insert each statement on a different line
    Returns
    -------
    str
        All the assignments.
    >>> kwargs_to_variable_assignment({'a': 2, 'b': "abc"})
    "a = 2\\nb = 'abc'\\n"
    >>> kwargs_to_variable_assignment({'a':2 ,'b': "abc"}, statement_per_line=True)
    "a = 2\\n\\nb = 'abc'\\n"
    >>> kwargs_to_variable_assignment({'a': 2})
    'a = 2\\n'
    >>> kwargs_to_variable_assignment({'a': 2}, statement_per_line=True)
    'a = 2\\n'
    """
    separator = '\n' if statement_per_line else ''
    statements = [
        key + assignment_operator + value_representation(value) + statement_separator
        for key, value in kwargs.items()
    ]
    return separator.join(statements)
|
aec76c6a7b1e29c9540b0cb2a8161f831d2058de
| 71,454
|
def combine_password_with_salt(password, salt):
    """
    Prefix a password with a salt, ready for hashing.

    Args:
        password: the password.
        salt: the salt.
    Returns:
        The salt concatenated with the password (salt first).
    """
    combined = salt + password
    return combined
|
d4ceb2c100521253ef6d562fde3077c2994d2333
| 71,455
|
def exceed_ns_icpm(cln_min, cln_max, cls_min, cls_max, dep_n, dep_s):
    """Calculates exceedances based on the methodology outlined by Max
    Posch in the ICP Mapping manual (section VII.4):
    https://www.umweltbundesamt.de/sites/default/files/medien/4292/dokumente/ch7-mapman-2016-04-26.pdf
    NB: All units should be in eq/l.
    Args:
        cln_min: Float. Parameter to define "critical load function" (see PDF)
        cln_max: Float. Parameter to define "critical load function" (see PDF)
        cls_min: Float. Parameter to define "critical load function" (see PDF)
        cls_max: Float. Parameter to define "critical load function" (see PDF)
        dep_n: Float. Total N deposition
        dep_s: Float. Total (non-marine) S deposition
    Returns:
        Tuple (ex_n, ex_s, reg_id)
        ex_n and ex_s are the exceedances for N and S depositions dep_n and dep_s
        and the CLF defined by (cln_min, cls_max) and (cln_max, cls_min). The
        overall exceedance is (ex_n + ex_s).
        reg_id is an integer region ID, as defined in Figure VII.3 of the PDF.
    """
    # Check inputs
    # NOTE(review): ``assert`` is stripped under ``python -O``; raise
    # ValueError instead if this validation must survive optimization.
    assert (dep_n >= 0) and (dep_s >= 0), "Deposition cannot be negative."
    # Make sure floats
    cln_min = float(cln_min)
    cln_max = float(cln_max)
    cls_min = float(cls_min)
    cls_max = float(cls_max)
    dep_n = float(dep_n)
    dep_s = float(dep_s)
    # Handle edge cases
    # CLF pars < 0
    if (cln_min < 0) or (cln_max < 0) or (cls_min < 0) or (cls_max < 0):
        # Pars not valid
        # Updated 07.11.2020. Values < 0 do not make sense, so were originally set to -1.
        # This change is equivalent to setting values less than zero back to zero
        # return (-1, -1, -1)
        return (dep_n, dep_s, -1)
    # CL = 0
    if (cls_max == 0) and (cln_max == 0):
        # All dep is above CL
        return (dep_n, dep_s, 9)
    # Otherwise, we're somewhere on Fig. VII.3
    # (dn, ds) is the component-wise difference along the sloped segment of
    # the CLF, from (cln_max, cls_min) to (cln_min, cls_max).
    dn = cln_min - cln_max
    ds = cls_max - cls_min
    if (
        (dep_s <= cls_max)
        and (dep_n <= cln_max)
        and ((dep_n - cln_max) * ds <= (dep_s - cls_min) * dn)
    ):
        # Non-exceedance
        return (0, 0, 0)
    elif dep_s <= cls_min:
        # Region 1
        ex_s = 0
        ex_n = dep_n - cln_max
        return (ex_n, ex_s, 1)
    elif dep_n <= cln_min:
        # Region 5
        ex_s = dep_s - cls_max
        ex_n = 0
        return (ex_n, ex_s, 5)
    elif -(dep_n - cln_max) * dn >= (dep_s - cls_min) * ds:
        # Region 2
        ex_n = dep_n - cln_max
        ex_s = dep_s - cls_min
        return (ex_n, ex_s, 2)
    elif -(dep_n - cln_min) * dn <= (dep_s - cls_max) * ds:
        # Region 4
        ex_n = dep_n - cln_min
        ex_s = dep_s - cls_max
        return (ex_n, ex_s, 4)
    else:
        # Region 3: exceedance measured to the foot (xf, yf) of the
        # perpendicular from (dep_n, dep_s) onto the sloped CLF segment.
        dd = dn ** 2 + ds ** 2
        s = dep_n * dn + dep_s * ds
        v = cln_max * ds - cls_min * dn
        xf = (dn * s + ds * v) / dd
        yf = (ds * s - dn * v) / dd
        ex_n = dep_n - xf
        ex_s = dep_s - yf
        return (ex_n, ex_s, 3)
|
a49a1ba13996b54f046629af5718e702db89eec6
| 71,456
|
def split_smb_path(_path):
    """
    Parse the Service name from a full path.

    :param _path: A full SMB path ("/servicename/dir/subdir")
    :return: A tuple with service name and the path part
    """
    # Drop any leading slash first: splitting the documented
    # "/service/..." form directly produced an empty service name.
    _service, _, _remote_path = _path.lstrip("/").partition("/")
    return _service, _remote_path
|
84d0d955c63fdd2f5c3156dfc1c8e66118fbcab6
| 71,458
|
def transform_NRS7(df):
    """Recode the ini_NRS7 column to an ordered number scale, 0-10.

    The verbal endpoints map to 10 and 0; any other entry is assumed
    to already be a numeric string and is cast to int.
    """
    endpoints = {"Värsta tänkbara värk": 10, "Ingen värk": 0}

    def recode(value):
        if value in endpoints:
            return endpoints[value]
        return int(value)

    df["ini_NRS7"] = df["ini_NRS7"].apply(recode)
    return df
|
695657cf5df4ff254feb3e73deaf498f70c46459
| 71,463
|
def find_cfn_output(key, outputs):
    """Return the CloudFormation output value for *key*, or None if absent."""
    return next(
        (entry['OutputValue'] for entry in outputs if entry['OutputKey'] == key),
        None,
    )
|
0126ee4aa3c13e0371dd04cd6d6695730f5378a9
| 71,468
|
import math
def dot(p0, p1, p2, p3) -> float:
    """Calculate the cosine of the angle between vectors p0->p1 and p2->p3."""
    ax, ay = p1[0] - p0[0], p1[1] - p0[1]
    bx, by = p3[0] - p2[0], p3[1] - p2[1]
    return (ax * bx + ay * by) / (math.hypot(ax, ay) * math.hypot(bx, by))
|
f85dc47d0bef10ff96f95f23df2891a8e9eeb870
| 71,469
|
def __SXname__(x):
    """
    Returns the name of casadi.SX symbolics.

    The name is taken as everything before the first underscore of the
    symbol's string form.

    Parameters
    ----------
    x : list[casadi.SX] or casadi.SX
        List of casadi symbolics or just casadi symbolics.

    Returns
    -------
    list[str] or str
        List of names of symbolics or just the name of the symbolics.
    """
    if isinstance(x, list):
        return [str(symbol).split('_')[0] for symbol in x]
    return str(x[0]).split('_')[0]
|
697328380c940e38dcb724f5b4383d0343f2ec7e
| 71,471
|
def fitAlgorithm(classifier, trainingData, trainingTarget):
    """
    Fit the given classifier / pipeline on the training data and
    return whatever its ``fit`` method returns (typically itself).
    """
    fitted = classifier.fit(trainingData, trainingTarget)
    return fitted
|
ddc6a7b2f5c42e07e212c2a48fd1d35b1c77dab2
| 71,473
|
def replace_s3_invalid_characters(key):
    """Replace characters invalid for an S3 object key with underscores.

    Args:
        key: string where to replace characters
    Returns:
        string where any character that is not a letter, digit, or one
        of the allowed specials is replaced with an underscore
    """
    allowed_special = " !-_'.,*()"
    return ''.join(
        ch if ch.isalpha() or ch.isdigit() or ch in allowed_special else '_'
        for ch in key
    )
|
3ca34ceeddd0133a076f956d430f6e7b77b7097a
| 71,476
|
from typing import Any
def composite(*funcs):
    """
    Return the composition of the given functions: the last function is
    called with the input args, each earlier function is then called on
    the previous return value, and the first function's result is
    returned. Every function besides the last should take one argument.

    For example:
    f = composite(lambda x: x * 5, lambda x, y: x + y)
    assert f(2, 3) == (2 + 3) * 5
    """
    assert len(funcs) > 0

    def composed(*args, **kwargs):
        *outer, innermost = funcs
        value = innermost(*args, **kwargs)
        for fn in reversed(outer):
            value = fn(value)
        return value

    return composed
|
b180782c7358ee8f947c4a41dd1092690cc28094
| 71,477
|
import itertools
def selectSorous(aSorouListList, aF0):
    """
    Takes [[f_1, f_2, ...]_j, ...] and returns the list [g_1, g_2, ...]
    where each g_i is a sum sorou built by selecting one entry from each
    list in aSorouListList and appending aF0.
    """
    results = []
    for choice in itertools.product(*aSorouListList):
        summed = [term for part in choice for term in part]
        summed += aF0
        results.append(summed)
    return results
|
fa5d5721df5763a3325db96286f92ecd156874e1
| 71,479
|
def translate(english_days):
    """ translate the name of a day in a week from English to French.
    :param english_days: string, a day in a week in English.
    :return: string, the French name for the day.
    :raises Exception: if the input is not a capitalized English day name.
    """
    day_map = {
        'Monday': 'lundi',
        'Tuesday': 'mardi',
        'Wednesday': 'mercredi',
        'Thursday': 'jeudi',
        'Friday': 'vendredi',
        'Saturday': 'samedi',
        'Sunday': 'dimanche',
    }
    if english_days not in day_map:
        raise Exception("Days in wrong form!")
    return day_map[english_days]
|
8f43faa3796d9818e33d39c69cf6c0076993b728
| 71,480
|
def marginCorrect(tr, margin):
    """
    Correct an affine transform, in place, for the margin that was added
    to an image; returns the same (mutated) transform sequence.
    """
    # Shift the translation term by the margin, compensating for the
    # linear terms tr[1] and tr[2] acting on the added border.
    offset = margin - (tr[1] + tr[2]) * margin
    tr[0] += offset
    return tr
|
0e35d5fcb208605e2bc1b112999780fcb3f98511
| 71,483
|
def get_field_on_block(block, field_name, default_value=None):
    """
    Get the field value that is directly set on the xblock.

    Inherited values are deliberately ignored, since field inheritance
    returns a value from only a single parent chain (e.g., doesn't take
    a union in DAGs). Returns default_value when the field is unknown
    or not explicitly set on this block.
    """
    try:
        field = block.fields[field_name]
        if field.is_set_on(block):
            return getattr(block, field_name)
    except KeyError:
        pass
    return default_value
|
6f48a89a4684869b2b5ceec0a276b5f8117f70f4
| 71,486
|
def elementwise_residual(true_val, pred_val):
    """The residual between a single true and predicted value.

    Parameters
    ----------
    true_val : float
        True value.
    pred_val : float
        Predicted value.

    Returns
    -------
    residual : float
        The residual, true minus predicted.
    """
    residual = true_val - pred_val
    return residual
|
e1eb2434b1d24032f3b7abc1233c165d7146ffff
| 71,487
|
def get_submit_attr_str(submit_attrs):
    """Render submit attributes as the xml submit_attr lines for one entry.

    Args:
        submit_attrs (dict): submit attribute name -> value; entries whose
            value is None are skipped. A falsy argument yields "".

    Returns:
        string: the string representing the xml submit attributes section
        for a single entry
    """
    if not submit_attrs:
        return ""
    return "".join(
        f'\n         <submit_attr name="{name}" value="{value}"/>'
        for name, value in sorted(submit_attrs.items())
        if value is not None
    )
|
69f35d1db2436abdc648b68f365f54f0041cd5b7
| 71,488
|
import colorsys
def hsv_color(h, s, v):
    """Return an RGB triple (channels scaled to 0-255) from HSV."""
    return tuple(channel * 255 for channel in colorsys.hsv_to_rgb(h, s, v))
|
dd525abb57b6320688d931e3b97e584086b5f839
| 71,490
|
def gf_degree(f):
    """
    Return the leading degree of ``f`` — one less than the number of
    coefficients, so the empty polynomial has degree -1.

    **Examples**
    >>> from sympy.polys.galoistools import gf_degree
    >>> gf_degree([1, 1, 2, 0])
    3
    >>> gf_degree([])
    -1
    """
    return len(f) - 1
|
4c5853ddddfa1b65a39b6c7b0b1a822684cc5490
| 71,495
|
def sign_extend(num):
    """Sign-extend an 8-bit integer: values with bit 7 set become negative."""
    if num & 0x80:
        num |= ~0xFF
    return num
|
2601e4f5ddb7c05b8785934c4e4cb8e665835041
| 71,496
|
def rewrite_title(lines, expected_title, rewritten_title):
    """Rewrite the title of a .rst file.

    Args:
        lines (List[str]): lines of the file.
        expected_title (str): title expected to be generated by apidoc
        rewritten_title (str): title to replace
    Returns
        List[str]: rewritten title lines.
    Raises
        ValueError: If the title '==...==' isn't on line 1.
        ValueError: If the title isn't the expected value.
    """
    # Tuple unpacking doubles as an assertion that exactly one
    # '==...==' underline exists (ValueError otherwise).
    underline_index, = [
        i for i, line in enumerate(lines) if set(line) == set('=')
    ]
    if underline_index != 1:
        raise ValueError('Unexpected title line', underline_index)
    if lines[0] != expected_title:
        raise ValueError('Unexpected title content', lines[0])
    return [rewritten_title, '=' * len(rewritten_title)]
|
ec67359363fe82eaf91ea63b92fabca7cbf94606
| 71,497
|
import re
def fix_name(name):
    """Normalize a name into a valid python variable name.

    Lowercases, converts runs of non-identifier characters to single
    underscores, trims surrounding underscores, and strips any leading
    digits (identifiers cannot start with a digit).
    """
    cleaned = re.sub(r'[^a-z0-9_]+', '_', name.lower()).strip('_')
    cleaned = re.sub(r'_+', '_', cleaned)
    return re.sub(r'^[0-9_]+', '', cleaned)
|
23c634e4a1496d23c9b932b0ab78dc0029580fc2
| 71,503
|
import struct
def read_double(buf):
    """
    Read a 4-byte floating-point value from `buf`.

    NOTE(review): despite the name, this unpacks a 32-bit float ("f"),
    not a 64-bit double ("d") — confirm against the wire format before
    changing either the name or the format.
    """
    (value,) = struct.unpack("f", buf.read(4))
    return value
|
01b639f6221a1591dba41ec864648ee487351ebf
| 71,504
|
def gcd(x: int, y: int) -> int:
    """Greatest Common Divisor via the iterative Euclidean algorithm."""
    while y:
        x, y = y, x % y
    return x
|
2b1f6fbebaded95f862a3a37793bd38c27e575a6
| 71,505
|
def is_docs_file(metadata):
    """Only Docs files have the "exportLinks" key.

    Returns that key's (truthy) value when present, otherwise None,
    so the result can be used both as a boolean and as the links dict.
    """
    export_links = metadata.get('exportLinks')
    return export_links
|
3a0daa89e4dddbc75f4fbddba801468c14aa48f0
| 71,512
|
from bs4 import BeautifulSoup
def extract_data_from_html(html, data):
    """
    Extract all link targets (href) and image sources (src) from an HTML
    page with Beautiful Soup and record them in the given dictionary.

    Args:
        html: String with HTML that will be parsed.
        data: dict object that will be complemented by extracted data
              (keys 'links' and 'images' are overwritten).
    Returns:
        dict: the same dictionary, with the extracted information added.
    """
    soup = BeautifulSoup(html, 'html.parser')
    data['links'] = [anchor.get('href') for anchor in soup.find_all('a')]
    data['images'] = [image.get('src') for image in soup.find_all('img')]
    print('Links and images extracted.')
    return data
|
0340d51a648efe1652c5458eafb846c84bc80802
| 71,515
|
def name_from_fn(fn):
    """Return the human-readable dotted 'module.function' name of *fn*."""
    return "%s.%s" % (fn.__module__, fn.__name__)
|
85c18ee9140ddca44b5798fed792451f645c62ea
| 71,521
|
def convert_to_RGB_255(colors):
    """
    Scale the first three elements of a color triplet by 255.
    """
    return tuple(channel * 255.0 for channel in colors[:3])
|
ae45957295b282946d44b085eb5af03101313a41
| 71,523
|
import struct
def read_uint(buf, endianness=""):
    """
    Read a 2-byte unsigned integer from `buf`.

    NOTE(review): the name suggests a generic uint but this unpacks a
    16-bit "H" value — confirm against the wire format.
    """
    (value,) = struct.unpack(endianness + "H", buf.read(2))
    return value
|
19e9a962e9d8e66fe04832cf6bfcac72c19943b0
| 71,527
|
def float_range(start, end, step=1., exceed=False):
    """Like range() but for floats.

    When *exceed* is true, the range runs one extra step past *end*.
    """
    limit = end + (step if exceed else 0.)
    values = []
    current = start
    while current < limit:
        values.append(current)
        current += step
    return values
|
5d6a57a4cd8004809a7c87ca09ca4aba170e6a6d
| 71,537
|
def rescale(old_array, min_, max_):
    """Linearly rescale an array so its values span [min_, max_]."""
    lo = old_array.min()
    span_ratio = (max_ - min_) / (old_array.max() - lo)
    return min_ + span_ratio * (old_array - lo)
|
9c1feec7ca305b71ccc68783f9af8f4bfc1adee3
| 71,538
|
import re
def ensure_two_blank_lines_preceding_cell(text: str) -> str:
    """Ensure every cell delimiter has exactly two preceding blank lines.

    Adds/deletes lines if there are fewer/more.
    --- before ---
    # %%
    some_code = 'here'
    # %%
    --- after ---
    # %%
    some_code = 'here'


    # %%
    """
    cell_break = re.compile(r"\n+^# %%", re.MULTILINE)
    return cell_break.sub("\n\n\n# %%", text)
|
a52a46e29acfa1d335e309267a15e8ba5571e4be
| 71,539
|
def trunc(s: str, left=70, right=25, dots=' ... '):
    """All of string s if it fits; else left and right ends of s with *dots* in the middle.

    Fix: the previous version rebound ``dots`` to ' ... ' inside the
    body, silently ignoring any value the caller passed.
    """
    if len(s) <= left + right + len(dots):
        return s
    return s[:left] + dots + s[-right:]
|
08ed3990baa6df66f3508219e96a53612c1cd9f8
| 71,542
|
def read_lines(filename, verbose=True):
    """
    Load a file into a list of its lines, stripped of surrounding whitespace.
    """
    with open(filename, 'r') as handle:
        stripped = [line.strip() for line in handle]
    if verbose:
        print("Done reading file", filename)
    return stripped
|
3adc03ddd3afd9e077b8a7f656a274cce91af137
| 71,546
|
import functools
def compose(*functions):
    """ Compose functions left-to-right.

    NOTE: This implementation is based on a blog post by Mathieu Larose.
    https://mathieularose.com/function-composition-in-python/

    Parameters
    ----------
    *functions : function
        functions = func_1, func_2, ... func_n
        These should be single-argument functions. If they are not functions
        of a single argument, you can do partial function application using
        `functools.partial` before calling `compose`.

    Returns
    -------
    composition : function
        Composition of the arguments. In other words,
        composition(x) = func_1( func_2( ... func_n(x) ... ) )
    """
    return functools.reduce(lambda f, g: (lambda x: f(g(x))), functions)
|
7796c3af1e717d0820eb20ea9ebf22509e64dc2e
| 71,549
|
def get_path(haystack, needle, path=None):
    """Search a nested dictionary for the path to a specific value.

    The value may be a key in a dict or a value in a list.

    Fix: scalar leaves previously returned ``(None, None)``, which passed
    the ``is not None`` check and aborted the search before later
    siblings were visited; "not found" is now uniformly ``None``.

    :param haystack: nested structure (dict/list/scalars) to search
    :param needle: value to search for
    :param path: pre-path (keys already descended through)
    :return: tuple (path list, True if needle is a dict key / False if a
             list element), or None when the needle is not found
    """
    if path is None:
        path = []
    if isinstance(haystack, dict):
        if needle in haystack:
            # Value is a key and has children
            path.append(needle)
            return path, True
        for k, v in haystack.items():
            result = get_path(v, needle, path + [k])
            if result is not None:
                return result
        return None
    if isinstance(haystack, list):
        # Value is in a list and does not have children
        if needle in haystack:
            path.append(needle)
            return path, False
        return None
    # Scalar leaf: the needle is not down this branch.
    return None
|
3bcbc563b7bc957260163f359094920075364697
| 71,562
|
def correct_flask_vol(flask_vol, t=20.0, glass="borosilicate"):
    """
    Correct flask volume for changes from thermal expansion of glass.

    Parameters
    ----------
    flask_vol : array-like
        Flask volumes at standard temperature (20C)
    t : float, optional
        New temperature to calculate volume
    glass : str, optional
        Type of glass ("borosilicate" or "soft")

    Returns
    -------
    corrected_vol : array-like
        Flask volumes at the new temperature

    Raises
    ------
    KeyError
        If *glass* is not a known glass type.

    Notes
    -----
    Flask volume equation from 2007 Best Practices for Ocean CO2 Measurements,
    SOP 13 - Gravimetric calibration of volume contained using water
    """
    # Thermal expansion coefficients per glass type.
    alpha = {
        "borosilicate": 1.0e-5,
        "soft": 2.5e-3,
    }
    if glass not in alpha.keys():
        raise KeyError(f"Glass type not found, must be one of {list(alpha.keys())}")
    standard_t = 20.0
    return flask_vol * (1.0 + alpha[glass] * (t - standard_t))
|
f73e4b2797be3a040449d408688388c7b0674ef0
| 71,564
|
def option(option, text, **kwargs):
    """Shortcut building an (option, text, kwargs) triple for
    the KolektoPrinter.choices method.
    """
    return (option, text, kwargs)
|
b50d3ce02fc1099853d95f980cc85b010e42c30e
| 71,565
|
def _channel_transf(channel):
    """Linearize an sRGB channel for luminance calculation."""
    # Small values use the linear segment; larger ones the gamma curve.
    return channel / 12.92 if channel < 0.03928 else ((channel + 0.055) / 1.055) ** 2.4
|
7e220248854257398794f0cc23d6f3bd2206202d
| 71,567
|
def getDecile(type):
    """
    Return a list of deciles, in either string or numeric form, from ``5p``
    to ``95p`` (``50p`` included for convenience).

    Parameters
    ----------
    type: str
        Type of decile. Currently supported ``'string'`` (e.g. ``'5p'``) or
        ``'numeric'`` (e.g. ``0.5``).

    Returns
    ----------
    class 'list'
        A list of deciles.

    Raises
    ----------
    ValueError
        If *type* is neither 'numeric' nor 'string'.
    """
    deciles = {
        'numeric': [0.05, 0.15, 0.25, 0.35, 0.45, 0.50, 0.55, 0.65, 0.75, 0.85, 0.95],
        'string': ['5p', '15p', '25p', '35p', '45p', '50p', '55p', '65p', '75p', '85p', '95p'],
    }
    if type not in deciles:
        raise ValueError
    return deciles[type]
|
20c7c08639159ee41fc94dfbf124f62c42b44f1f
| 71,574
|
import torch
def images_to_cpc_patches(images):
    """Convert (N, C, 256, 256) tensors into (N*49, C, 64, 64) patches
    for CPC training.

    A 7x7 grid of 64x64 crops is taken with stride 32 (50% overlap),
    then flattened so consecutive rows are the 49 patches of one image.
    """
    grid = [
        images[:, :, row * 32:row * 32 + 64, col * 32:col * 32 + 64]
        for row in range(7)
        for col in range(7)
    ]
    # (N, 49, C, 64, 64) -> (N*49, C, 64, 64)
    stacked = torch.stack(grid, dim=1)
    return stacked.view(-1, *stacked.shape[-3:])
|
0f630f83bfe1e97b3e619eb09f0d750821685979
| 71,575
|
def _format_read_number(read, read_type=None):
    """
    Catch read values without a letter prefix (legacy format) and convert
    them to Rn format. This could be fixed by a database migration that
    updates the JSON blobs.

    :param read: The read number. May be eg 'R1', 'R2', 'I1', or
                 the old format used previously, '1', '2', '3'
                 (as string or int).
    :type read: str | int
    :return: The read number properly formatted for output - Rn or In.
    :rtype: str
    """
    try:
        number = int(read)
    except ValueError:
        # Already in 'Rn' or 'In' form; return unmodified.
        return read
    prefix = 'R' if read_type is None else read_type
    return '%s%s' % (prefix, number)
|
e1cb89a778b0d5d2a1d946627e06f755a8f62011
| 71,578
|
def _cast_safe(self, value, cast_type = str, default_value = None):
    """
    Casts the given value to the given type.
    The cast is made in safe mode: if an exception
    occurs the default value is returned.

    :type value: Object
    :param value: The value to be casted.
    :type cast_type: Type
    :param cast_type: The type to be used to cast the retrieved
    value (this should be a valid type, with constructor).
    :type default_value: Object
    :param default_value: The default value to be used
    when something wrong (exception raised) occurs.
    :rtype: Object
    :return: The value casted to the defined type.
    """
    # None passes through untouched; use an identity test (``is``) so a
    # value with a custom __eq__ cannot be mistaken for None (the old
    # ``value == None`` check had exactly that hole).
    if value is None: return value
    try:
        # No-op when the value already has exactly the requested type;
        # otherwise invoke the type's constructor on it.
        if type(value) == cast_type:
            return value
        return cast_type(value)
    except Exception:
        # Any failure in the conversion falls back to the default.
        return default_value
|
f89300e21a1758c892eb42ad35696a56f77313b5
| 71,584
|
from typing import List
def replace_item(init: str, fin: str, loc_string_list: List[str]) -> List[str]:
    """
    Replace a substring in every element of a list of strings, in place.

    A helper function for the Ocean class, as it allows the inputs
    to fortran/C ocean model to be rewritten.

    Args:
        init (str): initial expression to find.
        fin (str): expression to replace it with.
        loc_string_list (List[str]): string list to search through
            (mutated in place).

    Returns:
        List[str]: the same list object, with replacements applied.
    """
    for index, text in enumerate(loc_string_list):
        loc_string_list[index] = text.replace(init, fin)
    return loc_string_list
|
6c54430977cb48760ce459ef500fdb0d2f78370d
| 71,585
|
import csv
def write_to_csv(dictionary, header, csvfile):
    """Write an iterable of row dicts (e.g. from a SQL query) to a
    semicolon-delimited csv file with the given header; returns the path."""
    with open(csvfile, 'w') as handle:
        writer = csv.DictWriter(handle, delimiter=';', fieldnames=header,
                                lineterminator='\n')
        writer.writeheader()
        writer.writerows(dict(row) for row in dictionary)
    return csvfile
|
356a800953b6565301e1b0a6477de32284cb53b7
| 71,589
|
import pickle
def _pickle_import(filename):
    """
    Pickle Importer

    Load and return a pickled object from a file.

    :param filename: Name and path to the pickle file.
    :type filename: str
    :return: The unpickled object.
    :rtype: Object

    NOTE(review): ``pickle.load`` can execute arbitrary code — only use
    on trusted files.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
|
2470736ed09211a72cc7d3a61c1eaa8d33361ffc
| 71,594
|
def flip_pancakes(pancakes, flipper):
    """Compute the minimum number of flips to make every pancake smile.

    :param pancakes: string of '+' (smile) and '-' (empty) pancakes
    :param flipper: the length of the flipper
    :return: minimum number of flips, or "IMPOSSIBLE" if unreachable
    """
    state = list(pancakes)
    flips = 0
    # Greedy left-to-right sweep: once everything left of position i is
    # '+', a '-' at i can only be fixed by a flip starting exactly at i.
    for start in range(len(state) - flipper + 1):
        if state[start] != '-':
            continue
        for offset in range(flipper):
            pos = start + offset
            state[pos] = '+' if state[pos] == '-' else '-'
        flips += 1
    return flips if all(c == '+' for c in state) else "IMPOSSIBLE"
|
ddc1e038a8ce3129e1904c62cf2c34f91385a4bc
| 71,595
|
from typing import Dict
def dict_cut(d: Dict, a: int, b: int) -> Dict:
    """
    Return a sub-dictionary of *d* by numeric indexing of its items.

    Assumes that `dict.items()` has a fixed (insertion) order.

    Args:
        d: The dictionary to "split".
        a: Start index of the item range (inclusive).
        b: End index of the item range (exclusive).

    Returns:
        A new dict containing the selected subset of items.
    """
    selected = list(d.items())[a:b]
    return {key: value for key, value in selected}
|
faee98deb5fb049958ba5c354c0e7a4987556665
| 71,597
|
def fetch_sensor_pattern(pattern, client, log):
    """Fetch sensor values matching a per-antenna sensor name pattern.

    NOTE(review): this is a generator-based coroutine — the result of
    ``client.sensor_values`` is obtained via ``yield`` — presumably run
    under a Tornado/KATPortal IOLoop with a coroutine decorator applied
    at the definition or call site; confirm against the caller.

    Args:
        pattern (str): per-antenna sensor name with antenna fields
            replaced with braces.
        client (obj): KATPortalClient object.
        log: logger used to report failures.

    Returns:
        sensor_details (dict): sensor results including value and timestamp
            of last change in value.
        None if no sensor results obtainable (any exception is logged
        and swallowed — deliberate best-effort behavior).
    """
    try:
        sensor_details = yield client.sensor_values(pattern, include_value_ts=True)
        return(sensor_details)
    except Exception as e:
        # Best-effort: log the failure and signal "no data" with None.
        log.error(e)
        return(None)
|
8d4dc48a5592a5be4269c3f61ff518133229b722
| 71,602
|
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (List of Dictionaries) raw structured data to process

    Returns:

        List of Dictionaries. Structured data with the following schema:

        [
          {
            "ip":       string,
            "hostname": [
                          string
            ]
          }
        ]
    """
    # The raw structured data already matches the schema, so this stage
    # is a pass-through kept for pipeline symmetry.
    return proc_data
|
3d818834869bfab56e12ba73c3a9dd9be229e2cc
| 71,605
|
def brightness_to_rflink(brightness: int) -> int:
    """Convert a 0-255 brightness value to an RFLink dim level (0-15)."""
    # 255 / 15 == 17, so dividing by 17 maps 255 -> 15 and 0 -> 0.
    scaled = brightness / 17
    return int(scaled)
|
c3a066e0b94144ebcb835664e66f395d3a906047
| 71,614
|
def _fmt_simple(name, arg):
    """Format a simple item consisting of a name and an argument.

    Returns ``None`` when *arg* is ``None``, otherwise the two parts
    joined by a single space.
    """
    return None if arg is None else "%s %s" % (name, arg)
|
6524de2ec20384b4ca5d56c05527c22ac9ec0cf2
| 71,618
|
def eval_metrics(model_type, params, dtrain, dtest, metrics):
    """Train a model on the train set and evaluate metrics on the test set.

    Args:
        model_type (type, subclass of modelgym.models.Model): class of model
            (e.g modelgym.models.XGBClassifier)
        params (dict of str: obj): model parameters
        dtrain (modelgym.utils.XYCDataset): train dataset
        dtest (modelgym.utils.XYCDataset): test dataset
        metrics (list of modelgym.metrics.Metric): metrics to evaluate

    Returns:
        dict of str: obj: Mapping from metric name to metric result
    """
    model = model_type(params=params)
    model.fit(dtrain)

    # Compute each kind of prediction only if some metric needs it:
    # probabilistic output when any metric requires probabilities, hard
    # labels when at least one metric does not.
    needs_proba = [metric.requires_proba for metric in metrics]
    y_pred_proba = model.predict_proba(dtest) if any(needs_proba) else None
    y_pred = model.predict(dtest) if not all(needs_proba) else None

    results = {}
    for metric in metrics:
        prediction = y_pred_proba if metric.requires_proba else y_pred
        results[metric.name] = metric.calculate(dtest.y, prediction)
    return results
|
9c177b617e53de04824925af911af8e84606f200
| 71,622
|
def density_to_air_quality(density):
    """Map PM2.5 density to HomeKit AirQuality level."""
    # (upper bound, level) pairs for levels 1-4; anything above the last
    # bound maps to the worst level, 5.
    thresholds = ((35, 1), (75, 2), (115, 3), (150, 4))
    for limit, level in thresholds:
        if density <= limit:
            return level
    return 5
|
3a1b551f262770d8219874e880e35de5e7fa6b7a
| 71,623
|
def convert_time(seconds):
    """Convert seconds into natural time, e.g. 122 seconds => "2 minutes 2 seconds".

    note: does not go beyond minutes

    Args:
        seconds: duration in seconds; rounded to the nearest whole second.

    Returns:
        str: e.g. "1 minute 30 seconds", or just "45 seconds" when the
        duration is under a minute.
    """
    seconds = round(seconds)
    minutes, seconds = divmod(seconds, 60)
    # Pluralize for any count other than exactly 1, so 0 reads "0 seconds"
    # (the previous `> 1` check left zero singular).
    x = "minute" if minutes == 1 else "minutes"
    y = "second" if seconds == 1 else "seconds"
    if minutes:
        return f"{minutes} {x} {seconds} {y}"
    return f"{seconds} {y}"
|
46ab118a08171234ae94ee9e456fb0b66e707f57
| 71,628
|
def PolygonToXYZ(poly, elev=0.0):
    """Converts a Polygon to a list of coordinate tuples (x, y, z).

    Collects the border coordinates of the exterior ring and of every
    interior ring (island).  The first coordinate of each ring is skipped
    because a closed ring repeats it as the last coordinate.

    Parameters:
        poly: shapely.Polygon
        elev: float
            Optional z level applied to every point.

    Returns:
        List of polygon border coordinate tuples.
    """
    print("Converting polygon to points:")
    points = []
    # Exterior ring
    xs, ys = poly.exterior.xy
    points.extend((xs[i], ys[i], elev) for i in range(1, len(xs)))
    print("...found", len(xs), "exterior points.")
    # Interior rings (islands)
    nbi = len(poly.interiors)
    print("...found", nbi, "islands.")
    for ring in poly.interiors:
        xs, ys = ring.xy
        points.extend((xs[i], ys[i], elev) for i in range(1, len(xs)))
    print("...found a total of", len(points), "shoreline points.")
    return points
|
d2c8c6e890b86ee5070b885e84099561a0fddcb6
| 71,629
|
import json
def get_credentials(credentials_path="credentials.json"):
    """Load database credentials from a JSON file.

    Parameters
    ----------
    credentials_path : str
        Location of the credentials JSON file

    Returns
    -------
    credentials : dict
        Dictionary containing the database connection details
    """
    with open(credentials_path, "r") as handle:
        return json.load(handle)
|
9d5c06da334b90691985ab26cf071648600d210f
| 71,636
|
import hashlib
def sha1(text: str) -> str:
    """Compute the SHA-1 hash of an input string.

    Args:
        text: Input string (UTF-8 encoded before hashing).

    Returns:
        Hex-encoded digest string.
    """
    return hashlib.sha1(text.encode()).hexdigest()
|
ca6b68f89d986b52dc560c2e57ff96777d2347fb
| 71,644
|
import math
def hex_shape(x, y, size):
    """
    Generate the vertices of a regular hexagon with the specified size.

    Args:
        x: x coordinate of the hexagon centre.
        y: y coordinate of the hexagon centre.
        size: circumradius (centre-to-vertex distance).

    Returns:
        List of six (x, y) vertex tuples, starting at angle 0 and stepping
        by 60 degrees counter-clockwise.
    """
    # One comprehension instead of two parallel lists zipped with a
    # lambda that shadowed the x/y parameters.
    angles = [math.pi / 6 * (i * 2) for i in range(6)]
    return [(math.cos(a) * size + x, math.sin(a) * size + y) for a in angles]
|
371262dd3156747db9598637a21f37e32d316ab7
| 71,645
|
def help_description(s="", compact=False):
    """
    Append and return a brief description of the Sage documentation builder.

    If 'compact' is ``False``, the function adds a final newline character.
    """
    description = (
        "Build or return information about Sage documentation. "
        "A DOCUMENT and either a FORMAT or a COMMAND are required."
    )
    s += description
    if not compact:
        s += "\n"
    return s
|
999a92049f7167b198119ea4fb3eab98f02031db
| 71,646
|
import glob
def get_chpl_files(mod_path):
    """
    Get the chapel files that need to be compiled for the provided package

    Parameters
    __________
    mod_path: path to the module being added to the server.

    Returns
    ______
    List of .chpl files at mod_path/server
    """
    pattern = f"{mod_path}/server/*.chpl"
    return glob.glob(pattern)
|
a32c8b04d67259940fdd148b8b0ee893f3753076
| 71,649
|
import ast
from typing import Container
def _get_name_of_class_if_from_modules(
    classnode: ast.expr, *, modules: Container[str]
) -> str | None:
    """
    Resolve the bare class name of *classnode* if it comes from *modules*.

    - ``ast.Name`` -> its ``id``.
    - ``ast.Attribute`` -> the part after the dot, provided the dotted
      prefix (one or two levels deep) names a module in *modules*;
      otherwise ``None``.
    - anything else -> ``None``.

    >>> _get_name_of_class_if_from_modules(_ast_node_for('int'), modules={'builtins'})
    'int'
    >>> _get_name_of_class_if_from_modules(_ast_node_for('builtins.int'), modules={'builtins'})
    'int'
    >>> _get_name_of_class_if_from_modules(_ast_node_for('builtins.int'), modules={'typing'}) is None
    True
    """
    if isinstance(classnode, ast.Name):
        return classnode.id
    if not isinstance(classnode, ast.Attribute):
        return None
    prefix = classnode.value
    # Single-level module prefix, e.g. `builtins.int`.
    if isinstance(prefix, ast.Name) and prefix.id in modules:
        return classnode.attr
    # Two-level module prefix, e.g. `collections.abc.Sized`.
    if isinstance(prefix, ast.Attribute) and isinstance(prefix.value, ast.Name):
        dotted = f"{prefix.value.id}.{prefix.attr}"
        if dotted in modules:
            return classnode.attr
    return None
|
b5f235a2c185573a5c9dbf270c89abead2d5272a
| 71,650
|
def test_decorated_function(x):
    """Test Decorated Function Docstring."""
    doubled = x * 2
    return doubled
|
9bb8e1716d2292656ec8d3fead1ee6f935bdc5ef
| 71,655
|
import re
def validate_eye_color(ecl):
    """
    Validate Eye Color - exactly one of: amb blu brn gry grn hzl oth.
    """
    # re.search would also accept strings that merely *contain* a valid
    # code (e.g. "amber", "xblu"); fullmatch enforces an exact match as
    # the docstring requires.
    return re.fullmatch(r"amb|blu|brn|gry|grn|hzl|oth", ecl) is not None
|
be2e2ad4f09afa1c7afcdc8015d1e1b880bf96e1
| 71,657
|
def parse_csv_data(csv_filename: str) -> list:
    """
    Description:
        Open a csv file and return its lines as a list of strings.
        Each element is one line with surrounding whitespace (including
        the trailing newline) stripped; fields remain comma-separated
        like in the csv file.

    Arguments:
        csv_filename {str} : is the string name of the csv file

    Returns:
        corona_data_list {list} : is a list of strings containing the covid data
    """
    corona_data_list = []
    # `with` guarantees the file is closed even if reading raises; the
    # original left the handle open on exceptions.
    with open(csv_filename, 'r') as corona_data:
        # iterate through every line and append its stripped form
        for line in corona_data:
            corona_data_list.append(line.strip())
    return corona_data_list
|
e2288a8bd3c6cf6aac24bd504cba500aad55870a
| 71,658
|
import json
def read_json(path: str, encoding: str) -> dict:
    """Read a JSON file and return its parsed contents."""
    with open(path, encoding=encoding) as handle:
        contents = json.load(handle)
    return contents
|
476b072bbb441744a740703df0a583acbfe73de7
| 71,659
|
def makePath(path):
    """
    Make the path element of a URI from a supplied list of segments.

    The leading '/' is not included.
    """
    separator = "/"
    return separator.join(path)
|
03760ebb75684b26db4653e749c46dbbce49c25c
| 71,662
|
def count_model_parameters(model):
    """Count trainable parameters of a given model

    Args:
        model(object): torch model object

    Returns:
        trainable_parameters(int): number of trainable parameters
    """
    trainable = (p for p in model.parameters() if p.requires_grad)
    return sum(p.numel() for p in trainable)
|
17b32f0e37f9f47fca4de17ee4cfc27cbaa12b02
| 71,667
|
def build_content_snippet(suggestion):
    """Build snippet for function autocompletion.

    Args:
        suggestion: dict with a "name" key and a "func_details" entry
            that, when truthy, contains a "params" list of
            ``{"name": ...}`` dicts.

    Returns:
        ``"name(p1, p2)"`` when function details are present, otherwise
        just ``"name"``.
    """
    name = suggestion["name"]
    if not suggestion["func_details"]:
        return name
    params = suggestion["func_details"]["params"]
    # str.join replaces the manual first-vs-rest concatenation loop.
    paramText = ", ".join(param["name"] for param in params)
    return "{}({})".format(name, paramText)
|
45f02b43fd61633887a7a35a17670369224c9361
| 71,669
|
def param_combinations(key_values):
    """Expand a dict of keyword -> candidate values into concrete headers.

    Recursively combine the value lists that appear in a keyword values
    dict into dictionaries describing combinations of simple values:

        { keyword1 : [values_for_keyword1, ...], ...} -->
            [{ keyword1 : simple_value_for_keyword1, ...}, ...]

    A keyword mapped to an empty value list is omitted from the
    resulting combinations.
    """
    if isinstance(key_values, dict):
        key_values = list(key_values.items())
    if not key_values:
        # Base case: a single empty combination.
        return [{}]
    key, values = key_values[0]
    expanded = []
    for partial in param_combinations(key_values[1:]):
        if values:
            for value in values:
                combo = dict(partial)
                combo[key] = value
                expanded.append(combo)
        else:
            expanded.append(partial)
    return expanded
|
81cbe4974dc8f8e3d110ae308590090950fa5620
| 71,673
|
def get_last_url_item(url: str):
    """Returns everything after the last slash ("/") of a URL.

    If the URL contains no slash the whole string is returned (the
    previous ``rindex``-based implementation raised ValueError).
    """
    return url.rsplit("/", 1)[-1]
|
7227b8ebbe1b36585eb376e9621854acaa803317
| 71,676
|
def max_interval_intersec(S):
    """determine a value that is contained in a largest number of given intervals

    :param S: list of half open intervals
    :complexity: O(n log n), where n = len(S)
    """
    # Sweep line: +1 event at each left endpoint, -1 at each right
    # endpoint.  Tuple sorting places -1 before +1 at equal x, matching
    # the half-open [left, right) semantics.
    events = [(left, +1) for left, right in S]
    events += [(right, -1) for left, right in S]
    events.sort()
    depth = 0
    best = (depth, None)
    for x, delta in events:
        depth += delta
        if depth > best[0]:
            best = (depth, x)
    return best
|
db9998b831c6689fec1bdf5a79d57296301d405f
| 71,678
|
import unicodedata
def normalize_fs_path(path: str) -> str:
    """Normalize a filesystem path containing unicode to NFD form.

    macOS HFS+ uses NFD; APFS doesn't normalize, but we stick with NFD.
    ref: https://eclecticlight.co/2021/05/08/explainer-unicode-normalization-and-apfs/
    """
    nfd_form = "NFD"
    return unicodedata.normalize(nfd_form, path)
|
b0ba3926213ae0b6f4e269a78241a3af19be9d1f
| 71,685
|
import requests
def return_package_task(task_id):
    """Return the content of an iControl LX task.

    Queries the local iControl REST package-management endpoint and
    returns the task JSON (or just its ``queryResponse`` member when
    present).  Returns ``False`` on any HTTP error status (>= 400).

    NOTE(review): credentials are hard-coded ('admin' with an empty
    password) — confirm this is intentional for the target appliance.
    """
    task_url = 'http://localhost:8100/mgmt/shared/iapp/package-management-tasks/' + task_id
    response = requests.get(task_url, auth=('admin', ''))
    if response.status_code >= 400:
        return False
    body = response.json()
    # Unwrap query-style responses; return the raw body otherwise.
    return body.get('queryResponse', body)
|
cf5bbea84794adbce8ae9412d680ce4dc9a3c364
| 71,687
|
def get_isoweek_from_date(date):
    """Convenience method to get the ISO week from a date/datetime.

    :param datetime.datetime date:
    :rtype: int
    """
    _, iso_week, _ = date.isocalendar()
    return iso_week
|
444b237b9474a1427629eb60fa63a7296bf966a5
| 71,688
|
def quality_to_proba_sanger(quality):
    """Convert a Sanger (Phred) quality score to an error probability."""
    exponent = quality / -10.
    return 10 ** exponent
|
99f068e7d57d291802d8411ccac88897cb4b6110
| 71,693
|
def to_force(weight):
    """
    Converts weight in grams to force in N at standard earth gravity.
    """
    # g ≈ 9.81 m/s²; the /1000 converts grams to kilograms.
    return weight * 9.81 / 1000
|
f173c3e1598edcb7255ec5aff3389ae2de054fc5
| 71,698
|
def reverse(list):
    """
    Return a new sequence with the original items in reversed order.

    Works on any sliceable sequence (list, str, tuple) and preserves the
    input type.  NOTE(review): the parameter name shadows the builtin
    ``list``; kept unchanged for backward compatibility with keyword
    callers.
    """
    reversed_copy = list[::-1]
    return reversed_copy
|
468ad9ff6416edf68639e7840abadce9754ac4ac
| 71,700
|
def check_spg_settings(fs, window, nperseg, noverlap):
    """Check settings used for calculating spectrogram.

    Parameters
    ----------
    fs : float
        Sampling rate, in Hz.
    window : str or tuple or array_like
        Desired window to use. See scipy.signal.get_window for a list of available windows.
        If array_like, the array will be used as the window and its length must be nperseg.
    nperseg : int or None
        Length of each segment, in number of samples.
    noverlap : int or None
        Number of points to overlap between segments.

    Returns
    -------
    nperseg : int
        Length of each segment, in number of samples.
    noverlap : int or None
        Number of points to overlap between segments.
    """
    if nperseg is not None:
        nperseg = int(nperseg)
    elif isinstance(window, (str, tuple)):
        # Named/parameterized windows default to 1 second of data.
        nperseg = int(fs)
    else:
        # Array windows default to the window's own length.
        nperseg = len(window)

    noverlap = int(noverlap) if noverlap is not None else None
    return nperseg, noverlap
|
b3933b03ca77a4ede73e7c4d9e416830355eeb93
| 71,706
|
def get_xref_mcf(xrefs, xref_to_label):
    """Returns the mcf format of a given string of xrefs.

    Each xref of the form ``<identifier>:<value>`` becomes one
    ``<prop_label>: "<prop_text_value>"`` line, where the property label
    is looked up via xref_to_label.  For this import, xref_to_label is
    either GENE_XREF_PROP_DICT or DRUG_XREF_PROP_DICT from config.py.
    Unknown identifiers are reported and skipped.

    Args:
        xrefs: a string representing a comma-separated list of xrefs
            enclosed by double quotes
        xref_to_label: xref name in pahrmgkb to DC property label mapping

    Returns:
        a multiline mcf formatted string of all of the xrefs' prop
        labels + values
    """
    if not xrefs:
        return ''
    mcf_lines = []
    for raw_xref in xrefs.split(','):
        parts = raw_xref.replace('"', '').strip().split(':')
        identifier = parts[0]
        if identifier not in xref_to_label:
            print('unexpected format in gene xrefs:' + xrefs)
            continue
        # Re-join in case the value itself contained colons.
        value = ':'.join(parts[1:]).strip()
        mcf_lines.append(xref_to_label[identifier] + ': "' + value + '"\n')
    return ''.join(mcf_lines)
|
85c530d4082133943bfbf2459a5c0c3c8f2d00cc
| 71,711
|
def is_successful_upgrade(upgrade_response):
    """Decide whether an HTTP response is a successful WebSocket upgrade.

    Matches against:

        HTTP/1.1 101 Switching Protocols
        Upgrade: websocket
        Connection: Upgrade
        Sec-WebSocket-Accept: HSmrc0sMlYUkAGmm5OPpG2HaGWk=
        Sec-WebSocket-Protocol: chat

    :see: https://en.wikipedia.org/wiki/WebSocket
    :param upgrade_response: The HTTP response
    :return: True if the response is a successful upgrade
    """
    if upgrade_response.get_code() != 101:
        return False

    headers = upgrade_response.get_headers()
    # Relaxed check: only require the three upgrade headers to be
    # present and non-empty; their exact values are not validated.
    required = ('Upgrade', 'Connection', 'Sec-WebSocket-Accept')
    values = [headers.iget(name, None)[0] for name in required]
    return all(values)
|
88b0fa832b054e73f8eabe7f33866442b679e6b2
| 71,712
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.