content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import yaml
def load_config_yaml(path_config):
    """Load a configuration dictionary from a YAML file.

    :param str path_config: path to the YAML configuration file
    :return dict: parsed configuration

    >>> p_conf = './testing-congif.yaml'
    >>> save_config_yaml(p_conf, {'a': 2})
    >>> load_config_yaml(p_conf)
    {'a': 2}
    >>> os.remove(p_conf)

    NOTE(review): the doctest relies on ``save_config_yaml`` and ``os``
    being available in the doctest namespace — confirm they are imported
    where the doctests are executed.
    """
    # safe_load avoids arbitrary object construction from untrusted YAML.
    with open(path_config, 'r') as fp:
        config = yaml.safe_load(fp)
    return config
|
ec94f22b24b097c2fd7499f7d0fc8f2f9dceb3d4
| 29,556
|
import random
def generate_random_list():
    """Build a random test case for the dragons/princesses problem.

    The first element is the list size n (2 <= n <= 2*10^5).  Each of the
    remaining elements is either a dragon entry ``"d g"`` with coins
    1 <= g <= 10^4 or a princess entry ``"p b"`` with beauty
    1 <= b <= 2*10^5.  The final element is always a princess.
    """
    beauty_max = 2 * pow(10, 5)
    coins_max = pow(10, 4)
    size = random.randint(2, beauty_max)
    result = [size]
    for _ in range(size - 2):
        kind = random.randint(0, 1)
        if kind == 0:
            # Princess entry with a random beauty value.
            result.append("p " + str(random.randint(1, beauty_max)))
        if kind == 1:
            # Dragon entry with a random coin value.
            result.append("d " + str(random.randint(1, coins_max)))
    # The sequence always ends with a princess.
    result.append("p " + str(random.randint(1, beauty_max)))
    return result
|
93035c159bed25ff87453784645bb509ca84344d
| 29,558
|
def map_vectors(vectors, fun):
    """Apply ``fun`` to every element of every vector of image information.

    :param vectors: iterable of iterables of elements
    :param fun: one-argument callable applied to each element
    :return: list of lists with ``fun`` applied element-wise
    """
    new_vectors = []
    for vector in vectors:
        # Bug fix: the mapped vector was built but never collected, so the
        # function always returned an empty list.
        new_vectors.append([fun(elem) for elem in vector])
    return new_vectors
|
3e51d265836d108ff5a794807f7f8d8a86b64145
| 29,560
|
def convert_webaccess_datestamp(entry):
    """Populate date and time fields from a web-access-log date stamp.

    Expects ``entry['date_stamp']`` in the ``dd/Mon/yyyy:HH:MM:SS`` form
    and adds ``year``, ``month``, ``day``, ``tstamp`` and
    ``numeric_date_stamp`` keys to ``entry`` (all strings).
    """
    month_by_abbrev = {'Jan': '01', 'Feb': '02', 'Mar': '03',
                       'Apr': '04', 'May': '05', 'Jun': '06',
                       'Jul': '07', 'Aug': '08', 'Sep': '09',
                       'Oct': '10', 'Nov': '11', 'Dec': '12'}
    date_parts = entry['date_stamp'].split('/')
    # Last slash-part is "yyyy:HH:MM:SS".
    time_parts = date_parts[-1].split(':')
    entry['year'] = time_parts[0]
    entry['month'] = month_by_abbrev[date_parts[1]]
    entry['day'] = date_parts[0]
    entry['tstamp'] = ''.join(time_parts[1:])
    entry['numeric_date_stamp'] = (entry['year'] + entry['month']
                                   + entry['day'] + entry['tstamp'])
    return entry
|
795c889f93f6d87cced5f9aa9293e6491f6b2678
| 29,561
|
def derr(min, max):
    """Build a predicate keeping reads whose ``d_err`` lies in [min, max].

    :param min: inclusive lower bound
    :param max: inclusive upper bound
    :return: callable mapping a mate dict to a bool
    """
    # Parameter names shadow builtins but are kept for keyword-argument
    # compatibility with existing callers.
    def _within_limits(mate):
        return min <= mate['d_err'] <= max
    return _within_limits
|
68e9065bd4eebd7816579d8feb05b22849630c55
| 29,562
|
def multipart_content_type(boundary, subtype='mixed'):
    """Create a MIME multipart Content-Type header dict.

    .. code-block:: python
        >>> multipart_content_type('8K5rNKlLQVyreRNncxOTeg')
        {'Content-Type': 'multipart/mixed; boundary="8K5rNKlLQVyreRNncxOTeg"'}
        >>> multipart_content_type('8K5rNKlLQVyreRNncxOTeg', 'alt')
        {'Content-Type': 'multipart/alt; boundary="8K5rNKlLQVyreRNncxOTeg"'}

    Parameters
    ----------
    boundary : str
        The content delimiter to put into the header
    subtype : str
        The subtype in :mimetype:`multipart/*`-domain to put into the header
    """
    content_type = 'multipart/{}; boundary="{}"'.format(subtype, boundary)
    return {'Content-Type': content_type}
|
a19cb6fcf43f2b3c2abecf3af960f57987967c0e
| 29,563
|
import re
def pattern_count(pattern, text):
    """Return the number of non-overlapping matches of a regex in ``text``."""
    return sum(1 for _ in re.finditer(pattern, text))
|
b12c8bc2132a9fd9afba7468befceac7edfb636f
| 29,564
|
def halvorsen(xyz, t, a):
    """Halvorsen attractor derivatives — like a rose with only three
    petals, cyclically symmetric in x, y, z.

    :param xyz: current (x, y, z) state
    :param t: time (unused; present for ODE-solver signatures)
    :param a: damping parameter
    :return: (dx/dt, dy/dt, dz/dt)
    """
    x, y, z = xyz
    return (
        -a * x - 4 * (y + z) - y ** 2,
        -a * y - 4 * (z + x) - z ** 2,
        -a * z - 4 * (x + y) - x ** 2,
    )
|
dfa6ab8d1d125c9d5b1d60ed9293ce4691818b37
| 29,567
|
from typing import List
from typing import Any
from typing import Dict
from typing import Counter
def get_common_items(list_to_count: List[Any], top_n: int) -> Dict[Any, int]:
    """Get the n most common items in descending order in a list of items.

    Args:
        list_to_count (List[Any]): List of items to be counted.
        top_n (int): Limit for how many of the most common items to return.

    Returns:
        Dict[Any, int]: Mapping from item to its count, most common first.
    """
    # Counter.most_common already yields (item, count) pairs sorted by
    # descending count; the previous re-wrap of the Counter in another
    # Counter was redundant and has been removed.
    return dict(Counter(list_to_count).most_common(top_n))
|
928a243ab48cce35ead2c821b1891f88ce20123e
| 29,568
|
def get_all_capture_s(searchers: dict, s_id):
    """Return the capture matrices for the single searcher ``s_id``.

    ``searchers`` may be either the legacy ``s_info`` dict-of-dicts or a
    dict of ``MySearcher`` instances; both layouts are supported.
    """
    searcher = searchers[s_id]
    if isinstance(searcher, dict):
        # Legacy s_info layout stores matrices under the 'c_matrix' key.
        return searcher.get('c_matrix')
    # New layout: the MySearcher class exposes an accessor.
    return searcher.get_all_capture_matrices()
|
e8595fb5f47abdedf8c33230d090791d7532d2bf
| 29,569
|
def trycast(value):
    """
    Try to cast a string attribute from an XML tag to an integer, then to
    a float.  If both fail, return the original string unchanged.
    """
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        return value
|
3faaaa52a2d50e20a925db96de884709c052e7e3
| 29,571
|
def compareTriplets(a, b):
    """Score two tuples element-wise: a point to whichever side's element
    is strictly greater at each index, nothing on ties.

    :return: ``[points_for_a, points_for_b]``
    """
    points_a = sum(1 for x, y in zip(a, b) if x > y)
    points_b = sum(1 for x, y in zip(a, b) if y > x)
    return [points_a, points_b]
|
ebad57194188294b7d7012fa72b1ef5d3902ce40
| 29,572
|
def _fix_bed_coltype(bed):
"""Fix bed chrom and name columns to be string
This is necessary since the chromosome numbers are often interpreted as int
"""
bed["chrom"] = bed["chrom"].astype(str)
bed["name"] = bed["name"].astype(str)
return bed
|
471410f31675d0de2f35883679e760c515ff0880
| 29,573
|
import re
def get_figure(text):
    """Extract specific LaTeX environments (figure, itemize, equation,
    table, enumerate, problem, align, layer1) and their contents.

    :param text: content of the latex file
    :type text: str
    :return: list of ``(start, end, content)`` tuples, one per match
    :rtype: list
    """
    environments = ["figure", "itemize", "equation", "table", "enumerate",
                    "problem", "align", "layer1"]
    pattern = (r'((?<!\\begin{comment})\s\\begin{(?P<sec>(?:'
               + "|".join(environments)
               + r')\*{0,1})}(?:.*?)\\end{(?P=sec)})')
    return [(m.start(), m.end(), m.group().strip())
            for m in re.finditer(pattern, text, re.S)]
|
22bf43d58d939f0c2f06bd60332a42727a297de3
| 29,575
|
def get_time_slot(hour, minute):
    """
    Map a clock time to the 15-minute reservation slot id used by the
    Sutka pool web page (https://www.sutka.eu/en/obsazenost-bazenu).
    Slot 0 covers 6:00-6:15 and slot 59 covers 20:45-21:00.

    :param hour: hour of the day in 24 hour format
    :param minute: minute of the hour
    :return: time slot for line usage
    """
    quarter = int(minute / 15)
    return (hour - 6) * 4 + quarter
|
dbfc510755a0b0a9612c0aa7d94922199c3f7f7d
| 29,576
|
import time
def datetime_from_local_to_utc(local_datetime):
"""
:param local_datetime: Python datetime object
:return: UTC datetime string
"""
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(time.mktime(local_datetime.timetuple())))
|
5e11911586ec2370168174e20061f6a96340af18
| 29,577
|
import os
def _get_pathconf(file_system_path, param_suffix, default):
"""Return a pathconf parameter value for a filesystem.
"""
param_str = [s for s in os.pathconf_names if s.endswith(param_suffix)]
if len(param_str) == 1:
try:
return os.pathconf(file_system_path, param_str[0])
except OSError:
pass
return default
|
2e351529af998fc2a21c44fc37500db4df6a9c6b
| 29,580
|
from datetime import datetime
def int_to_date_str(i_time):
"""unix epoc time in seconds to YYYY-MM-DD hh:mm:ss"""
return str(datetime.fromtimestamp(i_time))
|
27f0773a78202e57ae498c1d40faabb29877b573
| 29,581
|
import random
def make_acc_type1_data(client_tp):
    """Pick the acc_type1 value for a client type.

    Non-merchants leave the field empty (it must be filled for online
    payment, prepaid card and bank-card acquiring).

    :param client_tp: client type code; "02" marks a merchant
    :return: "11" or "12" chosen at random for merchants, "" otherwise
    """
    if client_tp != "02":
        return ""
    return random.choice(["11", "12"])
|
21f482cf02a22b7745e422035125cc27e6887b06
| 29,584
|
def get_waist_to_height_ratio(df):
    """Add a waist-to-height ratio column: WHR = waist / height.

    A measure of the distribution of body fat.
    Ref.: https://en.wikipedia.org/wiki/Waist-to-height_ratio

    Expects ``ac`` (abdominal circumference) and ``height`` columns;
    writes the result into a new ``WHR`` column and returns ``df``.
    """
    df["WHR"] = df["ac"].div(df["height"])
    return df
|
0720dcda80bd5bccb3e121ca9859ea2da3b75144
| 29,586
|
def longestPalindrome(s: str) -> str:
    """Return the longest palindromic substring of ``s``.

    Expands around every center (both odd- and even-length candidates).
    Time: O(n^2)
    Space: O(1)
    """
    n = len(s)
    best = s[0]

    def _expand(left: int, right: int) -> str:
        # Grow outward while in bounds and the end characters match.
        # Bug fix: bounds are checked BEFORE indexing — the original even-
        # length loop evaluated s[left] == s[mid] first, which wrapped
        # around at left == -1 and raised IndexError at mid == n (e.g.
        # longestPalindrome("aa") crashed).
        while left >= 0 and right < n and s[left] == s[right]:
            left -= 1
            right += 1
        return s[left + 1:right]

    for i in range(1, n):
        even = _expand(i - 1, i)      # even-length, centered between i-1 and i
        odd = _expand(i - 1, i + 1)   # odd-length, centered at i
        best = max(best, even, odd, key=len)
    return best
|
75471dc49581deddcacf17a0a698c714caf17b7e
| 29,587
|
def data_preprocess(rdd):
    """Identity preprocessing hook for an RDD of parsed JSON records.

    Receives ``sc.textFile(input_path).map(lambda _: json.loads(_))`` and
    currently returns it unchanged; extend here to add real preprocessing.
    """
    return rdd
|
a525c2f92b39a496398a2db0b9c9b184c9fef3b8
| 29,588
|
def evaluate_acc(predictions, outputs):
    """Compute accuracy by comparing two equal-length series element-wise.

    Arguments:
        predictions {series} -- first series, compared with the second one
        outputs {series} -- ground truth, same length as predictions
    """
    assert len(predictions) == len(outputs), "Lists have different size"
    match_count = (predictions == outputs).sum()
    return match_count / len(predictions)
|
299086ba456799241505ab9df6c64e3cf54ea13a
| 29,590
|
def has_tool_tag(invocation):
    """
    Check whether the invocation carries a non-empty tool_tag in its
    workspace info.

    Args:
        invocation (Invocation)
    Returns:
        True if a non-empty tool_tag exists, else False
    """
    workspace = getattr(invocation, 'workspace_info', None)
    if workspace is None:
        return False
    return getattr(workspace, 'tool_tag', '') != ''
|
7ea992c62d6d3f9ec5e3d26d5e1c3ab850d6fe04
| 29,591
|
def tuple_replace():
    """Returns a function that builds new tuples with elements replaced.

    A utility fixture helpful for deriving new input cases on the fly
    from existing tuples without unpacking their individual elements.
    Replacements are given as ``(idx, val)`` pairs passed as positional
    arguments.

    :rtype: function
    """
    def _tuple_replacer(op, *args):
        # Work on a mutable copy, apply each (index, value) pair, refreeze.
        items = list(op)
        for position, new_value in args:
            items[position] = new_value
        return tuple(items)
    return _tuple_replacer
|
54e204035d66c79c43a68c7d5889ac37c7c270aa
| 29,592
|
import math
def get_fitting_frames_of_size(size, constraints):
    """
    Get all frames of area ``size`` that fit on the pizza.

    :param size: frame area in cells
    :param constraints: dict with max rows "R" and max columns "C"
    :return: list of ``{'r': rows, 'c': cols}`` dicts that fit the grid
    """
    max_row = constraints["R"]
    max_col = constraints["C"]
    frames = []

    def _try_add(frame):
        # Keep the frame only when it is new and fits inside the grid.
        if frame not in frames:
            if frame['r'] <= max_row and frame['c'] <= max_col:
                frames.append(frame)

    for rows in range(1, int(math.floor(math.sqrt(size)) + 1)):
        if size % rows == 0:
            # Try the divisor pair in both orientations.
            _try_add({'r': rows, 'c': size // rows})
            _try_add({'r': size // rows, 'c': rows})
    return frames
|
101d18844ea675421689514a900dbc289f69d525
| 29,593
|
from typing import List
def get_param_with_name(parameters: List[dict], name: str) -> dict:
    """Get the first parameter dict whose "name" entry equals ``name``.

    Raises StopIteration when no parameter matches, as before.
    """
    return next(p for p in parameters if p["name"] == name)
|
ea1f527bc03cb41a77e3b9743ee372246dc12f54
| 29,594
|
def add_data_in_session(session, user_query, session_id, data_type, add_if_exist):
    """Adds the user queries in the session.

    :param session: mutable mapping holding per-session data
    :param user_query: query string to record
    :param session_id: key identifying the session inside ``session``
    :param data_type: category key the query is filed under
    :param add_if_exist: when True, append to an existing ``data_type``
        entry (or create one) instead of leaving the list untouched
    :return: always None

    NOTE(review): the stored shapes are inconsistent — a brand-new session
    stores the raw query string, the append path stores a list of stripped
    strings, and the final branch wraps ``query_list`` in another list
    (``[query_list]``). Confirm downstream readers expect this.
    """
    query_list = []
    session_list = session.get(session_id)
    flag = False  # set when an existing data_type entry was updated in place
    if session_list is None:
        # First entry for this session: store the query under data_type.
        session[session_id] = [{data_type: user_query}]
    elif session_list is not None and add_if_exist:
        for item in session_list:
            if data_type in item:
                # Append the stripped query to the existing entry.
                item[data_type] += [user_query.strip()]
                flag = True
                break
        if not flag:
            # No entry of this data_type yet: add one holding the query.
            session[session_id] = session_list + [{data_type: [user_query]}]
    else:
        # Reached only when session_list exists and add_if_exist is falsy,
        # so this inner branch can never run — NOTE(review): confirm intent.
        if add_if_exist:
            query_list.append(user_query.strip())
        session[session_id] = session_list + [{data_type: [query_list]}]
    return None
|
f5d24049139ea254ae67d27a06cf7b0c86e7aa45
| 29,595
|
def cote_boostee(cote, boost_selon_cote=True, freebet=True, boost=1):
    """
    Compute the boosted odds for a Betclic promotion.

    :param cote: base odds
    :param boost_selon_cote: when True the boost factor depends on the
        odds bracket, otherwise the explicit ``boost`` factor applies
    :param freebet: when True the winnings multiplier is 0.8 (freebet payout)
    :param boost: boost factor used when ``boost_selon_cote`` is False
    :return: boosted odds
    """
    mult_freebet = 0.8 if freebet else 1
    if not boost_selon_cote:
        return cote + (cote - 1) * boost * mult_freebet
    if cote < 2:
        # No boost below odds of 2.
        return cote
    if cote < 2.51:
        bracket_boost = 0.25
    elif cote < 3.51:
        bracket_boost = 0.5
    else:
        bracket_boost = 1
    return cote + (cote - 1) * bracket_boost * mult_freebet
|
4d62c52301cc5b28f1a12546a7313b4f8b943f36
| 29,596
|
import os
import glob
def get_expanded_paths(paths):
    """
    Translate path patterns into concrete paths.

    :param paths: patterns for the location of xml files
    :return: list of matching paths; glob entries expand to their matches
        (possibly none), non-glob entries are kept verbatim whether or
        not they exist
    """
    expanded = []
    for raw_path in paths:
        candidate = os.path.expanduser(os.path.expandvars(raw_path))
        if '*' in candidate or '?' in candidate:
            # Glob patterns contribute every match (maybe nothing).
            expanded.extend(glob.glob(candidate))
        else:
            # Plain paths are kept even when they do not exist.
            expanded.append(candidate)
    return expanded
|
ce75a51019f2dd742a39fbe180c22a8032802766
| 29,597
|
def extract_layout_switch(dic, default=None, do_pop=True):
    """
    Extract the layout and/or view_id value
    from the given dict; if both are present but different,
    a ValueError is raised.
    Futhermore, the "layout" might be got (.getLayout) or set
    (.selectViewTemplate);
    thus, a 3-tuple ('layout_id', do_set, do_get) is returned.

    :param dic: mapping possibly containing 'layout', 'view_id',
        'set_layout' and 'get_layout' keys
    :param default: layout used when neither 'layout' nor 'view_id' is set
    :param do_pop: when True the consulted keys are removed from ``dic``
    :raises ValueError: if 'layout' and 'view_id' hold different values
    """
    # Consume or merely read the keys depending on do_pop.
    if do_pop:
        get = dic.pop
    else:
        get = dic.get
    layout = None
    layout_given = False
    # NOTE(review): set_given, get_given and layout_given are computed but
    # never used below — confirm whether callers relied on them historically.
    set_given = 'set_layout' in dic
    set_layout = get('set_layout', None)
    get_given = 'get_layout' in dic
    get_layout = get('get_layout', None)
    keys = []
    for key in (
            'layout', 'view_id',
            ):
        val = get(key, None)
        if val is not None:
            if layout is None:
                layout = val
            elif val != layout:
                # Both keys present with conflicting values.
                earlier = keys[1:] and tuple(keys) or keys[0]
                raise ValueError('%(key)r: %(val)r conflicts '
                                 '%(earlier)r value %(layout)r!'
                                 % locals())
            keys.append(key)
            layout_given = True
    if layout is None:
        layout = default
    if set_layout is None:
        # Default: set the layout whenever one was determined.
        set_layout = bool(layout)
    return (layout, set_layout, get_layout)
|
41a7a11e72ed70dee1456334a93a5cfe0651ec37
| 29,600
|
from bs4 import BeautifulSoup
def get_targets(html):
    """
    Return the text of all bolded targeting parameters, space-joined;
    empty string for empty/None input.
    """
    if not html:
        return ""
    soup = BeautifulSoup(html, "html.parser")
    bold_texts = [tag.get_text(" ") for tag in soup.find_all("b")]
    return " ".join(bold_texts)
|
04248637d3f77d4ed5b5365e60298e7bfb0c6933
| 29,601
|
import os
import shutil
def _mkdir(path, delete_if_exists=False):
"""Make directory (recursive)."""
if os.path.exists(path):
if not delete_if_exists:
return path
shutil.rmtree(path)
os.makedirs(path)
return path
|
ece5326ae3fcc1f6bae421765f9454e1c5d34827
| 29,602
|
def convert_fiscal_quarter_to_fiscal_period(fiscal_quarter):
    """Map a fiscal quarter (1-4) to its last fiscal period month.

    Returns None if fiscal_quarter is invalid or not a number.
    """
    quarter_to_period = {1: 3, 2: 6, 3: 9, 4: 12}
    return quarter_to_period.get(fiscal_quarter)
|
96875f895e6ffd6cf80c56b94f1ba1490c197523
| 29,603
|
def diff_possible(numbers, k):
    """
    Given a list of sorted integers and a non negative integer k, find if
    there exist two indices i and j such that A[i] - A[j] == k, i != j.

    :param numbers: list of integers sorted in ascending order
    :param k: non-negative difference to look for
    :return: True if such a pair exists, False otherwise
    :raises ValueError: if k is negative
    """
    if k < 0:
        # Bug fix: the message previously read "can not be non negative",
        # the opposite of the actual check.
        raise ValueError('k can not be negative')
    if k == 0:
        # A zero difference needs a duplicated value (i != j).
        return any(count > 1 for count in Counter(numbers).values())
    # Single O(n) pass with a set lookup.  The previous early exit
    # (numbers[-1] < k => False) was wrong for negative entries, e.g.
    # [-10, -4] with k == 6, and the nested scan was O(n^2).
    seen = set(numbers)
    return any(x + k in seen for x in numbers)
|
54c45f24644c478dc48679c11c5217c0318af2b4
| 29,604
|
import tarfile
def smart_is_tarfile(filepath):
    """
    :func:`tarfile.is_tarfile` plus error handling.

    :param filepath: Filename.
    :type filepath: str
    :return: True for a readable tar archive, False otherwise — including
        when the file cannot be opened at all.
    """
    try:
        return tarfile.is_tarfile(filepath)
    except (OSError, IOError):
        return False
|
4fb203685100204ea4a28d7854634628599801ce
| 29,605
|
def _initialize_dicts():
"""
Create the colorspace encoding and decoding dictionaries.
"""
enc = {}
for i, char1 in enumerate("ACGT"):
enc['N' + char1] = '4'
enc[char1 + 'N'] = '4'
enc['.' + char1] = '4'
enc[char1 + '.'] = '4'
for j, char2 in enumerate("ACGT"):
# XOR of nucleotides gives color
enc[char1 + char2] = chr(ord('0') + (i ^ j))
enc.update({ 'NN': '4', 'N.': '4', '.N': '4', '..': '4'})
dec = {}
for i, char1 in enumerate("ACGT"):
dec['.' + str(i)] = 'N'
dec['N' + str(i)] = 'N'
dec[char1 + '4'] = 'N'
dec[char1 + '.'] = 'N'
for j, char2 in enumerate("ACGT"):
# XOR of nucleotides gives color
dec[char1 + chr(ord('0') + (i ^ j))] = char2
dec['N4'] = 'N'
return (enc, dec)
|
801dd9c9b3c1fdf0aba44cb9be3d63d23c3c275f
| 29,606
|
def to_cmd_string(unquoted_str: str) -> str:
    """
    Add quotes around the string in order to make the command understand
    it's a string (useful with tricky symbols like & or white spaces).

    Non-string input is converted with ``str()`` first; quotes already
    present at either end are not duplicated.

    Args:
        unquoted_str (str): String to update
    Returns:
        str: Quoted string
    """
    text = unquoted_str if isinstance(unquoted_str, str) else str(unquoted_str)
    prefix = '' if text.startswith('"') else '"'
    suffix = '' if text.endswith('"') else '"'
    return prefix + text + suffix
|
bdba5138daa580c34d49618da511ff0e20f192d4
| 29,607
|
from typing import Optional
from typing import Dict
from typing import Any
def define_by_run_func(trial) -> Optional[Dict[str, Any]]:
    """Define-by-run function to create the search space.

    Ensure no actual computation takes place here — that belongs in the
    trainable passed to ``tune.run`` (in this example, ``easy_objective``).
    For more information, see https://optuna.readthedocs.io/en/stable\
/tutorial/10_key_features/002_configurations.html

    This function should either return None or a dict with constant values.
    """
    suggest = trial.suggest_float
    # This param is not used in the objective function.
    activation = trial.suggest_categorical("activation", ["relu", "tanh"])
    for name, low, high in (("width", 0, 20), ("height", -100, 100)):
        suggest(name, low, high)
    # Define-by-run allows for conditional search spaces.
    if activation == "relu":
        suggest("mult", 1, 2)
    # Return all constants in a dictionary.
    return {"steps": 100}
|
f8237b135e3a6467bdefd397f1da0ebaec0f4380
| 29,608
|
def flip_name(name):
    """
    Given a single name return any "Last, First" as "First Last".
    Strings with more or less than one comma are returned unchanged.

    @param name: string to be flipped
    @return: string
    """
    parts = name.split(',')
    if len(parts) != 2:
        return name
    last, first = parts
    return '{} {}'.format(first.strip(), last.strip())
|
bca4e0b9f61e7df5d22a96dee31f88151c439514
| 29,612
|
def rectangle_to_square(rectangle, width, height):
    """
    Converts a rectangle in the image to a valid square.  Keeps the square
    centered on the original rectangle whenever possible; when that would
    leave the picture, the square is shifted back inside.
    Assumes the square is able to fit inside the original picture.
    """
    from_x, from_y, to_x, to_y = rectangle
    rect_w = to_x - from_x
    rect_h = to_y - from_y
    side = max(rect_w, rect_h)
    center_x = from_x + rect_w // 2
    center_y = from_y + rect_h // 2

    def _clamp_span(center, limit):
        # Center the span on `center`, then slide it back into [0, limit].
        lo = center - side // 2
        hi = center + side // 2
        if lo < 0:
            hi -= lo
            lo = 0
        elif hi > limit:
            lo -= hi - limit
            hi = limit
        return lo, hi

    from_x, to_x = _clamp_span(center_x, width)
    from_y, to_y = _clamp_span(center_y, height)
    return from_x, from_y, to_x, to_y
|
e879cf81c1448f72d1d5326b0a8f964e47173f07
| 29,613
|
def welcome():
    """List all available api routes."""
    # Route descriptions are concatenated exactly as served before
    # (including the original wording).
    routes = (
        "/api/v1.0/precipitation<br/>Returns a JSON list of percipitation data for the dates between 8/23/16 and 8/23/17<br/><br/>",
        "/api/v1.0/stations<br/>Return a JSON list of stations from the dataset",
        "/api/v1.0/tobs<br/>Return a JSON list of temperature observations (TOBS) for the previous year.",
        "/api/v1.0/<start><br/>Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given",
        "/api/v1.0/<start>/<end>Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given",
    )
    return "".join(routes)
|
72a4d106c1f10c12ae94271c3f6b2cf045af2dfd
| 29,616
|
def check_treedepth(samples, max_treedepth=10, quiet=False, return_diagnostics=False):
    """Check transitions that ended prematurely due to maximum tree depth limit.
    Parameters
    ----------
    samples : ArviZ InferenceData instance
        Contains samples to be checked. Must contain both `posterior`
        and `sample_stats`.
    max_treedepth: int, default 10
        Maximum tree depth used in the calculation.
    quiet : bool, default False
        If True, do not print diagnostic result to the screen.
    return_diagnostics : bool, default False
        If True, return both a Boolean about whether the diagnostic
        passed and the number of samples where the tree depth was too
        deep. Otherwise, only return Boolean if the test passed.
    Returns
    -------
    passed : bool
        Return True if tree depth test passed. Return False otherwise.
    n_too_deep : int, optional
        Number of samplers wherein the tree depth was greater than
        `max_treedepth`.
    """
    # Count iterations whose recorded tree depth hit/exceeded the limit.
    n_too_deep = (samples.sample_stats.treedepth.values >= max_treedepth).sum()
    # Total iterations across all chains.
    n_total = samples.sample_stats.dims["chain"] * samples.sample_stats.dims["draw"]
    if not quiet:
        msg = "{} of {} ({}%) iterations saturated".format(
            n_too_deep, n_total, 100 * n_too_deep / n_total
        )
        msg += " the maximum tree depth of {}.".format(max_treedepth)
        print(msg)
    # Diagnostic passes only if no iteration saturated.
    pass_check = n_too_deep == 0
    if not pass_check and not quiet:
        print(
            "  Try running again with max_treedepth set to a larger value"
            + " to avoid saturation."
        )
    if return_diagnostics:
        return pass_check, n_too_deep
    return pass_check
|
ed949405b408841d4d716151187c65d2638b1b4e
| 29,617
|
import os
def find():
    """Find a local spark installation.

    Will first check the SPARK_HOME env variable, and otherwise
    search common installation locations, e.g. from homebrew.

    :return: path of the Spark installation
    :raises ValueError: when no installation can be located
    """
    spark_home = os.environ.get('SPARK_HOME', None)
    if spark_home:
        return spark_home
    candidates = [
        '/usr/local/opt/apache-spark/libexec',  # OS X Homebrew
        # Any other common places to look?
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    raise ValueError("Couldn't find Spark, make sure SPARK_HOME env is set"
                     " or Spark is in an expected location (e.g. from homebrew installation).")
|
24df248e5b25b018b124b48aa19bcb66661c1993
| 29,618
|
import struct
def is_python_64bit():
    """Returns whether the running Python interpreter is 64-bit."""
    pointer_bits = struct.calcsize("P") * 8
    return pointer_bits == 64
|
99d059bc9e3ca9bdeb48514bee255547d40328aa
| 29,619
|
def get_rpi_total_memory(self):
    """
    Return the total memory figure reported by the ``free`` command.

    :returns: Total memory
    """
    free_output = self.run_command_get_output('free')
    # Token index 7 corresponds to the total-memory column in the `free`
    # output layout on the Pi — TODO confirm against the target OS version.
    return free_output.split()[7]
|
2eddd0e1317d769020d55ba353a9decc36322a62
| 29,622
|
def _getInputValue(self, name, target = ''):
    """Return the typed value of an input by its name, taking both the
    calling element and its declaration into account. If the given
    input is not found, then None is returned.

    :param name: input name to look up
    :param target: optional lookup target (defaults to '')

    NOTE(review): this method calls itself with identical arguments and
    therefore recurses forever if ever invoked; it presumably should
    delegate to a parent-class implementation or a differently named
    helper — confirm against the original class hierarchy.
    """
    value = self._getInputValue(name, target)
    return value.getData() if value else None
|
f12178f4a790f8d51f2f89d096f38b6dc780bd66
| 29,623
|
import re
import click
def dict_raise_on_duplicates_recursive(ordered_pairs, masterKeys):
    """Recursively rebuild a parsed mapping while flagging problem keys.

    Walks ``ordered_pairs`` depth-first and echoes the current key path
    (via ``click.secho``) whenever a key contains whitespace, contains a
    character outside ``a-zA-Z@$``, or duplicates an earlier key.  Nested
    dicts are processed recursively with their key appended to
    ``masterKeys``.

    :param ordered_pairs: dict of parsed key/value pairs
    :param masterKeys: list of ancestor keys; mutated as the walk descends
    :return: rebuilt dict

    NOTE(review): despite the name, ``ordered_pairs`` is iterated with
    ``.items()``, so duplicate keys have already been collapsed by the
    parser before this runs; to truly reject duplicates this should be
    used as ``json.loads(..., object_pairs_hook=...)`` over a pair list —
    confirm the intended usage.
    """
    d = {}
    # print(ordered_pairs)
    for k, v in ordered_pairs.items():
        # Key contains whitespace.
        if re.search(r"\s", k)!= None:
            # click.secho("Please remove the whitespace in property name %r," % (k,),
            # "\n inside property" , masterKeys ,". The Validator will proceed without the whitespaces\n")
            # k = k.strip()
            click.secho(str(masterKeys))
        # Key contains a character outside a-z, A-Z, '@' and '$'.
        if re.search("[^a-zA-Z@\$]", k) != None :
            # click.secho(k,v)
            # click.secho("Please be noted there are non alphabetic character in property name %r," % (k,),
            # "\n inside property" ,masterKeys ,". Schema.org has no property name with non alphabetic character therefore the Bioschemas validator will not validate this property.")
            click.secho(str(masterKeys))
        if k in d:
            # click.secho("duplicate property: %r," % (k,), "inside property", masterKeys,", the value for the last %r will be used"% (k,))
            click.secho(str(masterKeys))
        elif type(v) is dict:
            # Recurse into nested objects, tracking the key path.
            masterKeys.append(k)
            d[k] = dict_raise_on_duplicates_recursive(v, masterKeys)
        else:
            d[k] = v
    return d
|
552084d668000cce6aff03b16516f858921070ee
| 29,624
|
def are_same_dictionaries(dict_1, dict_2):
    """
    ({ 'key': 'value', ...},
    { 'key': 'value', ...}) -> Bool

    Compares simple (non-nested) dictionaries: same key set and equal
    value for every key.
    """
    if dict_1.keys() != dict_2.keys():
        return False
    # Key sets match, so every value can be compared directly.
    return all(dict_2[key] == value for key, value in dict_1.items())
|
e10b32c11c992b71dd9f419ed91a10604a1c4ae0
| 29,625
|
def action_quit():
    """Build the QUIT protocol message."""
    return {"action": "quit"}
|
b172a084666b01a66c1a1fca01f4fdae0c509273
| 29,628
|
import sys
def fixture_patch_argv(monkeypatch):
    """Function for passing mock commandline arguments to ``sys.argv``.

    :param monkeypatch: ``pytest`` fixture for mocking attributes.
    :return: Function for using this fixture.
    """
    def _argv(*args):
        # Program name first, then the supplied arguments.
        mocked = [__name__]
        mocked.extend(args)
        monkeypatch.setattr(sys, "argv", mocked)
    return _argv
|
7cf045b4379a7f606b5e1f315aef7285455cb6da
| 29,629
|
import re
def update_network_name(info_file, new_example_file, default_name, model_name):
    """Copy ``info_file`` to ``new_example_file``, replacing the default
    network name (and its uppercase form) with the model name.

    :param info_file: source path object (must support ``.open()``)
    :param new_example_file: destination path object
    :param default_name: placeholder name to replace
    :param model_name: replacement name
    :return: ``new_example_file``
    """
    with info_file.open() as fr:
        content = fr.read()
    if default_name != model_name:
        # Replace both the as-given and the uppercase spelling.
        for old, new in ((default_name, model_name),
                         (default_name.upper(), model_name.upper())):
            content = re.sub(old, new, content)
    with new_example_file.open("w") as fw:
        fw.write(content)
    return new_example_file
|
787ffd63f1d110e5ba9c60df9096bfb645cfc2ca
| 29,630
|
def inchesToMeters(inches):
    """Convert inches to meters (1 inch == 0.0254 m)."""
    meters_per_inch = 0.0254
    return inches * meters_per_inch
|
b9f87ddc885474964ff59618bd560fb20ac9d7ff
| 29,640
|
def invalid_colour(colour):
    """Validate the RGB colour.

    Returns a description string when ``colour`` is not a list of
    components in the 0-255 range, and False when it is valid.
    """
    error_message = f"`{colour}` is not a valid RGB colour"
    if not isinstance(colour, list):
        return error_message
    if any(component < 0 or component > 255 for component in colour):
        return error_message
    return False
|
ae8d6160d9ee032d24904127d594e42387072562
| 29,642
|
from bs4 import BeautifulSoup
def decoding(data, form):
    """
    Decode a response object as ``form`` and parse it with BeautifulSoup.

    params:
        data: requests response object to decode
        form: character encoding to apply (e.g. 'gb18030')
    :return: BeautifulSoup object built from the decoded text
    """
    data.encoding = form
    return BeautifulSoup(data.text, 'html.parser')
|
d73713ff2aa1e057dfeb70fb699551973eef2c4f
| 29,644
|
def gn_refs(api, step_name, args):
    """Runs ``gn refs`` with the given additional arguments.

    Returns: the list of matched targets.
    """
    gn_args = [
        '--root=%s' % str(api.path['checkout']),
        'refs',
        str(api.chromium.output_dir),
    ] + args
    step_result = api.python(step_name,
                             api.depot_tools.gn_py_path,
                             gn_args,
                             stdout=api.raw_io.output_text())
    return step_result.stdout.split()
|
83a6b423b8d578747638305dd8b4421e7fc180c1
| 29,645
|
import sys
def _is_permission_error(e):
"""\
受け取った例外がアクセス権限のものであればTrue、そうでなければFalseを返す
"""
# See PEP 3151
if sys.version_info[0:2] < (3, 2):
return (isinstance(e, OSError) or isinstance(e, IOError)
and e.args[0] == 13)
else:
return isinstance(e, PermissionError)
|
009cb088ceaf398eb1714f08ef9dd4f9ccb010c2
| 29,648
|
import bs4
def get_navigable_string_type(obj):
    """Return the bs4 NavigableString class; ``obj`` is accepted only for
    interface compatibility and is ignored."""
    return bs4.NavigableString
|
79d47783e0441a17fc3016218683dc19c7dc78f2
| 29,650
|
def convert_parentheses_back(text: str):
    """Replace literal ``(`` and ``)`` characters with the ``-LRB-`` and
    ``-RRB-`` bracket tokens."""
    for raw, token in (("(", "-LRB-"), (")", "-RRB-")):
        text = text.replace(raw, token)
    return text
|
7bf23c858ade23d61c3d12b39cb3a76b840aefc0
| 29,651
|
def is_asc_sorted(lst: list) -> bool:
    """
    Utility that tells whether a given list is strictly ascending,
    with every item in the list greater than the previous one.

    :param lst: list to check
    :return: true if and only if the given list is ascendentally sorted
    """
    # Bug fix: the range previously stopped at len(lst) - 2, skipping the
    # final adjacent pair (e.g. [1, 2, 0] was reported as sorted).
    return all(lst[i] < lst[i + 1] for i in range(len(lst) - 1))
|
9c1fcd2354196ca33c3f61fc91c17fa5cf6907a8
| 29,654
|
def identifyFileType(suffix):
    """Identify the file type and return the matching comment syntax.

    :suffix: file suffix
    :returns: a 3-tuple (inline comment, multi-line open, multi-line close)
        for known suffixes, or the string "not defined" otherwise
    """
    c_like_suffixes = ("c", "h", "cpp", "hpp", "java")
    if suffix == "py":
        return "#", '"""', '"""'
    if suffix in c_like_suffixes:
        return "//", "/*", "*/"
    return "not defined"
|
b4a6de0039fc7ca1459fffb56b0e95b884a10704
| 29,655
|
def remove_bracket(string: str):
    """
    Drop everything from the first '(' or '[' onward and trim the
    surrounding whitespace of what remains.
    """
    normalized = string.replace('[', '(')
    head = normalized.split('(')[0]
    return head.strip()
|
bb0805601bd2c396fcdaf153c38d7dbb2157e2c8
| 29,658
|
def parse_walltime(wt_arg):
    """Normalise a walltime string to the ``dd:hh:mm:ss`` format.

    Each given component is zero-padded to two digits and missing
    leading components are filled with "00".
    """
    padded = ["{0:02d}".format(int(part)) for part in wt_arg.split(":")]
    # Left-pad with "00" components up to the four dd:hh:mm:ss fields.
    full = ["00"] * (4 - len(padded)) + padded
    return ":".join(full)
|
85327866ccf738f4212324f258e7400318b6a2d1
| 29,659
|
def read_list(fn):
    """Reads a list from a file without using readline.

    Uses standard line endings ("\n") to delimit list items.

    :param fn: path of the file to read
    :return: list of lines, without the trailing newline
    """
    # Bug fixes: the file handle was never closed, and an empty file
    # crashed on s[-1]; `with` and endswith() handle both.
    with open(fn, "r") as f:
        s = f.read()
    if s.endswith("\n"):
        s = s[:-1]
    return s.split("\n")
|
0b73082074914b824e93cceb18ea773a916db5bc
| 29,660
|
def difference(f1_d, f2_d, out_f1_head, out_f2_head):
    """
    Report the chromosome names present in one fasta but not the other.

    Parameters
    ----------
    f1_d : dict
        Dictionary for first fasta, chromosome names as values
    f2_d : dict
        Dictionary for second fasta, chromosome names as values
    out_f1_head : list
        Filtered-out chromosome names from the first fasta
    out_f2_head : list
        Filtered-out chromosome names from the second fasta

    Returns
    -------
    Tuple of two sorted lists: names found only in the first fasta and
    names found only in the second, excluding the filtered-out names.
    """
    def _exclusive_names(a_d, b_d, excluded):
        # Names whose keys are unique to a_d, minus the excluded set.
        only_keys = set(a_d) - set(b_d)
        names = {a_d[key] for key in only_keys} - set(excluded)
        return sorted(names)

    return (_exclusive_names(f1_d, f2_d, out_f1_head),
            _exclusive_names(f2_d, f1_d, out_f2_head))
|
4f25fc4595fbc59f02121a772123413a9e2a61a6
| 29,663
|
def add_error_total_and_proportion_cols(
        total_errs, total_rows, date,
        errs_by_table, total_by_table,
        aggregate_df_total, aggregate_df_error,
        aggregate_df_proportion):
    """
    Function adds a new column to the growing dataframes. This
    column contains information depending on the dataframe in
    question. The column may contain information about

    a. the total number of rows
    b. the total number of 'poorly defined' rows
    c. the relative contribution of a particular table
       to the number of 'poorly defined' rows

    The column represents a particular date. Each row
    represents a particular table type. The final row
    is an 'aggregate' metric that is a sum of the
    rows immediately above it.

    NOTE: errs_by_table and total_by_table are mutated in place
    (appended to), so repeated calls keep growing the caller's lists.

    :param
    total_errs (int): total number of errors across all sites
        for a particular date; across all tables
    total_rows (int): total number of rows across all sites
        for a particular date; across all tables
    date (string): date used to index into the dictionaries above
    errs_by_table (list): list of the total number of
        poorly defined row counts across all sites.
        sequence of the list parallels the alphabetical
        order of the tables. has 'total' metric at the end.
    total_by_table (list): list of the total number of row counts
        across all sites. sequence of the list parallels the
        alphabetical order of the tables. has 'total'
        metric at the end.
    aggregate_df_total (dataframe): dataframe that contains the
        total number of rows across all sites. each column
        is a date. each row is a table type. last row
        is the total number of rows for a particular
        date.
    aggregate_df_error (dataframe): dataframe that contains
        the total number of poorly defined rows across
        all sites. each column is a date. each row is a table
        type. last row is the total number of poorly defined
        rows for a particular date.
    aggregate_df_proportion (dataframe): dataframe that
        shows the 'contribution' of each table to the
        total number of poorly defined rows. for instance,
        if a table has half of all of the 'poorly defined
        rows' for a particular date, it will have a value of
        0.5. each column is a date. each row is a table
        type.

    :return:
    aggregate_df_error, aggregate_df_total, aggregate_df_proportion:
        the same dataframes that entered, each with an additional
        column for the date in question. Note the return order
        (error, total, proportion) differs from the argument order.
    """
    # adding to the growing column
    # total number of 'poorly defined' rows for a date
    # NaN is used as a "no data" sentinel when the count is not positive;
    # it also prevents a ZeroDivisionError in the rate computation below
    # (NaN propagates through the division instead).
    if total_errs > 0:
        errs_by_table.append(total_errs)
    else:
        errs_by_table.append(float('NaN'))
    # total number of rows for a table for the date
    if total_rows > 0:
        total_by_table.append(total_rows)
    else:
        total_by_table.append(float('NaN'))
    # column for the 'total' rows; column for one date
    aggregate_df_total[date] = total_by_table
    # column for 'error' rows; column for one date
    aggregate_df_error[date] = errs_by_table
    # column for the contribution of each error type
    # Despite the parameter name, this stores SUCCESS rates (100 - error%),
    # rounded to 2 decimals; a NaN count yields a NaN rate.
    succ_rate_by_table = []
    for errors, total in zip(errs_by_table, total_by_table):
        error_rate = round(errors / total * 100, 2)
        success_rate = 100 - error_rate
        succ_rate_by_table.append(success_rate)
    aggregate_df_proportion[date] = succ_rate_by_table
    return aggregate_df_error, aggregate_df_total, \
        aggregate_df_proportion
|
3f1e6b7aa675d0184cde7d7712e41b89d689a536
| 29,664
|
import yaml
def parse_str(source: str, model_content: str) -> dict[str, dict]:
    """Parse a string containing one or more yaml model definitions.

    Args:
        source: The file the content came from (to help with better logging)
        model_content: The yaml to parse

    Returns:
        A dictionary of the parsed model(s), keyed by each model's "name"
        field, with the parsed model root as the value.
    """
    # NOTE(review): yaml.FullLoader can construct many Python objects;
    # confirm the input is trusted, else prefer yaml.safe_load_all.
    models: dict[str, dict] = {}
    for document in yaml.load_all(model_content, Loader=yaml.FullLoader):
        # Drop the "import" key (if present) before picking the type key,
        # matching the original's del-then-first-key ordering.
        document.pop("import", None)
        type_key = next(iter(document))
        models[document[type_key]["name"]] = document
    return models
|
acbca7fb764b09c8ccb2b5d7e28748bbb74ad870
| 29,665
|
def shift_sparse_voxel(sparse_v, shift_vec, min_b=None, max_b=None):
    """
    Translate a sparse voxel by shift_vec. Blocks whose shifted
    coordinates fall below min_b (inclusive lower bound) or at/above
    max_b (exclusive upper bound) are dropped when those bounds are given.
    """
    shifted = []
    for v in sparse_v:
        moved = tuple(c + s for c, s in zip(v[0], shift_vec))
        if min_b is not None and any(c < lo for c, lo in zip(moved, min_b)):
            continue
        if max_b is not None and any(c >= hi for c, hi in zip(moved, max_b)):
            continue
        shifted.append((moved, v[1]))
    return shifted
|
9fd897ab4c3ccde5e2789ae7d7b3e21c3e778491
| 29,666
|
def int_scale(val, val_range, out_range):
    """
    Scale val in the range [0, val_range-1] to an integer in the range
    [0, out_range-1], using the "round-half-up" rounding method.

    >>> "%x" % int_scale(0x7, 0x10, 0x10000)
    '7777'
    >>> "%x" % int_scale(0x5f, 0x100, 0x10)
    '6'
    >>> int_scale(2, 6, 101)
    40
    >>> int_scale(1, 3, 4)
    2
    """
    # Doubling numerator and denominator and adding (val_range-1) to the
    # numerator implements round-half-up in pure integer arithmetic.
    denominator = (val_range - 1) * 2
    numerator = int(val * (out_range - 1) * 2 + (val_range - 1))
    return numerator // denominator
|
a58bd33dd0712444bbfbd91e4f988a44780de323
| 29,667
|
def dict_search(data: dict, key: str, depth: int = 3):
    """Search for *key* in *data*, recursing into nested dicts; return the
    value, or None when the key is not found.

    Warn: This method can be slow due to the amount of depth.

    :param data: dictionary to search
    :param key: key to look for
    :param depth: how many nested-dict levels may still be descended into
    :return: the first value found for *key*, else None
    """
    if key in data:
        return data[key]
    if depth > 0:
        for value in data.values():
            if isinstance(value, dict):
                found = dict_search(value, key, depth - 1)
                # Compare against None so falsy hits (0, '', False) are
                # still returned; the original's `if result:` dropped them.
                if found is not None:
                    return found
    return None
|
94bf3753b8b37afb313983ff8a0d99a14b2e9b98
| 29,668
|
import importlib
def load_plugin(plugin):
    """Import and return the module named *plugin*."""
    module = importlib.import_module(plugin)
    return module
|
5a59ee8f41e7a0e3d96e0582937a7f9eb7fa4be8
| 29,669
|
def bisection_search2(L, e):
    """
    Divide-and-conquer membership test on a sorted list.
    Complexity: O(log n)

    :param L: List-object (must be sorted ascending)
    :param e: Element to look for
    :return: Boolean value if element has been found
    """
    def _search(lo, hi):
        if lo == hi:
            return L[lo] == e
        mid = (lo + hi) // 2
        if L[mid] == e:
            return True
        if L[mid] > e:
            # Nothing left of mid to inspect.
            if lo == mid:
                return False
            return _search(lo, mid - 1)
        return _search(mid + 1, hi)

    if not L:
        return False
    return _search(0, len(L) - 1)
|
d3bda3698b5a321ac6307fe2a068c4ec61d6c57b
| 29,670
|
from importlib import import_module
def try_import_for_setup(module_name):
    """
    Returns the imported module named :samp:`{module_name}`.

    Attempts to import the module named :samp:`{module_name}` and
    translates any raised :obj:`ImportError` into a :obj:`NotImplementedError`.
    This is useful in benchmark :samp:`setup`, so that a failed import results in
    the benchmark getting *skipped*.

    :type module_name: :obj:`str`
    :param module_name: Attempt to import this module.
    :rtype: :obj:`module`
    :returns: The imported module named :samp:`{module_name}`.
    :raises NotImplementedError: if there is an :obj:`ImportError`.
    """
    try:
        module = import_module(module_name)
    except ImportError as e:
        # Chain the original ImportError so tracebacks keep the real cause.
        raise NotImplementedError(
            "Error during '%s' module import: %s." % (module_name, str(e))
        ) from e
    return module
|
9dc45df6372276258f6b0b9aab2a151e9b4b75af
| 29,671
|
import random
def computer_turn(user_difficulty, player2_score):
    """Play the computer's turn and return its (possibly incremented) score.

    :param user_difficulty: "e" for easy mode (computer scores less often);
        any other value uses the harder 1-6 roll.
    :param player2_score: the computer's score before this turn
    :return: the computer's score after this turn
    """
    print("The computer's turn...")
    # Easy mode rolls 2-10, so a scoring roll (<= 5) is less likely than
    # with the 1-6 roll used otherwise. (Removed the unused `answer` local.)
    if user_difficulty == "e":
        random_question = random.randint(2, 10)
    else:
        random_question = random.randint(1, 6)
    if random_question <= 5:
        player2_score += 1
    print("The computer's is done.")
    print("")
    return player2_score
|
284b369c028a76496e68fa3ebfde19c03b54c11f
| 29,672
|
import base64
def encrypted(plain):
    """Base64-encode the plaintext password; returns bytes.

    NOTE(review): base64 is an encoding, not encryption — confirm callers
    do not rely on this for confidentiality.
    """
    raw = plain.encode('utf-8')
    return base64.b64encode(raw)
| 29,675
|
from typing import Callable
def string_filter(s: str, method: Callable) -> str:
    """
    Keep only the characters of *s* for which *method* returns true.

    >>> string_filter('1s6 1g7 599h 29h89 0d53', method=str.isdigit)
    '16175992989053'
    >>> string_filter('16 17 599 2989 053', method=str.isalpha)
    ''
    """
    return ''.join(ch for ch in s if method(ch))
|
cd5c6d05fe73336cc1472b0796b3adef4c408779
| 29,676
|
def convert_into_ascii(dec):
    """Return the character for the given code point.

    :param dec: integer code point (e.g. 65)
    :return: one-character string (e.g. 'A')
    """
    # chr() already returns a str; the original's extra str() was redundant.
    return chr(dec)
|
7f563deaf35f59199a4ff6467ecd8a61dd244b3a
| 29,677
|
def deepmap(func, seq):
    """ Apply function inside nested lists

    >>> inc = lambda x: x + 1
    >>> deepmap(inc, [[1, 2], [3, 4]])
    [[2, 3], [4, 5]]
    """
    # Non-list leaves get the function applied directly; lists recurse.
    if not isinstance(seq, list):
        return func(seq)
    return [deepmap(func, item) for item in seq]
|
43856a93e472f30b84bf842586003952649369c0
| 29,678
|
def get_endpoint(nautobot, term):
    """
    get_endpoint(nautobot, term)
    nautobot: a predefined pynautobot.api() pointing to a valid instance
              of Nautobot
    term: the term passed to the lookup function upon which the api
          call will be identified

    Raises KeyError for an unrecognised term.
    """
    # Map each lookup term straight to its pynautobot endpoint object
    # (the per-entry {"endpoint": ...} wrapper added nothing).
    endpoints = {
        "aggregates": nautobot.ipam.aggregates,
        "circuit-terminations": nautobot.circuits.circuit_terminations,
        "circuit-types": nautobot.circuits.circuit_types,
        "circuits": nautobot.circuits.circuits,
        "circuit-providers": nautobot.circuits.providers,
        "cables": nautobot.dcim.cables,
        "cluster-groups": nautobot.virtualization.cluster_groups,
        "cluster-types": nautobot.virtualization.cluster_types,
        "clusters": nautobot.virtualization.clusters,
        "config-contexts": nautobot.extras.config_contexts,
        "console-connections": nautobot.dcim.console_connections,
        "console-ports": nautobot.dcim.console_ports,
        "console-server-port-templates": nautobot.dcim.console_server_port_templates,
        "console-server-ports": nautobot.dcim.console_server_ports,
        "device-bay-templates": nautobot.dcim.device_bay_templates,
        "device-bays": nautobot.dcim.device_bays,
        "device-roles": nautobot.dcim.device_roles,
        "device-types": nautobot.dcim.device_types,
        "devices": nautobot.dcim.devices,
        "export-templates": nautobot.dcim.export_templates,
        "front-port-templates": nautobot.dcim.front_port_templates,
        "front-ports": nautobot.dcim.front_ports,
        "graphs": nautobot.extras.graphs,
        "image-attachments": nautobot.extras.image_attachments,
        "interface-connections": nautobot.dcim.interface_connections,
        "interface-templates": nautobot.dcim.interface_templates,
        "interfaces": nautobot.dcim.interfaces,
        "inventory-items": nautobot.dcim.inventory_items,
        "ip-addresses": nautobot.ipam.ip_addresses,
        "manufacturers": nautobot.dcim.manufacturers,
        "object-changes": nautobot.extras.object_changes,
        "platforms": nautobot.dcim.platforms,
        "power-connections": nautobot.dcim.power_connections,
        "power-outlet-templates": nautobot.dcim.power_outlet_templates,
        "power-outlets": nautobot.dcim.power_outlets,
        "power-port-templates": nautobot.dcim.power_port_templates,
        "power-ports": nautobot.dcim.power_ports,
        "prefixes": nautobot.ipam.prefixes,
        "rack-groups": nautobot.dcim.rack_groups,
        "rack-reservations": nautobot.dcim.rack_reservations,
        "rack-roles": nautobot.dcim.rack_roles,
        "racks": nautobot.dcim.racks,
        "rear-port-templates": nautobot.dcim.rear_port_templates,
        "rear-ports": nautobot.dcim.rear_ports,
        "regions": nautobot.dcim.regions,
        "reports": nautobot.extras.reports,
        "rirs": nautobot.ipam.rirs,
        "roles": nautobot.ipam.roles,
        "services": nautobot.ipam.services,
        "sites": nautobot.dcim.sites,
        "tags": nautobot.extras.tags,
        "tenant-groups": nautobot.tenancy.tenant_groups,
        "tenants": nautobot.tenancy.tenants,
        "topology-maps": nautobot.extras.topology_maps,
        "virtual-chassis": nautobot.dcim.virtual_chassis,
        "virtual-machines": nautobot.virtualization.virtual_machines,
        "virtualization-interfaces": nautobot.virtualization.interfaces,
        "vlan-groups": nautobot.ipam.vlan_groups,
        "vlans": nautobot.ipam.vlans,
        "vrfs": nautobot.ipam.vrfs,
    }
    return endpoints[term]
|
f1fff2fabd66dbaa2e228ef3839a11c258423510
| 29,680
|
def appforth(df, line):
    """
    Prepend *line* as a new first row of *df* and return the result.
    """
    df.loc[-1] = line        # temporary index below every existing row
    df.index = df.index + 1  # shift so the new row becomes index 0
    return df.sort_index()
|
4ae666c5a0a46b92f832dee38b661c90bbcd4ba8
| 29,681
|
def center_id_from_filename(filename):
    """Given the name of a rollgen PDF output file, return the center_id embedded in that name"""
    # Filenames look like NNNNN_XXXXXX.pdf: the center id is everything
    # before the first underscore.
    center_id, _, _ = filename.partition('_')
    return int(center_id)
|
d977e6acf509be011692bbf3e1fa910d16921130
| 29,683
|
def transcript(cloud_response):
    """Get the text transcription with the highest confidence from a
    google cloud speech-to-text response.

    Args:
        cloud_response: response from speech-to-text service
    Returns:
        (transcription, confidence): string value of transcription
        with corresponding confidence score; (None, 0.0) when the
        response has no usable results.
    """
    transcription = None
    confidence = 0.0
    # A malformed/empty response lacks these attributes; treat that as
    # "no transcription" instead of swallowing every exception with the
    # original's bare except.
    try:
        for result in cloud_response.results:
            for alt in result.alternatives:
                if confidence < alt.confidence:
                    confidence = alt.confidence
                    transcription = alt.transcript
    except (AttributeError, TypeError):
        pass
    return (transcription, confidence)
|
a46b033a5baad298497a22b55550c0c9c16b7544
| 29,684
|
import os
def init_all_poolcount_str(all_vars):
    """
    Create the all.poolcount output file and register it in all_vars.

    Adds two keys to all_vars:
      "all_poolcount_fp": path of <outdir>/all.poolcount
      "all_poolcount_fH": an open write handle with the header row already
          written. The header holds the fixed fields (barcode, rcbarcode,
          scaffold, strand, pos, locusId, f) followed by one
          SetName.Index column per experiment in all_vars["exps"].

    The handle is intentionally left open for later appends.
    Returns the updated all_vars.
    """
    out_path = os.path.join(all_vars["outdir"], "all.poolcount")
    all_vars["all_poolcount_fp"] = out_path
    header = "barcode rcbarcode scaffold strand pos locusId f".split(" ")
    header += [exp["SetName"] + "." + exp["Index"] for exp in all_vars["exps"]]
    handle = open(out_path, "w")
    handle.write("\t".join(header) + "\n")
    all_vars["all_poolcount_fH"] = handle
    return all_vars
|
73c0e2c16bcefbb79713d64687bfb66fded17d45
| 29,688
|
from typing import Any
def _default_key_func(item: Any):
"""Key function that orders based upon timestampMs value."""
return item.get("timestampMs")
|
d9cc0932ca3e8469d33186100f698293322a96bb
| 29,689
|
def list_truncate(lst, trunc_size):
    """Split *lst* into consecutive sublists of *trunc_size* elements each;
    every sublist except possibly the last has exactly trunc_size elements.

    :param lst: list to split
    :param trunc_size: positive number of elements per sublist
    :return: list of sublists
    """
    assert isinstance(lst, list), f'first parameter should be a list, got {type(lst)}'
    assert isinstance(trunc_size, int), f'second parameter should be an integer larger than 0'
    assert trunc_size > 0, f'second parameter should be an integer larger than 0, got {trunc_size}'
    total = len(lst)
    if total <= trunc_size:
        # Preserve the original aliasing: the single chunk is *lst* itself,
        # not a copy.
        return [lst]
    return [lst[start:start + trunc_size] for start in range(0, total, trunc_size)]
|
e967a25bb6f22e656703dd0fe5b11bc697f5a996
| 29,691
|
def scalar_floordiv(x, y):
    """Implementation of `scalar_floordiv`."""
    # Dispatches directly to x's __floordiv__. Unlike the `//` operator,
    # this has no reflected-operand (y.__rfloordiv__) fallback and may
    # return NotImplemented — presumably intentional for this framework's
    # dispatch machinery; confirm before changing.
    return x.__floordiv__(y)
|
32bb16afb46d645f756cb21b9d9a3e609d0d839a
| 29,692
|
def is_scale_type(variable_type):
    """Return True when *variable_type* can be converted to a float.

    :param variable_type: value to test (string, number, ...)
    :return: bool
    """
    try:
        float(variable_type)
        return True
    except (TypeError, ValueError):
        # TypeError covers non-numeric objects such as None, which the
        # original version let crash.
        return False
|
97e2ff180f2480d54da3d3701b56518b39fb1a7b
| 29,693
|
def get_empty_of_type(input_type):
    """
    Return an empty form of a given type
    ----
    examples:
     1) get_empty_of_type('str') -> "''"
     2) get_empty_of_type('???') -> None
    ----
    :param input_type: str
    :return: str (or None for an unknown type name)
    """
    empties = {
        'str': "''",
        'int': '0',
        'list': '[]',
        'dict': '{}',
        '': "''",
    }
    return empties.get(input_type)
|
2bcdf09a9cf07cdda6a3376894254be036056565
| 29,694
|
def volt2temp(volt):
    """
    Returns the temperature in float format from float voltage
    Eq at: http://ww1.microchip.com/downloads/en/DeviceDoc/20001942G.pdf
    """
    offset_v = volt - 0.5    # 0.5 V corresponds to 0 degrees
    return offset_v / 0.010  # 10 mV per degree
|
78691d6a7d02485ad2413b8d51c1ec4e7f448230
| 29,699
|
import json
def read_from_file(file_name):
    """Read and return JSON data from *file_name*."""
    with open(file_name, 'r') as handle:
        return json.load(handle)
|
a83257b610ad420f7dd0fe0726d73111b020310e
| 29,700
|
def mean_photon_v(pk1, pk2, pk3, mu1, mu2, mu3):
    """
    Calculate the mean photon number for a signal sent by Alice,
    from individual probability/intensity values.

    Parameters
    ----------
    pk1, pk2, pk3 : float
        Probability that Alice prepares a signal with intensity 1, 2, 3.
    mu1, mu2, mu3 : float
        Intensities 1, 2, 3.

    Returns
    -------
    float
        Mean signal photon number (probability-weighted intensity sum).
    """
    probabilities = (pk1, pk2, pk3)
    intensities = (mu1, mu2, mu3)
    return sum(p * mu for p, mu in zip(probabilities, intensities))
|
7afbcc2fa3030611c87da00b9ac164c6c20f485f
| 29,701
|
def generate_coordinate(random_func, d):
    """Generate the 8 corner coordinates of a unit cube, where each corner
    except the origin is jittered by its own random offset r*d
    (r drawn once per corner from random_func)."""
    rd = [random_func() * d for _ in range(7)]
    return [
        [0.0, 0.0, 0.0],
        [1.0 + rd[0], 0.0, 0.0],
        [1.0 + rd[1], 1.0 + rd[1], rd[1]],
        [rd[2], 1.0 + rd[2], 0.0],
        [rd[3], rd[3], 1.0 + rd[3]],
        [1.0 + rd[4], rd[4], 1.0 + rd[4]],
        [1.0 + rd[5], 1.0 + rd[5], 1.0 + rd[5]],
        [rd[6], 1.0 + rd[6], 1.0 + rd[6]],
    ]
|
92bc575360b9c1ffa19cfba09120df7ef67d2695
| 29,704
|
def CsvEscape(text):
    """Escapes data entry for consistency with CSV format.

    The CSV format rules:
    - Fields with embedded commas, double quotes, or line breaks must be
      enclosed within double-quote characters.
    - Each embedded double-quote character is represented by a pair of
      double-quote characters.
    - Fields with leading or trailing spaces must be enclosed within
      double-quote characters.

    Args:
      text: str Data entry.

    Returns:
      str CSV encoded data entry ('' for a falsy input).
    """
    if not text:
        return ''
    if '"' in text:
        text = text.replace('"', '""')
    # The original also re-tested `text == ''` here, which was unreachable
    # after the early return above; that dead check has been removed.
    needs_quoting = (
        any(ch in text for ch in ',"\n\r')
        or text[0] == ' '
        or text[-1] == ' '
    )
    if needs_quoting:
        text = '"%s"' % text
    return text
|
a28620e204f5433c580c00a234ea0ab5e6ac060c
| 29,705
|
from typing import Union
def string_to_number(string: str) -> Union[int, float]:
    """Convert a numeric string, preserving its original type (int vs float)."""
    if "." in string:
        return float(string)
    return int(string)
|
a53a6764171384ee258d38ff82014b4ac2724ab2
| 29,706
|
def _parse_imdb_id(imdb_id):
""" Parse the int value from a imdb id format ttNNNNNN if the prefix is found
on the input value. If the input is an int just return it.
This method is used because if using the sql based imdb database it will expect an
integer without the tt prefix. The http method will accept both so we default to
always using the integer
:param imdb_id:
:type imdb_id:
:return:
:rtype:
"""
if isinstance(imdb_id, int):
imdb_id = "{}".format(imdb_id)
if imdb_id.startswith("tt"):
imdb_id = imdb_id[2:]
return imdb_id
|
35453944faf9024e5534ceeceb9d96caa8c8a142
| 29,708
|
def fib_recursivo(n):
    """Return the n-th term of the Fibonacci sequence, computed recursively.

    @param n: position to return
    @return: the n-th Fibonacci number
    """
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fib_recursivo(n - 1) + fib_recursivo(n - 2)
|
6d57d8408a2e55b9f327c98b31a55f16165983f6
| 29,709
|
def _get_interpolation(data, val):
"""Returns an interpolation of the data between current year and next year."""
year = val['year']
alpha = val['alpha']
# Check for the value.
# Need to theck for alpha = 0 and alpha = 1 if year is the last year
# which would result in a KeyError as year+1 will not exist then.
if alpha == 0:
return data.loc[year]
elif alpha == 1:
return data.loc[year+1]
else:
return (1 - alpha)*data.loc[year] + alpha*data.loc[year+1]
|
3be1d15f64f00331565fd30fff3d6f50d05d9f67
| 29,710
|
def _handle_output(results_queue):
"""Scan output for exceptions
If there is an output from an add task collection call add it to the results.
:param results_queue: Queue containing results of attempted add_collection's
:type results_queue: collections.deque
:return: list of TaskAddResults
:rtype: list[~TaskAddResult]
"""
results = []
while results_queue:
queue_item = results_queue.pop()
results.append(queue_item)
return results
|
66f77ea1a2abc452521a312946ab1e3414d84b1c
| 29,711
|
def avg(first_num, second_num):
    """computes the average of two numbers"""
    total = first_num + second_num
    return total / 2.0
|
06277da01bdd122bad7957ed9675d00bcda16438
| 29,712
|
def list_workers(input_data, workerlimit):
    """
    Count number of threads, either length of iterable or provided limit.

    :param input_data: Input data, some iterable.
    :type input_data: list
    :param workerlimit: Maximum number of workers.
    :type workerlimit: int
    """
    return min(len(input_data), workerlimit)
|
9ea30a3a3fc3ebd67ffee7117d502caa9110de08
| 29,713
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.