content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def checksum(numbers):
    """Form checksum from list of numbers (product of the first two entries)."""
    first, second = numbers[0], numbers[1]
    return first * second
|
cdab30f2be037940d4d6f3cf39f15ce4d7efb072
| 15,423
|
def Normalize(D):
    """
    Normalize a feature matrix column-wise (z-score).

    Columns with zero standard deviation are dropped; each remaining
    column has its mean subtracted and is divided by its std.

    Parameters:
    - - - - -
    D : array
        input feature matrix (samples x features)
    Returns:
    - - - -
    D_norm : array
        normalized feature matrix containing only non-constant columns
    """
    col_mean = D.mean(0)
    col_std = D.std(0)
    keep = col_std != 0
    return (D[:, keep] - col_mean[keep][None, :]) / col_std[keep][None, :]
|
93ce0760850ef1aeb40685589f843efe35e78384
| 15,424
|
import math
def nernst_potential(ion_conc_out, ion_conc_in, charge, T,
                     constants=None, units=None, backend=math):
    """
    Calculates the Nernst potential using the Nernst equation for a particular
    ion.

    Parameters
    ----------
    ion_conc_out: float with unit
        Extracellular concentration of ion
    ion_conc_in: float with unit
        Intracellular concentration of ion
    charge: integer
        Charge of the ion
    T: float with unit
        Absolute temperature
    constants: object (optional, default: None)
        constant attributes accessed:
            F - Faraday constant
            R - Ideal Gas constant
    units: object (optional, default: None)
        unit attributes: coulomb, joule, kelvin, mol
    backend: module (optional, default: math)
        module used to calculate log using `log` method, can be substituted
        with sympy to get symbolic answers

    Returns
    -------
    Membrane potential
    """
    if constants is not None:
        F = constants.Faraday_constant
        R = constants.molar_gas_constant
    else:
        # CODATA values: F in C/mol, R in J/(K*mol).
        F = 96485.33289
        R = 8.3144598
        if units is not None:
            F = F * units.coulomb / units.mol
            R = R * units.joule / units.kelvin / units.mol
    concentration_ratio = ion_conc_out / ion_conc_in
    return (R * T) / (charge * F) * backend.log(concentration_ratio)
|
2a2171df2b6fc6789f7a8d4add044b70404f49ed
| 15,425
|
def offer_publication_list():
    """
    A simple example of ids used in OfferPublicationList in Offers.xml
    """
    ids = [1, 16]
    return ids
|
9d8e936c8cea8e568f4941dc84eeee4b2bebeca2
| 15,426
|
def smallest_with_symbols(expr, symbols):
    """
    Return the smallest sub-tree of `expr` whose free symbols include every
    symbol in `symbols`.

    Walks down through single-child nodes, and descends further whenever
    exactly one child contains any of the requested symbols.
    """
    assert all(sym in expr.free_symbols for sym in symbols)
    if len(expr.args) == 1:
        return smallest_with_symbols(expr.args[0], symbols)
    relevant = [
        sub for sub in expr.args
        if any(sym in sub.free_symbols for sym in symbols)
    ]
    if len(relevant) == 1:
        return smallest_with_symbols(relevant[0], symbols)
    return expr
|
1706c58a53e8a7718969d0194af81964cc673926
| 15,427
|
def get_title_elements(soup):
    """Return the title elements selected from the homepage soup."""
    selector = 'span[class*="k-card__title-piece"]'
    return soup.select(selector)
|
0df3606c78abe5550b2aae959083b670513a9d05
| 15,428
|
def _get_ch_type_mapping(fro='mne', to='bids'):
"""Map between BIDS and MNE nomenclatures for channel types.
Parameters
----------
fro : str
Mapping from nomenclature of `fro`. Can be 'mne', 'bids'
to : str
Mapping to nomenclature of `to`. Can be 'mne', 'bids'
Returns
-------
mapping : dict
Dictionary mapping from one nomenclature of channel types to another.
If a key is not present, a default value will be returned that depends
on the `fro` and `to` parameters.
Notes
-----
For the mapping from BIDS to MNE, MEG channel types are ignored for now.
Furthermore, this is not a one-to-one mapping: Incomplete and partially
one-to-many/many-to-one.
Bio channels are supported in mne-python and are converted to MISC
because there is no "Bio" supported channel in BIDS.
"""
if fro == 'mne' and to == 'bids':
mapping = dict(eeg='EEG', misc='MISC', stim='TRIG', emg='EMG',
ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG',
resp='RESP', bio='MISC', dbs='DBS',
# MEG channels
meggradaxial='MEGGRADAXIAL', megmag='MEGMAG',
megrefgradaxial='MEGREFGRADAXIAL',
meggradplanar='MEGGRADPLANAR', megrefmag='MEGREFMAG',
ias='MEGOTHER', syst='MEGOTHER', exci='MEGOTHER')
elif fro == 'bids' and to == 'mne':
mapping = dict(EEG='eeg', MISC='misc', TRIG='stim', EMG='emg',
ECOG='ecog', SEEG='seeg', EOG='eog', ECG='ecg',
RESP='resp',
# No MEG channels for now
# Many to one mapping
VEOG='eog', HEOG='eog', DBS='dbs')
else:
raise ValueError('Only two types of mappings are currently supported: '
'from mne to bids, or from bids to mne. However, '
'you specified from "{}" to "{}"'.format(fro, to))
return mapping
|
b90bd99da3ad0a59fcf3fcd38f04670ba5ed16b0
| 15,430
|
def get_deid_field_dict(item):
    """
    Return a dictionary with custom fields from the DeID Upload metadata.

    :param item: the item with data.
    :returns: a dictionary of key-value pairs, one 'CustomField.<key>'
        entry per metadata key, with '|' replaced by a space in values.
    """
    deid = item.get('meta', {}).get('deidUpload', {})
    if not isinstance(deid, dict):
        return {}
    return {
        'CustomField.%s' % key: str(value).replace('|', ' ')
        for key, value in deid.items()
    }
|
4de64e7dc205687c193a70c4e4dcfb6d30d436c3
| 15,435
|
def _split_constraints(constraints, type_):
"""Split list of constraints in two list.
The first list contains all constraints of type and the second the rest.
"""
filtered = [c for c in constraints if c["type"] == type_]
rest = [c for c in constraints if c["type"] != type_]
return filtered, rest
|
28a54eb323dc61ef69fd57a01eacb26689290385
| 15,436
|
def scrub(txt):
    """Returns sluggified string. e.g. `Sales Order` becomes `sales_order`."""
    # Map both spaces and hyphens to underscores in one pass, then lowercase.
    table = str.maketrans({' ': '_', '-': '_'})
    return txt.translate(table).lower()
|
32546e1fa96337b351ef4dabc020d65659f8f2bd
| 15,437
|
def insertion_sort(nums):
    """Insertion Sort: orders `nums` in place; always returns None."""
    if not nums:
        return None
    for right in range(1, len(nums)):
        key = nums[right]
        pos = right
        # Shift every larger element one slot to the right, then drop key in.
        while pos > 0 and nums[pos - 1] > key:
            nums[pos] = nums[pos - 1]
            pos -= 1
        nums[pos] = key
    return
|
60c0f89df9fcfcc4cc2bd683a903483cf7c1590f
| 15,438
|
import os
import glob
def load_acdc_files(path):
    """
    path: root path for all acdc patient folders
    returns: a tuple (images, masks) with full file names, each sorted
    """
    assert (os.path.exists(path)), 'Path: {} does not exist'.format(path)
    image_pattern = os.path.join(path, '**/*frame[0-9][0-9].nii.gz')
    mask_pattern = os.path.join(path, '**/*frame*_gt.nii.gz')
    # NOTE(review): glob is called without recursive=True, so '**' matches a
    # single directory level (patient folders directly under `path`) -- confirm
    # this matches the dataset layout.
    return sorted(glob.glob(image_pattern)), sorted(glob.glob(mask_pattern))
|
60cbb78dea9659745bd97ecbbfe7ad2bf5d68d81
| 15,439
|
def _headers(source_parameters) -> dict:
"""Return the headers for the url-check."""
return {"Private-Token": source_parameters["private_token"]} if "private_token" in source_parameters else {}
|
1aaae1e716871000d83c9610a71080dc7b5e550c
| 15,440
|
import functools
def cached_property(inputs=None):
    """Returns a cached property that is calculated once.

    If inputs are specified, then if those properties change the property is
    recalculated.

    Usage is as follows; given a class, you can declare a cached property with
    the `@cached_property` decorator:
    ```python
    class Swallow:
        def __init__(self, laden):
            self.mass = 5
            self.laden = laden
        @cached_property(['mass', 'laden'])
        def air_speed(self):
            mass = self.mass + 16 if laden else 0
            time.sleep(100) # must sleep after flying
            return mass / 400
    ```
    you can do the following:
    ```
    s = Swallow(laden=False)
    s.air_speed # will be slow first time
    s.air_speed # returns instantly using cache
    s.laden = True # invalidate cache
    s.air_speed # will recalculate
    ```
    i.e. the `air_speed` will be lazily recalculated if `self.mass`, or
    `self.laden` change.

    Parameters:
        inputs - dependencies which should be checked for changes
            to determine whether to recalculate the property. If None then
            this property is only lazily calculated once.
    """
    # Handle defaults
    inputs = inputs or []
    # Wrap property method
    def smart_cached_property(func):
        @functools.wraps(func)
        def get(self):
            # Snapshot the current values of all declared dependencies.
            input_values = {k: getattr(self, k) for k in inputs}
            try:
                cached = self._property_cache[func]
                # BUG FIX: this previously read `self.property_input_cache`
                # (missing leading underscore), which raised AttributeError on
                # every hit and reset both caches -- so nothing was ever cached.
                if input_values == self._property_input_cache[func]:
                    return cached
            except AttributeError:
                # First access on this instance: create the caches.
                self._property_cache = {}
                self._property_input_cache = {}
            except KeyError:
                # Not cached yet for this particular property.
                pass
            # Recalculate and remember both the value and its inputs.
            value = self._property_cache[func] = func(self)
            self._property_input_cache[func] = input_values
            return value
        return property(get)
    return smart_cached_property
|
25c8291e4ba798727b6cf14161393085b269cc3f
| 15,441
|
import subprocess
def git_tracked_files():
    """
    Returns
    -------
    list or None
        List of tracked files or None if an error happened
    None or str
        None if successfully retrieved tracked files, str if an error happened
    """
    completed = subprocess.run(['git', 'ls-tree', '-r', 'HEAD', '--name-only'],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    if completed.returncode:
        # Non-zero exit: surface git's stderr as the error string.
        return None, completed.stderr.decode().strip()
    return completed.stdout.decode().splitlines(), None
|
e32a7ab4b2a210be0f6b7e4de4378c15598b41f3
| 15,442
|
def binarize(y, thres=3):
    """Given threshold, binarize the ratings in place (and return the array)."""
    below = y < thres
    y[below] = 0
    # The >= mask is computed after zeroing, exactly as before.
    y[y >= thres] = 1
    return y
|
385a10a652aa3a89874ee65520511f1554295ffe
| 15,443
|
def preserve_linefeeds(value):
    """Escape linefeeds.

    To make templates work with both YAML and JSON, escape linefeeds instead
    of allowing Jinja to render them; carriage returns are dropped entirely.
    """
    escaped = value.replace("\n", "\\n")
    return escaped.replace("\r", "")
|
ee8e4c33d81b0a31c8a0cbf99369235e34327a61
| 15,444
|
import os
def is_non_zero_file(fpath):
    """Check if `fpath` names an existing regular file with size > 0.

    :param fpath: path to test
    :returns: True if the file exists and is non-empty, else False
    """
    # The boolean expression already yields a bool; the previous
    # `True if ... else False` wrapper was redundant.
    return os.path.isfile(fpath) and os.path.getsize(fpath) > 0
|
fc37f01c6d92e6b0f6b82a1b544494943072e3b5
| 15,445
|
from typing import List
from typing import Set
from typing import FrozenSet
def get_packed_items(weights: List[int], profits: List[float], solution: List[List[float]]) -> Set[FrozenSet[int]]:
    """
    Returns the possible solution from a solved knapsack instance.
    Items in the returned sets are numbered 1-based (internal index + 1).
    :param weights: Weights of items
    :param profits: Profits of items
    :param solution: Solution matrix of the solved knapsack instance.
        Assumed layout is solution[item][capacity] -- TODO confirm against
        the solver that produced it.
    :return: Set of item allocation of given knapsack instance
    """
    def _get_packed_items_rek(solution: List[List[float]], weights: List[int], profits: List[float], item: int, c: int,
                              items: FrozenSet[int], solution_set: Set[FrozenSet[int]]) -> Set[FrozenSet[int]]:
        """
        Recursive function for calculating the packed items.
        Walks backwards through the DP table, branching whenever both
        "take item" and "skip item" explain the cell's value.
        :param solution: The matrix of the solved knapsack instance
        :param weights: The list of the weight of the items
        :param profits: The list of the profit of the items
        :param item: The current index of the item
        :param c: The current capacity of the baggage
        :param items: The set of the currently packed items
        :param solution_set: The set of all the gathered solutions to the knapsack instance
        :return: The solution set with all the item combinations used
        """
        if item == 0 or c == 0:
            if solution[item][c] != 0:
                # When we need to pack in the first item
                items |= frozenset({item + 1})
            return solution_set | {items}
        if solution[item][c] - profits[item] == solution[item - 1][c - weights[item]]:
            # When we pack the item
            solution_set |= _get_packed_items_rek(solution, weights, profits, item - 1, c - weights[item],
                                                  items | frozenset({item + 1}), solution_set)
        if solution[item][c] == solution[item - 1][c]:
            # When we do not pack the item, because the maximal profit would stay the same
            solution_set |= _get_packed_items_rek(solution, weights, profits, item - 1, c, items, solution_set)
        return solution_set
    number_items: int = len(solution)
    n: int = len(solution[0])
    # Start from the last item at the maximum capacity index (n - 1).
    return _get_packed_items_rek(solution, weights, profits, number_items - 1, n - 1, frozenset(), set())
|
97f9142b49418918e483cae14675a38f16b24a7c
| 15,447
|
import sys
def get_best_config(results, objective, objective_higher_is_better=False):
    """ Returns the best configuration from a list of results according to some objective """
    if objective_higher_is_better:
        select, sentinel = max, -sys.float_info.max
    else:
        select, sentinel = min, sys.float_info.max
    def score(result):
        value = result[objective]
        # Non-float entries are pushed to the losing end so they are ignored.
        return value if isinstance(value, float) else sentinel
    return select(results, key=score)
|
3e68d644a472255ec4aacbad4da0574136346372
| 15,448
|
def count_1(a: int) -> int:
    """Count the 1-bits in the binary representation of `a`.

    Uses Kernighan's trick: `a & (a - 1)` clears the lowest set bit.
    Assumes a non-negative input (a negative value would loop forever).

    :param a: the value whose bits are counted
    :return: the number of set bits
    """
    bits = 0
    while a:
        a &= a - 1
        bits += 1
    return bits
|
7217b3501cee8fff8eb6cf5887e982c879f95880
| 15,451
|
import os
import shutil
import subprocess
def ExtractLibraries(deb_paths, work_directory, output_directory):
  """Extract libraries from .deb packages.

  Unpacks each .deb into <work_directory>/extracted (recreated on each call),
  then copies every library-like file into output_directory, preserving the
  relative directory layout. Absolute symlink targets are rewritten as
  relative ones. Returns the list of target paths created.
  """
  extract_directory = os.path.join(work_directory, 'extracted')
  if os.path.exists(extract_directory):
    shutil.rmtree(extract_directory, ignore_errors=True)
  os.mkdir(extract_directory)
  for deb_path in deb_paths:
    # Relies on the dpkg-deb tool being installed on the host.
    subprocess.check_call(['dpkg-deb', '-x', deb_path, extract_directory])
  extracted = []
  for root, _, filenames in os.walk(extract_directory):
    # Skip 32-bit / x32 multilib directories.
    if 'libx32' in root or 'lib32' in root:
      continue
    for filename in filenames:
      # Keep only shared (.so, .so.*) and static (.a) libraries.
      # NOTE(review): the `'.a' not in filename` substring test also admits
      # names merely containing '.a' anywhere -- confirm this is intended.
      if (not filename.endswith('.so') and '.so.' not in filename and
          not filename.endswith('.a') and '.a' not in filename):
        continue
      file_path = os.path.join(root, filename)
      rel_file_path = os.path.relpath(file_path, extract_directory)
      rel_directory = os.path.dirname(rel_file_path)
      target_dir = os.path.join(output_directory, rel_directory)
      if not os.path.exists(target_dir):
        os.makedirs(target_dir)
      target_file_path = os.path.join(output_directory, rel_file_path)
      extracted.append(target_file_path)
      # Remove any stale file or dangling link at the destination first.
      if os.path.lexists(target_file_path):
        os.remove(target_file_path)
      if os.path.islink(file_path):
        link_path = os.readlink(file_path)
        if os.path.isabs(link_path):
          # Make absolute links relative.
          link_path = os.path.relpath(
              link_path, os.path.join('/', rel_directory))
        os.symlink(link_path, target_file_path)
      else:
        shutil.copy2(file_path, target_file_path)
  return extracted
|
9bb8ea0bd86dafef80b1294be07143a5b21708d7
| 15,452
|
import json
def is_json(data):
    """
    Test, if a data set can be expressed as JSON.

    :param data: any Python object
    :returns: True if `json.dumps(data)` succeeds, else False
    """
    try:
        json.dumps(data)
        return True
    except (TypeError, ValueError, OverflowError):
        # TypeError: unserializable type; ValueError: e.g. circular
        # references; OverflowError: out-of-range numbers. The previous
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        return False
|
a0e1da7dc341b2f8867fa1721cb0fe0cc0bcfa24
| 15,453
|
from typing import List
def _make_array(parts: List[str]) -> str:
"""
Utility to format an array of strings for passing to command line.
:param parts: List of strings.
:return: Formatted string.
"""
return "\"" + ",".join(parts) + "\""
|
3c364ee9b483274c2aad1f8df6afcebaabd09ed1
| 15,454
|
def parse_V1A_fname(fname):
    """
    Parse a GeoNet .V1A/.V2A file name into (YYYYMMDD, HHMM, stat_code).

    Note: See http://info.geonet.org.nz/display/appdata/Accelerogram+Data+Filenames+and+Formats
    The file name can take three forms:
    XNNSITEJJ, where X is the instrument code, NN is the two-digit year
    of acquisition, SITE is the 4 character site code, and JJ is the site's
    unique accelerogram number for that year.
    YYYYMMDD_HHMM_SITE, where YYYY/MM/DD HH:MM is the earthquake origin time
    (UTC) and SITE is the 3 or 4 character site code.
    YYYYMMDD_HHMM_SITE_XX, where YYYY/MM/DD HH:MM is the instrument trigger time
    (UTC), SITE is the 3 or 4 character site code, and XX is the sensor location
    code. The latter is most appropriate for arrays of borehole sensors.

    fname: name of geoNet data file, must not include path
    returns: (YYYYMMDD, HHMM, stat_code); date and time are "" for form 1
    raises: Exception when the name matches none of the three forms
    """
    form1 = "XNNSITEJJ"
    form2 = "YYYYMMDD_HHMM_SITE"
    form3 = "YYYYMMDD_HHMM_SITE_XX"
    # Remove .V1A, .V2A extension
    fname = fname.split(".")[0]
    YYYYMMDD = ""
    HHMM = ""
    # The number of underscores distinguishes the three naming forms.
    n_sep = fname.count("_")
    if n_sep == form1.count("_"):
        stat_code = fname[3:7]
    elif n_sep == form2.count("_"):
        YYYYMMDD, HHMM, stat_code = fname.split("_")
    elif n_sep == form3.count("_"):
        # Sensor location code (XX) is parsed but not returned.
        YYYYMMDD, HHMM, stat_code, _ = fname.split("_")
    else:
        # Typo fix in the message: "unknow" -> "unknown".
        raise Exception("{:s} is unknown file name format for .V1A files\n".format(fname))
    return YYYYMMDD, HHMM, stat_code
|
7fb355c395cdc9b45f3cb0350c1df9469a471fb4
| 15,457
|
def make_snake_case(text: str) -> str:
    """
    A very basic way to convert some text into snake case.

    Strips out any character other than a-z, 0-9 and underscore.

    :param text: the text to convert
    :return: a string which snake cases the text provided
    """
    allowed = set('abcdefghijklmnopqrstuvwxyz1234567890_')
    candidate = text.lower().strip().replace(' ', '_')
    return ''.join(ch for ch in candidate if ch in allowed)
|
cd4c46918da543f3f537020cf1e084c54182229e
| 15,459
|
import torch
def instance_aware_loss(solo_masks, instance_map, pred_param, k_inv_dot_xy1,
                        valid_region, gt_depth, return_loss=True):
    """
    calculate loss of parameters
    first we combine sample segmentation with sample params to get K plane parameters
    then we used this parameter to infer plane based Q loss as done in PlaneRecover
    the loss enforce parameter is consistent with ground truth depth
    :param solo_masks: tensor with size (K, h, w)
    :param instance_map: tensor with size (h, w)  (unused in current code path)
    :param pred_param: tensor with size (3, h, w)
    :param k_inv_dot_xy1: per-pixel back-projection rays, used as (3, h*w) -- TODO confirm
    :param valid_region: tensor with size (1, 1, h, w), indicate planar region
    :param gt_depth: tensor with size (1, 1, h, w)
    :param return_loss: bool
    :return: loss
             inferred depth with size (1, 1, h, w) corresponded to instance parameters
    """
    _, _, h, w = gt_depth.size()
    # # combine sample segmentation and sample params to get instance parameters
    # instance_param = []
    # for mask in solo_masks:
    #     param = torch.cat([param[mask > 0] for param in pred_param], dim=0)
    #     param = param.view(3, -1).mean(dim=1)
    #     instance_param.append(param.detach().cpu().numpy())
    # instance_param = torch.tensor(instance_param, device=pred_param.device) # (K, 3)
    #
    # # infer depth for every pixels and select the one with highest probability
    # depth_maps = 1. / torch.matmul(instance_param, k_inv_dot_xy1) # (K, h*w)
    # solo_ins = instance_map.view(-1)
    # inferred_depth = depth_maps.t()[range(h * w), solo_ins].view(1, 1, h, w)
    # infer depth for every pixels
    param = pred_param.clone()
    instance_param = []
    for mask in solo_masks:
        mask = (mask > 0)
        # Mean plane parameter over the instance's pixels, broadcast back
        # onto those same pixels.
        ins_param = pred_param[:, mask].mean(dim=1)
        param[:, mask] = ins_param.repeat(mask.sum(), 1).transpose(0, 1)
        instance_param.append(ins_param)
    instance_param = torch.cat(instance_param, dim=0).view(-1, 3) # (K, 3)
    param = param.view(-1, h*w)
    inferred_depth = 1. / torch.sum(param * k_inv_dot_xy1, dim=0, keepdim=True) # (1, h*w)
    inferred_depth = inferred_depth.view(1, 1, h, w)
    if not return_loss:
        # NOTE(review): `_` here is whatever remained from unpacking
        # gt_depth.size() above -- not a meaningful value. Callers should only
        # use inferred_depth and instance_param from this branch.
        return _, inferred_depth, _, instance_param
    # select valid region: planar AND with nonzero ground-truth depth
    valid_region = ((valid_region + (gt_depth != 0.0)) == 2).view(-1)
    valid_param = param[:, valid_region] # (3, N)
    ray = k_inv_dot_xy1[:, valid_region] # (3, N)
    valid_depth = gt_depth.view(1, -1)[:, valid_region] # (1, N)
    valid_inferred_depth = inferred_depth.view(1, -1)[:, valid_region]
    # abs distance for valid infered depth
    abs_distance = torch.mean(torch.abs(valid_inferred_depth - valid_depth))
    # Q_loss for every instance: for a plane, param . (depth * ray) should be 1
    Q = valid_depth * ray # (3, N)
    q_diff = torch.abs(torch.sum(valid_param * Q, dim=0, keepdim=True) - 1.)
    instance_loss = torch.mean(q_diff)
    # # weight Q_loss with probability
    # Q_loss = torch.abs(torch.matmul(instance_param, Q) - 1.) # (K, N)
    # solo_masks = solo_masks.view(-1, h*w)[:, valid_region] # (K, N)
    # weighted_Q_loss = Q_loss * solo_masks # (K, N)
    # instance_loss = torch.sum(torch.mean(weighted_Q_loss, dim=1))
    return instance_loss, inferred_depth, abs_distance, instance_param
|
2c5c15aab5c12d147d5670b9b6ba249befbbc6c9
| 15,461
|
def get_batches(dataloader):
    """
    Collect all batches from a dataloader.

    Args:
        dataloader: iterable yielding (filenames, file objects) pairs.
    Returns:
        A list with one entry per batch; each entry is a list of
        (basename, file object) tuples.
    """
    batch_list = []
    for fnames, fobjs in dataloader:
        # Keep only the basename of each path.
        names = [path.split("/")[-1] for path in fnames]
        batch_list.append(list(zip(names, fobjs)))
    # Removed the dead `count` accumulator that was never read.
    return batch_list
|
11970ec2f74112d9d7cf56ec26eefc585328c04c
| 15,463
|
def annalistuser_create_values(
        coll_id="testcoll", user_id="testuser",
        user_name="Test User",
        user_uri="mailto:testuser@example.org",
        user_permissions=None
        ):
    """
    Values used when creating a user record.

    :param coll_id: collection id the permissions apply to
    :param user_id: user identifier used in the comment text
    :param user_name: display name stored as rdfs:label
    :param user_uri: user URI stored as annal:user_uri
    :param user_permissions: permission list; defaults to the full set
        ["VIEW", "CREATE", "UPDATE", "DELETE", "CONFIG", "ADMIN"]
    :returns: dictionary of user record values
    """
    # Use a None sentinel instead of a shared mutable default list.
    if user_permissions is None:
        user_permissions = ["VIEW", "CREATE", "UPDATE", "DELETE", "CONFIG", "ADMIN"]
    d = (
        { 'annal:type':             "annal:User"
        , 'rdfs:label':             user_name
        , 'rdfs:comment':           "User %s: permissions for %s in collection %s"%(user_id, user_name, coll_id)
        , 'annal:user_uri':         user_uri
        , 'annal:user_permission':  user_permissions
        })
    return d
|
e28b27e0d716123efd4738d976385ef5f703154e
| 15,464
|
import importlib
def can_contact_customer_support(botengine):
    """
    Leverages the com.domain.YourBot/domain.py file or organization properties
    to determine if customer support is available for this bot.

    :param botengine: BotEngine environment passed through to get_property
    :return: True if any customer-support contact property is set and
        non-empty, False otherwise
    """
    try:
        properties = importlib.import_module('properties')
    except ImportError:
        return False
    # Any non-empty schedule URL, email address, or phone number means
    # customer support can be contacted.
    for prop_name in ("CS_SCHEDULE_URL", "CS_EMAIL_ADDRESS", "CS_PHONE_NUMBER"):
        value = properties.get_property(botengine, prop_name)
        if value is not None and len(value) > 0:
            return True
    # BUG FIX: previously fell off the end and implicitly returned None.
    return False
|
5916b9346d7853d1cd838f50ba67d2871b233772
| 15,465
|
import random
def set_folders_cv5(users_items_dict):
    """Randomly distribute each user's items over 5 cross-validation folders.

    Args:
        users_items_dict (dict): mapping user -> list of item dicts
            (as produced by fill_users_items_dict)

    Returns:
        users_items_dict: the same dictionary, with every item given a
        'folder' key in 1..5
    """
    for items in users_items_dict.values():
        # Shuffle so folder membership is random, then assign by position.
        random.shuffle(items)
        fold_size = len(items) // 5
        for index, item in enumerate(items):
            if fold_size == 0:
                # Fewer than 5 items: everything lands in folder 5,
                # matching the original boundary arithmetic.
                item['folder'] = 5
            else:
                # Folders 1-4 take fold_size items each; folder 5 takes
                # the rest (including the remainder).
                item['folder'] = min(index // fold_size + 1, 5)
    return users_items_dict
|
c3bfe53c55866ed0c7593ff0873f4fe2e6272067
| 15,467
|
import os
import io
def upload_file(client,
                bucket,
                file_path,
                minio_file_path,
                content_type=None,
                metadata=None):
    """
    Uploads single file directly to a minio_path.

    :param client: Minio-style client exposing put_object
    :param bucket: destination bucket name
    :param file_path: local file to upload
    :param minio_file_path: destination object path
    :param content_type: MIME type; defaults to application/octet-stream
    :param metadata: optional metadata dict passed through to put_object
    :returns: the destination path `minio_file_path`
    """
    file_size = os.stat(file_path).st_size
    mime = content_type or 'application/octet-stream'
    with io.open(file_path, 'rb') as stream:
        client.put_object(
            bucket,
            minio_file_path,
            stream,
            file_size,
            content_type=mime,
            metadata=metadata,
        )
    return minio_file_path
|
8b979a115b1e65f59e8813a82abf64c21e53bd28
| 15,468
|
def identity(x):
    """Return `x` unchanged (the identity function)."""
    return x
|
abebf1ea2ef1579164a9ead43348c10fbfc1a43e
| 15,469
|
from typing import Mapping
def is_upload(association):
"""
Indicates whether the association corresponds to an upload object.
"""
upload_keys = set([
'created_at', 'id', 'job_id', 'updated_at', 'upload_content_type',
'upload_file_name', 'upload_file_size', 'upload_updated_at'
])
association_value = association.value
return (isinstance(association_value, Mapping)
and
association_value.keys() == upload_keys)
|
d2adf1ea3077b2021a448f7f5ecbb28ab342b3cc
| 15,470
|
def lenprefix(data, nbytes=2):
    """Prefix `data` with its length, in `nbytes` big-endian bytes.

    If `data` is a string, it is first converted to bytes as UTF-8.

    :param data: payload to prefix (str or bytes)
    :param nbytes: width of the big-endian length prefix (default 2)
    :raises TypeError: if `data` is neither str nor bytes
    :raises OverflowError: if the length does not fit in `nbytes` bytes
    """
    # Validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if not isinstance(data, (str, bytes)):
        raise TypeError("data must be str or bytes, got %s" % type(data).__name__)
    if isinstance(data, str):
        data = data.encode("utf8")
    return len(data).to_bytes(nbytes, "big") + data
|
b59e9aff1c7500fdcfa1482012ee9bab16c04022
| 15,471
|
import sys
import time
def display_events(events, shift):
    """display the text according to its start and end time, time starts at 'shift'

    events: sequence of (start, end, text) tuples with 'H:MM:SS' time strings
    shift: optional 'H:MM:SS' offset to start the clock at (falsy means 0)

    Repeatedly rewrites a single terminal line (using backspaces) until all
    events have been displayed; blocks the calling thread while running.
    """
    write, flush = sys.stdout.write, sys.stdout.flush
    begin_time = time.time()
    msg, back_len = '', 0
    def update_msg(text):
        # Compose "H:MM:SS text" from the elapsed wall-clock time.
        nonlocal msg
        seconds = time.time() - begin_time
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        time_str = "%d:%02d:%02d" % (h, m, s)
        msg = ' '.join([time_str, text])
    def flush_msg():
        # Erase the previously written message, then write the new one.
        nonlocal back_len
        write('\x08' * back_len + ' ' * back_len)
        write('\x08' * back_len + msg)
        flush()
        # NOTE(review): UTF-8 byte length is used as the erase width; for
        # non-ASCII text this exceeds the on-screen width -- confirm intended.
        back_len = len(msg.encode('utf-8'))
    def to_seconds(time_str):
        # 'H:MM:SS' (possibly fractional) -> seconds.
        ftr = [3600, 60, 1]
        return sum([a * b for a, b in zip(ftr, map(float, time_str.split(':')))])
    if shift:
        begin_time -= to_seconds(shift)
    i = 0
    while i < len(events):
        start, end, text = events[i]
        if time.time() < begin_time + to_seconds(start):
            # Before the event starts: show only the running clock.
            update_msg('')
        elif time.time() < begin_time + to_seconds(end):
            update_msg(text)
        else:
            # Event finished: advance to the next one.
            i += 1
        flush_msg()
        time.sleep(0.01)
|
7ac38b80e80a9122f5773a687c464fe6a9925b24
| 15,472
|
from stat import ST_MTIME
import os
def is_newer(source, target):
    """Return True if 'source' exists and is more recently modified than
    'target', or if 'source' exists and 'target' doesn't. Return False if
    both exist and 'target' is the same age or younger than 'source'.
    Raise ValueError if 'source' does not exist.
    """
    if not os.path.exists(source):
        raise ValueError("file '%s' does not exist" % source)
    if not os.path.exists(target):
        # Missing target counts as outdated. (Was `return 1`; now a bool.)
        return True
    # ST_MTIME indexing yields whole-second integers, preserving the
    # original "same integer second means not newer" semantics.
    mtime_source = os.stat(source)[ST_MTIME]
    mtime_target = os.stat(target)[ST_MTIME]
    return mtime_source > mtime_target
|
c2cf21c1f95938ed38a5cf7b4c70e69ff1d18788
| 15,473
|
from typing import List
def _create_path_from_parts(path_parts: List[str]) -> str:
"""Creates the typical path format from a list of the individual parts
Args:
path_parts (List[str]): list containing the parts of a path
Ex.: ['home', 'usr', 'dir1']
Returns:
str: Concatenation of the parts to a path format
Ex..: home/usr/dir
"""
return '/'.join(path_parts)
|
4e8f38f98c2d5da9db5d4607b49c0a70cc8c9c33
| 15,474
|
def tie(p, ptied = None):
    """Tie one parameter to another.

    Each non-empty entry of `ptied` is a Python expression (which may
    reference `p`) whose value replaces the corresponding entry of `p`.

    :param p: parameter sequence, modified in place
    :param ptied: list of expression strings; '' means "leave unchanged",
        None means "nothing tied"
    :returns: `p`
    """
    if ptied is None:
        return p
    for index, expression in enumerate(ptied):
        if expression == '':
            continue
        # SECURITY: eval() executes arbitrary code, so `ptied` must come
        # from a trusted source. (Replaces the previous string-built
        # `exec` of a whole assignment statement with a direct item
        # assignment of the evaluated expression.)
        p[index] = eval(expression)
    return p
|
621100a4cb3cf1a576e8d71c2870d0cf79959d77
| 15,475
|
def hamming_distance(w1: str, w2: str) -> int:
    """Compute the Hamming distance between the given strings.

    Positions beyond the shorter string's length each count as one
    mismatch, so the distance is symmetric in its arguments.

    BUG FIX: previously extra characters were only counted when `w1` was
    the longer string (the loop ran over len(w1)), so e.g.
    hamming_distance('ab', 'abc') incorrectly returned 0.
    """
    mismatches = sum(a != b for a, b in zip(w1, w2))
    return mismatches + abs(len(w1) - len(w2))
|
3eda58c4c56a1b8fbb79719051c650884a396f68
| 15,476
|
def crime_filter(df, categories, column="Straftat"):
    """Construct a boolean filter selecting rows whose `column` value is one
    of the crimes listed in `categories`.

    Args:
        df: DataFrame to filter.
        categories: crime category labels to keep (must be non-empty).
        column (str, optional): column matched against. Defaults to "Straftat".

    Returns:
        Boolean Series, True where `df[column]` equals any category.
    """
    # OR together an equality test per category (the first one seeds the mask).
    selected = df[column] == categories[0]
    for label in categories[1:]:
        selected |= df[column] == label
    return selected
|
1800933bc879f6153d5fcf640327cd9ad86e6d62
| 15,477
|
def btsearch(f, b, f0=None, maxiter=20, tau=0.5):
    """
    Backtracking search

    Arguments:
    f -- function f(x); must return an object exposing the objective value
         as an `F` attribute
    b -- end point (initial step)
    f0 -- f(0), the objective value at the origin
    maxiter -- maximum number of shrink steps (default 20)
    tau -- multiplicative shrink factor applied to x each step (default 0.5)

    Returns (x, fx) for the first x whose objective does not exceed f0.
    Raises ValueError if no such x is found within maxiter steps.

    NOTE(review): if `f0` is left as its default None, the comparison
    `fx.F > f0` raises TypeError on Python 3 -- callers appear to be
    required to pass f0 explicitly; confirm before relying on the default.
    """
    x = b
    for i in range(maxiter):
        fx = f(x)
        if fx.F > f0:
            x *= tau
        else:
            return x, fx
    raise ValueError('backtracking search could not find a new minimum')
|
2125e7c48b157dee68ff2b3cca0dd3b277e6f662
| 15,478
|
import re
def break_down(compound, forgiving=True):
    """
    Breaks a string representing a chemical formula down into constituent parts.

    Things in parentheses are considered one part.
    Any string of a capital letter followed by lower case letters is considered to be
    an irriducible element.
    Any number is considered to quantify the element imediately proceeding it.
    Space is ignored
    Other characters raise a ValueError unless forgiving=True
    Example:
    >>> break_down('CH3(CH2)5CHO')
    >>> {'C':2, 'H':4, 'CH2':5, 'O':1}
    This function is called recursively by get_elements.
    """
    parts = {}
    number = ""          # digits (and '.'/'/') accumulated for the pending part
    element = ""         # name of the pending element / parenthesised group
    subcompound = ""     # contents collected while inside parentheses
    nest = 0             # current parenthesis nesting depth
    N = len(compound)
    addit = False        # when True, commit the pending element/number to parts
    for i, char in enumerate(compound):
        if char == "(":
            if nest == 0:
                # Opening a top-level group: commit whatever precedes it.
                addit = True
            else:
                subcompound += char
            nest += 1
        elif nest > 0:
            # Inside parentheses: collect verbatim until the matching ')'.
            if char == ")":
                nest -= 1
                if nest == 0:
                    element = subcompound
                    subcompound = ""
            else:
                subcompound += char
        else:
            if re.search("[/.0-9]", char):
                number += char
            elif re.search("[a-z]", char):
                element += char
            elif re.search("[A-Z\-\+]", char):
                # A capital (or charge sign) starts a new part.
                addit = True
            elif re.search("\S", char):
                print(
                    "Not quite sure what you're talking about, mate, when you say ",
                    char,
                )
                if not forgiving:
                    raise ValueError
        if i == N - 1:
            addit = True
        # print('char = ' + char + '\nelement = ' + element + '\naddit = ' + str(addit))
        if addit:
            if len(number) > 0:
                # Prefer int, fall back to float for values like '2.5'.
                try:
                    n = int(number)
                except ValueError:
                    n = float(number)
                number = ""
            else:
                n = 1
            if len(element) > 0:
                if element in parts:
                    parts[element] += n
                else:
                    parts[element] = n
            if nest == 0:
                element = char
            if i == N - 1 and re.search("[A-Z\-\+]", char):
                # A trailing capital is itself a one-atom part.
                if element in parts:
                    parts[element] += 1
                else:
                    parts[element] = 1
            addit = False
    return parts
|
d09c038423f5a89d75499c1395e31c3328c9df17
| 15,479
|
def note_and_bend(mycents, middle_note=60):
    """Take a cent value and return a MIDI note and pitch bend.

    The note is the nearest semitone relative to `middle_note` (0 cents maps
    to `middle_note`); the bend encodes the remaining -50..+50 cent offset
    around the 8192 centre value, at 40.96 bend units per cent.
    """
    semitones, cents_within = divmod(mycents + 50, 100.0)
    midi_note = int(semitones + middle_note)
    pitch_bend = 8192 + int(round((cents_within - 50) * 40.96))
    return midi_note, pitch_bend
|
69ce2fb0e8089061f2c14e6e2828ccb0feef122a
| 15,481
|
def cgc_posneg(header='', data_dir='.', extension='.obfs', save=True,
               recurse=0, save_dir=None, posfile='pos', negfile='neg'):
    """
    This preprocessing function uses the cgc directory README scheme
    to split files into separate data sets of positive and negative
    samples.

    We assume no recursion is required, i.e. data_dir points directly to
    the codeflaws directory, which holds two .txt files:
    pos_labels.txt - with the positive CWE labels
    neg_labels.txt - with the negative CWE labels

    NOTE(review): currently a stub -- always returns two empty lists.
    """
    positive_samples = []
    negative_samples = []
    return positive_samples, negative_samples
|
46d7853b6c9c46bdd99e1735fd92815583ec7c22
| 15,482
|
def _convert_coords_dt_to_np_coords(coords_dt):
"""convert coord_dt to numpy coords """
return coords_dt.view(int).reshape(-1, 3)
|
90db6d289834a1764acc53154a2d5d6d8a03538d
| 15,483
|
def read_row(width):
    """Read one grid row from stdin and return it as a list of characters.

    Raises ValueError when the stripped input line is not exactly
    `width` characters long.
    """
    cells = list(input().strip())
    if len(cells) != width:
        raise ValueError('wrong row width')
    return cells
|
5b55f0a5df2c1d966b970705d5b1bbbc8a1aec12
| 15,484
|
def multiplicative_inverse_factors(a_value, m_modulus):
    """[Summary: Returns a list of all the values of 'x'
    that solve the equation 'ax mod n = 1'.]

    Prints each trial and announces solutions as a side effect.

    Arguments:
        a_value {[int]} -- the coefficient a
        m_modulus {[int]} -- the modulus m
    """
    inverses = []
    for candidate in range(1, m_modulus):
        remainder = (candidate * a_value) % m_modulus
        print(a_value, "*", candidate, " mod ", m_modulus,
              " = ", remainder)
        if remainder == 1:
            print("SOLUTION FOUND!", candidate)
            inverses.append(candidate)
    return inverses
|
f91d28c760963861ef173ef3907f9b77867b3451
| 15,486
|
def sum_abs_of_all_without_range(sequence):
    """
    Same specification as sum_abs_of_all above,
    but with a different implementation.
    """
    # ------------------------------------------------------------------
    # EXAMPLE 2. Iterates through a sequence of numbers, summing their
    # absolute values, using the "no range" form of the FOR loop.
    #
    # This form works ONLY when you do NOT need the index variable.
    # You can ALWAYS use the RANGE form shown in Example 1 instead;
    # this version is just "syntactic sugar". Use it if you like, but
    # don't let it keep you from understanding the critical concept of
    # an INDEX, be aware of its limitation, and don't confuse the forms.
    # ------------------------------------------------------------------
    running_total = 0
    for value in sequence:
        running_total += abs(value)
    return running_total
|
21fdb3d5d4edf6a920a3e0523b8af5948c6244f4
| 15,487
|
from operator import eq
from operator import le
from operator import ge
def OperatorFromStr():
    """ Module for program-wide constant maps (e.g., dicts that should never change) """
    mapping = {
        "=": eq,
        "le": le,
        "ge": ge,
    }
    return mapping
1767821ea0b734312b41a49e5226df682aece430
| 15,489
|
def _default_inner_shape_for_pylist(pylist, ragged_rank):
  """Computes a default inner shape for the given python list.

  After stripping `ragged_rank` ragged (list) dimensions off `pylist`, the
  remaining values must form a dense nested list; their common shape, minus
  the leading (ragged) dimension, is returned as a tuple.

  Raises ValueError if the values are not nested deeply enough for
  `ragged_rank`, or if their shapes are inconsistent.
  """
  def get_inner_shape(item):
    """Returns the inner shape for a python list `item`."""
    if not isinstance(item, (list, tuple)):
      return ()
    elif item:
      # Shape is inferred from the first (leftmost) element at each level.
      return (len(item),) + get_inner_shape(item[0])
    else:
      return (0,)
  def check_inner_shape(item, shape):
    """Checks that `item` has a consistent shape matching `shape`."""
    is_nested = isinstance(item, (list, tuple))
    if is_nested != bool(shape):
      raise ValueError('inner values have inconsistent shape')
    if is_nested:
      if shape[0] != len(item):
        raise ValueError('inner values have inconsistent shape')
      for child in item:
        check_inner_shape(child, shape[1:])
  # Collapse the ragged layers to get the list of inner values.
  inner_values = pylist
  for dim in range(ragged_rank):
    if not all(isinstance(v, (list, tuple)) for v in inner_values):
      raise ValueError('pylist has scalar values depth %d, but ragged_rank=%d '
                       'requires scalar value depth greater than %d' %
                       (dim + 1, ragged_rank, ragged_rank))
    # Flatten one ragged level by concatenating the sublists.
    inner_values = sum((list(v) for v in inner_values), [])
  # Compute the inner shape looking only at the leftmost elements; and then
  # use check_inner_shape to verify that other elements have the same shape.
  inner_shape = get_inner_shape(inner_values)
  check_inner_shape(inner_values, inner_shape)
  return inner_shape[1:]
|
33ef55f87c65707ff2f92989ba6874964042fddf
| 15,492
|
def get_progress_string(tag, epoch, minibatch, nbatches, cost, time,
                        blockchar=u'\u2588'):
    """Build a one-line, nicely formatted progress-bar string.

    Arguments:
        tag (string): label printed inside the bar (i.e. Train, Valid, Test).
        epoch (int): current epoch to display.
        minibatch (int): current minibatch to display.
        nbatches (int): total number of minibatches, used for relative progress.
        cost (float): current cost value.
        time (float): seconds elapsed so far in the epoch.
        blockchar (str, optional): character displayed per step of progress;
            defaults to u2588 (solid block).
    """
    max_bar_width = 20
    filled = int(float(minibatch) / nbatches * max_bar_width)
    template = (u'Epoch {:<3} [{} |{:<%s}| {:4}/{:<4} batches, '
                u'{:.2f} cost, {:.2f}s]' % max_bar_width)
    return template.format(epoch, tag, blockchar * filled,
                           minibatch, nbatches, cost, time)
|
9c5a927433795426fe6466708d24b925ae37e4a5
| 15,493
|
import random
def coin_toss():
    """Simulate a fair coin toss.

    Returns:
        int: 0 or 1 with equal probability.
    """
    # randint(0, 1) already yields the final result; the original
    # if/else that mapped 0->0 and 1->1 was redundant.
    return random.randint(0, 1)
|
4b02cf069963d53b5cafd75bbdcfcdeb55d8eb4d
| 15,495
|
import json
def interface_format_params(params_list):
    """
    Normalize the parameter list stored in the database.

    :param params_list: string repr of a list of dicts, each with a
        ``var_name`` key (as stored in the DB); may be None/empty.
    :return: JSON string of ``[{"var_name": ..., "var_remark": ""}, ...]``,
        or an empty list when the input is empty (kept for compatibility
        with existing callers).
    """
    if not params_list:
        return []
    # Security fix: the stored string comes from the database, so parse it
    # with ast.literal_eval (literals only) instead of eval(), which would
    # execute arbitrary expressions.
    import ast
    parsed = ast.literal_eval(params_list)
    var = [{"var_name": item['var_name'], "var_remark": ""} for item in parsed]
    return json.dumps(var)
|
9663b07c2ad19e4f51118441babf1ef2b04e239a
| 15,497
|
def cell_has_code(lines):
    """Is there any code in this cell?

    Comment lines are skipped; the first non-comment, non-blank line means
    code.  Two consecutive blank lines mean "no code".
    """
    prev_stripped = None  # stripped text of the previous raw line (None on first line)
    for raw_line in lines:
        current = raw_line.strip()
        if current.startswith('#'):
            prev_stripped = current
            continue
        if not current:
            # Two consecutive blank lines?
            if prev_stripped is not None and not prev_stripped:
                return False
            prev_stripped = current
            continue
        return True
    return False
|
c7094accae4d7c1a9f8eae3aa33eda2e980993bb
| 15,498
|
def pointsInRect(array, rect):
    """Determine which points are inside a bounding rectangle.

    Args:
        array: A sequence of 2D tuples.
        rect: A bounding rectangle expressed as a tuple
            ``(xMin, yMin, xMax, yMax)``.

    Returns:
        A list of booleans, one per input point, True when that point lies
        inside (or on the boundary of) the rectangle.  Empty input yields
        an empty list.
    """
    if len(array) < 1:
        return []
    xMin, yMin, xMax, yMax = rect
    return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array]
|
53fdd42520c38b3ffcb19edc655955caf063ebfc
| 15,499
|
def _get_nearest_value(value, step_width):
"""
helper function. Sould not be used from the api.
:param value:
:param step_width:
:return:
"""
steps = value // step_width
return min(steps * step_width, (steps + 1) * step_width, key=lambda new_value: abs(new_value - value))
|
c2af02e059ec62911bf549b4b5d6013e02e34df9
| 15,500
|
def numtocn(data, cny=True):
    """Convert a number to its Chinese-character representation.

    Algorithm (input is coerced to str): split the string into an integer
    part and a decimal part; split the integer part into groups of four
    digits, e.g. 600190000010.70 -> ['600', '1900', '0010']; convert each
    group to Chinese digits; join the groups with tier markers; finally
    convert the decimal part.

    data: the number to convert (anything str() accepts).
    cny: if True, use financial (anti-fraud) numerals and currency units
        (yuan/jiao/fen + "zheng"); otherwise use ordinary numerals.
    """
    # Within-group positional markers: ones/tens/hundreds/thousands.
    cdict = {1: '', 2: '拾', 3: '佰', 4: '仟'} if cny else {1: '', 2: '十', 3: '百', 4: '千'}
    # Between-group tier markers: 1e4 / 1e8 / 1e12; tier 1 is the currency unit.
    xdict = {2: '万', 3: '亿', 4: '兆'}
    xdict[1] = '元' if cny else ''
    if cny:
        gdict = {'0': '零', '1': '壹', '2': '贰', '3': '叁', '4': '肆',
                 '5': '伍', '6': '陆', '7': '柒', '8': '捌', '9': '玖'}
    else:
        gdict = {'0': '零', '1': '一', '2': '二', '3': '三', '4': '四',
                 '5': '五', '6': '六', '7': '七', '8': '八', '9': '九'}
    cdata = str(data).split('.')
    integer = ''.join(list(reversed(cdata[0])))
    decimal = cdata[1] if len(cdata) == 2 else []
    ch_str = ''
    # Split the digits into 4-digit groups, most-significant group first
    # (the leading group may be shorter), e.g. ['600', '1900', '0010'].
    split_integer = list(
        reversed([''.join(list(reversed(integer[i:i+4])))
                  for i in range(0, len(integer), 4)])
    )
    split_integer_len = len(split_integer)  # number of 4-digit groups
    # Convert each group to Chinese digits and join with tier markers.
    for i in range(split_integer_len):
        split_integer_group = split_integer[i]
        grouped_str = ''
        # Convert this group digit by digit.
        split_integer_group_len = len(split_integer_group)
        lk = split_integer_group_len
        for j in range(split_integer_group_len):
            this_char = split_integer_group[j]
            if this_char == '0':
                # Emit a single '零' only when the next digit is non-zero.
                if j < split_integer_group_len-1:
                    if split_integer_group[j+1] != '0':
                        grouped_str = grouped_str+gdict[this_char]
            else:
                grouped_str = grouped_str+gdict[this_char]+cdict[lk]
            lk -= 1
        if grouped_str == '':  # the whole group may be all zeros
            ch_str += grouped_str  # then no tier marker is appended
        else:
            # previous groups + this group + its tier marker
            ch_str += grouped_str+xdict[split_integer_len-i]
    # Convert the decimal part.
    decimal_len = len(decimal)
    if cny:
        if decimal_len == 0:
            ch_str += '整'
        elif decimal_len == 1:  # exactly one decimal digit
            if int(decimal[0]) == 0:
                ch_str += '整'
            else:
                ch_str += gdict[decimal[0]]+'角整'
        else:  # two decimal digits: four cases
            if int(decimal[0]) == 0 and int(decimal[1]) != 0:
                ch_str += '零'+gdict[decimal[1]]+'分'
            elif int(decimal[0]) == 0 and int(decimal[1]) == 0:
                ch_str += '整'
            elif int(decimal[0]) != 0 and int(decimal[1]) != 0:
                ch_str += gdict[decimal[0]]+'角'+gdict[decimal[1]]+'分'
            else:
                ch_str += gdict[decimal[0]]+'角整'
    else:
        if decimal_len != 0:
            ch_str = ch_str + '点'
            for decimal_char in decimal:
                ch_str += gdict[decimal_char]
    return ch_str
|
e7c07b19d426b3c0abeaa4a6ae8058f6391b316e
| 15,501
|
def clip(number, start, end):
    """Returns `number`, but makes sure it's in the range of [start..end]."""
    capped = min(number, end)
    return max(start, capped)
|
c2fed329f855e36c08a05cc5e72d33e743c183c4
| 15,504
|
def mean_feat(col_list, feature, feat_table):
    """Average a feature over all neighbouring positions.

    trace_df() can output e.g. TR and SI values for the nn nearest-neighbour
    positions.  This selects every column whose name contains ``feature``
    (e.g. SI_0, SI_-1, ...) and returns the per-read (row-wise) mean.

    Parameters
    ----------
    col_list : list
        Feature column names present in the table outputted by trace_df()
        in get_features.py, e.g. SI_0, TR_-1 etc.
    feature : str
        Column-name fragment identifying the feature, e.g. "SI".
    feat_table : pandas DataFrame
        Dataframe of the kind returned by trace_df().

    Returns
    -------
    mean : pandas Series
        Row-wise mean of the selected feature columns, named
        ``<feature>_mean``.
    """
    matching = [name for name in col_list if feature in name]
    subset = feat_table[feat_table.columns.intersection(matching)]
    return subset.mean(axis=1).rename(feature + "_mean")
|
4acbddc86aceb69d4e6f624c9c5927084020d44f
| 15,505
|
import argparse
def parse_args():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--verbose', '-v', action='count',
                        help='be more verbose')
    parser.add_argument('--thresholds', '-t', nargs=2, type=float,
                        default=(0.5, 0.1),
                        help='thresholds for labeling as prostate, lesion')
    parser.add_argument('pmaps', nargs='+',
                        help='input pmaps')
    return parser.parse_args()
|
8f89e2d118ae181ff32bf60a7ac0847dd0164b13
| 15,506
|
def get_nested_value(d: dict, path: dict):
    """
    Retrieve a value from a nested dictionary.

    :param d: e.g. {key: {key_2: {key_2_1: hey}, key_1: 1234}
    :param path: e.g. {key: {key_2: key_2_1}} — the path to follow through d;
        a non-dict path is the final key.
    :return: the value at the end of the path, e.g. d[key][key_2][key_2_1]
    :raises KeyError: when a key along the path is missing from d.
    """
    if not isinstance(path, dict):
        return d[path]
    for key, subpath in path.items():
        if key not in d:
            raise KeyError(f'Dictionary {d} does not have key {key}')
        return get_nested_value(d[key], subpath)
|
a9eaf03cd549b6794b4ea7ffd9bb3ab853a7d35d
| 15,508
|
import math
def entropia(p):
    """Entropy (in bits) of a Bernoulli random variable with parameter p."""
    if p in (0, 1):
        return 0
    q = 1 - p
    return -p * math.log(p, 2) - q * math.log(q, 2)
|
2ded93b09f5199fbd9d7564d45d1937735fb73eb
| 15,509
|
def predict(data, best_stump):
    """
    Predict the label of a sample using a decision stump.

    @param data: mapping of feature name -> value for the sample
    @param best_stump: dict with keys "unequal" ('lt'/'gt'), "feature",
        and "threshold"
    @return: 1 when the stump's inequality holds, otherwise -1
    """
    feature_value = data[best_stump["feature"]]
    threshold = best_stump["threshold"]
    mode = best_stump["unequal"]
    if mode == 'lt' and feature_value <= threshold:
        return 1
    if mode == 'gt' and feature_value >= threshold:
        return 1
    return -1
|
6b80ca7ce12541de1ddd21648050861c74afe36a
| 15,510
|
def img_to_pencil(raw_image, threshold=15):
    """Convert a color image to a simple pencil-sketch style.

    Args:
        raw_image: image to be processed (PIL-style: has .size, .convert,
            .load) — assumed, TODO confirm against callers.
        threshold: edge threshold, clamped into [0, 100].

    Returns:
        The gray-scale image with edge pixels set to 0 and the rest to 255.
        The last row and column are left unchanged (no diagonal neighbour).
    """
    threshold = max(0, min(threshold, 100))
    width, height = raw_image.size
    sketch = raw_image.convert('L')  # convert to gray scale mode
    pixels = sketch.load()  # get pixel matrix
    for col in range(width):
        for row in range(height):
            if col == width - 1 or row == height - 1:
                continue
            # Compare each pixel against its lower-right diagonal neighbour.
            diff = abs(pixels[col, row] - pixels[col + 1, row + 1])
            pixels[col, row] = 0 if diff >= threshold else 255
    return sketch
|
8d56d4b88db1d33d31d40c23ce0a6058e69c2c53
| 15,511
|
import pkg_resources
def to_pep440(version: str) -> str:
    """Derive pep440 compliant version string from PyCalVer version string.

    >>> to_pep440("v201811.0007-beta")
    '201811.7b0'
    """
    parsed = pkg_resources.parse_version(version)
    return str(parsed)
|
91a5c19a95320b04fbdcf0917058168d7d1e2704
| 15,512
|
def getIdForMember(member):
    """
    Return the member's unique id (the '@Member_Id' field) as stored.
    """
    member_id = member["@Member_Id"]
    return member_id
|
5aca8b6413581313fffee8ca21761a23c9315e38
| 15,515
|
import re
def course_item(soup):
    """
    Collect (unviewed-lecture count, topic title) pairs from a course page.

    :param soup: BeautifulSoup document for the course page.
    :return: list of ``(count, topic)`` tuples, one per expanded
        course-item header.
    """
    # Strips HTML tags.  Compiled once here instead of once per topic
    # (it was loop-invariant in the original).
    TAG_RE = re.compile(r'<[^>]+>')
    course_list_html = soup.find('div', class_='course-lectures-list')
    topic_html = course_list_html.find_all('div', class_='course-item-list-header expanded')
    view = []
    for topics in topic_html:
        top_html = str(topics.find('h3'))
        # Drop tags, then remove non-breaking-space bytes and spaces.
        topic = TAG_RE.sub('', top_html).replace('\xc2\xa0', '').replace(' ', '')
        ul_html = topics.findNextSibling('ul')
        li_html = ul_html.find_all('li', class_="unviewed")
        view.append((len(li_html), topic))
    return view
|
1d31abdee9a9d48d4484eabdfd838251e5b35fdb
| 15,517
|
def binary_search(array, x):
    """
    Vanilla BS algorithm.
    Look for x in the sorted array;
    return the index if found,
    or else return -1 (note: -1, not None — see the final return).
    [8, 9, 10]
    """
    # A: init the low and high values
    low, high = 0, len(array) - 1
    # B: search the array
    while low <= high:
        # find the middle value
        middle = (low + high) // 2
        mid_elem = array[middle]
        # if found, return the index
        if mid_elem == x:
            return middle
        # if not, divide the array into two subproblems
        elif mid_elem < x:  # go right
            low = middle + 1
        elif mid_elem > x:  # go left
            high = middle - 1
    # C: if not found, return -1
    return -1
|
99fb659774efabbbd5f53c91fd47d6e106542a4c
| 15,519
|
from typing import List
from typing import Dict
import json
def load_jsonl(filepath: str) -> List[Dict[str, str]]:
    """
    Load data from a `.jsonl` file, i.e., a file with one dictionary per line.

    Args:
        filepath (str): Path to `.jsonl` file.

    Returns:
        List[Dict[str, str]]: A list of dictionaries, one per line.
    """
    with open(filepath, "r") as f:
        # Iterate the file object directly instead of materializing every
        # line up front with readlines().
        return [json.loads(line) for line in f]
|
defed6436b4e6dd17aae7163db19a3558446fb34
| 15,521
|
def shell_sort(arr):
    """
    Shell sort using the Knuth gap sequence (1, 4, 13, 40, ...).

    Sorts ``arr`` in place and returns the number of element comparisons
    performed — note the return value is the comparison count, not the
    sorted list.
    """
    comp = 0  # comparison counter (this is what gets returned)
    arr_len = len(arr)
    gap = 1
    # Get the gap number: largest Knuth gap below arr_len/3.
    while gap < arr_len/3:
        gap = 3*gap + 1
    while gap >= 1:
        # Gapped insertion sort for the current gap size.
        for i in range(gap, arr_len):
            j = i
            while (j >= gap):
                if (arr[j] < arr[j-gap]):
                    arr[j], arr[j-gap] = arr[j-gap], arr[j]
                    j -= gap
                    comp += 1
                else:
                    comp += 1
                    break
        gap = gap // 3
    return comp
|
87774557017093d1152b45f29947ba5f2b8b33b6
| 15,523
|
def get_select_options(form):
    """
    Return a dict mapping each SELECT field's name to the list of its
    option values, based on the SELECT fields in the given form.
    """
    tags = {}
    selects = []
    for field in form.inputs:
        # SELECT elements carry no 'type' attribute (unlike <input>).
        # Bug fix: dict.has_key() was removed in Python 3 and raised
        # AttributeError here; membership test works on both versions.
        # (Also renamed the loop variable so it no longer shadows the
        # builtin input().)
        if 'type' not in field.attrib:
            selects.append(field)
    for select in selects:
        name = select.attrib['name']
        tags[name] = [child.attrib['value'] for child in select.getchildren()]
    return tags
|
15773b1fd7483a71e6bae7d5d00647af788b8c8a
| 15,524
|
def write_kpts_vasp(kpts):
    """
    Format a list of kpoints as a vasp-style KPOINTS file.

    kpts: 2-D array-like (n x 3) of reciprocal coordinates, indexable as
        ``kpts[i, j]``.
    Returns the full KPOINTS file contents as a string; every kpoint line
    ends with a unit weight of 1.0.
    """
    lines = ["Supercell_k-points_from_primitive_cell",
             str(len(kpts)),
             "reciprocal"]
    for row in range(len(kpts)):
        coords = " ".join(str(kpts[row, axis]) for axis in range(3))
        lines.append(coords + " 1.0")
    return "\n".join(lines) + "\n"
|
66a0f5b5128172d64c1e3d0af55695ffb10653b3
| 15,525
|
def _justify_and_indent(text, level, munge=0, width=72):
    """Indent and justify text; rejustify (munge) if specified.

    With munge, words are greedily packed into lines prefixed with ``level``
    spaces; a line is flushed once it exceeds ``width`` (so lines may
    overshoot by one word).  Without munge, each existing line is simply
    re-indented.
    """
    indent = " " * level
    if munge:
        lines = []
        line = indent
        text = text.split()
        for word in text:
            line = ' '.join([line, word])  # NOTE: adds a space right after the indent
            if len(line) > width:
                lines.append(line)  # flushed line may exceed width by one word
                line = indent
        else:
            # for/else: runs after the loop completes; flushes the final
            # partial line (which may be just the bare indent if the last
            # word exactly filled the previous line).
            lines.append(line)
        return '\n'.join(lines)
    else:
        return indent + \
            text.strip().replace("\r\n", "\n") .replace("\n", "\n" + indent)
|
dafa407bdf42832d92f8c3592ec9ab4f29711710
| 15,526
|
import random
def total():
    """Produce a random purchase amount in [0, 1000), rounded to cents."""
    amount = random.random() * 1000
    return round(amount, 2)
|
5034ed943b1860384c3a19f129d0ab1b7d14629e
| 15,527
|
import os
def get_current_working_directory():
    """
    Returns current working directory
    :return: str, path to the current working directory
    """
    cwd = os.getcwd()
    return cwd
|
3e0364700576ff86a9ddd48286759c74dd71f705
| 15,529
|
def prettyprint_binding(binding, indent_level=0):
    """Pretty print a binding as '<vID : data!r>', one space per indent level.

    A falsy binding renders as '<>'.
    """
    pad = " " * indent_level
    if not binding:
        return pad + "<>"
    return pad + "<v%d : %r>" % (binding.variable.id, binding.data)
|
e1e196afd53027bbab076585dd8b6b6464b578bc
| 15,531
|
def database_oops(exc, _):
    """View triggered when the database falls over.

    Returns the template context: a flag plus the triggering exception.
    The second (unused) argument is kept for the caller's signature.
    """
    return dict(no_database=True, exception=exc)
|
f5fdaf887659f62c849a9beaa7a2cba5333ffe01
| 15,533
|
def get_surface_albedo() -> float:
    """
    :return: solar reflectance (albedo) of the ground surface, dimensionless
    """
    ground_albedo = 0.2
    return ground_albedo
|
dc0bb08520d0858fdd6a95ab33932e7db4f710ef
| 15,534
|
def dist(p, q):
    """Calculate the Euclidean distance between the points p and q.

    Parameters
    ----------
    p : Point
        the first point (must expose an ``array()`` method)
    q : Point
        the second point

    Returns
    -------
    Float
        The distance between the points p and q.
    """
    difference = p.array() - q.array()
    return (sum(difference ** 2)) ** 0.5
|
97c0beb7f8c22e59da9551e35e66a7d8e39bf667
| 15,535
|
def try_int(text, default=0):
    """Try to parse an integer but return a default if it fails.

    Also catches TypeError so non-string, non-numeric inputs (e.g. None)
    fall back to the default instead of raising, matching the docstring's
    promise.
    """
    try:
        return int(text)
    except (ValueError, TypeError):
        return default
|
b699bbd7209a88e1df3d2b941d581904bf30e737
| 15,536
|
from typing import IO
from typing import Any
from typing import Sequence
import struct
def read_bytes_as_tuple(f: IO[Any], fmt: str) -> Sequence[Any]:
    """Read bytes using a `struct` format string and return the unpacked data values.

    Parameters
    ----------
    f : IO[Any]
        The IO stream to read bytes from.
    fmt : str
        A Python `struct` format string.

    Returns
    -------
    Sequence[Any]
        The unpacked data values read from the stream.
    """
    reader = struct.Struct(fmt)
    raw = f.read(reader.size)
    return reader.unpack(raw)
|
b2622d5aad02c528163cb3318b5c70ea0510ea01
| 15,537
|
import os
def Prex_img(cls, img_name):
    """
    Join the class/root directory path with an image file name.

    e.g. cls = '/Users/lees/Desktop/img_folder/cls_name', img = '1.png'
    then return: /Users/lees/Desktop/img_folder/cls_name/1.png
    """
    full_path = os.path.join(cls, img_name)
    return full_path
|
98cfd559c02d5e1a7739843bfaa7fd26e06f6881
| 15,538
|
def clean_record(rec: list, field_names: list) -> dict:
    """
    Pair each field name with its record element, stripping surrounding
    '~' characters from every element.

    :param rec: record extracted from file
    :ptype rec: list
    :param field_names: field names for the record
    :ptype field_names: list
    :rtype: Dictionary of cleaned elements.
    """
    cleaned = (element.strip('~') for element in rec)
    return dict(zip(field_names, cleaned))
|
a76974cb8f7e12aec80404df85b90001df2a69f4
| 15,539
|
import re
def _get_changelist(perforce_str):
"""Extract change list from p4 str"""
rx = re.compile(r'Change: (\d+)')
match = rx.search(perforce_str)
if match is None:
v = 'UnknownChangelist'
else:
try:
v = int(match.group(1))
except (TypeError, IndexError):
v = "UnknownChangelist"
return v
|
26b08d8ccf6251e0d3a9698147093e2bb6e89919
| 15,540
|
def parse_step_id_from_sacct(output, step_name):
    """Parse and return the step id from a sacct command.

    :param output: output of sacct --noheader -p
        --format=jobname,jobid --job <alloc>
    :type output: str
    :param step_name: the name of the step to query
    :type step_name: str
    :return: the step_id (the last matching record wins), or None
    :rtype: str
    """
    step_id = None
    for record in output.split("\n"):
        fields = record.split("|")
        if len(fields) >= 2 and fields[0] == step_name:
            step_id = fields[1]
    return step_id
|
2ed06aba82665ad2a42c12a39c589d10258883f4
| 15,541
|
def validate_course_policy(module_store, course_id):
    """
    Validate that the course explicitly sets values for any fields
    whose defaults may have changed between the export and the import.

    Does not add to error count as these are just warnings.

    Args:
        module_store: store whose ``modules[course_id]`` maps locations to
            loaded modules (assumed shape — confirm against caller).
        course_id: id of the course to check.

    Returns:
        int: number of warnings printed.
    """
    # is there a reliable way to get the module location just given the course_id?
    warn_cnt = 0
    for module in module_store.modules[course_id].values():
        # Only the course root block carries these policy fields.
        if module.location.block_type == 'course':
            if not module._field_data.has(module, 'rerandomize'):  # lint-amnesty, pylint: disable=protected-access
                warn_cnt += 1
                print(
                    'WARN: course policy does not specify value for '
                    '"rerandomize" whose default is now "never". '
                    'The behavior of your course may change.'
                )
            if not module._field_data.has(module, 'showanswer'):  # lint-amnesty, pylint: disable=protected-access
                warn_cnt += 1
                print(
                    'WARN: course policy does not specify value for '
                    '"showanswer" whose default is now "finished". '
                    'The behavior of your course may change.'
                )
    return warn_cnt
|
5725d03bd3b3823f0c6dc6f5d6d4a1cb94692420
| 15,543
|
def extract_items(topitems_or_libraryitems):
    """
    Extract the ``.item`` attribute from each element of a sequence of
    TopItem or LibraryItem objects, preserving order.
    """
    return [wrapper.item for wrapper in topitems_or_libraryitems]
|
6a6918b4fc8153f4c98f5df906ce584422b3df78
| 15,544
|
def city_data():
    """The data for the one and only `City` object as a `dict`."""
    return dict(
        id=1,
        name='Paris',
        kml="<?xml version='1.0' encoding='UTF-8'?> ...",
        center_latitude=48.856614,
        center_longitude=2.3522219,
        northeast_latitude=48.9021449,
        northeast_longitude=2.4699208,
        southwest_latitude=48.815573,
        southwest_longitude=2.225193,
        initial_zoom=12,
    )
|
346e067ecb48e8f3c56eb0834af92a78867ce9f5
| 15,545
|
def get_custom_module_description(name):
    """Return the description string for a custom module.

    ``name`` is dotted; the second component is the module file stem.
    """
    module_name = name.split(".")[1]
    return f"Custom module from corpus directory ({module_name}.py)."
|
647e2df266a25b888558c34b4d425e09b0f58ca1
| 15,546
|
def _showcompatlist(
    context, mapping, name, values, plural=None, separator=b' '
):
    """Return a generator that renders old-style list template

    name is name of key in template map.
    values is list of strings or dicts.
    plural is plural of name, if not simply name + 's'.
    separator is used to join values as a string

    expansion works like this, given name 'foo'.

    if values is empty, expand 'no_foos'.

    if 'foo' not in template map, return values as a string,
    joined by 'separator'.

    expand 'start_foos'.

    for each value, expand 'foo'. if 'last_foo' in template
    map, expand it instead of 'foo' for last key.

    expand 'end_foos'.
    """
    if not plural:
        plural = name + b's'
    if not values:
        # No values: only the 'no_foos' template (if any) is rendered.
        noname = b'no_' + plural
        if context.preload(noname):
            yield context.process(noname, mapping)
        return
    if not context.preload(name):
        # No per-item template: join raw bytes values, or yield value
        # dicts overlaid on the current mapping.
        if isinstance(values[0], bytes):
            yield separator.join(values)
        else:
            for v in values:
                r = dict(v)
                r.update(mapping)
                yield r
        return
    startname = b'start_' + plural
    if context.preload(startname):
        yield context.process(startname, mapping)

    def one(v, tag=name):
        # Render a single value through template `tag`, merging the value
        # into the mapping (handles dict-like, pair-iterable, or scalar v).
        vmapping = {}
        try:
            vmapping.update(v)
        # Python 2 raises ValueError if the type of v is wrong. Python
        # 3 raises TypeError.
        except (AttributeError, TypeError, ValueError):
            try:
                # Python 2 raises ValueError trying to destructure an e.g.
                # bytes. Python 3 raises TypeError.
                for a, b in v:
                    vmapping[a] = b
            except (TypeError, ValueError):
                vmapping[name] = v
        vmapping = context.overlaymap(mapping, vmapping)
        return context.process(tag, vmapping)

    lastname = b'last_' + name
    if context.preload(lastname):
        # A dedicated 'last_foo' template exists: hold the final value back.
        last = values.pop()
    else:
        last = None
    for v in values:
        yield one(v)
    if last is not None:
        yield one(last, tag=lastname)
    endname = b'end_' + plural
    if context.preload(endname):
        yield context.process(endname, mapping)
|
a0e717c645aa00a8dd239a435cdec42593d58411
| 15,547
|
def get_pixdist_ratio(m_size, ant_rad):
    """Get the ratio between pixel number and physical distance.

    Parameters
    ----------
    m_size : int
        The number of pixels used along one dimension of the model
        (the model is assumed to be square).
    ant_rad : float
        The radius of the antenna trajectory during the scan, in meters.

    Returns
    -------
    pix_to_dist_ratio : float
        The number of pixels per physical meter: the model spans twice
        the antenna radius.
    """
    physical_extent = 2 * ant_rad
    return m_size / physical_extent
|
b307ee5da49b9cd92958aa6d94c4e3ffa860bf1d
| 15,549
|
def sstrip(s, suffix):
    """Suffix strip: drop *suffix* from the end of *s* when present.

    An empty suffix leaves *s* unchanged.

    >>> sstrip('foo.oof', '.oof')
    'foo'
    >>> sstrip('baroof', '.oof')
    'baroof'
    """
    if suffix and s.endswith(suffix):
        return s[:-len(suffix)]
    return s
|
7f4d0f118caa48d68ae826813b65e3d452c2c1e8
| 15,550
|
import textwrap
def wrap_string(data, width=40, indent=32, indentAll=False, followingHeader=None):
    """
    Print a option description message in a nicely
    wrapped and formatted paragraph.

    data: text to wrap (coerced to str first).
    width: maximum characters per wrapped line.
    indent: spaces prefixed to continuation lines (and the first line
        when indentAll is set).
    indentAll: also indent the first line.
    followingHeader -> text that also goes on the first line.

    Text no longer than ``width`` is returned stripped, unwrapped.
    """
    data = str(data)
    if len(data) > width:
        lines = textwrap.wrap(textwrap.dedent(data).strip(), width=width)
        # The two branches differ only in whether the first line is indented.
        if indentAll:
            returnString = ' ' * indent + lines[0]
            if followingHeader:
                returnString += " " + followingHeader
        else:
            returnString = lines[0]
            if followingHeader:
                returnString += " " + followingHeader
        # Continuation lines are always indented.
        i = 1
        while i < len(lines):
            returnString += "\n" + ' ' * indent + (lines[i]).strip()
            i += 1
        return returnString
    else:
        return data.strip()
|
4e983e4116058da1fa263e65aa3516a275d89001
| 15,551
|
def initial(x, idx):
    """
    Get the first value of the series (within the slice ``idx`` if given).

    idx may be None (whole series) or a slice; a slice with start=None
    behaves like start=0.
    """
    if idx is None:
        return x.iloc[0]
    start = idx.start or 0
    return x.iloc[start]
|
28d364329af3f579868807773efb8a074e2c3cb5
| 15,552
|
import torch
def _conjugate_gradient(f_Ax, b, cg_iters, residual_tol=1e-10):
    """Use Conjugate Gradient iteration to solve Ax = b. Demmel p 312.

    Args:
        f_Ax (callable): A function to compute Hessian vector product.
        b (torch.Tensor): Right hand side of the equation to solve.
        cg_iters (int): Number of iterations to run conjugate gradient
            algorithm.
        residual_tol (float): Tolerence for convergence.

    Returns:
        torch.Tensor: Solution x* for equation Ax = b.
    """
    p = b.clone()  # search direction
    r = b.clone()  # residual b - Ax (x starts at zero, so r starts at b)
    x = torch.zeros_like(b)
    rdotr = torch.dot(r, r)
    for _ in range(cg_iters):
        z = f_Ax(p)
        v = rdotr / torch.dot(p, z)  # step length along p
        x += v * p
        r -= v * z
        newrdotr = torch.dot(r, r)
        mu = newrdotr / rdotr  # coefficient for the next search direction
        p = r + mu * p
        rdotr = newrdotr
        # Stop early once the squared residual is below tolerance.
        if rdotr < residual_tol:
            break
    return x
|
ab8aa89ae15bef9214dbecfcff2b587993d550f9
| 15,556
|
def list_to_bytes(input_list):
    """Concatenate a list of byte strings into one bytes object.

    Args:
        input_list (list): list of ``bytes`` fragments.

    Returns:
        bytes: the fragments joined in order.
    """
    empty = b''
    return empty.join(input_list)
|
7a2341540c131316ec0eebf06e3ffe447a95c9d6
| 15,557
|
from typing import Dict
from typing import Optional
def get_task_exhaustation_date(
    plan_worktime_dict: Dict[str, float], annotation_not_started: int, velocity_per_task: float
) -> Optional[str]:
    """
    Compute the date on which the backlog of un-annotated tasks runs out.

    Args:
        plan_worktime_dict: planned work hours keyed by date string.
        annotation_not_started: number of tasks not yet annotated.
        velocity_per_task: work hours required per task.

    Returns:
        The first date (in sorted order) on which the remaining task count
        reaches zero, or None when the backlog is already empty, the
        velocity is non-positive, or the plan never exhausts the backlog.
    """
    if annotation_not_started <= 0 or velocity_per_task <= 0:
        return None
    remaining: float = annotation_not_started
    for day in sorted(plan_worktime_dict):
        remaining -= plan_worktime_dict[day] / velocity_per_task
        if remaining <= 0:
            return day
    return None
|
ae4d61398e856403614b47fa95dd7b90a02b07b5
| 15,558
|
def rb_is_true(value: str) -> bool:
    """Case-insensitive string-to-bool conversion for use with args.get(...)."""
    normalized = value.lower()
    return normalized == "true"
|
05835733e90e1bd756f8e1b02bb1c5683e628798
| 15,559
|
import torch
def most_confident(scores, thresh=0.3):
    """
    Keep only the prediction ranks whose worst-case confidence beats a threshold.

    :param scores: (n_way, n_queries) tensor of scores
    :param thresh: float confidence threshold
    :return: (n_way, d) tensor of indices, where d is the number of leading
        ranks whose per-rank minimum (across rows) exceeds ``thresh``
    """
    ranked_scores, ranked_idx = torch.sort(scores, dim=1, descending=True)
    depth = (ranked_scores.min(dim=0, keepdim=True).values > thresh).sum()
    return ranked_idx[:, :depth]
|
953d1699019bc6a325faa5b7f84a1c3de656ebf2
| 15,561
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.