| content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
|---|---|---|
import numpy as np
import numpy.linalg as npl
def backproject_to_plane(cam, img_pt, plane):
    """Back-project an image point to a specified world plane"""
# map to normalized image coordinates
npt = np.matrix(npl.solve(cam[0], np.array(list(img_pt)+[1.0])))
M = cam[1].transpose()
n = np.matrix(plane[:3]).flatten()
d = plane.flat[3]
Mt = M * cam[2]
Mp = M * npt.transpose()
return Mp * (np.dot(n, Mt) - d) / np.dot(n, Mp) - Mt
|
47ae45103460db5a5447900dda10783c8f92362e
| 3,640,100
|
from datetime import datetime
def format_cell(cell, datetime_fmt=None):
"""Format a cell."""
if datetime_fmt and isinstance(cell, datetime):
return cell.strftime(datetime_fmt)
return cell
|
8d3fb41bb3d7d3f3b341482e2d050d32092118bf
| 3,640,101
|
def optimize(gradients, optim, global_step, summaries, global_norm=None, global_norm_clipped=None, appendix=''):
"""Modified from sugartensor"""
# Add Summary
if summaries is None:
summaries = ["loss", "learning_rate"]
# if "gradient_norm" in summaries:
# if global_norm is None:
# tf.summary.scalar("global_norm/gradient_norm" + appendix,
# clip_ops.global_norm(list(zip(*gradients))[0]))
# else:
# tf.summary.scalar("global_norm/gradient_norm" + appendix,
# global_norm)
# if global_norm_clipped is not None:
# tf.summary.scalar("global_norm/gradient_norm_clipped" + appendix,
# global_norm_clipped)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
var_name = variable.name.replace(":", "_")
# if "gradients" in summaries:
# tf.summary.histogram("gradients/%s" % var_name, grad_values)
# if "gradient_norm" in summaries:
# tf.summary.scalar("gradient_norm/%s" % var_name,
# clip_ops.global_norm([grad_values]))
# Gradient Update OP
return optim.apply_gradients(gradients, global_step=global_step)
|
4887d45d5b9eb5a96008daeab5d11c97afed27fd
| 3,640,102
|
import boto3
def _get_desired_asg_capacity(region, stack_name):
"""Retrieve the desired capacity of the autoscaling group for a specific cluster."""
asg_conn = boto3.client("autoscaling", region_name=region)
tags = asg_conn.describe_tags(Filters=[{"Name": "value", "Values": [stack_name]}])
asg_name = tags.get("Tags")[0].get("ResourceId")
response = asg_conn.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
return response["AutoScalingGroups"][0]["DesiredCapacity"]
|
cdcec8493333a001fe3883b0c815da521c571f7a
| 3,640,103
|
def _default_geo_type_precision():
""" default digits after decimal for geo types """
return 4
|
eef082c8a8b38f4ede7bfb5d631b2679041b650c
| 3,640,104
|
import h5py
import numpy as np
import pandas as pd
import scipy.stats
def load_movietimes(filepath_timestamps, filepath_daq):
"""Load daq and cam time stamps, create muxer"""
df = pd.read_csv(filepath_timestamps)
# DAQ time stamps
with h5py.File(filepath_daq, 'r') as f:
daq_stamps = f['systemtime'][:]
daq_sampleinterval = f['samplenumber'][:]
# remove trailing zeros - may be left over if recording didn't finish properly
if 0 in daq_stamps:
last_valid_idx = np.argmax(daq_stamps == 0)
else:
last_valid_idx = len(daq_stamps) - 1 # in case there are no trailing zeros
daq_samplenumber = np.cumsum(daq_sampleinterval)[:last_valid_idx, np.newaxis]
last_sample = daq_samplenumber[-1, 0]
nb_seconds_per_interval, _ = scipy.stats.mode(np.diff(daq_stamps[:last_valid_idx, 0])) # seconds - using mode here to be more robust
nb_seconds_per_interval = nb_seconds_per_interval[0]
nb_samples_per_interval = np.mean(np.diff(daq_samplenumber[:last_valid_idx, 0]))
sampling_rate_Hz = np.around(nb_samples_per_interval / nb_seconds_per_interval, -3) # round to 1000s of Hz
# ss = SampStamp(sample_times=daq_stamps[:last_valid_idx, 0], frame_times=shutter_times, sample_numbers=daq_samplenumber[:, 0], auto_monotonize=False)
ss = SampStamp(sample_times=daq_stamps[:last_valid_idx, 0], sample_numbers=daq_samplenumber[:, 0],
frame_samples=df['sample'], frame_numbers=df['movie_frame'], auto_monotonize=False)
# # different refs:
#
# # first sample is 0 seconds
# s0 = ss.sample_time(0)
# ss = SampStamp(sample_times=daq_stamps[:, 0] - s0, frame_times=cam_stamps[:, 0] - s0, sample_numbers=daq_samplenumber[:, 0])
#
# # first frame is 0 seconds - for no-resample-video-data
# f0 = ss.frame_time(0)
# ss = SampStamp(sample_times=daq_stamps[:, 0] - f0, frame_times=cam_stamps[:, 0] - f0, sample_numbers=daq_samplenumber[:, 0])
return ss, last_sample, sampling_rate_Hz
|
4d54f7378f3d189a5bea4c14f68d5556958ba4f3
| 3,640,105
|
def is_valid_hotkey(hotkey: str) -> bool:
"""Returns True if hotkey string is valid."""
mode_opts = ["press", "click", "wheel"]
btn_opts = [b.name for b in Button]
wheel_opts = ["up", "down"]
hotkeylist = hotkey[2:].split("_")
if len(hotkeylist) == 0 or len(hotkeylist) % 2 != 0:
return False
for i in range(0, len(hotkeylist), 2):
mode = hotkeylist[i]
btn = hotkeylist[i + 1]
if mode not in mode_opts:
return False
if mode == "wheel" and btn not in wheel_opts:
return False
elif mode in ("press", "click") and btn not in btn_opts:
return False
    # the last action in the sequence must not be a bare "press"
    if hotkeylist[-2] == "press":
        return False
return True
|
2fb47b3f77b4cb3da2b70340b6ed96bd03c0bd14
| 3,640,106
|
def find_range_with_sum(values: list[int], target: int) -> tuple[int, int]:
    """Given a list of positive integers, find a half-open range [i, j) which
    sums to a target value."""
    i = j = acc = 0
    # Sliding window: grow on the right while the sum is too small,
    # shrink on the left while it is too large.
    while True:
        if acc == target:
            return i, j
        if acc < target:
            if j == len(values):
                break
            acc += values[j]
            j += 1
        else:
            acc -= values[i]
            i += 1
    return -1, -1
|
d54f185c98c03f985724a29471ecb1e301c14df5
| 3,640,107
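A short usage sketch for the sliding-window search above; the sample values are illustrative only:
values = [3, 1, 4, 1, 5, 9, 2, 6]
i, j = find_range_with_sum(values, 15)
assert (i, j) == (3, 6) and sum(values[3:6]) == 15   # 1 + 5 + 9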
|
def output_AR1(outfile, fmri_image, clobber=False):
"""
Create an output file of the AR1 parameter from the OLS pass of
fmristat.
Parameters
----------
outfile :
fmri_image : ``FmriImageList`` or 4D image
object such that ``object[0]`` has attributes ``coordmap`` and ``shape``
clobber : bool
if True, overwrite previous output
Returns
-------
regression_output : ``RegressionOutput`` instance
"""
outim = ModelOutputImage(outfile, fmri_image[0].coordmap,
fmri_image[0].shape, clobber=clobber)
return outputters.RegressionOutput(outim, outputters.output_AR1)
|
b805e73992a51045378d5e8f86ccf780d049002b
| 3,640,108
|
def feature_bit_number(current):
"""Fuzz bit number field of a feature name table header extension."""
constraints = UINT8_V
return selector(current, constraints)
|
4a2103f399765aec9d84c8152922db3801e4a718
| 3,640,109
|
def render_page(context, slot, payload): # pylint: disable=R0201,W0613
""" Base template slot """
chapter = request.args.get('chapter', '')
module = request.args.get('module', '')
page = request.args.get('page', '')
try:
if page:
return render_template(f"{chapter.lower()}/{module.lower()}/{page.lower()}.html", active_chapter=chapter,
config=payload)
return render_template(f"{chapter.lower()}/{module.lower()}.html", active_chapter=chapter, config=payload)
    except Exception:
        return render_template("common/empty.html", active_chapter=chapter, config=payload)
|
69e5a837d90084b4c215ff75fb08a47aab1cff97
| 3,640,110
|
def add_stripe_customer_if_not_existing(f):
"""
Decorator which creates user as a customer if not already existing before making a request to the Stripe API
"""
@wraps(f)
def wrapper(user: DjangoUserProtocol, *args, **kwargs):
user = create_customer(user)
return f(user, *args, **kwargs)
return wrapper
|
676e9fad5de545a2627d52942917a49af3c6539d
| 3,640,111
|
def debug():
"""
Import the test utils module to be able to:
- Use the trace tool and get context variables after making a request to Apigee
"""
return ApigeeApiTraceDebug(proxy=config.PROXY_NAME)
|
0639a52e1a838b0408a820dd29ae4a08f89b0adc
| 3,640,112
|
def FromModuleToDoc(importedMod,filDfltText):
"""
Returns the doc string of a module as a literal node. Possibly truncated
so it can be displayed.
"""
    docModu = None
    try:
        docModuAll = importedMod.__doc__
        if docModuAll:
            docModuAll = docModuAll.strip()
            # Take only the first non-empty line.
            docModuSplit = docModuAll.split("\n")
            for docModu in docModuSplit:
                if docModu:
                    # sys.stderr.write("DOC="+docModu)
                    maxLen = 40
                    if len(docModu) > maxLen:
                        docModu = docModu[0:maxLen] + "..."
                    break
    except Exception:
        docModu = ""
if not docModu:
# If no doc available, just transform the file name.
docModu = filDfltText.replace("_"," ").capitalize()
nodModu = NodeLiteral(docModu)
return nodModu
|
e79882283e232166df67a1a5333e60fcdb2a136f
| 3,640,113
|
def noisify_patternnet_asymmetric(y_train, noise, random_state=None):
""" mistakes in labelling the land cover classes in PatternNet dataset
cemetery -> christmas_tree_fram
harbor <--> ferry terminal
Den.Res --> costal home
overpass <--> intersection
park.space --> park.lot
runway_mark --> park.space
costal home <--> sparse Res
swimming pool --> costal home
"""
nb_classes = 38
P = np.eye(nb_classes)
n = noise
if n>0.0:
P[5,5], P[5,7] = 1.-n, n
P[9,9], P[9,32] =1.-n, n
P[11,11], P[11,9] = 1.-n, n
P[17,17], P[17,12] = 1.-n, n
P[12,12], P[12,17] = 1.-n, n
P[18,18], P[18,23] = 1.-n, n
P[23,23], P[23,18] = 1.-n, n
P[25,25], P[25, 24] = 1.-n, n
P[29,29], P[29,25] = 1.-n, n
P[32,32], P[32,9] = 1.-n, n
P[34,34], P[34,9] = 1.-n, n
y_train_noisy = multiclass_noisify(y_train, P=P,
random_state=random_state)
actual_noise = (y_train_noisy != y_train).mean()
assert actual_noise > 0.0
print('Actual noise %.2f' % actual_noise)
y_train = y_train_noisy
return y_train
|
ef37d5c39081cba489076956c0cd948a93ba0387
| 3,640,114
|
def group_superset_counts(pred, label):
"""
Return TP if all label spans appear within pred spans
    :param pred, label: A group, represented as a dict
:return: A Counts namedtuple with TP, FP and FN counts
"""
if (pred["label"] != label["label"]):
return Counts(0, 1, 1)
for label_span in label["spans"]:
for pred_span in pred["spans"]:
if (pred_span["start"] <= label_span["start"] and
pred_span["end"] >= label_span["end"]):
break
else:
return Counts(0, 1, 1)
return Counts(1, 0, 0)
|
8fddd5cfdb0050e97ec60d37e4d939b40cf5d891
| 3,640,115
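An illustrative call to group_superset_counts above; `Counts` is not defined in the snippet, so a plausible (tp, fp, fn) namedtuple is assumed here:
from collections import namedtuple
Counts = namedtuple("Counts", ["tp", "fp", "fn"])  # assumed shape of the result triple
pred = {"label": "PER", "spans": [{"start": 0, "end": 10}]}
gold = {"label": "PER", "spans": [{"start": 2, "end": 8}]}
print(group_superset_counts(pred, gold))  # Counts(tp=1, fp=0, fn=0): the gold span lies inside a pred span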
|
def other():
""" Queries all of the logged in user's Campaigns
and plugs them into the campaigns template """
entities = db.session.query(Entity)
entities = [e.to_dict() for e in entities]
return render_template('other.html', entities=entities)
|
7c7613f919bf5eecc223cf90715e9bd2ae6eb130
| 3,640,116
|
def rel_angle(vec_set1, vec_set2):
"""
Calculate the relative angle between two vector sets
Args:
vec_set1(array[array]): an array of two vectors
vec_set2(array[array]): second array of two vectors
"""
return vec_angle(vec_set2[0], vec_set2[1]) / vec_angle(vec_set1[0], vec_set1[1]) - 1
|
af89a10e26968f53200294919d8b72b532aa3522
| 3,640,117
|
def check_position_axes(chgcar1: CHGCAR, chgcar2: CHGCAR) -> bool:
"""Check the cell vectors and atom positions are same in two CHGCAR.
Parameters
-----------
chgcar1, chgcar2: vaspy.CHGCAR
Returns
-------
bool
"""
cell1 = chgcar1.poscar.cell_vecs
cell2 = chgcar2.poscar.cell_vecs
pos1 = np.array(chgcar1.poscar.positions)
pos2 = np.array(chgcar2.poscar.positions)
assert np.allclose(cell1, cell2), "UnitCells are inconsistent. Abort."
assert np.allclose(pos1, pos2), "Atom positions are inconsistent!!! Abort."
return True
|
29eabfd72a664c77d55164953b6819f3eabd72f1
| 3,640,118
|
import heapq as heap
def path_shortest(graph, start):
    """ Pythonic min-heap implementation of Dijkstra's algorithm """
# Initialize all distances to infinity but the start one.
distances = {node: float('infinity') for node in graph}
distances[start] = 0
paths = [(0, start)]
while paths:
current_distance, current_node = heap.heappop(paths)
neighbors = graph[current_node].items()
for neighbor, weight in neighbors:
distance = current_distance + weight
if distance < distances[neighbor]:
distances[neighbor] = distance
heap.heappush(paths, (distance, neighbor))
return distances
|
32fe7df3fb02c3a0c3882f5cc5135417c5193985
| 3,640,119
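A minimal usage sketch for path_shortest above, assuming the adjacency-dict format implied by `graph[node].items()` (neighbor -> edge weight):
graph = {
    'a': {'b': 1, 'c': 4},
    'b': {'c': 2, 'd': 5},
    'c': {'d': 1},
    'd': {},
}
print(path_shortest(graph, 'a'))  # {'a': 0, 'b': 1, 'c': 3, 'd': 4}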
|
import urllib.parse
import urllib.request
import json
def request(url, *args, **kwargs):
"""Requests a single JSON resource from the Wynncraft API.
:param url: The URL of the resource to fetch
:type url: :class:`str`
:param args: Positional arguments to pass to the URL
:param kwargs: Keyword arguments (:class:`str`) to pass to the URL
:returns: The returned JSON object as a :class:`dict`
:rtype: :class:`dict`
"""
parsedArgs = (urllib.parse.quote(a) for a in args)
parsedKwargs = {}
for k,v in kwargs.items():
parsedKwargs[k] = urllib.parse.quote(v)
response = urllib.request.urlopen(url.format(*parsedArgs, **parsedKwargs))
data = json.load(response)
response.close()
return data
|
66f23e5a15b44b5c9bc0777c717154749d25987e
| 3,640,120
|
def destagger(var, stagger_dim, meta=False):
"""Return the variable on the unstaggered grid.
This function destaggers the variable by taking the average of the
values located on either side of the grid box.
Args:
var (:class:`xarray.DataArray` or :class:`numpy.ndarray`): A variable
on a staggered grid.
stagger_dim (:obj:`int`): The dimension index to destagger.
Negative values can be used to choose dimensions referenced
from the right hand side (-1 is the rightmost dimension).
meta (:obj:`bool`, optional): Set to False to disable metadata and
return :class:`numpy.ndarray` instead of
:class:`xarray.DataArray`. Default is False.
Returns:
:class:`xarray.DataArray` or :class:`numpy.ndarray`:
The destaggered variable. If xarray is enabled and
the *meta* parameter is True, then the result will be a
:class:`xarray.DataArray` object. Otherwise, the result will be a
:class:`numpy.ndarray` object with no metadata.
"""
var_shape = var.shape
num_dims = var.ndim
stagger_dim_size = var_shape[stagger_dim]
# Dynamically building the range slices to create the appropriate
# number of ':'s in the array accessor lists.
# For example, for a 3D array, the calculation would be
# result = .5 * (var[:,:,0:stagger_dim_size-2]
# + var[:,:,1:stagger_dim_size-1])
# for stagger_dim=2. So, full slices would be used for dims 0 and 1, but
# dim 2 needs the special slice.
full_slice = slice(None)
slice1 = slice(0, stagger_dim_size - 1, 1)
slice2 = slice(1, stagger_dim_size, 1)
# default to full slices
dim_ranges_1 = [full_slice] * num_dims
dim_ranges_2 = [full_slice] * num_dims
# for the stagger dim, insert the appropriate slice range
dim_ranges_1[stagger_dim] = slice1
dim_ranges_2[stagger_dim] = slice2
result = .5*(var[tuple(dim_ranges_1)] + var[tuple(dim_ranges_2)])
return result
|
89bb08618fa8890001f72a43da06ee8b15b328be
| 3,640,121
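A small numpy-only check of the destaggering logic above: averaging adjacent values along the staggered dimension shortens that dimension by one.
import numpy as np
var = np.arange(2 * 3 * 5, dtype=float).reshape(2, 3, 5)   # staggered along the last axis
out = destagger(var, stagger_dim=2)
assert out.shape == (2, 3, 4)
assert np.allclose(out, 0.5 * (var[:, :, :-1] + var[:, :, 1:]))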
|
from typing import Optional
from typing import Dict
from typing import Any
def predict_pipeline_acceleration(
data: arr_t, sampling_rate: float, convert_to_g: Optional[bool] = True, **kwargs
) -> Dict[str, Any]:
"""Apply sleep processing pipeline on raw acceleration data.
This function processes raw acceleration data collected during sleep. The pipeline consists of the following steps:
* *Activity Count Conversion*: Convert (3-axis) raw acceleration data into activity counts. Most sleep/wake
detection algorithms use activity counts (as typically provided by Actigraphs) as input data.
* *Wear Detection*: Detect wear and non-wear periods. Cut data to longest continuous wear block.
* *Rest Periods*: Detect rest periods, i.e., periods with large physical inactivity. The longest continuous
rest period (*Major Rest Period*) is used to determine the *Bed Interval*, i.e., the period spent in bed.
* *Sleep/Wake Detection*: Apply sleep/wake detection algorithm to classify phases of sleep and wake.
* *Sleep Endpoint Computation*: Compute Sleep Endpoints from sleep/wake detection results and bed interval.
Parameters
----------
data : array_like with shape (n,3)
input data. Must be a 3-d acceleration signal
sampling_rate : float
sampling rate of recorded data in Hz
convert_to_g : bool, optional
``True`` if input data is provided in :math:`m/s^2` and should be converted in :math:`g`, ``False`` if input
data is already in :math:`g` and does not need to be converted.
Default: ``True``
**kwargs :
additional parameters to configure sleep/wake detection. The possible parameters depend on the selected
sleep/wake detection algorithm and are passed to
:class:`~biopsykit.sleep.sleep_wake_detection.SleepWakeDetection`.
Returns
-------
dict
dictionary with Sleep Processing Pipeline results.
"""
# TODO: add entries of result dictionary to docstring and add possibility to specify sleep/wake prediction algorithm
ac = ActivityCounts(sampling_rate)
wd = WearDetection(sampling_rate=sampling_rate)
rp = RestPeriods(sampling_rate=sampling_rate)
sw = SleepWakeDetection("cole_kripke", **kwargs)
if convert_to_g:
data = convert_acc_data_to_g(data, inplace=False)
df_wear = wd.predict(data)
major_wear_block = wd.get_major_wear_block(df_wear)
# cut data to major wear block
data = wd.cut_to_wear_block(data, major_wear_block)
if len(data) == 0:
return {}
df_ac = ac.calculate(data)
df_sw = sw.predict(df_ac)
df_rp = rp.predict(data)
bed_interval = [df_rp["start"][0], df_rp["end"][0]]
sleep_endpoints = compute_sleep_endpoints(df_sw, bed_interval)
if not sleep_endpoints:
return {}
major_wear_block = [str(d) for d in major_wear_block]
dict_result = {
"wear_detection": df_wear,
"activity_counts": df_ac,
"sleep_wake_prediction": df_sw,
"major_wear_block": major_wear_block,
"rest_periods": df_rp,
"bed_interval": bed_interval,
"sleep_endpoints": sleep_endpoints,
}
return dict_result
|
f714a29925c2733d0e8baf8d95b2884bf9d98e6e
| 3,640,122
|
def create_blueprint(request_manager: RequestManager, cache: Cache,
dataset_factory: DatasetFactory):
"""
Creates an instance of the blueprint.
"""
blueprint = Blueprint('metadata', __name__, url_prefix='/metadata')
@cache.memoize()
def _get_method_types_per_approach():
frame = dataset_factory.get_prepared_data_frame()
return metadata.get_method_type_count_per_approach(frame)
@cache.memoize()
def _get_approach_type_counts():
frame = dataset_factory.get_prepared_data_frame()
return metadata.get_approach_type_count(frame)
# pylint: disable=unused-variable
@blueprint.route('method/count')
def get_method_count():
"""
Triggers calculation of number of method types per approach.
---
response:
200:
description: The retrieved result will be a JSON object
representing the number of different method types per
approach.
application/json:
schema:
$ref: '#/definitions/RequestResponse'
"""
ticket = request_manager.submit_ticketed(
_get_method_types_per_approach)
return get_state_response(ticket)
@blueprint.route('approaches/count')
def get_approach_count():
"""
Computes which approach types are present in the available data and how
many sessions each of them was used in.
---
response:
200:
description: The retrieved result will be a JSON object
representing the number of sessions each approach was
used in.
application/json:
schema:
$ref: '#/definitions/RequestResponse'
"""
ticket = request_manager.submit_ticketed(_get_approach_type_counts)
return get_state_response(ticket)
return blueprint
|
32693a6286e4ffb15e4820dbd7ad5fdbe6632e95
| 3,640,123
|
def sides(function_ast, parameters, function_callback):
"""
Given an ast, parses both sides of an expression.
sides(b != c) => None
"""
left = side(function_ast['leftExpression'], parameters, function_callback)
right = side(function_ast['rightExpression'], parameters, function_callback)
return (left, right)
|
9ed00100122f821340a0db37e77bfcf786eacdf9
| 3,640,124
|
def print_url(host, port, datasets):
"""
Prints a list of available dataset URLs, if any. Otherwise, prints a
generic URL.
"""
def url(path = None):
return colored(
"blue",
"http://{host}:{port}/{path}".format(
host = host,
port = port,
path = path if path is not None else ""))
horizontal_rule = colored("green", "—" * 78)
print()
print(horizontal_rule)
if len(datasets):
print(" The following datasets should be available in a moment:")
for path in sorted(datasets, key = str.casefold):
print(" • %s" % url(path))
else:
print(" Open <%s> in your browser." % url())
print()
print(" ", colored("yellow", "Warning: No datasets detected."))
print(horizontal_rule)
print()
|
37d58dce1672f60d72936d6e1b9644fdd5ab689f
| 3,640,125
|
def get_default_sample_path_random(data_path):
"""Return path to sample with default parameters as suffix"""
extra_suffix = get_default_extra_suffix(related_docs=False)
return get_default_sample_path(data_path, sample_suffix=extra_suffix)
|
54220840dc6ef1831859a60058506e7503effcb7
| 3,640,126
|
def VectorShadersAddMaterialDesc(builder, materialDesc):
"""This method is deprecated. Please switch to AddMaterialDesc."""
return AddMaterialDesc(builder, materialDesc)
|
0aaec1d3e14536a65c9cb876075d12348176096c
| 3,640,127
|
import math
def phase_randomize(D, random_state=0):
"""Randomly shift signal phases
For each timecourse (from each voxel and each subject), computes its DFT
and then randomly shifts the phase of each frequency before inverting
back into the time domain. This yields timecourses with the same power
spectrum (and thus the same autocorrelation) as the original timecourses,
but will remove any meaningful temporal relationships between the
timecourses.
This procedure is described in:
Simony E, Honey CJ, Chen J, Lositsky O, Yeshurun Y, Wiesel A, Hasson U
(2016) Dynamic reconfiguration of the default mode network during narrative
comprehension. Nat Commun 7.
Parameters
----------
D : voxel by time by subject ndarray
fMRI data to be phase randomized
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
Returns
----------
ndarray of same shape as D
phase randomized timecourses
"""
random_state = check_random_state(random_state)
F = fft(D, axis=1)
if D.shape[1] % 2 == 0:
pos_freq = np.arange(1, D.shape[1] // 2)
neg_freq = np.arange(D.shape[1] - 1, D.shape[1] // 2, -1)
else:
pos_freq = np.arange(1, (D.shape[1] - 1) // 2 + 1)
neg_freq = np.arange(D.shape[1] - 1, (D.shape[1] - 1) // 2, -1)
shift = random_state.rand(D.shape[0], len(pos_freq),
D.shape[2]) * 2 * math.pi
# Shift pos and neg frequencies symmetrically, to keep signal real
F[:, pos_freq, :] *= np.exp(1j * shift)
F[:, neg_freq, :] *= np.exp(-1j * shift)
return np.real(ifft(F, axis=1))
|
d8f3230acdf8b3df98995adaadc92f41497a27ea
| 3,640,128
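A standalone numpy sketch of the phase-randomization idea described above (the snippet's fft/ifft/check_random_state helpers are assumed to come from elsewhere, so plain np.fft is used here):
import numpy as np
rng = np.random.RandomState(0)
x = rng.randn(128)                          # one toy timecourse
F = np.fft.rfft(x)
F[1:-1] *= np.exp(1j * rng.rand(len(F) - 2) * 2 * np.pi)   # randomize non-DC, non-Nyquist phases
x_rand = np.fft.irfft(F, n=len(x))
# Power spectrum (hence autocorrelation) is preserved; temporal alignment is destroyed.
assert np.allclose(np.abs(np.fft.rfft(x_rand)), np.abs(np.fft.rfft(x)))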
|
def monospaced(text):
"""
Convert all contiguous whitespace into single space and strip leading and
trailing spaces.
Parameters
----------
text : str
Text to be re-spaced
Returns
-------
str
Copy of input string with all contiguous white space replaced with
single space " ".
"""
return REGEX_SPACE.sub(' ', text).strip()
|
51f07908dde10ef67bd70b5eb65e03ee832c3755
| 3,640,129
|
def molecule_block(*args, **kwargs):
"""
Generates the TRIPOS Mol2 block for a given molecule, returned as a string
"""
mol = Molecule(*args, **kwargs)
block = mol.molecule_block() + mol.atom_block() + mol.bond_block() + '\n'
return block
|
79ebf821e105666fb81396197fa0f218b2cf3e48
| 3,640,130
|
def setup_test_env(settings_key='default'):
"""Allows easier integration testing by creating RPC and HTTP clients
:param settings_key: Desired server to use
:return: Tuple of RPC client, HTTP client, and thrift module
"""
return RpcClient(handler), HttpClient(), load_module(settings_key)
|
827a71692dd2eb9946db34289dcf48d5b5d4415b
| 3,640,131
|
from typing import List
from typing import Tuple
def calculateCentroid(
pointCloud : List[Tuple[float, float, float]]
) -> Tuple[float, float, float]:
"""Calculate centroid of point cloud.
Arguments
--------------------------------------------------------------------------
pointCloud (float 3-tuple list) -- list of xyz coordinates.
Returns
--------------------------------------------------------------------------
centroid (float 3-tuple) -- centroid of points in point cloud.
"""
numPoints = len(pointCloud)
x, y, z = [], [], []
for point in pointCloud:
x.append(point[0])
y.append(point[1])
z.append(point[2])
x, y, z = sum(x) / numPoints, sum(y) / numPoints, sum(z) / numPoints
return x, y, z
|
0e8d6d578a0a983fe1e68bff22c5cc613503ee76
| 3,640,132
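For example, the centroid of the unit cube's corners is its center:
corners = [(x, y, z) for x in (0.0, 1.0) for y in (0.0, 1.0) for z in (0.0, 1.0)]
assert calculateCentroid(corners) == (0.5, 0.5, 0.5)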
|
def timber_load():
"""
Calculate Timber's IO load since the last call
"""
#io(read_count=454556, write_count=3456, read_bytes=110592, write_bytes=0)
global timber_io_stat
try:
new_stat = p.get_io_counters()
readCount = new_stat.read_count - timber_io_stat.read_count
writeCount = new_stat.write_count - timber_io_stat.write_count
readBytes = new_stat.read_bytes - timber_io_stat.read_bytes
writeBytes = new_stat.write_bytes - timber_io_stat.write_bytes
timber_io_stat = new_stat
return readCount,writeCount,readBytes,writeBytes
except Exception as e:
debug(e)
debug("Timber load data pulled failed", error=True)
|
eebd8c3b0cc48b01f361de0455c34daee5942ea9
| 3,640,133
|
import numpy as np
import pandas as pd
def get_num_uniq_users(csv_file, userid_col):
"""
A Helper function to help get the number of unique users
:param csv_file: path to CSV file
:param userid_col: Column for user ID
:return:
"""
# Read the CSV file using pandas
df = pd.read_csv(csv_file)
    # Count the number of unique user IDs
num = len(np.unique(df[userid_col]))
return num
|
ade25596bb308414c80e1aea87d412bd5a340288
| 3,640,134
|
def generate_graph_batch(n_examples, sample_length):
""" generate all of the training data
Parameters
----------
n_examples: int
        Number of samples
sample_length: int
Length of the samples.
# TODO we should implement samples of different lens as in the DeepMind example.
Returns
-------
res: tuple
(input_data, target_data), each of the elements is a list of entities dicts
"""
input_data = [
graph_data_from_list(np.random.uniform(size=sample_length))
for _ in range(n_examples)
]
target_data = [create_target_data(v, e, conn) for v, e, conn in input_data]
return input_data, target_data
|
8f98b86e069070a44c84368592a023311ebcdc7d
| 3,640,135
|
from zooniverse_web.models import Survey, QuestionResponse, Response, QuestionOption
from zooniverse_web.utility.survey import generate_new_survey
def administration(request):
"""Administration actions ((re)train acton predictor for a new survey)
Parameters
----------
request:
POST request
Returns
-------
render:
django.shortcuts.render (a page to be rendered)
"""
message = None
message_class = None
if request.method == 'POST':
next_action = request.POST.get('submit', None)
if next_action == '(Re)Train Recommender':
previous_survey = Survey.objects.filter(active=True).order_by('-creation_date').first()
if not previous_survey:
survey_created = generate_new_survey()
message_class = 'success'
message = 'New survey created on {}!'.format(survey_created.creation_date)
else:
# Are there any responses for this survey?
try:
for option in QuestionOption.objects.all():
QuestionResponse.objects.filter(
response=Response.objects.get(
status=Response.FINISHED,
survey=previous_survey
),
answer=option.option
)
survey_created = generate_new_survey()
message_class = 'success'
message = 'New survey created on {}!'.format(survey_created.creation_date.date())
except (QuestionOption.DoesNotExist, QuestionResponse.DoesNotExist, Response.DoesNotExist):
message = 'You do not have enough question responses saved yet for the current survey! ' \
'Try again later.'
message_class = 'warning'
                except Exception:
                    message = 'Something went wrong while generating the survey. Please try again. <br />' \
                              'If the problem keeps on occurring, please contact your system administrator.'
message_class = 'danger'
else:
message = ''
message_class = ''
return render(
request,
'administration/administration.html',
{
'message': message,
'message_class': message_class,
}
)
|
7d5a08450c9058f6fd33a13fc4cf6b714bc7e657
| 3,640,136
|
from typing import Counter
def checkout(skus):
"""
Calculate the total amount for the checkout based on the SKUs entered in
:param skus: string, each char is an item
:return: int, total amount of the cart, including special offers
"""
total = 0
counter = Counter(skus)
    # go through the offers (biggest first), and calculate the line total, and any free offers...
for item in counter:
print('item: {}'.format(item))
if item not in ITEMS:
return -1
line_total = 0
free_offer = 0
qty = counter[item]
ordered_offers = sorted(ITEMS[item]['special_offers'], key=lambda k: (k['min_quantity']), reverse=True)
        # does this item have any specials?
for offer in ordered_offers:
# how many can we get of the biggest offer
number_of_offers = qty // offer['min_quantity']
if 'price' in offer:
# how many are left, put in qty for next offer...
number_of_items_in_offer = number_of_offers * offer['min_quantity']
qty -= number_of_items_in_offer
# update the line total
line_total += number_of_offers * offer['price']
elif 'other_free' in offer:
if offer['other_free'] in counter:
# make sure we have the min required items
if counter[item] >= offer['min_quantity']:
other_free = offer['other_free']
# is this full price the correct value? what if we used a multi price?
free_offer = number_of_offers * ITEMS[other_free]['price']
# add any remaining qty as full price to the line_total
line_total += qty * ITEMS[item]['price']
# add the line total, and the free offers to the checkout total
total += line_total
total -= free_offer
return total
|
ad00a9c3e3cd4f34cfd7b5b306d3863decc0751b
| 3,640,137
|
def func_tradeg(filename, hdulist=None, whichhdu=None):
"""Return the fits header value TELRA in degrees.
"""
hdulist2 = None
if hdulist is None:
hdulist2 = fits.open(filename, 'readonly')
else:
hdulist2 = hdulist
telra = fitsutils.get_hdr_value(hdulist2, 'TELRA')
if hdulist is None:
hdulist2.close()
return spmeta.convert_ra_to_deg(telra)
|
4e6751d2eb0ac9e6264f768e932cbd42c2fc2c4e
| 3,640,138
|
def column_indexes(column_names, row_header):
"""項目位置の取得
Args:
column_names (str): column name
row_header (dict): row header info.
Returns:
[type]: [description]
"""
column_indexes = {}
for idx in column_names:
column_indexes[idx] = row_header.index(column_names[idx])
return column_indexes
|
4205e31e91cd64f833abd9ad87a02d91eebc8c61
| 3,640,139
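An illustrative call to column_indexes above, using a hypothetical header/names pair:
header = ["id", "name", "date"]
names = {"NAME": "name", "DATE": "date"}
print(column_indexes(names, header))   # {'NAME': 1, 'DATE': 2}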
|
import logging
import pickle
def dmx_psrs(caplog):
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
caplog.set_level(logging.CRITICAL)
psrs = []
for p in psr_names:
with open(datadir+'/{0}_ng9yr_dmx_DE436_epsr.pkl'.format(p), 'rb') as fin:
psrs.append(pickle.load(fin))
return psrs
|
6bbb5df017374f207d7c9338a737212f5c7e5b23
| 3,640,140
|
def fit_stats(act_map, param, func=KentFunc):
"""Generate fitting statistics from scipy's curve fitting"""
phi_grid, theta_grid = meshgrid(phi_arr, theta_arr)
Xin = np.array([theta_grid.flatten(), phi_grid.flatten()]).T
fval = act_map.flatten()
fpred = func(Xin, *param) # KentFunc
res = fval - fpred
rsquare = 1 - (res**2).mean() / fval.var()
return res.reshape(act_map.shape), rsquare
|
97e4223daf3e140f1a091491f18012840b8c006a
| 3,640,141
|
def conv2d_for_hpool_valid_width_wrapper(inputs,filters,strides,padding,**kwargs):
"""
Wraps tf.layers.conv2d to allow valid convolution across signal width and
'same' convolution across signal height when padding is set to "valid_time"
Arguments:
inputs (TF Tensor): Tensor input.
filters (TF Tensor): Must have the same type as input.
A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels]
strides (int or tuple/list) : An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding (string): One of `"valid"`, `"same"`, or `"valid_time"` (case-insensitive).
kwargs (dictionary): Specifies all other arguments required by
tf.layers.conv2d. Passes these directly to function without modification.
See Tensorflow documentation for further details.
Returns:
(TF Tensor): Output of tf.layers.conv2d.
"""
    #Collects relevant parameters
size=inputs.get_shape()
kernel_size = filters.get_shape()
filter_height = int(kernel_size[0])
in_height = int(size[1])
#Calculates according to SAME padding formula
if (in_height % strides[0] == 0):
pad_along_height = max(filter_height - strides[0], 0)
else:
pad_along_height = max(filter_height - (in_height % strides[0]), 0)
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
#Pads signal if VALID_TIME is selected and padding is necessary
    #Otherwise, pass inputs through and allow specified convolution
if pad_along_height == 0 or padding.upper() != 'VALID_TIME':
padding = 'VALID' if padding.upper() == 'VALID_TIME' else padding
output_tensor = tf.nn.conv2d(inputs,filter=filters,
strides=strides,padding=padding,
**kwargs)
else:
#Pads input tensor and moves conv2d to valid padding
paddings = tf.constant([[0,0],[pad_top, pad_bottom], [0, 0],[0,0]])
input_padded = tf.pad(inputs,paddings)
output_tensor=tf.nn.conv2d(input_padded,filter=filters,
strides=strides, padding="VALID",
**kwargs)
return output_tensor
|
9b4438c687232245e645ea5714e7ad7899ecd98b
| 3,640,142
|
import copy
def resample_cells(tree, params, current_node = 'root', inplace = False):
"""
Runs a new simulation of the cell evolution on a fixed tree
"""
if not inplace:
tree = copy.deepcopy(tree)
for child in tree.successors(current_node):
initial_cell = tree.nodes[current_node]['cell'].deepcopy()
initial_cell.reset_seed()
tree.nodes[child]['cell'] = sim.evolve_cell(initial_cell,
tree.nodes[child]['time_to_parent'],
params)
resample_cells(tree, params, current_node = child, inplace = True)
return tree
|
10cde9abdf3a6271aa20276c3e193b1c93ca7908
| 3,640,143
|
def get_sql_query(table_name: str) -> str:
    """Fetch SQL query file for generation of dim or fact table(s)"""
    with open(f'./models/sql/{table_name}.sql') as f:
        f_sql_query = f.read()
    return f_sql_query
|
fc3308eae51b7d10667a50a0f4ee4e295bfea8d0
| 3,640,144
|
def _map_args(call_node, function):
"""Maps AST call nodes to the actual function's arguments.
Args:
call_node: ast.Call
function: Callable[..., Any], the actual function matching call_node
Returns:
Dict[Text, ast.AST], mapping each of the function's argument names to
the respective AST node.
"""
args = call_node.args
kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
return tf_inspect.getcallargs(function, *args, **kwds)
|
b19befded386e6081be9858c7eb31ffd45c96ef3
| 3,640,145
|
def sub_bases( motif ):
"""
Return all possible specifications of a motif with degenerate bases.
"""
subs = {"W":"[AT]", \
"S":"[CG]", \
"M":"[AC]", \
"K":"[GT]", \
"R":"[AG]", \
"Y":"[CT]", \
"B":"[CGT]", \
"D":"[AGT]", \
"H":"[ACT]", \
"V":"[ACG]", \
"N":"[ACGTN]"}
    for symbol, sub in subs.items():
if motif.find(symbol) > -1:
motif = motif.replace(symbol, sub)
return motif
|
10ff2ea1959aba103f1956398afb5f1d8801edd7
| 3,640,146
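For example, with the degenerate-base table above (and the Python 3 `.items()` iteration), degenerate symbols expand to character classes:
assert sub_bases("ACW") == "AC[AT]"
assert sub_bases("GAN") == "GA[ACGTN]"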
|
import logging
def parse(input_file_path):
"""
Parse input file
:param input_file_path: input file path
:return: Image list
"""
verticals, horizontals = 0, 0
logging.info("parsing %s", input_file_path)
with open(input_file_path, 'r') as input_file:
nb = int(input_file.readline()) # images nb
images = []
for i, img_txt in enumerate(input_file.readlines()):
data = img_txt.rstrip().split(' ')
orientation = data[0]
tags = data[2:]
images.append(Image(i, orientation, set(tags)))
if orientation == 'V':
verticals += 1
else: # H
horizontals += 1
logging.info('parsing %s done', input_file_path)
logging.info('%d images found (%d V,%d H)', nb, verticals, horizontals)
return images
|
ecd4fd066d1128f385da59965a93e59c038052bd
| 3,640,147
|
def char(ctx, number):
"""
Returns the character specified by a number
"""
return chr(conversions.to_integer(number, ctx))
|
5c5254978055f690b6801479b180ff39b31e2248
| 3,640,148
|
import re
import logging
async def get_character_name(gear_url, message):
"""
It is *sometimes* the case that discord users don't update their username
to be their character name (eg for alts).
This method renders the gear_url in an HTML session and parses the page
to attempt to find the character's name.
This assumes a specific format of the page: player names are nested in
an h3 element with css class named 'class-[player class]'
Returns the character's name if successful, otherwise returns the message sender's
display name in discord.
"""
name = message.author.display_name
if not re.match(SIXTY_UPGRADES_REGEX, gear_url):
return name
for i in range(MAX_FETCH_CHARACTER_NAME_RETRIES):
try:
asession = AsyncHTMLSession()
webpage = await asession.get(gear_url)
await webpage.html.arender()
query_selector = "h3[class^='class-']"
name = webpage.html.find(query_selector, first=True).text
break
except Exception as e:
logging.error(e)
finally:
await asession.close()
return name
|
cdd18e0123f226d2c59d41bbf39e0dfc02188d73
| 3,640,149
|
import pandas
def get_treant_df(tags, path='.'):
"""Get treants as a Pandas DataFrame
Args:
tags: treant tags to identify the treants
path: the path to search for treants
Returns:
a Pandas DataFrame with the treant name, tags and categories
>>> from click.testing import CliRunner
>>> from toolz.curried import do
>>> with CliRunner().isolated_filesystem() as dir_:
... assert pipe(
... dir_,
... dtr.Treant,
... do(lambda x: x.__setattr__('tags', ['atag'])),
... lambda x: x.uuid[:8],
... lambda x: x == get_treant_df(['atag'], path=dir_).uuid[0]
... )
"""
return pipe(
tags,
get_by_tags(path=path),
lambda x: x.map(get_treant_data),
pandas.DataFrame,
)
|
a5972646e27ffd88d18f1c0d212a2ae081ebe4f1
| 3,640,150
|
def gather_keypoints(keypoints_1, keypoints_2, matches):
"""
Gather matched keypoints in a (n x 4) array,
where each row correspond to a pair of matching
keypoints' coordinates in two images.
"""
res = []
for m in matches:
idx_1 = m.queryIdx
idx_2 = m.trainIdx
pt_1 = keypoints_1[idx_1].pt
pt_2 = keypoints_2[idx_2].pt
row = [pt_1[0], pt_1[1], pt_2[0], pt_2[1]]
res.append(row)
return np.array(res)
|
5abef87c570493b57e81dcddc2732ed541aa6a08
| 3,640,151
|
async def stop():
""" Stop any playing audio. """
Sound.stop()
return Sound.get_state()
|
3c7ea7aae3e8dd7e3b33ddd9beed0ce2182800bc
| 3,640,152
|
def is_numeric(X, compress=True):
"""
Determine whether input is numeric array
Parameters
----------
X: Numpy array
compress: Boolean
Returns
-------
V: Numpy Boolean array if compress is False, otherwise Boolean Value
"""
def is_float(val):
try:
float(val)
except ValueError:
return False
else:
return True
isnumeric = np.vectorize(is_float, otypes=[bool]) # return numpy array
V = isnumeric(X)
if compress:
return np.all(V)
return V
|
ad28657f51680cd193671a6a8a8da6a91390dc15
| 3,640,153
|
def figure_ellipse_fitting(img, seg, ellipses, centers, crits, fig_size=9):
""" show figure with result of the ellipse fitting
:param ndarray img:
:param ndarray seg:
:param [(int, int, int, int, float)] ellipses:
:param [(int, int)] centers:
:param [float] crits:
:param float fig_size:
:return:
>>> img = np.random.random((100, 150, 3))
>>> seg = np.random.randint(0, 2, (100, 150))
>>> ells = np.random.random((3, 5)) * 25
>>> centers = np.random.random((3, 2)) * 25
>>> crits = np.random.random(3)
>>> fig = figure_ellipse_fitting(img[:, :, 0], seg, ells, centers, crits)
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
assert len(ellipses) == len(centers) == len(crits), \
'number of ellipses (%i) and centers (%i) and criteria (%i) ' \
'should match' % (len(ellipses), len(centers), len(crits))
fig, ax = create_figure_by_image(img.shape[:2], fig_size)
assert img.ndim == 2, \
'required image dimension is 2 to instead %s' % repr(img.shape)
ax.imshow(img, cmap=plt.cm.Greys_r)
for i, params in enumerate(ellipses):
c1, c2, h, w, phi = params
rr, cc = ellipse_perimeter(int(c1), int(c2), int(h), int(w), phi)
ax.plot(cc, rr, '.', color=COLORS[i % len(COLORS)],
label='#%i with crit=%d' % ((i + 1), int(crits[i])))
ax.legend(loc='lower right')
# plt.plot(centers[:, 1], centers[:, 0], 'ow')
for i in range(len(centers)):
ax.plot(centers[i, 1], centers[i, 0], 'o',
color=COLORS[i % len(COLORS)])
ax.set_xlim([0, seg.shape[1]])
ax.set_ylim([seg.shape[0], 0])
ax.axis('off')
fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
return fig
|
de6b58a01a64c3123f5aad4dfb6935c6c19a041c
| 3,640,154
|
def fmt_bytesize(num: float, suffix: str = "B") -> str:
"""Change a number of bytes in a human readable format.
Args:
num: number to format
suffix: (Default value = 'B')
Returns:
The value formatted in human readable format (e.g. KiB).
"""
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "{:3.1f} {}{}".format(num, unit, suffix)
num /= 1024.0
return "{:.1f} {}{}".format(num, "Yi", suffix)
|
09b36d229856004b6df108ab1ce4ef0a9c1e6289
| 3,640,155
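Example outputs of fmt_bytesize above:
assert fmt_bytesize(0) == "0.0 B"
assert fmt_bytesize(2048) == "2.0 KiB"
assert fmt_bytesize(3 * 1024 ** 3) == "3.0 GiB"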
|
def get_kpoint_mesh(structure: Structure, cutoff_length: float, force_odd: bool = True):
"""Calculate reciprocal-space sampling with real-space cut-off."""
reciprocal_lattice = structure.lattice.reciprocal_lattice_crystallographic
# Get reciprocal cell vector magnitudes
abc_recip = np.array(reciprocal_lattice.abc)
mesh = np.ceil(abc_recip * 2 * cutoff_length).astype(int)
if force_odd:
mesh += (mesh + 1) % 2
return mesh
|
0536b5e2c37b7ba98d240fc3099fad93d246f730
| 3,640,156
|
def resnet_v1_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_101', **kwargs):
"""ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope, **kwargs)
|
138289084d48edd9d9c1096bd790b1479d902ec1
| 3,640,157
|
def is_package_authorized(package_name):
"""
    Get user information if the user is an authorized user in the package config
Returns:
[JSON string]: [user information session]
"""
authorized_users = get_package_admins(package_name)
user_info = get_user_info()
user_dict = j.data.serializers.json.loads(user_info)
username = user_dict["username"]
# if the package doesn't include admins then allow any authenticated user
if authorized_users and not any([username in authorized_users, username in j.core.identity.me.admins]):
return abort(403)
return user_info
|
59b89ebb9c8579d61a18a194e7f5f4bd41d738b6
| 3,640,158
|
def submit_search_query(query_string, query_limit, query_offset,
class_resource):
"""
Submit a search query request to the RETS API
"""
search_result = class_resource.search(
query='%s' % query_string, limit=query_limit, offset=query_offset)
return search_result
|
f8c30c86f7ff7c33fc96b26b1491ddaa48710fbc
| 3,640,159
|
def one_hot_encode(df):
"""
desc : one hot encodes categorical cols
args:
df (pd.DataFrame) : stroke dataframe
returns:
df (pd.DataFrame) : stroke dataframe with one_hot_encoded columns
"""
# extract categorical columns
stroke_data = df.copy()
cat_cols = stroke_data.select_dtypes(include = ["object"])
cat_vals = cat_cols.values
cat_cols_names = cat_cols.columns
enc = OneHotEncoder(sparse = False)
encoded_vals = enc.fit_transform(cat_vals)
encoded_cols = enc.get_feature_names(cat_cols_names)
encoded_cols = pd.DataFrame(encoded_vals,columns = encoded_cols,index = cat_cols.index)
#drop non one hot encoded cols
stroke_data.drop(columns = cat_cols_names, axis = 1, inplace = True)
#add encoded columns
stroke_data = pd.concat([stroke_data,encoded_cols], axis = 1)
#print(stroke_data.shape)
print(stroke_data)
return stroke_data
|
6895dfbc4bb57d5e8d9a5552e2ac7fcb94e07434
| 3,640,160
|
def assert_increasing(a):
"""Utility function for enforcing ascending values.
This function's handle can be supplied as :py:kwarg:`post_method` to a
:py:func:`processed_proprty <pyproprop>` to enforce values within a
:py:type:`ndarray <numpy>` are in ascending order. This is useful for
enforcing time guesses to be sequential.
"""
if (a is not None) and (not np.all(np.diff(a) >= 0)):
msg = f"Elements in {a} must be in ascending numerical order."
raise ValueError(msg)
return a
|
f1ded37b40686cf400da23f567880e73180a78fe
| 3,640,161
|
def copy_to_device(device,
remote_path,
local_path='harddisk:',
server=None,
protocol='http',
vrf=None,
timeout=300,
compact=False,
use_kstack=False,
fu=None,
http_auth=True,
**kwargs):
"""
Copy file from linux server to the device.
Args:
device (Device): Device object
remote_path (str): remote file path on the server
local_path (str): local file path to copy to on the device (default: harddisk:)
server (str): hostname or address of the server (default: None)
protocol(str): file transfer protocol to be used (default: http)
vrf (str): vrf to use (optional)
timeout(int): timeout value in seconds, default 300
compact(bool): compress image option for n9k, defaults False
fu(obj): FileUtils object to use instead of creating one. Defaults to None.
use_kstack(bool): Use faster version of copy, defaults False
Not supported with a file transfer protocol
prompting for a username and password
http_auth (bool): Use http authentication (default: True)
Returns:
None
If the server is not specified, a HTTP server will be spawned
on the local system and serve the directory of the file
specified via remote_path and the copy operation will use http.
If the device is connected via CLI proxy (unix jump host) and the proxy has
'socat' installed, the transfer will be done via the proxy automatically.
"""
return generic_copy_to_device(device=device,
remote_path=remote_path,
local_path=local_path,
server=server,
protocol=protocol,
vrf=vrf,
timeout=timeout,
compact=compact,
use_kstack=use_kstack,
fu=fu,
http_auth=http_auth,
**kwargs)
|
762ef928656473458e0fee8dc47c1a581103ed0e
| 3,640,162
|
import os
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.gdrive')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'gdrive.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
credentials = tools.run_flow(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
|
51a17757d3d764c090efc0b8b213e9e6e9abca3d
| 3,640,163
|
def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, protocol.typeinfos)
return decoder.instance(protocol.replay_header_typeid)
|
1fcee7900a5c0c310e67afe31a154b8310da7089
| 3,640,164
|
def _generate_indexed(array: IndexedArray) -> str:
"""Generate an indexed Bash array."""
return (
"("
+ " ".join(
f"[{index}]={_generate_string(value)}"
for index, value in enumerate(array)
if value is not None
)
+ ")"
)
|
2443b3c6be74684360c395995b3d16d4ebecf1d8
| 3,640,165
|
from datetime import datetime, timedelta
def up_date(dte, r_quant, str_unit, bln_post_colon):
""" Adjust a date in the light of a (quantity, unit) tuple,
taking account of any recent colon
"""
if str_unit == 'w':
dte += timedelta(weeks=r_quant)
elif str_unit == 'd':
dte += timedelta(days=r_quant)
elif str_unit == 'h':
dte += timedelta(hours=r_quant)
elif str_unit == 'm':
dte += timedelta(minutes=r_quant)
elif str_unit in ('Y', 'y'):
if r_quant > 500: # jul 2019 vs jul 17
r_year = r_quant
else:
r_year = datetime.now().year + r_quant
try:
dte = datetime.replace(dte, year=int(r_year))
except ValueError:
dte = datetime.replace(dte, day=28, month=2,
year=int(datetime.now().year + r_quant))
elif str_unit == 'H':
dte = datetime.replace(dte, hour=int(r_quant), second=0, microsecond=0)
elif str_unit == 'M':
dte = datetime.replace(dte, minute=int(r_quant),
second=0, microsecond=0)
elif str_unit == 'a':
if not bln_post_colon:
dte = datetime.replace(dte, hour=int(r_quant), minute=0,
second=0, microsecond=0)
elif str_unit == 'p':
if bln_post_colon: # adjust by 12 hours if necessary
if dte.hour < 12:
dte = datetime.replace(dte, hour=dte.hour+12)
else:
p_quant = r_quant
if p_quant < 12:
p_quant += 12
dte = datetime.replace(dte, hour=int(p_quant), minute=0,
second=0, microsecond=0)
elif (len(str_unit) >= 3) and (STR_MONTHS.find(str_unit) != -1):
        dte = datetime.replace(dte, month=(STR_MONTHS.index(str_unit) + 3) // 3,
                               day=int(r_quant), second=0, microsecond=0)
# refers to this year or next year ? (assume not past)
dte_today = datetime.today().replace(hour=0, minute=0, \
second=0, microsecond=0)
if dte < dte_today:
dte = dte.replace(year=(dte_today.year+1))
return dte
|
684b09e5d37bf0d3445262b886c73188d35425ef
| 3,640,166
|
from typing import MutableMapping
def read_options() -> Options:
"""
read command line arguments and options
Returns:
option class(Options)
Raises:
NotInspectableError: the file or the directory does not exists.
"""
args: MutableMapping = docopt(__doc__)
schema = Schema({
"<path>": And(Use(get_path), lambda path: path.is_file() or path.is_dir(),
error=f"The specified path {args['<path>']}"
" does not exist.\n")
})
try:
args = schema.validate(args)
except SchemaError as e:
raise NotInspectableError(e.args[0])
return Options(args["<path>"])
|
25cd3c29f6e206fd97334f7a48d267680a9e553c
| 3,640,167
|
def test_generator_single_input_2():
"""
Feature: Test single str input
Description: input str
Expectation: success
"""
def generator_str():
for i in range(64):
yield chr(ord('a') + i)
class RandomAccessDatasetInner:
def __init__(self):
self.__data = [i for i in range(64)]
def __getitem__(self, item):
return chr(ord('a') + self.__data[item])
def __len__(self):
return 64
class SequentialAccessDataset:
def __init__(self):
self.__data = [i for i in range(64)]
self.__index = 0
def __next__(self):
if self.__index >= 64:
raise StopIteration
item = chr(ord('a') + self.__data[self.__index])
self.__index += 1
return item
def __iter__(self):
self.__index = 0
return self
def __len__(self):
return 64
def assert_generator_single_input_2(data):
# apply dataset operations
data1 = ds.GeneratorDataset(data, ["data"], shuffle=False)
i = 0
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary
s = chr(ord('a') + i)
golden = np.array(bytes(s, encoding='utf8'))
np.testing.assert_array_equal(item["data"], golden)
i = i + 1
assert_generator_single_input_2(generator_str)
assert_generator_single_input_2(RandomAccessDatasetInner())
assert_generator_single_input_2(SequentialAccessDataset())
|
d251279c0740b52c9d32c2ae572f6dbdf32f36ea
| 3,640,168
|
from math import inf
def SLINK(Dataset, d):
"""function to execute SLINK algo
Args:
Dataset(List) :- list of data points, who are also lists
d(int) :- dimension of data points
Returns:
res(Iterables) :- list of triples sorted by the second element,
first element is index of point,
the other two are pointer representations of dendrograms noting the
lowest level at which i is no longer the last point in his cluster and
the last point in the cluster which i then joins
Heights(Iterables) :- list of the second element of res' triples
"""
n = len(Dataset)
A = [inf for i in range(n)]
B = [0 for i in range(n)]
# initialisation
A[0] = inf
B[0] = 0
for k in range(1, n):
B[k] = k
A[k] = inf
M = [0 for i in range(k + 1)]
for i in range(k):
M[i] = metrics(Dataset[i], Dataset[k])
for i in range(k):
if(A[i] >= M[i]):
M[B[i]] = min(M[B[i]], A[i])
A[i] = M[i]
B[i] = k
if(A[i] < M[i]):
M[B[i]] = min(M[B[i]], M[i])
for i in range(k):
if(A[i] >= A[B[i]]):
B[i] = k
res = [(index, i, j) for index, (i, j) in enumerate(zip(A, B))]
res = sorted(res, key=lambda x: x[1])
Heights = [triple[1] for triple in res]
return(res, Heights)
|
d10a3f8cb3e6d81649bebd4a45f5be79d206f1be
| 3,640,169
|
def static_file(path='index.html'):
"""static_file"""
return app.send_static_file(path)
|
5c3f2d423d029a8e7bb8db5fbe3c557f7a6aa9c3
| 3,640,170
|
from functools import wraps
def lazy_property(function):
""" Decorator to make a lazily executed property """
attribute = '_' + function.__name__
@property
@wraps(function)
def wrapper(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return wrapper
|
db1d62eb66a018bc166b67fe9c2e25d671261f77
| 3,640,171
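A minimal usage sketch for the lazy_property decorator above; the Circle class is purely illustrative.
class Circle:
    def __init__(self, r):
        self.r = r

    @lazy_property
    def area(self):
        print("computing...")          # runs only on first access
        return 3.14159 * self.r ** 2

c = Circle(2.0)
c.area   # prints "computing..." and caches the value as c._area
c.area   # served from the cached attribute, no recomputation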
|
from pathlib import Path
def basename(fname):
"""
Return file name without path.
Examples
--------
>>> fname = '../test/data/FSI.txt.zip'
>>> print('{}, {}, {}'.format(*basename(fname)))
../test/data, FSI.txt, .zip
"""
if not isinstance(fname, path_type):
fname = Path(fname)
path, name, ext = fname.parent, fname.stem, fname.suffix
return path, name, ext
|
55cd53ec71e4e914493129e40fa216ddcdbe8083
| 3,640,172
|
def validate_lockstring(lockstring):
"""
Validate so lockstring is on a valid form.
Args:
lockstring (str): Lockstring to validate.
Returns:
is_valid (bool): If the lockstring is valid or not.
error (str or None): A string describing the error, or None
if no error was found.
"""
global _LOCK_HANDLER
if not _LOCK_HANDLER:
_LOCK_HANDLER = LockHandler(_ObjDummy())
return _LOCK_HANDLER.validate(lockstring)
|
0feb67597e31667013ab182159c8433ae4a80346
| 3,640,173
|
from typing import Iterable
def decode_geohash_collection(geohashes: Iterable[str]):
"""
Return collection of geohashes decoded into location coordinates.
Parameters
----------
geohashes: Iterable[str]
Collection of geohashes to be decoded
Returns
-------
Iterable[Tuple[float, float]]
Collection of location coordinates in Latitude/Longitude
"""
locations = []
for geohash in geohashes:
exact_location = decode_geo_hash(geohash)
locations.append((exact_location[0], exact_location[1]))
return locations
|
2e673c852c7ac2775fd29b32243bdc8b1aa83d77
| 3,640,174
|
import numpy as np
import scipy.sparse as sp
def renormalize_sparse(A: sp.spmatrix) -> sp.spmatrix:
"""Get (D**-0.5) * A * (D ** -0.5), where D is the diagonalized row sum."""
A = sp.coo_matrix(A)
A.eliminate_zeros()
rowsum = np.array(A.sum(1))
assert np.all(rowsum >= 0)
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return d_mat_inv_sqrt.dot(A).dot(d_mat_inv_sqrt)
# return A.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
|
33122bcf018dba842f04e044cf9e799860a56042
| 3,640,175
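A quick check of the symmetric normalization above on a tiny adjacency matrix:
import numpy as np
import scipy.sparse as sp
A = sp.csr_matrix(np.array([[0., 1., 1.],
                            [1., 0., 0.],
                            [1., 0., 0.]]))
A_norm = renormalize_sparse(A).toarray()
# Row sums are [2, 1, 1], so the (0, 1) entry becomes 1 / sqrt(2 * 1).
assert np.isclose(A_norm[0, 1], 1 / np.sqrt(2))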
|
from typing import Union
import torch
import os
def load_gloria(
name: str = "gloria_resnet50",
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
):
"""Load a GLoRIA model
Parameters
----------
name : str
A model name listed by `gloria.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
Returns
-------
gloria_model : torch.nn.Module
The GLoRIA model
"""
# warnings
if name in _MODELS:
ckpt_path = _MODELS[name]
elif os.path.isfile(name):
ckpt_path = name
else:
raise RuntimeError(
f"Model {name} not found; available models = {available_models()}"
)
if not os.path.exists(ckpt_path):
raise RuntimeError(
f"Model {name} not found.\n"
+ "Make sure to download the pretrained weights from \n"
+ " https://stanfordmedicine.box.com/s/j5h7q99f3pfi7enc0dom73m4nsm6yzvh \n"
+ " and copy it to the ./pretrained folder."
)
ckpt = torch.load(ckpt_path, map_location=device)
cfg = ckpt["hyper_parameters"]
ckpt_dict = ckpt["state_dict"]
fixed_ckpt_dict = {}
for k, v in ckpt_dict.items():
new_key = k.split("gloria.")[-1]
fixed_ckpt_dict[new_key] = v
ckpt_dict = fixed_ckpt_dict
gloria_model = builder.build_gloria_model(cfg).to(device)
gloria_model.load_state_dict(ckpt_dict)
return gloria_model
|
5ec060dd7e430244b597891d10cb19bc176a727f
| 3,640,176
|
import subprocess
import os
import re
def is_enabled():
"""
Check if `ufw` is enabled
:returns: True if ufw is enabled
"""
output = subprocess.check_output(['ufw', 'status'],
universal_newlines=True,
env={'LANG': 'en_US',
'PATH': os.environ['PATH']})
m = re.findall(r'^Status: active\n', output, re.M)
return len(m) >= 1
|
fa922cdb87e35e1fc7cf77c5eba6a7da651ea070
| 3,640,177
|
def _fetch(
self,
targets=None,
jobs=None,
remote=None,
all_branches=False,
show_checksums=False,
with_deps=False,
all_tags=False,
recursive=False,
):
"""Download data items from a cloud and imported repositories
Returns:
int: number of successfully downloaded files
Raises:
DownloadError: thrown when there are failed downloads, either
during `cloud.pull` or trying to fetch imported files
config.NoRemoteError: thrown when downloading only local files and no
remote is configured
"""
used = self.used_cache(
targets,
all_branches=all_branches,
all_tags=all_tags,
with_deps=with_deps,
force=True,
remote=remote,
jobs=jobs,
recursive=recursive,
)
downloaded = 0
failed = 0
try:
downloaded += self.cloud.pull(
used, jobs, remote=remote, show_checksums=show_checksums
)
except NoRemoteError:
if not used.external and used["local"]:
raise
except DownloadError as exc:
failed += exc.amount
for (repo_url, repo_rev), files in used.external.items():
d, f = _fetch_external(self, repo_url, repo_rev, files)
downloaded += d
failed += f
if failed:
raise DownloadError(failed)
return downloaded
|
18238bb1c4c5bd0772757013173e26645d5cdf5a
| 3,640,178
|
from maya import OpenMaya
def get_connected_input_geometry(blend_shape):
"""
Return an array of blend_shape's input plugs that have an input connection.
pm.listConnections should do this, but it has bugs when the input array is sparse.
"""
results = []
blend_shape_plug = _get_plug_from_node('%s.input' % blend_shape)
num_input_elements = blend_shape_plug.evaluateNumElements()
for idx in range(num_input_elements):
input = blend_shape_plug.elementByPhysicalIndex(idx)
input_geometry_attr = OpenMaya.MFnDependencyNode(input.node()).attribute('inputGeometry')
input_geometry_plug = input.child(input_geometry_attr)
conns = OpenMaya.MPlugArray()
        input_geometry_plug.connectedTo(conns, True, False)
if conns.length():
results.append(input_geometry_plug.info())
return results
|
c69421ce452a1416006db0f22d279a6ed9694ebc
| 3,640,179
|
import torch
from typing import Tuple
def get_median_and_stdev(arr: torch.Tensor) -> Tuple[float, float]:
"""Returns the median and standard deviation from a tensor."""
return torch.median(arr).item(), torch.std(arr).item()
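# Quick usage note (my own example): for an even-length tensor torch.median returns
# the lower of the two middle values, and torch.std uses the unbiased estimator.
median, stdev = get_median_and_stdev(torch.tensor([1.0, 2.0, 3.0, 4.0]))
# median == 2.0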
|
d8fca5a97f00d14beecaa4b508442bc7a3637f86
| 3,640,180
|
def connect(user, host, port):
"""Create and return a new SSHClient connected to the given host."""
client = ssh.SSHClient()
if not env.disable_known_hosts:
client.load_system_host_keys()
if not env.reject_unknown_hosts:
client.set_missing_host_key_policy(ssh.AutoAddPolicy())
connected = False
password = get_password()
while not connected:
try:
client.connect(
hostname=host,
port=int(port),
username=user,
password=password,
key_filename=env.key_filename,
timeout=10,
allow_agent=not env.no_agent,
look_for_keys=not env.no_keys
)
connected = True
return client
# BadHostKeyException corresponds to key mismatch, i.e. what on the
# command line results in the big banner error about man-in-the-middle
# attacks.
except ssh.BadHostKeyException:
abort("Host key for %s did not match pre-existing key! Server's key was changed recently, or possible man-in-the-middle attack." % env.host)
# Prompt for new password to try on auth failure
except (
ssh.AuthenticationException,
ssh.PasswordRequiredException,
ssh.SSHException
        ) as e:
# For whatever reason, empty password + no ssh key or agent results
# in an SSHException instead of an AuthenticationException. Since
# it's difficult to do otherwise, we must assume empty password +
# SSHException == auth exception. Conversely: if we get
# SSHException and there *was* a password -- it is probably
# something non auth related, and should be sent upwards.
if e.__class__ is ssh.SSHException and password:
abort(str(e))
# Otherwise, assume an auth exception, and prompt for new/better
# password.
#
# Paramiko doesn't handle prompting for locked private keys (i.e.
# keys with a passphrase and not loaded into an agent) so we have
# to detect this and tweak our prompt slightly. (Otherwise,
# however, the logic flow is the same, because Paramiko's connect()
# method overrides the password argument to be either the login
# password OR the private key passphrase. Meh.)
#
# NOTE: This will come up if you normally use a
# passphrase-protected private key with ssh-agent, and enter an
# incorrect remote username, because Paramiko:
#
# * Tries the agent first, which will fail as you gave the wrong
# username, so obviously any loaded keys aren't gonna work for a
# nonexistent remote account;
# * Then tries the on-disk key file, which is passphrased;
# * Realizes there's no password to try unlocking that key with,
# because you didn't enter a password, because you're using
# ssh-agent;
# * In this condition (trying a key file, password is None)
# Paramiko raises PasswordRequiredException.
#
text = None
if e.__class__ is ssh.PasswordRequiredException:
# NOTE: we can't easily say WHICH key's passphrase is needed,
# because Paramiko doesn't provide us with that info, and
# env.key_filename may be a list of keys, so we can't know
# which one raised the exception. Best not to try.
prompt = "[%s] Passphrase for private key"
text = prompt % env.host_string
password = prompt_for_password(text, user=user)
# Update env.password, env.passwords if empty
set_password(password)
# Ctrl-D / Ctrl-C for exit
except (EOFError, TypeError):
# Print a newline (in case user was sitting at prompt)
print('')
sys.exit(0)
# Handle timeouts
except timeout:
abort('Timed out trying to connect to %s' % host)
# Handle DNS error / name lookup failure
except gaierror:
abort('Name lookup failed for %s' % host)
# Handle generic network-related errors
# NOTE: In 2.6, socket.error subclasses IOError
        except socketerror as e:
abort('Low level socket error connecting to host %s: %s' % (
                host, e.strerror)
)
|
a13a3ce5e80f603f21933c9e6ad48b073368b97e
| 3,640,181
|
import numpy as np
from numpy import exp
import scipy.integrate
def _chf_to_pdf(t, x, chf, **chf_args):
"""
Estimate by numerical integration, using ``scipy.integrate.quad``,
of the probability distribution described by the given characteristic
function. Integration errors are not reported/checked.
Either ``t`` or ``x`` must be a scalar.
"""
t = np.asarray(t)
x = np.asarray(x)
def f(u, t, x):
return np.real(
exp(-1j*u*x) / (2*np.pi) * chf(t, u, **chf_args))
if t.shape != ():
pdf = np.empty(t.shape)
for i in np.ndindex(t.shape):
pdf[i] = scipy.integrate.quad(
lambda u: f(u, t[i], x), -np.inf, np.inf)[0]
else:
pdf = np.empty(x.shape)
for i in np.ndindex(x.shape):
pdf[i] = scipy.integrate.quad(
lambda u: f(u, t, x[i]), -np.inf, np.inf)[0]
return pdf
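# Sanity-check sketch (my own example, assuming the imports above): the
# characteristic function of Brownian motion at time t is exp(-t*u**2/2), so the
# density recovered at t=1 should be close to the standard normal pdf.
def _bm_chf(t, u):
    return np.exp(-0.5 * t * u ** 2)
x_grid = np.linspace(-3.0, 3.0, 7)
pdf_est = _chf_to_pdf(1.0, x_grid, _bm_chf)
# pdf_est[3] (x = 0) should be close to 1/sqrt(2*pi) ~= 0.3989.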
|
7022d335d39c25b73203b63b40b1ebb8c178154b
| 3,640,182
|
import re
import logging
def ParseTraceLocationLine(msg):
"""Parse the location line of a stack trace. If successfully parsed, returns (filename, line, method)."""
parsed = re.match(kCodeLocationLine, msg)
if not parsed:
return None
try:
return (parsed.group(1), parsed.group(2), parsed.group(3))
except IndexError as e:
logging.warning('RE matched "%s", but extracted wrong number of items: %r' % (msg, e))
return None
|
15e74bb26a7c213cf24171ffdfa32b8d4e6d818a
| 3,640,183
|
import pandas
import os
import fmri
def get_rl_params(num, similarity_name, reward_name):
""" Get RL model parameters (alpha and beta) based on <num>'s data.
<similarity_name> is the name of the similarity metric you want to
use, see fmri.catreward.roi.data.get_similarity_data() for details.
<reward_name> is the name of the data to be used as rewards in the
    model. Options are 'acc' ({0,1}, i.e. behavioral accuracy) or
    'gl' ({-1,1}, short for gain/lose). """
    if similarity_name is None:
similarity_name = 'none'
table = pandas.read_table(
os.path.join(fmri.__path__[0],'catreward', 'rl',
'101_118_rl_params.txt'), sep=',')
stable = table[table['sub'] == num]
stable_r = stable[stable['reward'] == reward_name]
stable_r_s = stable_r[stable_r['sim'] == similarity_name]
    return stable_r_s.iloc[:, 0:2].values[0].tolist()
|
08bc39fbc02b2fa3d459a3996a87e294e0ef1d94
| 3,640,184
|
import functools
def return_arg_type(at_position):
"""
Wrap the return value with the result of `type(args[at_position])`
"""
def decorator(to_wrap):
@functools.wraps(to_wrap)
def wrapper(*args, **kwargs):
result = to_wrap(*args, **kwargs)
ReturnType = type(args[at_position])
return ReturnType(result)
return wrapper
return decorator
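# Illustrative usage (my own example, not from the original source): the wrapper
# re-casts the result to the type of the argument at the given position, so a
# tuple in gives a tuple back.
@return_arg_type(0)
def double_each(seq):
    return [x * 2 for x in seq]
assert double_each((1, 2, 3)) == (2, 4, 6)   # tuple in, tuple out
assert double_each([1, 2, 3]) == [2, 4, 6]   # list in, list out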
|
30bf4e4a46b0b64b6cb5752286a13c0e6f7618df
| 3,640,185
|
def extend(s, var, val):
"""Copy dict s and extend it by setting var to val; return copy."""
try: # Python 3.5 and later
return eval('{**s, var: val}')
except SyntaxError: # Python 3.4
s2 = s.copy()
s2[var] = val
return s2
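# Illustrative usage (my own example): the original mapping is left untouched and
# the returned copy carries the new binding.
s1 = {'x': 1}
s2 = extend(s1, 'y', 2)
assert s2 == {'x': 1, 'y': 2} and s1 == {'x': 1}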
|
919e7102bf7f8766d9ddb9ea61a07ddd020d1bb8
| 3,640,186
|
from typing import Optional
from typing import Any
from inspect import iscoroutinefunction
def rx_reduce(observable: Observable, accumulator: AccumulatorOperator, seed: Optional[Any] = None) -> Observable:
"""Create an observable which reduce source with accumulator and seed value.
Args:
observable (Observable): source
accumulator (AccumulatorOperator): accumulator function (two argument, one result) async or sync.
seed (Optional[Any]): optional seed value (default none)
Returns:
(Observable): a new observable
"""
is_awaitable = iscoroutinefunction(accumulator)
async def _subscribe(an_observer: Observer) -> Subscription:
nonlocal is_awaitable
_buffer = seed
async def _on_next(item: Any):
nonlocal _buffer
_buffer = await accumulator(_buffer, item) if is_awaitable else accumulator(_buffer, item)
async def _on_completed():
nonlocal _buffer
await an_observer.on_next(_buffer)
await an_observer.on_completed()
return await observable.subscribe(an_observer=rx_observer_from(observer=an_observer, on_next=_on_next, on_completed=_on_completed))
return rx_create(subscribe=_subscribe)
|
600d5c47fd7b29ead5293c7a172c8ebdb026706a
| 3,640,187
|
import logging
import torch
from PIL import Image
def predict(image: Image.Image):
""" Take an image and run it through the inference model. This returns a ModelOutput object with all of the
information that the model returns. Furthermore, bounding box coordinates are normalized.
"""
logging.debug("Sending image to model for inference ...")
width, height = image.size
model = get_inference_model()
model.eval()
with torch.no_grad():
model_input = image_to_model_input(image)
model_output = model([model_input])[0]
logging.debug(f"Model returned {len(model_output)} fields.")
bounding_boxes = model_output["instances"].get_fields()["pred_boxes"].to("cpu").tensor.tolist()
confidences = model_output["instances"].get_fields()["scores"].to("cpu").tolist()
classes = model_output["instances"].get_fields()["pred_classes"].to("cpu").tolist()
classes = [CATEGORIES[num] for num in classes]
# Normalize all the bounding box coordinates to between 0 and 1.
normalized_bounding_boxes = []
for box in bounding_boxes:
normalized_box = (
box[0] / float(width), box[1] / float(height), box[2] / float(width), box[3] / float(height))
normalized_bounding_boxes.append(BoundingBox(upper_left_x=normalized_box[0],
upper_left_y=normalized_box[1],
lower_right_x=normalized_box[2],
lower_right_y=normalized_box[3]))
return ModelOutput(bounding_boxes=normalized_bounding_boxes,
confidences=confidences,
classes=classes)
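# Worked example of the normalization step above (hypothetical numbers, not from
# the original code): a detection at pixel box (64, 48, 320, 240) on a 640x480
# image maps to fractional coordinates in [0, 1].
example_width, example_height = 640, 480
example_box = [64, 48, 320, 240]
example_normalized = (example_box[0] / example_width, example_box[1] / example_height,
                      example_box[2] / example_width, example_box[3] / example_height)
# example_normalized == (0.1, 0.1, 0.5, 0.5)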
|
eb90a598fb573dd8f345c62408bbf921f765a1f3
| 3,640,188
|
def minimum_filter(
input,
size=None,
footprint=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Multi-dimensional minimum filter.
Args:
input (cupy.ndarray): The input array.
size (int or sequence of int): One of ``size`` or ``footprint`` must be
provided. If ``footprint`` is given, ``size`` is ignored. Otherwise
``footprint = cupy.ones(size)`` with ``size`` automatically made to
match the number of dimensions in ``input``.
footprint (cupy.ndarray): a boolean array which specifies which of the
elements within this shape will get passed to the filter function.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int or sequence of int): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.minimum_filter`
"""
return _min_or_max_filter(
input, size, footprint, None, output, mode, cval, origin, "min"
)
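# Equivalent CPU illustration (my addition, not from the original module): the SciPy
# function referenced in the docstring has the same semantics; with CuPy the call is
# identical but takes a cupy.ndarray.
import numpy as np
from scipy import ndimage
a = np.array([[3, 1, 4],
              [1, 5, 9],
              [2, 6, 5]])
ndimage.minimum_filter(a, size=3)
# -> [[1 1 1]
#     [1 1 1]
#     [1 1 5]]   (3x3 neighborhood minima with 'reflect' borders)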
|
fbbda2abbd470b98cb03158377256c7397c17da6
| 3,640,189
|
import copy
import numpy as np
from scipy.signal import butter, filtfilt
def lowpass(data, cutoff=0.25, fs=30, order=2, nyq=0.75):
"""
    Butterworth low-pass filter for a single spectrum or a list of them.
:type data: list[float]
:param data: List of vectors in line format (each line is a vector).
:type cutoff: float
:param cutoff: Desired cutoff frequency of the filter. The default is 0.25.
:type fs: int
:param fs: Sample rate in Hz. The default is 30.
    :type order: int
    :param order: Filter order; a sine wave can be approximately represented as quadratic. The default is 2.
    :type nyq: float
    :param nyq: Nyquist factor; the cutoff is normalized by nyq*fs, and 0.75 is a good starting value. The default is 0.75.
:returns: Filtered data
:rtype: list[float]
"""
y = copy.deepcopy(data) # so it does not change the input list
normal_cutoff = cutoff / (nyq * fs)
b, a = butter(order, normal_cutoff, btype='low', analog=False)
if len(np.array(y).shape) > 1:
for i in range(len(y)):
y[i] = filtfilt(b, a, y[i])
else:
y = filtfilt(b, a, y)
return y
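# Small usage sketch (my own example, assuming the imports above): smooth a noisy
# 0.2 Hz sine sampled at 30 Hz using the default filter parameters.
t = np.linspace(0, 10, 300)
noisy = (np.sin(2 * np.pi * 0.2 * t) + 0.3 * np.random.randn(t.size)).tolist()
smoothed = lowpass(noisy, cutoff=0.25, fs=30)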
|
ac42a32c406b1c5a182a1af805e86bf0b0c0606f
| 3,640,190
|
import pwndbg.color.chain as C
import pwndbg.color.memory as M
import pwndbg.enhance
import pwndbg.symbol
def format(value, limit=LIMIT, code=True, offset=0, hard_stop=None, hard_end=0):
"""
Recursively dereferences an address into string representation, or convert the list representation
of address dereferences into string representation.
Arguments:
value(int|list): Either the starting address to be sent to get, or the result of get (a list)
limit(int): Number of valid pointers
code(bool): Hint that indicates the value may be an instruction
offset(int): Offset into the address to get the next pointer
hard_stop(int): Value to stop on
        hard_end: Value to append when hard_stop is reached: None, the hard_stop value itself, or a string.
Returns:
A string representing pointers of each address and reference
Strings format: 0x0804a10 —▸ 0x08061000 ◂— 0x41414141
"""
limit = int(limit)
# Allow results from get function to be passed to format
if isinstance(value, list):
chain = value
else:
chain = get(value, limit, offset, hard_stop, hard_end)
arrow_left = C.arrow(' %s ' % config_arrow_left)
arrow_right = C.arrow(' %s ' % config_arrow_right)
# Colorize the chain
rest = []
for link in chain:
symbol = pwndbg.symbol.get(link) or None
if symbol:
symbol = '%#x (%s)' % (link, symbol)
rest.append(M.get(link, symbol))
# If the dereference limit is zero, skip any enhancements.
if limit == 0:
return rest[0]
# Otherwise replace last element with the enhanced information.
rest = rest[:-1]
# Enhance the last entry
# If there are no pointers (e.g. eax = 0x41414141), then enhance
# the only element there is.
if len(chain) == 1:
enhanced = pwndbg.enhance.enhance(chain[-1], code=code)
# Otherwise, the last element in the chain is the non-pointer value.
    # We want to enhance the last pointer value. If an offset was used,
    # the chain failed at that offset, so display that offset.
elif len(chain) < limit + 1:
enhanced = pwndbg.enhance.enhance(chain[-2] + offset, code=code)
else:
enhanced = C.contiguous('%s' % config_contiguous)
if len(chain) == 1:
return enhanced
return arrow_right.join(rest) + arrow_left + enhanced
|
d8eae5b2cc8dbab9a26d7248faf17fe638f5e603
| 3,640,191
|
from json import dumps
def sogs_put(client, url, json, user):
"""
PUTs a test `client` request to `url` with the given `json` as body and X-SOGS-* signature
headers signing the request for `user`.
"""
data = dumps(json).encode()
return client.put(
url, data=data, content_type='application/json', headers=x_sogs_for(user, "PUT", url, data)
)
|
7bb3f34d7aff75f422b898ba6eee2908c8bc4ca4
| 3,640,192
|
from tqdm import tqdm
import requests
def get_results(heading):
"""Get all records under a given record heading from PubChem/
Update results from those records."""
page = 1
results = {}
with tqdm(total=100) as pbar:
while True:
url = (f"https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/annotations/heading/"
f"JSON?heading_type=Compound&heading={heading}&page={page}")
response = requests.get(url)
records = response.json()
update_results(records, results)
totalPages = records['Annotations']['TotalPages']
            if page == 1:
pbar.reset(total=totalPages)
pbar.set_description("%d CIDs described" % len(results))
pbar.update()
page += 1
if page > totalPages:
break
return results
|
fba50023290dfde12a54f6d7792f578ecc66e3d9
| 3,640,193
|
from gensim.corpora import Dictionary
def create_dictionary(documents):
"""Creates word dictionary for given corpus.
Parameters:
        documents (list of list of str): tokenized documents
    Returns:
        dictionary (gensim.corpora.Dictionary): gensim dictionary of words from dataset
"""
dictionary = Dictionary(documents)
dictionary.compactify()
return dictionary
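# Brief usage sketch (my own example, assuming the gensim import above): the
# function expects tokenized documents.
docs = [["the", "cat", "sat"], ["the", "dog", "ran"]]
word_dict = create_dictionary(docs)
print(word_dict.token2id)  # each unique token mapped to an integer id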
|
bba8e6af363da3fcdde983c6ebf52432323ccf96
| 3,640,194
|
def clone_bitarray(other, src=None):
"""
Fast clone of the bit array. The actual function used depends on the implementation
:param other:
:param src:
:return:
"""
if FAST_IMPL_PH4 and src is not None:
src.fast_copy(other)
return src
return to_bitarray(other)
|
9196474dff0e6c1b79f9307409c5f351f2c015d7
| 3,640,195
|
from typing import List
from typing import Optional
from typing import Dict
def _create_graph(
expressions: List[expression.Expression],
options: calculate_options.Options,
feed_dict: Optional[Dict[expression.Expression, prensor.Prensor]] = None
) -> "ExpressionGraph":
"""Create graph and calculate expressions."""
expression_graph = OriginalExpressionGraph(expressions)
canonical_graph = CanonicalExpressionGraph(expression_graph)
canonical_graph.calculate_values(options, feed_dict=feed_dict)
return canonical_graph
|
e7f5a9dbaf3c34a3c925c5d390d6ee44fac57062
| 3,640,196
|
import warnings
import numpy as np
from librosa.util.exceptions import ParameterError
def power_to_db(S, ref=1.0, amin=1e-10, top_db=80.0):
"""Convert a power spectrogram (amplitude squared) to decibel (dB) units
This computes the scaling ``10 * log10(S / ref)`` in a numerically
stable way.
Parameters
----------
S : np.ndarray
input power
ref : scalar or callable
If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
`10 * log10(S / ref)`.
Zeros in the output correspond to positions where `S == ref`.
If callable, the reference value is computed as `ref(S)`.
amin : float > 0 [scalar]
minimum threshold for `abs(S)` and `ref`
top_db : float >= 0 [scalar]
threshold the output at `top_db` below the peak:
``max(10 * log10(S)) - top_db``
Returns
-------
S_db : np.ndarray
``S_db ~= 10 * log10(S) - 10 * log10(ref)``
See Also
--------
perceptual_weighting
db_to_power
amplitude_to_db
db_to_amplitude
Notes
-----
This function caches at level 30.
Examples
--------
Get a power spectrogram from a waveform ``y``
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.power_to_db(S**2)
array([[-33.293, -27.32 , ..., -33.293, -33.293],
[-33.293, -25.723, ..., -33.293, -33.293],
...,
[-33.293, -33.293, ..., -33.293, -33.293],
[-33.293, -33.293, ..., -33.293, -33.293]], dtype=float32)
Compute dB relative to peak power
>>> librosa.power_to_db(S**2, ref=np.max)
array([[-80. , -74.027, ..., -80. , -80. ],
[-80. , -72.431, ..., -80. , -80. ],
...,
[-80. , -80. , ..., -80. , -80. ],
[-80. , -80. , ..., -80. , -80. ]], dtype=float32)
Or compare to median power
>>> librosa.power_to_db(S**2, ref=np.median)
array([[-0.189, 5.784, ..., -0.189, -0.189],
[-0.189, 7.381, ..., -0.189, -0.189],
...,
[-0.189, -0.189, ..., -0.189, -0.189],
[-0.189, -0.189, ..., -0.189, -0.189]], dtype=float32)
And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(S**2, sr=sr, y_axis='log')
>>> plt.colorbar()
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.power_to_db(S**2, ref=np.max),
... sr=sr, y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log-Power spectrogram')
>>> plt.tight_layout()
>>> plt.show()
"""
S = np.asarray(S)
if amin <= 0:
raise ParameterError("amin must be strictly positive")
if np.issubdtype(S.dtype, np.complexfloating):
warnings.warn(
"power_to_db was called on complex input so phase "
"information will be discarded. To suppress this warning, "
"call power_to_db(np.abs(D)**2) instead."
)
magnitude = np.abs(S)
else:
magnitude = S
if callable(ref):
# User supplied a function to calculate reference power
ref_value = ref(magnitude)
else:
ref_value = np.abs(ref)
log_spec = 10.0 * np.log10(np.maximum(amin, magnitude))
log_spec -= 10.0 * np.log10(np.maximum(amin, ref_value))
if top_db is not None:
if top_db < 0:
raise ParameterError("top_db must be non-negative")
log_spec = np.maximum(log_spec, log_spec.max() - top_db)
return log_spec
|
bee32e8b9be49d4797a83ec940c6a29ae09e144e
| 3,640,197
|
def generate_arn(service, arn_suffix, region=None):
"""Returns a formatted arn for AWS.
Keyword arguments:
service -- the AWS service
arn_suffix -- the majority of the arn after the initial common data
region -- the region (can be None for region free arns)
"""
arn_value = "arn"
aws_value = "aws"
region_qualified = region if region else ""
return f"{arn_value}:{aws_value}:{service}:{region_qualified}:{arn_suffix}"
|
53dcf55c3fb15784770d1c2d62375d1e750469f8
| 3,640,198
|
def prod_list(lst):
"""returns the product of all numbers in a list"""
if lst:
res = 1
for num in lst:
res *= num
return res
else:
raise ValueError("List cannot be empty.")
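# Quick check (my own example):
assert prod_list([2, 3, 4]) == 24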
|
8179e2906fb4b517d02972fd4647095d37caf6cd
| 3,640,199
|