| content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
|---|---|---|
def SetDocTimestampFrequency(doc:NexDoc, freq:float):
"""Sets the document timestamp frequency"""
return NexRun("SetDocTimestampFrequency", locals())
|
dceb766792b6ca34d5f5759b0079acc5b70da5a9
| 3,640,800
|
def rsqrt(x:np.ndarray):
"""Computes reciprocal of square root of x element-wise.
Args:
x: input tensor
Returns:
output tensor
Examples:
>>> x = np.array([2., 0., -2.])
>>> rsqrt(x)
    array([0.70710678, inf, nan])
"""
return 1/np.sqrt(x)
|
f219ae71b5136bc1b34a2bb06ec76dcdb7ee20bb
| 3,640,801
|
def is_card(obj):
"""Return true if the object is a card."""
return obj in CARDS_SET
|
21f155feadde94d652e120224a2712f2470a9926
| 3,640,802
|
def plot_lines(
y: tuple,
x: np.ndarray = None,
points: bool = True,
x_axis_label: str = 'Index',
y_axis_label: str = 'Value',
plot_width: int = 1000,
plot_height: int = 500,
color: tuple = None,
legend: tuple = None,
title: str = 'Graph lines',
show_graph: bool = True
) -> figure:
"""
    Plot one line per element of the y tuple (len(y) lines in total),
    using the given plot parameters.
"""
if x is None:
x = np.arange(len(y[0]))
if legend is None:
legend = [f'Line {i}' for i in range(len(y))]
if color is None:
color = COLORS
fig = figure(title=title, x_axis_label=x_axis_label,
y_axis_label=y_axis_label, plot_width=plot_width,
plot_height=plot_height)
for i in range(len(y)):
fig.line(
y=y[i],
x=x,
color=color[i],
legend=legend[i]
)
        if points:
fig.circle(
y=y[i],
x=x,
fill_color=color[i]
)
if show_graph:
show(fig)
return fig
|
b938151b90005bc23bb9ed2f795dbf4620b26251
| 3,640,803
|
import re
def obtain_csrf(session):
"""
Obtain the CSRF token from the login page.
"""
resp = session.get(FLOW_LOGIN_GET_URL)
contents = str(resp.content)
match = re.search(r'csrfToken" value="([a-z0-9\-]+)"', contents)
return match.group(1)
|
a091ca33b6b0a43608261e46c54c7ae164a9d3af
| 3,640,804
|
def get_distance_curve(
kernel,
lambda_values,
N,
M=None,
):
""" Given number of elements per class, full kernel (with first N rows corr.
to mixture and the last M rows corr. to component, and set of lambda values
compute $\hat d(\lambda)$ for those values of lambda"""
d_lambda = []
    if M is None:
M = kernel.shape[0] - N
prev_soln = None
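    # Each u_lambda below is the lambda-weighted mixture of a uniform weight
    # vector over the first N (mixture) rows and a uniform weight vector over
    # the last M (component) rows; its entries sum to 1 for lambda in [0, 1].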
for lambda_value in lambda_values:
u_lambda = lambda_value / N * np.concatenate((np.ones((N, 1)),
np.zeros((M, 1)))) + (1 - lambda_value) / M \
* np.concatenate((np.zeros((N, 1)), np.ones((M, 1))))
(solution, distance_sqd) = \
find_nearest_valid_distribution(u_lambda, kernel, initial=prev_soln)
prev_soln = solution
d_lambda.append(sqrt(distance_sqd))
d_lambda = np.array(d_lambda)
return d_lambda
|
e085ea6b2122b052625df1c7b60115552112ffab
| 3,640,805
|
def _process_labels(labels, label_smoothing):
"""Pre-process a binary label tensor, maybe applying smoothing.
Parameters
----------
labels : tensor-like
Tensor of 0's and 1's.
label_smoothing : float or None
Float in [0, 1]. When 0, no smoothing occurs. When positive, the binary
ground truth labels `y_true` are squeezed toward 0.5, with larger values
of `label_smoothing` leading to label values closer to 0.5.
Returns
-------
torch.Tensor
The processed labels.
"""
assert label_smoothing is not None
labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
return labels
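# Hypothetical usage sketch (not from the original source, assumes `import torch`):
# with label_smoothing=0.2 a hard 1.0 becomes 0.9 and a hard 0.0 becomes 0.1.
# >>> _process_labels(torch.tensor([0.0, 1.0]), label_smoothing=0.2)
# tensor([0.1000, 0.9000])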
|
5a71ded8ac9d3ef4b389542814a170f35ef18fdd
| 3,640,806
|
def guess_digit(image, avgs):
"""Return the digit whose average darkness in the training data is
closest to the darkness of ``image``. Note that ``avgs`` is
assumed to be a defaultdict whose keys are 0...9, and whose values
are the corresponding average darknesses across the training data."""
darkness = sum(image)
    distances = {k: abs(v - darkness) for k, v in avgs.items()}
return min(distances, key=distances.get)
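# Hypothetical usage sketch (toy numbers, not from the original source):
# >>> avgs = {0: 110.0, 1: 40.0, 2: 95.0}
# >>> guess_digit([1] * 40, avgs)   # darkness = 40, closest to avgs[1]
# 1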
|
055a0d31f85ce6f5786d6bd6dfaed75bdb3ff5d6
| 3,640,807
|
from time import time
def multiple_writes(self,
Y_splits,
Z_splits,
X_splits,
out_dir,
mem,
filename_prefix="bigbrain",
extension="nii",
nThreads=1,
benchmark=False):
"""
    Split the input image into several splits,
    all sharing the same shape.
    For now only the .nii extension is supported.
:param Y_splits: How many splits in Y-axis
:param Z_splits: How many splits in Z-axis
:param X_splits: How many splits in X-axis
:param out_dir: Output Splits dir
:param mem: memory load each round
:param filename_prefix: each split's prefix filename
:param extension: extension of each split
:param nThreads: number of threads to trigger in each writing process
:param benchmark: If set to true the function will return
a dictionary containing benchmark information.
:return:
"""
def threaded_multiple():
'''# Using multi-threading to send data to hdfs in parallel,
# which will parallelize writing process.
# nThreads: number of threads that are working on writing
# data at the same time.
print("start {} threads to write data...".format(nThreads))
# separate all the splits' metadata to several pieces,
# each piece contains #nThreads splits' metadata.
caches = _split_arr(one_round_split_metadata.items(), nThreads)
st1 = time()
for thread_round in caches:
tds = []
# one split's metadata triggers one thread
for i in thread_round:
ix = i[1]
data = data_in_range[ix[0]: ix[1],
ix[2]: ix[3],
ix[4]: ix[5]]
td = threading.Thread(target=write_array_to_file,
args=(data, i[0], 0, benchmark))
td.start()
tds.append(td)
del data
for t in tds:
t.join()'''
pass
def compute_sizes(Y_splits, Z_splits, X_splits):
''' A function.
'''
# calculate remainder based on the original image file
Y_size, Z_size, X_size = self.header.get_data_shape()
bytes_per_voxel = self.header['bitpix'] / 8
if (X_size % X_splits != 0
or Z_size % Z_splits != 0
or Y_size % Y_splits != 0):
raise Exception("There is remainder after splitting, \
please reset the y,z,x splits")
x_size = X_size / X_splits
z_size = Z_size / Z_splits
y_size = Y_size / Y_splits
return ((x_size, z_size, y_size),
(X_size, Z_size, Y_size),
bytes_per_voxel)
def file_manipulation_multiple(sizes, Sizes, filename_prefix):
''' A function.
'''
x_size, z_size, y_size = sizes
X_size, Z_size, Y_size = Sizes
# get all split_names and write them to the legend file
split_names = generate_splits_name(y_size, z_size, x_size, Y_size,
Z_size, X_size, out_dir,
filename_prefix,
extension)
generate_legend_file(split_names, "legend.txt", out_dir)
# generate all the headers for each split
# in order to reduce overhead when reading headers of splits
# from hdfs, create a header cache in the local environment
print("create split meta data dictionary...")
split_meta_cache = generate_headers_of_splits(split_names,
y_size,
z_size,
x_size,
self.header
.get_data_dtype())
print("Get split indexes...")
split_indexes = get_indexes_of_all_splits(split_names,
split_meta_cache,
Y_size, Z_size)
return split_indexes, split_names, split_meta_cache
def get_metadata_multiple(split_indexes,
split_names,
split_meta_cache,
from_x_index):
''' A function.
'''
# create split metadata for all splits(position, write_range, etc.)
one_round_split_metadata = {}
for split_name in split_names:
if check_in_range(next_read_index, split_indexes[split_name]):
split = split_meta_cache[split_name]
(X_index_min, X_index_max,
x_index_min, x_index_max) = \
extract_slices_range(split,
next_read_index, Y_size,
Z_size)
y_index_min = int(split.split_pos[-3])
z_index_min = int(split.split_pos[-2])
y_index_max = y_index_min + split.split_y
z_index_max = z_index_min + split.split_z
one_round_split_metadata[split_name] = \
(y_index_min, y_index_max, z_index_min, z_index_max,
X_index_min - from_x_index,
X_index_max - from_x_index + 1)
return one_round_split_metadata
def loop_multiple(next_read_index,
bytes_per_voxel,
Sizes,
split_indexes,
split_names,
split_meta_cache,
split_read_time,
split_write_time,
split_seek_time,
split_seek_number,
benchmark):
''' A function.
'''
split_read_time = 0
split_nb_seeks = 0
X_size, Z_size, Y_size = Sizes
original_img_voxels = X_size * Y_size * Z_size
next_read_offsets = (next_read_index[0] * bytes_per_voxel,
next_read_index[1] * bytes_per_voxel + 1)
print("From {} to {}".format(next_read_offsets[0],
next_read_offsets[1]))
from_x_index = index_to_voxel(next_read_index[0],
Y_size, Z_size)[2]
to_x_index = index_to_voxel(next_read_index[1] + 1,
Y_size, Z_size)[2]
# read
print("Start reading data to memory...")
if benchmark:
t = time()
data_in_range = \
self.proxy.dataobj[..., int(from_x_index): int(to_x_index)]
if benchmark:
            read_time = time() - t
print('read time ', read_time)
split_read_time += read_time
split_nb_seeks += 1
one_round_split_metadata = get_metadata_multiple(split_indexes,
split_names,
split_meta_cache,
from_x_index)
caches = _split_arr(one_round_split_metadata.items(), nThreads)
threaded_multiple()
for round in caches:
for i in round:
ix = i[1]
ix = list(map(lambda x: int(x), ix))
data = data_in_range[ix[0]:ix[1], ix[2]:ix[3], ix[4]:ix[5]]
if benchmark:
seek_time, write_time, seek_number = \
write_array_to_file(data, i[0], 0, benchmark)
split_write_time += write_time
split_seek_time += seek_time
split_nb_seeks += seek_number
print("writing data takes ", write_time)
else:
write_array_to_file(data, i[0], 0, benchmark)
next_read_index = (next_read_index[1] + 1,
next_read_index[1] + voxels)
# last write, write no more than image size
if next_read_index[1] >= original_img_voxels:
next_read_index = (next_read_index[0], original_img_voxels - 1)
del caches
del one_round_split_metadata
del data_in_range
if benchmark:
return (next_read_index,
split_read_time,
split_write_time,
split_seek_time,
split_seek_number)
else:
return next_read_index
# begin algorithm
split_read_time = 0
split_seek_time = 0
split_write_time = 0
split_seek_number = 0
# preparation
sizes, Sizes, bytes_per_voxel = compute_sizes(Y_splits,
Z_splits,
X_splits)
X_size, Z_size, Y_size = Sizes
original_img_voxels = X_size * Y_size * Z_size
(split_indexes,
split_names,
split_meta_cache) = \
file_manipulation_multiple(sizes,
Sizes,
filename_prefix)
# drop the remainder which is less than one slice
# if mem is less than one slice, then set mem to one slice
mem = mem - mem % (Y_size * Z_size * bytes_per_voxel) \
if mem >= Y_size * Z_size * bytes_per_voxel \
else Y_size * Z_size * bytes_per_voxel
voxels = mem // bytes_per_voxel # get how many voxels per round
next_read_index = (0, voxels - 1)
while True:
if benchmark:
(next_read_index,
split_read_time,
split_write_time,
split_seek_time,
split_seek_number) = (loop_multiple(next_read_index,
bytes_per_voxel,
Sizes,
split_indexes,
split_names,
split_meta_cache,
split_read_time,
split_write_time,
split_seek_time,
split_seek_number,
benchmark))
else:
next_read_index = loop_multiple(next_read_index,
bytes_per_voxel,
Sizes,
split_indexes,
split_names,
split_meta_cache,
split_read_time,
split_write_time,
split_seek_time,
split_seek_number,
benchmark)
# if write range is larger than img size, we are done
if next_read_index[0] >= original_img_voxels:
break
if benchmark:
return {'split_read_time': split_read_time,
'split_write_time': split_write_time,
'split_seek_time': split_seek_time,
'split_nb_seeks': split_seek_number}
else:
return
|
b2a7048628c54bf8976f9b3182fe4cecc18468e7
| 3,640,808
|
def new_parameter_value(data, parameter_key: str):
"""Return the new parameter value and if necessary, remove any obsolete multiple choice values."""
new_value = dict(bottle.request.json)[parameter_key]
source_parameter = data.datamodel["sources"][data.source["type"]]["parameters"][parameter_key]
if source_parameter["type"] == "multiple_choice":
new_value = [value for value in new_value if value in source_parameter["values"]]
return new_value
|
41160804aba582ce0c588762bb1a96ea53e258df
| 3,640,809
|
from typing import Sequence
from typing import Optional
def rotate_to_base_frame(
pybullet_client: bullet_client.BulletClient,
urdf_id: int,
vector: Sequence[float],
init_orientation_inv_quat: Optional[Sequence[float]] = (0, 0, 0, 1)
) -> np.ndarray:
"""Rotates the input vector to the base coordinate systems.
Note: This is different from world frame to base frame transformation, as we
do not apply any translation here.
Args:
pybullet_client: The bullet client.
urdf_id: The unique id returned after loading URDF.
vector: Input vector in the world frame.
init_orientation_inv_quat:
Returns:
A rotated vector in the base frame.
"""
_, base_orientation_quat = (
pybullet_client.getBasePositionAndOrientation(urdf_id))
_, base_orientation_quat_from_init = pybullet_client.multiplyTransforms(
positionA=(0, 0, 0),
orientationA=init_orientation_inv_quat,
positionB=(0, 0, 0),
orientationB=base_orientation_quat)
_, inverse_base_orientation = pybullet_client.invertTransform(
[0, 0, 0], base_orientation_quat_from_init)
  # PyBullet transforms require a simple list/tuple or they may crash.
if isinstance(vector, np.ndarray):
vector_list = vector.tolist()
else:
vector_list = vector
local_vector, _ = pybullet_client.multiplyTransforms(
positionA=(0, 0, 0),
orientationA=inverse_base_orientation,
positionB=vector_list,
orientationB=(0, 0, 0, 1),
)
return np.array(local_vector)
|
5651e0183cd61555f90fe6af1e5c5dc2bec6e8b5
| 3,640,810
|
def show_page_map(label):
"""Renders the base page map code."""
return render('page_map.html', {
'map_label': label.replace('_', ' '),
})
|
623d47c4de57c1810c07475a70e501d55ee5e9ae
| 3,640,811
|
def create_clf_unicycle_position_controller(linear_velocity_gain=0.8, angular_velocity_gain=3):
"""Creates a unicycle model pose controller. Drives the unicycle model to a given position
and orientation. (($u: \mathbf{R}^{3 \times N} \times \mathbf{R}^{2 \times N} \to \mathbf{R}^{2 \times N}$)
linear_velocity_gain - the gain impacting the produced unicycle linear velocity
angular_velocity_gain - the gain impacting the produced unicycle angular velocity
-> function
"""
    # Check user input types
    assert isinstance(linear_velocity_gain, (int, float)), "In the function create_clf_unicycle_position_controller, the linear velocity gain (linear_velocity_gain) must be an integer or float. Received type %r." % type(linear_velocity_gain).__name__
    assert isinstance(angular_velocity_gain, (int, float)), "In the function create_clf_unicycle_position_controller, the angular velocity gain (angular_velocity_gain) must be an integer or float. Received type %r." % type(angular_velocity_gain).__name__
    # Check user input ranges/sizes
    assert linear_velocity_gain >= 0, "In the function create_clf_unicycle_position_controller, the linear velocity gain (linear_velocity_gain) must be greater than or equal to zero. Received %r." % linear_velocity_gain
    assert angular_velocity_gain >= 0, "In the function create_clf_unicycle_position_controller, the angular velocity gain (angular_velocity_gain) must be greater than or equal to zero. Received %r." % angular_velocity_gain
def position_uni_clf_controller(states, positions):
""" A position controller for unicycle models. This utilized a control lyapunov function
(CLF) to drive a unicycle system to a desired position. This function operates on unicycle
states and desired positions to return a unicycle velocity command vector.
states: 3xN numpy array (of unicycle states, [x;y;theta])
poses: 3xN numpy array (of desired positons, [x_goal;y_goal])
-> 2xN numpy array (of unicycle control inputs)
"""
        # Check user input types
        assert isinstance(states, np.ndarray), "In the function created by the create_clf_unicycle_position_controller function, the single-integrator robot states (xi) must be a numpy array. Received type %r." % type(states).__name__
        assert isinstance(positions, np.ndarray), "In the function created by the create_clf_unicycle_position_controller function, the robot goal points (positions) must be a numpy array. Received type %r." % type(positions).__name__
        # Check user input ranges/sizes
        assert states.shape[0] == 3, "In the function created by the create_clf_unicycle_position_controller function, the dimension of the unicycle robot states (states) must be 3 ([x;y;theta]). Received dimension %r." % states.shape[0]
        assert positions.shape[0] == 2, "In the function created by the create_clf_unicycle_position_controller function, the dimension of the robot goal positions (positions) must be 2 ([x_goal;y_goal]). Received dimension %r." % positions.shape[0]
        assert states.shape[1] == positions.shape[1], "In the function created by the create_clf_unicycle_position_controller function, the number of unicycle robot states (states) must be equal to the number of robot goal positions (positions). Received a current robot pose input array (states) of size %r states %r and desired position array (positions) of size %r states %r." % (states.shape[0], states.shape[1], positions.shape[0], positions.shape[1])
_,N = np.shape(states)
dxu = np.zeros((2, N))
pos_error = positions - states[:2][:]
rot_error = np.arctan2(pos_error[1][:],pos_error[0][:])
dist = np.linalg.norm(pos_error, axis=0)
dxu[0][:]=linear_velocity_gain*dist*np.cos(rot_error-states[2][:])
dxu[1][:]=angular_velocity_gain*dist*np.sin(rot_error-states[2][:])
return dxu
return position_uni_clf_controller
|
4d75c85079ca5350473c058019ae6f4763fdd97b
| 3,640,812
|
import sys
import subprocess
def cluster_pipeline(gff3_file, strand, verbose):
"""
here clusters of sequences from the same locus are prepared
"""
cat = CAT % gff3_file
btsort1 = BEDTOOLS_SORT
if strand:
btmerge1 = BEDTOOLS_MERGE_ST
sys.stdout.write("###CLUSTERING IN\033[32m STRANDED MODE\033[0m###\n")
else:
btmerge1 = BEDTOOLS_MERGE
sys.stdout.write("###CLUSTERING IN\033[32m NON-STRANDED MODE\033[0m ###\n")
btsort2 = BEDTOOLS_SORT
# Sort the GFF3 file
cat_call = subprocess.Popen(cat, stdout=subprocess.PIPE, shell=True)
if verbose:
sys.stderr.write('Executing: %s\n\n' % cat)
btsort1_call = subprocess.Popen(btsort1, stdin=cat_call.stdout, stdout=subprocess.PIPE, shell=True)
# Merge the BED entries, count number of reads on each merged entry
if verbose:
sys.stderr.write('Executing: %s\n\n' % btsort1)
btmerge1_call = subprocess.Popen(btmerge1, stdin=btsort1_call.stdout, stdout=subprocess.PIPE, shell=True)
    # Sort it again and return the result
if verbose:
sys.stderr.write('Executing: %s\n\n' % btmerge1)
btsort2_call = subprocess.Popen(btsort2, stdin=btmerge1_call.stdout, stdout=subprocess.PIPE, shell=True)
if verbose:
sys.stderr.write('Executing: %s\n\n' % btsort2)
outputBT = btsort2_call.communicate()[0]
final_output = outputBT.splitlines()
return final_output
|
60f642e90e73b8cf53c0261e5fd2aa5f79637e1a
| 3,640,813
|
from typing import Tuple
def stft_reassign_from_sig(sig_wf: np.ndarray,
frequency_sample_rate_hz: float,
band_order_Nth: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray,
np.ndarray]:
"""
    The librosa STFT returns a complex FFT grid, not power.
    The reassigned frequencies are not the same as the standard mesh frequencies.
:param sig_wf: array with input signal
:param frequency_sample_rate_hz: sample rate of frequency in Hz
:param band_order_Nth: Nth order of constant Q bands
:return: six numpy ndarrays with STFT, STFT_bits, time_stft_s, frequency_stft_hz, time_stft_rsg_s,
frequency_stft_rsg_hz
"""
sig_duration_s = len(sig_wf)/frequency_sample_rate_hz
_, min_frequency_hz = scales.from_duration(band_order_Nth, sig_duration_s)
order_Nth, cycles_M, quality_Q, \
frequency_center, frequency_start, frequency_end = \
scales.frequency_bands_g2f1(scale_order_input=band_order_Nth,
frequency_low_input=min_frequency_hz,
frequency_sample_rate_input=frequency_sample_rate_hz)
# Choose the spectral resolution as the key parameter
frequency_resolution_min_hz = np.min(frequency_end - frequency_start)
frequency_resolution_max_hz = np.max(frequency_end - frequency_start)
frequency_resolution_hz_geo = np.sqrt(frequency_resolution_min_hz*frequency_resolution_max_hz)
stft_time_duration_s = 1/frequency_resolution_hz_geo
stft_points_per_seg = int(frequency_sample_rate_hz*stft_time_duration_s)
# From CQT
stft_points_hop, _, _, _, _ = \
scales.cqt_frequency_bands_g2f1(band_order_Nth,
min_frequency_hz,
frequency_sample_rate_hz,
is_power_2=False)
print('Reassigned STFT Duration, NFFT, HOP:', len(sig_wf), stft_points_per_seg, stft_points_hop)
STFT_Scaling = 2*np.sqrt(np.pi)/stft_points_per_seg
# Reassigned frequencies require a 'best fit' solution.
frequency_stft_rsg_hz, time_stft_rsg_s, STFT_mag = \
librosa.reassigned_spectrogram(sig_wf, sr=frequency_sample_rate_hz,
n_fft=stft_points_per_seg,
hop_length=stft_points_hop, win_length=None,
window='hann', center=False, pad_mode='reflect')
# Must be scaled to match scipy psd
STFT_mag *= STFT_Scaling
STFT_bits = utils.log2epsilon(STFT_mag)
# Standard mesh times and frequencies for plotting - nice to have both
time_stft_s = librosa.times_like(STFT_mag, sr=frequency_sample_rate_hz,
hop_length=stft_points_hop)
frequency_stft_hz = librosa.core.fft_frequencies(sr=frequency_sample_rate_hz,
n_fft=stft_points_per_seg)
# Reassigned frequencies are not the same as the standard mesh frequencies
return STFT_mag, STFT_bits, time_stft_s, frequency_stft_hz, time_stft_rsg_s, frequency_stft_rsg_hz
|
90aa2b019ace90500c38feb5e643a8ca3c02360a
| 3,640,814
|
from typing import List
def download(*urls, zip: str=None, unzip: bool=False, **kwargs) -> List[File]:
"""
Download multiple zippyshare urls
Parameters
-----------
*urls
Zippyshare urls.
zip: :class:`str`
Zip all downloaded files once finished.
Zip filename will be taken from ``zip`` parameter,
default to ``None``.
NOTE: You can't mix ``zip`` and ``unzip`` options together
with value ``True``, it will raise error.
unzip: :class:`bool`
Unzip all downloaded files once finished
(if given file is zip format extract it, otherwise ignore it),
default to ``False``.
NOTE: You can't mix ``zip`` and ``unzip`` options together
with value ``True``, it will raise error.
**kwargs
These parameters will be passed to :meth:`File.download()`,
except for parameter ``filename``.
Returns
-------
List[:class:`File`]
a list of Zippyshare files
"""
if unzip and zip:
raise ValueError("unzip and zip paramaters cannot be set together")
downloaded_files = {}
files = []
for url in urls:
info = get_info(url)
file = File(info)
files.append(file)
if kwargs.get('filename') is not None:
kwargs.pop('filename')
file_path = file.download(**kwargs)
downloaded_files[file] = file_path
if unzip:
extract_archived_file(str(file_path))
if zip:
log.info(build_pretty_list_log(downloaded_files, 'Zipping all downloaded files to "%s"' % zip))
archive_zip(downloaded_files, zip)
log.info(build_pretty_list_log(downloaded_files, 'Successfully zip all downloaded files to "%s"' % zip))
return files
|
a1197d264fa3305fb545a60e5963be2fc326aa5d
| 3,640,815
|
def pop_arg(args_list, expected_size_after=0, msg="Missing argument"):
"""helper function to get and check command line arguments"""
try:
value = args_list.pop(0)
except IndexError:
raise BadCommandUsage(msg)
if expected_size_after is not None and len(args_list) > expected_size_after:
raise BadCommandUsage('too many arguments')
return value
|
90b1f1ae596a9257d15cc189e87223b166252c9a
| 3,640,816
|
def d4s(data):
"""
Beam parameter calculation according to the ISO standard D4sigma integrals
input: 2D array of intensity values (pixels)
output:
xx, yy: x and y centres
dx, dy: 4 sigma widths for x and y
angle: inferred rotation angle, radians
"""
gg = data
dimy, dimx = np.shape(data)
X, Y = np.mgrid[0:dimx,0:dimy]
X = X.T
Y = Y.T
P = np.sum(data)
xx = np.sum(data * X) / P
yy = np.sum(data * Y) / P
xx2 = np.sum(data * (X - xx)**2)/P
yy2 = np.sum(data * (Y - yy)**2)/P
xy = np.sum(data * (X - xx) * (Y - yy)) / P
gamm = np.sign(xx2 - yy2)
angle = 0.5 * np.arctan(2*xy / (xx2 - yy2))
try:
dx = 2 * np.sqrt(2) * (xx2 + yy2 + gamm * ( (xx2 - yy2)**2 + 4*xy**2)**0.5)**(0.5)
dy = 2 * np.sqrt(2) * (xx2 + yy2 - gamm * ( (xx2 - yy2)**2 + 4*xy**2)**0.5)**(0.5)
    except Exception:
        # In case of error, just make the size very large
        print("Fitting error")
        dx, dy = data.shape
return xx, yy, dx, dy, angle
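# Hypothetical sanity check (not part of the original snippet): for an ideal
# elliptical Gaussian beam the D4sigma widths come out close to 4*sigma along
# each axis.
# >>> y, x = np.mgrid[0:200, 0:200]
# >>> beam = np.exp(-((x - 100.0)**2 / (2 * 10.0**2) + (y - 100.0)**2 / (2 * 15.0**2)))
# >>> xx, yy, dx, dy, angle = d4s(beam)   # dx ~ 40 (= 4*10), dy ~ 60 (= 4*15)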
|
f10c0792a2e200c980ccd6ffb286bdfabc90bb32
| 3,640,817
|
from astropy.utils import iers
import warnings
import six
def checkWarnings(func, func_args=[], func_kwargs={},
category=UserWarning,
nwarnings=1, message=None, known_warning=None):
"""Function to check expected warnings."""
if (not isinstance(category, list) or len(category) == 1) and nwarnings > 1:
if isinstance(category, list):
category = category * nwarnings
else:
category = [category] * nwarnings
if (not isinstance(message, list) or len(message) == 1) and nwarnings > 1:
if isinstance(message, list):
message = message * nwarnings
else:
message = [message] * nwarnings
if known_warning == 'miriad':
# The default warnings for known telescopes when reading miriad files
category = [UserWarning]
message = ['Altitude is not present in Miriad file, using known '
'location values for PAPER.']
nwarnings = 1
elif known_warning == 'paper_uvfits':
# The default warnings for known telescopes when reading uvfits files
category = [UserWarning] * 2
message = ['Required Antenna frame keyword', 'telescope_location is not set']
nwarnings = 2
elif known_warning == 'fhd':
category = [UserWarning]
message = ['Telescope location derived from obs']
nwarnings = 1
category = uvutils._get_iterable(category)
message = uvutils._get_iterable(message)
clearWarnings()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always") # All warnings triggered
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# filter iers warnings if iers.conf.auto_max_age is set to None, as we do in testing if the iers url is down
if iers.conf.auto_max_age is None:
warnings.filterwarnings("ignore", message="failed to download")
warnings.filterwarnings("ignore", message="time is out of IERS range")
if isinstance(message, six.string_types):
test_message = [message.startswith("LST values stored in ")]
else:
test_message = []
for m in message:
if m is None:
test_message.append(False)
else:
test_message.append(m.startswith("LST values stored in "))
if not any(test_message):
warnings.filterwarnings("ignore", message="LST values stored in ")
retval = func(*func_args, **func_kwargs) # Run function
# Verify
if len(w) != nwarnings:
print('wrong number of warnings. Expected number was {nexp}, '
'actual number was {nact}.'.format(nexp=nwarnings, nact=len(w)))
for idx, wi in enumerate(w):
print('warning {i} is: {w}'.format(i=idx, w=wi))
assert(False)
else:
for i, w_i in enumerate(w):
if w_i.category is not category[i]:
print('expected category ' + str(i) + ' was: ', category[i])
print('category ' + str(i) + ' was: ', str(w_i.category))
assert(False)
if message[i] is not None:
if message[i] not in str(w_i.message):
print('expected message ' + str(i) + ' was: ', message[i])
print('message ' + str(i) + ' was: ', str(w_i.message))
assert(False)
return retval
|
58a40594f48b1f47350e9b6a1ca1d956dfd63d04
| 3,640,818
|
from omas.omas_utils import list_structures
from omas.omas_utils import load_structure
def extract_times(imas_version=omas_rcparams['default_imas_version']):
"""
return list of strings with .time across all structures
:param imas_version: imas version
:return: list with times
"""
times = []
for structure in list_structures(imas_version=imas_version):
tmp = load_structure(structure, imas_version)[0]
for item in tmp:
if not item.endswith('.time') or 'data_type' not in tmp[item] or tmp[item]['data_type'] == 'STRUCTURE':
continue
times.append(item)
return sorted(times)
|
2b13361dc713d90a946554383b556a8ced24ac55
| 3,640,819
|
import json
def load_appdata():
"""load application data from json file
"""
try:
_in = open(FNAME)
except FileNotFoundError:
return
with _in:
appdata = json.load(_in)
return appdata
|
afb3a69a5abf72cd14a8ae0c8c99ccc3350899a1
| 3,640,820
|
def compute_couplings(models_a, models_b):
"""
Given logistic models for two multiple sequence alignments, calculate all
intermolecular coupling strengths between residues.
The coupling strength between positions i and j is calculated as the 2-norm
of the concatenation of the coefficient submatrices that describe the
relationships between the two positions.
----------------------------------------------------------------------------
Reference:
Ovchinnikov, Sergey, Hetunandan Kamisetty, and David Baker.
"Robust and accurate prediction of residue–residue interactions across
protein interfaces using evolutionary information." Elife 3 (2014): e02030
----------------------------------------------------------------------------
Arguments
---------
models_a: list of SGDClassifier objects, one for each analyzed column in
MSA A
models_b: list of SGDClassifier objects, one for each analyzed column in
MSA B
Returns
-------
couplings: dict, contains intermolecular coupling strengths in the format
{"Ai:Bj":float,...}
contact_mtx: array, 2D matrix of dimensions (models_a, models_b); contains
the value of the coupling strength for each pair of positions
"""
# Dictionary to store couplings between residues
couplings = {}
# To keep track of the submatrix we need to take from the matrix of
# coefficients from protein B
# Iterate over models / columns of MSA A
# Variable to keep track of the submatrix we need to take from the matrix
# of coefficients of models of B
offset_a = 0
contact_mtx = np.zeros((len(models_a), len(models_b)))
for i, model_a in enumerate(models_a):
# Variable to keep track of the submatrix we need to take from the
# matrix of coefficients from protein A
end_point_a = 0
for j, model_b in enumerate(models_b):
# Select the relevant submatrices of coefficients, this is,
# the columns in A that indicate coupling to B and vice versa
# Taking the 2-norm of a vector and a matrix is equivalent. In case
# of mismatching dimensions, flatten the matrices into vectors and
# concatenate them
sel_coefs_a, end_point_a = select_coefs(model_a.coef_, end_point_a)
sel_coefs_a = sel_coefs_a.flatten()
sel_coefs_b, _ = select_coefs(model_b.coef_, offset_a)
sel_coefs_b = sel_coefs_b.flatten()
coef_vector = np.concatenate((sel_coefs_a, sel_coefs_b))
# Calculate coupling strength (as the 2-norm of the vector of
# coefficients) and store the value in the output
coupling = np.linalg.norm(coef_vector)
coupling_name = ''.join(['A', str(i), ':', 'B', str(j)])
couplings[coupling_name] = coupling
contact_mtx[i][j] = coupling
offset_a += 20
return couplings, contact_mtx
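# Hypothetical illustration (not from the original source): the 2-norm of the
# concatenated coefficient blocks equals the root of the summed squared
# Frobenius norms, which is what select_coefs + np.concatenate compute above.
# >>> a, b = np.arange(6.).reshape(2, 3), np.arange(4.).reshape(2, 2)
# >>> np.isclose(np.linalg.norm(np.concatenate((a.flatten(), b.flatten()))),
# ...            np.sqrt(np.linalg.norm(a)**2 + np.linalg.norm(b)**2))
# True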
|
761c1987a7e230f70e123ce8d1746881b1b26cae
| 3,640,821
|
def update_checkout_line(request, checkout, variant_id):
"""Update the line quantities."""
if not request.is_ajax():
return redirect("checkout:index")
checkout_line = get_object_or_404(checkout.lines, variant_id=variant_id)
discounts = request.discounts
status = None
form = ReplaceCheckoutLineForm(
request.POST,
checkout=checkout,
variant=checkout_line.variant,
discounts=discounts,
)
manager = request.extensions
if form.is_valid():
form.save()
checkout.refresh_from_db()
# Refresh obj from db and confirm that checkout still has this line
checkout_line = checkout.lines.filter(variant_id=variant_id).first()
line_total = zero_taxed_money(currency=settings.DEFAULT_CURRENCY)
if checkout_line:
line_total = manager.calculate_checkout_line_total(checkout_line, discounts)
subtotal = get_display_price(line_total)
response = {
"variantId": variant_id,
"subtotal": format_money(subtotal),
"total": 0,
"checkout": {"numItems": checkout.quantity, "numLines": len(checkout)},
}
checkout_total = manager.calculate_checkout_subtotal(checkout, discounts)
checkout_total = get_display_price(checkout_total)
response["total"] = format_money(checkout_total)
local_checkout_total = to_local_currency(checkout_total, request.currency)
if local_checkout_total is not None:
response["localTotal"] = format_money(local_checkout_total)
status = 200
elif request.POST is not None:
response = {"error": form.errors}
status = 400
return JsonResponse(response, status=status)
|
9394699c50bc3724ac253f288e23cc77eac05a3a
| 3,640,822
|
from typing import Optional
def merge_df(
df: Optional[pd.DataFrame], new_df: Optional[pd.DataFrame], how="left"
):
"""
join two dataframes. Assumes the dataframes are indexed on datetime
Args:
df: optional dataframe
new_df: optional dataframe
Returns:
The merged dataframe
"""
if df is None:
result_df = new_df
elif new_df is None:
result_df = df
else:
try:
result_df = pd.merge_ordered(
df.reset_index(),
new_df.reset_index().drop_duplicates()
)
result_df.set_index("datetime", inplace=True)
result_df.sort_index(inplace=True)
if len(result_df.index.unique()) != len(result_df.index):
LOG.error("Merging did not result in unique indexes. Killing"
" to avoid missing data")
raise ValueError("Issue merging")
except Exception as e:
LOG.error("failed joining dataframes.")
raise e
return result_df
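# Hypothetical usage sketch (illustrative names, not from the original source);
# assumes `import pandas as pd` and that both frames are indexed on a column
# named "datetime", as required by the set_index call above.
# >>> idx = pd.date_range("2021-01-01", periods=3, freq="D", name="datetime")
# >>> a = pd.DataFrame({"x": [1, 2, 3]}, index=idx)
# >>> b = pd.DataFrame({"y": [4.0, 5.0, 6.0]}, index=idx)
# >>> merge_df(a, b).columns.tolist()
# ['x', 'y']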
|
783111942086a23fbb13b1e96f2d098c7db0f963
| 3,640,823
|
import os
import yaml
def load_settings(settings_path: str = CHAOSTOOLKIT_CONFIG_PATH) -> Settings:
"""
Load chaostoolkit settings as a mapping of key/values or return `None`
when the file could not be found.
"""
if not os.path.exists(settings_path):
logger.debug(
"The Chaos Toolkit settings file could not be found at "
"'{c}'.".format(c=settings_path)
)
return
with open(settings_path) as f:
try:
settings = yaml.safe_load(f.read())
loaded_settings.set(settings)
return settings
except yaml.YAMLError as ye:
logger.error("Failed parsing YAML settings: {}".format(str(ye)))
|
43a7f8e83827df26a840a13a53d6c87e6bddf5ff
| 3,640,824
|
def _is_leaf(tree: DecisionTreeClassifier, node_id: int) -> bool:
"""
Determines if a tree node is a leaf.
:param tree: an `sklearn` decision tree classifier object
:param node_id: an integer identifying a node in the above tree
:return: a boolean `True` if the node is a leaf, `False` otherwise
"""
return tree.tree_.children_left[node_id] == tree.tree_.children_right[node_id]
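# Hypothetical usage sketch (not from the original source): scikit-learn marks
# leaves with the same -1 sentinel in children_left and children_right, so the
# equality test above is sufficient.
# >>> from sklearn.tree import DecisionTreeClassifier
# >>> clf = DecisionTreeClassifier(max_depth=1).fit([[0], [1], [2], [3]], [0, 0, 1, 1])
# >>> [node for node in range(clf.tree_.node_count) if _is_leaf(clf, node)]
# [1, 2]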
|
bdc5affe82c1c7505668e0f7c70dbb548170b6e1
| 3,640,825
|
async def commission_reset(bot, context):
"""Resets a given user's post cooldown manually."""
advertisement_data = await _get_advertisement_data(bot, context.guild)
deleted_persistence = data.get(
bot, __name__, 'recently_deleted', guild_id=context.guild.id, default={})
user_id = context.arguments[0].id
if user_id in advertisement_data:
del advertisement_data[user_id]
if str(user_id) in deleted_persistence:
del deleted_persistence[str(user_id)]
return Response(
"Reset that user's advertisement cooldown. Their last advertisement post "
"will need to be removed manually if necessary.")
|
06666421569b92fdf8a943351058e3f53c7d0777
| 3,640,826
|
def test_sample_problems_auto_1d_maximization(max_iter, max_response, error_lim, model_type, capsys):
"""
solve a sample problem in two different conditions.
test that auto method works for a particular single-covariate (univariate) function
"""
# define data
x_input = [(0.5, 0,
1)] # covariates come as a list of tuples (one per covariate: (<initial_guess>, <min>, <max>))
# define response function
def f(x):
return -(6 * x["covar0"].iloc[0] - 2) ** 2 * np.sin(12 * x["covar0"].iloc[0] - 4)
# initialize class instance
cc = TuneSession(covars=x_input, model=model_type)
# run the auto-method
cc.auto(response_samp_func=f, max_iter=max_iter)
# assert
assert cc.model["covars_sampled_iter"] == max_iter
# assert that max value found
THEORETICAL_MAX_COVAR = 0.75725
assert abs(cc.covars_best_response_value[-1].item() - THEORETICAL_MAX_COVAR) < error_lim
# run current_best method
cc.current_best()
captured = capsys.readouterr()
assert abs(cc.best["covars"].values[0][0] - THEORETICAL_MAX_COVAR) < error_lim
assert abs(cc.best["response"].values[0][0] - max_response) < error_lim
assert cc.best["iteration_when_recorded"] == max_iter
|
3db394c4b1cccb276d3efe80ff7561830fc82b7a
| 3,640,827
|
import logging
def partition_round(elms, percent, exact=-1, total=100, *args, **kwargs):
"""
Partitions dataset in a predictable way.
:param elms: Total Number of elements
:type elms: Integer
    :param percent: Percentage of problem space to be processed on one device
    :type percent: Integer
    :param exact: Flag stating whether the percentage of problem space is greater than 50 or not (0 when percent > 50, 1 otherwise)
    :type exact: Integer
:param total: Percentage of total problem space (Default value: 100)
:type total: Integer
:return: Number of elements of partitioned dataset
:rtype: Integer
"""
if elms < 100:
factor = 10
x = elms / 10
else:
factor = 1
x = elms / 100
if exact == -1:
exact = 0 if percent > 50 else 1
if elms % 2 == 0:
if percent == 50:
logging.debug(
"PARTITION: get_slice_values -> multiple_round -> partition_round (if percent=50) returns: %d",
elms / 2)
return elms / 2
elif exact == 0:
b = x * (total - percent) / factor
return partition_round(elms, total) - b if total != 100 else elms - b
elif exact == 1:
logging.debug("PARTITION: get_slice_values -> multiple_round -> partition_round (if exact=1) returns: %d",
x * percent / factor)
return x * percent / factor
else:
if percent > 50:
return partition_round(elms - 1, percent, exact, total)
else:
return partition_round(elms - 1, percent, exact, total) + 1
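# Hypothetical usage sketch (not from the original source); the true divisions
# above make the result a float.
# >>> partition_round(100, 30)
# 30.0
# >>> partition_round(100, 70)
# 70.0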
|
c3d83a9da0d25d9e9a1f688620fc9a925535cb6a
| 3,640,828
|
def heatmap_numeric_w_dependent_variable(df, dependent_variable):
"""
    Takes a dataframe and the name of a dependent variable (str).
    Returns a heatmap of all independent variables' correlations with the dependent variable.
"""
plt.figure(figsize=(10, 5.5))
figure = sns.heatmap(
df.corr()[[dependent_variable]].sort_values(by=dependent_variable),
annot=True,
cmap="coolwarm",
vmin=-1,
vmax=1,
)
return figure
|
46919deb37ee1f641983761a81ffeb830dac8217
| 3,640,829
|
from typing import Set
from typing import Tuple
import timeit
def _handle_rpm(
rpm: Rpm,
universe: str,
repo_url: str,
rpm_table: RpmTable,
all_snapshot_universes: Set[str],
cfg: DownloadConfig,
) -> Tuple[Rpm, MaybeStorageID, float]:
"""Fetches the specified RPM from the repo DB and downloads it if needed.
Returns a 3-tuple of the hydrated RPM, storage ID or exception if one was
caught, and bytes downloaded, if a download occurred (used for reporting).
"""
    # Read-after-write consistency is not needed here as this is the first read
# in the execution model. It's possible another concurrent snapshot is
# running that could race with this read, but that's not critical as this
# section should be idempotent, and at worst we'll duplicate some work by
# re-downloading the RPM.
with cfg.new_db_ctx(readonly=True, force_master=False) as ro_repo_db:
# If we get no `storage_id` back, there are 3 possibilities:
# - `rpm.nevra()` was never seen before.
# - `rpm.nevra()` was seen before, but it was hashed with
# different algorithm(s), so we MUST download and
# compute the canonical checksum to know if its contents
# are the same.
# - `rpm.nevra()` was seen before, **AND** one of the
# prior checksums used `rpm.checksum.algorithms`, but
# produced a different hash value. In other words, this
# is a `MutableRpmError`, because the same NEVRA must
# have had two different contents. We COULD explicitly
# detect this error here, and avoid the download.
# However, this severe error should be infrequent, and we
# actually get valuable information from the download --
# this lets us know whether the file is wrong or the
# repodata is wrong.
with timeit(
partial(log_sample, LogOp.RPM_QUERY, rpm=rpm, universe=universe)
):
(
storage_id,
canonical_chk,
) = ro_repo_db.get_rpm_storage_id_and_checksum(rpm_table, rpm)
# If the RPM is already stored with a matching checksum, just update its
# `.canonical_checksum`. Note that `rpm` was parsed from repodata, and thus
# it's guaranteed to not yet have a `canonical_checksum`.
if storage_id:
rpm = rpm._replace(canonical_checksum=canonical_chk)
# This is a very common case and thus noisy log, so we write to debug
log.debug(f"Already stored under {storage_id}: {rpm}")
return rpm, storage_id, 0
# We have to download the RPM.
try:
with timeit(
partial(log_sample, LogOp.RPM_DOWNLOAD, rpm=rpm, universe=universe)
):
rpm, storage_id = _download_rpm(rpm, repo_url, rpm_table, cfg)
return rpm, storage_id, rpm.size
# RPM checksum validation errors, HTTP errors, etc
except ReportableError as ex:
# This "fake" storage_id is stored in `storage_id_to_rpm`, so the
# error is propagated to sqlite db through the snapshot. It isn't
# written to repo_db however as that happens in the *_impl function
return rpm, ex, 0
|
107d3e9b0d139663d33a415be8eccfb6541e1b4a
| 3,640,830
|
def numpy2seq(Z, val=-1):
"""Appends the minimal required amount of zeroes at the end of each
array in the jagged array `M`, such that `M` looses its jagedness."""
seq = []
for z in t2n(Z).astype(int):
i = np.where(z==val)[0]
if i.size == 0:
seq += [z.tolist()]
else:
seq += [z[:min(i)].tolist()]
return seq
|
b46f6379a3eba0c5754c1a824dc28a43a10dc742
| 3,640,831
|
def winner(board):
"""Detirmine the game's winner."""
WAYS_TO_WIN = ((0, 1, 2),
(3, 4, 5),
(6, 7, 8),
(0, 3, 6),
(1, 4, 7),
(2, 5, 8),
(0, 4, 8),
(2, 4, 6))
for row in WAYS_TO_WIN:
if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:
winner = board[row[0]]
return winner
if EMPTY not in board:
return TIE
return None
|
6adb31e668c1d7e2723df7d65ab34246748c3249
| 3,640,832
|
def compute_inv_propensity(train_file, A, B):
"""
Compute Inverse propensity values
Values for A/B:
Wikpedia-500K: 0.5/0.4
Amazon-670K, Amazon-3M: 0.6/2.6
Others: 0.55/1.5
"""
train_labels = data_utils.read_sparse_file(train_file)
inv_propen = xc_metrics.compute_inv_propesity(train_labels, A, B)
return inv_propen
|
df8f45cf48f056cee6f3f9026f546dcea0f9ee75
| 3,640,833
|
def tanh(x):
"""
    Returns the hyperbolic tangent of x.
Args:
x (TensorOp): A tensor.
Returns:
TensorOp: The tanh of x.
"""
return TanhOp(x)
|
bef86675a70714f3e33a6828353e1f71958c3057
| 3,640,834
|
def importBodyCSVDataset(testSplit: float, local_import: bool):
"""Import body dataset as numpy arrays from GitHub if available, or local dataset otherwise.
Args:
        testSplit (float): Fraction of the dataset reserved for testing. Must be between 0.0 and 1.0.
"""
assert 0.0 <= testSplit <= 1.0
datasetPath = DATASETS_PATH / "BodyPose_Dataset.csv"
datasetURL = "https://raw.githubusercontent.com/ArthurFDLR/pose-classification-kit/master/pose_classification_kit/datasets/BodyPose_Dataset.csv"
if local_import:
dataset_df = pd.read_csv(datasetPath)
else:
dataset_df = pd.read_csv(datasetURL)
bodyLabels_df = dataset_df.groupby("label")
labels = list(dataset_df.label.unique())
# Find the minimum number of samples accross categories to uniformly distributed sample sets
total_size_cat = bodyLabels_df.size().min()
test_size_cat = int(total_size_cat * testSplit)
train_size_cat = total_size_cat - test_size_cat
x_train = []
x_test = []
y_train = []
y_test = []
# Iterate over each labeled group
for label, group in bodyLabels_df:
# remove irrelevant columns
group_array = group.drop(["label", "accuracy"], axis=1).to_numpy()
np.random.shuffle(group_array)
group_array_2D = [np.array((x[::2], x[1::2])).T for x in group_array]
x_train.append(group_array_2D[:train_size_cat])
y_train.append([label] * train_size_cat)
x_test.append(group_array_2D[train_size_cat : train_size_cat + test_size_cat])
y_test.append([label] * test_size_cat)
# Concatenate sample sets as numpy arrays
x_train = np.concatenate(x_train, axis=0)
x_test = np.concatenate(x_test, axis=0)
y_train = np.concatenate(y_train, axis=0)
y_test = np.concatenate(y_test, axis=0)
return x_train, x_test, y_train, y_test, labels
|
411d8c1aa3e1d741e2b169f1a4c3065af8f5e82c
| 3,640,835
|
def mvstdtprob(a, b, R, df, ieps=1e-5, quadkwds=None, mvstkwds=None):
"""
Probability of rectangular area of standard t distribution
assumes mean is zero and R is correlation matrix
Notes
-----
This function does not calculate the estimate of the combined error
between the underlying multivariate normal probability calculations
and the integration.
"""
kwds = dict(args=(a, b, R, df), epsabs=1e-4, epsrel=1e-2, limit=150)
    if quadkwds is not None:
kwds.update(quadkwds)
lower, upper = chi.ppf([ieps, 1 - ieps], df)
res, err = integrate.quad(funbgh2, lower, upper, **kwds)
prob = res * bghfactor(df)
return prob
|
2b15e3ce209d01e4790391242cbd87914a79fa5d
| 3,640,836
|
from typing import Union
from typing import Iterable
from typing import Dict
from typing import Callable
import random
import os
import warnings
import functools
def build_dataloaders(
cfg: CfgNode,
batch_size: Union[int, Iterable[int]],
) -> Dict[str, Callable]:
"""
Get iterators of built-in datasets.
Args:
cfg: CfgNode instance that requests built-in datasets.
batch_size (int or sequence): The number of examples in one mini-batch. If batch_size is
a sequence like (b1, b2, b3), batch sizes of train/valid/test splits will be matched
to b1/b2/b3, respectively.
Returns:
A dictionary with keys 'dataloader', 'trn_loader', 'val_loader', and 'tst_loader'.
Example:
>>> dataloaders = build_dataloaders(cfg, batch_size=[128, 200, 200])
>>> for epoch_idx in enumerate(range(10), start=1):
>>> rng, data_rng = jax.random.split(rng)
>>>
>>> trn_loader = dataloaders['dataloader'](rng=data_rng)
>>> trn_loader = jax_utils.prefetch_to_device(trn_loader, size=2)
>>> for batch_idx, batch in enumerate(trn_loader, start=1):
>>> (...)
>>>
>>> val_loader = dataloaders['val_loader'](rng=None)
>>> val_loader = jax_utils.prefetch_to_device(val_loader, size=2)
>>> for batch_idx, batch in enumerate(val_loader, start=1):
>>> (...)
"""
name = cfg.DATASETS.NAME
if name in ['MNIST', 'KMNIST', 'FashionMNIST',]:
indices = list(range(60000))
if cfg.DATASETS.MNIST.SHUFFLE_INDICES:
random.Random(cfg.DATASETS.SEED).shuffle(indices)
trn_indices = indices[cfg.DATASETS.MNIST.TRAIN_INDICES[0] : cfg.DATASETS.MNIST.TRAIN_INDICES[1]]
val_indices = indices[cfg.DATASETS.MNIST.VALID_INDICES[0] : cfg.DATASETS.MNIST.VALID_INDICES[1]]
elif name in ['CIFAR10', 'CIFAR100',]:
indices = list(range(50000))
if cfg.DATASETS.CIFAR.SHUFFLE_INDICES:
random.Random(cfg.DATASETS.SEED).shuffle(indices)
trn_indices = indices[cfg.DATASETS.CIFAR.TRAIN_INDICES[0] : cfg.DATASETS.CIFAR.TRAIN_INDICES[1]]
val_indices = indices[cfg.DATASETS.CIFAR.VALID_INDICES[0] : cfg.DATASETS.CIFAR.VALID_INDICES[1]]
elif name in ['TinyImageNet200',]:
indices = list(range(100000))
if cfg.DATASETS.TINY.SHUFFLE_INDICES:
random.Random(cfg.DATASETS.SEED).shuffle(indices)
trn_indices = indices[cfg.DATASETS.TINY.TRAIN_INDICES[0] : cfg.DATASETS.TINY.TRAIN_INDICES[1]]
val_indices = indices[cfg.DATASETS.TINY.VALID_INDICES[0] : cfg.DATASETS.TINY.VALID_INDICES[1]]
elif name in ['ImageNet1k_x32', 'ImageNet1k_x64',]:
indices = list(range(1281167))
if cfg.DATASETS.DOWNSAMPLED_IMAGENET.SHUFFLE_INDICES:
random.Random(cfg.DATASETS.SEED).shuffle(indices)
trn_indices = indices[cfg.DATASETS.DOWNSAMPLED_IMAGENET.TRAIN_INDICES[0] : cfg.DATASETS.DOWNSAMPLED_IMAGENET.TRAIN_INDICES[1]]
val_indices = indices[cfg.DATASETS.DOWNSAMPLED_IMAGENET.VALID_INDICES[0] : cfg.DATASETS.DOWNSAMPLED_IMAGENET.VALID_INDICES[1]]
trn_images = np.load(os.path.join(cfg.DATASETS.ROOT, f'{name}/train_images.npy'))
trn_labels = np.load(os.path.join(cfg.DATASETS.ROOT, f'{name}/train_labels.npy'))
tst_images = np.load(os.path.join(cfg.DATASETS.ROOT, f'{name}/test_images.npy'))
tst_labels = np.load(os.path.join(cfg.DATASETS.ROOT, f'{name}/test_labels.npy'))
# validation split
if val_indices:
val_images, val_labels = trn_images[val_indices], trn_labels[val_indices]
trn_images, trn_labels = trn_images[trn_indices], trn_labels[trn_indices]
else:
val_images, val_labels = tst_images, tst_labels
trn_images, trn_labels = trn_images[trn_indices], trn_labels[trn_indices]
# specify mini-batch settings
if isinstance(batch_size, int):
batch_size = (batch_size, batch_size, batch_size,)
trn_batch_size, val_batch_size, tst_batch_size = batch_size
if len(val_images) % val_batch_size != 0:
warnings.warn(f'val_batch_size={val_batch_size} cannot utilize all {len(val_images)} examples.')
if len(tst_images) % tst_batch_size != 0:
warnings.warn(f'tst_batch_size={tst_batch_size} cannot utilize all {len(tst_images)} examples.')
trn_steps_per_epoch = len(trn_images) // trn_batch_size
val_steps_per_epoch = len(val_images) // val_batch_size
tst_steps_per_epoch = len(tst_images) // tst_batch_size
# build dataloaders
dataloaders = {
'dataloader': functools.partial(
_build_dataloader,
images = trn_images,
labels = trn_labels,
batch_size = trn_batch_size,
steps_per_epoch = trn_steps_per_epoch,
shuffle = True,
transform = jax.jit(jax.vmap(DATA_AUGMENTATION[cfg.DATASETS.DATA_AUGMENTATION][name])),
),
'trn_loader': functools.partial(
_build_dataloader,
images = trn_images,
labels = trn_labels,
batch_size = trn_batch_size,
steps_per_epoch = trn_steps_per_epoch,
shuffle = False,
transform = jax.jit(jax.vmap(ToTensorTransform())),
),
'val_loader': functools.partial(
_build_dataloader,
images = val_images,
labels = val_labels,
batch_size = val_batch_size,
steps_per_epoch = val_steps_per_epoch,
shuffle = False,
transform = jax.jit(jax.vmap(ToTensorTransform())),
),
'tst_loader': functools.partial(
_build_dataloader,
images = tst_images,
labels = tst_labels,
batch_size = tst_batch_size,
steps_per_epoch = tst_steps_per_epoch,
shuffle = False,
transform = jax.jit(jax.vmap(ToTensorTransform())),
),
'trn_steps_per_epoch': trn_steps_per_epoch,
'val_steps_per_epoch': val_steps_per_epoch,
'tst_steps_per_epoch': tst_steps_per_epoch,
}
return dataloaders
|
b878f0553c2491e9cc208ba500b02bc2d0f2226c
| 3,640,837
|
import functools
def get_activity(
iterator,
*,
perspective,
garbage_class,
dtype=np.bool,
non_sil_alignment_fn=None,
debug=False,
use_ArrayIntervall=False,
):
"""
perspective:
Example:
'global_worn' -- global perspective for worn ('P')
'worn' -- return perspective for each speaker ('P01', ...)
'array' -- return perspective for each array ('U01', ...)
garbage_class: True, False, None
True: garbage_class is always one
False: garbage_class is always zero
None: the number of classes is 4 and not 5
non_sil_alignment_fn: None or a function with the signature:
value = non_sil_alignment_fn(ex, perspective_mic_array)
where
ex is one example in iterator
perspective_mic_array is in ['U01', ..., 'P01', ..., 'P']
value is a 1d array indicating if at a sample the source is active
or not
use_ArrayIntervall: ArrayIntervall is a special datatype to reduce
memory usage
returns:
dict[session_id][mic_perspective][speaker_id] = array(dtype=bool)
session_id e.g.: 'S02', ...
mic_perspective e.g.: 'P', 'P05', 'U01', ...
speaker_id e.g.: 'P05', ...
>>> from pb_chime5.database.chime5 import Chime5
>>> import textwrap
>>> db = Chime5()
>>> def display_activity(activity):
... print(tuple(activity.keys()))
... print(' '*2, tuple(activity['S02'].keys()))
... print(' '*4, tuple(activity['S02']['P'].keys()))
... print(' '*6, activity['S02']['P']['P05'])
... print(' '*6, activity['S02']['P']['Noise'])
>>> def display_activity(activity, indent=0):
... indent_print = lambda x: print(textwrap.indent(str(x), ' '*indent))
... if isinstance(activity, dict):
... for i, (k, v) in enumerate(activity.items()):
... if i == 0 or k in ['Noise']:
... indent_print(f'{k}:')
... display_activity(v, indent=indent+2)
... else:
... indent_print(f'{k}: ...')
... else:
... indent_print(activity)
>>> activity = get_activity(db.get_datasets('S02'), perspective='global_worn', garbage_class=True)
>>> display_activity(activity)
S02:
P:
P05:
[False False False ... False False False]
P06: ...
P07: ...
P08: ...
Noise:
[ True True True ... True True True]
>>> activity = get_activity(db.get_datasets('S02'), perspective='worn', garbage_class=False)
>>> display_activity(activity)
S02:
P05:
P05:
[False False False ... False False False]
P06: ...
P07: ...
P08: ...
Noise:
[False False False ... False False False]
P06: ...
P07: ...
P08: ...
>>> activity = get_activity(db.get_datasets('S02'), perspective='array', garbage_class=None)
>>> display_activity(activity)
S02:
U01:
P05:
[False False False ... False False False]
P06: ...
P07: ...
P08: ...
U02: ...
U03: ...
U04: ...
U05: ...
U06: ...
"""
dict_it_S = iterator.groupby(lambda ex: ex['session_id'])
# Dispatcher is a dict with better KeyErrors
all_acitivity = Dispatcher()
for session_id, it_S in dict_it_S.items():
if perspective == 'worn':
perspective_tmp = mapping.session_to_speakers[session_id]
elif perspective == 'global_worn':
perspective_tmp = ['P'] # Always from target speaker
elif perspective == 'array':
# The mapping considers missing arrays
perspective_tmp = mapping.session_to_arrays[session_id]
else:
perspective_tmp = perspective
if not isinstance(perspective_tmp, (tuple, list)):
perspective_tmp = [perspective_tmp, ]
speaker_ids = mapping.session_to_speakers[session_id]
if use_ArrayIntervall:
assert dtype == np.bool, dtype
zeros = ArrayIntervall
def ones(shape):
arr = zeros(shape=shape)
arr[:] = 1
return arr
else:
zeros = functools.partial(np.zeros, dtype=dtype)
ones = functools.partial(np.ones, dtype=dtype)
all_acitivity[session_id] = Dispatcher({
p: Dispatcher({
s: zeros(shape=[mapping.session_array_to_num_samples[f'{session_id}_{p}']])
# s: ArrayIntervall(shape=[num_samples])
for s in speaker_ids
})
for p in perspective_tmp
})
if garbage_class is True:
for p in perspective_tmp:
num_samples = mapping.session_array_to_num_samples[
f'{session_id}_{p}']
all_acitivity[session_id][p]['Noise'] = ones(
shape=[num_samples],
)
elif garbage_class is False:
for p in perspective_tmp:
num_samples = mapping.session_array_to_num_samples[
f'{session_id}_{p}']
all_acitivity[session_id][p]['Noise'] = zeros(
shape=[num_samples]
)
elif garbage_class is None:
pass
elif isinstance(garbage_class, int) and garbage_class > 0:
for noise_idx in range(garbage_class):
for p in perspective_tmp:
num_samples = mapping.session_array_to_num_samples[
f'{session_id}_{p}'
]
all_acitivity[session_id][p][f'Noise{noise_idx}'] = ones(
shape=[num_samples]
)
else:
raise ValueError(garbage_class)
missing_count = 0
for ex in it_S:
for pers in perspective_tmp:
if ex['transcription'] == '[redacted]':
continue
target_speaker = ex['speaker_id']
# example_id = ex['example_id']
if pers == 'P':
perspective_mic_array = target_speaker
else:
perspective_mic_array = pers
if perspective_mic_array.startswith('P'):
start = ex['start']['worn'][perspective_mic_array]
end = ex['end']['worn'][perspective_mic_array]
else:
if not perspective_mic_array in ex['audio_path']['observation']:
continue
start = ex['start']['observation'][perspective_mic_array]
end = ex['end']['observation'][perspective_mic_array]
if non_sil_alignment_fn is None:
value = 1
else:
value = non_sil_alignment_fn(ex, perspective_mic_array)
if value is 1:
missing_count += 1
if debug:
all_acitivity[session_id][pers][target_speaker][start:end] += value
else:
all_acitivity[session_id][pers][target_speaker][start:end] = value
if missing_count > len(it_S) // 2:
raise RuntimeError(
f'Something went wrong.\n'
f'Expected {len(it_S) * len(perspective_tmp)} times a '
f'finetuned annotation for session {session_id}, but '
f'{missing_count} times they are missing.\n'
f'Expect that at least {len(it_S) // 2} finetuned annotations '
f'are available, when non_sil_alignment_fn is given.\n'
f'Otherwise assume something went wrong.'
)
del it_S
return all_acitivity
|
4bb771f80beba242f59b54879563c4462d5ca0c6
| 3,640,838
|
import re
def dropNested(text, openDelim, closeDelim):
"""
A matching function for nested expressions, e.g. namespaces and tables.
"""
openRE = re.compile(openDelim, re.IGNORECASE)
closeRE = re.compile(closeDelim, re.IGNORECASE)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -= 1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text)
|
dd77b86533dd43bcecf2ef944a61b59c4150aaae
| 3,640,839
|
import os
def is_rotational(block_device: str) -> bool:
"""
Checks if given block device is "rotational" (spinning rust) or
solid state block device.
:param block_device: Path to block device to check
:return: True if block device is a rotational block device,
false otherwise
"""
base_name = os.path.basename(block_device)
rotational_file = f'/sys/block/{base_name}/queue/rotational'
if not os.path.exists(rotational_file):
# Maybe given path is not the base block device
# -> Get disk for given block devices and try again
disk = base_disk_for_block_device(block_device)
if disk != block_device:
return is_rotational(disk)
raise Exception('Could not find file {}!'.format(rotational_file))
with open(rotational_file, 'r') as f_obj:
content = f_obj.read(1)
if content == '1':
return True
if content == '0':
return False
raise Exception('Unknown value in {}!'.format(rotational_file))
|
caf6203160e637ab39152d84d2fff06e79fc3083
| 3,640,840
|
import sys
import traceback
def format_exc(limit=None):
"""Like print_exc() but return a string. Backport for Python 2.3."""
try:
etype, value, tb = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
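# Usage sketch (illustrative): capture a traceback as a string from inside an except block,
# assuming the format_exc backport above is in scope.
def _demo_format_exc():
    try:
        1 / 0
    except ZeroDivisionError:
        return format_exc()

print(_demo_format_exc())  # full ZeroDivisionError traceback as a single string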
|
29bdbfbff4a1ce2d399a95c3a4685467a4022eaf
| 3,640,841
|
import random
try:
    # guarded import: HAVE_MATPLOTLIB is assumed to be defined this way upstream
    from matplotlib import cm
    HAVE_MATPLOTLIB = True
except ImportError:
    HAVE_MATPLOTLIB = False
def make_dpl_from_construct(construct,showlabels=None):
""" This function creats a dictionary suitable for
input into dnaplotlib for plotting constructs.
Inputs:
construct: a DNA_construct object
showlabels: list of part types to show labels for. For example, [AttachmentSite,Terminator]"""
#TODO make showlabels more general
if(showlabels is None):
showlabels = []
outdesign = []
if(HAVE_MATPLOTLIB):
cmap = cm.Set1(range(len(construct.parts_list)*2))
pind = 0
for part in construct.parts_list:
pcolor = part.color
pcolor2 = part.color2
if(HAVE_MATPLOTLIB):
if(type(pcolor)==int):
c1 = cmap[pcolor][:-1]
else:
c1 = cmap[pind][:-1]
if(type(pcolor2)==int):
c2 = cmap[pcolor2][:-1]
else:
c2 = cmap[random.choice(list(range(len(construct.parts_list))))][:-1]
showlabel = False
if(type(part) in showlabels):
showlabel = True
outdesign+=make_dpl_from_part(part,direction = part.direction=="forward",\
color=c1,color2 =c2 ,showlabel=showlabel)
pind+=1
return outdesign
|
2391ccb2e5ee73e083c116d369fbffeac964081d
| 3,640,842
|
import yaml
import os
def load_config(path: str, env=None):
"""
Load a YAML config file and replace variables from the environment
Args:
path (str): The resource path in the form of `dir/file` or `package:dir/file`
Returns:
The configuration tree with variable references replaced, or `False` if the
file is not found
"""
try:
with load_resource(path) as resource:
cfg = yaml.load(resource, Loader=yaml.FullLoader)
except FileNotFoundError:
return False
cfg = expand_tree_variables(cfg, env or os.environ)
return cfg
|
ffe9944194bfe3e5be1ce4dbf0c9f1073c2d26f4
| 3,640,843
|
def update_service(
*, db_session: Session = Depends(get_db), service_id: PrimaryKey, service_in: ServiceUpdate
):
"""Update an existing service."""
service = get(db_session=db_session, service_id=service_id)
if not service:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=[{"msg": "A service with this id does not exist."}],
)
try:
service = update(db_session=db_session, service=service, service_in=service_in)
except IntegrityError:
raise ValidationError(
[ErrorWrapper(ExistsError(msg="A service with this name already exists."), loc="name")],
model=ServiceUpdate,
)
return service
|
0471c4bd496004a9c1cc5af4d806bd8109f62ca7
| 3,640,844
|
def import_flow_by_ref(flow_strref):
"""Return flow class by flow string reference."""
app_label, flow_path = flow_strref.split('/')
return import_string('{}.{}'.format(get_app_package(app_label), flow_path))
|
c2f9fe0b9ccc409b3bd64b6691ee34ca8d430ed6
| 3,640,845
|
def _escape_value(value):
"""Escape a value."""
value = value.replace(b"\\", b"\\\\")
value = value.replace(b"\n", b"\\n")
value = value.replace(b"\t", b"\\t")
value = value.replace(b'"', b'\\"')
return value
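# Quick sanity checks for the escaping above (assumes _escape_value as defined here):
# backslashes, newlines, tabs and double quotes are escaped, other bytes pass through.
assert _escape_value(b'a\\b') == b'a\\\\b'
assert _escape_value(b'line1\nline2\t"quoted"') == b'line1\\nline2\\t\\"quoted\\"'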
|
b58a3236c0686c7fb6a33859986123dc2b8089cc
| 3,640,846
|
from typing import Iterable
def find(*objects: Iterable[object]):
"""Sometimes you know the inputs and outputs for a procedure, but you don't remember the name.
methodfinder.find tries to find the name.
>>> import methodfinder
>>> import itertools
>>> methodfinder.find([1,2,3]) == 6
sum([1, 2, 3])
>>> methodfinder.find('1 + 1') == 2
eval('1 + 1')
>>> methodfinder.find(0.0) == 1.0
math.cos(0.0)
math.cosh(0.0)
math.erfc(0.0)
math.exp(0.0)
>>> methodfinder.find(0) == 1
0.denominator
math.factorial(0)
>>> import numpy as np
>>> methodfinder.find(np, 3) == np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
numpy.eye(3)
numpy.identity(3)
"""
# Just call the wrapper function so that the == sign can be used to specify
# the desired result
return _Foo(objects)
|
fcfc3c4d0e6d72b6d9f1d7b7bfd46146d8bbf027
| 3,640,847
|
def join_returns(cfg, arg_names, function_ast=None):
"""Joins multiple returns in a CFG into a single block
Given a CFG with multiple return statements, this function will replace the
returns by gotos to a common join block.
"""
join_args = [ir.Argument(function_ast, info=n, name=n) for n in arg_names]
join = ir.Block(function_ast, join_args, info="MERGE RETURNS")
returns = list(of_type[ir.Return](cfg.graph.nodes))
if returns:
cfg += CfgSimple.statement(join)
# Replace returns with gotos to joining block
for ret in returns:
assert len(ret.returns) == len(arg_names), (ret.returns, arg_names)
goto = ir.Goto(ret.ast_node, join, ret.returns)
cfg = cfg.replace(ret, goto)
cfg = cfg + (goto, join)
return cfg, join_args
|
0a89c2c6df39e0693597358f01704619cbd1d0bd
| 3,640,848
|
def get_all():
"""
Returns list of all tweets from this server.
"""
return jsonify([t.to_dict() for t in tweet.get_all()])
|
a8803f46ca4c32ea3a0f607a7a37d23a5d97c316
| 3,640,849
|
from carbonplan_trace.v1.glas_preprocess import select_valid_area # avoid circular import
def proportion_sig_beg_to_start_of_ground(ds):
"""
The total energy from signal beginning to the start of the ground peak,
normalized by total energy of the waveform. Ground peak assumed to be the last peak.
"""
ds = get_dist_metric_value(ds, metric='start_of_ground_peak_dist')
# the processed wf is from sig beg to sig end, select sig beg to ground peak
sig_beg_to_ground = select_valid_area(
bins=ds.rec_wf_sample_dist,
wf=ds.processed_wf,
signal_begin_dist=ds.sig_begin_dist,
signal_end_dist=ds.start_of_ground_peak_dist,
)
# make sure dimensions matches up
dims = ds.processed_wf.dims
sig_beg_to_ground = sig_beg_to_ground.transpose(dims[0], dims[1])
# total energy of the smoothed waveform
total = ds.processed_wf.sum(dim="rec_bin")
return sig_beg_to_ground.sum(dim="rec_bin") / total
|
73fbbd90c8511433bcdae225daea5b7cba9e8297
| 3,640,850
|
import requests
def post_file(url, file_path, username, password):
"""Post an image file to the classifier."""
kwargs = {}
if username:
kwargs['auth'] = requests.auth.HTTPBasicAuth(username, password)
file = {'file': open(file_path, 'rb')}
response = requests.post(
url,
files=file,
**kwargs
)
if response.status_code == HTTP_OK:
return response
return None
|
b615e5a766e6ca5d0427bfcdbd475e1b6cd5b9bb
| 3,640,851
|
import numpy as np
def bias_init_with_prob(prior_prob):
    """Initialize conv/fc bias value according to a given prior probability."""
bias_init = float(-np.log((1 - prior_prob) / prior_prob))
return bias_init
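# Numeric check (illustrative) of the prior-probability bias trick above: with a prior of 1%
# the bias starts near -log(99) ≈ -4.595, so sigmoid(bias) ≈ 0.01 at the start of training.
print(round(bias_init_with_prob(0.01), 3))  # -4.595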
|
533f777df5e8346ab2eadf5f366a275bab099aec
| 3,640,852
|
def parse_number(s, start_position):
"""
If an integer or float begins at the specified position in the
given string, then return a tuple C{(val, end_position)}
containing the value of the number and the position where it ends.
Otherwise, raise a L{ParseError}.
"""
m = _PARSE_NUMBER_VALUE.match(s, start_position)
if not m or not (m.group(1) or m.group(2)):
raise ParseError('number', start_position)
if m.group(2): return float(m.group()), m.end()
else: return int(m.group()), m.end()
|
854e9290b5853e525ea1ba3f658f59cea37b117c
| 3,640,853
|
import numpy as np
import anndata
def training_data_provider(train_s, train_t):
"""
Concatenates two lists containing adata files
# Parameters
train_s: `~anndata.AnnData`
Annotated data matrix.
train_t: `~anndata.AnnData`
Annotated data matrix.
# Returns
Concatenated Annotated data matrix.
# Example
```python
import scgen
import anndata
train_data = anndata.read("./data/train_kang.h5ad")
test_data = anndata.read("./data/test.h5ad")
whole_data = training_data_provider(train_data, test_data)
```
"""
train_s_X = []
train_s_diet = []
train_s_groups = []
for i in train_s:
train_s_X.append(i.X.A)
train_s_diet.append(i.obs["condition"].tolist())
train_s_groups.append(i.obs["cell_type"].tolist())
train_s_X = np.concatenate(train_s_X)
temp = []
for i in train_s_diet:
temp = temp + i
train_s_diet = temp
temp = []
for i in train_s_groups:
temp = temp + i
train_s_groups = temp
train_t_X = []
train_t_diet = []
train_t_groups = []
for i in train_t:
train_t_X.append(i.X.A)
train_t_diet.append(i.obs["condition"].tolist())
train_t_groups.append(i.obs["cell_type"].tolist())
temp = []
for i in train_t_diet:
temp = temp + i
train_t_diet = temp
temp = []
for i in train_t_groups:
temp = temp + i
train_t_groups = temp
train_t_X = np.concatenate(train_t_X)
train_real = np.concatenate([train_s_X, train_t_X]) # concat all
train_real = anndata.AnnData(train_real)
train_real.obs["condition"] = train_s_diet + train_t_diet
train_real.obs["cell_type"] = train_s_groups + train_t_groups
return train_real
|
35016ecb6f57e2814dacc6e36408882025311bb9
| 3,640,854
|
import warnings
import numpy as np
def _build_trees(base_estimator, estimator_params, params, X, y, sample_weight,
tree_state, n_trees, verbose=0, class_weight=None,
bootstrap=False):
""" Fit a single tree in parallel """
tree = _make_estimator(
_get_value(base_estimator), estimator_params,
params=params, random_state=tree_state
)
if bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree_state, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
|
43259d71a5d666e371c90e15c1ca61241fbee8e0
| 3,640,855
|
def select_privilege():
"""Provide a select Privilege model for testing."""
priv = Privilege(
database_object=DatabaseObject(name="one_table", type=DatabaseObjectType.TABLE),
action=Action.SELECT,
)
return priv
|
721f8edd0b6777a082682e377a80c73f8dc2bb00
| 3,640,856
|
def plot_gaia_sources_on_survey(
tpf,
target_gaiaid,
gaia_sources=None,
fov_rad=None,
depth=0.0,
kmax=1.0,
sap_mask="pipeline",
survey="DSS2 Red",
verbose=True,
ax=None,
outline_color="C6", # pink
figsize=None,
pix_scale=TESS_pix_scale,
**mask_kwargs,
):
"""Plot (superpose) Gaia sources on archival image
Parameters
----------
target_coord : astropy.coordinates
target coordinate
gaia_sources : pd.DataFrame
gaia sources table
fov_rad : astropy.unit
FOV radius
survey : str
image survey; see from astroquery.skyview import SkyView;
SkyView.list_surveys()
verbose : bool
print texts
ax : axis
subplot axis
outline_color : str
aperture outline color (default=C6)
kwargs : dict
keyword arguments for aper_radius, percentile
Returns
-------
ax : axis
subplot axis
TODO: correct for proper motion difference between
survey image and gaia DR2 positions
"""
assert target_gaiaid is not None
ny, nx = tpf.flux.shape[1:]
if fov_rad is None:
diag = np.sqrt(nx ** 2 + ny ** 2)
fov_rad = (0.4 * diag * pix_scale).to(u.arcmin)
target_coord = SkyCoord(ra=tpf.ra * u.deg, dec=tpf.dec * u.deg)
if gaia_sources is None:
print(
"Querying Gaia sometimes hangs. Provide `gaia_sources` if you can."
)
gaia_sources = Catalogs.query_region(
target_coord, radius=fov_rad, catalog="Gaia", version=2
).to_pandas()
assert len(gaia_sources) > 1, "gaia_sources contains single entry"
# make aperture mask
mask = parse_aperture_mask(tpf, sap_mask=sap_mask, **mask_kwargs)
maskhdr = tpf.hdu[2].header
# make aperture mask outline
contour = np.zeros((ny, nx))
contour[np.where(mask)] = 1
contour = np.lib.pad(contour, 1, PadWithZeros)
highres = zoom(contour, 100, order=0, mode="nearest")
extent = np.array([-1, nx, -1, ny])
if verbose:
print(
f"Querying {survey} ({fov_rad:.2f} x {fov_rad:.2f}) archival image"
)
# -----------create figure---------------#
if ax is None:
# get img hdu for subplot projection
try:
hdu = SkyView.get_images(
position=target_coord.icrs.to_string(),
coordinates="icrs",
survey=survey,
radius=fov_rad,
grid=False,
)[0][0]
except Exception:
errmsg = "survey image not available"
raise FileNotFoundError(errmsg)
fig = pl.figure(figsize=figsize)
# define scaling in projection
ax = fig.add_subplot(111, projection=WCS(hdu.header))
# plot survey img
if str(target_coord.distance) == "nan":
target_coord = SkyCoord(ra=target_coord.ra, dec=target_coord.dec)
nax, hdu = plot_finder_image(
target_coord, ax=ax, fov_radius=fov_rad, survey=survey, reticle=False
)
imgwcs = WCS(hdu.header)
mx, my = hdu.data.shape
# plot mask
_ = ax.contour(
highres,
levels=[0.5],
extent=extent,
origin="lower",
linewidths=[3],
colors=outline_color,
transform=ax.get_transform(WCS(maskhdr)),
)
idx = gaia_sources["source_id"].astype(int).isin([target_gaiaid])
target_gmag = gaia_sources.loc[idx, "phot_g_mean_mag"].values[0]
for index, row in gaia_sources.iterrows():
marker, s = "o", 100
r, d, mag, id = row[["ra", "dec", "phot_g_mean_mag", "source_id"]]
pix = imgwcs.all_world2pix(np.c_[r, d], 1)[0]
if int(id) != int(target_gaiaid):
gamma = 1 + 10 ** (0.4 * (mag - target_gmag))
if depth > kmax / gamma:
# too deep to have originated from secondary star
edgecolor = "C1"
alpha = 1 # 0.5
else:
# possible NEBs
edgecolor = "C3"
alpha = 1
else:
s = 200
edgecolor = "C2"
marker = "s"
alpha = 1
nax.scatter(
pix[0],
pix[1],
marker=marker,
s=s,
edgecolor=edgecolor,
alpha=alpha,
facecolor="none",
)
# orient such that north is up; left is east
ax.invert_yaxis()
if hasattr(ax, "coords"):
ax.coords[0].set_major_formatter("dd:mm")
ax.coords[1].set_major_formatter("dd:mm")
# set img limits
pl.setp(
nax,
xlim=(0, mx),
ylim=(0, my),
title="{0} ({1:.2f}' x {1:.2f}')".format(survey, fov_rad.value),
)
return ax
|
e90d77cfd1f59dda5db8d6a4651acac0aeacc81e
| 3,640,857
|
def getLinkToSong(res):
"""
    getLinkToSong(res): get the Spotify link of a song in the playlist response
    :param: res: information about the playlist -> getResponse(pl_id)
    :returns: the Spotify URL (str) of the first track in the playlist
"""
return res['items'][0]['track']['external_urls']['spotify']
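# Illustrative call with a hand-built response fragment; only the keys the function
# touches are filled in (field names follow the Spotify playlist-items schema it assumes).
fake_res = {
    'items': [
        {'track': {'external_urls': {'spotify': 'https://open.spotify.com/track/abc123'}}},
    ]
}
print(getLinkToSong(fake_res))  # https://open.spotify.com/track/abc123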
|
e59fe598ed900a90dcf5376d265eedfc51d8e0a7
| 3,640,858
|
import numpy as np
from scipy.stats import entropy  # assumed provider of the entropy() helper used below
def entropy_sampling(classifier, X, n_instances=1):
"""Entropy sampling query strategy, uses entropy of all probabilities as score.
This strategy selects the samples with the highest entropy in their prediction
probabilities.
Args:
classifier: The classifier for which the labels are to be queried.
X: The pool of samples to query from.
n_instances: Number of samples to be queried.
Returns:
The indices of the instances from X chosen to be labelled;
the instances from X chosen to be labelled.
"""
classwise_uncertainty = _get_probability_classes(classifier, X)
entropies = np.transpose(entropy(np.transpose(classwise_uncertainty)))
index = np.flip(np.argsort(entropies))[:n_instances]
return index, entropies[index]
|
ffc465a3e8a517e692927f051dea0162d3191cf9
| 3,640,859
|
def browser(browserWsgiAppS):
"""Fixture for testing with zope.testbrowser."""
assert icemac.addressbook.testing.CURRENT_CONNECTION is not None, \
"The `browser` fixture needs a database fixture like `address_book`."
return icemac.ab.calexport.testing.Browser(wsgi_app=browserWsgiAppS)
|
a256b814a08833eec88eb6289b6c5a57f17e7d84
| 3,640,860
|
def parse_playing_now_message(playback):
"""parse_playing_now_message
:param playback: object
:returns str
"""
track = playback.get("item", {}).get("name", False)
artist = playback.get("item", {}).get("artists", [])
artist = map(lambda a: a.get("name", ""), artist)
artist = ", ".join(list(artist))
message = "Playing '%s' from '%s' now!" % (track, artist)
if not track:
message = "Could not get current track!"
return message
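# Usage sketch with a hand-crafted playback payload (keys mirror the Spotify
# "currently playing" response the function expects).
playback = {"item": {"name": "Bohemian Rhapsody", "artists": [{"name": "Queen"}]}}
print(parse_playing_now_message(playback))  # Playing 'Bohemian Rhapsody' from 'Queen' now!
print(parse_playing_now_message({}))        # Could not get current track!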
|
88d7c35257c2aaee44d1bdc1ec06640603c6a286
| 3,640,861
|
from datetime import datetime
import json
def test_in_execution(test_plan_uuid):
"""
Executor->Curator
Test in execution: executor responses with the Test ID that can be used in a future test cancellation
{ "test-id": <test_id> }(?)
:param test_plan_uuid:
:return:
"""
# app.logger.debug(f'Callback received {request.path}, contains {request.get_data()}, '
# f'Content-type: {request.headers["Content-type"]}')
_LOG.debug(f'Callback received {request.path}, contains {request.get_data()}, '
f'Content-type: {request.headers["Content-type"]}')
try:
executor_payload = request.get_json()
context['test_preparations'][test_plan_uuid]['updated_at'] = datetime.utcnow().replace(microsecond=0)
test_index = next(
(index for (index, d) in
enumerate(context['test_preparations'][test_plan_uuid]['augmented_descriptors'])
if d['test_uuid'] == executor_payload['test_uuid']), None)
(context['test_preparations'][test_plan_uuid]['augmented_descriptors']
[test_index]['test_status']) = executor_payload['status'] if 'status' in executor_payload.keys() \
else 'RUNNING'
return make_response('{}', OK, {'Content-Type': 'application/json'})
except Exception as e:
return make_response(json.dumps({'exception': e.args}), INTERNAL_ERROR, {'Content-Type': 'application/json'})
|
147fd56af41c232fa874332e704c9e043f368d5c
| 3,640,862
|
from io import BytesIO
import cv2
import numpy as np
import requests
from PIL import Image
def load_remote_image(image_url):
"""Loads a remotely stored image into memory as an OpenCV/Numpy array
Args:
image_url (str): the URL of the image
Returns:
numpy ndarray: the image in OpenCV format (a [rows, cols, 3] BGR numpy
array)
"""
response = requests.get(image_url, stream=True)
img = Image.open(BytesIO(response.content))
image = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
return image
|
a760e76df679cc15788332df02e5470ec5b60ec2
| 3,640,863
|
def evlt(inp : str) -> int:
""" Evaluates the passed string and returns the value if
successful, otherwise raises an error """
operand = [] # stack for operands
operator = [] # stack for operators + parentheses
i = 0 # loop variable, cannot do range because have to increment dynamically
if inp.count('(') != inp.count(')'):
raise TooManyBracketsException()
while i < len(inp): # while not EOF
if inp[i].isdigit(): # if character is a digit
num = ""
while i < len(inp) and inp[i].isdigit(): # Logic to fetch an entire number,
num += inp[i]
i += 1
if int(num) >= 2**31 - 1:
raise OverflowError()
operand.append(int(num)) # push operand to stack
elif inp[i] == '(': # if opening brace, push to stack
operator.append(inp[i])
i += 1
elif inp[i] in operators:
try: # if operator, pop all operators having a higher precedence
while len(operator) and precedence(operator[-1]) >= precedence(inp[i]):
b = operand.pop()
a = operand.pop()
op = operator.pop()
operand.append(evlexp(a, b, op)) # evaluate them with the last 2 values in operand stack and append to itself
                operator.append(inp[i]) # append operator to operator stack
i += 1
except:
raise TooManyOperatorsException
elif inp[i] == ')': # if closing brace, evaluate all operators in between
            while len(operator) != 0 and operator[-1] != '(': # while the operator stack is non-empty and its top is not an opening bracket
b = operand.pop()
a = operand.pop()
op = operator.pop()
operand.append(evlexp(a, b, op)) # pop the operator in order and evaluate and push to operand stack
operator.pop() # pop (
i += 1
else:
i += 1
continue
while len(operator) != 0: # while operator is not empty
op = operator.pop()
b = operand.pop()
a = operand.pop()
operand.append(evlexp(a, b, op)) # pop and evaluate operators till its empty and append to operand
# if there are no more elements in top of stack, and only one (possibly the answer)
if len(operand) == 1:
return operand[-1]
# if there's more than one element and no more operators, something wrong!
else:
raise TooManyOperandsException()
|
2c0ea8781e969f44fa0575c967366d69a19010eb
| 3,640,864
|
import os
import argparse
def is_dir(dirname):
"""Checks if a path is an actual directory"""
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
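# Typical use (illustrative) is as an argparse `type=` validator, so bad paths are
# rejected at parse time; "." always exists, so this parses fine.
demo_parser = argparse.ArgumentParser(description="is_dir demo")
demo_parser.add_argument("workdir", type=is_dir, help="an existing directory")
print(demo_parser.parse_args(["."]).workdir)  # .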
|
fc5f03f18ae6f37520dbff6c3143699ac234f6b6
| 3,640,865
|
def _create_preactivation_hook(activations):
"""
when we add this hook to a model's layer, it is called whenever
it is about to make the forward pass
"""
def _linear_preactivation_hook(module, inputs):
activations.append(inputs[0].cpu())
return _linear_preactivation_hook
|
7f4cc10f7e051ed8e30556ee054a65c4878f6c0f
| 3,640,866
|
import importlib
def import_by_path(path):
"""
Given a dotted/colon path, like project.module:ClassName.callable,
returns the object at the end of the path.
"""
module_path, object_path = path.split(":", 1)
target = importlib.import_module(module_path)
for bit in object_path.split("."):
target = getattr(target, bit)
return target
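# Example: resolve a standard-library callable from its dotted/colon path.
join = import_by_path("os.path:join")
print(join("a", "b"))  # a/b (a\b on Windows)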
|
939b3426f36b3a188f7a48e21551807d42cfa254
| 3,640,867
|
from collections import defaultdict
def ordered_links(d, k0, k1):
"""
find ordered links starting from the link (k0, k1)
Parameters
==========
d : dict for the graph
k0, k1: adjacents nodes of the graphs
Examples
========
>>> from active_nodes import ordered_links
>>> d = {0:[1,4], 1:[0,2], 2:[1,3], 3:[2,4], 4:[0,3]}
>>> ordered_links(d, 0, 1)
[(0, 1), (0, 4), (1, 2), (2, 3), (3, 4)]
"""
assert k0 in d
assert k1 in d[k0]
dx = defaultdict(list)
links = []
_append_link(dx, links, k0, k1)
r = _add_links1(links, d, dx)
while 1:
active = [k for k in dx if 0 < len(dx[k]) < len(d[k])]
if not active:
break
a1 = _short_path_active_nodes_all(d, dx, active)
if a1 is None:
break
a2 = _add_paths(d, dx, links, a1)
return links
|
472e9e7d459e8a574de8edd5272c96b648b50207
| 3,640,868
|
def _exceeded_threshold(number_of_retries: int, maximum_retries: int) -> bool:
"""Return True if the number of retries has been exceeded.
Args:
number_of_retries: The number of retry attempts made already.
maximum_retries: The maximum number of retry attempts to make.
Returns:
True if the maximum number of retry attempts have already been
made.
"""
if maximum_retries is None:
# Retry forever.
return False
return number_of_retries >= maximum_retries
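# Sketch of a typical retry loop around this helper (the loop itself is illustrative,
# not part of the original module): stop after 3 attempts, or loop forever if None.
attempts = 0
while not _exceeded_threshold(attempts, maximum_retries=3):
    print("attempt", attempts)
    attempts += 1  # prints attempts 0, 1 and 2, then stops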
|
c434e1e752856f9160d40e25ac20dde0583e50a6
| 3,640,869
|
import json
def _get_and_check_response(method, host, url, body=None, headers=None, files=None, data=None, timeout=30):
"""Wait for the HTTPS response and throw an exception if the return
status is not OK. Return either a dict based on the
HTTP response in JSON, or if the response is not in JSON format,
return a tuple containing the data in the body and the content type.
"""
url = 'https://' + host + url
# print(url)
if files:
res = https_session.post(url, files=files, data=data, timeout=timeout)
elif method == 'POST':
res = https_session.post(url, body, headers, timeout=timeout)
else:
res = https_session.get(url, timeout=timeout)
res.raise_for_status()
content_type = res.headers.get(CONTENT_TYPE, None)
content = res.text
if content_type and content_type.startswith(CONTENT_TYPE_JSON):
# Quickfix to remove second key in bad API response
key = '"FaxContainerFile":'
if content.count(key) == 2:
content = content[:content.rfind(key)].rstrip(',') + "}"
return json.loads(content)
else:
return (content, content_type)
|
559d85ee8f7d21445e5cfa0acc464b3e9ad98fe3
| 3,640,870
|
def moveb_m_human(agents, self_state, self_name, c, goal):
"""
This method implements the following block-stacking algorithm:
If there's a block that can be moved to its final position, then
do so and call move_blocks recursively. Otherwise, if there's a
block that needs to be moved and can be moved to the table, then
do so and call move_blocks recursively. Otherwise, no blocks need
to be moved.
"""
if self_name in self_state.isReachableBy[c] and c in goal.isOnStack and goal.isOnStack[c] and not self_state.isOnStack[c]:
return [("human_pick", c), ("human_stack",)]
return []
|
f99fd14b2091a1e8d0426dcef57ce33b96fc1352
| 3,640,871
|
import os
def create_initialized_headless_egl_display():
"""Creates an initialized EGL display directly on a device."""
devices = EGL.eglQueryDevicesEXT()
if os.environ.get("EGL_DEVICE_ID", None) is not None:
devices = [devices[int(os.environ["EGL_DEVICE_ID"])]]
for device in devices:
display = EGL.eglGetPlatformDisplayEXT(
EGL.EGL_PLATFORM_DEVICE_EXT, device, None)
if display != EGL.EGL_NO_DISPLAY and EGL.eglGetError() == EGL.EGL_SUCCESS:
# `eglInitialize` may or may not raise an exception on failure depending
# on how PyOpenGL is configured. We therefore catch a `GLError` and also
# manually check the output of `eglGetError()` here.
try:
initialized = EGL.eglInitialize(display, None, None)
except error.GLError:
pass
else:
if initialized == EGL.EGL_TRUE and EGL.eglGetError() == EGL.EGL_SUCCESS:
return display
return EGL.EGL_NO_DISPLAY
|
5a0351936a6a4771869aed046da3f60ce1edd1bb
| 3,640,872
|
import tkinter
def BooleanVar(default, callback=None):
"""
Return a new (initialized) `tkinter.BooleanVar`.
@param default the variable initial value
@param callback function to invoke whenever the variable changes its value
@return the created variable
"""
return _var(tkinter.BooleanVar, default, callback)
|
451a43da5e9eb506fe8b928fa7f4e986c8da6b69
| 3,640,873
|
import re
def parse_header(source):
"""Copied from textgrid.parse_header"""
header = source.readline() # header junk
    m = re.match(r'File type = "([\w ]+)"', header)
if m is None or not m.groups()[0].startswith('ooTextFile'):
raise ValueError('The file could not be parsed as a Praat text file as '
'it is lacking a proper header.')
short = 'short' in m.groups()[0]
file_type = parse_line(source.readline(), short, '') # header junk
t = source.readline() # header junk
return file_type, short
|
ff47296868f93cbe55d15b29a2245ceb14ed5460
| 3,640,874
|
from datetime import datetime
def create_amsterdam(*args):
"""
Creates a new droplet with sensible defaults
Usage:
[name]
Arguments:
name: (optional) name to give the droplet; if missing, current timestamp
"""
    name = datetime.utcnow().strftime("%Y-%m-%dT%H-%M-%S.%f")
try:
name = args[0]
    except IndexError:
pass
return create_small_droplet(name, 'ams2', 'ubuntu-17-04-x64')
|
ed01c67db180894bbcf2cdfee4cd2f45633cc637
| 3,640,875
|
def convert_inp(float_inp):
"""
Convert inp from decimal value (0.000, 0.333, 0.667, etc) to (0.0, 0.1, 0.2) for cleaner display.
:param float float_inp: inning pitching float value
:return:
"""
# Split inp into integer and decimal parts
i_inp, d_inp = divmod(float_inp, 1)
d_inp = d_inp*10
# Look at first digit of decimal part
    # NOTE: due to floating point error, d_inp can be e.g. 2.999... or 5.999..., so 2 and 5 are also accepted below
if int(repr(d_inp)[0]) == 0:
disp_inp = i_inp + 0.0
elif int(repr(d_inp)[0]) == 3 or int(repr(d_inp)[0]) == 2:
disp_inp = i_inp + 0.1
elif int(repr(d_inp)[0]) == 6 or int(repr(d_inp)[0]) == 7 or int(repr(d_inp)[0]) == 5:
disp_inp = i_inp + 0.2
else:
print "{0} innings is not a standard amount!".format(float_inp)
return None
return disp_inp
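# Two quick examples of the innings conversion above (thirds of an inning):
print(convert_inp(5.667))  # 5.2 -> five and two-thirds innings pitched
print(convert_inp(3.333))  # 3.1 -> three and one-third innings pitched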
|
ce0e196ca570b02787842db3ec2efb6ac529685c
| 3,640,876
|
from matplotlib import pyplot as plt
from numbers import Real
from typing import Dict
from typing import Any
from typing import Optional
from typing import Set
from typing import Tuple
import warnings
import numpy as np
def plot(pulse: PulseTemplate,
parameters: Dict[str, Parameter]=None,
sample_rate: Real=10,
axes: Any=None,
show: bool=True,
plot_channels: Optional[Set[ChannelID]]=None,
plot_measurements: Optional[Set[str]]=None,
stepped: bool=True,
maximum_points: int=10**6,
time_slice: Tuple[Real, Real]=None,
**kwargs) -> Any: # pragma: no cover
"""Plots a pulse using matplotlib.
The given pulse template will first be turned into a pulse program (represented by a Loop object) with the provided
parameters. The render() function is then invoked to obtain voltage samples over the entire duration of the pulse which
are then plotted in a matplotlib figure.
Args:
pulse: The pulse to be plotted.
parameters: An optional mapping of parameter names to Parameter
objects.
sample_rate: The rate with which the waveforms are sampled for the plot in
samples per time unit. (default = 10)
axes: matplotlib Axes object the pulse will be drawn into if provided
show: If true, the figure will be shown
plot_channels: If specified only channels from this set will be plotted. If omitted all channels will be.
stepped: If true pyplot.step is used for plotting
plot_measurements: If specified measurements in this set will be plotted. If omitted no measurements will be.
maximum_points: If the sampled waveform is bigger, it is not plotted
time_slice: The time slice to be plotted. If None, the entire pulse will be shown.
kwargs: Forwarded to pyplot. Overwrites other settings.
Returns:
matplotlib.pyplot.Figure instance in which the pulse is rendered
Raises:
PlottingNotPossibleException if the sequencing is interrupted before it finishes, e.g.,
because a parameter value could not be evaluated
all Exceptions possibly raised during sequencing
"""
channels = pulse.defined_channels
if parameters is None:
parameters = dict()
program = pulse.create_program(parameters=parameters,
channel_mapping={ch: ch for ch in channels},
measurement_mapping={w: w for w in pulse.measurement_names})
if program is not None:
times, voltages, measurements = render(program,
sample_rate,
render_measurements=bool(plot_measurements),
time_slice=time_slice)
else:
times, voltages, measurements = np.array([]), dict(), []
duration = 0
if times.size == 0:
warnings.warn("Pulse to be plotted is empty!")
elif times.size > maximum_points:
# todo [2018-05-30]: since it results in an empty return value this should arguably be an exception, not just a warning
warnings.warn("Sampled pulse of size {wf_len} is lager than {max_points}".format(wf_len=times.size,
max_points=maximum_points))
return None
else:
duration = times[-1]
if time_slice is None:
time_slice = (0, duration)
legend_handles = []
if axes is None:
# plot to figure
figure = plt.figure()
axes = figure.add_subplot(111)
if plot_channels is not None:
voltages = {ch: voltage
for ch, voltage in voltages.items()
if ch in plot_channels}
for ch_name, voltage in voltages.items():
label = 'channel {}'.format(ch_name)
if stepped:
line, = axes.step(times, voltage, **{**dict(where='post', label=label), **kwargs})
else:
line, = axes.plot(times, voltage, **{**dict(label=label), **kwargs})
legend_handles.append(line)
if plot_measurements:
measurement_dict = dict()
for name, begin, length in measurements:
if name in plot_measurements:
measurement_dict.setdefault(name, []).append((begin, begin+length))
color_map = plt.cm.get_cmap('plasma')
meas_colors = {name: color_map(i/len(measurement_dict))
for i, name in enumerate(measurement_dict.keys())}
for name, begin_end_list in measurement_dict.items():
for begin, end in begin_end_list:
poly = axes.axvspan(begin, end, alpha=0.2, label=name, edgecolor='black', facecolor=meas_colors[name])
legend_handles.append(poly)
axes.legend(handles=legend_handles)
max_voltage = max((max(channel, default=0) for channel in voltages.values()), default=0)
min_voltage = min((min(channel, default=0) for channel in voltages.values()), default=0)
# add some margins in the presentation
axes.set_xlim(-0.5+time_slice[0], time_slice[1] + 0.5)
voltage_difference = max_voltage-min_voltage
if voltage_difference>0:
axes.set_ylim(min_voltage - 0.1*voltage_difference, max_voltage + 0.1*voltage_difference)
axes.set_xlabel('Time (ns)')
axes.set_ylabel('Voltage (a.u.)')
if pulse.identifier:
axes.set_title(pulse.identifier)
if show:
axes.get_figure().show()
return axes.get_figure()
|
e417989116496e82aa6885f01e4ec864eb3cbd55
| 3,640,877
|
def is_ipv4(line):
"""检查是否是IPv4"""
if line.find("ipv4") < 6: return False
return True
|
bd602f5a9ac74d2bd115fe85c90490556932e068
| 3,640,878
|
def format_ica_lat(ff_lat):
"""
conversão de uma latitude em graus para o formato GGMM.mmmH
@param ff_lat: latitude em graus
@return string no formato GGMM.mmmH
"""
# logger
# M_LOG.info(">> format_ica_lat")
# converte os graus para D/M/S
lf_deg, lf_min, lf_seg = deg2dms(ff_lat)
# converte para GGMM.mmm
lf_deg = (abs(lf_deg) * 100) + lf_min + (lf_seg / 60.)
# return latitude
# return "{}{:4.3f}".format('S' if ff_lat <= 0 else 'N', lf_deg)
return "{:4.3f}{}".format(lf_deg, 'S' if ff_lat <= 0 else 'N')
|
d1e6f111e70ec7bd532e3d14afe3c90dc99cb8f8
| 3,640,879
|
import numpy as np
def loadData (x_file="ass1_data/linearX.csv", y_file="ass1_data/linearY.csv"):
"""
Loads the X, Y matrices.
Splits into training, validation and test sets
"""
X = np.genfromtxt(x_file)
Y = np.genfromtxt(y_file)
Z = [X, Y]
Z = np.c_[X.reshape(len(X), -1), Y.reshape(len(Y), -1)]
np.random.shuffle(Z)
# Partition the data into three sets
size = len(Z)
training_size = int(0.8 * size)
validation_size = int(0.1 * size)
test_size = int(0.1 * size)
training_Z = Z[0:training_size]
validation_Z = Z[training_size:training_size+validation_size]
test_Z = Z[training_size+validation_size:]
return (Z[:,0], Z[:,1])
|
18fb7269f2b853b089494e6021d765d76a148711
| 3,640,880
|
async def retrieve_users():
"""
Retrieve all users in collection
"""
users = []
async for user in user_collection.find():
users.append(user_parser(user))
return users
|
914969f7beb75a9409e370b9e2453c681c37ff42
| 3,640,881
|
import hashlib
def get_file_hash(path):
"""파일 해쉬 구하기."""
hash = None
md5 = hashlib.md5()
with open(path, 'rb') as f:
data = f.read()
md5.update(data)
hash = md5.hexdigest()
info("get_file_hash from {}: {}".format(path, hash))
return hash
|
a024b0002c019ec9bae4fca40e68919c6236b2fa
| 3,640,882
|
import numpy as np
from nipy.labs.spatial_models.discrete_domain import \
    grid_domain_from_binary_array
def apply_repro_analysis(dataset, thresholds=[3.0], method = 'crfx'):
"""
perform the reproducibility analysis according to the
"""
grid_domain_from_binary_array
n_subj, dimx, dimy = dataset.shape
func = np.reshape(dataset,(n_subj, dimx * dimy)).T
var = np.ones((dimx * dimy, n_subj))
domain = grid_domain_from_binary_array(np.ones((dimx, dimy, 1)))
ngroups = 5
sigma = 2.0
csize = 10
niter = 5
verbose = 0
swap = False
kap, clt, pkd = [], [], []
for threshold in thresholds:
kappa, cls, pks = [], [], []
kwargs = {'threshold':threshold, 'csize':csize}
for i in range(niter):
k = voxel_reproducibility(func, var, domain, ngroups,
method, swap, verbose, **kwargs)
kappa.append(k)
cld = cluster_reproducibility(func, var, domain, ngroups, sigma,
method, swap, verbose, **kwargs)
cls.append(cld)
pk = peak_reproducibility(func, var, domain, ngroups, sigma,
method, swap, verbose, **kwargs)
pks.append(pk)
kap.append(np.array(kappa))
clt.append(np.array(cls))
pkd.append(np.array(pks))
kap = np.array(kap)
clt = np.array(clt)
pkd = np.array(pkd)
return kap, clt, pkd
|
cffb667b80b0a049856dc7c11db6d81fd9521f49
| 3,640,883
|
def api_root(request):
"""
Logging root
"""
rtn = dict(
message="Hello, {}. You're at the logs api index.".format(request.user.username),
)
return Response(rtn)
|
b002724baefccdd0cd0dcc324fa23d9902186351
| 3,640,884
|
import pandas as pd
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename)
df.dropna(inplace=True)
df.drop(['long', 'date', 'lat', 'id'], axis=1, inplace=True)
df = df.drop(df.index[df.bedrooms <= 0])
df = df.drop(df.index[df.sqft_living <= 0])
df = df.drop(df.index[df.floors <= 0])
df = df.drop(df.index[df.bathrooms < 0])
df = df.drop(df.index[df.price < 0])
# df = pd.get_dummies(df, columns=['zipcode'])
df['yr_built_or_renovated'] = df[['yr_built', 'yr_renovated']].max(axis=1)
df.drop(['yr_built', 'yr_renovated'], axis=1, inplace=True)
price = df.pop('price')
return df, price
|
412b197274ae4ca06e4cc7f9cd4b7d7b7c5934a0
| 3,640,885
|
def parse_esim_inst(line):
"""Parse a single line of an e-sim trace.
Keep the original line for debugging purposes.
>>> i0 = parse_esim_inst('0x000000 b.l 0x0000000000000058 - pc <- 0x58 - nbit <- 0x0')
>>> ex0 = {'pc': 0, 'AN': False, 'instruction': 'b.l', 'line': '0x000000 b.l 0x0000000000000058 - pc <- 0x58 - nbit <- 0x0'}
>>> i0 == ex0
True
>>> i1 = parse_esim_inst('0x0000b0 --- _epiphany_star strd r2,[r0],+0x1 - memaddr <- 0x2f8, memory <- 0x0, memaddr <- 0x2fc, memory <- 0x0, registers <- 0x300')
>>> ex1 = {'instruction': 'strd', 'line': '0x0000b0 --- _epiphany_star strd r2,[r0],+0x1 - memaddr <- 0x2f8, memory <- 0x0, memaddr <- 0x2fc, memory <- 0x0, registers <- 0x300', 'mem': [(760, 0), (764, 0)], 'pc': 176, 'reg': [768]}
>>> i1 == ex1
True
"""
inst = dict()
tokens = line.split()
if not tokens:
return inst
inst['line'] = line
inst['pc'] = int(tokens[0], 16)
inst['instruction'] = tokens[3] if tokens[1] == '---' else tokens[1]
for index, tok in enumerate(tokens[1:]):
if tok == 'registers' or tok == 'core-registers': # Writing to a register.
value = int(tokens[1:][index + 2].split(',')[0], 16)
if 'reg' in inst:
inst['reg'].append(value)
else:
inst['reg'] = [value]
elif tok == 'memaddr': # Writing to memory.
addr = tokens[1:][index + 2].split(',')[0]
addr = int(addr, 16)
value = tokens[1:][index + 5].split(',')[0]
value = int(value, 16)
if 'mem' in inst:
inst['mem'].append((addr, value))
else:
inst['mem'] = [(addr, value)]
else: # Next tok might be a flag.
if tok in _e_flags.keys():
state = tokens[1:][index + 2].split(',')[0]
inst[_e_flags[tok]] = state == '0x1'
# Otherwise ignore and continue.
return inst
|
c9bc221d8658219edc3759584ece76a56954ccd4
| 3,640,886
|
def getcollength(a):
"""
Get the length of a matrix view object
"""
t=getType(a)
f={'mview_f':vsip_mgetcollength_f,
'mview_d':vsip_mgetcollength_d,
'mview_i':vsip_mgetcollength_i,
'mview_si':vsip_mgetcollength_si,
'mview_uc':vsip_mgetcollength_uc,
'cmview_f':vsip_cmgetcollength_f,
'cmview_d':vsip_cmgetcollength_d,
'mview_bl':vsip_mgetcollength_bl }
    assert t[0] and t[1] in f,'Type <:%s:> not a supported type for getcollength'%t[1]
return f[t[1]](a)
|
fe4b4c69f1631c0e571cd1590aa8eeb8fa5bc7bb
| 3,640,887
|
from unittest.mock import patch
def test_coinbase_query_balances(function_scope_coinbase):
"""Test that coinbase balance query works fine for the happy path"""
coinbase = function_scope_coinbase
def mock_coinbase_accounts(url, timeout): # pylint: disable=unused-argument
response = MockResponse(
200,
"""
{
"pagination": {
"ending_before": null,
"starting_after": null,
"limit": 25,
"order": "desc",
"previous_uri": null,
"next_uri": null
},
"data": [
{
"id": "58542935-67b5-56e1-a3f9-42686e07fa40",
"name": "My Vault",
"primary": false,
"type": "vault",
"currency": "BTC",
"balance": {
"amount": "4.00000000",
"currency": "BTC"
},
"created_at": "2015-01-31T20:49:02Z",
"updated_at": "2015-01-31T20:49:02Z",
"resource": "account",
"resource_path": "/v2/accounts/58542935-67b5-56e1-a3f9-42686e07fa40",
"ready": true
},
{
"id": "2bbf394c-193b-5b2a-9155-3b4732659ede",
"name": "My Wallet",
"primary": true,
"type": "wallet",
"currency": "ETH",
"balance": {
"amount": "39.59000000",
"currency": "ETH"
},
"created_at": "2015-01-31T20:49:02Z",
"updated_at": "2015-01-31T20:49:02Z",
"resource": "account",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede"
},
{
"id": "68542935-67b5-56e1-a3f9-42686e07fa40",
"name": "Another Wallet",
"primary": false,
"type": "vault",
"currency": "BTC",
"balance": {
"amount": "1.230000000",
"currency": "BTC"
},
"created_at": "2015-01-31T20:49:02Z",
"updated_at": "2015-01-31T20:49:02Z",
"resource": "account",
"resource_path": "/v2/accounts/68542935-67b5-56e1-a3f9-42686e07fa40",
"ready": true
}
]
}
""",
)
return response
with patch.object(coinbase.session, 'get', side_effect=mock_coinbase_accounts):
balances, msg = coinbase.query_balances()
assert msg == ''
assert len(balances) == 2
assert balances[A_BTC].amount == FVal('5.23')
assert balances[A_BTC].usd_value == FVal('7.8450000000')
assert balances[A_ETH].amount == FVal('39.59')
assert balances[A_ETH].usd_value == FVal('59.385000000')
warnings = coinbase.msg_aggregator.consume_warnings()
errors = coinbase.msg_aggregator.consume_errors()
assert len(warnings) == 0
assert len(errors) == 0
|
d25d8d31ae5a7c22559c322edeed53404fc179ab
| 3,640,888
|
import numpy as np
import h5py as h5  # h5.File is used below; h5 is assumed to be the h5py module
def process_phase_boundary(fname):
"""
Processes the phase boundary file, computed mean and standard deviations
"""
singlets = []
chem_pot = []
temperatures = []
with h5.File(fname, 'r') as hfile:
for name in hfile.keys():
grp = hfile[name]
singlets.append(np.array(grp["singlets"]))
chem_pot.append(np.array(grp["chem_pot"]))
temperatures.append(np.array(grp["temperatures"]))
max_temp = 0.0
min_temp = 10000000.0
for temp_array in temperatures:
if np.max(temp_array) > max_temp:
max_temp = np.max(temp_array)
if np.min(temp_array) < min_temp:
min_temp = np.min(temp_array)
temp_linspace = np.linspace(min_temp, max_temp, 200)
result = {}
result["chem_pot"] = []
result["std_chem_pot"] = []
result["singlets"] = []
result["std_singlets"] = []
result["num_visits"] = []
result["temperature"] = temp_linspace
for sing_dset in singlets:
if np.any(sing_dset.shape != singlets[0].shape):
msg = "Invalid file! Looks like it contains phase boundary\n"
msg += " data for different systems"
raise ValueError(msg)
num_chem_pots = chem_pot[0].shape[1]
for i in range(num_chem_pots):
mu_averager = DatasetAverager(temp_linspace)
for temps, mu in zip(temperatures, chem_pot):
mu_averager.add_dataset(temps, mu[:,i])
mu_res = mu_averager.get()
result["chem_pot"].append(mu_res["y_values"])
result["std_chem_pot"].append(mu_res["std_y"])
result["num_visits"].append(mu_res["num_visits"])
num_singlets = singlets[0].shape[1]
for i in range(num_chem_pots):
for temp, singl in zip(temperatures, singlets):
singlet_averager = DatasetAverager(temp_linspace)
singlet = []
std_singlet = []
for j in range(num_singlets):
                singlet_averager.add_dataset(temp, singl[:, j, i])
singl_res = singlet_averager.get()
singlet.append(singl_res["y_values"])
std_singlet.append(singl_res["std_y"])
result["singlets"].append(singlet)
result["std_singlets"].append(std_singlet)
return result
|
4e7f01e3265566f03fa4e7e21f13cb48a1777c9c
| 3,640,889
|
from sys import argv
from argparse import ArgumentParser
import os
from logging import DEBUG
def main():
"""Entry point for the check_model script.
Returns
-------
:class:`int`
An integer suitable for passing to :func:`sys.exit`.
"""
desc = """Check actual files against the data model for validity.
"""
parser = ArgumentParser(description=desc, prog=os.path.basename(argv[0]))
parser.add_argument('-d', '--datamodel-dir', dest='desidatamodel',
metavar='DIR',
help='Override the value of DESIDATAMODEL.')
parser.add_argument('-F', '--compare-files', dest='files',
action='store_true',
help='Compare an individual data model to an individual file.')
parser.add_argument('-W', '--warning-is-error', dest='error',
action='store_true',
help='Data model warnings raise exceptions.')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Set log level to DEBUG.')
parser.add_argument('section', metavar='DIR or FILE',
help='Section of the data model or individual model file.')
parser.add_argument('directory', metavar='DIR or FILE',
help='Check files in this top-level directory, or one individual file.')
options = parser.parse_args()
if options.verbose:
log.setLevel(DEBUG)
if 'DESIDATAMODEL' in os.environ:
data_model_root = os.environ['DESIDATAMODEL']
else:
if options.desidatamodel is not None:
data_model_root = options.desidatamodel
else:
log.critical(("DESIDATAMODEL is not defined. " +
"Cannot find data model files!"))
return 1
log.debug("DESIDATAMODEL=%s", data_model_root)
if options.files:
filename = os.path.join(data_model_root, 'doc', options.section)
section = os.path.join(data_model_root, 'doc', options.section.split('/')[0])
log.info("Loading individual data model: %s.", filename)
files = [DataModel(filename, section)]
log.info("Skipping regular expression processing.")
# files[0].get_regexp(options.directory, error=options.error)
log.info("Setting prototype file for %s to %s.", filename, options.directory)
files[0].prototype = options.directory
else:
section = os.path.join(data_model_root, 'doc', options.section)
log.info("Loading data model file in %s.", section)
files = scan_model(section)
log.info("Searching for data files in %s.", options.directory)
files_to_regexp(options.directory, files, error=options.error)
log.info("Identifying prototype files in %s.", options.directory)
collect_files(options.directory, files)
validate_prototypes(files, error=options.error)
return 0
|
c82f3acef0cbca485611cb20b9b1121ea497306b
| 3,640,890
|
import numpy as np
def blackman_window(shape, normalization=1):
"""
Create a 3d Blackman window based on shape.
:param shape: tuple, shape of the 3d window
    :param normalization: value of the integral of the Blackman window
:return: the 3d Blackman window
"""
nbz, nby, nbx = shape
array_z = np.blackman(nbz)
array_y = np.blackman(nby)
array_x = np.blackman(nbx)
blackman2 = np.ones((nbz, nby))
blackman3 = np.ones((nbz, nby, nbx))
for idz in range(nbz):
blackman2[idz, :] = array_z[idz] * array_y
for idy in range(nby):
blackman3[idz, idy] = blackman2[idz, idy] * array_x
blackman3 = blackman3 / blackman3.sum() * normalization
return blackman3
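# Quick check (illustrative) of the separable 3-D window: it has the requested shape
# and sums to the requested normalization.
w = blackman_window((16, 16, 16))
print(w.shape)                   # (16, 16, 16)
print(np.isclose(w.sum(), 1.0))  # True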
|
45ae8132aad01319e1728f0a4355dda4d5d7d145
| 3,640,891
|
def asset_movements_from_dictlist(given_data, start_ts, end_ts):
""" Gets a list of dict asset movements, most probably read from the json files and
a time period. Returns it as a list of the AssetMovement tuples that are inside the time period
"""
returned_movements = list()
for movement in given_data:
if movement['timestamp'] < start_ts:
continue
if movement['timestamp'] > end_ts:
break
returned_movements.append(AssetMovement(
exchange=movement['exchange'],
category=movement['category'],
timestamp=movement['timestamp'],
asset=movement['asset'],
amount=FVal(movement['amount']),
fee=FVal(movement['fee']),
))
return returned_movements
|
b21355ad65c2603559ea00650d4ea6dd2a7d94f0
| 3,640,892
|
def update_work(work_id):
"""
    Route for updating the data of a work in a collection
    :param work_id: ID of the work, retrieved from the work page
    :return: redirect or the update-work.html template
    :rtype: template
"""
if request.method == "GET":
updateWork = Work.query.get(work_id)
return render_template("pages/update-work.html", updateWork=updateWork)
else:
status, data = Work.update_work(
work_id=work_id,
title=request.form.get("title", None),
author=request.form.get("author", None),
date=request.form.get("date", None),
medium=request.form.get("medium", None),
dimensions=request.form.get("dimensions", None),
image=request.form.get("image", None)
)
if status is True:
flash("Modification réussie !", "success")
return redirect("/collections")
else:
flash("Les erreurs suivantes ont été rencontrées : " + ", ".join(data), "danger")
updateWork = Work.query.get(work_id)
return render_template("pages/update-work.html", nom="CollectArt", updateWork=updateWork)
|
aed65c45d53fa9d7b551df6909fdece488f2ab65
| 3,640,893
|
def login_view(request):
"""Login user view"""
if request.method == 'POST':
email = request.POST.get('email')
password = request.POST.get('password')
user = authenticate(request, username=email, password=password)
if user is not None:
login(request, user)
return redirect('/')
else:
messages.info(request, 'Username Or Password is incorrect.')
context = {}
return render(request, 'pages/login.html', context)
|
702a3aa5a90cd5a5386a4fa3b74ab4b36d3748bb
| 3,640,894
|
import torch
import random
import os
import numpy as np
from numpy.random import RandomState  # class referenced by the return annotation below
def set_seed(seed: int) -> RandomState:
""" Method to set seed across runs to ensure reproducibility.
It fixes seed for single-gpu machines.
Args:
seed (int): Seed to fix reproducibility. It should different for
each run
Returns:
RandomState: fixed random state to initialize dataset iterators
"""
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False # set to false for reproducibility, True to boost performance
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
random_state = random.getstate()
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
return random_state
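# Usage sketch (requires a working PyTorch install): draws become reproducible
# across runs that use the same seed.
state = set_seed(42)
print(torch.rand(1), np.random.rand())  # same values on every run with seed 42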
|
af0117e54dd03751d1173f32ae495f1003cadb35
| 3,640,895
|
import numpy as np
def mse(im1, im2):
"""Compute the Mean Squared Error.
Compute the Mean Squared Error between the two images, i.e. sum of the squared difference.
Args:
im1 (ndarray): First array.
im2 (ndarray): Second array.
Returns:
float: Mean Squared Error.
"""
im1 = np.asarray(im1)
im2 = np.asarray(im2)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
err = np.sum((im1.astype("float") - im2.astype("float")) ** 2)
err /= float(im1.shape[0] * im1.shape[1])
return err
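# Worked example: the arrays differ in one pixel by 2, so the MSE is 2**2 / 4 = 1.0.
a = np.array([[0, 0], [0, 0]])
b = np.array([[0, 0], [0, 2]])
print(mse(a, b))  # 1.0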
|
3d14472d3eb211855b53174990c3201bbae49086
| 3,640,896
|
import torch
def bert_text_preparation(text, tokenizer):
"""Preparing the input for BERT
Takes a string argument and performs
pre-processing like adding special tokens,
tokenization, tokens to ids, and tokens to
segment ids. All tokens are mapped to seg-
ment id = 1.
Args:
text (str): Text to be converted
tokenizer (obj): Tokenizer object
to convert text into BERT-re-
adable tokens and ids
Returns:
list: List of BERT-readable tokens
obj: Torch tensor with token ids
obj: Torch tensor segment ids
"""
marked_text = "[CLS] " + text + " [SEP]"
tokenized_text = tokenizer.tokenize(marked_text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
segments_ids = [1]*len(indexed_tokens)
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
return tokenized_text, tokens_tensor, segments_tensors
|
f9b3de4062fd0cc554e51bd02c750daea0a8250c
| 3,640,897
|
def possibly_equal(first, second):
"""Equality comparison that propagates uncertainty.
It represents uncertainty using its own function object."""
if first is possibly_equal or second is possibly_equal:
return possibly_equal #Propagate the possibilities
return first == second
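# Small illustration of how the uncertainty sentinel propagates through comparisons.
print(possibly_equal(1, 1))                                 # True
print(possibly_equal(1, possibly_equal) is possibly_equal)  # True, i.e. "unknown"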
|
12662df45d6ee0c6e1aadb6a5c4c0ced9352af35
| 3,640,898
|
def get_logs():
"""
Endpoint used by Slack /logs command
"""
req = request.values
logger.info(f'Log request received: {req}')
if not can_view_logs(req['user_id']):
logger.info(f"{req['user_name']} attempted to view logs and was denied")
return make_response("You are not authorized to do that.", 200)
url = get_temporary_url(req['user_id'], req['text'])
logger.info(f"Created log URL for {req['user_name']} : {url.url}")
return make_response(f'{request.host_url}logs/{url.url}', 200)
|
9708515dbd70c6e817f21c474fa1e96a26a1e9b4
| 3,640,899
|