content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def extract_and_resize_frames(path, resize_to=None):
    """
    Iterate the GIF at *path*, extracting each frame and resizing it.

    Args:
        path: path to the GIF file.
        resize_to: (width, height) bound passed to ``Image.thumbnail``;
            defaults to half the source dimensions.
    Returns:
        An array of all frames as RGBA ``PIL.Image`` objects.
    """
    # analyseImage() classifies the GIF as "full" (each frame is a complete
    # image) or "partial" (frames patch a region of the previous frame).
    mode = analyseImage(path)["mode"]
    im = PImage.open(path)
    if not resize_to:
        # Default: shrink to half the original size.
        resize_to = (im.size[0] // 2, im.size[1] // 2)
    i = 0
    # Global palette, reapplied to frames that carry no local palette.
    p = im.getpalette()
    last_frame = im.convert("RGBA")
    all_frames = []
    try:
        while True:
            # print("saving %s (%s) frame %d, %s %s" % (path, mode, i, im.size, im.tile))
            """
            If the GIF uses local colour tables, each frame will have its own palette.
            If not, we need to apply the global palette to the new frame.
            """
            if not im.getpalette():
                im.putpalette(p)
            new_frame = PImage.new("RGBA", im.size)
            """
            Is this file a "partial"-mode GIF where frames update a region of a different size to the entire image?
            If so, we need to construct the new frame by pasting it on top of the preceding frames.
            """
            if mode == "partial":
                # NOTE(review): last_frame was already shrunk by thumbnail()
                # on the previous iteration while new_frame is full-size --
                # confirm partial-mode composition is actually correct here.
                new_frame.paste(last_frame)
            new_frame.paste(im, (0, 0), im.convert("RGBA"))
            # NOTE: Image.ANTIALIAS is deprecated/removed in modern Pillow
            # (Image.LANCZOS is the replacement).
            new_frame.thumbnail(resize_to, PImage.ANTIALIAS)
            all_frames.append(new_frame)
            i += 1
            last_frame = new_frame
            # Advance to the next frame; PIL raises EOFError past the last.
            im.seek(im.tell() + 1)
    except EOFError:
        pass
    return all_frames
def create(self, node=None):
    """Test the RBAC functionality of the `CREATE VIEW` command.

    Runs one scenario per privilege combination. Each ``Scenario(run=...)``
    call presumably executes the referenced test inside the surrounding
    TestFlows-style framework -- confirm against the framework in use.
    """
    Scenario(run=create_without_create_view_privilege)
    Scenario(run=create_with_create_view_privilege_granted_directly_or_via_role)
    Scenario(run=create_with_revoked_create_view_privilege_revoked_directly_or_from_role)
    Scenario(run=create_without_source_table_privilege)
    Scenario(run=create_with_source_table_privilege_granted_directly_or_via_role)
    Scenario(run=create_with_subquery_privilege_granted_directly_or_via_role)
    Scenario(run=create_with_join_query_privilege_granted_directly_or_via_role)
    Scenario(run=create_with_union_query_privilege_granted_directly_or_via_role)
    Scenario(run=create_with_join_union_subquery_privilege_granted_directly_or_via_role)
    Scenario(run=create_with_nested_views_privilege_granted_directly_or_via_role)
def voter_star_off_save_doc_view(request):
    """
    Show documentation about voterStarOffSave.

    Renders the generic API-doc template with values built by the
    voter_star_off_save_doc module.
    """
    url_root = WE_VOTE_SERVER_ROOT_URL
    template_values = voter_star_off_save_doc.voter_star_off_save_doc_template_values(url_root)
    # Echo the caller's device id so the doc page can show it in examples.
    template_values['voter_api_device_id'] = get_voter_api_device_id(request)
    return render(request, 'apis_v1/api_doc_page.html', template_values)
def generate_offsets(size_map, flow_map=None, kernel_shape=(3, 3, 3), dilation=(1, 1, 1)):
    """
    Generates offsets for deformable convolutions from scalar maps.
    Maps should be of shape NxCxDxHxW, i.e. one set of parameters for every
    pixel. ``size_map`` expects a single channel; ``flow_map`` should have
    2 channels (one flow, negated for the other direction) or 4 channels
    (prev and next flow stacked).
    """
    kernel_shape = _triple(kernel_shape)
    dilation = _triple(dilation)
    zeros = torch.zeros_like(size_map)
    if flow_map is None:
        # No flow given: assume zero motion in both temporal directions.
        flow_map = torch.cat([zeros, zeros], dim=1)
    """
    at each pixel, size_map predicts the size of the object at that pixel
    0. Inverse optical flow if not given
    1. Sample sizes from previous/next frame according to optical flow
    2. scale, and shift kernel shape
        a. Multiply with sizes
        b. Add optical flow shift
    """
    if flow_map.shape[1] == 2:
        flow_prev = flow_map
        # TODO: properly inverse optical flow
        flow_next = -flow_map
    else:
        flow_prev, flow_next = torch.split(flow_map, [2, 2], dim=1)
    kz, ky, kx = kernel_shape
    assert kz == ky == kx == 3, "not implemented for kernel shapes != 3x3x3"
    N, _, D, H, W = size_map.shape
    # Base sampling grid in the same layout as size_map.
    grid = grid_like(size_map)
    # Warp the grid by the flows; grid_sample wants the coord axis last.
    grid_prev = einops.rearrange(grid + flow_prev, "n zyx d h w -> n d h w zyx")
    grid_next = einops.rearrange(grid + flow_next, "n zyx d h w -> n d h w zyx")
    size_prev = F.grid_sample(size_map, grid_prev, align_corners=False)
    size_next = F.grid_sample(size_map, grid_next, align_corners=False)
    base_offset = get_kernel_grid(kernel_shape, dilation, size_map.device)
    base_offset = einops.rearrange(base_offset, "zyx kz ky kx -> kz ky kx zyx 1 1 1", zyx=3)
    def _inflate(m):
        # Expand a single-channel map to (z, y, x): no scaling along z.
        return torch.cat([zeros, m, m], dim=1)
    # Scale the kernel taps by the per-pixel sizes at prev/current/next frames.
    offsets = torch.cat([
        base_offset[i] * _inflate(size)[:, None, None, None, ...]
        for i, size in enumerate((size_prev, size_map, size_next))
    ], dim=1)
    # Shift by the flow, then express relative to the undeformed kernel.
    offsets = offsets + flow_map
    return offsets - base_offset
def lighten_color(color, amount=0.5):
    """Lighten the given color by multiplying (1-luminosity) by the given amount.

    Input can be a matplotlib color string, hex string, or RGB tuple.

    Args:
        color: matplotlib named color, hex string, or RGB tuple.
        amount: lightening factor; larger values give a lighter result.
    Returns:
        Hex string of the lightened color.
    Examples:
    >> lighten_color("g", 0.3)
    >> lighten_color("#F034A3", 0.6)
    >> lighten_color((.3,.55,.1), 0.5)
    """
    try:
        # Resolve named matplotlib colors ("g", "red") to their hex value.
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # Not a named color (hex string, RGB tuple, or unhashable input):
        # pass it through to mc.to_rgb unchanged.  Narrowed from a bare
        # `except:` which silently swallowed every exception.
        c = color
    # Lighten in HLS space: move lightness toward 1 by `amount`.
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    c = colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
    return mc.to_hex(c)
def unf_gas_density_kgm3(t_K, p_MPaa, gamma_gas, z):
    """
    Real-gas density from the ideal gas law with z-factor correction.
    :param t_K: temperature, K
    :param p_MPaa: absolute pressure, MPa
    :param gamma_gas: specific gas density by air
    :param z: z-factor (compressibility)
    :return: gas density, kg/m^3
    """
    # Molar mass of the gas, kg/mol (air is ~0.029 kg/mol).
    molar_mass = gamma_gas * 0.029
    # Convert MPa to Pa for SI consistency.
    p_Pa = 10 ** 6 * p_MPaa
    # rho = p M / (z R T), with R = 8.31 J/(mol K).
    return p_Pa * molar_mass / (z * 8.31 * t_K)
def main() -> None:
    """Read a hexadecimal string, convert it to a binary packet, and solve.

    Driver: parses the packet stream, prints the sum of all packet
    versions (part one) and the evaluated expression result (part two).
    """
    with open(INPUT_FILE, encoding='utf-8') as input_file:
        packet = hex2ba(input_file.readline().strip())
    program = []
    parse_packet(program, packet)
    print(f'Part One: Sum of all versions parsed: {sum_versions(program)}')
    # program[0] is the outermost parsed packet; indices 1 and 2 are
    # presumably its operator type and operands -- confirm in parse_packet.
    result = run_program(program[0][1], program[0][2])
    print(f'Part Two: Result of all operations: {result}')
def poll():
    """Get Modbus agent data.
    Performance data from Modbus enabled targets.
    Args:
        None
    Returns:
        agentdata: AgentPolledData object for all data gathered by the agent
    """
    # Initialize key variables.
    config = Config()
    _pi = config.polling_interval()
    # Initialize AgentPolledData
    # NOTE(review): docstring says Modbus but the agent program constant is
    # the OPC-UA one -- verify which agent this poller belongs to.
    agent_program = PATTOO_AGENT_OPCUAD
    agentdata = AgentPolledData(agent_program, _pi)
    # Get registers to be polled
    tpp_list = config.target_polling_points()
    # One single-element argument tuple per target for the parallel poller.
    arguments = [(tpp,) for tpp in tpp_list]
    # Poll registers for all targets and update the TargetDataPoints
    target_datapoints_list = _parallel_poller(arguments)
    agentdata.add(target_datapoints_list)
    # Return data
    return agentdata
def dot_product_timer(x_shape=(5000, 5000),
                      y_shape=(5000, 5000),
                      mean=0,
                      std=10,
                      seed=8053):
    """
    Time the matrix product array1.dot(array2) on random normal inputs.
    Inputs:
        x_shape: Tuple of 2 Int
            Shape of the first operand;
        y_shape: Tuple of 2 Int
            Shape of the second operand;
        mean: Float
            Mean of the normal distribution used for random
            selection of elements of both operands;
        std: Float
            Standard deviation of that normal distribution;
        seed: Int
            Seed used in np.random.seed
    Outputs:
        delta_t: Float
            Wall-clock seconds spent computing the dot product;
    """
    np.random.seed(seed)
    lhs = np.random.normal(mean, std, x_shape)
    rhs = np.random.normal(mean, std, y_shape)
    # Time only the multiplication itself, not the data generation.
    start = time.time()
    lhs.dot(rhs)
    return time.time() - start
def run_migrations(app=None):
    """
    Run the (South-style) migrations against the remote database.
    Usage: fab run_migrations:app_name

    :param app: optional app label; when given, only that app is migrated.
    """
    with virtualenv(env.virtualenv):
        with cd(env.code_dir):
            if getattr(env, 'initial_deploy', False):
                # First deploy: create all tables, then fake the migration
                # history so future migrations start from a clean baseline.
                run_venv("./manage.py syncdb --all")
                run_venv("./manage.py migrate --fake --noinput")
            else:
                run_venv("./manage.py syncdb --noinput")
                if app:
                    run_venv("./manage.py migrate %s --noinput" % app)
                else:
                    run_venv("./manage.py migrate --noinput")
def ref_icrs_fk5(fnout='icrs_fk5.csv'):
    """
    Accuracy tests for the ICRS (with no E-terms of aberration) to/from FK5
    conversion, with arbitrary equinoxes and epoch of observation.

    Writes a CSV of reference coordinates computed with Starlink AST to
    data/<fnout>.
    """
    import starlink.Ast as Ast
    np.random.seed(12345)
    N = 200
    # Sample uniformly on the unit sphere. These will be either the ICRS
    # coordinates for the transformation to FK5, or the FK5 coordinates for the
    # transformation to ICRS.
    ra = np.random.uniform(0., 360., N)
    dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N)))
    # Generate random observation epoch and equinoxes
    obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)]
    equinox_fk5 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)]
    ra_icrs, dec_icrs = [], []
    ra_fk5, dec_fk5 = [], []
    for i in range(N):
        # Set up frames for AST
        frame_icrs = Ast.SkyFrame('System=ICRS,Epoch={epoch}'.format(epoch=obstime[i]))
        frame_fk5 = Ast.SkyFrame('System=FK5,Epoch={epoch},Equinox={equinox_fk5}'.format(epoch=obstime[i], equinox_fk5=equinox_fk5[i]))
        # ICRS to FK5
        frameset = frame_icrs.convert(frame_fk5)
        coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
        ra_fk5.append(coords[0, 0])
        dec_fk5.append(coords[1, 0])
        # FK5 to ICRS
        frameset = frame_fk5.convert(frame_icrs)
        coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
        ra_icrs.append(coords[0, 0])
        dec_icrs.append(coords[1, 0])
    # Write out table to a CSV file
    t = Table()
    t.add_column(Column(name='equinox_fk5', data=equinox_fk5))
    t.add_column(Column(name='obstime', data=obstime))
    t.add_column(Column(name='ra_in', data=ra))
    t.add_column(Column(name='dec_in', data=dec))
    t.add_column(Column(name='ra_fk5', data=ra_fk5))
    t.add_column(Column(name='dec_fk5', data=dec_fk5))
    t.add_column(Column(name='ra_icrs', data=ra_icrs))
    t.add_column(Column(name='dec_icrs', data=dec_icrs))
    # NOTE(review): opening in 'wb' and writing str is Python-2 era; on
    # Python 3 this raises TypeError -- confirm the intended Python version.
    f = open(os.path.join('data', fnout), 'wb')
    f.write("# This file was generated with the {0} script, and the reference "
            "values were computed using AST\n".format(os.path.basename(__file__)))
    t.write(f, format='ascii', delimiter=',')
def availible_files(path: str, contains: str = '') -> list:
    """Returns the availible files in a directory.

    Args:
        path(str): Path to directory
        contains(str, optional): Substring filter; '' matches every entry
            (Default value = '')
    Returns:
        list: names of directory entries whose name contains *contains*
    Raises:
        OSError: if *path* cannot be listed.
    """
    matching = []
    for entry in os.listdir(path):
        if contains in entry:
            matching.append(entry)
    return matching
def init_pretraining_params(exe,
                            pretraining_params_path,
                            main_program):
    """Load pretrained parameter values into *main_program*.

    Only variables that are Parameters AND have a matching file under
    *pretraining_params_path* are loaded; everything else keeps its
    initialized value.
    """
    assert os.path.exists(pretraining_params_path
                          ), "[%s] cann't be found." % pretraining_params_path
    def existed_params(var):
        # Predicate: load only trainable parameters that exist on disk.
        if not isinstance(var, fluid.framework.Parameter):
            return False
        return os.path.exists(os.path.join(pretraining_params_path, var.name))
    fluid.io.load_vars(
        exe,
        pretraining_params_path,
        main_program=main_program,
        predicate=existed_params)
    print("Load pretraining parameters from {}.".format(
        pretraining_params_path))
def npareatotal(values, areaclass):
    """
    Numpy zonal/area total: each cell receives the sum of *values*
    over all cells sharing its class label.
    :param values: per-cell weights
    :param areaclass: non-negative integer class label per cell
    :return: array where each cell holds its class total
    """
    # Sum the weights per class, then broadcast the totals back per cell.
    class_totals = np.bincount(areaclass, weights=values)
    return np.take(class_totals, areaclass)
def create_aerocode_wrapper(aerocode_params, output_params, options):
    """Create a wind-code (aeroelastic solver) wrapper.

    Currently hard-wired to FAST; the HAWC2 branch is unimplemented.

    :param aerocode_params: solver parameters (unused in the FAST branch)
    :param output_params: output configuration passed to the wrapper
    :param options: run options (currently unused)
    :return: configured solver wrapper object
    :raises NotImplementedError: if the HAWC2 branch is selected
    :raises ValueError: if the solver name is unrecognized
    """
    solver = 'FAST'
    # solver = 'HAWC2'
    if solver == 'FAST':
        ## TODO, changed when we have a real turbine
        # aero code stuff: for constructors
        from AeroelasticSE.FusedFAST import openFAST
        w = openFAST(output_params)  ## need better name than output_params
        # w = openFAST(None, atm, output_params) ## need better name than output_params
        w.setOutput(output_params)
    elif solver == 'HAWC2':
        w = openHAWC2(None)
        # Converted from the Python-2-only "raise X, msg" statement form.
        raise NotImplementedError("HAWC2 aeroecode wrapper not implemented in runBatch.py yet")
    else:
        raise ValueError("unknown aerocode: %s" % solver)
    return w
def register_todo_update(callback):
    """
    Register a callback to be called when the number of tutorials changes.

    Subscribes *callback* to the "update_tuto_num" D-Bus signal on the
    libtuto interface via the session bus.
    """
    dbus.SessionBus().add_signal_receiver(
        callback, "update_tuto_num", LIBTUTO_DBUS_INTERFACE, None, None)
def lqr_ofb_cost(K, R, Q, X, ss_o):
    # type: (np.array, np.array, np.array, np.array, control.ss) -> np.array
    """
    Cost for LQR output feedback optimization.
    @K gain matrix (flattened; reshaped to a column matrix below)
    @R control weighting matrix
    @Q process noise covariance matrix
    @X initial state covariance matrix
    @ss_o open loop state space system
    @return cost J = trace(P X), with P from the closed-loop Lyapunov equation
    """
    K = np.matrix(K).T
    A = np.matrix(ss_o.A)
    B = np.matrix(ss_o.B)
    C = np.matrix(ss_o.C)
    # Closed-loop dynamics under static output feedback u = -K y.
    A_c = A - B * K * C
    Q_c = C.T * K.T * R * K * C + Q
    # NOTE: scipy.linalg.solve_lyapunov is a deprecated alias of
    # solve_continuous_lyapunov; np.matrix is also deprecated.
    P = scipy.linalg.solve_lyapunov(A_c.T, -Q_c)
    J = np.trace(P * X)
    return J
def test_stack_str_format_on_empty(empty_stack):
""" Do we get the expected str return on empty stack
"""
expected = 'Top: None | Length: 0'
actual = str(empty_stack)
assert expected == actual | 5,334,417 |
def steadystate_floquet(H_0, c_ops, Op_t, w_d=1.0, n_it=3, sparse=False):
    """
    Calculates the effective steady state for a driven
    system with a time-dependent cosinusoidal term:
    .. math::
        \\mathcal{\\hat{H}}(t) = \\hat{H}_0 +
            \\mathcal{\\hat{O}} \\cos(\\omega_d t)
    Parameters
    ----------
    H_0 : :obj:`~Qobj`
        A Hamiltonian or Liouvillian operator.
    c_ops : list
        A list of collapse operators.
    Op_t : :obj:`~Qobj`
        The the interaction operator which is multiplied by the cosine
    w_d : float, default 1.0
        The frequency of the drive
    n_it : int, default 3
        The number of iterations for the solver
    sparse : bool, default False
        Solve for the steady state using sparse algorithms.
        Actually, dense seems to be faster.
    Returns
    -------
    dm : qobj
        Steady state density matrix.
    .. note::
        See: Sze Meng Tan,
        https://copilot.caltech.edu/documents/16743/qousersguide.pdf,
        Section (10.16)
    """
    if sparse:
        N = H_0.shape[0]
        L_0 = liouvillian(H_0, c_ops).data.tocsc()
        L_t = liouvillian(Op_t)
        # cos(w t) splits into e^{+iwt}/2 + e^{-iwt}/2, hence the 0.5 factor.
        L_p = (0.5 * L_t).data.tocsc()
        # L_p and L_m correspond to the positive and negative
        # frequency terms respectively.
        # They are independent in the model, so we keep both names.
        L_m = L_p
        L_p_array = L_p.todense()
        L_m_array = L_p_array
        Id = sp.eye(N ** 2, format="csc", dtype=np.complex128)
        S = T = sp.csc_matrix((N ** 2, N ** 2), dtype=np.complex128)
        # Continued-fraction recursion from the highest harmonic downward.
        for n_i in np.arange(n_it, 0, -1):
            L = sp.csc_matrix(L_0 - 1j * n_i * w_d * Id + L_m.dot(S))
            L.sort_indices()
            LU = splu(L)
            S = - LU.solve(L_p_array)
            L = sp.csc_matrix(L_0 + 1j * n_i * w_d * Id + L_p.dot(T))
            L.sort_indices()
            LU = splu(L)
            T = - LU.solve(L_m_array)
        # Effective zero-harmonic Liouvillian after folding in the sidebands.
        M_subs = L_0 + L_m.dot(S) + L_p.dot(T)
    else:
        N = H_0.shape[0]
        L_0 = liouvillian(H_0, c_ops).full()
        L_t = liouvillian(Op_t)
        L_p = (0.5 * L_t).full()
        L_m = L_p
        Id = np.eye(N ** 2)
        S, T = np.zeros((N ** 2, N ** 2)), np.zeros((N ** 2, N ** 2))
        for n_i in np.arange(n_it, 0, -1):
            L = L_0 - 1j * n_i * w_d * Id + np.matmul(L_m, S)
            lu, piv = la.lu_factor(L)
            S = - la.lu_solve((lu, piv), L_p)
            L = L_0 + 1j * n_i * w_d * Id + np.matmul(L_p, T)
            lu, piv = la.lu_factor(L)
            T = - la.lu_solve((lu, piv), L_m)
        M_subs = L_0 + np.matmul(L_m, S) + np.matmul(L_p, T)
    return steadystate(Qobj(M_subs, type="super", dims=L_t.dims))
def gsl_blas_dtrmm(*args, **kwargs):
    """
    gsl_blas_dtrmm(CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
        CBLAS_DIAG_t Diag, double alpha,
        gsl_matrix A, gsl_matrix B) -> int
    """
    # SWIG-generated wrapper: delegates directly to the C gsl_blas_dtrmm
    # (triangular matrix-matrix multiply, B := alpha * op(A) * B or B * op(A)).
    return _gslwrap.gsl_blas_dtrmm(*args, **kwargs)
def scale(value, upper, lower, min_, max_):
    """Linearly map *value* from the range [min_, max_] onto [upper, lower].

    value == min_ maps to upper; value == max_ maps to lower.
    """
    rise = (lower - upper) * float(value - min_)
    run = float(max_ - min_)
    return rise / run + upper
def conditional_response(view, video=None, **kwargs):
    """
    Redirect to login page if user is anonymous and video is private.
    Raise a permission denied error if user is logged in but doesn't have permission.
    Otherwise, return standard template response.
    Args:
        view(TemplateView): a video-specific View object (ViewDetail, ViewEmbed, etc).
        video(ui.models.Video): a video to display with the view
    Returns:
        TemplateResponse: the template response to render
    Raises:
        PermissionDenied: if an authenticated user lacks view permission.
    """
    if not ui_permissions.has_video_view_permission(video, view.request):
        if view.request.user.is_authenticated:
            # Logged in but not allowed: a login redirect would not help.
            raise PermissionDenied
        else:
            # Anonymous: send to login, preserving the requested URL.
            return redirect_to_login(view.request.get_full_path())
    context = view.get_context_data(video, **kwargs)
    return view.render_to_response(context)
def Flatten(nmap_list):
    """Flattens every `.NestedMap` in nmap_list and concatenates the results."""
    flattened = []
    for nmap in nmap_list:
        flattened.extend(nmap.Flatten())
    return flattened
def get_data_schema() -> T.StructType:
    """
    Return the Kafka record schema: two string fields, 'key' and 'message'.
    """
    return T.StructType(
        [T.StructField('key', T.StringType()),
         T.StructField('message', T.StringType())]
    )
def build_graph(num: int = 0) -> (int, List[int]):
    """Build a graph of num nodes.

    Starts from a fixed triangle on nodes 0-2; every additional node is
    attached to two distinct earlier nodes with random weights in [0, 5).

    Raises app.UsageError when fewer than 3 nodes are requested.
    """
    if num < 3:
        raise app.UsageError('Must request graph of at least 3 nodes.')
    weight = 5.0
    edges = [(0, 1, 1.0), (1, 2, 2.0), (0, 2, 3.0)]
    for extra in range(num - 3):
        node = 3 + extra
        # Pick two distinct earlier nodes to attach to.
        neighbors = random.sample(range(0, node - 1), 2)
        edges.append((node, neighbors[0],
                      weight * np.random.random()))
        edges.append((node, neighbors[1],
                      weight * np.random.random()))
    return num, edges
async def test_locator_run_exception(config, mocker):
    """Test an error doesn't kill the Locator.

    run() should swallow the exception raised by _do_work and still record
    a work-end timestamp.
    """
    logger_mock = mocker.MagicMock()
    p = Locator(config, logger_mock)
    p.last_work_end_timestamp = None
    p._do_work = AsyncMock()
    # Force the work unit to fail on its first call.
    p._do_work.side_effect = [Exception("bad thing happen!")]
    await p.run()
    p._do_work.assert_called()
    # The timestamp must be set even though the work raised.
    assert p.last_work_end_timestamp
def concatenate(boxes_list:List[Boxes], fields:Collection[str]=None) -> Boxes:
    """Merge multiple boxes to a single instance.

    Only fields present in every input (or the explicitly requested
    *fields*) are kept; coordinates and field arrays are concatenated
    along axis 0.  An empty input list yields an empty Boxes.

        B = A[:10]
        C = A[10:]
        D = concatenate([B, C])
        D should be equal to A
    """
    if not boxes_list:
        if fields is None:
            fields = []
        return empty(*fields)
    if fields is None:
        # Get fields common to all sub-boxes
        common_fields = set.intersection( *[set(x.get_fields()) for x in boxes_list] )
    else:
        common_fields = fields
    coords = np.concatenate([x.get() for x in boxes_list], axis=0)
    new_fields = dict()
    for f in common_fields:
        new_fields[f] = np.concatenate([x.get_field(f) for x in boxes_list], axis=0)
    return Boxes(coords, **new_fields)
def detect_peaks(array, freq=0, cthr=0.2, unprocessed_array=False, fs=44100):
    """
    Function detects the peaks in array, based on the mirpeaks algorithm.
    :param array: Array in which to detect peaks
    :param freq: Scale representing the x axis (same length as array)
    :param cthr: Contrast threshold for accepting adjacent peaks
    :param unprocessed_array: Unprocessed (non-normalised) array used for peak
        values; if False it defaults to the same as array.
    :param fs: Sample rate of the array
    :return: index of peaks, values of peaks, peak position on freq.
    """
    # flatten the array for correct processing
    array = array.flatten()
    if np.isscalar(freq):
        # calculate the frequency scale - assuming a samplerate if none provided
        freq = np.linspace(0, fs / 2.0, len(array))
    if np.isscalar(unprocessed_array):
        unprocessed_array = array
    # add values to allow peaks at the first and last values
    # to allow peaks at start and end (default of mir)
    array_appended = np.insert(array, [0, len(array)], -2.0)
    # unprocessed array to get peak values
    array_unprocess_appended = np.insert(
        unprocessed_array, [0, len(unprocessed_array)], -2.0
    )
    # append the frequency scale for precise freq calculation
    freq_appended = np.insert(freq, [0, len(freq)], -1.0)
    # get the difference values
    diff_array = np.diff(array_appended)
    # find local maxima: above threshold, rising before and falling after
    mx = (
        np.array(
            np.where((array >= cthr) & (
                diff_array[0:-1] > 0) & (diff_array[1:] <= 0))
        )
        + 1
    )
    # initialise arrays for output
    finalmx = []
    peak_value = []
    peak_x = []
    peak_idx = []
    if np.size(mx) > 0:
        # unpack the array if peaks found
        mx = mx[0]
        j = 0  # scans the peaks from beginning to end
        mxj = mx[j]  # the current peak under evaluation
        jj = j + 1
        bufmin = 2.0
        bufmax = array_appended[mxj]
        if mxj > 1:
            oldbufmin = min(array_appended[: mxj - 1])
        else:
            oldbufmin = array_appended[0]
        # Walk every candidate, keeping only peaks with a contrastive
        # notch of at least cthr on both sides (mirpeaks contrast rule).
        while jj < len(mx):
            # if adjacent mx values are too close, returns no array
            if mx[jj - 1] + 1 == mx[jj] - 1:
                bufmin = min([bufmin, array_appended[mx[jj - 1]]])
            else:
                bufmin = min(
                    [bufmin, min(array_appended[mx[jj - 1]: mx[jj] - 1])])
            if bufmax - bufmin < cthr:
                # There is no contrastive notch
                if array_appended[mx[jj]] > bufmax:
                    # new peak is significantly higher than the old peak,
                    # the peak is transfered to the new position
                    j = jj
                    mxj = mx[j]  # the current peak
                    bufmax = array_appended[mxj]
                    oldbufmin = min([oldbufmin, bufmin])
                    bufmin = 2.0
                elif array_appended[mx[jj]] - bufmax <= 0:
                    bufmax = max([bufmax, array_appended[mx[jj]]])
                    oldbufmin = min([oldbufmin, bufmin])
            else:
                # There is a contrastive notch
                if bufmax - oldbufmin < cthr:
                    # But the previous peak candidate is too weak and therefore discarded
                    oldbufmin = min([oldbufmin, bufmin])
                else:
                    # The previous peak candidate is OK and therefore stored
                    finalmx.append(mxj)
                    oldbufmin = bufmin
                bufmax = array_appended[mx[jj]]
                j = jj
                mxj = mx[j]  # The current peak
                bufmin = 2.0
            jj += 1
        if bufmax - oldbufmin >= cthr and (
            bufmax - min(array_appended[mx[j] + 1:]) >= cthr
        ):
            # The last peak candidate is OK and stored
            finalmx.append(mx[j])
        """ Sort the values according to their level """
        finalmx = np.array(finalmx, dtype=np.int64)
        sort_idx = np.argsort(array_appended[finalmx])[::-1]  # descending sort
        finalmx = finalmx[sort_idx]
        # indexes were for the appended array, -1 to return to original array index
        peak_idx = finalmx - 1
        peak_value = array_unprocess_appended[finalmx]
        peak_x = freq_appended[finalmx]
        """ Interpolation for more precise peak location """
        corrected_value = []
        corrected_position = []
        for current_peak_idx in finalmx:
            # if there enough space to do the fitting
            if 1 < current_peak_idx < (len(array_unprocess_appended) - 2):
                # Three-point parabolic interpolation around the peak.
                y0 = array_unprocess_appended[current_peak_idx]
                ym = array_unprocess_appended[current_peak_idx - 1]
                yp = array_unprocess_appended[current_peak_idx + 1]
                p = (yp - ym) / (2 * (2 * y0 - yp - ym))
                corrected_value.append(y0 - (0.25 * (ym - yp) * p))
                if p >= 0:
                    correct_pos = ((1 - p) * freq_appended[current_peak_idx]) + (
                        p * freq_appended[current_peak_idx + 1]
                    )
                    corrected_position.append(correct_pos)
                elif p < 0:
                    correct_pos = ((1 + p) * freq_appended[current_peak_idx]) - (
                        p * freq_appended[current_peak_idx - 1]
                    )
                    corrected_position.append(correct_pos)
            else:
                # Too close to the edge: keep the uninterpolated values.
                corrected_value.append(
                    array_unprocess_appended[current_peak_idx])
                corrected_position.append(freq_appended[current_peak_idx])
        if corrected_position:
            peak_x = corrected_position
            peak_value = corrected_value
        peak_idx = peak_idx.astype(np.int64)
        return peak_idx, np.array(peak_value, dtype=np.float64), np.array(peak_x, np.float64)
    else:
        # No peaks found: return sentinel zero arrays.
        return np.array([0], dtype=np.int64), np.array(
            [0], dtype=np.float64), np.array([0], np.float64)
def left_index_iter(shape):
    """Iterator for the left boundary (first-column) flat indices of a
    structured grid with *shape* = (rows, cols)."""
    n_rows, n_cols = shape[0], shape[1]
    return range(0, n_rows * n_cols, n_cols)
def calculate_precision_recall(df_merged):
    """Calculates precision and recall arrays going through df_merged row-wise.

    Assumes df_merged is already sorted by descending score, so each row's
    running precision/recall corresponds to a score threshold.
    Mutates df_merged by adding is_tp/tp/precision/recall columns.
    Returns (precision, recall) as numpy arrays.
    """
    all_positives = get_all_positives(df_merged)
    # Populates each row with 1 if this row is a true positive
    # (at its score level).
    df_merged["is_tp"] = np.where(
        (df_merged["label_groundtruth"] == "SPEAKING_AUDIBLE") &
        (df_merged["label_prediction"] == "SPEAKING_AUDIBLE"), 1, 0)
    # Counts true positives up to and including that row.
    df_merged["tp"] = df_merged["is_tp"].cumsum()
    # Calculates precision for every row counting true positives up to
    # and including that row over the index (1-based) of that row.
    df_merged["precision"] = df_merged["tp"] / (df_merged.index + 1)
    # Calculates recall for every row counting true positives up to
    # and including that row over all positives in the groundtruth dataset.
    df_merged["recall"] = df_merged["tp"] / all_positives
    logging.info(
        "\n%s\n",
        df_merged.head(10)[[
            "uid", "score", "label_groundtruth", "is_tp", "tp", "precision",
            "recall"
        ]])
    return np.array(df_merged["precision"]), np.array(df_merged["recall"])
def combine(shards, judo_file):
    """Recombine Shamir key shards and decrypt a judo file.

    :param shards: Shamir secret shares of the key-encryption key (KEK)
    :param judo_file: mapping with 'wrappedKey' (DEK encrypted by the KEK)
        and 'data' (payload encrypted by the DEK)
    :return: (decrypted_data, decrypted_text) -- hex string and raw bytes
    """
    # Recombine the shards to create the kek
    combined_shares = Shamir.combine(shards)
    combined_shares_string = "{}".format(combined_shares)
    # decrypt the dek using the recombined kek
    decrypted_dek = decrypt(
        judo_file['wrappedKey'],
        unhexlify(combined_shares_string)
    )
    # decrypt the data using the dek
    decrypted_data = decrypt(
        judo_file['data'],
        unhexlify(decrypted_dek)
    )
    decrypted_text = unhexlify(decrypted_data)
    return(decrypted_data, decrypted_text)
def shiftRightUnsigned(e, numBits):
    """
    :rtype: Column
    >>> from pysparkling import Context
    >>> from pysparkling.sql.session import SparkSession
    >>> from pysparkling.sql.functions import shiftLeft, shiftRight, shiftRightUnsigned
    >>> spark = SparkSession(Context())
    >>> df = spark.range(-5, 4)
    >>> df.select("id", shiftRight("id", 1), shiftRightUnsigned("id", 1)).show()
    +---+-----------------+-------------------------+
    | id|shiftright(id, 1)|shiftrightunsigned(id, 1)|
    +---+-----------------+-------------------------+
    | -5|               -3|      9223372036854775805|
    | -4|               -2|      9223372036854775806|
    | -3|               -2|      9223372036854775806|
    | -2|               -1|      9223372036854775807|
    | -1|               -1|      9223372036854775807|
    |  0|                0|                        0|
    |  1|                0|                        0|
    |  2|                1|                        1|
    |  3|                1|                        1|
    +---+-----------------+-------------------------+
    """
    # Wrap the column expression in a logical (unsigned) right shift node.
    return col(ShiftRightUnsigned(parse(e), lit(numBits)))
def test_jenkinslts_home_exists(host):
    """
    Tests if the Jenkins home directory exists on the target host
    (testinfra fixture `host`).
    """
    assert host.file(PACKAGE_HOME).exists
def change_wallpaper_job(profile, force=False):
    """Centralized wallpaper method that calls setter algorithm based on input prof settings.

    Dispatches on profile.spanmode/ppimode to the matching setter, which is
    run in a daemon thread. When force, skip the profile name check.

    Returns the started Thread, or None if the spanmode is unrecognized.
    """
    with G_WALLPAPER_CHANGE_LOCK:
        if profile.spanmode.startswith("single") and profile.ppimode is False:
            thrd = Thread(target=span_single_image_simple, args=(profile, force), daemon=True)
            thrd.start()
        elif ((profile.spanmode.startswith("single") and profile.ppimode is True) or
              profile.spanmode.startswith("advanced")):
            thrd = Thread(target=span_single_image_advanced, args=(profile, force), daemon=True)
            thrd.start()
        elif profile.spanmode.startswith("multi"):
            thrd = Thread(target=set_multi_image_wallpaper, args=(profile, force), daemon=True)
            thrd.start()
        else:
            # Fixed typo in the log message ("Unkown" -> "Unknown").
            sp_logging.G_LOGGER.info("Unknown profile spanmode: %s", profile.spanmode)
            return None
        return thrd
def transact():
    """Place a limit order on the VCC exchange via its HTTP API.

    Reads credentials from the VCC_API / VCC_SECRET environment variables,
    fetches the last traded price for the pair, signs the request with
    HMAC-SHA256 and POSTs (for order endpoints) or GETs (otherwise).
    Prints the JSON response for order requests.
    """
    # Sets a quantity range to sell. Example: (10,15) randomly selects a number between 10 and 15
    quantity = round(random.uniform(10, 15), 2)
    # Coin to trade (FCT, ADA, BTC etc...)
    coin = 'fct'
    # Base currency coin is denominated in (BTC, USDT, etc...)
    currency = "usdt"
    # API key and secret key. Must set through env variable
    # Can change to your own reference via config file or other means
    api_key = os.environ["VCC_API"]
    secret = bytes(os.environ["VCC_SECRET"], "utf-8")
    # Validates the pairing input above and requests the last price of coin_currency pairing
    pairing = f"{coin.upper()}_{currency.upper()}"
    ticker = requests.get('https://vcc.exchange/api/v2/ticker')
    try:
        price = ticker.json()['data'][pairing]['last_price']
    except Pairing as e:
        # NOTE(review): catching and immediately re-raising is a no-op;
        # presumably Pairing should wrap KeyError -- confirm its definition.
        raise e
    # Change the figure here to adjust the price addition/reduction factor
    # (.0000001, for example reduces current currency price by .0000001)
    tx_price = round((float(price) - .0000001), 7)
    # API endpoints for use. If anything other than sell_order or buy_order, request uses GET
    # BUG FIX: "&currency=" had been mangled into the HTML entity "curren;"
    # ("currency" -> "\u00a4cy") in the query strings below.
    endpoints = {
        "user": "api/v2/user",
        "ticker": "api/v2/ticker",
        "orders": "api/v2/orders/",
        "trades": "api/v2/orders/trades",
        "sell_order": f"api/v2/orders?trade_type=sell&type=limit&quantity={quantity}&price={tx_price}"
                      f"&currency={currency.lower()}&coin={coin.lower()}",
        "buy_order": f"api/v2/orders?trade_type=buy&type=limit&quantity={quantity}&price={tx_price}"
                     f"&currency={currency.lower()}&coin={coin.lower()}"
    }
    # Base URL for API
    base_url = "https://vcc.exchange/"
    # Change this figure to determine type of API request
    endpoint = endpoints["sell_order"]
    # BUG FIX: the original tested `endpoints["sell_order"] or endpoints["buy_order"]`,
    # which is always truthy; compare the *selected* endpoint instead.
    is_order = endpoint in (endpoints["sell_order"], endpoints["buy_order"])
    # Creates the message payload
    if is_order:
        message = bytes(f"POST {endpoint}", "utf-8")
    else:
        message = bytes(f"GET {endpoint}", "utf-8")
    signature = hmac.new(secret, message, digestmod=hashlib.sha256).hexdigest().encode()
    # Millisecond UTC timestamp
    timestamp = str(int(datetime.now(timezone.utc).timestamp() * 1000))
    # Headers for including in API request
    headers = {
        "Authorization": f"Bearer {api_key}",
        "timestamp": timestamp,
        "signature": signature
    }
    if is_order:
        response = requests.post(f"{base_url}{endpoint}", headers=headers)
        print(f'Placing a limit order to sell {quantity} {coin.upper()} at {tx_price}{currency.upper()}'
              f' price. Standby for response...')
    else:
        response = requests.get(f"{base_url}{endpoint}", headers=headers)
    print(response.json())
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
    """Return type hints for an object.
    This is often the same as obj.__annotations__, but it handles
    forward references encoded as string literals, adds Optional[t] if a
    default value equal to None is set and recursively replaces all
    'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
    The argument may be a module, class, method, or function. The annotations
    are returned as a dictionary. For classes, annotations include also
    inherited members.
    TypeError is raised if the argument is not of a type that can contain
    annotations, and an empty dictionary is returned if no annotations are
    present.
    BEWARE -- the behavior of globalns and localns is counterintuitive
    (unless you are familiar with how eval() and exec() work).  The
    search order is locals first, then globals.
    - If no dict arguments are passed, an attempt is made to use the
      globals from obj (or the respective module's globals for classes),
      and these are also used as the locals.  If the object does not appear
      to have globals, an empty dictionary is used.
    - If one dict argument is passed, it is used for both globals and
      locals.
    - If two dict arguments are passed, they specify globals and
      locals, respectively.
    """
    if getattr(obj, '__no_type_check__', None):
        return {}
    # Classes require a special treatment.
    if isinstance(obj, type):
        hints = {}
        # Walk the MRO base-first so subclass annotations override bases'.
        for base in reversed(obj.__mro__):
            if globalns is None:
                base_globals = sys.modules[base.__module__].__dict__
            else:
                base_globals = globalns
            ann = base.__dict__.get('__annotations__', {})
            for name, value in ann.items():
                if value is None:
                    value = type(None)
                if isinstance(value, str):
                    value = ForwardRef(value, is_argument=False)
                value = _eval_type(value, base_globals, localns)
                hints[name] = value
        return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
    if globalns is None:
        if isinstance(obj, types.ModuleType):
            globalns = obj.__dict__
        else:
            nsobj = obj
            # Find globalns for the unwrapped object.
            while hasattr(nsobj, '__wrapped__'):
                nsobj = nsobj.__wrapped__
            globalns = getattr(nsobj, '__globals__', {})
        if localns is None:
            localns = globalns
    elif localns is None:
        localns = globalns
    hints = getattr(obj, '__annotations__', None)
    if hints is None:
        # Return empty annotations for something that _could_ have them.
        if isinstance(obj, _allowed_types):
            return {}
        else:
            raise TypeError('{!r} is not a module, class, method, '
                            'or function.'.format(obj))
    defaults = _get_defaults(obj)
    hints = dict(hints)
    for name, value in hints.items():
        if value is None:
            value = type(None)
        if isinstance(value, str):
            value = ForwardRef(value)
        value = _eval_type(value, globalns, localns)
        # Implicit Optional for parameters defaulted to None.
        if name in defaults and defaults[name] is None:
            value = Optional[value]
        hints[name] = value
    return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
def sheets_from_excel(xlspath):
    """
    Reads in an xls(x) file.
    Returns an array of arrays, like:
        Xijk, i = sheet, j = row, k = column
    (but it's not a np ndarray, just nested arrays)
    Empty sheets are skipped.

    NOTE: uses xrange, so this is Python 2 code.
    """
    wb = xlrd.open_workbook(xlspath)
    n_sheets = wb.nsheets
    sheet_data = []
    for sn in xrange(n_sheets):
        sheet = wb.sheet_by_index(sn)
        rows = [sheet.row_values(i) for i in xrange(sheet.nrows)]
        if len(rows) > 0:
            sheet_data.append(rows)
    return sheet_data
def metadata(ctx, drafts, path):
    """List metadata for [PROJECT] [VERSION].

    With no path: list all studies. With one element: list that study's
    drafts (--drafts) or versions. With two: show a specific version.
    """
    _rws = partial(rws_call, ctx)
    if len(path) == 0:
        _rws(MetadataStudiesRequest(), default_attr='oid')
    elif len(path) == 1:
        if drafts:
            _rws(StudyDraftsRequest(path[0]), default_attr='oid')
        else:
            _rws(StudyVersionsRequest(path[0]), default_attr='oid')
    elif len(path) == 2:
        _rws(StudyVersionRequest(path[0], path[1]))
    else:
        click.echo('Too many arguments')
def fitStatmechPseudoRotors(Tlist, Cvlist, Nvib, Nrot, molecule=None):
    """
    Fit `Nvib` harmonic oscillator and `Nrot` hindered internal rotor modes to
    the provided dimensionless heat capacities `Cvlist` at temperatures `Tlist`
    in K. This method assumes that there are enough heat capacity points
    provided that the vibrational frequencies can be fit directly, but the
    hindered rotors must be combined into a single "pseudo-rotor".

    Returns a tuple ``(vib, hind)`` where ``vib`` is a list of the `Nvib`
    fitted oscillator frequencies and ``hind`` is a list of `Nrot` identical
    ``(frequency, barrier)`` pairs -- every rotor shares the single fitted
    pseudo-rotor parameters.

    Raises StatmechFitError if the optimizer returns a non-finite solution.
    """
    # Construct the lower and upper bounds for each variable
    bounds = []
    # Bounds for harmonic oscillator frequencies
    for i in range(Nvib):
        bounds.append((hoFreqLowerBound, hoFreqUpperBound))
    # Bounds for pseudo-hindered rotor frequency and barrier height
    bounds.append((hrFreqLowerBound, hrFreqUpperBound))
    bounds.append((hrBarrLowerBound, hrBarrUpperBound))
    # Construct the initial guess
    # Initial guesses within each mode type must be distinct or else the
    # optimization will fail
    x0 = numpy.zeros(Nvib + 2, numpy.float64)
    # Initial guess for harmonic oscillator frequencies
    if Nvib > 0:
        x0[0] = 200.0
        x0[1:Nvib] = numpy.linspace(800.0, 1600.0, Nvib-1)
    # Initial guess for hindered rotor frequencies and barrier heights
    x0[Nvib] = 100.0
    x0[Nvib+1] = 300.0
    # Execute the optimization
    fit = PseudoRotorFit(Tlist, Cvlist, Nvib, Nrot)
    fit.initialize(Neq=len(Tlist), Nvars=len(x0), Ncons=0, bounds=bounds, maxIter=maxIter)
    x, igo = fit.solve(x0)
    # Check that the results of the optimization are valid
    if not numpy.isfinite(x).all():
        raise StatmechFitError('Returned solution vector is nonsensical: x = {0}.'.format(x))
    if igo == 8:
        # igo == 8 presumably signals "max iterations reached" in the solver -- TODO confirm
        logging.warning('Maximum number of iterations reached when fitting spectral data for {0}.'.format(molecule.toSMILES()))
    # Postprocess optimization results
    vib = list(x[0:Nvib])
    hind = []
    # All Nrot rotors get the same fitted (frequency, barrier) pair, since
    # they were lumped into one pseudo-rotor for the fit.
    for i in range(Nrot):
        hind.append((x[Nvib], x[Nvib+1]))
    return vib, hind | 5,334,438 |
def send_notification(to,
                      subject,
                      body,
                      username=None,
                      password=None,
                      smtp_server=None,
                      port=None):
    """
    Send an email or text notification to the given recipient.

    param to: str
        the address of the message recipient
    param subject: str
        the subject of the message (Use '' for no subject)
    param body: str
        the content of the notification message
    optional params (each falls back to the user's system environment,
    loaded via dotenv, when omitted):
    param username: str
        the username for the sending email account. If no value is
        specified, then the value in the user's system environment is used.
    param password: str
        the password for the sending email account. If no value is
        specified, the value in the user's system environment is used.
    param smtp_server: str
        The smtp server that will be used to send the message. If no value
        is specified, then the value in the user's system environment is
        used.
    param port: int
        the port associated with the smtp server. This must be set if the
        smtp server is set, and vice versa.
    """
    load_dotenv()
    # Explicit arguments win; otherwise fall back to the environment.
    # `is not None` (rather than truthiness) preserves deliberately empty values.
    email_username = username if username is not None else os.getenv("EMAIL_USERNAME")
    email_password = password if password is not None else os.getenv("EMAIL_APP_PASSWORD")
    email_server = smtp_server if smtp_server is not None else os.getenv("GMAIL_SMTP_SERVER")
    server_port = port if port is not None else os.getenv("GMAIL_SMTP_PORT")
    msg = EmailMessage()
    msg.set_content(body)
    msg["subject"] = subject
    msg["from"] = email_username
    msg["to"] = to
    # Context manager guarantees the connection is closed (QUIT) even if
    # starttls/login/send fails -- the original leaked the socket on error.
    with smtplib.SMTP(email_server, server_port) as server:
        server.starttls()
        server.login(email_username, email_password)
        server.send_message(msg)
def plot_summary(labels,
                 label2array,
                 xlim=100,
                 ylim=None,
                 logscale=True,
                 ylabel='Regret',
                 xlabel='BO Iters',
                 method='mean',
                 title=None,
                 violin_trials=None,
                 violin_labels=None,
                 figsize=(24, 6),
                 colors=None,
                 axes=None,
                 uppercenter_legend=True,
                 uppercenter_legend_ncol=3,
                 bbox_to_anchor=(0.5, 1.1),
                 **kwargs):
  """Plot a summary of results with options to add violin plots on slices.

  Args:
    labels: list of labels to be included in the plot.
    label2array: a dictionary with labels as keys and an array of results as
      values.
    xlim: a tuple of the new x-axis limits.
    ylim: a tuple of the new y-axis limits.
    logscale: use log scale for y axis if True.
    ylabel: label for y axis.
    xlabel: label for x axis.
    method: plot mean and std, or median and percentile.
    title: title of the plot.
    violin_trials: list of trials to plot violin plots on slices of the figure.
    violin_labels: list of labels to be included in violin plots.
    figsize: a tuple describing the size of the figure.
    colors: dictionary mapping from label to color; defaults to a fresh copy
      of COLORS (taken per call, not frozen at import time).
    axes: list of matplotlib.pyplot.axis objects.
    uppercenter_legend: use an upper center legend if True.
    uppercenter_legend_ncol: number of columns for the upper center legend.
    bbox_to_anchor: bbox_to_anchor of the upper center legend.
    **kwargs: other plot arguments.
  """
  if colors is None:
    colors = COLORS.copy()
  # NOTE(review): this extra figure looks unused when axes are created below
  # via plt.subplots -- confirm whether the dpi=1500 figure is intentional.
  plt.figure(dpi=1500)
  # Number of violin panels requested. `violin_trials` may be None, so do not
  # call len() on it directly (the original crashed here when axes was None
  # and violin_trials was left at its default).
  n_violins = len(violin_trials) if violin_trials else 0
  if axes is None or len(axes) < n_violins + 1:
    _, axes = plt.subplots(nrows=1, ncols=n_violins + 1, figsize=figsize)
    # With a single column, plt.subplots returns one Axes, not an array.
    axes = np.atleast_1d(axes)
  plot_all({la: label2array.get(la, None) for la in labels},
           axes[0],
           logscale=logscale,
           ylabel=ylabel,
           xlabel=xlabel,
           method=method,
           colors=colors,
           **kwargs)
  axes[0].set_xlim(0, xlim)
  if uppercenter_legend:
    axes[0].legend(
        loc='upper center',
        bbox_to_anchor=bbox_to_anchor,
        ncol=uppercenter_legend_ncol,
        fancybox=True,
        shadow=True)
  else:
    axes[0].legend()
  if ylim:
    axes[0].set_ylim(ylim[0], ylim[1])
  if title:
    axes[0].set_title(title)
  if not violin_trials or not violin_labels:
    return
  labels = violin_labels
  for i, trial in enumerate(violin_trials):
    data = [np.array(label2array[la])[:, trial] for la in labels]
    # 20th/50th/80th percentiles across trials, per label.
    quartile1, medians, quartile3 = np.percentile(
        np.array(data), [20, 50, 80], axis=1)
    parts = axes[i + 1].violinplot(data, showmedians=False, showextrema=False)
    inds = np.arange(1, len(medians) + 1)
    axes[i + 1].scatter(
        inds, medians, marker='o', color='white', s=30, zorder=3)
    axes[i + 1].vlines(
        inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
    for pc, la in zip(parts['bodies'], labels):
      pc.set_facecolor(colors[la])
      pc.set_edgecolor('black')
      pc.set_alpha(1)
    axes[i + 1].set_title(f'BO Iters = {trial+1}')
    set_violin_axis_style(axes[i + 1], labels)
def add_numbers(a, b):
    """Return the sum of *a* and *b*.

    :param int a: The first number.
    :param int b: The second number.
    :return: The sum of the given numbers.

    >>> add_numbers(1, 2)
    3
    >>> add_numbers(50, -8)
    42
    """
    total = a + b
    return total
def get_version(table_name):
    """Get the most recent version number held in a given table.

    The table name is interpolated into the SQL text (identifiers cannot be
    bound as query parameters), so it is validated first to guard against
    SQL injection.

    Raises:
        ValueError: if `table_name` is not a plain Python identifier.
    """
    if not table_name.isidentifier():
        raise ValueError("invalid table name: {!r}".format(table_name))
    db = get_db()
    cur = db.cursor()
    # Most recent row first; we only read its "version" column.
    cur.execute("select * from {} order by entered_on desc".format(table_name))
    return cur.fetchone()["version"]
def area(a, indices=(0, 1, 2, 3)):
    """Compute box areas from the last axis of *a*.

    :param a: array whose last axis holds box coordinates.
    :param indices: positions of (x0, y0, x1, y1) within that axis.
    :return: (x1 - x0) * (y1 - y0), broadcast over all leading axes.
    """
    ix0, iy0, ix1, iy1 = indices
    width = a[..., ix1] - a[..., ix0]
    height = a[..., iy1] - a[..., iy0]
    return width * height
def latest_file(path_name, keyword='', ext='', **kwargs) -> str:
    """
    Latest modified file in folder

    Args:
        path_name: full path name
        keyword: keyword to search
        ext: file extension

    Returns:
        str: latest file name ('' when the folder holds no match)
    """
    candidates = sort_by_modified(
        all_files(path_name=path_name, keyword=keyword, ext=ext, full_path=True)
    )
    if candidates:
        # Most recently modified entry, with Windows separators normalized.
        return str(candidates[0]).replace('\\', '/')
    # Nothing matched: log at debug level and return an empty path.
    from xbbg.io import logs
    logger = logs.get_logger(latest_file, level=kwargs.pop('log', 'warning'))
    logger.debug(f'no file in folder: {path_name}')
    return ''
def like_bp_gauss_mix_loop_nbin(grid_dir, n_bps, n_zbin, lmax_like, lmin_like, lmax_in, lmin_in, fid_pos_pos_dir,
                                fid_she_she_dir, fid_pos_she_dir, pos_nl_path, she_nl_path, mixmats_path,
                                bp_cov_filemask, binmixmat_save_dir, obs_bp_save_dir, varied_params, like_save_dir):
    """
    Run the like_bp_gauss_mix likelihood module over a CosmoSIS grid repeatedly for different numbers of bandpowers,
    saving a separate likelihood file for each number of bandpowers.

    Args:
        grid_dir (str): Path to CosmoSIS grid.
        n_bps (list): List of numbers of bandpowers.
        n_zbin (int): Number of redshift bins.
        lmax_like (int): Maximum l to use in the likelihood.
        lmin_like (int): Minimum l to use in the likelihood.
        lmax_in (int): Maximum l included in mixing.
        lmin_in (int): Minimum l supplied in theory and noise power spectra.
        fid_pos_pos_dir (str): Path to fiducial position-position power spectra.
        fid_she_she_dir (str): Path to fiducial shear-shear power spectra.
        fid_pos_she_dir (str): Path to fiducial position-shear power spectra.
        pos_nl_path (str): Path to text file containing position noise power spectrum.
        she_nl_path (str): Path to text file containing shear noise power spectrum.
        mixmats_path (str): Path to mixing matrices in numpy .npz file with four arrays (mixmat_nn_to_nn,
                            mixmat_ne_to_ne, mixmat_ee_to_ee, mixmat_bb_to_ee) each with shape
                            (lmax_like - lmin_in + 1, lmax_in - lmin_in + 1).
        bp_cov_filemask (str): Path to precomputed bandpower covariance with {n_bp} placeholder, in numpy .npz file with
                               array name cov, with shape (n_data, n_data) where n_data = n_spectra * n_bandpowers.
        binmixmat_save_dir (str): Path to directory into which to save combined mixing and binning matrices, which are
                                  then loaded inside the likelihood module.
        obs_bp_save_dir (str): Path to directory into which to save binned 'observed' (mixed fiducial) power spectra,
                               which are then loaded inside the likelihood module.
        varied_params (list): List of CosmoSIS parameter names whose values are varied across the grid.
        like_save_dir (str): Path to directory into which to save likelihood files, one for each number of bandpowers.
    """
    print(f'Starting at {time.strftime("%c")}')
    # Calculate some useful quantities
    n_field = 2 * n_zbin
    n_spec = n_field * (n_field + 1) // 2
    n_ell_like = lmax_like - lmin_like + 1
    n_ell_in = lmax_in - lmin_in + 1
    ell_in = np.arange(lmin_in, lmax_in + 1)
    # Form list of power spectra
    print('Forming list of power spectra')
    fields = [f'{f}{z}' for z in range(1, n_zbin + 1) for f in ['N', 'E']]
    assert len(fields) == n_field
    # NOTE: each spectrum is a 4-character string like 'N1E2'; the loop below
    # unpacks it as (f1, z1, f2, z2), which assumes single-digit z-bin labels
    # (n_zbin <= 9) -- TODO confirm behavior for larger n_zbin.
    spectra = [fields[row] + fields[row + diag] for diag in range(n_field) for row in range(n_field - diag)]
    assert len(spectra) == n_spec
    # Load fiducial Cls
    print(f'Loading fiducial Cls at {time.strftime("%c")}')
    fid_cl = like_bp.load_spectra(n_zbin, fid_pos_pos_dir, fid_she_she_dir, fid_pos_she_dir, lmax_in, lmin_in)
    fid_cl = fid_cl[:, lmin_in:]
    assert fid_cl.shape == (n_spec, n_ell_in)
    # Add noise
    print(f'Adding noise at {time.strftime("%c")}')
    pos_nl = np.loadtxt(pos_nl_path, max_rows=n_ell_in)
    she_nl = np.loadtxt(she_nl_path, max_rows=n_ell_in)
    # The first n_field spectra are the diag=0 auto-spectra, alternating
    # N-auto (even rows) and E-auto (odd rows) in field order.
    fid_cl[:n_field:2, :] += pos_nl
    fid_cl[1:n_field:2, :] += she_nl
    # Load mixing matrices
    print(f'Loading mixing matrices at {time.strftime("%c")}')
    lowl_skip = lmin_like - lmin_in
    with np.load(mixmats_path) as data:
        mixmat_nn_to_nn = data['mixmat_nn_to_nn'][lowl_skip:, :]
        mixmat_ne_to_ne = data['mixmat_ne_to_ne'][lowl_skip:, :]
        mixmat_ee_to_ee = data['mixmat_ee_to_ee'][lowl_skip:, :]
        mixmat_bb_to_ee = data['mixmat_bb_to_ee'][lowl_skip:, :]
    mixmat_shape = (n_ell_like, n_ell_in)
    assert mixmat_nn_to_nn.shape == mixmat_shape, (mixmat_nn_to_nn.shape, mixmat_shape)
    assert mixmat_ne_to_ne.shape == mixmat_shape, (mixmat_ne_to_ne.shape, mixmat_shape)
    assert mixmat_ee_to_ee.shape == mixmat_shape, (mixmat_ee_to_ee.shape, mixmat_shape)
    assert mixmat_bb_to_ee.shape == mixmat_shape, (mixmat_bb_to_ee.shape, mixmat_shape)
    # Pre-calculate mixed BB -> EE noise contribution for auto-spectra
    print(f'Mixing noise at {time.strftime("%c")}')
    cl_bb_to_ee = mixmat_bb_to_ee @ she_nl
    # Apply mixing matrices to fiducial Cls to get an unbinned 'observation'
    obs_cl = np.full((n_spec, n_ell_like), np.nan)
    for spec_idx, (f1, z1, f2, z2) in enumerate(spectra):
        print(f'Mixing {spec_idx + 1} / {n_spec} at {time.strftime("%c")}')
        # Identify the right mixing matrix
        spec_type = f1 + f2
        if spec_type == 'NN':
            mixmat = mixmat_nn_to_nn
        elif spec_type in ('NE', 'EN'):
            mixmat = mixmat_ne_to_ne
        elif spec_type == 'EE':
            mixmat = mixmat_ee_to_ee
        else:
            raise ValueError(f'Unexpected spec_type {spec_type}')
        # Apply it: l = l_out, p = l' = l_in
        cls_unmixed = fid_cl[spec_idx, :]
        cls_mixed = np.einsum('lp,p->l', mixmat, cls_unmixed)
        # Add BB->EE noise contribution for EE auto-spectra
        if spec_type == 'EE' and z1 == z2:
            cls_mixed += cl_bb_to_ee
        # Store
        obs_cl[spec_idx, :] = cls_mixed
    assert np.all(np.isfinite(obs_cl))
    # Iterate over numbers of bandpowers
    for n_bp in n_bps:
        print(f'Starting n_bp = {n_bp} at {time.strftime("%c")}')
        # Form binning matrix
        print(f'{n_bp}bp: Forming binning matrix at {time.strftime("%c")}')
        pbl = gaussian_cl_likelihood.python.simulation.get_binning_matrix(n_bp, lmin_like, lmax_like)
        if pbl.ndim == 1:
            pbl = pbl[np.newaxis, :]
        assert pbl.shape == (n_bp, n_ell_like)
        # Form combined binning and mixing matrices
        print(f'{n_bp}bp: Forming combined binning and mixing matrices at {time.strftime("%c")}')
        binmix_nn_to_nn = pbl @ mixmat_nn_to_nn
        binmix_ne_to_ne = pbl @ mixmat_ne_to_ne
        binmix_ee_to_ee = pbl @ mixmat_ee_to_ee
        binmix_bb_to_ee = pbl @ mixmat_bb_to_ee
        binmix_shape = (n_bp, n_ell_in)
        assert binmix_nn_to_nn.shape == binmix_shape
        assert binmix_ne_to_ne.shape == binmix_shape
        assert binmix_ee_to_ee.shape == binmix_shape
        assert binmix_bb_to_ee.shape == binmix_shape
        # Save combined binning and mixing matrices to disk
        bmm_filename = f'binmix_lminin{lmin_in}_lmaxin{lmax_in}_lminlike{lmin_like}_lmaxlike{lmax_like}_{n_bp}bp.npz'
        binmixmat_path = os.path.join(binmixmat_save_dir, bmm_filename)
        binmixmat_header = (f'Combined binning and mixing matrices output by {__file__} for '
                            f'mixmats_path = {mixmats_path}, lmin_in = {lmin_in}, lmax_in = {lmax_in}, '
                            f'lmin_like = {lmin_like}, lmax_like = {lmax_like}, n_bp = {n_bp}, '
                            f'at {time.strftime("%c")}')
        np.savez_compressed(binmixmat_path, binmix_tt_to_tt=binmix_nn_to_nn, binmix_te_to_te=binmix_ne_to_ne,
                            binmix_ee_to_ee=binmix_ee_to_ee, binmix_bb_to_ee=binmix_bb_to_ee, header=binmixmat_header)
        print(f'{n_bp}bp: Saved combined binnning and mixing matrices to {binmixmat_path} at {time.strftime("%c")}')
        # Bin observation
        print(f'{n_bp}bp: Binning observation at {time.strftime("%c")}')
        obs_bp = np.einsum('bl,sl->sb', pbl, obs_cl)
        assert obs_bp.shape == (n_spec, n_bp)
        assert np.all(np.isfinite(obs_bp))
        # Save observation to disk
        obs_bp_path = os.path.join(obs_bp_save_dir, f'obs_{n_bp}bp.npz')
        obs_bp_header = (f'Observed bandpowers output by {__file__} for fid_pos_pos_dir = {fid_pos_pos_dir}, '
                         f'fid_she_she_dir = {fid_she_she_dir}, fid_pos_she_dir = {fid_pos_she_dir}, '
                         f'mixmats_path = {mixmats_path}, lmin_like = {lmin_like}, lmax_like = {lmax_like}, '
                         f'n_bp = {n_bp}, at {time.strftime("%c")}')
        np.savez_compressed(obs_bp_path, obs_bp=obs_bp, header=obs_bp_header)
        print(f'{n_bp}bp: Saved binned observation to {obs_bp_path} at {time.strftime("%c")}')
        # Setup the likelihood module
        print(f'{n_bp}bp: Setting up likelihood module at {time.strftime("%c")}')
        bp_cov_path = bp_cov_filemask.format(n_bp=n_bp)
        config = like_bp_mix.setup(obs_bp_path, binmixmat_path, lmin_in, bp_cov_path, pos_nl_path, she_nl_path, lmin_in,
                                   lmax_in, n_zbin)
        print(f'{n_bp}bp: Setup complete at {time.strftime("%c")}')
        # Loop over every input directory
        source_dirs = glob.glob(os.path.join(grid_dir, '_[0-9]*/'))
        n_dirs = len(source_dirs)
        if n_dirs == 0:
            warnings.warn(f'{n_bp}bp: No matching directories. Terminating at {time.strftime("%c")}')
            return
        n_params = len(varied_params)
        if n_params == 0:
            warnings.warn(f'{n_bp}bp: No parameters specified. Terminating at {time.strftime("%c")}')
            return
        res = []
        for i, source_dir in enumerate(source_dirs):
            print(f'{n_bp}bp: Calculating likelihood {i + 1} / {n_dirs} at {time.strftime("%c")}')
            # Extract cosmological parameters
            params = [None]*n_params
            values_path = os.path.join(source_dir, 'cosmological_parameters/values.txt')
            with open(values_path, encoding='ascii') as f:
                for line in f:
                    for param_idx, param in enumerate(varied_params):
                        # Lines look like "<param> = <value>"; substring match
                        # locates the parameter, the remainder is its value.
                        param_str = f'{param} = '
                        if param_str in line:
                            params[param_idx] = float(line[len(param_str):])
            err_str = f'{n_bp}bp: Not all parameters in varied_params found in {values_path}'
            assert np.all([param is not None for param in params]), err_str
            # Check the ells for consistency
            galaxy_ell = np.loadtxt(os.path.join(source_dir, 'galaxy_cl/ell.txt'))[:n_ell_in]
            shear_ell = np.loadtxt(os.path.join(source_dir, 'shear_cl/ell.txt'))[:n_ell_in]
            galaxy_shear_ell = np.loadtxt(os.path.join(source_dir, 'galaxy_shear_cl/ell.txt'))[:n_ell_in]
            assert np.array_equal(galaxy_ell, ell_in)
            assert np.array_equal(shear_ell, ell_in)
            assert np.array_equal(galaxy_shear_ell, ell_in)
            # Load theory Cls
            th_pos_pos_dir = os.path.join(source_dir, 'galaxy_cl/')
            th_she_she_dir = os.path.join(source_dir, 'shear_cl/')
            th_pos_she_dir = os.path.join(source_dir, 'galaxy_shear_cl/')
            theory_cl = like_bp_mix.load_cls(n_zbin, th_pos_pos_dir, th_she_she_dir, th_pos_she_dir, lmax=lmax_in)
            # Evaluate likelihood
            log_like_gauss = like_bp_mix.execute(theory_cl, lmin_in, config)
            # Store cosmological params & likelihood
            res.append([*params, log_like_gauss])
        # Save results to file
        res_grid = np.asarray(res)
        param_names = ' '.join(varied_params)
        like_path = os.path.join(like_save_dir, f'like_lmaxlike{lmax_like}_{n_bp}bp.txt')
        like_header = (f'Output from {__file__}.like_bp_gauss_mix_loop_nbin for parameters:\ngrid_dir = {grid_dir}\n'
                       f'n_zbin = {n_zbin}\nlmax_like = {lmax_like}\nlmin_like = {lmin_like}\nlmax_in = {lmax_in}\n'
                       f'lmin_in = {lmin_in}\nfid_pos_pos_dir = {fid_pos_pos_dir}\n'
                       f'fid_she_she_dir = {fid_she_she_dir}\nfid_pos_she_dir = {fid_pos_she_dir}\n'
                       f'pos_nl_path = {pos_nl_path}\nshe_nl_path = {she_nl_path}\nmixmats_path = {mixmats_path}\n'
                       f'bp_cov_filemask = {bp_cov_filemask}\nn_bp = {n_bp}\nat {time.strftime("%c")}\n\n'
                       f'{param_names} log_like_gauss')
        np.savetxt(like_path, res_grid, header=like_header)
        print(f'{n_bp}bp: Saved likelihood file to {like_path} at {time.strftime("%c")}')
        print(f'{n_bp}bp: Done at {time.strftime("%c")}')
        print()
    print(f'All done at {time.strftime("%c")}') | 5,334,445 |
def infer_tf_dtypes(image_array):
    """
    Choosing a suitable tf dtype based on the dtype of input numpy array.
    """
    numpy_dtype = image_array.dtype[0]
    interp_order = image_array.interp_order[0]
    return dtype_casting(numpy_dtype, interp_order, as_tf=True)
def write_unique_genera():
    """Write the unique genera found in the input file to the output file.

    Reads whitespace-separated lines from the file named by ``argv[1]``;
    the first token of each line is the genus.  Writes each genus in
    first-seen order, one per line, to the file named by ``argv[2]`` and
    prints the number of unique genera.

    Output:
        - written file
    """
    seen = set()       # O(1) membership test instead of scanning a list
    all_genera = []    # preserves first-seen order
    with open(argv[1]) as inf:
        for line in inf:
            tokens = line.split()
            if not tokens:  # tolerate blank lines instead of raising IndexError
                continue
            genus = tokens[0]
            if genus not in seen:
                seen.add(genus)
                all_genera.append(genus)
    with open(argv[2], 'w') as outf:
        for g in all_genera:
            outf.write(f'{g}\n')
    print(len(all_genera))
    # total number of genera is used in bash script from which this script
    # is called. Above line should be left in!
def test_thread_crew(count):
    """
    thread_crew() creates multiple worker threads, reading from one Channel
    and writing to another.
    """
    def worker(value, check, out_chnl):
        # Record which thread handled this value.
        out_chnl.put((value, threading.get_ident()))
        if value == count - 1:
            out_chnl.end()
        # Now sleep until the work is finished, so that every value is run by a
        # different thread, and hence we can test the number of threads.
        while True:
            out_chnl.check()
            time.sleep(0.1)
    requests, responses = channel.thread_crew(count, worker, mode='1:m')
    requests.put_many(range(count)).end()
    outputs = tuple(responses)
    # Every input value appears exactly once in the responses...
    assert len(outputs) == count
    assert {output[0] for output in outputs} == set(range(count))
    # ...and `count` distinct thread idents were seen: one thread per value.
    assert len({output[1] for output in outputs}) == count | 5,334,448 |
def get_cifar10_datasets(n_devices, batch_size=256, normalize=False):
  """Get CIFAR-10 dataset splits.
  Returns (train_ds, val_ds, test_ds, 10, 256, (batch_size, 32, 32, 1)):
  the three tf.data splits followed by the number of classes, the vocab
  size (pixel values 0-255) and the per-batch input shape.
  """
  if batch_size % n_devices:
    raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
                     (batch_size, n_devices))
  train_dataset = tfds.load('cifar10', split='train[:90%]')
  val_dataset = tfds.load('cifar10', split='train[90%:]')
  test_dataset = tfds.load('cifar10', split='test')
  def decode(x):
    # Cast RGB images to single-channel grayscale int32 tensors.
    decoded = {
        'inputs':
            tf.cast(tf.image.rgb_to_grayscale(x['image']), dtype=tf.int32),
        'targets':
            x['label']
    }
    if normalize:
      # NOTE: dividing by 255 makes the inputs float-valued when normalize=True.
      decoded['inputs'] = decoded['inputs'] / 255
    return decoded
  train_dataset = train_dataset.map(decode, num_parallel_calls=AUTOTUNE)
  val_dataset = val_dataset.map(decode, num_parallel_calls=AUTOTUNE)
  test_dataset = test_dataset.map(decode, num_parallel_calls=AUTOTUNE)
  train_dataset = train_dataset.repeat()
  train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
  val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
  test_dataset = test_dataset.batch(batch_size, drop_remainder=True)
  # NOTE(review): shuffle is applied after batching, so it shuffles whole
  # batches rather than individual examples -- confirm this is intended.
  train_dataset = train_dataset.shuffle(
      buffer_size=256, reshuffle_each_iteration=True)
  return train_dataset, val_dataset, test_dataset, 10, 256, (batch_size, 32, 32,
                                                             1) | 5,334,449 |
def predict(dataset, fitmodel_url, save_results=True, show=False):
    """
    Function starts a job that makes predictions to input data with a given model

    Parameters
    ----------
    input - dataset object with input urls and other parameters
    fitmodel_url - model created in fit phase
    save_results - save results to ddfs
    show - show info about job execution

    Returns
    -------
    Urls with predictions on ddfs

    Raises an Exception if the dataset lacks a target label mapping or the
    fit-model urls were not produced by the logistic regression fit phase.
    """
    from disco.worker.pipeline.worker import Worker, Stage
    from disco.core import Job, result_iterator
    if dataset.params["y_map"] == []:
        raise Exception("Logistic regression requires a target label mapping parameter.")
    if "logreg_fitmodel" not in fitmodel_url:
        raise Exception("Incorrect fit model.")
    job = Job(worker=Worker(save_results=save_results))
    # job parallelizes execution of mappers
    job.pipeline = [
        ("split", Stage("map", input_chain=dataset.params["input_chain"], init=simple_init, process=map_predict))]
    job.params = dataset.params # job parameters (dataset object)
    job.params["thetas"] = [v for k, v in result_iterator(fitmodel_url["logreg_fitmodel"]) if k == "thetas"][
        0] # thetas are loaded from ddfs
    job.run(name="logreg_predict", input=dataset.params["data_tag"])
    results = job.wait(show=show)
    return results | 5,334,450 |
def test_multiple_config_to_csv(log_basic_config_reader):
    """
    Assert that multiple reads and writes are supported by config_to_csv.
    """
    config, config_reader = log_basic_config_reader
    log_dir = os.path.dirname(config_reader.data["log_dir"])
    n_runs = 5
    for _ in range(n_runs):
        config_to_csv(config_reader)
    # Each call must have appended exactly one row.
    df = pd.read_csv(os.path.join(log_dir, "run_data.csv"))
    assert len(df) == n_runs
    records = df.to_dict("records")
    assert len(records) == n_runs
    # Every row round-trips the full config.
    for record in records:
        for key, expected in config_reader.data.items():
            assert record[key] == expected
def format_string_to_json(balance_info):
    """
    Format string to json.

    e.g: '''Working Account|KES|481000.00|481000.00|0.00|0.00'''
    => {'Working Account': {'current_balance': '481000.00',
        'available_balance': '481000.00',
        'reserved_balance': '0.00',
        'uncleared_balance': '0.00'}}
    """
    balance_dict = frappe._dict()
    # Accounts are '&'-separated; fields within an account are '|'-separated:
    # name|currency|current|available|reserved|uncleared
    for account_info in balance_info.split("&"):
        fields = account_info.split('|')
        balance_dict[fields[0]] = {
            'current_balance': fmt_money(fields[2], currency="KES"),
            'available_balance': fmt_money(fields[3], currency="KES"),
            'reserved_balance': fmt_money(fields[4], currency="KES"),
            'uncleared_balance': fmt_money(fields[5], currency="KES"),
        }
    return dumps(balance_dict)
def get_relevant_texts(subject: Synset, doc_threshold: float) -> Tuple[List[str], List[int], int, int]:
    """Get all lines from all relevant articles. Also return the number of retrieved documents and retained ones."""
    article_dir = get_article_dir(subject)
    subject_name = get_concept_name(subject)
    # One relevance score per retrieved article, line-by-line.
    with get_relevant_scores_path(subject).open() as f:
        scores = [float(line) for line in f if line.strip()]
    line_list: List[str] = []
    doc_id_list: List[int] = []
    num_doc_retained = 0
    for doc_id, score in enumerate(scores):
        path = article_dir / "{}.txt".format(doc_id)
        try:
            with path.open() as f:
                lines = [line.strip() for line in f if line.strip()]
        except FileNotFoundError:
            logger.warning(f"Subject {subject.name()} - {path} does not exist!")
            continue
        if len(lines) > 500:  # ignore huge files
            continue
        text = "\n".join(lines)
        # Keep high-scoring docs, plus short docs that mention the subject.
        short_and_on_topic = len(text.split()) <= 200 and subject_name in text.lower()
        if score >= doc_threshold or short_and_on_topic:
            line_list.extend(lines)
            doc_id_list.extend([doc_id] * len(lines))
            num_doc_retained += 1
    return line_list, doc_id_list, len(scores), num_doc_retained
def plot_mae(X, y, model):
    """Scatter actual vs. predicted trip durations and save the figure.

    It is also useful to log plots as artifacts (e.g. in an experiment
    tracker), so the figure is saved to "MAE.png" and returned.
    """
    fig = plt.figure()
    plt.scatter(y, model.predict(X))
    plt.xlabel("Durée réelle du trajet")
    plt.ylabel("Durée estimée du trajet")
    fig.savefig("MAE.png")
    plt.close(fig)
    return fig
def test_precision_epoch():
    """
    Check that input via epoch also has full precision, i.e., against
    regression on https://github.com/astropy/astropy/pull/366
    """
    years = range(1980, 2001)
    utc_times = Time(years, format='jyear', scale='utc')
    tai_times = Time(years, format='jyear', scale='tai')
    # UTC-TAI offsets at whole Julian years must come out as whole seconds.
    delta = utc_times - tai_times
    assert allclose_sec(delta.sec, np.round(delta.sec))
def compile_options(
		rst_roles: Optional[List[str]],
		rst_directives: Optional[List[str]],
		*,
		allow_autodoc: bool = False,
		allow_toolbox: bool = False,
		):
	"""
	Compile the list of allowed roles and directives.

	:param rst_roles:
	:param rst_directives:
	:param allow_autodoc:
	:param allow_toolbox:
	"""

	# Defaults come from the [flake8] section of tox.ini, when present.
	default_allowed_rst_directives: List[str] = []
	default_allowed_rst_roles: List[str] = []

	config = ConfigParser()
	config.read("tox.ini", encoding="UTF-8")
	flake8_section = config["flake8"] if "flake8" in config else {}
	if "rst-directives" in flake8_section:
		default_allowed_rst_directives.extend(re.split(r"[\n,]", flake8_section["rst-directives"]))
	if "rst-roles" in flake8_section:
		default_allowed_rst_roles.extend(re.split(r"[\n,]", flake8_section["rst-roles"]))

	# Toolbox is the broadest domain, then autodoc, then plain builtin.
	domain: Domain
	if allow_toolbox:
		domain = Toolbox()
	elif allow_autodoc:
		domain = Autodoc()
	else:
		domain = Builtin()

	if rst_roles is None:
		rst_roles = sorted({*default_allowed_rst_roles, *domain.roles})
	if rst_directives is None:
		rst_directives = sorted({*default_allowed_rst_directives, *domain.directives})

	return rst_roles, rst_directives
def get_pure_function(method):
    """
    Retrieve the pure (underlying) function wrapped by a bound method.

    Works on Python 3 method objects (``__func__``) and falls back to the
    legacy CPython 2 attribute (``im_func``).
    """
    assert isinstance(method, types.MethodType)
    func = getattr(method, '__func__', None)  # Python 3 attribute
    if func is None:
        func = method.im_func  # Python 2 fallback (CPython-specific)
    return func
def test_failure_of_just_reload():
    """
    Test that really_reload is _actually_ needed in this test case

    This test exists to check that the test before it is
    actually testing what needs to be tested
    """
    from importlib import reload, invalidate_caches
    setup_old_math_lib()
    invalidate_caches()
    from .reload_module import my_prog, math
    assert my_prog.main() == 3
    update_math_py_with_new_code()
    #let's try using importlib
    reload(math)
    # we successfully reloaded the module, but...
    assert math.do_some_math() == 9
    # the main program is still using the old code
    # (importlib.reload rebinds the module's attributes, but my_prog keeps
    # whatever references it captured at its own import time)
    assert my_prog.main() == 3 | 5,334,458 |
def _agg_samples_2d(sample_df: pd.DataFrame) -> pd.DataFrame:
"""Aggregate ENN samples for plotting."""
def pct_95(x):
return np.percentile(x, 95)
def pct_5(x):
return np.percentile(x, 5)
enn_df = (sample_df.groupby(['x0', 'x1'])['y']
.agg([np.mean, np.std, pct_5, pct_95]).reset_index())
enn_df = enn_df.rename({'mean': 'y'}, axis=1)
enn_df['method'] = 'enn'
return enn_df | 5,334,459 |
def get_np_num_array_str(data_frame_rows):
    """
    Build a code snippet (as a string) that generates `data_frame_rows`
    random integers and wraps them in a single-column pandas DataFrame.
    """
    snippet = cleandoc(f"""
        from sklearn.preprocessing import StandardScaler
        import pandas as pd
        from numpy.random import randint
        series = randint(0,100,size=({data_frame_rows}))
        df = pd.DataFrame(series, columns=["num"])
        """)
    return snippet
def get_config(name: str = None, default: Any = _MISSING) -> Any:
    """Gets the global configuration.

    Parameters
    ----------
    name : str, optional
        The name of the setting to get the value for. If no name is
        given then a copy of the whole :obj:`Configuration` object is
        returned.
    default : optional
        The default value to return if `name` is provided but the
        setting doesn't exist in the global configuration.

    Returns
    -------
    :obj:`Configuration` or :obj:`object`
        The global configuration object or the configuration setting
        requested.

    Raises
    ------
    KeyError
        If `name` is not present and no `default` was supplied.
    """
    if not name:
        return _GLOBAL_CONFIG.copy()
    # Compare the sentinel by identity: `==` could be fooled by a caller's
    # default value whose __eq__ claims equality with the sentinel object.
    if default is _MISSING:
        return _GLOBAL_CONFIG[name]
    return _GLOBAL_CONFIG.get(name, default)
def domain_domain_distance(ptg1, ptg2, pdb_struct, domain_distance_dict):
    """
    Return the distance between two domains, which will be defined as
    the distance between their two closest SSEs
    (using SSE distance defined in ptdistmatrix.py)

    Parameters:
       ptg1 - PTGraph2 object for one domain
       ptg2 - PTGraph2 object for the other domain
       pdb_struct - parsed PDB structure from Bio.PDB
       domain_distance_dict (In/Out) - dict { (dom1, dom2) : ret_tuple }
                   for memoizing domain-domain distances. (dom1,dom2)
                   is tuple of two PTGraph2 objects, note both (dom1,dom2)
                   and (dom2,dom1) are always added
                   and ret_tuple is the return value tuple as defined below.

    Return value:
       tuple (dist, closest_sse1, closest_sse2, closest_res1, closest_res2)
       distance in Angstroms between the two domains, as defined above and
       closest_sse1, closest_sse2 are PTNode objects for the closest
       SSEs in ptg1 and ptg2 domains respectively and
       closest_res1 and closest_res2 are the closest residues in
       closest_sse1 and closest_sse2 respectively.
    """
    # This function is memoized by the domain_distance_dict parameter,
    # to save recomputation of previously computed distances.
    # (`in` replaces dict.has_key(), which was removed in Python 3.)
    if (ptg1, ptg2) in domain_distance_dict:
        return domain_distance_dict[(ptg1, ptg2)]
    min_dist = float("inf")
    closest_sse1 = closest_sse2 = None
    closest_res1 = closest_res2 = None
    # exclude the terminus nodes
    ptg1_sses = [ node for node in ptg1.iter_nodes()
                  if not isinstance(node, PTNodeTerminus) ]
    ptg2_sses = [ node for node in ptg2.iter_nodes()
                  if not isinstance(node, PTNodeTerminus) ]
    # Exhaustive pairwise search for the closest SSE pair between domains.
    for sse1 in ptg1_sses:
        for sse2 in ptg2_sses:
            (dist, res1, res2) = calc_sse_sse_dist(sse1, sse2, pdb_struct)
            if dist < min_dist:
                min_dist = dist
                closest_sse1 = sse1
                closest_sse2 = sse2
                closest_res1 = res1
                closest_res2 = res2
    ret_tuple12 = (min_dist,closest_sse1,closest_sse2,closest_res1,closest_res2)
    ret_tuple21 = (min_dist,closest_sse2,closest_sse1,closest_res2,closest_res1)
    # Store both orientations so either lookup order hits the cache.
    domain_distance_dict[(ptg1, ptg2)] = ret_tuple12
    domain_distance_dict[(ptg2, ptg1)] = ret_tuple21
    return ret_tuple12
def rip_subtitles():
    """
    %prog <dvd_source>
    """
    logging.basicConfig(level=logging.INFO)
    # Use this function's own docstring as the usage text; the original
    # referenced encode_dvd.__doc__, which belongs to a different command.
    parser = argparse.ArgumentParser(usage=trim(rip_subtitles.__doc__))
    parser.add_argument(
        '-t', '--title', help='enter the dvd title number to process', default=''
    )
    parser.add_argument('-s', '--subtitle', help='enter the subtitle ID')
    parser.add_argument('device', nargs='?')
    args = parser.parse_args()
    command = MEncoderCommand()
    device = args.device or input('enter device> ')
    print('device is', device)
    command.set_device(device)
    videos_path = join(os.environ['PUBLIC'], 'Videos', 'Movies')
    default_title = infer_name(device)
    title_prompt = 'Enter output filename [%s]> ' % default_title
    user_title = input(title_prompt) or default_title
    target = os.path.join(videos_path, user_title)
    command.source = ['dvd://%(title)s' % vars(args)]
    # Video/audio are discarded (-o devnull); only the subtitle stream is
    # extracted to a VobSub file at `target`.
    command['o'] = os.devnull
    command.audio_options = HyphenArgs(nosound=None)
    command.video_options = HyphenArgs(ovc='frameno')
    command['sid'] = args.subtitle or '0'
    command['vobsubout'] = target
    command['vobsuboutindex'] = command['sid']
    # command['vobsuboutid'] = 'en'
    command = tuple(command.get_args())
    # Context manager closes the devnull handle (the original leaked it).
    with open(os.devnull, 'w') as errors:
        proc = subprocess.Popen(command, stderr=errors)
        proc.wait()
def main():
    """
    generate a sales example with tables for customers,
    sales, products
    """
    data_files = content.DataFiles()
    dates = generate.get_list_dates(2016, 2016, 500)
    produce_csv = os.path.join(content.data_fldr, 'food', 'garden_produce.csv')
    products = list(data_files.get_collist_by_name(produce_csv, 'name')[0])

    # Customers table must exist before sales can reference its IDs.
    customers = generate.TableGenerator(
        8,
        ['STRING', 'PEOPLE', 'PEOPLE', 'PLACE'],
        ['Customer ID', 'First Name', 'Surname', 'Country'])
    customers.save_table('customers.csv')
    customer_ids = list(data_files.get_collist_by_name('customers.csv', 'Customer ID')[0])

    sales = generate.TableGenerator(
        25,
        [dates, customer_ids, products, 'CURRENCY'],
        ['Date of sale', 'Customer ID', 'Product', 'Amount'])
    sales.save_table('sales.csv')
def pred_error(f_pred, prepare_data, data, iterator, max_len, n_words, filter_h):
    """Compute the prediction error: 1 - (fraction of correctly classified samples)."""
    correct = 0
    for _, valid_index in iterator:
        batch = prepare_data([data[0][t] for t in valid_index],
                             max_len, n_words, filter_h)
        predictions = f_pred(batch)
        targets = np.array([data[1][t] for t in valid_index], dtype='int32')
        correct += (predictions == targets).sum()
    return 1. - numpy_floatX(correct) / len(data[0])
def standardize_10msample(frac: float = 0.01):
    """Run the data-processing pipeline and save a standardized .csv data file.

    Intended for Pandas DataFrames; for Dask DataFrames use
    standardize_10msample_dask.

    Args:
        frac (float, optional): Fraction of data file rows to sample. Defaults to 0.01.
    Returns:
        pd.DataFrame: the finished DataFrame, matching what .read_csv()
        of the saved file would yield.
    """
    source = '../data/10m_sample_common_passwords/10-million-combos.txt'
    df = (pd.read_csv(source, header=None, delimiter='\t')
            .astype(str)
            .sample(frac=frac))
    df.columns = ['username', 'password']
    df.drop('username', axis=1, inplace=True)
    df['length'] = df['password'].apply(len)
    strength_features(df)
    df['class'] = df['password'].apply(withPassClass)
    pass_class_expand(df)
    to_csv(df, filename='../data/10m_sample_common_passwords/10m_standardized.csv')
    return df
def test_assign_resolution_bins(data_fmodel, bins, inplace, return_labels):
    """Test DataSet.assign_resolution_bins"""
    out = data_fmodel.assign_resolution_bins(
        bins=bins, inplace=inplace, return_labels=return_labels)
    labels = None
    if return_labels:
        out, labels = out
    # Bin assignment: a "bin" column with exactly `bins` values 0..bins-1
    assert "bin" in out.columns
    assert len(out["bin"].unique()) == bins
    assert out.bin.max() == bins - 1
    # In-place semantics: same object if and only if inplace was requested
    assert (id(out) == id(data_fmodel)) == inplace
    # One label per resolution bin
    if return_labels:
        assert len(labels) == bins
def put(bucket, key, val):
    """
    Writes key-value pair to storage.

    The value is written to a temporary file and then atomically moved into
    place, so readers never observe a partially written key.

    :param bucket: (string) A bucket name.
    :param key: (string) A key name.
    :param val: (bytes) Value to write.
    :raises TypeError: if val is not bytes.
    """
    _check_bucket(bucket)
    if not isinstance(val, bytes):
        raise TypeError("value should be of type bytes")
    key_path = _key_path(bucket, key)
    # Create the temp file in the destination directory: a rename is only
    # atomic (and only guaranteed to succeed) within one filesystem, and the
    # default temp dir is often on a different one.
    with NamedTemporaryFile("wb", dir=str(key_path.parent), delete=False) as f:
        tmp_key_path = Path(f.name)
        f.write(val)
        f.flush()
        os.fsync(f.fileno())
    # replace() (unlike rename()) also atomically overwrites an existing
    # destination on Windows.
    tmp_key_path.replace(key_path)
def smatrix_backward_kernel_S(z, phase_factors, mean_probe_intensities, r, r_min, out, tau):
    """
    S-matrix has beam tilts included, pre-calculated scanning phase factors.
    Fastest to compute
    :param z: D x K x MY x MX x 2
    :param phase_factors: B x D x K x 2
    :param r: D x K x 2
    :param mean_probe_intensities: D
    :param out: B x NY x NX x 2
    :param z_strides: (4,)
    :return: exit waves in out
    """
    # Global thread index: one thread per (batch b, row my, col mx) output pixel.
    n = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    D, K, MY, MX, _ = z.shape
    B = out.shape[0]
    # Decompose the flat index n into (b, my, mx).
    b = n // (MY * MX)
    my = (n - b * (MX * MY)) // MX
    mx = (n - b * (MX * MY) - my * MX)
    if n < MY * MX * B:
        for d in range(D):
            for k in range(K):
                # Scan position of (d, k), shifted so that r_min maps to index 0.
                r0 = int(r[d, k, 0] - r_min[0])
                r1 = int(r[d, k, 1] - r_min[1])
                # Complex values are stored as (real, imag) in the trailing axis.
                a = z[d, k, my, mx, 0]
                c = z[d, k, my, mx, 1]
                u = phase_factors[b, d, k, 0]
                v = phase_factors[b, d, k, 1]
                # (a + i*c) * conj(u + i*v): multiply by the conjugated phase factor.
                val_real = a * u + c * v
                val_imag = c * u - a * v
                # Scale by step size tau, normalized by the mean probe intensity.
                val_real *= tau[0] / mean_probe_intensities[d]
                val_imag *= tau[0] / mean_probe_intensities[d]
                # Different (d, k) pairs may write the same output pixel, so the
                # accumulation must be atomic.
                cuda.atomic.add(out, (b, r0 + my, r1 + mx, 0), val_real)
                cuda.atomic.add(out, (b, r0 + my, r1 + mx, 1), val_imag)
def is_ansible_managed(file_path):
    """
    Gets whether the fail2ban configuration file at the given path is managed by Ansible.
    :param file_path: the file to check if managed by Ansible
    :return: whether the file is managed by Ansible
    """
    with open(file_path, "r") as config_file:
        first_line = config_file.readline()
    # Managed files start with the well-known Ansible marker line.
    return first_line.strip() == ANSIBLE_MANAGED_LINE
def delete_samples_generic_database(exec_tag, testcase_name=None):
    """Delete existing samples of a given testcase.

    Removes rows from GEN_RESULTS_TABLE matching ``exec_tag`` and, when
    provided, ``testcase_name``.  A missing table is a no-op.

    :param exec_tag: execution tag whose samples should be deleted
    :param testcase_name: optional testcase name restricting the deletion
    """
    # Build the path portably instead of concatenating with '/'.
    db_path = os.path.join(os.getenv("WAR_TOOLS_DIR"), "generic_samples.db")
    con = sqlite3.connect(db_path)
    try:
        statement = "SELECT name FROM sqlite_master WHERE type='table';"
        if ('GEN_RESULTS_TABLE',) in con.execute(statement).fetchall():
            if exec_tag and testcase_name:
                con.execute("DELETE from GEN_RESULTS_TABLE where tag == ? and "
                            "testcase == ?", (exec_tag, testcase_name))
            else:
                con.execute("DELETE from GEN_RESULTS_TABLE where tag == ?", (exec_tag,))
        con.commit()
    finally:
        # Always close, even if an execute() raises, so the DB file is not
        # left locked (the original leaked the connection on error).
        con.close()
def sumai(array):
    """
    Return the sum of the elements of an integer array.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sumai_c.html
    :param array: Input Array.
    :type array: Array of ints
    :return: The sum of the array.
    :rtype: int
    """
    count = ctypes.c_int(len(array))
    int_vector = stypes.toIntVector(array)
    return libspice.sumai_c(int_vector, count)
def _get_connection_dir(app):
    """Gets the connection dir to use for the IPKernelApp.

    Order of precedence:
      1. 'runtime_dir' from the [JUPYTER] section of pyxll.cfg
         (relative paths are resolved against the pyxll package directory)
      2. the kernel app's default connection dir
    If Excel runs as a UWP app and the chosen dir lives under AppData, a
    path under the pyxll package is used instead.
    """
    connection_dir = None
    # Check the pyxll config first
    cfg = get_config()
    if cfg.has_option("JUPYTER", "runtime_dir"):
        connection_dir = cfg.get("JUPYTER", "runtime_dir")
        # BUG FIX: os.path.abspath() returns a (truthy) string, so the original
        # `if not os.path.abspath(...)` never fired; isabs() is the intended
        # "is this a relative path?" test.
        if not os.path.isabs(connection_dir):
            connection_dir = os.path.join(os.path.dirname(pyxll.__file__), connection_dir)
    # If not set in the pyxll config use the default from the kernel
    if not connection_dir:
        connection_dir = app.connection_dir
    # If Excel is installed as a UWP then AppData will appear as a different folder when the
    # child Python process is run so use a different path.
    excel_path = _get_excel_path()
    if "WindowsApps" in re.split(r"[/\\]+", excel_path):
        _log.debug("Excel looks like a UWP app.")
        if "AppData" in re.split(r"[/\\]+", connection_dir):
            connection_dir = os.path.join(os.path.dirname(pyxll.__file__), ".jupyter", "runtime")
            _log.warning("Jupyter's runtime directory is in AppData but Excel is installed as a UWP. ")
            _log.warning(f"{connection_dir} will be used instead.")
            _log.warning("Set 'runtime_dir' in the '[JUPYTER]' section of your pyxll.cfg to change this directory.")
    return connection_dir
def download(accession):
    """Downloads GEO file based on accession number. Returns a SOFTFile or ANNOTFile
    instance.

    For reading and unzipping binary chunks, see:
    http://stackoverflow.com/a/27053335/1830334
    http://stackoverflow.com/a/2424549/1830334
    """
    import os
    if 'GPL' not in accession:  # soft file
        geo_file = SOFTFile(accession)
        if 'GDS' in accession:
            url = _construct_GDS_url(accession)
        else:
            url = _construct_GSE_url(accession)
    else:
        geo_file = ANNOTFile(accession)
        url = _construct_GPL_url(accession)
    if os.path.isfile(geo_file.path()):  # avoid downloading the same file again if exists
        return geo_file
    CHUNK_SIZE = 1024
    # 16 + MAX_WBITS tells zlib to expect a gzip header.
    decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
    response = _get_file_by_url(url)
    # BUG FIX: decompress() returns bytes, so the file must be opened in
    # binary mode (the original 'w+' text mode raised TypeError on Python 3);
    # the with-block also guarantees the handle is closed.
    with open(geo_file.path(), 'wb') as f:
        while True:
            bin_chunk = response.read(CHUNK_SIZE)
            if not bin_chunk:
                break
            f.write(decompressor.decompress(bin_chunk))
        # Emit any data still buffered inside the decompressor.
        f.write(decompressor.flush())
    return geo_file
def invoke(
    node: Union[DAG, Task],
    params: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, NodeOutput]:
    """
    Execute a node (either a DAG or a single Task) with the given parameters.

    Parameters
    ----------
    node
        The DAG or Task to run.
    params
        Task inputs, keyed by input/parameter name.

    Returns
    -------
    Serialized node outputs, keyed by output name.

    Raises
    ------
    ValueError
        If a required parameter is missing.
    TypeError
        If an output cannot be derived from the function's return value.
    SerializationError
        If an output cannot be serialized with its configured Serializer.
    """
    runner = _invoke_dag if isinstance(node, DAG) else _invoke_task
    return runner(node, params=params)
def test_adder1_config():
    """Verify that adder1 picks up its offset from the active config file."""
    configfy.set_active_config_file('./tests/test_config.ini')
    result = adder1(1, 1)
    assert result == 3, 'Offset not read from config!'
def convert_acl_to_iam_policy(acl):
    """Converts the legacy ACL format to an IAM Policy proto.

    Args:
      acl: dict with optional 'owners', 'readers' and 'writers' member lists
        and an optional 'all_users_can_read' flag (adds 'allUsers' to readers).

    Returns:
      A dict with a 'bindings' list mapping roles/owner, roles/viewer and
      roles/editor to the corresponding non-empty member lists.
    """
    owners = acl.get('owners', [])
    # Copy, so appending 'allUsers' does not mutate the caller's ACL dict
    # (the original appended to the list stored in `acl`).
    readers = list(acl.get('readers', []))
    if acl.get('all_users_can_read', False):
        readers.append('allUsers')
    writers = acl.get('writers', [])
    bindings = []
    if owners:
        bindings.append({'role': 'roles/owner', 'members': owners})
    if readers:
        bindings.append({'role': 'roles/viewer', 'members': readers})
    if writers:
        bindings.append({'role': 'roles/editor', 'members': writers})
    return {'bindings': bindings}
def fix_module_doctest(module):
    """
    Extract docstrings from cython functions, that would be skipped by doctest
    otherwise.
    """
    tests = {}
    for attr_name in dir(module):
        attr = getattr(module, attr_name)
        if isinstance(attr, numbawrapper.NumbaWrapper):
            # Numba-wrapped function: doctest must inspect the wrapped py_func.
            if from_module(module, attr.py_func) and attr.py_func.__doc__:
                tests[attr_name] = attr.py_func.__doc__
        elif inspect.isbuiltin(attr):
            if isinstance(attr.__doc__, str) and from_module(module, attr):
                tests[attr_name] = attr.__doc__
    module.__test__ = tests
def get_valid_start_end(mask):
    """
    Find, per series, the first valid index and one past the last valid index.

    Args:
        mask (ndarray of bool): invalid mask of shape (num_series, num_steps);
            True marks an invalid entry.
    Returns:
        Tuple of two int32 arrays of length num_series: the first valid index
        and the exclusive end index; both are -1 when every entry is invalid.
    """
    num_series, num_steps = mask.shape[0], mask.shape[1]
    first_valid = np.full(num_series, -1, dtype=np.int32)
    valid_end = np.full(num_series, -1, dtype=np.int32)
    for row in range(num_series):
        # Forward scan: first position that is not masked.
        for col in range(num_steps):
            if not mask[row][col]:
                first_valid[row] = col
                break
        # Backward scan: last unmasked position, reported exclusively.
        for col in range(num_steps - 1, -1, -1):
            if not mask[row][col]:
                valid_end[row] = col + 1
                break
    return first_valid, valid_end
def put_network_object(session, key, data):
    # type: (Session, Text, Any) -> None
    """Put data as extended object with given key for the current network.

    :param session: session whose current network (``session.network``) is targeted
    :param key: name under which the object is stored
    :param data: object contents, streamed to the service via ``_put_stream``
    """
    # Build the REST path: /networks/<network>/objects
    url_tail = "/{}/{}/{}".format(
        CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS
    )
    # The object key is passed as a query parameter, the data as the body.
    _put_stream(session, url_tail, data, {CoordConstsV2.QP_KEY: key})
def pahrametahrize(*args, **kwargs) -> t.Callable:
    """Thin alias for `pytest.mark.parametrize`; forwards all arguments unchanged."""
    marker = pytest.mark.parametrize
    return marker(*args, **kwargs)
def utcnow():
    """Return the current time in UTC with a UTC timezone set."""
    # Truncate to whole seconds and attach the UTC tzinfo.
    current = datetime.utcnow()
    return current.replace(microsecond=0, tzinfo=UTC)
def default_to(default, value):
    """
    Ramda implementation of default_to: return ``value`` unless it is
    None or NaN, in which case return ``default``.

    The previous ``value or default`` also replaced legitimate falsy values
    (0, '', [], False), which Ramda's defaultTo does not do.

    :param default: fallback returned when value is missing
    :param value: candidate value
    :return: value if it is not None/NaN, else default
    """
    # NaN is the only value for which `x != x` holds, so this catches NaN
    # without needing math.isnan (which rejects non-floats).
    if value is None or value != value:
        return default
    return value
def copy_one_class_images_labels(path_to_images, path_to_labels, target_path,
                                 image_type='.png', label_suffix='-labels'):
    """
    Copy images whose label image contains only a single class.

    Args:
        path_to_images: path to the images
        path_to_labels: path to the labels
        target_path: path under which the images/labels are stored
        image_type: file extension of the images/labels (e.g. .jpg, .png)
        label_suffix: suffix appended to an image stem to get its label name
    Returns:
    """
    source_images = glob.glob(path_to_images + '/*' + image_type)
    target_path += '/one_class_images_labels/'
    img_out_dir = target_path + '/images/'
    label_out_dir = target_path + '/labels/'
    ensure_path_exists(img_out_dir)
    ensure_path_exists(label_out_dir)
    for image_path in source_images:
        basename = os.path.basename(image_path)
        # Label file shares the stem but carries the suffix before the extension.
        stem, ext = basename[:-4], basename[-4:]
        label_path = path_to_labels + stem + label_suffix + ext
        if is_one_class_label_image(label_path):
            shutil.copy(image_path, img_out_dir)
            shutil.copy(label_path, label_out_dir)
def dummy():
    """
    >>> import gym
    >>> env = gym.make("FetchReachCSL-v1")
    >>> _ = env.seed(0)
    >>> _ = env.reset()
    >>> for _ in range(5):
    ...     obs, rew, _, info = env.step(env.action_space.sample())
    ...     rew2 = env.compute_reward(obs['achieved_goal'], obs['desired_goal'], dict())
    ...     print(rew, rew2, info['reward_compute_count'])
    -1.4871227185677292 -1.4871227185677292 1
    -1.1468296518239358 -1.1468296518239358 3
    -3.2561153606471986 -3.2561153606471986 5
    -3.6017700571537192 -3.6017700571537192 7
    -3.9249514565213195 -3.9249514565213195 9
    >>> env = gym.make("FetchReachPR-v1")
    >>> _ = env.seed(0)
    >>> _ = env.reset()
    >>> for _ in range(5):
    ...     obs, rew, _, info = env.step(env.action_space.sample())
    ...     rew2 = env.compute_reward(obs['achieved_goal'], obs['desired_goal'], dict())
    ...     print(rew, rew2, info['reward_compute_count'])
    -1.0 -1.0 1
    -1.0 -1.0 3
    -1.0 -1.0 5
    -1.0 -1.0 7
    -1.0 -1.0 9
    >>> env = gym.make("FetchReachSparse-v1")
    >>> _ = env.seed(0)
    >>> _ = env.reset()
    >>> for _ in range(5):
    ...     obs, rew, _, info = env.step(env.action_space.sample())
    ...     rew2 = env.compute_reward(obs['achieved_goal'], obs['desired_goal'], dict())
    ...     print(rew, rew2, info['reward_compute_count'])
    -1.0 -1.0 1
    -1.0 -1.0 3
    -1.0 -1.0 5
    -1.0 -1.0 7
    -1.0 -1.0 9
    """
    # No runtime behavior: this function exists only to host the doctests above.
    pass
def main(page_size: int = 10000, period: str = "daily", adjust: str = ""):
    """
    下载入口程序 (download entry point).

    :param page_size: symbols per page; the default 10000 covers the whole market
    :param period: bar period: daily, weekly or monthly
    :param adjust: price adjustment: "qfq" forward, "hfq" backward, "" none (default)
    :return:
    """
    res = json.loads(requests_get(a_detail_url(psize=page_size)))
    # Bail out if the response lacks the expected shape; the original fell
    # through and hit a NameError on `symbols` below.
    if "data" not in res or "diff" not in res["data"]:
        return
    data = res['data']['diff']
    # f15 == "-" marks symbols without a price (suspended/invalid); skip them.
    symbols = [item["f12"] for item in data if item['f15'] != "-"]
    # BUG FIX: removed a leftover `print(len(symbols)); exit()` debug pair
    # that made all of the download code below unreachable and killed the
    # interpreter.
    crawl_num = 20
    thread_list = []
    # Number of symbols each worker thread downloads.
    per_num = math.ceil(len(symbols) / crawl_num)
    for i in range(crawl_num):
        begin = i * per_num
        end = begin + per_num
        thread = threading.Thread(target=download,
                                  args=(symbols[begin:end], period, adjust))
        thread.start()
        thread_list.append(thread)
    # Wait for all workers to finish.
    for crawl_thread in thread_list:
        crawl_thread.join()
def insertGraph():
    """
    Create a new graph

    Allocates a fresh reference, appends a new sub-graph element under the
    XML root, marks the cross-reference store dirty, and returns the new
    graph's name together with its (element, dot-graph) pair.
    """
    root = Xref.getroot().elem
    ref = getNewRef()
    # Qualify the new element's tag with the root's namespace.
    elem = etree.Element(etree.QName(root, sgraph), reference=ref)
    name = makeNewName(sgraph, elem)
    root.append(elem)
    # Record that the cross-reference store has unsaved changes.
    Xref.setDirty()
    return name, (elem, newDotGraph(name, ref, elem))
def get_line_notif(line_data: str):
    """
    Fetch reminder rows matching the current date/time for one reminder type.

    :param line_data: reminder type selecting the matching rule
    :return: list of matching rows (empty when nothing matches or on error)
    """
    res = []
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(
            user=USER,
            password=PASSWORD,
            host="127.0.0.1",
            port="5432",
            database=DATABASE)
        cursor = connection.cursor(cursor_factory=extras.DictCursor)
        now = datetime.datetime.now()
        # Zero-padded components matching the stored text format.
        hhmm = f"{now.hour:02d}:{now.minute:02d}"
        day = f"{now.day:02d}"
        month = f"{now.month:02d}"
        # Parameterized queries instead of f-string interpolation.
        if line_data in ("Мобильная Связь", "Подписки", "ЖКХ"):
            cursor.execute("SELECT * from reminders WHERE date = %s and "
                           "time = %s and type = %s;", (day, hhmm, line_data))
        elif line_data == "Планер":
            # BUG FIX: the original interpolated the *method* datetime.date
            # instead of calling it, so this branch could never match.
            cursor.execute("SELECT * from reminders WHERE date = %s and "
                           "time = %s and type = %s;",
                           (str(now.date()), hhmm, line_data))
        elif line_data == "День Рождения":
            cursor.execute("SELECT * from reminders WHERE date = %s and "
                           "time = %s and type = %s;",
                           (f"{day}.{month}", hhmm, line_data))
        elif line_data == "Приём Лекарств":
            cursor.execute("SELECT * from reminders WHERE "
                           "time = %s and type = %s;", (hhmm, line_data))
        # Fetch inside the try block: the original fetched in `finally`,
        # where `cursor` was unbound if connect() had failed.
        res = cursor.fetchall()
    except (Exception, Error) as error:
        print(ERROR_MESSAGE, error)
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
    return res
def apply_user(username: str, password: str = None,
               extra_smb_groups: List[str] = None, no_default_smb_group: bool = False, **kwargs):
    """Add a new user, or modify the user if it already exists.

    Args:
        username: username
        password: password
        extra_smb_groups: extra groups besides the default samba group to add
        no_default_smb_group: skip the default samba group (included by default)
    """
    shared_kwargs = dict(username=username,
                         extra_smb_groups=extra_smb_groups,
                         no_default_smb_group=no_default_smb_group)
    if _check_user_exists(username):
        mod_user(password=password, **shared_kwargs)
    else:
        # New users get an empty password when none was supplied.
        add_user(password='' if password is None else password, **shared_kwargs)
def load_alloc_model(matfilepath, prefix):
    """ Load allocmodel stored to disk in bnpy .mat format.

    Parameters
    ------
    matfilepath : str
        Filesystem path of the folder holding the .mat files
        (typically a "taskoutpath" written by bnpy.run).
    prefix : str
        Which stored checkpoint to use, e.g. 'Lap0005.000'.

    Returns
    ------
    allocModel : bnpy.allocmodel object
        Has a valid set of global parameters and valid prior hyperparameters.
    """
    prior_dict = loadDictFromMatfile(os.path.join(matfilepath, 'AllocPrior.mat'))
    model_dict = loadDictFromMatfile(
        os.path.join(matfilepath, prefix + 'AllocModel.mat'))
    # The stored 'name' field selects the concrete allocmodel class.
    constructor = AllocModelConstructorsByName[model_dict['name']]
    amodel = constructor(model_dict['inferType'], prior_dict)
    amodel.from_dict(model_dict)
    return amodel
def bouts_per_minute(boutlist):
    """Takes list of times of bouts in seconds, returns bpm = total_bouts / total_time."""
    rate_per_second = total_bouts(boutlist) / total_time(boutlist)
    return rate_per_second * 60
def test_registration(windows):
    """Test that the manifest is properly registered.
    """
    workbench = Workbench()
    for manifest in (CoreManifest(), ErrorsManifest(), PackagesManifest()):
        workbench.register(manifest)
    with signal_error_raise():
        workbench.get_plugin('exopy.app.packages').collect_and_register()
    # Edit the name of the package
    assert workbench.get_plugin('exopy_ext_demo')
def convert_to_snake_case(string: str) -> str:
    """Helper function to convert column names into snake case. Takes a string
    of any sort and makes conversions to snake case, replacing double-
    underscores with single underscores."""
    # Pass 1: insert '_' before a capital that starts a capitalized word.
    step_one = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', string)
    # Pass 2: insert '_' between a lowercase/digit and a following capital,
    # then lowercase everything.
    step_two = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', step_one).lower()
    return step_two.replace('__', '_')
def test_ttl_action_first_encrypt():
    """Test that when _last_updated has never been set, ttl_action returns TtlActions.EXPIRED."""
    fake_store = MagicMock(__class__=ProviderStore)
    provider = CachingMostRecentProvider(
        provider_store=fake_store, material_name="my material", version_ttl=10.0)
    # A provider that has never refreshed must report its TTL as expired.
    assert provider._last_updated is None
    assert provider._ttl_action(0, "encrypt") is TtlActions.EXPIRED
def list_keys(client, keys):
    """
    Fetch multiple keys at once.

    :param client: storage client exposing ``get_multi``
    :param keys: list of candidate keys
    :return: the fetched objects when any were found, None otherwise
        (the previous docstring incorrectly said True was returned)
    """
    objects = client.get_multi(keys)
    return objects if objects else None
def write_fasta(df, out_file, seq_column="sequence", seq_name="pretty",
                write_only_keepers=True, empty_char="X-?", clean_sequence=False):
    """
    Write a fasta file from a data frame.

    df: data frame to write out
    out_file: output file
    seq_column: column in data frame to use as sequence
    seq_name: column in data frame to use as >NAME. If "pretty",
        write out pretty names.
    write_only_keepers: whether or not to write only seq with keep = True
    empty_char: characters counting as "empty"; a sequence consisting only
        of these is not written out.
    clean_sequence: replace any non-aa characters with "-"

    Raises ValueError when seq_name/seq_column do not identify a column.
    """
    # Validate seq_name: must be an existing column or the sentinel "pretty".
    try:
        df[seq_name]
        take_pretty = False
    except KeyError:
        if seq_name == "pretty":
            take_pretty = True
        else:
            err = f"seq_name '{seq_name}' not recognized."
            err += "Should be a column name or 'pretty'\n"
            raise ValueError(err)
    # Validate seq_column.
    try:
        df[seq_column]
    except KeyError:
        err = f"seq_column '{seq_column}' not found\n."
        raise ValueError(err)
    # Set membership is O(1) per character (the original rebuilt a list
    # from empty_char for every character of every sequence).
    empty_set = set(empty_char)
    out = []
    for i in range(len(df)):
        row = df.iloc[i]
        if write_only_keepers and not row.keep:
            continue
        header = _private.to_pretty(row) if take_pretty else row[seq_name]
        seq = row[seq_column]
        # Skip missing sequences and those made only of "empty" characters.
        if seq == "" or seq is None or all(s in empty_set for s in seq):
            continue
        # Replace non-aa characters with '-'
        if clean_sequence:
            seq = re.sub("[^ACDEFGHIKLMNPQRSTVWYZ-]", "-", seq)
        out.append(f">{header}\n{seq}\n")
    # Context manager guarantees the handle is closed even if the write
    # fails (the original leaked the handle on error).
    with open(out_file, "w") as f:
        f.write("".join(out))
def load_config(config_name):
    """
    Load a configuration object from a file and return the object. The given
    configuration name must be a valid saved configuration.

    :param config_name: The name of the configuration file to load from,
        or None (returns None).
    :return: The configuration object saved in that file.
    """
    if config_name is None:
        return None
    file_name = config_name + ".dat"
    file_path = os.path.join(util.working_directory(), CONFIG_DIRECTORY, file_name)
    # Context manager closes the handle even when unpickling raises
    # (the original leaked the handle on error).
    with open(file_path, "rb") as config_file:
        # SECURITY NOTE: pickle.load executes arbitrary code from the file;
        # only load configuration files from trusted locations.
        return pickle.load(config_file)
def estimate_variance(ip_image: np.ndarray, x: int, y: int, nbr_size: int) -> float:
    """Estimates local variances as described in pg. 6, eqn. 20"""
    nbrs = get_neighborhood(x, y, nbr_size, ip_image.shape[0], ip_image.shape[1])
    channel_variances = []
    # Per-channel population variance over the neighborhood pixels.
    for channel in range(3):
        values = [ip_image[i, j, channel] for i, j in nbrs]
        mean = sum(values) / len(values)
        variance = sum((v - mean) * (v - mean) for v in values) / len(values)
        channel_variances.append(variance)
    return np.average(channel_variances)
def api_key_regenerate():
    """
    Generate a new API key for the currently logged-in user.

    Returns a success JSON payload with the new key, or a generic failure
    payload if key generation raises.
    """
    try:
        return flask.jsonify({
            constants.api.RESULT: constants.api.RESULT_SUCCESS,
            constants.api.MESSAGE: None,
            'api_key': database.user.generate_new_api_key(current_user.user_id).api_key,
        }), constants.api.SUCCESS_CODE
    except Exception:
        # The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only regular exceptions.
        return flask.jsonify(constants.api.UNDEFINED_FAILURE), constants.api.UNDEFINED_FAILURE_CODE
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.