content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import json
def get_tiff_param(tiff_file):
    """Obtain the relevant parameters of a TiffFile object.

    Dispatches on the flavour of TIFF (FluoView / ImageJ); anything else is
    assumed to be test data whose parameters are stored as JSON in the first
    page's description.
    """
    xy_dim, description = lookup_page(tiff_file.pages[0])
    shape = tiff_file.asarray().shape
    if tiff_file.is_fluoview:
        return get_fluoview_param(description, xy_dim, shape)
    if tiff_file.is_imagej:
        return get_imagej_param(description, xy_dim, shape)
    # Test data: the description holds a JSON dict with the parameters.
    meta = json.loads(description)
    return meta['minor_axis'], meta['n_modes'], tuple(meta['xy_dim'])
def get_number_of_classes(model_config):
    """Returns the number of classes for a detection model.

    Args:
      model_config: A model_pb2.DetectionModel.

    Returns:
      Number of classes.

    Raises:
      ValueError: If the model type is not recognized.
    """
    arch_name = model_config.WhichOneof("model")
    arch_config = getattr(model_config, arch_name)
    if not hasattr(arch_config, "num_classes"):
        raise ValueError("{} does not have num_classes.".format(arch_name))
    return arch_config.num_classes
from typing import Optional
def to_xy0(
    xyz: npt.ArrayLike, radius: Optional[float] = 1.0, stacked: Optional[bool] = True
) -> np.ndarray:
    """
    Convert geocentric xyz coordinates to longitude (φ) and latitude (λ)
    xy0 (i.e., φλ0) coordinates.

    Parameters
    ----------
    xyz : ArrayLike
        A sequence of one or more (x, y, z) values to be converted to
        longitude and latitude coordinates.
    radius : float, default=1.0
        The radius of the sphere. Defaults to an S2 unit sphere.
    stacked : bool, default=True
        Specify whether the resultant xy0 coordinates have shape (N, 3).
        Otherwise, they will have shape (3, N).

    Returns
    -------
    ndarray
        The longitude and latitude xy0 coordinates, in degrees.

    Notes
    -----
    .. versionadded:: 0.1.0
    """
    # BUGFIX: a single (x, y, z) triple used to crash on the 2-D indexing
    # below; promote it to shape (1, 3) so the "one or more" contract holds.
    xyz = np.atleast_2d(np.asanyarray(xyz))
    lons = wrap(np.degrees(np.arctan2(xyz[:, 1], xyz[:, 0])))
    lats = np.degrees(np.arcsin(xyz[:, 2] / radius))
    z = np.zeros_like(lons)
    data = [lons, lats, z]
    if stacked:
        result = np.vstack(data).T
    else:
        result = np.array(data)
    return result
def getDataset(filepath=""):
    """
    Read a comma separated values (csv) file and create a dataset.

    A dataset is a dict with the components VARIABLES and DATA.
    For a dataset named somedata:
        somedata['VARIABLES'] accesses the variables in this dataset
        somedata['DATA'] accesses the rows of data in this dataset
    """
    return {
        'VARIABLES': getVariables(filepath),
        'DATA': getData(filepath),
    }
def draw_matches(img0, img1, kpts0, kpts1, match_idx,
                 downscale_ratio=1, color=(0, 255, 0), radius=4, thickness=2):
    """Draw keypoint matches between two images, side by side.

    Args:
        img0, img1: color images.
        kpts0, kpts1: Nx2 numpy arrays of (x, y) keypoints for each image.
        match_idx: Mx2 numpy array; row (i, j) matches kpts0[i] to kpts1[j].
        downscale_ratio: scale factor applied to both images and keypoints.
        color, radius, thickness: drawing parameters for circles/lines.

    Returns:
        display: float image in [0, 1] with the drawn matches.
    """
    resize0 = cv2.resize(
        img0, (int(img0.shape[1] * downscale_ratio), int(img0.shape[0] * downscale_ratio)))
    resize1 = cv2.resize(
        img1, (int(img1.shape[1] * downscale_ratio), int(img1.shape[0] * downscale_ratio)))
    rows0, cols0 = resize0.shape[:2]
    rows1, cols1 = resize1.shape[:2]
    # BUGFIX: scale copies of the keypoints; the previous in-place `*=`
    # mutated the caller's arrays (and fails on int arrays with float ratios).
    kpts0 = kpts0 * downscale_ratio
    kpts1 = kpts1 * downscale_ratio
    display = np.zeros((max(rows0, rows1), cols0 + cols1, 3))
    display[:rows0, :cols0, :] = resize0
    # Second image is placed to the right of the first.
    display[:rows1, cols0:(cols0 + cols1), :] = resize1
    for idx in range(match_idx.shape[0]):
        pair = match_idx[idx]
        pt0 = (int(kpts0[pair[0]][0]), int(kpts0[pair[0]][1]))
        # Offset the second image's points by the width of the first.
        pt1 = (int(kpts1[pair[1]][0]) + cols0, int(kpts1[pair[1]][1]))
        cv2.circle(display, pt0, radius, color, thickness)
        cv2.circle(display, pt1, radius, color, thickness)
        cv2.line(display, pt0, pt1, color, thickness)
    display /= 255
    return display
def shorten_build_target(build_target: str) -> str:
    """Returns a shortened version of the build target."""
    # chrome_java is common enough to warrant its own special case.
    special_case = '//chrome/android:chrome_java'
    if build_target == special_case:
        return 'chrome_java'
    return build_target.replace('//chrome/browser/', '//c/b/')
from datetime import datetime
import copy
def store_initial_role_data(dynamo_table, arn, create_date, role_id, role_name, account_number, current_policy, tags):
    """
    Store the initial version of a role in Dynamo.

    Args:
        dynamo_table: Dynamo table object supporting put_item
        arn (str), create_date (datetime), role_id (str), role_name (str),
        account_number (str), current_policy (dict), tags (list)

    Returns:
        dict: the stored role entry, with CreateDate kept as a datetime
    """
    # BUGFIX: `datetime` is the class (from `from datetime import datetime`),
    # so the previous `datetime.datetime.utcnow()` raised AttributeError.
    now = datetime.utcnow().isoformat()
    policy_entry = {'Source': 'Scan', 'Discovered': now, 'Policy': current_policy}
    role_dict = {'Arn': arn, 'CreateDate': create_date.isoformat(), 'RoleId': role_id, 'RoleName': role_name,
                 'Account': account_number, 'Policies': [policy_entry],
                 'Refreshed': now, 'Active': True, 'Repoed': 'Never', 'Tags': tags}
    # Shallow copy: Dynamo gets the string CreateDate, the caller gets it back
    # as a datetime below.
    store_dynamo = copy.copy(role_dict)
    dynamo_table.put_item(Item=_empty_string_to_dynamo_replace(store_dynamo))
    # we want to store CreateDate as a string but keep it as a datetime, so put it back here
    role_dict['CreateDate'] = create_date
    return role_dict
def apply_shift(x, shift, out):
    """
    Translates elements of `x` along axis=0 by `shift`, using linear
    interpolation for non-integer shifts.
    Parameters
    ----------
    x : ndarray
        Array with ndim >= 1, holding data.
    shift : float
        Shift magnitude.
    out : ndarray
        Array with the same shape as x.
    Returns
    -------
    out : ndarray
    """
    T = len(out)
    if shift > 0:
        # Split the shift into an integer part `d` and a fractional part `r`.
        d = int(shift // 1)
        r = shift % 1
        for t in range(T):
            j = t - d
            if j <= 0:
                # Positions shifted past the start are clamped to x[0].
                out[t] = x[0]
            else:
                # Linear interpolation between the two straddling samples.
                out[t] = x[j] * (1 - r) + x[j - 1] * r
    elif shift < 0:
        # Negative shift: mirror the positive case by indexing from the end.
        d = int((-shift) // 1)
        r = (-shift) % 1
        for t in range(T):
            j = t - d
            if j <= 0:
                # Positions shifted past the end are clamped to x[-1].
                out[-t-1] = x[-1]
            else:
                out[-t-1] = x[-j-1] * (1 - r) + x[-j] * r
    else:
        # Zero shift: plain copy.
        out[:] = x
    return out | 86e58c536cbc2fb43bb049aab6d0d4d733308bbd | 3,630,907 |
import platform
def get_socket(dname, protocol, host, dno):
    """socket = get_socket(dname, protocol, host, dno)
    Connect to the display specified by DNAME, PROTOCOL, HOST and DNO, which
    are the corresponding values from a previous call to get_display().
    Return SOCKET, a new socket object connected to the X server.
    """
    # NOTE(review): `platform` here is the imported *module* object, so unless
    # _socket_mods is keyed by module objects this lookup always falls through
    # to _default_socket_mod. The intended key is likely sys.platform or
    # platform.system() — confirm against _socket_mods' keys.
    modname = _socket_mods.get(platform, _default_socket_mod)
    # Import the platform-specific socket module relative to this package.
    mod = _relative_import(modname)
    return mod.get_socket(dname, protocol, host, dno) | d2703d7747ffac8d142f170dd153169f111f8b5d | 3,630,908 |
def azureblobstorage_folder_list(node_addon, **kwargs):
    """Return all the subsequent folders under the folder id passed.

    Extra keyword arguments are accepted (framework view signature) and ignored.
    """
    folders = node_addon.get_folders()
    return folders
def summary_statistics(is_significant, time_interp, n_records_at_t, n_window):
    """Compute windowed summary statistics.

    Parameters
    ----------
    is_significant : 2-D bool ndarray, shape (n_records, n_times)
        Per-record significance flags at each time point.
    time_interp : 1-D ndarray
        Interpolated time axis (only its length is used).
    n_records_at_t : 1-D ndarray
        Number of records available at each time point.
    n_window : int
        Half-width (in samples) of the window around each time point.

    Returns
    -------
    tuple of (max records available around each t,
              fraction of records significant anywhere in the window)
    """
    n_times = len(time_interp)
    significant_number = np.zeros(n_times)
    max_records_around_t = np.zeros(n_times)
    # BUGFIX: `xrange` does not exist in Python 3 — use range.
    for j in range(n_times):
        # Set the window, clamping at the boundaries.
        start = max(0, j - n_window)
        end = min(j + n_window, n_times)
        # A record counts once if it is significant anywhere in the window.
        significant_number[j] = np.any(is_significant[:, start:end], axis=1).sum()
        # Max number of records available in the window around time j.
        max_records_around_t[j] = n_records_at_t[start:end].max()
    return (max_records_around_t, significant_number / max_records_around_t)
from typing import Tuple
import random
def draw_two(max_n: int) -> Tuple[int, int]:
    """Draw two different ints given max (mod max).

    Both results lie in [0, max_n); the second is guaranteed distinct from
    the first because the added offset is never a multiple of max_n.
    """
    # BUGFIX: randint(0, max_n) is inclusive, so `i` could equal max_n and
    # escape the "(mod max)" range; randrange(max_n) yields [0, max_n).
    i = random.randrange(max_n)
    j = (i + random.randint(1, max_n - 1)) % max_n
    return i, j
def compareFloats(a, b, rtol=1.0e-5, atol=opscore.RO.SysConst.FAccuracy):
    """Compares values a and b.

    Returns 0 if the values are approximately equal, i.e.:
    - |a - b| < atol + (rtol * |a + b|)
    Else 1 if a > b, -1 if a < b

    Inputs:
    - a, b: scalars to be compared (int or float)
    - atol: absolute tolerance
    - rtol: relative tolerance

    The algorithm used is the same one used by numpy.allclose.
    """
    if abs(a - b) < (atol + (rtol * abs(float(a + b)))):
        return 0
    # BUGFIX: the builtin cmp() was removed in Python 3;
    # (a > b) - (a < b) is the standard replacement.
    return (a > b) - (a < b)
def net_import_share_constraint_rule(backend_model, constraint_group, carrier, what):
    """
    Enforces demand shares of net imports from transmission technologies for groups of locations,
    on average over the entire model period. Transmission within the group are ignored. The share
    is relative to ``demand`` technologies only.
    .. container:: scrolling-wrapper
        .. math::
            \\sum_{loc::tech::carrier \\in loc\\_tech\\_carriers\\_transmission \\in given\\_locations, timestep \\in timesteps} carrier_{prod}(loc::tech::carrier, timestep)
            + \\sum_{loc::tech::carrier \\in loc\\_tech\\_carriers\\_transmission \\in given\\_locations, timestep \\in timesteps} carrier_{con}(loc::tech::carrier, timestep) \\leq
            share \\times \\sum_{loc::tech:carrier \\in loc\\_tech\\_demand \\in given\\_locations, timestep\\in timesteps}
            carrier_{con}(loc::tech::carrier, timestep)
    """
    share = get_param(backend_model, 'group_net_import_share_{}'.format(what), (carrier, constraint_group))
    if share.value is None:
        # No share configured for this group/carrier: emit no constraint.
        return return_noconstraint('net_import_share', constraint_group)
    else:
        trans_loc_tech = getattr(
            backend_model,
            'group_constraint_loc_techs_{}'.format(constraint_group)
        )
        # Locations belonging to this constraint group ("loc::tech" -> "loc").
        locs = set(loc_tech.split('::')[0] for loc_tech in trans_loc_tech)
        # Ignore transmission links whose remote end ("tech:remote_loc") is
        # inside the group — only cross-boundary links count as imports.
        trans_loc_tech = filter(lambda loc_tec: loc_tec.split(":")[-1] not in locs, trans_loc_tech)
        # Demand loc::tech::carriers located inside the group.
        demand_loc_tech = [
            i for i in backend_model.loc_tech_carriers_demand
            if i.split('::')[0] in locs
        ]
        # Net import over all external links: production plus (negative) consumption.
        lhs = sum(
            (backend_model.carrier_prod[loc_tech + '::' + carrier, timestep]
             + backend_model.carrier_con[loc_tech + '::' + carrier, timestep])
            for loc_tech in trans_loc_tech
            for timestep in backend_model.timesteps
        )
        # Demand is modelled as negative consumption, hence the leading minus.
        rhs = - share * sum(
            backend_model.carrier_con[loc_tech, timestep]
            for loc_tech in demand_loc_tech
            for timestep in backend_model.timesteps
        )
        return equalizer(lhs, rhs, what) | 30add5ca89d2b995742e52eec759ca9504029e57 | 3,630,913 |
def loss_gaussian(X):
    """
    Code length of X under a Gaussian CNML encoding: negative maximized
    log-likelihood plus an approximation of the parametric complexity.
    """
    data = np.matrix(X)
    n, m = data.shape
    if n == 1:
        # A single row is treated as a column of scalar observations.
        data = data.T
        n, m = data.shape
    if n <= 0:
        return np.nan
    centered = data - np.mean(data, 0)
    covariance = np.dot(centered.T, centered / n)
    det_cov = sl.det(covariance)
    max_loglik = n * (-m - m * np.log(2 * np.pi) - np.log(det_cov)) / 2
    complexity = m * (m + 3) / 4 * np.log(n)  # approximation
    return -max_loglik + complexity
def integrand(x, n):
    """Evaluate the Bessel function of the first kind of order ``n`` at ``x``."""
    value = jn(n, x)
    return value
def get_forms_tuple(*args):
    """
    Convert strings of comma-separated grammemes into two sets:
    - the set of tags used for declension
    - the set of tags used for refining the word form

    Raises:
        ValueError: if a key is not a known grammeme.
    """
    declension_tags = set()
    refining_tags = set()
    for arg in args:
        for key in force_str(arg).split(','):
            if key in INFLECT_FORMS:
                declension_tags.add(INFLECT_FORMS[key])
            elif key in SPECIFYING_FORMS:
                refining_tags.add(SPECIFYING_FORMS[key])
            else:
                raise ValueError('`%s` is not a grammeme' % key)
    return declension_tags, refining_tags
def truncate(inputs, channels, data_format):
    """Slice the inputs down to `channels` channels if necessary."""
    if data_format == 'channels_last':
        channel_axis = 3
    else:
        assert data_format == 'channels_first'
        channel_axis = 1
    input_channels = inputs.get_shape()[channel_axis].value
    if input_channels < channels:
        raise ValueError('input channel < output channels for truncate')
    if input_channels == channels:
        # No truncation necessary.
        return inputs
    # Truncation should only be necessary when channel division leads to
    # vertices with +1 channels. The input vertex should always be projected
    # to the minimum channel count.
    assert input_channels - channels == 1
    if data_format == 'channels_last':
        return tf.slice(inputs, [0, 0, 0, 0], [-1, -1, -1, channels])
    return tf.slice(inputs, [0, 0, 0, 0], [-1, channels, -1, -1])
def do_rating_by_user(parser, token):
    """
    Return a User's Rating of a Snippet, if any.

    Example::
        {% get_rating_by_user user.id object.id as rating %}
    """
    bits = token.contents.split()
    if len(bits) != 5:
        raise template.TemplateSyntaxError(
            "'%s' tag takes exactly four arguments" % bits[0])
    if bits[3] != 'as':
        raise template.TemplateSyntaxError(
            "third argument to '%s' tag must be 'as'" % bits[0])
    _, user_id, object_id, _, var_name = bits
    return RatingByUserNode(user_id, object_id, var_name)
import os
def get_read_counts_total_table(path, pool):
    """
    Build the table behind the "Fraction of Total Reads that Align to the
    Human Genome" plot for a single pool.
    """
    table = pd.read_csv(os.path.join(path, AGBM_READ_COUNTS_FILENAME), sep='\t')
    # Drop the Picard-derived columns.
    keep_columns = ~table.columns.str.contains(PICARD_LABEL)
    table = table.iloc[:, keep_columns]
    table['AlignFrac'] = table[TOTAL_MAPPED_COLUMN] / table[TOTAL_READS_COLUMN]
    table[TOTAL_OFF_TARGET_FRACTION_COLUMN] = 1 - table[TOTAL_ON_TARGET_FRACTION_COLUMN]
    table['pool'] = pool
    return table
def get_label_funcs_threshold(bin_count=10, lower=1e-9, upper=1e-3, bin_func=bin_func_sum):
    """Build (label, unlabel) functions that threshold-encode GOES flux.

    :param bin_count: Number of different thresholds to encode the goes flux into.
    :param lower: Lower limit for valid goes flux values.
    :param upper: Upper limit for valid goes flux values.
    :param bin_func: Function used to return the list of outputs the model makes to a single bin, which will then be
        transformed back to a goes flux value.
    """
    # Thermometer encoding: bin i is represented by i leading ones.
    bin_labels = np.zeros((bin_count, bin_count - 1))
    for i in range(bin_count):
        bin_labels[i][:i] = 1
    low = np.log10(lower)
    up = np.log10(upper)

    def label_func(y):
        # Map log10-flux linearly onto bin indices and clamp to a valid bin.
        val = np.log10(y)
        rawbin = (val - low) / (up - low) * bin_count
        index = np.floor(rawbin).astype(int)
        return bin_labels[np.clip(index, 0, bin_count - 1)]

    def unlabel_func(y):
        # BUGFIX: removed leftover debug print(val) that spammed stdout.
        val = bin_func(y)
        exponent = (((val / bin_count) * (up - low)) + low)
        return 10 ** exponent

    return LabelFuncs(label_func, unlabel_func)
def modify_layer(layer, name, norm=None, dropout=None):
    """Add BatchNorm and/or Dropout on top of `layer`.

    Dropout is applied first; its name suffix is folded into the name passed
    on to the normalization layer.
    """
    out = layer
    if dropout is not None:
        name = "%s-dropout" % (name)
        out = Dropout(dropout, name=name)(out)
    if norm is not None:
        name = "%s-%snorm" % (name, norm)
        out = get_normalization_layer(norm, name=name)(out)
    return out
def create_access(terms_to_check, new_term):
    """
    Breaks a new_term up into separate constituent parts so that they can be
    compared in a check_access test.

    Returns a list of terms that should be inserted.
    """
    from itertools import product  # stdlib; local import keeps this change self-contained
    protos = new_term.match.get('protocol', ['any'])
    sources = new_term.match.get('source-address', ['any'])
    dests = new_term.match.get('destination-address', ['any'])
    sourceports = new_term.match.get('source-port', ['any'])
    destports = new_term.match.get('destination-port', ['any'])
    ret = []
    # Same iteration order as the original five nested loops:
    # destport varies fastest, proto slowest.
    for proto, source, sourceport, dest, destport in product(
            protos, sources, sourceports, dests, destports):
        t = Term()
        # 'any' means unconstrained: leave the corresponding match key unset.
        if str(proto) != 'any':
            t.match['protocol'] = [proto]
        if str(source) != 'any':
            t.match['source-address'] = [source]
        if str(dest) != 'any':
            t.match['destination-address'] = [dest]
        if str(sourceport) != 'any':
            t.match['source-port'] = [sourceport]
        if str(destport) != 'any':
            t.match['destination-port'] = [destport]
        if not check_access(terms_to_check, t):
            ret.append(t)
    return ret
def versionPropertiesDictionary(sql_row_list):
    """
    versionPropertiesDictionary(sql_row_list)
    Transform a row fetched via an SQL request (a list) into a dictionary.
    """
    keys = ["id", "model_id", "version", "metadata",
            "commit_comment", "created_timestamp"]
    # Column order matches the SELECT that produced the row.
    return {key: sql_row_list[index] for index, key in enumerate(keys)}
def register():
    """
    Render the new-user registration page (register.html) with no user bound.
    """
    logger.debug('register()')
    # No authenticated user in this context, hence user=None.
    return render_template('register.html', user=None) | 9306168e54b6b2bdff532792d1ffa493608cc5b8 | 3,630,924 |
import logging
def verify_received_aes_key(key: dict, rsa_public_key) -> bool:
    """
    Verifies the AES key received as a dict and passed here is valid.

    :param dict key: An AES key, as a dictionary.
    :param rsa_public_key: The RSA public key of the author.
    :return bool: True if the information is a valid AES key, False otherwise.
    """
    if not validate_fields(key, Structures.aes_key_structure):
        return False
    value = key["value"]
    claimed_digest = key["hash"]
    signature = Encryption.deserialize_string(key["sig"])
    computed = Encryption.hash_iterable(value)
    if claimed_digest != computed.hexdigest():
        if Config.log_validation:
            logging.error(f'Failed to validate key: indicated digest ({claimed_digest!r}) '
                          f'is different from the one computed ({computed.hexdigest()})')
        return False
    return Encryption.is_signature_valid(rsa_public_key, computed, signature)
def remove_indices(dist_array, indices):
    """
    Remove given indices from dist_array.

    :param dist_array: a flattened version of the dist_matrix in the format of entries
        (node0/window0, node1/window1, distance), usually sorted
    :param indices: indices which should be removed
    :return: numpy array of the remaining entries, in original order
    """
    # O(1) membership tests instead of O(len(indices)) per element.
    exclude = set(indices)
    selected = [element for i, element in enumerate(dist_array) if i not in exclude]
    return np.asarray(selected)
def bow_net(data, dict_dim, emb_dim=128, hid_dim=128, hid_dim2=96, class_dim=2):
    """
    Bag-of-words sentiment network: embedding -> sum-pool -> tanh ->
    two tanh FC layers -> softmax classifier.

    Returns (prediction, last hidden layer).
    """
    # Embedding lookup for the input token ids.
    embedded = fluid.layers.embedding(
        input=data,
        size=[dict_dim, emb_dim],
        param_attr=fluid.ParamAttr(name="@HUB_senta_bow@embedding_0.w_0"))
    # Bag-of-words: sum the embeddings over the sequence, then squash.
    pooled = fluid.layers.sequence_pool(input=embedded, pool_type='sum')
    pooled = fluid.layers.tanh(pooled)
    # Fully connected stack.
    hidden1 = fluid.layers.fc(
        input=pooled,
        size=hid_dim,
        act="tanh",
        param_attr=fluid.ParamAttr(name="@HUB_senta_bow@fc_0.w_0"),
        bias_attr=fluid.ParamAttr(name="@HUB_senta_bow@fc_0.b_0"))
    hidden2 = fluid.layers.fc(
        input=hidden1,
        size=hid_dim2,
        act="tanh",
        param_attr=fluid.ParamAttr(name="@HUB_senta_bow@fc_1.w_0"),
        bias_attr=fluid.ParamAttr(name="@HUB_senta_bow@fc_1.b_0"))
    # Softmax classification head.
    prediction = fluid.layers.fc(
        input=[hidden2],
        size=class_dim,
        act="softmax",
        param_attr=fluid.ParamAttr(name="@HUB_senta_bow@fc_2.w_0"),
        bias_attr=fluid.ParamAttr(name="@HUB_senta_bow@fc_2.b_0"))
    return prediction, hidden2
def close_story(request):
    """
    Tags: logs+stories
    ---
    Closes an open story.
    ---
    story_id:
      in: path
      type: string
      required: true
    """
    auth_context = auth_context_from_request(request)
    # Only available to Owners for now.
    if not auth_context.is_owner():
        raise PolicyUnauthorizedError("Only Owners may perform this action")
    # The story is closed by the view's decorator logging the close_story
    # action with the story_id captured from the path when the API response
    # is logged — nothing further needs to happen in this body.
    return Response('OK', 200)
def convert_dates_to_ISO(date: str, date_2: str):
    """Convert two "month day" strings to ISO dates, assuming the current year.

    If the latter date occurs chronologically before the former (e.g. given
    "december 4" then "january 4"), the second date is assumed to fall in the
    next year. Group buys don't run for 12 months, so the two dates are never
    the same month in different years.

    Returns a set of the two ISO date strings, or a set of error strings when
    a date cannot be parsed.
    """
    month_dict = {
        'january': 1,
        'february': 2,
        'march': 3,
        'april': 4,
        'may': 5,
        'june': 6,
        'july': 7,
        'august': 8,
        'september': 9,
        'october': 10,
        'november': 11,
        'december': 12
    }
    try:
        current_year = d.today().year
        month_1, day_1 = date.split(" ")[0], date.split(" ")[1]
        month_1_int = month_dict[month_1]
        month_2, day_2 = date_2.split(" ")[0], date_2.split(" ")[1]
        month_2_int = month_dict[month_2]
    except (KeyError, IndexError):
        # Unknown month name (KeyError) or missing day part (IndexError).
        return {"Start date not set", "End date not set"}
    # BUGFIX: compare day numbers as ints — as strings, "9" > "10".
    if month_1_int > month_2_int or \
            (month_1_int == month_2_int and int(day_1) > int(day_2)):
        # Second date happens first in the year: assume it rolls into next year.
        year_2 = current_year + 1
    else:
        year_2 = current_year
    iso_date_1 = str(current_year) + "-" + get_ISO_num_string(month_1_int) + "-" + get_ISO_num_string(int(day_1))
    # BUGFIX: the rollover branch previously omitted the "-" between month and day.
    iso_date_2 = str(year_2) + "-" + get_ISO_num_string(month_2_int) + "-" + get_ISO_num_string(int(day_2))
    return {iso_date_1, iso_date_2}
def findliteralblocks(blocks):
    """Finds literal blocks and adds a 'type' field to the blocks.
    Literal blocks are given the type 'literal', all other blocks are
    given type the 'paragraph'.
    """
    i = 0
    # Note: the list is mutated (del) while scanning, so indexes are adjusted
    # manually instead of using a for loop.
    while i < len(blocks):
        # Searching for a block that looks like this:
        #
        # +------------------------------+
        # | paragraph                    |
        # | (ends with "::")             |
        # +------------------------------+
        #    +---------------------------+
        #    | indented literal block    |
        #    +---------------------------+
        blocks[i]['type'] = 'paragraph'
        if blocks[i]['lines'][-1].endswith('::') and i + 1 < len(blocks):
            indent = blocks[i]['indent']
            # How much the literal block is indented relative to its paragraph.
            adjustment = blocks[i + 1]['indent'] - indent
            if blocks[i]['lines'] == ['::']:
                # Expanded form: remove block
                del blocks[i]
                i -= 1
            elif blocks[i]['lines'][-1].endswith(' ::'):
                # Partially minimized form: remove space and both
                # colons.
                blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-3]
            else:
                # Fully minimized form: remove just one colon.
                blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-1]
            # List items are formatted with a hanging indent. We must
            # correct for this here while we still have the original
            # information on the indentation of the subsequent literal
            # blocks available.
            m = _bulletre.match(blocks[i]['lines'][0])
            if m:
                indent += m.end()
                adjustment -= m.end()
            # Mark the following indented blocks.
            while i + 1 < len(blocks) and blocks[i + 1]['indent'] > indent:
                blocks[i + 1]['type'] = 'literal'
                blocks[i + 1]['indent'] -= adjustment
                i += 1
        i += 1
    return blocks | 43b2472784744e5d1c17b5b4e306b23c953223fa | 3,630,930 |
import argparse
def get_parser():
    """Build the BioWardrobe Migration command-line argument parser."""
    parser = argparse.ArgumentParser(description='BioWardrobe Migration', add_help=True)
    parser.add_argument(
        "-c", "--config",
        help="Path to the BioWardrobe config file",
        default="/etc/wardrobe/wardrobe")
    # --debug and --quiet are mutually exclusive verbosity settings.
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument("-d", "--debug", help="Output debug information", action="store_true")
    verbosity.add_argument("-q", "--quiet", help="Suppress all outputs except errors", action="store_true")
    return parser
import requests
def urlcheck():
    """Check the app-ads.txt endpoint of every dev URL.

    Returns a list of (bundle, url, status) tuples, where status is the HTTP
    status code, or "Timeout"/"404" strings on network failure.
    """
    urlstatus = []
    for elem in get_bundle_dev_url():
        appads = "https://" + elem[1] + "/app-ads.txt"
        try:
            response = requests.head(appads, timeout=3.5, allow_redirects=True)
        except requests.exceptions.Timeout:
            status = "Timeout"
        # BUGFIX: requests' ConnectionError is not a subclass of the builtin
        # ConnectionError, so connection failures used to escape this handler.
        except requests.exceptions.ConnectionError:
            status = "404"
        else:
            status = response.status_code
        tt = (elem[0], appads, status)
        print(tt)
        urlstatus.append(tt)
    return urlstatus
def get_vpc_dhcp_options(dhcp_options_id=None,filters=None,tags=None,opts=None):
    """
    Retrieve information about an EC2 DHCP Options configuration.
    ## Example Usage
    ### Lookup by DHCP Options ID
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.ec2.get_vpc_dhcp_options(dhcp_options_id="dopts-12345678")
    ```
    ### Lookup by Filter
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.ec2.get_vpc_dhcp_options(filters=[
        {
            "name": "key",
            "values": ["domain-name"],
        },
        {
            "name": "value",
            "values": ["example.com"],
        },
    ])
    ```
    :param str dhcp_options_id: The EC2 DHCP Options ID.
    :param list filters: List of custom filters as described below.
    :param dict tags: A map of tags assigned to the resource.
    The **filters** object supports the following:
      * `name` (`str`) - The name of the field to filter.
      * `values` (`list`) - Set of values for filtering.
    """
    # Build the invoke arguments; provider expects camelCase keys.
    __args__ = dict()
    __args__['dhcpOptionsId'] = dhcp_options_id
    __args__['filters'] = filters
    __args__['tags'] = tags
    # Default invoke options, pinned to this SDK's plugin version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = utilities.get_version()
    # Synchronous data-source invoke; the result fields are camelCase keys.
    __ret__ = pulumi.runtime.invoke('aws:ec2/getVpcDhcpOptions:getVpcDhcpOptions', __args__, opts=opts).value
    return AwaitableGetVpcDhcpOptionsResult(
        arn=__ret__.get('arn'),
        dhcp_options_id=__ret__.get('dhcpOptionsId'),
        domain_name=__ret__.get('domainName'),
        domain_name_servers=__ret__.get('domainNameServers'),
        filters=__ret__.get('filters'),
        id=__ret__.get('id'),
        netbios_name_servers=__ret__.get('netbiosNameServers'),
        netbios_node_type=__ret__.get('netbiosNodeType'),
        ntp_servers=__ret__.get('ntpServers'),
        owner_id=__ret__.get('ownerId'),
        tags=__ret__.get('tags')) | 05d73c3d230c3e815e47aa8c6134c89efb814439 | 3,630,933 |
import os
def moduleName(file):
    """Extract a module name from the python source file name, with appended ':'."""
    stem, _ = os.path.splitext(os.path.basename(file))
    return stem + ":"
def ms2str(v):
    """
    Convert a time in milliseconds to a time string.

    Arguments:
        v: a time in milliseconds.

    Returns:
        A string in the format HH:MM:SS,mmm.
    """
    ms = v % 1000
    total_seconds = v // 1000
    s = total_seconds % 60
    m = (total_seconds // 60) % 60
    h = total_seconds // 3600
    return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}"
import operator
def regroup(X, N):
    """
    Regroup the rows and columns in X so that rows/columns N apart in X
    become adjacent in the result.

    Parameters:
        X (np.ndarray): Image to be regrouped
        N (int or 2-element sequence): Size of 1D DCT performed; a single
            int is used for both rows and columns, otherwise N[0] applies
            to columns and N[1] to rows.

    Returns:
        np.ndarray: Regrouped image
    """
    try:
        rows_step = cols_step = operator.index(N)
    except TypeError:
        rows_step, cols_step = N
    m, n = X.shape
    if m % rows_step or n % cols_step:
        raise ValueError('regroup error: X dimensions not multiples of N')
    # Subdivide both axes, swap the block/offset axes, and recombine.
    blocks = X.reshape(m // rows_step, rows_step, n // cols_step, cols_step)
    blocks = blocks.transpose((1, 0, 3, 2))
    return blocks.reshape(m, n)
def uint_to_two_compl(value: int, length: int) -> int:
    """Reinterpret the low `length` bits of `value` as a two's-complement int."""
    sign_bit = (value >> (length - 1)) & 1
    if sign_bit:
        # Negative: extend the sign into all higher bits.
        return value | (~0 << length)
    # Non-negative: keep only the low `length` bits.
    return value & ((1 << length) - 1)
def synthetic_pattern_program():
  """A program that tests pattern matching of `PrimOp` outputs.
  Returns:
    program: `instructions.Program`.
  """
  block = instructions.Block(
      [
          # Binds a nested result (1, (2, 3)) to the nested name pattern
          # ("one", ("five", "three")); "five" is overwritten by the next op.
          instructions.prim_op(
              [], ("one", ("five", "three")), lambda: (1, (2, 3))),
          instructions.prim_op(
              [], (("four", "five"), "six"), lambda: ((4, 5), 6)),
      ],
      instructions.halt_op())
  # All bound variables are scalar int64s.
  the_vars = {
      "one": instructions.single_type(np.int64, ()),
      "three": instructions.single_type(np.int64, ()),
      "four": instructions.single_type(np.int64, ()),
      "five": instructions.single_type(np.int64, ()),
      "six": instructions.single_type(np.int64, ()),
  }
  # The output pattern exercises nested structure in the returned values.
  return instructions.Program(
      instructions.ControlFlowGraph([block]), [],
      the_vars, [], (("one", "three"), "four", ("five", "six"))) | c3f55b75301604a394d9f0ec01d4198a27f90c8e | 3,630,938 |
def generator(z, out_channel_dim, is_train=True, alpha=0.2):
    """
    Create the generator network.

    :param z: Input z
    :param out_channel_dim: The number of channels in the output image
    :param is_train: Boolean if generator is being used for training
    :param alpha: leaky relu rate
    :return: The tensor output of the generator
    """
    # Create fresh variables when training, reuse them when inferring.
    reuse = not is_train
    with tf.variable_scope('generator', reuse=reuse):
        # Project and reshape to a 7x7x512 volume (7x7 so that two stride-2
        # transposed convolutions yield 28x28 rather than 32x32).
        net = tf.layers.dense(z, 7 * 7 * 512)
        net = tf.reshape(net, (-1, 7, 7, 512))
        net = tf.layers.batch_normalization(net, training=is_train)
        net = tf.maximum(alpha * net, net)  # leaky ReLU
        # Stride 1 here keeps the 7x7 spatial size.
        net = tf.layers.conv2d_transpose(net, 256, 5, strides=1, padding='same')
        net = tf.layers.batch_normalization(net, training=is_train)
        net = tf.maximum(alpha * net, net)
        net = tf.layers.conv2d_transpose(net, 128, 5, strides=2, padding='same')
        net = tf.layers.batch_normalization(net, training=is_train)
        net = tf.maximum(alpha * net, net)
        # Output layer: 28x28xout_channel_dim, squashed to [-1, 1].
        logits = tf.layers.conv2d_transpose(net, out_channel_dim, 5, strides=2, padding='same')
        out = tf.tanh(logits)
        return out
import re
def manual_filtration(word, neg_list):
    """Applied to `word`: return True if the word is a negative example
    (should be filtered out), False otherwise.
    """
    pattern_1 = r',|\.|:|;|"'
    pattern_2 = r'行|示|为|较|见|天|音'
    pattern_3 = r'切除|标本|摄取|存在|活检|穿刺|开口|引流|胸痛|患者|治疗|不适|受限|疼痛|基本|压缩'
    pattern_4 = r'^[A-Za-z0-9_]+$'
    remove_word_list = neg_list + ['病理', '癌', '炎', '占位']
    tnm_pattern = r'[Tt]\S{1,2}[Nn][xX0123][Mm][01]'
    word_no_punc = remove_punctuation(word)
    # Any of these patterns immediately marks the word as a negative example.
    for pattern in (pattern_1, pattern_2, pattern_3, pattern_4):
        if re.search(pattern, word):
            return True
    if len(word_no_punc) <= 1 or word_no_punc in remove_word_list:
        return True
    # Past the filters: negative only if it contains a digit and is not a
    # TNM staging code.
    if not re.search(tnm_pattern, word) and re.search(r'\d', word):
        return True
    return False
def two(f):
    """Church numeral 2: same as successor(successor(zero)).

    Returns a function that applies `f` twice to its argument.
    """
    # Removed leftover "*** YOUR CODE HERE ***" homework scaffolding string.
    return lambda x: f(f(x))
import warnings
def _find_indexes(obj, var_name, min_limit, max_limit, use_dask):
"""
Function to find array indexes where failing limit tests
Parameters
----------
obj : Xarray.Dataset
Dataset containing data to use in test
var_name : str
Variable name to inspect
min_limit : float or numpy array
Minimum limit to use for returning indexes
max_limit : float or numpy array
Maximum limit to use for returning indexes
use_dask : boolean
Option to use Dask operations instead of Numpy
Returns
-------
Tuple containing solar zenith angle array and solar constant scalar
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if use_dask and isinstance(obj[var_name].data, da.Array):
index_min = da.where(obj[var_name].data < min_limit, True, False).compute()
index_max = da.where(obj[var_name].data > max_limit, True, False).compute()
else:
index_min = np.less(obj[var_name].values, min_limit)
index_max = np.greater(obj[var_name].values, max_limit)
return (index_min, index_max) | ad59ce154bf056d0ede5b98250fc95265ec83c89 | 3,630,942 |
def asinh_grad(orig, grad):
    """Gradient of asinh: returns [grad * 1/((1 + (x ^ 2)) ^ (1/2))]."""
    x = orig.args[0]
    one = ones_like(x)
    denominator = sqrt(one + (x * x))
    return [grad * one / denominator]
def inject_test_seed(seed, signature, user_data_dir):
    """Injects the given test seed.

    Args:
        seed (str): A variations seed.
        signature (str): A seed signature.
        user_data_dir (str): Path to the user data directory used to launch Chrome.

    Returns:
        bool: Whether the injection succeeded.
    """
    update_local_state(user_data_dir, {
        LOCAL_STATE_SEED_NAME: seed,
        LOCAL_STATE_SEED_SIGNATURE_NAME: signature,
    })
    # Read the state back to verify the write actually took effect.
    written_seed, written_signature = get_current_seed(user_data_dir)
    return written_seed == seed and written_signature == signature
import math
def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'):
""" Per-stage depth scaling
Scales the block repeats in each stage. This depth scaling impl maintains
compatibility with the EfficientNet scaling method, while allowing sensible
scaling for other models that may have multiple block arg definitions in each stage.
"""
# We scale the total repeat count for each stage, there may be multiple
# block arg defs per stage so we need to sum.
num_repeat = sum(repeats)
if depth_trunc == 'round':
# Truncating to int by rounding allows stages with few repeats to remain
# proportionally smaller for longer. This is a good choice when stage definitions
# include single repeat stages that we'd prefer to keep that way as long as possible
num_repeat_scaled = max(1, round(num_repeat * depth_multiplier))
else:
# The default for EfficientNet truncates repeats to int via 'ceil'.
# Any multiplier > 1.0 will result in an increased depth for every stage.
num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier))
# Proportionally distribute repeat count scaling to each block definition in the stage.
# Allocation is done in reverse as it results in the first block being less likely to be scaled.
# The first block makes less sense to repeat in most of the arch definitions.
repeats_scaled = []
for r in repeats[::-1]:
rs = max(1, round((r / num_repeat * num_repeat_scaled)))
repeats_scaled.append(rs)
num_repeat -= r
num_repeat_scaled -= rs
repeats_scaled = repeats_scaled[::-1]
# Apply the calculated scaling to each block arg in the stage
sa_scaled = []
for ba, rep in zip(stack_args, repeats_scaled):
sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
return sa_scaled | e1411f4c62bf5834a994d6c0313ea77e7368ee2c | 3,630,945 |
def tokenize(s):
    """
    Convert a string into a Python list of tokens.

    Parentheses become stand-alone tokens; everything else is split on
    whitespace.
    """
    padded = s
    for paren in ('(', ')'):
        padded = padded.replace(paren, ' ' + paren + ' ')
    return padded.split()
def vote_entropy(votes, classes):
    """Calculates the vote entropy for measuring the level of disagreement in
    QBC.

    Parameters
    ----------
    votes : array-like, shape (n_samples, n_estimators)
        The class predicted by the estimators for each sample.
    classes : array-like, shape (n_classes)
        A list of all possible classes.

    Returns
    -------
    vote_entropy : np.ndarray, shape (n_samples)
        The vote entropy of each row in `votes`.

    References
    ----------
    [1] Engelson, Sean P., and Ido Dagan.
        Minimizing manual annotation cost in supervised training from corpora.
        arXiv preprint cmp-lg/9606030 (1996).
    """
    # Validate the votes matrix.
    votes = check_array(votes)
    # Per-sample counts of how often each class was voted for.
    vote_count = compute_vote_vectors(y=votes, classes=classes,
                                      missing_label=None)
    # NOTE(review): normalization divides by len(votes) (the number of rows),
    # not by the committee size -- confirm this matches the intended formula.
    probabilities = vote_count / len(votes)
    # 0 * log(0) produces NaN; nansum treats those terms as zero.
    with np.errstate(divide='ignore', invalid='ignore'):
        entropies = -np.nansum(probabilities * np.log(probabilities),
                               axis=1) / np.log(len(votes))
    return entropies
def printModelDict(localsDict):
    """Convert serverConfig model configuration to a dictionary. This writes
    the dictionary as text. This does not create a usable modelDict, just one to
    use to print out the dictionary as python code.

    NOTE(review): legacy Python 2 source (backtick repr, dict.iteritems);
    do not run under Python 3 without porting.

    localsDict is expected to be a module namespace (e.g. locals() of a
    serverConfig module); weather-element parm tuples and time-constraint
    tuples are recognized purely by their shape. Relies on the module-level
    DATABASES, INITMODULES, D2DDBVERSIONS, D2DAccumulativeElements, INITSKIPS
    and D2DMODELS structures -- presumably imported from serverConfig; confirm.

    Returns the generated python-like text as a string.
    """
    modelDict={}
    parmsDict={}
    tcDict={}
    dbs=DATABASES
    scText=""
    # Partition localsDict entries: (name, type, ...) tuples are weather
    # elements, 3-int tuples are time constraints.
    for n,v in localsDict.items():
        if type(v) is tuple and type(v[0]) is str and v[1] in [DISCRETE,SCALAR,VECTOR,WEATHER]:
            parmsDict[n]=v
        elif type(v) is tuple and len(v)==3 and type(v[0]) is int:
            tcDict[n]=v
    scText += '\n'
    # Emit the named parm variables, then the named time constraints.
    for n in sorted(parmsDict):
        scText += 'parmVar: %s = %s\n' % (n,`parmsDict[n]`)
    scText += '\n'
    for n in sorted(tcDict):
        scText += 'TC: %s = %s\n' % (n,`tcDict[n]`)
    scText += '\n'
    # Print out dbs entries, i.e., model database definition
    for item in sorted(dbs):
        plist=[]
        parmTmpDict={}
        for pt in item[1]:
            parmsList=[]
            # Try to find named parm setting
            for p in pt[0]:
                name=next((name for name,v in parmsDict.iteritems() if v == p), None)
                if name is not None:
                    parmsList.append(name)
                else:
                    parmsList.append(p[0])
            # &nlq/&nrq are sentinel markers stripped after pprint so the
            # generated entry reads as a call, not a quoted string.
            theParms='&nlq(['+', '.join(parmsList)+'], '
            # Try to get a named time constraint
            name=next((name for name,v in tcDict.iteritems() if v == pt[1]), None)
            if name is None:
                name = `pt[1]`
            if name in parmTmpDict:
                parmTmpDict[name]+=parmsList
            else:
                parmTmpDict[name]=parmsList
        # This consolidates parms by time constraint and sorts parm names.
        for tc in sorted(parmTmpDict.keys()):
            parmTmpDict[tc]=sorted(parmTmpDict[tc])
            theParms='&nlq(['+', '.join(parmTmpDict[tc])+'], '
            plist.append(theParms + tc +')&nrq')
        modelDict[item[0][0]]={'DB':item[0],'Parms':plist}
    # Attach smartinit module names to the models they initialize.
    for si,ml in INITMODULES.items():
        m=ml[0]
        entry=si
        if len(ml) > 1:
            # Multiple d2d models for smartinit
            # Try to get model from si name
            if si.find('Local_') == 0:
                m=si[6:]
            entry=(si,ml)
        if m in modelDict:
            # If a model has multiple SmartInit modules, try to best match which
            # Smartinit module to assign to the model.
            if 'INITMODULES' not in modelDict[m] or m in si:
                modelDict[m]['INITMODULES']=entry
        else:
            modelDict[m]={'INITMODULES':entry}
    # Merge the per-model auxiliary settings; create entries for models
    # that were not seen in the database definitions above.
    for m,v in D2DDBVERSIONS.items():
        if m in modelDict:
            modelDict[m]['D2DDBVERSIONS']=D2DDBVERSIONS[m]
        else:
            modelDict[m]={'D2DDBVERSIONS':D2DDBVERSIONS[m]}
    for m,v in D2DAccumulativeElements.items():
        if m in modelDict:
            modelDict[m]['D2DAccumulativeElements']=v
        else:
            modelDict[m]={'D2DAccumulativeElements':v}
    for m,v in INITSKIPS.items():
        if m in modelDict:
            modelDict[m]['INITSKIPS']=v
        else:
            modelDict[m]={'INITSKIPS':v}
    # D2DMODELS entries are either a plain name or a (d2dName, gfeName) pair.
    for item in D2DMODELS:
        if type(item) is str:
            m=item
            v=item
        else:
            v,m=item
        if m in modelDict:
            modelDict[m]['D2DMODELS']=v
        else:
            modelDict[m]={'D2DMODELS':v}
    # Pretty-print each model entry, then strip the &nlq/&nrq markers and
    # re-flow the text for readability.
    for m in sorted(modelDict):
        text=pprint.pformat(modelDict[m],width=80,indent=0)
        text=text.replace("'&nlq",'')
        text=text.replace("&nrq'",'')
        text=text.replace('"&nlq','')
        text=text.replace('&nrq"','')
        text=text.replace(", 'INITMODULES':",",\n'INITMODULES':")
        text=text.replace(')]}','),\n ]\n}')
        text=text.replace('\n','\n    ')
        scText += "modelDict['%s'] = {\n    %s\n\n" % (m,text[1:])
    return scText
import ctypes
def ira_equinox(jd_tdb, equinox, accuracy=0):
    """
    To compute the intermediate right ascension of the equinox at
    the input Julian date, using an analytical expression for the
    accumulated precession in right ascension. For the true equinox,
    the result is the equation of the origins.

    Parameters
    ----------
    jd_tdb : float
        TDB Julian day.
    equinox : {0, 1}, optional
        = 0 ... mean equinox
        = 1 ... true equinox
    accuracy : {0, 1}, optional
        Code specifying the relative accuracy of the output
        position.
        = 0 ... full accuracy (default)
        = 1 ... reduced accuracy

    Returns
    -------
    ira_eq : float
        Intermediate right ascension of the equinox, in hours (+ or
        -). If 'equinox' = 1 (i.e. true equinox), then the returned
        value is the equation of the origins.

    References
    ----------
    .. [R1] Capitaine, N. et al. (2003), Astronomy and Astrophysics
        412, 567-586, eq. (42).
    """
    # Validate inputs before touching the C library.
    if jd_tdb < 0.0:
        raise ValueError(_neg_err.format(name='jd_tdb'))
    for option_name, option_value in (('equinox', equinox),
                                      ('accuracy', accuracy)):
        if option_value not in [0, 1]:
            raise ValueError(_option_err.format(name=option_name,
                                                allowed=[0, 1]))
    # Declare the C function's signature, then call it.
    c_func = novaslib.ira_equinox
    c_func.argtypes = (ctypes.c_double, ctypes.c_short, ctypes.c_short)
    c_func.restype = ctypes.c_double
    return c_func(jd_tdb, equinox, accuracy)
import os
def load_plugins(plugins_folder, verbose=True):
    """Import all plugins from target folder recursively."""
    plugins = []
    for dirpath, _dirnames, filenames in os.walk(plugins_folder):
        for filename in filenames:
            # Skip private modules (leading underscore) and non-Python files.
            if filename.startswith("_") or not filename.endswith(".py"):
                continue
            path_to_module = os.path.join(dirpath, filename)
            if verbose:
                logger.info("Loading plugin \"{}\"".format(path_to_module))
            plugins.append(import_plugin(path_to_module, path_to_module))
    return plugins
from typing import Sequence
from typing import Any
from typing import Callable
from typing import Dict
def _generate_steps(
    episode: Sequence[Any],
    step_fn: Callable[[Dict[str, Any]], Dict[str, Any]]) -> Dict[str, Any]:
    """Constructs a dictionary of steps for the given episode.

    Args:
        episode: Sequence of steps (envlogger.StepData). Assumes that there is at
            least one step.
        step_fn: function that gets an RLDS step and returns the processed step.

    Returns:
        Nested dictionary with the steps of one episode. Steps
        are represented by a nested dictionary where each nested field is a list.
    """
    # Single-step episode: only the terminal step exists.
    if len(episode) < 2:
        only_step = _build_last_step(episode[0], step_fn)
        return _to_nested_list(only_step)
    # episode[1] is used as the template because some fields of the very first
    # step (e.g. the action) may be None, so their shapes are unknown.
    template = _build_empty_step(episode[1], step_fn)
    steps = _empty_nested_list(template)
    # Each RLDS step is built from a (current, following) pair of raw steps.
    for current, following in zip(episode[:-1], episode[1:]):
        rlds_step = _build_step(current, following, step_fn)
        for field, value in rlds_step.items():
            steps[field] = _append_nested(steps[field], value)
    # The final raw step becomes the terminal RLDS step.
    final_step = _build_last_step(episode[-1], step_fn)
    for field, value in final_step.items():
        steps[field] = _append_nested(steps[field], value)
    return steps
def format_results_data(results_data, compute_rank=True):
    """Formats results_data as sorted list of rows so it can be easily displayed as results_table

    Appends user and ranks.
    Side effect: This function modifies objects (i.e. individual results)
    contained in results_data
    """
    Row = namedtuple("Result", ["user", "data"])
    rows = [Row(user=u, data=d) for u, d in results_data.items()]
    if compute_rank:
        # Rank by the previous rounds' points first, then by the total.
        rows.sort(key=lambda row: -row.data.previous_rounds_points)
        for rank, row in zip(get_ranks(rows), rows):
            row.data.prev_rank = rank
        rows.sort(key=lambda row: -row.data.sum)
        for rank, row in zip(get_ranks(rows), rows):
            row.data.rank = rank
    else:
        # Ranks were already computed; just order by them.
        rows.sort(key=lambda row: row.data.rank)
    return rows
def encode_triangle(X, centroids):
    """
    Perform triangle k-means encoding
    """
    # Broadcast samples against centroids: (n, 1, d) - (1, k, d) -> (n, k, d)
    diffs = X[:, np.newaxis, :] - centroids[np.newaxis, :, :]
    # Euclidean distance of every sample to every centroid: (n, k)
    dists = np.sqrt((diffs ** 2).sum(2))
    # Activation is how far below the per-sample mean distance each centroid
    # lies, clipped at zero.
    mean_dist = dists.mean(1, keepdims=True)
    return np.maximum(mean_dist - dists, 0)
def get_user_and_check_password(username, password):
    """
    Called by account controller and/or AuthKit valid_password to return a user from local db

    Returns the matching active User, or None when neither the username nor
    the email matches with the given password.
    """
    # First try the canonical username form.
    user = _find_active_user_with_password(User.id == make_username(username), password)
    if user is None:
        # AllanC - Added fallback to search for email as some users get confised as to how to identify themselfs
        # emails are not indexed? performance? using this should be safe as our username policy prohibits '@' and '.'
        user = _find_active_user_with_password(User.email == username, password)
    return user

def _find_active_user_with_password(identity_clause, password):
    """Return the single active user matching identity_clause and the hashed
    password token, or None when no row matches."""
    try:
        q = Session.query(User).select_from(join(User, UserLogin, User.login_details))
        q = q.filter(identity_clause)
        q = q.filter(User.status == 'active')
        q = q.filter(UserLogin.type == 'password')
        q = q.filter(UserLogin.token == encode_plain_text_password(password))
        return q.one()
    except NoResultFound:
        return None
import hashlib
def get_db_for_id(id_value):
    """
    Work out the database number containing a Review record with pk of
    "id_value".

    Returns an int in [1, settings.CLUSTER_SIZE].
    """
    # XXX: If we were interested in being able to add databases and do minimal
    # data moving, it would be better to use a consistent hashing algorithm
    # here (distribute the results across something like 100 * CLUSTER_SIZE and
    # map back onto CLUSTER_SIZE machines).
    # hashlib.md5 requires a bytes-like object in Python 3; encode the
    # stringified id first (also valid on Python 2).
    digest = hashlib.md5(str(id_value).encode('utf-8')).hexdigest()
    return 1 + (int(digest, 16) % settings.CLUSTER_SIZE)
def subclass(request):
    """Return a Object subclass"""
    # Parametrized fixtures expose `request.param`; fall back to an empty
    # config for unparametrized use.
    params = getattr(request, "param", {})
    class TestObject(Object):
        @property
        def fields(self):
            return params.get("fields", {})
    TestObject.__name__ = params.get("name", "TestObject")
    return TestObject
def is_index(file_name: str) -> bool:
    """Determines if a filename is exactly the reserved index name."""
    reserved_name = "index"
    return file_name == reserved_name
from typing import List
def _recv_n_get_rsp(event: "Event") -> List[str]:
    """Logging handler when an N-GET-RSP is received.

    Parameters
    ----------
    event : events.Event
        The evt.EVT_DIMSE_RECV event that occurred.
    """
    msg = event.message
    cs = msg.command_set
    # Report whether an Attribute List data set accompanies the response.
    attr_list = "None"
    if msg.data_set and msg.data_set.getvalue() != b"":
        attr_list = "Present"
    LOGGER.info("Received Get Response")
    lines = [
        f"{' INCOMING DIMSE MESSAGE ':=^76}",
        "Message Type : N-GET RSP",
        f"Presentation Context ID : {msg.context_id}",
        f"Message ID Being Responded To : {cs.MessageIDBeingRespondedTo}",
    ]
    # Optional identifiers are only present on some responses.
    if "AffectedSOPClassUID" in cs:
        lines.append(f"Affected SOP Class UID : {cs.AffectedSOPClassUID.name}")
    if "AffectedSOPInstanceUID" in cs:
        lines.append(f"Affected SOP Instance UID : {cs.AffectedSOPInstanceUID}")
    lines.extend([
        f"Attribute List : {attr_list}",
        f"Status : 0x{cs.Status:04X}",
        f"{' END DIMSE MESSAGE ':=^76}",
    ])
    for line in lines:
        LOGGER.debug(line)
    return lines
def GenerateShardedFilenames(spec):  # pylint:disable=invalid-name
    """Generate the list of filenames corresponding to the sharding path.

    Args:
        spec: Sharding specification.

    Returns:
        List of filenames.

    Raises:
        ShardError: If spec is not a valid sharded file specification.
    """
    basename, num_shards, suffix = ParseShardedFileSpec(spec)
    # Normalize the optional suffix to include its leading dot.
    suffix = ('.' + suffix) if suffix else ''
    width = _ShardWidth(num_shards)
    # e.g. width 5 -> '{0}-{1:05}-of-{2:05}{3}'
    template = '{{0}}-{{1:0{0}}}-of-{{2:0{0}}}{{3}}'.format(width)
    return [template.format(basename, shard_index, num_shards, suffix)
            for shard_index in range(num_shards)]
def _tseries_from_nifti_helper(coords, data, TR, filter, normalize, average):
    """
    Helper function for the function time_series_from_nifti, which does the
    core operations of pulling out data from a data array given coords and then
    normalizing and averaging if needed

    Parameters
    ----------
    coords : sequence of index arrays, or None
        Voxel coordinates as three index arrays (x, y, z); when None the
        whole `data` array is used.
    data : ndarray
        Volume data; the last axis is assumed to be time -- TODO confirm
        with the caller.
    TR : float
        Sampling interval for the constructed TimeSeries.
    filter : dict or None
        Filter spec with a 'method' key in {'boxcar', 'fourier', 'fir',
        'iir'} plus optional keyword parameters (lb, ub, filt_order, ...).
    normalize : {'percent', 'zscore', None}
        Optional normalization applied after filtering.
    average : bool
        When True, average across voxels/rows, keeping the time axis.

    Returns
    -------
    nitime TimeSeries with the extracted (and optionally filtered,
    normalized, averaged) data.
    """
    if coords is not None:
        out_data = np.asarray(data[coords[0], coords[1], coords[2]])
    else:
        out_data = data
    tseries = ts.TimeSeries(out_data, sampling_interval=TR)
    if filter is not None:
        if filter['method'] not in ('boxcar', 'fourier', 'fir', 'iir'):
            e_s = "Filter method %s is not recognized" % filter['method']
            raise ValueError(e_s)
        else:
            #Construct the key-word arguments to FilterAnalyzer:
            # Each parameter falls back to the FilterAnalyzer default when
            # not supplied in the filter dict.
            kwargs = dict(lb=filter.get('lb', 0),
                          ub=filter.get('ub', None),
                          boxcar_iterations=filter.get('boxcar_iterations', 2),
                          filt_order=filter.get('filt_order', 64),
                          gpass=filter.get('gpass', 1),
                          gstop=filter.get('gstop', 60),
                          iir_ftype=filter.get('iir_ftype', 'ellip'),
                          fir_win=filter.get('fir_win', 'hamming'))
            F = tsa.FilterAnalyzer(tseries, **kwargs)
        # Select the filtered series matching the requested method.
        if filter['method'] == 'boxcar':
            tseries = F.filtered_boxcar
        elif filter['method'] == 'fourier':
            tseries = F.filtered_fourier
        elif filter['method'] == 'fir':
            tseries = F.fir
        elif filter['method'] == 'iir':
            tseries = F.iir
    if normalize == 'percent':
        tseries = tsa.NormalizationAnalyzer(tseries).percent_change
    elif normalize == 'zscore':
        tseries = tsa.NormalizationAnalyzer(tseries).z_score
    if average:
        if coords is None:
            # Collapse all leading (spatial) axes into one, then average
            # across voxels while keeping the time axis.
            tseries.data = np.mean(np.reshape(tseries.data,
                                   (np.array(tseries.shape[:-1]).prod(),
                                    tseries.shape[-1])),0)
        else:
            tseries.data = np.mean(tseries.data, 0)
    return tseries
import hashlib
def hash_short(message, length=16):
    """Return a truncated SHA-1 digest of *message*.

    Args:
        message (bytes): data to hash (hashlib.sha1 requires a bytes-like
            object).
        length (int): number of output bits; the hex digest is truncated
            to length // 4 hex characters (4 bits per hex digit).

    Returns:
        str: the first length // 4 hex characters of SHA-1(message).
    """
    # Use integer division: `length / 4` is a float in Python 3, and a
    # float slice index raises TypeError.
    return hashlib.sha1(message).hexdigest()[:length // 4]
def feincms_frontend_editing(cms_obj, request):
    """
    {% feincms_frontend_editing feincms_page request %}
    """
    # Guard clause: only render the tools when a session exists and the
    # frontend_editing flag is set in it.
    if not (hasattr(request, 'session') and request.session.get('frontend_editing')):
        return u''
    context = template.RequestContext(request, {
        "feincms_page": cms_obj,
        'FEINCMS_ADMIN_MEDIA': feincms_settings.FEINCMS_ADMIN_MEDIA,
        'FEINCMS_ADMIN_MEDIA_HOTLINKING': feincms_settings.FEINCMS_ADMIN_MEDIA_HOTLINKING,
    })
    return render_to_string('admin/feincms/fe_tools.html', context)
def _add_tensor_cores(tt_a, tt_b):
    """Internal function to be called from add for two TT-tensors.

    Does the actual assembling of the TT-cores to add two TT-tensors:
    first/last cores are concatenated directly, inner cores are combined
    block-diagonally (padded with zeros off the diagonal).
    """
    ndims = tt_a.ndims()
    dtype = tt_a.dtype
    shape = shapes.lazy_raw_shape(tt_a)
    a_ranks = shapes.lazy_tt_ranks(tt_a)
    b_ranks = shapes.lazy_tt_ranks(tt_b)
    cores = []
    for idx in range(ndims):
        a_core = tt_a.tt_cores[idx]
        b_core = tt_b.tt_cores[idx]
        if idx == 0:
            # First core: stack along the right TT-rank axis.
            merged = tf.concat((a_core, b_core), axis=2)
        elif idx == ndims - 1:
            # Last core: stack along the left TT-rank axis.
            merged = tf.concat((a_core, b_core), axis=0)
        else:
            # Inner cores: build a block-diagonal core
            # [[a_core, 0], [0, b_core]] along the rank axes.
            pad_top_right = tf.zeros((a_ranks[idx], shape[0][idx],
                                      b_ranks[idx + 1]), dtype)
            pad_bottom_left = tf.zeros((b_ranks[idx], shape[0][idx],
                                        a_ranks[idx + 1]), dtype)
            top = tf.concat((a_core, pad_top_right), axis=2)
            bottom = tf.concat((pad_bottom_left, b_core), axis=2)
            merged = tf.concat((top, bottom), axis=0)
        cores.append(merged)
    return cores
from typing import OrderedDict
def create_mosaic_iterative(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None):
    """
    Description:
      Creates a most recent - oldest mosaic of the input dataset. If no clean mask is given,
      the 'cf_mask' variable must be included in the input dataset, as it will be used
      to create a clean mask
    -----
    Inputs:
      dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain
        coordinates: time, latitude, longitude
        variables: variables to be mosaicked
        If user does not provide a clean_mask, dataset_in must also include the cf_mask
        variable
    Optional Inputs:
      clean_mask (nd numpy array with dtype boolean) - true for values user considers clean;
        if user does not provide a clean mask, one will be created using cfmask
      no_data (int/float) - no data pixel value; default: -9999
      intermediate_product (xarray.Dataset) - partial mosaic to continue filling
    Output:
      dataset_out (xarray.Dataset) - mosaicked data with
        coordinates: latitude, longitude
        variables: same as dataset_in
    """
    # Create clean_mask from cfmask if none given
    if clean_mask is None:
        cfmask = dataset_in.cf_mask
        clean_mask = utilities.create_cfmask_clean_mask(cfmask)
        dataset_in = dataset_in.drop('cf_mask')
    # Mask data with clean_mask: every pixel flagged unclean becomes no_data.
    for key in list(dataset_in.data_vars):
        dataset_in[key].values[np.invert(clean_mask)] = no_data
    if intermediate_product is not None:
        dataset_out = intermediate_product.copy(deep=True)
    else:
        dataset_out = None
    # Walk time slices from most recent to oldest, only filling pixels that
    # are still no_data in the accumulating mosaic.
    for index in reversed(range(len(clean_mask))):
        dataset_slice = dataset_in.isel(time=index).astype("int16").drop('time')
        if dataset_out is None:
            dataset_out = dataset_slice.copy(deep=True)
            # clear out the params as they can't be written to nc.
            dataset_out.attrs = OrderedDict()
        else:
            for key in list(dataset_in.data_vars):
                # BUG FIX: compare against the caller-supplied no_data value
                # instead of the hard-coded -9999 literal so that non-default
                # sentinels work; computing the mask once also avoids the
                # duplicated comparison.
                unfilled = dataset_out[key].values == no_data
                dataset_out[key].values[unfilled] = dataset_slice[key].values[unfilled]
    return dataset_out
from typing import Sequence
from typing import cast
import collections
def dicom_file_loader(
    accept_multiple_files: bool, stop_before_pixels: bool
) -> Sequence["pydicom.Dataset"]:
    """A Streamlit component that provides DICOM upload functionality.

    Parameters
    ----------
    accept_multiple_files : ``bool``
        Whether or not the wrapped
        `st.file_uploader <https://docs.streamlit.io/en/stable/api.html#streamlit.file_uploader>`_
        utility will accept multiple uploads. This argument is directly
        passed through as a keyword parameter to ``st.file_uploader``.
    stop_before_pixels : ``bool``
        Whether or not the wrapped
        `pydicom.dcmread <https://pydicom.github.io/pydicom/dev/reference/generated/pydicom.filereader.dcmread.html#pydicom.filereader.dcmread>`_
        will read the pixel values of the DICOM file or stop reading the
        header before loading the pixel values. This argument is
        directly passed through as a keyword parameter to
        ``pydicom.dcmread``

    Returns
    -------
    datasets : a list of ``pydicom.Dataset``
        The PyDICOM datasets corresponding to the files uploaded by the
        user.
    """
    # Two-column layout: uploader on the left, per-patient summary on the
    # right.
    left_column, right_column = st.beta_columns(2)
    if accept_multiple_files:
        file_string = "files"
    else:
        file_string = "file"
    with left_column:
        st.write(f"## Upload DICOM {file_string}")
        # This is specifically not limited to .dcm extensions as
        # sometimes DICOM exports to file don't have any file extension.
        files: UploadedFiles = st.file_uploader(
            f"DICOM {file_string}", accept_multiple_files=accept_multiple_files
        )
    if not files:
        # NOTE(review): st.stop() raises internally to halt the script, so
        # the surrounding `raise` presumably never executes -- confirm
        # against the pinned streamlit version.
        raise st.stop()
    # Normalize the uploader's return value to a sequence: with
    # accept_multiple_files=False a single (non-indexable) file is returned.
    assumed_sequence = cast(Files, files)
    try:
        assumed_sequence[0]
    except TypeError:
        # If the return from file uploader is not indexable
        a_single_file = cast(File, files)
        files = [a_single_file]
    files = cast(Files, files)
    datasets = []
    for a_file in files:
        try:
            # Rewind the buffer in case it was read elsewhere (e.g. on a
            # Streamlit rerun).
            a_file.seek(0)
            dataset = pydicom.dcmread(
                a_file, force=True, stop_before_pixels=stop_before_pixels
            )
        except Exception as e:
            # Surface the failure in the UI, then halt the script run.
            st.warning(
                f'Failed reading the file "`{a_file.name}`". The error was the following:'
            )
            st.error(e)
            st.stop()
            raise
        datasets.append(dataset)
    # Group uploaded filenames and display names by PatientID so the
    # summary shows one entry per patient.
    patient_id_filenames_map = collections.defaultdict(set)
    patient_id_names_map = collections.defaultdict(set)
    for dataset, a_file in zip(datasets, files):
        patient_id = dataset.PatientID
        patient_name = _dcm_utilities.pretty_patient_name(dataset)
        patient_id_filenames_map[patient_id].add(a_file.name)
        patient_id_names_map[patient_id].add(patient_name)
    with right_column:
        st.write("## Details")
        for patient_id, filenames in patient_id_filenames_map.items():
            patient_names = patient_id_names_map[patient_id]
            st.write(
                f"""
            * {_optionally_write_with_plural("Filename", filenames)}
              * Patient ID: `{patient_id}`
              * {_optionally_write_with_plural("Patient Name", patient_names)}
            """
            )
    return datasets
def get_substrings(source: str):
    """Get all substrings of a given string

    Args:
        source (str): the string to generate the substrings from

    Returns:
        list: every contiguous substring source[i:j] for
            0 <= i < j <= len(source), in order of start position then
            increasing length. Duplicates are included when the same text
            occurs at several positions; an empty input yields [].
    """
    # BUG FIX: the previous implementation only produced prefixes plus
    # duplicated single characters (its outer loop over start positions was
    # commented out), which did not match the documented contract.
    substrings = []
    for start in range(len(source)):
        for end in range(start + 1, len(source) + 1):
            substrings.append(source[start:end])
    return substrings
import csv
def _write_output_csv(reader: csv.DictReader, writer: csv.DictWriter,
                      config: dict) -> list:
    """Reads each row of a CSV and creates statvars for counts of
    Incidents, Offenses, Victims and Known Offenders with different bias
    motivations.

    Args:
        reader: CSV dict reader.
        writer: CSV dict writer of final cleaned CSV.
        config: A dict which maps constraint props to the statvar based on
          values in the CSV. See scripts/fbi/hate_crime/table1/config.json for
          an example.

    Returns:
        A list of statvars.
    """
    # The four populations and the CSV columns holding their counts, kept in
    # lockstep so they can be zipped together below.
    population_keys = ('Incidents', 'Offenses', 'Victims', 'KnownOffender')
    count_columns = ('incidents', 'offenses', 'victims', 'known offenders')
    statvars = []
    for crime in reader:
        # Fresh shallow copies of the config templates for this row.
        statvar_list = [dict(config['populationType'][key])
                        for key in population_keys]
        bias_key_value = config['pvs'][crime['bias motivation']]
        utils.update_statvars(statvar_list, bias_key_value)
        utils.update_statvar_dcids(statvar_list, config)
        for statvar, column in zip(statvar_list, count_columns):
            _write_row(crime['Year'], statvar['Node'], crime[column], writer)
        statvars.extend(statvar_list)
    return statvars
def get_vehicle_lay_off_engine_acceleration(carla_vehicle):
    """
    Calculate the acceleration a carla vehicle faces by the engine on lay off

    This respects the following forces:
    - engine brake force

    :param carla_vehicle: the carla vehicle
    :type carla_vehicle: carla.Vehicle
    :return: acceleration the vehicle [m/s^2 < 0]
    :rtype: float64
    """
    # a = F / m with F the (decelerating, hence negated) engine brake force.
    brake_force = get_engine_brake_force(carla_vehicle)
    vehicle_mass = get_vehicle_mass(carla_vehicle)
    return -brake_force / vehicle_mass
from typing import Dict
from typing import Any
import sys
def create_data(name: str) -> Dict[str, Any]:
    """Arbitrary function that returns a dictionary.

    This demonstration uses pydantic, but any dictionary can be tested!
    """
    user = User(id=sys.maxsize, name=name)
    return user.dict()
def _run_docker_shell_script(script_name, docker_dir, trailing_args=None,
    env_variables=None):
    """
    Run a docker helper shell script and return its exit code.

    script_name (String) filename of a script in the nest/docker/ directory
    docker_dir (String) directory of docker scripts in the nest repo. expected
        to be /code_live/docker, but may be different is run outside
        of the nest_ops container
    trailing_args (list of String or None): extra arguments appended to the
        shell command line
    env_variables (dict of string->string): will be set as commandline env
        variables in the shell that the script is run
    """
    # BUG FIX: avoid a mutable default argument (env_variables=dict());
    # a shared default dict would leak state across calls if a callee ever
    # mutated it. Use a None sentinel instead.
    if env_variables is None:
        env_variables = {}
    #sh cannot be broken into its own list element or you will be dropped
    #into a shell
    cmd_ary = [('sh ' + script_name)]
    if trailing_args is not None:
        cmd_ary.extend(trailing_args)
    cmd = " ".join(cmd_ary)
    log("Executing command: " + str(cmd))
    cr = container_users.make_host_user_command_runner()
    cr.set_working_dir(docker_dir)
    cr.add_env_variables(env_variables)
    result = cr.run(cmd, stream_log=True)
    exit_code = result.get_exit_code()
    return exit_code
def _create_functional_connect_edges_dynamic(n_node, is_edge_func):
    """Creates complete edges for a graph with `n_node`.

    Args:
        n_node: (integer scalar `Tensor`) The number of nodes.
        is_edge_func: (bool) callable(sender, receiver) that returns tf.bool if connected. Must broadcast.

    Returns:
        A dict of RECEIVERS, SENDERS and N_EDGE data (`Tensor`s of rank 1).
    """
    node_ids = tf.range(n_node)
    # Broadcast to an n_node x n_node boolean adjacency matrix.
    adjacency = is_edge_func(node_ids, node_ids[:, None])
    n_edge = tf.reduce_sum(tf.cast(adjacency, tf.int32))
    # tf.where yields (receiver, sender) index pairs for True entries.
    edge_indices = tf.where(adjacency)
    receivers = tf.reshape(tf.cast(edge_indices[:, 0], tf.int32), [n_edge])
    senders = tf.reshape(tf.cast(edge_indices[:, 1], tf.int32), [n_edge])
    return {'receivers': receivers,
            'senders': senders,
            'n_edge': tf.reshape(n_edge, [1])}
import os
def _BreakoutFilesByLinter(files):
    """Maps a linter method to the list of files to lint."""
    linter_to_files = {}
    for path in files:
        ext = os.path.splitext(path)[1]
        # Dispatch on extension; anything unrecognized is ignored.
        if ext in PYTHON_EXTENSIONS:
            linter_to_files.setdefault(_PylintFiles, []).append(path)
        elif ext in CPP_EXTENSIONS:
            linter_to_files.setdefault(_CpplintFiles, []).append(path)
    return linter_to_files
from operator import matmul
def simulateBERParallel(codes, channelfun, params, printValue=True):
    """
    Simulates BER values at multiple SNRs, where the massively parallel algorithm is used. This implementation is especially designed for cupy.

    Args:
        codes (ndarray): an input codebook, which is generated on the CPU memory and is transferred into the GPU memory.
        channelfun (function): channel generator called as channelfun(N, M, ITi); it must return an (ITi * N) x M channel matrix.
        params (dict): simulation parameters with keys M, N, T, ITo, ITi, from, to, len.
        printValue (bool): a flag that determines whether to print the simulated values.

    Returns:
        dict: a dict that has two keys: snr_dB and ber, and contains the corresponding results. All the results are transferred into the CPU memory.

    NOTE(review): linspace/tile/arange/sqrt/... and the helpers inv_dB,
    getXORtoErrorBitsArray, randn_c presumably come from a star-import of a
    numpy/cupy wrapper module at file level -- confirm.
    """
    M, N, T, ITo, ITi, Nc, B = params["M"], params["N"], params["T"], params["ITo"], params["ITi"], codes.shape[
        0], log2(codes.shape[0])
    snr_dBs = linspace(params["from"], params["to"], params["len"])
    # Noise variances corresponding to each SNR point.
    sigmav2s = 1.0 / inv_dB(snr_dBs)
    codei = tile(arange(Nc), ITi)
    xor2ebits = getXORtoErrorBitsArray(Nc)
    # Precompute all Nc^2 pairwise codeword differences once.
    x = hstack(tile(codes, Nc)) # M \times T * Nc^2
    # x = [codes[0] codes[0] ... codes[0] codes[1] ...]
    y = tile(hstack(codes), Nc) # M \times T * Nc^2
    # y = [codes[0] codes[1] ... codes[Nc-1] codes[0] ...]
    diffxy = x - y # M \times T * Nc^2
    bers = zeros(len(snr_dBs))
    for ito in trange(ITo):
        # Fresh channel and noise realizations shared across all SNR points.
        bigh = channelfun(N, M, ITi) # ITi * N \times M
        bigv = tile(randn_c(ITi * N, T), Nc * Nc) # ITi * N \times T * Nc^2
        for i in range(len(snr_dBs)):
            ydiff = matmul(bigh, diffxy) + bigv * sqrt(sigmav2s[i]) # ITi * N \times T * Nc^2
            ydifffro = square(abs(ydiff)).reshape(ITi, N, Nc * Nc, T) # ITi \times N \times Nc * Nc \times T
            ydifffrosum = sum(ydifffro, axis=(1, 3)) # ITi \times Nc * Nc
            norms = ydifffrosum.reshape(ITi, Nc, Nc) # ITi \times Nc \times Nc
            # ML detection: choose the candidate minimizing the metric.
            mini = argmin(norms, axis=2).reshape(ITi * Nc)
            # Bit errors are counted via the XOR-to-error-bits lookup table.
            errorBits = sum(xor2ebits[codei ^ mini])
            bers[i] += errorBits
            if printValue:
                nbits = (ito + 1) * ITi * B * Nc
                print("At SNR = %1.2f dB, BER = %d / %d = %1.10e" % (snr_dBs[i], bers[i], nbits, bers[i] / nbits))
    # Normalize accumulated error counts into bit error rates.
    bers /= ITo * ITi * B * Nc
    return {"snr_dB": snr_dBs, "ber": bers}
from datetime import datetime
import json
def modify_app_description(s, base_url, app_id, description):
    """
    Modifies the description of an app
    """
    # Fetch the current app definition and patch its fields.
    payload = app_full(s, base_url, app_id)[1]
    # Bump the modification timestamp one day into the future (isoformat
    # already returns a str, so no extra conversion is needed).
    payload["modifiedDate"] = (datetime.today() + timedelta(days=1)).isoformat() + "Z"
    payload["description"] = description
    response = s.put(
        base_url + "/qrs/app/" + app_id + "?xrfkey=abcdefg123456789",
        data=json.dumps(payload),
        headers={"Content-Type": "application/json"},
    )
    return response.status_code
def search_tag(resource_info, tag_key):
    """Search tag in tag list by given tag key.

    Returns the first matching tag's value, or None when absent.
    """
    for tag in resource_info.get("Tags", []):
        if tag["Key"] == tag_key:
            return tag["Value"]
    return None
def compute_GARCH_price(theta,
                        num_periods,
                        init_price,
                        init_sigma,
                        risk_free_rate,
                        num_simulations=50000):
    """
    Compute asset price at period t + s (s periods ahead) given estimated theta values and initial price at t
    using Monte Carlo simulation

    Param
    -----
    theta: Tuple(Float, Float, Float, Float, Float)
        GARCH(1, 1) parameter: (alpha_0, alpha_1, beta_1, risk premium lambda, standard deviation sigma)
    num_periods: Int
        Number of steps ahead for which price is calculated.
        If we are at time t, and would like to compute price at T, then num_periods = T - t
    init_price: Float
        Price at time t
    init_sigma: Float
        Conditional standard deviation at time t
    risk_free_rate: Float
        Yearly return of risk-free asset. If theta is estimated with a certain risk-free-rate,
        then that rate should be given here.
    num_simulations: Int
        Number of Monte Carlo simulations to be made

    Return
    ------
    Tuple(ndarray, ndarray, ndarray):
        A tuple of size 3, where the elements are, respectively
        (simulated prices, simulated conditional variance, simulated innovation).
        Each ndarray has shape (n, ), where n is the number of MC simulations made.
    """
    # NOTE: the final sigma0 component of theta is unpacked but unused; the
    # initial deviation comes from init_sigma.
    a0, a1, b1, lamb, sigma0 = theta
    grid_shape = (num_periods, num_simulations)
    # Conditional variances sigma_sq_t, seeded with the initial variance.
    cond_var = np.zeros(shape=grid_shape)
    cond_var[0, :] = init_sigma ** 2
    # Innovations xi_t; draw all standard normals up front, then scale the
    # first row by the initial deviation.
    innovations = np.random.standard_normal(size=grid_shape)
    innovations[0, :] = innovations[0, :] * init_sigma
    # Recursively propagate GARCH(1, 1) variance and scale innovations.
    for step in range(1, num_periods):
        cond_var[step, :] = a0 \
            + a1 * (innovations[step - 1, :] - lamb * np.sqrt(cond_var[step - 1, :])) ** 2 \
            + b1 * cond_var[step - 1, :]
        innovations[step, :] = innovations[step, :] * np.sqrt(cond_var[step, :])
    # Terminal price: risk-free drift with variance correction plus the
    # accumulated innovations.
    prices = init_price * np.exp(num_periods * risk_free_rate
                                 - 0.5 * np.sum(cond_var, axis=0)
                                 + np.sum(innovations, axis=0))
    return prices, cond_var, innovations
def create_order_nb(size: float,
                    price: float,
                    size_type: int = SizeType.Amount,
                    direction: int = Direction.All,
                    fees: float = 0.,
                    fixed_fees: float = 0.,
                    slippage: float = 0.,
                    min_size: float = 0.,
                    max_size: float = np.inf,
                    reject_prob: float = 0.,
                    lock_cash: bool = False,
                    allow_partial: bool = True,
                    raise_reject: bool = False,
                    log: bool = False) -> Order:
    """Create an order with some defaults.

    Builds an ``Order`` record, coercing every numeric field to ``float``
    so the resulting record has homogeneous scalar types.
    NOTE(review): the ``_nb`` suffix suggests this is used on a
    Numba-compiled code path, which would require that homogeneity —
    confirm against the rest of the package.

    All arguments are forwarded to ``Order`` unchanged apart from the
    float coercion; ``size_type`` and ``direction`` are integer enum
    values (``SizeType`` / ``Direction``), and the trailing booleans
    (``lock_cash``, ``allow_partial``, ``raise_reject``, ``log``) are
    passed through as-is.
    """
    return Order(
        size=float(size),
        price=float(price),
        size_type=size_type,
        direction=direction,
        fees=float(fees),
        fixed_fees=float(fixed_fees),
        slippage=float(slippage),
        min_size=float(min_size),
        max_size=float(max_size),
        reject_prob=float(reject_prob),
        lock_cash=lock_cash,
        allow_partial=allow_partial,
        raise_reject=raise_reject,
        log=log
    ) | 660940e9258f1b43352cf5e2b78e083b0c984bc7 | 3,630,977
def generate_noise_2d_fft_filter(F, randstate=None, seed=None, fft_method=None,
                                 domain="spatial"):
    """Produces a field of correlated noise using global Fourier filtering.

    Parameters
    ----------
    F : dict
        A filter object returned by
        :py:func:`pysteps.noise.fftgenerators.initialize_param_2d_fft_filter` or
        :py:func:`pysteps.noise.fftgenerators.initialize_nonparam_2d_fft_filter`.
        All values in the filter array are required to be finite.
    randstate : mtrand.RandomState
        Optional random generator to use. If set to None, use numpy.random.
    seed : int
        Value to set a seed for the generator. None will not set the seed.
    fft_method : str or tuple
        A string or a (function,kwargs) tuple defining the FFT method to use
        (see "FFT methods" in :py:func:`pysteps.utils.interface.get_method`).
        Defaults to "numpy".
    domain : {"spatial", "spectral"}
        The domain for the computations: If "spatial", the noise is generated
        in the spatial domain and transformed back to spatial domain after the
        Fourier filtering. If "spectral", the noise field is generated and kept
        in the spectral domain.

    Returns
    -------
    N : array-like
        A two-dimensional field of stationary correlated noise. The noise field
        is normalized to zero mean and unit variance.

    Raises
    ------
    ValueError
        If ``domain`` is invalid or the filter field is not a finite 2-D array.
    """
    if domain not in ["spatial", "spectral"]:
        raise ValueError(
            "invalid value %s for the 'domain' argument: must be 'spatial' or 'spectral'"
            % str(domain))
    input_shape = F["input_shape"]
    use_full_fft = F["use_full_fft"]
    F = F["field"]
    if len(F.shape) != 2:
        raise ValueError("field is not two-dimensional array")
    if np.any(~np.isfinite(F)):
        raise ValueError("field contains non-finite values")
    if randstate is None:
        randstate = np.random
    # Seed only when explicitly requested so callers can share a generator.
    if seed is not None:
        randstate.seed(seed)
    # Resolve the FFT backend: default, named method, or a pre-built object.
    if fft_method is None:
        fft = utils.get_method("numpy", shape=input_shape)
    elif isinstance(fft_method, str):
        # isinstance instead of `type(...) == str` so str subclasses work too.
        fft = utils.get_method(fft_method, shape=input_shape)
    else:
        fft = fft_method
    # Produce a field of white noise.
    if domain == "spatial":
        N = randstate.randn(input_shape[0], input_shape[1])
    else:
        # Spectral domain: draw random phases directly.
        if use_full_fft:
            size = (input_shape[0], input_shape[1])
        else:
            size = (input_shape[0], int(input_shape[1]/2)+1)
        theta = randstate.uniform(low=0.0, high=2.0*np.pi, size=size)
        # Impose anti-symmetry on the first-column phases so that the
        # implied spatial field is real-valued (Hermitian symmetry).
        if input_shape[0] % 2 == 0:
            theta[int(input_shape[0]/2)+1:, 0] = -theta[1:int(input_shape[0]/2), 0][::-1]
        else:
            theta[int(input_shape[0]/2)+1:, 0] = -theta[1:int(input_shape[0]/2)+1, 0][::-1]
        N = np.cos(theta) + 1.j * np.sin(theta)
    # Apply the global Fourier filter to impose a correlation structure.
    if domain == "spatial":
        if use_full_fft:
            fN = fft.fft2(N)
        else:
            fN = fft.rfft2(N)
    else:
        fN = N
    fN *= F
    if domain == "spatial":
        # Transform back and normalize to zero mean / unit variance.
        if use_full_fft:
            N = np.array(fft.ifft2(fN).real)
        else:
            N = np.array(fft.irfft2(fN))
        N = (N - N.mean()) / N.std()
    else:
        # Normalize in the spectral domain: zero the DC component and
        # divide by the spectral standard deviation.
        N = fN
        N[0, 0] = 0.0
        N /= utils.spectral.std(N, input_shape, use_full_fft=use_full_fft)
    return N
import os
def _get_image_files_and_labels(name, csv_path, image_dir):
    """Process input and get the image file paths, image ids and the labels.

    Args:
      name: 'train' or 'test'.
      csv_path: path to the Google-landmark Dataset csv Data Sources files.
      image_dir: directory that stores downloaded images.
    Returns:
      image_paths: the paths to all images in the image_dir.
      file_ids: the unique ids of images.
      labels: the landmark id of all images. When name='test', the returned
        labels will be an empty list.
    Raises:
      ValueError: if input name is not supported.
    """
    image_paths = tf.io.gfile.glob(image_dir + '/*.jpg')
    # File id = basename without the trailing ".jpg".
    file_ids = [os.path.basename(os.path.normpath(p))[:-4] for p in image_paths]
    if name == 'test':
        # Test split ships without ground-truth labels.
        return image_paths, file_ids, []
    if name != 'train':
        raise ValueError('Unsupported dataset split name: %s' % name)
    # Train split: look up each file id's landmark id in the csv.
    with tf.io.gfile.GFile(csv_path, 'rb') as csv_file:
        id_to_landmark = pd.read_csv(csv_file).set_index('id')
    labels = [int(id_to_landmark.loc[fid]['landmark_id']) for fid in file_ids]
    return image_paths, file_ids, labels
def get_spending_features(txn, windows_size=(1, 7, 30)):
    """Add rolling spending features to a single customer's transactions.

    For each window length ``w`` (in days) two columns are added to ``txn``:

    * ``nb_txns_{w}_days``       -- number of transactions in the trailing window
    * ``avg_txns_amt_{w}_days``  -- average transaction amount in that window

    Args:
        txn: grouped transactions of one customer; must carry ``trans_date``
            (datetime-like, drives the time-based rolling window), ``amt``
            and ``trans_num`` columns.  The frame is mutated in place.
        windows_size: iterable of window lengths in days.  A tuple is used
            as the default (instead of a list) to avoid the shared
            mutable-default-argument pitfall.

    Returns:
        The same DataFrame with the new feature columns, re-indexed by
        ``trans_num``.
    """
    # Time-based rolling windows require a datetime index.
    txn.index = txn.trans_date
    for size in windows_size:
        # Total amount and transaction count over the trailing `size` days.
        rolling_amt = txn["amt"].rolling(f"{size}D").sum()
        rolling_cnt = txn["amt"].rolling(f"{size}D").count()
        avg_amt = rolling_amt / rolling_cnt
        # Assign as plain lists so alignment is positional, not index-based.
        txn[f"nb_txns_{size}_days"] = list(rolling_cnt)
        txn[f"avg_txns_amt_{size}_days"] = list(avg_amt)
    # Re-index by transaction id for downstream joins.
    txn.index = txn.trans_num
    return txn
def break_up(expr):
    """
    Break an expression with nested parentheses into parenthesis-free
    sub-expressions, innermost first, so they can be evaluated before the
    outer expression.  Each extracted sub-expression is replaced in the
    larger expression by a placeholder symbol ``p0``, ``p1``, ...

    Returns the reduced expression and a dict mapping each placeholder to
    the sub-expression it stands for.
    """
    remaining = expr
    sub_exprs = {}
    counter = 0
    while "(" in remaining:
        # Pull out the first innermost "(...)" group and substitute it.
        inner = first_inner_parenthesis(remaining)
        placeholder = f'p{counter}'
        remaining = remaining.replace(f'({inner})', placeholder)
        sub_exprs[placeholder] = inner
        counter += 1
    return remaining, sub_exprs
def fft_operation(file_path):
    """
    Run an FFT over a wav file and return its normalised spectrum.

    :param file_path: Original wav file path
    :return: (positive frequencies, 20 * log10(fft / max_fft), base name of
              the file between the last '/' and the first '.')
    """
    samples, _channels, _width, framerate, numframes = read_file(file_path)
    # Magnitude spectrum normalised by the number of samples.
    magnitude = np.abs(np.fft.fft(samples)) / len(samples)
    # Keep the positive-frequency half; double it to conserve energy.
    half_spectrum = 2 * magnitude[range(int(len(samples) / 2))]
    freq_axis = np.linspace(0, framerate, numframes)
    positive_freqs = freq_axis[:int(len(freq_axis) / 2)]
    # Spectrum in dB relative to its own peak.
    decibels = 20 * np.log10(half_spectrum / np.max(half_spectrum))
    name = file_path[file_path.rfind('/') + 1:file_path.find('.')]
    return positive_freqs, decibels, name
import six
def _expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
retval.update(
str(i) for i in six.moves.xrange(int(low), int(hi) + 1))
else:
retval.add(code)
return retval | 52056db88bf14352d4cda2411f25855457defbd7 | 3,630,983 |
from typing import Optional
from pathlib import Path
import importlib
def predict(
    seq: np.ndarray,
    aa_cut: Optional[np.ndarray] = None,
    percent_peptide: Optional[np.ndarray] = None,
    model: Optional[GradientBoostingRegressor] = None,
    model_file: Optional[Path] = None,
    pam_audit: bool = True,
    length_audit: bool = False,
    learn_options_override: Optional[dict[str, str]] = None,
    verbose: bool = False,
) -> ndarray:
    """
    Predict guide scores with an Azimuth gradient-boosting model.

    Parameters
    ----------
    seq : :class:np.ndarray
        numpy array of 30 nt sequences.
    aa_cut : numpy array of amino acid cut positions (optional).
    percent_peptide : numpy array of percent peptide (optional).
    model : model instance to use for prediction (optional).
        When given, it must be a ``(model, learn_options)`` pair.
    model_file : file name of pickled model to use for prediction (optional).
    pam_audit : check PAM of each sequence.
    length_audit : check length of each sequence.
    learn_options_override : a dictionary indicating which learn_options to
        override (optional).
    verbose : bool
        display extra information

    Raises
    ------
    AssertionError
        On malformed inputs, or when the model returns only 0/1 values
        (i.e. a classifier's ``predict`` instead of scores).
    FileNotFoundError
        When ``model_file`` is given but does not exist.

    Return
    ------
    :class:`~np.array`
    """
    # --- input validation -------------------------------------------------
    if not isinstance(seq, np.ndarray):
        raise AssertionError("Please ensure seq is a numpy array")
    if len(seq[0]) <= 0:
        raise AssertionError("Make sure that seq is not empty")
    if not isinstance(seq[0], str):
        raise AssertionError(
            f"Please ensure input sequences are in string format, i.e. 'AGAG' "
            f"rather than ['A' 'G' 'A' 'G'] or alternate representations"
        )
    if aa_cut is not None:
        if len(aa_cut) <= 0:
            raise AssertionError("Make sure that aa_cut is not empty")
        if not isinstance(aa_cut, np.ndarray):
            raise AssertionError("Please ensure aa_cut is a numpy array")
        if not np.all(np.isreal(aa_cut)):
            raise AssertionError("amino-acid cut position needs to be a real number")
    if percent_peptide is not None:
        if len(percent_peptide) <= 0:
            raise AssertionError("Make sure that percent_peptide is not empty")
        if not isinstance(percent_peptide, np.ndarray):
            raise AssertionError("Please ensure percent_peptide is a numpy array")
        if not np.all(np.isreal(percent_peptide)):
            raise AssertionError("percent_peptide needs to be a real number")
    if model_file is not None and not model_file.exists():
        raise FileNotFoundError(f"The file {model_file.name} could not be found.")
    # --- model selection --------------------------------------------------
    # Without an explicit model file, pick the position-aware model only
    # when usable position information was supplied.
    if model_file is None:
        if np.any(percent_peptide == -1) or (
            percent_peptide is None and aa_cut is None
        ):
            if verbose:
                print("No model file specified, using V3_model_nopos")
            model_name = "V3_model_nopos.pickle"
        else:
            if verbose:
                print("No model file specified, using V3_model_full")
            model_name = "V3_model_full.pickle"
        model_file = importlib.resources.files("azimuth").joinpath(
            "azure_models", model_name
        )
    # Unpickle the bundled model unless a (model, learn_options) pair was given.
    if model is None:
        f = importlib.resources.read_binary(
            f"azimuth.{model_file.parent.stem}", model_file.name
        )
        model, learn_options = loads(f)
    else:
        model, learn_options = model
    learn_options["V"] = 2
    learn_options = override_learn_options(learn_options_override, learn_options)
    # --- featurization ----------------------------------------------------
    x_df = pd.DataFrame(
        columns=["30mer", "Strand"],
        data=list(zip(seq, ["NA" for x in range(len(seq))])),
    )
    # Gene-position frame: real values when available, -1 sentinels otherwise.
    if np.all(percent_peptide != -1) and (
        percent_peptide is not None and aa_cut is not None
    ):
        gene_position = pd.DataFrame(
            columns=["Percent Peptide", "Amino Acid Cut position"],
            data=list(zip(percent_peptide, aa_cut)),
        )
    else:
        gene_position = pd.DataFrame(
            columns=["Percent Peptide", "Amino Acid Cut position"],
            data=list(zip(np.ones(seq.shape[0]) * -1, np.ones(seq.shape[0]) * -1)),
        )
    feature_sets = featurize_data(
        x_df,
        learn_options,
        pd.DataFrame(),
        gene_position,
        pam_audit=pam_audit,
        length_audit=length_audit,
    )
    inputs, *_ = concatenate_feature_sets(feature_sets)
    # call to scikit-learn, returns a vector of predicted values
    preds = model.predict(inputs)
    # also check that predictions are not 0/1 from a classifier.predict()
    # (instead of predict_proba() or decision_function())
    if np.all([True if pr in (0, 1) else False for pr in np.unique(preds)]):
        raise AssertionError("model returned only 0s and 1s")
    return preds | 50cb3adc03d017e7e94353dd9a9b15166e616d4a | 3,630,984
import collections
def _is_proper_sequence(seq):
"""Returns is seq is sequence and not string."""
return (isinstance(seq, collections.abc.Sequence) and
not isinstance(seq, str)) | d5f1f211330a9f4928b8cc8c7407adaf705fd4b2 | 3,630,985 |
def sigmoid(mat, target=None):
    """
    Apply the logistic sigmoid to each element of the matrix mat.

    If `target` is omitted the operation is performed in place on `mat`;
    otherwise the result is written to `target`.  Returns the matrix that
    received the result.
    """
    # Explicit None check (rather than `not target`): avoids invoking
    # truthiness on matrix objects and never discards a falsy-but-valid target.
    if target is None:
        target = mat
    err_code = _cudamat.apply_sigmoid(mat.p_mat, target.p_mat)
    if err_code:
        raise generate_exception(err_code)
    return target
def mutation_modify_controlaction(controlaction_id: str, actionstatus: ActionStatusType, error: str = None):
    """Build the mutation that updates a ControlAction's status and error.

    Arguments:
        controlaction_id: The unique identifier of the ControlAction.
        actionstatus: the status to update to.
        error: error text to attach, intended for FailedActionStatus.
    Returns:
        The string for the mutation for modifying a ControlAction.
    Raises:
        InvalidActionStatusException: if actionstatus is not an
        ActionStatusType member.
    """
    if not isinstance(actionstatus, ActionStatusType):
        raise trompace.exceptions.InvalidActionStatusException(actionstatus)
    params = dict(
        identifier=controlaction_id,
        actionStatus=StringConstant(actionstatus),
    )
    if error:
        params["error"] = error
    return mutation_create(params, UPDATE_CONTROLACTION)
def get_colours(time, flux, data, nuv=None, u=None, r=None, filter_pairs=None):
    """Calculate colours of the given SFH fluxes across time from the BC03 models.

    :time:
        Array of times at which the colours should be calculated, in Gyr.
    :flux:
        SED fluxes describing the calculated SFH (from
        fluxes.assign_total_flux in fluxes.py).
    :data:
        BC03 model values (wavelengths, time steps and fluxes); the
        wavelengths are needed to calculate the magnitudes.
    :nuv:, :u:, :r:
        Optional [wavelength, transmission] pairs for the NUV/u/r filters.
        They default to the module-level filter curves; the defaults are
        resolved inside the function so the signature carries no mutable
        list defaults.
    :filter_pairs:
        Optional list of ((blue_wave, blue_trans), (red_wave, red_trans))
        pairs.  When given, a list of colours (one per pair) is returned
        instead of the default (nuv-u, u-r) tuple.  The bluer filter must
        come first in each pair.
    RETURNS:
        :nuv_u:, :u_r: arrays the same shape as :time: with the predicted
        colours, or a list of colour arrays when :filter_pairs: is given.

    modified 11/7/2018 by BDS to allow user to specify different colours
    from nuv-u and u-r (those are still default).
    """
    # Resolve defaults here to avoid mutable (list) default arguments.
    if nuv is None:
        nuv = [nuvwave, nuvtrans]
    if u is None:
        u = [uwave, utrans]
    if r is None:
        r = [rwave, rtrans]
    if filter_pairs is None:
        nuvmag = fluxes.calculate_AB_mag(time, data[1:, 0], flux, nuv[0], nuv[1])
        umag = fluxes.calculate_AB_mag(time, data[1:, 0], flux, u[0], u[1])
        rmag = fluxes.calculate_AB_mag(time, data[1:, 0], flux, r[0], r[1])
        return nuvmag - umag, umag - rmag
    colours = []
    for bluer, redder in filter_pairs:
        bluermag = fluxes.calculate_AB_mag(time, data[1:, 0], flux, bluer[0], bluer[1])
        reddermag = fluxes.calculate_AB_mag(time, data[1:, 0], flux, redder[0], redder[1])
        colours.append(bluermag - reddermag)
    return colours
def loginpage():
    """A route to login the user."""
    current_year = date.today().year
    context = {
        "base": get_base_data(current_year),
        "route": Routes,
        "year": current_year,
        "github_enabled": is_github_supported(),
        "facebook_enabled": is_facebook_supported(),
        "gmail_enabled": is_gmail_supported(),
        "user_info": get_user_information(),
    }
    return render_template("website/login.html", **context)
from hypothesis.internal.conjecture.shrinker import dfa_replacement, sort_key
import math
def learn_a_new_dfa(runner, u, v, predicate):
    """Given two buffers ``u`` and ``v``, learn a DFA that will
    allow the shrinker to normalise them better. ``u`` and ``v``
    should not currently shrink to the same test case when calling
    this function.

    Both buffers must satisfy ``predicate`` when run through ``runner``.
    The DFA is learned (via L*) over the "core" region where the two
    fully-shrunk buffers differ, validated by running a
    ``dfa_replacement`` shrink pass, and returned in canonical form."""
    assert predicate(runner.cached_test_function(u))
    assert predicate(runner.cached_test_function(v))
    # Shrink both buffers as far as possible first, so we learn over
    # minimal-ish representatives rather than arbitrary ones.
    u_shrunk = fully_shrink(runner, u, predicate)
    v_shrunk = fully_shrink(runner, v, predicate)
    u, v = sorted((u_shrunk.buffer, v_shrunk.buffer), key=sort_key)
    assert u != v
    assert not v.startswith(u)
    # We would like to avoid using LStar on large strings as its
    # behaviour can be quadratic or worse. In order to help achieve
    # this we peel off a common prefix and suffix of the two final
    # results and just learn the internal bit where they differ.
    #
    # This potentially reduces the length quite far if there's
    # just one tricky bit of control flow we're struggling to
    # reduce inside a strategy somewhere and the rest of the
    # test function reduces fine.
    if v.endswith(u):
        prefix = b""
        suffix = u
        u_core = b""
        assert len(u) > 0
        v_core = v[: -len(u)]
    else:
        i = 0
        while u[i] == v[i]:
            i += 1
        prefix = u[:i]
        assert u.startswith(prefix)
        assert v.startswith(prefix)
        i = 1
        while u[-i] == v[-i]:
            i += 1
        suffix = u[max(len(prefix), len(u) + 1 - i) :]
        assert u.endswith(suffix)
        assert v.endswith(suffix)
        u_core = u[len(prefix) : len(u) - len(suffix)]
        v_core = v[len(prefix) : len(v) - len(suffix)]
    assert u == prefix + u_core + suffix, (list(u), list(v))
    assert v == prefix + v_core + suffix, (list(u), list(v))
    better = runner.cached_test_function(u)
    worse = runner.cached_test_function(v)
    allow_discards = worse.has_discards or better.has_discards
    def is_valid_core(s):
        # Membership oracle for L*: a candidate core is valid when the
        # reassembled buffer still satisfies the predicate exactly.
        if not (len(u_core) <= len(s) <= len(v_core)):
            return False
        buf = prefix + s + suffix
        result = runner.cached_test_function(buf)
        return (
            predicate(result)
            # Because we're often using this to learn strategies
            # rather than entire complex test functions, it's
            # important that our replacements are precise and
            # don't leave the rest of the test case in a weird
            # state.
            and result.buffer == buf
            # Because the shrinker is good at removing discarded
            # data, unless we need discards to allow one or both
            # of u and v to result in valid shrinks, we don't
            # count attempts that have them as valid. This will
            # cause us to match fewer strings, which will make
            # the resulting shrink pass more efficient when run
            # on test functions it wasn't really intended for.
            and (allow_discards or not result.has_discards)
        )
    assert sort_key(u_core) < sort_key(v_core)
    assert is_valid_core(u_core)
    assert is_valid_core(v_core)
    learner = LStar(is_valid_core)
    prev = -1
    # Iterate until the learner's hypothesis stops changing on our examples.
    while learner.generation != prev:
        prev = learner.generation
        learner.learn(u_core)
        learner.learn(v_core)
        # L* has a tendency to learn DFAs which wrap around to
        # the beginning. We don't want to it to do that unless
        # it's accurate, so we use these as examples to show
        # check going around the DFA twice.
        learner.learn(u_core * 2)
        learner.learn(v_core * 2)
        if learner.dfa.max_length(learner.dfa.start) > len(v_core):
            # The language we learn is finite and bounded above
            # by the length of v_core. This is important in order
            # to keep our shrink passes reasonably efficient -
            # otherwise they can match far too much. So whenever
            # we learn a DFA that could match a string longer
            # than len(v_core) we fix it by finding the first
            # string longer than v_core and learning that as
            # a correction.
            x = next(learner.dfa.all_matching_strings(min_length=len(v_core) + 1))
            assert not is_valid_core(x)
            learner.learn(x)
            assert not learner.dfa.matches(x)
            assert learner.generation != prev
        else:
            # We mostly care about getting the right answer on the
            # minimal test case, but because we're doing this offline
            # anyway we might as well spend a little more time trying
            # small examples to make sure the learner gets them right.
            for x in islice(learner.dfa.all_matching_strings(), 100):
                if not is_valid_core(x):
                    learner.learn(x)
                    assert learner.generation != prev
                    break
    # We've now successfully learned a DFA that works for shrinking
    # our failed normalisation further. Canonicalise it into a concrete
    # DFA so we can save it for later.
    new_dfa = learner.dfa.canonicalise()
    assert math.isfinite(new_dfa.max_length(new_dfa.start))
    # Sanity check: running a dfa_replacement shrink pass with the new DFA
    # must actually improve on v before we hand the DFA back.
    shrinker = runner.new_shrinker(runner.cached_test_function(v), predicate)
    assert (len(prefix), len(v) - len(suffix)) in shrinker.matching_regions(new_dfa)
    name = "tmp-dfa-" + repr(new_dfa)
    shrinker.extra_dfas[name] = new_dfa
    shrinker.fixate_shrink_passes([dfa_replacement(name)])
    assert sort_key(shrinker.buffer) < sort_key(v)
    return new_dfa | ff0ba6d5831099d8d5f392b3cd7dec59ac3a3663 | 3,630,990
def generate_corrected_profiles(corr_factor, fwd_border_dicts, fwd_profile, plasmid_length, scale):
    """Correct the edges of the transcription profiles.

    Each transcript contributes its raw profile divided by a per-position
    correction factor; the factor at a position is the smaller of the
    corrections for its distances to the 5' and 3' transcript ends.

    Args:
        corr_factor: 1-D array of correction factors indexed by distance
            from a transcript end (must cover the longest transcript).
        fwd_border_dicts: mapping of keys to lists of (start, end)
            transcript borders, both ends inclusive.
        fwd_profile: 1-D raw forward profile over the plasmid.
        plasmid_length: length of the output profile.
        scale: baseline value the normalised profile starts from.

    Returns:
        1-D array of length ``plasmid_length`` holding the corrected profile.
    """
    normalized_fwd_profile = np.ones(plasmid_length) * scale
    for key in sorted(fwd_border_dicts):
        for borders in fwd_border_dicts[key]:
            start, end = borders[0], borders[1]
            length = end - start + 1
            # Per-position correction = min(factor for 5' distance,
            # factor for 3' distance).  The 3' distances are just the 5'
            # distances reversed, so this vectorises to an elementwise
            # minimum of a slice and its reverse (replaces the former
            # per-base Python loop with identical results).
            edge = corr_factor[:length]
            correction = np.minimum(edge, edge[::-1])
            normalized_fwd_profile[start:end + 1] += fwd_profile[start:end + 1] / correction
    return normalized_fwd_profile
import json
import sys
import traceback
def base_handler(event: Event, loader_cls: type) -> Response:
    """Handler which is called when accessing the endpoint."""
    result: Response = {"statusCode": 200, "body": json.dumps("")}
    db_helper = DatabaseHelper()
    try:
        if "close_to_lon" in event and "close_to_lat" in event:
            point = Point(event["close_to_lon"], event["close_to_lat"])
        else:
            point = None
        loader = loader_cls(
            db_helper.get_connection_string(),
            point_of_interest=point,
            point_radius=event.get("radius", None),
        )
        LOGGER.info("Getting features...")
        pages = event.get("pages")
        if pages is not None:
            features = []
            for page in pages:
                features += loader.get_features(page)
        else:
            features = loader.get_features()
        # The features list may hold full features or only their ids; in the
        # latter case the loader fetches each feature before saving.
        msg = loader.save_features(
            features, event.get("do_not_update_timestamp", False)
        )
        result["body"] = json.dumps(msg)
    except Exception:
        # Report any failure as a 500 with the full traceback in the body.
        exc_string = traceback.format_exc()
        result["statusCode"] = 500
        result["body"] = exc_string
        LOGGER.exception(exc_string)
    return result
import os
def publish_image_dir():
    """
    Checks if publishing of images is configured and if so returns the
    directory to publish to (full path), otherwise None.
    """
    root_dir = publish_root_dir()
    # Identity check instead of `!= None`; early return for the
    # unconfigured case keeps the happy path unindented.
    if root_dir is None:
        return None
    rel_dir = get_value_with_default(['publish', 'package_dir'], 'images')
    return os.path.join(root_dir, rel_dir)
def transforToManagerProxyObject(manager, data):
    """Recursively convert ``data`` into multiprocessing-manager proxies.

    Dict keys are left untouched.  Only basic scalars, dicts, lists and
    tuples are supported; tuples are converted to (proxy) lists.

    Args:
        manager: a multiprocessing.Manager-like object offering ``dict()``
            and ``list()`` factories.
        data: the value to convert.

    Returns:
        A manager proxy mirroring the structure of ``data``, or ``data``
        itself when it is a plain scalar.
    """
    if isinstance(data, dict):
        result = manager.dict()
        for key in data:
            result[key] = transforToManagerProxyObject(manager, data[key])
    elif isinstance(data, (list, tuple)):
        result = manager.list()
        for item in data:
            result.append(transforToManagerProxyObject(manager, item))
    else:
        # Scalars pass through unchanged.  In the original code this
        # assignment had been swallowed into the docstring, leaving
        # `result` unbound (NameError) for scalar inputs.
        result = data
    return result
import json
import time
def get_new_account_id(event):
    """Return account id for new account events."""
    create_account_status_id = event["detail"].get(
        "responseElements", {}
    ).get("createAccountStatus", {})["id"]
    LOG.info("createAccountStatus = %s", create_account_status_id)
    org = boto3.client("organizations")
    # Poll until account creation reaches a terminal state.
    while True:
        account_status = org.describe_create_account_status(
            CreateAccountRequestId=create_account_status_id
        )
        state = account_status["CreateAccountStatus"]["State"].upper()
        if state == "FAILED":
            LOG.error("Account creation failed:\n%s", json.dumps(account_status))
            raise AccountCreationFailedException
        if state == "SUCCEEDED":
            return account_status["CreateAccountStatus"]["AccountId"]
        LOG.info("Account state: %s. Sleeping 5 seconds and will try again...", state)
        time.sleep(5)
from tvrenamer.common import tools
def list_opts():
    """Returns a list of oslo_config options available in the library.

    The returned list includes all oslo_config options which may be registered
    at runtime by the library.

    Each element of the list is a tuple. The first element is the name of the
    group under which the list of elements in the second element will be
    registered. A group name of None corresponds to the [DEFAULT] group in
    config files.

    The purpose of this is to allow tools like the Oslo sample config file
    generator to discover the options exposed to users by this library.

    :returns: a list of (group_name, opts) tuples
    """
    grouped_opts = (
        (None, [CLI_OPTS, EPISODE_OPTS, FORMAT_OPTS]),
        ('cache', [CACHE_OPTS]),
    )
    all_opts = []
    for group_name, opt_lists in grouped_opts:
        all_opts.extend(tools.make_opt_list(opt_lists, group_name))
    return all_opts
from datetime import datetime
def tokenize_for_t5_advice_training(encoder, subreddit=None, date=None, title=None,
                                    selftext=None, body=None):
    """
    Prepare the post title / post selftext / comment body for tokenization.
    If the selftext is too long, some of its paragraphs are cut at random.

    :param encoder: encoder used to measure/trim the selftext.
    :param subreddit: e.g. 'relationship_advice'.
    :param date: datetime obj like datetime.datetime(2019, 7, 31, 23, 51, 21),
        always UTC time.
    :param title: post title.
    :param selftext: post body text.
    :param body: comment body.
    :return: dict of cleaned article pieces, or None when selftext or body
        is shorter than 64 characters.
    """
    # Too-short posts or comments are not usable training examples.
    if len(selftext) < 64 or len(body) < 64:
        return None
    if not isinstance(date, datetime):
        raise ValueError("Date must be a datetime obj. Provided {}".format(date))
    month_names = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
                   'August', 'September', 'October', 'November', 'December']
    date_txt = f"{month_names[date.month - 1]} {date.day}, {date.year}"
    pieces = {
        'subreddit': subreddit,
        'date': date_txt,
        'title': title,
        'selftext': _trim_to_desired_length(encoder, selftext, desired_len=1250),
        'body': body,
    }
    return {key: _fix_reddit_text(value) for key, value in pieces.items()}
def hvdisp_plot_records_2d(records,
                           to_pe,
                           config,
                           t_reference,
                           time_stream=None,
                           tools=(x_zoom_wheel(), 'xpan'),
                           default_tools=('save', 'pan', 'box_zoom', 'save', 'reset'),
                           plot_library='bokeh',
                           hooks=()):
    """Plot records in a dynamic 2D histogram of (time, pmt).

    :param time_stream: holoviews RangeX stream to use. If provided,
        records is assumed to already be converted to points (which the
        stream is hopefully derived from).
    :param tools: tools for the interactive plot; only honoured with
        bokeh as the plot library.
    :param default_tools: always-available plot tools.
    :param plot_library: plotting backend, bokeh by default.
    :param hooks: hooks to adjust plot settings.
    :returns: the datashader plot object with the "Time vs. Channel"
        title applied.
    """
    # Delegate all heavy lifting; we only decorate the resulting shader.
    shader, _points, _stream = _hvdisp_plot_records_2d(
        records,
        to_pe,
        config,
        t_reference,
        time_stream=time_stream,
        default_tools=default_tools,
        tools=tools,
        hooks=hooks,
        plot_library=plot_library,
    )
    return shader.opts(title="Time vs. Channel")
import re
def join_url(url, *paths):
    """
    Join endpoint path segments onto an api url with single slashes.

    :param url: api url
    :param paths: endpoint segments
    :return: full url

    Note: the previous implementation used ``re.sub(r"/?$", ..., url)``;
    since Python 3.7 ``re.sub`` also replaces the empty match left after a
    consumed trailing slash, duplicating the appended segment
    (e.g. ``join_url("http://a/", "b")`` -> ``"http://a/b/b"``).  Explicit
    slash handling below reproduces the intended behaviour.
    """
    for path in paths:
        # Drop a single trailing slash from the url and a single leading
        # slash from the segment, then join with exactly one slash.
        if url.endswith("/"):
            url = url[:-1]
        if path.startswith("/"):
            path = path[1:]
        url = url + "/" + path
    return url
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.