content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def is_in_cell(point:list, corners:list) -> bool:
    """
    Check whether a point lies within the axis-aligned bounding box of a cell.

    :param point: (lat/Y, lon/X) coordinates of the point to test
    :param corners: list of corner coordinates; corners[0] and corners[2]
        must be diagonally opposite so together they span the cell
    :returns: True if the point is inside the cell (boundaries inclusive)
    """
    # corners[2] carries the lower Y bound and corners[0] the upper;
    # for X it is the reverse (corners[0] low, corners[2] high).
    y1, y2, x1, x2 = corners[2][0], corners[0][0], corners[0][1], corners[2][1]
    # Return the boolean expression directly instead of if/return True/return False.
    return (y1 <= point[0] <= y2) and (x1 <= point[1] <= x2)
def test_input_para_validator():
    """Test running a calculation
    note this does only a dry run to check if the calculation plugin works"""
    # Deliberately mistyped values; the builder's validator must reject each one.
    invalid_inputs = {
        'llg_n_iterations': 17.2,
        'mc_n_iterations': [1, 2, 3],
        'bravais_lattice': 'test'
    }
    for key, val in invalid_inputs.items():
        parameters = Dict(dict={key: val})
        builder = CalculationFactory('spirit').get_builder()
        caught = False
        try:
            builder.parameters = parameters
        except (TypeError, ValueError):
            caught = True
        # the invalid assignment must have raised
        assert caught
def json_response(function):
    """
    Decorator that converts :class:`~django.http.Http404` exceptions raised by
    the wrapped view into a :class:`~django.http.JsonResponse`.
    Without it, the exceptions would be converted to :class:`~django.http.HttpResponse`.
    :param function: The view function which should always return JSON
    :type function: ~collections.abc.Callable
    :return: The decorated function
    :rtype: ~collections.abc.Callable
    """
    @wraps(function)
    def wrap(request, *args, **kwargs):
        r"""
        Inner wrapper: run the decorated view unchanged and translate only a
        :class:`~django.http.Http404` into a JSON 404 response.
        :param request: Django request
        :type request: ~django.http.HttpRequest
        :param \*args: The supplied arguments
        :type \*args: list
        :param \**kwargs: The supplied kwargs
        :type \**kwargs: dict
        :return: The response of the given function or an 404 :class:`~django.http.JsonResponse`
        :rtype: ~django.http.JsonResponse
        """
        try:
            response = function(request, *args, **kwargs)
        except Http404 as e:
            # fall back to a generic message when the exception carries none
            message = str(e) or "Not found."
            return JsonResponse({"error": message}, status=404)
        return response
    return wrap
def plot_saliency(image, model):
    """Compute the saliency map for ``image`` and display it above the image itself."""
    saliency_map = get_saliency(image, model)
    plt.ion()
    fig, (saliency_ax, image_ax) = plt.subplots(2)
    # top panel: the saliency map; bottom panel: the original image
    saliency_ax.imshow(np.squeeze(saliency_map), cmap="viridis")
    hide_ticks(saliency_ax)
    image_ax.imshow(np.squeeze(image), cmap="gray")
    hide_ticks(image_ax)
    plt.pause(0.01)
    plt.show()
def generate_junit_report_from_cfn_guard(report):
    """Generate a JUnit XML report from a cloudformation-guard report.

    :param report: iterable of finding dicts with "message" and "file" keys
    :return: the JUnit XML document as a string
    """
    test_cases = []
    count_id = 0
    for file_findings in report:
        finding = file_findings["message"]
        # extract the resource id from the finding line, e.g. "[MyResource] ..."
        # (raw string: "\[" in a non-raw literal is an invalid escape sequence)
        resource_regex = re.search(r"^\[([^]]*)]", finding)
        if resource_regex:
            resource_id = resource_regex.group(1)
            test_case = TestCase(
                "%i - %s" % (count_id, finding),
                classname=resource_id)
            test_case.add_failure_info(output="%s#R:%s" % (file_findings["file"], resource_id))
            test_cases.append(test_case)
            count_id += 1
    test_suite = TestSuite("aws cfn-guard test suite", test_cases)
    return TestSuite.to_xml_string([test_suite], prettyprint=False)
def new_custom_alias():
    """
    Create a new custom alias
    Input:
        alias_prefix, for ex "www_groupon_com"
        alias_suffix, either .random_letters@simplelogin.co or @my-domain.com
        optional "hostname" in args
    Output:
        201 if success
        409 if the alias already exists
    """
    user = g.user
    # Free-plan users are capped; reject early when the quota is reached.
    if not user.can_create_new_alias():
        LOG.d("user %s cannot create any custom alias", user)
        return (
            jsonify(
                error="You have reached the limitation of a free account with the maximum of "
                f"{MAX_NB_EMAIL_FREE_PLAN} aliases, please upgrade your plan to create more aliases"
            ),
            400,
        )
    user_custom_domains = [cd.domain for cd in user.verified_custom_domains()]
    # "hostname" comes from the query string; the alias parts from the JSON body.
    hostname = request.args.get("hostname")
    data = request.get_json()
    if not data:
        return jsonify(error="request body cannot be empty"), 400
    alias_prefix = data.get("alias_prefix", "").strip()
    alias_suffix = data.get("alias_suffix", "").strip()
    # presumably normalizes the prefix to a safe identifier form -- confirm
    # against convert_to_id's definition
    alias_prefix = convert_to_id(alias_prefix)
    # the suffix must belong to the user's verified domains or match the
    # expected random-suffix scheme; anything else is rejected
    if not verify_prefix_suffix(user, alias_prefix, alias_suffix, user_custom_domains):
        return jsonify(error="wrong alias prefix or suffix"), 400
    full_alias = alias_prefix + alias_suffix
    if GenEmail.get_by(email=full_alias):
        LOG.d("full alias already used %s", full_alias)
        return jsonify(error=f"alias {full_alias} already exists"), 409
    gen_email = GenEmail.create(user_id=user.id, email=full_alias)
    db.session.commit()
    # remember on which website the alias was used, when provided
    if hostname:
        AliasUsedOn.create(gen_email_id=gen_email.id, hostname=hostname)
        db.session.commit()
    return jsonify(alias=full_alias), 201
def setup_pen_kw(penkw=None, **kw):
    """
    Builds a pyqtgraph pen (object containing color, linestyle, etc. information) from Matplotlib keywords.
    Please dealias first.
    :param penkw: dict or None
        Dictionary of pre-translated pyqtgraph keywords to pass to pen
    :param kw: dict
        Dictionary of Matplotlib style plot keywords in which line plot relevant settings may be specified. The entire
        set of mpl plot keywords may be passed in, although only the keywords related to displaying line plots will be
        used here.
    :return: pyqtgraph pen instance
        A pen which can be input with the pen keyword to many pyqtgraph functions
    """
    # A mutable default ({}) would be shared and mutated across calls; create
    # a fresh dict per call instead.
    if penkw is None:
        penkw = {}
    # Move the easy keywords over directly
    direct_translations_pen = {  # plotkw: pgkw
        'linewidth': 'width',
    }
    for direct in direct_translations_pen:
        penkw[direct_translations_pen[direct]] = kw.pop(direct, None)
    # Handle colors and styles
    penkw['color'] = color_translator(**kw)
    penkw['style'] = style_translator(**kw)
    # Prune values of None
    penkw = {k: v for k, v in penkw.items() if v is not None}
    # No usable settings at all -> no pen
    return pg.mkPen(**penkw) if len(penkw.keys()) else None
def seq(seq_aps):
    """Sequence of parsers `seq_aps`."""
    # Empty sequence parses to an empty list; otherwise cons the head
    # parser's result onto the sequenced tail.
    if seq_aps:
        head, *tail = seq_aps
        return head << cons >> seq(tail)
    return succeed(list())
def test_bad_multiplication():
    """Tests that a multiplication fails."""
    # NOTE(review): algebraically (x + 1) * (x - 1) == x*x - 1, so this
    # assertion is expected to fail -- presumably the suite deliberately
    # checks that a wrong product is detected. Confirm before "fixing".
    x = UniPoly(1, 'x', 1)
    p = x + 1
    q = x - 1
    assert p * q == x * x + 1
def Growth_factor_Heath(omega_m, z):
    """
    Unnormalised growth factor at redshift ``z`` for present-day ``omega_m``,
    using the Heath (1977) integral expression.
    Assumes Flat LCDM cosmology, which is fine given this is also assumed in CambGenerator. Possible improvement
    could be to tabulate this using the CambGenerator so that it would be self consistent for non-LCDM cosmologies.
    :param omega_m: the matter density at the present day
    :param z: the redshift we want the matter density at
    :return: the unnormalised growth factor at redshift z.
    """
    # Integrate da / (a * E(a))^3 from a ~ 0 up to a = 1/(1+z) on a log-spaced grid.
    a_grid = np.logspace(-4.0, np.log10(1.0 / (1.0 + z)), 10000)
    integrand = 1.0 / (a_grid * E_z(omega_m, 1.0 / a_grid - 1.0)) ** 3
    growth_integral = integrate.simps(integrand, a_grid, axis=0)
    # Heath (1977): D(z) proportional to 5/2 * Omega_m * E(z) * integral
    return 5.0 / 2.0 * omega_m * E_z(omega_m, z) * growth_integral
def freq2bark(freq_axis):
    """ Frequency conversion from Hertz to Bark
    See E. Zwicker, H. Fastl: Psychoacoustics. Springer,Berlin, Heidelberg, 1990.
    The coefficients are linearly interpolated from the values given in table 6.1.
    Parameter
    ---------
    freq_axis : numpy.array
        Hertz frequencies to be converted
    Output
    ------
    bark_axis : numpy.array
        frequencies converted in Bark
    """
    # Band edges from table 6.1 in Hz; consecutive edges are 0.5 Bark apart.
    hz_edges = np.array([0, 50, 100, 150, 200, 250, 300, 350, 400,
                         450, 510, 570, 630, 700, 770, 840, 920, 1000,
                         1080, 1170, 1270, 1370, 1480, 1600, 1720, 1850, 2000,
                         2150, 2320, 2500, 2700, 2900, 3150, 3400, 3700, 4000,
                         4400, 4800, 5300, 5800, 6400, 7000, 7700, 8500, 9500,
                         10500, 12000, 13500, 15500, 20000])
    # 50 values: 0.0, 0.5, ..., 24.5 Bark (same as np.arange(0, 25, 0.5))
    bark_edges = np.linspace(0.0, 24.5, 50)
    return np.interp(freq_axis, hz_edges, bark_edges)
def main(argv):
    """Run tests, return number of failures (integer)."""
    # insert our paths in sys.path:
    # ../build/lib.*
    # ..
    # Q. Why this order?
    # A. To find the C modules (which are in ../build/lib.*/Bio)
    # Q. Then, why ".."?
    # A. Because Martel may not be in ../build/lib.*
    test_path = sys.path[0] or "."
    source_path = os.path.abspath(f"{test_path}/..")
    sys.path.insert(1, source_path)
    # BUGFIX: sys.version[:3] yields "3.1" on Python >= 3.10; build the
    # "major.minor" string from sys.version_info instead.
    python_mm = f"{sys.version_info.major}.{sys.version_info.minor}"
    build_path = os.path.abspath(
        f"{test_path}/../build/lib.{distutils.util.get_platform()}-{python_mm}"
    )
    if os.access(build_path, os.F_OK):
        sys.path.insert(1, build_path)
    # Using "export LANG=C" (which should work on Linux and similar) can
    # avoid problems detecting optional command line tools on
    # non-English OS (we may want 'command not found' in English).
    # HOWEVER, we do not want to change the default encoding which is
    # rather important on Python 3 with unicode.
    # lang = os.environ['LANG']
    # get the command line options
    try:
        opts, args = getopt.getopt(
            argv, "gv", ["generate", "verbose", "doctest", "help", "offline"]
        )
    except getopt.error as msg:
        print(msg)
        print(__doc__)
        return 2
    verbosity = VERBOSITY
    # deal with the options
    for opt, _ in opts:
        if opt == "--help":
            print(__doc__)
            return 0
        if opt == "--offline":
            print("Skipping any tests requiring internet access")
            EXCLUDE_DOCTEST_MODULES.extend(ONLINE_DOCTEST_MODULES)
            # This is a bit of a hack...
            import requires_internet
            requires_internet.check.available = False
            # Monkey patch for urlopen() so any accidental network use fails loudly
            import urllib.request

            def dummy_urlopen(url):
                raise RuntimeError(
                    "Internal test suite error, attempting to use internet despite --offline setting"
                )

            urllib.request.urlopen = dummy_urlopen
        if opt == "-v" or opt == "--verbose":
            verbosity = 2
    # deal with the arguments, which should be names of tests to run
    for arg_num in range(len(args)):
        # strip off the .py if it was included
        if args[arg_num][-3:] == ".py":
            args[arg_num] = args[arg_num][:-3]
    print(f"Python version: {sys.version}")
    print(f"Operating system: {os.name} {sys.platform}")
    # run the tests
    runner = TestRunner(args, verbosity)
    return runner.run()
def close_connection(conn: Connection):
    """
    Close the given connection, if any.

    :param conn Connection: Connection to close.
    :returns: True when a connection was closed, False when there was none.
    """
    # Guard clause: nothing to close.
    if not conn:
        return False
    conn.close()
    return True
def tile_memory_free(y, shape):
    """
    XXX Will be deprecated
    Append one trailing singleton axis per entry of ``shape`` so that ``y``
    broadcasts against arrays of shape (n, *shape) without allocating new
    memory.
    Parameters
    ----------
    y : np.array, shape (n,)
        data
    shape : np.array, shape (m),
    Returns
    -------
    Y : np.array, shape (n, 1, ..., 1)
        view of ``y`` that is broadcast-compatible with (n, *shape)
    """
    import warnings
    warnings.warn('Will be deprecated. Use np.newaxis instead')
    result = y
    # one singleton axis per requested trailing dimension
    for _ in shape:
        result = result[..., np.newaxis]
    return result
def load_ref_system():
    """ Returns d-talose as found in the IQMol fragment library.
    All credit to https://github.com/nutjunkie/IQmol
    """
    # Geometry block: one atom per line -- element symbol followed by
    # Cartesian x, y, z coordinates (presumably Angstrom, per the IQmol
    # fragment library conventions -- confirm against psr.make_system).
    return psr.make_system("""
        C     -0.6934     -0.4440     -0.1550
        C     -2.0590      0.1297      0.3312
        C     -3.1553     -0.9249      0.1673
        O     -0.9091     -0.8895     -1.4780
        C      0.4226      0.6500     -0.0961
        O     -1.9403      0.6391      1.6411
        O     -3.6308     -1.5177      1.1069
        C      1.7734      0.0930     -0.6280
        O      0.6442      1.1070      1.2190
        C      2.7961      1.2385     -0.8186
        O      2.2979     -0.9417      0.1683
        O      3.8858      0.8597     -1.6117
        H     -0.4009     -1.3143      0.4844
        H     -2.3349      1.0390     -0.2528
        H     -3.4909     -1.1261     -0.8615
        H     -0.0522     -1.1155     -1.8272
        H      0.1195      1.5189     -0.7325
        H     -2.0322     -0.0862      2.2502
        H      1.5977     -0.4374     -1.5988
        H     -0.2204      1.2523      1.6061
        H      3.1423      1.6308      0.1581
        H      2.3529      2.0761     -1.3846
        H      2.4151     -0.5980      1.0463
        H      4.2939      0.1096     -1.1961
        """)
def PVLPrint (VLTable, image, streamname, err):
    """
    Print the contents of a VL (survey catalog) table
    * VLTable    = VL table to print
    * image      = Image to which VL table is attached
    * streamname = Name of stream to use, "stdout", or "stderr"
    * err        = Python Obit Error/message stack
    """
    ################################################################
    # Checks: only the table type is validated here; the underlying C
    # routine reports any further problems through the err stack.
    if not Table.PIsA(VLTable):
        raise TypeError("VLTable MUST be a Python Obit Table")
    # Delegate the actual printing to the Obit C binding.
    Obit.OSurveyVLPrint(VLTable.me, image.me, streamname, err.me)
    # end PVLPrint
def format_user_id(user_id):
    """
    Wrap a Slack user id in mention syntax so Slack renders it as a tag.
    Args:
        user_id (str): A slack user id
    Returns:
        str: A user id in a Slack tag
    """
    return "<@{}>".format(user_id)
def test_module(params: dict):
"""
Returning 'ok' indicates that the integration works like it is supposed to.
This test works by running the listening server to see if it will run.
Args:
params (dict): The integration parameters
Returns:
'ok' if test passed, anything else will fail the test.
"""
try:
certificate = str(params.get('certificate'))
private_key = str(params.get('private_key'))
certificate_file = NamedTemporaryFile(mode='w', delete=False)
certificate_path = certificate_file.name
certificate_file.write(certificate)
certificate_file.close()
private_key_file = NamedTemporaryFile(mode='w', delete=False)
private_key_path = private_key_file.name
private_key_file.write(private_key)
private_key_file.close()
s = socket.socket()
ssl.wrap_socket(s, keyfile=private_key_path, certfile=certificate_path, server_side=True,
ssl_version=ssl.PROTOCOL_TLSv1_2)
return 'ok'
except ssl.SSLError as e:
if e.reason == 'KEY_VALUES_MISMATCH':
return 'Private and Public keys do not match'
except Exception as e:
return f'Test failed with the following error: {repr(e)}' | 32,617 |
def class_loss_regr(num_classes, num_cam):
    """Loss function for rpn regression
    Args:
        num_classes: number of object classes
        num_cam : number of cam (3 in here)
    Returns:
        Smooth L1 loss function
            0.5*x*x (if x_abs < 1)
            x_abx - 0.5 (otherwise)
    """
    def class_loss_regr_fixed_num(y_true, y_pred):
        # y_true is laid out as [class indicator | regression targets]:
        # the first num_cam*4*num_classes entries mask which targets are
        # active; the remainder are the target values themselves.
        #x = y_true[:, :, 4*num_classes:] - y_pred
        x = y_true[:, :, num_cam*4*num_classes:] - y_pred
        x_abs = K.abs(x)
        # x_bool selects the quadratic branch of smooth L1 (|x| <= 1)
        x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
        # Normalize by the number of active targets (epsilon avoids /0).
        #return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
        return lambda_cls_regr * K.sum(y_true[:, :, :num_cam*4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :num_cam*4*num_classes])
        #return lambda_cls_regr * K.sum(y_true[:, :, :num_cam*4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :num_cam*4*num_classes]) * 0
    return class_loss_regr_fixed_num
def smtplib_connector(hostname, port, username=None, password=None, use_ssl=False):
    """ A utility class that generates an SMTP connection factory.
    :param str hostname: The SMTP server's hostname
    :param int port: The SMTP server's connection port
    :param str username: The SMTP server username
    :param str password: The SMTP server password
    :param bool use_ssl: Whether to use SSL
    :returns: A zero-argument callable that opens and returns a logged-in
        SMTP connection.
    """
    def connect():
        # Imports are local so the factory has no import-time dependencies.
        import smtplib
        ctor = smtplib.SMTP_SSL if use_ssl else smtplib.SMTP
        conn = ctor(hostname, port)
        if use_ssl:
            import ssl
            # NOTE(review): with use_ssl the connection is already TLS
            # (SMTP_SSL), yet starttls() is issued on top of it -- servers
            # normally reject STARTTLS on an implicit-TLS session. Confirm
            # whether this branch was meant for a separate use_tls flag.
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
            conn.ehlo()
            conn.starttls(context=context)
            conn.ehlo()
        if username or password:
            conn.login(username, password)
        return conn
    return connect
def _to_one_hot_sequence(indexed_sequence_tensors):
    """Convert ints in sequence to one-hots.
    Turns indices (in the sequence) into one-hot vectors.
    Args:
        indexed_sequence_tensors: dict containing SEQUENCE_KEY field.
        For example: {
            'sequence': '[1, 3, 3, 4, 12, 6]'  # This is the amino acid sequence.
            ... }
    Returns:
        indexed_sequence_tensors with the same overall structure as the input,
        except that SEQUENCE_KEY field has been transformed to a one-hot
        encoding.
        For example:
        {
            # The first index in sequence is from letter C, which
            # is at index 1 in the amino acid vocabulary, and the second is from
            # E, which is at index 4.
            SEQUENCE_KEY: [[0, 1, 0, ...], [0, 0, 0, 1, 0, ...]...]
            ...
        }
    """
    # Mutates the input dict in place: the one-hot depth is the size of the
    # amino-acid vocabulary, so each index becomes a vocabulary-length vector.
    indexed_sequence_tensors[SEQUENCE_KEY] = tf.one_hot(
        indices=indexed_sequence_tensors[SEQUENCE_KEY],
        depth=len(utils.AMINO_ACID_VOCABULARY))
    return indexed_sequence_tensors
def kaiser_smooth(x, beta, window_len=41):
    """Smooth *x* with a normalized Kaiser window.

    :param x: 1-D numpy array of samples (must have at least ``window_len`` entries)
    :param beta: Kaiser window shape parameter
    :param window_len: window length; needs to be odd for a proper
        (symmetric, zero-delay) response. Defaults to 41 as before.
    :returns: smoothed array with the same length as ``x``
    """
    # Mirror-extend the data at both ends so the window can be applied
    # at the borders without shrinking the output.
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]  # start:stop:step
    # Normalize the window so smoothing preserves the signal level.
    w = np.kaiser(window_len, beta)
    y = np.convolve(w/w.sum(), s, mode='valid')
    # Trim the mirrored extension back off. The original hard-coded 20 here,
    # which is only correct for window_len == 41.
    half = (window_len - 1) // 2
    return y[half:len(y) - half]
def get_indel_dicts(bamfile, target):
    """Get all insertions in alignments within target regions.

    :param bamfile: path to an indexed BAM file
    :param target: path to a BED file of target regions
    :return: dict mapping locus -> (tuple of unique insertion lengths, coverage)
    """
    samfile = pysam.AlignmentFile(bamfile, "rb")
    indel_coverage = defaultdict(int)
    indel_length = defaultdict(list)
    indel_length_coverage = dict()
    for c, s, e in parse_bed(target):
        # Pad by 151 bp (presumably one read length -- confirm) so reads
        # overlapping the region edges are fetched too.
        s = int(s) - 151
        e = int(e) + 151
        for alignment in samfile.fetch(c, int(s), int(e)):
            if good_alignment(alignment) and cigar_has_insertion(alignment.cigarstring):
                read_start = alignment.get_reference_positions(full_length=True)[0]
                if read_start is None:
                    # first base is unaligned (clip/insertion); skip the read
                    continue
                locus, length = parse_cigartuple(alignment.cigar, read_start,
                                                 alignment.reference_name)
                if pos_in_interval(locus.split(':')[1], s, e):
                    # defaultdict(list) makes the original "if locus in dict"
                    # membership check redundant -- append unconditionally.
                    indel_length[locus].append(length)
                    indel_coverage[locus] += 1
    samfile.close()
    for locus, coverage in indel_coverage.items():
        indel_length_coverage[locus] = tuple(set(indel_length[locus])), int(coverage)
    return indel_length_coverage
def _ComputeRelativeAlphaBeta(omega_b, position_b, apparent_wind_b):
"""Computes the relative alpha and beta values, in degrees, from kinematics.
Args:
omega_b: Array of size (n, 3). Body rates of the kite [rad/s].
position_b: Array of size (1, 3). Position of the surface to compute local
alpha/beta [m].
apparent_wind_b: Array of size (n,3). Apparent wind vector from the state
estimator [m/s].
Returns:
local_alpha_deg, local_beta_deg: The values of local alpha and beta.
The math for a relative angle of attack at a given section is as follows:
(1) Kinematically:
v_section_b = apparent_wind_b - omega_b X position_b
(2) By definition:
alpha_rad = atan2(-v_section_b_z, -v_section_b_x)
beta_rad = asin(-v_section_b_y, mag(v_section_b))
where _x, _y, _z denote the unit basis vectors in the body coordinates.
"""
assert np.shape(omega_b) == np.shape(apparent_wind_b)
# The subtraction is because the cross product is the rigid body motion
# but the reference frame for the aero has the opposite effect of the
# motion of the rigid body motion frame.
local_vel = apparent_wind_b - np.cross(omega_b, position_b, axisa=1,
axisb=1)
local_vel_mag = np.linalg.norm(local_vel, axis=1)
local_alpha_deg = np.rad2deg(np.arctan2(-1.0 * local_vel[:, 2],
-1.0 * local_vel[:, 0]))
local_beta_deg = np.rad2deg(np.arcsin(-1.0 * local_vel[:, 1]
/ local_vel_mag))
return local_alpha_deg, local_beta_deg | 32,623 |
def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):
    """Make a chord with the given duration.
    midi_nums: sequence of int MIDI note numbers
    duration: float seconds
    sig_cons: Signal constructor function
    framerate: int frames per second
    returns: Wave
    """
    # One signal per note, mixed by summation.
    freqs = [midi_to_freq(num) for num in midi_nums]
    signal = sum(sig_cons(freq) for freq in freqs)
    wave = signal.make_wave(duration, framerate=framerate)
    # Taper the ends of the wave to avoid clicks.
    wave.apodize()
    return wave
def calculate_percent(partial, total):
    """Return ``partial`` as a percentage of ``total``, formatted as a string."""
    # Guard against division by zero: an empty total counts as 0%.
    percent = round(partial / total * 100, 2) if total else 0
    return f'{percent}%'
def test_item_not_available(app):
    """Test item not available."""
    item_pid = "1"
    transition = "checkout"
    expected_msg = (
        f"The item requested with pid '{item_pid}' is not available. "
        f"Transition to '{transition}' has failed."
    )
    # Raising the error ourselves lets us verify its code and description.
    with pytest.raises(ItemNotAvailableError) as ex:
        raise ItemNotAvailableError(item_pid=item_pid, transition=transition)
    assert ex.value.code == 400
    assert ex.value.description == expected_msg
def test_resolve_variable_no_type() -> None:
    """Test that resolve_variable raises when no variable type is provided."""
    # Passing None as the type must raise VariableTypeRequired.
    with pytest.raises(VariableTypeRequired):
        resolve_variable("name", {}, None, "test")
def okgets(urls):
    """Multi-threaded requests.get, only returning valid response objects
    :param urls: A container of str URLs
    :returns: A tuple of requests.Response objects
    """
    # nest() chains the three stages (presumably function composition --
    # confirm against its definition):
    #   1. ripper(requests.get): fetch every URL (apparently concurrently)
    #   2. filt(statusok): keep only responses whose status is OK
    #   3. tuple: materialize the surviving responses
    return nest(
        ripper(requests.get),
        filt(statusok),
        tuple
    )(urls)
def check_content_type(content_type):
    """ Checks that the media type is correct """
    # A mismatching media type is logged and rejected with 415; a match
    # simply falls through.
    if request.headers['Content-Type'] != content_type:
        app.logger.error('Invalid Content-Type: %s',
                         request.headers['Content-Type'])
        abort(415, 'Content-Type must be {}'.format(content_type))
def worker(args):
    """
    This function does the work of returning a URL for the NDSE view
    :param args: dict with keys "ds_return_url", "starting_view",
        "envelope_id", "base_path", "ds_access_token", "account_id"
    :return: dict with the single key "redirect_url"
    """
    # Step 1. Create the NDSE view request object
    # Set the url where you want the recipient to go once they are done
    # with the NDSE. It is usually the case that the
    # user will never "finish" with the NDSE.
    # Assume that control will not be passed back to your app.
    view_request = ConsoleViewRequest(return_url=args["ds_return_url"])
    # Open directly on a specific envelope when one was requested.
    if args["starting_view"] == "envelope" and args["envelope_id"]:
        view_request.envelope_id = args["envelope_id"]
    # Step 2. Get the console view url
    # Exceptions will be caught by the calling function
    api_client = ApiClient()
    api_client.host = args["base_path"]
    api_client.set_default_header("Authorization", "Bearer " + args["ds_access_token"])
    envelope_api = EnvelopesApi(api_client)
    results = envelope_api.create_console_view(args["account_id"], console_view_request=view_request)
    url = results.url
    return {"redirect_url": url}
def plot_horiz_xsection_quiver_map(Grids, ax=None,
                                   background_field='reflectivity',
                                   level=1, cmap='pyart_LangRainbow12',
                                   vmin=None, vmax=None,
                                   u_vel_contours=None,
                                   v_vel_contours=None,
                                   w_vel_contours=None,
                                   wind_vel_contours=None,
                                   u_field='u', v_field='v', w_field='w',
                                   show_lobes=True, title_flag=True,
                                   axes_labels_flag=True,
                                   colorbar_flag=True,
                                   colorbar_contour_flag=False,
                                   bg_grid_no=0, contour_alpha=0.7,
                                   coastlines=True,
                                   quiver_spacing_x_km=10.0,
                                   quiver_spacing_y_km=10.0,
                                   gridlines=True,
                                   quiverkey_len=5.0,
                                   quiverkey_loc='best',
                                   quiver_width=0.01):
    """
    This procedure plots a horizontal cross section of winds from wind fields
    generated by PyDDA using quivers onto a geographical map. The length of
    the quivers varies with wind speed.

    Parameters
    ----------
    Grids: list
        List of Py-ART Grids to visualize
    ax: matplotlib axis handle (with cartopy ccrs)
        The axis handle to place the plot on. Set to None to create a new map.
        Note: the axis needs to be in a PlateCarree() projection. Support for
        other projections is planned in the future.
    background_field: str
        The name of the background field to plot the quivers on.
    level: int
        The number of the vertical level to plot the cross section through.
    cmap: str or matplotlib colormap
        The name of the matplotlib colormap to use for the background field.
    vmin: float
        The minimum bound to use for plotting the background field. None will
        automatically detect the background field minimum.
    vmax: float
        The maximum bound to use for plotting the background field. None will
        automatically detect the background field maximum.
    u_vel_contours: 1-D array
        The contours to use for plotting contours of u. Set to None to not
        display such contours.
    v_vel_contours: 1-D array
        The contours to use for plotting contours of v. Set to None to not
        display such contours.
    w_vel_contours: 1-D array
        The contours to use for plotting contours of w. Set to None to not
        display such contours.
    wind_vel_contours: 1-D array
        The contours to use for plotting contours of horizontal wind speed.
        Set to None to not display such contours.
    u_field: str
        Name of zonal wind (u) field in Grids.
    v_field: str
        Name of meridional wind (v) field in Grids.
    w_field: str
        Name of vertical wind (w) field in Grids.
    show_lobes: bool
        If True, the dual doppler lobes from each pair of radars will be shown.
    title_flag: bool
        If True, PyDDA will generate a title for the plot.
    axes_labels_flag: bool
        If True, PyDDA will generate axes labels for the plot.
    colorbar_flag: bool
        If True, PyDDA will generate a colorbar for the plot background field.
    colorbar_contour_flag: bool
        If True, PyDDA will generate a colorbar for the contours.
    bg_grid_no: int
        Number of grid in Grids to take background field from.
        Set to -1 to use maximum value from all grids.
    contour_alpha: float
        Alpha (transparency) of velocity contours. 0 = transparent, 1 = opaque
    coastlines: bool
        Set to true to display coastlines.
    quiver_spacing_x_km: float
        Spacing in km between quivers in x axis.
    quiver_spacing_y_km: float
        Spacing in km between quivers in y axis.
    gridlines: bool
        Set to true to show grid lines.
    quiverkey_len: float
        Length to use for the quiver key in m/s.
    quiverkey_loc: str
        Location of quiverkey. One of:
        'best'
        'top_left'
        'top'
        'top_right'
        'bottom_left'
        'bottom'
        'bottom_right'
        'left'
        'right'
        'top_left_outside'
        'top_right_outside'
        'bottom_left_outside'
        'bottom_right_outside'
        'best' will put the quiver key in the corner with the fewest amount of
        valid data points while keeping the quiver key inside the plot.
        The rest of the options will put the quiver key in that
        particular part of the plot.
    quiver_width: float
        The width of the lines for the quiver given as a fraction
        relative to the plot width. Use this to specify the thickness
        of the quiver lines.

    Returns
    -------
    ax: matplotlib axis
        Axis handle to output axis
    """
    # Background field: a single grid, or the max over all grids when -1.
    if(bg_grid_no > -1):
        grid_bg = Grids[bg_grid_no].fields[background_field]['data']
    else:
        grid_array = np.ma.stack(
            [x.fields[background_field]['data'] for x in Grids])
        grid_bg = grid_array.max(axis=0)
    if(vmin is None):
        vmin = grid_bg.min()
    if(vmax is None):
        vmax = grid_bg.max()
    # Geometry in km (for spacing) and degrees (for the map projection).
    grid_h = Grids[0].point_altitude['data']/1e3
    grid_x = Grids[0].point_x['data']/1e3
    grid_y = Grids[0].point_y['data']/1e3
    grid_lat = Grids[0].point_latitude['data'][level]
    grid_lon = Grids[0].point_longitude['data'][level]
    qloc_x, qloc_y = _parse_quiverkey_string(
        quiverkey_loc, grid_h[level], grid_x[level],
        grid_y[level], grid_bg[level])
    dx = np.diff(grid_x, axis=2)[0, 0, 0]
    dy = np.diff(grid_y, axis=1)[0, 0, 0]
    # Fill masked winds with NaN so quiver/contour skip missing data.
    if(np.ma.isMaskedArray(Grids[0].fields[u_field]['data'])):
        u = Grids[0].fields[u_field]['data'].filled(fill_value=np.nan)
    else:
        u = Grids[0].fields[u_field]['data']
    if(np.ma.isMaskedArray(Grids[0].fields[v_field]['data'])):
        v = Grids[0].fields[v_field]['data'].filled(fill_value=np.nan)
    else:
        v = Grids[0].fields[v_field]['data']
    # BUGFIX: the original tested u_field's maskedness to decide how to fill w.
    if(np.ma.isMaskedArray(Grids[0].fields[w_field]['data'])):
        w = Grids[0].fields[w_field]['data'].filled(fill_value=np.nan)
    else:
        w = Grids[0].fields[w_field]['data']
    transform = ccrs.PlateCarree()
    if(ax is None):
        ax = plt.axes(projection=transform)
    the_mesh = ax.pcolormesh(grid_lon[:, :], grid_lat[:, :],
                             grid_bg[level, :, :],
                             cmap=cmap, transform=transform, zorder=0,
                             vmin=vmin, vmax=vmax)
    # Thin the quivers to the requested spacing (grid spacing is in km).
    quiver_density_x = int((1/dx)*quiver_spacing_x_km)
    quiver_density_y = int((1/dy)*quiver_spacing_y_km)
    q = ax.quiver(grid_lon[::quiver_density_y, ::quiver_density_x],
                  grid_lat[::quiver_density_y, ::quiver_density_x],
                  u[level, ::quiver_density_y, ::quiver_density_x],
                  v[level, ::quiver_density_y, ::quiver_density_x],
                  transform=transform, width=quiver_width,
                  scale=25.*quiverkey_len)
    quiver_font = {'family': 'sans-serif',
                   'style': 'normal',
                   'variant': 'normal',
                   'weight': 'bold',
                   'size': 'medium'}
    ax.quiverkey(q, qloc_x, qloc_y,
                 quiverkey_len, label=(str(quiverkey_len) + ' m/s'),
                 fontproperties=quiver_font)
    if(colorbar_flag is True):
        cp = Grids[bg_grid_no].fields[background_field]['long_name']
        # (the original called cp.replace(' ', '_') without using the result --
        # a no-op on an immutable str, so it has been removed)
        cp = cp + ' [' + Grids[bg_grid_no].fields[background_field]['units']
        cp = cp + ']'
        plt.colorbar(the_mesh, ax=ax, label=(cp))
    if(u_vel_contours is not None):
        u_filled = np.ma.masked_where(u[level, :, :] < np.min(u_vel_contours),
                                      u[level, :, :])
        try:
            cs = ax.contourf(grid_lon[:, :], grid_lat[:, :],
                             u_filled, levels=u_vel_contours, linewidths=2,
                             alpha=contour_alpha, zorder=2, extend='both')
            cs.set_clim([np.min(u_vel_contours), np.max(u_vel_contours)])
            cs.cmap.set_under(color='white', alpha=0)
            cs.cmap.set_over(color='white', alpha=0)
            cs.cmap.set_bad(color='white', alpha=0)
            ax.clabel(cs)
            if(colorbar_contour_flag is True):
                ax2 = plt.colorbar(cs, ax=ax, label='U [m/s]', extend='both',
                                   spacing='proportional')
        except ValueError:
            warnings.warn(("Cartopy does not support blank contour plots, " +
                           "contour color map not drawn!"), RuntimeWarning)
    if(v_vel_contours is not None):
        v_filled = np.ma.masked_where(v[level, :, :] < np.min(v_vel_contours),
                                      v[level, :, :])
        try:
            # BUGFIX: the original passed levels=u_vel_contours here.
            cs = ax.contourf(grid_lon[:, :], grid_lat[:, :],
                             v_filled, levels=v_vel_contours, linewidths=2,
                             alpha=contour_alpha, zorder=2, extend='both')
            cs.set_clim([np.min(v_vel_contours), np.max(v_vel_contours)])
            cs.cmap.set_under(color='white', alpha=0)
            cs.cmap.set_over(color='white', alpha=0)
            cs.cmap.set_bad(color='white', alpha=0)
            ax.clabel(cs)
            if(colorbar_contour_flag is True):
                ax2 = plt.colorbar(cs, ax=ax, label='V [m/s]', extend='both',
                                   spacing='proportional')
        except ValueError:
            warnings.warn(("Cartopy does not support blank contour plots, " +
                           "contour color map not drawn!"), RuntimeWarning)
    if(w_vel_contours is not None):
        w_filled = np.ma.masked_where(w[level, :, :] < np.min(w_vel_contours),
                                      w[level, :, :])
        try:
            cs = ax.contourf(grid_lon[::, ::], grid_lat[::, ::],
                             w_filled, levels=w_vel_contours, linewidths=2,
                             alpha=contour_alpha, zorder=2, extend='both')
            cs.set_clim([np.min(w_vel_contours), np.max(w_vel_contours)])
            cs.cmap.set_under(color='white', alpha=0)
            cs.cmap.set_over(color='white', alpha=0)
            cs.cmap.set_bad(color='white', alpha=0)
            ax.clabel(cs)
            if(colorbar_contour_flag is True):
                ax2 = plt.colorbar(cs, ax=ax, label='W [m/s]', extend='both',
                                   spacing='proportional',
                                   ticks=w_vel_contours)
        except ValueError:
            warnings.warn(("Cartopy does not support color maps on blank " +
                           "contour plots, contour color map not drawn!"),
                          RuntimeWarning)
    if(wind_vel_contours is not None):
        vel = np.ma.sqrt(u[level, :, :]**2 + v[level, :, :]**2)
        vel = vel.filled(fill_value=np.nan)
        try:
            # NOTE(review): this branch contours in km coordinates (grid_x/
            # grid_y) on a lat/lon map axis, unlike the branches above --
            # confirm whether grid_lon/grid_lat were intended here.
            cs = ax.contourf(grid_x[level, :, :], grid_y[level, :, :],
                             vel, levels=wind_vel_contours, linewidths=2,
                             alpha=contour_alpha)
            cs.cmap.set_under(color='white', alpha=0)
            cs.cmap.set_bad(color='white', alpha=0)
            ax.clabel(cs)
            if(colorbar_contour_flag is True):
                # BUGFIX: label was the malformed '|V\ [m/s]'.
                ax2 = plt.colorbar(cs, ax=ax, label='|V| [m/s]', extend='both',
                                   spacing='proportional',
                                   ticks=w_vel_contours)
        except ValueError:
            warnings.warn(("Cartopy does not support color maps on blank " +
                           "contour plots, contour color map not drawn!"),
                          RuntimeWarning)
    bca_min = math.radians(Grids[0].fields[u_field]['min_bca'])
    bca_max = math.radians(Grids[0].fields[u_field]['max_bca'])
    if(show_lobes is True):
        for i in range(len(Grids)):
            for j in range(len(Grids)):
                if (i != j):
                    bca = retrieval.get_bca(Grids[j].radar_longitude['data'],
                                            Grids[j].radar_latitude['data'],
                                            Grids[i].radar_longitude['data'],
                                            Grids[i].radar_latitude['data'],
                                            Grids[j].point_x['data'][0],
                                            Grids[j].point_y['data'][0],
                                            Grids[j].get_projparams())
                    # BUGFIX: matplotlib's contour takes colors=, not color=.
                    ax.contour(
                        grid_lon[:, :], grid_lat[:, :], bca,
                        levels=[bca_min, bca_max], colors='k', zorder=1)
    if(axes_labels_flag is True):
        # BUGFIX: the labels were swapped -- the x axis plots longitude and
        # the y axis latitude. Raw strings avoid the invalid '\d' escape.
        ax.set_xlabel(r'Longitude [$\degree$]')
        ax.set_ylabel(r'Latitude [$\degree$]')
    if(title_flag is True):
        ax.set_title(
            ('PyDDA retreived winds @' + str(grid_h[level, 0, 0]) + ' km'))
    if(coastlines is True):
        ax.coastlines(resolution='10m')
    if(gridlines is True):
        ax.gridlines()
    ax.set_extent([grid_lon.min(), grid_lon.max(),
                   grid_lat.min(), grid_lat.max()])
    # Tick every 0.1 degree across the plotted extent.
    num_tenths = round((grid_lon.max()-grid_lon.min())*10)+1
    the_ticks_x = np.round(
        np.linspace(grid_lon.min(), grid_lon.max(), num_tenths), 1)
    num_tenths = round((grid_lat.max()-grid_lat.min())*10)+1
    the_ticks_y = np.round(
        np.linspace(grid_lat.min(), grid_lat.max(), num_tenths), 1)
    ax.set_xticks(the_ticks_x)
    ax.set_yticks(the_ticks_y)
    return ax
def register(registered_collection, reg_key):
    """Register decorated function or class to collection.

    Stores the decorated object inside ``registered_collection`` under
    ``reg_key``. A string key may be hierarchical, e.g.
    "my_model/my_exp/my_config_0" stores the object under
    registered_collection["my_model"]["my_exp"]["my_config_0"]. Intended to be
    used together with the lookup() function in this file.

    Args:
        registered_collection: a dictionary. The decorated function or class will be
            put into this collection.
        reg_key: The key for retrieving the registered function or class. If reg_key
            is a string, it can be hierarchical like my_model/my_exp/my_config_0
    Returns:
        A decorator function
    Raises:
        KeyError: when function or class to register already exists.
    """
    def decorator(fn_or_cls):
        """Insert fn_or_cls into the collection under reg_key."""
        if isinstance(reg_key, str):
            # Walk/create the nested dicts for every path component but the last.
            *parents, leaf_reg_key = reg_key.split("/")
            collection = registered_collection
            for h_idx, entry_name in enumerate(parents):
                collection = collection.setdefault(entry_name, {})
                if not isinstance(collection, dict):
                    raise KeyError(
                        "Collection path {} at position {} already registered as "
                        "a function or class.".format(entry_name, h_idx))
        else:
            collection = registered_collection
            leaf_reg_key = reg_key
        if leaf_reg_key in collection:
            raise KeyError("Function or class {} registered multiple times.".format(
                leaf_reg_key))
        collection[leaf_reg_key] = fn_or_cls
        return fn_or_cls
    return decorator
def form_hhaa_records(df,
                      team_locn='h',
                      records='h',
                      feature='ftGoals'):
    """
    Build per-team lagged record columns for *feature*.

    For each team (grouped by *team_locn*) every prior value of the
    ``{team_locn}_{feature}`` column is attached as a shifted
    ``{team_locn}_{records}_{feature}-{lag}`` column, then the per-team
    frames are stitched back together in the original row order.
    """
    feature_col = f'{team_locn}_{feature}'
    augmented = []
    for _, group in df.groupby(by=team_locn):
        lagged = {}
        # One shifted column per possible lag within this team's history.
        for lag in range(len(group)):
            lagged[f'{team_locn}_{records}_{feature}-{lag}'] = group[feature_col].shift(lag)
        augmented.append(pd.concat([group, pd.DataFrame(lagged)], sort=True, axis=1))
    return pd.concat(augmented, axis=0, sort=True).sort_index()
def test_invalid_metric(aggregator):
    """
    Invalid metrics raise a Warning and a critical service check
    """
    config = common.generate_instance_config(common.INVALID_METRICS)
    snmp_check = common.create_check(config)
    snmp_check.check(config)
    # The failed lookup should surface as a WARNING on the can_check
    # service check rather than crashing the run.
    aggregator.assert_service_check(
        "snmp.can_check", status=SnmpCheck.WARNING, tags=common.CHECK_TAGS, at_least=1
    )
def plot_adjust():
    """
    Adjust the current plot: label both axes and style the legend
    before packing the layout.
    """
    plt.xlabel('Site occupation density $\\rho$')
    plt.ylabel('Normalized flux $j$')
    legend_style = {'loc': 'upper left', 'labelspacing': 0.2,
                    'frameon': False, 'borderaxespad': 0}
    plt.legend(**legend_style)
    plt.tight_layout()
def test_io_mapping():
    """Test if ``inputs`` and ``outputs`` are translated to output/input names."""
    good_io = {'inputs': ['input', 'second_input'], 'outputs': ['output', 'sum']}
    model = SimpleModel(dataset=None, log_dir='', **good_io)
    assert model.input_names == good_io['inputs']
    assert model.output_names == good_io['outputs']
    # A tensor name that does not exist in the graph must be rejected,
    # whether it appears among the inputs or the outputs.
    bad_ios = [
        {'inputs': ['input', 'second_input', 'third_input'],
         'outputs': ['output', 'sum']},
        {'inputs': ['input', 'second_input'],
         'outputs': ['output', 'sum', 'sub']},
    ]
    for bad_io in bad_ios:
        with pytest.raises(ValueError):
            SimpleModel(dataset=None, log_dir='', **bad_io)
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update."""
    dev_registry = dr.async_get(hass)
    config = {
        "topic": "test-topic",
        "device": {
            "identifiers": ["helloworld"],
            "connections": [["mac", "02:5b:26:a8:dc:12"]],
            "manufacturer": "Whatever",
            "name": "Beer",
            "model": "Glass",
            "sw_version": "0.1-beta",
        },
    }

    async def publish_and_get_device():
        # Publish the current config over MQTT discovery and return the
        # resulting registry entry once processing has settled.
        async_fire_mqtt_message(
            hass, "homeassistant/tag/bla/config", json.dumps(config)
        )
        await hass.async_block_till_done()
        return dev_registry.async_get_device({("mqtt", "helloworld")})

    device = await publish_and_get_device()
    assert device is not None
    assert device.name == "Beer"

    # Re-publishing with a changed name must update the existing entry.
    config["device"]["name"] = "Milk"
    device = await publish_and_get_device()
    assert device is not None
    assert device.name == "Milk"
def terminate_process_by_name(procname):
    """Forcefully kill every process whose name equals *procname*.

    Uses ``Process.kill()`` (SIGKILL on POSIX), so the target gets no
    chance to clean up.

    :param procname: exact process name to match, as reported by
        ``psutil.Process.name()``.
    """
    for proc in psutil.process_iter():
        try:
            if proc.name() == procname:
                proc.kill()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # A process can exit (or become inaccessible) between being
            # listed and being inspected; skip it instead of aborting
            # the whole sweep.
            continue
def process_dataset(material: str, frequency: float, plot=False,
                    pr=False) -> float:
    """
    Take a set of data, fit curve and find thermal diffusivity.
    Parameters
    ----------
    material : str
        Gives material of this dataset. 'Cu' or 'Al'.
    frequency : float
        Frequency used, in mHz.
    plot : bool
        True if a plot of the curves should be shown.
    pr : bool
        True if the ODR output should be printed.
    Returns
    -------
    diffusivity : float
        The estimated thermal diffusivity of this material
        (as a ufloat carrying the propagated uncertainty).
    """
    # Check parameter validity
    if material not in ['Cu', 'Al']:
        raise ValueError('Invalid material name')
    # Get file (expects e.g. 'Cu_5mHz.csv' in the working directory)
    filename = '{}_{}mHz.csv'.format(material, frequency)
    raw = pd.read_csv(filename,
                      names=['Time',
                             'Ref',
                             'Source',
                             'S1',
                             'S2',
                             'S3',
                             'S4',
                             'S5',
                             'S6'])
    # Set sensor position (in m) based on bar material
    if material == 'Cu':
        x = np.array([12, 35, 70, 150, 310, 610]) / 1000
        dx = np.full(6, 0.015)
    elif material == 'Al':
        x = np.array([27.5, 70, 150, 310, 630]) / 1000
        dx = np.array([0.25, 0.25, 0.25, 0.25, 0.5]) / 100
    # Start processing data into a useful format
    data = raw.to_numpy()
    # delete first row of zeroes
    data = np.delete(data, 0, 0)
    # For every temperature measurement, associates it with time and position
    # Also dumps data from the dodgy sensor
    # (Cu keeps S1-S6 via row[3:], Al drops one sensor via row[4:])
    # Calculates error in Temperature based a C class Pt100
    def add_independents(row):
        if material == 'Cu':
            t = np.full(6, row[0])
            relative_temperature = row[3:] - row[1]
            temp_err = (row[3:] + row[1]) * 0.01 + 1.2
        elif material == 'Al':
            t = np.full(5, row[0])
            relative_temperature = row[4:] - row[1]
            temp_err = (row[4:] + row[1]) * 0.01 + 1.2
        return np.column_stack((t, x, dx, relative_temperature, temp_err))
    # This produces an array for each time measurment,
    # where each row is [t, x, T(x,t) ]
    data = np.apply_along_axis(add_independents, 1, data)
    # Extract the rows from each time measurement array into one big array
    data = np.reshape(data, (-1, 5))
    # Split columns into named vars for clarity
    # Note how the array has been transposed
    time, x, dx, Temperature, dT = data.T
    # Estimate time error
    dtime = np.full(len(time), 0.01)
    dindep = [dx, dtime]
    # Set angular frquency, given we know frequency (mHz -> Hz, hence /1000)
    w = 2 * np.pi * (frequency / 1000)
    # Equation to fit to: damped thermal wave, amplitude A, decay B,
    # phase-lag coefficient C
    def model(params, independent):
        A, B, C = params
        t, x = independent
        return A * np.exp(- B * x) * np.sin(w * t - (C * x))
    # Fit curve with orthogonal distance regression (errors in both axes)
    mod = odr.Model(model)
    realData = odr.RealData([time, x], y=Temperature, sx=dindep, sy=dT)
    myodr = odr.ODR(realData, mod, beta0=[11., 2., 9.])
    output = myodr.run()
    parameters = output.beta
    if plot:
        # Plot experimental data
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(time, x, Temperature, s=1, color='black')
        # ax.scatter(time, x, Temperature, s=1, c=Temperature, cmap='plasma')
        ax.set_title('{} at {}mHz'.format(material, frequency))
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Distance (m)')
        ax.set_zlabel('Temperature (C)')
        # Plot the fitted function over 5 periods of the drive signal
        sampling_time = 5 * 1000 / frequency
        sample_time = np.linspace(0, sampling_time, 750)
        sample_x = np.linspace(0, 0.65, 750)
        Time, X = np.meshgrid(sample_time, sample_x, sparse=True)
        sample_Temperature = model(parameters, [Time, X])
        ax.plot_surface(Time, X, sample_Temperature, cmap='plasma',
                        alpha=0.4)
        # ax.plot_wireframe(Time, X, sample_Temperature, color='black',
        #                   alpha=0.5)
    # Include sd uncertainties with parameters
    pu = uarray(parameters, output.sd_beta)
    if pr:
        output.pprint()
        # print(pu)
    # Calculate diffusitivity: D = w / (2*B*C) from the thermal-wave fit
    return w / (2 * pu[1] * pu[2]) | 32,639 |
def migrate(db_pass, image=image_tag):
    """
    Performs migrations on database.
    :param db_pass: Password for database.
    :param image: Name of seventweets image.
    """
    # Assemble the one-shot docker command; segments keep their trailing
    # spaces so the joined string is a single well-formed command line.
    command = ''.join([
        'docker run ',
        '--rm ',
        f'--net {network_name} ',
        f'-e ST_DB_USER={db_user} -e ST_DB_PASS={db_pass} ',
        f'-e ST_DB_HOST={db_container_name} ',
        f'-e ST_DB_NAME={db_name} ',
        f'{image} ',
        'python3 -m seventweets migrate',
    ])
    run(command)
def search(obj, terms, oper, packages):
    """
    Search PyPI for packages or releases thereof.

    Each search term is either ``field:value`` (e.g. ``summary:Django``)
    or a bare ``value``, which searches long descriptions.
    """
    spec = {}
    for term in terms:
        field, sep, value = term.partition(":")
        if not sep:
            # No explicit field: match the whole term against descriptions.
            field, value = "description", term
        else:
            field = SEARCH_SYNONYMS.get(field, field)
        # ServerProxy can't handle defaultdicts, so we can't use those instead.
        spec.setdefault(field, []).append(value)
    hits = map(clean_pypi_dict, obj.xmlrpc("search", spec, oper))
    if packages:
        hits = squish_versions(hits)
    click.echo(dumps(hits))
def load_shuttle(main_data_path, folder='shuttle', df=None):
    """
    ____ _ _ _ _ ___ ___ _ ____
    [__ |__| | | | | | |___
    ___] | | |__| | | |___ |___
    From UCI https://archive.ics.uci.edu/ml/datasets/Shuttle+Landing+Control

    Loads the Shuttle Landing Control dataset, expands wildcard ('*')
    entries into every possible attribute combination, and decodes the
    integer codes into human-readable labels.

    :param main_data_path: root directory containing the dataset folders
    :param folder: sub-folder holding 'shuttle-landing-control.data'
    :param df: unused — it is overwritten below; presumably kept for
        signature compatibility with sibling loaders (TODO confirm)
    :returns: (decoded DataFrame, name of the target column — the first
        column, 'Recommended\\nControl Mode')
    """
    # Encoder: valid integer codes per column (1-based, per UCI docs)
    encoder_shuttle = [
        list(range(1, 3)),
        list(range(1, 3)),
        list(range(1, 5)),
        list(range(1, 3)),
        list(range(1, 3)),
        list(range(1, 5)),
        list(range(1, 3))
    ]
    # Columns names
    shuttle_columns = [
        'Recommended\nControl Mode', 'Positioning', 'Altimeter Error\nMagnitude', 'Altimeter Error\nSign',
        'Wind\nDirection', 'Wind\nStrength', 'Sky Condition'
    ]
    # Decoder: label for each integer code, indexed by (column, code-1)
    shuttle_decoder = [['Manual', 'Automatic'], ['Stable', 'Unstable'], ['Very Large', 'Large', 'Medium', 'Small'],
                       ['Positive', 'Negative'], ['Head', 'Tail'], ['Light', 'Medium', 'Strong', 'Very Strong'],
                       ['Good Visibility', 'No Visibility']]
    def combinatorial_from_record(record):
        """
        Generate the combinatorial rows for missing ones
        i.e. if * is present in a record it generates all the possible combinations for that column
        (works on dicts, it's easier than pd.DataFrame)
        """
        combi = [k for k, v in record.items() if v == '*']
        non_combi = [k for k, v in record.items() if v != '*']
        if len(combi) > 0:
            # Cartesian product over the valid codes of every '*' column
            combi_mesh_start = [encoder_shuttle[i] for i in combi]
            combi_cols = np.array(np.meshgrid(*combi_mesh_start)).T.reshape(-1, len(combi_mesh_start))
            retds = []
            for cs in combi_cols:
                retds.append({**{k: int(record[k]) for k in non_combi}, **{k: int(c) for k, c in zip(combi, cs)}})
            return retds
        else:
            return [{k: int(v) for k, v in record.items()}]
    df_raw = pd.read_csv(os.path.join(main_data_path, folder, 'shuttle-landing-control.data'), header=None)
    df = pd.DataFrame(sum([combinatorial_from_record(record) for record in df_raw.to_dict('records')], []))
    # Decode integer codes (1-based) into their labels
    for col in df:
        df[col] = df[col].apply(lambda x: shuttle_decoder[col][x - 1])
    df.columns = [shuttle_columns[col] for col in df]
    # Reorder columns; the target ('Recommended Control Mode') stays first
    df = df[[shuttle_columns[i] for i in [0, 1, -2, -3, -1, 2, 3]]]
    return df, df.columns.values[0] | 32,642 |
def cuda_reset() -> None:
    """Calls `cudaDeviceReset`
    Destroy all allocations and reset all state on the current device
    in the current process.
    """ | 32,643 |
    # NOTE(review): the body is empty here — presumably the actual binding
    # to cudaDeviceReset is generated or patched in elsewhere; confirm
    # before relying on this to actually free device state.
def hxlrename():
    """Console script for hxlrename."""
    # Thin CLI entry point: defers all argument parsing and work to
    # run_script, handing it the hxlrename_main callable.
    run_script(hxlrename_main) | 32,644 |
def show_drizzle_HDU(hdu):
    """Make a figure from the multiple extensions in the drizzled grism file.
    The layout is a grid with one (kernel, spectrum) column pair per grism:
    one row per position angle, plus a final row for the PA-combined stack.
    Parameters
    ----------
    hdu : `~astropy.io.fits.HDUList`
        HDU list output by `drizzle_grisms_and_PAs`.
    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        The figure.
    """
    from collections import OrderedDict
    from matplotlib.gridspec import GridSpec
    from matplotlib.ticker import MultipleLocator
    h0 = hdu[0].header
    # NX: number of grisms; NY: max number of PAs over all grisms, +1 for
    # the combined row at the bottom.
    NX = h0['NGRISM']
    NY = 0
    grisms = OrderedDict()
    for ig in range(NX):
        g = h0['GRISM{0:03d}'.format(ig+1)]
        NY = np.maximum(NY, h0['N'+g])
        grisms[g] = h0['N'+g]
    NY += 1
    fig = plt.figure(figsize=(5*NX, 1*NY))
    # Narrow column (0.2) for the kernel, wide column (1) for the spectrum
    widths = []
    for i in range(NX):
        widths.extend([0.2, 1])
    gs = GridSpec(NY, NX*2, height_ratios=[1]*NY, width_ratios=widths)
    for ig, g in enumerate(grisms):
        sci_i = hdu['SCI',g]
        wht_i = hdu['WHT',g]
        kern_i = hdu['KERNEL',g]
        h_i = sci_i.header
        # Scale cuts from the weight map; fall back to finite pixels when
        # nothing has positive weight.
        clip = wht_i.data > 0
        if clip.sum() == 0:
            clip = np.isfinite(wht_i.data)
        avg_rms = 1/np.median(np.sqrt(wht_i.data[clip]))
        vmax = np.maximum(1.1*np.percentile(sci_i.data[clip],98),
                          5*avg_rms)
        vmax_kern = 1.1*np.percentile(kern_i.data,99.5)
        # Kernel (combined row, bottom of the grid)
        ax = fig.add_subplot(gs[NY-1, ig*2+0])
        sh = kern_i.data.shape
        extent = [0, sh[1], 0, sh[0]]
        ax.imshow(kern_i.data, origin='lower', interpolation='Nearest',
                  vmin=-0.1*vmax_kern, vmax=vmax_kern, cmap=plt.cm.viridis_r,
                  extent=extent, aspect='auto')
        ax.set_xticklabels([]); ax.set_yticklabels([])
        ax.xaxis.set_tick_params(length=0)
        ax.yaxis.set_tick_params(length=0)
        # Spectrum (combined row), x axis in microns from WMIN/WMAX
        sh = sci_i.data.shape
        extent = [h_i['WMIN'], h_i['WMAX'], 0, sh[0]]
        ax = fig.add_subplot(gs[NY-1, ig*2+1])
        ax.imshow(sci_i.data, origin='lower',
                  interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
                  extent=extent, cmap = plt.cm.viridis_r,
                  aspect='auto')
        ax.set_yticklabels([])
        ax.set_xlabel(r'$\lambda$ ($\mu$m) - '+g)
        ax.xaxis.set_major_locator(MultipleLocator(grism_major[g]))
        # One row per position angle of this grism
        for ip in range(grisms[g]):
            #print(ip, ig)
            pa = h0['{0}{1:02d}'.format(g, ip+1)]
            sci_i = hdu['SCI','{0},{1}'.format(g, pa)]
            wht_i = hdu['WHT','{0},{1}'.format(g, pa)]
            kern_i = hdu['KERNEL','{0},{1}'.format(g, pa)]
            h_i = sci_i.header
            # Kernel for this PA
            ax = fig.add_subplot(gs[ip, ig*2+0])
            sh = kern_i.data.shape
            extent = [0, sh[1], 0, sh[0]]
            ax.imshow(kern_i.data, origin='lower', interpolation='Nearest',
                      vmin=-0.1*vmax_kern, vmax=vmax_kern, extent=extent,
                      cmap=plt.cm.viridis_r, aspect='auto')
            ax.set_xticklabels([]); ax.set_yticklabels([])
            ax.xaxis.set_tick_params(length=0)
            ax.yaxis.set_tick_params(length=0)
            # Spectrum for this PA, labelled with the PA value
            sh = sci_i.data.shape
            extent = [h_i['WMIN'], h_i['WMAX'], 0, sh[0]]
            ax = fig.add_subplot(gs[ip, ig*2+1])
            ax.imshow(sci_i.data, origin='lower',
                      interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
                      extent=extent, cmap = plt.cm.viridis_r,
                      aspect='auto')
            ax.set_yticklabels([]); ax.set_xticklabels([])
            ax.xaxis.set_major_locator(MultipleLocator(grism_major[g]))
            ax.text(0.015, 0.94, '{0:3.0f}'.format(pa), ha='left',
                    va='top',
                    transform=ax.transAxes, fontsize=8,
                    backgroundcolor='w')
            # Object ID goes in the top-right panel only
            if (ig == (NX-1)) & (ip == 0):
                ax.text(0.98, 0.94, 'ID = {0}'.format(h0['ID']),
                        ha='right', va='top', transform=ax.transAxes,
                        fontsize=8, backgroundcolor='w')
    gs.tight_layout(fig, pad=0.1)
    return fig | 32,645 |
def test_get_tenant_shares_for_period_one_billing(
    django_db_setup,
    lease_factory,
    contact_factory,
    tenant_factory,
    tenant_contact_factory,
    assert_count_equal,
):
    """Lease with two tenants. Tenant2's billing contact is contact1"""
    start_date = datetime.date(year=2017, month=1, day=1)
    end_date = datetime.date(year=2017, month=12, day=31)
    lease = lease_factory(
        type_id=1, municipality_id=1, district_id=1, notice_period_id=1
    )
    # Two equal-share (1/2 each) tenants with one person contact apiece.
    tenants = [
        tenant_factory(lease=lease, share_numerator=1, share_denominator=2)
        for _ in range(2)
    ]
    contacts = [
        contact_factory(
            first_name=f"First name {i}",
            last_name=f"Last name {i}",
            type=ContactType.PERSON,
        )
        for i in (1, 2)
    ]
    for tenant, contact in zip(tenants, contacts):
        tenant_contact_factory(
            type=TenantContactType.TENANT,
            tenant=tenant,
            contact=contact,
            start_date=start_date,
        )
    # Tenant 2 bills through contact 1, so all shares collapse onto it.
    tenant_contact_factory(
        type=TenantContactType.BILLING,
        tenant=tenants[1],
        contact=contacts[0],
        start_date=start_date,
    )
    shares = lease.get_tenant_shares_for_period(start_date, end_date)
    assert len(shares) == 1
    assert_count_equal(shares.keys(), [contacts[0]])
    assert shares[contacts[0]] == {
        tenants[0]: [(start_date, end_date)],
        tenants[1]: [(start_date, end_date)],
    }
def make_ln_func(variable):
    """Build a function computing the natural log of *variable* over a queryset."""
    def safe_ln_queryset(qs):
        """Natural log of the queryset's values, with log(0) mapped to 0."""
        values = qs.values_list(variable, flat=True)
        logged = np.log(values)
        # log(0) yields -inf; zero it out so downstream math stays finite.
        logged[np.isneginf(logged)] = 0
        return logged
    return safe_ln_queryset
def test_dice():
    """
    Tests for catalyst.metrics.dice metric.
    Builds small 4x4 single-channel masks and checks the boundary values
    (0, 1, partial overlap) and the per-class/micro/macro reduction modes.
    """
    size = 4
    half_size = size // 2
    shape = (1, 1, size, size)
    # check 0: one empty
    empty = torch.zeros(shape)
    full = torch.ones(shape)
    assert dice(empty, full, class_dim=1, mode="per-class").item() == 0
    # check 0: no overlap (left half vs right half)
    left = torch.ones(shape)
    left[:, :, :, half_size:] = 0
    right = torch.ones(shape)
    right[:, :, :, :half_size] = 0
    assert dice(left, right, class_dim=1, mode="per-class").item() == 0
    # check 1: both empty, both full, complete overlap
    assert dice(empty, empty, class_dim=1, mode="per-class").item() == 1
    assert dice(full, full, class_dim=1, mode="per-class").item() == 1
    assert dice(left, left, class_dim=1, mode="per-class").item() == 1
    # check 0.5: half overlap -> dice = 2*|A∩B| / (|A|+|B|) = 2*4/(4+8) = 2/3
    top_left = torch.zeros(shape)
    top_left[:, :, :half_size, :half_size] = 1
    assert torch.isclose(
        dice(top_left, left, class_dim=1, mode="per-class"), torch.Tensor([[0.6666666]])
    )
    assert torch.isclose(
        dice(top_left, left, class_dim=1, mode="micro"), torch.Tensor([[0.6666666]])
    )
    assert torch.isclose(
        dice(top_left, left, class_dim=1, mode="macro"), torch.Tensor([[0.6666666]])
    )
    # check multiclass: 0, 0, 1, 1, 1, 0.66667 (the six cases above stacked
    # along the class dimension); micro pools intersections/unions first
    a = torch.cat([empty, left, empty, full, left, top_left], dim=1)
    b = torch.cat([full, right, empty, full, left, left], dim=1)
    ans = torch.Tensor([0, 0, 1, 1, 1, 0.6666666])
    ans_micro = torch.tensor(0.6087)
    assert torch.allclose(dice(a, b, class_dim=1, mode="per-class"), ans)
    assert torch.allclose(dice(a, b, class_dim=1, mode="micro"), ans_micro)
    # batch dimension must not change the per-class / micro scores
    aaa = torch.cat([a, a, a], dim=0)
    bbb = torch.cat([b, b, b], dim=0)
    assert torch.allclose(dice(aaa, bbb, class_dim=1, mode="per-class"), ans)
    assert torch.allclose(dice(aaa, bbb, class_dim=1, mode="micro"), ans_micro) | 32,648 |
def calc_all_energies(n, k, states, params):
    """Calculate all the energies for the states given. Can be used for Potts.
    Parameters
    ----------
    n : int
        Number of spins.
    k : int
        Ising or Potts3 model.
    states : ndarray
        Number of distinct states.
    params : ndarray
        (h,J) vector: first the fields (n entries for Ising, n*k for
        Potts3), then the couplings.
    Returns
    -------
    E : ndarray
        Energies of all given states.
    """
    e = np.zeros(len(states))
    if k==2:
        # Ising: E = -sum_i h_i s_i - sum_{i<j} J_ij s_i s_j
        for i in range(len(states)):
            s = states[i]
            e[i] -= fast_sum(params[n:], s)
            e[i] -= np.sum(s*params[:n])
    elif k==3:
        # Potts3: one field per (spin, color) pair, then ternary couplings
        for i in range(len(states)):
            s = states[i]
            for ix in range(n):
                # fields
                e[i] -= params[ix+s[ix]*n]
            e[i] -= fast_sum_ternary(params[n*k:], s)
    else: raise NotImplementedError
    return e
def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):
    """
    Create Heroku Connect schema.
    Note:
        This function is only meant to be used for local development.
        In a production environment the schema will be created by
        Heroku Connect.
    Args:
        using (str): Alias for database connection.
    Returns:
        bool: ``True`` if the schema was created, ``False`` if the
        schema already exists.
    """
    connection = connections[using]
    with connection.cursor() as cursor:
        # Bail out early if the schema is already present.
        cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])
        schema_exists = cursor.fetchone()[0]
        if schema_exists:
            return False
        # AsIs interpolates the schema name as a raw identifier (it cannot
        # be a bind parameter); the value comes from trusted settings.
        cursor.execute("CREATE SCHEMA %s;", [AsIs(settings.HEROKU_CONNECT_SCHEMA)])
        with connection.schema_editor() as editor:
            for model in get_heroku_connect_models():
                editor.create_model(model)
            # Needs PostgreSQL and database superuser privileges (which is the case on Heroku):
            editor.execute('CREATE EXTENSION IF NOT EXISTS "hstore";')
            # Imported here to avoid import-time side effects at module load.
            from heroku_connect.models import TriggerLog, TriggerLogArchive
            for cls in [TriggerLog, TriggerLogArchive]:
                editor.create_model(cls)
    return True | 32,650 |
def extract_sentences(modifier, split_text):
    """
    Return the sentences from *split_text* that mention *modifier* as a
    whole word (case-insensitive).
    """
    # Compile once; \b guards plus the lookarounds reject matches embedded
    # inside longer words.
    pattern = re.compile(r"\b(?=\w)%s\b(?!\w)" % re.escape(modifier),
                         re.IGNORECASE)
    return [sentence for sentence in split_text if pattern.search(sentence)]
def get_1_neighbours(graph, i):
    """
    Return node *i* together with all of its out-neighbours in one array
    (the 1-neighbourhood including i itself).
    """
    # np.append concatenates the scalar i onto the neighbour array.
    return np.append(graph.get_out_neighbours(i), i)
def window_slice(frame, center, window):
    """
    Index ranges for a window of size `window` at `center`, clipped to the
    boundaries of `frame`.

    Parameters
    ----------
    frame : ArrayLike
        image frame for bound-checking (only its last two axes are used)
    center : Tuple
        (y, x) coordinate of the window
    window : float,Tuple
        window length, or tuple of lengths for each axis

    Returns
    -------
    (ys, xs)
        tuple of ranges of indices covering the (clipped) window
    """
    half = np.asarray(window) / 2
    ny, nx = frame.shape[-2:]
    # Round to the nearest pixel, then clamp to the frame bounds;
    # the unsafe cast truncates the rounded floats to ints.
    lo = np.maximum(0, np.round(center - half), dtype=int, casting="unsafe")
    hi = np.minimum(
        (ny - 1, nx - 1), np.round(center + half), dtype=int, casting="unsafe"
    )
    ys = range(lo[0], hi[0] + 1)
    xs = range(lo[1], hi[1] + 1)
    return ys, xs
def each_30_sec(exercises):
    """ Do one minute of each exercise with no break """
    # Delegates to one_by_one with 30 s work / 0 s rest per exercise.
    # NOTE(review): docstring says "one minute" but sec_on=30 gives 30 s
    # per exercise — confirm which is intended.
    one_by_one(exercises, sec_on=30, sec_off=0) | 32,654 |
def revnum_to_revref(rev, old_marks):
    """Convert an hg revnum to a git-fast-import rev reference (an SHA1
    or a mark)."""
    mark = old_marks.get(rev)
    if mark:
        return mark
    # Marks in the fast-import stream are 1-based, revnums are 0-based.
    return b':%d' % (rev + 1)
def onset_precision_recall_f1(ref_intervals, est_intervals,
                              onset_tolerance=0.05, strict=False, beta=1.0):
    """Precision, Recall and F-measure of note onsets.

    An estimated onset counts as correct when it lies within
    ``onset_tolerance`` of a reference onset (+-50 ms by default). Note
    offsets and pitches are ignored entirely, so an estimated onset can
    match a reference onset even if the notes have completely different
    pitches (i.e. notes that would not match with :func:`match_notes`).

    Examples
    --------
    >>> ref_intervals, _ = mir_eval.io.load_valued_intervals(
    ...     'reference.txt')
    >>> est_intervals, _ = mir_eval.io.load_valued_intervals(
    ...     'estimated.txt')
    >>> (onset_precision,
    ...  onset_recall,
    ...  onset_f_measure) = mir_eval.transcription.onset_precision_recall_f1(
    ...      ref_intervals, est_intervals)

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n,2)
        Array of reference notes time intervals (onset and offset times)
    est_intervals : np.ndarray, shape=(m,2)
        Array of estimated notes time intervals (onset and offset times)
    onset_tolerance : float > 0
        Allowed onset deviation in seconds. Default is 0.05 (50 ms).
    strict : bool
        If ``strict=False`` (the default), threshold checks use ``<=``
        (less than or equal); if ``strict=True`` they use ``<``.
    beta : float > 0
        Weighting factor for f-measure (default value = 1.0).

    Returns
    -------
    precision : float
        The computed precision score
    recall : float
        The computed recall score
    f_measure : float
        The computed F-measure score
    """
    validate_intervals(ref_intervals, est_intervals)
    # When reference or estimate notes are empty, metrics are undefined;
    # return 0's by convention.
    if len(ref_intervals) == 0 or len(est_intervals) == 0:
        return 0., 0., 0.
    matched = match_note_onsets(ref_intervals, est_intervals,
                                onset_tolerance=onset_tolerance,
                                strict=strict)
    n_matched = float(len(matched))
    precision = n_matched / len(est_intervals)
    recall = n_matched / len(ref_intervals)
    return precision, recall, util.f_measure(precision, recall, beta=beta)
def parse_acs_metadata(acs_metadata, groups):
    """Map variable ids to their metadata, restricted to the given groups.

    Only variables whose label is an "Estimate!!Total..." entry are kept.

    acs_metadata: The ACS metadata as json.
    groups: The list of group ids to include."""
    return {
        variable_id: metadata
        for variable_id, metadata in acs_metadata["variables"].items()
        if metadata.get("group") in groups
        and metadata["label"].startswith("Estimate!!Total")
    }
def split_series_using_lytaf(timearray, data, lytaf):
    """
    Proba-2 analysis code for splitting up LYRA timeseries around locations
    where LARs (and other data events) are observed.
    Parameters
    ----------
    timearray : `numpy.ndarray` of times understood by `sunpy.time.parse_time`
        function.
    data : `numpy.array` corresponding to the given time array
    lytaf : `numpy.recarray`
        Events obtained from querying LYTAF database using
        lyra.get_lytaf_events().
    Output
    ------
    output : `list` of dictionaries
        Each dictionary contains a sub-series corresponding to an interval of
        'good data'.
    """
    n = len(timearray)
    # mask[i] == 1 means sample i is good, 0 means it falls inside an event
    mask = np.ones(n)
    el = len(lytaf)
    # make the input time array a list of datetime objects
    datetime_array = []
    for tim in timearray:
        datetime_array.append(parse_time(tim))
    # scan through each entry retrieved from the LYTAF database
    for j in range(0, el):
        # want to mark all times with events as bad in the mask, i.e. = 0
        start_dt = lytaf['begin_time'][j]
        end_dt = lytaf['end_time'][j]
        # find the start and end indices for each event
        start_ind = np.searchsorted(datetime_array, start_dt)
        end_ind = np.searchsorted(datetime_array, end_dt)
        # append the mask to mark event as 'bad'
        mask[start_ind:end_ind] = 0
    # discontinuities in the mask mark good/bad boundaries
    diffmask = np.diff(mask)
    tmp_discontinuity = np.where(diffmask != 0.)
    # disc contains the indices of mask where there are discontinuities
    disc = tmp_discontinuity[0]
    if len(disc) == 0:
        print('No events found within time series interval. '
              'Returning original series.')
        return [{'subtimes': datetime_array, 'subdata': data}]
    # -1 in diffmask means went from good data to bad
    # +1 means went from bad data to good
    # want to get the data between a +1 and the next -1
    # if the first discontinuity is a -1 then the start of the series was good.
    if diffmask[disc[0]] == -1.0:
        # make sure we can always start from disc[0] below
        disc = np.insert(disc, 0, 0)
    split_series = []
    limit = len(disc)
    # now extract the good data regions and ignore the bad ones
    # (pairs of indices: start of good run, start of next bad run)
    for h in range(0, limit, 2):
        if h == limit-1:
            # can't index h+1 here. Go to end of series
            subtimes = datetime_array[disc[h]:-1]
            subdata = data[disc[h]:-1]
            subseries = {'subtimes':subtimes, 'subdata':subdata}
            split_series.append(subseries)
        else:
            subtimes = datetime_array[disc[h]:disc[h+1]]
            subdata = data[disc[h]:disc[h+1]]
            subseries = {'subtimes':subtimes, 'subdata':subdata}
            split_series.append(subseries)
    return split_series | 32,658 |
def establish_file_structure(project_dir):
    """Create an empty spin system master file if required for MADByTE.

    Args:
        project_dir (str or Path): project directory in which
            ``Spin_Systems_Master.json`` is expected (and created when
            missing).
    """
    project_dir = Path(project_dir)
    master = project_dir.joinpath("Spin_Systems_Master.json")
    if not master.exists():
        # Seed an empty master table so downstream code can always load it.
        master_df = pd.DataFrame(
            columns=["Spin_System_ID","Members","Found_In"],
        )
        master_df.to_json(master)
        logger.info("Required master file not found. One has been created.") | 32,659 |
def getrqdata(request):
    """Return the request data, chosen by the request's HTTP `method`.

    Unlike the now defunct `REQUEST
    <https://docs.djangoproject.com/en/1.11/ref/request-response/#django.http.HttpRequest.REQUEST>`_
    attribute, this inspects the request's `method` in order to decide
    what to return.
    """
    method = request.method
    if method in ('PUT', 'DELETE'):
        # Django only parses POST bodies; build the QueryDict ourselves.
        # (`body` was named `raw_post_data` before Django 1.4.)
        return QueryDict(request.body)
    if method == 'HEAD':
        return request.GET
    # e.g. request.GET for GET, request.POST for POST
    return getattr(request, method)
def generate_per_level_fractions(highest_level_ratio: int, num_levels: int = NUM_LEVELS) -> List[float]:
    """Per-level population fractions whose geometric series reaches the target sum.

    Args:
        highest_level_ratio:
            The 1:highest_level_ratio ratio for the highest level; i.e. the
            target sum for the geometric series.
        num_levels:
            The number of levels to calculate the sum over.

    Returns:
        A list of fractions of the population, per-level, ordered so that
        the highest level comes last.
    """
    ratio = calc_geometric_ratio(highest_level_ratio, num_levels)
    # Walk the exponents high-to-low so the highest level lands at the end.
    return [(ratio ** level) / highest_level_ratio
            for level in reversed(range(num_levels))]
def choose_transformations(name):
    """Prompts user with different data transformation options.

    :param name: user's name, interpolated into the checkbox prompt text
    :returns: dict of answers from PyInquirer's ``prompt`` — contains
        'confirm_transformations' and, when confirmed, 'transformations'
    """
    transformations_prompt=[
        {
            'type':'confirm',
            'message':'Would you like to apply some transformations to the file? (Default is no)',
            'name':'confirm_transformations',
            'default':False
        },
        {
            'type':'checkbox',
            'message':f'Ok {name}, let\'s select some transformation before we convert your file:',
            'name':'transformations',
            'choices':[
                {'name':'Change Column Names'},
                {'name':'Change File Name'}
            ],
            # Only shown when the user confirmed the first question.
            'when': lambda answers: answers['confirm_transformations']
        }
    ]
    answers = prompt(questions=transformations_prompt)
    return answers | 32,662 |
def update_list_item_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
    """Updates a list item and returns outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        (human readable, context outputs, raw API response)
    """
    list_id = int(args.get('list_id'))  # type: ignore
    item_id = int(args.get('item_id'))  # type: ignore
    raw_response = client.update_list_item(
        list_id=list_id,
        item_id=item_id,
        type=args.get('type'),
        value=args.get('value'),
        risk=args.get('risk'),
        notes=args.get('notes')
    )
    # Guard clause: an empty response means the update did not happen.
    if not raw_response:
        return f'{INTEGRATION_NAME} - Could not update list item.', {}, raw_response
    title = f'{INTEGRATION_NAME} - List item {item_id} from list {list_id} was updated successfully'
    entry = create_context_result(raw_response, LIST_ITEM_TRANS)
    context = {
        f'{INTEGRATION_CONTEXT_NAME}List(val.ID && val.ID === {list_id}).Item(val.ID === obj.ID)': entry
    }
    # Return data to Demisto
    return tableToMarkdown(title, entry), context, raw_response
def fft(input, inverse=False):
    """Interface with torch FFT routines for 3D signals.

    Example
    -------
    x = torch.randn(128, 32, 32, 32, 2)
    x_fft = fft(x)
    x_ifft = fft(x, inverse=True)

    Parameters
    ----------
    x : tensor
        Complex input for the FFT (last dimension of size 2: real, imag).
    inverse : bool
        True for computing the inverse FFT.

    Raises
    ------
    TypeError
        If x does not have a final dimension 2, i.e. is not complex.

    Returns
    -------
    output : tensor
        Result of FFT or IFFT.
    """
    if not _is_complex(input):
        raise TypeError('The input should be complex (e.g. last dimension is 2)')
    # Pick the forward or inverse 3-D transform, then apply it.
    transform = torch.ifft if inverse else torch.fft
    return transform(input, 3)
def dbdescs(data, dbname):
    """
    return the entire set of information for a specific server/database
    """
    # One description per role; admin/user get read-write, viewer read-only.
    access_by_role = (('admin', 'rw'), ('user', 'rw'), ('viewer', 'ro'))
    return {role: onedesc(data, dbname, role, mode)
            for role, mode in access_by_role}
def format_date(date):
    """Format date to readable format ('%Y-%m-%d %H:%M:%S' -> '%d %b %Y').

    The special value 'N/A' and unparseable strings are returned unchanged.
    """
    if date == 'N/A':
        return date
    try:
        parsed = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
        date = parsed.strftime('%d %b %Y')
    except ValueError:
        # Keep the original value but record the surprise.
        logger.error("Unexpected ValueError while trying to format date -> {}".format(date))
    return date
def favor_attention(query,
                    key,
                    value,
                    kernel_transformation,
                    causal,
                    projection_matrix=None):
    """Computes FAVOR normalized attention.
    Args:
      query: query tensor.
      key: key tensor.
      value: value tensor.
      kernel_transformation: transformation used to get finite kernel features.
      causal: whether attention is causal or not.
      projection_matrix: projection matrix to be used.
    Returns:
      FAVOR normalized attention.
    """
    # Map queries/keys to finite random features (the second argument flags
    # whether the input is the query side).
    query_prime = kernel_transformation(query, True,
                                        projection_matrix)  # [B,L,H,M]
    key_prime = kernel_transformation(key, False, projection_matrix)  # [B,L,H,M]
    # Move the length axis first so the (causal) scans run over dim 0.
    query_prime = query_prime.permute(1, 0, 2, 3)  # [L,B,H,M]
    key_prime = key_prime.permute(1, 0, 2, 3)  # [L,B,H,M]
    value = value.permute(1, 0, 2, 3)  # [L,B,H,D]
    if causal:
        av_attention = causal_numerator(query_prime, key_prime, value)
        attention_normalizer = causal_denominator(query_prime, key_prime)
    else:
        av_attention = noncausal_numerator(query_prime, key_prime, value)
        attention_normalizer = noncausal_denominator(query_prime, key_prime)
    # TODO(kchoro): Add more comments.
    # Restore batch-first layout before normalizing.
    av_attention = av_attention.permute(1, 0, 2, 3)
    attention_normalizer = attention_normalizer.permute(1, 0, 2)
    # Append a trailing singleton axis so the divide broadcasts over the
    # feature dimension.
    attention_normalizer = attention_normalizer.unsqueeze(dim=len(attention_normalizer.shape))
    return av_attention / attention_normalizer | 32,667 |
def estimate_perfomance_plan(sims, ntra, stateinit, destination, plan=list(), plot=False, verbose=True):
    """
    Estimates the performances of two plans and compares them on two scenarios.

    :param list() sims: List of :class:`simulatorTLKT.Simulator`
    :param int ntra: Number of trajectories used to estimate the performances on each scenarios
    :param list(int,float,float) stateinit: [t_index, lat, lon], starting point of the plans
    :param list(int,float,float) destination: [t_index, lat, lon], destination point of the plans
    :param list plan: list of actions to apply
    :param bool plot: if True displays the mean trajectories per scenario
    :param bool verbose: if True verbose results
    :return: mean_arrival_times, var_arrival_times, global_mean_time, variance_globale with length : len(list) = len(sims)
    :rtype: list(float), list(float), float, float
    """
    # NOTE(review): `plan=list()` is a mutable default argument; it is never
    # mutated here, but `plan=None` would be the safer idiom.
    ################### Arrival Time #############################
    meantrajs = []
    mean_arrival_times = []
    var_arrival_times = []
    all_arrival_times = []
    nb_actions = len(plan)
    for _, sim in enumerate(sims):
        arrivaltimes = []
        # One row per trajectory; 3 columns presumably [t_index, lat, lon].
        trajsofsim = np.zeros((ntra, len(sims[0].times), 3))
        for ii in range(ntra):
            traj = []
            sim.reset(stateinit)
            traj.append(list(sim.state))
            # Phase 1: replay the fixed plan of actions.
            compte_action = 0
            while (compte_action < nb_actions):
                action = plan[compte_action]
                compte_action += 1
                sim.doStep(action)
                traj.append(list(sim.state))
            # With an empty plan, take one greedy step toward the destination
            # so prevState/state are populated before the arrival test.
            if nb_actions == 0:
                dist, action = sim.getDistAndBearing(sim.state[1:], destination)
                sim.doStep(action)
                traj.append(list(sim.state))
            atDest, frac = Tree.is_state_at_dest(destination, sim.prevState, sim.state)
            # Phase 2: steer greedily toward the destination until arrival
            # or until the simulation horizon is reached.
            while (not atDest) \
                    and (not Tree.is_state_terminal(sim, sim.state)):
                dist, action = sim.getDistAndBearing(sim.state[1:], destination)
                sim.doStep(action)
                traj.append(list(sim.state))
                atDest, frac = Tree.is_state_at_dest(destination, sim.prevState, sim.state)
            if atDest:
                # Linear interpolation of the crossing time within the step.
                finalTime = sim.times[sim.state[0]] - \
                            (1 - frac) * (sim.times[sim.state[0]] - sim.times[sim.state[0] - 1])
                arrivaltimes.append(finalTime)
                all_arrival_times.append(finalTime)
            else:
                # Never arrived: charge the full horizon as the arrival time.
                finalTime = sim.times[-1]
                arrivaltimes.append(finalTime)
                all_arrival_times.append(finalTime)
            # Pad the whole row with the final state, restore the time index
            # column, then overwrite the prefix with the actual trajectory.
            trajsofsim[ii, :, :] = traj[-1]
            trajsofsim[ii, :, 0] = [i for i in range(len(sim.times))]
            trajsofsim[ii, :len(traj), :] = traj
        meantrajs.append(np.mean(trajsofsim, 0))
        average_scenario = np.mean(arrivaltimes)
        mean_arrival_times.append(average_scenario)
        # Population variance of arrival times within this scenario.
        variance_scenario = 0
        for value in arrivaltimes:
            variance_scenario += (average_scenario - value) ** 2
        variance_scenario = variance_scenario / ntra
        var_arrival_times.append(variance_scenario)
    global_mean_time = np.mean(all_arrival_times)
    # Population variance across all scenarios pooled together.
    variance_globale = 0
    for value in all_arrival_times:
        variance_globale += (global_mean_time - value) ** 2
    variance_globale = variance_globale / len(all_arrival_times)
    if plot:
        basemap_time = sims[0].prepareBaseMap(proj='aeqd', centerOfMap=stateinit[1:])
        plt.title('Mean trajectory for minimal travel time estimation')
        colors = plt.get_cmap("tab20")
        colors = colors.colors[:len(sims)]
        xd, yd = basemap_time(destination[1], destination[0])
        xs, ys = basemap_time(stateinit[2], stateinit[1])
        basemap_time.scatter(xd, yd, zorder=0, c="red", s=100)
        plt.annotate("destination", (xd, yd))
        basemap_time.scatter(xs, ys, zorder=0, c="green", s=100)
        plt.annotate("start", (xs, ys))
        for ii, sim in enumerate(sims):
            sim.plotTraj(meantrajs[ii], basemap_time, color=colors[ii], label="Scen. num : " + str(ii))
        plt.legend()
    if verbose:
        for nb in range(len(sims)):
            print("temps scénario isochrones ", nb, " = ", mean_arrival_times[nb])
            print("variance scénario isochrones = ", var_arrival_times[nb])
        print()
        print("moyenne des temps isochrones = ", global_mean_time)
        print("variance globale des isochrones = ", variance_globale)
    return [global_mean_time] + mean_arrival_times, [variance_globale] + var_arrival_times
def xls_dslx_ir_impl(ctx, src, dep_src_list):
    """The implementation of the 'xls_dslx_ir' rule.

    Converts a DSLX source file to an IR file.

    Args:
      ctx: The current rule's context object.
      src: The source file.
      dep_src_list: A list of source file dependencies.

    Returns:
      DslxModuleInfo provider (forwarded from the 'dep' attribute)
      ConvIRInfo provider (pairs the DSLX source with the converted IR)
      DefaultInfo provider (the IR file as the rule's default output)
    """
    # Run the DSLX -> IR conversion action, then bundle its output with the
    # module info provided by the dependency.
    ir_file = _convert_to_ir(ctx, src, dep_src_list)
    dslx_module_info = ctx.attr.dep[DslxModuleInfo]
    return [
        dslx_module_info,
        ConvIRInfo(
            dslx_source_file = src,
            conv_ir_file = ir_file,
        ),
        DefaultInfo(files = depset([ir_file])),
    ]
def IterElementsWithTag(root, tag, depth=-1):
  """Iterates over the DOM tree and yields elements matching a tag name.

  It's meant to be replacement for `getElementsByTagName`,
  (which does recursive search) but without recursive search
  (nested tags are not supported in histograms files).

  Note: This generator stops going deeper in the tree when it detects
  that there are elements with given tag.

  Args:
    root: XML dom tree.
    tag: Element's tag name.
    depth: Defines how deep in the tree function should search for a match;
      a negative value means unlimited depth.

  Yields:
    xml.dom.minidom.Node: Element matching criteria.
  """
  if depth == 0 and root.nodeType == _ELEMENT_NODE and root.tagName == tag:
    yield root
    return
  # Yield all direct children that match; remember whether any matched so we
  # know whether to descend further.
  # (Fixed: removed the `skipped` counter, which was incremented but never
  # read.)
  had_tag = False
  for child in root.childNodes:
    if child.nodeType == _ELEMENT_NODE and child.tagName == tag:
      had_tag = True
      yield child
  depth -= 1
  # Only recurse when this level produced no matches and depth allows it.
  if not had_tag and depth != 0:
    for child in root.childNodes:
      for match in IterElementsWithTag(child, tag, depth):
        yield match
def pendu_version1(nb_erreur_max: int, new_stat=nouvel_etat):
    """Play one game of hangman (version 1), printing the game to stdout.

    The `new_stat` parameter holds the `nouvel_etat` function so the same
    code can be reused with `nouvel_etat_version2` without duplication.

    :param nb_erreur_max: maximum number of wrong guesses allowed
    :param new_stat: state-update function (word, state, letter) -> new state
    """
    # Setup: pick a random word and start from a fully masked state.
    mot = choice(lire_mots("littre.txt"))
    etat = "_" * len(mot)
    nb_erreur = nb_erreur_max
    # Game loop: stop when out of errors or the word is fully revealed.
    while nb_erreur > 0 and etat != mot:
        print('\n', affiche_etat(etat))
        print(f"Vous pouvez encore faire {nb_erreur} erreurs.")
        lettre = input("Entrez une lettre suivie d'un saut de ligne : ").strip()
        etat_nouveau = new_stat(mot, etat, lettre)
        if etat != etat_nouveau:
            print("Bravo !")
            etat = etat_nouveau
        else:
            print("Dommage...")
            nb_erreur -= 1
    # Outcome: win, loss, or the fallback branch (reachable only when
    # nb_erreur_max was negative to begin with).
    print("\n")
    if etat == mot:
        print(f"Gagné !\nLe mot était bien \'{mot}\'")
    elif nb_erreur == 0:
        print(f"Perdu...\nLe mot était \'{mot}\'")
    else:
        print("Ninja !")
def split_data(images, labels):
    """
    Split data into training (80%), validation (10%), and testing (10%)
    datasets.

    Returns (images_train, images_validate, images_test, labels_train,
    labels_validate, labels_test) as NumPy arrays with categorical labels.

    Assumes that num_covid_points <= num_normal_points and num_virus_points.
    Label encoding: 0 = COVID-19, 1 = Viral pneumonia, 2 = Normal.
    """
    images, labels = shuffle_data_pair(images, labels)

    num_covid_points = sum(map(lambda label: label == 0, labels))

    # Calculate split: 10% of the COVID class for validation and 10% for
    # testing (same absolute counts for the other classes); the non-COVID
    # classes get 10% more training points.
    num_test = int(num_covid_points * 0.1)
    num_covid_train = num_covid_points - num_test * 2
    num_other_train = int(num_covid_train * 1.1)

    # (train, validate, test) points added per class
    num_points_added = [
        [0, 0, 0],  # COVID-19
        [0, 0, 0],  # Viral pneumonia
        [0, 0, 0]   # Normal
    ]

    # Datasets
    images_train = []
    labels_train = []
    images_validate = []
    labels_validate = []
    images_test = []
    labels_test = []

    # Add images and labels to datasets
    notifier.send("    Adding images and labels to dataset...")

    # BUG FIX: this list used to be re-created on every loop iteration, which
    # made the `all(completed_labels)` early-exit below dead code. It must
    # persist across points so the loop stops once every class has filled its
    # train/validate/test quotas (the split itself is unchanged).
    completed_labels = [False, False, False]  # Enough of label added

    for i, label in enumerate(labels):
        print(f"    Point: {i} / {len(labels)}")
        if all(completed_labels):
            break

        for j in range(3):  # 0: COVID-19, 1: Viral pneumonia, 2: Normal
            if completed_labels[j]:
                continue
            if label == j:
                # Add training data
                can_add_training = False
                if j == 0:  # COVID-19
                    if num_points_added[j][0] < num_covid_train:
                        can_add_training = True
                        num_points_added[j][0] += 1
                elif num_points_added[j][0] < num_other_train:  # Not COVID-19
                    can_add_training = True
                    num_points_added[j][0] += 1
                if can_add_training:
                    images_train.append(images[i])
                    labels_train.append(labels[i])
                    break

                # Add validation data
                if num_points_added[j][1] < num_test:
                    num_points_added[j][1] += 1
                    images_validate.append(images[i])
                    labels_validate.append(labels[i])
                    break

                # Add testing data
                if num_points_added[j][2] < num_test:
                    num_points_added[j][2] += 1
                    images_test.append(images[i])
                    labels_test.append(labels[i])
                    break

                # Point couldn't be added anywhere: label is complete
                completed_labels[j] = True
                break

    # Shuffle all data
    notifier.send("    Shuffling data...")
    images_train, labels_train = shuffle_data_pair(
        images_train, labels_train
    )
    images_validate, labels_validate = shuffle_data_pair(
        images_validate, labels_validate
    )
    images_test, labels_test = shuffle_data_pair(
        images_test, labels_test
    )

    if PLOT_LABELS:
        # Plot data frequencies
        plt.hist(labels, bins=3)
        plt.title("Labels")

        plt.hist(labels_train, bins=3)
        plt.title("Train Labels")

        plt.hist(labels_validate, bins=3)
        plt.title("Validate Labels")

        plt.hist(labels_test, bins=3)
        plt.title("Test Labels")

        plt.show()

    # Make labels categorical (one-hot)
    notifier.send("    Making labels categorical: train...")
    labels_train = tf.keras.utils.to_categorical(labels_train)
    notifier.send("    Making labels categorical: validate...")
    labels_validate = tf.keras.utils.to_categorical(labels_validate)
    notifier.send("    Making labels categorical: test...")
    labels_test = tf.keras.utils.to_categorical(labels_test)

    notifier.send("    Converting data to NumPy arrays...")
    return \
        np.array(images_train), np.array(images_validate), np.array(images_test), \
        np.array(labels_train), np.array(labels_validate), np.array(labels_test)
def randomlyInfectRegions(network, regions, age_groups, infected):
    """Randomly pick regions to seed with initial infections.

    :param network: object representing the network of populations
    :type network: A NetworkOfPopulation object
    :param regions: The number of regions to expose (sampled with replacement).
    :type regions: int
    :param age_groups: Age groups to infect
    :type age_groups: list
    :param infected: People to infect
    :type infected: int
    :return: Structure of initially infected regions with number
    :rtype: dict
    """
    node_ids = list(network.graph.nodes())
    return {
        region: {age: infected for age in age_groups}
        for region in random.choices(node_ids, k=regions)
    }
def assert_equal(
    actual: Tuple[int, int],
    desired: Tuple[int, int],
    err_msg: Literal[""],
    verbose: bool,
):
    """
    usage.statsmodels: 1
    """
    # Auto-generated API-usage stub: the docstring records how often this
    # signature was observed (once, in statsmodels). Intentionally no body.
    ...
def timestamp_to_uint64(timestamp):
    """Return the given datetime as integer milliseconds since the epoch."""
    seconds_since_epoch = timestamp.timestamp()
    return int(seconds_since_epoch * 1e3)
async def test_response_handler_no_return_route_raises_error(
    alice_gen, bob, send, dispatcher, message, response
):
    """Sending must raise RuntimeError when the response has no return route."""
    # Alice's transport is stubbed to hand back Bob's packed response.
    alice = alice_gen(partial(send.return_response, bob.pack(response)), dispatcher)
    with pytest.raises(RuntimeError):
        await alice.send_async(message)
def main():
    """CLI entry point: print a number pyramid with N levels taken from argv."""
    args = sys.argv
    if len(args) < 2:
        print('Usage: pyramid_of_numbers_kata.py <number_of_levels>.')
        sys.exit(1)
    raw_levels = args[1]
    if not raw_levels.isnumeric():
        print('Usage: pyramid_of_numbers_kata.py <number_of_levels>,'
              ' where number_of_levels is a positive int.')
        sys.exit(1)
    for line in levels_content(int(raw_levels)):
        print(line)
def test_pd_types_correct_function_call(
    mocker, test_function_call, expected_value, pd_testing_function
):
    """Test that the correct 'sub' assert function is called if expected for the given input type - and none
    of the other functions are called.
    """
    # test_function_call is the function to check has been called
    # expected_value is the dummy value to use when calling h.assert_equal_dispatch, so test_function_call will be called
    # pd_testing_function is the specific pd.testing function that should be used to compare that type
    # patch all the potential functions that can be called by tubular.testing.helpers.assert_equal_dispatch
    for x in potential_assert_functions:
        mocker.patch(x)
    actual_value = expected_value
    msg_value = "test_msg"
    h.assert_equal_dispatch(expected=expected_value, actual=actual_value, msg=msg_value)
    # Resolve the patched mock for the function we expect to have been used.
    getter, attribute = _get_target(test_function_call)
    mocked_function_call = getattr(getter(), attribute)
    assert (
        mocked_function_call.call_count == 1
    ), f"Unexpected number of calls to {test_function_call} -\n  Expected: 1\n  Actual: {mocked_function_call.call_count}"
    call_1_args = mocked_function_call.call_args_list[0]
    call_1_pos_args = call_1_args[0]
    call_1_kwargs = call_1_args[1]
    call_1_expected_pos_arg = (expected_value, actual_value, msg_value)
    assert len(call_1_pos_args) == len(
        call_1_expected_pos_arg
    ), f"Unexpected number of positional args in call to {test_function_call} -\n  Expected: {len(call_1_expected_pos_arg)}\n  Actual: {len(call_1_pos_args)}"
    # pandas objects can't be compared with ==, so use the matching
    # pd.testing function for the first two positional args.
    pd_testing_function(call_1_expected_pos_arg[0], call_1_pos_args[0])
    pd_testing_function(call_1_expected_pos_arg[1], call_1_pos_args[1])
    e = call_1_expected_pos_arg[2]
    a = call_1_pos_args[2]
    assert (
        e == a
    ), f"Unexpected last positional arg in call to {test_function_call} -\n  Expected: {e}\n  Actual: {a}"
    assert (
        call_1_kwargs == {}
    ), f"Unexpected keyword args in call to {test_function_call} -\n  Expected: None\n  Actual: {call_1_kwargs}"
    # get functions that should not have been called
    test_functions_not_call = list(
        set(potential_assert_functions) - set([test_function_call])
    )
    # loop through each one and test it has not been called
    for test_function_not_call in test_functions_not_call:
        getter, attribute = _get_target(test_function_not_call)
        mocked_function_not_call = getattr(getter(), attribute)
        assert (
            mocked_function_not_call.call_count == 0
        ), f"Unexpected number of calls to {test_function_not_call} -\n  Expected: 0\n  Actual: {mocked_function_not_call.call_count}"
def extensible(x):
    """
    Mark a function as extensible by other functions.

    Registers the function's name and returns a wrapper that, on every call,
    first runs the registered "before" extensions, then the function itself,
    then the "after" extensions. The wrapper gains an `extensible` attribute
    (True) and an `extendedby` callable returning the extending functions.
    """
    name = x.__name__
    extensible_functions.append(name)

    @wraps(x)
    def wrapper(*args, **kwargs):
        # Look extensions up on each pass so hooks registered mid-call
        # (by the wrapped function itself) are honoured for the "after" run.
        for ext in extensions.get(name, []):
            if not ext.after:
                ext.func(*args, **kwargs)
        result = x(*args, **kwargs)
        for ext in extensions.get(name, []):
            if ext.after:
                ext.func(*args, **kwargs)
        return result

    wrapper.extensible = True
    wrapper.extendedby = lambda: extensions[name]
    return wrapper
def main():
    """Demonstrate SimpleProgressBar by stepping a 200-item bar."""
    bar = SimpleProgressBar(total=200)
    for step in range(201):
        bar.update(step)
        time.sleep(0.05)
def _validate_user_deploy_steps(task, user_steps, error_prefix=None):
    """Validate the user-specified deploy steps.

    :param task: A TaskManager object
    :param user_steps: a list of deploy steps. A deploy step is a dictionary
        with required keys 'interface', 'step', 'args', and 'priority'::

              { 'interface': <driver_interface>,
                'step': <name_of_deploy_step>,
                'args': {<arg1>: <value1>, ..., <argn>: <valuen>},
                'priority': <priority_of_deploy_step> }

        For example::

              { 'interface': 'bios',
                'step': 'apply_configuration',
                'args': { 'settings': [ { 'foo': 'bar' } ] },
                'priority': 150 }
    :param error_prefix: String to use as a prefix for exception messages, or
        None.
    :raises: InvalidParameterValue if validation of deploy steps fails.
    :raises: InstanceDeployFailure if there was a problem getting the deploy
        steps from the driver.
    :return: validated deploy steps update with information from the driver
    """
    # Fetch every deploy step the driver knows about (including disabled,
    # unsorted ones) and check the user's steps against that full set.
    driver_steps = _get_deployment_steps(task, enabled=False, sort=False)
    return _validate_user_steps(task, user_steps, driver_steps, 'deploy',
                                error_prefix=error_prefix)
def is_period_arraylike(arr):
    """Return whether `arr` is period array-like: a PeriodIndex, an
    object-dtype ndarray/Series of Periods, or anything whose
    `inferred_type` attribute is 'period'."""
    if isinstance(arr, pd.PeriodIndex):
        return True
    if isinstance(arr, (np.ndarray, gt.ABCSeries)):
        return arr.dtype == object and lib.infer_dtype(arr) == 'period'
    return getattr(arr, 'inferred_type', None) == 'period'
def _udld_B74(linLD, wavel=0.8):
    """
    Compare uniform-disk and limb-darkened diameters by fitting a UD model
    to limb-darkened visibilities (Python 2 code: note the print statements).

    linLD ~ 0.5 for R band, Teff=5500K, logg=1 (van Hamme 1993)
    """
    # NOTE(review): the `wavel` parameter is unused; the hard-coded
    # p['wavel'] below is what the computation actually uses — confirm intent.
    B = np.linspace(20,30) # NPOI, Armstrong+ 2001
    p = {'diam':1.5, 'wavel':0.8, 'linLD':linLD} # delta cep
    # plt.figure(1)
    # plt.clf()
    # plt.subplot(211)
    # plt.plot(B, V2ld_B74(B, p))
    # plt.ylabel('V2')
    # Fit a uniform-disk visibility model to the limb-darkened curve,
    # scanning spatial frequency B/lambda.
    ud = {'diam':p['diam']}
    fit = dpfit.leastsqFit(v2ud_fit, B/p['wavel']*1e6, ud, V2ld_B74(B, p))
    plt.plot(B, fit['model'], color='r', linestyle='--')
    print 'UD/LD= %4.3f'%(fit['best']['diam']/p['diam'])
    # -- formula by Claret & Bloemen
    print 'Claret & Bloemen formula:'
    print 'UD/LD= %4.3f'%(((1-linLD/3.)/(1-7*linLD/15.))**(-0.5))
    return
def reset_password_step_2(token):
    """Processing the second step of changing the password (password change).

    Validates the reset token, then lets the user submit a new password.
    Redirects back to step 1 on an invalid/expired token.
    """
    email = confirm_token_reset_password(token)
    if not email:
        # Token invalid or expired: restart the reset flow.
        return redirect(url_for('web_pages.reset_password_step_1'))
    form = EditPassword()
    if form.validate_on_submit():
        password = form.password.data
        session = create_session()
        user = session.query(User).filter(User.email == email).first()
        if not user:
            # Token decoded to an email with no matching account.
            abort(404)
        user.set_password(password)
        session.merge(user)
        session.commit()
        # Flash message is user-facing (Russian): "Password changed successfully".
        flash('Пароль успешно изменен', 'success')
        return redirect(url_for('web_pages.login_page'))
    return render_template('reset_password_step_2.html', form=form)
def FStarTypeRole(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """An inline role to highlight F* types.

    Standard docutils role signature; `options`/`content` use the
    conventional shared mutable defaults, hence the pylint disable.
    """
    #pylint: disable=dangerous-default-value, unused-argument
    # NOTE(review): docutils roles conventionally return ([nodes], [messages]);
    # this returns a single node — confirm the consuming Sphinx/docutils
    # version accepts that.
    return nodes.literal(typ, rawtext, text, lineno, inliner, options=options, content=content)
def matobj2dict(matobj):
    """Recursively convert a nested mat object to nested Python dictionaries.

    Arguments:
        matobj {sio.matlab.mio5_params.mat_struct} -- nested mat object

    Returns:
        dict -- a nested dictionary
    """
    result = {}
    for name in matobj._fieldnames:
        value = matobj.__dict__[name]
        if isinstance(value, sio.matlab.mio5_params.mat_struct):
            result[name] = matobj2dict(value)
        elif isinstance(value, np.ndarray) and name == "move":
            # One entry per array cell, keyed like "move_0 1" (the tuple
            # index string with parentheses/commas stripped).
            for idx, cell in np.ndenumerate(value):
                key = name + str(idx).replace(",", "").replace(")", "").replace("(", "_")
                result[key] = matobj2dict(cell)
        elif name == "skel":
            # The skeleton field is a sequence of mat structs.
            result[name] = [matobj2dict(value[pos]) for pos in range(len(value))]
        else:
            result[name] = value
    return result
def download_from_vt(client: vt.Client, file_hash: str) -> bytes:
    """
    Fetch a file's raw contents from VirusTotal by its hash.

    :param vt.Client client: the VT client
    :param str file_hash: the file hash
    :rtype: bytes
    :return: the downloaded data
    :raises ValueError: in case of any error
    """
    try:
        data = io.BytesIO()
        client.download_file(file_hash, data)
        data.seek(0, 0)
        return data.read()
    except (IOError, vt.APIError) as err:
        # Normalize transport/API failures to a single exception type.
        raise ValueError(str(err)) from err
def divide_and_conquer(x, k, mul):
    """
    Divide and conquer method for polynomial expansion.

    x is a 2d tensor of size (n_classes, n_roots).
    The objective is to obtain the k first coefficients of the expanded
    polynomial.
    """
    # NOTE(review): despite the docstring, `x` is indexed like a sequence of
    # tensors below (x[0].dim(), [t[:half] for t in x]) — confirm the actual
    # input type against callers. The parameter `k` is unused in this body.
    to_merge = []
    # Repeatedly pair up the first half with the second half via `mul`,
    # halving the leading dimension each pass.
    while x[0].dim() > 1 and x[0].size(0) > 1:
        size = x[0].size(0)
        half = size // 2
        if 2 * half < size:
            # Odd count: set the trailing element of each tensor aside and
            # fold it back in once the halving loop is done.
            to_merge.append([t[-1] for t in x])
        x = mul([t[:half] for t in x],
                [t[half: 2 * half] for t in x])
    for row in to_merge:
        x = mul(x, row)
    x = torch.cat(x)
    return x
def calculate_operating_pressure(feed_state_block=None, over_pressure=0.15,
                                 water_recovery=0.5, NaCl_passage=0.01, solver=None):
    """
    estimate operating pressure for RO unit model given the following arguments:
    feed_state_block: the state block of the RO feed that has the non-pressure state
    variables initialized to their values (default=None)
    over_pressure: the amount of operating pressure above the brine osmotic pressure
    represented as a fraction (default=0.15)
    water_recovery: the mass-based fraction of inlet H2O that becomes permeate
    (default=0.5)
    NaCl_passage: the mass-based fraction of inlet NaCl that becomes permeate
    (default=0.01)
    solver: solver object to be used (default=None)
    """
    t = ConcreteModel()  # create temporary model
    prop = feed_state_block.config.parameters
    t.brine = prop.build_state_block([0], default={})
    # specify state block: brine flows are the feed flows minus what leaves
    # with the permeate (mass balance at the assumed recovery/passage).
    t.brine[0].flow_mass_phase_comp['Liq', 'H2O'].fix(
        value(feed_state_block.flow_mass_phase_comp['Liq', 'H2O']) * (1 - water_recovery))
    t.brine[0].flow_mass_phase_comp['Liq', 'NaCl'].fix(
        value(feed_state_block.flow_mass_phase_comp['Liq', 'NaCl']) * (1 - NaCl_passage))
    t.brine[0].pressure.fix(101325)  # valid when osmotic pressure is independent of hydraulic pressure
    t.brine[0].temperature.fix(value(feed_state_block.temperature))
    # calculate osmotic pressure
    # since properties are created on demand, we must touch the property to create it
    t.brine[0].pressure_osm
    # solve state block
    results = solve_indexed_blocks(solver, [t.brine])
    check_solve(results)
    # operating pressure = brine osmotic pressure plus the over-pressure margin
    return value(t.brine[0].pressure_osm) * (1 + over_pressure)
def parse_pypi_index(text):
    """Parse the PyPI simple-index HTML and return all package names.

    Parameters
    ----------
    text : str
        the html of the website (https://pypi.org/simple/)

    Returns
    -------
    List[str]
        the list of packages
    """
    document = BeautifulSoup(text, "lxml")
    return [anchor.get_text() for anchor in document.find_all("a")]
def fetch(data_dir, dest="wmt14"):
    """
    Fetches most data from the WMT14 shared task.

    Creates the `dest` if it doesn't exist.

    Args:
        data_dir (str): absolute path to the dir where datasets are stored
        dest (str): name for dir where WMT14 datasets will be extracted

    Returns:
        final_dir (str): absolute path where WMT14 datasets were extracted
    """
    wmt_dir = os.path.join(data_dir, dest)
    utils.create_folder(wmt_dir)
    # Download every corpus listed in CORPORA into the target folder.
    for filename, url in CORPORA.items():
        utils.urlretrieve(url, os.path.join(wmt_dir, filename))
    return wmt_dir
def _get_score_measure(func, alphabeta, color, board, alpha, beta, depth, pid):
    """Evaluate a board via `_get_score` while recording a measurement for `pid`.

    Thin wrapper: bumps the measurement counter for this id, then delegates
    unchanged to `_get_score`.
    """
    measure(pid)
    return _get_score(func, alphabeta, color, board, alpha, beta, depth, pid)
def mousePressed():
    """
    Return True if the mouse has been left-clicked since the
    last time mousePressed was called, and False otherwise.
    """
    global _mousePressed
    # Guard clause: nothing to consume, leave the flag untouched.
    if not _mousePressed:
        return False
    # Consume the pending click so subsequent calls report False.
    _mousePressed = False
    return True
def __make_responsive_for_list(root: object, parent: list) -> None:
    """Recursively wrap the elements of `parent` so mutations notify `root`.

    Args:
        root (object): the main object that should be responsive
        parent (list): the current list being processed in the hierarchy
    """
    for index, value in enumerate(parent):
        current_type = type(value)
        # Non-builtin instances and dicts get a DictWrapper; plain lists get
        # a ListWrapper; builtin scalars are left untouched.
        if not current_type.__module__ == "builtins" or isinstance(value, dict):
            wrapped_value = DictWrapper(value, make_responsive, root=root)
            # NOTE(review): recursion descends into the *unwrapped* value and
            # uses the dict path even for arbitrary objects — confirm
            # __make_responsive_for_dict handles both.
            __make_responsive_for_dict(root, value)
            wrapped_value.add_observer(root)
            parent[index] = wrapped_value
        elif isinstance(value, list):
            wrapped_value = ListWrapper(value, make_responsive, root=root)
            __make_responsive_for_list(root, value)
            wrapped_value.add_observer(root)
            parent[index] = wrapped_value
def set_logging_config(logging_config: Dict) -> None:
    """Apply *logging_config* to the standard logging system via ``dictConfig``.

    Warning:
        This function modifies the configuration of the standard logging
        system for all loggers, and might interfere with custom logger
        configurations.
    """
    dictConfig(logging_config)
def variable_on_cpu(name, shape, initializer):
    """
    Create (or fetch) a TensorFlow variable pinned to host (CPU) memory.

    Next we concern ourselves with graph creation.
    However, before we do so we
    must introduce a utility function ``variable_on_cpu()``
    used to create a variable in CPU memory.
    """
    # BUG FIX: the device scope said '/gpu:0' while the helper's name and
    # comment promise CPU placement; pin to '/cpu:0' as documented.
    with tf.device('/cpu:0'):
        # Create or get apropos variable
        var = tf.get_variable(name=name, shape=shape, initializer=initializer)
    return var
def adaptive_generate_association_rules(patterns, confidence_threshold):
    """
    Given a set of frequent itemsets, return a dict of association rules
    in the form {(left): (right)}.
    The item-id check against 2048 means only multimodal rules are kept
    (antecedent items below 2048, consequent item at or above it).
    """
    missed = 0
    rules = defaultdict(set)
    for itemset_key in patterns:
        if len(itemset_key) <= 1:
            continue
        items = list(itemset_key)  # the itemset I with n elements
        # Rotate each element into the last slot once, keeping the prefix
        # sorted, so every item plays the consequent role of I -> i.
        for pos in range(len(items) - 1, -1, -1):
            items[pos], items[-1] = items[-1], items[pos]
            antecedent = tuple(items[:-1])
            if max(items[:-1]) < 2048 <= items[-1]:
                if antecedent in patterns:
                    confidence = patterns[itemset_key] / patterns[antecedent]
                    if confidence >= confidence_threshold:
                        rules[antecedent].add(items[-1])
                else:
                    missed += 1
                    print("missed", antecedent)
    print('%d freq missed.' % missed)
    return rules
def cli():
    """
    Rebuild the docker container

    :return: Subprocess call result
    """
    # Tear the stack down first, then rebuild the images from scratch.
    return subprocess.call("docker-compose down && docker-compose build", shell=True)
def test_api_challenge_patch_admin():
    """Can a user patch /api/v1/challenges/<challenge_id> if admin"""
    app = create_ctfd()
    with app.app_context():
        gen_challenge(app.db)
        with login_as_user(app, "admin") as client:
            # Value is sent as a string; the API should coerce it to int.
            r = client.patch(
                "/api/v1/challenges/1", json={"name": "chal_name", "value": "200"}
            )
            assert r.status_code == 200
            assert r.get_json()["data"]["value"] == 200
    destroy_ctfd(app)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.