content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def make_coro(func):
    """Wrap a plain callable so it can be awaited as a coroutine."""
    async def runner(*args, **kwargs):
        """Invoke the wrapped function synchronously and return its result."""
        return func(*args, **kwargs)
    return runner
def eliminate(values):
    """Apply the eliminate strategy to a Sudoku puzzle.

    The eliminate strategy says that if a box has a value assigned, then none
    of the peers of that box can have the same value.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with the assigned values eliminated from peers
    """
    # Boxes with exactly one candidate are solved.
    solved_boxes = [box for box in values if len(values[box]) == 1]
    for box in solved_boxes:
        digit = values[box]
        # Fix: the original re-checked ``len(digit) == 1`` here, which is
        # already guaranteed by the filter above — the redundant test is gone.
        # Remove the solved digit from every peer's candidate string.
        # NOTE(review): ``peers`` is a module-level mapping box -> peer boxes,
        # defined elsewhere in this project.
        for peer_box in peers[box]:
            values[peer_box] = values[peer_box].replace(digit, '')
    return values
def sqlpool_blob_auditing_policy_update(
        cmd,
        instance,
        workspace_name,
        resource_group_name,
        sql_pool_name,
        state=None,
        blob_storage_target_state=None,
        storage_account=None,
        storage_endpoint=None,
        storage_account_access_key=None,
        storage_account_subscription_id=None,
        is_storage_secondary_key_in_use=None,
        retention_days=None,
        audit_actions_and_groups=None,
        log_analytics_target_state=None,
        log_analytics_workspace_resource_id=None,
        event_hub_target_state=None,
        event_hub_authorization_rule_id=None,
        event_hub=None,
        is_azure_monitor_target_enabled=None,
        blob_auditing_policy_name=None):
    """
    Updates a sql pool blob auditing policy. Custom update function to apply parameters to instance.
    """
    # Thin wrapper: forwards every option unchanged to the shared
    # _audit_policy_update helper (defined elsewhere), pinning the audit
    # category to 'SQLSecurityAuditEvents'.
    # NOTE(review): ``blob_auditing_policy_name`` is accepted but never
    # forwarded — presumably consumed by the CLI framework; confirm.
    _audit_policy_update(
        cmd=cmd,
        instance=instance,
        workspace_name=workspace_name,
        resource_group_name=resource_group_name,
        sql_pool_name=sql_pool_name,
        state=state,
        blob_storage_target_state=blob_storage_target_state,
        storage_account=storage_account,
        storage_endpoint=storage_endpoint,
        storage_account_access_key=storage_account_access_key,
        storage_account_subscription_id=storage_account_subscription_id,
        is_storage_secondary_key_in_use=is_storage_secondary_key_in_use,
        retention_days=retention_days,
        category_name='SQLSecurityAuditEvents',
        log_analytics_target_state=log_analytics_target_state,
        log_analytics_workspace_resource_id=log_analytics_workspace_resource_id,
        event_hub_target_state=event_hub_target_state,
        event_hub_authorization_rule_id=event_hub_authorization_rule_id,
        event_hub_name=event_hub,
        audit_actions_and_groups=audit_actions_and_groups,
        is_azure_monitor_target_enabled=is_azure_monitor_target_enabled)
    # The helper mutates ``instance`` in place; return it for the CLI pipeline.
    return instance
def find_unique_ID(list_of_input_smpls):
    """Attempt to determine a unique ID shared among all input
    sample names/IDs, via a largest substring function performed
    combinatorially exhaustively pairwise among the input list.

    Parameters
    ----------
    list_of_input_smpls : list

    Returns
    -------
    list
        Unique set of all possible found shared uid's
    """
    # Bug fix: the original assigned the single-sample result and then
    # unconditionally overwrote it with the pairwise pass, which is empty
    # (and crashed) for a one-element list. Return early instead.
    if len(list_of_input_smpls) == 1:
        return list_of_input_smpls
    # ``largest_substr`` and ``itl`` (itertools alias) are module-level names
    # defined elsewhere in this project.
    return list({
        largest_substr(a, b)
        for (a, b) in itl.combinations(list_of_input_smpls, 2)
    })
import math
def cartesian_to_polar(xy):
    """Convert :class:`np.ndarray` `xy` to polar coordinates `r` and `theta`.

    Args:
        xy (:class:`np.ndarray`): x,y coordinates
    Returns:
        r, theta (tuple of float): step-length and angle
    """
    # Last column is y, everything before it is x (assumes a single pair —
    # math.sqrt/atan2 only accept scalars after the squeeze).
    assert xy.ndim == 2, f"Dimensions are {xy.ndim}, expecting 2"
    x_part, y_part = np.split(xy, [-1], axis=1)
    x_part = np.squeeze(x_part)
    y_part = np.squeeze(y_part)
    radius = math.sqrt(x_part * x_part + y_part * y_part)
    angle = math.atan2(y_part, x_part)
    return radius, angle
def return_manifold(name):
    """
    Returns a list of possible manifolds with name 'name'.

    Args:
        name: manifold name, str.

    Returns:
        list of manifolds, name, metrics, retractions
    """
    m_list = []
    descr_list = []
    # Manifolds that only support the euclidean metric share one code path;
    # the class is looked up lazily so unmatched names touch nothing.
    if name in ('ChoiMatrix', 'DensityMatrix', 'HermitianMatrix'):
        for metric in ['euclidean']:
            m_list.append(getattr(manifolds, name)(metric=metric))
            descr_list.append((name, metric))
    if name == 'PositiveCone':
        for metric in ['log_euclidean', 'log_cholesky']:
            m_list.append(manifolds.PositiveCone(metric=metric))
            descr_list.append((name, metric))
    if name == 'StiefelManifold':
        # Stiefel additionally enumerates every retraction per metric.
        for metric in ['euclidean', 'canonical']:
            for retraction in ['svd', 'cayley', 'qr']:
                m_list.append(manifolds.StiefelManifold(metric=metric,
                                                        retraction=retraction))
                descr_list.append((name, metric, retraction))
    return m_list, descr_list
def change_to_rgba_array(image, dtype="uint8"):
    """Converts an RGB array into RGBA with the alpha value opacity maxed."""
    arr = image
    # Grayscale (H, W) -> (H, W, 1)
    if arr.ndim == 2:
        arr = arr.reshape(arr.shape + (1,))
    # Single channel -> replicate into RGB
    if arr.shape[2] == 1:
        arr = arr.repeat(3, axis=2)
    # RGB -> append a fully opaque alpha channel
    if arr.shape[2] == 3:
        alpha = np.full(arr.shape[:2] + (1,), 255, dtype=dtype)
        arr = np.append(arr, alpha, axis=2)
    return arr
def spacetime_lookup(ra, dec, time=None, buffer=0, print_table=True):
    """
    Check for overlapping TESS ovservations for a transient. Uses the Open SNe Catalog for
    discovery/max times and coordinates.

    ------
    Inputs
    ------
    ra : float or str
        ra of object
    dec : float or str
        dec of object
    time : float
        reference time to use, must be in MJD
    buffer : float
        overlap buffer time in days

    -------
    Options
    -------
    print_table : bool
        if true then the lookup table is printed

    -------
    Returns
    -------
    tr_list : list
        list of ra, dec, and sector that can be put into tessreduce.
    """
    if time is None:
        print('!!! WARNING no MJD time specified, using default of 59000')
        time = 59000
    # Accept sexagesimal strings and convert to decimal degrees.
    if type(ra) == str:
        c = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))
        ra = c.ra.deg
        dec = c.dec.deg
    # Ask the TESS pointing tool which sectors/cameras/CCDs saw this position.
    outID, outEclipLong, outEclipLat, outSecs, outCam, outCcd, outColPix, \
        outRowPix, scinfo = focal_plane(0, ra, dec)
    sec_times = pd.read_csv(package_directory + 'sector_mjd.csv')
    if len(outSecs) > 0:
        # Sector numbers are 1-based; the CSV rows are 0-based.
        ind = outSecs - 1
        secs = sec_times.iloc[ind]
        # Window edges of each sector relative to the reference epoch (days).
        disc_start = secs['mjd_start'].values - time
        disc_end = secs['mjd_end'].values - time
        covers = []
        differences = []
        tr_list = []
        tab = []
        for i in range(len(disc_start)):
            ds = disc_start[i]
            de = disc_end[i]
            if (ds-buffer < 0) & (de + buffer > 0):
                # Reference time falls inside the (buffered) sector window.
                cover = True
                dif = 0
            elif (de+buffer < 0):
                # Sector ended before the reference time; dif is negative.
                cover = False
                dif = de
            elif (ds-buffer > 0):
                # Sector starts after the reference time; dif is positive.
                cover = False
                dif = ds
            covers += [cover]
            differences += [dif]
            tab += [[secs.Sector.values[i], cover, dif]]
            tr_list += [[ra, dec, secs.Sector.values[i], cover]]
        if print_table:
            print(tabulate(tab, headers=['Sector', 'Covers', 'Time difference \n(days)'], tablefmt='orgtbl'))
        return tr_list
    else:
        print('No TESS coverage')
        return None
from typing import Union
from datetime import date, datetime
import time
def utc2local(utc: Union[date, datetime]) -> Union[datetime, date]:
"""Returns the local datetime
Args:
utc: UTC type date or datetime.
Returns:
Local datetime.
"""
epoch = time.mktime(utc.timetuple())
offset = datetime.fromtimestamp(epoch) - datetime.utcfromtimestamp(epoch)
return utc + offset | 34997f08a8ca7e2156849bb6be346964cc3fadcd | 3,637,608 |
import sys,time
from datetime import datetime
from datetime import timedelta
def getAsDateTimeStr(value, offset=0, fmt=_formatTimeStr()):
    """ return time as 2004-01-10T00:13:50.000Z """
    # NOTE(review): the default for ``fmt`` is evaluated once at import time
    # via the module-level ``_formatTimeStr()`` helper (not visible here).
    if (not isinstance(offset, str)):
        # struct_time / tuple input: format directly (offset is not applied).
        if isinstance(value, (tuple, time.struct_time,)):
            return time.strftime(fmt, value)
        # Numeric epoch seconds: shift by offset (seconds) and format as UTC.
        if isinstance(value, (int, float,)):
            secs = time.gmtime(value+offset)
            return time.strftime(fmt, secs)
        if isinstance(value, str):
            # Round-trip the string through strptime to normalize it.
            try:
                value = time.strptime(value, fmt)
                return time.strftime(fmt, value)
            except Exception as details:
                info_string = formattedException(details=details)
                sys.stderr.write('ERROR :: getDateTimeTuple Could not parse "%s".\n%s\n' % (value, info_string))
                # Fall back to "now" (plus offset) when parsing fails.
                secs = time.gmtime(time.time()+offset)
                return time.strftime(fmt, secs)
        elif (isinstance(value, datetime)):
            # datetime input: offset is interpreted as DAYS here (timedelta's
            # first positional arg) — asymmetric with the seconds-based
            # branches above; flag for review.
            if (offset is not None):
                value += timedelta(offset)
            ts = time.strftime(fmt, value.timetuple())
            return ts
    else:
        # Falls through returning None after reporting the bad offset type.
        sys.stderr.write('ERROR :: offset must be a numeric type rather than string type.\n')
import math
def gvisc(P, T, Z, grav):
    """Function to Calculate Gas Viscosity in cp"""
    # P: pressure, psia; T: temperature, °R; Z: gas compressibility factor;
    # grav: gas specific gravity.
    mol_weight = 28.964 * grav
    x_coef = 3.448 + 986.4 / T + 0.01009 * mol_weight
    y_exp = 2.447 - 0.2224 * x_coef
    gas_density = (1.4926 / 1000) * P * mol_weight / Z / T
    k_coef = (9.379 + 0.01607 * mol_weight) * T ** 1.5 / (209.2 + 19.26 * mol_weight + T)
    return k_coef * math.exp(x_coef * gas_density ** y_exp) / 10000
def get_closest_mesh_normal_to_pt(mesh, pt):
    """
    Finds the closest vertex normal to the point.

    Parameters
    ----------
    mesh: :class: 'compas.datastructures.Mesh'
    pt: :class: 'compas.geometry.Point'

    Returns
    ----------
    :class: 'compas.geometry.Vector'
        The closest normal of the mesh.
    """
    vkey = get_closest_mesh_vkey_to_pt(mesh, pt)
    normal = mesh.vertex_normal(vkey)
    # vertex_normal yields an x, y, z triple; wrap it in a Vector.
    return Vector(normal[0], normal[1], normal[2])
from typing import Coroutine
import asyncio
import json
async def apiDiscordAssignrolesDelete(cls:"PhaazebotWeb", WebRequest:ExtendedRequest) -> Response:
    """
    Default url: /api/discord/assignroles/delete

    Deletes one self-assignable role entry for a guild, after verifying the
    requesting web user is a member of that guild with administrator or
    manage-guild permission.
    """
    Data:WebRequestContent = WebRequestContent(WebRequest)
    await Data.load()
    # get required vars
    guild_id:str = Data.getStr("guild_id", UNDEFINED, must_be_digit=True)
    assignrole_id:str = Data.getStr("assignrole_id", UNDEFINED, must_be_digit=True)
    # checks
    if not guild_id:
        return await apiMissingData(cls, WebRequest, msg="missing or invalid 'guild_id'")
    if not assignrole_id:
        return await apiMissingData(cls, WebRequest, msg="missing or invalid 'assignrole_id'")
    PhaazeDiscord:"PhaazebotDiscord" = cls.BASE.Discord
    Guild:discord.Guild = discord.utils.get(PhaazeDiscord.guilds, id=int(guild_id))
    if not Guild:
        return await cls.Tree.Api.Discord.errors.apiDiscordGuildUnknown(cls, WebRequest)
    # get user info
    AuthDiscord:AuthDiscordWebUser = await authDiscordWebUser(cls, WebRequest)
    if not AuthDiscord.found:
        return await apiMissingAuthorisation(cls, WebRequest)
    # get member
    CheckMember:discord.Member = Guild.get_member(int(AuthDiscord.User.user_id))
    if not CheckMember:
        return await cls.Tree.Api.Discord.errors.apiDiscordMemberNotFound(cls, WebRequest, guild_id=guild_id, user_id=AuthDiscord.User.user_id)
    # check permissions
    if not (CheckMember.guild_permissions.administrator or CheckMember.guild_permissions.manage_guild):
        return await cls.Tree.Api.Discord.errors.apiDiscordMissingPermission(cls, WebRequest, guild_id=guild_id, user_id=AuthDiscord.User.user_id)
    # get assign roles
    res_assignroles:list = await getDiscordServerAssignRoles(PhaazeDiscord, guild_id=guild_id, assignrole_id=assignrole_id)
    if not res_assignroles:
        return await cls.Tree.Api.Discord.Assignroles.errors.apiDiscordAssignRoleNotExists(cls, WebRequest, assignrole_id=assignrole_id)
    AssignRoleToDelete:DiscordAssignRole = res_assignroles.pop(0)
    # Remove the entry from the database (parameterized query).
    cls.BASE.PhaazeDB.deleteQuery("""
        DELETE FROM `discord_assignrole` WHERE `guild_id` = %s AND `id` = %s""",
        (AssignRoleToDelete.guild_id, AssignRoleToDelete.assignrole_id)
    )
    # logging
    GuildSettings:DiscordServerSettings = await getDiscordSeverSettings(PhaazeDiscord, guild_id, prevent_new=True)
    log_coro:Coroutine = loggingOnAssignroleDelete(PhaazeDiscord, GuildSettings,
        Deleter=CheckMember,
        assign_role_trigger=AssignRoleToDelete.trigger,
    )
    # Fire-and-forget the audit-log coroutine on the bot's own event loop.
    asyncio.ensure_future(log_coro, loop=cls.BASE.DiscordLoop)
    cls.BASE.Logger.debug(f"(API/Discord) Assignroles: {guild_id=} deleted {assignrole_id=}", require="discord:role")
    return cls.response(
        text=json.dumps(dict(msg="Assignroles: Deleted entry", deleted=AssignRoleToDelete.trigger, status=200)),
        content_type="application/json",
        status=200
    )
def algo_reg_deco(func):
    """
    Decorator for making registry of functions
    """
    # Register under the function's own name in the module-level
    # ``algorithms`` map, then hand the function back unchanged so the
    # decorator is transparent.
    algorithms[func.__name__] = func
    return func
from typing import List
import operator
def find_top_slices(metrics: List[metrics_for_slice_pb2.MetricsForSlice],
                    metric_key: Text,
                    statistics: statistics_pb2.DatasetFeatureStatisticsList,
                    comparison_type: Text = 'HIGHER',
                    min_num_examples: int = 10,
                    num_top_slices: int = 10,
                    rank_by: Text = 'EFFECT_SIZE'):
    """Finds top-k slices.

    Args:
      metrics: List of slice metrics protos. We assume that the metrics have
        MetricValue.confidence_interval field populated. This will be populated
        when the metrics computed with confidence intervals enabled.
      metric_key: Name of the metric based on which significance testing is done.
      statistics: Data statistics used to configure AutoSliceKeyExtractor.
      comparison_type: Type of comparison indicating if we are looking for slices
        whose metric is higher (`HIGHER`) or lower (`LOWER`) than the metric
        of the base slice (overall dataset).
      min_num_examples: Minimum number of examples that a slice should have.
      num_top_slices: Number of top slices to return.
      rank_by: Indicates how the slices should be ordered in the result.

    Returns:
      List of ordered slices.
    """
    assert comparison_type in ['HIGHER', 'LOWER']
    assert min_num_examples > 0
    assert 0 < num_top_slices
    assert rank_by in ['EFFECT_SIZE', 'PVALUE']
    metrics_dict = {
        slicer_lib.deserialize_slice_key(slice_metrics.slice_key): slice_metrics
        for slice_metrics in metrics
    }
    # The empty tuple keys the "overall" (unsliced) metrics; split it out so
    # the loop below only iterates over real slices.
    overall_slice_metrics = metrics_dict[()]
    del metrics_dict[()]
    boundaries = auto_slice_key_extractor._get_bucket_boundaries(statistics)  # pylint: disable=protected-access
    overall_metrics_dict = _get_metrics_as_dict(overall_slice_metrics)
    to_be_sorted_slices = []
    for slice_key, slice_metrics in metrics_dict.items():
        slice_metrics_dict = _get_metrics_as_dict(slice_metrics)
        num_examples = slice_metrics_dict['example_count'].unsampled_value
        if num_examples < min_num_examples:
            continue
        # Prune non-interesting slices.
        if np.isnan(slice_metrics_dict[metric_key].unsampled_value):
            continue
        # Skip slices on the "wrong side" of the overall metric for the
        # requested comparison direction.
        if comparison_type == 'HIGHER':
            comparison_fn = operator.le
        else:
            comparison_fn = operator.ge
        if comparison_fn(slice_metrics_dict[metric_key].unsampled_value,
                         overall_metrics_dict[metric_key].unsampled_value):
            continue
        # Only consider statistically significant slices.
        is_significant, pvalue = _is_significant_slice(
            slice_metrics_dict[metric_key].unsampled_value,
            slice_metrics_dict[metric_key].sample_standard_deviation,
            slice_metrics_dict['example_count'].unsampled_value,
            overall_metrics_dict[metric_key].unsampled_value,
            overall_metrics_dict[metric_key].sample_standard_deviation,
            overall_metrics_dict['example_count'].unsampled_value, comparison_type)
        if not is_significant:
            continue
        # Format the slice info (feature names, values) in the proto into a
        # slice key. Transformed (bucketized) features are mapped back to the
        # original feature name and a human-readable bucket range.
        transformed_slice_key = []
        for (feature, value) in slice_key:
            if feature.startswith(
                    auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX):
                feature = feature[len(auto_slice_key_extractor
                                      .TRANSFORMED_FEATURE_PREFIX):]
                value = _bucket_to_range(value, boundaries[feature])
            transformed_slice_key.append((feature, value))
        slice_key = slicer_lib.stringify_slice_key(tuple(transformed_slice_key))
        # Compute effect size for the slice.
        effect_size = _compute_effect_size(
            slice_metrics_dict[metric_key].unsampled_value,
            slice_metrics_dict[metric_key].sample_standard_deviation,
            overall_metrics_dict[metric_key].unsampled_value,
            overall_metrics_dict[metric_key].sample_standard_deviation)
        to_be_sorted_slices.append(
            SliceComparisonResult(slice_key, num_examples,
                                  slice_metrics_dict[metric_key].unsampled_value,
                                  overall_metrics_dict[metric_key].unsampled_value,
                                  pvalue, effect_size))
    # Rank the slices: largest effect size first, or smallest p-value first.
    ranking_fn, reverse = operator.attrgetter('effect_size'), True
    if rank_by == 'PVALUE':
        ranking_fn, reverse = operator.attrgetter('pvalue'), False
    result = sorted(
        to_be_sorted_slices, key=ranking_fn, reverse=reverse)[:num_top_slices]
    return result
def parse_texts(texts):
    """
    Create a set of parsed documents from a set of texts.
    Parsed documents are sequences of tokens whose embedding vectors can be looked up.

    :param texts: text documents to parse
    :type texts: sequence of strings
    :return: parsed documents
    :rtype: sequence of spacy.Doc
    """
    parser = _load_text_parser()
    return parser.pipe(texts)
import _json
def dict_to_string(d):
    """Return the passed dict of items converted to a json string.

    All items should have the same type. ``None`` values pass through as JSON
    null; every other value is serialized via its ``to_data()`` method.

    Args:
        d (dict): Dictionary to convert
    Returns:
        str: JSON version of dict
    """
    # Bug fix: the original used the internal C-accelerator module ``_json``,
    # which exposes no ``dumps`` — the public ``json`` module is required.
    import json
    j = {key: (None if value is None else value.to_data())
         for key, value in d.items()}
    return json.dumps(j)
def domains(request):
    """
    A page with number of services and layers faceted on domains.

    Queries the configured search backend (Solr or Elasticsearch) for a
    facet-pivot over domain_name/service_id, and renders the aggregator
    index template with the raw JSON plus overall layer/service counts.
    """
    url = ''
    # rows=0: we only want the facet counts, not the documents themselves.
    query = '*:*&facet=true&facet.limit=-1&facet.pivot=domain_name,service_id&wt=json&indent=true&rows=0'
    if settings.SEARCH_TYPE == 'elasticsearch':
        url = '%s/select?q=%s' % (settings.SEARCH_URL, query)
    if settings.SEARCH_TYPE == 'solr':
        url = '%s/solr/hypermap/select?q=%s' % (settings.SEARCH_URL, query)
    LOGGER.debug(url)
    # NOTE(review): ``urllib2`` is Python 2 only; this view predates a py3 port.
    response = urllib2.urlopen(url)
    data = response.read().replace('\n', '')
    # stats
    layers_count = Layer.objects.all().count()
    services_count = Service.objects.all().count()
    template = loader.get_template('aggregator/index.html')
    # Legacy Django RequestContext/template.render(context) idiom.
    context = RequestContext(request, {
        'data': data,
        'layers_count': layers_count,
        'services_count': services_count,
    })
    return HttpResponse(template.render(context))
def load_test_data(path, var, years=slice('2017', '2018')):
    """
    Args:
        path: Path to nc files
        var: variable. Geopotential = 'z', Temperature = 't'
        years: slice for time window
    Returns:
        dataset: Concatenated dataset for 2017 and 2018
    """
    assert var in ['z', 't'], 'Test data only for Z500 and T850'
    ds = xr.open_mfdataset(f'{path}/*.nc', combine='by_coords')[var]
    # Z is at 500 hPa, T at 850 hPa; files without a level dimension raise
    # ValueError and are passed through unchanged.
    level = 500 if var == 'z' else 850
    try:
        ds = ds.sel(level=level).drop('level')
    except ValueError:
        pass
    return ds.sel(time=years)
def encode(state, b=None):
    """
    Encode a base-*b* array of integers into a single integer.
    This function uses a `big-endian`__ encoding scheme. That is, the most
    significant bits of the encoded integer are determined by the left-most
    end of the unencoded state.

    >>> from pyinform.utils import *
    >>> encode([0,0,1], b=2)
    1
    >>> encode([0,1,0], b=3)
    3
    >>> encode([1,0,0], b=4)
    16
    >>> encode([1,0,4], b=5)
    29

    If *b* is not provided (or is None), the base is inferred from the state
    with a minimum value of 2.

    >>> from pyinform.utils import *
    >>> encode([0,0,2])
    2
    >>> encode([0,2,0])
    6
    >>> encode([1,2,1])
    16

    See also :py:func:`.decode`.

    .. __: https://en.wikipedia.org/wiki/Endianness#Examples

    :param sequence state: the state to encode
    :param int b: the base in which to encode
    :return: the encoded state
    :rtype: int
    :raises ValueError: if the state is empty
    :raises InformError: if an error occurs in the ``inform`` C call
    """
    # Pack the state into a contiguous int32 buffer so its raw memory can be
    # handed to the C library through ctypes.
    xs = np.ascontiguousarray(state, dtype=np.int32)
    data = xs.ctypes.data_as(POINTER(c_int))
    if xs.size == 0:
        raise ValueError("cannot encode an empty array")
    if b is None:
        # Infer the base from the largest symbol, clamped to a minimum of 2.
        b = max(2, np.amax(xs)+1)
    e = ErrorCode(0)
    encoding = _inform_encode(data, c_ulong(xs.size), c_int(b), byref(e))
    # Raises InformError if the C call reported a problem.
    error_guard(e)
    return encoding
from typing import Optional
from typing import Set
def get_equivalent(curie: str, cutoff: Optional[int] = None) -> Set[str]:
    """Get equivalent CURIEs.

    :param curie: compact URI whose equivalence class is wanted
    :param cutoff: optional maximum path length passed to the graph search
    :return: set of reachable (equivalent) CURIEs; empty if none are found
    """
    canonicalizer = Canonicalizer.get_default()
    r = canonicalizer.single_source_shortest_path(curie=curie, cutoff=cutoff)
    # ``r`` may be falsy when the CURIE is unknown; normalize to an empty set.
    return set(r or [])
def _standardize_input(y_true, y_pred, multioutput):
"""
This function check the validation of the input
input should be one of list/tuple/ndarray with same shape and not be None
input will be changed to corresponding 2-dim ndarray
"""
if y_true is None or y_pred is None:
raise ValueError("The input is None.")
if not isinstance(y_true, (list, tuple, np.ndarray, pd.DataFrame)):
raise ValueError("Expected array-like input."
"Only list/tuple/ndarray/pd.DataFrame are supported")
if isinstance(y_true, (list, tuple)):
y_true = np.array(y_true)
if isinstance(y_pred, (list, tuple)):
y_pred = np.array(y_pred)
if isinstance(y_true, pd.DataFrame) and isinstance(y_pred, pd.DataFrame):
y_true = y_true.to_numpy()
y_pred = y_pred.to_numpy()
original_shape = y_true.shape[1:]
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
else:
y_true = y_true.reshape((y_true.shape[0], -1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
else:
y_pred = y_pred.reshape((y_pred.shape[0], -1))
if y_true.shape[0] != y_pred.shape[0]:
raise ValueError("y_true and y_pred have different number of samples "
"({0}!={1})".format(y_true.shape[0], y_pred.shape[0]))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
allowed_multioutput_str = ('raw_values', 'uniform_average',
'variance_weighted')
if isinstance(multioutput, str):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. "
"You provided multioutput={!r}"
.format(allowed_multioutput_str, multioutput))
return y_true, y_pred, original_shape | c536e777c40a5ce7c886b20f61fac7f20341c20b | 3,637,621 |
import logging
def disk_detach(vmdk_path, vm):
    """detach disk (by full path) from a vm and return None or err(msg)

    :param vmdk_path: full datastore path of the VMDK to detach
    :param vm: pyVmomi VM object the disk should be detached from
    """
    device = findDeviceByPath(vmdk_path, vm)
    if not device:
        # Could happen if the disk attached to a different VM - attach fails
        # and docker will insist to sending "unmount/detach" which also fails.
        # Or Plugin retrying operation due to socket errors #1076
        # Return success since disk is anyway not attached
        logging.warning("*** Detach disk={0} not found. VM={1}".format(
            vmdk_path, vm.config.uuid))
        return None
    # Delegate the actual reconfigure task to the internal helper.
    return disk_detach_int(vmdk_path, vm, device)
def children_shape_ranks(rank, n):
    """
    Return the partition of leaves associated
    with the children of the tree of rank `rank`, and
    the ranks of each child tree.
    """
    part = []
    # Walk the partitions of n, subtracting the number of trees each
    # partition accounts for, until `rank` lands inside one of them.
    for prev_part in partitions(n):
        num_trees_with_part = num_tree_pairings(prev_part)
        if rank < num_trees_with_part:
            part = prev_part
            break
        rank -= num_trees_with_part
    else:
        # Loop exhausted without a break: rank exceeded the total count
        # (only legal for the trivial single-leaf tree).
        if n != 1:
            raise ValueError("Rank is out of bounds.")
    grouped_part = group_partition(part)
    child_ranks = []
    next_child = 0
    # Unrank each group of equal-sized children as a combination with
    # replacement over the shapes of that size.
    for g in grouped_part:
        next_child += len(g)
        k = g[0]
        # TODO precompute vector up front
        rest_children = part[next_child:]
        rest_num_pairings = num_tree_pairings(rest_children)
        shapes_comb_rank = rank // rest_num_pairings
        g_shape_ranks = Combination.with_replacement_unrank(
            shapes_comb_rank, num_shapes(k), len(g)
        )
        child_ranks += g_shape_ranks
        # Remainder ranks the remaining (smaller) children groups.
        rank %= rest_num_pairings
    return part, child_ranks
def url_mapper(url, package):
    """
    In a package.json, the "url" field is a redirection to a package download
    URL published somewhere else than on the public npm registry.
    We map it to a download url.
    """
    if not url:
        return package
    package.download_urls.append(url)
    return package
import random
def electricPotential(n, V_SD_grid, V_G_grid):
    """
    Function to compute the electric potential of the QDot.

    :param n: the number of electrons in the dot
    :param V_SD_grid: the 2d array of source-drain voltage values
    :param V_G_grid: the 2d array of gate voltage values
    :return: The Electric Potential for adding the nth electron to the dot
    """
    # arbitrary random formula used to increase diamond width as more
    # electrons are added.
    # Bug fix: the module imports ``random`` (the module), so the callable is
    # ``random.random()`` — calling the module itself raised TypeError.
    E_N = E_C*(((n)**2-(n-1)**2)/n*5+random.random()/9*n)
    # E_C, N_0, e, C_S and C_G are module-level constants defined elsewhere.
    return (n - N_0 - 1/2) * E_C - (E_C / e) * (C_S * V_SD_grid + C_G * V_G_grid) + E_N
def mettre_a_jour_uids(nom_fichier, organisateurs, uids):
    """Update the UID,EMAIL CSV file from the dictionary.

    :param nom_fichier: path of the CSV file to (re)write
    :param organisateurs: meeting-id -> organizer info dicts
    :param uids: known uid -> email mapping, updated in place
    :return: the (possibly extended) uids mapping
    """
    nouveaux_uids = False
    # Collect any organizer UIDs not yet known.
    for id_reunion in organisateurs:
        if organisateurs[id_reunion]["id_organisateur"] not in uids:
            uids[organisateurs[id_reunion]["id_organisateur"]] = organisateurs[id_reunion]["email_organisateur"]
            nouveaux_uids = True
    # Rewrite the whole file only when something was added.
    if nouveaux_uids:
        with open(nom_fichier, "w", encoding="utf-8") as fichier:
            for uid in uids:
                fichier.write(
                    "{:s},{:s}\n".format(
                        uid,
                        uids[uid],
                    )
                )
    return uids
import warnings
def _spectrogram(signal, dB=True, log_prefix=20, log_reference=1,
                 yscale='linear', unit=None,
                 window='hann', window_length=1024, window_overlap_fct=0.5,
                 cmap=mpl.cm.get_cmap(name='magma'), ax=None):
    """Plot the magnitude spectrum versus time.

    See pyfar.line.spectogram for more information.
    """
    # check input
    if not isinstance(signal, Signal):
        raise TypeError('Input data has to be of type: Signal.')
    _check_time_unit(unit)
    _check_axis_scale(yscale, 'y')
    if window_length > signal.n_samples:
        raise ValueError("window_length exceeds signal length")
    if np.prod(signal.cshape) > 1:
        warnings.warn(("Using only the first channel of "
                       f"{np.prod(signal.cshape)}-channel signal."))
    # take only the first channel of time data
    first_channel = tuple(np.zeros(len(signal.cshape), dtype='int'))
    # get spectrogram
    frequencies, times, spectrogram = dsp.spectrogram(
        signal[first_channel], window, window_length, window_overlap_fct)
    # get magnitude data in dB (eps avoids log10(0))
    if dB:
        eps = np.finfo(float).eps
        spectrogram = log_prefix*np.log10(
            np.abs(spectrogram) / log_reference + eps)
    # auto detect the time unit
    if unit is None:
        unit = _time_auto_unit(times[..., -1])
    # set the unit (scale the time axis accordingly)
    if unit == 'samples':
        times *= signal.sampling_rate
    else:
        factor, unit = _deal_time_units(unit)
        times = times * factor
    # plot the data
    _, ax = _prepare_plot(ax)
    ax.pcolormesh(times, frequencies, spectrogram, cmap=cmap,
                  shading='gouraud')
    # Adjust axes:
    ax.set_ylabel('Frequency in Hz')
    ax.set_xlabel(f'Time in {unit}')
    ax.set_xlim((times[0], times[-1]))
    ax.set_ylim((max(20, frequencies[1]), signal.sampling_rate/2))
    # color limits: clamp the QuadMesh to a 100 dB window around the maximum
    if dB:
        for PCM in ax.get_children():
            if type(PCM) == mpl.collections.QuadMesh:
                break
        ymax = np.nanmax(spectrogram)
        ymin = ymax - 90
        ymax = ymax + 10
        PCM.set_clim(ymin, ymax)
    if yscale == 'log':
        # symlog keeps a linear region near zero frequency
        ax.set_yscale('symlog')
        ax.yaxis.set_major_locator(LogLocatorITAToolbox())
        ax.yaxis.set_major_formatter(LogFormatterITAToolbox())
    ax.grid(ls='dotted', color='white')
    plt.tight_layout()
    return ax, spectrogram
import pprint
def _merge_cwlinputs(items_by_key, input_order, parallel):
    """Merge multiple cwl records and inputs, handling multiple data items.

    Special cases:
    - Single record but multiple variables (merging arrayed jobs). Assign lists
      of variables to the record.
    """
    items_by_key = _maybe_nest_bare_single(items_by_key, parallel)
    if parallel == "multi-combined":
        items_by_key, input_order = _concat_records(items_by_key, input_order)
    # Distinct item counts observed across plain variables and across records;
    # both are expected to collapse to a single consistent count.
    var_items = set([_item_count(items_by_key[tuple(k.split("__"))])
                     for (k, t) in input_order.items() if t == "var"])
    rec_items = set([_item_count(items_by_key[k]) for (k, t) in input_order.items() if t == "record"])
    if var_items:
        num_items = var_items
        if len(num_items) == 2 and 1 in num_items:
            # Mixed counts where one is a singleton: re-check whether the
            # larger count is actually a single nested input.
            num_items.remove(1)
            items_by_key_test = _check_for_single_nested(num_items.pop(), items_by_key, input_order)
            var_items = set([_item_count(items_by_key_test[tuple(k.split("__"))])
                             for (k, t) in input_order.items() if t == "var"])
            num_items = var_items
        assert len(num_items) == 1, "Non-consistent variable data counts in CWL input:\n%s" % \
            (pprint.pformat(items_by_key))
        items_by_key, num_items = _nest_vars_in_rec(var_items, rec_items, input_order, items_by_key, parallel)
    else:
        num_items = rec_items
        assert len(num_items) == 1, "Non-consistent record data counts in CWL input:\n%s" % \
            (pprint.pformat(items_by_key))
    target_items = num_items.pop()
    out = [{} for _ in range(target_items)]
    for (cwl_key, cwl_type) in input_order.items():
        if cwl_type == "var":
            # Double-underscore keys encode nested paths.
            cwl_key = tuple(cwl_key.split("__"))
        cur_vals = items_by_key[cwl_key]
        if _is_nested_single(cur_vals, target_items):
            cur_vals = [[x] for x in cur_vals[0]]
        for i, cur_val in enumerate(cur_vals):
            if isinstance(cwl_key, (list, tuple)):
                # nested batches with records
                if (parallel.startswith(("batch", "multi-parallel")) and
                        isinstance(out[i], (list, tuple))):
                    for j in range(len(out[i])):
                        out[i][j] = _update_nested(list(cwl_key), cur_val, out[i][j], allow_overwriting=True)
                else:
                    out[i] = _update_nested(list(cwl_key), cur_val, out[i], allow_overwriting=True)
            elif out[i] == {}:
                out[i] = cur_val
            else:
                # Handle single non-batched records
                if isinstance(cur_val, (list, tuple)) and len(cur_val) == 1:
                    cur_val = cur_val[0]
                assert isinstance(cur_val, dict), (cwl_key, cur_val)
                for k, v in cur_val.items():
                    out[i] = _update_nested([k], v, out[i], allow_overwriting=True)
    return out
def polar_distance(x1, x2):
    """
    Given two arrays of numbers x1 and x2, pairs the cells that are the
    closest and provides the pairing matrix index: x1(index(1,:)) should be as
    close as possible to x2(index(2,:)). The function outputs the average of
    the absolute value of the differences abs(x1(index(1,:))-x2(index(2,:))).

    Parameters
    ----------
    x1:
        vector 1
    x2:
        vector 2

    Returns
    -------
    d:
        minimum distance between d
    index:
        the permutation matrix
    """
    x1 = np.reshape(x1, (1, -1), order="F")
    x2 = np.reshape(x2, (1, -1), order="F")
    n1 = x1.size
    n2 = x2.size
    # Angular distance matrix: arccos(cos(.)) folds differences into [0, pi].
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order="F")))
    n_pairs = np.min([n1, n2])
    index = np.zeros((n_pairs, 2), dtype=int)
    if n_pairs > 1:
        # Greedy matching: repeatedly take the globally closest pair and
        # knock out its row and column.
        for k in range(n_pairs):
            col_mins = np.min(diffmat, axis=0)
            row_of_min = np.argmin(diffmat, axis=0)
            best_col = np.argmin(col_mins)
            best_row = row_of_min[best_col]
            index[k, :] = [best_col, best_row]
            diffmat[best_row, :] = float("inf")
            diffmat[:, best_col] = float("inf")
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        d = np.min(diffmat)
        flat_pos = np.argmin(diffmat)
        index = np.array([1, flat_pos]) if n1 == 1 else np.array([flat_pos, 1])
    return d, index
def _calc_data_point_locations(num_points, x_values=None):
"""Returns the x-axis location for each of the data points to start at.
Note: A numpy array is returned so that the overloaded "+" operator can be
used on the array.
The x-axis locations are scaled by x_values if it is provided, or else the
x-axis locations are evenly spaced. In either case, the x-axis locations
will always be in the range [1, num_points].
"""
if x_values is None:
# Evenly space the x-axis locations.
x_locs = np.arange(1, num_points + 1)
else:
if len(x_values) != num_points:
raise ValueError("The number of x-axis values must match the "
"number of data points.")
# Scale to the range [1, num_points]. Taken from
# http://www.heatonresearch.com/wiki/Range_Normalization
x_min = min(x_values)
x_max = max(x_values)
x_range = x_max - x_min
n_range = num_points - 1
x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
for x_val in x_values])
return x_locs | 645af74a2547e25add5e7d7b0d8292568933c177 | 3,637,630 |
def is_base(base_pattern, str):
    """
    base_pattern is a compiled python3 regex.
    str is a string object.
    return True if the string match the base_pattern or False if it is not.
    """
    # NOTE: the result is actually a truthy ``re.Match`` or ``None``, which
    # callers treat as a boolean.
    end = len(str)
    return base_pattern.match(str, 0, end)
def stations_by_river(stations):
    """Group MonitoringStation objects by the river they are on.

    Returns a dict mapping each river name to a list of the distinct
    stations on that river; duplicates in *stations* are skipped.
    """
    rivers = {}
    for station in stations:
        bucket = rivers.setdefault(station.river, [])
        # only record each station once per river
        if station not in bucket:
            bucket.append(station)
    return rivers
def dQ_dY(time):
    """Derivative of the nutation/precession transformation matrix with
    respect to the Y coordinate of the CIP in the GCRS.

    The matrix is Q(t) = R3(-E) @ R2(-d) @ R3(E) @ R3(s); by the product
    rule the derivative is one term per rotation factor, each weighted by
    the chain-rule derivative of its angle (E, d or s) with respect to Y.
    """
    # Rotation matrices evaluated at the current angles
    R3_E = R3(E(time))
    R3_s = R3(s(time))
    R2_md = R2(-d(time))
    R3_mE = R3(-E(time))
    # Derivatives of the rotation matrices with respect to their angle
    dR3_s = dR3(s(time))
    dR3_E = dR3(E(time))
    dR3_mE = dR3(-E(time))
    dR2_md = dR2(-d(time))
    return (
        # negated chain-rule factors come from the negated angles (-E, -d)
        dR3_mE @ R2_md @ R3_E @ R3_s * (-dE_dY(time))
        + R3_mE @ dR2_md @ R3_E @ R3_s * (-dd_dY(time))
        + R3_mE @ R2_md @ dR3_E @ R3_s * (dE_dY(time))
        + R3_mE @ R2_md @ R3_E @ dR3_s * (ds_dY(time))
    )
def calculate(x: int, y: int = 1, operation: str = None) -> int:
    """Combine two numbers by addition or subtraction.

    Parameters:
        x (int): the first operand
        y (int, optional): the second operand (default ``1``)
        operation (str, optional): pass ``"subtract"`` to subtract;
            any other value (including ``None``) adds

    Returns:
        int: the computed sum or difference
    """
    return x - y if operation == "subtract" else x + y
def get_power_state(instance):
    """Return the power state of the received instance.

    :param instance: nova.objects.instance.Instance
    :return: nova.compute.power_state
    """
    return manage.VBoxManage.show_vm_info(instance).get(constants.VM_POWER_STATE)
def toBoolean(val, default=True):
    """Convert CSV string values to Python bool.

    An empty (or whitespace-only) string maps to *default*. Recognized
    truthy values: true/t/yes/y/1/on; falsy: false/f/no/n/none/0.
    Returns None for any unrecognized value (previously this happened
    implicitly by falling off the end of the function).
    """
    normalized = str(val).strip().lower()
    if normalized == "":
        return default
    if normalized in {"true", "t", "yes", "y", "1", "on"}:
        return True
    if normalized in {"false", "f", "no", "n", "none", "0"}:
        return False
    return None
from typing import Union
def cache_put(
    connection: 'Connection', cache: Union[str, int], key, value,
    key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Put a value with a given key to cache, overwriting any existing value.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry; any supported type,
    :param value: value to store under the key,
    :param key_hint: (optional) Ignite data type to convert the key to,
    :param value_hint: (optional) Ignite data type to convert the value to,
    :param binary: (optional) True to keep the value in binary form
     (defaults to False),
    :param query_id: (optional) client-generated value echoed back in
     response.query_id; a random value is used when omitted,
    :return: API result data object: zero status if the value was written,
     non-zero status plus an error description otherwise.
    """
    key_type = key_hint or AnyDataObject
    value_type = value_hint or AnyDataObject
    query_struct = Query(
        OP_CACHE_PUT,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_type),
            ('value', value_type),
        ],
        query_id=query_id,
    )
    payload = {
        'hash_code': cache_id(cache),
        'flag': 1 if binary else 0,
        'key': key,
        'value': value,
    }
    return query_struct.perform(connection, payload)
def _process_rows(app, sheet_name, rows, names_map, lang=None):
    """Apply one worksheet of translation rows to *app*.

    Complements get_bulk_app_sheets_by_name() and
    get_bulk_app_single_sheet_by_name() in
    corehq/apps/translations/app_translations/download.py, which create
    these worksheets.

    :param app: the application being translated
    :param sheet_name: tab name of the sheet being processed,
        e.g. "menu1", "menu1_form1", or "Menus_and_forms"
    :param rows: the rows in the worksheet
    :param names_map: maps sheet_name to module/form unique_id so a
        module/form can be found even if it moved since the worksheet
        was created
    :param lang: the language the app is being translated into
    :return: a list of (message-level, text) error tuples; empty on success
    """
    if not sheet_name or not rows:
        return []
    if is_modules_and_forms_sheet(sheet_name):
        return BulkAppTranslationModulesAndFormsUpdater(
            app, names_map, lang=lang).update(rows)
    unique_id = names_map.get(sheet_name)
    if is_module_sheet(sheet_name):
        try:
            updater = BulkAppTranslationModuleUpdater(
                app, sheet_name, unique_id, lang=lang)
        except ModuleNotFoundException:
            return [(
                messages.error,
                _('Invalid menu in row "%s", skipping row.') % sheet_name
            )]
        return updater.update(rows)
    if is_form_sheet(sheet_name):
        try:
            updater = BulkAppTranslationFormUpdater(
                app, sheet_name, unique_id, lang=lang)
        except FormNotFoundException:
            return [(
                messages.error,
                _('Invalid form in row "%s", skipping row.') % sheet_name
            )]
        return updater.update(rows)
    return [(
        messages.error,
        _('Did not recognize "%s", skipping row.') % sheet_name
    )]
def parse_line(line, metric):
    """Parse statistics from one experiment-log line, keyed by sampler type."""
    if "top-k" in line:
        prefix = "top-k"
    elif "bottom-k" in line:
        prefix = "bottom-k"
    else:
        prefix = "ml"
    return f"{prefix}.{metric}", parse_csv(line)
from typing import Tuple
def get_adjusted_pvalues(pvals: pd.Series, fdr_thresh: float = 0.05) -> Tuple[pd.Series, float]:
    """Control the false discovery rate for a series of p-values.

    Given an unsorted series of p-values and an FDR threshold, returns
    (1) the FDR (adjusted p-value) associated with each input p-value and
    (2) the p-value cutoff for the given FDR.

    Reference: Storey, J. D., & Tibshirani, R. (2003). Statistical
    significance for genomewide studies. PNAS, 100(16), 9440-9445.
    https://doi.org/10.1073/pnas.1530509100
    """
    m = pvals.size
    order = np.argsort(pvals)          # indices that sort the p-values
    sorted_p = pvals.values[order]
    adjusted = np.full(len(sorted_p), np.nan, dtype=np.float64)
    crit_p = 0
    # Walk from the largest p-value downward, enforcing monotonicity of the
    # adjusted values and recording the largest p-value whose FDR clears
    # the threshold.
    crossed = False
    adjusted[-1] = sorted_p[-1]
    for i in range(m - 2, -1, -1):
        fdr = m * sorted_p[i] / (i + 1)     # expected false positives / discoveries
        adjusted[i] = min(fdr, adjusted[i + 1])
        if fdr <= fdr_thresh and not crossed:
            crit_p = sorted_p[i]
            crossed = True
    # Undo the sort so values line up with the original index.
    adjusted = adjusted[np.argsort(order)]
    adj_series = pd.Series(index=pvals.index, data=adjusted, name='adjusted_pval')
    return adj_series, crit_p
def solve_nonogram(constraints):
    """Solve a nonogram board and return its solutions as a list."""
    solution = solve_easy_nonogram(constraints)
    return [solution]
import os
import json
import importlib
def _ensure_module_path(pack):
    """Create the package folders (with __init__.py files) for a dotted
    module path like ``foo.bar.baz``; return True on success."""
    if "." not in pack["module"]:
        return True
    folders = pack["module"].split(".")[:-1]
    path = "lib/%s" % "/".join(folders)
    try:
        os.makedirs(path, exist_ok=True)
        current_path = "lib"
        for folder in folders:
            current_path = "%s/%s" % (current_path, folder)
            init_file = "%s/__init__.py" % current_path
            if not os.path.exists(init_file):
                open(init_file, "w").close()
    except Exception as e:
        print("[ERROR] Unable to create path %s - %s" % (path, e))
        return False
    return True


def _download_pack(pack):
    """Download one library file, write it under lib/ and verify it imports;
    return True on success, False (after printing the error) otherwise."""
    import urllib.request  # BUG FIX: the original called the Py2-only urllib2, which was never imported
    print(">> Downloading library: %s" % pack["name"])
    print(" > Attribution: %s" % pack["attrib"])
    if not _ensure_module_path(pack):
        return False
    try:
        rq = urllib.request.urlopen(pack["url"])
    except Exception as e:
        print("[ERROR] %s" % e)
        print("[ERROR] Please report this to the developers."
              " Attempted URL: %s" % pack["url"])
        print("")
        return False
    try:
        # BUG FIX: urlopen returns bytes; decode before the text replace/write
        data = rq.read().decode("utf-8").replace("\r\n", "\n")
        with open("lib/%s" % pack["filename"], "w") as fh:
            fh.write(data)
    except Exception as e:
        print("[ERROR] Unable to write file: %s" % e)
        print("[ERROR] Do you have write access to this file?")
        print("")
        return False
    try:
        importlib.import_module("lib.%s" % pack["module"])
    except Exception as e:
        print("[ERROR] Unable to import module: %s" % e)
        print("[ERROR] Please report this to the developers.")
        print("")
        return False
    return True


def get_libs():
    """
    Get all of the libraries defined in lib/definitions.

    This is called automatically when the package is imported. Returns a
    summary dict with counts of attempted, downloaded, failed and
    already-present libraries.
    """
    print(">> Checking for libraries to download..")
    definitions = os.listdir("lib/definitions")
    tried = downloaded = failed = exists = 0
    definitions.remove("__init__.py")
    for filename in definitions:
        if not filename.endswith(".json"):
            print("Unknown definition file type: %s" % filename)
            continue  # BUG FIX: previously fell through and tried to parse the file anyway
        try:
            with open("lib/definitions/%s" % filename, "r") as fh:
                packs = json.load(fh)["packages"]
        except Exception as e:
            # BUG FIX: the original formatted two placeholders with a single value,
            # which raised TypeError inside the error handler
            print("[ERROR] Unable to load definitions file %s - %s" % (filename, e))
            continue
        for pack in packs:
            if os.path.exists("lib/%s" % pack["filename"]):
                exists += 1
                continue
            tried += 1
            if _download_pack(pack):
                downloaded += 1
            else:
                failed += 1
    if not tried:
        print(">> All libraries are present. Nothing to do.")
    else:
        print("")
        print(">> Done - %s failed / %s succeeded" % (failed, downloaded))
        print("")
    return {"tried": tried, "downloaded": downloaded,
            "failed": failed, "exists": exists}
import scipy
def downsample_data(data, scale_factor, order):
    """
    Downsample (or upsample) an array by spline interpolation.

    :param data: input ndarray
    :param scale_factor: zoom factor (scalar or per-axis sequence); < 1 shrinks
    :param order: spline interpolation order (0 = nearest, 1 = linear, ...)
    :return: resampled ndarray

    TODO: Scikit-image has a transform module that works better; this
    function should have the option to use either.
    """
    # BUG FIX: scipy.ndimage.interpolation is a long-deprecated namespace;
    # the supported spelling is scipy.ndimage.zoom.
    from scipy import ndimage
    return ndimage.zoom(data, scale_factor, order=order, mode="constant")
def uwid(string):
    """Return the display width of *string* in terminal columns."""
    if not PY3:
        # Python 2: work on a unicode object, dropping undecodable bytes
        string = string.decode('utf-8', 'ignore')
    return sum(map(utf_char_width, string))
def _quote_if_needed(item):
    """Wrap *item* (or its part after the first '=') in double quotes when it
    contains a space and is not already quoted; otherwise return it unchanged."""
    if " " not in item or '"' in item:
        return item
    eq = item.find("=")
    if eq != -1:
        # quote only the value part, e.g. --sfmt="TIFF (unstitched, 3D)"
        return item[0 : eq + 1] + '"' + item[eq + 1 :] + '"'
    return '"' + item + '"'


def check_double_quote(inpstring):
    """
    Check if some strings need a double quote (a string containing spaces must
    be wrapped in double quotes). E.g.: --sfmt="TIFF (unstitched, 3D)"

    Input:
        inpstring: input string or list of strings
    Output:
        newstring: the string (or list of strings) corrected by quoting
        where necessary
    """
    # Previously the list and scalar branches duplicated the same logic;
    # both now share _quote_if_needed.
    if isinstance(inpstring, list):
        return [_quote_if_needed(item) for item in inpstring]
    return _quote_if_needed(inpstring)
import os
import requests
from bs4 import BeautifulSoup
import re
import urllib
def get_wheel_index_data(py_version, platform_version, url=torch_nightly_wheel_index, override_file=torch_nightly_wheel_index_override):
    """Build ``{package: {version: wheel_url}}`` for one python/platform combo.

    Reads the nightly wheel index HTML (or a local override file when it
    exists and is non-empty) and keeps only wheels whose python tag equals
    *py_version* and platform tag equals *platform_version*.
    """
    from collections import defaultdict  # BUG FIX: defaultdict was used without being imported
    if os.path.isfile(override_file) and os.stat(override_file).st_size:
        with open(override_file) as f:
            page = f.read()
    else:
        r = requests.get(url)
        r.raise_for_status()
        page = r.text
    soup = BeautifulSoup(page, 'html.parser')
    data = defaultdict(dict)
    wheel_re = re.compile(r"([a-z]*)-(.*)-(.*)-(.*)-(.*)\.whl")
    for link in soup.find_all('a'):
        match = wheel_re.search(link.text)
        if match is None:
            # BUG FIX: non-wheel links previously crashed on .groups() of None
            continue
        pkg, version, py, py_m, platform = match.groups()
        version = urllib.parse.unquote(version)
        if py == py_version and platform == platform_version:
            full_url = os.path.join(torch_wheel_nightly_base, link.text)
            data[pkg][version] = full_url
    return data
def get_linear_schedule_with_warmup(
    num_warmup_steps, num_training_steps, last_epoch=-1
):
    """
    Create a schedule whose learning rate grows linearly from 0 to the
    optimizer's initial lr over *num_warmup_steps*, then decays linearly
    back to 0 by *num_training_steps*.

    Args:
        num_warmup_steps (:obj:`int`): number of warmup steps.
        num_training_steps (:obj:`int`): total number of training steps.
        last_epoch (:obj:`int`, `optional`, defaults to -1): index of the
            last epoch when resuming training.

    Return:
        a function handle that builds `torch.optim.lr_scheduler.LambdaLR`
        with the appropriate schedule.
    """
    def lr_lambda(current_step: int):
        # warmup phase: linear ramp 0 -> 1
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # decay phase: linear ramp 1 -> 0, clamped at 0 past the end
        remaining = float(num_training_steps - current_step)
        span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / span)

    return partial(lr_scheduler.LambdaLR, lr_lambda=lr_lambda, last_epoch=last_epoch)
import base64
def search_image_targets_for_tag(trust_data: dict, image: Image):
    """
    Look up the sha256 digest (as a hex string) for *image*'s tag in the
    notary `trust_data`; returns None when the tag is absent.
    """
    if image.tag not in trust_data:
        return None
    encoded_digest = trust_data[image.tag]["hashes"]["sha256"]
    return base64.b64decode(encoded_digest).hex()
import os
import re
def convert_gene_ids(geneList, target):
    """
    Take a list of gene ids (int) and return the requested field for each.

    Normally the database would be used; this script exists for example
    when a species has not yet been entered in the db.

    :param geneList: list/array of integer NCBI gene ids
    :param target: 'taxid' or 'symbol'
    :return: list aligned with geneList; None where a gene was not found
    """
    if target not in ('taxid', 'symbol'):
        raise Exception("Invalid target")
    geneInfoFile = os.path.join(os.path.split(os.path.abspath(__file__))[0], "gene_info.db")
    if not os.path.exists(geneInfoFile):
        raise Exception("ERROR: cannot find gene info file")
    if not isinstance(geneList, np.ndarray):
        geneList = np.array(geneList)
    toReturn = np.array([None] * len(geneList))
    found = 0
    # BUG FIX: the original used Py2-only file iteration (.next(), 'rU' mode)
    # and never closed the file; use a context manager and next().
    with open(geneInfoFile) as geneInfoFID:
        next(geneInfoFID)  # skip the header line
        for record in geneInfoFID:
            record = record.rstrip("\n").split("\t")
            # skip comment lines and malformed records
            # (gene_info layout: tax_id, GeneID, Symbol, LocusTag, Synonyms,
            #  dbXrefs, chromosome, map_location, description, type_of_gene,
            #  nomenclature fields..., Modification_date -- 15 columns)
            if re.search(r"^\#", record[0]) or len(record) != 15:
                continue
            geneID = int(record[1])
            indx = np.where(geneList == geneID)[0]
            if len(indx) == 0:
                continue
            found += 1
            indx = indx[0]
            if target == 'taxid':
                toReturn[indx] = int(record[0])
            elif target == 'symbol':
                toReturn[indx] = record[2]
    toReturn = toReturn.tolist()
    print("convert_gene_ids: %s/%s genes found." % (found, len(toReturn)))
    return toReturn
def calculate_intersection(a: "BoundingBox", b: "BoundingBox") -> int:
    """Calculate the intersection area of two bounding boxes.

    :param BoundingBox a: The first bounding box.
    :param BoundingBox b: The second bounding box.
    :returns: The area of overlap between ``a`` and ``b`` (0 when disjoint).
    :rtype: int
    """
    left = max(a.upper_left_corner.x, b.upper_left_corner.x)
    right = min(
        a.upper_left_corner.x + a.size.width, b.upper_left_corner.x + b.size.width
    )
    top = max(a.upper_left_corner.y, b.upper_left_corner.y)
    bottom = min(
        a.upper_left_corner.y + a.size.height, b.upper_left_corner.y + b.size.height
    )
    # BUG FIX: clamp each axis separately. Previously boxes disjoint in BOTH
    # axes multiplied two negative extents into a positive "intersection".
    return max(right - left, 0) * max(bottom - top, 0)
import fcntl
import os
import socket
import struct
def get_linux_ip(eth):
    """Return the IPv4 address of network interface *eth* (Linux only).

    Uses the SIOCGIFADDR (0x8915) ioctl on a throwaway UDP socket; the
    address occupies bytes 20..24 of the returned ifreq buffer.
    """
    assert os.name == 'posix', NotLinuxSystemError('不是Linux系统')
    # BUG FIX: struct.pack('256s', ...) requires bytes on Python 3
    name = eth[:15].encode('utf-8') if isinstance(eth, str) else eth[:15]
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # BUG FIX: slice the 4 address bytes *before* inet_ntoa -- the original
        # sliced the dotted-quad string, returning '' for any normal address.
        packed = fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', name))
    finally:
        s.close()
    return socket.inet_ntoa(packed[20:24])
def build_nn_model(input_shape):
    """Generate NN model

    Flattens the multi-dimensional input (intervals x MFCC values), runs it
    through three L2-regularized dense layers with dropout, and finishes
    with a 10-way softmax (pick the neuron with the highest value when
    predicting).

    :param input_shape: tuple, shape of the input
    :return: the Keras model
    """
    model = keras.Sequential()
    # input layer: flatten the (intervals, mfcc-values) array
    model.add(keras.layers.Flatten(input_shape=input_shape))
    # three hidden layers, each followed by dropout
    for units in (512, 256, 64):
        model.add(keras.layers.Dense(
            units, activation="relu",
            kernel_regularizer=keras.regularizers.l2(0.001)))
        model.add(keras.layers.Dropout(0.3))
    # output layer: softmax over the 10 labels (probabilities sum to 1)
    model.add(keras.layers.Dense(10, activation="softmax"))
    return model
def window_sumsquare(
    window,
    n_frames,
    hop_length=512,
    win_length=None,
    n_fft=2048,
    dtype=np.float32,
    norm=None,
):
    """Compute the sum-square envelope of a window function at a given hop length.

    This is used to estimate modulation effects induced by windowing
    observations in short-time Fourier transforms.

    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        Window specification, as in `get_window`
    n_frames : int > 0
        The number of analysis frames
    hop_length : int > 0
        The number of samples to advance between frames
    win_length : [optional]
        The length of the window function. By default, this matches ``n_fft``.
    n_fft : int > 0
        The length of each analysis frame.
    dtype : np.dtype
        The data type of the output
    norm : [optional]
        Normalization mode forwarded to ``util.normalize``

    Returns
    -------
    wss : np.ndarray, shape=``(n_fft + hop_length * (n_frames - 1))``
        The sum-squared envelope of the window function
    """
    if win_length is None:
        win_length = n_fft
    total_length = n_fft + hop_length * (n_frames - 1)
    envelope = np.zeros(total_length, dtype=dtype)
    # Square (and normalize) the window once, padded to the frame length.
    squared_win = get_window(window, win_length)
    squared_win = util.normalize(squared_win, norm=norm) ** 2
    squared_win = util.pad_center(squared_win, n_fft)
    # Accumulate the squared window at every frame offset.
    __window_ss_fill(envelope, squared_win, n_frames, hop_length)
    return envelope
from typing import Optional
def getLatestCode(appDbConnStr: str) -> Optional[ICode]:
    """get latest created code from db

    Args:
        appDbConnStr (str): app db connection string

    Returns:
        Optional[ICode]: code object, or None when no non-deleted code rows
        exist or when the database fetch fails
    """
    # Fetch only the id of the most recently issued, non-deleted code;
    # the full object is loaded afterwards via getCodeById.
    latestIdFetchsql = """
    select id
    from code_book.op_codes
    where code_issue_time=(select max(code_issue_time) from code_book.op_codes where is_deleted=0)
    and is_deleted=0
    order by id desc
    """
    # initialise code object
    code: Optional[ICode] = None
    colNames = []
    dbRows = []
    dbConn = None
    dbCur = None
    try:
        # get connection with raw data table
        dbConn = cx_Oracle.connect(appDbConnStr)
        # get cursor and execute fetch sql
        dbCur = dbConn.cursor()
        dbCur.execute(latestIdFetchsql)
        colNames = [row[0] for row in dbCur.description]
        # fetch all rows
        dbRows = dbCur.fetchall()
    except Exception as err:
        # best-effort: on any DB error fall through with no rows so the
        # function returns None below instead of raising
        dbRows = []
        print('Error while fetching latest code id from app db')
        print(err)
    finally:
        # closing database cursor and connection
        if dbCur is not None:
            dbCur.close()
        if dbConn is not None:
            dbConn.close()
    targetColumns = ["ID"]
    # guard: every fetched column must be one of the expected columns;
    # an empty colNames (error path) passes this check and is caught by
    # the empty-rows check below
    if (False in [(col in targetColumns) for col in colNames]):
        # all desired columns not fetched, hence return empty
        return None
    if len(dbRows) == 0:
        return None
    row = dbRows[0]
    latestCodeId: ICode["id"] = row[colNames.index('ID')]
    # get latest code by id
    code = getCodeById(appDbConnStr=appDbConnStr, codeId=latestCodeId)
    return code
def start_initialization_pd(update: Update, context: CallbackContext) -> str:
    """Handle the "fill in personal data" button press.

    Re-sends the button message's own text as an edit (presumably to drop
    the inline keyboard -- confirm), explains the process, asks for the
    last name, and moves the conversation to the LAST_NAME state.
    """
    user = User.get_user(update, context)
    message = update.effective_message
    # edit the message in place with its current text
    message.edit_text(text=message.text)
    context.bot.send_message(
        chat_id=user.user_id,
        text=static_text.ABOUT_FILLING_PERSONAL_DATA,
    )
    message.reply_text(
        text=static_text.ASK_LAST_NAME,
        parse_mode=ParseMode.HTML,
    )
    return LAST_NAME
def rpm_comments(table=RPMComment, prefix='comment_', relationships=False):
    """Get filters for rpm comments.

    :param sqlalchemy.ext.declarative.api.declarativemeta table: database model
    :param string prefix: prefix for each filter name
    :param bool relationships: include the user/component/diff relation filters
    :return dict: dict of filters
    """
    filters = {}
    filters.update(request_parser.equals(
        table.id,
        name=prefix + 'id',
        function=(lambda x: int(x))
    ))
    if relationships:
        filters.update(request_parser.equals(table.id_user, name=prefix + 'id_user'))
        filters.update(request_parser.equals(
            table.id_comp,
            name=prefix + 'id_comp',
            function=(lambda x: int(x))
        ))
        filters.update(request_parser.equals(
            table.id_diff,
            name=prefix + 'id_diff',
            function=(lambda x: int(x))
        ))
    return filters
def split_line(line) -> list:
    """Split a ``" |"``-delimited line from a dmp file into stripped fields."""
    return list(map(str.strip, line.split(" |")))
def translate(text):
    """Identity placeholder: return *text* unchanged."""
    return text
def stream_bytes(data, chunk_size=default_chunk_size):
    """Get a buffered generator for streaming binary data.

    The returned generator encodes the bytes as
    :mimetype:`multipart/form-data` with the corresponding headers.

    Parameters
    ----------
    data : bytes
        The data bytes to stream
    chunk_size : int
        The maximum size of each stream chunk

    Returns
    -------
    (generator, dict)
    """
    file_stream = BytesFileStream(data, chunk_size=chunk_size)
    return file_stream.body(), file_stream.headers()
def helper(n, big):
    """Recursively find the largest decimal digit of ``abs(n)``.

    :param n: int, an integer number
    :param big: the current largest digit seen so far
    :return: int, the final largest digit
    """
    n = abs(n)
    if n == 0:
        return big
    # check the last digit of the number
    big = max(big, n % 10)
    # BUG FIX: use integer division; ``n / 10`` produced floats, causing
    # ~300 wasted recursions down to float underflow and precision errors
    # in the extracted digits.
    return helper(n // 10, big)
async def get_transfer_list(request: Request):
    """Return the authenticated user's recent transfers as a JSON response.

    Currently only Globus transfers are checked; other transfer types are a
    TODO. The optional ``globus_item_count`` query parameter limits how many
    Globus entries are returned (default 10).
    """
    transfer_status_list = []
    # Code for globus
    tokens = await globus.verify_globus_code(request)
    if tokens:
        globus_item_count = 10
        if 'globus_item_count' in request.query_params:
            # BUG FIX: the value was previously read from request.path_params,
            # which raised KeyError when the parameter arrived in the query string.
            globus_item_count = request.query_params['globus_item_count']
        transfer_client = await globus.get_transfer_client(request)
        transfer_response = await globus.get_transfer_globus_list(transfer_client, globus_item_count)
        transfer_status_list.append(transfer_response)
    else:
        error_response = {'globus' : 'No authorization available'}
        transfer_status_list.append(error_response)
    # TODO Other type of transfers
    transfer_status_json = jsonable_encoder(transfer_status_list)
    return JSONResponse(content=transfer_status_json, status_code=200)
def conv_out_shp(IR, IC, KR, KC, border_mode, subsample):
    """Output (rows, cols) of a 2-D convolution.

    :param IR, IC: input rows/cols
    :param KR, KC: kernel rows/cols
    :param border_mode: 'valid' or 'full'
    :param subsample: (row stride, col stride)
    :raises NotImplementedError: for any other border_mode
    """
    ssR, ssC = subsample

    def _ceil_div(numer, denom):
        # ceiling division without floats; correct for negative numerators too
        return -(-numer // denom)

    if border_mode == 'valid':
        return _ceil_div(IR - KR + 1, ssR), _ceil_div(IC - KC + 1, ssC)
    if border_mode == 'full':
        return _ceil_div(IR + KR - 1, ssR), _ceil_div(IC + KC - 1, ssC)
    raise NotImplementedError(border_mode)
def getPageNumber(ffile):
    """Return the page-number token of a file name: the text after the last
    underscore and before the first following dot.

    :param ffile: file name or path-like object
    :return: the extracted token as a string
    """
    tail = str(ffile).rsplit('_', 1)[-1]
    return tail.split('.', 1)[0]
import colorsys
def colors_stepsort(r, g, b, repetitions=1):
    """Step-sort key for an RGB color, for more perceptually uniform colormaps.

    Quantizes hue into *repetitions* bands and flips alternating bands so the
    ordering varies smoothly. Returns a (hue band, luminance, value band) key.
    """
    lum = np.sqrt(0.241 * r + 0.691 * g + 0.068 * b)
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    h2 = int(h * repetitions)
    lum2 = int(lum * repetitions)  # NOTE(review): computed but unused -- the key uses the float lum; confirm intent
    v2 = int(v * repetitions)
    if h2 % 2:
        # flip every other hue band
        v2, lum = repetitions - v2, repetitions - lum
    return (h2, lum, v2)
def get_string_hash(string: str, algorithm_name: str):
    """Calculate the hash digest of a string.

    Args:
        string: str: The string to digest (UTF-8 encoded before hashing).
        algorithm_name: str: Name of the hash algorithm to use.

    Returns:
        The hex digest as a string.
    """
    hasher = _get_algorithm(algorithm_name)
    hasher.update(string.encode('utf-8'))
    return hasher.hexdigest()
import tqdm
def psd_error(times,rates,errors):
    """
    obtain errors for the best frequency estimate of the signal

    Bootstrap approach: 1000 times, perturb each nonzero rate by its
    Gaussian error, resample the time series with replacement, recompute
    the periodogram (presumably Lomb-Scargle, via lsp()), and record the
    peak frequency/power inside the 8.2e-6 -- 8.4e-6 Hz window. The spread
    of the returned lists estimates the frequency uncertainty.
    """
    # Dead experimental code kept by the original author (downsampling to
    # 10% of the data); left as-is.
    """
    print(len(times),len(rates),len(errors))
    newdatachoice = np.random.choice(len(times),size=int(0.1*len(times)))
    newtimes = list(np.array([times[0]])) + list(np.array([times[-1]])) + list(times[np.array(list(set(newdatachoice)))])
    newrates = list(np.array([rates[0]])) + list(np.array([rates[-1]])) + list(rates[np.array(list(set(newdatachoice)))])
    newerrs = list(np.array([errors[0]])) + list(np.array([errors[-1]])) + list(errors[np.array(list(set(newdatachoice)))])
    times = newtimes
    rates = newrates
    errors = newerrs
    print(len(times),len(rates),len(errors))
    """
    freqs_list = []
    psds_list = []
    for j in tqdm(range(1000)):
        # perturb every nonzero rate by its measurement error
        new_rates = np.zeros(len(rates))
        for i in range(len(rates)):
            if rates[i] != 0:
                new_rates[i] = np.random.normal(loc=rates[i],scale=errors[i])
        trunc_times = times-times[0]
        # bootstrap resample with replacement; set() keeps unique indices
        newchoice = np.random.choice(len(trunc_times),size=len(trunc_times))
        rand_times = trunc_times[np.array(list(set(newchoice)))]
        rand_rates = new_rates[np.array(list(set(newchoice)))]
        # NOTE(review): lsp() appears to return (angular frequency, power,
        # probability levels) -- confirm against its definition
        omega,psd,prob3,prob4,prob5 = lsp(rand_times,rand_rates)
        nu_reg = omega/(2.0*np.pi)
        freq = omega/(2*np.pi)
        # peak power within the frequency window of interest (~8.3e-6 Hz)
        psds_list.append( np.max(psd[(freq>=8.2e-6)&(freq<=8.4e-6)]) )
        freqs_list.append( freq[psd==psds_list[-1]][0])
        #plt.figure()
        #plt.plot(freq,psd,'rx-')
        #plt.show()
    return freqs_list,psds_list
def solve(global_step):
    """Attach an optimizer to the collected losses.

    Builds the total loss (model losses plus regularization), applies
    gradients to the trainable variables, optionally updates batch-norm
    moving statistics, and returns one grouped training op.
    """
    # learning rate schedule (82783 presumably the dataset size -- confirm)
    lr = _configure_learning_rate(82783, global_step)
    optimizer = _configure_optimizer(lr)
    tf.summary.scalar('learning_rate', lr)
    # compute and apply gradient
    losses = tf.get_collection(tf.GraphKeys.LOSSES)
    regular_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    regular_loss = tf.add_n(regular_losses)
    out_loss = tf.add_n(losses)
    total_loss = tf.add_n(losses + regular_losses)
    # log the loss components separately for TensorBoard
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('out_loss', out_loss)
    tf.summary.scalar('regular_loss', regular_loss)
    update_ops = []
    variables_to_train = _get_variables_to_train()
    # update_op = optimizer.minimize(total_loss)
    gradients = optimizer.compute_gradients(total_loss, var_list=variables_to_train)
    grad_updates = optimizer.apply_gradients(gradients,
                                             global_step=global_step)
    update_ops.append(grad_updates)
    # update moving mean and variance of batch-norm layers when enabled
    if FLAGS.update_bn:
        update_bns = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_bn = tf.group(*update_bns)
        update_ops.append(update_bn)
    return tf.group(*update_ops)
def get_features_from_policy(env, policy):
    """Represent a policy by its Monte-Carlo average feature vector.

    This only makes sense for linear reward functions, and is only used
    for the HighwayDriving environment.
    """
    assert isinstance(env.unwrapped, HighwayDriving)
    assert isinstance(policy, FixedPolicy)
    n_rollouts = 10
    feature_sum = np.zeros(env.Ndim_repr)
    for _ in range(n_rollouts):
        obs = env.reset()
        done = False
        while not done:
            action = policy.get_action(obs)
            obs, _, done, info = env.step(action)
            feature_sum += info["gp_repr"]
    return feature_sum / n_rollouts
import os
def get_files_from_folder(directory, extension=None):
    """Recursively collect the absolute paths of files under *directory*.

    :param directory: root folder to walk
    :param extension: optional suffix filter (e.g. ``".txt"``); when None,
        every file is returned
    :return: list of absolute file paths
    """
    # NOTE: could be replaced by pathlib's Path.glob for newer Python versions.
    matches = []
    for root, _, files in os.walk(directory):
        for name in files:
            path = os.path.abspath(os.path.join(root, name))
            # filter during the walk instead of a second filter() pass
            if extension is None or path.endswith(extension):
                matches.append(path)
    return matches
def predicate(line):
    """Return False for lines containing a ``#`` anywhere, True otherwise.

    (Note: the check is substring containment, not just a leading ``#`` —
    the previous docstring claimed "starting with" incorrectly.)
    """
    return "#" not in line
def pad_slices(ctvol, max_slices):
    """Pad a (slices, side, side) volume along axis 0 up to *max_slices*.

    Padding is split as evenly as possible before/after and filled with the
    volume's minimum value. Asserts that the input does not already exceed
    *max_slices*.
    """
    deficit = max_slices - ctvol.shape[0]
    assert deficit >= 0, 'Image slices exceed max_slices by' + str(-1 * deficit)
    if deficit > 0:
        front = deficit // 2
        back = deficit - front
        ctvol = np.pad(ctvol, ((front, back), (0, 0), (0, 0)),
                       mode='constant', constant_values=np.amin(ctvol))
    assert ctvol.shape[0] == max_slices
    return ctvol
import random
def average_img_from_dir(path_data_dir,filepat="*", \
                        parentaslabel=False,\
                        labels=[],\
                        sampling_rate=0.001,\
                        title="average image") :
    """
    create and visualize average image of given dataset

    Randomly samples ``sampling_rate`` of the images matching ``filepat``
    under ``path_data_dir`` (optionally restricted to the given parent-folder
    labels), computes their running weighted average and shows it with
    matplotlib.

    dataset_path = path to dataset
    sampling_rate = sampling ratio to visualize value in [0,1]

    return
       average_image = average image of the given dataset

    NOTE(review): ``labels=[]`` is a mutable default argument, and the
    ``seed`` mentioned by the original docstring is not a parameter --
    confirm intent.
    """
    # collect candidate files, optionally keeping only those whose
    # parent-directory label is in *labels*
    data = [ f for lbl, f in gen_find(filepat,path_data_dir,parentaslabel,labels) \
            if not parentaslabel or ( parentaslabel and lbl in labels) ]
    num_elem = len(data)
    ds_size= int(num_elem * sampling_rate)
    print ("# sample : {} sampling_rate : {} # of data : {}".format(ds_size,sampling_rate,num_elem))
    sampled_data = random.sample(data, ds_size)
    # seed the running average with the first sampled image
    img_avg = cv2.imread(sampled_data[0])
    h, w = img_avg.shape[:2]
    nSum = 1
    for i in sampled_data[1:] :
        imga = cv2.resize(cv2.imread(i),(w, h), interpolation = cv2.INTER_CUBIC)
        # incremental mean: avg_{n+1} = avg_n * n/(n+1) + img * 1/(n+1)
        weight_avg = float(nSum)/float(nSum+1)
        weight_a = float(1)/float(nSum+1)
        img_avg = cv2.addWeighted(img_avg,weight_avg,imga,weight_a,0)
        #print ("Weight_avg : {} + Weight_a : {} = Total Weight {} " \
        #    .format(weight_avg,weight_a,weight_avg+weight_a))
        nSum+=1
    # Make w as 10 - 10 = w : v : h
    vr = 10 * h / w
    plt.figure(figsize=(10,vr))
    plt.title(title)
    plt.imshow(img_avg,interpolation='nearest', aspect='auto'),plt.show()
    return img_avg
def get_size(positions):
    """Get the width and height of the bounding rectangle embodying
    all dendrogram positions.

    Args:
        positions (dict of Dendrogram: np.array): xy coordinates per dendrogram

    Returns:
        Tuple of (width, height) of the bounding rectangle.
    """
    # top edge of each dendrogram = its y position plus its height
    tops = [coords[1] + dendro.height for dendro, coords in positions.items()]
    xy = np.array(list(positions.values()))
    width = xy[:, 0].max() - xy[:, 0].min()
    height = max(tops) - xy[:, 1].min()
    return width, height
def getContactInfo(dic):
    """Return the contact info for a chapter.

    The entry text is expected to look like ``"<first>, <label>:<contact>"``:
    the value after the colon in the second comma-separated field is
    returned with surrounding whitespace stripped.

    dic -- Dictionary from the JSON with all values.
    """
    raw = str(dic["content"]["$t"])
    second_field = raw.split(',')[1]
    return second_field.split(':')[1].strip()
import torch
import time
def draw_pointcloud(x: torch.Tensor, x_mask: torch.Tensor, grid_on=True):
    """ Render each point cloud of a batch to an image tensor.

    :param x: Tensor([B, N, 3]) batch of point clouds
    :param x_mask: Tensor([B, N]) boolean mask; True marks points that are
        dropped before plotting (padding)
    :param grid_on: unused; kept for interface compatibility
    :return: Tensor([B, 3, H, W]) rendered uint8 images, channels-first
        (the previous docstring claimed [3 * B, W, H], which did not match
        the torch.stack of [3, H, W] tensors below)
    """
    figw, figh = 16., 12.
    W, H = 256, int(256 * figh / figw)
    imgs = list()
    for points, mask in zip(x, x_mask):
        points = points[~mask, :].cpu()
        fig = plt.figure(figsize=(figw, figh))
        ax = fig.gca(projection='3d')
        ax.set_facecolor('xkcd:steel')
        # Black panes so the white markers stand out.
        ax.w_xaxis.set_pane_color((0., 0., 0., 1.0))
        ax.w_yaxis.set_pane_color((0., 0., 0., 1.0))
        ax.w_zaxis.set_pane_color((0., 0., 0., 1.0))
        # Axes are remapped to (-z, x, y); presumably a viewing-orientation
        # choice -- confirm against the rest of the visualization code.
        ax.scatter(-points[:, 2], points[:, 0], points[:, 1], color=(1, 1, 1), marker='o', s=100)
        fig.tight_layout()
        fig.canvas.draw()
        buf = fig.canvas.buffer_rgba()
        _, _, w, h = fig.bbox.bounds
        img = np.frombuffer(buf, np.uint8).copy()
        img.shape = int(h), int(w), 4
        img = img[:, :, 0:3]  # drop the alpha channel
        img = cv2.resize(img, dsize=(W, H), interpolation=cv2.INTER_CUBIC)  # [H, W, 3]
        # [H, W, 3] -> [3, H, W]; single permute replaces the former
        # transpose(2, 0).transpose(2, 1) chain.
        imgs.append(torch.tensor(img).permute(2, 0, 1))
        plt.close(fig)
    return torch.stack(imgs, dim=0)
def semantic_dsm(word_list, keyed_vectors):
    """Calculate a condensed semantic dissimilarity matrix.

    Each word is embedded via ``keyed_vectors.word_vec``; pairwise cosine
    distances are computed and clipped into [0, 1].
    """
    embeddings = np.array([keyed_vectors.word_vec(word) for word in word_list])
    distances = pdist(embeddings, metric="cosine")
    return np.clip(distances, 0, 1)
def prompt_id_num(message, length=ID_WIDTH):
    """ Asks the user to enter an identifier which is a numeric string.
    The length is the length of the identifier asked.
    :param message: message to ask the input
    :param length: the length of the identifier
    :return: input
    """
    def _is_valid(value):
        # Both criteria must hold simultaneously: exact length AND numeric.
        if len(value) != length:
            return False
        try:
            int(value)
        except ValueError:
            return False
        return True

    # BUGFIX: the previous implementation validated length and numericness
    # in two *sequential* loops, so a value re-entered during the numeric
    # check was never re-checked for length (and vice versa).  A single
    # loop now re-validates both criteria after every prompt.
    response = input(message)
    while not _is_valid(response):
        response = input(f"Entrée incorrecte. Veuillez renseigner"
                         f" un identifiant contenant {length} nombres: ")
    return response
def click_snr(wl, Spec):
    """Calculate SNR in a wavelength range selected from a plot.

    NOTE(review): click capture is not implemented yet, so the selected
    range currently defaults to the full wavelength span (exclusive of
    the extreme points).
    """
    plt.figure()
    plt.plot(wl, Spec)
    plt.show(block=True)
    # Placeholder bounds until the click handling is implemented.
    upper = np.max(wl)
    lower = np.min(wl)
    in_range = (wl > lower) * (wl < upper)
    spec_slice = Spec[in_range]
    # Calculate SNR on the selected slice only.
    return snr(spec_slice)
def rnn_cell_forward(xt, a_prev, parameters):
    """
    Run a single forward step of a vanilla RNN cell.

    Arguments:
    xt -- input data at timestep "t", numpy array of shape (n_x, m)
    a_prev -- hidden state at timestep "t-1", numpy array of shape (n_a, m)
    parameters -- python dictionary containing:
                        Wax -- input weight matrix, shape (n_a, n_x)
                        Waa -- hidden-state weight matrix, shape (n_a, n_a)
                        Wya -- hidden-to-output weight matrix, shape (n_y, n_a)
                        ba  -- hidden bias, shape (n_a, 1)
                        by  -- output bias, shape (n_y, 1)
    Returns:
    a_next -- next hidden state, of shape (n_a, m)
    yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
    cache -- tuple (a_next, a_prev, xt, parameters) for the backward pass
    """
    Wax, Waa, Wya = parameters["Wax"], parameters["Waa"], parameters["Wya"]
    ba, by = parameters["ba"], parameters["by"]
    # Hidden state: tanh(Waa @ a_prev + Wax @ xt + ba)
    a_next = np.tanh(Waa @ a_prev + Wax @ xt + ba)
    # Output distribution: softmax(Wya @ a_next + by)
    yt_pred = softmax(Wya @ a_next + by)
    cache = (a_next, a_prev, xt, parameters)
    return a_next, yt_pred, cache
import os
import random
def load_random_batch(cfg, data_paths):
    """
    Load a random batch of images, masks and (optionally) weight maps.

    Samples ``cfg.BATCH_SIZE`` file paths without replacement from the
    combined 'train' and 'valid' image folders under every directory in
    ``data_paths``, then loads and stacks the corresponding images, masks
    and -- when ``cfg.WEIGHTING`` is truthy -- weight maps.  Images are
    normalized by ``cfg.RESCALE`` and masks by ``cfg.RESCALE_MSK``.

    Parameters:
    -----------
    cfg: configuration object; this function reads cfg.BATCH_SIZE,
        cfg.RESCALE, cfg.RESCALE_MSK and cfg.WEIGHTING (the helper
        functions may read further fields)
    data_paths: list containing strings
        paths to the folder where the images, masks and weights are in
    Returns:
    --------
    (imgs, msks, wgts) when cfg.WEIGHTING is truthy, otherwise
    (imgs, msks); arrays whose first axis is the batch dimension
    """
    # Collect the 'train' and 'valid' image directories of every dataset
    # ('test' is currently excluded -- see the commented lines).
    train_imgs_dirs = list()
    valid_imgs_dirs = list()
    test_imgs_dirs = list()
    for data_path in data_paths:
        train_imgs_dirs.append(os.path.join(data_path, 'train', 'images'))
        valid_imgs_dirs.append(os.path.join(data_path, 'valid', 'images'))
        #test_imgs_dirs.append(os.path.join(data_path, 'test', 'images'))
    imgs_list = []
    for train_imgs_dir in train_imgs_dirs:
        imgs_list.extend([os.path.join(train_imgs_dir, s) for s in os.listdir(train_imgs_dir)])
    for valid_imgs_dir in valid_imgs_dirs:
        imgs_list.extend([os.path.join(valid_imgs_dir, s) for s in os.listdir(valid_imgs_dir)])
    #for test_imgs_dir in test_imgs_dirs:
        #imgs_list.extend([os.path.join(test_imgs_dir, s) for s in os.listdir(test_imgs_dir)])
    imgs_list.sort()
    # print(imgs_list)
    n_imgs = len(imgs_list)
    # Sampling without replacement ("Ziehen ohne Zurücklegen"):
    # takes cfg.BATCH_SIZE indices out of len(imgs_list)
    batch_list = random.sample(range(0, n_imgs), cfg.BATCH_SIZE)
    print("batch_list", batch_list)
    # rand_img = random.randrange(0, n_imgs)
    # print(rand_img)
    #img_path = imgs_list[rand_img]
    # Get the image paths of this batch
    batch_imgs_list = [imgs_list[i] for i in batch_list]
    # The first image starts the batch arrays; subsequent ones are appended.
    first_img = True
    for img_path in batch_imgs_list:
        # Get the image
        print(img_path)
        img = load_image(img_path)
        # Get the corresponding masks (one file per class, concatenated)
        msk_paths = find_msk_paths(cfg, img_path)
        print(msk_paths)
        msk = concat_msks(msk_paths)
        #print("mask shape", msk.shape)
        # Put it together
        if first_img:
            imgs = np.expand_dims(img, axis=0)
            msks = np.expand_dims(msk, axis=0)
            first_img = False
        else:
            imgs = batch_data(imgs, img, multichannel=True)
            msks = batch_data(msks, msk, multichannel=True)
        #print("masks shape", msks.shape)
    # Normalize it
    imgs = imgs / cfg.RESCALE
    msks = msks / cfg.RESCALE_MSK
    if cfg.WEIGHTING:
        # Second pass over the same sampled paths to batch the weight maps.
        first_img = True
        for img_path in batch_imgs_list:
            wgt_paths = find_weight_paths(cfg, img_path)
            #print(wgt_paths)
            weight = concat_wgts(wgt_paths)
            # Put it together
            if first_img:
                wgts = np.expand_dims(weight, axis=0)
                first_img = False
            else:
                wgts = batch_data(wgts, weight, multichannel=True)
        # print(wgts.shape)
        #print("wgts min max shape", np.min(wgts), np.max(wgts), wgts.shape)
        return imgs, msks, wgts
    else:
        return imgs, msks
def load_song(trainsize=5000, testsize=5000):
    """ Load the million song dataset and split it into train/test sets.

    Not a good dataset for feature selection or regression:
    standard linear regression performs only a little bit better than a
    random vector, so additional complex models (such as interesting
    kernels) are needed to improve performance.

    The reduced CSV is used whenever the requested sample fits in it.
    """
    use_small = trainsize + testsize < 5000
    filename = ('datasets/YearPredictionMSD_small.csv' if use_small
                else 'datasets/YearPredictionMSD.csv')
    dataset = loadcsv(filename)
    return splitdataset(dataset, trainsize, testsize, outputfirst=True)
def convert_node(node_data: NodeData):
    """
    Convenience method for converting NodeData to a packed TLV message.

    :param core.emulator.data.NodeData node_data: node data to convert
    :return: packed node message
    """
    node = node_data.node
    # Optional fields: only serialized when present on the node.
    services = "|".join(x.name for x in node.services) if node.services is not None else None
    server = node.server.name if node.server is not None else None
    tlv_values = [
        (NodeTlvs.NUMBER, node.id),
        (NodeTlvs.TYPE, node.apitype.value),
        (NodeTlvs.NAME, node.name),
        (NodeTlvs.MODEL, node.type),
        (NodeTlvs.EMULATION_SERVER, server),
        (NodeTlvs.X_POSITION, int(node.position.x)),
        (NodeTlvs.Y_POSITION, int(node.position.y)),
        (NodeTlvs.CANVAS, node.canvas),
        (NodeTlvs.SERVICES, services),
        (NodeTlvs.LATITUDE, str(node.position.lat)),
        (NodeTlvs.LONGITUDE, str(node.position.lon)),
        (NodeTlvs.ALTITUDE, str(node.position.alt)),
        (NodeTlvs.ICON, node.icon),
    ]
    tlv_data = structutils.pack_values(coreapi.CoreNodeTlv, tlv_values)
    return coreapi.CoreNodeMessage.pack(node_data.message_type.value, tlv_data)
def parse_null_value(
    null_value_node: "NullValueNode", schema: "GraphQLSchema"
) -> None:
    """
    Returns the Python value of an AST null value node: always ``None``.

    :param null_value_node: AST null value node to treat
    :param schema: the GraphQLSchema instance linked to the engine
    :type null_value_node: NullValueNode
    :type schema: GraphQLSchema
    """
    # Both parameters exist only to satisfy the shared parser interface.
    # pylint: disable=unused-argument
    return None
def SetDataTypesFromColInfo(df, tblCI):
    """
    Apply colinfo dictionaries to set column dtypes of a freshly imported
    (CSV) DataFrame, including Boolean conversion of flag columns.
    """
    for column in df.columns:
        # Flag columns (1/blank) become Boolean for memory and feather
        # file-size efficiency.
        if tblCI.dict_isflagcol.get(column):
            df = pdutil.ConvertFlagColToBoolean(df, column)
        # Columns listed in the type dictionary are cast: 'dt' means
        # datetime via pd.to_datetime, anything else goes through astype.
        if column in tblCI.dict_types:
            target_type = tblCI.dict_types[column]
            if target_type == 'dt':
                df[column] = pd.to_datetime(df[column])
            else:
                df[column] = df[column].astype(target_type)
    return df
def run_and_wait(request, _):
    """Implementation of RunAndWait."""
    process_runner = new_process.ProcessRunner(request.executable_path,
                                               request.default_args)
    args = {}
    # Optional Popen arguments: copied only when set on the request.
    for field in ('bufsize', 'executable', 'shell', 'cwd'):
        protobuf_utils.get_protobuf_field(args, request.popen_args, field)
    args['env'] = request.popen_args.env if request.popen_args.env_is_set else None
    args['additional_args'] = request.additional_args
    # Optional top-level run parameters.
    for field in ('timeout', 'terminate_before_kill', 'terminate_wait_time',
                  'input_data', 'max_stdout_len'):
        protobuf_utils.get_protobuf_field(args, request, field)
    logs.log('Running command: %s' % process_runner.get_command())
    return untrusted_runner_pb2.RunAndWaitResponse(
        result=process_result_to_proto(process_runner.run_and_wait(**args)))
import os
import numpy
def import_dicom_series(path, files_start_with=None, files_end_with=None,
                        exclude_files_end_with=('.dat', '.txt', '.py', '.pyc', '.nii', '.gz')):
    """Rudimentary loader for a DICOM series stored in one directory.

    Every file in ``path`` passing the name filters is read with
    ``dicom.read_file`` and its pixel data stacked into a 3-D volume.

    Args:
        path: directory containing the DICOM files.
        files_start_with: if given, only files starting with this prefix.
        files_end_with: if given, only files ending with this suffix
            (string or tuple of strings).
        exclude_files_end_with: suffixes that are always skipped.

    Returns:
        float32 numpy array of shape (rows, cols, n_slices).

    Raises:
        ValueError: if no file matches the filters (previously IndexError).
    """
    def _accept(file_name):
        # A file is read only when it matches both include filters and
        # none of the exclusion suffixes.
        if files_start_with is not None and not file_name.startswith(files_start_with):
            return False
        if files_end_with is not None and not file_name.endswith(files_end_with):
            return False
        return not file_name.endswith(tuple(exclude_files_end_with))

    slices = []
    for file_name in os.listdir(path):
        if not _accept(file_name):
            continue
        full_path = os.path.join(path, file_name)
        f = dicom.read_file(full_path)
        slices.append(f.pixel_array)
    if not slices:
        raise ValueError("no DICOM files matching the filters in %r" % path)
    # NOTE(review): slices are stacked in os.listdir() order, which is
    # arbitrary; sorting by DICOM InstanceNumber would be more robust --
    # confirm with callers before changing.
    rows, cols = slices[0].shape[0], slices[0].shape[1]
    array = numpy.zeros((rows, cols, len(slices)), dtype=numpy.float32)
    for i, slice_data in enumerate(slices):
        array[:, :, i] = numpy.float32(slice_data)  # FIXME: handle other data types
    # return occiput_from_array(array)
    return array
import random
def fight(player, enemy):
    """
    Run one round of combat between the user and their selected enemy.

    Returns a list [player, player_damage_roll, enemy_damage_roll, won,
    looted_gold, looted_power_crystals] for the view function to display.
    """
    # Player hits for a random 80-100% of their damage stat.
    swing = random.randrange(80, 101) / 100
    dmg_roll_player = round(player.damage * swing)
    looted_power_crystals = 0
    looted_gold = 0
    if enemy.hp_max <= dmg_roll_player:
        # Enemy defeated: loot 75-100% of each resource, rolled separately,
        # and credit it to the player.
        crystal_roll = random.randrange(75, 101) / 100
        looted_power_crystals = round(enemy.power_crystals * crystal_roll)
        player.power_crystals += looted_power_crystals
        gold_roll = random.randrange(75, 101) / 100
        looted_gold = round(enemy.gold * gold_roll)
        player.gold += looted_gold
        dmg_roll_enemy = 0
        result = True
    else:
        result = False
        # Enemy retaliates for a random 80-100% of their damage stat.
        counter = random.randrange(80, 101) / 100
        dmg_roll_enemy = round(enemy.damage * counter)
        player.hp_current -= dmg_roll_enemy
    return [player, dmg_roll_player, dmg_roll_enemy, result,
            looted_gold, looted_power_crystals]
import time
async def POST_Dataset(request):
    """ Handler for POST /datasets

    Creates the dataset metadata object described by the JSON body
    (required keys: "root", "type", "shape"; optional: "bucket",
    "layout", "creationProperties") and responds with a 201 summary.
    """
    log.request(request)
    app = request.app
    params = request.rel_url.query
    if not request.has_body:
        msg = "POST_Dataset with no body"
        log.error(msg)
        raise HTTPBadRequest(reason=msg)
    body = await request.json()
    log.info(f"POST_Dataset, body: {body}")
    # Bucket may come from the query string or the body; query wins.
    if "bucket" in params:
        bucket = params["bucket"]
    elif "bucket" in body:
        # BUGFIX: this previously read params["bucket"], which raises
        # KeyError whenever the bucket is only supplied in the body.
        bucket = body["bucket"]
    else:
        bucket = None
    dset_id = get_obj_id(request, body=body)
    if not isValidUuid(dset_id, obj_class="dataset"):
        log.error(f"Unexpected dataset_id: {dset_id}")
        raise HTTPInternalServerError()
    # verify the id doesn't already exist
    obj_found = await check_metadata_obj(app, dset_id, bucket=bucket)
    if obj_found:
        log.error(f"Post with existing dset_id: {dset_id}")
        raise HTTPInternalServerError()
    if "root" not in body:
        msg = "POST_Dataset with no root"
        log.error(msg)
        raise HTTPInternalServerError()
    root_id = body["root"]
    try:
        validateUuid(root_id, "group")
    except ValueError:
        msg = "Invalid root_id: " + root_id
        log.error(msg)
        raise HTTPInternalServerError()
    if "type" not in body:
        msg = "POST_Dataset with no type"
        log.error(msg)
        raise HTTPInternalServerError()
    type_json = body["type"]
    if "shape" not in body:
        msg = "POST_Dataset with no shape"
        log.error(msg)
        raise HTTPInternalServerError()
    shape_json = body["shape"]
    layout = None
    if "layout" in body:
        layout = body["layout"]  # client specified chunk layout
    # ok - all set, create the dataset metadata object
    now = int(time.time())
    log.debug(f"POST_dataset typejson: {type_json}, shapejson: {shape_json}")
    dset_json = {"id": dset_id, "root": root_id, "created": now, "lastModified": now, "type": type_json, "shape": shape_json, "attributes": {} }
    if "creationProperties" in body:
        dset_json["creationProperties"] = body["creationProperties"]
    if layout is not None:
        dset_json["layout"] = layout
    await save_metadata_obj(app, dset_id, dset_json, bucket=bucket, notify=True, flush=True)
    resp_json = {}
    resp_json["id"] = dset_id
    resp_json["root"] = root_id
    resp_json["created"] = dset_json["created"]
    resp_json["type"] = type_json
    resp_json["shape"] = shape_json
    resp_json["lastModified"] = dset_json["lastModified"]
    resp_json["attributeCount"] = 0
    resp = json_response(resp_json, status=201)
    log.response(request, resp=resp)
    return resp
def sort_list_by_list(L1, L2):
    """Return the elements of L1 reordered by the sort order of L2.

    The sort is stable: L1 elements whose L2 keys tie keep their
    original relative order.
    """
    keyed_pairs = sorted(zip(L2, L1), key=lambda pair: pair[0])
    return [element for _, element in keyed_pairs]
def _to_protobuf_value(value: type_utils.PARAMETER_TYPES) -> struct_pb2.Value:
    """Creates a google.protobuf.struct_pb2.Value message out of a provided
    value.

    Args:
        value: The value to be converted to Value message.

    Returns:
        A google.protobuf.struct_pb2.Value message.

    Raises:
        ValueError if the given value is not one of the parameter types.
    """
    if isinstance(value, str):
        return struct_pb2.Value(string_value=value)
    elif isinstance(value, bool):
        # BUGFIX: bool must be tested BEFORE int/float. isinstance(True, int)
        # is True in Python, so with the previous ordering bools were
        # serialized as number_value and this branch was unreachable.
        return struct_pb2.Value(bool_value=value)
    elif isinstance(value, (int, float)):
        return struct_pb2.Value(number_value=value)
    elif isinstance(value, dict):
        return struct_pb2.Value(
            struct_value=struct_pb2.Struct(
                fields={k: _to_protobuf_value(v) for k, v in value.items()}))
    elif isinstance(value, list):
        return struct_pb2.Value(
            list_value=struct_pb2.ListValue(
                values=[_to_protobuf_value(v) for v in value]))
    else:
        raise ValueError('Value must be one of the following types: '
                         'str, int, float, bool, dict, and list. Got: '
                         f'"{value}" of type "{type(value)}".')
from typing import Union
from typing import List
def umap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
    """\
    Scatter plot in UMAP basis.

    Parameters
    ----------
    {adata_color_etc}
    {edges_arrows}
    {scatter_bulk}
    {show_save_ax}

    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    # Thin wrapper: all plotting is delegated to the generic `embedding`
    # helper with the basis fixed to 'umap'.
    # NOTE(review): the {placeholders} in the docstring are presumably
    # filled in by a docstring-templating decorator elsewhere -- keep them
    # verbatim.
    return embedding(adata, 'umap', **kwargs)
def _prepare_grid(times, time_step):
    """Prepares grid of times for path generation.

    Args:
      times: Rank 1 `Tensor` of increasing positive real values. The times at
        which the path points are to be evaluated.
      time_step: Scalar real `Tensor`. Maximal distance between time grid points

    Returns:
      Tuple `(all_times, mask)`.
      `all_times` is a 1-D real `Tensor` containing all points from 'times` and
      the uniform grid of points between `[0, times[-1]]` with grid size equal to
      `time_step`. The `Tensor` is sorted in ascending order and may contain
      duplicates.
      `mask` is a boolean 1-D `Tensor` of the same shape as 'all_times', showing
      which elements of 'all_times' correspond to THE values from `times`.
      Guarantees that times[0]=0 and mask[0]=False.
    """
    # Uniform auxiliary grid: [time_step, 2*time_step, ...) up to times[-1].
    additional_times = tf.range(
        start=time_step, limit=times[-1], delta=time_step, dtype=times.dtype)
    zeros = tf.constant([0], dtype=times.dtype)
    all_times = tf.concat([zeros] + [times] + [additional_times], axis=0)
    # Only positions originating from `times` are True in the mask; the
    # leading zero and the uniform grid points are False.
    additional_times_mask = tf.zeros_like(additional_times, dtype=tf.bool)
    mask = tf.concat([
        tf.cast(zeros, dtype=tf.bool),
        tf.ones_like(times, dtype=tf.bool)
    ] + [additional_times_mask], axis=0)
    # Stable argsort keeps the relative order of duplicate time values, so
    # gathering with the same permutation keeps `mask` aligned with its time.
    perm = tf.argsort(all_times, stable=True)
    all_times = tf.gather(all_times, perm)
    mask = tf.gather(mask, perm)
    return all_times, mask
def unpack_singleton(x):
    """Gets the sole element if the iterable has exactly one value.
    Otherwise returns the iterable unchanged.

    # Argument:
        x: A list or tuple.

    # Returns:
        The same iterable or its single element.
    """
    return x[0] if len(x) == 1 else x
from typing import Optional
from typing import Union
from typing import List
import sys
def main(argv: Optional[Union[str, List[str]]] = None) -> object:
    """
    Apply R4 edits to FHIR JSON files

    :param argv: Argument list. Can be an unparsed string, a list of strings or nothing. If nothing, we use sys.argv
    :return: 0 if all RDF files that had valid FHIR in them were successful, 1 otherwise
    """
    def gen_dlp(args: List[str]) -> dirlistproc.DirectoryListProcessor:
        return dirlistproc.DirectoryListProcessor(args, "Add FHIR R4 edits to JSON file", '.json', '.json',
                                                  addargs=addargs)
    dlp = gen_dlp(argv)
    if not (dlp.opts.infile or dlp.opts.indir):
        # BUGFIX: "--help" must be appended to the *supplied* arguments as
        # well.  The previous expression
        #   argv if argv is not None else sys.argv[1:] + ["--help"]
        # bound "+" tighter than the conditional, so "--help" was only added
        # when argv was None.  Also normalize a string argv before appending.
        if argv is None:
            base_args = sys.argv[1:]
        elif isinstance(argv, str):
            base_args = argv.split()
        else:
            base_args = list(argv)
        gen_dlp(base_args + ["--help"])  # Does not exit
    dlp.opts.converted_files = []  # If converting inline
    nfiles, nsuccess = dlp.run(convert_file, file_filter_2=check_json)
    print(f"Total={nfiles} Successful={nsuccess}")
    return 0 if nfiles == nsuccess else 1
def distribution_filter_for(bijector):
    """Returns a function checking Distribution compatibility with this bijector.

    That is, `distribution_filter_for(bijector)(dist) == True` implies
    that `bijector` can act on `dist` (i.e., they are safe to compose with
    `TransformedDistribution`).

    TODO(bjp): Make this sensitive to supports. Currently assumes `bijector` acts
    on an unconstrained space, and just checks compatible ranks.

    Args:
      bijector: A `Bijector` instance to check compatibility with.

    Returns:
      filter: A Python callable filtering Distributions for compatibility with
        this bijector.
    """
    if isinstance(bijector, tfb.CholeskyToInvCholesky):
        def additional_check(dist):
            # This bijector operates on square matrices, so the distribution
            # must have rank-2 events with equal dimensions.
            return (tensorshape_util.rank(dist.event_shape) == 2 and
                    int(dist.event_shape[0]) == int(dist.event_shape[1]))
    elif isinstance(bijector, tfb.CorrelationCholesky):
        def additional_check(dist):
            # The isinstance check will be redundant when the
            # `distribution_eligilibility_filter_for` above has been used, but we keep
            # it here for safety.
            return isinstance(dist, tfd.LKJ) and dist.input_output_cholesky
    else:
        # No bijector-specific constraint: accept every distribution that
        # passes the generic checks below.
        additional_check = lambda dist: True
    def distribution_filter(dist):
        # Only floating-point distributions can be transformed.
        if not dtype_util.is_floating(dist.dtype):
            return False
        # The bijector must not require more event dimensions than the
        # distribution's events provide.
        if bijector.forward_min_event_ndims > tensorshape_util.rank(
            dist.event_shape):
            return False
        return additional_check(dist)
    return distribution_filter
def create_markdown_table(table_info: dict, index_name: str='Id') -> str:
    """
    Returns a string for a markdown table, formatted
    according to the dictionary passed as `table_info`

    Parameters:
        table_info: Mapping from index to {column: value} mappings
        index_name: Name to use for the index column

    Returns:
        md_str: Markdown formatted table string

    Example:
        >>> table_info = {
            'Apples': {
                'Cost': '40p',
                'Colour': 'Red/green',
            },
            'Oranges': {
                'Cost': '50p',
                'Colour': 'Orange',
            },
        }
        >>> md_str = create_markdown_table(table_info, index_name='Fruit')
        >>> print(md_str)
        | Fruit   | Cost   | Colour    |
        |:--------|:-------|:----------|
        | Apples  | 40p    | Red/green |
        | Oranges | 50p    | Orange    |
    """
    # Transpose so the outer keys become rows and inner keys columns.
    table = pd.DataFrame(table_info).T
    table.index.name = index_name
    return table.to_markdown()
def _parse_sequence(sequence):
    """Get a string which should describe an event sequence. If it is
    successfully parsed as one, return a tuple containing the state (as an int),
    the event type (as an index of _types), and the detail - None if none, or a
    string if there is one. If the parsing is unsuccessful, return None.
    """
    # A valid sequence is wrapped in angle brackets, e.g. "<Control-Key-x>".
    if not sequence or sequence[0] != '<' or sequence[-1] != '>':
        return None
    words = sequence[1:-1].split('-')
    # Fold every leading modifier word into a bitmask; each modifier
    # contributes the bit at its index in _modifier_names.
    modifiers = 0
    while words and words[0] in _modifier_names:
        modifiers |= 1 << _modifier_names[words[0]]
        del words[0]
    # The next word must name the event type.
    if words and words[0] in _type_names:
        type = _type_names[words[0]]
        del words[0]
    else:
        return None
    if _binder_classes[type] is _SimpleBinder:
        # Simple events accept neither modifiers nor a detail part.
        if modifiers or words:
            return None
        else:
            detail = None
    else:
        # _ComplexBinder
        # Key events take a keysym detail; everything else a button number.
        if type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]:
            type_re = _keysym_re
        else:
            type_re = _button_re
        if not words:
            detail = None
        elif len(words) == 1 and type_re.match(words[0]):
            detail = words[0]
        else:
            return None
    return modifiers, type, detail
from typing import List
from typing import Dict
def make_car_dict(key: str, data: List[str]) -> Dict:
    """Organize car data for 106 A/B of the debtor

    :param key: The section id
    :param data: Content extracted from the car data section
    :return: Organized data for the automobile of the debtor
    """
    car = {"key": key}
    car["make"] = data[0]
    car["model"] = data[1]
    car["year"] = data[2]
    car["mileage"] = data[3]
    # data[4] is not used here.
    car["other_information"] = data[5]
    car["property_value"] = data[6]
    car["your_property_value"] = data[7]
    return car
import ast
def insert_code(src, dest, kind):
    """Insert code from the source file into the destination file's text.

    Args:
        src: path of the file whose stripped contents are inserted.
        dest: path of the file receiving the insertion.
        kind: "prefix" or "postfix"; selects the insertion-index helper.

    Returns:
        The full text of the destination file with the insertion applied.

    Raises:
        ValueError: if `kind` is not "prefix" or "postfix" (previously this
            crashed later with NameError on `idx`).
    """
    # Context managers close the handles deterministically; the previous
    # version leaked both file objects.
    with open(src) as src_file:
        source_text = src_file.read().strip()
    with open(dest) as dest_file:
        destination_text = dest_file.read()
    destination_lines = destination_text.split('\n')
    destination_tree = ast.parse(destination_text)
    if not destination_tree.body:
        idx = 0
    elif kind == "prefix":
        idx = find_prefix_insertion_idx(destination_tree)
    elif kind == "postfix":
        idx = find_postfix_insertion_idx(destination_tree)
    else:
        raise ValueError(f"unknown insertion kind: {kind!r}")
    if idx >= len(destination_tree.body):
        # Strip blank line before insertion
        if destination_lines[-1].strip() == '':
            del destination_lines[-1]
        # Append to file
        destination_lines.append('\n\n' + source_text + '\n')
    else:
        # Start with index at first line above object definition
        line_no = destination_tree.body[idx].lineno - 1  # line numbers count from 1
        line_no = get_previous_blank_line_no(destination_lines, line_no)
        # Strip blank lines before insertion
        if destination_lines[line_no - 1].strip() == '':
            del destination_lines[line_no - 1]
            line_no -= 1
        # perform the insertion
        destination_lines.insert(line_no, '\n\n' + source_text + '\n')
    return '\n'.join(destination_lines)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.