content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from datetime import datetime
def iso8601(dt=None, aware=False):
    """Return an ISO 8601 timestamp string for datetime *dt*.

    If *dt* is None, use the current time: timezone-aware UTC when
    *aware* is truthy, otherwise a naive UTC timestamp.
    Format: YYYY-MM-DDTHH:MM:SS.mmmmmm (strftime '%Y-%m-%dT%H:%M:%S.%f'),
    with a '+00:00' offset suffix in the aware case.

    Parameters
    ----------
    dt : datetime.datetime or None
        Datetime to format; defaults to "now".
    aware : bool
        When dt is None, produce a timezone-aware UTC timestamp.

    Returns
    -------
    str
        ISO 8601 formatted timestamp.
    """
    # BUG FIX: the module imports the *class* (``from datetime import
    # datetime``), so the original ``datetime.datetime.now(...)`` and
    # ``hasattr(datetime, "timezone")`` never worked (the aware branch was
    # unreachable and would have raised AttributeError anyway).
    from datetime import timezone
    if dt is None:
        if aware:
            dt = datetime.now(timezone.utc)  # timezone-aware UTC
        else:
            dt = datetime.utcnow()  # naive UTC
    return dt.isoformat()
import re
import string
def parse_text(infile, xpath=None, filter_words=None, attributes=None):
    """Filter text using XPath, regex keywords, and tag attributes.

    Keyword arguments:
    infile -- HTML or text content to parse (list, str, or lh.HtmlElement)
    xpath -- an XPath expression (str)
    filter_words -- regex keywords (list)
    attributes -- HTML tag attributes (list)

    Return a list of strings of printable text.
    """
    infiles = []
    text = []
    if xpath is not None:
        # Narrow the input down to the nodes selected by the XPath expression.
        infile = parse_html(infile, xpath)
        if isinstance(infile, list):
            if isinstance(infile[0], lh.HtmlElement):
                infiles = list(infile)
            else:
                # Already plain strings; restore line structure.
                text = [line + '\n' for line in infile]
        elif isinstance(infile, lh.HtmlElement):
            infiles = [infile]
        else:
            text = [infile]
    else:
        infiles = [infile]
    if attributes is not None:
        attributes = [clean_attr(x) for x in attributes]
        attributes = [x for x in attributes if x]
    else:
        # Default to extracting the text content of every node.
        attributes = ['text()']
    if not text:
        # Harvest text from every element except <script> and <style>.
        text_xpath = '//*[not(self::script) and not(self::style)]'
        for attr in attributes:
            for infile in infiles:
                if isinstance(infile, lh.HtmlElement):
                    new_text = infile.xpath('{0}/{1}'.format(text_xpath, attr))
                else:
                    # re.split preserves delimiters place in the list
                    new_text = [x for x in re.split('(\n)', infile) if x]
                text += new_text
    if filter_words is not None:
        text = re_filter(text, filter_words)
    # Strip non-printable characters and drop empty lines.
    return [''.join(x for x in line if x in string.printable)
            for line in remove_whitespace(text) if line]
def _check_blacklist_members(rule_members=None, policy_members=None):
"""Blacklist: Check that policy members ARE NOT in rule members.
If a policy member is found in the rule members, add it to the
violating members.
Args:
rule_members (list): IamPolicyMembers allowed in the rule.
policy_members (list): IamPolicyMembers in the policy.
Return:
list: Policy members found in the blacklist (rule members).
"""
violating_members = [
policy_member
for policy_member in policy_members
for rule_member in rule_members
if rule_member.matches(policy_member)
]
return violating_members | 2fc41f4ff6c401de0976b04dd6a8cb858cef96e7 | 3,637,202 |
def create_variable_weather(weather_data, original_epw_file, columns: list = None, variation: tuple = None):
    """
    Create a new weather file adding gaussian noise to the original one.

    Parameters
    ----------
    weather_data : opyplus.WeatherData
        Opyplus object with the weather for the simulation
    original_epw_file : str
        Path to the original EPW file
    columns : list, optional
        List of columns to be affected (defaults to ['drybulb'])
    variation : tuple
        (mean, std) of the Gaussian noise; if None, nothing is done

    Return
    ------
    str or None
        Name of the file created in the same location as the original one,
        or None when no variation was requested.
    """
    # BEST-PRACTICE FIX: avoid a mutable default argument; None now stands
    # in for the previous ['drybulb'] default (behavior unchanged).
    if columns is None:
        columns = ['drybulb']
    if variation is None:
        return None
    # Get dataframe with weather series
    df = weather_data.get_weather_series()
    # Generate random noise with one column per affected weather variable
    shape = (df.shape[0], len(columns))
    mu, std = variation
    noise = np.random.normal(mu, std, shape)
    df[columns] += noise
    # Save new weather data
    weather_data.set_weather_series(df)
    filename = original_epw_file.split('.epw')[0]
    filename += '_Random_%s_%s.epw' % (str(mu), str(std))
    weather_data.to_epw(filename)
    return filename
def transform(func, geom):
    """Applies `func` to all coordinates of `geom` and returns a new
    geometry of the same type from the transformed coordinates.

    `func` maps x, y, and optionally z to output xp, yp, zp. The input
    parameters may iterable types like lists or arrays or single values.
    The output shall be of the same type. Scalars in, scalars out.
    Lists in, lists out.

    For example, here is an identity function applicable to both types
    of input.

        def id_func(x, y, z=None):
            return tuple(filter(None, [x, y, z]))
        g2 = transform(id_func, g1)

    Using pyproj >= 2.1, this example will accurately project Shapely geometries:

        import pyproj
        wgs84 = pyproj.CRS('EPSG:4326')
        utm = pyproj.CRS('EPSG:32618')
        project = pyproj.Transformer.from_crs(wgs84, utm, always_xy=True).transform
        g2 = transform(project, g1)

    Note that the always_xy kwarg is required here as Shapely geometries only support
    X,Y coordinate ordering.

    Lambda expressions such as the one in

        g2 = transform(lambda x, y, z=None: (x+1.0, y+1.0), g1)

    also satisfy the requirements for `func`.

    Parameters
    ----------
    func : callable
        Coordinate-mapping function (vectorized or scalar; see above).
    geom : shapely geometry
        Geometry whose coordinates are transformed.

    Returns
    -------
    A new geometry of the same type as *geom*; empty geometries are
    returned unchanged.

    Raises
    ------
    ValueError
        If ``geom.type`` is not a recognized geometry type.
    """
    # Empty geometries have no coordinates to transform.
    if geom.is_empty:
        return geom
    if geom.type in ('Point', 'LineString', 'LinearRing', 'Polygon'):
        # First we try to apply func to x, y, z sequences. When func is
        # optimized for sequences, this is the fastest, though zipping
        # the results up to go back into the geometry constructors adds
        # extra cost.
        try:
            if geom.type in ('Point', 'LineString', 'LinearRing'):
                return type(geom)(zip(*func(*zip(*geom.coords))))
            elif geom.type == 'Polygon':
                # Polygons transform the exterior shell and each interior
                # ring separately, then reassemble.
                shell = type(geom.exterior)(
                    zip(*func(*zip(*geom.exterior.coords))))
                holes = list(type(ring)(zip(*func(*zip(*ring.coords))))
                             for ring in geom.interiors)
                return type(geom)(shell, holes)
        # A func that assumes x, y, z are single values will likely raise a
        # TypeError, in which case we'll try again.
        except TypeError:
            if geom.type in ('Point', 'LineString', 'LinearRing'):
                return type(geom)([func(*c) for c in geom.coords])
            elif geom.type == 'Polygon':
                shell = type(geom.exterior)(
                    [func(*c) for c in geom.exterior.coords])
                holes = list(type(ring)([func(*c) for c in ring.coords])
                             for ring in geom.interiors)
                return type(geom)(shell, holes)
    elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':
        # Multi-part geometries: transform each part recursively.
        return type(geom)([transform(func, part) for part in geom.geoms])
    else:
        raise ValueError('Type %r not recognized' % geom.type)
def get_jit(policy_name, asc_location, resource_group_name):
    """Fetch a JIT network access policy from Azure Security Center.

    Args:
        policy_name: Policy name
        asc_location: Machine location
        resource_group_name: Resource name group

    Returns:
        dict: response body
    """
    cmd_url = (
        f"/resourceGroups/{resource_group_name}/providers/Microsoft.Security"
        f"/locations/{asc_location}/jitNetworkAccessPolicies/"
        f"{policy_name}?api-version={JIT_API_VERSION}"
    )
    return http_request("GET", cmd_url)
def rank_by_yield(df):
    """
    Rank phenotypes by yield only.

    Parameters
    ----------
    df : pd.DataFrame
        MAIZSIM yield output dataframe (df_sims or df_mature).

    Returns
    -------
    list
        Phenotype indices ordered by descending mean yield.
    """
    # Aggregate mean ear dry matter per (cvar, site) combination.
    yield_matrix = agg_sims(df, ['cvar', 'site'], 'mean', 'dm_ear')
    df_yield_means = pd.DataFrame(yield_matrix)
    # Average across sites to get one yield value per phenotype.
    df_yield_means['mean'] = df_yield_means.mean(axis=1)
    # Highest-yielding phenotypes first.
    ranked = df_yield_means.sort_values(by=['mean'], axis=0, ascending=False)
    return list(ranked.index)
def k2lc(epic):
    """
    Load a K2 light curve from the local EVEREST archive.

    Parameters
    ----------
    epic : str
        EPIC identifier; the first four characters select the directory
        prefix, the remainder the star id.

    Returns
    -------
    tuple
        (x, y) arrays as returned by ``process_data``.
    """
    # IDIOM FIX: the original bound a local named ``id``, shadowing the
    # builtin; use a descriptive name instead.
    prefix = epic[:4]
    star_id = epic[4:]
    campaign = "01"
    path = "data/c01/{0}00000/{1}".format(prefix, star_id)
    end = "kepler_v1.0_lc.fits"
    fits_file = "{0}/hlsp_everest_k2_llc_{1}-c{2}_{3}".format(
        path, epic, campaign, end)
    x, y = process_data(fits_file)
    return x, y
def multi_leave_topics(multileaver, user_id, time):
    """Multileave a user's suggested topics and return scored records.

    Returns a list of (score, time, user_id, topic, system) tuples ready
    for database insertion, or None when the user has no suggestions.
    """
    topics = get_user_suggested_topics(user_id)
    if not topics:
        return None
    ranking, credit = multileaver.team_draft_multileave(topics)
    length = multileaver.ranking_length
    # Higher-ranked topics receive higher scores (length - position).
    return [
        (length - position, time, user_id, topic, system)
        for position, (topic, system) in enumerate(zip(ranking, credit))
    ]
def generate_age(sex):
    """Generate the age of a person depending on its sex.

    Parameters
    ----------
    sex : int
        Sex should be either 0 (men) or 1 (women).

    Raises
    ------
    ValueError
        If sex is not 0 or 1.

    Returns
    -------
    age : int
        Generated age of a person, drawn by inverting the empirical CDF.
    """
    # Draw first so the RNG state advances exactly as before.
    u = np.random.rand(1)
    if sex == 0:
        ecdf = menecdf
    elif sex == 1:
        ecdf = womenecdf
    else:
        raise ValueError("Sex should be either 0 (men) or 1 (women)")
    # Pick the age whose cumulative probability is closest to the draw.
    nearest = (ecdf - u).abs().argsort()[:1]
    return ecdf.iloc[nearest].index.tolist()[0]
def _extract_text_Wikilink(node: mwparserfromhell.nodes.wikilink.Wikilink) -> str:
    """
    Extract plain text from a wikilink node.

    Wikilinks come in 2 formats, thumbnails and actual links.
    In the case of thumbnails, if posible pull out the nested caption;
    for ordinary links, extract the text of the link title.
    """
    if node.title.startswith('File:') or node.title.startswith('Image:'):
        # IDIOM FIX: compare to None with ``is``, not ``==``.
        if node.text is None:
            return ''
        # Keep caption fragments, dropping thumbnail layout parameters.
        return ''.join(
            part for part in map(_extract_text, node.text.nodes)
            if 'thumb|' not in part
        )
    return ''.join(map(_extract_text, node.title.nodes))
def dict_zip(*dicts):
    """
    Zip a series of dicts that share the same keys into a single dict
    mapping each key to the list of values from every input dict.

    Returns an empty dict when called with no arguments.

    Raises
    ------
    KeyError
        If the input dicts do not all share the same key set.
    """
    # ROBUSTNESS FIX: the original raised IndexError on an empty call.
    if not dicts:
        return {}
    keyset = set(dicts[0])
    for d in dicts:
        if set(d) != keyset:
            # BUG FIX: the message referenced a nonexistent ``fold_dicts``.
            raise KeyError(f"Mismatched keysets in dict_zip: {sorted(keyset)}, {sorted(set(d))}")
    return {key: [d[key] for d in dicts] for key in keyset}
import argparse
def process_command_line():
    """
    Parse command line arguments.

    `argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
    Return a Namespace representing the argument list.
    """
    parser = argparse.ArgumentParser(
        prog='obflow_6_output',
        description='Run inpatient OB simulation output processor')
    # Required positional arguments
    parser.add_argument("output_path", type=str,
                        help="Destination Path for output summary files")
    parser.add_argument("suffix", type=str,
                        help="String to append to various summary filenames")
    parser.add_argument('--process_logs', dest='process_logs', action='store_true')
    # Optional value arguments share the same (name, type, help) shape.
    for arg_name, arg_type, arg_help in (
        ("--stop_log_path", str, "Path containing stop logs"),
        ("--occ_stats_path", str, "Path containing occ stats csvs"),
        ("--run_time", float, "Simulation run time"),
        ("--warmup_time", float, "Simulation warmup time"),
    ):
        parser.add_argument(arg_name, type=arg_type, default=None, help=arg_help)
    parser.add_argument('--include_inputs', dest='include_inputs', action='store_true')
    parser.add_argument(
        "--scenario_inputs_path", type=str, default=None,
        help="Filename for scenario inputs"
    )
    #parser.add_argument('--include_qng_approx', dest='include_qng_approx', action='store_true')
    return parser.parse_args()
def Ustagger_to_mass(U):
    """
    Convert data on a U staggered grid to mass points.

    U are the data on the left and right of a grid box; each mass-point
    value is the average of the two adjacent staggered values:
    (column_j + column_j+1) / 2 = masspoint_in_column.

    Useful for converting U, XLAT_U, and XLONG_U to masspoints.
    Difference between XLAT_U and XLAT is usually small, on order of 10e-5.

    Input:
        U grid with size (##, ##+1)
    Output:
        U on mass points with size (##, ##)

    PERFORMANCE FIX: the original grew the result with np.column_stack
    inside a loop (quadratic copying); a single vectorized slice average
    produces the same values in one pass. Note: for a two-column input the
    original degenerated to a 1-D array; this version consistently returns
    a 2-D (##, ##) array.
    """
    U = np.asarray(U)
    # Average each pair of adjacent staggered columns.
    return (U[:, :-1] + U[:, 1:]) / 2.
def _linear(args,
            output_size,
            bias,
            bias_initializer=tf.zeros_initializer(),
            kernel_initializer=initializer(),
            scope=None,
            reuse=None):
    """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

    Args:
        args: a 2D Tensor or a list of 2D, batch x n, Tensors.
        output_size: int, second dimension of W[i].
        bias: boolean, whether to add a bias term or not.
        bias_initializer: starting value to initialize the bias
            (default is all zeros).
        kernel_initializer: starting value to initialize the weight.
        scope: variable scope for the created variables.
        reuse: whether to reuse variables within the scope.

    Returns:
        A 2D Tensor with shape [batch x output_size] equal to
        sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

    Raises:
        ValueError: If some of the arguments has unspecified or wrong shape.
    """
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("'args' must be specified")
    if not nest.is_sequence(args):
        args = [args]
    # Calculate the total size of arguments on dimension 1.
    total_arg_size = 0
    shapes = [a.get_shape() for a in args]
    for shape in shapes:
        if shape.ndims != 2:
            raise ValueError('linear is expecting 2D arguments: %s' % shapes)
        if shape[1].value is None:
            raise ValueError('linear expects shape[1] to be provided for shape %s, '
                             'but saw %s' % (shape, shape[1]))
        else:
            total_arg_size += shape[1].value
    dtype = [a.dtype for a in args][0]
    # Now to computation.
    with tf.variable_scope(scope, reuse=reuse) as outer_scope:
        # BUG FIX: the original passed ``initializer=initializer`` (the
        # module-level factory function), silently ignoring the
        # ``kernel_initializer`` parameter.
        weights = tf.get_variable(
            name='linear_kernel',
            shape=[total_arg_size, output_size],
            dtype=dtype,
            regularizer=regularizer,
            initializer=kernel_initializer)
        if len(args) == 1:
            res = math_ops.matmul(args[0], weights)
        else:
            res = math_ops.matmul(array_ops.concat(args, axis=1), weights)
        if not bias:
            return res
        with tf.variable_scope(outer_scope) as inner_scope:
            inner_scope.set_partitioner(None)
            # BUG FIX: likewise honor ``bias_initializer`` instead of the
            # module-level ``initializer``.
            biases = tf.get_variable(
                name='linear_bias',
                shape=[output_size],
                dtype=dtype,
                regularizer=regularizer,
                initializer=bias_initializer)
        return nn_ops.bias_add(res, biases)
def build_fpn_mask_graph(rois, feature_maps, image_size, num_classes,
                         pool_size, train_bn=True):
    """Builds the computation graph of the mask head of Feature Pyramid Network.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P1, P2, P3, P4, P5]. Each has a different resolution.
    image_size: Image dimensions used by the ROI align layer.
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
    """
    # ROI Pooling
    # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
    x = ROIAlignLayer([pool_size, pool_size],
                      name="roi_align_mask")((rois, image_size, feature_maps))
    # Four identical conv -> batch norm -> relu stages (deduplicated from
    # four copy-pasted blocks; layer names are unchanged).
    for i in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask_conv{}".format(i))(x)
        x = KL.TimeDistributed(BatchNorm(),
                               name="mrcnn_mask_bn{}".format(i))(x, training=train_bn)
        x = KL.Activation('relu')(x)
    # Upsample to 2x pool size, then predict one sigmoid mask per class.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name='mask')(x)
    # NOTE: removed leftover debug ``print`` of the output shape.
    return x
def calculate_perf_counter_counter(previous, current, property_name):
    """
    PERF_COUNTER_COUNTER
    https://technet.microsoft.com/en-us/library/cc740048(v=ws.10).aspx

    Average rate of *property_name* per second between two samples.
    Returns None when either sample is missing the counter value.
    """
    start_count = previous[property_name]
    end_count = current[property_name]
    start_time = previous["Timestamp_Sys100NS"]
    end_time = current["Timestamp_Sys100NS"]
    frequency = current["Frequency_Sys100NS"]
    if start_count is None or end_count is None:
        return None
    # Counter delta divided by elapsed seconds (ticks / frequency).
    return (end_count - start_count) / ((end_time - start_time) / frequency)
import http
from datetime import datetime
def event_edit(request, id):
    """Edit form for a particular event.

    GET renders the edit form plus a large context of related objects
    (suggested event, Vid.ly submissions, discussion, approvals, hit
    counts, uploads). POST validates and saves the event, synchronizing
    Vid.ly token protection when the privacy setting changed.
    """
    event = get_object_or_404(Event, id=id)
    # can_edit_event returns an HttpResponse (redirect/denial) when the
    # user may not edit this event.
    result = can_edit_event(event, request.user)
    if isinstance(result, http.HttpResponse):
        return result
    # Pick the form class matching the user's permission level.
    if request.user.has_perm('main.change_event_others'):
        form_class = forms.EventEditForm
    elif request.user.has_perm('main.add_event_scheduled'):
        form_class = forms.EventExperiencedRequestForm
    else:
        form_class = forms.EventRequestForm
    curated_groups = (
        CuratedGroup.objects.filter(event=event).order_by('created')
    )
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=event)
        if form.is_valid():
            event = form.save(commit=False)
            _event_process(request, form, event)
            if not event.location:
                # NOTE(review): presumably django.utils.timezone here —
                # location-less events are pinned to UTC; confirm import.
                event.start_time = event.start_time.replace(
                    tzinfo=timezone.utc
                )
            event.save()
            form.save_m2m()
            edit_url = reverse('manage:event_edit', args=(event.pk,))
            if is_privacy_vidly_mismatch(event):
                # We'll need to update the status of token protection
                # on Vid.ly for this event.
                try:
                    vidly.update_media_protection(
                        event.template_environment['tag'],
                        event.privacy != Event.PRIVACY_PUBLIC,
                    )
                    submissions = VidlySubmission.objects.filter(
                        event=event,
                        tag=event.template_environment['tag'],
                    ).order_by('-submission_time')
                    # Only the most recent submission is updated; the
                    # slice plus ``break`` makes that explicit.
                    for submission in submissions[:1]:
                        submission.token_protection = (
                            event.privacy != Event.PRIVACY_PUBLIC
                        )
                        submission.save()
                        break
                except vidly.VidlyUpdateError as x:
                    messages.error(
                        request,
                        'Video protect status could not be updated on '
                        'Vid.ly\n<code>%s</code>' % x
                    )
            messages.info(
                request,
                'Event "<a href=\"%s\">%s</a>" saved. [Edit again](%s)' % (
                    reverse('main:event', args=(event.slug,)),
                    event.title,
                    edit_url
                )
            )
            return redirect('manage:events')
    else:
        # GET: pre-populate the form; curated group names become both the
        # initial value and the available choices.
        initial = {}
        initial['curated_groups'] = curated_groups.values_list(
            'name',
            flat=True
        )
        curated_groups_choices = [
            (x, x) for x in initial['curated_groups']
        ]
        form = form_class(
            instance=event,
            initial=initial,
            curated_groups_choices=curated_groups_choices,
        )
    context = {
        'form': form,
        'event': event,
        'suggested_event': None,
        'suggested_event_comments': None,
        'tweets': EventTweet.objects.filter(event=event).order_by('id'),
    }
    try:
        suggested_event = SuggestedEvent.objects.get(accepted=event)
        context['suggested_event'] = suggested_event
        context['suggested_event_comments'] = (
            SuggestedEventComment.objects
            .filter(suggested_event=suggested_event)
            .select_related('user')
            .order_by('created')
        )
    except SuggestedEvent.DoesNotExist:
        pass
    context['is_vidly_event'] = False
    if event.template and 'Vid.ly' in event.template.name:
        context['is_vidly_event'] = True
        context['vidly_submissions'] = (
            VidlySubmission.objects
            .filter(event=event)
            .order_by('-submission_time')
        )
    # Is it stuck and won't auto-archive?
    context['stuck_pending'] = False
    now = timezone.now()
    # FIXME(review): with ``from datetime import datetime`` at module
    # level, ``datetime.timedelta`` raises AttributeError; this needs
    # ``import datetime`` or ``from datetime import timedelta``.
    time_ago = now - datetime.timedelta(minutes=15)
    if (
        event.status == Event.STATUS_PENDING and
        event.template and
        'Vid.ly' in event.template.name and
        event.template_environment and  # can be None
        event.template_environment.get('tag') and
        not VidlySubmission.objects.filter(
            event=event,
            submission_time__gte=time_ago
        )
    ):
        # Vid.ly says the encode finished but no recent submission exists,
        # so auto-archiving will never trigger.
        tag = event.template_environment['tag']
        results = vidly.query(tag)
        status = results.get(tag, {}).get('Status')
        if status == 'Finished':
            context['stuck_pending'] = True
    try:
        discussion = Discussion.objects.get(event=event)
        context['discussion'] = discussion
        context['comments_count'] = Comment.objects.filter(event=event).count()
    except Discussion.DoesNotExist:
        context['discussion'] = None
    context['approvals'] = (
        Approval.objects
        .filter(event=event)
        .select_related('group')
    )
    context['chapters_count'] = Chapter.objects.filter(event=event).count()
    context['closed_captions'] = ClosedCaptions.objects.filter(event=event)
    try:
        context['assignment'] = EventAssignment.objects.get(event=event)
    except EventAssignment.DoesNotExist:
        context['assignment'] = None
    try:
        context['survey'] = Survey.objects.get(events=event)
    except Survey.DoesNotExist:
        context['survey'] = None
    # Sum archived and live hit counters for this event.
    context['archived_hits'] = 0
    context['live_hits'] = 0
    for each in EventHitStats.objects.filter(event=event).values('total_hits'):
        context['archived_hits'] += each['total_hits']
    for each in EventLiveHits.objects.filter(event=event).values('total_hits'):
        context['live_hits'] += each['total_hits']
    context['count_event_uploads'] = Upload.objects.filter(event=event).count()
    context['vidly_tag_domains'] = None
    if (
        event.template and
        'Vid.ly' in event.template.name and
        event.template_environment and
        event.template_environment.get('tag')
    ):
        context['vidly_tag_domains'] = VidlyTagDomain.objects.filter(
            tag=event.template_environment['tag']
        )
    return render(request, 'manage/event_edit.html', context)
from typing import Callable
def migrator(from_: str, to_: str) -> Callable[[MigratorF], MigratorF]:
    """Decorate function as migrating settings from v `from_` to v `to_`.

    A migrator should mutate a `NapariSettings` model from schema version
    `from_` to schema version `to_` (in place).

    Parameters
    ----------
    from_ : str
        NapariSettings.schema_version version that this migrator expects as
        input
    to_ : str
        NapariSettings.schema_version version after this migrator has been
        executed.

    Returns
    -------
    Callable[[MigratorF], MigratorF]
        Decorator that registers the wrapped migration function.
    """
    def decorator(migrate_func: MigratorF) -> MigratorF:
        source = Version.parse(from_)
        target = Version.parse(to_)
        assert target >= source, 'Migrator must increase the version.'
        # Register so the migration machinery can chain migrators in order.
        _MIGRATORS.append(Migrator(source, target, migrate_func))
        return migrate_func
    return decorator
import json
def rawChipByLocation_query():
    """
    Get chips images by parcel id.
    Generates a series of extracted Sentinel-2 LEVEL2A segments of 128x128 (10m
    resolution bands) or 64x64 (20 m) pixels as list of full resolution GeoTIFFs
    ---
    tags:
      - rawChipByLocation
    responses:
      200:
        description: A JSON dictionary with date labels and
            relative URLs to cached GeoTIFFs.
    """
    # NOTE: the docstring above doubles as the Swagger/flasgger API spec
    # and must stay intact.
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
    # Determine the requester IP, honoring a proxy-forwarding header.
    forwarded = request.environ.get('HTTP_X_FORWARDED_FOR')
    rip = request.environ['REMOTE_ADDR'] if forwarded is None else forwarded
    params = request.args
    lon = params.get('lon')
    lat = params.get('lat')
    start_date = params.get('start_date')
    end_date = params.get('end_date')
    band = params.get('band')
    plevel = params.get('plevel', 'LEVEL2A')
    chipsize = params.get('chipsize', '1280')
    # Cache key unique per requester and query; dots break paths/URLs.
    unique_id = "dump/{}E{}N{}_{}_{}_{}".format(
        rip, lon, lat, plevel, chipsize, band).replace('.', '_')
    data = image_requests.getRawChipByLocation(
        lon, lat, start_date, end_date, unique_id, band, chipsize, plevel)
    if data:
        return send_from_directory(f"files/{unique_id}", 'dump.json')
    return json.dumps({})
import os
from datetime import datetime
def load_adni_longitudinal_av45_pet():
    """Returns paths of longitudinal ADNI AV45-PET scans.

    Collects per-subject PET image paths, joins them with diagnosis and
    roster phenotype CSVs, and returns everything in a sklearn-style Bunch
    (pet, dx_group, images, ages, subjects, exam codes/dates).
    """
    # get file paths and description
    (subjects,
     subject_paths,
     description) = _get_subjects_and_description(base_dir='ADNI_av45_pet',
                                                  prefix='I[0-9]*')
    # get pet files, one list of image paths per subject.
    # BUG FIX: the original called ``map(...).tolist()`` — map objects have
    # no ``tolist`` in Python 3; materialize with a list comprehension.
    pet_files = [_glob_subject_img(x, suffix='pet/wr*.nii', first_img=False)
                 for x in subject_paths]
    # Flatten per-subject lists while recording cumulative offsets.
    idx = [0]
    pet_files_all = []
    for pet_file in pet_files:
        idx.append(idx[-1] + len(pet_file))
        pet_files_all.extend(pet_file)
    pet_files_all = np.array(pet_files_all)
    # Image IDs are encoded in the last underscore-separated token of the
    # filename (extension stripped).
    images = np.array([os.path.split(pet_file)[-1].split('_')[-1][:-4]
                       for pet_file in pet_files_all])
    # get phenotype from csv
    dx = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                  'DXSUM_PDXCONV_ADNIALL.csv'))
    roster = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                      'ROSTER.csv'))
    df = description[description['Image_ID'].isin(images)]
    dx_group_all = np.array(df['DX_Group'])
    subjects_all = np.array(df['Subject_ID'])
    ages = np.array(df['Age'])
    exams = np.array(df['Study_Date'])
    exams = list(map(lambda e: datetime.strptime(e, '%m/%d/%Y').date(), exams))
    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    # NOTE(review): ``cachedir=`` is the legacy joblib keyword (newer
    # releases use ``location=``) — confirm against the pinned version.
    memory = Memory(cachedir=cache_dir, verbose=0)

    def _get_ridspet(subjects_all):
        # Map each Subject_ID (PTID) to its roster RID.
        return list(map(lambda s: _ptid_to_rid(s, roster), subjects_all))
    rids = memory.cache(_get_ridspet)(subjects_all)

    def _get_examdatespet(rids):
        # Look up the diagnosis code/date for each RID at its exam date.
        return list(map(lambda i: _get_dx(
            rids[i], dx, exams[i], viscode=None, return_code=True),
            range(len(rids))))
    exam_dates = np.array(memory.cache(_get_examdatespet)(rids))

    def _get_viscodespet(rids):
        # Resolve the (viscode, viscode2) pair for each exam.
        return list(map(lambda i: _get_vcodes(
            rids[i], str(exam_dates[i]), dx), range(len(rids))))
    viscodes = np.array(memory.cache(_get_viscodespet)(rids))
    if len(viscodes) > 0:
        vcodes, vcodes2 = viscodes[:, 0], viscodes[:, 1]
    else:
        vcodes, vcodes2 = None, None
    return Bunch(pet=pet_files_all,
                 dx_group=dx_group_all,
                 images=images, ages=ages, subjects=subjects_all,
                 exam_codes=vcodes, exam_dates=exam_dates, exam_codes2=vcodes2)
from pathlib import Path
from typing import Optional
from typing import List
import json
def build_settings(
    tmp_path: Path,
    template: str,
    *,
    oidc_clients: Optional[List[OIDCClient]] = None,
    **settings: str,
) -> Path:
    """Generate a test Gafaelfawr settings file with secrets.

    Parameters
    ----------
    tmp_path : `pathlib.Path`
        The root of the temporary area.
    template : `str`
        Settings template to use.
    oidc_clients : List[`gafaelfawr.config.OIDCClient`] or `None`
        Configuration information for clients of the OpenID Connect server.
    **settings : `str`
        Any additional settings to add to the settings file.

    Returns
    -------
    settings_path : `pathlib.Path`
        The path of the settings file.
    """
    # Write every secret to its own file; keys name the secret on disk.
    secret_files = {
        "bootstrap": store_secret(tmp_path, "bootstrap", str(Token()).encode()),
        "session": store_secret(tmp_path, "session", Fernet.generate_key()),
        "issuer": store_secret(tmp_path, "issuer", _ISSUER_KEY.private_key_as_pem()),
        "influxdb": store_secret(tmp_path, "influxdb", b"influx-secret"),
        "github": store_secret(tmp_path, "github", b"github-secret"),
        "oidc": store_secret(tmp_path, "oidc", b"oidc-secret"),
    }
    # Serialize OpenID Connect client credentials when provided.
    oidc_path = tmp_path / "oidc.json"
    if oidc_clients:
        clients_data = [
            {"id": c.client_id, "secret": c.client_secret}
            for c in oidc_clients
        ]
        oidc_path.write_text(json.dumps(clients_data))
    settings_path = _build_settings_file(
        tmp_path,
        template,
        database_url=TEST_DATABASE_URL,
        bootstrap_token_file=secret_files["bootstrap"],
        session_secret_file=secret_files["session"],
        issuer_key_file=secret_files["issuer"],
        github_secret_file=secret_files["github"],
        oidc_secret_file=secret_files["oidc"],
        influxdb_secret_file=secret_files["influxdb"],
        oidc_server_secrets_file=oidc_path if oidc_clients else "",
    )
    # Append any caller-supplied key/value settings verbatim.
    if settings:
        with settings_path.open("a") as f:
            for key, value in settings.items():
                f.write(f"{key}: {value}\n")
    return settings_path
import os
import struct
def load_mnist(path, data_type='train'):
    """Load MNIST images and labels in IDX format from `path`.

    Returns a tuple (images, labels) where images has shape (n, 784)
    and labels has shape (n, 1), both uint8.
    """
    labels_path = os.path.join(path, '{0}-labels.idx1-ubyte'.format(data_type))
    images_path = os.path.join(path, '{0}-images.idx3-ubyte'.format(data_type))
    with open(labels_path, 'rb') as lbl_file:
        # 8-byte header: magic number and item count (unused).
        struct.unpack('>II', lbl_file.read(8))
        labels = np.fromfile(lbl_file, dtype=np.uint8).reshape(-1, 1)
    with open(images_path, 'rb') as img_file:
        # 16-byte header: magic, count, rows, cols (unused).
        struct.unpack('>IIII', img_file.read(16))
        images = np.fromfile(img_file, dtype=np.uint8).reshape(len(labels), 784)
    return images, labels
def get_actions_matching_arn(arn):
    """
    Given a user-supplied ARN, get a list of all actions that correspond to that ARN.

    Arguments:
        arn: A user-supplied arn
    Returns:
        List: A sorted list of unique 'service:Action' strings that can match it.
    """
    matched = set()
    for raw_arn in get_matching_raw_arns(arn):
        resource_type_name = get_resource_type_name_with_raw_arn(raw_arn)
        service_prefix = get_service_from_arn(raw_arn)
        service_prefix_data = get_service_prefix_data(service_prefix)
        for action_data in service_prefix_data["privileges"].values():
            for resource_data in action_data["resource_types"].values():
                # Normalize e.g. 'bucket*' to 'bucket' before comparing.
                this_resource_type = resource_data["resource_type"].strip("*")
                if this_resource_type.lower() == resource_type_name.lower():
                    matched.add(f"{service_prefix}:{action_data['privilege']}")
    # set + sorted gives the same unique, ordered result as the original
    # dedupe-then-sort.
    return sorted(matched)
import os
def path_splitter(path):
    """
    Split a path into its constituent parts.

    Might be better written as a recursive function.

    :param path: The path to split.
    :return: A list of the path's constituent parts.
    """
    res = []
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # We're done, this is an absolute root ('/' or a drive):
            # os.path.split no longer shrinks the head.
            res.insert(0, head)
            break
        elif tail == path:
            # We're done, this is the last relative component.
            # BUG FIX: the original inserted the (empty) head here,
            # dropping the first component of relative paths.
            res.insert(0, tail)
            break
        else:
            path = head
            res.insert(0, tail)
    return res
import logging
import time
def _run_defp(mode,code,time_steps,error_model,decoder,error_probability,perm_rates,code_name,layout,measurement_error_probability,
              max_runs=None,max_failures=None,random_seed=None):
    """Implements run and run_ftp functions.

    Repeatedly simulates decoding of the (deformed) code and aggregates
    success/failure statistics into a runs_data dict that is returned.
    """
    # assumptions
    assert (mode == 'ideal' and time_steps == 1) or mode == 'ftp'
    # derived defaults
    if max_runs is None and max_failures is None:
        max_runs = 1
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: code={},time_steps={},error_model={},decoder={},error_probability={},'
                     'measurement_error_probability={} max_runs={},max_failures={},random_seed={}.'
                     .format(code,time_steps,error_model,decoder,error_probability,
                             measurement_error_probability,max_runs,max_failures,random_seed))
    wall_time_start = time.perf_counter()
    # NOTE(review): 'wall_time' is initialized but never updated before
    # return in this function, and 'coset_ps' is superseded by the
    # 'coset_ps_list' key added at the end — confirm downstream consumers.
    runs_data = {
        'code': code.label,
        'n_k_d': code.n_k_d,
        'time_steps': time_steps,
        'error_model': error_model.label,
        'decoder': decoder.label,
        'error_probability': error_probability,
        'measurement_error_probability': measurement_error_probability,
        'n_run': 0,
        'n_success': 0,
        'n_fail': 0,
        'n_logical_commutations': None,
        'custom_totals': None,
        'error_weight_total': 0,
        'error_weight_pvar': 0.0,
        'logical_failure_rate_samples': 0.0,
        'logical_failure_rate_samples_errorbar': 0.0,
        'coset_ps': 0.0,
        # (a duplicate 'logical_failure_rate_errorbar' key was removed here;
        # a dict literal silently keeps only the last occurrence)
        'logical_failure_rate': 0.0,
        'logical_failure_rate_errorbar': 0.0,
        'physical_error_rate': 0.0,
        'wall_time': 0.0,
    }
    # if random_seed is None,unpredictable entropy is pulled from the OS,which we log for reproducibility
    seed_sequence = np.random.SeedSequence(random_seed)
    logger.info('run: np.random.SeedSequence.entropy={}'.format(seed_sequence.entropy))
    rng = np.random.default_rng(seed_sequence)
    array_sum_keys = ('n_logical_commutations','custom_totals',)  # list of array sum keys
    array_val_keys = ('logical_commutations','custom_values',)  # list of array value keys
    error_weights = []  # list of error_weight from current run
    success_list = np.zeros(max_runs)
    max_coset_p_list = np.zeros(max_runs)
    coset_ps_list = np.zeros((max_runs,4))
    perm_mat,perm_vec = deform_matsvecs(code,decoder,error_model,perm_rates,code_name,layout)
    while ((max_runs is None or runs_data['n_run'] < max_runs)
           and (max_failures is None or runs_data['n_fail'] < max_failures)):
        # run simulation
        data = _run_once_defp(mode,code,time_steps,error_model,decoder,error_probability,perm_rates,perm_mat,perm_vec,code_name,layout,
                              measurement_error_probability,rng)
        # increment run counts
        success_list[runs_data['n_run']] = data['success']
        max_coset_p_list[runs_data['n_run']] = data['max_coset_p']
        coset_ps_list[runs_data['n_run']] = data['coset_ps']
        runs_data['n_run'] += 1
        if data['success']:
            runs_data['n_success'] += 1
        else:
            runs_data['n_fail'] += 1
        # sum arrays elementwise across runs (shapes must stay consistent)
        for array_sum_key,array_val_key in zip(array_sum_keys,array_val_keys):
            array_sum = runs_data[array_sum_key]  # extract sum
            array_val = data[array_val_key]  # extract val
            if runs_data['n_run'] == 1 and array_val is not None:  # first run,so initialize sum,if val not None
                array_sum = np.zeros_like(array_val)
            if array_sum is None and array_val is None:  # both None
                array_sum = None
            elif (array_sum is None or array_val is None) or (array_sum.shape != array_val.shape):  # mismatch
                raise QecsimError(
                    'Mismatch between {} values to sum: {},{}'.format(array_val_key,array_sum,array_val))
            else:  # match,so sum
                array_sum = array_sum + array_val
            runs_data[array_sum_key] = array_sum  # update runs_data
        # append error weight
        error_weights.append(data['error_weight'])
    # error bar in logical failure rate (standard error of the mean)
    runs_data['logical_failure_rate'] = 1 - max_coset_p_list.mean()
    runs_data['logical_failure_rate_errorbar'] = max_coset_p_list.std()/np.sqrt(max_runs)
    runs_data['logical_failure_rate_samples'] = 1 - success_list.mean()
    runs_data['logical_failure_rate_samples_errorbar'] = success_list.std()/np.sqrt(max_runs)
    runs_data['coset_ps_list'] = coset_ps_list
    return runs_data
import time
def api_retry(func, task_id):
    """
    Retry wrapper for API calls.

    :param func: the API function to invoke
    :param task_id: task id passed to the function
    :return: (success flag, status result) from the last attempt
    """
    succeeded = False
    status_result = ""
    for _ in range(TRANSPORT_RETRY_TIMES):
        # Wait between attempts before polling again.
        time.sleep(TRANSPORT_RETRY_INTERVAL)
        succeeded, status_result = func(task_id)
        if succeeded:
            break
    return succeeded, status_result
def test_bound_callables():
    """Test that we can use a callable as a bound value."""
    # Bind a callable: magicgui should call it and use its result
    # instead of the widget's current value.
    @magicgui(x={"bind": lambda x: 10})
    def f(x: int = 5):
        return x
    # While bound, the callable's return value (10) wins over the default.
    assert f() == 10
    f.x.unbind()
    # After unbinding, the widget/default value (5) is used again.
    assert f() == 5
def delete_role(user_id: str, role_id: str):
    """ Removes a role from a user """
    # Debug output of the two path parameters, one per line
    # (same stdout as two separate print calls).
    print(user_id, role_id, sep="\n")
    return jsonify(), HTTPStatus.NO_CONTENT
import multiprocessing as mp
from functools import partial
from jinfo.utils.percentage_identity import percentage_identity
def remove_degenerate_seqs(
    alignment_obj: BaseAlignment, identity_limit: int, show_id_array: bool = False
) -> BaseAlignment:
    """
    Filter high similarity sequences from a list of Seq objects.

    Sequences whose maximum pairwise identity with any *other* sequence is
    below ``identity_limit`` are kept.

    Returns: BaseAlignment
    """
    seq_list = alignment_obj.seqs
    identity_array = []
    filtered_seqs = []
    # Use the pool as a context manager so worker processes are always
    # reaped, even if percentage_identity raises (the original never
    # closed/joined the pool).
    with mp.Pool(mp.cpu_count()) as pool:
        for seq_obj in seq_list:
            id_partial = partial(percentage_identity, seq2=seq_obj)
            identity_array_row = pool.map(id_partial, seq_list)
            identity_array.append(identity_array_row)
    if show_id_array:
        print("Calculated alignment identity array:")
        for i, row in enumerate(identity_array):
            print(f"{seq_list[i].label}\t{row}")
    for i, row in enumerate(identity_array):
        # Drop the sequence's 100% self-match before taking the max.
        # NOTE(review): assumes percentage_identity returns exactly 100 for
        # a sequence against itself — confirm.
        row.remove(100)
        if max(row) < float(identity_limit):
            filtered_seqs.append(seq_list[i])
    return BaseAlignment(filtered_seqs)
def get_domain(url):
    """ Get the domain from a URL.

    Parameters
    ----------
    url : string
        HTTP URL

    Returns
    -------
    domain : string
        domain of the URL, as '<scheme>://<netloc>' (scheme defaults to http)
    """
    parsed = urlparse(url)
    # Fall back to plain http when the URL carries no scheme.
    scheme = parsed.scheme or "http"
    return "{0}://{1}".format(scheme, parsed.netloc)
def push_activations(activations, from_layer, to_layer):
    """Push activations from one model to another using prerecorded correlations"""
    # Whiten the activations using the source layer's inverse covariance,
    # then re-correlate them with the source->target covariance.
    inv_cov = layer_inverse_covariance(from_layer)
    decorrelated = np.dot(inv_cov, activations.T).T
    cross_cov = layer_covariance(from_layer, to_layer)
    return np.dot(decorrelated, cross_cov)
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
                    n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score using cross-validation
    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like
        The data to fit. Can be for example a list, or an array.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    groups : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.
    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs
        - An int, giving the exact number of total jobs that are
          spawned
        - A string, giving an expression as a function of n_jobs,
          as in '2*n_jobs'
    Returns
    -------
    scores : numpy.array, shape=(len(list(cv)), 2)
        Array of scores of the estimator for each run of the cross validation
        with their corresponding uncertainty.
    See Also
    ---------
    :func:`skpro.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    # To ensure multimetric format is not supported
    scorer = check_scoring(estimator, scoring=scoring)
    if n_jobs == 1:
        # If we are not multiprocessing it's possible to
        # use a wrapper function to retrieve the std values
        # (the closure below appends one [score, std] pair per CV fold).
        test_scores = []
        def scoring_task(estimator, X, y):
            score, std = scorer(estimator, X, y, return_std=True)
            test_scores.append([score, std])
            return score
    else:
        # We allow multiprocessing by passing in two scoring functions.
        # That is far from ideal since we call the scorer twice,
        # so any improvement is welcome
        score_scorer = RetrievesScores(scorer, score=True, std=False)
        std_scorer = RetrievesScores(scorer, score=False, std=True)
        scoring_task = {'score': score_scorer, 'std': std_scorer}
    cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups,
                                scoring=scoring_task, cv=cv,
                                return_train_score=False,
                                n_jobs=n_jobs, verbose=verbose,
                                fit_params=fit_params,
                                pre_dispatch=pre_dispatch)
    if n_jobs == 1:
        # Scores were captured by the closure above: one (score, std) row per fold.
        return np.array(test_scores)
    else:
        # Reassemble the two separately-computed columns into (score, std) rows.
        return np.column_stack((cv_results['test_score'], cv_results['test_std']))
def load_graph(N, M):
    """
    Builds an adjacency list representation of a graph with N vertices. Each
    graph[i][j] is the minimum length of an edge between vertice i and j.
    :rtype List[int, Dict[int, int]]
    """
    graph = [dict() for _ in range(N)]
    for _ in range(M):
        (x, y, r) = read(int)
        # Convert 1-based vertex labels to 0-based indices.
        x, y = x - 1, y - 1
        # Keep only the shortest edge between any pair of vertices.
        if y in graph[x]:
            r = min(r, graph[x][y])
        graph[x][y] = r
        graph[y][x] = r
    return graph
def quartile_range(arr):
    """
    Find out the Interquartile Range.

    Returns (Q1, IQR, Q3) computed from the lower/upper halves of *arr*,
    which is assumed to be sorted.
    """
    # BUG FIX: the original sliced with len(arr)/2, which is a float in
    # Python 3 and raises TypeError; // keeps integer semantics in both
    # Python 2 and 3.
    half = len(arr) // 2
    if len(arr) % 2 != 0:
        # Odd length: exclude the middle element from both halves.
        left = median(arr[:half])
        right = median(arr[half + 1:])
    else:
        # Even length: split exactly in half.
        left = median(arr[:half])
        right = median(arr[half:])
    return left, abs(right - left), right
def check_syntax(filename, raise_error=False):
    """Return True if syntax is okay."""
    # Read with autopep8's encoding detection, then compile the source.
    with autopep8.open_with_encoding(filename) as input_file:
        source = input_file.read()
    try:
        compile(source, '<string>', 'exec', dont_inherit=True)
    except (SyntaxError, TypeError, UnicodeDecodeError):
        if raise_error:
            raise
        return False
    return True
from astropy.convolution import convolve as astropy_convolve
from ..utils import process_image_pixels
def test_process_image_pixels():
    """Check the example how to implement convolution given in the docstring"""
    def convolve(image, kernel):
        '''Convolve image with kernel'''
        images = dict(image=np.asanyarray(image))
        kernel = np.asanyarray(kernel)
        out = dict(image=np.empty_like(image))
        # Per-pixel worker: weighted sum of the kernel-sized patch.
        def convolve_function(images, kernel):
            value = np.sum(images['image'] * kernel)
            return dict(image=value)
        process_image_pixels(images, kernel, out, convolve_function)
        return out['image']
    # Fixed seed keeps the comparison deterministic.
    np.random.seed(0)
    image = np.random.random((7, 10))
    kernel = np.random.random((3, 5))
    actual = convolve(image, kernel)
    # Reference result: astropy's convolve with zero-fill boundary.
    desired = astropy_convolve(image, kernel, boundary='fill')
    assert_allclose(actual, desired)
import math
def im2vec(im, bsize, padsize=0):
    """
    Converts image to vector.

    Args:
        im: Input image to be converted to a vector.
        bsize: Size of block of im to be converted to vec. Must be 1x2 non-negative int array.
        padsize (optional, default=0): Must be non-negative integers in a 1x2 array. Amount of zeros padded on each
    Returns:
        v: Output vector (one column per block, column-major within block).
        rows: Number of rows of im after bsize and padsize are applied (before final flattening to vector).
        cols: Number of cols of im after bsize and padsize are applied (before final flattening to vector).
    """
    # Broadcast scalar sizes to (row, col) pairs.
    bsize = bsize+np.zeros((1, 2), dtype=int)[0]
    padsize = padsize+np.zeros((1, 2), dtype=int)[0]
    # BUG FIX: the original checked `padsize.any() < 0`, which compares a
    # bool to 0 and is always False; check the elements themselves.
    if (padsize < 0).any():
        raise Exception("Pad size must not be negative")
    imsize = np.shape(im)
    y = bsize[0]+padsize[0]
    x = bsize[1]+padsize[1]
    rows = math.floor((imsize[0]+padsize[0])/y)
    cols = math.floor((imsize[1]+padsize[1])/x)
    t = np.zeros((y*rows, x*cols))
    imy = y*rows-padsize[0]
    imx = x*cols-padsize[1]
    t[:imy, :imx] = im[:imy, :imx]
    # Fortran-order reshapes extract blocks column-major.
    t = np.reshape(t, (y, rows, x, cols), order='F')
    t = np.reshape(np.transpose(t, [0, 2, 1, 3]), (y, x, rows*cols), order='F')
    v = t[:bsize[0], :bsize[1], :rows*cols]
    # NOTE(review): the final reshape uses y*x (padded size) even though v
    # was cut to bsize; this only matches when padsize is 0 — confirm.
    v = np.reshape(v, (y*x, rows*cols), order='F')
    return [v, rows, cols]
def mobilenetV2_block(
        input_layer,
        filters: int = 32,
        dropout_ratio: float = DEFAULT_DROPOUT_RATIO,
        use_batchnorm: bool = False,
        prefix: str = "mobilenetV2_",
        initializer=DEFAULT_KERNEL_INITIALIZER,
        regularizer=DEFAULT_KERNEL_REGULARIZER,
        channels_index: int = DEFAULT_CHANNEL_INDEX):
    """
    Build a mobilenet V2 bottleneck with residual block

    :param input_layer: input tensor the block is applied to
    :param filters: number of filters of the inner 1x1 expansion conv
    :param dropout_ratio: dropout rate in [0, 1], or None to disable
    :param use_batchnorm: add BatchNormalization after the depthwise and
        projection convolutions
    :param prefix: string prepended to every layer name in the block
    :param initializer: kernel initializer for all convolutions
    :param regularizer: kernel regularizer for all convolutions
    :param channels_index: axis of the channel dimension in the input shape
    :return: mobilenet V2 bottleneck with residual block
    """
    # --- argument checking
    if input_layer is None:
        raise ValueError("input_layer cannot be empty")
    if filters <= 0:
        raise ValueError("Filters should be > 0")
    if dropout_ratio is not None:
        if dropout_ratio > 1.0 or dropout_ratio < 0.0:
            raise ValueError("Dropout ration must be [0, 1]")
    # --- build block
    # The projection conv below maps back to this channel count so the
    # residual Add at the end has matching shapes.
    previous_no_filters = K.int_shape(input_layer)[channels_index]
    # 1x1 expansion conv (linear activation)
    x = keras.layers.Conv2D(
        filters=filters,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding="same",
        activation="linear",
        name=prefix + "conv0",
        kernel_regularizer=regularizer,
        kernel_initializer=initializer)(input_layer)
    # 3x3 depthwise conv
    x = keras.layers.DepthwiseConv2D(
        depth_multiplier=1,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding="same",
        activation="relu",
        name=prefix + "conv1",
        kernel_regularizer=regularizer,
        kernel_initializer=initializer)(x)
    if use_batchnorm:
        x = keras.layers.BatchNormalization(
            name=prefix + "batchnorm0")(x)
    # 1x1 projection conv back to the input's channel count
    x = keras.layers.Conv2D(
        filters=previous_no_filters,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding="same",
        activation="relu",
        name=prefix + "conv2",
        kernel_regularizer=regularizer,
        kernel_initializer=initializer)(x)
    if use_batchnorm:
        x = keras.layers.BatchNormalization(
            name=prefix + "batchnorm1")(x)
    # --- build skip layer and main
    x = keras.layers.Add(name=prefix + "add")([
        x,
        input_layer
    ])
    if dropout_ratio is not None and dropout_ratio > 0.0:
        x = keras.layers.Dropout(
            name=prefix + "dropout",
            rate=dropout_ratio)(x)
    return x
def _grid_archive():
    """Deterministically created GridArchive."""
    # Resolution trade-off: low enough to count cells, high enough that
    # different cells still get visibly different colors.
    grid = GridArchive([10, 10], [(-1, 1), (-1, 1)], seed=42)
    grid.initialize(solution_dim=2)
    _add_uniform_sphere(grid, (-1, 1), (-1, 1))
    return grid
def check_flush(hand):
    """Check whether the hand has a flush; returns a boolean.

    A flush means every card shares the suit of the first card.
    Raises IndexError on an empty hand (same as the original).
    """
    # Return the comparison directly instead of if/return True/return False.
    return len(hand) == len(hand.by_suit(hand[0].suit))
import torch
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch
    :param preds: prediction logits
    :param y: target labels
    :return: accuracy = percentage of correct predictions
    """
    # Sigmoid maps logits to probabilities; rounding yields hard 0/1 labels.
    predicted_labels = torch.sigmoid(preds).round()
    matches = (predicted_labels == y).float()
    # Fraction of correct predictions over the batch dimension.
    return matches.sum() / len(matches)
import csv
def load_taxondump(idpath):
    """Importing the Acidobacteria taxon IDs"""
    with open(idpath) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        # Map the second column (taxon id) to the first column (name).
        taxons = {row[1]: row[0] for row in reader}
    return taxons
import re
def _to_numeric_range(cell):
"""
Translate an Excel cell (eg 'A1') into a (col, row) tuple indexed from zero.
e.g. 'A1' returns (0, 0)
"""
match = re.match("^\$?([A-Z]+)\$?(\d+)$", cell.upper())
if not match:
raise RuntimeError("'%s' is not a valid excel cell address" % cell)
col, row = match.groups()
# A = 1
col_digits = map(lambda c: ord(c) - ord("A") + 1, col)
col = 0
for digit in col_digits:
col = (col * 26) + digit
row = int(row) - 1
col = col - 1
return col, row | 468f452a7e4d4b045ecbb1a1fc261712fb25f3fc | 3,637,243 |
def LUCroutDecompose(A):
    """
    Implementation of Crout's method for LU decomposition.

    :param A: square NxN ``matrix`` to decompose.
    :return: (L, U) factors such that L*U reconstructs A.
    """
    assert A.shape[0] == A.shape[1] and type(A) is matrix, "'A' deve ser NxN."
    # Output factors are allocated here and filled in place by the C routine.
    L = zeros(A.shape)
    n = A.shape[0]
    U = L.copy()
    # The heavy lifting happens in a compiled routine accessed via ctypes;
    # the arrays are passed as raw pointers and mutated in place.
    lib.LUDec(n, byref(ctypeslib.as_ctypes(A)),
              byref(ctypeslib.as_ctypes(L)),
              byref(ctypeslib.as_ctypes(U)))
    return L, U
def protocol(recarr, design_type, *hrfs):
    """ Create an object that can evaluate the FIAC
    Subclass of formulae.Formula, but not necessary.
    Parameters
    ----------
    recarr : (N,) structured array
       with fields 'time' and 'event'
    design_type : str
       one of ['event', 'block'].  Handles how the 'begin' term is
       handled.  For 'block', the first event of each block is put in
       this group. For the 'event', only the first event is put in this
       group. The 'begin' events are convolved with hrf.glover.
    hrfs: symoblic HRFs
       Each event type ('SSt_SSp','SSt_DSp','DSt_SSp','DSt_DSp') is
       convolved with each of these HRFs in order.
    Returns
    -------
    f: Formula
       Formula for constructing design matrices.
    contrasts : dict
       Dictionary of the contrasts of the experiment.
    """
    event_types = np.unique(recarr['event'])
    N = recarr.size
    # 'keep' masks the events modelled by the separate 'begin' term:
    # every 6th event for block designs, only the first for event designs.
    if design_type == 'block':
        keep = np.not_equal((np.arange(N)) % 6, 0)
    else:
        keep = np.greater(np.arange(N), 0)
    # This first frame was used to model out a potentially
    # 'bad' first frame....
    _begin = recarr['time'][~keep]
    termdict = {}
    termdict['begin'] = utils.define('begin', utils.events(_begin, f=hrf.glover))
    drift = formulae.natural_spline(utils.T,
                                    knots=[N_ROWS/2.+1.25],
                                    intercept=True)
    for i, t in enumerate(drift.terms):
        termdict['drift%d' % i] = t
    # After removing the first frame, keep the remaining
    # events and times
    times = recarr['time'][keep]
    events = recarr['event'][keep]
    # Now, specify the experimental conditions.  This creates expressions named
    # SSt_SSp0, SSt_SSp1, etc. with one expression for each (eventtype, hrf)
    # pair
    for v in event_types:
        k = np.array([events[i] == v for i in range(times.shape[0])])
        for l, h in enumerate(hrfs):
            # Make sure event type is a string (not byte string)
            term_name = '%s%d' % (to_str(v), l)
            termdict[term_name] = utils.define(term_name,
                                               utils.events(times[k], f=h))
    f = formulae.Formula(termdict.values())
    # T contrasts: average, the two main effects, and the interaction of
    # the 2x2 (sentence x speaker) design, built from the first-HRF terms.
    Tcontrasts = {}
    Tcontrasts['average'] = (termdict['SSt_SSp0'] + termdict['SSt_DSp0'] +
                             termdict['DSt_SSp0'] + termdict['DSt_DSp0']) / 4.
    Tcontrasts['speaker'] = (termdict['SSt_DSp0'] - termdict['SSt_SSp0'] +
                             termdict['DSt_DSp0'] - termdict['DSt_SSp0']) * 0.5
    Tcontrasts['sentence'] = (termdict['DSt_DSp0'] + termdict['DSt_SSp0'] -
                              termdict['SSt_DSp0'] - termdict['SSt_SSp0']) * 0.5
    Tcontrasts['interaction'] = (termdict['SSt_SSp0'] - termdict['SSt_DSp0'] -
                                 termdict['DSt_SSp0'] + termdict['DSt_DSp0'])
    # Ftest
    Fcontrasts = {}
    Fcontrasts['overall1'] = formulae.Formula(Tcontrasts.values())
    return f, Tcontrasts, Fcontrasts
def iter_children(param, childlist=None):
    """
    | Iterator over all sub children of a given parameters.
    | Returns all childrens names.

    =============== ================================= ====================================
    **Parameters**    **Type**                          **Description**
    *param*           instance of pyqtgraph parameter   the root node to be coursed
    *childlist*       list or None                      the child list reception structure
    =============== ================================= ====================================

    Returns
    -------
    childlist : parameter list
        The list of the children from the given node.

    Examples
    --------
    >>> import custom_parameter_tree as cpt
    >>> from pyqtgraph.parametertree import Parameter
    >>> #Creating the example tree
    >>> settings=Parameter(name='settings')
    >>> child1=Parameter(name='child1', value=10)
    >>> child2=Parameter(name='child2',value=10,visible=True,type='group')
    >>> child2_1=Parameter(name='child2_1', value=10)
    >>> child2_2=Parameter(name='child2_2', value=10)
    >>> child2.addChildren([child2_1,child2_2])
    >>> settings.addChildren([child1,child2])
    >>> #Get the child list from the param argument
    >>> childlist=cpt.iter_children(settings)
    >>> #Verify the integrity of result
    >>> print(childlist)
    ['child1', 'child2', 'child2_1', 'child2_2']
    """
    # BUG FIX: the original default `childlist=[]` is a mutable default that
    # is shared across top-level calls, so repeated calls accumulated old
    # results. Create a fresh list per call instead.
    if childlist is None:
        childlist = []
    for child in param.children():
        childlist.append(child.name())
        if child.type() == 'group':
            # Recurse into group nodes with a fresh accumulator.
            childlist.extend(iter_children(child, []))
    return childlist
from typing import Callable
from typing import Optional
from typing import Union
import sys
import atexit
def shell__shell_hook(callback: Callable[[int, WPARAM, LPARAM], Optional[str]]) -> Union[HHOOK, WindowsErrorMessage]:
    """
    Adds a global shell hook, called when any key is pressed in any
    context.  The callback is called directly in the thread that invoked
    this method.  It is up to the callback to be as responsive as possible.

    If the callback explicitly returns the value "Cancel"
    (SHELL__CANCEL_CALLBACK_CHAIN), then the next hook in the chain of
    listeners will not be called.  This is made very explicit because
    most circumstances dictate that the next chained handler should be
    called.  Even then, there are circumstances in which the next hook
    will still need to be called.

    The callback takes the parameters (code, wparam, lparam).  It is up
    to the callback to correctly parse the values.  Note that this is
    really windows specific, so it's a good idea to read through the docs.

    :param callback:
    :return: hook handle, or a WindowsErrorMessage on failure
    """
    # Without some tricky logic, the shell hook will always fail.
    # Specifically, it must use a DLL to perform the hook.  One for 64-bit
    # applications, and one for 32-bit applications.
    # See https://www.codeproject.com/Articles/18638/Using-Window-Messages-to-Implement-Global-System-H
    # Otherwise, this error is encountered.
    #   ERROR_HOOK_NEEDS_HMOD (1428):
    #   Cannot set nonlocal hook without a module handle.
    #
    hmod = GetModuleHandleW(None)
    # FIX: the original condition was `hmod is None is None`, a redundant
    # chained comparison equivalent to `hmod is None`.
    if hmod is None:
        return WindowsErrorMessage('GetModuleHandleW')
    # See https://msdn.microsoft.com/en-us/library/windows/desktop/ms644991(v=vs.85).aspx
    hook_id = None
    def shell_handler(code: int, wparam: WPARAM, lparam: LPARAM) -> LRESULT:
        print("[Shell handler] {0} {1} {2}".format(code, wparam, lparam))
        call_next = True
        try:
            # From the docs: If nCode is less than zero, the hook
            # procedure must return the value returned by CallNextHookEx.
            if code >= 0:
                ret = callback(code, wparam, lparam)
                if ret == SHELL__CANCEL_CALLBACK_CHAIN:
                    call_next = False
        except:  # pylint: broad-except
            print("Unexpected error: {0}".format(sys.exc_info()[0]))
            raise
        finally:
            # NOTE(review): returning from a `finally` block swallows the
            # exception re-raised above; a value is always handed back to
            # Windows.  Confirm this is the intended error behavior.
            if call_next:
                return t_cast(LRESULT, CallNextHookEx(hook_id, code, wparam, lparam))
            else:
                # print("Canceling callback chain")
                # From the docs:
                # If the hook procedure processed the message, it may return
                # a nonzero value to prevent the system from passing the
                # message to the rest of the hook chain or the target window
                # procedure.
                return LRESULT(1)
    callback_pointer = HOOK_CALLBACK_TYPE(shell_handler)
    hook_id = t_cast(HHOOK, SetWindowsHookExW(WH_SHELL, callback_pointer, hmod, 0))
    if hook_id == 0:
        return WindowsErrorMessage('SetWindowsHookExW / shell')
    print("started shell hook " + repr(hook_id))
    # Keep a reference so the ctypes callback is not garbage collected
    # while Windows still holds a pointer to it.
    _CALLBACK_POINTERS[hook_id] = callback_pointer
    # Ensure that the hook is *always* uninstalled at exit to prevent OS
    # resource leaks.
    atexit.register(shell__unhook, hook_id)
    return hook_id
def depth_first_graph_search(problem):
    """
    [Figure 3.7]
    Search the deepest nodes in the search tree first.
    Search through the successors of a problem to find a goal.
    Does not get trapped by loops.
    If two paths reach a state, only use the first one.
    """
    stack = [Node(problem.initial)]  # LIFO frontier
    explored = set()
    while stack:
        current = stack.pop()
        if problem.goal_test(current.state):
            return current
        explored.add(current.state)
        # Append children one by one, skipping states already explored or
        # already waiting on the (growing) stack — same semantics as
        # extending with a lazily-evaluated generator.
        for child in current.expand(problem):
            if child.state not in explored and child not in stack:
                stack.append(child)
    return None
def check_matrix_equality(A, B, tol=None):
    """
    Checks the equality of two matrices.

    :param A: The first matrix (list of rows)
    :param B: The second matrix
    :param tol: The decimal place tolerance of the check; None means exact
    :return: The boolean result of the equality check
    """
    # Dimensions must match before comparing elements.
    if len(A) != len(B) or len(A[0]) != len(B[0]):
        return False
    for row_a, row_b in zip(A, B):
        for a, b in zip(row_a, row_b):
            if tol is None:
                # Exact comparison (identity test with `is None`, not `== None`).
                if a != b:
                    return False
            elif round(a, tol) != round(b, tol):
                return False
    return True
def about(topic):
    """Return a select function that returns whether
    a paragraph contains one of the words in TOPIC.

    Arguments:
        topic: a list of words related to a subject

    >>> about_dogs = about(['dog', 'dogs', 'pup', 'puppy'])
    >>> choose(['Cute Dog!', 'That is a cat.', 'Nice pup!'], about_dogs, 0)
    'Cute Dog!'
    >>> choose(['Cute Dog!', 'That is a cat.', 'Nice pup.'], about_dogs, 1)
    'Nice pup.'
    """
    assert all([lower(x) == x for x in topic]), 'topics should be lowercase.'
    # BEGIN PROBLEM 2
    def select(paragraph):
        # Normalize every word of the paragraph, then test whether any
        # topic word appears among them.
        words = [remove_punctuation(lower(w)) for w in split(paragraph)]
        return any(w in words for w in topic)
    return select
    # END PROBLEM 2
def calcBarycentricCoords(pt, verts):
    """Calculate the Barycentric coordinates of *pt* with respect to *verts*.

    Solves the linear system [verts^T; 1..1] w = [pt; 1] in the
    least-squares sense, so the returned weights sum to 1.
    """
    verts = np.array(verts)  # vertices formed by N+1 nearest voxels
    pt = np.array(pt)  # voxel of interest
    A = np.transpose(np.column_stack((verts, np.ones(verts.shape[0]))))
    b = np.append(pt, 1)
    # rcond=None adopts NumPy's machine-precision-based cutoff and silences
    # the FutureWarning that the bare lstsq call emitted.
    return np.linalg.lstsq(A, b, rcond=None)[0]
import os
def get_masked_fastas(bed):
    """create the masked fasta files per chromosome. needed to run bl2seq.
    and puts it into a dictionary seqid to path to genomic masked fasta"""
    # Split directory lives next to the source fasta, named '<stem>_split'.
    f = bed.fasta.fasta_name
    fname = op.splitext(op.basename(f))[0]
    d = op.dirname(f) + "/%s_split" % fname
    # Ignore "directory already exists"; any other OSError is also swallowed
    # here and will surface later when opening files.
    try: os.mkdir(d)
    except OSError: pass
    fastas = {}
    for seqid, seq in bed.mask_cds():
        f = d + "/%s.fasta" % seqid
        fastas[seqid] = f
        # Existing files are reused as a cache and not rewritten.
        if op.exists(f): continue
        fh = open(f, "wb")
        # Python 2 print-to-file syntax; this module is Python 2 only.
        print >>fh, seq
        fh.close()
    return fastas
import re
def parse_regex(ctx, param, values):
    """Compile a regex if given.

    :param click.Context ctx: click command context.
    :param click.Parameter param: click command parameter (in this case,
        ``ignore_regex`` from ``-r|--ignore-regiex``).
    :param list(str) values: list of regular expressions to be compiled.

    :return: a list of compiled regular expressions.

    .. versionchanged:: 1.1.3 parameter value (``values``) must be a
        ``list`` of ``str``s.
    """
    if not values:
        # Nothing to compile (None for an omitted option as well).
        return None
    compiled = []
    for pattern in values:
        compiled.append(re.compile(pattern))
    return compiled
import os
def FileJustRoot(fileName):
    """ Gets just the root of the file name (path with last extension stripped).

    Returns "" for inputs that are not path-like, matching the original
    best-effort contract.
    """
    try:
        return os.path.splitext(fileName)[0]
    except (TypeError, AttributeError, ValueError):
        # Narrowed from the original bare `except:`, which also swallowed
        # unrelated errors such as KeyboardInterrupt.
        return ""
import sys
import os
def generate_command(pbs_script, pbs_config, pbs_vars=None, python_exe=None):
    """Prepare a correct PBS command string.

    :param pbs_script: script to be executed by the PBS job
    :param pbs_config: dict of PBS settings (queue, project, resources, ...)
    :param pbs_vars: environment variables exported to the job via `-v`
    :param python_exe: python interpreter to run the script with
        (defaults to the current interpreter; overridable for testing)
    :return: the full `qsub` command line as a string
    """
    pbs_env_init()
    # Initialisation
    if pbs_vars is None:
        pbs_vars = {}
    # Necessary for testing
    if python_exe is None:
        python_exe = sys.executable
    pbs_flags = []
    pbs_queue = pbs_config.get('queue', 'normal')
    pbs_flags.append('-q {queue}'.format(queue=pbs_queue))
    pbs_project = pbs_config.get('project', os.environ['PROJECT'])
    pbs_flags.append('-P {project}'.format(project=pbs_project))
    # One `-l key=val` flag per configured resource limit.
    pbs_resources = ['walltime', 'ncpus', 'mem', 'jobfs']
    for res_key in pbs_resources:
        res_flags = []
        res_val = pbs_config.get(res_key)
        if res_val:
            res_flags.append('{key}={val}'.format(key=res_key, val=res_val))
        if res_flags:
            pbs_flags.append('-l {res}'.format(res=','.join(res_flags)))
    # TODO: Need to pass lab.config_path somehow...
    pbs_jobname = pbs_config.get('jobname', os.path.basename(os.getcwd()))
    if pbs_jobname:
        # PBSPro has a 15-character jobname limit
        pbs_flags.append('-N {name}'.format(name=pbs_jobname[:15]))
    pbs_priority = pbs_config.get('priority')
    if pbs_priority:
        pbs_flags.append('-p {priority}'.format(priority=pbs_priority))
    pbs_flags.append('-l wd')
    pbs_join = pbs_config.get('join', 'n')
    if pbs_join not in ('oe', 'eo', 'n'):
        print('payu: error: unknown qsub IO stream join setting.')
        sys.exit(-1)
    else:
        pbs_flags.append('-j {join}'.format(join=pbs_join))
    # Append environment variables to qsub command
    # TODO: Support full export of environment variables: `qsub -V`
    pbs_vstring = ','.join('{0}={1}'.format(k, v)
                           for k, v in pbs_vars.items())
    pbs_flags.append('-v ' + pbs_vstring)
    # Collect the storage mounts the job needs access to (PBS `-l storage=`).
    storages = set()
    storage_config = pbs_config.get('storage', {})
    mounts = set(['/scratch', '/g/data'])
    for mount in storage_config:
        mounts.add(mount)
        for project in storage_config[mount]:
            storages.add(make_mount_string(encode_mount(mount), project))
    # Append any additional qsub flags here
    pbs_flags_extend = pbs_config.get('qsub_flags')
    if pbs_flags_extend:
        pbs_flags.append(pbs_flags_extend)
    payu_path = pbs_vars.get('PAYU_PATH', os.path.dirname(sys.argv[0]))
    pbs_script = check_exe_path(payu_path, pbs_script)
    # Check for storage paths that might need to be mounted in the
    # python and script paths
    extra_search_paths = [python_exe, payu_path, pbs_script]
    laboratory_path = pbs_config.get('laboratory', None)
    if laboratory_path is not None:
        extra_search_paths.append(laboratory_path)
    short_path = pbs_config.get('shortpath', None)
    if short_path is not None:
        extra_search_paths.append(short_path)
    storages.update(find_mounts(extra_search_paths, mounts))
    storages.update(find_mounts(get_manifest_paths(), mounts))
    # Add storage flags. Note that these are sorted to get predictable
    # behaviour for testing
    pbs_flags_extend = '+'.join(sorted(storages))
    if pbs_flags_extend:
        pbs_flags.append("-l storage={}".format(pbs_flags_extend))
    # Set up environment modules here for PBS.
    envmod.setup()
    envmod.module('load', 'pbs')
    # Construct job submission command
    cmd = 'qsub {flags} -- {python} {script}'.format(
        flags=' '.join(pbs_flags),
        python=python_exe,
        script=pbs_script
    )
    return cmd
def get_query_string(**kwargs):
    """
    Concatenates the non-None keyword arguments to create a query string for ElasticSearch.
    :return: concatenated query string or None if not arguments were given
    """
    parts = []
    for key, value in kwargs.items():
        # Skip values that are None or the empty string.
        if value not in (None, ''):
            parts.append('%s:%s' % (key, value))
    return ' AND '.join(parts) or None
def check_bin(img):
"""Checks whether image has been properly binarized. NB: works on the assumption that there should be more
background pixels than element pixels.
Parameters
----------
img : np.ndarray
Description of parameter `img`.
Returns
-------
np.ndarray
A binary array of the image.
"""
img_bool = np.asarray(img, dtype=np.bool)
# Gets the unique values in the image matrix. Since it is binary, there should only be 2.
unique, counts = np.unique(img_bool, return_counts=True)
print(unique)
print("Found this many counts:")
print(len(counts))
print(counts)
# If the length of unique is not 2 then print that the image isn't a binary.
if len(unique) != 2:
print("Image is not binarized!")
hair_pixels = len(counts)
print("There is/are {} value(s) present, but there should be 2!\n".format(hair_pixels))
# If it is binarized, print out that is is and then get the amount of hair pixels to background pixels.
if counts[0] < counts[1]:
print("{} is not reversed".format(str(img)))
img = skimage.util.invert(img_bool)
print("Now {} is reversed =)".format(str(img)))
return img
else:
print("{} is already reversed".format(str(img)))
img = img_bool
print(type(img))
return img | 808e4635befa5848d7683e6e12ead5b5ee297339 | 3,637,257 |
def add_quotes(path):
    """Wrap *path* in double quotes when it contains spaces.

    A path that already contains a double-quote character, or one
    without spaces, is returned unchanged.
    """
    if ' ' in path and '"' not in path:
        return '"' + path + '"'
    return path
def terminal(board):
    """
    Report whether the game on *board* has finished.

    The game is over when no moves remain, or when a winner exists.
    """
    # No remaining actions means the board is full / play is blocked.
    if not actions(board):
        return True
    # Otherwise the game only ends once somebody has won.
    return winner(board) is not None
def login():
    """Login page: render the form on GET, authenticate on POST."""
    if request.method == "GET":
        return render_template("login.html")
    email = request.form.get("email")
    password = request.form.get("password")
    remember = bool(request.form.get("remember"))
    user = User.query.filter_by(email=email).first()
    # Short-circuit: only verify the hash when the account exists.
    credentials_ok = user is not None and check_password_hash(user.password, password)
    if not credentials_ok:
        flash("Please check your login details and try again.")
        return redirect(url_for("auth.login"))
    login_user(user, remember=remember)
    return redirect(url_for("main.games"))
from typing import List
from operator import not_
def apply_modifiers(membership: npt.ArrayLike, modifiers: List[str]) -> npt.ArrayLike:
    """
    Apply a list of modifiers or hedges to membership values.

    Modifiers are applied right-to-left, i.e. ``('not', 'very')``
    computes ``not(very(membership))``.

    :param membership: Membership values to be modified.
    :param modifiers: List of modifier/hedge names (case-insensitive).

    >>> from fuzzy_expert.operators import apply_modifiers
    >>> x = [0.0, 0.25, 0.5, 0.75, 1]
    >>> apply_modifiers(x, ('not', 'very'))
    array([1.    , 0.9375, 0.75  , 0.4375, 0.    ])
    """
    if modifiers is None:
        return membership
    # Bug fix: "NOT" previously mapped to `operator.not_`, which performs
    # Python *boolean* negation -- it returns True/False and raises on a
    # numpy array -- instead of the fuzzy complement 1 - m that the
    # doctest above demonstrates. The remaining hedges are wrapped in
    # lambdas so the dispatch table resolves their names lazily.
    fn = {
        "EXTREMELY": lambda m: extremely(m),
        "INTENSIFY": lambda m: intensify(m),
        "MORE_OR_LESS": lambda m: more_or_less(m),
        "NORM": lambda m: norm(m),
        "NOT": lambda m: 1 - m,
        "PLUS": lambda m: plus(m),
        "SLIGHTLY": lambda m: slightly(m),
        "SOMEWHAT": lambda m: somewhat(m),
        "VERY": lambda m: very(m),
    }
    membership = membership.copy()
    # Reverse so the innermost (right-most) hedge is applied first.
    modifiers = list(modifiers)
    modifiers.reverse()
    for modifier in modifiers:
        membership = fn[modifier.upper()](membership)
    return membership
def unitY(m=1.0):
    """Return a vector along the Y axis with magnitude *m*."""
    components = (0, m, 0)
    return np.array(components)
def create_suction_model(radius):
    """Build a circular suction-cup mask of the given pixel *radius*.

    Returns a float array of shape ``(2*radius+1, 2*radius+1)`` that is
    1.0 strictly inside the circle of ``radius`` centred on the array
    and 0.0 elsewhere.
    """
    coords = np.arange(-radius, radius + 1)
    xx, yy = np.meshgrid(coords, coords)
    dist = np.hypot(xx, yy)
    return (dist < radius).astype(np.float64)
def model_entrypoint(model_name):
    """Look up the entrypoint callable registered under *model_name*.

    Raises ``KeyError`` when no such model has been registered.
    """
    entrypoint = _model_entrypoints[model_name]
    return entrypoint
def encrypt(data, password):
    """Encrypt *data* with AES-CBC and return ``iv + b':' + ciphertext``.

    :param data: plaintext string to encrypt
    :param password: key material; NOTE(review): no key derivation is
        applied, so it must encode to a valid AES key length
        (16/24/32 bytes) -- confirm callers guarantee this.
    :return: binary payload ``iv:ciphertext``
    :raises ValueError: propagated from the cipher (e.g. bad key length)
    """
    try:
        cipher = AES.new(password.encode(), AES.MODE_CBC)
        cypher_text_bytes = cipher.encrypt(pad(data.encode(), AES.block_size))
        return b'' + cipher.iv + b':' + cypher_text_bytes
    except ValueError:
        print("There was an error")
        # Bug fix: re-raise the original exception rather than a bare
        # `raise ValueError`, which discarded the message and traceback.
        raise
from datetime import datetime
def change_datetime_to_str(input_time=None, str_format="%Y-%m-%d"):
    """
    Format a datetime as a string.

    :param input_time: datetime to format; defaults to the current time
    :param str_format: ``strftime`` format string, default ``%Y-%m-%d``
    :return: the formatted time string
    """
    # Bug fix: the module imports the *class* (`from datetime import
    # datetime`), so the previous `datetime.datetime.now()` raised
    # AttributeError whenever `input_time` was omitted.
    spec_time = input_time or datetime.now()
    return spec_time.strftime(str_format)
def find_possible_words(word: str, dictionary: list) -> list:
    """Return dictionary entries compatible with *word*.

    An entry qualifies when it starts with *word*'s first character,
    ends with its last character, and uses only characters that occur
    in *word*.
    """
    first, last = word[0], word[-1]
    matches = []
    for candidate in dictionary:
        if not (candidate.startswith(first) and candidate.endswith(last)):
            continue
        if all(ch in word for ch in candidate):
            matches.append(candidate)
    return matches
def diag_multidim_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin):
    """Log-likelihood of *z_u* under a diagonal-covariance Gaussian.

    Sums the per-dimension log-likelihoods over the leading axis and
    returns the total for the multidimensional distribution.
    """
    per_dim = diag_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin)
    return np.sum(per_dim, axis=0)
import os
def _shared_galaxy_properties(config_directory, kwds, for_tests):
    """Assemble Galaxy properties shared by local and Docker instances.

    Path handling differs wildly between Galaxy modalities, but the API
    key, admin/user setup, and tool/job options collected here apply to
    both. Returns a dict of Galaxy configuration properties.
    """
    api_key = _get_master_api_key(kwds)
    admin_email = _user_email(kwds)
    properties = {
        'master_api_key': api_key,
        'admin_users': "%s,test@bx.psu.edu" % admin_email,
        'expose_dataset_path': "True",
        'cleanup_job': 'never',
        'collect_outputs_from': "job_working_directory",
        'allow_path_paste': "True",
        'check_migrate_tools': "False",
        'use_cached_dependency_manager': str(kwds.get("conda_auto_install", False)),
        'brand': kwds.get("galaxy_brand", DEFAULT_GALAXY_BRAND),
        'strict_cwl_validation': str(not kwds.get("non_strict_cwl", False)),
    }
    if kwds.get("galaxy_single_user", True):
        properties['single_user'] = admin_email
    if for_tests:
        # Point plugin directories at an empty stub so tests see none.
        stub_dir = os.path.join(config_directory, "empty")
        _ensure_directory(stub_dir)
        properties["tour_config_dir"] = stub_dir
        properties["interactive_environment_plugins_directory"] = stub_dir
        properties["visualization_plugins_directory"] = stub_dir
        properties["refgenie_config_file"] = kwds.get('refgenie_config_file', '')
    return properties
def get_method(java_object, method_name):
    """Return a callable bound to a method of *java_object*.

    Useful when `auto_field=true` and an instance field shadows a method
    name. The full signature is resolved lazily when the member is
    actually called.

    :param java_object: the instance containing the method
    :param method_name: the name of the method to retrieve
    """
    target_id = java_object._target_id
    gateway_client = java_object._gateway_client
    return JavaMember(method_name, java_object, target_id, gateway_client)
import os
def createRegionLabeledSet(setname, entity, label, mesh, format="Exodus II"):
    """Create a labeled-set region parameter list.

    setname | string, name of the region
    entity  | string, entity (see mesh_entity.py)
    label   | string, label id in mesh file (usually a string containing an integer)
    mesh    | string, mesh filename
    format  | string, format of mesh (currently only 'Exodus II' supported)

    Returns the region xml wrapped in a ParameterList named *setname*.
    """
    header = os.path.join(AMANZI_SRC_DIR, 'src', 'geometry', 'RegionLabeledSet.hh')
    spec = extractDoxygenXML(header)
    # Fill the doxygen template with the caller-supplied values.
    for field, value in (("label", label),
                         ("entity", mesh_entity.valid_mesh_entity(entity)),
                         ("format", format),
                         ("mesh", mesh)):
        search.replace_by_name(spec, field, value)
    region = parameter_list.ParameterList(setname)
    region.append(spec)
    return region
def set_up_cube(
    zero_point_indices=((0, 0, 7, 7),),
    num_time_points=1,
    num_grid_points=16,
    num_realization_points=1,
):
    """Build a precipitation test cube on an equally spaced x/y grid.

    The data is all ones except for the points named in
    ``zero_point_indices`` (3-tuples get a realization index of 0
    prepended), which are set to zero.
    """
    # Normalise every index to a 4-tuple (realization, time, lat, lon).
    zero_point_indices = tuple(
        (0,) + tuple(idx) if len(idx) == 3 else tuple(idx)
        for idx in zero_point_indices
    )
    shape = (num_realization_points, num_time_points,
             num_grid_points, num_grid_points)
    data = np.ones(shape, dtype=np.float32)
    for real_idx, time_idx, lat_idx, lon_idx in zero_point_indices:
        data[real_idx, time_idx, lat_idx, lon_idx] = 0
    cube = Cube(data, standard_name="precipitation_amount", units="kg m^-2")
    cube.add_dim_coord(
        DimCoord(range(num_realization_points), standard_name="realization"), 0
    )
    tunit = Unit("hours since 1970-01-01 00:00:00", "gregorian")
    time_points = [402192.5 + offset for offset in range(num_time_points)]
    cube.add_dim_coord(DimCoord(time_points, standard_name="time", units=tunit), 1)
    # Equal 2 km spacing along both horizontal axes.
    step_size = 2000
    y_points = np.arange(0.0, step_size * num_grid_points, step_size,
                         dtype=np.float32)
    cube.add_dim_coord(
        DimCoord(
            y_points,
            "projection_y_coordinate",
            units="m",
            coord_system=STANDARD_GRID_CCRS,
        ),
        2,
    )
    x_points = np.arange(
        -50000.0, (step_size * num_grid_points) - 50000, step_size,
        dtype=np.float32
    )
    cube.add_dim_coord(
        DimCoord(
            x_points,
            "projection_x_coordinate",
            units="m",
            coord_system=STANDARD_GRID_CCRS,
        ),
        3,
    )
    return cube
def messageBox(self, title, text, icon=QMessageBox.Information):
    """
    Show a modal message box with an OK button and return its code.
    """
    box = QMessageBox(self)
    box.setWindowTitle(title)
    box.setText(text)
    box.setIcon(icon)
    box.setDefaultButton(QMessageBox.Ok)
    box.setFont(self.font())
    box.exec_()
    return QMessageBox.Ok
def binary_seg_loss(loss):
    """Select the binary segmentation loss by name.

    :param loss: loss identifier; ``'focal'`` selects focal loss, any
        other value falls back to binary cross-entropy
    """
    return BinaryFocalLoss() if loss == 'focal' else tf.keras.losses.BinaryCrossentropy()
def get_config_file():
""" Return the loaded config file if one exists. """
# config will be created here if we can't find one
new_config_path = os.path.expanduser('~/dagobahd.yml')
config_dirs = ['/etc',
os.path.expanduser('~/dagobah/dagobah/daemon/')]
config_filenames = ['dagobahd.yml',
'dagobahd.yaml',
'.dagobahd.yml',
'.dagobahd.yaml']
for directory in config_dirs:
for filename in config_filenames:
try:
if os.path.isfile(os.path.join(directory, filename)):
to_load = open(os.path.join(directory, filename))
config = yaml.load(to_load.read())
to_load.close()
replace_nones(config)
return config
except:
pass
# if we made it to here, need to create a config file
# double up on notifications here to make sure first-time user sees it
print 'Creating new config file in home directory'
print 'sometrhins'
logging.info('Creating new config file in home directory')
new_config = open(new_config_path, 'w')
new_config.write(return_standard_conf())
new_config.close()
new_config = open(new_config_path, 'r')
config = yaml.load(new_config.read())
new_config.close()
replace_nones(config)
return config | 4a5009d5d6f5a4be6d953d7bc9150c033a56f187 | 3,637,275 |
def bostock_cat_colors(color_sets=["set3"]):
    """
    Return categorical colors from one or more color-brewer palettes.

    Parameters
    ----------
    color_sets : list
        palette names to concatenate; valid options are
        (set1, set2, set3, pastel1, pastel2, paired, dark, accent,
        category10)

    Returns
    -------
    categorical_colors : list
        list of hex color strings (e.g. ["#e41a1c", ...])

    Examples
    --------
    >>> bostock_cat_colors(['set3'])[:5]
    ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3']

    >>> bostock_cat_colors(['category10'])[:5]
    ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd']

    Notes
    -----
    The full list of hex colors can be found at
    https://observablehq.com/@d3/color-schemes
    """
    palettes = {
        "set1": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
                 "#ff7f00", "#ffff33", "#a65628", "#f781bf",
                 "#999999"],
        "set2": ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3",
                 "#a6d854", "#ffd92f", "#e5c494", "#b3b3b3"],
        "set3": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072",
                 "#80b1d3", "#fdb462", "#b3de69", "#fccde5",
                 "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f"],
        "pastel1": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4",
                    "#fed9a6", "#ffffcc", "#e5d8bd", "#fddaec",
                    "#f2f2f2"],
        "pastel2": ["#b3e2cd", "#fdcdac", "#cbd5e8", "#f4cae4",
                    "#e6f5c9", "#fff2ae", "#f1e2cc", "#cccccc"],
        "paired": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c",
                   "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00",
                   "#cab2d6", "#6a3d9a", "#ffff99", "#b15928"],
        "dark": ["#1b9e77", "#d95f02", "#7570b3", "#e7298a",
                 "#66a61e", "#e6ab02", "#a6761d", "#666666"],
        "accent": ["#7fc97f", "#beaed4", "#fdc086", "#ffff99",
                   "#386cb0", "#f0027f", "#bf5b17", "#666666"],
        "category10": ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728",
                       "#9467bd", "#8c564b", "#e377c2", "#7f7f7f",
                       "#bcbd22", "#17becf"],
    }
    # Flatten the requested palettes in order into one list.
    return [color for name in color_sets for color in palettes[name]]
def bbpssw_gates_and_measurement_bob(q1, q2):
    """
    Apply Bob's BBPSSW purification step and measure the target qubit.

    :param q1: Bob's qubit from the first entangled pair (control)
    :param q2: Bob's qubit from the second entangled pair (target)
    :return: Integer 0/1 measurement outcome of the target qubit
    """
    q1.cnot(q2)
    return q2.measure()
from typing import Tuple
def fiber_array(
    n: int = 8,
    pitch: float = 127.0,
    core_diameter: float = 10,
    cladding_diameter: float = 125,
    layer_core: Tuple[int, int] = gf.LAYER.WG,
    layer_cladding: Tuple[int, int] = gf.LAYER.WGCLAD,
) -> Component:
    """Return a component with *n* fibers spaced by *pitch*.

    Each fiber is drawn as a core circle on *layer_core* inside a
    cladding circle on *layer_cladding*, and one port ``F<i>`` is added
    per fiber.

    .. code::

         pitch
          <->
        _________
       |         | lid
       | o o o o |
       |         | base
       |_________|
          length
    """
    component = Component()
    for index in range(n):
        core_ref = component.add_ref(circle(radius=core_diameter / 2, layer=layer_core))
        clad_ref = component.add_ref(circle(radius=cladding_diameter / 2, layer=layer_cladding))
        core_ref.movex(index * pitch)
        clad_ref.movex(index * pitch)
        # NOTE(review): every port is created at the default position;
        # it looks like each port should be centred on its own fiber
        # (index * pitch) -- confirm against callers before changing.
        component.add_port(name=f"F{index}", width=core_diameter, orientation=0)
    return component
def delete(movie_id):
    """
    Delete the movie identified by *movie_id* and return to the index.

    :param movie_id: primary key of the movie to delete
    :return: redirect response to the home page
    """
    movie = Movie.query.get(movie_id)
    db_session.delete(movie)
    db_session.commit()
    return redirect(url_for('home'))
import torch
def one_vector_block_diagonal(num_blocks: int, vector_length: int) -> Tensor:
    """Block-diagonal matrix whose blocks are column vectors of ones.

    Associated with the mathematical symbol :math:`E`.

    Example:
        ::

            one_vector_block_diagonal(3, 2) == tensor([
                [1., 0., 0.],
                [1., 0., 0.],
                [0., 1., 0.],
                [0., 1., 0.],
                [0., 0., 1.],
                [0., 0., 1.]])

    Args:
        num_blocks: number of columns.
        vector_length: number of ones in each diagonal block.

    Returns:
        ``(num_blocks * vector_length, num_blocks)`` 0-1 tensor.
    """
    # pylint: disable=E1103
    # Repeating each identity row `vector_length` times consecutively
    # yields exactly the stacked one-vector blocks.
    return torch.eye(num_blocks).repeat_interleave(vector_length, dim=0)
def get_cli_args():
    """
    Parse the wrapper's command-line arguments.

    :return: dict of command-line arguments from the user
    """
    # Start from the main pipeline's arguments, minus the ones this
    # wrapper supplies per subject/session itself.
    arg_names = get_main_pipeline_arg_names().difference({
        'output', 'ses', 'subject', 'task', WRAPPER_LOC[2:].replace('-', '_')
    })
    default_tasks = ('SST', 'MID', 'nback')
    cli = get_pipeline_cli_argparser(arg_names=arg_names)
    cli.add_argument('-all-events', '--all-events', type=valid_readable_dir,
                     help=('Valid path to an existing directory which has '
                           '1 folder per subject, with the folder structure '
                           '(--all-events)/(subject ID)/(session name)/'
                           'level-1/events.'))
    cli.add_argument('-all-outputs', '--all-outputs', type=valid_output_dir,
                     help=('Valid path to your output directory root. In '
                           'other words, the "--output" argument for each '
                           'command in the --script-list file will be '
                           'subject- and session-specific subdirectories '
                           'of this --all-outputs directory.'))
    cli.add_argument('-output', '--output', type=valid_output_dir, required=False)
    cli.add_argument('-script', '--script', type=valid_readable_file)
    cli.add_argument('-script-list', '--script-list', required=True)
    cli.add_argument('-slurm', '--slurm', action='store_true')
    cli.add_argument('-sourcedata', '--sourcedata', type=valid_readable_dir)
    cli.add_argument('-tasks', '--tasks', nargs='+', default=default_tasks)  # choices=tasks,
    cli.add_argument(WRAPPER_LOC, type=valid_readable_dir,
                     default=SCRIPT_DIR)  # , dest='loc')
    return vars(cli.parse_args())
from google.cloud import securitycenter
def list_all_assets(organization_id):
    """Demonstrate listing and printing all assets.

    Returns the index of the last asset printed (0 when none exist).
    """
    i = 0
    # [START securitycenter_list_all_assets]
    client = securitycenter.SecurityCenterClient()
    # organization_id is the numeric ID of the organization.
    # organization_id = "1234567777"
    org_name = "organizations/{org_id}".format(org_id=organization_id)
    # Call the API and print each asset with its index.
    iterator = client.list_assets(request={"parent": org_name})
    for i, asset in enumerate(iterator):
        print(i, asset)
    # [END securitycenter_list_all_assets]
    return i
def _0_to_empty_str(dataframe: pd.DataFrame, column_data_type: dict):
"""
데이터가 str인 column에 들어있는 0을 '' 로 바꾸어 준다.
column_data_type 에서 value가 'str' 인 column 만 바꾸어 준다.
"""
for column, datatype in column_data_type.items():
if datatype == "str":
dataframe[column].replace("0", "", inplace=True)
return dataframe | 2453b53c0e7a0067772f37d9d8c370b8accb933c | 3,637,283 |
def _predict(rel):
    """
    Score *rel* with the betrayal model.

    :return: the prediction as an inference.Output object
    """
    prediction = inference.predict(rel)
    return prediction
from datetime import datetime
def evaluate_exams(request, exam_id):
    """
    Grade a student's submitted exam solutions and persist the result.

    Request-Methods :POST
    Request-Headers : Authorization Token
    Request-Body: Student-Solution -> JSON
    Response: "student_name" -> str,
              "teacher_name" -> str,
              "batch" -> str,
              "marks" -> str,
              "exam_start_date_time" -> str,
              "total_marks" -> str,
              "grade" -> str
    """
    student_solutions = request.data.get("student_solutions")
    # Resolves the authenticated student from the Authorization token.
    student = check_token_and_get_student(request)
    # Only exams belonging to the student's own batch are visible.
    if not Exam.objects.filter(Q(id=exam_id) & Q(batch=student.batch)).exists():
        raise NotFoundException("Exam not found")
    exam = Exam.objects.get(Q(id=exam_id) & Q(batch=student.batch))
    # Reconstruct the start time by subtracting the exam duration from now.
    # NOTE(review): relies on `timezone` and `timedelta` being imported at
    # module level, and `exam.exam_period.hour` ignores any minutes
    # component of the duration -- confirm this is intended.
    exam_start_date_time = datetime.now(tz=timezone.utc) - timedelta(
        hours=exam.exam_period.hour
    )
    # A student may submit each exam only once.
    if Result.objects.filter(Q(exam=exam) & Q(student=student)).exists():
        raise AlreadyExistsException("Already Submitted the exam")
    score = evaluate_exam_score(exam.questions_and_solutions, student_solutions)[
        "total_score"
    ]
    grade = evaluate_exam_grade(score, exam.total_marks)
    # Persist the graded result before building the response payload.
    result = Result(
        exam_start_date_time=(exam_start_date_time),
        exam=exam,
        student=student,
        teacher=exam.teacher,
        student_solutions=student_solutions,
        total_marks=float(exam.total_marks),
        score=score,
        grade=grade,
    )
    result.save()
    # Response mirrors the stored result fields for the client.
    response = {
        "student_name": student.name,
        "teacher_name": exam.teacher.name,
        "batch": exam.batch,
        "score": score,
        "exam_start_date_time": (exam_start_date_time),
        "total_marks": float(exam.total_marks),
        "grade": grade,
    }
    return JsonResponse(data=response, status=200)
def _accumulated_penalty_energy_fw(energy_to_track, penalty_matrix, parallel):
    """Calculates acummulated penalty in forward direction (t=0...end).

    `energy_to_track`: squared abs time-frequency transform
    `penalty_matrix`: pre-calculated penalty for all potential jumps between
                      two frequencies
    `parallel`: selects the parallel helper implementation; both mutate
                the energy array in place and produce the same result.

    # Returns:
        `penalized_energy`: new energy with added forward penalty
        `ridge_idxs`: calculated initial ridge with only forward penalty
    """
    # Copy so the caller's energy array is left untouched; the helper
    # accumulates penalties into this copy in place.
    penalized_energy = energy_to_track.copy()
    fn = (__accumulated_penalty_energy_fwp if parallel else
          __accumulated_penalty_energy_fw)
    fn(penalized_energy, penalty_matrix)
    # Pick, per time step, the frequency bin with minimal penalized energy.
    # NOTE(review): `argmin(axis=0)` already yields per-column row indices;
    # passing them through `unravel_index(...)[1]` is an identity only when
    # n_freqs <= n_times -- confirm the intent here.
    ridge_idxs = np.unravel_index(np.argmin(penalized_energy, axis=0),
                                  penalized_energy.shape)[1]
    return penalized_energy, ridge_idxs
def rot90(m, k=1, axis=2):
    """Rotate a 3-D array by ``k * 90`` degrees counter-clockwise about *axis*.

    Unlike ``np.rot90`` this supports rotation around any of the three
    axes: the chosen axis is swapped into the last position, the planar
    rotation applied, and the axes swapped back.
    """
    swapped = np.swapaxes(m, 2, axis)
    rotated = np.rot90(swapped, k)
    return np.swapaxes(rotated, 2, axis)
def get_pads(onnx_node): # type: (NodeWrapper) -> Tuple[int, int, int]
    """
    Get padding values for the operation described by an ONNX node.

    If `auto_pad` attribute is specified as SAME_UPPER or SAME_LOWER, or VALID values are
    calculated. Otherwise values are taken from the `pads` attribute.

    `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...]

    :param onnx_node: wrapped ONNX node for Conv or Pool operation
    :return: tuple of numbers of pixels to pad (height, width, depth)
    """
    auto_pad = onnx_node.get_attribute_value('auto_pad')
    pads = onnx_node.get_attribute_value('pads', ())  # Padding along each axis
    kernel_shape = onnx_node.get_attribute_value('kernel_shape')
    # Attribute 'auto_pad' is deprecated, but is currently used by CNTK
    if auto_pad:
        if auto_pad == 'VALID':
            # VALID means no padding at all.
            pads = [0, 0] * len(kernel_shape)
        else:
            # SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.
            # In case of odd number add the extra padding at the end for SAME_UPPER and at the
            # beginning for SAME_LOWER.
            def pad_value(kernel_dim):  # type: (int) -> float
                """Half of the total padding needed for this kernel dim."""
                return (kernel_dim - 1.0) / 2.0
            # NOTE(review): floor/ceil are assumed to come from a
            # module-level `math` import -- confirm.
            pads_starts = [floor(pad_value(dim)) if auto_pad == 'SAME_UPPER' else
                           ceil(pad_value(dim)) for dim in kernel_shape]
            pads_ends = [ceil(pad_value(dim)) if auto_pad == 'SAME_UPPER' else
                         floor(pad_value(dim)) for dim in kernel_shape]
            pads = pads_starts + pads_ends
    # Raises if begin/end padding differ, which this backend can't express.
    verify_symmetric_padding(onnx_node, pads)

    # Collapse symmetric pads into one value per spatial dimension.
    pad_h, pad_w, pad_d = 0, 0, 0
    if pads and len(pads) == 2:  # ONNX input axes NCHW
        pad_h, pad_w = pads
    if pads and len(pads) == 3:  # ONNX input axes NCHWD
        pad_h, pad_w, pad_d = pads
    if pads and len(pads) == 4:  # ONNX input axes NCHW
        pad_h, pad_w, _, _ = pads
    elif pads and len(pads) == 6:  # ONNX input axes NCHWD
        pad_h, pad_w, pad_d, _, _, _ = pads

    return pad_h, pad_w, pad_d
from typing import Optional
import os
import re
def get_changelog_version() -> Optional[str]:
    """
    Return the latest version recorded in ``changelog.txt``, or ``None``
    when the file contains no recognizable version string.
    """
    root_dir = hgit.get_client_root(super_module=False)
    changelog_file = os.path.join(root_dir, "changelog.txt")
    hdbg.dassert_file_exists(changelog_file)
    changelog = hio.from_file(changelog_file)
    match = re.search(_CHANGELOG_VERSION_RE, changelog)
    return match.group() if match else None
def from_dict(transforms):
    """Deserialize transformations stored in a dict.

    Supports deserialization of Streams only. Nested ``transforms`` and
    ``affine_transforms`` entries are deserialized recursively.

    Parameters
    ----------
    transforms : dict
        Serialized transforms.

    Returns
    -------
    out : solt.core.Stream
        An instance of solt.core.Stream.
    """
    if not isinstance(transforms, dict):
        raise TypeError("Transforms must be a dict!")
    for name in transforms:
        entry = transforms[name]
        if "transforms" in entry:
            entry["transforms"] = [from_dict(sub) for sub in entry["transforms"]]
        if "affine_transforms" in entry:
            entry["affine_transforms"] = from_dict(entry["affine_transforms"])
        if name in Serializable.registry:
            cls = Serializable.registry[name]
        else:
            raise ValueError(f"Could not find {name} in the registry!")
    # Mirrors the original: instantiates the class found for the dict's
    # last key (in practice these dicts carry a single top-level key).
    return cls(**transforms[name])
def create_mp_pool(nproc=None):
    """Creates a multiprocessing pool of processes.

    Arguments
    ---------
    nproc : int, optional
        number of processors to use. Defaults to number of available CPUs
        minus 2.

    Raises
    ------
    ValueError
        if *nproc* exceeds the number of available CPUs.
    """
    n_cpu = pathos.multiprocessing.cpu_count()
    if nproc is None:
        nproc = n_cpu - 2
    # Validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if nproc > n_cpu:
        raise ValueError(
            f'Cannot allocate more processes than existing CPUs: {nproc} > {n_cpu}')
    return ProcessingPool(nproc)
def subpixel_edges(img, threshold, iters, order):
    """
    Detect subpixel edge features for each edge pixel in `img`.

    Implements the method of "Accurate Subpixel Edge Location Based on
    Partial Area Effect"
    (http://www.sciencedirect.com/science/article/pii/S0262885612001850).

    Parameters
    ----------
    img: ndarray
        A grayscale image.
    threshold: int or float
        Minimum intensity difference on either side of a pixel for it to
        be considered an edge.
    iters: int
        Number of smoothing iterations used to find the final edges:
          0  -> noise-free images; detection on the raw values
                (section 3 of the paper);
          1  -> low-noise images; detection on an image smoothed by a
                3x3 mask (sections 4 and 5 of the paper);
          >1 -> high-noise images; several stages of smoothing +
                detection + synthetic image creation (section 6); a few
                iterations are normally enough.
    order: int
        Order of the edges to find: 1 for straight lines, 2 for second
        order edges.

    Returns
    -------
    An instance of EdgePixel
    """
    if iters == 0:
        return main_iter0(img, threshold, iters, order)
    if iters == 1:
        return main_iter1(img, threshold, iters, order)
    if iters > 1:
        # Each pass refines the edges and synthesizes a new image to feed
        # into the next pass; the last pass's edge pixels are returned.
        for _ in range(iters):
            ep, img = main_iterN(img, threshold, iters, order)
        return ep
import numbers
import numpy
def arrays(hyperchunks, array_count):
    """Iterate over the arrays in a set of hyperchunks.

    Yields one lightweight ``Array`` view per concrete array index:
    integer / Ellipsis / slice selectors in each hyperchunk are expanded
    against *array_count*, with negative integers wrapping from the end.
    """
    class Attribute(object):
        # Read-only view over one attribute expression plus its hyperslices.
        def __init__(self, expression, hyperslices):
            self._expression = expression
            self._hyperslices = hyperslices
        @property
        def expression(self):
            return self._expression
        @property
        def hyperslice_count(self):
            return 0 if self._hyperslices is None else len(self._hyperslices)
        def hyperslices(self):
            """Iterate over the hyperslices in a hyperchunk."""
            if self._hyperslices is not None:
                for hyperslice in self._hyperslices:
                    yield tuple(hyperslice)
    class Array(object):
        # Read-only view over one resolved array index within a hyperchunk.
        def __init__(self, index, attributes, order, hyperslices):
            self._index = index
            self._attributes = attributes
            self._order = order
            self._hyperslices = hyperslices
        @property
        def index(self):
            return self._index
        @property
        def attribute_count(self):
            return 0 if self._attributes is None else len(self._attributes)
        @property
        def order(self):
            return self._order
        def attributes(self, attribute_count):
            """Iterate over the attributes in a hyperchunk."""
            if self._attributes is not None:
                for attributes in self._attributes:
                    if isinstance(attributes, (numbers.Integral, type(Ellipsis), slice)):
                        # Normalise int / Ellipsis selectors to slices so a
                        # single expansion path can be used below.
                        if isinstance(attributes, numbers.Integral):
                            if attributes < 0:
                                # Negative index counts from the end.
                                attributes = slice(attribute_count + attributes, attribute_count + attributes + 1)
                            else:
                                attributes = slice(attributes, attributes + 1)
                        elif isinstance(attributes, type(Ellipsis)):
                            # Ellipsis selects every attribute.
                            attributes = slice(0, attribute_count)
                        start, stop, step = attributes.indices(attribute_count)
                        for index in numpy.arange(start, stop, step):
                            yield Attribute(slycat.hyperchunks.grammar.AttributeIndex(index), self._hyperslices)
                    else:
                        # Anything else is treated as an attribute expression.
                        yield Attribute(attributes, self._hyperslices)
    for hyperchunk in hyperchunks:
        for arrays in hyperchunk.arrays:
            if isinstance(arrays, (numbers.Integral, type(Ellipsis), slice)):
                # Same normalisation as for attributes: int / Ellipsis
                # selectors become slices over the valid array range.
                if isinstance(arrays, numbers.Integral):
                    if arrays < 0:
                        arrays = slice(array_count + arrays, array_count + arrays + 1)
                    else:
                        arrays = slice(arrays, arrays + 1)
                elif isinstance(arrays, type(Ellipsis)):
                    arrays = slice(0, array_count)
                start, stop, step = arrays.indices(array_count)
                for index in numpy.arange(start, stop, step):
                    yield Array(index, hyperchunk.attributes, hyperchunk.order, hyperchunk.hyperslices)
            else:
                # Unlike attributes, arrays must be plain selectors.
                cherrypy.log.error("hyperchunks.__init__.py", "Unexpected array: %r" % arrays)
                raise ValueError("Unexpected array: %r" % arrays)
from typing import Union
import torch
import types
def ne(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> DNDarray:
    """
    Element-wise rich comparison of non-equality between two operands,
    commutative. Returns a boolean :class:`~heat.core.dndarray.DNDarray`.

    Parameters
    ----------
    x: DNDarray or scalar
        The first operand involved in the comparison
    y: DNDarray or scalar
        The second operand involved in the comparison

    Examples
    ---------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.ne(x, 3.0)
    DNDarray([[ True,  True],
              [False,  True]], dtype=ht.bool, device=cpu:0, split=None)
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.ne(x, y)
    DNDarray([[ True, False],
              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
    """
    # NOTE(review): `import types` above resolves to the *stdlib* module,
    # which has no `bool` attribute; heat's own `types` module appears to
    # be intended -- confirm against the package layout.
    result = _operations.__binary_op(torch.ne, x, y)
    if result.dtype != types.bool:
        # Coerce the local tensor to bool while keeping the distribution
        # metadata (shape, split, device, communicator) intact.
        result = dndarray.DNDarray(
            result.larray.type(torch.bool),
            result.gshape,
            types.bool,
            result.split,
            result.device,
            result.comm,
            result.balanced,
        )
    return result
def _quote_embedded_quotes(text):
"""
Replace any embedded quotes with two quotes.
:param text: the text to quote
:return: the quoted text
"""
result = text
if '\'' in text:
result = result.replace('\'', '\'\'')
if '"' in text:
result = result.replace('"', '""')
return result | 71231e590e025c2ceb7b2dd4fde4465a9ff61a4c | 3,637,295 |
def exp2(x):
    """Return 2 raised to the power *x*."""
    return pow(2, x)
import json
from typing import Generator
def play():
    """Play page: reuse the visitor's ticket for the current game, or
    issue a fresh one when none (or a stale one) is found.

    NOTE(review): `Generator` imported from `typing` cannot be
    instantiated; a project-local Generator class appears intended --
    confirm the real import at the top of the file.
    """
    ticket_name = request.cookies.get('ticket_name')
    ticket = None
    game = get_game()
    new_ticket = True
    if ticket_name:
        ticket = Ticket.get_by_name(ticket_name)
        # Bug fix: a stale cookie whose ticket no longer exists must also
        # trigger creation of a fresh ticket. The old expression
        # `ticket and ticket.game != game.id` evaluated to None when the
        # lookup failed, so no ticket was created and the render below
        # crashed on `ticket.data`.
        new_ticket = ticket is None or ticket.game != game.id
    if new_ticket:
        ticket = Ticket.create(
            name=get_name(),
            game=game.id,
            data=json.dumps(Generator().get_ticket())
        )
    resp = make_response(render_template("public/play.html", card=ticket, data=json.loads(ticket.data)))
    # Remember the ticket for the visitor's next request.
    resp.set_cookie('ticket_name', ticket.name)
    return resp
def count_search_results(idx, typ, query, date_range, exclude_distributions,
                         exclude_article_types):
    """Count the number of documents matching a query.

    Builds the ElasticSearch query body and runs a count against the
    given index and document type.
    """
    body = create_query(query, date_range, exclude_distributions,
                        exclude_article_types)
    return _es().count(index=idx, doc_type=typ, body=body)
def ifft(data: np.ndarray) -> np.ndarray:
    """
    Inverse discrete Fourier transform of *data* via the conjugation
    identity ``ifft(x) = conj(fft(conj(x))) / n``.

    Arguments:
        data: frequency data to be transformed (np.array, shape=(n,), dtype='float64')
    Return:
        result: Inverse transformed data
    """
    n = len(data)
    # Bug fix: the 1/n normalisation was missing (`n` was computed but
    # never used), so the round trip ifft(fft(x)) returned n * x instead
    # of x.
    result = np.conjugate(fft(np.conjugate(data))) / n
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.