content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def moeda(n=0):
    """
    -> Formats a number as Brazilian currency.

    :param n: number to format (defaults to 0)
    :return: string in the form 'R$ <value with two decimals>'
    """
    return 'R$ ' + format(n, '.2f')
from PIL import Image
import aggdraw
def _draw_subpath(subpath_list, width, height, brush, pen):
    """
    Rasterize Bezier curves.
    TODO: Replace aggdraw implementation with skimage.draw.
    """
    # Grayscale canvas, initially all black (0).
    canvas = Image.new('L', (width, height), 0)
    drawer = aggdraw.Draw(canvas)
    pen = aggdraw.Pen(**pen) if pen else None
    brush = aggdraw.Brush(**brush) if brush else None
    for subpath in subpath_list:
        # A curve needs at least two knots to be drawable.
        if len(subpath) <= 1:
            logger.warning('not enough knots: %d' % len(subpath))
            continue
        symbol_spec = ' '.join(str(tok) for tok in _generate_symbol(subpath, width, height))
        drawer.symbol((0, 0), aggdraw.Symbol(symbol_spec), pen, brush)
    drawer.flush()
    del drawer
    # Return a float mask in [0, 1] with an explicit channel axis: (H, W, 1).
    return np.expand_dims(np.array(canvas).astype(np.float32) / 255., 2)
def parse_devices(metadata, parser):
"""
Iterate device ``metadata`` to use ``parser`` to create and return a
list of network device objects.
:param metadata:
A collection of key/value pairs (Generally returned from
`~trigger.rancid.parse_rancid_file`)
:param parser:
A callabale used to create your objects
"""
# Two tees of `metadata` iterator, in case a TypeError is encountered, we
# aren't losing the first item.
md_original, md_backup = itertools.tee(metadata)
try:
# Try to parse using the generator (most efficient)
return [parser(d) for d in md_original]
except TypeError:
# Try to parse by unpacking a dict into kwargs
return [parser(**dict(d)) for d in md_backup]
except Exception as err:
# Or just give up
print "Parser failed with this error: %r" % repr(err)
return None
else:
raise RuntimeError('This should never happen!') | de45f8d7564d13ddf56257407c857a473ed869a1 | 3,627,802 |
def preprocess_chunk(raw_chunk, subtract_mean=True, convert_to_milivolt=False):
    """
    Preprocesses a chunk of data.

    Parameters
    ----------
    raw_chunk : array_like
        Chunk of raw data to preprocess.
    subtract_mean : bool, optional
        Subtract the per-sample mean across channels, by default True.
    convert_to_milivolt : bool, optional
        Scale the chunk to milivolt, by default False.

    Returns
    -------
    raw_chunk : array_like
        The preprocessed chunk.
    """
    if subtract_mean:
        # Work in transposed orientation so the mean is taken along axis 0,
        # then transpose back to the original layout.
        transposed = raw_chunk.transpose()
        raw_chunk = (transposed - np.mean(transposed, axis=0)).transpose()
    if convert_to_milivolt:
        raw_chunk = raw_chunk * (0.195 / 1000)
    return raw_chunk
def generate_presigned_url(s3_client, client_method, method_parameters, expires_in):
    """
    Generating a presigned Amazon S3 URL that can be used to perform an action.

    :param s3_client: a boto3 S3 client used to sign the request
    :param client_method: name of the client method the URL will invoke
    :param method_parameters: parameters for that client method
    :param expires_in: lifetime of the URL in seconds
    :return: the presigned URL string; re-raises ClientError on failure
    """
    try:
        presigned = s3_client.generate_presigned_url(
            ClientMethod=client_method,
            Params=method_parameters,
            ExpiresIn=expires_in,
        )
        log.info("Got presigned URL")
    except ClientError:
        log.info(
            f"Couldn't get a presigned URL for client method {client_method}")
        raise
    return presigned
def update_topic(zkurl, topic, partitions, replica=None, kafka_env=None):
    """Alter a topic in Kafka instances from zkurl list"""
    # Thin wrapper: delegate to the shared topic-command executor in 'alter' mode.
    return _exec_topic_cmd(
        TOPIC_CLASS, 'alter', zkurl, topic, partitions,
        replica=replica, kafka_env=kafka_env)
def get_list_peaks(matrix, image_size):
    """Return a list of Peak objects built from an H5 data matrix.

    Each matrix row is (x, y, intensity, offset); x/y are re-centered so the
    image center becomes the origin (y is flipped).

    Returns
    -------
    peaks : list
        List of Peak objects, or None if the matrix rows are malformed.
    """
    try:
        half_width = image_size[1] / 2.0
        half_height = image_size[0] / 2.0
        peaks = []
        for row in matrix[:, ]:
            peaks.append(Peak(posx=(row[0] + half_width),
                              posy=(-row[1] + half_height),
                              intensive=row[2], offset=row[3]))
        return peaks
    except IndexError:
        # Malformed rows: warn and fall through (implicitly returns None).
        LOGGER.warning("Problem with peaks from the h5 file.")
from datetime import datetime
async def add_report_history(
        id: int,
        search_id: str,
        search_start: datetime,
        search_end: datetime,
        search_type: str,
        session: str):
    """Add report history.

    Persists one search entry to the user's report history.

    Args:
        id: the user_id
        search_id: the id of the search
        search_start: the start of the search
        search_end: the end of the search
        search_type: the type of search
        session: the logged in user session

    Returns:
        json with message for error or an id for the added log
    """
    session_ok = _validate_session(session)
    if not session_ok:
        return {"message": "Logout and log back in."}
    print("added report history")
    new_entry = db.add_report_history(id, search_id, search_start, search_end, search_type)
    return {"id": new_entry["id"]}
def get_latest_featuregroup_version(featuregroup, featurestore=None):
    """
    Utility method to get the latest version of a particular featuregroup

    Example usage:
    >>> featurestore.get_latest_featuregroup_version("teams_features_spanish")

    Args:
        :featuregroup: the featuregroup to get the latest version of
        :featurestore: the featurestore where the featuregroup resides
            (defaults to the project featurestore)

    Returns:
        the latest version of the featuregroup in the feature store
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        return fs_utils._do_get_latest_featuregroup_version(
            featuregroup,
            core._get_featurestore_metadata(featurestore, update_cache=False))
    except Exception:
        # Fix: the original except clause repeated the exact same call with
        # update_cache=False, so the retry could never behave differently.
        # The cached metadata may simply be stale (e.g. the featuregroup was
        # created after the cache was filled): refresh the cache and retry.
        return fs_utils._do_get_latest_featuregroup_version(
            featuregroup,
            core._get_featurestore_metadata(featurestore, update_cache=True))
def guardian(badger: BadgerSystem, startBlock, endBlock, pastRewards, test=False):
    """
    Guardian Role
    - Check if there is a new proposed root
    - If there is, run the rewards script at the same block height to verify the results
    - If there is a discrepency, notify admin
    (In case of a one-off failure, Script will be attempted again at the guardianInterval)

    :param badger: the BadgerSystem to operate on
    :param startBlock: first block of the rewards calculation range
    :param endBlock: last block of the rewards calculation range
    :param pastRewards: previously published rewards data to build on
    :param test: when True, skip the upload and on-chain root approval (dry run)
    :returns: False if there is no pending root, otherwise None after processing
    """
    console.print("\n[bold cyan]===== Guardian =====[/bold cyan]\n")
    console.print(
        "\n[green]Calculate rewards for {} blocks: {} -> {} [/green]\n".format(
            endBlock - startBlock, startBlock, endBlock
        )
    )
    badgerTree = badger.badgerTree
    # Only run if we have a pending root
    if not badgerTree.hasPendingRoot():
        console.print("[bold yellow]===== Result: No Pending Root =====[/bold yellow]")
        return False
    rewards_data = generate_rewards_in_range(badger, startBlock, endBlock, pastRewards)
    console.print("===== Guardian Complete =====")
    if not test:
        # Fix: the original had a stray trailing comma after this call, which
        # turned the statement into a pointless one-element tuple expression.
        upload(rewards_data["contentFileName"])
        badgerTree.approveRoot(
            rewards_data["merkleTree"]["merkleRoot"],
            rewards_data["rootHash"],
            rewards_data["merkleTree"]["cycle"],
            rewards_data["merkleTree"]["startBlock"],
            rewards_data["merkleTree"]["endBlock"],
            {"from": badger.guardian, "gas_price": gas_strategy},
        )
def cutMapById(data, subcatchmap, id, x, y, FillVal):
    """
    Cut ``data`` down to the bounding box of the cells matching ``id`` in
    ``subcatchmap`` (with a one-cell margin where possible); cells outside
    the subcatchment are overwritten with ``FillVal`` in place.

    :param data: 2d numpy array to cut
    :param subcatchmap: 2d numpy array with subcatch
    :param id: id (value in the array) to cut by
    :param x: array with x values
    :param y: array with y values
    :return: x, y, data (or (None, None, None) on shape mismatch)
    """
    if len(data.flatten()) != len(subcatchmap.flatten()):
        return None, None, None
    inside = subcatchmap == id
    data[np.logical_not(inside)] = FillVal
    # Column extent of the subcatchment, widened by one cell when in bounds.
    cols, = np.where(inside.max(axis=0))
    xmin, xmax = cols.min(), cols.max()
    if xmin >= 1:
        xmin -= 1
    if xmax < len(x) - 1:
        xmax += 1
    # Row extent, widened the same way.
    rows, = np.where(inside.max(axis=1))
    ymin, ymax = rows.min(), rows.max()
    if ymin >= 1:
        ymin -= 1
    if ymax < len(y) - 1:
        ymax += 1
    return (
        x[xmin:xmax].copy(),
        y[ymin:ymax].copy(),
        data[ymin:ymax, xmin:xmax].copy(),
    )
def train_step(input_image, target):
    """Run a single training step and return losses."""
    # Record the forward pass on two tapes: one for each network's gradients.
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated = GENERATOR(input_image, training=True)
        real_logits = DISCRIMINATOR([input_image, target], training=True)
        fake_logits = DISCRIMINATOR([input_image, generated],
                                    training=True)
        gen_total_loss, gen_gan_loss, gen_l1_loss = gn.generator_loss(
            fake_logits,
            generated,
            target)
        disc_loss = ds.discriminator_loss(real_logits,
                                          fake_logits)
    # Back-propagate and apply one optimizer step per network.
    gen_grads = gen_tape.gradient(gen_total_loss,
                                  GENERATOR.trainable_variables)
    disc_grads = disc_tape.gradient(disc_loss,
                                    DISCRIMINATOR.trainable_variables)
    GENERATOR_OPTIMIZER.apply_gradients(zip(gen_grads,
                                            GENERATOR.trainable_variables))
    DISCRIMINATOR_OPTIMIZER.apply_gradients(zip(disc_grads,
                                                DISCRIMINATOR.trainable_variables))
    return {'gen_total_loss': gen_total_loss,
            'gen_gan_loss': gen_gan_loss,
            'gen_l1_loss': gen_l1_loss,
            'disc_loss': disc_loss}
from functools import wraps
def flow(flow):
    """Decorator factory:
        decorator = flow(flow)
    The decorator then transforms a method:
        method = decorator(method)
    so that the "flow" kwarg is set to the argument of the decorator.
    A nonsense value of "flow" will raise an Exception in
    Component.__select_flow
    """
    ## flow(flow) returns this (that's the python decorator protocol)
    def decorator(method):
        ## The decorator returns the method with the flow keyword set;
        ## @wraps preserves the docstring and __name__
        @wraps(method)
        def method_with_flow(self, *args, **kwargs):
            kwargs["flow"] = flow
            # Fix: was `method(self)(*args, **kwargs)`, which called the
            # method with only `self` and then tried to call its return
            # value; the wrapped method must be invoked once with all args.
            return method(self, *args, **kwargs)
        return method_with_flow
    return decorator
def do_tags_for_model(parser, token):
    """
    Retrieves a list of ``Tag`` objects associated with a given model
    and stores them in a context variable.

    The model is given in ``[appname].[modelname]`` format. Appending
    ``with counts`` adds a ``count`` attribute to each tag holding the
    number of tagged instances of the model.

    Example usage::
        {% tags_for_model products.Widget as widget_tags %}
        {% tags_for_model products.Widget as widget_tags with counts %}
    """
    bits = token.contents.split()
    len_bits = len(bits)
    if len_bits not in (4, 6):
        raise TemplateSyntaxError('%s tag requires either three or five arguments' % bits[0])
    if bits[2] != 'as':
        raise TemplateSyntaxError("second argument to %s tag must be 'as'" % bits[0])
    with_counts = (len_bits == 6)
    if with_counts:
        if bits[4] != 'with':
            raise TemplateSyntaxError("if given, fourth argument to %s tag must be 'with'" % bits[0])
        if bits[5] != 'counts':
            raise TemplateSyntaxError("if given, fifth argument to %s tag must be 'counts'" % bits[0])
    return TagsForModelNode(bits[1], bits[3], counts=with_counts)
def use(alpha, beta):
    """Sum 2 things."""
    # Delegate the actual work to the helper module.
    result = functions.func(alpha, beta)
    return result
def controller_enabled_provisioned(hostname):
    """Check whether ``hostname`` is an enabled/provisioned controller host.

    :param hostname: name of the host to look for
    :returns: True if a matching host is enabled and provisioned, else False
    :raises: re-raises any exception from the OpenStack client or host query
    """
    try:
        with openstack.OpenStack() as client:
            hosts = get_hosts(client.admin_token,
                              client.conf['region_name'])
            for host in hosts:
                if (hostname == host.name and
                        host.is_controller_enabled_provisioned()):
                    LOG.info("host %s is enabled/provisioned" % host.name)
                    return True
    except Exception:
        LOG.exception("failed to check if host is enabled/provisioned")
        # Fix: was `raise e` — a bare `raise` re-raises from the original
        # frame and keeps the traceback intact.
        raise
    return False
from covid19sim.human import Human
def get_humans_with_age(city, age_histogram, conf, rng):
    """
    Creates human objects corresponding to the numbers in `age_histogram`.

    Args:
        city (covid19sim.location.City): simulator's city object
        age_histogram (dict): keys are age bins (tuple), values are the number
            of humans in that bin (int)
        conf (dict): yaml configuration of the experiment
        rng (np.random.RandomState): random number generator used to seed each
            Human's private RNG stream
    Returns:
        dict: keys are age bins (tuple); values are lists of Human objects
    """
    # TODO - parallelize this
    humans = defaultdict(list)
    human_id = -1
    for age_bin, bin_count in age_histogram.items():
        # Draw all ages for this bin up-front, uniformly within [low, high].
        # NOTE(review): ages come from city.rng while each Human's RNG is
        # seeded from the `rng` argument — confirm this mix is intentional.
        ages = city.rng.randint(low=age_bin[0], high=age_bin[1] + 1, size=bin_count)
        for idx in range(bin_count):
            human_id += 1
            humans[age_bin].append(Human(
                env=city.env,
                city=city,
                rng=np.random.RandomState(rng.randint(2 ** 16)),
                name=human_id,
                age=ages[idx],
                conf=conf
            ))
    return humans
from typing import Union
from typing import Optional
from typing import Generator
from typing import Tuple
from typing import List
import logging
def place_ontop_obj_pos(
    env: "BehaviorEnv",
    obj: Union["URDFObject", "RoomFloor"],
    place_rel_pos: Array,
    rng: Optional[Generator] = None,
) -> Optional[Tuple[List[List[float]], List[List[float]]]]:
    """Parameterized controller for placeOnTop.

    Runs motion planning to find a feasible trajectory to a certain
    offset from obj and selects an orientation such that the palm is
    facing the object. If the placement is infeasible, returns an
    indication to this effect (None). Otherwise, returns the plan, which
    is a list of list of hand poses, as well as the original euler angle
    orientation of the hand.

    Args:
        env: simulator environment holding the scene and robots.
        obj: the object (or room floor) to place on top of.
        place_rel_pos: continuous placement parameters relative to obj.
        rng: optional RNG used by the planner; a fixed-seed default is
            created when omitted, so repeated calls are reproducible.

    Returns:
        (plan, original_orientation) on success, else None.
    """
    # Deterministic default so planning is reproducible without an explicit rng.
    if rng is None:
        rng = np.random.default_rng(23)
    # Look up the object currently grasped by the right hand.
    obj_in_hand = env.scene.get_objects()[
        env.robots[0].parts["right_hand"].object_in_hand]
    logging.info(f"PRIMITIVE: attempt to place {obj_in_hand.name} ontop "
                 f"{obj.name} with params {place_rel_pos}")
    # if the object in the agent's hand is None or not equal to the object
    # passed in as an argument to this option, fail and return None
    if not (obj_in_hand is not None and obj_in_hand != obj):
        logging.info("Cannot place; either no object in hand or holding "
                     "the object to be placed on top of!")
        return None
    # if the object is not a urdf object, fail and return None
    if not isinstance(obj, URDFObject):
        logging.info(f"PRIMITIVE: place {obj_in_hand.name} ontop "
                     f"{obj.name} fail, too far")
        return None
    state = p.saveState()
    # To check if object fits on place location
    # NOTE(review): saveState is immediately followed by restoreState with
    # nothing in between -- presumably a leftover from a feasibility check;
    # confirm whether the restore is still needed.
    p.restoreState(state)
    # NOTE: This below line is *VERY* important after the
    # pybullet state is restored. The hands keep an internal
    # track of their state, and if we don't reset their this
    # state to mirror the actual pybullet state, the hand will
    # think it's elsewhere and update incorrectly accordingly
    env.robots[0].parts["right_hand"].set_position(
        env.robots[0].parts["right_hand"].get_position())
    env.robots[0].parts["left_hand"].set_position(
        env.robots[0].parts["left_hand"].get_position())
    # Run sampling-based motion planning for a placement trajectory.
    plan = place_obj_plan(env, obj, state, place_rel_pos, rng=rng)
    # If RRT planning fails, fail and return None
    if plan is None:
        logging.info(f"PRIMITIVE: placeOnTop {obj.name} fail, failed "
                     f"to find plan to continuous params {place_rel_pos}")
        return None
    # Remember the hand's euler orientation so callers can restore it later.
    original_orientation = list(
        p.getEulerFromQuaternion(
            env.robots[0].parts["right_hand"].get_orientation()))
    logging.info(f"PRIMITIVE: placeOnTop {obj.name} success! Plan found with "
                 f"continuous params {place_rel_pos}.")
    return plan, original_orientation
def make_htc_proxy_X(X: np.ndarray):
    """
    Makes HTC proxy values from data series.

    For each series x the proxy computed here is
    sum(x[:, 2]) / sum(x[:, 0] - x[:, 1]).

    NOTE(review): the previous docstring stated sum(gas) / *mean*(in_temp -
    out_temp), but the code sums the denominator (the two differ by a factor
    of the series length). The code is kept as-is; confirm which formula is
    intended. Column meaning (in_temp, out_temp, gas) is inferred from the
    old docstring — verify against the callers.

    :param X: iterable of 2-D arrays, one per series.
    :return: array of shape (len(X), 1) with one proxy value per series.
    """
    return np.array([[np.sum(x[:,2]) / np.sum(x[:,0] - x[:,1])] for x in X])
def recovery_invalid_token(
) -> str:
    """Return a token that is known to be invalid (test/fixture helper)."""
    return 'wrong'
def squeeze(input_vector):
    """Ensure vector only has one axis of dimensionality.

    Multi-dimensional input is squeezed; 1-D input is returned unchanged.
    """
    return np.squeeze(input_vector) if input_vector.ndim > 1 else input_vector
import warnings
def summarize_darshan_perf(darshan_logs):
    """
    Given a list of Darshan log file paths, calculate the performance observed
    from each file and identify OSTs over which each file was striped. Return
    this summary of file performances and stripes.

    Args:
        darshan_logs (list of str): paths of Darshan log files to parse.

    Returns:
        dict: three parallel lists keyed by ``file_paths`` ("<log>@<path>"
        strings), ``performance``, and ``ost_lists`` (lists of OST ids);
        index i of each list describes the same file record.
    """
    # Three parallel lists; index i refers to the same file record in each.
    results = {
        'file_paths': [],
        'performance': [],
        'ost_lists': [],
    }
    for darshan_log in darshan_logs:
        darshan_data = tokio.connectors.darshan.Darshan(darshan_log)
        darshan_data.darshan_parser_base()
        # Skip (with a warning) any log that lacks the modules we rely on.
        if 'counters' not in darshan_data:
            warnings.warn("Invalid Darshan log %s" % darshan_log)
            continue
        elif 'posix' not in darshan_data['counters']:
            warnings.warn("Darshan log %s does not contain POSIX module data" % darshan_log)
            continue
        elif 'lustre' not in darshan_data['counters']:
            warnings.warn("Darshan log %s does not contain Lustre module data" % darshan_log)
            continue
        counters = darshan_data['counters']
        for logged_file_path, ranks_data in counters['posix'].items():
            # encode the darshan log's name in addition to the file path in case
            # multiple Darshan logs with identical file paths (but different
            # striping) are being processed
            file_path = "%s@%s" % (darshan_log, logged_file_path)
            # calculate the file's I/O performance
            performance = estimate_darshan_perf(ranks_data)
            # assemble a list of OSTs
            ost_list = set([])
            # Files with POSIX data but no Lustre striping info are skipped.
            if logged_file_path not in counters['lustre']:
                continue
            for counter_data in counters['lustre'][logged_file_path].values():
                for ost_id in range(counter_data['STRIPE_WIDTH']):
                    key = "OST_ID_%d" % ost_id
                    ost_list.add(counter_data[key])
            # append findings from this file record to the master dict
            if file_path not in results['file_paths']:
                results['file_paths'].append(file_path)
                results['performance'].append(performance)
                results['ost_lists'].append(list(ost_list))
            else:
                # Duplicate record for the same file: keep the best observed
                # performance and union the OST sets.
                index = results['file_paths'].index(file_path)
                if results['performance'][index] < performance:
                    results['performance'][index] = performance
                results['ost_lists'][index] = \
                    list(set(results['ost_lists'][index]) | ost_list)
    return results
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import login as auth_login
def login(request, template_name="lfs/customer/login.html"):
    """Custom view to login or register/login a user.

    The reason to use a custom login method are:
      * validate checkout type
      * integration of register and login form
    It uses Django's standard AuthenticationForm, though.

    Fix: this view is itself named ``login``, so the bare ``login(request,
    user)`` calls inside it recursed into the view instead of calling
    ``django.contrib.auth.login``; they now use the ``auth_login`` alias.
    """
    # shop = lfs.core.utils.get_default_shop(request)
    # If only anonymous checkout is allowed this view doesn't exists :)
    # if shop.checkout_type == CHECKOUT_TYPE_ANON:
    #     raise Http404()
    login_form = CustomerAuthenticationForm()
    login_form.fields["username"].label = _(u"E-Mail")
    register_form = RegisterForm()
    if request.POST.get("action") == "login":
        login_form = CustomerAuthenticationForm(data=request.POST)
        login_form.fields["username"].label = _(u"E-Mail")
        if login_form.is_valid():
            redirect_to = request.POST.get("next")
            # Light security check -- make sure redirect_to isn't garbage.
            if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
                redirect_to = reverse("lfs_shop_view")
            auth_login(request, login_form.get_user())
            return lfs.core.utils.set_message_cookie(
                redirect_to, msg=_(u"You have been logged in."))
    elif request.POST.get("action") == "register":
        register_form = RegisterForm(data=request.POST)
        if register_form.is_valid():
            email = register_form.data.get("email")
            password = register_form.data.get("password_1")
            # Create user
            user = User.objects.create_user(
                username=create_unique_username(email), email=email, password=password)
            # Create customer
            customer = customer_utils.get_or_create_customer(request)
            customer.user = user
            customer.save()
            # Notify
            lfs.core.signals.customer_added.send(sender=user)
            # Log in user
            user = authenticate(username=email, password=password)
            auth_login(request, user)
            redirect_to = request.POST.get("next")
            if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
                redirect_to = reverse("lfs_shop_view")
            return lfs.core.utils.set_message_cookie(
                redirect_to, msg=_(u"You have been registered and logged in."))
    # Get next_url
    next_url = (request.POST if request.method == 'POST' else request.GET).get("next")
    if next_url is None:
        next_url = request.META.get("HTTP_REFERER")
    if next_url is None:
        next_url = reverse("lfs_shop_view")
    # Get just the path of the url. See django.contrib.auth.views.login for more
    next_url = urlparse(next_url)
    next_url = next_url[2]
    try:
        login_form_errors = login_form.errors["__all__"]
    except KeyError:
        login_form_errors = None
    return render(request, template_name, {
        "login_form": login_form,
        "login_form_errors": login_form_errors,
        "register_form": register_form,
        "next_url": next_url,
    })
def is_string(var):
    """Check if `var` is a string (or unicode)."""
    # On Python 2 both `str` and `unicode` count as strings.
    if python2:
        return isinstance(var, (str, unicode))
    return isinstance(var, str)
def get_parameter_name(argument):
    """Return the parameter name with its leading '$'/'%' prefix stripped."""
    if argument[0] in {'$', '%'}:
        return argument[1:]
    raise AssertionError(u'Unexpectedly received an unprefixed parameter name, unable to '
                         u'determine whether it is a runtime or tagged parameter: {}'
                         .format(argument))
def find_distance(a1, num_atoms, canon_adj_list, max_distance=7):
    """
    Calculate graph distance between atom a1 and the remaining atoms via BFS.

    Returns a (num_atoms, max_distance) array where entry [i, r] is 1 when
    atom i is exactly r+1 bonds away from a1 (capped at max_distance).
    """
    distance = np.zeros((num_atoms, max_distance))
    # frontier: atoms exactly (radial + 1) bonds from a1; visited: all reached.
    frontier = set(canon_adj_list[a1])
    visited = set([a1])
    for radial in range(max_distance):
        distance[list(frontier), radial] = 1
        visited |= frontier
        next_frontier = set()
        for atom in frontier:
            next_frontier.update(canon_adj_list[atom])
        frontier = next_frontier - visited
    return distance
def _get_pixel_coords(plot_params):
    """A helper method to define coordinates for a plotting window.

    This routine builds a coordinate surface map for the plotting window
    defined by the user. If no window was defined, then this routine uses the
    outer bounding box around the geometry as the plotting window.

    Parameters
    ----------
    plot_params : openmoc.plotter.PlotParams
        A PlotParams object initialized with a geometry

    Returns
    -------
    coords : dict
        A dictionary with the plotting window map ('dim1', 'dim2') and
        bounding box ('bounds')
    """
    geometry = plot_params.geometry
    coords = dict()
    bounds = dict()
    root = geometry.getRootUniverse()
    # Default to the geometry's bounding box, nudged inward by TINY_MOVE.
    bounds['x'] = [root.getMinX() + TINY_MOVE,
                   root.getMaxX() - TINY_MOVE]
    bounds['y'] = [root.getMinY() + TINY_MOVE,
                   root.getMaxY() - TINY_MOVE]
    bounds['z'] = [root.getMinZ() + TINY_MOVE,
                   root.getMaxZ() - TINY_MOVE]
    # User-specified limits override the defaults per axis.
    # (Fix: was the non-idiomatic `if not plot_params.xlim is None`.)
    if plot_params.xlim is not None:
        bounds['x'] = list(plot_params.xlim)
    if plot_params.ylim is not None:
        bounds['y'] = list(plot_params.ylim)
    if plot_params.zlim is not None:
        bounds['z'] = list(plot_params.zlim)
    # The plane string names its two axes, e.g. 'xz' -> ('x', 'z'); this
    # replaces three copy-pasted branches for 'xy'/'xz'/'yz'. An invalid
    # plane now raises instead of silently returning an incomplete dict.
    dim1_axis, dim2_axis = plot_params.plane
    coords['dim1'] = np.linspace(bounds[dim1_axis][0], bounds[dim1_axis][1],
                                 plot_params.gridsize)
    coords['dim2'] = np.linspace(bounds[dim2_axis][0], bounds[dim2_axis][1],
                                 plot_params.gridsize)
    coords['bounds'] = bounds[dim1_axis] + bounds[dim2_axis]
    return coords
import requests
import itertools
def parse_nasa_catalog(mission, product, version, from_date=None, to_date=None, min_max=False):
    """
    Function to parse the NASA Hyrax dap server via the catalog xml.

    Parameters
    ----------
    mission : str
        The mission to parse (must be a key of ``mission_product_dict``).
    product : str
        The product to parse.
    version : int
        The product version.
    from_date : str or None
        The start date to query.
    to_date : str or None
        The end date to query.
    min_max : bool
        Should only the min and max dates of the product and version be returned?

    Returns
    -------
    DataFrame
        One row per data file with timezone-aware from/to dates, file url/size,
        and the mission/product/version columns appended.

    Notes
    -----
    I wish their server was faster, but if you try to query too many dates then it might take a while.
    """
    ## mission/product parse
    base_url = mission_product_dict[mission]['base_url']
    mis_url = '/'.join([base_url, 'opendap/hyrax', mission_product_dict[mission]['process_level']])
    prod_xml = requests.get(mis_url + '/catalog.xml')
    prod_lst = parse(prod_xml.content)['thredds:catalog']['thredds:dataset']['thredds:catalogRef']
    # `&` is a bitwise and of two bools here; equivalent to `and` in this case.
    prod1 = [p for p in prod_lst if (product in p['@name']) & (str(version) in p['@name'])]
    if not prod1:
        raise ValueError('No combination of product and version in specified mission')
    ## Parse available years
    years_url = '/'.join([mis_url, prod1[0]['@name']])
    years_xml = requests.get(years_url + '/catalog.xml')
    years_lst = parse(years_xml.content)['thredds:catalog']['thredds:dataset']['thredds:catalogRef']
    # A single year is returned as a dict rather than a list; normalize both.
    if isinstance(years_lst, list):
        years_dict = {int(y['@name']): y for y in years_lst}
    else:
        years_dict = {int(years_lst['@name']): years_lst}
    ## Parse available months/days of the year
    big_lst = []
    for y in years_dict:
        my_url = '/'.join([years_url, str(y)])
        my_xml = requests.get(my_url + '/catalog.xml')
        my_lst = parse(my_xml.content)['thredds:catalog']['thredds:dataset']['thredds:catalogRef']
        if not isinstance(my_lst, list):
            my_lst = [my_lst]
        big_lst.extend([[y, int(d['@name']), base_url + d['@ID']] for d in my_lst])
    my_df = pd.DataFrame(big_lst, columns=['year', 'dayofyear', 'url'])
    # Convert (year, day-of-year) to a real date; dayofyear is 1-based.
    my_df['date'] = pd.to_datetime(my_df.year.astype(str)) + pd.to_timedelta(my_df.dayofyear - 1, unit='D')
    my_df.drop(['year', 'dayofyear'], axis=1, inplace=True)
    ## Get all requested dates
    if isinstance(from_date, str):
        my_df = my_df[(my_df.date >= from_date)]
    if isinstance(to_date, str):
        my_df = my_df[(my_df.date <= to_date)]
    if min_max:
        my_df = my_df.iloc[[0, -1]]
    # Fetch and parse each day's listing concurrently (30 worker threads).
    iter1 = [(row.date, row.url) for index, row in my_df.iterrows()]
    big_lst = ThreadPool(30).starmap(parse_dates, iter1)
    big_lst2 = list(itertools.chain.from_iterable(big_lst))
    date_df = pd.DataFrame(big_lst2, columns=['date', 'start_time', 'end_time', 'file_name', 'file_url', 'file_size', 'modified_date'])
    # Normalize times to UTC-suffixed timestamps; unparseable times become NaT
    # via errors='coerce' (then the string 'NaT' after astype(str)).
    date_df['modified_date'] = pd.to_datetime(date_df['modified_date'] + '+00')
    date_df['start_time'] = pd.to_datetime(date_df['start_time'], format='%H%M%S', errors='coerce').dt.time.astype(str) + 'Z+00'
    date_df['end_time'] = pd.to_datetime(date_df['end_time'], format='%H%M%S', errors='coerce').dt.time.astype(str) + 'Z+00'
    date_df['from_date'] = pd.to_datetime(date_df['date'].astype(str) + 'T' + date_df['start_time'])
    date_df['to_date'] = pd.to_datetime(date_df['date'].astype(str) + 'T' + date_df['end_time'])
    date_df.drop(['date', 'start_time', 'end_time'], axis=1, inplace=True)
    ## Add in extra columns and return
    date_df['mission'] = mission
    date_df['product'] = product
    date_df['version'] = version
    return date_df
def open_pdb(f_loc):
    """
    This function reads in a .pdb file and returns the atom names and coordinates.

    Parameters
    ----------
    f_loc : str
        File path to .pdb file

    Returns
    -------
    symbols, coordinates : (list of str, np.ndarray)
        Atomic symbols and an (n_atoms, 3) float array of coordinates for all
        atoms in the file.

    Example
    -------
    >>> symb, coords = open_pdb('water.pdb')
    >>> symb
    ['H', 'H', 'O']
    >>> coords
    array([[ 9.626,  6.787, 12.673],
           [ 9.626,  8.42 , 12.673],
           [10.203,  7.604, 12.673]])
    """
    with open(f_loc) as f:
        data = f.readlines()
    c = []
    sym = []
    for l in data:
        # Only ATOM/HETATM records carry atom coordinates (PDB columns 1-6).
        if 'ATOM' in l[0:6] or 'HETATM' in l[0:6]:
            # Element symbol lives in columns 77-79 of the record.
            sym.append(l[76:79].strip())
            try:
                # x/y/z coordinates occupy columns 31-55.
                c2 = [float(x) for x in l[30:55].split()]
            except ValueError:
                # Fix: was `except not c2:` which is not a valid exception
                # clause (it raised NameError/TypeError when triggered instead
                # of catching the float-conversion failure).
                print('Please make sure .pdb file is properly formatted.')
                break
            c.append(c2)
    coords = np.array(c)
    return sym, coords
def update_name(name, mapping):
    """Improve the address ``name`` according to the dictionary ``mapping``.

    :param name: street address string to fix
    :param mapping: dict mapping bad street types to their replacements
    :return: corrected name, or ``False`` when no fix could be applied
    """
    m = street_type_re.search(name)
    # Fix: the original called m.group() unconditionally, raising
    # AttributeError when the regex did not match at all.
    if m is None:
        return False
    not_good_type = m.group()
    try:
        # Narrowed from a bare except: only a missing mapping key means
        # "nothing to fix"; other errors should surface.
        return name.replace(not_good_type, mapping[not_good_type])
    except KeyError:
        return False
def print_list_text(img_src, str_list, origin = (0, 0), color = (0, 255, 255), thickness = 2, fontScale = 0.45, y_space = 20):
    """ prints text list in cool way
    Args:
        img_src: `cv2.math` input image to draw text on
        str_list: `list` text rows to draw, one per line
        origin: `tuple` (X, Y) start coordinate; rows advance vertically
        color: `tuple` (R, G, B) color of the text
        thickness: `int` thickness of the text
        fontScale: `float` font scale of the text
        y_space: `int` vertical spacing between rows in pixels
    Returns:
        img_src: `cv2.math` input image with text drawn
    """
    for row_idx, row_text in enumerate(str_list):
        anchor = (origin[0], origin[1] + (y_space * row_idx))
        # First pass draws a thicker black layer so the colored text on the
        # second pass gets an outline.
        for pass_color, pass_thickness in (((0, 0, 0), thickness + 3),
                                           (color, thickness)):
            cv2.putText(img = img_src,
                        text = row_text,
                        org = anchor,
                        fontFace = cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale = fontScale,
                        color = pass_color,
                        thickness = pass_thickness,
                        lineType = cv2.LINE_AA)
    return img_src
def get_extension_modules(config):
    """Handle extension modules"""
    EXTENSION_FIELDS = ("sources",
                        "include_dirs",
                        "define_macros",
                        "undef_macros",
                        "library_dirs",
                        "libraries",
                        "runtime_library_dirs",
                        "extra_objects",
                        "extra_compile_args",
                        "extra_link_args",
                        "export_symbols",
                        "swig_opts",
                        "depends")
    ext_modules = []
    for section in config:
        # 'extension:name' is the current syntax; 'extension=name' is kept
        # for backwards compatibility with the old syntax (don't use it).
        separator = ':' if ':' in section else '='
        labels = [part.strip() for part in section.split(separator, 1)]
        if len(labels) != 2 or labels[0] != 'extension':
            continue
        ext_args = {}
        for field in EXTENSION_FIELDS:
            value = has_get_option(config, section, field)
            # All extension module options besides name can have multiple
            # values
            if not value:
                continue
            value = split_multiline(value)
            if field == 'define_macros':
                # Each macro spec is NAME or NAME=VALUE; the tuple form
                # (name, None) means "define with no value".
                macros = []
                for macro_spec in value:
                    pieces = macro_spec.split('=', 1)
                    if len(pieces) == 1:
                        macros.append((pieces[0].strip(), None))
                    else:
                        macros.append((pieces[0].strip(), pieces[1].strip()))
                value = macros
            ext_args[field] = value
        if ext_args:
            # Fall back to the section label when no explicit name is given.
            if 'name' not in ext_args:
                ext_args['name'] = labels[1]
            ext_modules.append(Extension(ext_args.pop('name'),
                                         **ext_args))
    return ext_modules
def configure_ibgp_vrrp_vxlan(module):
    """
    Method to configure iBGP, VRRP and Vxlan for DCI.
    :param module: The Ansible module to fetch input parameters.
    :return: String describing details of all configurations.
    """
    global CHANGED_FLAG
    output = ''
    cluster_dict_info = find_clustered_switches(module)
    cluster_list = cluster_dict_info[0]
    # Configure iBGP connection between clusters
    for cluster in cluster_list:
        cluster_node1 = cluster[0]
        cluster_node2 = cluster[1]
        vlan_id = module.params['pn_ibgp_vlan']
        vlan_scope = 'local'
        ibgp_ip_range = module.params['pn_ibgp_ip_range']
        # NOTE(review): subnet_count is reset to 0 for every cluster, so every
        # cluster pair ends up with the same two iBGP addresses. Since the
        # VLAN scope is 'local', per-cluster point-to-point reuse may be
        # intentional -- confirm before changing or reusing this logic.
        subnet_count = 0
        # Create local vlans on both cluster nodes.
        output += create_vlan(module, vlan_id, cluster_node1, vlan_scope)
        output += create_vlan(module, vlan_id, cluster_node2, vlan_scope)
        # Split the IP range into its three fixed octets plus last-octet/mask.
        address = ibgp_ip_range.split('.')
        static_part = str(address[0]) + '.' + str(address[1]) + '.'
        static_part += str(address[2]) + '.'
        last_octet = str(address[3]).split('/')
        subnet = last_octet[1]
        ip_count = subnet_count * 4
        node1_ip = static_part + str(ip_count + 1) + '/' + subnet
        node2_ip = static_part + str(ip_count + 2) + '/' + subnet
        subnet_count += 1
        # Get the bgp-as values of cluster nodes.
        third_party_data = module.params['pn_third_party_bgp_data'].replace(' ',
                                                                            '')
        third_party_data = third_party_data.split('\n')
        # NOTE(review): if neither cluster node appears in the CSV data,
        # bgp_as is never bound and its use below raises NameError -- confirm
        # the input is guaranteed to contain both nodes.
        for row in third_party_data:
            row = row.split(',')
            if row[4] == cluster_node1 or row[4] == cluster_node2:
                bgp_as = row[3]
                break
        # Configure iBGP connection.
        output += configure_ibgp_connection(module, cluster_node1, node1_ip,
                                            node2_ip, bgp_as)
        output += configure_ibgp_connection(module, cluster_node2, node2_ip,
                                            node1_ip, bgp_as)
    # Configure VRRP to be used for VTEP HA
    output += configure_vrrp(module)
    # Configure vxlan tunnels
    output += configure_vxlan(module)
    return output
def part_has_modifier(data, part, modifier):
    """Returns true if the modifier is in the given subject/object part

    :param dict data: A PyBEL edge data dictionary
    :param str part: either :data:`pybel.constants.SUBJECT` or :data:`pybel.constants.OBJECT`
    :param modifier: The modifier to look for
    :rtype: bool
    """
    part_data = data.get(part)
    if part_data is None:
        return False
    # A missing modifier never matches, even if `modifier` itself is None.
    found = part_data.get(MODIFIER)
    return found is not None and found == modifier
def unbox_unchecked_bool(stage: ImportStage, value: ir.Value) -> ir.Value:
    """Unboxes an object value to a bool, not checking for success."""
    exc_result_type = d.ExceptionResultType.get()
    bool_type = d.BoolType.get()
    unboxed = d.UnboxOp(exc_result_type, bool_type, value)
    return unboxed.primitive
def step4_pfg(data_input, col, g_list, nfrag):
    """
    Parallel FP-Growth, step 4: shard transactions to group-specific reducers.

    :param data_input: stage file holding the transactions.
    :param col: name of the column containing each transaction (an iterable
        of items).
    :param g_list: single-element list wrapping the pruned group mapping;
        each entry maps an item to an object with ``count`` and ``group``
        attributes.
    :param nfrag: number of output fragments (one per group reducer).
    :return: list of ``nfrag`` lists; fragment ``g`` receives the sorted
        prefixes of each transaction whose suffix item belongs to group ``g``.
    """
    g_list = g_list[0]
    result = [[] for _ in range(nfrag)]
    df = read_stage_file(data_input, col)
    for transaction in df[col].to_numpy():
        # group_list has already been pruned, but item_set hasn't
        item_set = [item for item in transaction if item in g_list]
        # for each transaction, sort item_set by count in descending order
        item_set = sorted(item_set,
                          key=lambda item: g_list[item].count,
                          reverse=True)
        # a list of the groups for each item
        items = [g_list[item].group for item in item_set]
        emitted_groups = set()
        # iterate backwards through the ordered list of items in the transaction
        # for each distinct group, emit the transaction to that group-specific
        # reducer.
        for i, group_id in reversed(list(enumerate(items))):
            # we don't care about length 1 itemsets
            if i == 0:
                continue
            if group_id not in emitted_groups:
                emitted_groups.add(group_id)
                # Emit only the prefix up to and including position i.
                result[group_id].append(item_set[:(i + 1)])
    return result
def camera_to_points_world(camera, robot, points_format='numpy', color_format='channels'):
    """Same as :meth:`camera_to_points`, but converts to the world coordinate
    system given the robot to which the camera is attached.

    Points that have no reading are stripped out.

    Args:
        camera (SimRobotSensor): a CameraSensor-type sensor.
        robot: the robot the camera is mounted on.
        points_format (str): 'numpy', 'native', 'PointCloud', or 'Geometry3D'.
        color_format (str): passed through to :meth:`camera_to_points`.

    Returns:
        The point cloud, in the requested format, expressed in world
        coordinates.

    Raises:
        ValueError: if ``points_format`` is not one of the supported values.
    """
    assert isinstance(camera, SimRobotSensor), "Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor', "Must provide a camera sensor instance"
    # World transform of the sensor frame; get_sensor_xform already accounts
    # for the link the camera is attached to.  (The previous version also
    # parsed the 'link' and 'Tsensor' settings here but never used them.)
    Tworld = get_sensor_xform(camera, robot)
    # Get the points in the local camera frame, then transform to world.
    pts = camera_to_points(camera, points_format, all_points=False, color_format=color_format)
    if points_format == 'numpy':
        Rw = np.array(so3.matrix(Tworld[0]))
        tw = np.array(Tworld[1])
        # Rotate + translate only the xyz columns; color columns are untouched.
        pts[:, 0:3] = np.dot(pts[:, 0:3], Rw.T) + tw
        return pts
    elif points_format == 'native':
        for p in pts:
            p[0:3] = se3.apply(Tworld, p[0:3])
        return pts
    elif points_format == 'PointCloud' or points_format == 'Geometry3D':
        # These types transform in place; fall through to the final return.
        pts.transform(*Tworld)
    else:
        raise ValueError("Invalid format " + str(points_format))
    return pts
def array_size(arr):
    """Return the size of a ``numpy.ndarray``'s data in bytes.

    Uses the array's built-in ``nbytes`` attribute, which is exactly
    ``np.prod(arr.shape) * arr.dtype.itemsize`` (the previous hand-rolled
    computation).

    :param arr: numpy array.
    :return: number of bytes consumed by the array's elements.
    """
    return arr.nbytes
def is_deprecated(image_array, blank_rate):
    """Decide whether a patch should be discarded for being too blank.

    Each channel value equal to 255 counts towards the blank area (averaged
    over the three channels); the patch is deprecated when that fraction of
    the patch reaches ``blank_rate``.
    """
    height, width = image_array.shape[:2]
    # Channel-wise comparison against pure white, averaged over 3 channels.
    blank_fraction = np.sum(image_array == (255, 255, 255)) / 3 / (height * width)
    return bool(blank_fraction >= blank_rate)
def get_one_hot_labels(labels, n_classes):
    """Encode integer class labels as one-hot vectors.

    Parameters:
        labels: integer label tensor of shape (?,) from the dataloader.
        n_classes: total number of classes in the dataset, an integer scalar.

    Returns:
        Tensor of shape (?, n_classes) with a single 1 per row.
    """
    one_hot = F.one_hot(labels, num_classes=n_classes)
    return one_hot
import hashlib


def _file_md5(path):
    """Return the hex MD5 digest of a file, reading it in fixed-size chunks."""
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        # 64 KiB chunks keep memory usage flat for arbitrarily large files.
        for chunk in iter(lambda: f.read(65536), b''):
            md5.update(chunk)
    return md5.hexdigest()


def cmpHash(file1, file2):
    """Compare the hash of two files.

    :param file1: path of the first file.
    :param file2: path of the second file.
    :return: True if the two files have identical MD5 content digests.
    """
    # Chunked reading replaces the previous whole-file f.read() calls.
    return _file_md5(file1) == _file_md5(file2)
def gsfLoadDepthScaleFactorAutoOffset(
    p_mb_ping,
    subrecord_id: c_int,
    reset: c_int,
    min_depth: c_double,
    max_depth: c_double,
    last_corrector,
    p_c_flag,
    precision: c_double,
) -> int:
    """Thin pass-through to the native gsfLoadDepthScaleFactorAutoOffset.

    :param p_mb_ping: POINTER(gsfpy3_09.gsfSwathBathyPing.c_gsfSwathBathyPing)
    :param subrecord_id: c_int
    :param reset: c_int
    :param min_depth: c_double
    :param max_depth: c_double
    :param p_c_flag: POINTER(c_uchar)
    :param precision: c_double
    :return: 0 if successful.  Note that, in the event of a successful call,
        estimated scale factors for each of the beam arrays in the ping will
        be set.
    """
    call_args = (
        p_mb_ping,
        subrecord_id,
        reset,
        min_depth,
        max_depth,
        last_corrector,
        p_c_flag,
        precision,
    )
    return _libgsf.gsfLoadDepthScaleFactorAutoOffset(*call_args)
def preprocess_onnx(img: Image, width: int, height: int, data_type, scale: float, mean: list,
                    stddev: list):
    """Preprocessing function for ONNX imagenet models based on:
    https://github.com/onnx/models/blob/master/vision/classification/imagenet_inference.ipynb

    Args:
        img (PIL.Image): Loaded PIL.Image
        width (int): Target image width
        height (int): Target image height
        data_type: Image datatype (np.uint8 or np.float32)
        scale (float): Scaling factor
        mean: RGB mean values
        stddev: RGB standard deviation

    Returns:
        np.array: Preprocess image as Numpy array
    """
    # Rescale to 256x256 first, then take a centered width x height crop.
    resized = img.resize((256, 256), Image.BILINEAR)
    crop_box = (
        (256 - width) / 2,   # left
        (256 - height) / 2,  # top
        (256 + width) / 2,   # right
        (256 + height) / 2,  # bottom
    )
    cropped = resized.crop(crop_box).convert('RGB')
    pixels = np.array(cropped)
    pixels = np.reshape(pixels, (-1, 3))  # one row per pixel: [RGB][RGB]...
    # Per-channel normalisation.
    pixels = ((pixels / scale) - mean) / stddev
    # NHWC to NCHW conversion: transposing [RGB][RGB]... yields
    # [RRR...][GGG...][BBB...].
    pixels = np.transpose(pixels)
    # Flatten into a 1D tensor and convert to the requested dtype.
    return pixels.flatten().astype(data_type)
import numpy


def boxerFrameStack(framestackpath, parttree, outstack, boxsize, framelist):
    """
    Box out each particle from the sum of the selected frames of a
    (memory-mapped) frame stack and write the boxed images to ``outstack``.
    """
    start_frame = framelist[0]
    nframe = len(framelist)
    apDisplay.printMsg("boxing %d particles from sum of total %d frames starting from frame %d using mmap" % (len(parttree), nframe, start_frame))
    stack = mrc.mmap(framestackpath)
    boxedparticles = []
    for partdict in parttree:
        x1, x2, y1, y2 = getBoxBoundary(partdict, boxsize)
        apDisplay.printDebug(' crop range of (x,y)=(%d,%d) to (%d,%d)' % (x1, y1, x2 - 1, y2 - 1))
        # numpy arrays index as rows, cols -> (y, x), not (x, y)
        summed_box = numpy.sum(stack[tuple(framelist), y1:y2, x1:x2], axis=0)
        boxedparticles.append(summed_box)
    apImagicFile.writeImagic(boxedparticles, outstack)
    return True
def validate_optional_prompt(val, error_msg=None):
    """No-op validator for optional prompts; returns ``val`` unchanged.

    :param val: the raw value entered at the prompt.
    :param error_msg: ignored; kept for signature compatibility with the
        other validators.
    """
    # TODO Should there just be an option in prompt()? If input is non-blank you'll probably still wanna validate it
    return val
def acorr_grouped_df(
        df,
        col = None,
        by = 'date',
        nfft = 'pad',
        func = lambda x: x,
        subtract_mean = 'total',
        norm = 'total',
        return_df = True,
        debias = True,
        **kwargs
        ):
    """Group dataframe and calc autocorrelation for each group separately.

    Returns: mean and std over groups for positive lags only.

    Parameters:
    ===========
    df: pandas.DataFrame, pandas.Series
        input time series. If by is a string, df must include the column
        for which we calculate the autocorr and the one by which we group.
        If by is a series, df can be a series, too.
    col: str, None [optional]
        column with the time series of interest.
    by: str [optional]
        column by which to group. default: 'date'
    nfft: int, str [optional]
        twice the maximal lag measured. default: 'auto'
        'auto': use smallest group size.
        'auto pad > 100': zero pad to segments of length >= 200,
        skip days with fewer events
    func: function [optional]
        function to apply to col before calculating the autocorr.
        default: identity.
    subtract_mean: str [optional]
        what to subtract from the time series before calculating the
        autocorr.
        'total': subtract mean of the whole series from each group
        'group': subtract group mean from each group
        None: subtract nothing
        default: 'total'
    norm: str [optional]
        default: 'total' (normalise mean response to one at lag zero).
        Other values
    debias: bool [optional]
        True: Correct the bias from zero-padding if applicable (default).
        False: Don't debias.
    **kwargs are passed through. see also: acorr, xcorr, xcorr_grouped_df
    """
    # group, allocate, slice
    g = df.groupby(by)
    if not col:
        if (
            is_string_like(by)
            and hasattr(df, 'columns')
            and by in df.columns
        ):
            # we just got two columns, one is group, so it's clear what to do
            col = list(df.columns)
            col.remove(by)
        elif len(df.shape) > 1:
            # unclear what to do
            # NOTE(review): bare `raise ValueError` carries no message --
            # consider adding one that explains `col` must be given.
            raise ValueError
    # determine fft segment size
    nfft, events_required = get_nfft(nfft, g)
    maxlag = int(min(nfft//2, events_required))
    # allocate: one column of lags per group
    acd = np.zeros((maxlag + 1, len(g)))
    # what to subtract
    # NOTE(review): `fdf` is assigned but never used afterwards.
    fdf = None
    if subtract_mean in ('total', 'auto'):
        # subtract the global (whole-series) mean from every group
        subtract = func(df[col]).mean()
        sm = False
    elif subtract_mean in ('group', 'each', True, by):
        # let acorr() subtract each group's own mean
        subtract = 0
        sm = True
    else:
        subtract = 0
        sm = False
    # which norm for each day?
    if norm in ("total", "auto"):
        # calculate covariances for each day, later norm to one giving a corr.
        nd = 'cov'
    else:
        nd = norm
    # do it
    discarded_days = 0
    for i, (gk, gs) in enumerate(g):
        if len(gs) < events_required:
            # this day is too short
            discarded_days += 1
            continue
        else:
            x = np.zeros(maxlag+1)
            # average over minimally overlapping segments
            nit = int(np.ceil(len(gs) / float(nfft)))
            tj = np.unique(np.linspace(0, len(gs)-nfft, nit, dtype=int))
            for j in range(nit):
                x += acorr(
                    func(gs[col][tj[j]:tj[j]+nfft]) - subtract,
                    subtract_mean=sm,
                    norm = nd,
                    nfft = nfft,
                    debias = debias,
                    **kwargs
                )[:maxlag+1]
            acd[:,i] = x / nit
            del x
    # average over groups
    acdm = acd.mean(axis=1)
    acde = acd.std(axis=1)
    n = 1
    if norm in ("total", "auto"):
        # norm to one at lag zero
        n = 1./acdm[0]
    elif discarded_days:
        # rescale the mean to compensate for all-zero discarded columns
        n = len(g) / float(len(g) - discarded_days)
    if discarded_days:
        getLogger(__name__).info(
            "Discarded %i %ss < %i events" % (
                discarded_days, by, events_required
            )
        )
    acdm *= n
    acde *= n
    # done
    if return_df:
        lag = pd.Index(list(range(maxlag+1)), name='lag')
        return pd.DataFrame({
            'acorr': acdm,
            'acorr_std': acde,
        }, index=lag)
    else:
        return acdm, acde
def normalize_pyte(raw_result: str) -> str:
    """Normalize raw terminal output by rendering the final board through a
    terminal emulator.

    Requires the ``pyte`` package (``pip install pyte``); if it is missing,
    an error message is printed and the process exits with status 2.

    pyte does not support the following ANSI escape codes:
        Esc[s -- save cursor position
        Esc[u -- restore cursor position
    and potentially some others.
    """
    try:
        # Import lazily inside the try so a missing dependency yields the
        # friendly message below.  (Previously the import sat at module
        # level, which left this try body empty -- a syntax error -- and
        # made the ImportError handler unreachable.)
        import pyte
    except ImportError:
        print("`pyte` nie jest zainstalowane lub nie jest widoczne dla skryptu")
        sys.exit(2)
    screen = pyte.Screen(100, 30)
    stream = pyte.Stream(screen)
    stream.feed(raw_result)
    # Render the emulator's display, stripping trailing whitespace per row.
    return "\n".join(map(str.strip, screen.display))
def delete(isamAppliance, id, check_mode=False, force=False):
    """Delete a Password Strength rule from the appliance.

    :param isamAppliance: appliance connection object.
    :param id: identifier of the password strength rule.
    :param check_mode: if True, only report whether a change would be made.
    :param force: if True, skip the existence check before deleting.
    """
    should_delete = force is True or _check(isamAppliance, id) is True
    if not should_delete:
        # Rule does not exist and deletion was not forced: no change.
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_delete(
        "Delete a Password Strength",
        "/wga/pwd_strength/{0}".format(id))
def register(style, func=None):
    """Register an implementation for a pinyin style.

    Can be called directly::

        register('echo', echo)

    or used as a decorator::

        @register('echo')
        def echo(pinyin, **kwargs):
            return pinyin
    """
    if func is not None:
        # Direct registration: store the callable and return nothing.
        _registry[style] = func
        return

    def decorator(f):
        _registry[style] = f

        @wraps(f)
        def wrapper(pinyin, **kwargs):
            return f(pinyin, **kwargs)

        return wrapper

    return decorator
def pickoff_image(ap_obs, v2_obj, v3_obj, flux_obj, oversample=1):
    """
    Create an unconvolved image of filled pixel values that have
    been shifted via bilinear interpolation. The image will then
    be convolved with a PSF to create the a focal plane image that
    is the size of the NIRCam pick-off mirror. This image should
    then be cropped to generate the final detector image.

    Returns the tuple (xsci, ysci, image), where xsci and ysci are
    the science coordinates associated with the image.

    Parameters
    ==========
    ap_obs : str
        Name of aperture in which the observation is taking place.
        Necessary to determine pixel locations for stars.
    v2_obj : ndarray
        List of V2 coordiantes of stellar sources
    v3_obj : ndarray
        List of V3 coordinates of stellar sources
    flux_obj : ndarray
        List of fluxes (e-/sec) for each source

    Keyword Args
    ============
    oversample : int
        If set, the returns an oversampled version of the image to
        convolve with PSFs. If set to one, then detector pixels.
    """
    # xpix and ypix locations in science orientation
    ap_siaf = siaf_nrc[ap_obs]
    xpix, ypix = ap_siaf.tel_to_sci(v2_obj, v3_obj)
    x1, x2, y1, y2 = pickoff_xy(ap_obs)
    # Mask all sources that are outside pick-off mirror
    mask = ((xpix > x1) & (xpix < x2 - 1)) & ((ypix > y1) & (ypix < y2 - 1))
    xpix = xpix[mask]
    ypix = ypix[mask]
    src_flux = flux_obj[mask]
    # Create oversized and oversampled image
    ys = int((y2 - y1) * oversample)
    xs = int((x2 - x1) * oversample)
    oversized_image = np.zeros([ys, xs])
    # X and Y detector pixel values
    dstep = 1 / oversample
    xsci = np.arange(x1, x2, dstep)
    ysci = np.arange(y1, y2, dstep)
    # Zero-based (x,y) locations for oversized images
    xvals_os = (xpix - x1) * oversample
    yvals_os = (ypix - y1) * oversample
    # separate into an integers and fractions
    # (np.int was a deprecated alias for the builtin and was removed in
    # NumPy 1.24, so use plain int for the cast)
    intx = xvals_os.astype(int)
    inty = yvals_os.astype(int)
    fracx = xvals_os - intx
    fracy = yvals_os - inty
    # flip negative shift values
    ind = fracx < 0
    fracx[ind] += 1
    intx[ind] -= 1
    ind = fracy < 0
    fracy[ind] += 1
    inty[ind] -= 1
    # Bilinear interpolation of all sources
    val1 = src_flux * ((1 - fracx) * (1 - fracy))
    val2 = src_flux * ((1 - fracx) * fracy)
    val3 = src_flux * ((1 - fracy) * fracx)
    val4 = src_flux * (fracx * fracy)
    # Add star-by-star in case of overlapped indices
    for i, (iy, ix) in enumerate(zip(inty, intx)):
        oversized_image[iy, ix] += val1[i]
        oversized_image[iy + 1, ix] += val2[i]
        oversized_image[iy, ix + 1] += val3[i]
        oversized_image[iy + 1, ix + 1] += val4[i]
    # print("NStars: {}".format(len(intx)))
    return xsci, ysci, oversized_image
def kms_encrypt(value, key, aws_config=None):
    """Encrypt and value with KMS key.

    Args:
        value (str): value to encrypt
        key (str): key id or alias
        aws_config (optional[dict]): aws credentials
            dict of arguments passed into boto3 session
            example:
                aws_creds = {'aws_access_key_id': aws_access_key_id,
                             'aws_secret_access_key': aws_secret_access_key,
                             'region_name': 'us-east-1'}

    Returns:
        str: encrypted cipher text
    """
    session = boto3.session.Session(**(aws_config or {}))
    kms_client = session.client('kms')
    response = kms_client.encrypt(KeyId=key, Plaintext=value)
    # Base64-encode the raw ciphertext blob before returning it as text.
    return n(b64encode(response['CiphertextBlob']))
def pd_expand_json_column(df, json_column):
    """Expand a column of dicts into one DataFrame column per key.

    Based on https://stackoverflow.com/a/25512372

    :param df: source DataFrame.
    :param json_column: a Series of dict values (typically a column of
        ``df``) whose keys become new columns.
    :return: ``df`` with the dict keys expanded into columns and the
        original dict column removed.
    """
    expanded = json_column.apply(
        lambda content: pd.Series(list(content.values()), index=list(content.keys()))
    )
    df = pd.concat([df, expanded], axis=1)
    # Drop the source column by its actual name; the previous version
    # hard-coded 'data', which broke expansion of any other column.
    return df.drop(columns=[json_column.name])
def correct_spellings(text):
    """Replace misspelled words in ``text`` with their best correction.

    Uses the module-level ``spell`` checker to find unknown words and
    substitutes each with its most likely correction; all other words are
    kept as-is.
    """
    words = text.split()
    unknown_words = spell.unknown(words)
    corrected = [spell.correction(word) if word in unknown_words else word
                 for word in words]
    return " ".join(corrected)
def xception_module(inputs,
                    depth_list,
                    skip_connection_type,
                    stride,
                    unit_rate_list=None,
                    rate=1,
                    activation_fn_in_separable_conv=False,
                    regularize_depthwise=False,
                    outputs_collections=None,
                    scope=None):
    """An Xception module.

    The output of one Xception module is equal to the sum of `residual` and
    `shortcut`, where `residual` is the feature computed by three separable
    convolution. The `shortcut` is the feature computed by 1x1 convolution with
    or without striding. In some cases, the `shortcut` path could be a simple
    identity function or none (i.e, no shortcut).

    Note that we replace the max pooling operations in the Xception module with
    another separable convolution with striding, since atrous rate is not properly
    supported in current TensorFlow max pooling implementation.

    Args:
        inputs: A tensor of size [batch, height, width, channels].
        depth_list: A list of three integers specifying the depth values of one
            Xception module.
        skip_connection_type: Skip connection type for the residual path. Only
            supports 'conv', 'sum', or 'none'.
        stride: The block unit's stride. Determines the amount of downsampling of
            the units output compared to its input.
        unit_rate_list: A list of three integers, determining the unit rate for
            each separable convolution in the xception module. Defaults to
            [1, 1, 1] when not provided.
        rate: An integer, rate for atrous convolution.
        activation_fn_in_separable_conv: Includes activation function in the
            separable convolution or not.
        regularize_depthwise: Whether or not apply L2-norm regularization on the
            depthwise convolution weights.
        outputs_collections: Collection to add the Xception unit output.
        scope: Optional variable_scope.

    Returns:
        The Xception module's output.

    Raises:
        ValueError: If depth_list and unit_rate_list do not contain three elements,
            or if stride != 1 for the third separable convolution operation in the
            residual path, or unsupported skip connection type.
    """
    if len(depth_list) != 3:
        raise ValueError('Expect three elements in depth_list.')
    if unit_rate_list:
        if len(unit_rate_list) != 3:
            raise ValueError('Expect three elements in unit_rate_list.')
    else:
        # Previously a None unit_rate_list crashed below at
        # rate * unit_rate_list[i]; default to a neutral multi-grid.
        unit_rate_list = [1, 1, 1]

    with tf.variable_scope(scope, 'xception_module', [inputs]) as sc:
        residual = inputs

        def _separable_conv(features, depth, kernel_size, depth_multiplier,
                            regularize_depthwise, rate, stride, scope):
            # Optionally apply the activation inside the separable conv
            # (otherwise ReLU is applied to the input only).
            if activation_fn_in_separable_conv:
                activation_fn = tf.nn.relu
            else:
                activation_fn = None
                features = tf.nn.relu(features)
            return separable_conv2d_same(features,
                                         depth,
                                         kernel_size,
                                         depth_multiplier=depth_multiplier,
                                         stride=stride,
                                         rate=rate,
                                         activation_fn=activation_fn,
                                         regularize_depthwise=regularize_depthwise,
                                         scope=scope)

        # Three separable convolutions; only the last one may stride.
        for i in range(3):
            residual = _separable_conv(residual,
                                       depth_list[i],
                                       kernel_size=3,
                                       depth_multiplier=1,
                                       regularize_depthwise=regularize_depthwise,
                                       rate=rate * unit_rate_list[i],
                                       stride=stride if i == 2 else 1,
                                       scope='separable_conv' + str(i + 1))
        if skip_connection_type == 'conv':
            # Projection shortcut to match depth/stride.
            shortcut = slim.conv2d(inputs,
                                   depth_list[-1],
                                   [1, 1],
                                   stride=stride,
                                   activation_fn=None,
                                   scope='shortcut')
            outputs = residual + shortcut
        elif skip_connection_type == 'sum':
            outputs = residual + inputs
        elif skip_connection_type == 'none':
            outputs = residual
        else:
            raise ValueError('Unsupported skip connection type.')
        return slim.utils.collect_named_outputs(outputs_collections,
                                                sc.name,
                                                outputs)
def greatest_common_divisor(a: int, b: int) -> int:
    """
    Euclid's Lemma : d divides a and b, if and only if d divides a-b and b
    Euclid's Algorithm

    >>> greatest_common_divisor(7,5)
    1

    Note : In number theory, two integers a and b are said to be relatively prime,
    mutually prime, or co-prime if the only positive integer (factor) that divides
    both of them is 1 i.e., gcd(a,b) = 1.

    >>> greatest_common_divisor(121, 11)
    11

    >>> greatest_common_divisor(5, 0)
    5
    """
    # Standard iterative Euclid.  The `while b` guard handles the order of
    # the operands automatically and makes gcd(x, 0) == x well-defined (the
    # previous version raised ZeroDivisionError when b == 0).
    while b:
        a, b = b, a % b
    return a
def bfmt(num, size=8):
    """Return ``num`` as a zero-padded binary string of length ``size``.

    Values greater than ``2**size`` are shifted right by ``size`` bits and
    masked down to ``size`` bits first.  Inputs that cannot be formatted in
    binary (e.g. floats) are returned unchanged.
    """
    if num > 2 ** size:
        shifted = (num >> size) & ((1 << size) - 1)
        return f"{shifted:0{size}b}"
    try:
        return f"{num:0{size}b}"
    except ValueError:
        return num
from datetime import datetime


def text_to_NAF(text,
                nlp,
                dct,
                layers,
                title=None,
                uri=None,
                language='en',
                layer_to_attributes_to_ignore=dict(),
                naf_version='v3',
                cdata=True,
                replace_hidden_characters=False,
                map_udpos2naf_pos=False,
                add_mws=True,
                dtd_validation=False):
    """
    Run spaCy over ``text`` and return an xml object containing the NAF.
    """
    # Optionally strip hidden characters; the translation table preserves
    # string length, which the assertion below guarantees.
    text_to_use = text.translate(hidden_table) if replace_hidden_characters else text
    assert len(text) == len(text_to_use)

    dct_correct_format = time_in_correct_format(dct)
    # Record wall-clock processing time around the spaCy call.
    start_time = time_in_correct_format(datetime.now())
    doc = nlp(text_to_use)
    end_time = time_in_correct_format(datetime.now())

    # Identify the model and versions used for provenance in the NAF header.
    meta = nlp.meta
    model_name = f'spaCy-model_{meta["lang"]}_{meta["name"]}'
    model_version = f'spaCy_version-{spacy.__version__}__model_version-{meta["version"]}'

    return naf_from_doc(doc=doc,
                        dct=dct_correct_format,
                        start_time=start_time,
                        end_time=end_time,
                        modelname=model_name,
                        modelversion=model_version,
                        naf_version=naf_version,
                        language=language,
                        title=title,
                        uri=uri,
                        layers=layers,
                        add_mws=add_mws,
                        cdata=cdata,
                        layer_to_attributes_to_ignore=layer_to_attributes_to_ignore,
                        map_udpos2naf_pos=map_udpos2naf_pos,
                        dtd_validation=dtd_validation)
from typing import Optional


def getContextRect(
        context: Context,
        obj: Optional[TextContainerObject] = None
) -> Optional[locationHelper.RectLTRB]:
    """Gets a rectangle for the specified context, or None if unavailable."""
    if context in (Context.FOCUS, Context.NAVIGATOR):
        getter = api.getFocusObject if context == Context.FOCUS else api.getNavigatorObject
        return getObjectRect(obj or getter())
    if context == Context.REVIEW:
        return getReviewRect()
    if context in (Context.BROWSEMODE, Context.CARET):
        caret = obj or api.getCaretObject()
        # BROWSEMODE wants a cursor manager caret; CARET wants any other.
        wants_cursor_manager = context == Context.BROWSEMODE
        if api.isCursorManager(caret) == wants_cursor_manager:
            return getCaretRect(obj=caret)
        return None
    if context == Context.MOUSE:
        return getMouseRect()
    return None
def spans_to_binary(spans, length=None):
    """
    Converts spans to a binary array indicating whether each character is in the span.

    Args:
        spans (list of lists of two ints): Spans.
        length (int, optional): Output length; defaults to the maximum span bound.

    Returns:
        np array [length]: Binarized spans.
    """
    if length is None:
        length = np.max(spans)
    binary = np.zeros(length)
    for span in spans:
        binary[span[0]:span[1]] = 1
    return binary
def check_dataset_access_permission(view_func):
    """
    Decorator ensuring that the user has access to dataset.

    Arg: 'dataset' (an id in the view kwargs).
    Return: the wrapped view receives the full Dataset object in place of
    the id, or an exception is raised if access is denied.
    """
    def decorate(request, *args, **kwargs):
        dataset_id = kwargs.get('dataset')
        if dataset_id is not None:
            # Replace the id with the accessible Dataset object (raises if
            # the user may not access it).
            kwargs['dataset'] = Dataset.objects.is_accessible_or_exception(request, dataset_id)
        return view_func(request, *args, **kwargs)
    return wraps(view_func)(decorate)
def check_barcode_is_off(alignment, tags, log=None):
    """
    Report whether the barcode was recognised with soft clipping.

    :param alignment: the read
    :param tags: alignment tags as dict
    :param log: optional logger; a mis-barcoded read is counted via
        ``log.misbar``.
    :return: True when the read has an RG tag and a non-zero 'bm' tag.
    """
    if 'RG' not in tags:
        return False
    if tags['bm'] == '0':
        return False
    if log:
        log.misbar(alignment)
    return True
def flatten_all_lists_in_dict(obj):
    """Recursively flatten every list value found inside ``obj`` (dicts are
    updated in place and returned).

    >>> flatten_all_lists_in_dict({1: [[2], [3, {5: [5, 6]}]]})
    {1: [2, 3, {5: [5, 6]}]}
    """
    if isinstance(obj, dict):
        # Mutate in place so callers holding a reference see the change.
        for key in obj:
            obj[key] = flatten_all_lists_in_dict(obj[key])
        return obj
    if isinstance(obj, list):
        return [flatten_all_lists_in_dict(item) for item in flatten(obj)]
    return obj
def already_visited(string):
    """
    Check whether a subroutine call or definition has already been visited
    by the script in another instance.

    A call is considered visited when the text after the first '(' -- with
    spaces and further '(' characters removed -- starts with 'gr'.

    :param string: The call or definition of a subroutine/function
    :return: a boolean indicating if it has been visited already or not
    """
    after_paren = string.partition('(')[2]
    cleaned = after_paren.replace(' ', '').replace('(', '')
    return cleaned[:2] == 'gr'
def series_to_dict(ts, cat=None, dynamic_feat=None):
    """Encode a pandas.Series time series as a dictionary.

    ts -- a pandas.Series object with the target time series
    cat -- an integer indicating the time series category
    dynamic_feat -- optional dynamic features to attach

    Return value: a dictionary
    """
    encoded = {"start": str(ts.index[0]), "target": encode_target(ts)}
    if cat is not None:
        encoded["cat"] = cat
    if dynamic_feat is not None:
        encoded["dynamic_feat"] = dynamic_feat
    return encoded
def expected_loss_t(u_m, lgd, ead, new):
    """Total expected loss, reduced over the first two axes.

    Shape (K,)"""
    per_group = expected_loss_g_i_t(u_m, lgd, ead, new)
    # Collapse the group and obligor axes, leaving the time axis.
    return np.sum(np.sum(per_group, axis=0), axis=0)
def tool_pred_class_label(log_likelyhood, cutoff=0):
    """
    Infer a binary class label from a log-likelihood score.

    Args:
        log_likelyhood: the log-likelihood value.
        cutoff: decision threshold (default 0).
    Returns:
        1 when the score exceeds cutoff + EPSLONG, otherwise 0.
    """
    return 1 if log_likelyhood > cutoff + EPSLONG else 0
from typing import Sequence
import itertools


def select_polymorph(polymorphs, args):
    """Determine the polymorphic signature that will match a given argument list.

    This is the mechanism used to reconcile Java's strict-typing polymorphism with
    Python's unique-name, weak typing polymorphism. When invoking a method on the
    Python side, the number and types of the arguments provided are used to determine
    which Java method will be invoked.

    polymorphs should be a dictionary, keyed by the JNI signature of the arguments
    expected by the method. The values in the dictionary are not used; this method
    is only used to determine, which key should be used.

    args is a list of arguments that have been passed to invoke the method.

    Returns a 2-tuple:
    * match_types - the type list that was matched. This is a list of individual
      type signatures; not in string form like a JNI signature, but as a tuple
      where each element is the type for a particular argument (i.e.,
      (b'I', b'Ljava/lang/String;', b'Z'), not b'ILjava/lang/String;Z').
    * polymorph - the value from the input polymorphs that matched. Equivalent
      to polymorphs[b''.join(match_types)]

    Raises KeyError (with the literal signature of the provided arguments) when
    no interpretation of the arguments matches a known polymorph.
    """
    # For each argument, build the list of JNI type codes it could satisfy,
    # ordered from most to least specific.
    arg_types = []
    if len(args) == 0:
        arg_sig = b''
        options = [[]]
    else:
        for arg in args:
            if isinstance(arg, (bool, jboolean)):
                arg_types.append([b'Z'])
            elif isinstance(arg, jbyte):
                arg_types.append([b'B'])
            elif isinstance(arg, jchar):
                arg_types.append([b'C'])
            elif isinstance(arg, jshort):
                arg_types.append([b'S'])
            elif isinstance(arg, jint):
                arg_types.append([b'I'])
            elif isinstance(arg, int):
                # A plain Python int may match int, long or short parameters.
                arg_types.append([b'I', b'J', b'S'])
            elif isinstance(arg, jlong):
                arg_types.append([b'J'])
            elif isinstance(arg, jfloat):
                arg_types.append([b'F'])
            elif isinstance(arg, float):
                # A plain Python float may match double or float parameters.
                arg_types.append([b'D', b'F'])
            elif isinstance(arg, jdouble):
                arg_types.append([b'D'])
            elif isinstance(arg, str):
                # Strings can satisfy any of String's supertypes/interfaces.
                arg_types.append([
                    b"Ljava/lang/String;",
                    b"Ljava/io/Serializable;",
                    b"Ljava/lang/Comparable;",
                    b"Ljava/lang/CharSequence;",
                    b"Ljava/lang/Object;",
                ])
            # If char arrays are useful to handle, add them later. Handle all other types of primitive type arrays.
            elif isinstance(arg, bytes):
                arg_types.append([b'[B'])
            elif isinstance(arg, Sequence) and len(arg) > 0:
                # If arg is an iterable of all the same basic numeric type, then
                # an array of that Java type can work.
                if isinstance(arg[0], (bool, jboolean)):
                    if all((isinstance(item, (bool, jboolean)) for item in arg)):
                        arg_types.append([b'[Z'])
                    else:
                        raise ValueError("Convert entire list to bool/jboolean to create a Java boolean array")
                elif isinstance(arg[0], (int, jint)):
                    if all((isinstance(item, (int, jint)) for item in arg)):
                        arg_types.append([b'[I'])
                    else:
                        raise ValueError("Unable to treat all data in list as integers")
                elif isinstance(arg[0], (float, jfloat, jdouble)):
                    if all((isinstance(item, (float, jfloat, jdouble)) for item in arg)):
                        arg_types.append([b'[D', b'[F'])
                    else:
                        raise ValueError("Unable to treat all data in list as floats/doubles")
                else:
                    raise ValueError("Unable convert sequence into array of Java primitive types")
            elif isinstance(arg, (JavaInstance, JavaProxy)):
                arg_types.append(arg.__class__.__dict__['_alternates'])
            elif isinstance(arg, JavaNull):
                arg_types.append([arg._signature])
            else:
                raise ValueError("Unknown argument type", arg, type(arg))
        # Literal signature (first choice per argument), used for the error.
        arg_sig = b''.join(t[0] for t in arg_types)
        # Cartesian product of the per-argument candidates, in preference order.
        options = list(itertools.product(*arg_types))
    # Try all the possible interpretations of the arguments
    # as polymorphic forms.
    for option in options:
        try:
            return option, polymorphs[b''.join(option)]
        except KeyError:
            pass
    raise KeyError(arg_sig)
def logout():
    """Log user out by clearing the session, then redirect to /main."""
    session.clear()  # Forget any user_id
    return redirect("/main")
import pathlib
def get_stem_name(file_name: pathlib.Path | str | None) -> str:
"""Get the stem name from a file name.
Args:
file_name (pathlib.Path | str | None): File name or file path.
Returns:
str: Stem name.
"""
if file_name is None:
return ""
if isinstance(file_name, str):
file_name = pathlib.Path(file_name)
return file_name.stem | 01bab045f2c54aedf848922550ae241c9ddf8bce | 3,627,868 |
def getSingleIndexedParamValue(request, param_name, values=()):
    """Returns a value indexed by a query parameter in the HTTP request.

    Args:
        request: the Django HTTP request object
        param_name: name of the query parameter in the HTTP request
        values: list (or tuple) of ordered values; one of which is
            retrieved by the index value of the param_name argument in
            the HTTP request

    Returns:
        None if the query parameter was not present, was not an integer, or
        was an integer that is not a valid [0..len(values)-1] index into
        the values list.  Otherwise, returns values[int(param_name value)].
    """
    raw_index = request.GET.get(param_name)
    if isinstance(raw_index, (tuple, list)):
        # keep only the first argument if multiple are present
        raw_index = raw_index[0]
    try:
        # a missing parameter maps to an invalid index (no message)
        index = int(raw_index) if raw_index is not None else -1
    except ValueError:
        # ignore bogus parameter values, so return None (no message)
        return None
    if not 0 <= index < len(values):
        # index out of range, so return None (no value)
        return None
    return values[index]
def actions(board):
    """
    Returns set of all possible actions (i, j) available on the board.
    """
    # Terminal states admit no further moves.
    if terminal(board):
        return "game over"
    # Every empty cell is a legal move.
    return [[row, col]
            for row in range(3)
            for col in range(3)
            if board[row][col] is None]
def requestLanguage(request, try_user=True):
    """
    Return the user interface language for this request.

    Resolution order: the registered user's stored preference (when
    *try_user* is set), then the browser's accepted languages, then the
    wiki's configured default language (if installed), and finally
    English.  This should be called once per request; afterwards the
    value is available on the request object's lang attribute.

    @param request: the request object
    @param try_user: try getting language from request.user
    @rtype: string
    @return: ISO language code, e.g. 'en'
    """
    # registered users may have stored an explicit preference
    if try_user and request.user.valid and request.user.language:
        return request.user.language
    # otherwise honour the browser's accepted languages, when usable
    lang = get_browser_language(request)
    if lang:
        return lang
    # fall back to the wiki default, provided it is actually available
    available = wikiLanguages() or ["en"]
    if request.cfg.language_default in available:
        return request.cfg.language_default
    # if everything else fails, read the manual... or return 'en'
    return 'en'
from typing import Type


def register_json_formatter(cls: Type, to_dict_method_name: str = 'to_dict'):
    """Register an IPython rich-display JSON formatter for *cls*.

    When running inside an IPython session, instances of *cls* are
    rendered as ``application/json`` by calling their serialization
    method.  Outside IPython — or when IPython is not installed — the
    call is a no-op.

    :param cls: class whose instances should be JSON-rendered
    :param to_dict_method_name: name of the zero-argument method that
        converts an instance to a JSON-serializable dict
    :raises ValueError: if *cls* does not define a callable
        *to_dict_method_name* method
    """
    method = getattr(cls, to_dict_method_name, None)
    if not callable(method):
        raise ValueError(f'{cls} must define a {to_dict_method_name}() method')
    try:
        # Imported lazily so that environments without IPython still work;
        # the module-level import previously made this except clause dead.
        import IPython
        import IPython.display  # noqa: F401  (loads the display machinery)
        if IPython.get_ipython() is not None:
            def obj_to_dict(obj):
                return getattr(obj, to_dict_method_name)()

            ipy_formatter = IPython.get_ipython().display_formatter.formatters['application/json']
            ipy_formatter.for_type(cls, obj_to_dict)
    except ImportError:
        pass
def get_best_muscle_hits(subject_seq, query_aln, threshold, use_shorter=True):
    """Return the subset of query_aln whose alignment score with
    subject_seq is above threshold.

    - subject_seq is the sequence aligned against each query_aln entry.
    - query_aln is a dict (or Alignment) mapping label -> candidate seq.
    - threshold is an alignment score (fraction of shared aligned length)
      that kept sequences must exceed.
    - use_shorter (default=True) selects whether the shorter sequence's
      length normalizes the score.  NOTE(review): this flag is accepted
      but never consulted here — presumably handled inside
      get_aligned_muscle; confirm before relying on it.
    """
    passing = {}
    for label, candidate in query_aln.items():
        _, _, frac_same = get_aligned_muscle(subject_seq, candidate)
        if frac_same > threshold:
            passing[label] = candidate
    return passing
def _hrf_d_basis(d, t_r, n_times_atom):
    """Private helper defining the double-gamma HRF 2/3-atom basis.

    Parameters
    ----------
    d : int
        Number of atoms in the HRF basis; must be 2 or 3.
    t_r : float
        Time of Repetition (fMRI acquisition temporal resolution).
    n_times_atom : int
        Number of time points per basis atom.

    Returns
    -------
    hrf_basis : array, shape (d, n_times_atom)
        Stacked HRF basis atoms.
    """
    assert d in [2, 3], "HRF basis can only have 2 or 3 atoms"
    # total duration covered by one atom, in seconds
    dur = t_r * n_times_atom
    # h_1: the canonical double-gamma HRF itself
    h_1 = _double_gamma_hrf(delta=1.0, t_r=t_r, dur=dur)[0]
    # h_2: finite difference of the HRF between onset 0 and onset t_r
    # (i.e. a discrete derivative of the HRF w.r.t. onset time)
    h_2_ = _double_gamma_hrf(delta=1.0, t_r=t_r, dur=dur, onset=0.0)[0]
    h_2__ = _double_gamma_hrf(delta=1.0, t_r=t_r, dur=dur, onset=t_r)[0]
    # h3 is derived w.r.t p_disp variable... can't used precomputed fonction
    # (rebuilt here on a fine grid with slightly perturbed parameters,
    # then subsampled back to the t_r grid)
    t = np.linspace(0, dur, int(float(dur) / DT))
    peak = _gamma_pdf(t, P_DELAY/1.001, loc=DT/1.001)
    undershoot = _gamma_pdf_hrf_undershoot(t)
    h_3_ = (peak - P_U_RATIO * undershoot)[::int(t_r/DT)]
    h_2 = h_2_ - h_2__
    # finite difference w.r.t. the dispersion perturbation (epsilon = 0.001,
    # matching the 1.001 scaling above)
    h_3 = (h_1 - h_3_) / 0.001
    # trim/pad every atom to exactly n_times_atom samples
    h_1 = check_len_hrf(h_1, n_times_atom)
    h_2 = check_len_hrf(h_2, n_times_atom)
    h_3 = check_len_hrf(h_3, n_times_atom)
    if d == 2:
        return np.c_[h_1, h_2].T
    else:
        return np.c_[h_1, h_2, h_3].T | 3736c4bdc852d118901789fb24b450e5d3666d84 | 3,627,874
def parse_argparser_transport_selector(args):
    """Return a transport loop as specified from the argparse args.

    The transport implementation is imported lazily inside each branch
    so only the selected backend's dependencies are needed; the previous
    module-level imports also shadowed one another and left the branch
    bodies empty (a syntax error).

    :param args: argparse namespace with a ``transport`` attribute,
        one of ``'ascii'`` or ``'firmata'``.
    :raises NotImplementedError: for an unknown transport name.
    """
    if args.transport == 'ascii':
        from lhrhost.messaging.transport.ascii import transport_loop
    elif args.transport == 'firmata':
        from lhrhost.messaging.transport.firmata import transport_loop
    else:
        raise NotImplementedError(
            'Unknown transport-layer implementation: {}'.format(args.transport)
        )
    return transport_loop
from typing import Optional
import os
def gsea_results_to_filtered_df(
    dataset,
    kegg_manager: Optional[bio2bel_kegg.Manager] = None,
    reactome_manager: Optional[bio2bel_reactome.Manager] = None,
    wikipathways_manager: Optional[bio2bel_wikipathways.Manager] = None,
    p_value: Optional[float] = None,
    absolute_nes_filter=None,
    geneset_set_filter_minimum_size=None,
    geneset_set_filter_maximum_size=None
):
    """Load the GSEA result tables for *dataset* and return filtered frames.

    Returns a 5-tuple of DataFrames: (KEGG, Reactome, WikiPathways,
    merged-with-filters, merged-without-filters).  The last element is
    the merged table with no significance/size filters applied, for use
    as an unfiltered reference.
    """
    # keyword arguments shared by every filter_gsea_results call
    manager_kwargs = dict(
        kegg_manager=kegg_manager,
        reactome_manager=reactome_manager,
        wikipathways_manager=wikipathways_manager,
    )
    # significance / effect-size / gene-set-size filters
    filter_kwargs = dict(
        p_value=p_value,
        absolute_nes_filter=absolute_nes_filter,
        geneset_set_filter_minimum_size=geneset_set_filter_minimum_size,
        geneset_set_filter_maximum_size=geneset_set_filter_maximum_size,
    )

    def _load(prefix, database, apply_filters=True):
        """Load one GSEA result file, optionally applying the filters."""
        path = os.path.join(GSEA, database, f'{prefix}_{dataset}.tsv')
        kwargs = {**manager_kwargs, **filter_kwargs} if apply_filters else manager_kwargs
        return filter_gsea_results(path, database, **kwargs)

    kegg_pathway_df = _load('kegg', KEGG)
    reactome_pathway_df = _load('reactome', REACTOME)
    wikipathways_pathway_df = _load('wikipathways', WIKIPATHWAYS)
    merged_pathway_df = _load('merge', MPATH)
    # Merged pathway dataframe without applying any filters
    # (TODO retained from the original: should filters apply here too?)
    merged_total_df = _load('merge', MPATH, apply_filters=False)
    return (
        kegg_pathway_df,
        reactome_pathway_df,
        wikipathways_pathway_df,
        merged_pathway_df,
        merged_total_df,
    )
def create_network(request, id_vlan="0", sf_number='0', sf_name='0', sf_environment='0', sf_nettype='0', sf_subnet='0', sf_ipversion='0', sf_network='0', sf_iexact='0', sf_acl='0'):
    """Activate (deploy) the networks selected in the POSTed form.

    Sets 'active = 1' for each selected IPv4/IPv6 network of the given
    VLAN, then applies the corresponding ACLs to the networks' equipment.
    The sf_* parameters are only search-filter state echoed back into the
    redirect URL.  Python 2 syntax ('except X, e') — do not run under
    Python 3 without porting.
    """
    try:
        if request.method == 'POST':
            form = CreateForm(request.POST)
            # Get user
            auth = AuthSession(request.session)
            client = auth.get_clientFactory()
            # tracks whether any selected network was already active
            networks_activated = False
            # tracks whether every selected network was already active
            networks_was_activated = True
            equipments_ipv4 = list()
            equipments_ipv6 = list()
            if form.is_valid():
                # If vlan with parameter id_vlan don't exist,
                # VlanNaoExisteError exception will be called
                vlan = client.create_vlan().get(id_vlan)
                environment = client.create_ambiente().buscar_por_id(
                    vlan['vlan']["ambiente"]).get("ambiente")
                # All ids to be activated
                ids = split_to_array(form.cleaned_data['ids_create'])
                for id in ids:
                    # each id is formatted as "<network_id>-<v4|v6>"
                    value = id.split('-')
                    id_net = value[0]
                    network_type = value[1]
                    if network_type == 'v4':
                        net = client.create_network().get_network_ipv4(id_net)
                    else:
                        net = client.create_network().get_network_ipv6(id_net)
                    if net['network']['active'] == 'True':
                        networks_activated = True
                    else:
                        networks_was_activated = False
                        # collect the network's equipment and deploy it
                        if network_type == 'v4':
                            equipments_ipv4.extend(
                                list_equipment_by_network_ip4(client, id_net))
                            client.create_api_network_ipv4().deploy(id_net)
                        else:
                            equipments_ipv6.extend(
                                list_equipment_by_network_ip6(client, id_net))
                            client.create_api_network_ipv6().deploy(id_net)
                # apply ACLs once, over all equipment collected above
                apply_acl_for_network_v4(
                    request, client, equipments_ipv4, vlan, environment)
                apply_acl_for_network_v6(
                    request, client, equipments_ipv6, vlan, environment)
                if networks_activated is True:
                    messages.add_message(
                        request, messages.ERROR, network_ip_messages.get("networks_activated"))
                if networks_was_activated is False:
                    messages.add_message(
                        request, messages.SUCCESS, network_ip_messages.get("net_create_success"))
            else:
                # invalid form: nothing was selected (or the VLAN itself)
                vlan = client.create_vlan().get(id_vlan)
                if vlan['vlan']['ativada'] == 'True':
                    messages.add_message(
                        request, messages.ERROR, error_messages.get("vlan_select_one"))
                else:
                    messages.add_message(
                        request, messages.ERROR, error_messages.get("select_one"))
    except VlanNaoExisteError, e:
        logger.error(e)
        return redirect('vlan.search.list')
    except Exception, e:
        logger.error(e)
        messages.add_message(request, messages.ERROR, e)
    return HttpResponseRedirect(reverse('vlan.list.by.id', args=[id_vlan, sf_number, sf_name, sf_environment, sf_nettype, sf_subnet, sf_ipversion, sf_network, sf_iexact, sf_acl])) | 017b3f8bf765e7f5a4aeea8c24c28dd3cd37f539 | 3,627,877
from typing import List
from typing import Union
import copy
def _order_fun(term: List[List[int]], weight: Union[float, complex] = 1.0):
    """
    Return a normal ordered single term of the fermion operator.

    Normal ordering places creation operators left of annihilation
    operators, each group sorted by descending mode index, while
    accounting for fermionic anti-commutation (each swap flips the
    sign; contractions spawn extra terms).

    NOTE(review): each entry of *term* appears to be a
    [mode_index, is_creation_flag] pair — confirm with the caller.

    Returns (ordered_terms, ordered_weights): parallel lists of the
    resulting terms and their (sign-adjusted) weights.
    """
    # sign factor applied on every operator exchange
    parity = -1
    term = copy.deepcopy(list(term))
    weight = copy.copy(weight)
    ordered_terms = []
    ordered_weights = []
    # the arguments given to this function will be transformed in a normal ordered way
    # loop through all the operators in the single term from left to right and order them
    # by swapping the term operators (and transform the weights by multiplying with the parity factors)
    # (insertion-sort style: bubble operator i leftwards to its place)
    for i in range(1, len(term)):
        for j in range(i, 0, -1):
            right_term = term[j]
            left_term = term[j - 1]
            # exchange operators if creation operator is on the right and annihilation on the left
            if right_term[1] and not left_term[1]:
                term[j - 1] = right_term
                term[j] = left_term
                weight *= parity
                # if same indices switch order (creation on the left), remember a a^ = 1 + a^ a
                if right_term[0] == left_term[0]:
                    # contraction: drop the operator pair and recurse on the rest
                    new_term = term[: (j - 1)] + term[(j + 1) :]
                    # ad the processed term
                    o, w = _order_fun(tuple(new_term), parity * weight)
                    ordered_terms += o
                    ordered_weights += w
            # if we have two creation or two annihilation operators
            elif right_term[1] == left_term[1]:
                # If same two Fermionic operators are repeated,
                # evaluate to zero.
                if parity == -1 and right_term[0] == left_term[0]:
                    return ordered_terms, ordered_weights
                # swap if same type but order is not correct
                elif right_term[0] > left_term[0]:
                    term[j - 1] = right_term
                    term[j] = left_term
                    weight *= parity
    ordered_terms.append(term)
    ordered_weights.append(weight)
    return ordered_terms, ordered_weights | c4e5d5fc748633aa601effb137b9c74d4a6252a4 | 3,627,878
def validate(number):
    """Validate the given RNC and return its compact form.

    Raises InvalidFormat, InvalidLength or InvalidChecksum when the
    number fails the corresponding check; whitelisted numbers bypass
    the length and checksum checks.
    """
    compacted = compact(number)
    if not compacted.isdigit():
        raise InvalidFormat()
    if compacted in whitelist:
        return compacted
    if len(compacted) != 9:
        raise InvalidLength()
    expected = calc_check_digit(compacted[:-1])
    if compacted[-1] != expected:
        raise InvalidChecksum()
    return compacted
def bindwith(*mappings, **kwargs):
    """Bind variables into a function's outer scope without calling it.

    Returns a decorator that wraps its target in a scoped function whose
    outer scope contains *mappings* followed by the keyword bindings.

    >>> @bindwith(cheez='cheddar')
    ... def makez_cheezburger():
    ...     bun = ...
    ...     patty = ...
    ...     cheezburger = [bun, patty, cheez, bun]
    >>> makez_cheezburger.outer_scope['cheez']
    'cheddar'
    >>> haz_cheezburger = makez_cheezburger()
    >>> 'cheddar' in haz_cheezburger['cheezburger']
    True

    See Also
    --------
    callwith
    """
    def decorator(func, *, use_closures=True, use_globals=True):
        return scoped_function(
            func,
            *mappings,
            kwargs,
            use_closures=use_closures,
            use_globals=use_globals,
        )
    return decorator
def _call(calls):
"""Make final call"""
final_call = ''
if calls['is_hiv'] == 'No':
final_call = 'NonHIV'
return final_call
if calls['deletion'] == 'Yes':
final_call = 'Large Deletion'
if calls['inversion'] == 'Yes':
final_call += ' with Internal Inversion'
elif calls['hypermut'] == 'Yes':
final_call += ' with Hypermut'
return final_call
if calls['inversion'] == 'Yes':
final_call = 'Internal Inversion'
return final_call
if calls['hypermut'] == 'Yes':
final_call = 'Hypermut'
return final_call
if calls['psc'] == 'Yes':
final_call = 'Premature Stop Codon'
return final_call
if calls['defect'] == 'Yes' and calls['primer'] == 'Yes':
final_call = "5' defect"
return final_call
if calls['primer'] == 'No':
final_call = 'Inferred Intact'
return final_call
return 'Intact' | c5e293255911cfdb16a73a026a13b7a394ae71cc | 3,627,881 |
from typing import OrderedDict
def load_madminer_settings(file_name: str, include_nuisance_benchmarks: bool) -> tuple:
    """
    Load the complete set of MadMiner settings from an HDF5 data file.

    Parameters
    ----------
    file_name: str
        HDF5 file name to load the settings from
    include_nuisance_benchmarks: bool
        Whether or not to filter out the nuisance benchmarks

    Returns
    -------
    analysis_params: OrderedDict
    benchmarks: OrderedDict
    benchmark_nuisance_flags: list
    morphing_components: numpy.ndarray
    morphing_matrix: numpy.ndarray
    observables: OrderedDict
    num_samples: int
    systematics: OrderedDict
    ref_benchmark: str
    nuisance_params: OrderedDict
    num_signal_events: numpy.ndarray
    num_background_events: int
    fin_differences: dict
    fin_diff_epsilon: float
    """
    # Raw sections of the HDF5 file
    analysis_params = _load_analysis_params(file_name)
    nuisance_params = _load_nuisance_params(file_name)
    b_names, b_values, b_nuisance_flags, b_ref_flags = _load_benchmarks(file_name)
    morphing_components, morphing_matrix = _load_morphing(file_name)
    fd_base_names, fd_shift_matrices, fin_diff_epsilon = _load_finite_diffs(file_name)
    obs_names, obs_defs = _load_observables(file_name)
    num_signal_events, num_background_events = _load_samples_summary(file_name)
    sample_observations, _, _ = _load_samples(file_name)
    s_names, s_types, s_values, s_scales = _load_systematics(file_name)

    # Benchmarks, optionally excluding the nuisance ones
    benchmarks = OrderedDict(
        (name, Benchmark.from_params(name, analysis_params.keys(), matrix))
        for name, matrix, nuisance_flag in zip(b_names, b_values, b_nuisance_flags)
        if not (include_nuisance_benchmarks is False and nuisance_flag is True)
    )

    # Observables keyed by name
    observables = OrderedDict(
        (name, Observable(name, definition))
        for name, definition in zip(obs_names, obs_defs)
    )

    # Systematics, with type/scale strings parsed into enums
    systematics = OrderedDict()
    for name, type_str, value, scale_str in zip(s_names, s_types, s_values, s_scales):
        systematics[name] = Systematic(
            name,
            SystematicType.from_str(type_str),
            value,
            SystematicScale.from_str(scale_str),
        )

    # Finite-difference benchmarks keyed by base benchmark name
    fin_differences = OrderedDict(
        (base_name, FiniteDiffBenchmark.from_params(base_name, analysis_params.keys(), matrix))
        for base_name, matrix in zip(fd_base_names, fd_shift_matrices)
    )

    num_samples = len(sample_observations)
    # First benchmark flagged as the reference one, if any
    ref_benchmark = next(
        (name for name, flag in zip(b_names, b_ref_flags) if flag),
        None,
    )
    return (
        analysis_params,
        benchmarks,
        b_nuisance_flags,
        morphing_components,
        morphing_matrix,
        observables,
        num_samples,
        systematics,
        ref_benchmark,
        nuisance_params,
        num_signal_events,
        num_background_events,
        fin_differences,
        fin_diff_epsilon,
    )
def namespace2dict(namespace):
    """
    Recursively convert a namespace into a plain dictionary.  Does not
    work if a namespace's parent is not itself a namespace.
    """
    return {
        key: namespace2dict(value) if isinstance(value, NamespaceMap) else value
        for key, value in dict(**namespace).items()
    }
def editGenre(genre_id):
    """Edit a genre's name.

    GET renders the edit form; POST applies the new name (when one was
    submitted) and redirects back to the genre's song list.  Only the
    genre's owner may edit it.
    """
    editedGenre = session.query(Genre).filter_by(id=genre_id).one()
    # only the owner of the genre may edit it
    if editedGenre.user_id != login_session['user_id']:
        return """<script>(function() {alert("not authorized");})();</script>"""
    if request.method == 'POST':
        # NOTE(review): with an empty 'name' the old value is kept but the
        # success flash still fires.
        if request.form['name']:
            editedGenre.name = request.form['name']
        flash('Genre successfully edited %s' % editedGenre.name)
        return redirect(url_for('showSongs', genre_id = genre_id))
    else:
        return render_template('edit-genre.html', genre=editedGenre) | f8c1400d13bb00e474bc52334ca319238e676db0 | 3,627,884
def potential_fn(scale, coefs, preds, x):
    """Negative log-likelihood of a linear-regression model.

    The mean is ``x @ coefs`` and the observations *preds* are modelled
    as Gaussian with standard deviation *scale*.
    """
    mean = jnp.dot(x, coefs)
    log_likelihood = stats.norm.logpdf(preds, mean, scale)
    return -jnp.sum(log_likelihood)
def van32(**kwargs):
    """Construct a 32-layer vanilla model.

    Keyword arguments are forwarded to the ``Vanilla`` constructor.
    """
    return Vanilla(*make_layers(32), **kwargs)
import collections
def indices(record):
    """
    Generalization of Mapping.keys(): iterate a record's valid indices.

    For a mapping this yields its keys; for a (non-string) sequence it
    yields the integer positions.

    @type record: Record[Any, Any]
    @rtype: Iterator[Any]
    @raises: TypeError if record is neither a Mapping nor a Sequence
    """
    # collections.Mapping/Sequence were removed from the collections top
    # level in Python 3.10; use collections.abc.  Likewise py2's
    # `basestring` is replaced by `str`.
    if isinstance(record, collections.abc.Mapping):
        return iter(record.keys())
    elif isinstance(record, collections.abc.Sequence) and not isinstance(record, str):
        # yield positions, not elements
        return (index for index, elm in enumerate(record))
    else:
        raise TypeError("'record' should be a Mapping or Sequence.")
def my_request_classifier(environ):
    """Classify a request as 'dav', 'xmlpost', 'soap' or 'browser'.

    WebDAV methods or a WebDAV user agent yield 'dav'; a POST of
    text/xml yields 'xmlpost' and one of application/soap+xml yields
    'soap'; everything else is a plain 'browser' request.
    """
    method = REQUEST_METHOD(environ)
    if method in _DAV_METHODS:
        return "dav"
    agent_header = USER_AGENT(environ)
    if agent_header and any(agent_header.find(marker) != -1
                            for marker in _DAV_USERAGENTS):
        return "dav"
    if method == "POST":
        content_type = CONTENT_TYPE(environ)
        if content_type == "text/xml":
            return "xmlpost"
        if content_type == "application/soap+xml":
            return "soap"
    return "browser"
def bisection_solve(x, power, epsilon, low, high):
    """Find ans such that ans**power is within epsilon of x, by bisection.

    Assumes epsilon > 0, low <= high, and that such an answer lies in
    [low, high].
    """
    while True:
        guess = (low + high) / 2
        error = guess ** power - x
        if abs(error) < epsilon:
            return guess
        if error < 0:
            low = guess
        else:
            high = guess
def get_distinct_rotations(structure, symprec=0.1, atol=1e-6):
    """
    Get the distinct rotation matrices from a structure's spacegroup
    operations.

    Args:
        structure (Structure): structure to analyze and get the
            corresponding rotations for
        symprec (float): symprec for SpacegroupAnalyzer
        atol (float): absolute tolerance when comparing rotation matrices
    """
    analyzer = SpacegroupAnalyzer(structure, symprec)
    rotations = [op.rotation_matrix
                 for op in analyzer.get_symmetry_operations(cartesian=True)]
    if len(rotations) == 1:
        return rotations
    distinct = [np.array(rotations[0])]
    for candidate in rotations[1:]:
        already_seen = any(np.allclose(seen, candidate, atol=atol)
                           for seen in distinct)
        if not already_seen:
            distinct.append(candidate)
    return distinct
import math
def get_sierpinski_carpet_set(width, height):
    """
    Compute the Sierpinski-carpet point set fitting a width*height area.

    The carpet is drawn on the largest power-of-3 square that fits,
    anchored at the origin; the returned points are the removed-center
    pixels at every recursion level.

    :param width: available width in pixels
    :param height: available height in pixels
    :return: list of (x, y) carpet points
    """
    def get_sierpinski_carpet_points(left, top, right, bottom):
        """
        Recursively collect the carpet points inside the given square.

        Returns an empty list unless the region is a square with side
        >= 3 (sides are always powers of 3 here, so the / 3 is exact).
        """
        w, h = right - left + 1, bottom - top + 1
        if w != h or w < 3:
            return []
        sub_len = int(w / 3)  # side of each of the nine sub-squares
        screen_points = []
        for row in range(3):
            for col in range(3):
                x1, y1 = left + sub_len * row, top + sub_len * col
                x2, y2 = x1 + sub_len - 1, y1 + sub_len - 1
                if row == 1 and col == 1:
                    # center block: every pixel belongs to the carpet
                    for i in range(x1, x2 + 1):
                        for j in range(y1, y2 + 1):
                            screen_points.append((i, j))
                else:
                    # the eight surrounding blocks recurse
                    screen_points.extend(get_sierpinski_carpet_points(x1, y1, x2, y2))
        return screen_points
    # Largest exact power of 3 that fits the area, computed with integer
    # arithmetic: the previous int(3**int(log(min, 3))) could round a
    # power of 3 down because of floating-point error in log().
    limit = min(width, height)
    square_len = 1
    while square_len * 3 <= limit:
        square_len *= 3
    sierpinski_carpet_points = get_sierpinski_carpet_points(0, 0, square_len - 1, square_len - 1)
    print('Got {} Sierpinski carpet points in area {} * {}.'.format(len(sierpinski_carpet_points), width, height))
    return sierpinski_carpet_points
def find_target_node(ctx, stmt, is_instance_allowed=False):
    """Resolve the schema node targeted by a 'refine' or 'augment' statement.

    Walks the statement's schema-node-id path segment by segment starting
    from the statement's parent.  Returns the target node, or None after
    recording an error in ctx.errors when the path cannot be resolved or
    crosses an instantiated node while is_instance_allowed is False.
    """
    parent = stmt.parent
    if stmt.arg == '.':
        return parent
    # split the schema node id into (prefix, identifier) pairs
    pstr = '/' + stmt.arg
    segments = [(match[1], match[2])
                for match in syntax.re_schema_node_id_part.findall(pstr)]
    current = parent
    # walk the path one segment at a time
    for prefix, identifier in segments:
        if not is_instance_allowed and current is not parent and is_instation(current):
            err_add(ctx.errors, stmt.pos, 'BAD_REF_AUG', (current.arg, current.pos))
            return None
        module = statements.prefix_to_module(parent.i_module, prefix,
                                             stmt.pos, ctx.errors)
        if module is None:
            return None
        child = statements.search_child(current.i_children,
                                        module.i_modulename, identifier)
        if child is None:
            err_add(ctx.errors, stmt.pos, 'NODE_NOT_FOUND',
                    (module.i_modulename, identifier))
            return None
        current = child
    return current
def destroy_asteroids(angles):
    """Destroy asteroids with a laser that starts pointing up and sweeps
    clockwise.

    *angles* maps a firing angle to the list of asteroids at that angle
    (sortable by a distance-like key).  Each full rotation vaporizes at
    most one asteroid per angle — the closest remaining one — until no
    asteroids are left.

    Returns the list of (angle, asteroid) pairs in destruction order.

    Fixes the original implementation, which removed exhausted angles
    from the list *while iterating it*; the removal skipped the next
    angle in that rotation, so asteroids could be reported out of order.
    """
    destroy_list = []
    remaining = sorted(angles)
    while remaining:
        still_populated = []
        for angle in remaining:
            targets = angles[angle]
            if not targets:
                # already exhausted: drop it from the next rotation
                continue
            closest = sorted(targets)[0]
            targets.remove(closest)
            destroy_list.append((angle, closest))
            if targets:
                still_populated.append(angle)
        remaining = still_populated
    return destroy_list
def zoom(image, zoom_scale):
    """
    Crop a centered square of side *zoom_scale* out of *image*.

    Fixes the original implementation, which referenced an undefined
    ``im_shape``, sliced rows twice instead of rows and columns, and
    used float division for the indices.

    Inputs:
        :image: (numpy array) image to be zoomed in on, indexed [row, col].
        :zoom_scale: (int) side length of the zoomed square, in pixels
            (works for both odd and even values).
    Outputs:
        :zoomed: (numpy array) centered zoom_scale x zoom_scale crop.
    """
    # integer arithmetic keeps the slice bounds valid for odd zoom_scale
    row_start = image.shape[0] // 2 - zoom_scale // 2
    col_start = image.shape[1] // 2 - zoom_scale // 2
    zoomed = image[row_start: row_start + zoom_scale,
                   col_start: col_start + zoom_scale]
    return zoomed
def _get_multipart_param(param_name: str, count: int, ssm) -> str:
    """Reassemble a parameter value stored across several SSM parts.

    You must pass the count returned from _get_num_multiparts.
    """
    parts = []
    for part_index in range(count):
        part_name = _get_multipart_param_part_name(param_name, part_index)
        parts.append(ssm.get_parameter(Name=part_name)["Parameter"]["Value"])
    return "".join(parts)
from typing import Optional
def get_lrs(lr: slice, count: Optional[int] = None):
    """Return exponentially spaced learning rates from lr.start to lr.stop.

    When *count* is None it defaults to ``int(stop / start)``.
    """
    start, stop = lr.start, lr.stop
    if count is None:
        count = int(stop / start)
    ratio = np.exp(np.log(stop / start) / (count - 1))
    return [start * ratio ** step for step in range(count)]
def install_softcurrent_turn():
    """
    Show the current 10-day line chart for yesterday's top-10 soft
    installs.

    :return: the rendered ``cb_soft_install_crnt_line_show.html`` page.
    """
    # NOTE(review): presumably registered as a Flask route elsewhere —
    # no decorator is visible in this chunk.
    return render_template("cb_soft_install_crnt_line_show.html") | 488c0628b26522a20d089e2d5331f8aa62fdcb56 | 3,627,897
import json
def add_filename(json_):
    """Load a JSON annotation file and attach the matching image filename.

    Args:
        json_ (str): path to the annotation json (e.g. 'dir/img001.json')
    Returns:
        dict: the parsed annotations with a 'filename' key set to the
        json file's stem plus a '.jpg' extension.
    """
    with open(json_) as handle:
        annotations = json.load(handle)
    stem = json_.split('/')[-1].split('.')[0]
    annotations['filename'] = stem + '.jpg'
    return annotations
import unittest
def test():
    """Run the TestMemory suite and return True when all tests pass."""
    loaded = unittest.TestLoader().loadTestsFromTestCase(TestMemory)
    outcome = unittest.TextTestRunner(verbosity=2).run(loaded)
    return outcome.wasSuccessful()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.