content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import os
def retrieve_tensors(logdir, groups=("train", "valid")):
    """
    Grab all tensorboard tensors and put them in one loss dataframe.

    Parameters
    ----------
    logdir : str
        Directory containing summary log of loss for tensorboard
    groups : iterable of str, optional
        Sub-directories of ``logdir`` containing tensor summaries,
        by default ``("train", "valid")``.  (Changed from a mutable
        list default to an immutable tuple; callers passing a list
        still work.)

    Returns
    -------
    results_df : pd.DataFrame
        A pandas dataframe of results grabbed from the tensorflow
        summaries, with columns wall_time, step, val, group, variable.
    """
    tensor_dfs = []
    for group in groups:
        # size_guidance={"tensors": 0} keeps *all* tensor events
        # (no reservoir downsampling by the accumulator).
        acc = EventAccumulator(
            os.path.join(logdir, group), size_guidance={"tensors": 0}
        )
        acc.Reload()
        # print(acc.Tags()["tensors"])
        for tag in acc.Tags()["tensors"]:
            tensor_df = pd.DataFrame(
                [
                    (w, s, float(tf.make_ndarray(t)), group, tag)
                    for w, s, t in acc.Tensors(tag)
                ],
                columns=["wall_time", "step", "val", "group", "variable"],
            )
            tensor_dfs.append(tensor_df)
    return pd.concat(tensor_dfs)
def get_page_index(obj, amongst_live_pages=True):
    """
    Get page's index (a number) within its siblings.
    :param obj:
        Wagtail page object
    :param amongst_live_pages:
        Get index amongst live pages if True or all pages if False.
    :return:
        Index of a page if found or None if page doesn't have an index.
    """
    qs = obj.__class__.objects.filter(depth=obj.depth).values_list('pk', flat=True)
    if amongst_live_pages:
        # NOTE(review): .live() is a Wagtail PageQuerySet method; assumes it
        # remains available after values_list() on the Wagtail version in use
        # -- confirm, otherwise filter live() *before* values_list().
        qs = qs.live()
    if obj.depth > 1:
        # making sure the non-root nodes share a parent
        parentpath = obj._get_basepath(obj.path, obj.depth - 1)
        qs = qs.filter(
            path__range=obj._get_children_path_interval(parentpath))
    try:
        # Materializes the sibling pk list; position of obj.pk is its index.
        index = list(qs).index(obj.pk)
        return index
    except ValueError:
        return None
import requests
def contact_ocsp_server(certs):
    """Sends an OCSP request to the responding server for a certificate chain.

    Expects ``certs`` to be an ordered chain (leaf first, then issuer):
    the request is built for chain[0] with issuer chain[1].  Relies on
    helpers defined elsewhere: convert_to_oscrypto, create_ocsp_request,
    extract_ocsp_uri, parse_ocsp.
    """
    chain = convert_to_oscrypto(certs)
    req = create_ocsp_request(chain[0], chain[1])
    # Responder URI extracted from the leaf certificate (presumably from its
    # AIA extension -- extract_ocsp_uri is defined elsewhere; confirm).
    URI = extract_ocsp_uri(certs[0])
    # NOTE(review): no HTTP status check -- a non-200 reply would make
    # OCSPResponse.load fail on arbitrary bytes.
    data = requests.post(URI, data=req, stream=True, headers={'Content-Type': 'application/ocsp-request'})
    response = ocsp.OCSPResponse.load(data.raw.data)
    parsed = parse_ocsp(response)
    return parsed
import warnings
def reset_index_inplace(df: pd.DataFrame, *args, **kwargs) -> pd.DataFrame:
    """
    Reset the index of ``df`` in place and return the same DataFrame.

    This mutates the original DataFrame: ``DataFrame.reset_index`` is
    invoked with ``inplace=True`` (overriding any caller-supplied value),
    avoiding the copy made by a non-inplace reset while still returning
    the frame so the call can participate in pyjanitor-style method
    chaining, e.g.::

        df = (
            pd.DataFrame(data).set_index("index")
              .drop_duplicates()
              .reset_index_inplace()
        )

    All other positional and keyword arguments are forwarded to
    ``DataFrame.reset_index()``.

    :param df: A pandas DataFrame.
    :param args: Arguments supplied to `DataFrame.reset_index()`
    :param kwargs: Arguments supplied to `DataFrame.reset_index()`
    :returns: The same pandas DataFrame, with its index reset.
    """
    # Scheduled for removal -- warn callers.
    warnings.warn(
        "reset_index_inplace will be deprecated in the "
        "upcoming 0.18 release. Use .reset_index() instead",
        DeprecationWarning,
    )
    kwargs["inplace"] = True
    df.reset_index(*args, **kwargs)
    return df
import errno
import os
def load_mapping():
    """Load the mapping from the disk.
    If the configuration directory files do not exist (probably because it is
    the user's first time executing a dtags command), create them.
    :return: the mapping object loaded from the disk; on first run, a tuple
        of (defaultdict(set), None) -- presumably matching parse_mapping's
        return shape (confirm against parse_mapping).
    """
    try:
        # Parse and load the mapping from the disk
        return parse_mapping(MAPPING_FILE)
    except (IOError, OSError) as mapping_load_error:
        # Any error other than "file not found" is fatal.
        if mapping_load_error.errno != errno.ENOENT:
            abort(
                message='Failed to read {file}: {msg}'.format(
                    file=MAPPING_FILE,
                    msg=mapping_load_error.strerror
                ),
                exit_code=mapping_load_error.errno
            )
        else:
            try:  # Create the directory if it does not exist
                os.makedirs(CFG_DIR)
            except (IOError, OSError) as dir_create_error:
                if dir_create_error.errno != errno.EEXIST:
                    abort(
                        message='Failed to create {file}: {msg}'.format(
                            file=CFG_DIR,
                            msg=dir_create_error.strerror
                        ),
                        exit_code=dir_create_error.errno
                    )
            # Touch both config files so subsequent runs find them.
            # NOTE(review): open(..., 'w') truncates an existing file; only
            # safe here because this branch runs when the mapping is missing.
            for config_file_to_create in (MAPPING_FILE, TAGS_FILE):
                try:
                    open(config_file_to_create, 'w').close()
                except (IOError, OSError) as file_create_error:
                    if file_create_error.errno != errno.EEXIST:
                        abort(
                            message='Failed to create {file}: {msg}'.format(
                                file=config_file_to_create,
                                msg=file_create_error.strerror
                            ),
                            exit_code=file_create_error.errno
                        )
            return defaultdict(set), None
def get_current_user():
    """Get the current logged in user, or None.

    Resolution order: local-development stub, LOAS peer header, IAP email,
    request-cached OAuth email, request-cached verified email, and finally
    the session cookie (Firebase-style claims, presumably -- see
    decode_claims).
    """
    if environment.is_local_development():
        return User('user@localhost')
    current_request = request_cache.get_current_request()
    if local_config.AuthConfig().get('enable_loas'):
        # Header injected by the AppEngine serving infrastructure.
        loas_user = current_request.headers.get('X-AppEngine-LOAS-Peer-Username')
        if loas_user:
            return User(loas_user + '@google.com')
    iap_email = get_iap_email(current_request)
    if iap_email:
        return User(iap_email)
    cache_backing = request_cache.get_cache_backing()
    oauth_email = getattr(cache_backing, '_oauth_email', None)
    if oauth_email:
        return User(oauth_email)
    cached_email = getattr(cache_backing, '_cached_email', None)
    if cached_email:
        return User(cached_email)
    session_cookie = get_session_cookie()
    if not session_cookie:
        return None
    try:
        decoded_claims = decode_claims(get_session_cookie())
    except AuthError:
        logs.log_warn('Invalid session cookie.')
        return None
    # Only trust verified email claims.
    if not decoded_claims.get('email_verified'):
        return None
    email = decoded_claims.get('email')
    if not email:
        return None
    # We cache the email for this request if we've validated the user to make
    # subsequent get_current_user() calls fast.
    setattr(cache_backing, '_cached_email', email)
    return User(email)
import calendar
import time
def genericGetCreationDate(page):
    """
    Go to each date position and attempt to get date.

    Scans the page for signature positions (findSignatures), extracts a
    timestamp at each (getTimestampFromSERP), normalizes parseable
    '%b %d, %Y' timestamps to ISO-8601 UTC, and returns the earliest one
    via getLowest.  Unparseable timestamps are silently skipped.
    """
    randSleep()  # politeness delay between scrapes (defined elsewhere)
    allDates = []
    signaturePositions = findSignatures(page)
    for p in signaturePositions:
        timestamp = getTimestampFromSERP(p, page)
        # print('timestamp/locationOfSignature:', timestamp)
        try:
            # Expected format like 'Jan 01, 2020'; convert to ISO-8601.
            epoch = calendar.timegm(
                time.strptime(timestamp, '%b %d, %Y'))
            date = time.strftime('%Y-%m-%dT%H:%M:%S',
                                 time.gmtime(epoch))
            allDates.append(date)
        except Exception:
            pass
    return getLowest(allDates)
def depth(data):
    """
    For each event, find the deepest calorimeter layer (0, 1 or 2) in
    which the shower deposited any energy.

    ``data`` is indexed by layer first (data[layer] has shape
    (events, H, W)); the return value is an integer array with one
    entry per event.
    """
    # Start with 2 wherever layer 2 has any deposit, 0 elsewhere.
    deepest = 2 * (data[2].sum(axis=(1, 2)) != 0)
    # Events with nothing in layer 2 fall back to layer 1 (1) or nothing (0).
    shallow = deepest == 0
    deepest[shallow] = 1 * (data[1][shallow].sum(axis=(1, 2)) != 0)
    return deepest
import sys
def python_obj_size(obj, humanize=True):
    """Return the size of a Python object.

    By default the size is formatted as a human-readable string via
    ``humanize_bytes``; with ``humanize=False`` the raw byte count from
    ``sys.getsizeof`` is returned instead.
    """
    size = sys.getsizeof(obj)
    return humanize_bytes(size) if humanize else size
def transform_user_extensions(user_extension_json):
    """
    Transforms the raw extensions JSON from the API into a list of extensions
    mapped to users.

    :param user_extension_json: dict of extension_id -> {version -> details}
        as returned from the CRXcavator API
    :return: Tuple of (unique users list, extension dicts list,
        user->extension mapping list) ready for ingestion
    :raises ValueError: if any of the three collections comes back empty
    """
    users_set = set()
    extensions = []
    extensions_by_user = []
    for extension_id, versions in user_extension_json.items():
        for version, details in versions.items():
            extensions.append({
                'extension_id': extension_id,
                'version': version,
                'name': details['name'],
            })
            for user in details['users']:
                # The API occasionally reports a null user; skip it but log.
                if user is None:
                    logger.info(f'bad user for {extension_id}{version}')
                    continue
                users_set.add(user)
                extensions_by_user.append({
                    'id': f"{extension_id}|{version}",
                    'user': user,
                })
    if not users_set:
        raise ValueError('No users returned from CRXcavator')
    if not extensions:
        raise ValueError('No extensions information returned from CRXcavator')
    if not extensions_by_user:
        raise ValueError('No user->extension mapping returned from CRXcavator')
    return list(users_set), extensions, extensions_by_user
def shower_array_rot(shower_array, alt, az):
    """
    Given a series of points on the Z axis, perform a rotation of alt around Y and az around Z
    Parameters
    ----------
    shower_array: numpy array of shape (N,3) giving N points coordinates
    alt: altitude shower direction - float (radians, presumably -- confirm against geo module)
    az: azimuth shower direction - float (radians, presumably)
    Returns
    -------
    Numpy array of shape (N,3) giving N points coordinates
    """
    # Y-rotation uses the zenith angle (pi/2 - alt).  The '*' chain implies
    # the geo helpers return np.matrix objects (matrix multiplication),
    # hence the final np.array conversion back to a plain ndarray.
    rotated_shower_array = geo.rotation_matrix_z(az) * geo.rotation_matrix_y(pi / 2. - alt) * shower_array.T
    return np.array(rotated_shower_array.T)
from typing import Optional
def get_dscp_configuration(dscp_configuration_name: Optional[str] = None,
                           resource_group_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDscpConfigurationResult:
    """
    Use this data source to access information about an existing resource.
    :param str dscp_configuration_name: The name of the resource.
    :param str resource_group_name: The name of the resource group.
    :param opts: Pulumi invoke options; a default instance is created (and its
        version filled in) when not supplied.
    """
    __args__ = dict()
    __args__['dscpConfigurationName'] = dscp_configuration_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function pinned to API version 2020-06-01.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200601:getDscpConfiguration', __args__, opts=opts, typ=GetDscpConfigurationResult).value
    return AwaitableGetDscpConfigurationResult(
        associated_network_interfaces=__ret__.associated_network_interfaces,
        destination_ip_ranges=__ret__.destination_ip_ranges,
        destination_port_ranges=__ret__.destination_port_ranges,
        etag=__ret__.etag,
        location=__ret__.location,
        markings=__ret__.markings,
        name=__ret__.name,
        protocol=__ret__.protocol,
        provisioning_state=__ret__.provisioning_state,
        qos_collection_id=__ret__.qos_collection_id,
        resource_guid=__ret__.resource_guid,
        source_ip_ranges=__ret__.source_ip_ranges,
        source_port_ranges=__ret__.source_port_ranges,
        tags=__ret__.tags,
        type=__ret__.type)
def aggregate_returns(df_daily_rets, convert_to):
    """
    Aggregates returns by week, month, or year.

    Parameters
    ----------
    df_daily_rets : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet (returns).
    convert_to : str
        Can be 'weekly', 'monthly', or 'yearly'.

    Returns
    -------
    pd.Series
        Aggregated returns.

    Raises
    ------
    ValueError
        If `convert_to` is not one of WEEKLY, MONTHLY or YEARLY.
    """
    def cumulate_returns(x):
        # Compound the period's daily returns; keep the final cumulative value.
        return cum_returns(x)[-1]
    if convert_to == WEEKLY:
        return df_daily_rets.groupby(
            [lambda x: x.year,
             lambda x: x.isocalendar()[1]]).apply(cumulate_returns)
    elif convert_to == MONTHLY:
        return df_daily_rets.groupby(
            [lambda x: x.year, lambda x: x.month]).apply(cumulate_returns)
    elif convert_to == YEARLY:
        return df_daily_rets.groupby(
            [lambda x: x.year]).apply(cumulate_returns)
    else:
        # Bug fix: the exception was previously constructed but never raised,
        # so an invalid convert_to silently returned None.
        raise ValueError(
            'convert_to must be {}, {} or {}'.format(WEEKLY, MONTHLY, YEARLY)
        )
import argparse
def parse_args(args):
    """Parse the command line arguments for the STOMP client CLI.

    Supported sub-commands:
      * ``create <location>`` -- write a new, empty configuration file.
      * ``run <config>``      -- run the client with an existing config.
    """
    parser = argparse.ArgumentParser(
        description='STOMP client for Network Rail\'s public data.'
    )
    subparsers = parser.add_subparsers()

    # 'create' sub-command: write a fresh configuration file.
    create = subparsers.add_parser(
        'create',
        description='Create a new, empty configuration file.'
    )
    create.add_argument(
        'location',
        help='The location for the new configuration file.'
    )

    # 'run' sub-command: start the client from an existing config.
    run = subparsers.add_parser(
        'run',
        description='Run the client'
    )
    run.add_argument(
        'config',
        help='The configuration file to use for setup.'
    )

    return parser.parse_args(args)
import random
def impute(x,nu):
    """
    Impute to missing values: for each row of x this function find the nearest row in eucledian distance
    in a sample of nu rows of x and replace the missing value of the former row
    with the corrisponding values of the latter row
    """
    # Column 22 (jet-number category) is preserved and restored at the end.
    remember=x[:,22]
    N,D = x.shape
    idx = get_jet_masks(x)
    # NOTE(review): both outputs assigned to the same name -- the second
    # return value of missing_values is discarded; confirm this is intended.
    x, x = missing_values(x, x)
    x,_,_ = standardize (x)
    cols = set(range(D))
    # class 1
    col1 = set([4,5,6,12,26,27,28])
    col1n = cols-col1
    idx23 = np.array(idx[2])+np.array(idx[3])
    x1 = x[idx[1],:]
    x23 = x[idx23,:]
    for j in col1:
        for i in range(x[idx[1]].shape[0]):
            # Random sample of nu candidate donor rows from classes 2/3.
            key = random.sample(range(x23.shape[0]), nu)
            # NOTE(review): x[i, ...] indexes the *full* matrix with a
            # class-1 row counter; presumably x1[i, ...] was intended --
            # verify before relying on this.
            k = np.argmin(abs((x23[key,:][:,list(col1n)]-x[i,list(col1n)])).sum(axis=1))
            x1[i,j]= x23[key,:][k,j]
    x[idx[1],:] = x1
    # class 0
    col0= set([23,24,25,29]).union(col1)
    col0n = cols-col0
    idx123 = np.array(idx[1])+np.array(idx[2])+np.array(idx[3])
    x0=x[idx[0],:]
    x123=x[idx123,:]
    for j in col0:
        # NOTE(review): loop bound reuses idx[1] (class-1 row count) while
        # filling class-0 rows x0 -- looks like a copy-paste bug; confirm.
        for i in range(x[idx[1]].shape[0]):
            key = random.sample(range(x123.shape[0]), nu)
            k = np.argmin(abs((x123[key,:][:,list(col0n)]-x[i,list(col0n)])).sum(axis=1))
            x0[i,j]= x123[key,:][k,j]
    x[idx[0],:] = x0
    x[:,22]=remember
    return x
def get_data_splits(comment_type_str=None, ignore_ast=False):
    """Retrieve train/validation/test sets for the given comment type.

    comment_type_str -- Return, Param, Summary, or None (None = all comment types)
    ignore_ast -- skip loading ASTs (they take a long time)

    Returns (train_examples, valid_examples, test_examples, high_level_details).
    """
    dataset, high_level_details = load_processed_data(comment_type_str, ignore_ast)
    splits = tuple(dataset[part] for part in ('train', 'valid', 'test'))
    return splits + (high_level_details,)
from typing import Tuple
def get_slide_roi_masks(slide_path, halo_roi_path, annotation_name, slide_id:str=None,
        output_dir:str=None) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """get roi masks from slides
    Given a slide, halo annotation xml file, generate labels from xml polygons,
    then crop both the image and mask to ROI (rectangle) region
    Optionally: save the RGB image, downsampled image sample, and interger label mask as a tiff
    Args:
        slide_path (str): path to input slide
        halo_roi_path (str): path to halo annotation file
        annotation_name (str): name of annotation
        slide_id (Optional, str): slide id
        output_dir (Optional, str): destination to save RGB image, thumbnail and mask
    Returns:
        Tuple[np.ndarray, np.ndarray, np.ndarray]: the cropped image as RGB numpy array, a
        downsampled array (as sample for stains), and mask array as single channel
    """
    slide = openslide.OpenSlide(slide_path)
    wsi_shape = slide.dimensions[1], slide.dimensions[0] # Annotation file has flipped dimensions w.r.t openslide conventions
    annotation_mask = convert_xml_to_mask(halo_roi_path, wsi_shape, annotation_name)
    x_roi, y_roi = convert_halo_xml_to_roi(halo_roi_path)
    print (x_roi, y_roi)
    # print ((min(x_roi), min(y_roi)), 0, (abs(x_roi[1] - x_roi[0]), abs(y_roi[1] - y_roi[1])))
    # Read the ROI rectangle at full resolution (level 0) and drop alpha.
    slide_image_cropped = slide.read_region((min(x_roi), min(y_roi)), 0, (abs(x_roi[1] - x_roi[0]), abs(y_roi[1] - y_roi[0]))).convert('RGB')
    print (slide_image_cropped)
    slide_array  = np.array(slide_image_cropped, dtype=np.uint8)
    # 80x-downsampled thumbnail, e.g. for stain inspection.
    sample_array = np.array(slide_image_cropped.resize( (slide_image_cropped.size[0] // 80, slide_image_cropped.size[1] // 80) ), dtype=np.uint8)
    # Crop the label mask to the same ROI (mask is indexed [row=y, col=x]).
    mask_array   = annotation_mask[ min(y_roi):max(y_roi), min(x_roi):max(x_roi)].astype(np.uint8)
    if slide_id is not None and output_dir is not None:
        # Persist the three arrays as BigTIFFs under output_dir/slide_id/.
        with tifffile.TiffWriter(f'{output_dir}/{slide_id}/{slide_id}_slideImage_roi_inRGB.tiff', bigtiff=True) as tiff:
            tiff.save(slide_array)
        with tifffile.TiffWriter(f'{output_dir}/{slide_id}/{slide_id}_slideSample_roi_inRGB.tiff', bigtiff=True) as tiff:
            tiff.save(sample_array)
        with tifffile.TiffWriter(f'{output_dir}/{slide_id}/{slide_id}_annotMask_roi_uint8.tiff', bigtiff=True) as tiff:
            tiff.save(mask_array)
    return slide_array, sample_array, mask_array
def generate_stop_enex_nb(entries: tp.Array2d,
                          ts: tp.Array,
                          stop: tp.MaybeArray[float],
                          trailing: tp.MaybeArray[bool],
                          entry_wait: int,
                          exit_wait: int,
                          pick_first: bool,
                          flex_2d: bool) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Generate one after another using `generate_enex_nb` and `stop_choice_nb`.
    Returns two arrays: new entries and exits.
    !!! note
        Has the same logic as calling `generate_stop_ex_nb` with `skip_until_exit=True`, but
        removes all entries that come before the next exit."""
    # Scratch buffer reused by stop_choice_nb (one slot per row) to avoid
    # per-call allocations inside the numba-compiled choice function.
    temp_idx_arr = np.empty((entries.shape[0],), dtype=np.int_)
    return generate_enex_nb(
        entries.shape,
        entry_wait,
        exit_wait,
        True,
        pick_first,
        first_choice_nb, (entries,),
        stop_choice_nb, (ts, stop, trailing, exit_wait, pick_first, temp_idx_arr, flex_2d)
    )
def mangle_type(typ):
    """Mangle a Numba type.

    Looks the type up in the N2C mapping first; unknown types fall back to
    their string representation.  The resulting name is passed through
    ``mangle_type_c``.
    """
    typename = N2C[typ] if typ in N2C else str(typ)
    return mangle_type_c(typename)
def lorentz(sample_len=1000, sigma=10, rho=28, beta=8 / 3, step=0.01):
    """Generate a Lorenz-system time series of length ``sample_len``.

    Integrates the Lorenz equations with a forward-Euler step of size
    ``step`` and standard parameters sigma, rho and beta.  Returns an
    array of shape (sample_len, 3) with columns (x, y, z).
    """
    traj = np.zeros((sample_len, 3))
    # Initial conditions taken from 'Chaos and Time Series Analysis', J. Sprott
    traj[0] = (0.0, -0.01, 9.0)
    for t in range(sample_len - 1):
        x, y, z = traj[t]
        traj[t + 1, 0] = x + sigma * (y - x) * step
        traj[t + 1, 1] = y + (x * (rho - z) - y) * step
        traj[t + 1, 2] = z + (x * y - beta * z) * step
    return traj
def draw_lane_lines_on_all_images(images, cols=2, rows=3, figsize=(15, 13)):
    """
    This method calls draw_windows_and_fitted_lines Fn for each image and then show the grid of output images.

    :param images: list of (image_path, image) tuples to process.
    :param cols: number of grid columns.
    :param rows: number of grid rows; axes beyond len(images) are left empty.
    :param figsize: matplotlib figure size.
    :return: list of (image_path, left_fit, right_fit, left_fit_m, right_fit_m)
        tuples -- the lane polynomial fits produced for each image.
    """
    no_of_images = len(images)
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    indexes = range(cols * rows)
    image_path_with_fitted_parameters = []
    for ax, index in zip(axes.flat, indexes):
        if index < no_of_images:
            image_path, image = images[index]
            # Draws sliding windows + fitted lines onto this axis and
            # returns the pixel- and meter-space polynomial coefficients.
            left_fit, right_fit, left_fit_m, right_fit_m = draw_windows_and_fitted_lines(image, ax)
            ax.set_title(image_path)
            ax.axis('off')
            image_path_with_fitted_parameters.append((image_path, left_fit, right_fit, left_fit_m, right_fit_m))
    fig.show()
    return image_path_with_fitted_parameters
import re
def get_field_order(address, latin=False):
    """
    Returns expected order of address form fields as a list of lists.

    :param address: dict with at least 'country_code', passed to
        get_validation_rules.
    :param latin: use the latinized address format when True.

    Example for PL:
    >>> get_field_order({'country_code': 'PL'})
    [[u'name'], [u'company_name'], [u'street_address'], [u'postal_code', u'city']]
    """
    rules = get_validation_rules(address)
    address_format = (
        rules.address_latin_format if latin else rules.address_format)
    # '%n' separates lines in the format string.
    address_lines = address_format.split('%n')
    # Map '%X' placeholder codes to their field names.
    replacements = {'%%%s' % code: field_name
                    for code, field_name in FIELD_MAPPING.items()}
    all_lines = []
    for line in address_lines:
        fields = re.split('(%.)', line)
        # Unknown tokens map to None and are filtered out.
        single_line = [replacements.get(field) for field in fields]
        single_line = list(filter(None, single_line))
        all_lines.append(single_line)
    return all_lines
def env_start():
    """Reset the environment and return the agent's start state.

    Note: despite the original comment claiming a numpy array, this
    returns the int state index 500 (and stores it in the module-level
    ``current_position``).
    """
    global maze, current_position
    current_position = 500
    return current_position
def bootstrap_acceleration(d):
    """ Bootstrap (BCA) acceleration term.
    Args:
        d : Jackknife differences
    Returns:
        a : Acceleration (skewness-based term used by the BCa interval)
    """
    cubed_sum = np.sum(d**3)
    squared_sum = np.sum(d**2)
    return cubed_sum / squared_sum**1.5 / 6.0
def normalize_map(x):
    """
    normalize map input
    :param x: map input (H, W, ch), raw byte values in [0, 255]
    :return np.ndarray: normalized map (H, W, ch)
    """
    # Rescale [0, 255] -> [0, 2]; later zero padding will then produce an
    # equivalent obstacle value.
    scale = 2.0 / 255.0
    return x * scale
def today():
    """Get today's date as an int.

    :return: today's date encoded as an int via ``to_int_date``
        (presumably YYYYMMDD -- confirm against to_int_date).
    """
    the_day = date.today()
    return to_int_date(the_day)
from datetime import datetime
def contact(request):
    """Renders the contact page.

    :param request: the incoming HttpRequest (asserted for type safety).
    :return: HttpResponse rendering 'app/contact.html' with a title,
        a message (both Chinese UI strings) and the current year.
    """
    assert isinstance(request, HttpRequest)
    return render(
        request,
        'app/contact.html',
        {
            'title':'联系我们',
            'message':'你可以通过以下方式和我们取得联系',
            'year':datetime.now().year,
        }
    )
import math
def plagdet_score(rec, prec, gran):
    """Combines recall, precision, and granularity to a allow for ranking.

    Returns 0 for degenerate inputs (both rec and prec zero, any negative
    value, or granularity below 1); otherwise the F1 score divided by
    log2(1 + granularity).
    """
    degenerate = (rec == 0 and prec == 0) or prec < 0 or rec < 0 or gran < 1
    if degenerate:
        return 0
    f1 = (2 * rec * prec) / (rec + prec)
    return f1 / math.log(1 + gran, 2)
def continuum(spec,bin=50,perc=60,norder=4):
    """ Derive the continuum of a spectrum.

    The spectrum is cut into ``bin``-pixel chunks; the ``perc``-th
    percentile of each chunk is fit with a polynomial of order
    ``norder``.  A second pass repeats the fit using only pixels whose
    ratio to the first-pass continuum exceeds 0.8, rejecting large
    negative outliers (e.g. absorption features).

    Returns (continuum array, polynomial coefficients).
    """
    npix = len(spec)
    xpix = np.arange(npix)
    nbins = npix // bin

    def _binned_fit(mask):
        # Percentile per bin over the masked pixels, then polynomial fit.
        xb = np.zeros(nbins, float)
        yb = np.zeros(nbins, float)
        for i in range(nbins):
            sl = slice(i * bin, i * bin + bin)
            xb[i] = np.mean(xpix[sl][mask[sl]])
            yb[i] = np.percentile(spec[sl][mask[sl]], perc)
        c = np.polyfit(xb, yb, norder)
        return c, np.poly1d(c)(xpix)

    # First pass: all pixels.
    _, cont1 = _binned_fit(np.ones(npix, bool))
    # Second pass: drop large negative outliers relative to the first fit.
    good = (spec / cont1) > 0.8
    coef, cont = _binned_fit(good)
    return cont, coef
def mean_IoU(Y_true, Y_pred):
    """
    Calculate the mean IoU score between two lists of labeled masks.
    :param Y_true: a list of labeled masks (numpy arrays) - the ground truth
    :param Y_pred: a list labeled predicted masks (numpy arrays) for images with the original dimensions
    :return: [mean IoU score over all images and thresholds,
              per-image lists of precision at thresholds 0.5..0.95]
    """
    image_precs = []
    for y_true,y_pred in zip(Y_true,Y_pred):
        # Label 0 is background; remaining labels are object instances.
        true_objects = len(np.unique(y_true))
        pred_objects = len(np.unique(y_pred))
        # Compute intersection between all objects
        # NOTE(review): histogram2d binning by object count assumes labels are
        # consecutive integers starting at 0 -- confirm for relabeled masks.
        intersection = np.histogram2d(y_true.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0]
        # Compute areas (needed for finding the union between all objects)
        area_true = np.histogram(y_true, bins = true_objects)[0]
        area_pred = np.histogram(y_pred, bins = pred_objects)[0]
        area_true = np.expand_dims(area_true, -1)
        area_pred = np.expand_dims(area_pred, 0)
        # Compute union
        union = area_true + area_pred - intersection
        # Exclude background from the analysis
        intersection = intersection[1:,1:]
        union = union[1:,1:]
        # Avoid division by zero for empty pairings.
        union[union == 0] = 1e-9
        # Compute the intersection over union
        iou = intersection / union
        # Precision helper function
        def precision_at(threshold, iou):
            matches = iou > threshold
            true_positives = np.sum(matches, axis=1) == 1   # Correct objects
            false_positives = np.sum(matches, axis=0) == 0  # Missed objects
            false_negatives = np.sum(matches, axis=1) == 0  # Extra objects
            tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
            return tp, fp, fn
        # Loop over IoU thresholds
        prec = []
        for t in np.arange(0.5, 1.0, 0.05):
            tp, fp, fn = precision_at(t, iou)
            p = tp / (tp + fp + fn)
            prec.append(p)
        image_precs.append(prec)
    return [np.mean(image_precs), image_precs]
def get_matrix_header(filename):
    """
    Returns the entries, rows, and cols of a matrix market file.

    The header is the first non-comment ('%'-prefixed) line, containing
    three integers.  Returns (0, 0, 0) if the file has no such line.

    Bug fix: the previous implementation kept scanning past the header
    and re-parsed *every* subsequent line, so it returned values from the
    last line of the file and crashed with ValueError on float data
    entries (e.g. int('1.5')).  We now stop at the header line.
    """
    with open(filename) as f:
        for line in f:
            if line.startswith('%'):
                continue
            fields = line.split()
            return int(fields[0]), int(fields[1]), int(fields[2])
    return 0, 0, 0
def get_slot_names(slotted_instance):
    """Get all slot names in a class with slots.

    Returns the instance's class-level ``__slots__`` declaration as-is.
    """
    # thanks: https://stackoverflow.com/a/6720815/782170
    slots = slotted_instance.__slots__
    return slots
def make_standard_fisher_regularizer(make_logits, scope, should_regularize,
                                     perturbation, differentiate_probability):
  """Creates per-example logits and the per-example standard Fisher-Rao norm.
  This function assumes the model of a categorical distribution generated by a
  softmax function.
  The standard Fisher-Rao norm uses the model distribution computed from the
  logits by the softmax function to estimate the Fisher information matrix.
  The empirical training distribution is used for the input values.
  Args:
    make_logits: Function, returns `Tensor` representing the per-example logits.
      The expected shape of the tensor is such that the number of categories
      is the last dimension.
    scope: String, name of `VariableScope` to use for the `Variable` objects
      that represent the regularized parameter.
    should_regularize: Function, takes a variable name as String and returns
      Boolean that decides whether the variable should be regularized.
      The passed variable name includes the name of the scope.
    perturbation: Float, finite difference perturbation constant.
      The choice of perturbation constant represents a tradeoff between rounding
      and approximation error and should depend on floating point precision and
      parameter norm.
    differentiate_probability: Boolean, determines whether the label probability
      distribution should be differentiated.
  Returns:
    A tuple of `Tensor` objects representing the per-example logits and the
    scalar standard Fisher-Rao norm regularization loss.
  Raises:
    ValueError: if the last dimension of the logits shape is not statically
      inferrable.
  """
  # First build: record the regularized variables via the custom getter.
  collector = VariableCollector()
  with tf.variable_scope(scope, custom_getter=collector.collector_getter):
    logits = make_logits()
  if logits.shape[-1].value is None:
    raise ValueError("The size of the last dimension of the logits vector must"
                     " be statically inferrable.")
  # Second build: same variables, but perturbed for the finite difference.
  with tf.variable_scope(
      scope,
      custom_getter=
      make_perturbation_getter(should_regularize, collector, perturbation)):
    perturbed_logits = make_logits()
  log_probs = tf.nn.log_softmax(logits, axis=-1)
  perturbed_log_probs = tf.nn.log_softmax(perturbed_logits, axis=-1)
  stop_probs = tf.stop_gradient(tf.exp(log_probs))
  # Squared finite-difference estimate of the log-prob directional derivative.
  log_prob_derivative = (tf.square((perturbed_log_probs - log_probs) /
                                   perturbation))
  if differentiate_probability:
    # Straight-through-style construction: the forward value equals
    # log_prob_derivative * stop_probs, while gradients also flow through
    # the probability term.
    prob_regularizer_loss = (log_prob_derivative * stop_probs +
                             tf.stop_gradient(log_prob_derivative) * log_probs *
                             stop_probs -
                             tf.stop_gradient(log_prob_derivative * log_probs *
                                              stop_probs))
  else:
    prob_regularizer_loss = log_prob_derivative * stop_probs
  regularizer = logits.shape[-1].value * tf.reduce_mean(prob_regularizer_loss)
  return (logits, regularizer)
def objective(y_objective, sim_param, curve_param):
    """
    Calculates the objective function (RMS-VIF) given the control point y-values of a given particle
    :param y_objective: control point y-values of the particle
    :param sim_param: Instance of sim_param
    :param curve_param: Instance of curve_param
    :return: RMS-VIF
    """
    curve = initialize_NURBS(curve_param, sim_param, y_objective)  # builds NURBS from the control point y-values
    R = calculate_R_from_curve(curve, sim_param)  # builds the PDE-FIND system matrix from the NURBS IC
    rms_vif = calculate_rms_vif(R)  # Calculate RMS-VIF from the matrix R
    return rms_vif
def form_poisson_equation_impl(height, width, alpha, normals, depth_weight, depth):
    """
    Creates a Poisson equation given the normals and depth at every pixel in image.
    The solution to Poisson equation is the estimated depth.
    When the mode, is 'depth' in 'combine.py', the equation should return the actual depth.
    When it is 'normals', the equation should integrate the normals to estimate depth.
    When it is 'both', the equation should weight the contribution from normals and actual depth,
    using parameter 'depth_weight'.
    Input:
        height -- height of input depth,normal array
        width -- width of input depth,normal array
        alpha -- stores alpha value of at each pixel of image.
            If alpha = 0, then the pixel normal/depth should not be
            taken into consideration for depth estimation
        normals -- stores the normals(nx,ny,nz) at each pixel of image
            None if mode is 'depth' in combine.py
        depth_weight -- parameter to tradeoff between normals and depth when estimation mode is 'both'
            High weight to normals mean low depth_weight.
            Giving high weightage to normals will result in smoother surface, but surface may be very different from
            what the input depthmap shows.
        depth -- stores the depth at each pixel of image
            None if mode is 'normals' in combine.py
    Output:
        constants for equation of type Ax = b
        A -- left-hand side coefficient of the Poisson equation
            note that A can be a very large but sparse matrix so csr_matrix is used to represent it.
        b -- right-hand side constant of the the Poisson equation
    """
    assert alpha.shape == (height, width)
    assert normals is None or normals.shape == (height, width, 3)
    assert depth is None or depth.shape == (height, width)
    '''
    Since A matrix is sparse, instead of filling matrix, we assign values to a non-zero elements only.
    For each non-zero element in matrix A, if A[i,j] = v, there should be some index k such that,
        row_ind[k] = i
        col_ind[k] = j
        data_arr[k] = v
    Fill these values accordingly
    '''
    row_ind = []
    col_ind = []
    data_arr = []
    '''
    For each row in the system of equation fill the appropriate value for vector b in that row
    '''
    b = []
    if depth_weight is None:
        depth_weight = 1
    '''
    TODO
    Create a system of linear equation to estimate depth using normals and crude depth Ax = b
    x is a vector of depths at each pixel in the image and will have shape (height*width)
    A: ( k, height)
    x: ( height, width, 3)
    b: ( k, width)
    If mode is 'depth':
        > Each row in A and b corresponds to an equation at a single pixel
        > For each pixel k,
            if pixel k has alpha value zero do not add any new equation.
            else, fill row in b with depth_weight*depth[k] and fill column k of the corresponding
                row in A with depth_weight.
        Justification:
            Since all the elements except k in a row is zero, this reduces to
                depth_weight*x[k] = depth_weight*depth[k]
            you may see that, solving this will give x with values exactly same as the depths,
            at pixels where alpha is non-zero, then why do we need 'depth_weight' in A and b?
            The answer to this will become clear when this will be reused in 'both' mode
    Note: The normals in image are +ve when they are along an +x,+y,-z axes, if seen from camera's viewpoint.
    If mode is 'normals':
        > Each row in A and b corresponds to an equation of relationship between adjacent pixels
        > For each pixel k and its immideate neighbour along x-axis l
            if any of the pixel k or pixel l has alpha value zero do not add any new equation.
            else, fill row in b with nx[k] (nx is x-component of normal), fill column k of the corresponding
                row in A with -nz[k] and column k+1 with value nz[k]
        > Repeat the above along the y-axis as well, except nx[k] should be -ny[k].
        Justification: Assuming the depth to be smooth and almost planar within one pixel width.
            The normal projected in xz-plane at pixel k is perpendicular to tangent of surface in xz-plane.
            In other word if n = (nx,ny,-nz), its projection in xz-plane is (nx,nz) and if tangent t = (tx,0,tz),
                then n.t = 0, therefore nx/-nz = -tz/tx
            Therefore the depth change with change of one pixel width along x axis should be proporational to tz/tx = -nx/nz
            In other words (depth[k+1]-depth[k])*nz[k] = nx[k]
            This is exactly what the equation above represents.
            The negative sign in ny[k] is because the indexing along the y-axis is opposite of +y direction.
    If mode is 'both':
        > Do both of the above steps.
        Justification: The depth will provide a crude estimate of the actual depth. The normals do the smoothing of depth map
        This is why 'depth_weight' was used above in 'depth' mode.
            If the 'depth_weight' is very large, we are going to give preference to input depth map.
            If the 'depth_weight' is close to zero, we are going to give preference normals.
    '''
    #TODO Block Begin
    #fill row_ind,col_ind,data_arr,b
    # rn counts equations (rows of A) as they are appended.
    rn = 0
    for row_i in range(height):
        for col_j in range(width):
            # k: flattened pixel index into the unknown depth vector x.
            k = row_i * width + col_j
            if alpha[row_i, col_j] != 0:
                if depth is not None:
                    # Data-fidelity equation: depth_weight * x[k] = depth_weight * depth[k].
                    b.append(depth_weight * depth[row_i, col_j])  # depth
                    row_ind.append(rn)  # depth
                    col_ind.append(k)  # depth
                    data_arr.append(depth_weight)  # depth
                    rn += 1
                if normals is not None:
                    # Smoothness equation vs. right neighbor (both pixels must be valid).
                    if col_j + 1 <= width - 1 and alpha[row_i, col_j + 1] != 0:
                        # normals x-axis: (x[k+1] - x[k]) * nz[k] = nx[k]
                        b.append(normals[row_i, col_j, 0])
                        row_ind.append(rn)
                        col_ind.append(k)
                        data_arr.append(-normals[row_i, col_j, 2])
                        row_ind.append(rn)
                        col_ind.append(k + 1)
                        data_arr.append(normals[row_i, col_j, 2])
                        rn += 1
                    # Smoothness equation vs. bottom neighbor.
                    if row_i + 1 <= height - 1 and alpha[row_i + 1, col_j] != 0:
                        # normals mode y-axis: (x[k+width] - x[k]) * nz[k] = -ny[k]
                        b.append(-normals[row_i, col_j, 1])
                        row_ind.append(rn)
                        col_ind.append(k)
                        data_arr.append(-normals[row_i, col_j, 2])
                        row_ind.append(rn)
                        col_ind.append(k + width)
                        data_arr.append(normals[row_i, col_j, 2])
                        rn += 1
    row = rn
    #TODO Block end
    # Convert all the lists to numpy array
    row_ind = np.array(row_ind, dtype=np.int32)
    col_ind = np.array(col_ind, dtype=np.int32)
    data_arr = np.array(data_arr, dtype=np.float32)
    b = np.array(b, dtype=np.float32)
    # Create a compressed sparse matrix from indices and values
    A = csr_matrix((data_arr, (row_ind, col_ind)), shape=(row, width * height))
    return A, b
def shortest_first_name(names):
    """Return the shortest first name (str) among the given full names."""
    cleaned = dedup_and_title_case_names(names)
    # Keep the (first, sur) split so malformed one-word names fail the
    # same way as before (IndexError).
    pairs = []
    for full_name in cleaned:
        parts = full_name.split()
        pairs.append((parts[0], parts[1]))
    # min() returns the first minimal entry, matching stable-sort + [0].
    return min(pairs, key=lambda pair: len(pair[0]))[0]
def timezone_validator(self, response):
    """Match timezone code in libraries/timezone.

    Arguments
    ---------
    response: "String containing current answer"

    Raises
    ------
    ValidationError: "Display a short description with available formats"

    Returns
    -------
    boolean: True
    """
    # BUGFIX: use a context manager so the file handle is closed even when
    # the validation fails (previously the handle leaked on every call).
    with open('libraries/timezone') as timezone_file:
        timezone_list = timezone_file.read()
    # An empty answer is rejected explicitly; otherwise the candidate must
    # appear as a full line of the timezone file.
    if ('{response}\n'.format(response=response) not in timezone_list) or \
            (response == ''):
        raise ValidationError('', reason=self.trad(
            'Invalid timezone: {response} (e.q., Europe/Paris)').format(
            response=response))
    return True
def reset_clb_srv(req):
    """
    ROS service callback that resets the planner's reference pose.

    Latches the current odometry position as the new reference pose and
    zeroes the accumulated global offset.

    :param req: std_srvs/Trigger request (carries no payload, unused)
    :return: TriggerResponse with success=True and a status message
    """
    # Module-level state shared with the rest of this node.
    global reset_pose, pose_msgs , goal_sub, global_offset
    rospy.loginfo_once("reset pose")
    resp = TriggerResponse()
    resp.success = True
    resp.message = "Reset pose: True"
    # Capture the most recent reported position as the new origin.
    reset_pose = pose_msgs.pose.pose.position
    # NOTE: Python 2 print statement -- this node targets ROS 1 / Python 2.
    print "reset position:", reset_pose
    # Clear the accumulated offset so planning restarts from the new pose.
    global_offset.linear.x = 0.
    global_offset.linear.y = 0.
    global_offset.linear.z = 0.
    return resp
import tkinter
def Calcola():
    """Calcolate point of task!

    Interactively prompts for three integers -- deadline (days), severity
    (1-10) and difficulty (1-10) -- and returns the task score computed as
    250 - deadline - severity - difficulty.
    """
    # NOTE(review): `mb` (presumably tkinter.messagebox) is not imported in
    # this block -- it must be provided elsewhere in the module; verify.
    def _show_error(message):
        # Pop a transient error dialog without leaving an empty Tk window.
        root = tkinter.Tk()
        root.withdraw()
        mb.showerror("errore", message)
        root.destroy()

    esci = False
    while esci is False:
        try:
            # --- deadline: any integer number of days ---
            check = False
            while check is False:
                DeadLine = input("inserisci la deadline in giorni ")
                try:
                    DeadLine = int(DeadLine)
                    check = True
                except ValueError:
                    _show_error("errore non è stato inserito un numero")
            # --- severity level, must be in 1..10 ---
            # BUGFIX: `check` must be reset here. Previously it was still
            # True after the first loop, so this loop and the next were
            # skipped entirely and the outer loop re-asked the deadline
            # forever.
            check = False
            while check is False:
                livello = input("Livello(da 1-10 secondo la gravità del problema) ")
                try:
                    livello = int(livello)
                    if livello < 1 or livello > 10:
                        _show_error("errore il valore inserito non è valido")
                    else:
                        check = True
                except ValueError:
                    _show_error("errore non è stato inserito un numero")
            # --- estimated difficulty, must be in 1..10 ---
            check = False
            while check is False:
                difficolta = input("inserire il livello di difficolta stimato da 1-10 ")
                try:
                    difficolta = int(difficolta)
                    if difficolta < 1 or difficolta > 10:
                        _show_error("errore il valore inserito non è valido")
                    else:
                        check = True
                        esci = True
                except ValueError:
                    _show_error("errore non è stato inserito un numero")
        except KeyboardInterrupt:
            # Quitting is not allowed; restart the whole prompt sequence.
            _show_error("non puoi uscire")
    punteggio = 250 - DeadLine - livello - difficolta
    return punteggio
import six
def disabled(name):
    """
    Ensure an Apache module is disabled.
    .. versionadded:: 2016.3.0
    name
        Name of the Apache module
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if not __salt__["apache.check_mod_enabled"](name):
        # Nothing to do: the module is not currently enabled.
        ret["comment"] = "{0} already disabled.".format(name)
        return ret
    if __opts__["test"]:
        # Dry run: report the pending change without applying it.
        ret["comment"] = "Apache module {0} is set to be disabled.".format(name)
        ret["changes"]["old"] = name
        ret["changes"]["new"] = None
        ret["result"] = None
        return ret
    # Actually disable the module and interpret a2dismod's status string.
    status = __salt__["apache.a2dismod"](name)["Status"]
    if isinstance(status, six.string_types) and "disabled" in status:
        ret["result"] = True
        ret["changes"]["old"] = name
        ret["changes"]["new"] = None
    else:
        ret["result"] = False
        ret["comment"] = "Failed to disable {0} Apache module".format(name)
        if isinstance(status, six.string_types):
            ret["comment"] = ret["comment"] + " ({0})".format(status)
    return ret
def get_stops_in_polygon(feed, polygon, geo_stops=None):
    """
    Return the slice of ``feed.stops`` that contains all stops that lie
    within the given Shapely Polygon object that is specified in
    WGS84 coordinates.

    Parameters
    ----------
    feed : Feed
    polygon : Shapely Polygon
        Specified in WGS84 coordinates
    geo_stops : Geopandas GeoDataFrame
        A geographic version of ``feed.stops`` which will be computed
        if not given.
        Specify this parameter in batch jobs to avoid unnecessary
        computation.

    Returns
    -------
    DataFrame
        Subset of ``feed.stops``

    Notes
    -----
    - Requires GeoPandas
    - Assume the following feed attributes are not ``None``:
      * ``feed.stops``, if ``geo_stops`` is not given
    """
    # Reuse the caller-supplied geometrized stops when available.
    if geo_stops is not None:
        geo = geo_stops.copy()
    else:
        geo = geometrize_stops(feed.stops)
    original_cols = geo.columns
    # Flag stops inside the polygon, then drop the helper column again.
    geo['hit'] = geo['geometry'].within(polygon)
    inside = geo.loc[geo['hit'], original_cols]
    return ungeometrize_stops(inside)
def define_stkvar(*args):
    """
    define_stkvar(pfn, name, off, flags, ti, nbytes) -> bool
    Define/redefine a stack variable.
    @param pfn: pointer to function (C++: func_t *)
    @param name: variable name, NULL means autogenerate a name (C++: const
                 char *)
    @param off: offset of the stack variable in the frame. negative values
                denote local variables, positive - function arguments.
                (C++: sval_t)
    @param flags: variable type flags ( byte_flag() for a byte variable,
                  for example) (C++: flags_t)
    @param ti: additional type information (like offsets, structs, etc)
               (C++: const opinfo_t *)
    @param nbytes: number of bytes occupied by the variable (C++: asize_t)
    @return: success
    """
    # Thin SWIG wrapper: forwards all arguments to the IDA SDK binding.
    return _ida_frame.define_stkvar(*args)
def spherical_polar_area(r, lon, lat):
    """Calculates the area bounding an array of latitude and longitude points.

    Parameters
    ----------
    r : float
        Radius of sphere.
    lon : 1d array
        Longitude points. [Degrees]
    lat : 1d array
        Latitude points. [Degrees]

    Returns
    -------
    areas : 2d array
        Solid-angle element per (lon, lat) grid point, scaled by r**2.
    """
    def _spacings(coord):
        # Centred differences in the interior, one-sided at both ends.
        inner = (coord[2:] - coord[:-2]) / 2.0
        return np.hstack((coord[1] - coord[0], inner, coord[-1] - coord[-2]))

    dlon = np.deg2rad(_spacings(lon))
    dlat = np.deg2rad(_spacings(lat))
    grid_dlon, grid_dlat = np.meshgrid(dlon, dlat)
    # Solid angle element: dlon * dlat * cos(lat), broadcast over latitude.
    solid_angle = grid_dlon.T * grid_dlat.T * np.cos(np.deg2rad(lat))
    return solid_angle.T * r ** 2
def row_plays(hand, row):
    """Return the set of legal plays in the specified row.

    A row play is a (start, 'WORD') pair.
    """
    results = set()
    # Scan every anchor square (board edges excluded); for each, extend all
    # legal prefixes with legal suffixes, collecting valid words in results.
    for (pos, square) in enumerate(row[1:-1], start=1):
        if not isinstance(square, Anchor):
            continue
        prefix, max_size = legal_prefix(pos, row)
        if prefix:
            # Letters already on the board to the left fix the prefix.
            add_suffixes(hand, prefix, pos - len(prefix), row, results,
                         anchored=False)
        else:
            # Board is empty to the left: try every prefix formable from
            # the hand that still fits before the anchor.
            for candidate in find_prefixes(hand):
                if len(candidate) <= max_size:
                    add_suffixes(removed(hand, candidate), candidate,
                                 pos - len(candidate), row, results,
                                 anchored=False)
    return results
def nearest_pillar(grid, xy, ref_k0 = 0, kp = 0):
    """Returns the (j0, i0) indices of the primary pillar with point closest in x,y plane to point xy."""
    # note: currently works with unmasked data and using primary pillars only
    pillars_per_row = grid.extent_kji[2] + 1
    distances = grid.pillar_distances_sqr(xy, ref_k0 = ref_k0, kp = kp)
    # Flat index of the minimum distance, converted back to (j, i).
    flat_index = np.nanargmin(distances)
    return divmod(flat_index, pillars_per_row)
def read_state(file, statename):
    """ read and select state from file

    Args:
        file (str): path to state shapefile
        statename (str): name of state (i.e. California)

    Returns:
        GeoDataFrame with only the rows whose STATE_NAME equals statename.
    """
    # BUGFIX: the `file` argument was previously ignored and the hard-coded
    # path "../data/states.shp" was read instead.
    states = gpd.read_file(file)
    return states[states['STATE_NAME'] == statename]
def get_random_state(seed):
"""
Get a random number from the whole range of large integer values.
"""
np.random.seed(seed)
return np.random.randint(MAX_INT) | abf99c5547d146bfc9e6e1d33e7970d7090ba6d2 | 32,846 |
def get_start_and_end_time(file_or_file_object):
    """
    Returns the start and end time of a MiniSEED file or file-like object.
    :type file_or_file_object: str or file
    :param file_or_file_object: MiniSEED file name or open file-like object
        containing a MiniSEED record.
    :return: tuple (start time of first record, end time of last record)
    This method will return the start time of the first record and the end time
    of the last record. Keep in mind that it will not return the correct result
    if the records in the MiniSEED file do not have a chronological ordering.
    The returned end time is the time of the last data sample and not the
    time that the last sample covers.
    .. rubric:: Example
    >>> from obspy.core.util import get_example_file
    >>> filename = get_example_file(
    ...     "BW.BGLD.__.EHE.D.2008.001.first_10_records")
    >>> get_start_and_end_time(filename)  # doctest: +NORMALIZE_WHITESPACE
        (UTCDateTime(2007, 12, 31, 23, 59, 59, 915000),
         UTCDateTime(2008, 1, 1, 0, 0, 20, 510000))
    It also works with an open file pointer. The file pointer itself will not
    be changed.
    >>> f = open(filename, 'rb')
    >>> get_start_and_end_time(f)  # doctest: +NORMALIZE_WHITESPACE
        (UTCDateTime(2007, 12, 31, 23, 59, 59, 915000),
         UTCDateTime(2008, 1, 1, 0, 0, 20, 510000))
    And also with a MiniSEED file stored in a BytesIO
    >>> import io
    >>> file_object = io.BytesIO(f.read())
    >>> get_start_and_end_time(file_object)  # doctest: +NORMALIZE_WHITESPACE
        (UTCDateTime(2007, 12, 31, 23, 59, 59, 915000),
         UTCDateTime(2008, 1, 1, 0, 0, 20, 510000))
    >>> file_object.close()
    If the file pointer does not point to the first record, the start time will
    refer to the record it points to.
    >>> _ = f.seek(512)
    >>> get_start_and_end_time(f)  # doctest: +NORMALIZE_WHITESPACE
        (UTCDateTime(2008, 1, 1, 0, 0, 1, 975000),
         UTCDateTime(2008, 1, 1, 0, 0, 20, 510000))
    The same is valid for a file-like object.
    >>> file_object = io.BytesIO(f.read())
    >>> get_start_and_end_time(file_object)  # doctest: +NORMALIZE_WHITESPACE
        (UTCDateTime(2008, 1, 1, 0, 0, 1, 975000),
         UTCDateTime(2008, 1, 1, 0, 0, 20, 510000))
    >>> f.close()
    """
    # Start time comes from the first record at the current position.
    first_record = get_record_information(file_or_file_object)
    # The last record sits (number_of_records - 1) record lengths further in.
    last_record_offset = \
        (first_record['number_of_records'] - 1) * first_record['record_length']
    last_record = get_record_information(file_or_file_object,
                                         last_record_offset)
    return first_record['starttime'], last_record['endtime']
from typing import Optional
import json
import asyncio
async def send_message(msg: str) -> Optional[str]:
    """
    Send a message to the websocket and return the response

    Args:
        msg: The message to be sent

    Returns:
        The response message, or None if no reply arrived within 3 seconds
    """
    # WS is a module-level websocket connection managed by connect_ws().
    if not (WS and WS.open):
        print('Reconnecting websocket')
        await connect_ws()
    print(f'Sending message {msg}')
    # The server expects a JSON object with a 'text' field.
    data = {
        'text': msg
    }
    json_data = json.dumps(data)
    await WS.send(json_data)
    try:
        # Give the server at most 3 seconds to answer.
        resp = json.loads(await asyncio.wait_for(WS.recv(), 3))
    except asyncio.TimeoutError:
        # Implicitly returns None on timeout.
        return
    new_message = resp['text']
    return new_message
def _getname(storefile):
    """returns the filename"""
    # Guard-clause form of the original nested conditionals.
    if storefile is None:
        raise ValueError("This method cannot magically produce a filename when given None as input.")
    if isinstance(storefile, basestring):
        return storefile
    if hasattr(storefile, "name"):
        return storefile.name
    # File-like object without a name attribute: synthesize one.
    return _getdummyname(storefile)
def flow_lines(sol, nlines, time_length, scale=0.5):
    """
    compute the flow lines of the solution
    Parameters
    ----------
    sol : :py:class:`Simulation<pylbm.simulation.Simulation>`
        the solution given by pylbm
    nlines : int (number of flow lines)
    time_length : double (time during which we follow the lines)
    scale : double (velocity scale (default 0.5))
    Returns
    -------
    list
        lines
    """
    # Velocity field: momentum / density (QX, QY, RHO are module-level
    # moment indices defined elsewhere in this file).
    u_x = sol.m[QX] / sol.m[RHO]
    u_y = sol.m[QY] / sol.m[RHO]
    # if scale is None:
    #     scale = max(np.linalg.norm(u_x, np.inf), np.linalg.norm(u_y, np.inf))
    lines = []
    xmin, xmax = sol.domain.geom.bounds[0]
    ymin, ymax = sol.domain.geom.bounds[1]
    dx = sol.domain.dx
    nx, ny = sol.domain.shape_in
    for _ in range(nlines):
        # begin a new line at a uniformly random seed point in the domain
        cont = True  # boolean to continue the line
        x = xmin + (xmax-xmin) * np.random.rand()
        y = ymin + (ymax-ymin) * np.random.rand()
        line_x, line_y = [x], [y]
        t = 0
        while cont:
            # locate the grid cell containing the current point
            i, j = int((x-xmin)/(xmax-xmin)*nx), int((y-ymin)/(ymax-ymin)*ny)
            uxij, uyij = u_x[i, j], u_y[i, j]
            if uxij == 0 and uyij == 0:
                # stagnation point: the line cannot advance further
                cont = False
            else:
                # step size chosen so each segment has length ~ dx*scale
                dt = dx*scale / np.sqrt(uxij**2+uyij**2)
                x += uxij*dt
                y += uyij*dt
                t += dt
                if x < xmin or x >= xmax or y < ymin or y >= ymax:
                    # left the domain: stop without recording this point
                    cont = False
                else:
                    line_x.append(x)
                    line_y.append(y)
                if t >= time_length:
                    cont = False
        lines.append([np.array(line_x), np.array(line_y)])
    return lines
def minOperations(n):
    """Return the sum of the prime factors of ``n`` (with multiplicity).

    Non-int input or n <= 1 yields 0.
    """
    if type(n) != int or n <= 1:
        return 0
    res = 0
    i = 2
    # Strip prime factors in increasing order; each factor i contributes i.
    while n > 1:
        if n % i == 0:
            res += i
            n //= i  # BUGFIX: was `n /= i`, which degrades n to float in Py3
        else:
            i += 1
    return res
def matrix2dictionary(matrix):
    """
    convert matrix to dictionary of comparisons
    """
    pairwise = {}
    for raw_line in matrix:
        fields = raw_line.strip().split('\t')
        if fields[0].startswith('#'):
            # Header row: remaining fields are the column names.
            column_names = fields[1:]
            continue
        row_name = fields[0]
        for col_idx, cell in enumerate(fields[1:]):
            col_name = column_names[col_idx]
            pairwise.setdefault(row_name, {})
            pairwise.setdefault(col_name, {})
            # '-' marks a missing comparison; store everything else
            # symmetrically in both directions.
            if cell != '-':
                value = float(cell)
                pairwise[row_name][col_name] = value
                pairwise[col_name][row_name] = value
    return pairwise
def combine_fastq_output_files(files_to_combine, out_prefix, remove_temp_output):
    """ Combines fastq output created by BMTagger/bowtie2 on multiple databases and
    returns a list of output files. Also updates the log file with read counts for the
    input and output files.
    """
    # Log the read counts for every per-database output file.
    utilities.log_read_count_for_files(files_to_combine,"Total reads after removing those found in reference database")
    # Split the per-database outputs into pair-1 and (optional) pair-2 lists.
    files_for_pair1 = [f[0] for f in files_to_combine]
    try:
        files_for_pair2 = [f[1] for f in files_to_combine]
    except IndexError:
        files_for_pair2 = []
    # Unpaired runs use the bare prefix; paired runs get _1/_2 suffixes.
    if files_for_pair2:
        pair1_output = out_prefix + "_1" + config.fastq_file_extension
    else:
        pair1_output = out_prefix + config.fastq_file_extension
    # Intersect all pair-1 outputs into a single merged file.
    intersect_fastq(files_for_pair1, pair1_output, remove_temp_output)
    output_files = [pair1_output]
    # Same for pair-2 when the run is paired.
    if files_for_pair2:
        pair2_output = out_prefix + "_2" + config.fastq_file_extension
        intersect_fastq(files_for_pair2, pair2_output, remove_temp_output)
        output_files.append(pair2_output)
    # Log the read counts for the merged outputs.
    utilities.log_read_count_for_files(output_files,"Total reads after merging results from multiple databases")
    # Clean up the per-database intermediates when requested.
    if remove_temp_output:
        for temp_file in files_for_pair1 + files_for_pair2:
            utilities.remove_file(temp_file)
    return output_files
import json
def deliver_dap():
    """
    Endpoint for submissions only intended for DAP. POST request requires the submission JSON to be uploaded
    as "submission" and the filename passed in the query parameters.
    """
    logger.info('Processing DAP submission')
    # The filename arrives as a query-string parameter, not in the body.
    filename = request.args.get("filename")
    meta = MetaWrapper(filename)
    files = request.files
    # SUBMISSION_FILE is the expected multipart field name for the payload.
    submission_bytes = files[SUBMISSION_FILE].read()
    survey_dict = json.loads(submission_bytes.decode())
    # The raw bytes are forwarded unchanged; the parsed dict is only used
    # to populate the metadata wrapper.
    data_bytes = submission_bytes
    meta.set_dap(survey_dict, data_bytes)
    return process(meta, data_bytes)
def api_run_delete(run_id):
    """Delete the given run and corresponding entities."""
    storage = current_app.config["data"]  # type: DataStorage
    facade = RunFacade(storage)
    facade.delete_run(run_id)
    return "DELETED run %s" % run_id
import numpy
def gfalternate_createdataandstatsdict(ldt_tower,data_tower,attr_tower,alternate_info):
    """
    Purpose:
     Build the data and statistics dictionaries used while gap filling a
     tower series from alternate data sources.
    Usage:
    Side effects:
    Called by:
    Calls:
    Author: PRI
    Date: May 2015
    """
    tower_label = alternate_info["label_tower"]
    composite_label = alternate_info["label_composite"]
    date_range = {"startdate": alternate_info["startdate"],
                  "enddate": alternate_info["enddate"]}
    # The composite series starts fully masked and is filled in later.
    data_dict = {
        "DateTime": {"data": ldt_tower},
        tower_label: {"attr": attr_tower,
                      "output_list": [tower_label, composite_label],
                      "data": data_tower},
        composite_label: {"data": numpy.ma.masked_all_like(data_tower),
                          "fitcorr": numpy.ma.masked_all_like(data_tower),
                          "attr": attr_tower},
    }
    # Each output gets its own (independent) copy of the date range.
    stat_dict = {tower_label: dict(date_range),
                 composite_label: dict(date_range)}
    return data_dict, stat_dict
import re
def doGeneMapping(model):
    """
    Function that maps enzymes and genes to reactions
    This function works only if the GPR associations are defined
    as follows: (g1 and g2 and g6) or ((g3 or g10) and g12)
    - *model* Pysces model
    - *GPRdict* dictionary with (iso)enzymes mapped to reactions
    - *SubUdict* dictionary with genes mapped to isoenzymes
    """
    # NOTE: this module targets Python 2 (see the print statement below).
    def unique_list(seq):
        """Function to remove duplicates (preserving order)"""
        seen = set()
        seen_add = seen.add
        return [x for x in seq if not (x in seen or seen_add(x))]
    # Get GPR associations for reactions (strings) and
    # and split according to keywords 'and', 'or'
    GPRdict = {}
    model.createGeneAssociationsFromAnnotations()
    reactions = model.getReactionIds()
    SubUdict = {}
    no_associations = 0
    for r_ in reactions:
        try:
            ass = model.getReaction(r_).getAnnotations()
            # The annotation key for GPR associations is not standardised;
            # try the known spellings in turn.
            if 'GENE ASSOCIATION' in ass:
                g_ = ass['GENE ASSOCIATION']
            elif 'GENE_ASSOCIATION' in ass:
                g_ = ass['GENE_ASSOCIATION']
            elif 'gene_association' in ass:
                g_ = ass['gene_association']
            elif 'gene association' in ass:
                g_ = ass['gene association']
            if g_ != 'None' and g_ != '' :
                # Enzymes
                # Split the association on the top-level 'or' between
                # parenthesised groups: each group is one (iso)enzyme.
                # g_ = g_.split(') or (')
                g_ = re.split(r'\)\s+or\s+\(|\)\s+or\s+|\s+or\s+\(',g_)
                S_list = []
                for enzyme in g_:
                    enzyme = enzyme.replace(')','')
                    enzyme = enzyme.replace('(','')
                    # Isoenzymes
                    # Keep any remaining inner 'or' fused with underscores so
                    # the alternatives can be expanded into SubUdict below.
                    enzyme = enzyme.replace(' or ','_or_')
                    # Subunits
                    subunits = enzyme.split(' and ')
                    subunits_mod = []
                    for s in subunits:
                        # remove extra space
                        tmp = s.replace(' ','')
                        # replace possible dashes
                        tmp = tmp.replace('-','_')
                        # add gene prefix
                        tmp = 'GENE_' + tmp
                        subunits_mod.append(tmp)
                    S_list.append(subunits_mod)
                # Dictionary for isoenzymes
                for enzymes in S_list:
                    for gene in enzymes:
                        gene = gene.replace(' ','')
                        # NOTE(review): the substring test also matches gene
                        # names that merely contain "or" -- confirm intended.
                        if 'or' in gene:
                            # SubUdict[gene] = gene.split('_or_')
                            SubUdict[gene] = unique_list(gene.split('_or_'))
                # GPRdict[r_] = S_list
                GPRdict[r_] = [unique_list(s) for s in S_list]
        except:
            # Reactions without usable annotations are counted and skipped.
            no_associations+=1
    print '{} of {} reactions have no GPR Associations' .format(no_associations,len(reactions))
    # print GPRdict
    # print SubUdict
    # raw_input()
    return GPRdict, SubUdict
def delay_to_midnight():
    """Calculates the delay between the current time and midnight"""
    seconds_now = time_conversions.hhmm_to_seconds(get_current_time())
    seconds_at_midnight = time_conversions.hhmm_to_seconds("24:00")
    return seconds_at_midnight - seconds_now
def predict(model, imgs):
    """
    Predict the labels of a set of images using the VGG16 model.

    Args:
        imgs (ndarray) : An array of N images (size: N x width x height x channels).

    Returns:
        preds (list)     : Highest confidence value of the predictions for each image.
        idxs (np.ndarray): Class index of the predictions with the max confidence.
        classes (list)   : Class labels of the predictions with the max confidence.
    """
    # Class probabilities for every image, shape (N, num_classes).
    probabilities = model.predict(imgs)
    # Index of the most probable class per image.
    idxs = np.argmax(probabilities, axis=1)
    # Matching confidence value and human-readable label per image.
    preds = [probabilities[row, best] for row, best in enumerate(idxs)]
    classes = [model.classes[best] for best in idxs]
    return preds, idxs, classes
def get_last_query_records_count(connection: psycopg2_connection):
    """
    Returns the number of rows that were loaded by the last COPY command run in the current session.

    Returns None (implicitly) when the query yields no result set.
    """
    # TODO: handle array extraction of rows num , handle NONE
    result_set = redshift_query(connection, COPY_ROWS_COUNT_QUERY)
    if result_set:
        # First column of the first row holds the row count.
        return result_set[0][0]
    # NOTE(review): falls through to an implicit None when the result set is
    # empty/falsy -- callers must handle that case (see TODO above).
def _endmsg(rd) -> str:
    """
    Returns an end message with elapsed time

    ``rd`` is a relativedelta-like object with hours/minutes/seconds/
    microseconds attributes.
    """
    parts = []
    if rd.hours > 0:
        plural = "s" if rd.hours > 1 else ""
        parts.append(colors.bold(str(rd.hours)) + " hour" + plural + " ")
    if rd.minutes > 0:
        plural = "s" if rd.minutes > 1 else ""
        parts.append(colors.bold(str(rd.minutes)) + " minute" + plural + " ")
    milliseconds = int(rd.microseconds / 1000)
    if milliseconds > 0:
        parts.append(colors.bold(str(rd.seconds) + "." + str(milliseconds)))
    # " seconds" is always appended, matching the original behaviour even
    # when no numeric seconds value was emitted.
    parts.append(" seconds")
    return "".join(parts)
import bz2
import zlib
import lzma
import functools
def decompress(fcn):
    """Decorator that decompresses returned data.

    libmagic is used to identify the MIME type of the data and the function
    will keep decompressing until no supported compression format is identified.
    """
    @functools.wraps(fcn)  # BUGFIX: preserve the wrapped function's metadata
    def wrapper(cls, raw=False, *args, **kw):
        data = fcn(cls)
        if raw:
            # return raw data without decompressing
            return data
        mime_type, mime_subtype = magic.from_buffer(data, mime=True).split('/')
        # Peel off nested compression layers until none is recognised.
        while mime_subtype in ('x-bzip2', 'x-bzip', 'bzip', 'x-gzip', 'gzip', 'x-xz'):
            if mime_subtype in ('x-bzip2', 'x-bzip', 'bzip'):
                data = bz2.decompress(data)
            elif mime_subtype in ('x-gzip', 'gzip'):
                # 16 + MAX_WBITS tells zlib to expect a gzip header.
                data = zlib.decompress(data, 16 + zlib.MAX_WBITS)
            elif mime_subtype in ('x-xz',):  # BUGFIX: was a string, not a tuple
                data = lzma.decompress(data)
            mime_type, mime_subtype = magic.from_buffer(data, mime=True).split('/')
        return data
    return wrapper
def get_satellite_params(platform=None):
    """
    Helper function to generate Landsat or Sentinel query information
    for quick use during NRT cube creation or sync only.

    Parameters
    ----------
    platform: str
        Name of a satellite platform, Landsat or Sentinel only.

    Returns
    -------
    params : dict
        Dictionary with 'collections', 'bands' and 'resolution' keys.
    """
    # Validate the platform name up front (case-insensitive).
    if platform is None:
        raise ValueError('Must provide a platform name.')
    if platform.lower() not in ['landsat', 'sentinel']:
        raise ValueError('Platform must be Landsat or Sentinel.')

    if platform.lower() == 'landsat':
        return {
            'collections': [
                'ga_ls5t_ard_3',
                'ga_ls7e_ard_3',
                'ga_ls8c_ard_3'],
            'bands': [
                'nbart_red',
                'nbart_green',
                'nbart_blue',
                'nbart_nir',
                'nbart_swir_1',
                'nbart_swir_2',
                'oa_fmask'],
            'resolution': 30}

    # Sentinel-2 A/B granules.
    return {
        'collections': [
            's2a_ard_granule',
            's2b_ard_granule'],
        'bands': [
            'nbart_red',
            'nbart_green',
            'nbart_blue',
            'nbart_nir_1',
            'nbart_swir_2',
            'nbart_swir_3',
            'fmask'],
        'resolution': 10}
import random
import secrets
def generate_random_token(length = 64):
    """ Generates a random token of specified length.

    Uses the ``secrets`` module so tokens are suitable for security-sensitive
    purposes (SECURITY FIX: the previous implementation used the predictable
    ``random`` PRNG, which must not be used for tokens).

    :param length: number of hexadecimal characters in the token
    :return: lowercase hexadecimal string of exactly ``length`` characters
    """
    hexval = "%0{}x".format(length)
    return hexval % secrets.randbelow(16 ** length)
import os
import http
async def process_file(path, request_headers):
    """Serves a file when doing a GET request with a valid path.

    Returns None for WebSocket upgrade requests (so the websocket handler
    takes over), a 404 triple for invalid or missing paths, and a
    (status, headers, body) triple for regular files.
    """
    server_root = "/opt/vosk-server/websocket/web"
    MIME_TYPES = {
        "html": "text/html",
        "js": "text/javascript",
        "css": "text/css"
    }
    if "Upgrade" in request_headers:
        return  # Probably a WebSocket connection
    if path == '/':
        path = '/index.html'
    response_headers = [
        ('Server', 'asyncio websocket server'),
        ('Connection', 'close'),
    ]
    # Derive full system path
    full_path = os.path.realpath(os.path.join(server_root, path[1:]))
    # Validate the path: must stay inside the web root and be a real file
    # (the commonpath check blocks "../" traversal).
    if os.path.commonpath((server_root, full_path)) != server_root or \
            not os.path.exists(full_path) or not os.path.isfile(full_path):
        print("HTTP GET {} 404 NOT FOUND".format(full_path))
        return http.HTTPStatus.NOT_FOUND, [], b'404 NOT FOUND'
    # Guess file content type from the extension
    extension = full_path.split(".")[-1]
    mime_type = MIME_TYPES.get(extension, "application/octet-stream")
    response_headers.append(('Content-Type', mime_type))
    # Read the whole file into memory and send it out.
    # BUGFIX: use a context manager so the handle is closed (it leaked).
    with open(full_path, 'rb') as body_file:
        body = body_file.read()
    response_headers.append(('Content-Length', str(len(body))))
    print("HTTP GET {} 200 OK".format(path))
    return http.HTTPStatus.OK, response_headers, body
from typing import Optional
from typing import Union
from typing import Tuple
def permute_sse_metric(
        name: str,
        ref: np.ndarray,
        est: np.ndarray,
        compute_permutation: bool = False,
        fs: Optional[int] = None) -> Union[float, Tuple[float, list]]:
    """
    Computation of SiSNR/PESQ/STOI in permutation/non-permutation mode

    Args:
        name: metric name
        ref: array, reference signal (N x S or S, ground truth)
        est: array, enhanced/separated signal (N x S or S)
        compute_permutation: return permutation order or not
        fs: sample rate of the audio
    """
    # These three metrics share the generic _permute_eval driver.
    simple_metrics = {"sisnr": aps_sisnr, "pesq": aps_pesq, "stoi": aps_stoi}
    if name in simple_metrics:
        return _permute_eval(simple_metrics[name],
                             ref,
                             est,
                             compute_permutation=compute_permutation,
                             fs=fs)
    if name == "sdr":
        # bss_eval_images expects (sources x samples x channels).
        if ref.ndim == 1:
            ref, est = ref[None, :], est[None, :]
        sdr, _, _, _, ali = bss_eval_images(ref[..., None],
                                            est[..., None],
                                            compute_permutation=True)
        if compute_permutation:
            return sdr.mean(), ali[:, 0].tolist()
        else:
            return sdr[0, 0]
    raise ValueError(f"Unknown name of the metric: {name}")
import os
def get_tids_from_directory(audio_dir):
    """Get track IDs from the mp3s in a directory.

    Parameters
    ----------
    audio_dir : str
        Path to the directory where the audio files are stored.

    Returns
    -------
    A list of track IDs.
    """
    track_ids = []
    for _, subdirs, filenames in os.walk(audio_dir):
        # Only leaf directories (those without subdirectories) hold audio.
        if not subdirs:
            # Filenames look like "<tid>.mp3"; strip the 4-char extension.
            track_ids.extend(int(name[:-4]) for name in filenames)
    return track_ids
from datetime import datetime
import os
def coverage_qc_report(institute_id, case_name):
    """Display coverage and qc report.

    Serves the case's coverage/QC report as HTML, or renders it to PDF when
    the request carries ``format=pdf``.
    """
    _, case_obj = institute_and_case(store, institute_id, case_name)
    data = controllers.multiqc(store, institute_id, case_name)
    if data["case"].get("coverage_qc_report") is None:
        return abort(404)
    coverage_qc_report = data["case"]["coverage_qc_report"]
    report_format = request.args.get("format", "html")
    if report_format == "pdf":
        try:  # file could not be available
            # BUGFIX: close the file handle (it previously leaked).
            with open(coverage_qc_report, "r") as html_file:
                source_code = html_file.read()
            return render_pdf(
                HTML(string=source_code),
                download_filename=case_obj["display_name"]
                + "_"
                # BUGFIX: was datetime.datetime.now(), which raises
                # AttributeError under `from datetime import datetime`,
                # silently breaking every PDF download via the except below.
                + datetime.now().strftime("%Y-%m-%d")
                + "_coverage_qc_report.pdf",
            )
        except Exception as ex:
            flash(
                "An error occurred while downloading delivery report {} -- {}".format(
                    coverage_qc_report, ex
                ),
                "warning",
            )
    # Default (and PDF-failure fallback): serve the HTML file directly.
    out_dir = os.path.dirname(coverage_qc_report)
    filename = os.path.basename(coverage_qc_report)
    return send_from_directory(out_dir, filename)
import os
def get(name, decode=True):
    """
    Get a resource from the trimesh/resources folder.

    Parameters
    -------------
    name : str
      File path relative to `trimesh/resources`
    decode : bool
      Whether or not to decode result as UTF-8

    Returns
    -------------
    resource : str or bytes
      File data
    """
    # Resources are cached per (name, decode-flag) combination.
    cache_key = (name, bool(decode))
    if cache_key in _cache:
        return _cache[cache_key]
    # Load the raw bytes relative to this module's directory (_pwd).
    with open(os.path.join(_pwd, name), 'rb') as file_obj:
        resource = file_obj.read()
    # Decode to text when requested and the payload supports it.
    if decode and hasattr(resource, 'decode'):
        resource = resource.decode('utf-8')
    _cache[cache_key] = resource
    return resource
from typing import Literal
def test_if() -> None:
    """if-elif-else."""
    PositiveOrNegative = Literal[-1, 0, 1]

    def positive_negative(number: int) -> PositiveOrNegative:
        """Return -1 for negative numbers, 1 for positive numbers, and 0 for 0."""
        # Demonstrates the classic if/elif/else chain with a result variable.
        sign: PositiveOrNegative
        if number < 0:
            sign = -1
        elif number == 0:
            sign = 0
        else:
            sign = 1
        return sign

    assert positive_negative(100) == 1
    assert positive_negative(0) == 0
    assert positive_negative(-99) == -1
def ba2str(ba):
    """Convert Bluetooth address to string"""
    # Bytes are stored little-endian; render most-significant first,
    # as colon-separated uppercase hex pairs.
    octets = ['{:02X}'.format(octet) for octet in reversed(ba.b)]
    return ':'.join(octets)
import numpy as np
from accessory import get_iterable
import logging
import sys
def calc_variance(img, omit=[]):
    """
    calculate variance of pixel values in image

    NOTE(review): despite the name/docstring, this returns np.std (the
    standard deviation), not the variance -- confirm which is intended
    before changing, as callers may rely on the current value.

    :param img: 2D array of pixel values
    :param omit: pixel values to omit from calculation (mutable default is
        never mutated here, so it is harmless)
    :return: variance
    """
    # Reject RGB/multi-channel input; library code calling sys.exit() is
    # unusual but preserved here.
    if np.ndim(img) > 2:
        msg = 'ERROR [calc_variance] Input image must be 2D grayscale (not ' \
            'RGB).'
        logging.error(msg)
        print(msg)
        sys.exit()
    # collapse image into array
    pixels = np.ravel(np.array(img))
    pixels = pixels.astype('float')
    # omit specified pixel values from variance calculation
    # (matching pixels are set to NaN and then dropped)
    for omit_idx in get_iterable(omit):
        pixels[pixels == omit_idx] = np.nan
    pixels = pixels[np.isfinite(pixels)]
    variance = np.std(pixels)
    return variance
import requests
def delete_user(client: Client, user_id: str) -> bool:
    """Deletes disabled user account via the `/users/{user_id}` endpoint.

    :param client: Client object
    :param user_id: The ID of the user account
    :return: `True` if succeeded, `False` otherwise
    """
    # The API requires the user's current version (optimistic locking).
    version = get_user_info(client, user_id)['version']
    response = requests.delete(
        f'{client.base_url}/api/2/users/{user_id}',
        headers=client.auth_header,
        params={'version': version},
    )
    handle_error_response(response)
    # 204 No Content signals a successful deletion.
    return response.status_code == 204
import requests
def get_token(corp_id: str, corp_secret: str):
    """Fetch a WeChat Work (WeCom) access_token for the given corp credentials.

    API reference: https://open.work.weixin.qq.com/api/doc/90000/90135/91039
    """
    url = (
        'https://qyapi.weixin.qq.com/cgi-bin/gettoken'
        f'?corpid={corp_id}&corpsecret={corp_secret}'
    )
    response = requests.get(url)
    return response.json().get('access_token')
def _update_run_op(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, decay_flag, optim_filter):
    """
    Update parameters with a LAMB-style layer-wise adaptive rule.
    Args:
        beta1 (Tensor): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).
        beta2 (Tensor): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0).
        eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.
        global_step (Tensor): Global step.
        lr (Tensor): Learning rate.
        weight_decay (Number): Weight decay. Should be equal to or greater than 0.
        param (Tensor): Parameters.
        m (Tensor): m value of parameters.
        v (Tensor): v value of parameters.
        gradient (Tensor): Gradient of parameters.
        decay_flag (bool): Specifies whether param update with weight decay.
        optim_filter(bool): Applies parameter update or not.
    Returns:
        Tensor, the updated parameter when ``optim_filter`` is True, otherwise
        the unchanged ``gradient``.
    """
    if optim_filter:
        op_mul = P.Mul()
        op_sqrt = P.Sqrt()
        op_rsqrt = P.Rsqrt()
        op_square = P.Square()
        op_cast = P.Cast()
        op_reshape = P.Reshape()
        op_shape = P.Shape()
        op_pow = P.Pow()
        op_norm = layer.Norm()
        op_select = P.Select()
        op_greater = P.Greater()
        op_fill = P.Fill()
        op_dtype = P.DType()
        # Promote all state to float32 so the update math is done in full
        # precision regardless of the parameters' storage dtype.
        param_fp32 = op_cast(param, mstype.float32)
        m_fp32 = op_cast(m, mstype.float32)
        v_fp32 = op_cast(v, mstype.float32)
        gradient_fp32 = op_cast(gradient, mstype.float32)
        # Adam-style exponential moving averages of the gradient (next_m)
        # and the squared gradient (next_v).
        # NOTE(review): ``num_one`` is a module-level constant defined outside
        # this block -- presumably a Tensor/scalar equal to 1; confirm.
        next_m = op_mul(beta1, m_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta1, gradient_fp32)
        next_v = op_mul(beta2, v_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta2, op_square(gradient_fp32))
        # Bias correction: divide by (1 - beta^step), with step counted from 1.
        next_mm = next_m / (op_cast(num_one, mstype.float32)
                            - op_pow(beta1, op_cast(global_step + num_one, mstype.float32)))
        next_vv = next_v / (op_cast(num_one, mstype.float32) -
                            op_pow(beta2, op_cast(global_step + num_one, mstype.float32)))
        # Layer-wise trust ratio ||w|| / ||adam_update||, guarded so a zero
        # weight norm or zero gradient norm falls back to 1.
        w_norm = op_norm(param_fp32)
        g_norm = op_norm(gradient_fp32)
        g_norm_hat = op_norm(op_mul(next_mm, op_rsqrt(next_vv + eps)) + weight_decay * param_fp32)
        zeros = F.zeros_like(w_norm)
        ones = op_fill(op_dtype(w_norm), op_shape(w_norm), 1.0)
        trust_ratio = op_select(
            op_greater(w_norm, zeros),
            op_select(op_greater(g_norm, zeros), w_norm / g_norm_hat, ones),
            ones)
        # Clamp the trust ratio to [0, 10] for stability.
        tens = op_fill(op_dtype(trust_ratio), op_shape(trust_ratio), 10.0)
        trust_ratio = C.clip_by_value(trust_ratio, zeros, tens)
        update = next_mm / (op_sqrt(next_vv) + eps)
        if decay_flag:
            update = update + op_mul(weight_decay, param_fp32)
        update_with_lr = op_mul(op_mul(trust_ratio, lr), update)
        next_param = param_fp32 - op_reshape(update_with_lr, op_shape(param_fp32))
        # Write the new state back in place; F.depend chains each assignment
        # into the returned value so the side effects are kept in the graph.
        next_param = F.depend(next_param, F.assign(param, op_cast(next_param, F.dtype(param))))
        next_param = F.depend(next_param, F.assign(m, op_cast(next_m, F.dtype(m))))
        next_param = F.depend(next_param, F.assign(v, op_cast(next_v, F.dtype(v))))
        return op_cast(next_param, F.dtype(param))
    return gradient
def make_initial_ledger(toodir=None):
    """Set up the initial ToO ledger with one ersatz observation.
    Parameters
    ----------
    toodir : :class:`str`, optional, defaults to ``None``
        The directory to treat as the Targets of Opportunity I/O directory.
        If ``None`` then look up from the $TOO_DIR environment variable.
    Returns
    -------
    :class:`~astropy.table.Table`
        A Table of the initial, example values for the ToO ledger.
        The initial (.ecsv) ledger is also written to toodir or $TOO_DIR.
    """
    # ADM get the ToO directory (or check it exists).
    tdir = get_too_dir(toodir)
    # ADM retrieve the file name to which to write.
    fn = get_filename(tdir)
    # ADM make a single line of the ledger with some indicative values.
    # NOTE(review): three example rows are actually created here, although
    # the comment and docstring say "a single line" -- confirm the intent.
    # indatamodel.dtype (project-defined) fixes the ledger column schema.
    data = np.zeros(3, dtype=indatamodel.dtype)
    data["RA"] = 359.999999, 101.000001, 201.5
    data["DEC"] = -89.999999, -89.999999, -89.999999
    data["PMRA"] = 13.554634, 4.364553, 12.734214
    data["PMDEC"] = 10.763842, -10.763842, -10.763842
    data["REF_EPOCH"] = 2015.5, 2015.5, 2015.5
    data["CHECKER"] = "ADM", "AM", "ADM"
    data["TOO_TYPE"] = "TILE", "FIBER", "TILE"
    data["TOO_PRIO"] = "HI", "LO", "HI"
    # MJD_BEGIN/MJD_END are example observation windows in Modified Julian Date.
    data["MJD_BEGIN"] = 40811.04166667, 41811.14166667, 42811.14
    data["MJD_END"] = 40811.95833333, 41811.85833333, 42811.85
    data["OCLAYER"] = "BRIGHT", "DARK", "DARK"
    # ADM write out the results.
    _write_too_files(fn, data, ecsv=True)
    return data
def trace_sqrt_product_tf(cov1, cov2):
    """Compute trace(sqrt(cov1 * cov2)) for symmetric covariance matrices.

    Inspired by:
    https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py

    :param cov1: first covariance matrix
    :param cov2: second covariance matrix
    :return: scalar tensor trace(sqrt(cov1 * cov2))
    """
    # Using sqrt(cov1) on both sides keeps the product symmetric, so its
    # matrix square root is well defined.
    root1 = sqrt_sym_mat_tf(cov1)
    left = tf.matmul(root1, cov2)
    sandwich = tf.matmul(left, root1)
    return tf.trace(sqrt_sym_mat_tf(sandwich))
from typing import Any
from typing import Optional
import json
def opennem_serialize(obj: Any, indent: Optional[int] = None) -> str:
    """Serialize *obj* to a JSON string with the custom OpenNEM encoder,
    which supports custom types and GeoJSON.

    :param obj: object to serialize
    :param indent: optional indent level passed through to ``json.dumps``
    :return: JSON string
    """
    # The original assigned None to a temporary and guarded on
    # ``if not obj_deserialized`` -- a branch that was always taken.
    # Return the serialized string directly.
    return json.dumps(obj, cls=OpenNEMGeoJSONEncoder, indent=indent)
def min_vector(first_atom, second_atom, cell=None):
    """Helper to find the minimum image criterion distance between two atoms.

    When *cell* is omitted, the unit cell of the structure owning
    ``first_atom`` is used.
    """
    if cell is None:
        cell = first_atom._parent.cell.cell
    return min_vect(first_atom.pos, first_atom.fractional,
                    second_atom.pos, second_atom.fractional,
                    cell)
def dice_coeff_2label(pred, target):
    """Compute Dice coefficients for the cup (channel 0) and disc (channel 1).

    pred: tensor/array, either a single (channel, H, W) prediction or a batch
        with the batch as first dimension
    target: matching ground-truth tensor; moved to CPU before scoring
    Returns:
        (cup_dice, disc_dice) -- per-channel scores, averaged over the batch
        for batched input.
    """
    target = target.data.cpu()
    # Removed dead commented-out sigmoid/threshold code from the original.
    if len(pred.shape) == 3:
        # Single sample: score each channel directly.
        return (dice_coefficient_numpy(pred[0, ...], target[0, ...]),
                dice_coefficient_numpy(pred[1, ...], target[1, ...]))
    # Batched input: average each channel's score across the batch.
    cup_scores = [dice_coefficient_numpy(p[0, ...], t[0, ...])
                  for p, t in zip(pred, target)]
    disc_scores = [dice_coefficient_numpy(p[1, ...], t[1, ...])
                   for p, t in zip(pred, target)]
    return sum(cup_scores) / len(cup_scores), sum(disc_scores) / len(disc_scores)
def cost_arrhenius(p, T, rate):
    """
    Sum of absolute deviations between observed rates and the Arrhenius model.
    Parameters
    ----------
    p : iterable of floats
        Arrhenius parameters forwarded to ``arrhenius_p``
        (`p[0]` is activation energy [J]).
    T : float or array_like of floats
        Temperature, the independent variable.
    rate : float or array_like of floats
        Observed rate, the dependent variable.
    Returns
    -------
    float
        sum of absolute deviations
    """
    return np.sum(np.abs(rate-arrhenius_p(T,p)))
def get_ou_accounts_by_ou_name(ou_name, accounts_list=None, parent=None):
    """
    Returns the accounts of an OU identified by its name.
    Args:
        ou_name: name of the OU
        accounts_list: kept for backward compatibility; not used by the lookup
        parent: ID of the parent to search under; defaults to the organization root
    Returns:
        list of dict() with the information of the accounts in the OU
    Raises:
        ValueError: if no OU named ``ou_name`` can be resolved under ``parent``
    """
    if accounts_list is None:
        accounts_list = []
    if parent is None:
        parent = get_root()['Id']
    try:
        ou_info = get_ou_by_name(ou_name, parent)
        parent = ou_info['Id']
    except Exception as exc:
        # Narrowed from a bare ``except`` (which also caught SystemExit and
        # KeyboardInterrupt) and chained so the underlying cause is preserved.
        raise ValueError(f'Failed to retrieve the organization unit of name {ou_name}') from exc
    return get_ou_accounts(parent)
def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None):
    """Split the Pandas result evenly based on the provided number of splits.
    Args:
        axis: The axis to split across (0 = rows, 1 = columns).
        num_splits: The number of even splits to create.
        result: The result of the computation. This should be a Pandas
            DataFrame.
        length_list: The list of lengths to split this DataFrame into. This is
            used to return the DataFrame to its original partitioning schema.
            Note: the list is mutated in place (a leading 0 is inserted).
    Returns:
        A list of Pandas DataFrames (or ``result`` itself when
        ``num_splits == 1``).
    """
    if num_splits == 1:
        return result
    if length_list is not None:
        length_list.insert(0, 0)
        sums = np.cumsum(length_list)
        # BUG FIX: these returns must live inside the ``length_list`` branch;
        # previously ``sums`` was referenced even when ``length_list`` was
        # None (NameError) and the chunksize fallback below was unreachable.
        if axis == 0:
            return [result.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)]
        else:
            return [result.iloc[:, sums[i] : sums[i + 1]] for i in range(len(sums) - 1)]
    # We do this to restore block partitioning
    chunksize = compute_chunksize(result, num_splits, axis=axis)
    if axis == 0:
        return [
            result.iloc[chunksize * i : chunksize * (i + 1)] for i in range(num_splits)
        ]
    else:
        return [
            result.iloc[:, chunksize * i : chunksize * (i + 1)]
            for i in range(num_splits)
        ]
def is_icypaw_scalar_type_annotation(obj):
    """Return whether *obj* is usable as an icypaw scalar type annotation,
    i.e. it is a class deriving from ``IcypawScalarType``."""
    return isinstance(obj, type) and issubclass(obj, IcypawScalarType)
from typing import List
import os
def gcs_batched_data(request, gcs_bucket, dest_dataset,
                     dest_table) -> List[storage.blob.Blob]:
    """
    Upload two batches of test data to GCS, registering cleanup of every
    uploaded blob via the fixture finalizer.
    """
    uploaded = []
    for batch_name in ("batch0", "batch1"):
        for filename in ("part-m-00000", "part-m-00001", "_SUCCESS"):
            blob_path = "/".join([
                dest_dataset.dataset_id, dest_table.table_id, batch_name,
                filename
            ])
            blob: storage.blob.Blob = gcs_bucket.blob(blob_path)
            local_path = os.path.join(TEST_DIR, "resources", "test-data",
                                      "nation", filename)
            blob.upload_from_filename(local_path)
            uploaded.append(blob)
    def teardown():
        for blob in uploaded:
            if blob.exists():
                blob.delete()
    request.addfinalizer(teardown)
    # Return the _SUCCESS blobs of both batches (last file of each batch).
    return [uploaded[-1], uploaded[-4]]
def pair_is_inward(read, regionlength):
    """Determine whether a read pair points inward: both the read and its
    mate must be inward-facing within the region."""
    # Mirror the original short-circuit: the mate is only checked when the
    # read itself is inward.
    verdict = read_is_inward(read, regionlength)
    if verdict:
        verdict = mate_is_inward(read, regionlength)
    return verdict
import requests
import html
import re
import json
import os
def lambda_handler(event, context):
    """
    Scan registered domains for a missing or permissive robots meta tag and
    report the findings over SNS.

    When the remaining Lambda time budget drops at or below 40s, the handler
    re-invokes itself asynchronously, passing its progress in the payload.
    Returns 1 for a sub-invocation hand-off, otherwise the SNS MessageId or a
    success string.
    """
    print(event)
    # check if it's the original invokation or not.
    if is_the_original_invokation(event):
        # original invocation. Go on as usual
        ugetter = UrlsGetter()
        domains = ugetter.get_domains_list()
        domains_wn_meta = []
        sub = False
    else:
        # Sub invokation. Resume the info from the context
        domains = event['domains']
        domains_wn_meta = event['domains_wn_meta']
        sub = True
    # BUG FIX: iterate over a snapshot -- the except branch removes items
    # from ``domains``, and mutating a list while iterating it skips entries.
    for domain in list(domains):
        try:
            page = requests.get('http://'+domain, allow_redirects=False, timeout=20)
            if page.status_code == 200:
                # NOTE(review): ``html.fromstring`` is the lxml.html API; the
                # stdlib ``html`` module has no such function. Confirm the
                # import used in the deployed package.
                tree = html.fromstring(page.content)
                h1 = tree.xpath('//title/text()')
                title = h1[0] if len(h1) > 0 else ""
                if title != 'Index of /':
                    meta = tree.xpath('//meta[re:test(@name, "^robots$", "i")]/@content',
                                      namespaces={"re": "http://exslt.org/regular-expressions"})
                    if len(meta) == 0:
                        # No robots meta tag at all: report the domain.
                        domains_wn_meta.append(domain)
                    elif re.match('noindex', ",".join(meta), re.IGNORECASE) is None:
                        # Tag present but does not start with "noindex".
                        domains_wn_meta.append(domain)
        except Exception as e:
            print(e)
            # Drop domains that could not be fetched or parsed.
            domains.remove(domain)
        print(context.get_remaining_time_in_millis())
        if context.get_remaining_time_in_millis() <= 40000:
            # Running out of time: hand remaining work to a fresh async
            # invocation of this same function.
            client = boto3.client('lambda')
            client.invoke(
                FunctionName=context.function_name,
                InvocationType='Event',
                Payload=json.dumps({
                    'domains':domains,
                    'domains_wn_meta':domains_wn_meta
                })
            )
            sub = True
            break
        else:
            sub = False
    if sub is True:
        return 1
    else:
        if len(domains_wn_meta) != 0:
            message = build_message(domains_wn_meta)
            sns = boto3.client('sns')
            response = sns.publish(TopicArn=os.environ['TOPIC_ARN'],
                                   Message=message,
                                   Subject="Meta Robots: weekly status")
            return response['MessageId']
        else:
            return "All the websites are configured correctly in the staging server"
import logging
import requests
def send_request(url, payload):
    """
    Send an HTTP POST request with retries.

    :param url: The url to access.
    :param payload: The payload to send.
    :return: None if the request failed (including non-2xx responses that
        exhausted retries), response text if the request succeeded.
    """
    # Lazy %-style args so the message is only formatted when DEBUG is on.
    logging.debug('Request %s with payload:\n%s', url, payload)
    retries = Retry(
        total=10,
        allowed_methods=False,  # Set to a False value to retry on any verb.
        status_forcelist=[401, 500, 502, 503, 504],
        backoff_factor=3
    )
    with requests.Session() as s:
        s.mount(url, HTTPAdapter(max_retries=retries))
        try:
            # SECURITY: verify=False disables TLS certificate validation,
            # which permits man-in-the-middle attacks. Confirm this is a
            # deliberate choice for this endpoint.
            resp = s.post(url, data=payload, verify=False, timeout=5)
            if resp.ok:
                return resp.text
        except requests.exceptions.RequestException as e:
            logging.error(str(e))
    return None
def sort_by_game(game_walker, from_locale, pack):
    """Sort a pack by the order in which strings appears in the game files.
    This is one of the slowest sorting method. If the pack contains strings
    that are not present in the game, they are sorted alphabetically at the
    end and a message is logged."""
    def get_file_path_tuple(file_dict_path_str):
        # A "file dict path" string encodes (file path, dict path); keep only
        # the file-path component, as a hashable tuple usable as a dict key.
        return tuple(common.unserialize_dict_path(file_dict_path_str)[0])
    def get_packs_by_file(pack):
        """Return a dict from file_path_tuple to a pack for that file path"""
        packs_by_file = {}
        for file_dict_path_str, result in pack.items():
            file_path_tuple = get_file_path_tuple(file_dict_path_str)
            pack = packs_by_file.setdefault(file_path_tuple, {})
            pack[file_dict_path_str] = result
        return packs_by_file
    packs_by_file = get_packs_by_file(pack)
    known_files = frozenset(packs_by_file.keys())
    # Only walk game files that actually occur in the pack.
    game_walker.set_file_path_filter(lambda f_p: tuple(f_p) in known_files)
    def iterate_game_and_pick_translations(packs_by_file, game_walker):
        """Iterate with game_walker and drain packs_by_file
        Return a sorted single pack with elements in the same order as
        returned by game_walker with translations from packs_by_file.
        This will drain packs_by_file in the process, so only stale strings
        will remain there."""
        output = {}
        iterator = game_walker.walk(from_locale, False)
        current_file = None
        strings_for_file = None
        def add_stale_for_current_file():
            """Add strings remaining in strings_for_file to stale translations
            Called after iterating for all strings in one game file."""
            if strings_for_file:
                print("note: sorting", len(strings_for_file),
                      "stale nonexisting strings for", "/".join(current_file))
                output.update(common.sort_dict(strings_for_file))
                strings_for_file.clear()
        # NOTE(review): this assumes the walker yields all strings of one
        # file contiguously; interleaved files would cause entries to be
        # flagged stale too early -- confirm the walker's ordering contract.
        for file_dict_path_str, _, _, _ in iterator:
            file_path = get_file_path_tuple(file_dict_path_str)
            if current_file != file_path:
                add_stale_for_current_file()
                current_file = file_path
                strings_for_file = packs_by_file.pop(file_path, {})
            result = strings_for_file.pop(file_dict_path_str, None)
            if result is not None:
                output[file_dict_path_str] = result
        # sort remains of the last file
        add_stale_for_current_file()
        return output
    output = iterate_game_and_pick_translations(packs_by_file, game_walker)
    # sort the remaining stales file_path, and add them
    for file_path, stale_pack in common.sort_dict(packs_by_file).items():
        print("note: sorting", len(stale_pack), "strings for nonexisting",
              "/".join(file_path))
        output.update(common.sort_dict(stale_pack))
    return output
from ..base.util.worker_thread import stop_all_threads
from typing import Sequence
import threading
def main(args: Sequence[str]) -> int:
    """
    Entry for the program.

    Returns 0 on clean exit, 1 on error.
    """
    try:
        user_args = parse_args(args)
        bus = bootstrap_petronia(user_args)
        return run_petronia(bus, user_args)
    except BaseException as err:  # pylint: disable=broad-except
        if isinstance(err, SystemExit):
            if err.code == 0:
                return 0
            # Otherwise, treat as a normal error.
        print_exception(err.__class__, err, err.__traceback__)
        stop_all_threads()
        for thread in threading.enumerate():
            # BUG FIX: Thread.isAlive() was removed in Python 3.9;
            # the supported spelling is is_alive().
            if thread != threading.current_thread() and thread.is_alive():
                print("Thread {0} still alive".format(thread.name))
        print("Exiting Petronia with error.")
        return 1
def to_query_str(params):
    """Converts a dict of params to a query string.
    Args:
        params (dict): A dictionary of parameters, where each key is a
            parameter name, and each value is either a string or
            something that can be converted into a string. If a value
            is a list, it will be converted to a comma-delimited string
            of values (e.g., "thing=1,2,3"). Note that values are not
            percent-encoded.
    Returns:
        str: A URI query string including the "?" prefix, or an empty string
            if no params are given (the dict is empty).
    """
    if not params:
        return ''
    segments = []
    for name, value in params.items():
        if value is True:
            encoded = 'true'
        elif value is False:
            encoded = 'false'
        elif isinstance(value, list):
            encoded = ','.join(map(str, value))
        else:
            encoded = str(value)
        segments.append(name + '=' + encoded)
    return '?' + '&'.join(segments)
def histogram(backend, qureg):
    """
    Make a measurement outcome probability histogram for the given qubits.
    Args:
        backend (BasicEngine): A ProjectQ backend
        qureg (list of qubits and/or quregs): The qubits,
            for which to make the histogram
    Returns:
        A tuple (fig, axes, probabilities), where:
        fig: The histogram as figure
        axes: The axes of the histogram
        probabilities (dict): A dictionary mapping outcomes as string
            to their probabilities
    Note:
        Don't forget to call eng.flush() before using this function.
    """
    # Flatten the mixed list of qubits and quregs into one flat qubit list.
    qubit_list = []
    for qb in qureg:
        if isinstance(qb, list):
            qubit_list.extend(qb)
        else:
            qubit_list.append(qb)
    # With n qubits there are 2**n outcomes; warn that the plot degrades
    # beyond 5 qubits (32 bars).
    if len(qubit_list) > 5:
        print('Warning: For {0} qubits there are 2^{0} different outcomes'.format(len(qubit_list)))
        print("The resulting histogram may look bad and/or take too long.")
        print("Consider calling histogram() with a sublist of the qubits.")
    if hasattr(backend, 'get_probabilities'):
        # Preferred path: the backend can report all probabilities at once.
        probabilities = backend.get_probabilities(qureg)
    elif isinstance(backend, Simulator):
        # Simulator fallback: query each basis state individually.
        outcome = [0] * len(qubit_list)
        n_outcomes = 1 << len(qubit_list)
        probabilities = {}
        for i in range(n_outcomes):
            # Decompose i into bits: outcome[pos] is bit ``pos`` of i, so the
            # joined key string lists qubit 0 first (little-endian order).
            for pos in range(len(qubit_list)):
                if (1 << pos) & i:
                    outcome[pos] = 1
                else:
                    outcome[pos] = 0
            probabilities[''.join([str(bit) for bit in outcome])] = backend.get_probability(outcome, qubit_list)
    else:
        raise RuntimeError('Unable to retrieve probabilities from backend')
    # Empirical figure size for up to 5 qubits
    fig, axes = plt.subplots(figsize=(min(21.2, 2 + 0.6 * (1 << len(qubit_list))), 7))
    names = list(probabilities.keys())
    values = list(probabilities.values())
    axes.bar(names, values)
    fig.suptitle('Measurement Probabilities')
    return (fig, axes, probabilities)
import bisect
def crop(sequence, minimum, maximum, key=None, extend=False):
    """
    Calculates crop indices for given sequence and range. Optionally the range
    can be extended by adding additional adjacent points to each side. Such
    extension might be useful to display zoomed lines etc. Note that this method
    assumes that given sequence is sorted ascendantly.
    Args:
        sequence: list or tuple
            Collection of items ordered by searched value.
        minimum: float
            Crop range minimum.
        maximum: float
            Crop range maximum.
        key: callable or None
            Function to be used to get specific value from item.
        extend: bool
            If set to True additional adjacent point is added to each side.
    Returns:
        (int, int)
            Cropping indexes.
    """
    # NOTE(review): ``import bisect`` binds the stdlib *module*, which is not
    # callable with this (sequence, value, key, side) signature; a
    # project-local ``bisect`` helper appears to be intended -- verify the
    # actual import in the full file.
    # get indices
    left_idx = bisect(sequence, minimum, key, 'left')
    right_idx = bisect(sequence, maximum, key, 'right')
    # extend range by adjacent values
    if extend and left_idx > 0:
        left_idx = bisect(sequence[:left_idx], sequence[left_idx-1], key, 'left')
    if extend and right_idx < len(sequence):
        right_idx += bisect(sequence[right_idx:], sequence[right_idx], key, 'right')
    return left_idx, right_idx
def reverse_dict(dict_obj):
    """Invert a dict so each value maps to a sorted list of its keys.
    Parameters
    ----------
    dict_obj : dict
        A key-value dict.
    Returns
    -------
    dict
        A dict where each distinct value of ``dict_obj`` maps to the sorted
        list of all the unique keys that mapped to it.
    Example
    -------
    >>> dicti = {'a': 1, 'b': 3, 'c': 1}
    >>> reverse_dict(dicti)
    {1: ['a', 'c'], 3: ['b']}
    """
    inverted = {}
    for key, value in dict_obj.items():
        add_to_dict_val_set(dict_obj=inverted, key=value, val=key)
    return {val: sorted(keys) for val, keys in inverted.items()}
def remote_error_known():
    """Return a canned remote "error" payload (errorType code 1)."""
    payload = {"errorType": 1}
    return payload
import re
def get_throttling_plan(js: str):
    """Extract the "throttling plan".
    The "throttling plan" is a list of tuples used for calling functions
    in the c array. The first element of the tuple is the index of the
    function to call, and any remaining elements of the tuple are arguments
    to pass to that function.
    :param str js:
        The contents of the base.js asset file.
    :returns:
        The full function code for computing the throttling parameter.
    """
    raw_code = get_throttling_function_code(js)
    transform_start = r"try{"
    start_match = re.compile(transform_start).search(raw_code)
    transform_plan_raw = find_object_from_startpoint(raw_code, start_match.span()[1] - 1)
    # Steps are either c[x](c[y]) or c[x](c[y],c[z])
    step_start = r"c\[(\d+)\]\(c\[(\d+)\](,c(\[(\d+)\]))?\)"
    step_matches = re.compile(step_start).findall(transform_plan_raw)
    plan = []
    for groups in step_matches:
        if groups[4] != '':
            plan.append((groups[0], groups[1], groups[4]))
        else:
            plan.append((groups[0], groups[1]))
    return plan
def list_agg(object_list, func):
    """Aggregation function for a list of objects.

    Args:
        object_list: iterable of items to aggregate.
        func: callable applied to each item.

    Returns:
        list: ``func`` applied to each element, preserving order.
    """
    # A comprehension replaces the manual append loop (same calls, same order).
    return [func(item) for item in object_list]
from typing import Callable
def job_metadata_api(**kwargs: dict) -> Callable[[dict], str]:
    """
    Job Metadata API route: delegate to the job resource implementation.
    Arguments:
        kwargs: required keyword arguments, forwarded unchanged
    Returns:
        JSON response from ``job_resource.job_metadata_api``.
        NOTE(review): the ``Callable[[dict], str]`` return annotation looks
        inconsistent with a JSON response -- confirm against the resource.
    """
    response = job_resource.job_metadata_api(**kwargs)
    return response
def parse_word(word: str) -> str:
    """Compile a word of uppercase letters as numeric digits.

    Each letter is weighted by its decimal place value (rightmost letter is
    the units digit). Non-uppercase-letter words are returned unchanged.
    """
    if not word.isupper():
        return word
    terms = []
    for power, letter in enumerate(reversed(word)):
        terms.append(letter + "*" + str(10 ** power))
    return "(" + " + ".join(terms) + ")"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.