content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def rsa_obj(key_n, key_e, key_d=None, key_p=None, key_q=None):
    """Wrapper for the RSA key constructor.

    The main reason for its existence is to compute the prime factors
    when only the private exponent d is supplied: RSA.construct() was
    observed to throw exceptions because it could not compute the prime
    factors itself, while recover_prime_factors() works more reliably.

    :param key_n: public modulus n
    :param key_e: public exponent e
    :param key_d: private exponent d (optional)
    :param key_p: first prime factor of n (optional)
    :param key_q: second prime factor of n (optional)
    :return: key object produced by RSA.construct()
    """
    if key_n is not None and key_e is not None and key_d is None \
            and key_p is None and key_q is None:
        # Public key only.
        key = RSA.construct((key_n, key_e))
    elif key_n is not None and key_e is not None and key_d is not None \
            and key_p is None and key_q is None:
        # Private exponent without factors: recover p and q first.
        key_p, key_q = recover_prime_factors(key_n, key_e, key_d)
        key = RSA.construct((key_n, key_e, key_d, int(key_p), int(key_q)))
    elif key_n is not None and key_e is not None and key_d is not None \
            and key_p is not None and key_q is None:
        # Only p known: q = n // p (integer division; `/` would yield a
        # float under Python 3 and break construct()).
        key = RSA.construct((key_n, key_e, key_d, key_p, key_n // key_p))
    elif key_n is not None and key_e is not None and key_d is not None \
            and key_p is not None and key_q is not None:
        key = RSA.construct((key_n, key_e, key_d, key_p, key_q))
    return key
from operator import ge
import logging
def create_surface_and_gap(surf_data, radius_mode=False, prev_medium=None,
                           wvl=550.0, **kwargs):
    """Create a surface and gap where surf_data is a list that contains:
    [curvature, thickness, refractive_index, v-number]

    The last two entries are optional; they may also be strings ('REFL'
    for a reflective surface, or a glass name plus catalog).

    :param surf_data: [cv-or-radius, thickness, (index|name)[, (v-number|catalog)]]
    :param radius_mode: if True, surf_data[0] is a radius, not a curvature
    :param prev_medium: medium before this surface; reused for 'REFL'
    :param wvl: wavelength used to evaluate the refractive index
    :return: tuple (surface, gap, refractive index at wvl, transform)
    """
    s = surface.Surface()
    if radius_mode:
        if surf_data[0] != 0.0:
            s.profile.cv = 1.0/surf_data[0]
        else:
            s.profile.cv = 0.0
    else:
        s.profile.cv = surf_data[0]
    if len(surf_data) > 2:
        if isanumber(surf_data[2]):  # assume all args are numeric
            # BUG FIX: this condition was `len(surf_data) < 3`, which can
            # never be true inside the `> 2` branch above, so the
            # index-only case (including the Air shortcut) was unreachable
            # and a 3-element input crashed on surf_data[3] below.
            if len(surf_data) < 4:
                if surf_data[2] == 1.0:
                    mat = m.Air()
                else:
                    mat = m.Medium(surf_data[2])
            else:
                mat = m.Glass(surf_data[2], surf_data[3], '')
        else:  # string args
            if surf_data[2].upper() == 'REFL':
                s.interact_mode = 'reflect'
                mat = prev_medium
            else:
                num_args = len(surf_data[2:])
                if num_args == 2:
                    name, cat = surf_data[2], surf_data[3]
                else:
                    name, cat = surf_data[2].split(',')
                try:
                    mat = gfact.create_glass(name, cat)
                except ge.GlassNotFoundError as gerr:
                    logging.info('%s glass data type %s not found',
                                 gerr.catalog,
                                 gerr.name)
                    logging.info('Replacing material with air.')
                    mat = m.Air()
    else:  # only curvature and thickness entered, set material to air
        mat = m.Air()
    thi = surf_data[1]
    g = gap.Gap(thi, mat)
    rndx = mat.rindex(wvl)
    tfrm = np.identity(3), np.array([0., 0., thi])
    return s, g, rndx, tfrm
def deprecate_build(id):
    """Mark a build as deprecated.

    **Authorization**

    The caller must be authenticated and hold ``deprecate_build``
    permissions.

    **Example request**

    .. code-block:: http

       DELETE /builds/1 HTTP/1.1
       Authorization: Basic ZXlKcFlYUWlPakUwTlRZM056SXpORGdzSW1WNGNDSTZNVFEx...

    **Example response**

    .. code-block:: http

       HTTP/1.0 200 OK
       Content-Type: application/json

       {}

    :reqheader Authorization: Include the token in a username field with a
        blank password; ``<token>:``.
    :param id: ID of the build.
    :statuscode 200: No error.
    :statuscode 404: Build not found.
    """
    # 404s automatically when the build does not exist.
    target_build = Build.query.get_or_404(id)
    target_build.deprecate_build()
    db.session.commit()
    return jsonify({}), 200
from re import A
def project_task_list_layout(list_id, item_id, resource, rfields, record,
                             icon="tasks"):
    """
    Default dataList item renderer for Tasks on Profile pages

    Args:
        list_id: the HTML ID of the list
        item_id: the HTML ID of the item
        resource: the CRUDResource to render
        rfields: the S3ResourceFields to render
        record: the record as dict

    Returns:
        DIV: the rendered "card" as a tree of web2py HTML helpers
    """
    # record._row holds raw field values; record[...] holds the
    # formatted/represented values for display.
    raw = record._row
    record_id = raw["project_task.id"]
    item_class = "thumbnail"
    author = record["project_task.modified_by"]
    name = record["project_task.name"]
    assigned_to = record["project_task.pe_id"] or ""
    description = record["project_task.description"]
    date_due = record["project_task.date_due"]
    source_url = raw["project_task.source_url"]
    status = raw["project_task.status"]
    priority = raw["project_task.priority"]
    project_id = raw["project_task.project_id"]
    if project_id:
        # Breadcrumb-style link back to the parent project's profile page.
        project = record["project_task.project_id"]
        project = SPAN(A(project,
                         _href = URL(c="project", f="project",
                                     args=[project_id, "profile"])
                         ),
                       " > ",
                       _class="task_project_title"
                       )
    else:
        project = ""
    if priority in (1, 2):
        # Urgent / High
        priority_icon = DIV(ICON("exclamation"),
                            _class="task_priority")
    elif priority == 4:
        # Low
        priority_icon = DIV(ICON("arrow-down"),
                            _class="task_priority")
    else:
        # Normal priority: no icon shown.
        priority_icon = ""
    # @ToDo: Support more than just the Wrike/MCOP statuses
    # Map status codes to card badge colours.
    status_icon_colour = {2: "#AFC1E5",
                          6: "#C8D571",
                          7: "#CEC1FF",
                          12: "#C6C6C6",
                          }
    active_statuses = current.s3db.project_task_active_statuses
    status_icon = DIV(ICON("active" if status in active_statuses else "inactive"),
                      _class="task_status",
                      _style="background-color:%s" % (status_icon_colour.get(status, "none"))
                      )
    location = record["project_task.location_id"]
    org_logo = ""
    #org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    #org_logo = raw["org_organisation.logo"]
    #if org_logo:
    #    org_logo = A(IMG(_src=URL(c="default", f="download", args=[org_logo]),
    #                     _class="media-object",
    #                     ),
    #                 _href=org_url,
    #                 _class="pull-left",
    #                 )
    #else:
    #    # @ToDo: use a dummy logo image
    #    org_logo = A(IMG(_class="media-object"),
    #                 _href=org_url,
    #                 _class="pull-left",
    #                 )
    # Edit Bar
    # @ToDo: Consider using S3NavigationItem to hide the auth-related parts
    permit = current.auth.s3_has_permission
    table = current.db.project_task
    if permit("update", table, record_id=record_id):
        # Update opens in a modal popup; "refresh"/"record" vars tell the
        # popup which dataList item to refresh on save.
        edit_btn = A(ICON("edit"),
                     _href=URL(c="project", f="task",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id},
                               ),
                     _class="s3_modal",
                     _title=get_crud_string(resource.tablename,
                                            "title_update"),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        # Deletion is handled client-side via the dl-item-delete class.
        delete_btn = A(ICON("delete"),
                       _class="dl-item-delete",
                       _title=get_crud_string(resource.tablename,
                                              "label_delete_button"),
                       )
    else:
        delete_btn = ""
    if source_url:
        source_btn = A(ICON("link"),
                       _title=source_url,
                       _href=source_url,
                       _target="_blank"
                       )
    else:
        source_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   source_btn,
                   _class="edit-bar fright",
                   )
    # Render the item
    item = DIV(DIV(ICON(icon),
                   SPAN(location, _class="location-title"),
                   SPAN(date_due, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(org_logo,
                   priority_icon,
                   DIV(project,
                       name, _class="card-title task_priority"),
                   status_icon,
                   DIV(DIV((description or ""),
                           DIV(author,
                               " - ",
                               assigned_to,
                               #A(organisation,
                               #  _href=org_url,
                               #  _class="card-organisation",
                               #  ),
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )
    return item
def subtractNums(x, y):
    """
    Subtract x from y and return the result, i.e. ``y - x``.

    NOTE(review): despite the argument order suggesting ``x - y``, the
    implementation computes ``y - x``; existing callers may rely on this,
    so it is documented rather than changed.
    """
    return y - x
def getTraceback(error=None):
    """Return the formatted traceback of the exception being handled.

    :param error: fallback object whose str() is returned if formatting
        the traceback itself fails
    :return: traceback text (up to 10 stack levels) as a string
    """
    try:
        return traceback.format_exc(10)
    except Exception:
        # BUG FIX: `except Exception, err` is Python 2 syntax and a
        # SyntaxError under Python 3.
        return str(error)
import os
import shutil
def _copy_and_rename_file(file_path: str, dest_dir: str, new_file_name):
"""
Copies the specified file to the dest_dir (creating the directory if necessary) and renames it to the new_file_name
:param file_path: file path of the file to copy
:param dest_dir: directory to copy the file to
:param new_file_name: name the file should be changed to
:return: file path of the new file
"""
# Copy File
try:
# Creating new directory with year if does not exist
os.makedirs(dest_dir, exist_ok=True)
# Copying File
print("Copying file: {0}".format(file_path))
# new_file_copy = shutil.copyfile(file_path, dest_dir)
new_file_copy = shutil.copy(file_path, dest_dir)
print("Copied file to {0}".format(dest_dir))
# Renaming File
print("Renaming file: {0}".format(new_file_copy))
new_file_path = os.path.join(dest_dir, new_file_name)
os.rename(src=new_file_copy, dst=new_file_path)
print("File successfully renamed to " + new_file_path)
return new_file_path
except Exception as e:
print("Failed to copy or rename file.")
print(e) | b53ce9d5e968e257919c8f2eb7749a171eddb59d | 25,206 |
from datetime import datetime
def confirm_email(token):
    """Verify an email-confirmation token and activate the user account.

    :param token: signed token carried in the confirmation-email link
    :return: a redirect response (or an auto-login response)

    Note: the flashed messages are user-facing Portuguese strings and are
    deliberately left untranslated.
    """
    # Verify token
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    is_valid, has_expired, object_id = user_manager.verify_token(
        token,
        user_manager.confirm_email_expiration)
    if has_expired:
        # "Your confirmation token has expired."
        flash(_('Seu token de confirmacao expirou.'), 'error')
        return redirect(url_for('user.login'))
    if not is_valid:
        # "Invalid confirmation token."
        flash(_('Token de confirmacao invalido.'), 'error')
        return redirect(url_for('user.login'))
    """ Confirm email by setting User.confirmed_at=utcnow() or
    UserEmail.confirmed_at=utcnow()"""
    user = None
    if db_adapter.UserEmailClass:
        # Multiple-emails-per-user configuration: the token identifies a
        # UserEmail record.
        user_email = user_manager.get_user_email_by_id(object_id)
        if user_email:
            user_email.confirmed_at = datetime.utcnow()
            user = user_email.user
    else:
        # Single-email configuration: the token identifies the User itself.
        user_email = None
        user = user_manager.get_user_by_id(object_id)
        if user:
            user.confirmed_at = datetime.utcnow()
    if user:
        user.set_active(True)
        db_adapter.commit()
    else:  # pragma: no cover
        flash(_('Token de confirmacao invalido.'), 'error')
        return redirect(url_for('user.login'))
    # Send email_confirmed signal
    signals.user_confirmed_email.send(
        current_app._get_current_object(), user=user)
    # Prepare one-time system message ("Your email has been confirmed.")
    flash(_('Seu email foi confirmado.'), 'success')
    # Auto-login after confirm or redirect to login page
    safe_next = _get_safe_next_param(
        'next', user_manager.after_confirm_endpoint)
    if user_manager.auto_login_after_confirm:
        return _do_login_user(user, safe_next)  # auto-login
    else:
        return redirect(
            url_for('user.login')+'?next='+quote(safe_next)
        )  # redirect to login page
from typing import List
from pathlib import Path
import logging
def get_vector_paths_4_sample_set(set_name: str, base_path: str) -> List[Path]:
    """
    Gets the feature-vector file for every sample in a sample-set directory.

    Each sample is a subdirectory of ``{base_path}/{set_name}``; the first
    file matching ``*-ft_vecs.npy`` (searched recursively) is taken per
    sample. Samples with no matching file are skipped with a warning.

    :param set_name: Str indicating the name of the directory for a given set of samples
    :param base_path: Str indicating the location directory of samples
    :return: list of Paths, one per sample that has a vectors file
    """
    # BUG FIX: the return annotation referenced PosixPath, which is never
    # imported (only Path is) and raised NameError at definition time.
    paths = []
    path = Path(f"{base_path}/{set_name}")
    # TODO: Validate existence of directory
    # Iterate over all the samples for a set
    for sample_directory in path.iterdir():
        matches = list(sample_directory.rglob('*-ft_vecs.npy'))
        if len(matches) == 0:
            # BUG FIX: this referenced the undefined name `directory` and
            # raised NameError whenever a sample had no vectors file.
            logging.warning(f"Could not load vectors for sample {str(sample_directory)}")
            continue
        paths.append(matches[0])
    return paths
def _GetInstanceField(instance, field):
    """Return the value of a single field of an instance.

    @type instance: string
    @param instance: Instance name
    @type field: string
    @param field: Name of the field
    @rtype: string

    """
    # Fetch the one-element field list and unpack its single value.
    (field_value,) = _GetInstanceFields(instance, [field])
    return field_value
def infectious_rate_tweets(t,
                           p0=0.001,
                           r0=0.424,
                           phi0=0.125,
                           taum=2.,
                           t0=0,
                           tm=24,
                           bounds=None):
    """
    Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array
    in the form of [(lower r0, lower taum), (upper r0, upper taum)].
    Converted to hours.

    :param t: point to evaluate function at (in hours)
    :param p0: base rate
    :param r0: amplitude
    :param phi0: shift (in days)
    :param taum: decay/freshness (in days)
    :param t0: start time of observation (in hours)
    :param tm: cyclic property (after what time a full circle passed, in hours)
    :param bounds: bounds for r0 and taum
    :return: intensity for point t
    """
    if bounds is not None:
        if not (bounds[0][0] < r0 < bounds[1][0]):
            # BUG FIX: the squashing previously fed `taum` into the sigmoid
            # here; by symmetry with the taum clamp below, the out-of-bounds
            # value being squashed must be r0 itself.
            r0 = max(bounds[0][0], bounds[1][0] * sigmoid(r0 / bounds[1][0]))
        if not (bounds[0][1] < taum < bounds[1][1]):
            taum = max(bounds[0][1],
                       bounds[1][1] * sigmoid(taum / bounds[1][1]))
    # Sinusoidal daily cycle modulated by an exponential freshness decay.
    return p0 * (1. - r0 * sin(
        (48 / tm) * pi * ((t + t0) / 24 + phi0))) * exp(-t / (24 * taum))
def fetch_user(username):
    """
    Fetch the user instance matching *username*, if any.

    NOTE(review): presumably the Django/ORM ``objects.get`` raises its
    does-not-exist error when no match is found — confirm against the
    User model.
    """
    matching_user = User.objects.get(username=username)
    return matching_user
def get_script_name(key_combo, key):
    """Build a script name from a key combo and a key.

    (e.g. ctrl-shift, a -> CtrlShiftA; key, a -> A)
    """
    capitalized_key = key.capitalize()
    if key_combo == 'key':
        # Plain key press: no combo prefix.
        return capitalized_key
    return get_capitalized_key_combo_pattern(key_combo) + capitalized_key
def layer(name, features):
    """Make a vector_tile.Tile.Layer from GeoJSON features.

    :param name: layer name stored in the tile
    :param features: iterable of GeoJSON feature dicts; multi-geometries
        are split into single geometries via singles()
    :return: a populated vector_tile_pb2.tile.layer

    Python 2 code: relies on itertools.imap/ifilter and on map() returning
    indexable lists. Geometry is packed per the Mapbox Vector Tile
    encoding: command integers are (count << 3) | cmd with cmd 1=MoveTo,
    2=LineTo, 7=ClosePath, and coordinate deltas are zigzag-encoded as
    (n << 1) ^ (n >> 31).

    NOTE(review): the Point branch builds `geometry` but never appears to
    call pbf.geometry.extend on it, and `rings` in the Polygon branch is
    never used — confirm against the upstream original.
    """
    pbl = vector_tile_pb2.tile.layer()
    pbl.name = name
    pbl.version = 1
    # Layer-wide key/value string tables; features reference them by index.
    pb_keys = []
    pb_vals = []
    pb_features = []
    for j, f in enumerate(
            chain.from_iterable(singles(ob) for ob in features)):
        pbf = vector_tile_pb2.tile.feature()
        pbf.id = j
        # Pack up the feature geometry.
        g = f.get('geometry')
        if g:
            gtype = g['type']
            coords = g['coordinates']
            if gtype == 'Point':
                # MoveTo(count=1) followed by one zigzag-encoded pair.
                geometry = [(1<<3)+1] + [
                    (n << 1) ^ (n >> 31) for n in imap(int, coords)]
            elif gtype == 'LineString':
                num = len(coords)
                geometry = [0]*(4 + 2*(num-1))
                geometry[0] = (1<<3)+1
                geometry[1:3] = (
                    (n << 1) ^ (n >> 31) for n in imap(int, coords[0]))
                # LineTo with count = number of remaining points.
                geometry[3] = ((num-1)<<3)+2
                # Remaining points are encoded as deltas from the previous point.
                for i, (prev, pair) in enumerate(pairwise(coords), 1):
                    prev = map(int, prev)
                    pair = map(int, pair)
                    geometry[2*i+2:2*i+4] = (
                        (n << 1) ^ (n >> 31) for n in (
                            pair[0]-prev[0], pair[1]-prev[1]))
                pbf.geometry.extend(geometry)
            elif gtype == 'Polygon':
                rings = []
                for ring in coords:
                    num = len(ring)
                    geometry = [0]*(5 + 2*(num-1))
                    geometry[0] = (1<<3)+1
                    geometry[1:3] = (
                        (n << 1) ^ (n >> 31) for n in imap(int, ring[0]))
                    geometry[3] = ((num-1)<<3)+2
                    for i, (prev, pair) in enumerate(pairwise(ring), 1):
                        prev = map(int, prev)
                        pair = map(int, pair)
                        geometry[2*i+2:2*i+4] = (
                            (n << 1) ^ (n >> 31) for n in (
                                pair[0]-prev[0], pair[1]-prev[1]))
                    # Each ring ends with a ClosePath command.
                    geometry[-1] = (1<<3)+7
                    pbf.geometry.extend(geometry)
            pbf.type = geom_type_map[gtype]
        # Pack up feature properties.
        props = f.get('properties', {})
        tags = [0]*(2*len(props))
        for i, (k, v) in enumerate(props.items()):
            if k not in pb_keys:
                pb_keys.append(k)
            if v not in pb_vals:
                pb_vals.append(v)
            # Tags are (key-index, value-index) pairs into the layer tables.
            tags[i*2:i*2+2] = pb_keys.index(k), pb_vals.index(v)
        pbf.tags.extend(tags)
        pb_features.append(pbf)
    # Finish up the layer.
    pbl.keys.extend(map(str, pb_keys))
    pbl.values.extend(map(value, ifilter(None, pb_vals)))
    return pbl
def serialize(formula, threshold=None):
    """Provides a string representing the formula.

    :param formula: The target formula
    :type formula: FNode
    :param threshold: Specify the threshold
    :type threshold: Integer
    :returns: A string representing the formula
    :rtype: string
    """
    env_serializer = get_env().serializer
    return env_serializer.serialize(formula, threshold=threshold)
import collections
def _select_cost_function(is_upgrade, generation, core):
    """Return the cost-structure builder for a site, or None if the
    generation/core combination is not modelled."""
    if is_upgrade:
        if generation == '4G':
            return upgrade_to_4g
        if generation == '5G' and core == 'nsa':
            return upgrade_to_5g_nsa
        if generation == '5G' and core == 'sa':
            return upgrade_to_5g_sa
    else:
        if generation == '4G':
            return greenfield_4g
        if generation == '5G' and core == 'nsa':
            return greenfield_5g_nsa
        if generation == '5G' and core == 'sa':
            return greenfield_5g_sa
    return None


def find_single_network_cost(region, option, costs, global_parameters,
                             country_parameters, core_lut):
    """
    Calculates the annual total cost using capex and opex.

    Parameters
    ----------
    region : dict
        The region being assessed and all associated parameters.
    option : dict
        Contains the scenario and strategy. The strategy string controls
        the strategy variants being tested in the model and is defined based
        on the type of technology generation, core and backhaul, and the
        strategy for infrastructure sharing, the number of networks in each
        geotype, spectrum and taxation.
    costs : dict
        All equipment costs.
    global_parameters : dict
        All global model parameters.
    country_parameters : dict
        All country specific parameters.
    core_lut : dict
        Contains the number of existing and required, core and regional assets.

    Returns
    -------
    region : dict
        Contains all regional data, including per-asset costs and the
        aggregated 'network_cost'.
    """
    strategy = option['strategy']
    # Strategy string layout: <generation>_<core>_<backhaul>_...
    generation = strategy.split('_')[0]
    core = strategy.split('_')[1]
    backhaul = strategy.split('_')[2]
    new_mno_sites = region['new_mno_sites']
    upgraded_mno_sites = region['upgraded_mno_sites']
    all_sites = new_mno_sites + upgraded_mno_sites
    new_backhaul = region['backhaul_new']
    regional_cost = []  # kept for parity with the original (not aggregated below)
    regional_asset_cost = []
    for i in range(1, int(all_sites) + 1):
        # Sites 1..upgraded_mno_sites are upgrades; the rest are greenfield.
        cost_fn = _select_cost_function(i <= upgraded_mno_sites,
                                        generation, core)
        if cost_fn is None:
            continue
        cost_structure = cost_fn(region, strategy, costs,
                                 global_parameters, core_lut,
                                 country_parameters)
        backhaul_quant = backhaul_quantity(i, new_backhaul)
        total_cost, cost_by_asset = calc_costs(region, cost_structure,
                                               backhaul, backhaul_quant,
                                               global_parameters,
                                               country_parameters)
        regional_cost.append(total_cost)
        regional_asset_cost.append(cost_by_asset)
    # Sum the per-site asset-cost dicts into a single dict of totals.
    counter = collections.Counter()
    for d in regional_asset_cost:
        counter.update(d)
    counter_dict = dict(counter)
    network_cost = 0
    for k, v in counter_dict.items():
        region[k] = v
        network_cost += v
    region['network_cost'] = network_cost
    return region
def get_headings(bulletin):
    """Return the all-uppercase heading lines from a job-bulletin text file.

    :param bulletin: index into the module-level ``bulletins`` sequence of
        bulletin file names. NOTE(review): the original docstring claimed
        the argument was the list of bulletin files itself, but the code
        indexes a global ``bulletins`` with it.
    :return: list of lines from the file that are entirely uppercase
    """
    with open("../input/cityofla/CityofLA/Job Bulletins/"+bulletins[bulletin]) as f:  ##reading text files
        data=f.read().replace('\t','').split('\n')
        # Headings are assumed to be the fully-uppercase lines.
        data=[head for head in data if head.isupper()]
        return data
def phi_text_page_parse(pageAnalyse_str, phi_main_url):
    """
    params: pageAnalyse_str, str.
    return: phi_page_dict, dict.

    It takes the precedent functions and maps their information to a
    dictionary.
    """
    # phi_text_id returns a (text id, text url) pair; call it once instead
    # of twice as the original did.
    text_id_result = phi_text_id(pageAnalyse_str, domain_url=phi_main_url)
    phi_page_dict = {}
    phi_page_dict["phi_text_id_no"] = text_id_result[0]
    phi_page_dict["phi_text_region"] = phi_lemma_region(pageAnalyse_str)
    phi_page_dict["phi_text_url"] = text_id_result[1]
    phi_page_dict["phi_text_info"] = phi_lemma_text_info(pageAnalyse_str)
    phi_page_dict["phi_text"] = phi_lemma_text(pageAnalyse_str)
    #
    return phi_page_dict
def fdfilt_lagr(tau, Lf, fs):
    """
    Lagrange fractional-delay filter design.

    Parameters
    ----------
    tau : delay / s
    Lf : length of the filter / sample (must be an integer)
    fs : sampling rate / Hz

    Returns
    -------
    h : (Lf)
        nonzero filter coefficients
    ni : time index of the first element of h
    n0 : time index of the center of h

    Raises
    ------
    ValueError
        if Lf is not an integer-like value.
    """
    d = tau * fs
    if Lf % 2 == 0:
        # Even length: center on the ceiling of the fractional delay.
        n0 = np.ceil(d)
        Lh = int(Lf/2)
        idx = np.arange(n0-Lh, n0+Lh).astype(int)
    elif Lf % 2 == 1:
        # Odd length: center on the nearest integer.
        n0 = np.round(d)
        Lh = int(np.floor(Lf/2))
        idx = np.arange(n0-Lh, n0+Lh+1).astype(int)
    else:
        # BUG FIX: this previously only printed a message and fell through
        # to a NameError on `idx`/`n0`; fail explicitly instead.
        raise ValueError('Invalid value of Lf. Must be an integer')
    return lagr_poly_barycentric2(idx, d), idx[0], n0
import tqdm
def hit_n_run(A_mat, b_vec, n_samples=200, hr_timeout=ALG_TIMEOUT_MULT):
    """ Hit and Run Sampler:
        1. Sample current point x
        2. Generate a random direction r
        3. Define gamma_i = ( b - a_i'x ) / ( r'a_i )
        4. Calculate max(gamma < 0) gamma_i and min(gamma > 0) gamma_i
        5. Sample uniformly from [min_gamma, max_gamma]

    :param A_mat: (m, n) constraint matrix of the polytope A x <= b
    :param b_vec: (m,) right-hand side
    :param n_samples: number of feasible points to collect
    :param hr_timeout: multiplier bounding the total number of proposal steps
    :return: list of sampled points
    :raises Exception: if fewer than min(0.4 * n_samples, 500) points were kept
    """
    m, n = A_mat.shape
    curr_pt = hit_n_run_init(A_mat, b_vec)
    pts = [curr_pt]
    pts_len = 1
    # BUG FIX: the module is imported as `import tqdm`, so the progress-bar
    # class must be referenced as tqdm.tqdm; calling the module object
    # raised a TypeError.
    bar = tqdm.tqdm(total=n_samples)
    for _ in range(n_samples * hr_timeout):
        direction = np.random.randn(n)
        direction = direction / np.linalg.norm(direction)
        # Step length to each constraint along `direction`:
        # gamma_i = (b_i - a_i'x) / (a_i'r).
        numer = b_vec - np.dot(A_mat, curr_pt)
        denom = np.dot(A_mat, direction)
        # (renamed loop vars: the original shadowed the dimension `n`)
        gamma = [num / den for num, den in zip(numer, denom)]
        gamma.append(0)
        gamma = np.array(gamma)
        if (gamma > 0).all():
            gamma_min = 0
        else:
            gamma_min = max(gamma[gamma < 0])
        if (gamma < 0).all():
            gamma_max = 0
        else:
            gamma_max = min(gamma[gamma > 0])
        magnitude = np.random.uniform(low=gamma_min, high=gamma_max)
        curr_pt = curr_pt + magnitude * direction
        if is_feasible(A_mat, b_vec, curr_pt):
            pts.append(curr_pt)
            bar.update(1)
            pts_len += 1
            if pts_len >= n_samples:
                break
    bar.close()
    if len(pts) < min(0.4 * n_samples, 500):
        raise Exception(
            'Sampled {} points instead of {}'.format(len(pts), 0.4 * n_samples)
        )
    return pts
def construct_simulation_hydra_paths(base_config_path: str) -> HydraConfigPaths:
    """
    Specifies relative paths to simulation configs to pass to hydra to declutter tutorial.
    :param base_config_path: Base config path.
    :return Hydra config path.
    """
    config_root = join(base_config_path, 'config')
    return HydraConfigPaths(
        "file://" + join(config_root, 'common'),
        'default_simulation',
        join(config_root, 'simulation'),
        "file://" + join(base_config_path, 'experiments'),
    )
def delete_from_limits_by_id(id, connection, cursor):
    """
    Delete row with a certain ID from limits table.

    :param id: ID to delete
    :param connection: connection instance
    :param cursor: cursor instance
    :return: dict with 'status'/'message' on success, or a 'failure' key
    """
    check_for_existence = get_limit_by_id(id, cursor)
    if check_for_existence.get('failure') is None:
        # SECURITY FIX: the original interpolated `id` directly into the SQL
        # string, allowing SQL injection; bind it as a parameter instead.
        # (qmark style — adjust the placeholder to %s if the DB driver
        # requires it.)
        cursor.execute('Delete from limits where id = ?', (id,))
        connection.commit()
        print(f'Record with id={id} deleted')
        return {'status': 'success', 'message': f'Record with id={id} deleted'}
    else:
        print(f'Failed to delete, ID={id} does not exist')
        return {'failure': f'Failed to delete, ID={id} does not exist'}
def article_search(request):
    """Article search view using a weighted search vector.

    (Translated from the original Ukrainian docstring:) searches over the
    title and body fields with weights 1 and 0.4 respectively; the query
    terms are stemmed and word proximity is taken into account.
    """
    query = ''
    results = []
    if 'query' in request.GET:
        results, query = search_results(request)
    context = {'query': query, 'results': results}
    return render(request, 'articles/post/search.html', context)
def get_campaign_data(api, campaign_id):
    """Return campaign metadata for the given campaign ID."""
    # Pull the raw campaign record from GoPhish as a dict.
    raw = api.campaigns.get(campaign_id).as_dict()
    # The GoPhish template name encodes the template ID as its third
    # dash-separated token.
    template_name = api.templates.get(raw["template"]["id"]).as_dict()["name"]
    campaign = {
        "id": raw["name"],
        "start_time": raw["launch_date"],
        "end_time": raw["completed_date"],
        "url": raw["url"],
        "subject": raw["template"]["subject"],
        "template": template_name.split("-")[2],
        "clicks": get_click_data(api, campaign_id),
        # E-mail send status, also from GoPhish.
        "status": get_email_status(api, campaign_id),
    }
    return campaign
def create_small_map(sharing_model):
    """
    Create small map and 2 BS.
    :returns: tuple (map, bs_list)
    """
    small_map = Map(width=150, height=100)
    bs_list = [
        Basestation('A', Point(50, 50), get_sharing_for_bs(sharing_model, 0)),
        Basestation('B', Point(100, 50), get_sharing_for_bs(sharing_model, 1)),
    ]
    return small_map, bs_list
def save_dataz(file_name, obj, **kwargs):
    """Save compressed structured data to a ``.npz`` file.

    BUG FIX: the docstring promised compression but ``np.savez`` writes an
    uncompressed archive; ``np.savez_compressed`` is used instead. The
    object is still stored under the default name ``arr_0`` and extra
    keyword arguments are forwarded unchanged, so existing ``np.load``
    readers are unaffected.
    """
    return np.savez_compressed(file_name, obj, **kwargs)
def step(x):
    """Heaviside step function: 0 for x < 0, 0.5 at x == 0, 1 for x > 0.

    Values that compare false to both tests (e.g. NaN) map to 1.0, matching
    the original masked-assignment implementation.
    """
    return np.where(x < 0, 0.0, np.where(x == 0, 0.5, 1.0))
def createRaviartThomas0VectorSpace(context, grid, segment=None,
                                    putDofsOnBoundaries=False,
                                    requireEdgeOnSegment=True,
                                    requireElementOnSegment=False):
    """
    Create and return a space of lowest order Raviart-Thomas vector
    functions whose normal components are continuous across element
    boundaries.

    *Parameters:*
       - context (Context)
            Determines the type used to represent the values of the basis
            functions of the newly constructed space.
       - grid (Grid)
            Grid on which the functions from the new space are defined.
       - segment (GridSegment)
            (Optional) Segment of the grid on which the space should be
            defined; None (default) means the whole grid.
       - putDofsOnBoundaries (bool)
            (Optional) If False (default), no degrees of freedom are placed
            on edges lying on grid boundaries — the usual choice for open
            perfectly conducting surfaces (sheets). If True, dofs are
            placed on all edges belonging to the chosen segment.
       - requireEdgeOnSegment (bool)
            (Optional) Require dof edges to lie on the segment.
       - requireElementOnSegment (bool)
            (Optional) Require elements to lie on the segment.

    *Returns* a newly constructed Space_BasisFunctionType object, with
    BasisFunctionType deduced from the context argument (one of float32,
    float64, complex64, complex128).
    """
    # Encode the segment-membership requirements as a bitmask:
    # bit 0 -> edges must lie on the segment, bit 1 -> elements must.
    dofMode = ((1 if requireEdgeOnSegment else 0) |
               (2 if requireElementOnSegment else 0))
    return _constructObjectTemplatedOnBasis(
        core, 'raviartThomas0VectorSpace', context.basisFunctionType(),
        grid, segment, putDofsOnBoundaries, dofMode)
def edus_toks2ids(edu_toks_list, word2ids):
    """Convert tokenised EDU sentences into id sequences.

    (Translated from the original Chinese comment: converts the argument
    sentences used for cbos training — and the cdtb argument-relation
    argument pairs — into lists of id sequences and returns them.)

    :param edu_toks_list: iterable of token lists, one per sentence
    :param word2ids: mapping from word to integer id
    :return: list of id sequences, one per input sentence
    """
    return [get_line_ids(toks=line, word2ids=word2ids)
            for line in edu_toks_list]
def pd_log_with_neg(ser: pd.Series) -> pd.Series:
    """Log-transform a series that may contain non-positive values by first
    shifting it to be strictly positive.

    BUG FIX: the shift must SUBTRACT the minimum (``ser - ser.min() + 1``);
    the original ADDED it, which pushes values further below zero whenever
    the minimum is negative and yields NaNs from ``np.log``.
    """
    return np.log(ser - ser.min() + 1)
def filter_X_dilutions(df, concentration):
    """Select only one dilution ('high', 'low', or some number).

    :param df: DataFrame whose index includes the levels 'CID' and
        'Dilution'
    :param concentration: 'high'/'low' to keep the highest/lowest dilution
        per CID (after sorting), or an int to keep rows whose Dilution
        equals that value
    :return: DataFrame reduced to one row per CID
    """
    assert concentration in ['high','low'] or type(concentration) is int
    # Sorting makes groupby().first()/.last() correspond to lowest/highest.
    df = df.sort_index(level=['CID','Dilution'])
    df = df.fillna(999) # Pandas doesn't select correctly on NaNs
    if concentration == 'low':
        df = df.groupby(level=['CID']).first()
    elif concentration == 'high':
        df = df.groupby(level=['CID']).last()
    else:
        # Numeric dilution: keep rows whose Dilution index level matches.
        df = df.loc[[x for x in df.index if x[1]==concentration]]
        df = df.groupby(level=['CID']).last()
    df = df.replace(999,float('NaN')) # Undo the fillna line above.
    return df
def update_params(base_param: dict, additional: dict):
    """Overwrite entries of the base parameter dictionary in place.

    Parameters
    ----------
    base_param : dict
        base param dictionary (mutated in place)
    additional : dict
        additional param dictionary whose entries take precedence

    Returns
    -------
    dict
        the updated base_param dictionary (same object)
    """
    # Idiomatic equivalent of the original key-by-key copy loop.
    base_param.update(additional)
    return base_param
import torch
def lanczos_generalized(
    operator,
    metric_operator=None,
    metric_inv_operator=None,
    num_eigenthings=10,
    which="LM",
    max_steps=20,
    tol=1e-6,
    num_lanczos_vectors=None,
    init_vec=None,
    use_gpu=False,
):
    """
    Use the scipy.sparse.linalg.eigsh hook to the ARPACK lanczos algorithm
    to find the top k eigenvalues/eigenvectors of the generalized problem.

    Parameters
    -------------
    operator: power_iter.Operator
        linear operator to solve.
    metric_operator: [np.ndarray, ScipyLinearOperator, power_iter.Operator]
        metric M of the generalized problem (passed to eigsh as M).
    metric_inv_operator: [np.ndarray, ScipyLinearOperator, power_iter.Operator]
        inverse of the metric (passed to eigsh as Minv).
    num_eigenthings : int
        number of eigenvalue/eigenvector pairs to compute
    which : str ['LM', SM', 'LA', SA']
        L,S = largest, smallest. M, A = in magnitude, algebriac
        SM = smallest in magnitude. LA = largest algebraic.
    max_steps : int
        maximum number of arnoldi updates
    tol : float
        relative accuracy of eigenvalues / stopping criterion
    num_lanczos_vectors : int
        number of lanczos vectors to compute. if None, > 2*num_eigenthings
    init_vec: [torch.Tensor, torch.cuda.Tensor]
        if None, use random tensor. this is the init vec for arnoldi updates.
    use_gpu: bool
        if true, use cuda tensors.

    Returns
    ----------------
    eigenvalues : np.ndarray
        array containing `num_eigenthings` eigenvalues of the operator
    eigenvectors : np.ndarray
        array containing `num_eigenthings` eigenvectors of the operator
    """
    # operator.size may be a scalar or a (rows, cols) pair.
    if isinstance(operator.size, int):
        size = operator.size
    else:
        size = operator.size[0]
    shape = (size, size)
    if num_lanczos_vectors is None:
        num_lanczos_vectors = min(2 * num_eigenthings, size - 1)
    if num_lanczos_vectors < 2 * num_eigenthings:
        warn(
            "[lanczos] number of lanczos vectors should usually be > 2*num_eigenthings"
        )

    # Bridge the torch-based operator into scipy: eigsh hands us numpy
    # arrays, which are converted to torch tensors, applied, and converted
    # back on the CPU.
    def _scipy_apply(x):
        x = torch.from_numpy(x)
        if use_gpu:
            x = x.cuda()
        return operator.apply(x.float()).cpu().numpy()

    scipy_op = ScipyLinearOperator(shape, _scipy_apply)
    # If the metric is already a numpy array / scipy operator, pass it
    # through unchanged; otherwise wrap it like the main operator.
    if isinstance(metric_operator, np.ndarray) or \
            isinstance(metric_operator, ScipyLinearOperator):
        metric_op = metric_operator
    else:
        def _scipy_apply_metric(x):
            x = torch.from_numpy(x)
            if use_gpu:
                x = x.cuda()
            return metric_operator.apply(x.float()).cpu().numpy()

        metric_op = ScipyLinearOperator(shape, _scipy_apply_metric)
    # Same wrapping logic for the inverse metric (eigsh's Minv).
    if isinstance(metric_inv_operator, np.ndarray) or \
            isinstance(metric_inv_operator, ScipyLinearOperator):
        metric_inv_op = metric_inv_operator
    else:
        def _scipy_apply_metric_inv(x):
            x = torch.from_numpy(x)
            if use_gpu:
                x = x.cuda()
            return metric_inv_operator.apply(x.float()).cpu().numpy()

        metric_inv_op = ScipyLinearOperator(shape, _scipy_apply_metric_inv)
    if init_vec is None:
        init_vec = np.random.rand(size)
    elif isinstance(init_vec, torch.Tensor):
        init_vec = init_vec.cpu().numpy()
    eigenvals, eigenvecs = eigsh(
        A=scipy_op,
        k=num_eigenthings,
        M=metric_op,
        Minv=metric_inv_op,
        which=which,
        maxiter=max_steps,
        tol=tol,
        ncv=num_lanczos_vectors,
        return_eigenvectors=True,
    )
    # eigsh returns eigenvectors as columns; transpose so each row is one.
    return eigenvals, eigenvecs.T
import urllib
import json
def get_articles_news(name):
    """
    Fetch the top headlines for the given news source and return the
    processed articles (or None if the source returned no articles).

    NOTE(review): the API key is hard-coded in the URL; consider moving it
    to configuration.
    """
    get_news_url = 'https://newsapi.org/v2/top-headlines?sources={}&apiKey=988fb23113204cfcb2cf79eb7ad99b76'.format(name)
    with urllib.request.urlopen(get_news_url) as url:
        response_payload = json.loads(url.read())
    if response_payload['articles']:
        return process_articles(response_payload['articles'])
    return None
import os
def execute(
    name,
    params=None,
    constraints=None,
    data_folder=None,
    tag=None,
    time_to_expire_secs=None,
    suffix=None,
    app_metrics=None,
    app_params=None,
    metadata=None,
):
    """
    Create an instance of a workflow.

    :param name: name of the workflow to instantiate
    :param params: parameter dictionary passed to the workflow instance
    :param constraints: execution constraints (e.g. emails, ids or groups the
        task may be sent to)
    :param data_folder: folder uploaded and exposed to the instance through
        canotic.request.data()
    :param tag: auxiliary workflow tag
    :param time_to_expire_secs: expiration time in seconds
    :return: the scheduled workflow when running inside an agent, else None
    """
    # Outside of an agent context there is nothing to schedule.
    if "CANOTIC_AGENT" not in os.environ:
        return None
    return schedule_workflow(
        name,
        params,
        constraints,
        data_folder,
        tag,
        time_to_expire_secs,
        suffix,
        app_metrics,
        app_params,
        metadata,
    )
def coordConv(fromP, fromV, fromSys, fromDate, toSys, toDate, obsData=None, refCo=None):
    """Convert a position (and velocity) between two coordinate systems.

    Inputs:
    - fromP(3)      cartesian position (au)
    - fromV(3)      cartesian velocity (au/year); ignored when fromSys is
                    Geocentric, Topocentric or Observed
    - fromSys       source coordinate system (opscore.RO.CoordSys constant):
                    ICRS, FK5, FK4, Galactic, Geocentric, Topocentric, Observed
    - fromDate      date of the source coordinates (units depend on system:
                    Julian/Besselian epoch for mean systems, UT1 MJD for
                    Geocentric/Topocentric/Observed)
    - toSys         destination coordinate system (same choices as fromSys)
    - toDate        date of the destination coordinates
    - obsData       opscore.RO.Astro.Cnv.ObserverData; required when either
                    system is Topocentric or Observed, ignored otherwise
    - refCo(2)      refraction coefficients; required when either system is
                    Observed, ignored otherwise

    Returns:
    - toP(3)        converted cartesian position (au)
    - toV(3)        converted cartesian velocity (au/year)

    A fromV of all zeros denotes a fixed object, which slightly affects
    conversion to or from FK4 (fictitious proper motion).

    Error conditions:
    - Raises ValueError when a required obsData or refCo is absent.

    Details:
    The conversion runs in two stages, each along the graph
    FK5/FK4/Galactic -- ICRS -- Geocentric -- Topocentric -- Observed,
    first from the source system into ICRS, then from ICRS to the target.
    """
    # Delegate the actual computation to the shared converter object.
    conv_args = (fromP, fromV, fromSys, fromDate, toSys, toDate, obsData, refCo)
    return _TheCnvObj.coordConv(*conv_args)
def sample_recipe(user, **params):
    """Create and return a sample recipe, overriding defaults with params."""
    recipe_fields = {
        'title': 'paneer tikka',
        'time_minute': 10,
        'price': 5.00,
    }
    # Any caller-supplied keyword overrides the default field value.
    recipe_fields.update(params)
    return Recipe.objects.create(user=user, **recipe_fields)
def briconToScaleOffset(brightness, contrast, drange):
    """Convert brightness/contrast settings into a (scale, offset) pair.

    Used by :func:`briconToDisplayRange` and :func:`applyBricon` to derive
    the linear transform that applies the given settings to a display range
    of the given size.

    :arg brightness: Brightness, between 0.0 and 1.0.
    :arg contrast:   Contrast, between 0.0 and 1.0.
    :arg drange:     Data range.
    """
    # Brightness maps linearly to an offset: 0.5 -> 0, with the extremes
    # reaching +/- one full data range.
    offset = (brightness * 2 - 1) * drange

    if contrast > 0.5:
        # Upper half of the contrast range scales exponentially, so values
        # near 1.0 have a much stronger effect than those near 0.5.
        scale = 20 * contrast ** 4 - 0.25
    else:
        # Lower half of the contrast range is a simple linear scale.
        scale = contrast * 2

    return scale, offset
def HostNameRequestHeader(payload_size):
    """
    Construct a ``MessageHeader`` for a HostNameRequest command.
    Sends local host name to virtual circuit peer. This name will affect
    access rights. Sent over TCP.

    Parameters
    ----------
    payload_size : integer
        Length of host name string.
    """
    struct_args = (21, payload_size, 0, 0, 0, 0)
    # Use the extended header when the payload size overflows a 16-bit field.
    # (The original wrapped this single comparison in any((...,)), which was
    # a vacuous one-element tuple test.)
    if payload_size > 0xffff:
        return ExtendedMessageHeader(*struct_args)
    return MessageHeader(*struct_args)
import os
import numpy
def california_quadtree_region(magnitudes=None, name="california-quadtree"):
    """
    Returns object of QuadtreeGrid2D representing quadtree grid for California RELM testing region.

    The grid is pre-generated at zoom level 12 (cells selected using the
    external boundary of the RELM California region) and loaded through the
    classmethod QuadtreeGrid2D.from_quadkeys. It can be used to create
    gridded datasets for earthquake forecasts.

    Args:
        magnitudes: Magnitude discretization
        name: string

    Returns:
        :class:`csep.core.spatial.QuadtreeGrid2D
    """
    # Resolve the quadkey file shipped inside the python package.
    package_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    quadkey_file = os.path.join(package_root, 'artifacts', 'Regions',
                                'california_qk_zoom=12.txt')
    quadkeys = numpy.genfromtxt(quadkey_file, delimiter=',', dtype='str')
    return QuadtreeGrid2D.from_quadkeys(quadkeys, magnitudes=magnitudes, name=name)
def cinema_trip(persons, day, premium_seating, treat):
    """
    The total cost of going to the cinema.

    Parameters:
    ----------
    persons: int
        number of people who need a ticket
    day: int
        day of the week to book (1 = Monday, 7 = Sunday)
    premium_seating: bool
        True/False if premium seats are required
    treat: str
        string value representing a choice of refreshment

    Returns:
    -------
    float
    """
    ticket_cost = tickets(persons, day, premium_seating)
    treat_cost = refreshment(treat)
    return ticket_cost + treat_cost
import pathlib
def check_path(path: pathlib.Path) -> bool:
    """Return True when *path* names an existing regular file."""
    if not path.exists():
        return False
    return path.is_file()
def omega2kwave(omega, depth, grav=9.81):
    """
    Solve the linear dispersion relation close to machine precision::
        omega**2 = kwave * grav * tanh(kwave*depth)
    Parameters
    ----------
    omega : float
        Wave oscillation frequency [rad/s]
    depth : float
        Constant water depth. [m] (<0 indicates infinite depth)
    grav : float, optional
        Acceleration of gravity [m/s^2]
    Returns
    -------
    float
        Wave number (kwave) [1/m]
    Raises
    ------
    None
    """
    # NOTE(review): relies on a module-level `import numpy as np` that is not
    # visible in this chunk — confirm it exists at file scope.
    if depth < 0.0:
        # Infinite depth: tanh(k*d) -> 1, so omega**2 = kwave * grav exactly.
        return omega**2 / grav
    # Solve equivalent equation system: c == y * tanh(y), kwave = y / depth
    c = depth * omega**2 / grav
    # High accuracy fix point schemes
    if c > 2.5:
        def f(y0):
            # y == c/tanh(y)
            # tanh(y) = 1 - eps, Near y=y0 the RHS is almost c.
            # Solve y== c / tanh(y0)
            return c / np.tanh(y0)
    else:
        def f(y0):
            # y*(k0 + k1*(y-y0)) == c*(k0 + k1*(y-y0))/tanh(y0)
            # tanh(y0) = k0 + k1*(y-y0) + ...
            # Near y=y0 the RHS is almost c.
            # Solve y*(k0 + k1*(y-y0)) == c for y
            k0 = np.tanh(y0)
            k1 = 1.0 - k0**2
            b = k0 - k1 * y0
            # Positive root of the quadratic k1*y**2 + b*y - c == 0.
            return 0.5 * (-b + np.sqrt(b**2 + 4.0 * c * k1)) / k1
    # Fist initial guess (MIT lecture notes... 4 digits accuracy)
    if c > 2.4:
        # Deeper water...
        y = c * (1.0 + 3.0 * np.exp(-2 * c) - 12.0 * np.exp(-4 * c))
        # using fixed point iteration: y <= c + y - y * tanh(y)
    else:
        # Shallower water...
        y = np.sqrt(c) * (1.0 + 0.169 * c + 0.031 * c ** 2)
        # using fixed point iteration: y <= sqrt(c * y / tanh(y))
    y_prev = -1.0
    # Iterate until successive estimates agree to within ~100 ULP.
    while abs(y - y_prev) > 100 * np.finfo(y).eps:
        y_prev = y
        y = f(y)
    kwave = y / depth
    return kwave
def is_true(a: Bool) -> bool:
    """Report whether the wrapped z3 expression simplifies to literal true.

    :param a: the wrapped boolean expression to inspect
    :return: True when the underlying z3 expression is the true constant
    """
    raw_expr = a.raw
    return z3.is_true(raw_expr)
from typing import Counter
def traverse_caves_recursive(cave: str, cave_system: dict, current_path: list[str]):
    """Recursively enumerate all paths through the cave system.

    Small (lowercase) caves may be visited at most once per path; "START" is
    never recorded in the path. Returns the completed path when "END" is
    reached, a (nested) list of sub-results for branching caves, and None
    for a dead end (preserved from the original behavior).
    """
    if cave != "START":
        # Extend a copy so sibling branches do not share path state.
        current_path = current_path + [cave]
    if cave == "END":
        return current_path
    # Fixed: the original comprehensions shadowed the `cave` parameter and
    # tested `Counter(...)[cave] > 0`, which is vacuously true for every key.
    visited_small = {c for c in current_path if c.islower()}
    next_caves = [c for c in cave_system[cave] if c not in visited_small]
    if next_caves:
        return [
            traverse_caves_recursive(c, cave_system, current_path)
            for c in next_caves
        ]
    # Dead end: fall through and return None implicitly.
from sys import path
def check_dummybots():
    """
    Checks the availability of dummybots and sets the global flag. Runs once
    per test session.
    """
    global DUMMYBOTS
    if DUMMYBOTS['tested']:
        return
    DUMMYBOTS['tested'] = True
    # Bug fix: the module imports `from sys import path`, so `path.join`
    # would raise AttributeError (sys.path is a list). Use os.path instead.
    import os.path
    # Load bot configuration; the context manager guarantees the handle is
    # closed even if the JSON is malformed.
    with open(os.path.join(TEST_CONFIG_PATH, 'bots.json'), 'r') as fp:
        bot_connections = loads(fp.read())

    # Check the connection to dummybots concurrently
    def worker(bot_url):
        # Probe one bot; any network error or bad status counts as down.
        try:
            r = head('{}/askmeanything?q=test'.format(bot_url), timeout=5)
            assert r.ok
            return r
        except (RequestException, AssertionError):
            return None

    urls = [connection['url'] for connection in bot_connections]
    # Close the pool when done instead of leaking its worker threads.
    pool = ThreadPool(processes=3)
    try:
        bot_available = pool.map(worker, urls)
    finally:
        pool.close()
        pool.join()
    # All probes must succeed for the dummybots to count as available.
    for available in bot_available:
        if available is None:
            DUMMYBOTS['available'] = False
            return
    DUMMYBOTS['available'] = True
def count(A, target):
    """Return how many times target appears in list A (divide and conquer)."""
    if not A:
        # Guard: the recursive helper assumes a non-empty slice; without
        # this, an empty list would recurse on (lo=0, hi=-1) forever.
        return 0

    def rcount(lo, hi, target):
        """Count occurrences of target in A[lo:hi+1] recursively."""
        if lo == hi:
            return 1 if A[lo] == target else 0
        mid = (lo + hi) // 2
        left = rcount(lo, mid, target)
        right = rcount(mid + 1, hi, target)
        return left + right

    return rcount(0, len(A) - 1, target)
def asset_get_current_log(asset_id):
    """Return a Storage describing the latest active log entry for an asset,
    or an empty Storage when no usable entry exists.
    """
    table = current.s3db.asset_log
    query = ((table.asset_id == asset_id) &
             (table.cancel == False) &
             (table.deleted == False))
    # Newest entry first; only the single most recent row is needed.
    latest = current.db(query).select(table.id,
                                      table.status,
                                      table.datetime,
                                      table.cond,
                                      table.person_id,
                                      table.site_id,
                                      #table.location_id,
                                      orderby = ~table.datetime,
                                      limitby=(0, 1)).first()
    if latest:
        return Storage(datetime = latest.datetime,
                       person_id = latest.person_id,
                       cond = int(latest.cond or 0),
                       status = int(latest.status or 0),
                       site_id = latest.site_id,
                       #location_id = latest.location_id
                       )
    return Storage()
def _filter_to_k_shot(dataset, num_classes, k):
    """Filters k-shot subset from a dataset.

    Scans the (unshuffled) dataset in order, keeping the first `k` examples
    of each of `num_classes` classes, and returns a filtered, cached
    tf.data.Dataset containing only those examples.

    Args:
        dataset: tf.data dataset yielding (example, label) pairs; the label
            is used as an integer class index.
        num_classes: total number of classes to balance over.
        k: number of examples to keep per class.

    Returns:
        A tf.data.Dataset restricted to the selected k-shot examples.
    """
    # !!! IMPORTANT: the dataset should *not* be shuffled. !!!
    # Make sure that `shuffle_buffer_size=1` in the call to
    # `dloader.get_tf_data`.
    # Indices of included examples in the k-shot balanced dataset.
    keep_example = []
    # Keep track of the number of examples per class included in
    # `keep_example`.
    class_counts = np.zeros([num_classes], dtype=np.int32)
    for _, label in dataset.as_numpy_iterator():
        # If there are less than `k` examples of class `label` in `example_indices`,
        # keep this example and update the class counts.
        keep = class_counts[label] < k
        keep_example.append(keep)
        if keep:
            class_counts[label] += 1
        # When there are `k` examples for each class included in `keep_example`,
        # stop searching.
        if (class_counts == k).all():
            break
    # Pair each example with its keep-flag; zip stops at the shorter input,
    # so examples past the early break are dropped entirely.
    dataset = tf.data.Dataset.zip((
        tf.data.Dataset.from_tensor_slices(keep_example),
        dataset
    )).filter(lambda keep, _: keep).map(lambda _, example: example).cache()
    return dataset
def _create_tf_example(entry):
    """ Creates a tf.train.Example to be saved in the TFRecord file.
    Args:
        entry: string containing the path to a image and its label.
    Return:
        tf_example: tf.train.Example containing the info stored in feature
    """
    image_path, label = _get_image_and_label_from_entry(entry)
    # Decode the image file into a numpy array of raw pixel values.
    image = Image.open(image_path)
    image_np = np.array(image)
    # tobytes() replaces tostring(), which was deprecated in NumPy 1.19 and
    # removed in NumPy 2.0; the produced bytes are identical.
    image_raw = image_np.tobytes()
    # Data which is going to be stored in the TFRecord file
    feature = {
        'image': tfrecord_utils.bytes_feature(image_raw),
        'image/height': tfrecord_utils.int64_feature(image_np.shape[0]),
        'image/width': tfrecord_utils.int64_feature(image_np.shape[1]),
        'label': tfrecord_utils.int64_feature(label),
    }
    tf_example = tf.train.Example(features=tf.train.Features(feature=feature))
    return tf_example
def _gen_parameters_section(names, parameters, allowed_periods=None):
"""Generate the "parameters" section of the indicator docstring.
Parameters
----------
names : Sequence[str]
Names of the input parameters, in order. Usually `Ind._parameters`.
parameters : Mapping[str, Any]
Parameters dictionary. Usually `Ind.parameters`, As this is missing `ds`, it is added explicitly.
"""
section = "Parameters\n----------\n"
for name in names:
if name == "ds":
descstr = "Input dataset."
defstr = "Default: None."
unitstr = ""
annotstr = "Dataset, optional"
else:
param = parameters[name]
descstr = param["description"]
if param["kind"] == InputKind.FREQ_STR and allowed_periods is not None:
descstr += (
f" Restricted to frequencies equivalent to one of {allowed_periods}"
)
if param["kind"] == InputKind.VARIABLE:
defstr = f"Default : `ds.{param['default']}`. "
elif param["kind"] == InputKind.OPTIONAL_VARIABLE:
defstr = ""
else:
defstr = f"Default : {param['default']}. "
if "choices" in param:
annotstr = str(param["choices"])
else:
annotstr = KIND_ANNOTATION[param["kind"]]
if param.get("units", False):
unitstr = f"[Required units : {param['units']}]"
else:
unitstr = ""
section += f"{name} : {annotstr}\n {descstr}\n {defstr}{unitstr}\n"
return section | 9bc249ca67dcc1c0f7ff8538b8078bfcd5c231a1 | 25,250 |
import os
def unload_agent():
    """Unload the launch agent.

    Returns zero on success or when the plist does not exist (nothing to
    unload); returns launchctl's non-zero status on failure.
    """
    plist_path = installed_plist_path()
    ret = 0
    if os.path.exists(plist_path):
        ret = sync_task([LAUNCHCTL_PATH, "unload", "-w", "-S", "Aqua", plist_path])
    else:
        log_message("nothing to unload")
    if ret:
        log_message("unable to unload agent %s" % (plist_path))
    # Bug fix: previously this returned 0 unconditionally, masking failures
    # and contradicting the documented "zero in case of success" contract.
    return ret
def bins(df):
    """Bucket final scores into 10-point intervals for building charts.

    Parameters
    ----------
    df : pandas.DataFrame
        Input DataFrame with 'ALUNO' and 'Pontuação final' columns.

    Returns
    -------
    pandas.DataFrame
        Per-interval counts plus percentage and cumulative columns.
    """
    # Left-closed 10-point intervals covering [0, 100).
    intervals = pd.cut(df['Pontuação final'].rename('Intervalos'),
                       np.arange(0, 101, 10), right=False)
    counts = df['ALUNO'].rename('Contagem').groupby(intervals).count()
    df_bins = pd.DataFrame(counts)
    df_bins['Contagem /%'] = round(100 * df_bins['Contagem'] /
                                   df_bins['Contagem'].sum(), 2)
    df_bins['Contagem cumulativa'] = df_bins['Contagem'].cumsum()
    df_bins['Contagem /% cumulativa'] = df_bins['Contagem /%'].cumsum()
    return df_bins
import os
import logging
def _GetBrowserSharedRelroConfig():
    """Returns the browser process shared RELRO configuration string.

    Parses the Java linker source file to extract the setting.

    Return:
      None in case of error (missing source file, unparsable or unknown
      value). Otherwise one of:
      'NEVER' if the browser process shall never use shared RELROs.
      'LOW_RAM_ONLY' if it uses them only on low-end devices.
      'ALWAYS' if it always uses a shared RELRO.
    """
    source_path = os.path.join(constants.DIR_SOURCE_ROOT,
                               _LINKER_JAVA_SOURCE_PATH)
    if not os.path.exists(source_path):
        logging.error('Could not find linker source file: ' + source_path)
        return None
    with open(source_path) as f:
        configs = _RE_LINKER_BROWSER_CONFIG.findall(f.read())
    if not configs:
        logging.error(
            'Can\'t find browser shared RELRO configuration value in ' +
            source_path)
        return None
    config_value = configs[0]
    if config_value not in ('NEVER', 'LOW_RAM_ONLY', 'ALWAYS'):
        logging.error('Unexpected browser config value: ' + config_value)
        return None
    logging.info('Found linker browser shared RELRO config: ' + config_value)
    return config_value
from datetime import datetime
def insert_video(ID):
    """
    Insert a YouTube video document into MongoDB if not already present.

    Given a valid YouTube ID, checks for its existence in the database; if
    not found, calls the YouTube API and inserts the result.

    Returns True after inserting, False when already in the database, and
    None (implicitly) when the API lookup fails.
    """
    client = MongoClient('localhost:27017')
    db = client['PreCog']
    collection = db['YoutubeRaw']
    check_collection = db['YoutubeProcessed']
    check = check_collection.find_one({"ID" : ID})
    if check is None:
        video = youtube_search(ID)
        if video is not None:
            result = collection.insert_one(video)
            # Bug fix: `datetime` is the class imported via
            # `from datetime import datetime`, so the original
            # `datetime.datetime.now()` raised AttributeError.
            print(result.inserted_id, datetime.now())
            return True
    else:
        print("Already in DataBase")
        return False
def get_neighbor_v6_by_search(search=None):
    """Return a datatable-style mapping of NeighborV6 objects for *search*."""
    try:
        queryset = NeighborV6.objects.filter()
        # An absent search payload is treated as an empty filter dict.
        object_map = build_query_to_datatable_v3(queryset,
                                                 search if search else dict())
    except FieldError as e:
        # Invalid field names in the search payload are a client error.
        raise api_rest_exceptions.ValidationAPIException(str(e))
    except Exception as e:
        raise api_rest_exceptions.NetworkAPIException(str(e))
    return object_map
import functools
def validate_customer(fn):
    """
    Decorator validating that 1-5 credit cards are supplied and that each,
    after stripping spaces and non-numeric characters, is 16 digits long.

    On failure returns an (error message, 400) tuple instead of calling the
    wrapped function; on success rewrites kwargs["credit_cards"] with the
    normalized numbers and delegates to *fn*.
    """
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        # Normalize: drop spaces, then any remaining non-numeric characters.
        cc_list = kwargs.get("credit_cards")
        trimmed_cc = [remove_non_numeric(cc.replace(' ', '')) for cc in cc_list]
        num_credit_cards = len(trimmed_cc)
        if num_credit_cards < 1 or num_credit_cards > 5:
            return "Number of credit cards must be between 1 and 5, inclusive", 400
        # Validate credit card composition
        for cc in trimmed_cc:
            if len(cc) != 16 or not cc.isdigit():
                # Security fix: do not echo the full card number back to the
                # client — leaking PAN data in error responses. Show only the
                # last four digits.
                masked = '*' * max(len(cc) - 4, 0) + cc[-4:]
                return f"Credit card {masked} must be 16 digits long", 400
        # If passed, continue with registration
        kwargs["credit_cards"] = trimmed_cc
        return fn(*args, **kwargs)
    return wrapped
from typing import Any
from typing import Sequence
from typing import Mapping
def format_field(value: Any) -> str:
    """
    Format a single field for table or CSV console output, flattening the
    nested arrays or objects found in API JSON responses.

    :param value: the value to format
    :return: a console-friendly representation of *value*
    """
    is_listish = isinstance(value, Sequence) and not isinstance(value, (str, bytes))
    if is_listish:
        scalar_types = (str, bytes, int, float)
        if all(isinstance(item, scalar_types) for item in value):
            # Flat list of scalars: render as a comma-separated string.
            return ", ".join(str(item) for item in value)
        return dumps(value)
    if isinstance(value, Mapping):
        return dumps(value)
    # NOTE(review): non-sequence, non-mapping values are returned unchanged,
    # despite the `-> str` annotation — confirm callers rely on passthrough.
    return value
def count_honeypot_events():
    """
    Get total number of honeypot events.

    When the request carries a parsable "date" range, counts only events
    inside that range (inclusive); otherwise returns the collection's
    estimated total. Any backend failure yields the null-array response.

    Returns:
        (flask JSON response, 200) with the count
    """
    # fix_date returns a (start, end) pair when the request parameter parses.
    date = fix_date(
        get_value_from_request("date")
    )
    if date:
        try:
            return jsonify(
                {
                    "count_honeypot_events_by_date": connector.honeypot_events.count_documents(
                        {
                            "date": {
                                "$gte": date[0],
                                "$lte": date[1]
                            }
                        }
                    ),
                    "date": date
                }
            ), 200
        except Exception as _:
            # Best-effort endpoint: any DB error degrades to a null response.
            return flask_null_array_response()
    else:
        try:
            return jsonify(
                {
                    "count_honeypot_events": connector.honeypot_events.estimated_document_count()
                }
            ), 200
        except Exception as _:
            return flask_null_array_response()
def k_i_grid(gridsize, boxsize):
    """k_i_grid(gridsize, boxlen)

    Build the per-axis wavenumber component grids (k1, k2, k3) for a cubic
    FFT grid of the given size and box dimensions. The third axis only spans
    the non-negative half (halfgrid + 1 samples), matching a real-to-complex
    transform layout — presumably rfft ordering; confirm against callers.
    """
    halfgrid = gridsize // 2
    # Normalize boxsize into a per-axis tuple so dk is an array of 3 steps.
    boxsize = egp.utils.boxsize_tuple(boxsize)
    dk = 2 * np.pi / boxsize
    kmax = gridsize * dk
    _ = np.newaxis
    k1, k2, k3 = dk[:, _, _, _] * np.mgrid[0:gridsize, 0:gridsize, 0:halfgrid + 1]
    k1 -= kmax[0] * (k1 > dk[0] * (halfgrid - 1))  # shift the second half to negative k values
    k2 -= kmax[1] * (k2 > dk[1] * (halfgrid - 1))
    return np.array((k1, k2, k3))
def is_bool_type(typ):
    """
    Check if the given type is a bool.
    """
    # Unwrap typing.NewType aliases to their underlying supertype first.
    real_type = getattr(typ, '__supertype__', typ)
    return isinstance(real_type, type) and issubclass(real_type, bool)
def gaussians_entropy(covars, ns=None):
    """
    Calculate the total differential entropy of an array of Gaussians.

    :param covars: [N*D*D] array of covariance matrices
    :param ns: numerical namespace providing pi, log, sum and det
               (defaults to nt.NumpyLinalg)
    :return: total entropy, sum_i 0.5*(D*(1 + log(2*pi)) + log|Sigma_i|)
    """
    if ns is None:
        # Resolve lazily so importing this module does not require `nt`
        # at function-definition time (the original default did).
        ns = nt.NumpyLinalg
    # Bug fix: the body referenced undefined names `covar` and `log`,
    # raising NameError, and summed det() without the log required by the
    # Gaussian entropy formula.
    N = covars.shape[0]
    D = covars.shape[-1]
    return 0.5 * N * D * (1 + ns.log(2 * ns.pi)) + 0.5 * ns.sum(ns.log(ns.det(covars)))
def genBetaModel(matshape, cen, betaparam):
    """
    Generate a 2D beta model image with the given parameters.

    inputs
    ======
    matshape: tuple or list
        Shape of the matrix (rows, cols)
    cen: tuple or list
        Location of the center pixel (x, y), 1-based
    betaparam: dict
        Parameters of the beta function
        { "A": float, "r0": float, "theta": float,
          "beta": float, "majaxis": float, "minaxis": float }

    output
    ======
    matbeta: np.ndarray
        The matrix with modelled beta values, or None when a parameter
        is missing.
    """
    if len(betaparam) != 6:
        print("There might be short of parameter.")
        return None
    matbeta = np.zeros(matshape)
    # load parameters
    A = betaparam['A']
    r0 = betaparam['r0']
    theta = betaparam['theta']
    beta = betaparam['beta']
    majaxis = betaparam['majaxis']
    minaxis = betaparam['minaxis']
    ecc = majaxis / minaxis  # eccentricity
    # Bug fix: X must span the columns (matshape[1]) and Y the rows
    # (matshape[0]); the original swapped them and raised IndexError for
    # non-square shapes. Square inputs behave identically.
    X = np.linspace(1, matshape[1], matshape[1])
    Y = np.linspace(1, matshape[0], matshape[0])
    # anti-clockwise rotation by theta
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
    for j, x in enumerate(X):
        for i, y in enumerate(Y):
            # Rotate the offset from the centre, then form the elliptical
            # radius with the minor axis scaled by the eccentricity.
            offset = np.matmul(rot, np.array([x - cen[0], y - cen[1]]))
            r = offset[0]**2 + offset[1]**2 * ecc**2
            matbeta[i, j] = A * (1 + r/r0**2)**(-np.abs(beta))
    return matbeta
def folders_to_create(search_path, dirs, base_path=""):
    """
    Walk the requested path to find the longest already-existing subpath.

    Returns the dir info of the deepest existing folder together with the
    list of path segments that still need to be created.
    """
    # Accept either a pre-split list of segments (used during recursion)
    # or a plain string path from the caller.
    if isinstance(search_path, list):
        parts = search_path
    else:
        parts = search_path.strip("/").split("/")
    # Shared drives do not start with '/'; their first segment is the drive.
    if base_path == "" and not search_path.startswith("/"):
        base_path = parts.pop(0)
    matches = [entry for entry in dirs if entry.get("path", "") == base_path]
    parent = matches.pop() if matches else {"id": "root"}
    if not parts:
        # Ran out of segments: the full path already exists.
        return parent, []
    base_path += "/" + parts[0]
    remaining = [entry for entry in dirs
                 if entry.get("path", "").startswith(base_path)]
    if remaining:
        # Something exists under the longer prefix; keep descending.
        return folders_to_create(parts[1:], remaining, base_path)
    return parent, parts
def profile_line(image, src, dst, linewidth=1,
                 order=1, mode='constant', cval=0.0):
    """Return the intensity profile of an image measured along a scan line.

    Parameters
    ----------
    image : numeric array, shape (M, N[, C])
        Grayscale (2D) or multichannel (3D, channels last) image.
    src : 2-tuple of numeric scalar (float or int)
        Start point of the scan line.
    dst : 2-tuple of numeric scalar (float or int)
        End point of the scan line; in contrast to standard numpy indexing
        the destination point is *included* in the profile.
    linewidth : int, optional
        Width of the scan perpendicular to the line.
    order : int in {0, 1, 2, 3, 4, 5}, optional
        Spline interpolation order for non-integer coordinates
        (0 = nearest neighbor).
    mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional
        How values outside the image are computed.
    cval : float, optional
        Constant used outside the image when `mode` is 'constant'.

    Returns
    -------
    return_value : array
        Intensity profile along the scan line; its length is the ceil of
        the computed line length.
    """
    coords = _line_profile_coordinates(src, dst, linewidth=linewidth)
    if image.ndim == 3:
        # Sample each channel separately, then restack with channels last.
        channel_profiles = [
            ndi.map_coordinates(image[..., ch], coords,
                                order=order, mode=mode, cval=cval)
            for ch in range(image.shape[2])
        ]
        pixels = np.transpose(np.asarray(channel_profiles), (1, 2, 0))
    else:
        pixels = ndi.map_coordinates(image, coords,
                                     order=order, mode=mode, cval=cval)
    # Average across the scan width to collapse to a single profile.
    return pixels.mean(axis=1)
def set_out(pin, state):
    """
    Drive a simple digital (high/low) output pin.

    :param pin: pin number or logical name
    :param state: state: 1/0 = True/False
    :return: verdict dict echoing the applied pin and state
    """
    out_pin = __digital_out_init(pin)
    out_pin.value(state)
    return {'pin': pin, 'state': state}
import requests
def get_session():
    """Build and return a reusable ``requests.Session``.

    The session is pre-configured with the module-level ``auth`` credentials.
    """
    session = requests.Session()
    session.auth = auth
    # NOTE(review): TLS certificate verification is disabled for every request
    # made through this session — confirm this is intentional (self-signed
    # endpoint?), as it exposes traffic to man-in-the-middle attacks.
    session.verify = False
    return session
import os
def main():
    """Entry point: emit the dynamic inventory for the resolved plugin.

    Returns ``os.EX_OK`` on success; raises ``exceptions.InventoryError``
    when a requested host is absent from the inventory.
    """
    options = get_options()
    LOG.debug("Options are %s", options)
    entry_point = get_entrypoint()
    plugin = get_plugin(entry_point)
    inventory = plugin.get_dynamic_inventory()
    if options.list:
        # NOTE(review): the return value of `dumps` is discarded here —
        # presumably it is a local helper that prints the JSON; confirm.
        dumps(inventory)
    elif options.host in inventory["_meta"]["hostvars"]:
        dumps(inventory["_meta"]["hostvars"][options.host])
    else:
        raise exceptions.InventoryError(
            "Cannot find required host {0}".format(options.host))
    return os.EX_OK
def create_test_action(context, **kw):
    """Create and return a test action object.

    Builds an action in the DB and returns the Action object with the
    appropriate attributes.
    """
    test_action = get_test_action(context, **kw)
    test_action.create()
    return test_action
def parse(header_array, is_paper=False):
    """ Decides which version of the headers to use."""
    if is_paper:
        version = clean_entry(header_array[1])
        if paper_headers_v1_re.match(version):
            headers_list = paper_headers_v1
        elif paper_headers_v2_2_re.match(version):
            headers_list = paper_headers_v2_2
        elif paper_headers_v2_6_re.match(version):
            headers_list = paper_headers_v2_6
        else:
            raise UnknownHeaderError("Couldn't find parser for paper version %s" % (version))
    else:
        version = clean_entry(header_array[2])
        if old_eheaders_re.match(version):
            headers_list = old_eheaders
        elif new_eheaders_re.match(version):
            headers_list = new_eheaders
        else:
            raise UnknownHeaderError("Couldn't find parser for electronic version %s" % (version))
    headers = {}
    for idx, key in enumerate(headers_list):
        # Header rows may legally omit trailing delimiters, so any missing
        # trailing field is recorded as an empty string.
        try:
            headers[key] = clean_entry(header_array[idx])
        except IndexError:
            headers[key] = ""
    return headers
def _GetIssueIDsFromLocalIdsCond(cnxn, cond, project_ids, services):
  """Returns global IDs from the local IDs provided in the cond.

  NOTE: this is Python 2 code (itervalues, list-returning .values()).

  Args:
    cnxn: database connection.
    cond: query condition whose str_values hold issue refs
        ("project:local_id" or bare "local_id").
    project_ids: ids of the projects in scope for the search.
    services: service registry used to look up projects and issues.

  Raises:
    MalformedQuery: a bare local id was given while more than one project
        is in scope, so the reference is ambiguous.
  """
  # Get {project_name: project} for all projects in project_ids.
  ids_to_projects = services.project.GetProjects(cnxn, project_ids)
  ref_projects = {pb.project_name: pb for pb in ids_to_projects.itervalues()}
  # Populate default_project_name if there is only one project id provided.
  default_project_name = None
  if len(ref_projects) == 1:
    default_project_name = ref_projects.values()[0].project_name
  # Populate refs with (project_name, local_id) pairs.
  refs = []
  for val in cond.str_values:
    project_name, local_id = tracker_bizobj.ParseIssueRef(val)
    if not project_name:
      if not default_project_name:
        # TODO(rmistry): Support the below.
        raise MalformedQuery(
            'Searching for issues accross multiple/all projects without '
            'project prefixes is ambiguous and is currently not supported.')
      project_name = default_project_name
    refs.append((project_name, int(local_id)))
  # Unresolvable refs are silently dropped (_misses is discarded).
  issue_ids, _misses = services.issue.ResolveIssueRefs(
      cnxn, ref_projects, default_project_name, refs)
  return issue_ids
import json
def lookup_plex_media(hass, content_type, content_id):
    """Look up Plex media for other integrations using media_player.play_media service payloads."""
    content = json.loads(content_id)
    if isinstance(content, int):
        # Bare rating-key payloads are normalised into a dict lookup.
        content = {"plex_key": content}
        content_type = DOMAIN
    server_name = content.pop("plex_server", None)
    plex_server = get_plex_server(hass, server_name)
    playqueue_id = content.pop("playqueue_id", None)
    if playqueue_id:
        try:
            playqueue = plex_server.get_playqueue(playqueue_id)
        except NotFound as err:
            raise HomeAssistantError(
                f"PlayQueue '{playqueue_id}' could not be found"
            ) from err
    else:
        shuffle = content.pop("shuffle", 0)
        media = plex_server.lookup_media(content_type, **content)
        if media is None:
            raise HomeAssistantError(
                f"Plex media not found using payload: '{content_id}'"
            )
        playqueue = plex_server.create_playqueue(media, shuffle=shuffle)
    return (playqueue, plex_server)
import os
def get_file_paths(directory, file=None):
    """
    Collect file paths from *directory*. When *file* is given and exists,
    only that path is returned; otherwise every regular file in the
    directory is listed, skipping 'desktop.ini'.

    :param directory: the directory containing the file(s)
    :param file: optional name of a single file inside the directory
    :return: sorted list of collected file paths
    """
    collected = []
    if file is not None:
        candidate = os.path.join(directory, file)
        if os.path.isfile(candidate):
            collected.append(candidate)
    if not collected:
        # No (existing) single file requested: fall back to all regular
        # files, filtering out the Windows 'desktop.ini' artifact.
        for entry in os.listdir(directory):
            if entry == 'desktop.ini':
                continue
            entry_path = os.path.join(directory, entry)
            if os.path.isfile(entry_path):
                collected.append(entry_path)
    return sorted(collected)
def dsym_test(func):
    """Decorate the item as a dsym test."""
    if isinstance(func, type) and issubclass(func, unittest2.TestCase):
        raise Exception("@dsym_test can only be used to decorate a test method")

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # lldb may not define the flag at all; treat that as "run the test".
        if getattr(lldb, "dont_do_dsym_test", False):
            self.skipTest("dsym tests")
        return func(self, *args, **kwargs)

    # Mark this function as such to separate them from the regular tests.
    wrapper.__dsym_test__ = True
    return wrapper
from datetime import datetime
def count_meetings(signups=None, left: datetime=None, right: datetime=None) -> int:
    """Count how many eighth-period meetings the user attended.

    Only signups whose block date falls strictly between ``left`` and
    ``right`` are counted.  ``left`` defaults to last year's summer (after
    any blocks from the previous school year) and ``right`` to today.
    Defaults are resolved at call time on purpose: in a long-running app a
    definition-time ``right`` would be the date of last reset, not "today".
    """
    if signups is None:
        signups = get_signups()
    if left is None:
        left = summer(get_year() - 1)
    if right is None:
        right = datetime.today()
    return sum(
        1 for signup in signups
        if left < ion_date(signup["block"]["date"]) < right)
def fake_surroundings(len_poem, size_surroundings=5):
    """
    Draw, for every verse, a random index in its neighbourhood.

    :param len_poem: nombre de vers dans le poème
    :param size_surroundings: distance max du vers de référence (default 5)
    :return: list of ``len_poem`` indices, the i-th drawn uniformly from
             ``[max(0, i - size), min(len_poem, i + size))``
    """
    rows = np.arange(len_poem)
    # Clip the window to the poem.  Integer bounds, unlike the previous
    # float/int mix (np.max over a stack containing np.zeros yields floats),
    # are always accepted by np.random.randint.
    lower_bounds = np.maximum(rows - size_surroundings, 0)
    higher_bounds = np.minimum(rows + size_surroundings, len_poem)
    # One vectorised draw per verse (the high bound is exclusive).
    return np.random.randint(low=lower_bounds, high=higher_bounds).tolist()
import numpy
def interpolate_missing(sparse_list):
    """Fill in ``None`` entries of *sparse_list* by linear interpolation.

    Known samples keep their original values; missing positions receive
    values interpolated (via ``numpy.interp``) from the known samples.
    """
    known_x = [x for x, y in enumerate(sparse_list) if y is not None]
    known_y = [sparse_list[x] for x in known_x]
    missing_x = [x for x, y in enumerate(sparse_list) if y is None]
    dense_list = list(sparse_list)
    if missing_x:
        # numpy.interp extrapolates flatly beyond the first/last known point.
        for x, y in zip(missing_x, numpy.interp(missing_x, known_x, known_y)):
            dense_list[x] = y
    return dense_list
import os
def auto_format_rtf(file_path, debug=False):
    """Replace every ``\\line`` control word with ``\\par`` in an RTF file.

    Writes the converted text to a sibling file whose name has
    `` MODIFIED`` appended (before the extension).

    :param file_path: complete path to the source ``.rtf`` file
    :param debug: if True, print progress messages to the console
    :return: path of the newly written file, or ``None`` when ``file_path``
             does not exist or is not an ``.rtf`` file
    """
    # Separates file name and extension for processing later.
    file_name, file_ext = os.path.splitext(os.path.basename(file_path))
    # Refuse anything that is not an existing .rtf file; the previous code
    # fell through and crashed on an unbound 'new_file' in that case.
    if not (os.path.exists(file_path) and file_ext == ".rtf"):
        return None
    if debug:
        # The old messages had unused/mismatched .format() arguments.
        print("\nFile Operation Confirmed: {file_path}".format(
            file_path=file_path))
        print(" Modifying \"{filename}\".".format(
            filename=os.path.basename(file_path)))
    # Opens file and copies data to text_data object.
    with open(file_path, "r") as file:
        text_data = file.read()
    if debug:
        print(" Successfully read data")
    # Replace on the entire data set instead of line by line.
    new_text_data = text_data.replace("\\line", "\\par")
    if debug:
        print(" Data format operation successful")
    file_location = os.path.dirname(file_path)
    new_file_name = file_name + " MODIFIED" + file_ext
    new_file = os.path.join(file_location, new_file_name)
    # Creates new file at the new path and writes the converted data.
    with open(new_file, "w+") as file:
        file.write(new_text_data)
    if debug:
        print(" Created new file at \"{new_file}\"."
              .format(new_file=new_file))
        print(" Wrote data to \"{new_file_name}\".\n"
              .format(new_file_name=new_file_name))
    return new_file
def classifier_uncertainty(classifier: BaseEstimator, X: modALinput, **predict_proba_kwargs) -> np.ndarray:
    """
    Classification uncertainty of the classifier for the provided samples.

    Args:
        classifier: The classifier for which the uncertainty is to be measured.
        X: The samples for which the uncertainty of classification is to be measured.
        **predict_proba_kwargs: Keyword arguments passed to the classifier's :meth:`predict_proba`.

    Returns:
        Classifier uncertainty, which is 1 - P(prediction is correct).
    """
    try:
        class_probabilities = classifier.predict_proba(X, **predict_proba_kwargs)
    except NotFittedError:
        # An unfitted classifier is maximally uncertain about every sample.
        return np.ones(shape=(X.shape[0], ))
    # Uncertainty is the complement of the top class probability per sample.
    return 1 - np.max(class_probabilities, axis=1)
from typing import Optional
import os
from typing import Union
def fetch_nature_scene_similarity(data_home: Optional[os.PathLike] = None, download_if_missing: bool = True,
                                  shuffle: bool = True, random_state: Optional[np.random.RandomState] = None,
                                  return_triplets: bool = False) -> Union[Bunch, np.ndarray]:
    """ Load the nature scene similarity dataset (odd-one-out).
    =================== =====================
    Triplets                             3355
    Objects (Scenes)                      120
    =================== =====================
    See :ref:`nature_vogue_dataset` for a detailed description.
    >>> dataset = fetch_nature_scene_similarity(shuffle=True)  # doctest: +REMOTE_DATA
    >>> dataset.image_label[[0, -1]].tolist()  # doctest: +REMOTE_DATA
    ['art114.jpg', 'n344019.jpg']
    >>> dataset.triplet.shape  # doctest: +REMOTE_DATA
    (3355, 3)
    Args:
        data_home : optional, default: None
            Specify another download and cache folder for the datasets. By default
            all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
        download_if_missing : optional, default=True
        shuffle: default = True
            Shuffle the order of triplet constraints.
        random_state: optional, default = None
            Initialization for shuffle random generator
        return_triplets : boolean, default=False.
            If True, returns numpy array instead of a Bunch object.
    Returns:
        dataset : :class:`~sklearn.utils.Bunch`
            Dictionary-like object, with the following attributes.
            triplet : ndarray, shape (n_triplets, 3)
                Each row corresponding odd-one-out query.
                The columns represent the odd image and two others.
            class_label : ndarray, shape (120, )
                Names of the scene images.
            DESCR : string
                Description of the dataset.
        triplets : numpy arrays (n_triplets, 3)
            Only present when `return_triplets=True`.
    Raises:
        IOError: If the data is not locally available, but download_if_missing=False
    """
    # Thin wrapper: the shared nature/vogue loader does the downloading,
    # caching, shuffling and packaging; 'nature' selects this dataset split.
    return _fetch_nature_vogue('nature', data_home, download_if_missing, shuffle, random_state, return_triplets)
def test_preserve_scalars():
    """ test the preserve_scalars decorator """
    class Dummy():
        @misc.preserve_scalars
        def meth(self, arr):
            return arr + 1

    obj = Dummy()
    # A scalar argument must come back as a scalar result.
    assert obj.meth(1) == 2
    # An array argument must keep its array nature and values.
    np.testing.assert_equal(obj.meth(np.ones(2)), np.full(2, 2))
def from_string(zma_str, one_indexed=True, angstrom=True, degree=True):
    """ read a z-matrix from a string
    """
    syms, key_mat, name_mat, val_dct = ar.zmatrix.read(zma_str)
    # Resolve each symbolic coordinate name to its numeric value, leaving
    # the None placeholders in place.
    val_mat = tuple(
        tuple(None if name is None else val_dct[name] for name in row)
        for row in name_mat)
    return automol.create.zmat.from_data(
        syms, key_mat, val_mat, name_mat, one_indexed=one_indexed,
        angstrom=angstrom, degree=degree)
def search(raw_query, query_type='/fast/all'):
    """
    Hit the FAST API for names.

    Returns at most three candidate records, sorted by fuzzy-match score
    against the normalized query. Each record follows the OpenRefine
    reconciliation result shape: id, name, score, match, type.
    NOTE(review): this is Python 2 code (`urllib.quote`,
    `except Exception, e` syntax); it will not run under Python 3 as-is.
    """
    out = []
    unique_fast_ids = []
    # Normalize the query; FAST indexes university names without a leading
    # "the".
    query = text.normalize(raw_query, PY3).replace('the university of', 'university of').strip()
    query_type_meta = [i for i in refine_to_fast if i['id'] == query_type]
    if query_type_meta == []:
        query_type_meta = default_query
    query_index = query_type_meta[0]['index']
    try:
        #FAST api requires spaces to be encoded as %20 rather than +
        url = api_base_url + '?query=' + urllib.quote(query)
        url += '&rows=30&queryReturn=suggestall%2Cidroot%2Cauth%2cscore&suggest=autoSubject'
        url += '&queryIndex=' + query_index + '&wt=json'
        app.logger.debug("FAST API url is " + url)
        resp = requests.get(url)
        results = resp.json()
    except Exception, e:
        # Best-effort: any network or parse failure yields an empty result.
        app.logger.warning(e)
        return out
    for position, item in enumerate(results['response']['docs']):
        match = False
        name = item.get('auth')
        alternate = item.get('suggestall')
        if (len(alternate) > 0):
            alt = alternate[0]
        else:
            alt = ''
        fid = item.get('idroot')
        fast_uri = make_uri(fid)
        #The FAST service returns many duplicates. Avoid returning many of the
        #same result
        if fid in unique_fast_ids:
            continue
        else:
            unique_fast_ids.append(fid)
        # Score against both the authorized form and the first variant,
        # then keep the better of the two.
        score_1 = fuzz.token_sort_ratio(query, name)
        score_2 = fuzz.token_sort_ratio(query, alt)
        #Return a maximum score
        score = max(score_1, score_2)
        if query == text.normalize(name, PY3):
            match = True
        elif query == text.normalize(alt, PY3):
            match = True
        resource = {
            "id": fast_uri,
            "name": name,
            "score": score,
            "match": match,
            "type": query_type_meta
        }
        out.append(resource)
    #Sort this list by score
    sorted_out = sorted(out, key=itemgetter('score'), reverse=True)
    #Refine only will handle top three matches.
    return sorted_out[:3]
def get_jmp_addr(bb):
    """
    @param bb List of PseudoInstructions of one basic block
    @return Address of the first 'jmp_T' instruction in this basic block,
            or None when the block contains no jump
    """
    return next((inst.addr for inst in bb if inst.inst_type == 'jmp_T'), None)
import os
def mock_publish_from_s3_to_redis_err(
        work_dict):
    """mock_publish_from_s3_to_redis_err

    Mock task that mirrors the S3-to-Redis publish step but always
    reports an ERR status.

    :param work_dict: dictionary for driving the task
    """
    env_key = 'TEST_S3_CONTENTS'
    redis_key = work_dict.get(
        'redis_key',
        env_key)
    str_dict = ae_consts.ev(
        env_key,
        None)
    log.info(
        'mock_publish_from_s3_to_redis_err - '
        f'setting key={redis_key} value={str_dict}')
    # Mirror the env contents into the redis key slot of the environment;
    # an empty payload clears the slot and yields no data bytes.
    if str_dict:
        os.environ[redis_key] = str_dict
        data = str_dict.encode('utf-8')
    else:
        os.environ[redis_key] = ''
        data = None
    return {
        'status': ae_consts.ERR,
        'err': None,
        'rec': {
            'data': data
        }
    }
import sqlite3
def open_db_conn(db_file=r'/home/openwpm/Desktop/crawl-data.sqlite'):
    """Open a connection to the sqlite database.

    :param db_file: path of the sqlite database file
    :return: an open ``sqlite3.Connection``, or ``None`` when the
             connection could not be established
    """
    try:
        return sqlite3.connect(db_file)
    except sqlite3.Error as e:
        # Only database errors are expected here; anything else (e.g. a
        # programming bug) should propagate instead of being swallowed.
        print(e)
        return None
def totals_per_time_frame(data_points, time_frame):
    """For a set of data points from a single CSV file, calculate the
    percent retransmissions per time frame.

    Args:
        data_points (List[List[int,int,float]]): A list of data points.
            Each data point consists of
            0: 1 if it is a transmission,
            1: 1 if it is a retransmission,
            2: time in seconds
        time_frame (float): width in seconds of each aggregation window

    Returns:
        List[List[float,float]]: [percent retransmissions, window start time]
        for every completed window; the trailing partial window is dropped.
    """
    window_start = 0
    window_end = time_frame
    results = []
    transmissions = 0
    retransmissions = 0
    index = 0
    while window_end < data_points[-1][2] and index < len(data_points):
        point = data_points[index]
        if window_start <= point[2] < window_end:
            transmissions += point[0] + point[1]
            retransmissions += point[1]
            index += 1
        else:
            # Close the current window; the point at `index` is re-examined
            # against the next window on the following iteration.
            percent = 100 * retransmissions / transmissions if transmissions > 0 else 0
            results.append([percent, window_start])
            window_start = window_end
            window_end += time_frame
            transmissions = 0
            retransmissions = 0
    return results
import os
def extract_lesional_clus(label, input_scan, scan, options):
    """
    Find the connected component of the thresholded prediction that best
    matches the true label cluster (by Dice score), save it as a binary
    NIfTI image, and return the binary mask.

    label: ground-truth lesion mask (binary array)
    input_scan: prediction volume, thresholded at options['t_bin']
    scan: unused here -- presumably kept for caller interface; verify
    options: dict read for 't_bin', 'l_min', 'experiment', 'test_scan'
        and 'pred_folder'; 'test_lesion_pred' is written into it
    """
    t_bin = options['t_bin']
    # t_bin = 0
    l_min = options['l_min']
    output_scan = np.zeros_like(input_scan)  # NOTE(review): never used below
    # threshold input segmentation
    t_segmentation = input_scan > t_bin
    # t_segmentation = input_scan > 0
    # perform morphological operations (dilation of the erosion of the input)
    morphed = binary_opening(t_segmentation, iterations=1)
    # morphed = t_segmentation
    # fill holes, then label connected components with 26-connectivity
    morphed = nd.binary_fill_holes(morphed, structure=np.ones((5,5,5))).astype(int)
    pred_labels, _ = nd.label(morphed, structure=np.ones((3,3,3)))
    label_list = np.unique(pred_labels)
    num_elements_by_lesion = nd.labeled_comprehension(morphed, pred_labels, label_list, np.sum, float, 0)
    # Dice of the ground truth against every candidate component.
    # NOTE(review): `num_elements_by_lesion > l_min` is a boolean array, so
    # len() is just the component count -- the l_min filter has no effect.
    Y = np.zeros((len(num_elements_by_lesion > l_min)))
    for l in range(len(num_elements_by_lesion > l_min)):
        Y[l] = dc(label, (pred_labels == l))
    # Component index with the highest Dice overlap with the ground truth.
    clus_ind = np.where(Y == Y.max())
    lesion_pred = np.copy(pred_labels)
    # NOTE(review): clus_ind is a tuple from np.where; comparing an array
    # against it relies on broadcasting and only behaves as intended when a
    # single component attains the maximum -- verify.
    lesion_pred[lesion_pred != clus_ind] = 0
    lesion_pred[lesion_pred == clus_ind] = 1
    lesion_pred_out = nib.Nifti1Image(lesion_pred, np.eye(4))
    options['test_lesion_pred'] = options['experiment'] + '_' + options['test_scan'] + '_out_lesion_pred_only.nii.gz'
    lesion_pred_out.to_filename(os.path.join(options['pred_folder'], options['test_lesion_pred']))
    return lesion_pred
def add_port_fwd(
    zone, src, dest, proto="tcp", dstaddr="", permanent=True, force_masquerade=False
):
    """
    Add port forwarding.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' firewalld.add_port_fwd public 80 443 tcp

    force_masquerade
        when a zone is created ensure masquerade is also enabled
        on that zone.
    """
    # Port forwarding requires masquerading; enable it on demand.
    if force_masquerade and not get_masquerade(zone):
        add_masquerade(zone)
    cmd = (
        f"--zone={zone} "
        f"--add-forward-port=port={src}:proto={proto}:toport={dest}:toaddr={dstaddr}"
    )
    if permanent:
        cmd += " --permanent"
    return __firewall_cmd(cmd)
def reduce_puzzle(values):
    """Reduce a Sudoku puzzle by repeatedly applying all constraint strategies.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary after continued application of the constraint
        strategies no longer produces any changes, or False if the puzzle is
        unsolvable (some box has no candidates left)
    """
    stalled = False
    while not stalled:
        # Number of solved boxes before this round of constraint propagation.
        solved_before = sum(1 for v in values.values() if len(v) == 1)
        values = eliminate(values)
        values = only_choice(values)
        values = naked_twins(values)
        solved_after = sum(1 for v in values.values() if len(v) == 1)
        # No progress this round means a fixed point has been reached.
        stalled = solved_before == solved_after
        # A box with zero remaining candidates proves the puzzle unsolvable.
        if any(len(v) == 0 for v in values.values()):
            return False
    return values
import struct
def read_int(handle):
    """
    Helper function to parse a little-endian 32-bit signed int from a
    file handle.

    Args:
        handle (file): File handle opened in binary mode

    Returns:
        The decoded integer value
    """
    (value,) = struct.unpack("<i", handle.read(4))
    return value
def prem_to_av(t):
    """Premium portion put in account value

    The amount of premiums net of loadings, which is put in the account value.

    .. seealso::
        * :func:`load_prem_rate`
        * :func:`premium_pp`
        * :func:`pols_if_at`
    """
    # Per-policy amount scaled by the number of policies in force just
    # before decrements at time t.
    return prem_to_av_pp(t) * pols_if_at(t, "BEF_DECR")
from scipy.spatial import cKDTree
def remove_close(points, radius):
    """
    Given an nxd set of points (d = 2 or 3), return a subset in which no
    two points are closer than ``radius``.

    :param points: a nxd list of points
    :param radius: minimum allowed pairwise distance
    :return: (culled points, boolean keep-mask over the input rows)

    author: revised by weiwei
    date: 20201202
    """
    tree = cKDTree(points)
    # get the index of every pair of points closer than our radius
    pairs = tree.query_pairs(radius, output_type='ndarray')
    # how often each vertex index appears in a pair -- a cheaply computed
    # "vertex degree" in the graph of too-close points
    count = np.bincount(pairs.ravel(), minlength=len(points))
    # for every pair one of the two points must be removed; dropping the
    # one with the higher degree limits over-culling
    column = count[pairs].argmax(axis=1)
    # take the value in each row with the highest degree
    highest = pairs.ravel()[column + 2 * np.arange(len(column))]
    # mask the vertices by index; plain `bool` here -- the `np.bool`
    # alias was removed in NumPy 1.24 and crashed this line
    mask = np.ones(len(points), dtype=bool)
    mask[highest] = False
    if tol.strict:
        # verify we actually did what we said we'd do
        test = cKDTree(points[mask])
        assert len(test.query_pairs(radius)) == 0
    return points[mask], mask
import torch
def pose_mof2mat_v1(mof, rotation_mode='euler'):
    """
    ### Out-of-Memory Issue ###
    Convert per-pixel 6DoF parameters to transformation matrices.

    Args:
        mof: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6, H, W]
        rotation_mode: 'euler' or 'quat' rotation parametrisation
    Returns:
        A transformation matrix -- [B, 3, 4, H, W]
    """
    batch, _, height, width = mof.size()
    # Flatten the spatial grid so every pixel becomes one 6DoF row.
    params = mof.permute(0, 2, 3, 1).reshape(-1, 6)         # [B*N, 6]
    translation = params[:, :3].unsqueeze(-1)               # [B*N, 3, 1]
    if rotation_mode == 'euler':
        rot_mat = euler2mat(params[:, 3:])                  # [B*N, 3, 3]
    elif rotation_mode == 'quat':
        rot_mat = quat2mat(params[:, 3:])                   # [B*N, 3, 3]
    transform = torch.cat([rot_mat, translation], dim=2)    # [B*N, 3, 4]
    # Fold the flattened grid back into the spatial layout.
    return transform.reshape(batch, height, width, 3, 4).permute(0, 3, 4, 1, 2)
import os
def kmercountexact(forward_in, reverse_in='NA', returncmd=False, **kwargs):
    """
    Wrapper for kmer count exact.

    :param forward_in: Forward input reads.
    :param reverse_in: Reverse input reads. Found automatically for certain conventions.
    :param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value.
    :param kwargs: Arguments to give to kmercountexact in parameter='argument' format.
        See kmercountexact documentation for full list.
    :return: out and err: stdout string and stderr string from running kmercountexact.
    """
    options = kwargs_to_string(kwargs)
    # Auto-detect the reverse read file from the _R1/_R2 naming convention.
    if reverse_in == 'NA' and '_R1' in forward_in \
            and os.path.isfile(forward_in.replace('_R1', '_R2')):
        reverse_in = forward_in.replace('_R1', '_R2')
    if reverse_in == 'NA':
        cmd = 'kmercountexact.sh in={} {}'.format(forward_in, options)
    else:
        cmd = 'kmercountexact.sh in={} in2={} {}'.format(forward_in, reverse_in, options)
    out, err = accessoryfunctions.run_subprocess(cmd)
    if returncmd:
        return out, err, cmd
    return out, err
def update(isamAppliance, local, remote_address, remote_port, remote_facility, check_mode=False, force=False):
    """
    Updates logging configuration
    """
    desired = {
        "local": local,
        "remote_address": remote_address,
        "remote_port": remote_port,
        "remote_facility": remote_facility
    }
    change_required, warnings = _check(isamAppliance, desired)
    # Nothing to do unless a change is needed or explicitly forced.
    if not (force is True or change_required is True):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        # Dry run: report that a change would be made without applying it.
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return isamAppliance.invoke_put("Updating logging configuration attributes", module_uri, desired,
                                    requires_modules=requires_modules,
                                    requires_version=requires_versions,
                                    requires_model=requires_model)
from typing import Tuple
def get_model_and_tokenizer(
    model_name_or_path: str,
    tokenizer_name_or_path: str,
    auto_model_type: _BaseAutoModelClass,
    max_length: int = constants.DEFAULT_MAX_LENGTH,
    auto_model_config: AutoConfig = None,
) -> Tuple[AutoModelForSequenceClassification, AutoTokenizer]:
    """Load a transformer model and its tokenizer.

    Args:
        model_name_or_path (str): model name or checkpoint path
        tokenizer_name_or_path (str): tokenizer name or path
        auto_model_type (_BaseAutoModelClass): auto model class such as
            AutoModelForSequenceClassification
        max_length (int): max length of text for the tokenizer
        auto_model_config (AutoConfig): optional AutoConfig object

    Returns:
        Tuple[AutoModelForSequenceClassification, AutoTokenizer]: model and tokenizer
    """
    logger.info(f"Loading model: {model_name_or_path}")
    # Only forward a config when one was supplied; from_pretrained builds a
    # default configuration otherwise.
    extra = {"config": auto_model_config} if auto_model_config else {}
    model = auto_model_type.from_pretrained(model_name_or_path, **extra)
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_name_or_path, max_length=max_length
    )
    return model, tokenizer
def get_forecast_by_coordinates(
    x: float,
    y: float,
    language: str = "en"
) -> str:
    """
    Get the weather forecast for the site closest to the coordinates (x, y).

    Uses the scipy kd-tree nearest-neighbor algorithm to find the closest
    site.

    Parameters
    ----------
    x : float
        Longitude of the query point.
    y : float
        Latitude of the query point.
    language: str
        The language to retrieve the forecast in. Allowed values: "en", "fr".

    Returns
    -------
    str
        The XML weather forecast.
    """
    nearest = get_weather_site_by_coordinates(x, y)
    # The site record carries both codes needed to address the forecast API.
    return get_forecast_by_site_code(
        site_code=nearest['properties']['Codes'],
        province_code=nearest['properties']['Province Codes'],
        language=language
    )
def get_calculated_energies(stem, data=None):
    """Return the energies from the calculation.

    Fills (and returns) ``data``, creating a new dict when none is given.
    """
    if data is None:
        data = {}
    calc = stem.find('calculation')
    for key, path in VASP_CALCULATED_ENERGIES.items():
        # Each energy is the first whitespace-separated token of the text.
        data[key] = float(get_text_from_section(calc, path, key).split()[0])
    return data
from sys import argv
import pip
def main():
    """Be the top-level entrypoint. Return a shell status code."""
    commands = {'hash': peep_hash,
                'install': peep_install,
                'port': peep_port}
    try:
        handler = commands.get(argv[1]) if len(argv) >= 2 else None
        if handler is not None:
            return handler(argv[2:])
        # Fall through to top-level pip main() for everything else:
        return pip.main()
    except PipException as exc:
        return exc.error_code
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.