| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def max_sum_naive(arr: list, length: int, index: int, prev_max: int) -> int:
    """
    We can either take or leave the current number, depending on the previous max number.
    """
    if index >= length:
        return 0
    cur_max = 0
    if arr[index] > prev_max:
        cur_max = arr[index] + max_sum_naive(arr, length, index + 1, arr[index])
    return max(cur_max, max_sum_naive(arr, length, index + 1, prev_max)) | 644b5cb294e78a10add253cad96d3c3e2c3d67d7 | 3,636,500 |
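A quick sanity check (illustrative, not part of the dataset row): the function computes the maximum sum of a strictly increasing subsequence, so seeding prev_max with a very small value scans the whole array.
arr = [1, 101, 2, 3, 100]
print(max_sum_naive(arr, len(arr), 0, -10**9))  # 106, from the subsequence [1, 2, 3, 100]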
import torch
import numpy as np
def accuracy(X, X_ref):
    """ Compute classification accuracy.
    Parameters
    ----------
    X : torch.Tensor
        The classification score tensor of shape [..., num_classes]
    X_ref : torch.Tensor
        The target integer labels of shape [...]
    Returns
    -------
    The average accuracy
    """
    X_label = torch.argmax(X, dim=-1)
    correct = (X_label == X_ref).sum()
    return correct / np.prod(X.shape[:-1]) | a62adda146de6573cdc190b636b0269852604608 | 3,636,501 |
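Hypothetical usage sketch (values are illustrative):
scores = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # 3 samples, 2 classes
labels = torch.tensor([1, 0, 0])
print(accuracy(scores, labels))  # tensor(0.6667): 2 of 3 argmax predictions match the labels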
import requests
import pandas as pd
from io import BytesIO
from zipfile import ZipFile
def download_dataset(filepath=DATASET_PATH, url=DATASET_URL, **kwargs) -> pd.DataFrame:
    """
    Download dataset
    :param filepath: Output filename
    """
    kwargs.setdefault("session", requests.Session())
    r = kwargs['session'].get(url)
    # If the download failed, fail now.
    # `raise_for_status()` checks whether the web server returned the file
    # or not, and whether it was downloaded successfully.
    # As its name suggests, it raises an exception if an error was detected.
    r.raise_for_status()
    if r.headers['content-type'] != "application/zip":
        raise ConnectionError(f"Expected zip file, received {r.headers['content-type']}")
    with ZipFile(BytesIO(r.content)) as pakattu_tiedosto:
        # Open the received data as bytes, so we don't have to write the
        # zip file out to the filesystem first.
        # Extract the desired file and wrap it in a pandas DataFrame.
        data = pd.read_csv(BytesIO(pakattu_tiedosto.read(DATASET_NAME)))
    # Add names, if data has none.
    if "name" not in data.columns:
        logger.debug("Names are missing. Generating fake names.")
        names = pd.Series(generate_names(data.shape[0]), name="name")
        data = data.assign(name=names)
    data.to_csv(filepath, index_label=INDEX)
    logger.debug("File downloaded as: %s", filepath)
    # Scrape extra bits.
    if Config.getboolean("build", "allow_dirty", fallback=False):
        hae_dataa()
    return data | 83cde597622e7708b90fc789c42925fcc0c8c00a | 3,636,502 |
from datetime import datetime, timedelta
import uuid
from cryptography import x509
def create_cert_builder(subject, issuer_name, public_key, days=365, is_ca=False):
    """
    The method to create a builder for all types of certificates.
    :param subject: The subject of the certificate.
    :param issuer_name: The name of the issuer.
    :param public_key: The public key of the certificate.
    :param days: The number of days for which the certificate is valid. The default is 1 year or 365 days.
    :param is_ca: Boolean to indicate if a cert is CA or non-CA.
    :return: The certificate builder.
    :rtype: :class:`x509.CertificateBuilder`
    """
    builder = x509.CertificateBuilder()
    builder = builder.subject_name(subject)
    builder = builder.issuer_name(issuer_name)
    builder = builder.public_key(public_key)
    builder = builder.not_valid_before(datetime.today())
    builder = builder.not_valid_after(datetime.today() + timedelta(days=days))
    builder = builder.serial_number(int(uuid.uuid4()))
    builder = builder.add_extension(
        x509.BasicConstraints(ca=is_ca, path_length=None), critical=True
    )
    return builder | 04739a7b81c3e4b6d70bba96e646e64c2d5fdbb7 | 3,636,503 |
def _table_row(line):
    """
    Return all elements of a data line.
    Return all elements of a data line. Simply splits it.
    Parameters
    ----------
    line: string
        A stats line.
    Returns
    -------
    list of strings
        A list of strings, containing the data on the line, split at white space.
    """
    return line.split() | dc5d76db80059b0da257b45f12513d75c2765d55 | 3,636,504 |
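Since this is plain whitespace splitting, a minimal illustrative check:
print(_table_row("  12  3.5  foo "))  # ['12', '3.5', 'foo']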
def ack_alert_alarm_definition(definition_id):
    """ Acknowledge all alert(s) or alarm(s) associated with the definition identified by definition_id.
    """
    try:
        # Get definition identified in request
        definition = SystemEventDefinition.query.get(definition_id)
        if definition is None:
            message = 'Failed to retrieve SystemEventDefinition for id provided: %d' % definition_id
            return bad_request(message)
        # Verify definition is not in active state; otherwise error
        if definition.active:
            message = '%s definition must be disabled before clearing any associated instances.' % definition.event_type
            return bad_request(message)
        # Determine current user who is auto clearing alert or alarm instances (written to log)
        assigned_user = User.query.get(g.current_user.id)
        if assigned_user is not None:
            name = assigned_user.first_name + ' ' + assigned_user.last_name
        else:
            name = 'Unknown/unassigned user with g.current_user.id: %s' % str(g.current_user.id)
        # Identify default user and message for auto acknowledgment; log activity
        ack_by = 1
        ack_value = 'Log: Auto acknowledge (ooi-ui-services) OBO user \'%s\'; %s definition id: %d' % \
                    (name, definition.event_type, definition.id)
        current_app.logger.info(ack_value)
        # Get all active instances for this definition which have not been acknowledged.
        instances = SystemEvent.query.filter_by(system_event_definition_id=definition.id, acknowledged=False).all()
        for instance in instances:
            if instance.event_type == 'alarm':
                if not (uframe_acknowledge_alert_alarm(instance.uframe_event_id, ack_value)):
                    message = 'Failed to acknowledge alarm (id:%d) in uframe, prior to clearing instance.' % instance.id
                    current_app.logger.info('[clear_alert_alarm] %s ' % message)
                    return bad_request(message)
            # Update alert_alarm acknowledged, ack_by and ts_acknowledged
            instance.acknowledged = True
            instance.ack_by = ack_by
            instance.ts_acknowledged = dt.datetime.strftime(dt.datetime.now(), "%Y-%m-%dT%H:%M:%S")
            try:
                db.session.add(instance)
                db.session.commit()
            except Exception:
                db.session.rollback()
                return bad_request('IntegrityError during auto-acknowledgment of %s by %s.' %
                                   (instance.event_type, str(ack_by)))
        result = 'ok'
        return jsonify({'result': result}), 200
    except Exception as err:
        message = 'Insufficient data, or bad data format. %s' % str(err)
        current_app.logger.info(message)
        return conflict(message) | 6b15f6019fad506937ed0bcc0c6eeb34ce21faf4 | 3,636,505 |
def range2d(range_x, range_y):
    """Creates a 2D range."""
    range_x = list(range_x)
    return [(x, y) for y in range_y for x in range_x] | ca33799a277f0f72e99836e81a7ffc98b191fc37 | 3,636,506 |
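The comprehension iterates range_y in the outer loop, so pairs come out grouped by y; a small illustrative example:
print(range2d(range(2), range(3)))  # [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]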
import numpy as np
from sklearn.metrics import pairwise_distances_argmin_min
def kmeans_(X, sample_weights, n_clusters, init='kmeans++', max_iter=300):
    """
    Weighted K-Means implementation (Lloyd's Algorithm).
    :param X:
    :param sample_weights:
    :param n_clusters:
    :param init: string in {'random', 'kmeans++'}, default 'kmeans++'
    :param max_iter: maximum number of iterations
    :return cluster_centers_:
    """
    n_samples, n_features = X.shape
    # TODO: find a better way to handle negative weights
    cluster_centers_ = None
    if init == 'kmeans++':
        cluster_centers_ = kmeans_pp_(X, np.clip(sample_weights, 0, np.inf), n_clusters)
    elif init == 'random':
        centers_idxs = np.random.choice(n_samples, n_clusters, replace=False)
        cluster_centers_ = X[centers_idxs]
    elif isinstance(init, np.ndarray):
        cluster_centers_ = init
    diff = np.inf
    i = 0
    while diff > 1e-3 and i < max_iter:
        clusters = update_clusters_(X, cluster_centers_)
        new_centers = update_centers_(X, sample_weights, clusters)
        if len(new_centers) == len(cluster_centers_):
            diff = np.linalg.norm(new_centers - cluster_centers_)
        cluster_centers_ = new_centers
        i += 1
    # if the program finishes having found only k' < k centers, we use the FarthestNeighbor
    # method to produce the remaining k-k' centers
    if len(cluster_centers_) < n_clusters:
        centers = [c for c in cluster_centers_]
        _, dists_to_centers = pairwise_distances_argmin_min(X, np.atleast_2d(centers))
        for i in range(0, n_clusters - len(cluster_centers_)):
            next_idx = np.argmax(dists_to_centers)
            centers.append(X[next_idx])
            _, next_dist = pairwise_distances_argmin_min(X, np.atleast_2d(centers[-1]))
            dists_to_centers = np.minimum(dists_to_centers, next_dist)
        cluster_centers_ = np.array(centers)
    return cluster_centers_ | 7c62df5c98312d1f2372939d28fecf632fd0c45b | 3,636,507 |
import os
import numpy as np
def CPCT_LambdaPitch(refdir, main_fastfile, Lambda=None, Pitch=np.linspace(-10,40,5), WS=None, Omega=None,  # operating conditions
                     TMax=20, bStiff=True, bNoGen=True, bSteadyAero=True,  # simulation options
                     reRun=True,
                     fastExe=None, showOutputs=True, nCores=4):  # execution options
    """ Computes CP and CT as function of tip speed ratio (lambda) and pitch.
    There are two main ways to define the inputs:
      - Option 1: provide Lambda and Pitch (deg)
      - Option 2: provide WS (m/s), Omega (in rpm) and Pitch (deg), in which case len(WS)==len(Omega)
    """
    WS_default = 5  # Wind speed used if the user does not provide a wind speed vector
    # If the user provided a full path to the main file, we strip off the directory. TODO, should be cleaner
    if len(os.path.dirname(main_fastfile)) > 0:
        main_fastfile = os.path.basename(main_fastfile)
    # --- Reading main fast file to get rotor radius
    fst = fi.FASTInputFile(os.path.join(refdir, main_fastfile))
    ed = fi.FASTInputFile(os.path.join(refdir, fst['EDFile'].replace('"','')))
    R = ed['TipRad']
    # --- Making sure we have consistent vectors of operating conditions
    if (Omega is not None):
        if (Lambda is not None):
            WS = np.ones(Omega.shape)*WS_default
        elif (WS is not None):
            if len(WS) != len(Omega):
                raise Exception('When providing Omega and WS, both vectors should have the same dimension')
        else:
            WS = np.ones(Omega.shape)*WS_default
    else:
        Omega = WS_default * Lambda/R*60/(2*np.pi)  # TODO, use more realistic combinations of WS and Omega
        WS = np.ones(Omega.shape)*WS_default
    # --- Defining flat vectors of operating conditions
    WS_flat = []
    RPM_flat = []
    Pitch_flat = []
    for pitch in Pitch:
        for (rpm, ws) in zip(Omega, WS):
            WS_flat.append(ws)
            RPM_flat.append(rpm)
            Pitch_flat.append(pitch)
    # --- Setting up default options
    baseDict = {'TMax': TMax, 'DT': 0.01, 'DT_Out': 0.1}  # NOTE: TMax should be at least 2pi/Omega
    baseDict = paramsNoController(baseDict)
    if bStiff:
        baseDict = paramsStiff(baseDict)
    if bNoGen:
        baseDict = paramsNoGen(baseDict)
    if bSteadyAero:
        baseDict = paramsSteadyAero(baseDict)
    # --- Creating set of parameters to be changed
    # TODO: verify that RtAeroCp and RtAeroCt are present in AeroDyn outlist
    PARAMS = paramsWS_RPM_Pitch(WS_flat, RPM_flat, Pitch_flat, baseDict=baseDict, FlatInputs=True)
    # --- Generating all files in a workDir
    workDir = refdir.strip('/').strip('\\')+'_CPLambdaPitch'
    print('>>> Generating inputs files in {}'.format(workDir))
    RemoveAllowed = reRun  # If the user wants to rerun, we can remove; otherwise we keep existing simulations
    fastFiles = templateReplace(PARAMS, refdir, outputDir=workDir, removeRefSubFiles=True, removeAllowed=RemoveAllowed, main_file=main_fastfile)
    # --- Running fast simulations
    print('>>> Running {} simulations...'.format(len(fastFiles)))
    runner.run_fastfiles(fastFiles, showOutputs=showOutputs, fastExe=fastExe, nCores=nCores, reRun=reRun)
    # --- Postpro - Computing averages at the end of the simulation
    print('>>> Postprocessing...')
    outFiles = [os.path.splitext(f)[0]+'.outb' for f in fastFiles]
    # outFiles = glob.glob(os.path.join(workDir,'*.outb'))
    ColKeepStats = ['RotSpeed_[rpm]','BldPitch1_[deg]','RtAeroCp_[-]','RtAeroCt_[-]','Wind1VelX_[m/s]']
    result = postpro.averagePostPro(outFiles, avgMethod='periods', avgParam=1, ColKeep=ColKeepStats, ColSort='RotSpeed_[rpm]')
    # print(result)
    # --- Adding lambda, sorting and keeping only a few columns
    result['lambda_[-]'] = result['RotSpeed_[rpm]']*R*2*np.pi/60/result['Wind1VelX_[m/s]']
    result.sort_values(['lambda_[-]','BldPitch1_[deg]'], ascending=[True, True], inplace=True)
    ColKeepFinal = ['lambda_[-]','BldPitch1_[deg]','RtAeroCp_[-]','RtAeroCt_[-]']
    result = result[ColKeepFinal]
    print('>>> Done')
    # --- Converting to matrices
    CP = result['RtAeroCp_[-]'].values
    CT = result['RtAeroCt_[-]'].values
    MCP = CP.reshape((len(Lambda), len(Pitch)))
    MCT = CT.reshape((len(Lambda), len(Pitch)))
    LAMBDA, PITCH = np.meshgrid(Lambda, Pitch)
    # --- CP max
    i, j = np.unravel_index(MCP.argmax(), MCP.shape)
    MaxVal = {'CP_max': MCP[i,j], 'lambda_opt': LAMBDA[j,i], 'pitch_opt': PITCH[j,i]}
    return MCP, MCT, Lambda, Pitch, MaxVal, result | 8013e10fe68cc79f427afd5a203e55dabc29dcd9 | 3,636,508 |
import time
import caffe
def segment(im, pad=0, caffemodel=None):
    """
    Function which segments an input image. Uses a pyramidal method of scaling, performing
    inference, upsampling results, and averaging results.
    :param im: image to segment
    :param pad: number of pixels of padding to add
    :param caffemodel: path to caffemodel file
    :return: The upsampled and averaged results of inference on the input image at 3 scales.
    """
    caffe.set_mode_gpu()
    padded_image = add_padding(im, pad)  # Add padding to original image
    resized_images = resize_images(padded_image)  # Resize original images
    outputs = [classify(image, caffemodel=caffemodel) for image in resized_images]  # Perform classification on images
    upsample_start = time.time()
    average_prob_maps = get_average_prob_maps(outputs, im.shape, pad)
    print("Total segmenting time: {:.3f} ms".format((time.time() - upsample_start) * 1000))
    return average_prob_maps | 0e6f0d8cfd363c7b59007105178eaea3f0238261 | 3,636,509 |
def check_consistency(  # pylint: disable=too-many-arguments
    num_users=None,
    num_items=None,
    users_hat=None,
    items_hat=None,
    users=None,
    items=None,
    user_item_scores=None,
    default_num_users=None,
    default_num_items=None,
    default_num_attributes=None,
    num_attributes=None,
    attributes_must_match=True,
):
    """Validate that the inputs to the recommender system are consistent
    based on their dimensions. Furthermore, if all of the inputs
    are consistent, we return the number of users and items that are inferred
    from the inputs, or fall back to a provided default number.
    Parameters
    -----------
    num_users: int, optional
        An integer representing the number of users in the system
    num_items: int, optional
        An integer representing the number of items in the system
    users_hat: :obj:`numpy.ndarray`, optional
        A 2D matrix whose first dimension should be equal to the number of
        users in the system. Typically this matrix refers to the system's
        internal representation of user profiles, not the "true" underlying
        user profiles, which are unknown to the system.
    items_hat: :obj:`numpy.ndarray`, optional
        A 2D matrix whose second dimension should be equal to the number of
        items in the system. Typically this matrix refers to the system's
        internal representation of item attributes, not the "true" underlying
        item attributes, which are unknown to the system.
    users: :obj:`numpy.ndarray`, optional
        A 2D matrix whose first dimension should be equal to the number of
        users in the system. This is the "true" underlying user profile
        matrix.
    items: :obj:`numpy.ndarray`, optional
        A 2D matrix whose second dimension should be equal to the number of
        items in the system. This is the "true" underlying item attribute
        matrix.
    user_item_scores: :obj:`numpy.ndarray`, optional
        A 2D matrix whose first dimension is the number of users in the system
        and whose second dimension is the number of items in the system.
    default_num_users: int, optional
        If the number of users is not specified anywhere in the inputs, we return
        this value as the number of users to be returned.
    default_num_items: int, optional
        If the number of items is not specified anywhere in the inputs, we return
        this value as the number of items to be returned.
    default_num_attributes: int, optional
        If the number of attributes in the item/user representations is not
        specified or cannot be inferred, this is the default number
        of attributes that should be used. (This applies only to users_hat
        and items_hat.)
    num_attributes: int, optional
        Check that the number of attributes per user & per item are equal to
        this specified number. (This applies only to users_hat and items_hat.)
    attributes_must_match: bool (optional, default: True)
        Check that the user and item matrices match up on the attribute dimension.
        If False, the number of columns in the user matrix and the number of
        rows in the item matrix are allowed to be different.
    Returns
    --------
    num_users: int
        Number of users, inferred from the inputs (or provided default).
    num_items: int
        Number of items, inferred from the inputs (or provided default).
    num_attributes: int (optional)
        Number of attributes per item/user profile, inferred from inputs
        (or provided default).
    """
    if not is_array_valid_or_none(items_hat, ndim=2):
        raise ValueError("items matrix must be a 2D matrix or None")
    if not is_array_valid_or_none(users_hat, ndim=2):
        raise ValueError("users matrix must be a 2D matrix or None")
    if not is_valid_or_none(num_attributes, int):
        raise TypeError("num_attributes must be an int")
    num_items_vals = non_none_values(
        getattr(items_hat, "shape", [None, None])[1],
        getattr(items, "shape", [None, None])[1],
        getattr(user_item_scores, "shape", [None, None])[1],
        num_items,
    )
    num_users_vals = non_none_values(
        getattr(users, "shape", [None])[0],
        getattr(users_hat, "shape", [None])[0],
        getattr(user_item_scores, "shape", [None])[0],
        num_users,
    )
    num_users = resolve_set_to_value(
        num_users_vals, default_num_users, "Number of users is not the same across inputs"
    )
    num_items = resolve_set_to_value(
        num_items_vals, default_num_items, "Number of items is not the same across inputs"
    )
    if attributes_must_match:
        # check attributes matching for users_hat and items_hat
        num_attrs_vals = non_none_values(
            getattr(users_hat, "shape", [None, None])[1],
            getattr(items_hat, "shape", [None])[0],
            num_attributes,
        )
        num_attrs = resolve_set_to_value(
            num_attrs_vals,
            default_num_attributes,
            "User representation and item representation matrices are not "
            "compatible with each other",
        )
        return num_users, num_items, num_attrs
    return num_users, num_items | 4139a684751d25bef08d8f4806735be8769bb09e | 3,636,510 |
import datetime
def verify_forgot_password(request):
    """
    Check the forgot-password verification and possibly let the user
    change their password because of it.
    """
    # get form data variables, and specifically check for presence of token
    formdata = _process_for_token(request)
    if not formdata['has_userid_and_token']:
        return render_404(request)
    formdata_token = formdata['vars']['token']
    formdata_userid = formdata['vars']['userid']
    formdata_vars = formdata['vars']
    # check if it's a valid user id
    user = User.query.filter_by(id=formdata_userid).first()
    if not user:
        return render_404(request)
    # check if we have a real user and correct token
    if ((user and user.fp_verification_key and
         user.fp_verification_key == unicode(formdata_token) and
         datetime.datetime.now() < user.fp_token_expire
         and user.email_verified and user.status == 'active')):
        cp_form = auth_forms.ChangePassForm(formdata_vars)
        if request.method == 'POST' and cp_form.validate():
            user.pw_hash = auth_lib.bcrypt_gen_password_hash(
                request.form['password'])
            user.fp_verification_key = None
            user.fp_token_expire = None
            user.save()
            messages.add_message(
                request,
                messages.INFO,
                _("You can now log in using your new password."))
            return redirect(request, 'mediagoblin.auth.login')
        else:
            return render_to_response(
                request,
                'mediagoblin/auth/change_fp.html',
                {'cp_form': cp_form})
    # in case there is a valid id but no user with that id in the db
    # or the token expired
    else:
        return render_404(request) | 7ff10e96701c2733702a717fe9bd4fd7103189d1 | 3,636,511 |
import pywemo
import requests
def setup(hass, config):
    """Set up for WeMo devices."""
    global SUBSCRIPTION_REGISTRY
    SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
    SUBSCRIPTION_REGISTRY.start()
    def stop_wemo(event):
        """Shutdown Wemo subscriptions and subscription thread on exit."""
        _LOGGER.debug("Shutting down subscriptions.")
        SUBSCRIPTION_REGISTRY.stop()
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_wemo)
    def discovery_dispatch(service, discovery_info):
        """Dispatcher for WeMo discovery events."""
        # name, model, location, mac
        model_name = discovery_info.get('model_name')
        serial = discovery_info.get('serial')
        # Only register a device once
        if serial in KNOWN_DEVICES:
            return
        _LOGGER.debug('Discovered unique device %s', serial)
        KNOWN_DEVICES.append(serial)
        component = WEMO_MODEL_DISPATCH.get(model_name, 'switch')
        discovery.load_platform(hass, component, DOMAIN, discovery_info,
                                config)
    discovery.listen(hass, SERVICE_WEMO, discovery_dispatch)
    def setup_url_for_device(device):
        """Determine setup.xml url for given device."""
        return 'http://{}:{}/setup.xml'.format(device.host, device.port)
    def setup_url_for_address(host, port):
        """Determine setup.xml url for given host and port pair."""
        if not port:
            port = pywemo.ouimeaux_device.probe_wemo(host)
        if not port:
            return None
        return 'http://{}:{}/setup.xml'.format(host, port)
    devices = []
    for host, port in config.get(DOMAIN, {}).get(CONF_STATIC, []):
        url = setup_url_for_address(host, port)
        if not url:
            _LOGGER.error(
                'Unable to get description url for %s',
                '{}:{}'.format(host, port) if port else host)
            continue
        try:
            device = pywemo.discovery.device_from_description(url, None)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout) as err:
            _LOGGER.error('Unable to access %s (%s)', url, err)
            continue
        devices.append((url, device))
    disable_discovery = config.get(DOMAIN, {}).get(CONF_DISABLE_DISCOVERY)
    if not disable_discovery:
        _LOGGER.debug("Scanning for WeMo devices.")
        devices.extend(
            (setup_url_for_device(device), device)
            for device in pywemo.discover_devices())
    for url, device in devices:
        _LOGGER.debug('Adding wemo at %s:%i', device.host, device.port)
        discovery_info = {
            'model_name': device.model_name,
            'serial': device.serialnumber,
            'mac_address': device.mac,
            'ssdp_description': url,
        }
        discovery.discover(hass, SERVICE_WEMO, discovery_info)
    return True | 1b77fb4ae2f2435cc7974c82b230f01420693bf2 | 3,636,512 |
def extend_node(node, out_size, axis=-1, value=0):
    """Extend size of `node` array
    For now, this function works the same as the `extend_array` method;
    this is just an alias function.
    Args:
        node (numpy.ndarray): the array whose `axis` is to be extended.
            The first axis is considered the "batch" axis.
        out_size (int): target output size for specified `axis`.
        axis (int): node feature axis to be extended.
            Default is `axis=-1`, which extends only the last axis.
        value (int or float): value to be filled in the extended places.
    Returns (numpy.ndarray): extended `node` array; extended places are filled
        with `value`
    """
    return extend_arrays_to_size(
        node, out_size=out_size, axis=axis, value=value) | 08ef9f3f1cff5dce22ca8b4afacbc496e7d803ad | 3,636,513 |
import numpy as np
def forward(X, weights, bias):
    """
    Simulate the forward pass on one layer.
    :param X: input matrix.
    :param weights: weight matrix.
    :param bias: bias vector.
    :return: predicted binary labels (0/1), thresholding the sigmoid output at 0.5.
    """
    a = np.matmul(weights, np.transpose(X))
    b = np.reshape(np.repeat(bias, np.shape(X)[0], axis=0), np.shape(a))
    output = sigmoid_activation(a + b)
    y_pred = np.where(output < 0.5, 0, 1)
    return y_pred | fb330d01a42965c003367997381ca8929200d57e | 3,636,514 |
from typing import Any
def sanitize_for_params(x: Any) -> Any:
    """Sanitizes the input for a more flexible usage with AllenNLP's `.from_params()` machinery.
    For now it is mainly used to transform numpy numbers to python types
    Parameters
    ----------
    x
        The parameter passed on to `allennlp.common.FromParams.from_params()`
    Returns
    -------
    sanitized_x
    """
    # AllenNLP has a similar function (allennlp.common.util.sanitize) but it does not work for my purpose, since
    # numpy types are checked only after the float type check, and:
    # isinstance(numpy.float64(1), float) == True !!!
    if isinstance(x, util.numpy.number):
        return x.item()
    elif isinstance(x, util.numpy.bool_):
        # Numpy bool_ needs to be converted to python bool.
        return bool(x)
    if isinstance(x, (str, float, int, bool)):
        return x
    elif isinstance(x, dict):
        # Dicts need their values sanitized
        return {key: sanitize_for_params(value) for key, value in x.items()}
    # Lists and tuples need their values sanitized
    elif isinstance(x, list):
        return [sanitize_for_params(x_i) for x_i in x]
    elif isinstance(x, tuple):
        return tuple(sanitize_for_params(x_i) for x_i in x)
    # We include a `to_json` function to customize sanitization for user-defined classes
    elif hasattr(x, "to_json"):
        return x.to_json()
    return x | 538e2268f15960683bfe85e03b96076e7f2241db | 3,636,515 |
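A sketch of the intended behavior, assuming util.numpy above is simply an alias for the numpy module:
import numpy as np
params = {"lr": np.float64(0.1), "use_bias": np.bool_(True), "dims": [np.int64(3), 4]}
print(sanitize_for_params(params))  # {'lr': 0.1, 'use_bias': True, 'dims': [3, 4]}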
import os
def filename_fixture():
    """The name of the cities csv file for testing"""
    return os.path.join('tests', 'fixtures', 'cities.csv') | d96b38d2ab616de9297526712521207c656593ea | 3,636,516 |
import os
def _get_color_context():
    """ Run at beginning of color workflow functions (ex start() or resume()) to orient the function.
    Assumes python current working directory = the relevant AN subdirectory with session.log in place.
    Adapted from package mp_phot, workflow_session.py._get_session_context(). Required for .resume().
    TESTED OK 2021-01-08.
    :return: 3-tuple: (this_directory, mp_string, an_string) [3 strings]
    """
    this_directory = os.getcwd()
    defaults_dict = ini.make_defaults_dict()
    color_log_filename = defaults_dict['color log filename']
    color_log_fullpath = os.path.join(this_directory, color_log_filename)
    if not os.path.isfile(color_log_fullpath):
        raise ColorLogFileError('No color log file found. You probably need to run start() or resume().')
    with open(color_log_fullpath, mode='r') as log_file:
        lines = log_file.readlines()
    if len(lines) < 5:
        raise ColorLogFileError('Too few lines.')
    if not lines[0].lower().startswith('color log file'):
        raise ColorLogFileError('Header line cannot be parsed.')
    directory_from_color_log = lines[1].strip().lower().replace('\\', '/').replace('//', '/')
    directory_from_cwd = this_directory.strip().lower().replace('\\', '/').replace('//', '/')
    if directory_from_color_log != directory_from_cwd:
        print()
        print(directory_from_color_log, directory_from_cwd)
        raise ColorLogFileError('Header line does not match current working directory.')
    mp_string = lines[2][3:].strip().upper()
    an_string = lines[3][3:].strip()
    # definition_string = lines[4][len('Definition:'):].strip()
    return this_directory, mp_string, an_string | c9d2a62d490a4d5f54054777d60b35caf0ec3e81 | 3,636,517 |
import dns.resolver
def host_ip():
    """Test fixture to resolve and return host_ip as a string."""
    query = dns.resolver.query("scanme.nmap.org")
    assert len(query) > 0, "could not resolve target host name"
    return query[0].address | ee801bc2be6311fb1fe0805f5d3efb0a4fe589be | 3,636,518 |
def with_behavior(strict=UNSET, extras=UNSET, hook=UNSET):
    """
    Args:
        strict (bool | Exception | callable):
            False: don't perform any schema validation
            True: raise ValidationException when schema is not respected
            Exception: raise given exception when schema is not respected
            callable: call callable(reason) when schema is not respected
        extras (bool | Exception | callable | (callable, list)):
            False: don't do anything when there are extra fields in deserialized data
            True: call LOG.debug(reason) to report extra (not in schema) fields seen in data
            Exception: raise given Exception(reason) when extra fields are seen in data
            callable: call callable(reason) when extra fields are seen in data
            (callable, list): call callable(reason), except for extras mentioned in list
        hook (callable): If provided, call callable(meta: ClassMetaDescription) at the end of ClassMetaDescription initialization
    Returns:
        (type): Internal temp class (compatible with `Serializable` metaclass) indicating how to handle Serializable type checking
    """
    return BaseMetaInjector("_MBehavior", tuple(), {"behavior": DefaultBehavior(strict=strict, extras=extras, hook=hook)}) | 1d80cf51d679e7cec9a5645253b64300fa9688a4 | 3,636,519 |
def get_subnet_from_list_by_id(subnet_id, subnets_list):
    """Get Neutron subnet by id from provided subnets list.
    :param subnet_id: Neutron subnet ID
    :param subnets_list: List of Neutron subnets, where target subnet should
                         be searched
    """
    for subnet in subnets_list:
        if subnet['id'] == subnet_id:
            return subnet
    LOG.warning("Cannot obtain subnet with id='%s' from provided subnets "
                "list", subnet_id) | 93e294131a96de321d18ce5a0e5d3b6fb5913b72 | 3,636,520 |
def grafana_logo(dispatcher):
    """Construct an image_element containing the locally hosted Grafana logo."""
    return dispatcher.image_element(dispatcher.static_url(GRAFANA_LOGO_PATH), alt_text=GRAFANA_LOGO_ALT) | 2311c77cf2b5054c3a103693a2d9b862a3e503af | 3,636,521 |
import json
def is_json(payload):
    """Check if a payload is valid JSON."""
    try:
        json.loads(payload)
    except (TypeError, ValueError):
        return False
    else:
        return True | a02499ffd0a890fa4697f1002c5deb0fc894cac0 | 3,636,522 |
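Minimal illustrative checks: json.loads raises ValueError on malformed strings and TypeError on non-string input, both of which are caught.
print(is_json('{"a": 1}'))  # True
print(is_json("not json"))  # False
print(is_json(None))        # False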
import numpy as np
def gram_schmidt(M):
    """
    @param M:
        An m x n matrix whose columns are to be orthogonalized
    @return ret
        Matrix whose columns have been orthogonalized
    """
    columns = M.T
    res = []
    res.append(columns[0])
    for x in range(1, columns.shape[0]):
        tmp = np.array([0 for _ in range(M.shape[0])])
        for vec in res:
            y = (np.dot(vec, columns[x]) / np.dot(vec, vec)) * vec
            tmp = tmp + y
        res.append(columns[x] - tmp)
    return np.array(res).T | e64b2ea4e36c3a5f8394887ba666b4c392d0284c | 3,636,523 |
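A quick orthogonality check (illustrative values); note the routine orthogonalizes but does not normalize the columns.
M = np.array([[1.0, 1.0], [0.0, 1.0]])
Q = gram_schmidt(M)
print(np.dot(Q[:, 0], Q[:, 1]))  # 0.0: the output columns are mutually orthogonal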
import networkx as nx
import numpy as np
import scipy as sp
import scipy.sparse.linalg
from sortedcontainers import SortedList
def contract_variation_linear(G, A=None, K=10, r=0.5, mode='neighborhood'):
    """
    Sequential contraction with local variation and general families.
    This is an implementation that improves running speed,
    at the expense of being more greedy (and thus having slightly larger error).
    See contract_variation() for documentation.
    """
    N, deg, W_lil = G.N, G.dw, G.W.tolil()
    # The following is correct only for a single level of coarsening.
    # Normally, A should be passed as an argument.
    if A is None:
        lk, Uk = sp.sparse.linalg.eigsh(G.L, k=K, which='SM', tol=1e-3)  # this is not optimized!
        lk[0] = 1
        lsinv = lk**(-0.5)
        lsinv[0] = 0
        lk[0] = 0
        D_lsinv = np.diag(lsinv)
        A = Uk @ D_lsinv
    # cost function for the subgraph induced by nodes array
    def subgraph_cost(nodes):
        nc = len(nodes)
        ones = np.ones(nc)
        W = W_lil[nodes, :][:, nodes]  # .tocsc()
        L = np.diag(2*deg[nodes] - W.dot(ones)) - W
        B = (np.eye(nc) - np.outer(ones, ones) / nc) @ A[nodes, :]
        unnormalized_cost = np.linalg.norm(B.T @ L @ B)
        return unnormalized_cost / (nc-1) if nc != 1 else 0.
    class CandidateSet:
        def __init__(self, candidate_list):
            self.set = candidate_list
            self.cost = subgraph_cost(candidate_list)
        def __lt__(self, other):
            return self.cost < other.cost
    family = []
    W_bool = G.A + sp.sparse.eye(G.N, dtype=bool, format='csr')
    if 'neighborhood' in mode:
        for i in range(N):
            # i_set = G.A[i,:].indices  # get_neighbors(G, i)
            # i_set = np.append(i_set, i)
            i_set = W_bool[i, :].indices
            family.append(CandidateSet(i_set))
    if 'cliques' in mode:
        Gnx = nx.from_scipy_sparse_matrix(G.W)
        for clique in nx.find_cliques(Gnx):
            family.append(CandidateSet(np.array(clique)))
    else:
        if 'edges' in mode:
            edges = np.array(G.get_edge_list()[0:2])
            for e in range(0, edges.shape[1]):
                family.append(CandidateSet(edges[:, e]))
        if 'triangles' in mode:
            triangles = set([])
            edges = np.array(G.get_edge_list()[0:2])
            for e in range(0, edges.shape[1]):
                [u, v] = edges[:, e]
                for w in range(G.N):
                    if G.W[u, w] > 0 and G.W[v, w] > 0:
                        triangles.add(frozenset([u, v, w]))
            triangles = list(map(lambda x: np.array(list(x)), triangles))
            for triangle in triangles:
                family.append(CandidateSet(triangle))
    family = SortedList(family)
    marked = np.zeros(G.N, dtype=bool)
    # ----------------------------------------------------------------------------
    # Construct a (minimum weight) independent set.
    # ----------------------------------------------------------------------------
    coarsening_list = []
    # n, n_target = N, (1-r)*N
    n_reduce = np.floor(r*N)  # how many nodes do we need to reduce/eliminate?
    while len(family) > 0:
        i_cset = family.pop(index=0)
        i_set = i_cset.set
        # check if marked
        i_marked = marked[i_set]
        if not any(i_marked):
            n_gain = len(i_set) - 1
            if n_gain > n_reduce: continue  # this helps avoid over-reducing
            # all vertices are unmarked: add i_set to the coarsening list
            marked[i_set] = True
            coarsening_list.append(i_set)
            # n -= len(i_set) - 1
            n_reduce -= n_gain
            # if n <= n_target: break
            if n_reduce <= 0: break
        # it may be worth keeping this set
        else:
            i_set = i_set[~i_marked]
            if len(i_set) > 1:
                # todo1: check whether to add to coarsening_list before adding to family
                # todo2: currently this will also select contraction sets that are disconnected
                #        should we eliminate those?
                i_cset.set = i_set
                i_cset.cost = subgraph_cost(i_set)
                family.add(i_cset)
    return coarsening_list | 57368b7b55b061312346644970435a7ae679ea3b | 3,636,524 |
def horizon_main_nav(context):
    """ Generates top-level dashboard navigation entries. """
    if 'request' not in context:
        return {}
    current_dashboard = context['request'].horizon.get('dashboard', None)
    dashboards = []
    for dash in Horizon.get_dashboards():
        if callable(dash.nav) and dash.nav(context):
            dashboards.append(dash)
        elif dash.nav:
            dashboards.append(dash)
    return {'components': dashboards,
            'user': context['request'].user,
            'current': getattr(current_dashboard, 'slug', None)} | 40f2e5e5b8661d52d3688a04ac93b7c0d48b99f2 | 3,636,525 |
import warnings
import tensorflow as tf
def _filter_out_bad_segments(img1, seg1, img2, seg2):
    """
    It's possible for shearing or scaling augmentation to sample
    one segment completely out of the image - use this function
    to filter out those cases
    """
    minval = tf.reduce_min(tf.reduce_sum(seg1, [0, 1])*tf.reduce_sum(seg2, [0, 1]))
    if minval < 0.5:
        warnings.warn("filtering bad segment")
        return False
    else:
        return True | fa74ae956c063c15b7fd1e8c21fb6e0788fc19e6 | 3,636,526 |
def _seconds_to_hours(time):
    """Convert time: seconds to hours"""
    return time / 3600.0 | d6abd9144882587833601e64d5c2226446f1bbdc | 3,636,527 |
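A one-line illustrative check:
print(_seconds_to_hours(5400))  # 1.5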
import sys
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream
def execute_command(api_instance, pod_info, exec_command):
    """
    Execute a command inside a specified pod
    exec_command = list of strings
    """
    name = pod_info['name']
    resp = None
    try:
        resp = api_instance.read_namespaced_pod(name=name,
                                                namespace='default')
    except ApiException as excep:
        if excep.status != 404:
            print("Unknown error: %s" % excep)
            sys.exit(0)
    if not resp:
        print("Pod %s does not exist. Creating it..." % name)
        return -1
    # Calling exec and waiting for response
    resp = stream(api_instance.connect_get_namespaced_pod_exec,
                  name,
                  'default',
                  command=exec_command,
                  stderr=True, stdin=False,
                  stdout=True, tty=False)
    print("Response: " + resp)
    return resp | 5b42bf12e96c6b516d2c3ee78750bd107b1dac75 | 3,636,528 |
from inspect import cleandoc
import vaivora.db
async def process_cmd_entry_erase(guild_id: int, txt_channel: str, bosses: list,
                                  channel=None):
    """Processes boss `entry` `erase` subcommand.
    Args:
        guild_id (int): the id of the Discord guild of the originating message
        txt_channel (str): the id of the channel of the originating message,
            belonging to Discord guild of `guild_id`
        bosses (list): a list of bosses to check
        channel (int, optional): the channel for the record;
            defaults to None
    Returns:
        str: an appropriate message for success or fail of command,
            e.g. confirmation or list of entries
    """
    if type(bosses) is str:
        bosses = [bosses]
    vdb = vaivora.db.Database(guild_id)
    if channel and bosses in boss_conf['bosses']['world']:
        records = await vdb.rm_entry_db_boss(bosses=bosses, channel=channel)
    else:
        records = await vdb.rm_entry_db_boss(bosses=bosses)
    if records:
        records = [f'**{record}**' for record in records]
        return cleandoc(
            f"""Your queried records ({len(records)}) have been """
            f"""successfully erased.
            - {bullet_point.join(records)}
            """
        )
    else:
        return '*(But **nothing** happened...)*' | e385768fc34ebb419f51124e0a0f5a4e1577ad00 | 3,636,529 |
import warnings
import numpy as np
import scipy.optimize
def mle_iid_gamma(n):
    """Perform maximum likelihood estimates for parameters for i.i.d.
    Gamma measurements, parametrized by alpha, b=1/beta"""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = scipy.optimize.minimize(
            fun=lambda log_params, n: -log_like_iid_gamma_log_params(log_params, n),
            x0=np.array([2, 1/300]),
            args=(n,),
            method='L-BFGS-B',
        )
    if res.success:
        return res.x
    else:
        raise RuntimeError('Convergence failed with message', res.message) | 089d181a85a72d42457c7ea1eae3aaabb3d6dd60 | 3,636,530 |
import numpy as np
from pycocotools import mask as maskUtils
def build_tabnet_results(dataset, image_ids, rois, class_ids, scores, masks):
    """Arrange results to match COCO specs in http://cocodataset.org/#format
    """
    # If no results, return an empty list
    if rois is None:
        return []
    results = []
    for image_id in image_ids:
        # Loop through detections
        for i in range(rois.shape[0]):
            class_id = class_ids[i]
            score = scores[i]
            bbox = np.around(rois[i], 1)
            mask = masks[:, :, i]
            result = {
                "image_id": image_id,
                "category_id": dataset.get_source_class_id(class_id, "tab"),
                "bbox":
                    [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
                "score": score,
                "segmentation": maskUtils.encode(np.asfortranarray(mask))
            }
            results.append(result)
    return results | a564f28751436be9648b60a7badae651be3c4583 | 3,636,531 |
from typing import Callable
from typing import Dict
from typing import Any
import tensorflow as tf
def make_raw_serving_input_receiver_fn(
    feature_spec: features_specs_type,
    transform_input_tensor: Callable[[Dict[str, tf.Tensor]], None],
    is_model_canned_estimator: bool = False,
    batched_predictions: bool = True
) -> Callable[[], tf.estimator.export.ServingInputReceiver]:
    """
    Build the serving_input_receiver_fn used for serving/inference.
    transform_input_tensor: method that takes the input tensors and mutates them so prediction
        will have its correct input. For instance, it could generate feature transformations from
        "raw dimensions" tensors.
    is_model_canned_estimator: if the model you want to serve is a canned estimator, the serving
        function has to be generated differently
    """
    def serving_input_receiver_fn() -> Any:
        # generate all tensor placeholders:
        raw_tensors, prediction_input_tensors = featurespec_to_input_placeholders(
            feature_spec, batched_predictions)
        # Add transformations (for instance, feature transformations) to prediction_input_tensors
        transform_input_tensor(prediction_input_tensors)
        if is_model_canned_estimator:
            return tf.estimator.export.ServingInputReceiver(
                features=prediction_input_tensors, receiver_tensors={},
                receiver_tensors_alternatives={"raw_input": raw_tensors})
        else:
            return tf.estimator.export.ServingInputReceiver(
                features=prediction_input_tensors, receiver_tensors=raw_tensors)
    return serving_input_receiver_fn | 2780f36ae373b1bd4623b6988ec7b4d130fb21ff | 3,636,532 |
from typing import List
import json
from typing import Set
import torchtext
def load_parentheses_dataset(path: str, depths: List[int]) -> torchtext.data.Dataset:
    """
    Load equation verification data as a sequential torchtext Dataset, in infix
    notation with parentheses.
    The Dataset is additionally populated with `leaf_vocab`, `unary_vocab`, and
    `binary_vocab` sets.
    """
    with open(path, "r") as f:
        data_by_depth = json.load(f)
    leaf_vocab: Set[str] = set()
    unary_vocab: Set[str] = set()
    binary_vocab: Set[str] = set()
    def make_example(serialized):
        tree = ExpressionTree.from_serialized(serialized["equation"])
        label = int(serialized["label"] == "1")
        left_root_index = sequence_root_index(tree.left)
        right_root_index = sequence_root_index(tree.right)
        nonlocal leaf_vocab, unary_vocab, binary_vocab
        leaf_vocab = leaf_vocab.union(tree.leaf_vocab())
        unary_vocab = unary_vocab.union(tree.unary_vocab())
        binary_vocab = binary_vocab.union(tree.binary_vocab())
        return torchtext.data.Example.fromlist(
            [str(tree.left), str(tree.right), label, left_root_index, right_root_index],
            list(_PARENTHESES_FIELD_MAP.items()),
        )
    examples = []
    for depth in depths:
        examples.extend(list(map(make_example, data_by_depth[depth - 1])))
    dataset = torchtext.data.Dataset(examples, _PARENTHESES_FIELD_MAP)
    dataset.leaf_vocab = leaf_vocab
    dataset.unary_vocab = unary_vocab
    dataset.binary_vocab = binary_vocab
    return dataset | 048c5cc42660c21ab59d8dd2dc205aeeafb22bd3 | 3,636,533 |
from typing import List
import numpy as np
def get_angle(v1: List[int], v2: List[int]):
    """
    :param v1: 2D vector
    :param v2: 2D vector
    :return: the angle between v1 and v2 in degrees
    """
    dot = np.dot(v1, v2)
    norm = np.linalg.norm(v1) * np.linalg.norm(v2)
    return np.degrees(np.arccos(dot / norm)) | 669a4119c1b6da1bcf0fb84f3d2ce0056acd8170 | 3,636,534 |
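Illustrative check with perpendicular unit vectors:
print(get_angle([1, 0], [0, 1]))  # 90.0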
import logging
import numpy as np
def get_plasma_intersection(lon, lat, alt, plasma_alt=300., tx_lon=-75.552,
                            tx_lat=45.403, tx_alt=0.07):
    """
    This function finds where a ray from a transmitter toward a satellite
    intersects the peak plasma in the middle.
    *** PARAMS ***
    Satellite ephemeris point(s): lon, lat, alt (deg, deg, km)
    Transmitter location [optionally]: tx_lon, tx_lat, tx_alt (deg, deg, km)
    Altitude of peak plasma density: plasma_alt (km.)
    ***RETURNS***
    plasma_lon (float): longitude of plasma intersection(s)
    plasma_lat (float): latitude of plasma intersection(s)
    """
    vec_inp = True if (type(lon) == list or type(lon) == np.ndarray) else False
    # lon = (lon + 360.) % 360.
    # tx_lon = (tx_lon + 360.) % 360.
    dist = haversine(lon, lat, tx_lon, tx_lat)
    if dist > 2500.:
        logging.error("This approximation isn't valid for large distances")
        logging.error("dist: {0}".format(dist))
        return (-1, -1)
    if plasma_alt > np.min(alt):
        logging.error("Input altitudes are too low for the plasma")
        logging.error('plasma_alt: {0}'.format(plasma_alt))
        logging.error('alt: {0}'.format(alt))
        return (-1, -1)
    if vec_inp:
        tx_lon = tx_lon*np.ones(len(lon))
        tx_lat = tx_lat*np.ones(len(lat))
        tx_alt = tx_alt*np.ones(len(alt))
    x = (plasma_alt/alt)*dist
    # only need initial bearing
    bearing, __ = get_bearing(tx_lon, tx_lat, lon, lat)
    delta_EW = x*np.sin(np.deg2rad(bearing))
    delta_NS = x*np.cos(np.deg2rad(bearing))
    # convert to longitude (deg):
    delta_lon = delta_EW*360./(2*np.pi*6371.*np.sin(np.deg2rad(lat)))
    delta_lat = delta_NS*360./(2*np.pi*6371.)
    plasma_lon = tx_lon + delta_lon
    plasma_lat = tx_lat + delta_lat
    logging.info('delta_EW, delta_NS: {0},{1}'.format(delta_EW, delta_NS))
    logging.info('delta_lon, delta_lat: {0},{1}'.format(delta_lon, delta_lat))
    logging.info('plasma_lon, plasma_lat: {0},{1}'.format(plasma_lon, plasma_lat))
    return (plasma_lon, plasma_lat) | 8234bf61ef2b0d501a723ef9553c6b63d3c51998 | 3,636,535 |
import igraph as ig
import pandas as pd
from IPython.display import display
def plot_clusters(g, c):
    """
    Draws a given graph g with vertex colours corresponding to clusters c and
    displays the corresponding sizes of the clusters.
    ===========================================================================
    Parameters
    ---------------------------------------------------------------------------
    g : a graph
    c : a list of vertex colours (clusters)
    ---------------------------------------------------------------------------
    """
    if type(c) == dict:
        c = list(c.values())
    g.vs['color'] = c
    g.vs['label'] = c
    palette = ig.ClusterColoringPalette(len(g.vs))
    df = pd.DataFrame(columns=['Frequency'])
    df.index.name = 'Colour'
    for i in set(c):
        df.loc[int(i)] = [c.count(i)]
    display(df)
    visual_style = {}
    visual_style['vertex_color'] = [palette[col] for col in g.vs['color']]
    visual_style['vertex_label'] = [col for col in g.vs['color']]
    visual_style['vertex_frame_width'] = 0
    visual_style['bbox'] = (300, 300)
    visual_style['margin'] = 10
    return ig.plot(g, **visual_style) | dbeec2b421a23c7b503dc71e29cd7caca3300dc5 | 3,636,536 |
def make_cat_advanced(simple=True, yolo=False):
    """fill the categories manually"""
    cat_list = get_cat_list(simple)
    if simple:
        if yolo:
            cat_mapping = {
                "benign": 0,
                "malign": 1,
            }
        else:
            cat_mapping = [0, 1]
        return cat_list, cat_mapping
    # The names from datainfo are used here!
    cat_mapping = {
        # malign
        "Chondrosarkom": 1,
        "Osteosarkom": 2,
        "Ewing-Sarkom": 3,
        "Plasmozytom / Multiples Myelom": 4,
        "NHL vom B-Zell-Typ": 5,
        # benign
        "Osteochondrom": 6,
        "Enchondrom": 7,
        "Chondroblastom": 8,
        "Osteoidosteom": 9,
        "NOF": 10,
        "Riesenzelltumor": 11,
        "Chordom": 12,
        "Hämangiom": 13,
        "Knochenzyste, aneurysmatische": 14,
        "Knochenzyste, solitär": 15,
        "Dysplasie, fibröse": 16,
    }
    return cat_list, cat_mapping | 5b4f0bac9126ce3a84ec5ea8d27203f7dfe41e10 | 3,636,537 |
import operator
def process_fuel(context):
    """
    Reformats fuel consumed
    """
    fuel = {
        0: 'Petrol',
        1: 'Diesel'
    }
    data = []
    totals = []
    for index, type in enumerate(context['Fuel']):
        litresSold = operator.sub(type.closing_meter, type.opening_meter)
        total = operator.mul(litresSold, type.unit_price)
        totals.append(total)
        data.append([
            {'type': fuel[index],
             'opening_meter': type.opening_meter,
             'closing_meter': type.closing_meter,
             'unit_price': type.unit_price,
             'litresSold': litresSold,
             'total': total}])
    return {
        'data': data,
        'total': totals
    } | fea31cb306417cf1dfcef8859ed2585c2903849b | 3,636,538 |
from bs4 import BeautifulSoup
import requests
import pandas as pd
def prepare_df_financials(
    ticker: str, statement: str, quarter: bool = False
) -> pd.DataFrame:
    """Builds a DataFrame with financial statements for a given company
    Parameters
    ----------
    ticker : str
        Company's stock ticker
    statement : str
        Either income, balance or cashflow
    quarter : bool, optional
        Return quarterly financial statements instead of annual, by default False
    Returns
    -------
    pd.DataFrame
        A DataFrame with financial info
    Raises
    ------
    ValueError
        If statement is not income, balance or cashflow
    """
    financial_urls = {
        "income": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/income/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/income",
        },
        "balance": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet",
        },
        "cashflow": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow",
        },
    }
    if statement not in financial_urls:
        raise ValueError(f"type {statement} is not in {financial_urls.keys()}")
    period = "quarter" if quarter else "annual"
    text_soup_financials = BeautifulSoup(
        requests.get(
            financial_urls[statement][period].format(ticker),
            headers={"User-Agent": get_user_agent()},
        ).text,
        "lxml",
    )
    # Define financials columns
    a_financials_header = [
        financials_header.text.strip("\n").split("\n")[0]
        for financials_header in text_soup_financials.findAll(
            "th", {"class": "overflow__heading"}
        )
    ]
    s_header_end_trend = ("5-year trend", "5- qtr trend")[quarter]
    df_financials = pd.DataFrame(
        columns=a_financials_header[0 : a_financials_header.index(s_header_end_trend)]
    )
    find_table = text_soup_financials.findAll(
        "div", {"class": "element element--table table--fixed financials"}
    )
    if not find_table:
        return df_financials
    financials_rows = find_table[0].findAll(
        "tr", {"class": ["table__row is-highlighted", "table__row"]}
    )
    for a_row in financials_rows:
        constructed_row = []
        financial_columns = a_row.findAll(
            "td", {"class": ["overflow__cell", "overflow__cell fixed--column"]}
        )
        if not financial_columns:
            continue
        for a_column in financial_columns:
            column_to_text = a_column.text.strip()
            if "\n" in column_to_text:
                column_to_text = column_to_text.split("\n")[0]
            if column_to_text == "":
                continue
            constructed_row.append(column_to_text)
        df_financials.loc[len(df_financials)] = constructed_row
    return df_financials | 1fdd3488c81bdf404764bba3b797f931ba77ad93 | 3,636,539 |
def build_si(cp, instruction):
    """
    Build the integer representation of the source indices.
    :param cp: CoreParameters instance for the target architecture
    :param instruction: Instruction instance
    :return: integer representation of si
    """
    # Check sizing.
    if len(instruction.source_indices) > 3:
        exception_string = f"The {len(instruction.source_indices)} sources exceed the architecture's specified " \
                           + f"maximum of 3."
        raise AssemblyException(exception_string)
    # Concatenate the values together.
    si = 0
    num_source_indices = len(instruction.source_indices)
    if num_source_indices > 2:
        if instruction.source_indices[2] != 0:
            si |= instruction.source_indices[2]
        si <<= cp.single_si_width
    if num_source_indices > 1:
        if instruction.source_indices[1] != 0:
            si |= instruction.source_indices[1]
        si <<= cp.single_si_width
    if num_source_indices > 0:
        if instruction.source_indices[0] != 0:
            si |= instruction.source_indices[0]
    # Check sizing, and return the integer.
    si = int(si)
    if si.bit_length() > cp.si_width:
        raise AssemblyException("si exceeds its allotted bit width.")
    return si | 2d78d75486432c1e41847074ed819194b1f0e643 | 3,636,540 |
import numpy as np
def getRegSampledPrfFitsByOffset(prfArray, colOffset, rowOffset):
    """
    The 13x13 pixel PRFs at each grid location are sampled at a 9x9 intra-pixel grid, to
    describe how the PRF changes as the star moves by a fraction of a pixel in row or column.
    To extract out a single PRF, you need to address the 117x117 array in a funny way
    (117 = 13x9). Essentially you need to pull out every 9th element in the array, i.e.
    .. code-block:: python
        img = array[ [colOffset, colOffset+9, colOffset+18, ...],
                     [rowOffset, rowOffset+9, ...] ]
    Inputs
    ------
    prfArray
        117x117 interleaved PRF array
    colOffset, rowOffset
        The offset used to address the column and row in the interleaved PRF
    Returns
    -------
    prf
        13x13 PRF image for the specified column and row offset
    """
    gridSize = 9
    assert colOffset < gridSize
    assert rowOffset < gridSize
    # Number of pixels in regularly sampled PRF. Should be 13x13
    nColOut, nRowOut = prfArray.shape
    nColOut /= float(gridSize)
    nRowOut /= float(gridSize)
    iCol = colOffset + (np.arange(nColOut) * gridSize).astype(int)
    iRow = rowOffset + (np.arange(nRowOut) * gridSize).astype(int)
    tmp = prfArray[iRow, :]
    prf = tmp[:, iCol]
    return prf | 551ec8624c9c41bca850cf5110d59f65179d6505 | 3,636,541 |
import click
def generate_list_display(object, attrs):
    """Generate a display string for an object based on some attributes.
    Args:
        object: An object which has specific attributes.
        attrs: An iterable of strings containing attributes to get from
            the above object.
    Returns:
        A string containing a list display of the object with respect to
        the passed in attributes.
    """
    return "\n".join(
        click.style(attr, bold=True) + ": %s" % getattr(object, attr)
        for attr in attrs
    ) | 17c876261bede0c38d91b4bd3e7b0048616f8cbf | 3,636,542 |
import tempfile
from tomlkit import document, dumps
def create_temporary_config_file():
    """ Create a minimal config file with some default values
    """
    toml_config = document()
    toml_config.add("name", "Test Suite")
    tmp_config_file = tempfile.NamedTemporaryFile(delete=False)
    with tmp_config_file:
        content = dumps(toml_config).encode("utf-8")
        tmp_config_file.write(content)
    return tmp_config_file.name | ff7c226eb035aa6b5d8e79efa2acc8a92a925659 | 3,636,543 |
import numpy as np
import skimage.transform as skt
def shear(image, shear_factor, **kwargs):
    """
    Shear image.
    For details see:
    http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.AffineTransform
    >>> image = np.eye(3, dtype='uint8')
    >>> sheared = shear(image, 0.2)
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float shear_factor: Shear factor [0, 1]
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
        warp function, e.g. order=1 for linear interpolation.
    :return: Sheared image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    transform = skt.AffineTransform(shear=shear_factor)
    return skt.warp(image, transform, preserve_range=True,
                    **kwargs).astype('uint8') | 97f6cc57d1aa41569c84601470242350b2805ffc | 3,636,544 |
def segments():
    """Yields all segments in the unannotated training Qatar-Living dataset."""
    return (segment for document in documents() for segment in document.segments) | eea12bb25ca3c143c5b867987444e4d141982e94 | 3,636,545 |
import datetime
def get_options_between_dates(
        start_date,
        end_date):
    """get_options_between_dates
    :param start_date: start date
    :param end_date: end date
    """
    valid_options = []
    for rec in historical_options():
        opt_date = datetime.datetime.strptime(
            str(rec),
            '%m-%d-%Y').date()
        if start_date <= opt_date <= end_date:
            valid_options.append(opt_date.strftime('%Y-%m-%d'))
    return valid_options | c2528e85f5e1fce9f537639d0ec88ca20477b93d | 3,636,546 |
from dateutil import parser as dateutil_parser
def format_timestamp(timestamp_str, datetime_formatter):
    """Parse and stringify a timestamp to a specified format.
    Args:
        timestamp_str (str): A timestamp.
        datetime_formatter (str): A format string.
    Returns:
        str: The formatted, stringified timestamp.
    """
    try:
        if '"' in timestamp_str or '\'' in timestamp_str:
            # Make sure the timestamp is not surrounded by any quotes
            timestamp_str = timestamp_str.replace('"', '')
            timestamp_str = timestamp_str.replace('\'', '')
        formatted_timestamp = (
            dateutil_parser.parse(timestamp_str).strftime(datetime_formatter))
    except (TypeError, ValueError) as e:
        LOGGER.warn('Unable to parse/format timestamp: %s\n,'
                    ' datetime_formatter: %s\n%s',
                    timestamp_str, datetime_formatter, e)
        formatted_timestamp = None
    return formatted_timestamp | 528f1a5f7fd2a45de9d4ee77a8eaf29e06dcb310 | 3,636,547 |
import math
def dyn_stdev(val, prev_stdev, prev_mean, n):
    """Dynamic stdev: computes the standard deviation based on a previous stdev plus a new value. Useful when stdev
    is built incrementally; it saves the usage of huge arrays.
    Keyword arguments:
    val -- new val to add to the mean
    prev_stdev -- previous stdev value
    prev_mean -- previous mean value
    n -- number of total elements in the mean including the new val
    """
    if n < 1:
        raise ValueError("n < 1, stdev only defined for a positive number of elements")
    if n == 1:
        return 0
    curr_mean = dyn_mean(val, prev_mean, n)
    return math.sqrt(((n-1)*prev_stdev*prev_stdev + (val - prev_mean)*(val - curr_mean)) / float(n)) | ccf58f769650b209128bc370fc67144f82e68850 | 3,636,548 |
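A sketch of the intended incremental use, assuming dyn_mean is the companion running-mean helper referenced above (not shown in this row); the result matches the population standard deviation.
vals = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
mean, stdev = vals[0], 0.0
for n, v in enumerate(vals[1:], start=2):
    stdev = dyn_stdev(v, stdev, mean, n)  # uses the mean over the first n-1 values
    mean = dyn_mean(v, mean, n)
print(stdev)  # 2.0, the population standard deviation of vals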
import warnings
def get_detail_backtest_results(input_df,
features,
return_col_name='returns',
equity_identifier='Equity Parent',
date_col_name='date',
n_bins=5,
bin_labels=None,
corr_method='spearman',
items_per_bin_deviation_threshold=1,
drop_months_outside_of_threshold=False):
"""
Description: This function generates the back testing results for a list of features.
This procedure does not handle subsetting for a specified date range.
Subset to a specified date range needs to be done prior to passing the input dataframe.
The procedure works on the assumption that the bin_labels are specified in descending order.
eg: ['Q1','Q2','Q3','Q4'] implies Q1 is the highest portfolio and Q4 is the lowest.
The generation of bins work ideally with a sufficient number of unique values in a feature.
The items_per_bin_deviation_threshold parameter can be used to decide how strict we want to be with the effect of non-unique values.
items_per_bin_deviation_threshold acts on the difference between the expected number of items in a bin
vs the actual number of items.
drop_months_outside_of_threshold can be set to True, if the months deviating from the
above threshold should be excluded from back testing.
:param input_df: Type pandas dataframe. long format dataframe.
:param features: Type list. list of features for which backtesting needs to be perforrmed. These should correspond
to the names of the columns in the df_long dataframe.
:param return_col_name: Type str. Name of the return column.
:param equity_identifier : Type str. Name of the equity identifier column.
:param date_col_name:Type str. Name of the date column.
:param n_bins:Type int. number of bins to split the equities into.
:param bin_labels:Type list. list of bin labels. It is assumed that the labels are in descending order.
eg: ['Q1','Q2','Q3'] implies Q1 is the highest portfolio and Q3 is the lowest.
:param corr_method:Type string. correlation method being used.
:param items_per_bin_deviation_threshold:Type int. Permissible deviation from the expected number of items per bin.
:param drop_months_outside_of_threshold:Type boolean. Decision to drop months that break deviate beyond the acceptable
items_per_bin_deviation_threshold.
:return:Type pandas dataframe. detail backtesting results for each period
"""
if bin_labels is None:
bin_labels = ['Q' + str(i + 1) for i in range(n_bins)]
df_long = input_df.copy()
long_cols = list(df_long.columns)
if date_col_name not in long_cols:
df_long = df_long.reset_index()
df_long.rename(columns={'index': 'date'}, inplace=True)
if return_col_name in features:
features.remove(return_col_name)
detail_results = []
features = sorted(features)
feature_cnt = 0
total_features = len(features)
print('Total features for processing: ' + str(total_features))
warnings.formatwarning = custom_formatwarning
for feature in features:
category = feature.split('_bshift')[0]
feature_cols = [equity_identifier, date_col_name, return_col_name, feature]
df_feature_detail = df_long[feature_cols].copy()
df_feature_detail = get_ranks(df_feature_detail,
date_col_name,
feature)
df_feature_detail = add_bins_col_to_rank_df(df_feature_detail,
n_bins)
df_bin_check = pd.DataFrame(df_feature_detail.groupby(date_col_name)['bin_no'].max())
bin_check_mask = df_bin_check['bin_no'] != n_bins
insufficient_bins_dates = [item.date().strftime("%Y-%m-%d") for item in df_bin_check[bin_check_mask].index.tolist()]
if len(insufficient_bins_dates) > 0:
warnings.warn('\nInsufficient bins warning:\nFeature: ' + feature+'\n'+'\n' +
'Months with insufficient bins:' + str(insufficient_bins_dates)+ '\n' + '\n' +
'These months are excluded from the back testing computation')
df_feature_detail = df_feature_detail[~df_feature_detail[date_col_name].isin(insufficient_bins_dates)]
print(df_feature_detail.shape)
total_no_of_items = df_feature_detail[equity_identifier].unique().shape[0]
expected_no_of_items_per_bin = total_no_of_items/n_bins
mask_bin_lowest = df_feature_detail['bin_no'] == 1
mask_bin_highest = df_feature_detail['bin_no'] == n_bins
df_bin_lowest = df_feature_detail[mask_bin_lowest].copy()
df_bin_highest = df_feature_detail[mask_bin_highest].copy()
bin_lowest_bad_dates = get_dates_deviating_from_threshold(df_bin_lowest,
date_col_name,
equity_identifier,
items_per_bin_deviation_threshold,
expected_no_of_items_per_bin)
bin_highest_bad_dates = get_dates_deviating_from_threshold(df_bin_highest,
date_col_name,
equity_identifier,
items_per_bin_deviation_threshold,
expected_no_of_items_per_bin)
if len(bin_lowest_bad_dates) > 0 or len(bin_highest_bad_dates) > 0:
warnings.warn('\nDeviation from threshold warning:\nFeature: ' + feature+'\n'+'\n' +
'Top Portfolio - Months which deviate from threshold: '+str(bin_highest_bad_dates)+'\n'+'\n' +
'Bottom Portfolio - Months which deviate from threshold: '+str(bin_lowest_bad_dates))
if drop_months_outside_of_threshold:
months_to_drop = bin_lowest_bad_dates + bin_highest_bad_dates
warnings.warn('\nMonths dropped warning:\nFeature: ' + feature + '\n'+'\n' +
'Months: '+str(months_to_drop) +' will be dropped from computation')
df_feature_detail = df_feature_detail[~df_feature_detail[date_col_name].isin(months_to_drop)]
df_feature_detail_agg = compute_date_level_metrics(df_feature_detail,
bin_labels,
date_col_name,
return_col_name,
feature,
corr_method)
df_feature_detail_agg['feature'] = feature
df_feature_detail_agg['category'] = category
detail_results.append(df_feature_detail_agg)
feature_cnt += 1
if feature_cnt % 100 == 0:
print(str(feature_cnt) + ' features completed')
detail_results_df = pd.concat(detail_results)
return detail_results_df | 702f80d378d12e3570af6bf69a786ec913eed4e9 | 3,636,549 |
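A minimal usage sketch for the backtesting routine above; its def line sits above this excerpt, so `run_backtesting` is a hypothetical stand-in for the real function name, and the helpers it calls (get_ranks, add_bins_col_to_rank_df, compute_date_level_metrics, get_dates_deviating_from_threshold) must be importable from the same module:
import pandas as pd
df_long = pd.DataFrame({
    'equity_id': ['A', 'B', 'C', 'D'] * 3,
    'date': pd.to_datetime(['2020-01-31'] * 4 + ['2020-02-29'] * 4 + ['2020-03-31'] * 4),
    'ret_1m': [0.01, -0.02, 0.03, 0.00] * 3,
    'momentum_bshift1': [0.5, 0.1, 0.9, 0.3] * 3,  # category becomes 'momentum'
})
detail = run_backtesting(  # hypothetical name for the function defined above
    input_df=df_long,
    features=['momentum_bshift1'],
    return_col_name='ret_1m',
    equity_identifier='equity_id',
    date_col_name='date',
    n_bins=2,
    bin_labels=['Q1', 'Q2'],
)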
def sid_invalid_retry(retry=1):
""" 鉴权失败,一般是因为在手机上登录,导致上次登录的 token 失效。
该函数用于返回一个函数修饰器,被修饰的 API 函数如果发生鉴权错误,
则重新登录后重新调用 API 函数,最高 retry 次
Args:
retry int 重新尝试的次数,默认只重试一次
Raises:
JoyrunSidInvalidError 超过重复校验次数后仍然鉴权失败
"""
def func_wrapper(func):
@wraps(func)
def return_wrapper(self, *args, **kwargs):
count = 0
while True:
try:
return func(self, *args, **kwargs)
                except JoyrunSidInvalidError:  # this exception class signals an authentication (sid) failure
count += 1
if count > retry:
break
else:
self.logger.debug("sid invalid, retry %s" % count)
self.login()
except Exception as err:
raise err
raise JoyrunSidInvalidError("reach retry limit %s" % retry)
return return_wrapper
return func_wrapper | ceafd65bc67fe05e19287e2b690724d89123dfcf | 3,636,550 |
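A usage sketch for the decorator (not part of the dataset entry); `DemoClient` and the local `JoyrunSidInvalidError` subclass are illustrative stand-ins for the real client class and exception in the original module:
import logging

class JoyrunSidInvalidError(Exception):  # stand-in; the real class lives in the original module
    pass

class DemoClient:
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.sid_valid = False

    def login(self):
        self.sid_valid = True  # pretend a fresh login repairs the session

    @sid_invalid_retry(retry=2)
    def fetch_records(self):
        if not self.sid_valid:
            raise JoyrunSidInvalidError("sid expired")
        return "records"

client = DemoClient()
print(client.fetch_records())  # first call fails, the decorator re-logs-in, then returns 'records'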
from collections import Counter
import numpy as np
def check_train_balance(df,idx_train,keys):
"""
check the balance of the training set.
if only one of the classes has more 2 instances than the other
we will randomly take out those 'extra instances' from the major
class
"""
Counts = dict(Counter(df.iloc[idx_train]['targets'].values))
if np.abs(Counts[keys[0]] - Counts[keys[1]]) > 2:
if Counts[keys[0]] > Counts[keys[1]]:
key_major = keys[0]
key_minor = keys[1]
else:
key_major = keys[1]
key_minor = keys[0]
ids_major = df.iloc[idx_train]['id'][df.iloc[idx_train]['targets'] == key_major]
idx_train_new = idx_train.copy()
for n in range(len(idx_train_new)):
random_pick = np.random.choice(np.unique(ids_major),size = 1)[0]
# print(random_pick,np.unique(ids_major))
idx_train_new = np.array([item for item,id_temp in zip(idx_train_new,df.iloc[idx_train_new]['id']) if (id_temp != random_pick)])
ids_major = np.array([item for item in ids_major if (item != random_pick)])
new_counts = dict(Counter(df.iloc[idx_train_new]['targets']))
if np.abs(new_counts[keys[0]] - new_counts[keys[1]]) > 3:
if new_counts[keys[0]] > new_counts[keys[1]]:
key_major = keys[0]
key_minor = keys[1]
else:
key_major = keys[1]
key_minor = keys[0]
ids_major = df.iloc[idx_train_new]['id'][df.iloc[idx_train_new]['targets'] == key_major]
elif np.abs(new_counts[keys[0]] - new_counts[keys[1]]) < 3:
break
return idx_train_new
else:
return idx_train | d99c9e1c4ae0d6124da576b91ce2b2786d53f07b | 3,636,551 |
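Illustrative call, assuming the dataframe carries the 'targets' and 'id' columns the function expects:
import numpy as np
import pandas as pd

df = pd.DataFrame({'id': np.arange(10), 'targets': ['A'] * 7 + ['B'] * 3})
idx_train = np.arange(10)
balanced_idx = check_train_balance(df, idx_train, keys=['A', 'B'])
print(len(balanced_idx))  # some 'A' rows were removed; class counts now differ by less than 3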
import numpy as np
def eye(w, n):
    """Create an n x n diagonal matrix with w on the diagonal."""
return np.array([[w if i==j else 0.0*w for i in range(n)] for j in range(n)]) | e12ff719981ff7311c21339ad651d7bd38f204f6 | 3,636,552 |
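Quick check:
print(eye(2.0, 3))
# [[2. 0. 0.]
#  [0. 2. 0.]
#  [0. 0. 2.]]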
def Messaging():
"""
Messaging
Creates JMS resources.
Only to use in a resource block.
"""
if state().block and state().block != 'resources':
raise SyntaxError('Messaging can only be used in a resources block')
return subscope('messaging') | 9017c8c0452cf75b05f422d6335b8ba5bcd7bc90 | 3,636,553 |
import datetime
def timestamp2WP(timestamp):
"""
Converts a Unix Epoch-based timestamp (seconds since Jan. 1st 1970 GMT)
timestamp to one acceptable by Wikipedia.
:Parameters:
timestamp : int
Unix timestamp to be converted
:Return:
string Wikipedia style timestamp
"""
return datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y%m%d%H%M%S') | c4b9bef9e555c178991569472f3962c7a17d996c | 3,636,554 |
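Example: the Unix epoch itself maps to Wikipedia's 14-digit format:
print(timestamp2WP(0))  # 19700101000000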
def get_available_node_types(nodenet_uid):
""" Return a dict of available built-in node types and native module types"""
return True, runtime.get_available_node_types(nodenet_uid) | 509730edf1c3ea7958a7356e3c784893c2b4c769 | 3,636,555 |
import matplotlib.pyplot as plt
def dos_element_orbitals(
folder,
element_orbital_dict,
output='dos_element_orbitals.png',
fill=True,
alpha=0.3,
linewidth=1.5,
sigma=0.05,
energyaxis='x',
color_list=None,
legend=True,
total=True,
figsize=(4, 3),
erange=[-6, 6],
spin='up',
soc_axis=None,
combination_method='add',
fontsize=12,
save=True,
shift_efermi=0,
):
"""
This function plots the element projected density of states on specific orbitals.
Parameters:
folder (str): This is the folder that contains the VASP files
element_orbital_dict (dict[str:list]): A dictionary that contains the individual elements and the corresponding
orbitals to project onto. For example, if the user wants to project onto the s, py, pz, and px orbitals
            of In and the s orbital of As for an InAs structure, then the dictionary would be {'In':[0,1,2,3], 'As':[0]}
output (str): File name of the resulting plot.
        fill (bool): Determines whether or not to fill underneath the plot
alpha (float): Alpha value for the fill
linewidth (float): Linewidth of lines
sigma (float): Standard deviation for gaussian filter
energyaxis (str): Determines the axis to plot the energy on ('x' or 'y')
color_list (list): List of colors that is the same length as the number of projections
in the plot.
legend (bool): Determines whether to draw the legend or not
        total (bool): Determines whether to draw the total density of states or not
spin (str): Which spin direction to parse ('up' or 'down')
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list): Energy range for the DOS plot ([lower bound, upper bound])
combination_method (str): If spin == 'both', this determines if the spin up and spin down
            densities are added or subtracted. ('add' or 'sub')
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
the figure and axis are return for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
dos = Dos(shift_efermi=shift_efermi, folder=folder, spin=spin, combination_method=combination_method)
fig = plt.figure(figsize=figsize, dpi=400)
ax = fig.add_subplot(111)
_figure_setup_dos(ax=ax, fontsize=fontsize, energyaxis=energyaxis)
dos.plot_element_orbitals(
ax=ax,
element_orbital_dict=element_orbital_dict,
fill=fill,
alpha=alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis=energyaxis,
color_list=color_list,
legend=legend,
total=total,
erange=erange,
)
plt.tight_layout(pad=0.4)
if save:
plt.savefig(output)
else:
return fig, ax | 957e21298077ece088ef5f6c2c2c7ad5c3e599aa | 3,636,556 |
def get_ceph_nodes():
"""Query named relation 'ceph' to determine current nodes."""
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts | 35ee1775c9e4d2636e8373cf0936e6e1a8cb0b76 | 3,636,557 |
from xml.etree import ElementTree
def metadataAbstractElementIllegalChildElementTest1():
"""
No child elements, child elements not allowed.
>>> doctestMetadataAbstractElementFunction(
... testMetadataAbstractElementIllegalChildElements,
... metadataAbstractElementIllegalChildElementTest1())
[]
"""
metadata = """<?xml version="1.0" encoding="UTF-8"?>
<test>
</test>
"""
return ElementTree.fromstring(metadata) | c8405bbe81db5d86941a68c62ba19a6576789e50 | 3,636,558 |
def discover_fields(layout):
"""Discover all fields defined in a layout object
This is used to avoid defining the field list in two places --
the layout object is instead inspected to determine the list
"""
fields = []
try:
comps = list(layout)
except TypeError:
return fields
for comp in comps:
if isinstance(comp, str):
fields.append(comp)
else:
fields.extend(discover_fields(comp))
return fields | 359a6ed1d23e1c56a699895e8c15a93bce353750 | 3,636,559 |
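Example with a nested layout:
layout = ['name', ['email', ['phone']], 'address']
print(discover_fields(layout))  # ['name', 'email', 'phone', 'address']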
def replace(project_symbols):
"""
    Replace old source with non-annotated signatures.
:param project_symbols: symbols we will use to write out new source code
:return: bool
"""
for module_symbols in project_symbols:
if not write_new_source(module_symbols, access_attribute("get_non_annotated_source")):
return False
return True | b3a00199f336b6711fba2096bec9a0fc0c4976b8 | 3,636,560 |
def element_list_as_string(elements):
"""Flatten a list of elements into a space separated string."""
names = []
for element in elements:
if isinstance(element, AOVGroup):
names.append("@{}".format(element.name))
else:
names.append(element.variable)
return " ".join(names) | baa0c1aaa6bd11932f388756c807c9240abb3958 | 3,636,561 |
from typing import Dict
import os
import sqlite3 as sqlite
def load_sample_metadata(path: str, sample_id: str) -> Dict[str, str]:
"""
Read sample metadata from either an sqlite '.db' database or a tab-delimited file.
The tab-file has to have one sample per line and a header with a column
labelled 'Name' or 'SampleId' in which a match to function argument sample_id should be found.
Args:
path (str): path to sqlite database (.db) or tab-delimited metadata file
sample_id (str): Id of sample to annotate
Returns:
A Dict of metadata keys -> values
"""
if not os.path.exists(path):
raise ValueError(f"Samples metadata file '{path}' not found.")
if path.endswith(".db"):
# sqlite3
with sqlite.connect(path) as db:
cursor = db.cursor()
cursor.execute("SELECT * FROM sample WHERE name = ?", (sample_id,))
keys = [x[0] for x in cursor.description]
vals = cursor.fetchone()
if vals is not None:
return dict(zip(keys, vals))
raise ValueError(f"SampleID '{sample_id}' was not found in the samples database.")
else:
result = {}
with open(path) as f:
headers = [x.lower() for x in f.readline()[:-1].split("\t")]
if "sampleid" not in headers and 'name' not in headers:
raise ValueError("Required column 'SampleID' or 'Name' not found in sample metadata file")
if "sampleid" in headers:
sample_metadata_key_idx = headers.index("sampleid")
else:
sample_metadata_key_idx = headers.index("name")
sample_found = False
for line in f:
items = line[:-1].split("\t")
if len(items) > sample_metadata_key_idx and items[sample_metadata_key_idx] == sample_id:
for i, item in enumerate(items):
result[headers[i]] = item
sample_found = True
if not sample_found:
raise ValueError(f"SampleID '{sample_id}' not found in sample metadata file")
return result | 7b4351be995018a0d9654e9cedc79574fb8acf06 | 3,636,562 |
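A usage sketch exercising the tab-delimited branch (the file contents are illustrative):
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as f:
    f.write("Name\tTissue\nS1\tliver\n")
    tmp_path = f.name
print(load_sample_metadata(tmp_path, "S1"))  # {'name': 'S1', 'tissue': 'liver'}
os.remove(tmp_path)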
def encode_corpus(storage: LetterStorage, corpus: tuple) -> tuple:
"""
Encodes sentences by replacing letters with their ids
:param storage: an instance of the LetterStorage class
:param corpus: a tuple of sentences
:return: a tuple of the encoded sentences
"""
if not isinstance(storage, LetterStorage) or not isinstance(corpus, tuple):
return ()
encoded_corpus = []
for element in corpus:
list_element = []
for word in element:
list_element.append(tuple([storage.get_id_by_letter(letter) for letter in word]))
encoded_corpus.append(tuple(list_element))
return tuple(encoded_corpus) | 0fa6b4c6b5dd4a33c9e9aee8b1c81fdb119625a7 | 3,636,563 |
from typing import Type
import numpy as np
import cudf
from numba import cuda
def aggregated_column_unique(chart: Type[BaseChart], data):
"""
description:
main function to calculate histograms
input:
- chart
- data
output:
list_of_unique_values
"""
a_range = cuda.to_device(np.array([chart.min_value, chart.max_value]))
temp_df = cudf.DataFrame()
temp_df.add_column(
chart.x,
get_binwise_reduced_column(
data[chart.x].copy().to_gpu_array(), chart.stride, a_range
),
)
return temp_df[chart.x].unique().to_pandas().tolist() | 79f2f896e5a8ad06dba5589896eadfe224e42246 | 3,636,564 |
from pathlib import Path
import os
from subprocess import call  # the extracted `unittest.mock.call` cannot spawn qemu-img
def convert_image(qcow_image=None):
"""
:return:
"""
# A convert QCOW to VMDK
new_filename = Path(qcow_image).stem + ".vmdk"
path_to_vmdk = str(Path(qcow_image).parent.joinpath(new_filename))
if os.path.isfile(path_to_vmdk) is False:
print("Creating vmdk image.")
# convert qcow2 to vmdk
call(["/usr/local/bin/qemu-img",
"convert", "-f", "qcow2", "-O", "vmdk",
"-o", "subformat=streamOptimized",
"-o", "adapter_type=lsilogic", PATH_TO_QCOW, path_to_vmdk])
# fix qemu-img bug and write hdr
payload = b'\xe2\x80\x98\x78\x30\x33\xe2\x80\x99'
with open(path_to_vmdk, "r+b") as f:
f.seek(4)
f.write(payload)
f.close()
return path_to_vmdk | 4f882ffd06949b62339847ea80d1c1e40602e69f | 3,636,565 |
def collocations_table_exist(con):
"""Return True if the collocations table exist"""
query = con.query(
"select 1 from information_schema.tables "
"where table_name='collocations'")
return bool(list(query.dictresult())) | 9ffa05f698056d9fab6bb9651427b6bc64f414ea | 3,636,566 |
from bs4 import BeautifulSoup
import re
def ftp_profile(publish_settings):
"""Takes PublishSettings, extracts ftp user, password, and host"""
soup = BeautifulSoup(publish_settings, 'html.parser')
profiles = soup.find_all('publishprofile')
ftp_profile = [profile for profile in profiles if profile['publishmethod'] == 'FTP'][0]
matches = re.search('ftp://(.+)/site/wwwroot', ftp_profile['publishurl'])
host = matches.group(1) if matches else ''
username = ftp_profile['username'].replace("\\$", "%5C%24")
password = ftp_profile['userpwd']
return host, username, password, ftp_profile['publishurl'] | 003218e6d58d01afcbf062a14e68294d0033b8af | 3,636,567 |
from typing import List
def train(name,train_data:List[tuple],test_data=None)->tuple:
"""
Train Naive Bayes Classifier for Multinomial Models
:param list train_data: list train data of tuple (text,tag)
:param list test_data: list test data of tuple (text,tag)
:return: tuple(model,accuracy)
"""
X_data=[]
y_data=[]
for sent in train_data:
X_data.append(sent[0])
y_data.append(sent[1])
nb.fit(X_data, y_data)
if test_data!=None:
X_test=[]
y_test=[]
for sent in test_data:
X_test.append(sent[0])
y_test.append(sent[1])
y_pred = nb.predict(X_test)
return (nb, accuracy_score(y_pred, y_test))
return (nb,) | 32fce3f0c69bdb85878549f95c159a1277104f97 | 3,636,568 |
def validate_article(article_json):
"""
Validate the content of a raw article
"""
if article_json is None:
return False
try:
# Filter title
if not vstrlen(article_json['title'], 16):
return False
# Filter contents
if not vstrlen(article_json['contents'], 30):
return False
# Filter Outlet
if not vstrlen(article_json['feedlabel']):
return False
# Filter Article link
if not vstrlen(article_json['url'], 10):
return False
except KeyError:
return False
return True | 493a539ec933d43980a7724afadc6a478b4a1a6a | 3,636,569 |
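A sketch; `vstrlen` is a module-level minimum-length validator not shown here, so the stub below is an assumption made only so the demo runs standalone:
def vstrlen(value, min_len=1):  # assumed helper: value is a string of at least min_len chars
    return isinstance(value, str) and len(value) >= min_len

article = {
    "title": "A sufficiently long headline here",
    "contents": "x" * 40,
    "feedlabel": "Example Outlet",
    "url": "https://example.com/article",
}
print(validate_article(article))  # True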
import numpy as np
import keras  # or `from tensorflow import keras`, depending on the installed stack
def get_model_memory_usage(batch_size, model):
"""
Estimate how much memory the model will take, assuming all parameters is in float32
and float32 takes 4 bytes (32 bits) in memory.
:param batch_size:
:param model:
:return:
"""
# Calculate the total number of outputs from all layers
shapes_mem_count = 0
for l in model.layers:
single_layer_mem = 1
for s in l.output_shape:
if s is None:
continue
single_layer_mem *= s
shapes_mem_count += single_layer_mem
# Calculate the total number of trainable parameters
trainable_count = np.sum(
[keras.backend.count_params(p) for p in set(model.trainable_weights)]
)
# Calculate the total number of non trainable parameters
non_trainable_count = np.sum(
[keras.backend.count_params(p) for p in set(model.non_trainable_weights)]
)
# total memory = 4 bytes * total number of numbers in each run * number of images in each run
total_memory = 4.0 * batch_size * (shapes_mem_count + 2 * trainable_count + non_trainable_count)
# convert to GB
gbytes = np.round(total_memory / (1024.0 ** 3), 3)
return gbytes | 0452e9943ff2a0c9dbcb8c870237344740502fe4 | 3,636,570 |
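A usage sketch assuming a Keras 2-style API:
model = keras.Sequential([
    keras.layers.Dense(64, input_shape=(100,)),
    keras.layers.Dense(10),
])
print(get_model_memory_usage(batch_size=32, model=model), "GB")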
def calculate_deltaangle_distance(
org_ligs,
smiles_keys,
fg_factory,
file_prefix=None
):
"""
Calculate the change of bite angle of each ligand in the cage.
This function will not work for cages built from FGs other than
metals + AromaticCNC and metals + AromaticCNN.
Parameters
----------
org_lig : :class:`dict` of :class:`stk.BuildingBlock`
Dictionary of building blocks where the key is the file name,
and the value is the stk building block.
smiles_keys : :class:`dict` of :class:`int`
Key is the linker smiles, value is the idx of that smiles.
fg_factory :
:class:`iterable` of :class:`stk.FunctionalGroupFactory`
Functional groups to asign to molecules.
NN_distance calculator will not work for cages built from FGs
other than metals + AromaticCNC and metals + AromaticCNN.
file_prefix : :class:`str`, optional
Prefix to file name of each output ligand structure.
Eventual file name is:
"file_prefix"{number of atoms}_{idx}_{i}.mol
Where `idx` determines if a molecule is unique by smiles.
Returns
-------
delta_angles : :class:`dict`
Bite angle in cage - free optimised ligand for each ligand.
Output is absolute values.
"""
delta_angles = {}
# Iterate over ligands.
for lig in org_ligs:
stk_lig = org_ligs[lig]
smiles_key = stk.Smiles().get_key(stk_lig)
idx = smiles_keys[smiles_key]
sgt = str(stk_lig.get_num_atoms())
# Get optimized ligand name that excludes any cage
# information.
if file_prefix is None:
filename_ = f'organic_linker_s{sgt}_{idx}_opt.mol'
else:
filename_ = f'{file_prefix}{sgt}_{idx}_opt.mol'
_in_cage = stk.BuildingBlock.init_from_molecule(
stk_lig,
functional_groups=fg_factory
)
_in_cage = _in_cage.with_functional_groups(
functional_groups=get_furthest_pair_FGs(_in_cage)
)
_free = stk.BuildingBlock.init_from_file(
filename_,
functional_groups=fg_factory
)
_free = _free.with_functional_groups(
functional_groups=get_furthest_pair_FGs(_free)
)
angle_in_cage = calculate_bite_angle(bb=_in_cage)
angle_free = calculate_bite_angle(bb=_free)
delta_angles[lig] = abs(angle_in_cage - angle_free)
return delta_angles | dfc367300b92561c8b167081121c90e5313187a1 | 3,636,571 |
import logging
import time
def wait_for_file_to_finish_writing(**args) -> tuple:
"""
This wait shouldn't be required but appears to be help with larger files.
"""
config = args.get('config')
logging.info("waiting {} seconds for file to finish writing and unlock".format(config.BULK_IMPORT_WAIT))
time.sleep(config.BULK_IMPORT_WAIT)
return True, args | 915c5b159030c5860891e95f09ffc725f755c584 | 3,636,572 |
def get_max_id(connection, generic_sensor_type: str) -> int:
"""
Get the max id of a given generic sensor type.
:param generic_sensor_type: "asset", "market", or "weather_sensor"
"""
t_generic_sensor = sa.Table(
generic_sensor_type,
sa.MetaData(),
sa.Column("id", sa.Integer),
)
max_id = connection.execute(
sa.select(
[
sa.sql.expression.func.max(
t_generic_sensor.c.id,
)
]
)
).scalar() # None if there are none
max_id = 0 if max_id is None else max_id
return max_id | 11a35d9e43e7c403271675fd7b6207d6e16e0c80 | 3,636,573 |
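A sketch against an in-memory SQLite database; it assumes SQLAlchemy 1.3/1.4, whose legacy `sa.select([...])` list form the entry relies on:
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
with engine.connect() as connection:
    connection.execute(sa.text("CREATE TABLE asset (id INTEGER)"))
    connection.execute(sa.text("INSERT INTO asset (id) VALUES (1), (7)"))
    print(get_max_id(connection, "asset"))  # 7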
def _temp_dict_file_name():
"""Name of the expected python dictionary as a json file from run_external_python().
.. versionadded:: 9.1
"""
return '__shared_dictionary__' | 94f33562d775b041387b477d838a5efadfe38f00 | 3,636,574 |
from typing import Union
from typing import List
import numpy as np
import scipy.fftpack as spFFT  # assumed alias for the `spFFT` name used in the body
debugFlag = False  # assumed module-level debug flag referenced in the body
def pSpectrum(
data: Union[np.ndarray, List, None], samplefreq: float = 44100
) -> (np.ndarray, np.ndarray):
"""Power spectrum computation.
Compute the power spectrum of a data set using standard ffts, after padding
the data set to the next higher power of 2. Assumes data is regularly
sampled in the time domain.
Parameters
----------
data : list or numpy array
        the signal for which the power spectrum will be computed
    samplefreq : float
The sample frequency for the data set, in Hz
Returns
-------
(powerspectrum, frequency)
Tuple of numpy arrays with the computed power spectrum, and
the associated frequency arrays (in Hz)
"""
npts = len(data)
# we should window the data here
if npts == 0:
print("? no data in pSpectrum")
return
# pad to the nearest higher power of 2
(a, b) = np.frexp(npts)
if a <= 0.5:
        b = b - 1  # npts is already an exact power of two; avoid doubling the padding
npad = 2 ** b - npts
if debugFlag:
print("npts: %d npad: %d npad+npts: %d" % (npts, npad, npad + npts))
padw = np.append(data, np.zeros(npad))
npts = len(padw)
sigfft = spFFT.fft(padw)
nUniquePts = int(np.ceil((npts + 1) / 2.0))
# print nUniquePts
sigfft = sigfft[0:nUniquePts]
spectrum = abs(sigfft)
spectrum = spectrum / float(npts) # scale by the number of points so that
# the magnitude does not depend on the length
# of the signal or on its sampling frequency
spectrum = spectrum ** 2 # square it to get the power
spmax = np.amax(spectrum)
spectrum = spectrum + 1e-12 * spmax
# multiply by two (see technical document for details)
# odd nfft excludes Nyquist point
if npts % 2 > 0: # we've got odd number of points fft
spectrum[1 : len(spectrum)] = spectrum[1 : len(spectrum)] * 2
else:
spectrum[1 : len(spectrum) - 1] = (
spectrum[1 : len(spectrum) - 1] * 2
) # we've got even number of points fft
freqAzero = np.arange(0, nUniquePts, 1.0) * (samplefreq / npts)
return (spectrum, freqAzero) | d8e5d279ef50df2e475d9e6fae4e041081f3e759 | 3,636,575 |
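A quick check with a 50 Hz sine (relies on the numpy/scipy imports added above):
fs = 1000.0
t = np.arange(0, 1, 1 / fs)
sig = np.sin(2 * np.pi * 50.0 * t)
spectrum, freqs = pSpectrum(sig, samplefreq=fs)
print(freqs[np.argmax(spectrum)])  # close to 50 Hz (padding shifts the bin slightly)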
import sys
def get_environment(config, stage):
"""Find default environment name in stage."""
stage_data = get_stage_data(stage, config.get('stages', {}))
if not stage_data:
sys.exit(NO_STAGE_DATA.format(stage))
try:
return stage_data['environment']
except KeyError:
sys.exit(NO_ENV_IN_STAGE.format(stage)) | f8fc4642110be2ceae9347739a5dd7514bed28c1 | 3,636,576 |
import numpy as np
def anisotropic_Gaussian(ksize=25, theta=np.pi, l1=6, l2=6):
"""
https://github.com/cszn/KAIR/blob/master/utils/utils_sisr.py
Generate an anisotropic Gaussian kernel
Args:
ksize : e.g., 25, kernel size
theta : [0, pi], rotation angle range
l1 : [0.1,50], scaling of eigenvalues
l2 : [0.1,l1], scaling of eigenvalues
If l1 = l2, will get an isotropic Gaussian kernel.
Returns:
k : kernel
"""
v = np.dot(np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
V = np.array([[v[0], v[1]], [v[1], -v[0]]])
D = np.array([[l1, 0], [0, l2]])
Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
return k | 259ae1590807e11d5805c1065fed82acf430b60b | 3,636,577 |
def get_mail(db, messages_obj, msg_id, user_id='me'):
"""Fetch a message from GMail by id and adds it to passed db.
Args:
db: Local db connection object.
messages_obj: Authenticated GMail user object.
msg_id: Id of Gmail message.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
"""
message = messages_obj.get(
userId=user_id, id=msg_id, format='full'
).execute()
if set.intersection(set(_cfg.labels_to_skip()), set(message['labelIds'])) != set():
return False
_log.logger.debug("adding message: %s" % (message['id']))
if not _db.add_message(db, message):
return False
return True | b4622592137fbe988ca2c1c59ad518ad4fa606c6 | 3,636,578 |
import os
import pandas as pd
def get_history(ticker, start_date, end_date = None, granularity=granularity):
"""Fetch/load historical data from Coinbase API at specified granularity
params:
start_date: (str) (see pandas.to_datetime for acceptable formats)
end_date: (str)
granularity: (int) seconds (default: 300)
price data is saved by ticker and date and stored in audiolizer_temp_dir
"""
start_date = pd.to_datetime(start_date).tz_localize(None)
today = pd.Timestamp.now().tz_localize(None)
if end_date is None:
end_date = today + pd.Timedelta('1D')
else:
end_date = min(today, pd.to_datetime(end_date).tz_localize(None))
fnames = []
for int_ in pd.interval_range(start_date,
end_date):
fname = audiolizer_temp_dir + '/{}-{}.csv.gz'.format(
ticker, int_.left.strftime('%Y-%m-%d'))
if not os.path.exists(fname):
int_df = load_date(ticker, granularity, int_)
int_df.to_csv(fname, compression='gzip')
fnames.append(fname)
df = pd.concat(map(lambda file: pd.read_csv(file,index_col='time', parse_dates=True),
fnames)).drop_duplicates()
gaps = get_gaps(df, granularity)
if len(gaps) > 0:
print('found data gaps')
# fetch the data for each date
for start_date in gaps.groupby(pd.Grouper(freq='1d')).first().index:
print('\tfetching {}'.format(start_date))
int_ = pd.interval_range(start=start_date, periods=1, freq='1d')
int_ = pd.Interval(int_.left[0], int_.right[0])
int_df = load_date(ticker, granularity, int_)
fname = audiolizer_temp_dir + '/{}-{}.csv.gz'.format(
ticker, int_.left.strftime('%Y-%m-%d'))
int_df.to_csv(fname, compression='gzip')
df = pd.concat(map(lambda file: pd.read_csv(file,index_col='time', parse_dates=True, compression='gzip'),
fnames)).drop_duplicates()
return df | 1dd4a4342923a758c49b2d6605c636a4a31ed08e | 3,636,579 |
import sys
import time
import ast
import json
def run_command(net,cmd_list,cmd_str):
"""
This function returns the output of IOS command
Exit script if there is no any network device.
Parameters
----------
net (list): network device ip andid
cmd_list (list): available IOS commands in list
cmd (string): available IOS commands in string
Return:
-------
str: output of running IOS command
"""
# getting command input
while True:
cmd = input('\n=> Enter IOS command you like to run on this device, ip='+net[0]+' or "exit" to exit: ')
cmd = cmd.lstrip() # Ignore leading space
cmd = cmd.lower() # to lower case
if cmd == 'exit':
sys.exit()
elif cmd == "": # no input
print ("Oops! command cannot be NULL please try again or enter 'exit'")
elif cmd.split(' ')[0] not in cmd_list: # retrieve the first word of command string
print("Invalid command, valid commands are the following (some of them maybe not available in certain devices) : ",cmd_str,'\n')
else:
break
# JOSN for posting IOS command
cmd_json = {
"commands" : [cmd],
"deviceUuids" : [net[1]] # net = [ip,deviceid]
}
try:
# print (cmd_json)
# print("\nExecuting \"",cmd,"\" please wait ........\n\n")
resp = post(api="network-device-poller/cli/read-request", data=cmd_json)
# prettyPrint(resp)
response_json = resp.json()
taskId=response_json["response"]["taskId"]
except:
print ("\n For some reason cannot get taskId")
sys.exit()
else:
r = get(api="task/"+taskId)
response_json = r.json()
progress = response_json["response"]["progress"]
count = 0
        # We can only see fileId when task is finished
while "fileId" not in progress:
try:
r = get(api="task/"+taskId)
response_json = r.json()
progress = response_json["response"]["progress"]
except:
# Something is wrong
print ("\nSomething is wrong when executing get task/"+taskId)
sys.exit()
time.sleep(1)
count += 1
if count > 20: # timeout after ~20 seconds
print ("\nTaking too long, script time out!")
return ("Error")
# convert string to dict
p=ast.literal_eval(progress)
fileid=p["fileId"]
# now retrieve the output of running IOS command
r = get(api="file/"+fileid)
response_json = r.json()
# real output
if response_json[0]["commandResponses"]["FAILURE"] != {}:
print (response_json[0]["commandResponses"]["FAILURE"][cmd])
else:
try:
output = response_json[0]["commandResponses"]["SUCCESS"][cmd]
print (output)
return output
except:
# Something is wrong
if cmd.split(' ')[1] == '?':
output = response_json[0]["commandResponses"]["FAILURE"][cmd]
print (output)
else:
print ("Response from get task\n",json.dumps(response_json,indent=4))
print ("\nSomething is wrong when parsing the command output")
return ("Error")
import warnings
import numpy as np
from astropy import units as u
def fakemag_to_parallax(fakemag, mag, fakemag_err=None):
"""
To convert fakemag to parallax, Magic Number will be preserved
:param fakemag: astroNN fakemag
:type fakemag: Union[float, ndarray]
:param mag: apparent magnitude
:type mag: Union[float, ndarray]
:param fakemag_err: Optional, fakemag_err
:type fakemag_err: Union[NoneType, float, ndarray]
:return: array of parallax in mas with astropy Quantity (with additional return of propagated error if fakemag_err is provided)
:rtype: astropy Quantity
:History: 2018-Aug-11 - Written - Henry Leung (University of Toronto)
"""
fakemag = np.array(fakemag)
mag = np.array(mag)
# treat non-positive fakemag as MAGIC_NUMBER, check for magic number and negative fakemag
magic_idx = ((fakemag == MAGIC_NUMBER) | (mag == MAGIC_NUMBER) | (fakemag <= 0.) | np.isnan(fakemag) | np.isnan(mag))
with warnings.catch_warnings(): # suppress numpy Runtime warning caused by MAGIC_NUMBER
warnings.simplefilter("ignore")
parallax = fakemag / (10. ** (0.2 * mag))
if fakemag.shape != (): # check if its only 1 element
parallax[magic_idx] = MAGIC_NUMBER
else: # for float
parallax = MAGIC_NUMBER if magic_idx == [1] else parallax
if fakemag_err is None:
return parallax * u.mas
else:
with warnings.catch_warnings(): # suppress numpy Runtime warning caused by MAGIC_NUMBER
warnings.simplefilter("ignore")
parallax_err = (fakemag_err / fakemag) * parallax
if fakemag.shape != (): # check if its only 1 element
parallax_err[magic_idx] = MAGIC_NUMBER
else: # for float
parallax_err = MAGIC_NUMBER if magic_idx == [1] else parallax_err
return parallax * u.mas, parallax_err * u.mas | 0086436f41707a74974d6358d101eacc3149777a | 3,636,581 |
def provenance_stamp(routine):
"""Return dictionary satisfying QCSchema,
https://github.com/MolSSI/QCSchema/blob/master/qcschema/dev/definitions.py#L23-L41
with QCElemental's credentials for creator and version. The
generating routine's name is passed in through `routine`.
"""
return {'creator': 'QCElemental', 'version': get_versions()['version'], 'routine': routine} | 34c1e11c69d0b0354e356bd0463a9f89cd438d51 | 3,636,582 |
import numpy as np
def index_of(y):
"""
A helper function to get the index of an input to plot
against if x values are not explicitly given.
Tries to get `y.index` (works if this is a pd.Series), if that
fails, return np.arange(y.shape[0]).
This will be extended in the future to deal with more types of
labeled data.
Parameters
----------
y : scalar or array-like
The proposed y-value
Returns
-------
x, y : ndarray
The x and y values to plot.
"""
try:
return y.index.values, y.values
except AttributeError:
y = _check_1d(y)
return np.arange(y.shape[0], dtype=float), y | fae630e18bf20f1c9762e6c6f9d2d1b2f5cf93e2 | 3,636,583 |
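Example with a labeled pandas Series (this branch avoids the module-level `_check_1d` fallback):
import pandas as pd

s = pd.Series([10, 20, 30], index=[5, 6, 7])
x, y = index_of(s)
print(x, y)  # [5 6 7] [10 20 30]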
import numpy as np
def get_centered_box(center: np.ndarray, box_size: np.ndarray):
"""
Get box of size ``box_size``, centered in the ``center``.
If ``box_size`` is odd, ``center`` will be closer to the right.
"""
start = center - box_size // 2
stop = center + box_size // 2 + box_size % 2
return start, stop | 4d5ce84547281b27d8405894ce280139696329ba | 3,636,584 |
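Worked example (note the asymmetry for odd sizes):
start, stop = get_centered_box(np.array([10, 10]), np.array([4, 5]))
print(start, stop)  # [8 8] [12 13]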
def _make_context(frames, cameras):
"""
Generate Context named tuple using camera, frame information
Args:
- cameras:
- frames:
Returns: A Context named tuple encapsulating given information
"""
return Context(cameras=cameras, frames=frames) | b338795bf367c7e12b769fa33049e3e52a0daf00 | 3,636,585 |
import pydoop.hdfs.path as path  # assumption: hops wraps pydoop; the extracted `from sys import path` has no isfile
def isfile(hdfs_path, project=None):
"""
Return True if path refers to a file.
Args:
:hdfs_path: You can specify either a full hdfs pathname or a relative one (relative to your Project's path in HDFS).
:project: If this value is not specified, it will get the path to your project. If you need to path to another project, you can specify the name of the project as a string.
Returns:
True if path refers to a file.
Raises: IOError
"""
if project == None:
project = project_name()
hdfs_path = _expand_path(hdfs_path, project)
return path.isfile(hdfs_path) | 442279aca04271fdde353e3a662f453a35266f46 | 3,636,586 |
def get_list_from_file(filename):
"""
Returns a list of containers stored in a file (one on each line)
"""
with open(filename) as fh:
return [_ for _ in fh.read().splitlines() if _] | 8d9a271aa4adea81f62bf74bb1d3c308870f1baf | 3,636,587 |
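Example (blank lines are dropped):
import os
import tempfile

with tempfile.NamedTemporaryFile('w', delete=False) as f:
    f.write("alpha\n\nbeta\n")
    tmp_path = f.name
print(get_list_from_file(tmp_path))  # ['alpha', 'beta']
os.remove(tmp_path)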
def import_recipe():
"""Import recipe from base64 encoded text."""
form = Import()
errors = None
if form.validate_on_submit():
encoded = request.form["encoded"]
try:
decoded = loads(b64decode(encoded.encode("utf-8")).decode("utf-8"))
# recipe table
title = decoded["title"]
servings = decoded["servings"]
source = decoded["source"]
notes = decoded["notes"]
directions = dumps(decoded["directions"])
db.execute("INSERT INTO recipes (title, servings, source, notes, directions) VALUES "
"(?, ?, ?, ?, ?)", (title, servings, source, notes, directions))
# categories table
recipe_id = db.execute("SELECT recipe_id FROM recipes WHERE title = ?",
(title,))[0]["recipe_id"]
for category in decoded["categories"]:
db.execute("INSERT INTO categories (category, recipe_id) VALUES (?, ?)",
(category, recipe_id))
# ingredients table
for ingredient in decoded["ingredients"]:
db.execute("INSERT INTO ingredients (ingredient, recipe_id) VALUES (?, ?)",
(ingredient, recipe_id))
# owners table
db.execute("INSERT INTO owners (recipe_id, user_id) VALUES (?, ?)",
(recipe_id, session["user_id"]))
flash("Recipe imported!")
return redirect(url_for("index"))
except TypeError:
errors = ["Invalid text."]
return render_template("import.html", form=form, errors=errors) | 7665af330f029c29e3cf5a2667204a3ba94409c2 | 3,636,588 |
import numpy as np
def parse_xyz(filename, nbits):
"""Read xyz format point data and return header, points and points data."""
pointstrings = []
with open(filename) as points_file:
for line in points_file:
if not line.startswith('#'):
if not line.isspace():
line = line.replace(',', ' ')
line = line.split()
pointstrings.append(line)
    points = np.zeros((len(pointstrings), 3), dtype=float)
if nbits > 0:
datalen = len(pointstrings[0][3:])
        pointsdata = np.zeros((len(pointstrings), datalen), dtype=int)
for idx, line in enumerate(pointstrings):
coords = line[: 3]
coords = [float(i) for i in coords]
points[idx] = coords
if nbits > 0:
data = line[3:]
data = [int(i) for i in data]
pointsdata[idx] = data
minvals = points.min(axis=0).tolist()
maxvals = points.max(axis=0).tolist()
bbox = [minvals, maxvals]
if nbits > 0:
return bbox, points, pointsdata
else:
return bbox, points, None | 51a9f7f34bbae5eeddd8b97139ed59ec53e43939 | 3,636,589 |
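A sketch with a tiny two-point file carrying two data bits per point:
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.xyz', delete=False) as f:
    f.write("# a comment line\n0 0 0 1 0\n1.0, 2.0, 3.0 0 1\n")
    tmp_path = f.name
bbox, points, pointsdata = parse_xyz(tmp_path, nbits=2)
print(bbox)  # [[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]
os.remove(tmp_path)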
import numpy as np
from copy import deepcopy
from sklearn.metrics import roc_auc_score
def compare_vecs(est, truth, zero_tol=0):
"""
Parameters
----------
est: array-like
The estimated vector.
truth: array-like
The true vector parameter.
zero_tol: float
Zero tolerance for declaring an element equal to zero.
Output
------
out: dict
Dictonary containing various error measurements.
"""
est = np.array(est).reshape(-1)
truth = np.array(truth).reshape(-1)
# true norms
true_L2 = np.sqrt((truth ** 2).sum())
true_L1 = abs(truth).sum()
# we will divide by this later so make sure it isn't zero
true_L2 = max(true_L2, np.finfo(float).eps)
true_L1 = max(true_L1, np.finfo(float).eps)
assert len(est) == len(truth)
support_est = abs(est) > zero_tol
support_true = abs(truth) > zero_tol
n = len(est)
resid = est - truth
out = {}
####################
# size of residual #
####################
out['L2'] = np.sqrt((resid ** 2).sum())
out['L1'] = abs(resid).sum()
out['L2_rel'] = out['L2'] / true_L2
out['L1_rel'] = out['L1'] / true_L1
out['MSE'] = out['L2'] / np.sqrt(n)
out['MAE'] = out['L1'] / n
out['max'] = abs(resid).max()
# compare supports
out.update(compare_supports(support_est, support_true))
out['support_auc'] = roc_auc_score(y_true=support_true,
y_score=abs(est))
#################
# compare signs #
#################
_est = deepcopy(est)
_est[~support_est] = 0
_truth = deepcopy(truth)
_truth[~support_true] = 0
out['sign_error'] = np.mean(np.sign(_est) != np.sign(_truth))
# # only compute at true non-zero
# est_at_true_nz = est[nz_mask_true]
# true_at_true_nz = truth[nz_mask_true]
# sign_misses = np.sign(est_at_true_nz) != np.sign(true_at_true_nz)
# out['sign_error'] = np.mean(sign_misses)
return out | ef977c31bbca818809f7d708d0ed6f754912239e | 3,636,590 |
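A sketch; `compare_supports` is a module-level helper not shown here, so a stub (an assumption about its signature and output) is provided to make the demo standalone:
def compare_supports(support_est, support_true):  # assumed helper signature
    return {'support_match': bool((support_est == support_true).all())}

est = np.array([0.9, 0.0, -1.2])
truth = np.array([1.0, 0.0, -1.0])
metrics = compare_vecs(est, truth)
print(round(metrics['L2'], 3), metrics['support_match'], metrics['sign_error'])
# 0.224 True 0.0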
def accumulator(init, update):
"""
Generic accumulator function.
.. code-block:: python
# Simplest Form
>>> a = 'this' + ' '
>>> b = 'that'
>>> c = functools.reduce(accumulator, a, b)
>>> c
'this that'
# The type of the initial value determines output type.
>>> a = 5
>>> b = Hello
>>> c = functools.reduce(accumulator, a, b)
>>> c
10
:param init: Initial Value
:param update: Value to accumulate
:return: Combined Values
"""
return (
init + len(update)
if isinstance(init, int) else
init + update
) | 6a4962932c8dba4d5c01aa8936787b1332a6323f | 3,636,591 |
def process_po_folder(domain, folder, extra=''):
""" Process each PO file in folder """
result = True
for fname in glob.glob(os.path.join(folder, '*.po')):
basename = os.path.split(fname)[1]
name = os.path.splitext(basename)[0]
mo_path = os.path.normpath('%s/%s%s' % (MO_DIR, name, MO_LOCALE))
mo_name = '%s.mo' % domain
if not os.path.exists(mo_path):
os.makedirs(mo_path)
# Create the MO file
mo_file = os.path.join(mo_path, mo_name)
print 'Compile %s' % mo_file
ret, output = run('%s %s -o "%s" "%s"' % (TOOL, extra, mo_file, fname))
if ret != 0:
print '\nMissing %s. Please install this package first.' % TOOL
exit(1)
if 'WARNING:' in output:
print output
result = False
return result | c89a7952d9961ec096dac98f1d830a24b4d62ecd | 3,636,592 |
def create_service(
*,
db_session: Session = Depends(get_db),
service_in: ServiceCreate = Body(
...,
example={
"name": "myService",
"type": "pagerduty",
"is_active": True,
"external_id": "234234",
},
),
):
"""
Create a new service.
"""
service = get_by_external_id_and_project_name(
db_session=db_session,
external_id=service_in.external_id,
project_name=service_in.project.name,
)
if service:
raise HTTPException(
status_code=400,
detail=f"A service with this identifier ({service_in.external_id}) already exists.",
)
service = create(db_session=db_session, service_in=service_in)
return service | 890928f0a5b1a990ea27594886031bf6ede1a0db | 3,636,593 |
import os
def GetBuildRevisions(src_dir, webkit_dir=None, revision_dir=None):
"""Parses build revisions out of the provided directories.
Args:
src_dir: The source directory to be used to check the revision in.
webkit_dir: Optional WebKit directory, relative to src_dir.
revision_dir: If provided, this dir will be used for the build revision
instead of the mandatory src_dir.
Returns a tuple of the build revision and (optional) WebKit revision.
NOTICE: These revisions are strings, since they can be both Subversion numbers
and Git hashes.
"""
abs_src_dir = os.path.abspath(src_dir)
webkit_revision = None
if webkit_dir:
webkit_dir = os.path.join(abs_src_dir, webkit_dir)
webkit_revision = GetHashOrRevision(webkit_dir)
if revision_dir:
revision_dir = os.path.join(abs_src_dir, revision_dir)
build_revision = GetHashOrRevision(revision_dir)
else:
build_revision = GetHashOrRevision(src_dir)
return (build_revision, webkit_revision) | e1e8993921a95c54c47c9bb293e8ea516b47e3c2 | 3,636,594 |
from typing import Dict
import sysconfig
import os
def _get_scheme_dict(distribution_name: str) -> Dict[str, str]:
"""Calculate the scheme dictionary for the current Python environment."""
scheme_dict = sysconfig.get_paths()
installed_base = sysconfig.get_config_var("base")
assert installed_base
# calculate 'headers' path, not currently in sysconfig - see
# https://bugs.python.org/issue44445. This is based on what distutils does.
# TODO: figure out original vs normalised distribution names
scheme_dict["headers"] = os.path.join(
sysconfig.get_path("include", vars={"installed_base": installed_base}),
distribution_name,
)
return scheme_dict | 4064bebdc0db24756a556908a6acec365d1cae12 | 3,636,595 |
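Quick inspection of the computed scheme (the paths depend on the running interpreter):
scheme = _get_scheme_dict("example_dist")
for key in ("purelib", "scripts", "headers"):
    print(key, "->", scheme[key])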
import os
import easydev
def biokit_data(filename, where=None):
"""Simple utilities to retrieve data sets from biokit/data directory"""
biokit_path = easydev.get_package_location('biokit')
share = os.sep.join([biokit_path , "biokit", 'data'])
# in the code one may use / or \
if where:
filename = os.sep.join([share, where, filename])
else:
filename = os.sep.join([share, filename])
if os.path.exists(filename) is False:
raise Exception('unknown file %s' % filename)
return filename | f81680593b54a496de84a1683c9f83b3f77ed633 | 3,636,596 |
import json
def get_handler(event, context): # pylint: disable=unused-argument
"""REST API GET method to get data about a Minecraft game server."""
# gather the server data
name = event.get('pathParameters', {}).get('name')
server = gather(name)
# return the HTTP payload
return {
'statusCode': 200,
'body': json.dumps(server)
} | 61326050cbac4ad3a7a727ebef01bd7e496a254c | 3,636,597 |
def get_item(dataframe: DataFrame, col: str, new_col: str, index: any) -> DataFrame:
"""Return DF with a column that contains one item for an array
:param str col: name of the column
:param str new_col: type of the new column
:param any index: the index key
Examples:
```
SectionName:
Type: transform::generic
Input: InputBlock
Properties:
Functions:
- get_item:
col: name
new_col: firstname
index: 2
```
"""
_validate_column_exists(dataframe, col)
return dataframe.withColumn(new_col, F.col(col).getItem(index)) | e06090dad60f7522b1727d69926994bb94f669d6 | 3,636,598 |
import os
def appdatadirectory():
"""Attempt to retrieve the current user's app-data directory
This is the location where application-specific
files should be stored. On *nix systems, this will
be the ${HOME}/.config directory. On Win32 systems, it will be
the "Application Data" directory. Note that for
Win32 systems it is normal to create a sub-directory
for storing data in the Application Data directory.
"""
if shell:
# on Win32 and have Win32all extensions, best-case
return shell_getShellFolder(shellcon.CSIDL_APPDATA)
if winreg:
# on Win32, but no Win32 shell com available, this uses
# a direct registry access, likely to fail on Win98/Me
return _winreg_getShellFolder('AppData')
# okay, what if for some reason winreg is missing? would we want to allow ctypes?
# default case, look for name in environ...
for name in ['APPDATA', 'HOME']:
if name in os.environ:
return os.path.join(os.environ[name], '.config')
# well, someone's being naughty, see if we can get ~ to expand to a directory...
possible = os.path.abspath(os.path.expanduser('~/.config'))
if os.path.exists(possible):
return possible
raise OSError(
"""Unable to determine user's application-data directory, no ${HOME} or ${APPDATA} in environment""") | a360204a7c7796c7130251070f468c1eba63d532 | 3,636,599 |