| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def numpy_bbox_to_image(image, bbox_list, labels=None, scores=None, class_name=[], config=None):
""" Numpy function used to display the bbox (target or prediction)
"""
    assert image.dtype == np.float32 and len(image.shape) == 3
if config is not None and config.normalized_method == "torch_resnet":
channel_avg = np.array([0.485, 0.456, 0.406])
channel_std = np.array([0.229, 0.224, 0.225])
image = (image * channel_std) + channel_avg
image = (image*255).astype(np.uint8)
elif config is not None and config.normalized_method == "tf_resnet":
image = image[..., ::-1]
image = image / 255
bbox_xcycwh = bbox.np_rescale_bbox_xcycwh(bbox_list, (image.shape[0], image.shape[1]))
bbox_x1y1x2y2 = bbox.np_xcycwh_to_xy_min_xy_max(bbox_xcycwh)
# Set the labels if not defined
if labels is None:
labels = np.zeros((bbox_x1y1x2y2.shape[0]))
bbox_area = []
# Go through each bbox
for b in range(0, bbox_x1y1x2y2.shape[0]):
x1, y1, x2, y2 = bbox_x1y1x2y2[b]
bbox_area.append((x2-x1)*(y2-y1))
# Go through each bbox
for b in np.argsort(bbox_area)[::-1]:
        # Take a new color at random for this instance
instance_color = np.random.randint(0, 255, (3))
x1, y1, x2, y2 = bbox_x1y1x2y2[b]
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
x1, y1, x2, y2 = max(0, x1), max(0, y1), min(image.shape[1], x2), min(image.shape[0], y2)
# Select the class associated with this bbox
class_id = labels[int(b)]
if scores is not None and len(scores) > 0:
label_name = class_name[int(class_id)]
label_name = "%s:%.2f" % (label_name, scores[b])
else:
label_name = class_name[int(class_id)]
class_color = CLASS_COLOR_MAP[int(class_id)]
color = instance_color
multiplier = image.shape[0] / 500
cv2.rectangle(image, (x1, y1), (x1 + int(multiplier*15)*len(label_name), y1 + 20), class_color.tolist(), -10)
cv2.putText(image, label_name, (x1+2, y1 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6 * multiplier, (0, 0, 0), 1)
cv2.rectangle(image, (x1, y1), (x2, y2), tuple(class_color.tolist()), 2)
return image
|
c9070cb917e376357363f99e9d951d84e3274684
| 3,641,000
|
import json
def loads(text, template=None, colored=None, comments=None, **kwargs):
"""
Deserialize `text` (a `str` or `unicode` instance containing a JSON
document supporting template references `{$.key}`) to a Python object.
:param text: serialized JSON string
:type text: str
    :param template: (optional) None, str, dict, list, io.IOBase - Causes template values to be sourced from this object
:type template: dict
:type template: list
:param kwargs: all the arguments that `json.loads <http://docs.python.org/
2/library/json.html#json.loads>`_ accepts.
:returns: dict or list.
# TODO update docstring
"""
if not isinstance(text, (str, bytes, bytearray)):
# just use the default json library to raise the normal error (TypeError)
json.loads(text)
return _loads(text=text, template=template, colored=colored, comments=comments, **kwargs)
|
fb128bc0c5cea6074d1ae43634d06853f05010e1
| 3,641,001
|
def AsyncSleep(delay, name=None):
"""Pause for `delay` seconds (which need not be an integer).
This is an asynchronous (non-blocking) version of a sleep op. It includes
any time spent being blocked by another thread in `delay`. If it is blocked
  for a fraction of the time specified by `delay`, it calls `sleep`
  (actually `usleep`) only for the remainder. If it is blocked for the full
  time specified by `delay` or more, it returns without explicitly calling
`sleep`.
Args:
delay: tf.Tensor which is a scalar of type float.
name: An optional name for the op.
Returns:
The `delay` value.
"""
return examples_async_sleep(delay=delay, name=name)
|
c6ccb12aa7e27a28591ac5282b0e78baa68df9df
| 3,641,002
|
def palide(string, length, ellipsis="...", pad=" ", position=1.0, left=False):
"""
A combination of `elide` and `pad`.
"""
return globals()["pad"](
elide(string, length, ellipsis=ellipsis, position=position),
length, pad=pad, left=left)
|
be14ecb386ef7d49a6c85514bb5bee93d482be3d
| 3,641,003
|
def get_email_adderess(email_addr):
""" Return dict from opalstack for given email address, or None """
mails = get_request("mail/list/")
for record in mails['mails']:
if record['address'] == email_addr:
return get_request("mail/read/{}".format(record['id']))
return None
|
f31ae883b40da8b9ddf743744a3611dd0968e787
| 3,641,004
|
def GK3toUTM(ea, no=None, zone=32):
"""Transform Gauss-Krueger zone 3 into UTM (for backward compatibility)."""
return GKtoUTM(ea, no, zone, gkzone=3)
|
aeab5433c8a676f862c9271f62a574bff2f74444
| 3,641,005
|
from typing import Optional
def get_all_predictions(
model: nn.Module,
dataloader: DataLoader,
device: _Device,
threshold_prob: Optional[float] = None,
decouple_fn: Optional[_DecoupleFnTest] = None,
) -> _TestResult:
"""
Make predictions on entire dataset and return raw outputs
and optionally class predictions and probabilities if it's
a classification model.
See `perform_one_epoch()` for more details.
"""
return perform_one_epoch(
phase="test",
model=model,
dataloader=dataloader,
device=device,
threshold_prob=threshold_prob,
decouple_fn=decouple_fn,
)
|
e2d04376a935a1acd1d2e0645209cb865997669e
| 3,641,006
|
def plot_perf_stats(returns, factor_returns):
"""
Create box plot of some performance metrics of the strategy.
The width of the box whiskers is determined by a bootstrap.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
Returns
-------
    bootstrap_values : pd.DataFrame
        The bootstrapped performance metrics, with the Kurtosis column dropped.
"""
bootstrap_values = timeseries.perf_stats_bootstrap(returns,
factor_returns,
return_stats=False)
bootstrap_values = bootstrap_values.drop('Kurtosis', axis='columns')
return bootstrap_values
|
f948c29df18bc93d08fd6ad1b366db310b4bf8c2
| 3,641,007
|
def create(user):
"""
This function creates a new user in the database
based on the passed-in user data.
:param user: User to create in database
:return: 201 on success, 400 on bad postal code, 406 on user exists
"""
username = user.get("username", None)
postalcode = user.get("postalcode", None)
cityname = _get_cityname(postalcode)
# Does the user already exist?
existing_master = Master.query.filter(Master.username == username).one_or_none()
if existing_master is None and cityname is not None:
# Create a user instance using the schema and the passed-in user.
user_master = Master(username=username)
db.session.add(user_master)
user_detail = Detail(postalcode=postalcode, cityname=cityname)
db.session.add(user_detail)
# Save changes to database.
db.session.commit()
return make_response(
"{username} successfully created".format(username=username), 201,
)
# If the Postal Code doesn't return any hits in Geonames
elif cityname is None:
abort(
400, "Postal code {postalcode} is invalid".format(postalcode=postalcode),
)
# Otherwise, they exist, and that's an error
else:
abort(
406,
"User with username {username} already exists".format(username=username),
)
|
2b3a64211fd66c7fbe93f51aecbceab6675f5b99
| 3,641,008
|
def show_profile(uid):
"""
Return serializable users data
:param uid:
:return String: (JSON)
"""
user = get_user_by_id(uid)
return jsonify(user.serialize)
|
cb8bc7934575b99b6098a911c4dc7ad9fb1b7a48
| 3,641,009
|
def degree_correlation(coeffs_1, coeffs_2):
"""
Correlation per spherical harmonic degree between two models 1 and 2.
Parameters
----------
coeffs_1, coeffs_2 : ndarray, shape (N,)
Two sets of coefficients of equal length `N`.
Returns
-------
C_n : ndarray, shape (nmax,)
Degree correlation of the two models. There are `N = nmax(nmax+2)`
coefficients.
"""
if coeffs_1.ndim != 1:
raise ValueError(f'Only 1-D input allowed {coeffs_1.ndim} != 1')
if coeffs_2.ndim != 1:
raise ValueError(f'Only 1-D input allowed {coeffs_2.ndim} != 1')
if coeffs_1.size != coeffs_2.size:
raise ValueError(
'Number of coefficients is '
'not equal ({0} != {1}).'.format(coeffs_1.size, coeffs_2.size))
nmax = int(np.sqrt(coeffs_1.size + 1) - 1)
C_n = np.zeros((nmax,))
R_n = np.zeros((nmax,)) # elements are prop. to power spectrum of coeffs_1
S_n = np.zeros((nmax,)) # elements are prop. to power spectrum of coeffs_2
coeffs_12 = coeffs_1*coeffs_2
for n in range(1, nmax+1):
        idx_min = n**2 - 1              # first coefficient of degree n
        idx_max = idx_min + (2*n + 1)
        R_n[n-1] = np.sum(coeffs_1[idx_min:idx_max]**2)
        S_n[n-1] = np.sum(coeffs_2[idx_min:idx_max]**2)
        C_n[n-1] = np.sum(coeffs_12[idx_min:idx_max]) / np.sqrt(R_n[n-1]*S_n[n-1])
return C_n
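
# Illustrative usage (added sketch, not part of the original snippet); assumes
# numpy is imported as np, which the function body above already requires.
# With nmax = 2 there are N = nmax*(nmax+2) = 8 coefficients; a model that is a
# scalar multiple of another has degree correlation 1 at every degree.
import numpy as np
c1 = np.arange(1.0, 9.0)               # 8 coefficients -> nmax = 2
c2 = 2.0 * c1                          # second model, perfectly correlated
print(degree_correlation(c1, c2))      # -> [1. 1.]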
|
10dea06b6e1f9a1c4202f3478523fe7bdcc8ab6e
| 3,641,010
|
import cdr_cleaner.args_parser as parser
def parse_args():
"""
Add file_path to the default cdr_cleaner.args_parser argument list
:return: an expanded argument list object
"""
help_text = 'path to csv file (with header row) containing pids whose observation records are to be removed'
additional_argument_1 = {parser.SHORT_ARGUMENT: '-f',
parser.LONG_ARGUMENT: '--file_path',
parser.ACTION: 'store',
parser.DEST: 'file_path',
parser.HELP: help_text,
parser.REQUIRED: True}
args = parser.default_parse_args([additional_argument_1])
return args
|
f5f89a55799ceee801b06a51e902cdd252068e50
| 3,641,011
|
def name(model):
"""A repeatable way to get the formatted model name."""
return model.__name__.replace('_', '').lower()
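
# Illustrative usage (added sketch): underscores are stripped and the result is
# lower-cased. `Order_Item` is a hypothetical model class used only for the demo.
class Order_Item:
    pass

print(name(Order_Item))  # -> 'orderitem'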
|
3d9ca275bfbfff6d734f49a47459761c559d906e
| 3,641,012
|
from typing import List
from typing import Optional
def render_fields(
fields: List[Field], instance_name: Optional[str] = None
) -> List[str]:
"""Renders fields to string.
Arguments:
fields:
The fields to render.
instance_name:
The name of model instance for which the fields are written.
If given, automatically insert the value for FK fields.
This assumes that the FK variables are defined before this class and follow
the convention `column_name1_column_name2_...`.
Sorts fields by being optional or not.
"""
descriptions = []
optional_descriptions = []
for field in fields:
text = render_field(field, instance_name=instance_name)
if field.null:
optional_descriptions.append(text)
else:
descriptions.append(text)
return descriptions + optional_descriptions
|
ba75827eac0ccea3e68259e27274feea17121cb2
| 3,641,013
|
def get_primary_tasks_for_service(service_arn):
"""Get the task ARN of the primary service"""
response = ecs.describe_services(cluster=cluster, services=[service_arn])
for deployment in response['services'][0]['deployments']:
if deployment['status'] == 'PRIMARY':
return get_tasks_for_task_definition(deployment['taskDefinition'])
return None
|
113e3ab3a20646d20caf08a6e1dfc4d546d2d950
| 3,641,014
|
import os
import requests
def post_to_slack(alarm_name, reason, config):
""" Send message text to slack channel
INPUTS:
* alarm_name - subject of the message
* reason - message text
"""
# get params from config file
proxy_server = config['proxy_server']
if proxy_server !='':
os.environ['HTTP_PROXY'] = proxy_server
os.environ['HTTPS_PROXY'] = proxy_server
slack_webhook_url = config['slack_webhook_url']
slack_message = build_pr_message(alarm_name,reason)
data={"text":slack_message['text'], "attachments" : slack_message['attachments'] }
    response = requests.post(slack_webhook_url, json=data)
    return response.text
|
d8561a764c9e125e8d918c86fbc96413529933fc
| 3,641,015
|
def load_data(csv_file):
"""
@type csv_file: string
@param csv_file: path to csv file
Loads data from specified csv file
@rtype: pandas.DataFrame
@return: DataFrame from csv file without Month column
"""
    return pd.read_csv(csv_file).drop(columns='Month')
|
5a458c104d763e431f0faf63a98bf4a59fd7902c
| 3,641,016
|
def nearest(array,value):
"""
    Find the index of the array element that is closest to value
Args:
array (array): array to be tested
value (float): value to be tested
Returns:
int: index
"""
return (np.abs(array-value)).argmin()
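
# Illustrative usage (added sketch; assumes numpy is imported as np, which the
# function body above already relies on).
import numpy as np
grid = np.array([0.0, 0.5, 1.0, 1.5])
print(nearest(grid, 1.1))  # -> 2, since 1.0 is the closest grid value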
|
5197d0cae968557b519d1fa4025d2b834d7065c5
| 3,641,017
|
def fine_tune_model(trainX: np.ndarray, trainy: np.ndarray, cv: int = 5) -> SVC:
"""Receives training set and run a grid search to find the best
hyperparameters. It returns the best model, already trained.
Args:
        trainX (np.ndarray): train array containing embedding images.
        trainy (np.ndarray): train array containing labels.
cv (int, optional): Number of folds to apply in cross validation.
Defaults to 5.
Returns:
SVC: Trained model.
"""
param_grid = {'C': [0.1, 1, 10, 100, 1000],
'gamma': ['auto', 'scale'],
'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
'probability': [True]}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=1,
return_train_score=True, cv=cv)
grid.fit(trainX, trainy)
return grid.best_estimator_
|
d493a0b0023f57116858878163b81463c1a7166e
| 3,641,018
|
from datetime import datetime
from datetime import timedelta
import numpy
def get_empty_array_year(year=datetime.now().year, start_end=True, variable_list=['TEST', ], variable_list_dtype=None, record_interval='HH'):
"""
Allocates and returns new empty record array for given year using list of dtypes
(or variable labels as 8byte floats if no dtype list provided) for variables plus
TIMESTAMP_START and TIMESTAMP_END at beginning
:param year: year to be represented in array (current year if not provided)
:type year: int
:param start_end: if True, uses TIMESTAMP_START and TIMESTAMP_END, if not, uses only TIMESTAMP (end)
:type start_end: bool
:param variable_list: list of strings to be used as variable labels (assumed f8 type)
:type variable_list: list (of str)
:param variable_list_dtype: list of dtype tuples (label, data type) to be used as variables
:type variable_list_dtype: list (of (str, str)-tuples)
:param record_interval: resolution to be used for record ['HR' for hourly, 'HH' for half-hourly (default)]
:type record_interval: str
"""
# record_interval
if record_interval.lower() == 'hh':
step = timedelta(minutes=30)
elif record_interval.lower() == 'hr':
step = timedelta(minutes=60)
else:
msg = 'Unknown record_interval: {r}'.format(r=record_interval)
log.critical(msg)
raise ONEFluxError(msg)
# timestamp list
timestamp_list = []
current_timestamp = datetime(int(year), 1, 1, 0, 0, 0)
while current_timestamp.year < int(year) + 1:
timestamp_list.append(current_timestamp)
current_timestamp += step
timestamp_list.append(current_timestamp)
timestamp_list_begin = timestamp_list[:-1]
timestamp_list_end = timestamp_list[1:]
# array dtype
dtype = ([(var, 'f8') for var in variable_list] if variable_list_dtype is None else variable_list_dtype)
if start_end:
dtype = [('TIMESTAMP_START', 'a25'), ('TIMESTAMP_END', 'a25')] + dtype
else:
dtype = [('TIMESTAMP', 'a25'), ] + dtype
# record array
data = numpy.zeros(len(timestamp_list_begin), dtype=dtype)
data[:] = -9999.0
if start_end:
data['TIMESTAMP_START'][:] = [i.strftime('%Y%m%d%H%M') for i in timestamp_list_begin]
data['TIMESTAMP_END'][:] = [i.strftime('%Y%m%d%H%M') for i in timestamp_list_end]
else:
data['TIMESTAMP'][:] = [i.strftime('%Y%m%d%H%M') for i in timestamp_list_end]
return data
|
d703ddc41233125b2b426c0423b0a3dcb85f73a0
| 3,641,019
|
def validate_geojson(data):
"""
Validate geojson
"""
if not (isinstance(data, dict)):
return False
if not isinstance(data.get('features'), list):
return False
gj = geojson.FeatureCollection([geojson.Feature(f) for f in data['features']])
return gj.is_valid
|
c48dfb76ff6d0255299f3913a644edff679b1a1a
| 3,641,020
|
from pathlib import Path
import time
def run_wps(conn, config_wpsprocess, **kwargs):
"""
primary function to orchestrate running the wps job from submission to download (if required)
Parameters:
-----------
conn: dict,
Connection parameters
Example: conn = {'domain': 'https://earthobs.defra.gov.uk',
'username': '<insert-username>',
'access_token': '<insert-access-token>'}
config_wpsprocess: list or dict,
list of dictionaries for individual wps submission requests.
users can generate a list of multiple dictionaries, one dict per wps job
with "xml_config", this is dict of variables that templated into the xml
payload for the WPS request submission
Example:
config_wpsprocess = [{'template_xml':'gsdownload_template.xml',
'xml_config':{
'template_layer_name':lyr,
'template_outputformat':'image/tiff',
'template_mimetype':'application/zip'},
'dl_bool':True
}]
output_dir: str or Pathlib object, optional,
user specified output directory
verify: str, optional:
add custom path to any organisation certificate stores that the
environment needs
Default Value:
* True
Possible Value:
* 'dir/dir/cert.file'
Returns:
-----------
list_download_paths: list,
list of pathlib objects for downloaded output for further reuse
"""
# set output path if not specified
if 'output_dir' not in kwargs:
kwargs['output_dir']=Path.cwd()
if 'verify' not in kwargs:
kwargs['verify'] = True
# set the request config dictionary
request_config = {
'wps_server':conn['domain'] + '/geoserver/ows',
'access_token':conn['access_token'],
'headers':{'Content-type': 'application/xml','User-Agent': 'python'},
'verify':kwargs['verify']
}
# submit wps jobs
try:
execution_dict = submit_wps_queue(request_config, config_wpsprocess)
except Exception as error:
print(error.args)
print('The WPS submission has failed')
else:
# INITIALISE VARIABLES and drop the wps log file if it exists
path_output = make_output_dir(kwargs['output_dir'])
# keep calling the wps job status until 'continue_process' = False
while True:
execution_dict = poll_api_status(execution_dict, request_config, path_output)
if execution_dict['continue_process']:
time.sleep(15)
else:
break
# after download is complete, process downloaded files (eg renames and extracting zips)
if execution_dict['job_status'] == 'DOWNLOAD-SUCCESSFUL':
execution_dict = process_wps_downloaded_files(execution_dict)
# set log file and job duration in dict
execution_dict['log_file_path'] = path_output / 'wps-log.csv'
execution_dict['total_job_duration'] = (execution_dict['timestamp_job_end'] - execution_dict['timestamp_job_start']).total_seconds() / 60
return execution_dict
|
d629939aaa32399a52a2f8ed1b0c8b5e94206f29
| 3,641,021
|
from chat.models import Chat
from ct.models import Role
def get_redirect_url(user):
"""
Analyse user and redirect:
Instructor:
onboarding is disabled - to /ctms/
onboarding is enabled and not achieved needed percent - to /ctms/onboarding/
onboarding is enabled and achieved needed percent - to /ctms/
Student:
Depends on type of chat student took part of and redirect to:
/lms/courses/<course_id> or /lms/tester/courses/<course_pk>
If user doesn't have any chat:
            look at the user's role and derive the lms type from either the invite or the role's course
Arguments:
user (obj): User model of django.contrib.auth.models
Return:
redirect_url (str)
"""
redirect_url = reverse('ct:home') # default
if not user:
return
if getattr(user, 'instructor', None):
if waffle.switch_is_active('ctms_onboarding_enabled') and \
get_onboarding_percentage(user.id) < settings.ONBOARDING_PERCENTAGE_DONE:
redirect_url = reverse('ctms:onboarding')
else:
redirect_url = reverse('ctms:my_courses')
else:
chat = Chat.objects.filter(user=user).order_by('-timestamp').first()
if chat:
view_identificator = ''
if chat.is_test:
view_identificator = 'tester_'
course = chat.enroll_code.courseUnit.course
redirect_url = reverse(
'lms:{}course_view'.format(view_identificator),
kwargs={'course_id': course.id}
)
else:
view_identificator = ''
role = user.role_set.filter(role__in=[Role.ENROLLED, Role.SELFSTUDY]).last()
if role:
last_invite = role.course.invite_set.filter(status='joined', user=user, type='tester').last()
if last_invite:
view_identificator = 'tester_'
redirect_url = reverse(
'lms:{}course_view'.format(view_identificator),
kwargs={'course_id': role.course.id}
)
return redirect_url
|
28faf82e8b22eb3602ba70e00a97a97ae50d93a1
| 3,641,022
|
def _ds_to_arrraylist(
ds, bands, time_dim, x_dim, y_dim, percentile_stretch, image_proc_func=None
):
"""
Converts an xarray dataset to a list of numpy arrays for plt.imshow plotting
"""
    # Compute percentile stretch limits
p_low, p_high = ds[bands].to_array().quantile(percentile_stretch).values
array_list = []
for i, timestep in enumerate(ds[time_dim]):
# Select single timestep from the data array
ds_i = ds[{time_dim: i}]
# Get shape of array
x = len(ds[x_dim])
y = len(ds[y_dim])
if len(bands) == 1:
# Create new one band array
img_toshow = exposure.rescale_intensity(
ds_i[bands[0]].values, in_range=(
p_low, p_high), out_range="image"
)
else:
# Create new three band array
rawimg = np.zeros((y, x, 3), dtype=np.float32)
# Add xarray bands into three dimensional numpy array
for band, colour in enumerate(bands):
rawimg[:, :, band] = ds_i[colour].values
# Stretch contrast using percentile values
img_toshow = exposure.rescale_intensity(
rawimg, in_range=(p_low, p_high), out_range=(0, 1.0)
)
        # Optionally apply image processing
if image_proc_func:
img_toshow = image_proc_func(img_toshow).clip(0, 1)
array_list.append(img_toshow)
return array_list, p_low, p_high
|
85b05277cf874a32eb006045437eee8ae02ce0ef
| 3,641,023
|
import six
import binascii
def derive_key(secret, salt, iterations=1000, keylen=32):
"""
Computes a derived cryptographic key from a password according to PBKDF2.
.. seealso:: http://en.wikipedia.org/wiki/PBKDF2
:param secret: The secret.
:type secret: bytes or unicode
:param salt: The salt to be used.
:type salt: bytes or unicode
:param iterations: Number of iterations of derivation algorithm to run.
:type iterations: int
:param keylen: Length of the key to derive in bytes.
:type keylen: int
:return: The derived key in Base64 encoding.
:rtype: bytes
"""
assert(type(secret) in [six.text_type, six.binary_type])
assert(type(salt) in [six.text_type, six.binary_type])
assert(type(iterations) in six.integer_types)
assert(type(keylen) in six.integer_types)
if type(secret) == six.text_type:
secret = secret.encode('utf8')
if type(salt) == six.text_type:
salt = salt.encode('utf8')
key = pbkdf2(secret, salt, iterations, keylen)
return binascii.b2a_base64(key).strip()
|
04adaf71f3f9cf94e602029a242fedd037a40187
| 3,641,024
|
import imp
import os
def pseudo_import( pkg_name ):
"""
return a new module that contains the variables of pkg_name.__init__
"""
init = os.path.join( pkg_name, '__init__.py' )
    # keep only module-level dunder assignments (drops import lines and 'from foo import' lines)
    with open(init, 'r') as f:
        lines = f.readlines()
    lines = [line for line in lines if line.startswith('__')]
    code = '\n'.join(lines)
module = imp.new_module(pkg_name)
exec(code, module.__dict__)
return module
|
5982282d545c361d5198459073498ce5cba740a8
| 3,641,025
|
import torch
def calc_params_l2_norm(model: torch.nn.Module, bf16: bool):
"""Calculate l2 norm of parameters """
# args = get_args()
if not isinstance(model, list):
model = [model]
# Remove duplicate params.
params_data = []
for model_ in model:
for param in model_.parameters():
is_not_shared = param_is_not_shared(param)
is_not_tp_duplicate = parallel_state.param_is_not_tensor_parallel_duplicate(param)
if is_not_shared and is_not_tp_duplicate:
if bf16:
params_data.append(param.data.float())
else:
params_data.append(param.data)
# Calculate norm
dummy_overflow_buf = torch.cuda.IntTensor([0])
norm, _ = multi_tensor_applier(
amp_C.multi_tensor_l2norm, dummy_overflow_buf, [params_data], False # no per-parameter norm
)
norm_2 = norm * norm
# Sum across all model-parallel GPUs.
torch.distributed.all_reduce(
norm_2, op=torch.distributed.ReduceOp.SUM, group=parallel_state.get_model_parallel_group()
)
return norm_2.item() ** 0.5
|
399fc47296ac1ba3398dd5be834358f5ef50c9a4
| 3,641,026
|
def gradient_descent(f,init_val_dict, learning_rate=0.001, max_iter=1000, stop_stepsize=1e-6,return_history=False):
"""
Gradient Descent finding minimum for a
single expression
INPUTS
=======
f: expression
init_val_dict:dictionary containing initial value of variables
learning_rate: the step size between iterations
max_iter: maximum iteration before the algorithm stops
stop_stepsize: tolerance, the minimum threshold for absolute
difference of value of f from 0 for the algorithm to stop
return_history: default set to False. If True, return the trajectory
of the algorithm including the final answer
RETURNS
========
If return_history = False: variable values corresponding to the
minimum value of f
If return_history = True, return the trajectory
of the algorithm including the final answer
"""
f_grad = f.gradient_at(init_val_dict)
variables = [var for var in init_val_dict.keys()]
curr_point = np.array([v for k, v in init_val_dict.items()])
history = [curr_point.tolist()]
for i in range(max_iter):
        prev_point = curr_point
        prev_val_dict = {var: val for var, val in zip(variables, prev_point)}
        f_grad = f.gradient_at(prev_val_dict)
        curr_point = curr_point - learning_rate*f_grad
history.append(curr_point.tolist())
if np.linalg.norm(curr_point-prev_point, ord=2) < stop_stepsize: break
if return_history:
return history
return {var: val for var, val in zip(variables, curr_point)}
|
ba9edd1b41b7ac8e2e1d14b0a2958b7fe07bcf2a
| 3,641,027
|
from typing import Dict
from typing import Any
def gcp_iam_organization_role_permission_remove_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Remove permissions from custom organization role.
Args:
client (Client): GCP API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
return remove_custom_role_permissions(client_request_get_method=client.gcp_iam_organization_role_get_request,
client_request_update_method=client.gcp_iam_organization_role_update_request,
args=args)
|
42f56361d36b4fa0fbdea09031923235a0a3eb47
| 3,641,028
|
import socket
def get_hostname(ipv) -> str:
"""
Get hostname from IPv4 and IPv6.
:param ipv: ip address
:return: hostname
"""
return socket.gethostbyaddr(ipv)[0]
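
# Illustrative usage (added sketch): the reverse-DNS result depends on the host's
# resolver configuration, so the exact output varies by machine.
print(get_hostname("127.0.0.1"))  # commonly -> 'localhost'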
|
e7d660dc3c5e30def646e56fa628099e997145be
| 3,641,029
|
import torch
def load_model():
"""
Load CLIP model into memory.
Will download the model from the internet if it's not found in `WAGTAIL_CLIP_DOWNLOAD_PATH`.
"""
device = torch.device("cpu")
model, preprocess = clip.load("ViT-B/32", device, download_root=DOWNLOAD_PATH)
return model, device, preprocess
|
deebe0a5d5edb82b34d386771367ab65aaf6cb4b
| 3,641,030
|
from traceback import format_exception
def exception_response(ex: Exception):
"""Generate JSON payload from ApiException or Exception object."""
if not ex:
app.logger.error("Function received argument: None!")
return __make_response(
500,
{
"error" : "Unknown",
"details" : "api.exception_response() received: None!"
}
)
#
try:
if isinstance(ex, Exception):
# Member variable '.ApiException' reveals the type
if getattr(ex, 'ApiException', None):
app.logger.error(
"ApiException: '{}'"
.format(str(ex))
)
response_code = ex.code
response_payload = ex.to_dict()
else:
# Unexpected error, log trace by using logger.exception()
app.logger.exception(str(ex))
e = format_exception(type(ex), ex, ex.__traceback__)
response_payload = {
"error" : e[-1],
"trace" : "".join(e[1:-1])
}
response_code = 500
return __make_response(response_code, response_payload)
else:
return __make_response(
500,
                {
                    "error" : "Unknown",
"details" : "api.exception_response() received unsupported argument",
"type" : type(ex)
}
)
except Exception as e:
app.logger.exception("api.exception_response(): Internal Error!")
return __make_response(
500,
{
"error" : "Internal Error",
"details" : "api.exception_response() internal failure!"
}
)
|
d3b8d58d3214d3543cea5135a46455fc824b78d7
| 3,641,031
|
def check(pack, inst):
"""
A function to check if an instruction is present in the packet
Input:
- pack: The packet to be checked
- inst: The instruction
Output:
        Returns True if the instruction is present in the packet else False
"""
inst_key = getPacketKey(inst[0])
for key in inst_key:
if key:
if pack[key] == inst:
return True
return False
|
905fb2061b2fd5c129cdb0903ef84184c55844af
| 3,641,032
|
import os
def find_invalid_filenames(filenames, repository_root):
    """Find files that do not exist, are not in the repo or are directories.
Args:
filenames: list of filenames to check
repository_root: the absolute path of the repository's root.
Returns: A list of errors.
"""
errors = []
for filename in filenames:
if not os.path.abspath(filename).startswith(repository_root):
errors.append((filename, 'Error: File %s does not belong to '
'repository %s' % (filename, repository_root)))
if not os.path.exists(filename):
errors.append((filename,
'Error: File %s does not exist' % (filename,)))
if os.path.isdir(filename):
errors.append((filename, 'Error: %s is a directory. Directories are'
' not yet supported' % (filename,)))
return errors
|
c207442e08fa7a2188ab8e792ee76d596b4f19f0
| 3,641,033
|
def get_score(true, predicted):
"""Returns F1 per instance"""
numerator = len(set(predicted.tolist()).intersection(set(true.tolist())))
p = numerator / float(len(predicted))
r = numerator / float(len(true))
if r == 0.:
return 0.
return 2 * p * r / float(p + r)
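
# Illustrative usage (added sketch; assumes numpy arrays as inputs, matching the
# .tolist() calls above).
import numpy as np
true = np.array([1, 2, 3])
predicted = np.array([2, 3, 4])
print(round(get_score(true, predicted), 3))  # -> 0.667 (p = r = 2/3)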
|
115a4847e3d991f47415554401df25d72d74bb2f
| 3,641,034
|
def check_ratio_argv(_argv):
"""Return bool, check optional argument if images are searched by same ratio"""
    # [-1] To avoid checking 3 places at once, this argument is always last
return bool(_argv[-2] in ARGV["search by ratio"] and _argv[-1] in ARGV["search by ratio"])
|
23a677af4042fcc3616a25378af4e7721971de56
| 3,641,035
|
def binary_erosion(input, structure = None, iterations = 1, mask = None,
output = None, border_value = 0, origin = 0, brute_force = False):
"""Multi-dimensional binary erosion with the given structure.
An output array can optionally be provided. The origin parameter
controls the placement of the filter. If no structuring element is
provided an element is generated with a squared connectivity equal
to one. The border_value parameter gives the value of the array
outside the border. The erosion operation is repeated iterations
times. If iterations is less than 1, the erosion is repeated until
the result does not change anymore. If a mask is given, only those
elements with a true value at the corresponding mask element are
modified at each iteration.
"""
return _binary_erosion(input, structure, iterations, mask,
output, border_value, origin, 0, brute_force)
|
06de7142d7eca9ca3f5712f76318215950f4c710
| 3,641,036
|
def chord_to_freq_ratios(chord):
"""Return the frequency ratios of the pitches in <chord>
Args:
chord (tuple of ints): see <get_consonance_score>.
    Returns:
        (list of ints, int): the numerators and the common denominator.
"""
numerators = [JI_NUMS[i] for i in chord]
denoms = [JI_DENOMS[i] for i in chord]
denominator = get_lcm(denoms)
numerators = [(numerators[i] * denominator) // denoms[i] for i in \
range(len(numerators))]
return numerators, denominator
|
4811a6b69e6fd646adf5dc7e7a31a23be8fa6708
| 3,641,037
|
import torch
def proto_factor_cosine(local_proto, global_proto):
"""
[C, D]: D is 64 or 4
"""
# factor = 1
norm_local = torch.norm(local_proto, dim=-1, keepdim=False)
norm_global = torch.norm(global_proto, dim=-1, keepdim=False) # [C]
factor_refined = torch.sum(local_proto*global_proto, dim=-1, keepdim=False)/(norm_local*norm_global+1e-6)
return factor_refined
|
6e9f7540ec1339efe3961b103633f5175cb38c49
| 3,641,038
|
def urlparse(d, keys=None):
"""Returns a copy of the given dictionary with url values parsed."""
d = d.copy()
if keys is None:
keys = d.keys()
for key in keys:
d[key] = _urlparse(d[key])
return d
|
91cd40ef294443431a772ec14ef4aa54dab34ea8
| 3,641,039
|
import numpy
import math
def doFDR(pvalues,
vlambda=numpy.arange(0,0.95,0.05),
pi0_method="smoother",
fdr_level=None,
robust=False,
smooth_df = 3,
smooth_log_pi0 = False):
"""modeled after code taken from http://genomics.princeton.edu/storeylab/qvalue/linux.html.
I did not like the error handling so I translated most to python.
Compute FDR after method by Storey et al. (2002).
"""
if min(pvalues) < 0 or max(pvalues) > 1:
raise ValueError( "p-values out of range" )
if len(vlambda) > 1 and len(vlambda) < 4:
raise ValueError(" If length of vlambda greater than 1, you need at least 4 values." )
if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
raise ValueError( "vlambda must be within [0, 1).")
m = len(pvalues)
# these next few functions are the various ways to estimate pi0
if len(vlambda)==1:
vlambda = vlambda[0]
if vlambda < 0 or vlambda >=1 :
raise ValueError( "vlambda must be within [0, 1).")
pi0 = numpy.mean( [ x >= vlambda for x in pvalues ] ) / (1.0 - vlambda)
pi0 = min(pi0, 1.0)
R.assign( "pi0", pi0)
else:
        pi0 = numpy.zeros( len(vlambda), numpy.float64 )
for i in range( len(vlambda) ):
pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 -vlambda[i] )
R.assign( "pi0", pi0)
R.assign( "vlambda", vlambda)
if pi0_method=="smoother":
if smooth_log_pi0:
pi0 = math.log(pi0)
R.assign( "smooth_df", smooth_df)
spi0 = R("""spi0 <- smooth.spline(vlambda,pi0, df = smooth_df)""")
pi0 = R("""pi0 <- predict( spi0, x = max(vlambda) )$y""")
if smooth_log_pi0:
pi0 = math.exp(pi0)
elif pi0_method=="bootstrap":
minpi0 = min(pi0)
            mse = numpy.zeros( len(vlambda), numpy.float64 )
            pi0_boot = numpy.zeros( len(vlambda), numpy.float64 )
R.assign( "pvalues", pvalues)
pi0 = R("""
m <- length(pvalues)
minpi0 <- min(pi0)
mse <- rep(0,length(vlambda))
pi0_boot <- rep(0,length(vlambda))
for(i in 1:100)
{
pvalues_boot <- sample(pvalues,size=m,replace=TRUE)
for(i in 1:length(vlambda))
{
pi0_boot[i] <- mean(pvalues_boot>vlambda[i])/(1-vlambda[i])
}
mse <- mse + (pi0_boot-minpi0)^2
}
pi0 <- min(pi0[mse==min(mse)])""")
else:
raise ValueError( "'pi0_method' must be one of 'smoother' or 'bootstrap'.")
pi0 = min(pi0,1.0)
R.assign( "pi0", pi0 )
if pi0 <= 0:
raise ValueError( "The estimated pi0 <= 0. Check that you have valid p-values or use another vlambda method." )
if fdr_level != None and (fdr_level <= 0 or fdr_level > 1):
raise ValueError( "'fdr_level' must be within (0, 1].")
# The estimated q-values calculated here
#u = numpy.argsort( p )
# change by Alan
# ranking function which returns number of observations less than or equal
R.assign( "pvalues", pvalues )
R.assign( "robust", robust )
qvalues = R("""u <- order(pvalues)
qvalues.rank <- function(x)
{
idx <- sort.list(x)
fc <- factor(x)
nl <- length(levels(fc))
bin <- as.integer(fc)
tbl <- tabulate(bin)
cs <- cumsum(tbl)
tbl <- rep(cs, tbl)
tbl[idx] <- tbl
return(tbl)
}
v <- qvalues.rank(pvalues)
m <- length(pvalues)
qvalues <- pi0 * m * pvalues / v
if(robust)
{
qvalues <- pi0*m*pvalues/(v*(1-(1-pvalues)^m))
}
qvalues[u[m]] <- min(qvalues[u[m]],1)
for(i in (m-1):1)
{
qvalues[u[i]] <- min(qvalues[u[i]],qvalues[u[i+1]],1)
}
qvalues
""")
result = FDRResult()
result.mQValues = qvalues
if fdr_level != None:
result.mPassed = [ x <= fdr_level for x in result.mQValues ]
else:
result.mPassed = [ False for x in result.mQValues ]
result.mPValues = pvalues
result.mPi0 = pi0
result.mLambda = vlambda
return result
|
17919d989ca07b4fb87930141cef3ce392b66ad4
| 3,641,040
|
def sequence(ini, end, step=1):
""" Create a sequence from ini to end by step. Similar to
ee.List.sequence, but if end != last item then adds the end to the end
    of the resulting list
"""
end = ee.Number(end)
if step == 0:
step = 1
amplitude = end.subtract(ini)
mod = ee.Number(amplitude).mod(step)
seq = ee.List.sequence(ini, end, step)
condition = mod.neq(0)
final = ee.Algorithms.If(condition, seq.add(end), seq)
return ee.List(final)
|
cca23fd00ddf1237a95a53b7f6a3f1bc264f84da
| 3,641,041
|
def kBET_single(
matrix,
batch,
k0=10,
knn=None,
verbose=False
):
"""
params:
        matrix: expression matrix (at the moment a PCA matrix, so do.pca is set to FALSE)
        batch: series or list of batch assignments
returns:
kBET observed rejection rate
"""
anndata2ri.activate()
ro.r("library(kBET)")
if verbose:
print("importing expression matrix")
ro.globalenv['data_mtrx'] = matrix
ro.globalenv['batch'] = batch
if verbose:
print("kBET estimation")
ro.globalenv['knn_graph'] = knn
ro.globalenv['k0'] = k0
ro.r(
"batch.estimate <- kBET("
" data_mtrx,"
" batch,"
" knn=knn_graph,"
" k0=k0,"
" plot=FALSE,"
" do.pca=FALSE,"
" heuristic=FALSE,"
" adapt=FALSE,"
f" verbose={str(verbose).upper()}"
")"
)
try:
score = ro.r("batch.estimate$summary$kBET.observed")[0]
except rpy2.rinterface_lib.embedded.RRuntimeError:
score = np.nan
anndata2ri.deactivate()
return score
|
42ecfb9dee65806a25e92764ea7c1ef54316be02
| 3,641,042
|
from typing import Tuple
import numpy
def get_eye_center_position(face: Face) -> Tuple[numpy.int64, numpy.int64]:
"""Get the center position between the eyes of the given face.
Args:
face (:class:`~.types.Face`):
The face to extract the center position from.
Returns:
Tuple[:data:`numpy.int64`, :data:`numpy.int64`]:
The position directly between the eyes of the face
"""
(left_start, left_end), (right_start, right_end) = get_eye_positions(face)
return (left_start + right_start) // 2, (left_end + right_end) // 2
|
562dca1971996e8d7497d146aa7ccefcb3ce8006
| 3,641,043
|
def esc_quotes(strng):
""" Return the input string with single and double quotes escaped out.
"""
return strng.replace('"', '\\"').replace("'", "\\'")
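
# Illustrative usage (added sketch): both quote styles get backslash-escaped.
print(esc_quotes('He said "hi" and \'bye\''))  # -> He said \"hi\" and \'bye\'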
|
25956257e06901d4f59088dd2c17ddd5ea620407
| 3,641,044
|
def gen_fake_game_data():
"""Creates an example Game object"""
game = Game(
gameday_id='2014/04/04/atlmlb-wasmlb-1',
venue='Nationals Park',
start_time=parser.parse('2014-04-04T13:05:00-0400'),
game_data_directory='/components/game/mlb/year_2014/month_04/day_04/gid_2014_04_04_atlmlb_wasmlb_1',
home_name_abbrev='WSH',
home_team_city='Washington',
home_team_name='Nationals',
away_name_abbrev='ATL',
away_team_city='Atlanta',
away_team_name='Braves',
home_team_runs=1,
away_team_runs=2
)
return game
|
415c85eb0ba4e03c135ab56791177ebb634ea5e3
| 3,641,045
|
def unsafe_load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve all tags, even those known to be
unsafe on untrusted input.
"""
return load_all(stream, UnsafeLoader)
|
f6307614e776b221ec22b063680f34f5e2ddf789
| 3,641,046
|
import os
def random_name(url, type):
"""
    Randomly rename files or folders (to avoid failures caused by renaming to an existing name).
    The concrete type handled is determined by the given file type `type`; it is assigned
    automatically once the user calls the corresponding method.
    :param url: path of the folder passed in by the user
    :return: a list of the names of all files or folders in the folder before renaming
"""
if not os.path.exists(url):
url=resource_manager.Properties.getRootPath() + resource_manager.getSeparator() +url
doc = os.listdir(url)
if type == 'D':
con=config_parser.ConfigParser('D')
else:
con=config_parser.ConfigParser('F')
for files in doc:
filetype = os.path.splitext(files)[1]
if os.path.exists(url):
old=url+resource_manager.getSeparator()+files
else:
old=resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+url+resource_manager.getSeparator()+files
if os.path.isdir(old) and type=='D':
random = random_string()
New = url + resource_manager.getSeparator() + random+ filetype
            os.rename(old, New)
_store_(con,url, files, random + filetype)
elif os.path.isfile(old) and type=='F':
random = random_string()
if os.path.exists(url):
New = url + resource_manager.getSeparator() + random+ filetype
else:
New = url + resource_manager.getSeparator() + random
            os.rename(old, New)
_store_(con,url, files, random + filetype)
con.save()
    return doc
|
d251765704be00291361085a6665858b09543fc8
| 3,641,047
|
import sys
from numpy import nan as NaN, inf as Inf, arange, isscalar, asarray, array
def peakdet(v, delta, x = None):
"""
Converted from MATLAB script at http://billauer.co.il/peakdet.html
"""
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx-delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn+delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab)
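
# Illustrative usage (added sketch; relies on the numpy names imported above).
# A local extremum must rise/fall by at least `delta` before it is recorded.
maxtab, mintab = peakdet([0, 1, 0, 2, 0, 3, 0], 0.5)
print(maxtab)  # peaks at indices 1, 3, 5 with values 1, 2, 3
print(mintab)  # troughs at indices 2 and 4 with value 0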
|
fdd21aeded390f5b8a71823f195548f8977aefe0
| 3,641,048
|
def jaccard(set1, set2):
"""
computes the jaccard coefficient between two sets
@param set1: first set
@param set2: second set
@return: the jaccard coefficient
"""
if len(set1) == 0 or len(set2) == 0:
return 0
inter = len(set1.intersection(set2))
return inter / (len(set1) + len(set2) - inter)
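
# Illustrative usage (added sketch): |{2, 3}| / |{1, 2, 3, 4}| = 0.5
print(jaccard({1, 2, 3}, {2, 3, 4}))  # -> 0.5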
|
9a99c6c5251bdb7cb10f6d3088ac6ac52bb02a55
| 3,641,049
|
from typing import Dict
from typing import Any
import json
def create_cluster_custom_object(group: str, version: str, plural: str,
resource: Dict[str, Any] = None,
resource_as_yaml_file: str = None,
secrets: Secrets = None) -> Dict[str, Any]:
"""
    Create a cluster-scoped custom object.
Read more about custom resources here:
https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
""" # noqa: E501
api = client.CustomObjectsApi(create_k8s_api_client(secrets))
body = load_body(resource, resource_as_yaml_file)
try:
r = api.create_cluster_custom_object(
group, version, plural, body, _preload_content=False
)
return json.loads(r.data)
except ApiException as x:
if x.status == 409:
logger.debug(
"Custom resource object {}/{} already exists".format(
group, version))
return json.loads(x.body)
else:
raise ActivityFailed(
"Failed to create custom resource object: '{}' {}".format(
x.reason, x.body))
|
38b24e9bbb0c5e2e24789b1d06984b338582b7a2
| 3,641,050
|
import os
import sys
import pprint
import tempfile
import yaml
def normalize_flags(flags, user_config):
"""Combine the argparse flags and user configuration together.
Args:
flags (argparse.Namespace): The flags parsed from sys.argv
user_config (dict): The user configuration taken from
~/.artman/config.yaml.
Returns:
tuple (str, dict): 2-tuple containing:
- pipeline name
- pipeline arguments
"""
if flags.root_dir:
flags.root_dir = os.path.abspath(flags.root_dir)
flags.config = os.path.join(flags.root_dir, flags.config)
else:
flags.root_dir = os.getcwd()
flags.config = os.path.abspath(flags.config)
root_dir = flags.root_dir
flags.output_dir = os.path.abspath(flags.output_dir)
pipeline_args = {}
# Determine logging verbosity and then set up logging.
verbosity = INFO
if getattr(flags, 'verbosity', None):
verbosity = getattr(flags, 'verbosity')
setup_logging(verbosity)
# Save local paths, if applicable.
# This allows the user to override the path to api-client-staging or
# toolkit on his or her machine.
pipeline_args['root_dir'] = root_dir
pipeline_args['toolkit_path'] = user_config.local.toolkit
pipeline_args['generator_args'] = flags.generator_args
artman_config_path = flags.config
if not os.path.isfile(artman_config_path):
logger.error(
'Artman config file `%s` doesn\'t exist.' % artman_config_path)
sys.exit(96)
try:
artifact_config = loader.load_artifact_config(
artman_config_path, flags.artifact_name, flags.aspect)
except ValueError as ve:
logger.error('Artifact config loading failed with `%s`' % ve)
sys.exit(96)
legacy_config_dict = converter.convert_to_legacy_config_dict(
artifact_config, root_dir, flags.output_dir)
logger.debug('Below is the legacy config after conversion:\n%s' %
pprint.pformat(legacy_config_dict))
language = Artifact.Language.Name(
artifact_config.language).lower()
# Set the pipeline
artifact_type = artifact_config.type
pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
pipeline_args['aspect'] = Artifact.Aspect.Name(artifact_config.aspect)
if artifact_type == Artifact.GAPIC_ONLY:
pipeline_name = 'GapicOnlyClientPipeline'
pipeline_args['language'] = language
elif artifact_type == Artifact.GAPIC:
pipeline_name = 'GapicClientPipeline'
pipeline_args['language'] = language
elif artifact_type == Artifact.DISCOGAPIC:
pipeline_name = 'DiscoGapicClientPipeline'
pipeline_args['language'] = language
pipeline_args['discovery_doc'] = artifact_config.discovery_doc
elif artifact_type == Artifact.GRPC:
pipeline_name = 'GrpcClientPipeline'
pipeline_args['language'] = language
elif artifact_type == Artifact.GAPIC_CONFIG:
pipeline_name = 'GapicConfigPipeline'
elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
pipeline_name = 'DiscoGapicConfigPipeline'
pipeline_args['discovery_doc'] = artifact_config.discovery_doc
if os.path.abspath(flags.output_dir) != os.path.abspath(DEFAULT_OUTPUT_DIR):
logger.warning("`output_dir` is ignored in DiscoGapicConfigGen. "
+ "Yamls are saved at the path specified by `gapic_yaml`.")
pipeline_args['output_dir'] = tempfile.mkdtemp()
elif artifact_type == Artifact.PROTOBUF:
pipeline_name = 'ProtoClientPipeline'
pipeline_args['language'] = language
else:
raise ValueError('Unrecognized artifact.')
# Parse out the full configuration.
config_args = config_util.load_config_spec(legacy_config_dict, language)
config_args.update(pipeline_args)
pipeline_args = config_args
# Print out the final arguments to stdout, to help the user with
# possible debugging.
pipeline_args_repr = yaml.dump(
pipeline_args,
block_seq_indent=2,
default_flow_style=False,
indent=2, )
logger.info('Final args:')
for line in pipeline_args_repr.split('\n'):
if 'token' in line:
index = line.index(':')
line = line[:index + 2] + '<< REDACTED >>'
logger.info(' {0}'.format(line))
# Return the final arguments.
return pipeline_name, pipeline_args
|
88609f2307f9147387a0419292b427e46c08d7e5
| 3,641,051
|
import json
def get_fast_annotations():
"""
Title: Get Fast Annotations
Description : Get annotations for a list of sequences in a compressed form
URL: /sequences/get_fast_annotations
Method: GET
URL Params:
Data Params: JSON
{
"sequences": list of str ('ACGT')
the list of sequence strings to query the database (can be any length). Alternatively, can be list of SILVA IDs (in case dbname is set to 'silva')
"region": int (optional)
the region id (default=1 which is V4 515F 806R)
"get_term_info": bool (optional)
True (default) to return also information about each term, False not to return
"get_taxonomy": bool (optional)
True (default) to get the dbbact assigned taxonomy for each query sequence
"get_parents": bool (optional)
            True (default) to get the parent terms for each annotation ontology term, False to just get the annotation terms
"get_all_exp_annotations": bool (optional)
True (default) to get all the annotations from each experiment containing one annotation with the sequence, False to just get the annotations with the sequence
"use_sequence_translator": bool (optional)
            True (default) to also get annotations for dbbact sequences from other regions linked to the query sequences using the wholeseqdb (i.e. SILVA)
            False to get just annotations for dbbact sequences that exactly match the query sequences
"dbname": str, optional
If supplied (i.e. 'silva'), assume sequence is the identifier in dbname (i.e. 'FJ978486' for 'silva' instead of acgt sequence)
Success Response:
Code : 200
Content :
{
annotations: dict of (annotationid: details):
annotationid : the annotationid used in seqannotations
details:
{
"annotationid" : int
the id of the annotation
"user" : str
name of the user who added this annotation
(userName from UsersTable)
"addedDate" : str (DD-MM-YYYY HH:MM:SS)
date when the annotation was added
(addedDate from CurationsTable)
"expid" : int
the ID of the experiment from which this annotation originated
(uniqueId from ExperimentsTable)
(see Query Experiment)
"currType" : str
                curation type (differential expression/contaminant/etc.)
(description from CurationTypesTable)
"method" : str
                The method used to detect this behavior (i.e. observation/ranksum/clustering/etc.)
(description from MethodTypesTable)
"agentType" : str
Name of the program which submitted this annotation (i.e. heatsequer)
(description from AgentTypesTable)
"description" : str
                Free text describing this annotation (i.e. "lower in green tomatoes compared to red ones")
"private" : bool
True if the curation is private, False if not
"CurationList" : list of
{
"detail" : str
the type of detail (i.e. ALL/HIGH/LOW)
(description from CurationDetailsTypeTable)
"term" : str
the ontology term for this detail (i.e. feces/ibd/homo sapiens)
(description from OntologyTable)
}
"parents" : list of tuples (type, list of terms)
{
type : type of the annotation type ('high'/'low','all')
list of terms - list of ontology terms which are annotated or parents of annotated ontology term
}
}
seqannotations : list of (seqid, annotationids):
{
seqpos : position of the sequence in the list
annotationids : list of int
the annotationsid associated with this sequence
}
term_info : dict of {term, dict}:
            Information about each term which appears in the annotation parents. Key is the ontology term. The value dict is:
{
'total_annotations' : int
total number of annotations where this term appears (as a parent)
'total_sequences' : int
total number of sequences in annotations where this term appears (as a parent)
}
taxonomy : list of str
The dbbact assigned taxonomy for each sequence (ordered in the same order as query sequences)
}
Details :
Return a dict of details for all the annotations associated with at least one of the sequences used as input, and a list of seqpos and the associated annotationids describing it
(i.e. a sparse representation of the annotations vector for the input sequence list)
Validation:
If an annotation is private, return it only if user is authenticated and created the curation. If user not authenticated, do not return it in the list
If annotation is not private, return it (no need for authentication)
"""
debug(3, 'get_fast_annotations', request)
cfunc = get_fast_annotations
alldat = request.get_json()
if alldat is None:
return(getdoc(cfunc))
sequences = alldat.get('sequences')
if sequences is None:
return('sequences parameter missing', 400)
region = alldat.get('region')
get_term_info = alldat.get('get_term_info', True)
get_taxonomy = alldat.get('get_taxonomy', True)
get_parents = alldat.get('get_parents', True)
use_sequence_translator = alldat.get('use_sequence_translator', True)
dbname = alldat.get('dbname', None)
if dbname is not None:
use_sequence_translator = True
get_all_exp_annotations = alldat.get('get_all_exp_annotations', True)
if use_sequence_translator:
seq_translate_api = g.seq_translate_api
else:
seq_translate_api = None
err, annotations, seqannotations, term_info, taxonomy = dbannotations.GetFastAnnotations(g.con, g.cur, sequences, region=region, userid=current_user.user_id, get_term_info=get_term_info, get_taxonomy=get_taxonomy, get_parents=get_parents, get_all_exp_annotations=get_all_exp_annotations, seq_translate_api=seq_translate_api, dbname=dbname)
if err:
errmsg = 'error encountered while getting the fast annotations: %s' % err
debug(6, errmsg)
return(errmsg, 400)
res = {'annotations': annotations, 'seqannotations': seqannotations, 'term_info': term_info, 'taxonomy': taxonomy}
debug(3, 'returning fast annotations for %d original sequences. returning %s annotations' % (len(sequences), len(res['annotations'])))
return json.dumps(res)
|
fe8c043128c80c885f0e6d424fb8a6e8eafe171b
| 3,641,052
|
def to_numeric(arg):
"""
Converts a string either to int or to float.
This is important, because e.g. {"!==": [{"+": "0"}, 0.0]}
"""
if isinstance(arg, str):
if '.' in arg:
return float(arg)
else:
return int(arg)
return arg
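
# Illustrative usage (added sketch): strings containing '.' become floats, other
# strings become ints, and non-string arguments pass through unchanged.
print(to_numeric("3.5"), to_numeric("7"), to_numeric(2))  # -> 3.5 7 2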
|
e82746e1c5c84b57e59086030ff7b1e93c89a8ec
| 3,641,053
|
def get_kde_polyfit_estimator(samples, N=100000, bandwidth=200, maxlength=150000, points=500, degree=50):
    """Return a polynomial-fit approximation of the KDE estimator; both the polyfit version and the original estimator are returned."""
f = get_kde_estimator(samples, N, bandwidth)
x = np.linspace(1, maxlength, points)
z = np.polyfit(x, f(x), degree)
return (lambda x: np.where(x<=maxlength, np.poly1d(z)(x), np.poly1d(z)(maxlength))), f
|
72801a8be25826ef42ab700e5cae742ed59fcea4
| 3,641,054
|
def read_tracker(file_name):
    """
    Read an integer value from the first line of the given tracker file.
    """
with open(file_name, "r") as f:
return int(f.readline())
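
# Illustrative usage (added sketch, using a temporary file so it is self-contained).
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("42\n")

print(read_tracker(tmp.name))  # -> 42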
|
9d1f43b8f833b5ca86c247760ae79e18f33aa019
| 3,641,055
|
def MixR2VaporPress(qv, p):
"""Return Vapor Pressure given Mixing Ratio and Pressure
INPUTS
    qv (kg kg^-1) Water vapor mixing ratio
p (Pa) Ambient pressure
RETURNS
e (Pa) Water vapor pressure
"""
return qv * p / (Epsilon + qv)
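
# Illustrative usage (added sketch): `Epsilon` is a module-level constant not shown
# in this snippet; it is assumed here to be Rd/Rv, approximately 0.622.
Epsilon = 0.622
print(round(MixR2VaporPress(0.01, 100000.0), 1))  # -> 1582.3 (Pa)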
|
71d7ee564d07292ef4831bf39e54f51071b7d7dd
| 3,641,056
|
def sigmoid_derivative(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z+1e-8))
'''
s = np.zeros_like(Z)
for i in range(len(Z)):
if Z[i] >= 0:
t = np.exp(-Z[i])
s[i] = 1 / (1 + t)
else:
# if x is less than zero then z will be small, denom can't be
# zero because it's 1+z.
t = np.exp(Z[i])
s[i] = t/(1+t)
'''
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ
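
# Illustrative usage (added sketch; assumes numpy as np, already used above).
# At Z = 0 the sigmoid is ~0.5, so the gradient scales dA by roughly 0.25.
import numpy as np
dZ = sigmoid_derivative(np.array([1.0]), np.array([0.0]))
print(dZ)  # -> approximately [0.25]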
|
6b45e722bc48b4cf60170de267c235d0943d022c
| 3,641,057
|
from typing import Counter
def part2(lines, rounds=100):
"""
>>> data = load_example(__file__, '24')
>>> part2(data, 0)
10
>>> part2(data, 1)
15
>>> part2(data, 2)
12
>>> part2(data, 3)
25
>>> part2(data, 4)
14
>>> part2(data, 5)
23
>>> part2(data, 6)
28
>>> part2(data, 7)
41
>>> part2(data, 8)
37
>>> part2(data, 9)
49
>>> part2(data, 10)
37
>>> part2(data, 20)
132
>>> part2(data, 30)
259
>>> part2(data, 40)
406
>>> part2(data, 50)
566
>>> part2(data, 60)
788
>>> part2(data, 70)
1106
>>> part2(data, 80)
1373
>>> part2(data, 90)
1844
>>> part2(data, 100)
2208
"""
endpoints = prepare_endpoints(lines)
real_endpoints = {ep: True for ep, count in Counter(endpoints).items() if count % 2 == 1}
return simulate(real_endpoints, rounds)
|
ff33f249628b1dec33f7ea886f5423012914fc4c
| 3,641,058
|
def binary_search(
items,
target_key,
target_key_hi=None,
key=None,
lo=None,
hi=None,
target=Target.any,
):
"""
Search for a target key using binary search and return (found?,
index / range).
The returned index / range is as follows according to the desired
target:
* Target.lo: lo
* Target.hi: hi
* Target.any: Any `x` such that `lo <= x < hi`
* Target.range: (lo, hi)
Where:
* `lo` is the smallest index s.t. `target_key <= key(items[lo])`
* `hi` is the smallest index s.t. `target_key_hi < key(items[hi])`
Thus, the slice of items matching the target key(s) is `[lo:hi]`.
Arguments:
* items: Indexable such that its keys are sorted.
* target_key: What to search for. Keys must be orderable.
* key: Key function taking arguments (index, item) that returns the
sort key for the item at the given index. (This allows one to
have a separate array of keys.) If `None`, items are their own
keys.
* lo: Initial lower bound index (inclusive)
* hi: Initial upper bound index (exclusive)
* target: What in the items to target: existence, low index, high
index, or the whole range. See `Target`.
* target_key_hi: If searching for a range, search for target keys k
in `target_key <= k < target_key_hi`. (Ignored otherwise.)
"""
if target == Target.range:
if target_key_hi is None:
target_key_hi = target_key
_, lo_idx, hi_le, hi_gt = _binary_search(
items, target_key, target_key_hi, key, lo, hi, Target.lo)
_, hi_idx, _, _ = _binary_search(
items, target_key_hi, None, key, hi_le, hi_gt, Target.hi)
return (lo_idx < hi_idx, (lo_idx, hi_idx))
else:
found, idx, _, _ = _binary_search(
items, target_key, None, key, lo, hi, target)
return (found, idx)
|
c0539322c0a2dd9cd3b53fa65bf6ecf28840546e
| 3,641,059
|
def func_real_dirty_gauss(dirty_beam):
    """Returns a parametric model for the map of a point source,
consisting of the interpolated dirty beam along the y-axis
and a sinusoid with gaussian envelope along the x-axis.
This function is a wrapper that defines the interpolated
dirty beam.
Parameters
----------
dirty_beam : scipy.interpolate.interp1d
Interpolation function that takes as an argument el = sin(za)
and outputs an np.ndarray[nel, nra] that represents the dirty
beam evaluated at the same right ascension as the map.
Returns
-------
real_dirty_gauss : np.ndarray[nra*ndec]
Model prediction for the map of the point source.
"""
def real_dirty_gauss(
coord, peak_amplitude, centroid_x, centroid_y, fwhm_x, offset, fringe_rate
    ):
        """Returns a parametric model for the map of a point source,
consisting of the interpolated dirty beam along the y-axis
and a sinusoid with gaussian envelope along the x-axis.
        Parameters
        ----------
coord : [ra, dec]
Tuple containing the right ascension and declination, each
of which is coordinate vectors of length nra and ndec, respectively.
peak_amplitude : float
Model parameter. Normalization of the gaussian
in the right ascension direction.
centroid_x : float
Model parameter. Centroid of the gaussian in degrees in the
right ascension direction.
centroid_y : float
Model parameter. Centroid of the dirty beam in degrees in the
declination direction.
fwhm_x : float
Model parameter. Full width at half maximum of the gaussian
in degrees in the right ascension direction.
offset : float
Model parameter. Constant background value of the map.
fringe_rate : float
Model parameter. Frequency of the sinusoid.
Returns
-------
model : np.ndarray[nra*ndec]
Model prediction for the map of the point source.
"""
x, y = coord
model = (
peak_amplitude
* np.exp(
-4.0 * np.log(2.0) * ((x[:, np.newaxis] - centroid_x) / fwhm_x) ** 2
)
* dirty_beam(y - _dec_to_el(centroid_y))
) + offset
phase = np.exp(
2.0j
* np.pi
* np.cos(np.radians(centroid_y))
* np.sin(-np.radians(x - centroid_x))
* fringe_rate
)
return (model * phase[:, np.newaxis]).real.ravel()
return real_dirty_gauss
|
2a0da2176f67a9cd9dd31af48b987f7f3d9d8342
| 3,641,060
|
from collections import deque
def shortest_path(graph, a_node, b_node):
    """Breadth-first search for the shortest path from a_node to b_node.
    BFS traversal based on code by Eryk Kopczynski. Each node stores its
    predecessor, so the path is reconstructed directly, avoiding the
    nested-list flatten step noted by Bruce Wernick.
    Returns the list of nodes from a_node to b_node, or None if b_node
    is unreachable from a_node.
    """
    front = deque()
    front.append(a_node)
    came_from = {a_node: None}
    while front:
        cp = front.popleft()
        for neighbor in graph.neighbors(cp):
            if neighbor not in came_from:
                front.append(neighbor)
                came_from[neighbor] = cp
    if b_node not in came_from:
        return None
    # Walk predecessors back from b_node, then reverse into a -> b order.
    path = []
    node = b_node
    while node is not None:
        path.append(node)
        node = came_from[node]
    return path[::-1]
|
6c2f04282eac52fcad7c4b15fa23753373940d6b
| 3,641,061
|
def rank_compute(prediction, att_plt, key, byte):
"""
- prediction : predictions of the NN
- att_plt : plaintext of the attack traces
- key : Key used during encryption
- byte : byte to attack
"""
(nb_trs, nb_hyp) = prediction.shape
idx_min = nb_trs
min_rk = 255
key_log_prob = np.zeros(nb_hyp)
rank_evol = np.full(nb_trs,255)
prediction = np.log(prediction+1e-40)
for i in range(nb_trs):
for k in range(nb_hyp):
key_log_prob[k] += prediction[i,AES_Sbox[k^att_plt[i,byte]]] #Computes the hypothesis values
rank_evol[i] = rk_key(key_log_prob,key)
return rank_evol
|
a21a5fbf5db64cab1fe87ed37c4a9770f5ccd9f8
| 3,641,062
|
def purelin(n):
"""
    Linear transfer function: returns its input unchanged.
"""
return n
|
493c4ae481702194fe32eec44e589e5d15614b99
| 3,641,063
|
def arrayDimension(inputArray):
"""Returns the dimension of a list-formatted array.
The dimension of the array is defined as the number of nested lists.
"""
return len(arraySize(inputArray))
|
4d98b76ce6f5aeae9a8171918a78c31ef75dd33e
| 3,641,064
|
from datetime import datetime
def process_data(records, root) -> bool:
"""Creates the xml file that will be imported in pure."""
for record in records:
item_metadata = record["metadata"]
# If the rdm record has a uuid means that it was imported from pure - REVIEW
if "uuid" in item_metadata:
continue
# Checks if the record was created today
if record["created"] <= datetime.today().strftime("%Y-%m-%d"):
return False
# Adds fields to the created xml element
populate_xml(item_metadata, record, root)
return True
|
3e78792c2f147cb56ac502d783fb2a1e3346be53
| 3,641,065
|
def hopcroft(G, S):
"""Hopcroft's algorthm for computing state equivalence.
Parameters
----------
G : fully deterministic graph
S : iterable
one half of the initial (bi)partition
Returns
-------
Partition
"""
sigma = alphabet(G)
partition = Partition(list(G))
p1, p2 = partition.split(S)[0]
smaller = partition.select_smaller(p1, p2)
wait_set = set()
for a in sigma:
wait_set.add((smaller, a))
while wait_set:
p, a = wait_set.pop()
inv_a_p = G.in_edges(partition.parts[p], data="label")
inv_a_p = (p for (p, q, label) in inv_a_p if label == a)
for (p1, p2) in partition.split(inv_a_p):
for b in sigma:
if (p1, b) in wait_set:
wait_set.add((p2, b))
else:
smaller = partition.select_smaller(p1, p2)
wait_set.add((smaller, b))
return partition
|
a841dc0a77bb82a27937b20a7335c399ff9b53f5
| 3,641,066
|
import logging
import numpy as np
from scipy import sparse
import scipy.sparse.linalg as splin
import matplotlib.pyplot as plt
def TVRegDiff(data, itern, alph, u0=None, scale='small', ep=1e-6, dx=None,
plotflag=_has_matplotlib, diagflag=True, precondflag=True,
diffkernel='abs', cgtol=1e-4, cgmaxit=100):
"""
    Estimate derivatives from noisy data using the Total
Variation Regularized Numerical Differentiation (TVDiff)
algorithm.
Parameters
----------
data : ndarray
One-dimensional array containing series data to be
differentiated.
itern : int
Number of iterations to run the main loop. A stopping
condition based on the norm of the gradient vector g
below would be an easy modification. No default value.
alph : float
Regularization parameter. This is the main parameter
to fiddle with. Start by varying by orders of
magnitude until reasonable results are obtained. A
        value to the nearest power of 10 is usually adequate.
        No default value. Higher values increase
        regularization strength and improve conditioning.
u0 : ndarray, optional
Initialization of the iteration. Default value is the
naive derivative (without scaling), of appropriate
length (this being different for the two methods).
Although the solution is theoretically independent of
the initialization, a poor choice can exacerbate
conditioning issues when the linear system is solved.
    scale : {'large', 'small'}, str, optional (case insensitive)
        Default is 'small'. 'small' has somewhat better boundary
        behavior, but becomes unwieldy for data larger than
1000 entries or so. 'large' has simpler numerics but
is more efficient for large-scale problems. 'large' is
more readily modified for higher-order derivatives,
since the implicit differentiation matrix is square.
ep : float, optional
Parameter for avoiding division by zero. Default value
is 1e-6. Results should not be very sensitive to the
value. Larger values improve conditioning and
therefore speed, while smaller values give more
accurate results with sharper jumps.
dx : float, optional
Grid spacing, used in the definition of the derivative
operators. Default is the reciprocal of the data size.
    plotflag : bool, optional
        Flag whether to display a plot at each iteration.
        Defaults to True when matplotlib is available. Useful, but adds
        significant running time.
diagflag : bool, optional
Flag whether to display diagnostics at each
iteration. Default is True. Useful for diagnosing
preconditioning problems. When tolerance is not met,
an early iterate being best is more worrying than a
large relative residual.
precondflag: bool, optional
Flag whether to use a preconditioner for conjugate gradient solution.
Default is True. While in principle it should speed things up,
sometimes the preconditioner can cause convergence problems instead,
and should be turned off. Note that this mostly makes sense for 'small'
scale problems; for 'large' ones, the improved preconditioner is one
of the main features of the algorithms and turning it off defeats the
point.
diffkernel: str, optional
Kernel to use in the integral to smooth the derivative. By default it's
the absolute value, |u'| (value: "abs"). However, it can be changed to
being the square, (u')^2 (value: "sq"). The latter produces smoother
        derivatives, whereas the absolute value tends to make them more blocky.
Default is abs.
cgtol: float, optional
Tolerance to use in conjugate gradient optimisation. Default is 1e-4.
cgmaxit: int, optional
Maximum number of iterations to use in conjugate gradient optimisation.
        Default is 100.
Returns
-------
u : ndarray
Estimate of the regularized derivative of data. Due to
different grid assumptions, length(u) = length(data) + 1
if scale = 'small', otherwise length(u) = length(data).
"""
# Make sure we have a column vector
data = np.array(data)
assert len(data.shape) == 1, "data is not one-dimensional"
# Get the data size.
n = len(data)
# Default checking. (u0 is done separately within each method.)
if dx is None:
dx = 1.0 / n
# Different methods for small- and large-scale problems.
if (scale.lower() == 'small'):
# Differentiation operator
d0 = -np.ones(n)/dx
du = np.ones(n-1)/dx
dl = np.zeros(n-1)
dl[-1] = d0[-1]
d0[-1] *= -1
D = sparse.diags([dl, d0, du], [-1, 0, 1])
DT = D.transpose()
# Antidifferentiation and its adjoint
def A(x): return (np.cumsum(x) - 0.5 * (x + x[0])) * dx
def AT(x): return np.concatenate([[sum(x[1:])/2.0],
(sum(x)-np.cumsum(x)+0.5*x)[1:]])*dx
# Default initialization is naive derivative
if u0 is None:
u0 = D*data
u = u0.copy()
# Since Au( 0 ) = 0, we need to adjust.
ofst = data[0]
# Precompute.
ATb = AT(ofst - data) # input: size n
# Main loop.
for ii in range(1, itern+1):
if diffkernel == 'abs':
# Diagonal matrix of weights, for linearizing E-L equation.
Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n)
# Linearized diffusion matrix, also approximation of Hessian.
L = dx * DT * Q * D
elif diffkernel == 'sq':
L = dx * DT * D
else:
raise ValueError('Invalid diffkernel value')
# Gradient of functional.
g = AT(A(u)) + ATb + alph * L * u
#print(g)
# Prepare to solve linear equation.
if precondflag:
# Simple preconditioner.
P = alph * sparse.spdiags(L.diagonal() + 1, 0, n, n)
else:
P = None
def linop(v): return (alph * L * v + AT(A(v)))
linop = splin.LinearOperator((n, n), linop)
s, info_i = sparse.linalg.cg(
linop, g, x0=None, tol=cgtol, maxiter=cgmaxit,
callback=None, M=P, atol='legacy')
#print(s)
if diagflag:
log_iteration(ii, s[0], u, g)
if (info_i > 0):
logging.warning(
"WARNING - convergence to tolerance not achieved!")
elif (info_i < 0):
logging.warning("WARNING - illegal input or breakdown")
# Update solution.
u = u - s
#print(u)
# # Test the convergence condition
# s_norm = np.sqrt(np.sum(np.array(s).ravel() ** 2))
# u_norm = np.sqrt(np.sum(np.array(u).ravel() ** 2))
# norm = s_norm / u_norm
# print(norm)
# Display plot.
if plotflag:
plt.plot(u)
plt.show()
elif (scale.lower() == 'large'):
# Construct anti-differentiation operator and its adjoint.
def A(v): return np.cumsum(v)
def AT(w): return (sum(w) * np.ones(len(w)) -
np.transpose(np.concatenate(([0.0],
np.cumsum(w[:-1])))))
# Construct differentiation matrix.
c = np.ones(n)
D = sparse.spdiags([-c, c], [0, 1], n, n) / dx
mask = np.ones((n, n))
mask[-1, -1] = 0.0
D = sparse.dia_matrix(D.multiply(mask))
DT = D.transpose()
# Since Au( 0 ) = 0, we need to adjust.
data = data - data[0]
# Default initialization is naive derivative.
if u0 is None:
u0 = np.concatenate(([0], np.diff(data)))
u = u0
# Precompute.
ATd = AT(data)
# Main loop.
for ii in range(1, itern + 1):
if diffkernel == 'abs':
# Diagonal matrix of weights, for linearizing E-L equation.
Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n)
# Linearized diffusion matrix, also approximation of Hessian.
L = DT * Q * D
elif diffkernel == 'sq':
L = DT * D
else:
raise ValueError('Invalid diffkernel value')
# Gradient of functional.
g = AT(A(u)) - ATd
g = g + alph * L * u
# Build preconditioner.
if precondflag:
c = np.cumsum(range(n, 0, -1))
B = alph * L + sparse.spdiags(c[::-1], 0, n, n)
# droptol = 1.0e-2
R = sparse.dia_matrix(np.linalg.cholesky(B.todense()))
P = np.dot(R.transpose(), R)
else:
P = None
# Prepare to solve linear equation.
def linop(v): return (alph * L * v + AT(A(v)))
linop = splin.LinearOperator((n, n), linop)
s, info_i = sparse.linalg.cg(
linop, -g, x0=None, tol=cgtol, maxiter=cgmaxit, callback=None,
M=P, atol='legacy')
if diagflag:
log_iteration(ii, s[0], u, g)
if (info_i > 0):
logging.warning(
"WARNING - convergence to tolerance not achieved!")
elif (info_i < 0):
logging.warning("WARNING - illegal input or breakdown")
# Update current solution
u = u + s
# Display plot
if plotflag:
plt.plot(u / dx)
plt.show()
u = u / dx
return u
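# Hedged usage sketch (illustrative only; it assumes the module-level helpers
# _has_matplotlib and log_iteration referenced above are defined):
# t = np.linspace(0, 2 * np.pi, 200)
# noisy = np.sin(t) + 0.05 * np.random.randn(t.size)
# du = TVRegDiff(noisy, itern=50, alph=0.1, dx=t[1] - t[0],
#                plotflag=False, diagflag=False)   # du approximates cos(t)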
|
b4497f4ac6d09f5240f551f6d25077d2e7624af2
| 3,641,067
|
def replaceidlcode(lines,mjd,day=None):
"""
Replace IDL code in lines (array of strings) with the results of code
execution. This is a small helper function for translate_idl_mjd5_script().
"""
# day
# psfid=day+138
# domeid=day+134
if day is not None:
ind,nind = dln.where( (lines.lower().find('day')>-1) &
(lines.lower().startswith('day=')==False) )
if nind>0:
lines[ind] = lines[ind].replace('day',str(day))
# indgen
# ims=day+149+indgen(2)
ind,nind = dln.where(lines.lower().find('indgen(')>-1)
if nind>0:
lines[ind] = lines[ind].replace('indgen(','np.arange(')
# Deal with assignment lines with code to execute
ind,nind = dln.where( ((lines.lower().find('+')>-1) |
(lines.lower().find('-')>-1) |
(lines.lower().find('*')>-1) |
(lines.lower().find('np.arange')>-1)) &
(lines.lower().find('=')>-1) &
(lines.lower().find('mkplan')==-1) )
for i in range(nind):
line1 = lines[ind[i]]
lo = line1.find('=')
key = line1[0:lo]
val = eval(line1[lo+1:])
if (type(val) is int) | (type(val) is str):
lines[ind[i]] = key+'='+str(val)
else:
lines[ind[i]] = key+'='+str(list(val))
# Deal with mkplan lines with code to execute
ind,nind = dln.where( ((lines.lower().find('+')>-1) |
(lines.lower().find('-')>-1) |
(lines.lower().find('*')>-1) |
(lines.lower().find('np.arange')>-1)) &
(lines.lower().find('=')>-1) &
(lines.lower().find('mkplan')>-1) )
for i in range(nind):
line1 = lines[ind[i]]
raise ValueError('This has not been implemented yet')
return lines
|
fd8a2bc9a374c36e7973cfc01d38582e25ce9438
| 3,641,068
|
import torch
import torch.utils.tensorboard
from torch import nn
from tqdm import tqdm
def test(
model: nn.Module,
classes: dict,
data_loader: torch.utils.data.DataLoader,
criterion: nn.Module,
# scheduler: nn.Module,
epoch: int,
num_iteration: int,
use_cuda: bool,
tensorboard_writer: torch.utils.tensorboard.SummaryWriter,
name_step: str,
):
""" Test a given model
Args:
model (nn.Module): model to test.
        classes (dict): dictionary containing the classes and their indices.
data_loader (torch.utils.data.DataLoader): data loader with the data to test the model on.
criterion (nn.Module): loss function.
epoch (int): epoch of training corresponding to the model.
num_iteration (int): number of iterations since the beginning of the training corresponding to the model.
use_cuda (bool): boolean to decide if cuda should be used.
tensorboard_writer (torch.utils.tensorboard.SummaryWriter): writer to write the metrics in tensorboard.
name_step (str): name of the step to write it in the description of the progress_bar
Returns:
loss (float): final loss
accuracy_top1 (float): final accuracy top1
accuracy_top5 (float): final accuracy top5
confidence_mean (float): mean confidence
"""
# Switch the model to eval mode
model.eval()
# Initialize the trackers for the loss and the accuracy
loss_tracker = utils.MetricTracker()
accuracy_top1_tracker = utils.MetricTracker()
accuracy_top5_tracker = utils.MetricTracker()
confidence_tracker = utils.MetricTracker()
# Initialize confusing matrix
confusion_matrix_tracker = utils.ConfusionMatrix(classes)
# create BackgroundGenerator and wrap it in tqdm progress bar
progress_bar = tqdm(BackgroundGenerator(data_loader, max_prefetch=32), total=len(data_loader))
for data in progress_bar:
inputs, targets = data
if use_cuda:
inputs = inputs.cuda()
targets = targets.cuda()
# forward pass
outputs = model(inputs)
loss = criterion(outputs, targets)
confidence, prediction = outputs.topk(dim=1, k=5)
# scheduler.step(loss)
# Track loss, accuracy and confidence
loss_tracker.update(loss.item())
accuracy_top1_tracker.update(
(prediction[:, 0] == targets).sum().item(), targets.numel())
accuracy_top5_tracker.update(
(prediction[:, :5] == targets[:, None]).sum().item(), targets.numel())
confidence_tracker.update(confidence[:, 0].sum().item(), targets.numel())
# Update the confusion matrix
confusion_matrix_tracker.update_confusion_matrix(targets.cpu(), prediction[:, 0].cpu())
# Update the progress_bar information
progress_bar.set_description(f"Epoch {epoch + 1}/{args.epochs} {name_step}")
progress_bar.set_postfix(
loss=f"{loss_tracker.average:05.5f}",
accuracy_top1=f"{100 * accuracy_top1_tracker.average:05.2f}",
accuracy_top5=f"{100 * accuracy_top5_tracker.average:05.2f}",)
# Add the new values to the tensorboard summary writer
tensorboard_writer.add_scalar("loss", loss_tracker.average, num_iteration)
tensorboard_writer.add_scalar("accuracy_top1", accuracy_top1_tracker.average, num_iteration)
tensorboard_writer.add_scalar("accuracy_top5", accuracy_top5_tracker.average, num_iteration)
tensorboard_writer.add_scalar(
"confidence_mean", confidence_tracker.average, num_iteration
)
tensorboard_writer.add_figure("confusion_matrix", confusion_matrix_tracker.plot_confusion_matrix(normalize=True), num_iteration)
tensorboard_writer.flush()
return (
loss_tracker.average,
accuracy_top1_tracker.average,
accuracy_top5_tracker.average,
confidence_tracker.average,
)
|
306b43730554492d1d541be1a8c8d4c202b932f4
| 3,641,069
|
import torch
def get_grad_spherical_harmonics(xyz, l, m):
"""Compute the gradient of the Real Spherical Harmonics of the AO.
Args:
xyz : array (Nbatch,Nelec,Nrbf,Ndim) x,y,z, distance component of each
point from each RBF center
l : array(Nrbf) l quantum number
m : array(Nrbf) m quantum number
Returns:
Y array (Nbatch,Nelec,Nrbf,3) : value of each grad SH at each point
"""
Y = torch.zeros_like(xyz)
# l=0
ind = (l == 0).nonzero().view(-1)
Y[:, :, ind, :] = _grad_spherical_harmonics_l0(xyz[:, :, ind, :])
# l=1
indl = (l == 1)
if torch.any(indl):
for mval in [-1, 0, 1]:
indm = (m == mval)
ind = (indl * indm).nonzero().view(-1)
            if len(ind) > 0:
# _tmp = _grad_spherical_harmonics_l1(xyz[:, :, ind, :], mval)
Y[:, :, ind, :] = _grad_spherical_harmonics_l1(
xyz[:, :, ind, :], mval)
# l=2
indl = (l == 2)
if torch.any(indl):
for mval in [-2, -1, 0, 1, 2]:
indm = (m == mval)
ind = (indl * indm).nonzero().view(-1)
            if len(ind) > 0:
Y[:, :, ind, :] = _grad_spherical_harmonics_l2(
xyz[:, :, ind, :], mval)
return Y
|
a01b529f98276e41fa3ed8c9934db770979d8702
| 3,641,070
|
import os
from mimetypes import guess_type
def send_photo(self, user_ids, filepath, thread_id=None):
"""
:param self: bot
:param filepath: file path to send
:param user_ids: list of user_ids for creating group or
one user_id for send to one person
:param thread_id: thread_id
"""
user_ids = _get_user_ids(self, user_ids)
if not isinstance(user_ids, (list, str)):
self.logger.error("user_ids must be a list or string")
return False
if self.reached_limit("messages"):
self.logger.info("Out of messages for today.")
return False
if not os.path.exists(filepath):
self.logger.error("File %s is not found", filepath)
return False
mime_type = guess_type(filepath)
if mime_type[0] != "image/jpeg":
self.logger.error("Only jpeg files are supported")
return False
self.delay("message")
if not self.api.send_direct_item(
"photo", user_ids, filepath=filepath, thread=thread_id
):
self.logger.info("Message to %s wasn't sent", user_ids)
return False
self.total["messages"] += 1
return True
|
8a03bfced7d9fd6d9d837fa2db10b760e31db3f7
| 3,641,071
|
from typing import List
from typing import Union
import re
def references_from_string(string: str) -> List[
Union[InputReference, TaskReference, ItemReference]
]:
"""Generate a reference object from a reference string
Arguments:
        string {str} -- A reference string (e.g. `{{input.example}}`)
Raises:
ValueError: Input string cannot be parsed as a reference object
Returns:
List[Union[InputReference, TaskReference, ItemReference]] -- A list of reference objects
"""
pattern = r"{{\s*([_a-zA-Z0-9.\-\$#\?]*)\s*}}"
match = re.findall(pattern, string, flags=re.MULTILINE)
refs = []
for ref in match:
split_ref = ref.split('.')
ref_type = split_ref[0]
if ref_type == 'input':
assert len(split_ref) == 2, \
f'Input Reference must be in formatted as "input.variable" not {ref}.'
ref = InputReference(variable=split_ref[1])
elif ref_type == 'tasks':
assert len(split_ref) == 3, \
ValueError(
f'Task Reference should be in format "tasks.task-name.variable" but'
f' found: {ref}'
)
ref = TaskReference(
name=split_ref[1], variable=split_ref[2])
elif ref_type == 'item':
variable = '.'.join(split_ref[1:])
ref = ItemReference(variable=variable)
else:
raise ValueError(f'Reference of type {ref_type} not recognized: {ref}')
refs.append(ref)
return refs
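# Hedged illustration of the template pattern alone; the InputReference /
# TaskReference / ItemReference classes above are assumed to come from the
# surrounding package.
_pattern = r"{{\s*([_a-zA-Z0-9.\-\$#\?]*)\s*}}"
_found = re.findall(_pattern, "copy {{ input.example }} into {{tasks.copy.result}}")
assert _found == ['input.example', 'tasks.copy.result']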
|
4e28d082e7fc638470b2f4753e6283d9630be073
| 3,641,072
|
def arrange_images(normalized_posters, blur_factor, blur_radius):
"""
Arranges images to create a collage.
Arguments:
        normalized_posters: list of PIL.Image
            Poster images, already normalized in size, to be packed into the collage.
blur_factor:
Number of times to apply a blurring operation to diffuse wasted space.
blur_radius:
Radius of neighbourhood for use as Gaussian blurring parameter.
Returns:
collage: np.array
A collage of images heuristically packed together.
"""
# as a greedy heuristic sort by size first to minimize wasted area
normalized_posters = sorted(
normalized_posters, key=lambda x: x.size[0] * x.size[1], reverse=True
)
sizes = [x.size for x in normalized_posters]
positions = rpack.pack(sizes)
max_width = max(a[0] + b[0] for a, b in zip(positions, sizes))
max_height = max(a[1] + b[1] for a, b in zip(positions, sizes))
collage = np.full([max_height + 1, max_width + 1, 3], 255, dtype=np.uint8)
deadspace = np.full_like(collage, True)
# place images
for (x, y), img in zip(positions, normalized_posters):
dx, dy = img.size
collage[y : y + dy, x : x + dx] = img
deadspace[y : y + dy, x : x + dx] = False
# identify all deadspace which looks harsh on the eyes
deadspace = np.where(deadspace)
# diffuse deadspace to get a softer background
gaussian_blur = ImageFilter.GaussianBlur(radius=blur_radius)
for _ in range(blur_factor):
blurred = Image.fromarray(collage).filter(gaussian_blur)
collage[deadspace] = np.array(blurred)[deadspace]
return collage
|
e3d4dc90c8ff4ec435061a3507423cd8c5f7c6d4
| 3,641,073
|
def make_text(text, position=(0, 0, 0), height=1):
"""
Return a text object at the specified location with a given height
"""
sm = SpriteMaterial(map=TextTexture(string=text, color='white', size=100, squareTexture=False))
return Sprite(material=sm, position = position, scaleToTexture=True, scale=[1, height, 1])
|
19daad7ae7f93ce1a4f06596fe2799c8e9701b72
| 3,641,074
|
import copy
def get_features(user_features, documents, ARGS, BOW = False, Conversational = False, User = False, SNAPSHOT_LEN = False, Questions = False, COMMENT_LEN = True):
"""
Generates Features:
Type of Features:
- BOW: bag of words features
- Conversational: features extracted from the conversation
- User: features based on participant information
- SNAPSHOT_LEN: number of comments in the final snapshot
- Questions: question features
- COMMENT_LEN: number of comments added to the conversation
"""
STATUS, ASPECTS, attacker_profile_ASPECTS, LEXICONS, QUESTIONS, UNIGRAMS_LIST, BIGRAMS_LIST = ARGS
feature_sets = []
# BOW features
bow_features = []
for pair in documents:
conversation, clss, conv_id = pair
feature_set = {}
# exclude last action
actions = conversation['action_feature']
end_time = max([a['timestamp_in_sec'] for a in actions])
actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
actions = sorted(actions, \
key=lambda k: (k['timestamp_in_sec'], k['id'].split('.')[1], k['id'].split('.')[2]))[::-1]
comments_actions = [a for a in actions if a['comment_type'] == 'SECTION_CREATION' or a['comment_type'] == 'COMMENT_ADDING']
# update feature set
feature_set.update(_get_term_features(comments_actions, UNIGRAMS_LIST, BIGRAMS_LIST))
bow_features.append((copy.deepcopy(feature_set), clss))
    # Conversational features
conv_features = []
for pair in documents:
conversation, clss, conv_id = pair
feature_set = {}
# exclude last action
actions = conversation['action_feature']
end_time = max([a['timestamp_in_sec'] for a in actions])
actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
actions = sorted(actions, \
key=lambda k: (k['timestamp_in_sec'], k['id'].split('.')[1], k['id'].split('.')[2]))[::-1]
# only keep comment adding and section creation
comments_actions = [a for a in actions if a['comment_type'] == 'SECTION_CREATION' or a['comment_type'] == 'COMMENT_ADDING']
# conversational features from all actions that adds a comment
feature_set.update(_get_global_action_features(comments_actions))
# conversational features from the last N actions that adds a comment
feature_set.update(_get_last_n_action_features(comments_actions, 1, LEXICONS))
# conversational features from the last action that adds a comment of each participant
feature_set.update(_get_action_features(comments_actions, LEXICONS))
# conversational features based on a single participant's behavior in the conversation
feature_set.update(_get_repeatition_features(comments_actions))
# question features
if Questions:
feature_set.update(_get_question_features(conv_id, QUESTIONS))
actions = actions[::-1]
# conversational features based on reply relations
feature_set.update(_get_balance_features(actions))
# number of comments in last snapshot
if SNAPSHOT_LEN:
feature_set['snapshot_len'] = conversation['snapshot_len']
conv_features.append((copy.deepcopy(feature_set), clss))
    # participant features
# extract the last participant's profile
participant_features = []
starter_attack_profiles = {0: [], 1:[]}
non_starter_attack_profiles = {0: [], 1: []}
all_profiles = {0: [], 1: []}
blocks = []
user_info = []
for ind, pair in enumerate(documents):
conversation, clss, conv_id = pair
# is the starter of the conversation also the last participant in the conversation
actions = conversation['action_feature']
start_time = min([a['timestamp_in_sec'] for a in actions])
end_time = max([a['timestamp_in_sec'] for a in actions])
for a in actions:
if a['timestamp_in_sec'] == start_time:
if 'user_text' in a:
starter = a['user_text']
else:
starter = 'anon'
if a['timestamp_in_sec'] == end_time:
if 'user_text' in a:
ender = a['user_text']
else:
ender = 'anon'
feature_set, user_infos = _user_features(actions, user_features[conv_id], ASPECTS, STATUS, QUESTIONS[conv_id])
# last participant's profile
p, b = attacker_profile(conversation, user_infos, attacker_profile_ASPECTS)
user_info.append(user_infos)
if starter == ender:
starter_attack_profiles[clss].append(p)
else:
non_starter_attack_profiles[clss].append(p)
all_profiles[clss].append(p)
# participants' block histories
blocks.append(int(b))
# update participant features
participant_features.append((copy.deepcopy(feature_set), clss))
feature_sets = []
# update the returned feature set given the parameters
for ind, pair in enumerate(documents):
conversation, clss, conv_id = pair
actions = conversation['action_feature']
end_time = max([a['timestamp_in_sec'] for a in actions])
actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
comments_actions = [a for a in actions if a['comment_type'] == 'SECTION_CREATION' or a['comment_type'] == 'COMMENT_ADDING']
feature_set = {}
if COMMENT_LEN:
feature_set = {'no_comments': len(comments_actions)}
if BOW:
feature_set.update(bow_features[ind][0])
if Conversational:
feature_set.update(conv_features[ind][0])
if User:
feature_set.update(participant_features[ind][0])
feature_sets.append((feature_set, clss))
return user_info, starter_attack_profiles, non_starter_attack_profiles, all_profiles, feature_sets
|
b200c9783661db13bfecb76eee18f39bc67301b6
| 3,641,075
|
import time
import traceback
def incomeStat(headers):
"""
    Income statistics.
    :param headers: request headers whose Referer query string carries the session parameters
    :return: the response dict when status == 0, otherwise None
"""
time.sleep(0.3)
url = f'https://kd.youth.cn/wap/user/balance?{headers["Referer"].split("?")[1]}'
try:
response = requests_session().get(url=url, headers=headers, timeout=50).json()
        print('Income statistics')
print(response)
if response['status'] == 0:
return response
else:
return
except:
print(traceback.format_exc())
return
|
8c023314835ab46b354ae0d793d6de3694711a65
| 3,641,076
|
def t_matrix(phi, theta, psi, sequence):
""" Return t_matrix to convert angle rate to angular velocity"""
if sequence == 'ZYX':
t_m = np.array([[1, np.sin(phi)*np.tan(theta), np.cos(phi)*np.tan(theta)],\
[0, np.cos(phi), -np.sin(phi)],\
[0, np.sin(phi)/np.cos(theta), np.cos(phi)/np.cos(theta)]])
else:
t_m = np.eye(3)
return t_m
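# Hedged usage sketch: with zero roll and pitch the ZYX mapping reduces to the
# identity, so Euler-angle rates equal the body angular velocity. numpy is
# imported here because the snippet above relies on a module-level np.
import numpy as np

assert np.allclose(t_matrix(0.0, 0.0, 0.0, 'ZYX'), np.eye(3))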
|
65c5595cc286a442777651f888a6e3fee032ba21
| 3,641,077
|
def point_in_fence(x, y, points):
"""
    Determine whether a point lies inside a fence (polygon).
    :param x: longitude
    :param y: latitude
    :param points: polygon vertices in the form [[lon1, lat1], [lon2, lat2], ...]
    :return: True if the point is inside the fence, False otherwise
"""
count = 0
x1, y1 = points[0]
    x1_part = (y1 > y) or ((x1 - x > 0) and (y1 == y))  # which half-plane point 1 falls in
points.append((x1, y1))
for point in points[1:]:
x2, y2 = point
        x2_part = (y2 > y) or ((x2 > x) and (y2 == y))  # which half-plane point 2 falls in
if x2_part == x1_part:
x1, y1 = x2, y2
continue
mul = (x1 - x) * (y2 - y) - (x2 - x) * (y1 - y)
        if mul > 0:  # cross product > 0: counter-clockwise crossing
count += 1
elif mul < 0:
count -= 1
x1, y1 = x2, y2
x1_part = x2_part
if count == 2 or count == -2:
return True
else:
return False
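# Hedged usage sketch: a square fence; a fresh list is passed on each call
# because the function appends the first vertex to `points`.
assert point_in_fence(2, 2, [[0, 0], [4, 0], [4, 4], [0, 4]]) is True
assert point_in_fence(5, 5, [[0, 0], [4, 0], [4, 4], [0, 4]]) is False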
|
bb25f399eadf818fbafdeee6c8adbb1254a579f7
| 3,641,078
|
def parse_prior(composition, alphabet, weight=None):
"""Parse a description of the expected monomer distribution of a sequence.
Valid compositions:
* None or 'none'
        No composition specified
* 'auto' or 'automatic'
Use the typical average distribution
for proteins and an equiprobable distribution for
everything else.
* 'equiprobable'
All monomers have the same probability.
* a percentage, e.g. '45%' or a fraction '0.45'
The fraction of CG bases for nucleotide alphabets
* a species name, e.g. 'E. coli', 'H. sapiens',
Use the average CG percentage for the species's genome.
* An explicit distribution
e.g. {'A':10, 'C':40, 'G':40, 'T':10}
"""
if composition is None:
return None
comp = composition.strip()
if comp.lower() == "none":
return None
if weight is None and alphabet is not None:
weight = sqrt(float(len(alphabet)))
if weight < 0:
raise ValueError("Weight cannot be negative.")
if comp.lower() == "equiprobable":
prior = weight * equiprobable_distribution(len(alphabet))
elif comp.lower() == "auto" or comp.lower() == "automatic":
if alphabet == unambiguous_protein_alphabet:
prior = weight * asarray(aa_composition, float64)
else:
prior = weight * equiprobable_distribution(len(alphabet))
elif comp in std_percentCG:
prior = weight * base_distribution(std_percentCG[comp])
elif comp[-1] == "%":
prior = weight * base_distribution(float(comp[:-1]))
elif isfloat(comp):
prior = weight * base_distribution(float(comp) * 100.0)
elif composition[0] == "{" and composition[-1] == "}":
explicit = composition[1:-1]
explicit = (
explicit.replace(",", " ")
.replace("'", " ")
.replace('"', " ")
.replace(":", " ")
.split()
)
if len(explicit) != len(alphabet) * 2:
raise ValueError("Explicit prior does not match length of alphabet")
prior = -ones(len(alphabet), float64)
try:
for r in range(len(explicit) // 2):
letter = explicit[r * 2]
index = alphabet.ord(letter)
value = float(explicit[r * 2 + 1])
prior[index] = value
except ValueError:
raise ValueError("Cannot parse explicit composition")
if any(prior == -1.0):
raise ValueError(
"Explicit prior does not match alphabet"
) # pragma: no cover
prior /= sum(prior)
prior *= weight
else:
raise ValueError("Unknown or malformed composition: %s" % composition)
if len(prior) != len(alphabet):
raise ValueError(
"The sequence alphabet and composition are incompatible."
) # pragma: no cover
return prior
|
50795a21231138bb3576a50a3791d9136264754e
| 3,641,079
|
def get_queues(prefix=None):
"""
Gets a list of SQS queues. When a prefix is specified, only queues with names
that start with the prefix are returned.
:param prefix: The prefix used to restrict the list of returned queues.
:return: A list of Queue objects.
"""
if prefix:
queue_iter = sqs.queues.filter(QueueNamePrefix=prefix)
else:
queue_iter = sqs.queues.all()
queues = list(queue_iter)
if queues:
logger.info("Got queues: %s", ', '.join([q.url for q in queues]))
else:
logger.warning("No queues found.")
return queues
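# Hedged setup sketch (assumes boto3): the module-level `sqs` resource and
# `logger` used above would be created roughly like this, given AWS
# credentials and a default region:
# import boto3, logging
# logger = logging.getLogger(__name__)
# sqs = boto3.resource("sqs")
# test_queues = get_queues(prefix="test-")   # queues whose names start with "test-"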
|
c459cad66561d887abdcc40157fea09481e267c7
| 3,641,080
|
def _genNodesNormal(numNodes=None, center=None, standardDeviation=None):
"""
    Generate randomized nodes around a center point using a normal distribution
Parameters
----------
    numNodes: int
        Required, number of nodes to be generated
    center: list or tuple, Required
        [lat, lon] location of the center point
    standardDeviation: float, Required
        Standard deviation of the normal distribution of distances from the center
    Returns
    -------
    list of lists
        A list of coordinates normally distributed around the center point
"""
# Initialize
locs = []
# Randomized generate nodes in normal distribution
for i in range(numNodes):
rndUniform = np.random.uniform(0, 360)
rndNormal = np.random.normal(0, standardDeviation)
newLoc = geoPointInDistance2D(center, rndUniform, rndNormal)
locs.append(newLoc)
return locs
|
62d7f44056621786a1b796bfe85df4eac0ec9574
| 3,641,081
|
def util_test_normalize(mean, std, op_type):
"""
Utility function for testing Normalize. Input arguments are given by other tests
"""
if op_type == "cpp":
# define map operations
decode_op = c_vision.Decode()
normalize_op = c_vision.Normalize(mean, std)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=normalize_op)
elif op_type == "python":
# define map operations
transforms = [
py_vision.Decode(),
py_vision.ToTensor(),
py_vision.Normalize(mean, std)
]
transform = py_vision.ComposeOp(transforms)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(input_columns=["image"], operations=transform())
else:
raise ValueError("Wrong parameter value")
return data
|
43fe33a124a8d52252738697eccd4775edb6e4b8
| 3,641,082
|
def calc_sub_from_constant(func, in_data, **kwargs):
"""[SubFromConstant](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.add.html)
See the documentation for [AddConstant](#addconstant)
"""
return _calc(func, in_data, **kwargs)
|
d40fcf668c2dc8e0c7d889d2cf145de208cc0ae6
| 3,641,083
|
import torch
def mask_distance_matrix(dmat, weight_bins=weight_bins):
"""
    A larger weight is assigned to residue pairs forming a contact: weights of
    20.5, 5.4 and 1 correspond to distances of 0-8, 8-15 and >15, respectively,
    for residue pairs (i, j) with |i-j| >= 24. These numbers were derived from
    simple statistics of an old training set and need not be very accurate;
    when |i-j| is small, the 20.5 and 5.4 can be reduced to smaller values.
    :param dmat: A distance matrix
    :param weight_bins: Distance bin edges used to quantize the matrix into weight masks
    :return: (masks, t_imj) - the quantized distance masks and a float tensor
        marking residue pairs with sequence separation |i-j| >= 24
"""
b, m, n = dmat.size()
imj = b * [[[abs(i-j) >= 24 for j in range(n)] for i in range(m)]]
t_imj = torch.tensor(imj, dtype=torch.float, device=device)
masks = quantize_distance_matrix(dmat, weight_bins, False)
return masks, t_imj
|
4d8ca64834b6de9e4dd0077278cb4a687f7cf33e
| 3,641,084
|
def get_output():
"""Return the set output setting."""
return _output
|
4332661d333d4ca2c364761b35bb2d9ed0b9d302
| 3,641,085
|
def cash_grouped_nb(target_shape, cash_flow_grouped, group_lens, init_cash_grouped):
"""Get cash series per group."""
check_group_lens(group_lens, target_shape[1])
out = np.empty_like(cash_flow_grouped)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
cash_now = init_cash_grouped[group]
for i in range(cash_flow_grouped.shape[0]):
flow_value = cash_flow_grouped[i, group]
cash_now = add_nb(cash_now, flow_value)
out[i, group] = cash_now
from_col = to_col
return out
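# Hedged illustration (plain numpy, not the snippet's own nb helpers): for a
# single group the loop above amounts to an initial cash level plus a
# cumulative sum of that group's cash flows.
import numpy as np

_flows = np.array([10.0, -4.0, 2.5])
_cash = 100.0 + np.cumsum(_flows)          # [110.0, 106.0, 108.5]
assert np.allclose(_cash, [110.0, 106.0, 108.5])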
|
3a7978493503cbae4fc867d8ca864193d913f33f
| 3,641,086
|
def agreement():
"""Input for Accepting license
"""
form = LicenseForm()
if form.validate_on_submit():
gluu_settings.db.set("ACCEPT_GLUU_LICENSE", "Y" if form.accept_gluu_license.data else "N")
return redirect(url_for(wizard_steps.next_step()))
with open("./LICENSE", "r") as f:
agreement_file = f.read()
if request.method == "GET":
# populate form data from settings
form.accept_gluu_license.data = gluu_settings.db.get("ACCEPT_GLUU_LICENSE")
wizard_steps.current_step = 'license'
return render_template("wizard/index.html",
license=agreement_file,
form=form,
current_step=wizard_steps.step_number(),
template="license")
|
e213d9ce33014b03fd97fdd3991eb72c52e3e9e7
| 3,641,087
|
import torch
def rotmat2quat(mat: torch.Tensor) -> torch.Tensor:
"""Converts rotation matrix to quaternion.
This uses the algorithm found on
https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion
, and follows the code from ceres-solver
https://github.com/ceres-solver/ceres-solver/blob/master/include/ceres/rotation.h
"""
mat_shape = mat.shape
assert mat_shape[-2:] == (3, 3)
mat = torch.reshape(mat, [-1, 3, 3])
# Case A: Easy case
r = torch.sqrt(torch.clamp_min(1. + mat[:, 0, 0] + mat[:, 1, 1] + mat[:, 2, 2], 0.0))
s = 0.5 / r
quat = torch.stack([
0.5 * r,
(mat[:, 2, 1] - mat[:, 1, 2]) * s,
(mat[:, 0, 2] - mat[:, 2, 0]) * s,
(mat[:, 1, 0] - mat[:, 0, 1]) * s
], dim=-1)
near_pi = isclose(r, 0.0)
if torch.sum(near_pi) > 0:
# Case B0, B1, B2: ~180deg rotation
quats1 = mat.new_zeros([mat.shape[0], 3, 4])
case_idx = torch.argmax(torch.diagonal(mat, dim1=-1, dim2=-2), dim=-1)
for case, (i, j, k) in enumerate([[0, 1, 2], [1, 2, 0], [2, 0, 1]]):
r = torch.sqrt(mat[..., i, i] - mat[..., j, j] - mat[..., k, k] + 1.0)
s = 0.5 / r
quats1[:, case, 0] = (mat[:, k, j] - mat[:, j, k]) * s
quats1[:, case, i + 1] = 0.5 * r
quats1[:, case, j + 1] = (mat[:, i, j] + mat[:, j, i]) * s
quats1[:, case, k + 1] = (mat[:, k, i] + mat[:, i, k]) * s
quat1 = quats1[torch.arange(mat.shape[0]), case_idx, :]
quat[near_pi] = quat1[near_pi]
quat = torch.reshape(quat, [*mat_shape[:-2], 4])
return quat
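# Hedged usage sketch (assumes the module-level isclose helper referenced
# above is available):
# q = rotmat2quat(torch.eye(3))   # identity rotation
# q                               # -> tensor([1., 0., 0., 0.])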
|
470d2890eb5c07dff1fdc3de7d347fc86dd3fd1e
| 3,641,088
|
def equalize(image):
"""
Equalize the image histogram. This function applies a non-linear
mapping to the input image, in order to create a uniform
distribution of grayscale values in the output image.
Args:
image (PIL image): Image to be equalized
Returns:
image (PIL image), Equalized image.
"""
return ImageOps.equalize(image)
|
5283609b316452da5aa9969e999dcdeb4de26b2b
| 3,641,089
|
def validate_output(value):
"""Validate "output" parameter."""
if value is not None:
if isinstance(value, str):
value = value.split(",")
# filter out empty names
value = list(filter(None, value))
return value
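# Hedged usage sketch: comma-separated names are split and empty names dropped.
assert validate_output("a,,b") == ["a", "b"]
assert validate_output(None) is None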
|
f00773674868ebde741f64b47fdc3372ad6a1e7d
| 3,641,090
|
def dummy_img(w, h, intensity=200):
"""Creates a demodata test image"""
img = np.zeros((int(h), int(w)), dtype=np.uint8) + intensity
return img
|
a416753d8aa8682aef2b344f7de416139c9ed33a
| 3,641,091
|
def getHandler(database):
"""
a function instantiating and returning this plugin
"""
return Events(database, 'events', public_endpoint_extensions=['insert'])
|
e42e69b379e2053437ac75fe3fe8fc81229579c1
| 3,641,092
|
import logging
def RegQueryValueEx(key, valueName=None):
""" Retrieves the type and data for the specified registry value.
Parameters
key A handle to an open registry key.
The key must have been opened with the KEY_QUERY_VALUE access right
valueName The name of the registry value. it is optional.
Return Value
    If the function succeeds, it returns a RegistryValue object holding the value's data.
If the function fails, a RegistryBaseException exception is raised, unless:
If the key is not open, an InvalidHandleException is raised
    If access is denied, an AccesDeniedException is raised
If the value does not exist, the function raises KeyError
"""
try:
(dataType, data, dataLength) = c_api.RegQueryValueExW(key=key, name=valueName)
data = (dtypes.BYTE * dataLength.value)()
(dataType, data, dataLength) = c_api.RegQueryValueExW(key=key, name=valueName,
data=data, dataLength=dataLength)
return RegistryValueFactory().by_type(dataType)(data)
except errors.WindowsError as exception:
errors.catch_and_raise_general_errors(exception)
logging.exception(exception)
raise errors.RegistryBaseException(exception.winerror, exception.strerror)
|
5cceedabfaef44040067424696d13eb8d0f15550
| 3,641,093
|
def read_molecules(filename):
"""Read a file into an OpenEye molecule (or list of molecules).
Parameters
----------
filename : str
The name of the file to read (e.g. mol2, sdf)
Returns
-------
molecule : openeye.oechem.OEMol
The OEMol molecule read, or a list of molecules if multiple molecules are read.
If no molecules are read, None is returned.
"""
ifs = oechem.oemolistream(filename)
molecules = list()
for mol in ifs.GetOEMols():
mol_copy = oechem.OEMol(mol)
molecules.append(mol_copy)
ifs.close()
if len(molecules) == 0:
return None
elif len(molecules) == 1:
return molecules[0]
else:
return molecules
|
420ba85dc768435927441500fe8005e3f009b9af
| 3,641,094
|
def euler(derivative):
"""
    Explicit (forward) Euler method: returns a step function mapping
    (t, x, dt) to (t + dt, x + derivative(t, x) * dt).
"""
return lambda t, x, dt: (t + dt, x + derivative(t, x) * dt)
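# Hedged usage sketch: one explicit Euler step of dx/dt = -x starting from
# x(0) = 1 with dt = 0.1.
_step = euler(lambda t, x: -x)
_t, _x = _step(0.0, 1.0, 0.1)        # -> (0.1, 0.9)
assert abs(_x - 0.9) < 1e-12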
|
08d636ec711f4307ab32f9a8bc3672197a3699d9
| 3,641,095
|
def detect_encoding_type(input_geom):
"""
Detect geometry encoding type:
- ENC_WKB: b'\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00H\x93@\x00\x00\x00\x00\x00\x9d\xb6@'
- ENC_EWKB: b'\x01\x01\x00\x00 \xe6\x10\x00\x00\x00\x00\x00\x00\x00H\x93@\x00\x00\x00\x00\x00\x9d\xb6@'
- ENC_WKB_HEX: '0101000000000000000048934000000000009DB640'
- ENC_EWKB_HEX: '0101000020E6100000000000000048934000000000009DB640'
- ENC_WKB_BHEX: b'0101000000000000000048934000000000009DB640'
- ENC_EWKB_BHEX: b'0101000020E6100000000000000048934000000000009DB640'
- ENC_WKT: 'POINT (1234 5789)'
- ENC_EWKT: 'SRID=4326;POINT (1234 5789)'
"""
if isinstance(input_geom, shapely.geometry.base.BaseGeometry):
return ENC_SHAPELY
if isinstance(input_geom, str):
if _is_hex(input_geom):
return ENC_WKB_HEX
else:
srid, geom = _extract_srid(input_geom)
if not geom:
return None
if srid:
return ENC_EWKT
else:
return ENC_WKT
if isinstance(input_geom, bytes):
try:
ba.unhexlify(input_geom)
return ENC_WKB_BHEX
except Exception:
return ENC_WKB
return None
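# Hedged usage sketch (the ENC_* constants and the _is_hex / _extract_srid
# helpers referenced above are assumed to come from the surrounding module):
# detect_encoding_type('POINT (1234 5789)')                          # -> ENC_WKT
# detect_encoding_type('SRID=4326;POINT (1234 5789)')                # -> ENC_EWKT
# detect_encoding_type('0101000000000000000048934000000000009DB640') # -> ENC_WKB_HEX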
|
4361abf695edad5912559175bd48cfa0bad92769
| 3,641,096
|
import os
def GetFilesToConcatenate(input_directory):
"""Get list of files to concatenate.
Args:
input_directory: Directory to search for files.
Returns:
A list of all files that we would like to concatenate relative
to the input directory.
"""
file_list = []
for dirpath, _, files in os.walk(input_directory):
for input_file in files:
file_list.append(
os.path.relpath(
os.path.join(dirpath, input_file),
input_directory))
return file_list
|
f8e2805b94171645ef9c4d51ded83a8f2f9e7675
| 3,641,097
|
def unet_weights(input_size = (256,256,1), learning_rate = 1e-4, weight_decay = 5e-7):
"""
Weighted U-net architecture.
The tuple 'input_size' corresponds to the size of the input images and labels.
Default value set to (256, 256, 1) (input images size is 256x256).
The float 'learning_rate' corresponds to the learning rate value for the training.
Defaut value set to 1e-4.
The float 'weight_decay' corresponds to the weight decay value for the training.
Default value set to 5e-7.
"""
# Get input.
input_img = Input(input_size)
# Get weights.
weights = Input(input_size)
# Layer 1.
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(input_img)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
# Layer 2.
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
# Layer 3.
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
# Layer 4.
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
# layer 5.
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
# Layer 6.
up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
# Layer 7.
up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
# Layer 8.
up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
# Layer 9.
up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
# Final layer (output).
conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
# Specify input (image + weights) and output.
model = Model(inputs = [input_img, weights], outputs = conv10)
# Use Adam optimizer, custom weighted binary cross-entropy loss and specify metrics
# Also use weights inside the loss function.
model.compile(optimizer = Adam(lr = learning_rate, decay = weight_decay), loss = binary_crossentropy_weighted(weights), metrics = ['accuracy'])
return model
|
f091627475f13985e33f2960afd4b0136e9d10f4
| 3,641,098
|
from theano import tensor as T  # assumed backend: Theano tensor module (Keras-style backend op)
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis. """
return T.std(x, axis=axis, keepdims=keepdims)
|
ad3c547d19507243ec143d38dd22f9fc92becffb
| 3,641,099
|