content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def person(request):
    """
    Display information on the specified borrower (person).

    Looks up a ``Person`` by the ``person_id`` query parameter and, when
    found, gathers their checked-out items and transaction history for the
    template.  Without a ``person_id``, shows the search prompt plus the
    clickable list of enrolled people.  Renders ``person.html`` either way.
    """
    context = {"title": "Find a person"}
    if 'person_id' in request.GET:
        person_id = request.GET['person_id']
        context["person_id"] = person_id
        try:
            person = Person.objects.get(id_number=person_id)
            # str() instead of the Python-2-only unicode() builtin.
            context["title"] = str(person)
            context["person"] = person
            context["checked_out_items"] = person.item_set.all()
            context["transaction_history"] = person.transaction_set.all()
        except Person.DoesNotExist:
            context["error_message"] = "No person with id number %s" % person_id
    else:
        context["message"] = "Enter or scan the person's ID number"
        # For clickable list of names
        context["people"] = Person.objects.enrolled()
    # Explicit context dict instead of locals(), which leaked every local
    # (including ``request``) into the template and broke under unicode()
    # on Python 3.
    return render_to_response("person.html", context)
def remove_routes_from_app(app_name: str, route_host: str,
                           configuration: Configuration, secrets: Secrets,
                           org_name: str = None, space_name: str = None):
    """
    Remove every route matching ``route_host`` from the named application.

    See
    https://apidocs.cloudfoundry.org/280/apps/remove_route_from_the_app.html
    """
    target_app = get_app_by_name(
        app_name, configuration, secrets, org_name=org_name,
        space_name=space_name)
    matching_routes = get_app_routes_by_host(
        app_name, route_host, configuration, secrets, org_name=org_name,
        space_name=space_name)
    app_guid = target_app["metadata"]["guid"]
    # Delete each matching route binding via the v2 API.
    for matching_route in matching_routes:
        endpoint = "/v2/apps/{a}/routes/{r}".format(
            a=app_guid, r=matching_route["metadata"]["guid"])
        call_api(endpoint, configuration, secrets, method="DELETE")
def _follow_word_from_node(node, word):
    """Follow the link labelled ``word`` out of ``node``.

    If ``node`` has a link with label ``word``, returns the link's end node
    together with its acoustic log probability, LM log probability, and
    transition IDs (``None`` logprobs count as 0.0, ``None`` transitions as
    the empty string).  When intervening null links must be traversed, their
    log probabilities are summed and their transition IDs concatenated onto
    the result.

    :type node: Lattice.Node
    :param node: node where to start searching
    :type word: str
    :param word: word to search for
    :rtype: tuple of (Lattice.Node, float, float, str)
    :returns: end node of the word link (or ``None`` if not found) plus the
        accumulated acoustic log probability, LM log probability, and
        transition IDs of the path to the word
    """
    link = node.word_to_link.get(word)
    if link is None:
        return None, None, None, None
    if link.word is not None:
        # Direct hit: normalize missing fields to neutral values.
        ac = 0.0 if link.ac_logprob is None else link.ac_logprob
        lm = 0.0 if link.lm_logprob is None else link.lm_logprob
        trans = "" if link.transitions is None else link.transitions
        return link.end_node, ac, lm, trans
    # Null link: recurse past it, then fold this link's contribution in.
    found_node, total_ac, total_lm, total_trans = \
        _follow_word_from_node(link.end_node, word)
    if found_node is None:
        return None, None, None, None
    if link.ac_logprob is not None:
        total_ac += link.ac_logprob
    if link.lm_logprob is not None:
        total_lm += link.lm_logprob
    if link.transitions is not None:
        total_trans += link.transitions
    return found_node, total_ac, total_lm, total_trans
def get_cbday():
    """
    Print the current clan-battle (CB) schedule status.

    Compares the current time against the schedule from ``fetch_status()``
    and prints whether CB has not started yet, is in progress (and which
    day), or has already finished.  Not used by the bot itself — this is a
    manual debugging helper.

    return
    ----
    ```
    None
    ```
    """
    cb_status=fetch_status()
    # Example of the dict shape fetch_status() is expected to return
    # (presumably — TODO confirm against fetch_status):
    #cb_status = {
    #    'cb_start': datetime.strptime('2020/02/23 5:00:00',
    #                                  '%Y/%m/%d %H:%M:%S'),
    #    'cb_end': datetime.strptime('2020/02/28 23:59:59',
    #                                '%Y/%m/%d %H:%M:%S'),
    #    'cb_days': 6
    #}
    now_datetime=datetime.now()
    # Hard-coded "now" kept for manual testing:
    #now = "2020-02-29 15:00:00"
    #now_datetime = datetime.strptime(now, '%Y-%m-%d %H:%M:%S')
    # 1-based day number within the CB period; <= 0 means CB not started.
    now_cbday = (now_datetime - cb_status["cb_start"]).days + 1
    print("現在日時:", now_datetime)
    print("開始日時:", cb_status["cb_start"])
    print("終了日時:", cb_status["cb_end"])
    print("開催期間:", cb_status["cb_days"])
    print(cb_status['cb_days'] - now_cbday)
    print(cb_status["cb_end"])
    if now_cbday <= 0:
        # CB has not started yet.
        # NOTE(review): remaining-days arithmetic (now_cbday + 1) looks
        # off by the sign convention — verify against real schedules.
        print(f"クラバト開催まであと{now_cbday+1}日")
    elif (now_cbday >= 1) and (now_cbday <= cb_status["cb_days"]) and now_datetime<cb_status["cb_end"]:
        # CB is currently running: report day X of N.
        print(f"クラバト開催中! {now_cbday}/{cb_status['cb_days']}日目")
    else:
        # CB is over: report how many days have passed since it ended.
        print(f"クラバトは終了しています。{now_cbday-cb_status['cb_days']}日経過")
    #print(now_cbday)
def normalize_code(code):
    """Normalize an object code (slugify + uppercase) to avoid duplicates."""
    if not code:
        return None
    return slugify(code, allow_unicode=False).upper()
def _append_preprocessed_stack(hdf_file, dset_name, file_list, data_dir,
                               preprocess_fn):
    """
    Load each Nifti file in ``file_list``, preprocess it into a stack of 2D
    slices, and append the slices to an extendable HDF5 dataset.

    The dataset ``dset_name`` is created lazily from the first file's shape
    (unbounded along axis 0) and grown with ``resize`` for each subsequent
    file.
    """
    dset = None
    for fname in tqdm(file_list):
        # Nibabel should read the file as X,Y,Z,C
        img = np.array(nib.load(os.path.join(data_dir, fname)).dataobj)
        img = preprocess_fn(img)
        num_rows = img.shape[0]
        if dset is None:
            # First file: create the dataset, resizable along the slice axis.
            dset = hdf_file.create_dataset(
                dset_name,
                img.shape,
                maxshape=(None, img.shape[1], img.shape[2], img.shape[3]),
                dtype=float,
                compression="gzip")
            dset[:] = img
        else:
            row = dset.shape[0]  # Count current dataset rows
            dset.resize(row + num_rows, axis=0)  # Add new rows
            # Insert data into the new rows
            dset[row:(row + num_rows), :] = img


def convert_raw_data_to_hdf5(trainIdx, validateIdx, fileIdx,
                             filename, dataDir, json_data):
    """
    Go through the Decathlon dataset.json file.
    We've already split into training and validation subsets.
    Read in Nifti format files. Crop images and masks.
    Save to HDF5 format.
    This code will convert the 3D images and masks
    into a stack of 2D slices.

    Parameters
    ----------
    trainIdx, validateIdx : indices into ``fileIdx`` for the two subsets
    fileIdx : sequence of dicts with "image" and "label" file names
    filename : output HDF5 file path
    dataDir : directory containing the Nifti files
    json_data : parsed dataset.json, used for the metadata attributes
    """
    hdf_file = h5py.File(filename, "w")
    # Save the dataset attributes
    attach_attributes(hdf_file, str(json_data["modality"]), "modalities")
    attach_attributes(hdf_file, json_data["licence"], "license")
    attach_attributes(hdf_file, json_data["reference"], "reference")
    attach_attributes(hdf_file, json_data["name"], "name")
    attach_attributes(hdf_file, json_data["description"], "description")
    attach_attributes(hdf_file, json_data["release"], "release")
    attach_attributes(hdf_file, json_data["tensorImageSize"], "tensorImageSize")
    # Training and validation filename lists
    train_image_files = [fileIdx[idx]["image"] for idx in trainIdx]
    train_label_files = [fileIdx[idx]["label"] for idx in trainIdx]
    validate_image_files = [fileIdx[idx]["image"] for idx in validateIdx]
    validate_label_files = [fileIdx[idx]["label"] for idx in validateIdx]
    attach_attributes(hdf_file, train_image_files, "training_input_files")
    attach_attributes(hdf_file, train_label_files, "training_label_files")
    attach_attributes(hdf_file, validate_image_files, "validation_input_files")
    attach_attributes(hdf_file, validate_label_files, "validation_label_files")
    # Print shapes of raw data
    print("Data shapes")
    print("===========")
    print("n.b. All tensors converted to stacks of 2D slices.")
    print("If you want true 3D tensors, then modify this code appropriately.")
    data_filename = os.path.join(dataDir, train_image_files[0])
    img = np.array(nib.load(data_filename).dataobj)
    print("Raw Image shape = ", img.shape)
    crop_shape = preprocess_inputs(img).shape[1:]
    print("Cropped Image shape = (?, {}, {}, {})".format(crop_shape[0],
                                                         crop_shape[1],
                                                         crop_shape[2]))
    data_filename = os.path.join(dataDir, train_label_files[0])
    msk = np.array(nib.load(data_filename).dataobj)
    print("Raw Masks shape = ", msk.shape)
    crop_shape = preprocess_labels(msk).shape[1:]
    print("Cropped Masks shape = (?, {}, {}, {})".format(crop_shape[0],
                                                         crop_shape[1],
                                                         crop_shape[2]))
    # The four dataset stacks all follow the same load/preprocess/append
    # pattern; the shared helper replaces four near-identical loops.
    print("Step 1 of 4. Save training set images.")
    _append_preprocessed_stack(hdf_file, "imgs_train", train_image_files,
                               dataDir, preprocess_inputs)
    print("Step 2 of 4. Save validation set images.")
    _append_preprocessed_stack(hdf_file, "imgs_validation",
                               validate_image_files, dataDir,
                               preprocess_inputs)
    print("Step 3 of 4. Save training set masks.")
    _append_preprocessed_stack(hdf_file, "msks_train", train_label_files,
                               dataDir, preprocess_labels)
    print("Step 4 of 4. Save validation set masks.")
    _append_preprocessed_stack(hdf_file, "msks_validation",
                               validate_label_files, dataDir,
                               preprocess_labels)
    hdf_file.close()
    print("Finished processing.")
    print("HDF5 saved to {}".format(filename))
def get_weights_for_all(misfit_windows, stations, snr_threshold, cc_threshold, deltat_threshold, calculate_basic, print_info=True):
    """
    get_weights_for_all: calculate weights.

    Builds one ``Weight`` tuple per misfit window, keyed as
    ``weights_for_all[net_sta][category] -> [Weight, ...]``.  The
    snr/cc/deltat components are always computed.  When
    ``calculate_basic`` is False, the geographical and category
    components are filled in as well (category counts are aggregated
    across MPI ranks); otherwise those two fields stay ``None``.

    :param misfit_windows: mapping net_sta -> category -> object with a
        ``windows`` iterable of misfit windows
    :param stations: station table used for the geographical weighting
    :param snr_threshold: (low, high) thresholds for the SNR weight
    :param cc_threshold: (low, high) thresholds for the CC weight
    :param deltat_threshold: (low, high) thresholds for the deltat weight
    :param calculate_basic: when True, compute only snr/cc/deltat
    :param print_info: forwarded to ``mpi_collect_category_number``
    """
    weights_for_all = {}
    # * firstly we update the weight of snr,cc,deltat
    for net_sta in misfit_windows:
        weights_for_all[net_sta] = {}
        for category in misfit_windows[net_sta]:
            weights_for_all[net_sta][category] = []
            for each_misfit_window in misfit_windows[net_sta][category].windows:
                wsnr = cal_snr_weight(each_misfit_window,
                                      snr_threshold[0], snr_threshold[1])
                wcc = cal_cc_weight(each_misfit_window,
                                    cc_threshold[0], cc_threshold[1])
                wdeltat = cal_deltat_weight(each_misfit_window,
                                            deltat_threshold[0], deltat_threshold[1])
                # geographical and category weights are filled in below
                weights_for_all[net_sta][category].append(
                    Weight(wsnr, wcc, wdeltat, None, None))
    if(not calculate_basic):
        # * get the station list for the geographical weighting (remove all 0 cases)
        used_geographical_net_sta_list = []
        for net_sta in weights_for_all:
            status = False
            for category in weights_for_all[net_sta]:
                for each_weight in weights_for_all[net_sta][category]:
                    # a station is usable if any window has all three
                    # weights non-zero
                    wsnr_cc_deltat = each_weight.snr * each_weight.cc * each_weight.deltat
                    if (wsnr_cc_deltat > 0):
                        status = True
            if (status):
                used_geographical_net_sta_list.append(net_sta)
        # build stations_mapper
        stations_mapper = get_stations_mapper(stations)
        # get geographical weighting and update
        geographical_weight_dict = cal_geographical_weight(
            stations_mapper, used_geographical_net_sta_list, list(weights_for_all.keys()))
        for net_sta in weights_for_all:
            for category in weights_for_all[net_sta]:
                for index, each_weight in enumerate(weights_for_all[net_sta][category]):
                    # Weight appears to be a namedtuple: _replace returns
                    # an updated copy
                    weights_for_all[net_sta][category][index] = each_weight._replace(
                        geographical=geographical_weight_dict[net_sta])
        # * get the number of items for each category
        # firstly we get all the category names
        rep_net_sta = list(weights_for_all.keys())[0]
        all_categories = list(weights_for_all[rep_net_sta].keys())
        # here we should weight based on number of windows but not the number of usable stations.
        number_each_category = {}
        for each_category in all_categories:
            number_each_category[each_category] = 0
            for net_sta in weights_for_all:
                for each_weight in weights_for_all[net_sta][each_category]:
                    # if this window is usable or not
                    wsnr_cc_deltat = each_weight.snr * each_weight.cc * each_weight.deltat
                    if (wsnr_cc_deltat > 0):
                        number_each_category[each_category] += 1
        # get category weighting and update
        # here we should weight based on number of windows but not the number of usable stations.
        # * collect all events information
        number_each_category_all_events = mpi_collect_category_number(
            number_each_category, print_info=print_info)
        weight_each_category = {}
        for each_category in number_each_category_all_events:
            weight_each_category[each_category] = cal_category_weight(
                number_each_category_all_events[each_category])
        for net_sta in weights_for_all:
            for category in weights_for_all[net_sta]:
                # * we will not use the category that not existing in this event
                for index, each_weight in enumerate(weights_for_all[net_sta][category]):
                    weights_for_all[net_sta][category][index] = each_weight._replace(
                        category=weight_each_category[category])
    return weights_for_all
def tree_feature_importance(tree_model, X_train):
    """
    Print a ranking of feature importances for a fitted tree ensemble and
    plot them as a bar chart.

    Parameters
    ----------
    tree_model: the trained model instance. Must have feature_importances_
        and estimators_ attributes (e.g. a fitted random forest).
    X_train: DataFrame that the model was trained on; its column names are
        used as feature labels.

    Returns
    -------
    matplotlib.figure.Figure
        The figure holding the bar chart (also displayed via plt.show()).
    """
    importances = tree_model.feature_importances_
    # Std-dev of each feature's importance across the ensemble members,
    # used as error bars on the plot.
    std = np.std([tree.feature_importances_ for tree in tree_model.estimators_],
                 axis=0)
    # Feature indices sorted by decreasing importance.
    indices = np.argsort(importances)[::-1]
    features = X_train.columns.to_list()
    # Print the feature ranking
    print("Feature ranking:")
    print()
    ordered_features = []
    for rank in range(X_train.shape[1]):
        print(f'{rank + 1}. {features[indices[rank]]}, {importances[indices[rank]]}')
        ordered_features.append(features[indices[rank]])
    print()
    # Plot the impurity-based feature importances of the forest
    fig = plt.figure()
    plt.title("Feature importances")
    plt.bar(range(X_train.shape[1]), importances[indices],
            color="r", yerr=std[indices], align="center")
    plt.xticks(range(X_train.shape[1]), ordered_features, rotation=90)
    plt.xlim([-1, X_train.shape[1]])
    plt.show()
    return fig
def chhop_microseconds(delta: timedelta) -> timedelta:
    """
    Return *delta* with its microseconds component dropped.

    :param delta: the timedelta to truncate
    :return: an equal timedelta whose microseconds are zero
    """
    # timedelta normalizes into (days, seconds, microseconds); rebuilding
    # without the microseconds field is the same as subtracting it.
    return timedelta(days=delta.days, seconds=delta.seconds)
def __renumber(dictionary):
    """Renumber the values of the dictionary from 0 to n.

    Values are relabelled with consecutive integers in order of first
    appearance; equal values map to the same new label.  Returns a new
    dict with the same keys.
    """
    relabel = {}
    result = dictionary.copy()
    for key, old_value in dictionary.items():
        if old_value not in relabel:
            # First time we see this value: assign the next fresh label.
            relabel[old_value] = len(relabel)
        result[key] = relabel[old_value]
    return result
def get_user_list():
    """
    Return the user list when the authenticated user has admin
    permission; otherwise return a 403 error payload.
    :return:
    """
    if requires_perm() is not True:
        # Guard clause: no admin permission -> forbidden.
        return jsonify({'message': 'You are not '
                        'permitted to access this resource',
                        'successful': False}), 403
    return jsonify({'user_list': USER_LIST,
                    'successful': True}), 200
def rm_words(user_input, stop_words):
    """Sanitize *user_input* in place by dropping every stop word.

    All occurrences of words in *stop_words* are removed from the list,
    order of the remaining words is preserved, and the (mutated) list is
    returned for convenience.
    """
    banned = set(stop_words)
    # Slice assignment mutates the caller's list, like repeated .remove()
    # did, but in one linear pass.
    user_input[:] = [word for word in user_input if word not in banned]
    return user_input
def getValueForCoordinate(inputFile, lon, lat, noDataAsNone):
    """
    Reads the pixel value of a GeoTIFF for a geographic coordinate
    :param inputFile: full path to input GeoTIFF file
    :type inputFile: str
    :param lon: longitude
    :type lon: float
    :param lat: latitude
    :type lat: float
    :param noDataAsNone: switch to decide wether to return NODATA as None or the value stored in the GeoTIFF.
    :type noDataAsNone: bool
    :returns: pixel value of coordinate
    :rtype: float
    """
    inputRaster = gdal.Open(inputFile)
    geotransform = inputRaster.GetGeoTransform()
    # Band 1 only; the NODATA sentinel is read from the band metadata.
    rb = inputRaster.GetRasterBand(1)
    noDataVal = rb.GetNoDataValue()
    # this converts from map coordinates to raster coordinates
    # this will only work for CRS without rotation! If this is needed, we have to do some matrix
    # multiplication magic here ;-)
    # NOTE(review): GDAL's geotransform[0]/[1] are the x-origin/x-pixel
    # size (longitude for a geographic CRS), yet ``lat`` feeds px and
    # ``lon`` feeds py below — this looks like a lon/lat swap; confirm
    # against the callers before changing.
    px = int((lat - geotransform[0]) / geotransform[1])  # (pos - origin) / pixelsize
    py = int((lon - geotransform[3]) / geotransform[5])
    # Read a single pixel as a raw 8-byte double.
    structval = rb.ReadRaster(px, py, 1, 1, buf_type=gdal.GDT_Float64)
    val = struct.unpack('d', structval)  # this unpacks a C data structure into a Python value.
    if noDataAsNone and val[0] == noDataVal:
        return None
    else:
        return val[0]
def convert_xrandr_to_index(xrandr_val: float):
    """
    Convert an xrandr brightness value to a 0-based index.

    :param xrandr_val: usually comes from the config value directly
        (retrieved from a .ini file), e.g. 0.2
    :return: an index representation of the brightness level, useful for
        switch functions that dispatch on indexes rather than values.
        Example: 0.2 is converted to 1
    """
    scaled = xrandr_val * 10
    return int(scaled - 1)
def test_get_secret_value_string_metadata_only():
    """
    get_secret_value on a string secret while metadata_only is enabled:
    the metadata must validate and the response payload must not be
    captured in the trace event.
    """
    trace_factory.metadata_only = True
    sm_client = boto3.client('secretsmanager', region_name='us-west-1')
    sm_client.create_secret(Name=TEST_SECRET_NAME,
                            SecretString=TEST_SECRET_VALUE_STRING)
    sm_client.get_secret_value(SecretId=TEST_SECRET_NAME)
    last_event = _get_active_trace().events[-1]
    _validate_get_secret_value_metadata(last_event)
    # metadata_only means the secret payload must never be recorded.
    assert 'Response' not in last_event.resource['metadata']
def get_message(name, value):
    """Provides the message for a standard Python exception."""
    # Prefer the exception's .msg attribute (e.g. SyntaxError) when present.
    text = value.msg if hasattr(value, "msg") else value
    return f"{name}: {text}\n"
def calculate_page_info(offset, total_students):
    """
    Sanitize the requested offset and compute paging metadata.

    Calculates the query offsets of the next and previous pages, the
    current page number, and the total number of pages.

    :param offset: requested offset into the student list (int or digit
        string); invalid or out-of-range values fall back to 0
    :param total_students: total number of students being paged
    :return: dict with previous_offset, next_offset, page_num, offset,
        total_pages
    """
    # Validate offset: must be a non-negative int (or digit string) that
    # points inside the student list; otherwise start from the beginning.
    if not (isinstance(offset, int) or offset.isdigit()) or int(offset) < 0 or int(offset) >= total_students:
        offset = 0
    else:
        offset = int(offset)
    # calculate offsets for next and previous pages.
    next_offset = offset + MAX_STUDENTS_PER_PAGE_GRADE_BOOK
    previous_offset = offset - MAX_STUDENTS_PER_PAGE_GRADE_BOOK
    # calculate current page number (1-based).  Floor division keeps the
    # page number an int; plain "/" produced a float on Python 3.
    page_num = ((offset // MAX_STUDENTS_PER_PAGE_GRADE_BOOK) + 1)
    # calculate total number of pages (at least 1, even with no students).
    total_pages = int(math.ceil(float(total_students) / MAX_STUDENTS_PER_PAGE_GRADE_BOOK)) or 1
    if previous_offset < 0 or offset == 0:
        # We are at first page, so there's no previous page.
        previous_offset = None
    if next_offset >= total_students:
        # We've reached the last page, so there's no next page.
        next_offset = None
    return {
        "previous_offset": previous_offset,
        "next_offset": next_offset,
        "page_num": page_num,
        "offset": offset,
        "total_pages": total_pages
    }
def find_files(topdirs, py=False):
    """Collect files beneath every directory in *topdirs*.

    Returns a data_files-style list of ``(directory, [paths])`` tuples,
    with source and destination directories the same.  When *py* is true,
    only ``.py`` files are included.
    """
    collected = []
    for topdir in topdirs:
        for dirpath, _subdirs, filenames in os.walk(topdir):
            matches = [os.path.join(dirpath, name)
                       for name in filenames
                       if name.endswith('.py') or not py]
            collected.append((dirpath, matches))
    return collected
def get_api_map(level,SYSTEM):
    """Search API from rules, if match the pattern then we said it is API.

    Iterates the Flask url map and, for every rule whose path matches the
    access pattern of ``level`` and whose view either carries no
    ACCESS_SYSTEM_LIST restriction or lists ``SYSTEM``, yields
    ``(rule string, endpoint name, HTTP methods, endpoint api doc)``.

    :param level: one of "LEVEL1" / "LEVEL2" / "LEVEL3"
    :param SYSTEM: system name used to filter restricted endpoints
    """
    level_config = {
        # Highest privilege (everything under /api/ except APIDOC)
        "LEVEL1":r'/api/(?!APIDOC)',
        # Can only see the generic system APIs
        "LEVEL2":r'/api/<SYSTEM>',
        # Can only see APIDOC
        "LEVEL3":r'/api/APIDOC'
    }
    # regexStr = r'/api/{}/+'.format(system)
    for rule in appPaaS.url_map.iter_rules():
        if re.search(level_config[level],str(rule)):
            func = appPaaS.view_functions[rule.endpoint]
            # Yield when the view's api-doc is a dict whose
            # ACCESS_SYSTEM_LIST is absent or contains SYSTEM, or when the
            # api-doc is a plain string (no access restriction possible).
            if (isinstance(_get_api_doc(func),dict) and \
                    ((_get_api_doc(func).get("ACCESS_SYSTEM_LIST") is None) or \
                    (_get_api_doc(func).get("ACCESS_SYSTEM_LIST") is not None and SYSTEM in _get_api_doc(func)["ACCESS_SYSTEM_LIST"]))\
                    ) or (isinstance(_get_api_doc(func),str)):
                yield str(rule), rule.endpoint, rule.methods, endpoint_api(rule.endpoint,onlydoc=True)
def _create_argument_parser():
    """Creates the command line arg parser.

    Arguments can also be supplied from a file via the ``@argfile``
    syntax (``fromfile_prefix_chars``).
    """
    parser = argparse.ArgumentParser(description='create a zip file',
                                     fromfile_prefix_chars='@')
    parser.add_argument('-o', '--output', type=str,
                        help='The output zip file path.')
    parser.add_argument(
        '-d', '--directory', type=str, default='/',
        help='An absolute path to use as a prefix for all files in the zip.')
    # ZIP_EPOCH presumably corresponds to Jan 1, 1980, the earliest
    # timestamp the zip format supports — defined elsewhere in this file.
    parser.add_argument(
        '-t', '--timestamp', type=int, default=ZIP_EPOCH,
        help='The unix time to use for files added into the zip. values prior to'
        ' Jan 1, 1980 are ignored.')
    parser.add_argument('--stamp_from', default='',
                        help='File to find BUILD_STAMP in')
    parser.add_argument(
        '-m', '--mode',
        help='The file system mode to use for files added into the zip.')
    parser.add_argument('--manifest',
                        help='manifest of contents to add to the layer.',
                        required=True)
    # Positional arguments: each entry maps a source path to a zip path.
    parser.add_argument(
        'files', type=str, nargs='*',
        help='Files to be added to the zip, in the form of {srcpath}={dstpath}.')
    return parser
def download_office(load: bool = True):  # pragma: no cover
    """Download office dataset.
    Parameters
    ----------
    load : bool, optional
        Load the dataset after downloading it when ``True``. Set this
        to ``False`` and only the filename will be returned.
    Returns
    -------
    pyvista.StructuredGrid or str
        DataSet or filename depending on ``load``.
    Examples
    --------
    >>> from pyvista import examples
    >>> dataset = examples.download_office()
    >>> dataset.contour().plot()
    See :ref:`clip_with_plane_box_example` for an example using this
    dataset.
    """
    # Delegates to the shared download helper; ``load`` decides whether
    # the file is parsed into a dataset or just fetched to disk.
    return _download_and_read('office.binary.vtk', load=load)
def test_duplicated_topics(host):
    """
    Declaring the same topic twice in one call must be rejected without
    applying any change.
    """
    # Given
    duplicated_topic_name = get_topic_name()

    def build_duplicate_entry():
        # Two identical entries sharing one name -> a duplicate definition.
        entry = topic_defaut_configuration.copy()
        entry.update({
            'name': duplicated_topic_name,
            'options': {
                'retention.ms': 66574936,
                'flush.ms': 564939
            }
        })
        return entry

    topic_configuration = {
        'topics': [build_duplicate_entry(), build_duplicate_entry()]
    }
    # When
    results = ensure_kafka_topics(host, topic_configuration)
    time.sleep(0.3)
    # Then
    for result in results:
        assert not result['changed']
        assert 'duplicated topics' in result['msg']
def hazel_custom_package_hackage(
        package_name,
        version,
        sha256 = None,
        build_file = None,
        build_file_content = None):
    """Generate a repo for a Haskell package fetched from Hackage.
    Args:
      package_name: string, package name.
      version: string, package version.
      sha256: string, SHA256 hash of archive.
      build_file: string,
        the file to use as the BUILD file for this package.
        Defaults to //third_party/haskel:BUILD.<package_name> if
        neither build_file nor build_file_content are specified.
        This attribute is a label relative to the main workspace.
        build_file and build_file_content are mutually exclusive.
      build_file_content: string,
        the content for the BUILD file for this repository.
        Will fall back to build_file if not specified.
        build_file and build_file_content are mutually exclusive.
    """
    # Hackage archives are named <name>-<version>.tar.gz and unpack into a
    # directory of the same name, hence the strip_prefix below.
    package_id = package_name + "-" + version
    url = "http://hackage.fpcomplete.com/package/{}.tar.gz".format(
        package_id,
    )
    # Default BUILD file: //third_party/haskell:BUILD.<package_name>.
    if not build_file and not build_file_content:
        build_file = "//third_party/haskell:BUILD.{0}".format(package_name)
    http_archive(
        name = hazel_workspace(package_name),
        build_file = build_file,
        build_file_content = build_file_content,
        sha256 = sha256,
        strip_prefix = package_id,
        urls = [url],
    )
def register(request):
    """
    Account registration view.

    GET renders the registration form (with captcha).  POST validates the
    submitted data, creates the user with a hashed password, e-mails a
    confirmation code, and renders the login template; validation errors
    re-render the registration form with a message.
    """
    message = ""
    if request.session.get('is_login', None):
        # Already logged in — no need to register again.
        return redirect('/account/')
    if request.method == 'POST':
        username = request.POST.get('username')
        email = request.POST.get('email')
        password1 = request.POST.get('password1')
        password2 = request.POST.get('password2')
        message = "请检查填写的内容!"
        # _makesure_password presumably returns a truthy error message
        # when the two passwords are missing/mismatched — TODO confirm.
        if _makesure_password(password1, password2):
            message = _makesure_password(password1, password2)
        else:
            same_username = User.objects.filter(username=username)
            same_email = User.objects.filter(email=email)
            if same_username:
                # Username already exists.
                message = '用户名已经存在!'
            elif same_email:
                # E-mail address already registered.
                message = '该邮箱已经被注册了!'
            else:
                new_user = User()
                new_user.username = username
                new_user.email = email
                # Only the hash of the password is stored.
                new_user.password = _hash_code(password1)
                new_user.save()
                # Send the confirmation code and direct the user to log in.
                code = _make_confirm_string(new_user)
                _send_email(email, code)
                message = '请前往邮箱进行确认!'
                return render(request, 'account/login.html', {'message': message})
    captcha_form = forms.captchaForm(request.POST)
    content = {'captcha_form': captcha_form,
               'message': message, 'page_register': True}
    return render(request, 'account/register.html', content)
def asset_type_check(asset):
    """
    Verify asset 'type' key abides by naming convention standards.
    Naming Convention Rules:
        - Asset Types are broken into three categories: <vendor>_<service>_<resource>
            - Example: aws_s3_bucket, gcp_iam_user, github_repository (service ommitted as there isn't any)
        - Always use common abbreviations for a service if there are an any.
        - For multiple words, only use camelCase. This is only allowed for services and resources.
        - For vendors, always use lowercase. Even if the vendor might capitalize their own name in parts. use 'github' instead of GitHub.
    Assets Types aren't allowed to have:
        - Spaces.
        - Hyphens.
        - Uppercase first words.
        - No less than vendor + resource.
        - No more than vendor + service + resource.

    :param asset: dict with at least a 'type' key to validate
    :raises ValueError: when the asset type violates any rule above
    """
    asset_type = asset['type']
    # Rule: no spaces.
    if " " in asset_type:
        raise ValueError(
            "asset types aren't allowed to have spaces. Violated on asset type '{type}'. Information: https://developer.vectrix.io/dev/components/output".format(type=asset_type))
    # Rule: no hyphens (snake_case only).
    if "-" in asset_type:
        raise ValueError(
            "asset types aren't allowed to have hyphens. Violated on asset type '{type}'. Information: https://developer.vectrix.io/dev/components/output".format(type=asset_type))
    # Rule: at least vendor + resource (so at least one underscore).
    if "_" not in asset_type:
        raise ValueError(
            "asset types require at least a vendor and a resource specification following snake case (ex. github_repo). Violated on asset type '{type}'. Standard asset type structure is (vendor_service_resource). Information: https://developer.vectrix.io/dev/components/output".format(type=asset_type))
    split_asset_type = asset_type.split("_")
    # Rule: at most vendor + service + resource (three segments).
    if len(split_asset_type) > 3:
        raise ValueError(
            "asset types are only allowed to follow the structure: (vendor_service_resource) (ex. aws_s3_bucket) (service only applies where available). Violated on asset type '{type}'. Information: https://developer.vectrix.io/dev/components/output".format(type=asset_type))
    for index, word in enumerate(split_asset_type):
        # Rule: each segment has at least two characters.
        if len(word) < 2:
            raise ValueError(
                "asset types need to have at least two characters per vendor, service, resource instantiantion (ex. aws_s3_bucket). Violated on asset type '{type}'. Information: https://developer.vectrix.io/dev/components/output".format(type=asset_type))
        if index == 0:
            # Rule: vendor segment (index 0) must be entirely lowercase.
            for char in word:
                if char.isupper():
                    raise ValueError(
                        "asset type vendor instantiation is required to be all lowercase. (ex. aws_iam_role). Violated on asset type '{type}'. Information: https://developer.vectrix.io/dev/components/output".format(type=asset_type))
        # Rule: service/resource segments must be camelCase (no leading
        # uppercase).  Note: for index 0 this can never fire after the
        # all-lowercase check above.
        if word[0].isupper():
            raise ValueError(
                "asset type service and resource instantiations are required to follow camelCase for multiple words. (ex. aws_iam_accessKey). Violated on asset type '{type}'. Information: https://developer.vectrix.io/dev/components/output".format(type=asset_type))
def test_service_span(encoding):
    """Tests that zipkin_attrs can be passed in"""
    mock_transport_handler, mock_logs = mock_logger()
    # Pre-existing trace context, as if this span continues a client span.
    zipkin_attrs = ZipkinAttrs(
        trace_id="0", span_id="1", parent_span_id="2", flags="0", is_sampled=True,
    )
    with zipkin.zipkin_span(
        service_name="test_service_name",
        span_name="service_span",
        zipkin_attrs=zipkin_attrs,
        transport_handler=mock_transport_handler,
        encoding=encoding,
    ):
        pass
    # The transport received one JSON batch; the single span in it must
    # carry the ids we supplied.
    span = json.loads(mock_logs[0])[0]
    assert span["name"] == "service_span"
    assert span["traceId"] == "0"
    assert span["id"] == "1"
    assert span["parentId"] == "2"
    if encoding == Encoding.V1_JSON:
        # Spans continued on the server don't log timestamp/duration, as it's
        # assumed the client part of the pair will log them.
        assert "timestamp" not in span
        assert "duration" not in span
    elif encoding == Encoding.V2_JSON:
        # V2 marks continued spans with the "shared" flag instead.
        assert span["shared"] is True
def enter(ctx, work_dir):
    """
    Enters a running pde container.
    """
    # Assemble the podman exec command piecewise, then join with spaces
    # (produces the same command string as plain concatenation).
    parts = ["podman", "exec", "-it"]
    if work_dir is not None:
        logging.info("Using the [{}] working directory".format(work_dir))
        parts += ["--workdir", "{}".format(work_dir)]
    parts += ["{}".format(ctx.obj['pdeName']),
              "{}".format(ctx.obj['pde']['interactiveShell'])]
    podman_cmd = " ".join(parts)
    logging.info("using podman command:\n-----\n" + podman_cmd + "\n-----")
    click.echo("Entering {}".format(ctx.obj['pdeName']))
    click.echo("-------------------------------------------")
    sys.stdout.flush()
    os.system(podman_cmd)
    sys.stdout.flush()
    click.echo("-------------------------------------------")
    click.echo("Leaving {}".format(ctx.obj['pdeName']))
def cosh(x):
    """Evaluates the hyperbolic cos of an interval"""
    np = import_module('numpy')
    if isinstance(x, (int, float)):
        # Degenerate (point) interval.
        point = np.cosh(x)
        return interval(point, point)
    if isinstance(x, interval):
        if x.start < 0 and x.end > 0:
            # Interval straddles zero: the minimum is cosh(0) == 1.
            upper = max(np.cosh(x.start), np.cosh(x.end))
            return interval(1, upper, is_valid=x.is_valid)
        # Monotonic on either side of zero: evaluate at both ends.
        return interval(np.cosh(x.start), np.cosh(x.end), is_valid=x.is_valid)
    raise NotImplementedError
def tree_intersection(tree_one, tree_two):
    """Checks for duplicate values between two trees and returns those values as a set.

    Walks both trees pre-order, hashes the first tree's values, then
    collects every value of the second tree that was already seen.

    NOTE(review): when no duplicates exist this returns the *string*
    'There are no duplicates.' instead of an empty set — callers must
    handle the mixed return type; consider always returning a set.
    """
    first_values = []
    second_values = []
    table = HashTable()
    dupes = set([])
    # pre_order(visit) presumably calls ``visit`` with each node value in
    # pre-order — TODO confirm against the tree implementation.
    tree_one.pre_order(first_values.append)
    tree_two.pre_order(second_values.append)
    for value in first_values:
        table.set(value, value)
    for value in second_values:
        if table.get(value):
            dupes.add(value)
    if len(dupes) == 0:
        return 'There are no duplicates.'
    return dupes
def saveDataset(X_train, Y_train, X_test, Y_test, path):
    """
    Save the four dataset splits as .npy binary files under *path*.
    """
    split_files = {
        "X_train.npy": X_train,
        "Y_train.npy": Y_train,
        "X_test.npy": X_test,
        "Y_test.npy": Y_test,
    }
    for file_name, array in split_files.items():
        np.save(os.path.join(path, file_name), array)
def new():
    """Deliver new-question interface."""
    # An empty question_id tells the template this is a brand-new question.
    return render_template('questionNew.html', question_id='')
def merge_dict_recursive(base, other):
    """Merge *other* into *base* in place and return *base*.

    When both dicts hold a dict under the same key, those sub-dicts are
    merged recursively; otherwise the value from *other* wins.

    >>> base = {'a': 1, 'b': {'c': 3}}
    >>> other = {'x': 4, 'b': {'y': 5}}
    >>> want = {'a': 1, 'x': 4, 'b': {'c': 3, 'y': 5}}
    >>> got = merge_dict_recursive(base, other)
    >>> got == want
    True
    >>> base == want
    True
    """
    for key, value in other.items():
        both_are_dicts = (isinstance(value, dict)
                          and key in base
                          and isinstance(base[key], dict))
        if both_are_dicts:
            base[key] = merge_dict_recursive(base[key], value)
        else:
            base[key] = value
    return base
def two_points_line(feature):
    """Convert a Polyline to a Line composed of only two points."""
    coords = feature['geometry']['coordinates']
    # Pair each coordinate with its successor -> one two-point segment per
    # consecutive pair.
    return [
        geojson.Feature(geometry=geojson.LineString([start, end]))
        for start, end in zip(coords, coords[1:])
    ]
def main():
    """
    Main runs the astroid lookup program

    Reads an asteroid map from the file given as the first CLI argument,
    finds the position that can see the most asteroids, then repeatedly
    shoots down everything in line of sight and reports the coordinates
    of the 200th asteroid destroyed.
    """
    astroid_map = []
    # Each map row becomes a list of single-character cells.
    with open(sys.argv[1]) as map_file:
        for line in map_file:
            astroid_map.append([x for x in line.strip()])
    marked = mark_astroids(astroid_map)
    best = {"coordinates": (0, 0), "sees": 0}
    # Check the line from all marked coordiantes and the number of astroids
    # visible from that point.
    for coordinates in marked:
        all_coords = get_line_of_sight(astroid_map, coordinates)
        sees = len(all_coords)
        if sees > best["sees"] or best["sees"] == 0:
            best = {"coordinates": coordinates, "sees": sees}
    print(
        "most stars to be seen: {}, from {}".format(
            best["sees"], best["coordinates"]
        )
    )
    shots = 0
    animate = False      # set True to watch each sweep in the terminal
    two_hundred = None   # coordinates of the 200th destroyed asteroid
    # Remove the best spot to ensure has_astroids report correct.
    best_x, best_y = best["coordinates"]
    astroid_map[best_x][best_y] = SPACE
    while has_astroids(astroid_map):
        # Each pass destroys everything currently in direct line of sight,
        # sweeping in reverse-sorted key order.
        all_coords = get_line_of_sight(astroid_map, best["coordinates"])
        sorted_coords = sorted(all_coords.items())
        sorted_coords.reverse()
        for _, data in sorted_coords:
            row, col = data["coordinates"]
            shots += 1
            if shots == 200:
                # Reported as (col, row), i.e. x,y order.
                two_hundred = (col, row)
            astroid_map[row][col] = DESTROYED
            if animate:
                print_map(astroid_map, best["coordinates"])
                time.sleep(0.05)
    print("200th astroid shot down is at {}".format(two_hundred))
def transform_world_to_camera(poses_set, cams, ncams=4):
    """
    Project 3d poses from world coordinate to camera coordinate system
    Args
        poses_set: dictionary with 3d poses, keyed by (subject, action, seqname)
        cams: dictionary with camera parameters, keyed by (subject, camera index)
        ncams: number of cameras per subject
    Return:
        t3d_camera: dictionary with 3d poses in camera coordinate
    """
    t3d_camera = {}
    for t3dk in sorted(poses_set.keys()):
        subj, action, seqname = t3dk
        t3d_world = poses_set[t3dk]
        # Use a dedicated loop index: the original reused `c` both as the loop
        # variable and as the camera-centre element of the unpacked tuple,
        # shadowing the index inside the body.
        for cam_idx in range(ncams):
            R, T, f, c, k, p, name = cams[(subj, cam_idx + 1)]
            camera_coord = world_to_camera_frame(np.reshape(t3d_world, [-1, 3]), R, T)
            camera_coord = np.reshape(camera_coord, [-1, len(H36M_NAMES) * 3])
            sname = seqname[:-3] + "." + name + ".h5"  # e.g.: Waiting 1.58860488.h5
            t3d_camera[(subj, action, sname)] = camera_coord
    return t3d_camera
def generate_static():
    """
    Generate CSS and JavaScript files
    """
    # Mirror the local `static` tree into the deployment directory.
    destination = os.path.join(_deploy_dir, 'static')
    util.copytree('static', destination)
def cwebp(input_image: str, output_image: str, option: str,
          logging: str = "-v", bin_path: str = None) -> Dict:
    """Convert an image (e.g. .jpeg, .png) to .webp format.

    :param input_image: path of the source image
    :param output_image: path of the .webp file to produce
    :param option: cwebp options string, e.g. quality between 0 and 100
    :param logging: verbosity flag passed to cwebp (default ``-v``)
    :param bin_path: optional path to the cwebp binary
    :return: dict with ``exit_code``, ``stdout``, ``stderr`` and ``command``
    """
    # NOTE(review): the command runs through the shell with unescaped
    # interpolation; callers must not pass untrusted strings here.
    command = f"{getcwebp(bin_path=bin_path)} {option} {input_image} -o {output_image} {logging}"
    completed = subprocess.run(command, shell=True, stdin=None,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return {
        'exit_code': completed.returncode,
        'stdout': completed.stdout,
        'stderr': completed.stderr,
        'command': command,
    }
def assert_exists(exists, Model, **kwargs):
    """Assert that an object matching ``kwargs`` exists (or not) for ``Model``.

    Raises AssertionError when the actual existence state disagrees with the
    expected ``exists`` flag.
    """
    found = Model.objects.filter(**kwargs).exists()
    if found == exists:
        return
    state = "doesn't exist" if exists else "exists"
    raise AssertionError("Object with arguments {} {}!".format(kwargs, state))
def dbRegister(app):
    """
    Register the MongoDB database on the Flask app.

    Looks up the current environment, copies its host/port/database name
    from ``configObj`` into the app config, and initialises the shared
    ``db`` (flask-mongoengine style) object.
    """
    env = getEnv()
    app.config['MONGODB_SETTINGS'] = {
        'db': configObj[env].DBNAME,
        'host': configObj[env].DBHOST,
        'port': configObj[env].DBPORT
    }
    db.init_app(app)
def div66():
    """
    Return the module-level ``divider66`` separator value.

    :return: divider66
    """
    # `divider66` is a constant defined elsewhere in this module.
    return divider66
def extract_square_from_file(image_number=1,
                             image_dir='/home/james/Documents/projects/sudoku/img/'):
    """Given a number of the image file return a cropped sudoku.

    Args:
        image_number: numeric name of the image file (``<n>.jpg``).
        image_dir: directory containing the sudoku images (must end with a
            path separator). Defaults to the previously hard-coded location
            for backward compatibility.

    Returns:
        The extracted sudoku grid.
    """
    image_path = image_dir + str(image_number) + '.jpg'
    binary = read_binary(image_path)
    threshold = get_threshold(binary)
    square = get_square_coordinates(threshold)
    game = extract_sudoku(square, threshold)
    return game
def CSV2GRID(strPathInCSV, strPathOutASC, intCol):
    """ Function CSV2GRID
    args:
        Command Syntax: CSV2GRID [switches] inputfile column outputfile
    """
    # Build the command line for the FUSION CSV2GRID tool.
    executable = strPathFuInstall + os.sep + "CSV2GRID"
    return ' '.join([executable, strPathInCSV, str(intCol), strPathOutASC])
def get_object_output(bucket: Optional[pulumi.Input[str]] = None,
                      key: Optional[pulumi.Input[str]] = None,
                      range: Optional[pulumi.Input[Optional[str]]] = None,
                      tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
                      version_id: Optional[pulumi.Input[Optional[str]]] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetObjectResult]:
    """
    The S3 object data source allows access to the metadata and
    _optionally_ (see below) content of an object stored inside S3 bucket.
    > **Note:** The content of an object (`body` field) is available only for objects which have a human-readable `Content-Type` (`text/*` and `application/json`). This is to prevent printing unsafe characters and potentially downloading large amount of data which would be thrown away in favour of metadata.
    ## Example Usage
    The following example retrieves a text object (which must have a `Content-Type`
    value starting with `text/`) and uses it as the `user_data` for an EC2 instance:
    ```python
    import pulumi
    import pulumi_aws as aws
    bootstrap_script = aws.s3.get_object(bucket="ourcorp-deploy-config",
        key="ec2-bootstrap-script.sh")
    example = aws.ec2.Instance("example",
        instance_type="t2.micro",
        ami="ami-2757f631",
        user_data=bootstrap_script.body)
    ```
    The following, more-complex example retrieves only the metadata for a zip
    file stored in S3, which is then used to pass the most recent `version_id`
    to AWS Lambda for use as a function implementation. More information about
    Lambda functions is available in the documentation for
    [`lambda.Function`](https://www.terraform.io/docs/providers/aws/r/lambda_function.html).
    ```python
    import pulumi
    import pulumi_aws as aws
    lambda_ = aws.s3.get_object(bucket="ourcorp-lambda-functions",
        key="hello-world.zip")
    test_lambda = aws.lambda_.Function("testLambda",
        s3_bucket=lambda_.bucket,
        s3_key=lambda_.key,
        s3_object_version=lambda_.version_id,
        role=aws_iam_role["iam_for_lambda"]["arn"],
        handler="exports.test")
    ```
    :param str bucket: The name of the bucket to read the object from. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified
    :param str key: The full path to the object inside the bucket
    :param Mapping[str, str] tags: A map of tags assigned to the object.
    :param str version_id: Specific version ID of the object returned (defaults to latest version)
    """
    # NOTE(review): generated Pulumi provider stub -- the parameter name
    # `range` shadows the Python builtin but is fixed by the provider schema.
    # The actual implementation is injected by the Pulumi runtime.
    ...
def sequence_extractor(graph, path):
    """
    returns the sequence of the path
    :param graph: a graph object
    :param path: a list of nodes ordered according to the path
    :return: sequence of the path (empty string if the path is invalid)
    """
    # check if path exists
    if len(path) == 1:
        return graph.nodes[path[0]].seq
    elif not path_checker(graph, path):
        return ""
    # Seed the walk: the orientation of the first node is inferred from which
    # of its two ends (0 or 1) connects to the second node in the path.
    if graph.nodes[path[0]].in_direction(graph.nodes[path[1]].id, 0):
        direction = 0
        sequence = reverse_complement(graph.nodes[path[0]].seq)
    elif graph.nodes[path[0]].in_direction(graph.nodes[path[1]].id, 1):
        direction = 1
        sequence = graph.nodes[path[0]].seq
    # NOTE(review): if neither branch above matched, `direction`/`sequence`
    # would be unbound and the loop below would raise UnboundLocalError --
    # presumably path_checker() guarantees consecutive nodes are connected;
    # confirm that invariant.
    for i in range(len(path) - 1):
        current_node = graph.nodes[path[i]]
        next_node = graph.nodes[path[i+1]]
        if current_node.in_direction(next_node.id, direction):
            # Advance the walk, appending the non-overlapping suffix of the
            # next node in the proper orientation.
            direction, overlap = next_direction(current_node, next_node, direction)
            # if next direction is one this means current node connects to
            # next node from 0 so I don't need to take the reverse complement
            # Otherwise I need to
            if direction == 1:
                sequence += next_node.seq[overlap:]
            else:
                sequence += reverse_complement(next_node.seq)[overlap:]
    return sequence
def plot_matplotlib(
    tree: CassiopeiaTree,
    depth_key: Optional[str] = None,
    meta_data: Optional[List[str]] = None,
    allele_table: Optional[pd.DataFrame] = None,
    indel_colors: Optional[pd.DataFrame] = None,
    indel_priors: Optional[pd.DataFrame] = None,
    orient: Union[Literal["up", "down", "left", "right"], float] = 90.0,
    extend_branches: bool = True,
    angled_branches: bool = True,
    add_root: bool = False,
    figsize: Tuple[float, float] = (7.0, 7.0),
    colorstrip_width: Optional[float] = None,
    colorstrip_spacing: Optional[float] = None,
    clade_colors: Optional[Dict[str, Tuple[float, float, float]]] = None,
    internal_node_kwargs: Optional[Dict] = None,
    leaf_kwargs: Optional[Dict] = None,
    branch_kwargs: Optional[Dict] = None,
    colorstrip_kwargs: Optional[Dict] = None,
    continuous_cmap: Union[str, mpl.colors.Colormap] = "viridis",
    vmin: Optional[float] = None,
    vmax: Optional[float] = None,
    categorical_cmap: Union[str, mpl.colors.Colormap] = "tab10",
    value_mapping: Optional[Dict[str, int]] = None,
    ax: Optional[plt.Axes] = None,
    random_state: Optional[np.random.RandomState] = None,
) -> Tuple[plt.Figure, plt.Axes]:
    """Generate a static plot of a tree using Matplotlib.
    Args:
        tree: The CassiopeiaTree to plot.
        depth_key: The node attribute to use as the depth of the nodes. If
            not provided, the distances from the root is used by calling
            `tree.get_distances`.
        meta_data: Meta data to plot alongside the tree, which must be columns
            in the CassiopeiaTree.cell_meta variable.
        allele_table: Allele table to plot alongside the tree.
        indel_colors: Color mapping to use for plotting the alleles for each
            cell. Only necessary if `allele_table` is specified.
        indel_priors: Prior probabilities for each indel. Only useful if an
            allele table is to be plotted and `indel_colors` is None.
        orient: The orientation of the tree. Valid arguments are `left`, `right`,
            `up`, `down` to display a rectangular plot (indicating the direction
            of going from root -> leaves) or any number, in which case the
            tree is placed in polar coordinates with the provided number used
            as an angle offset. Defaults to 90.
        extend_branches: Extend branch lengths such that the distance from the
            root to every node is the same. If `depth_key` is also provided, then
            only the leaf branches are extended to the deepest leaf.
        angled_branches: Display branches as angled, instead of as just a
            line from the parent to a child.
        add_root: Add a root node so that only one branch connects to the
            start of the tree. This node will have the name `synthetic_root`.
        figsize: Size of the plot. Defaults to (7., 7.,)
        colorstrip_width: Width of the colorstrip. Width is defined as the
            length in the direction of the leaves. Defaults to 5% of the tree
            depth.
        colorstrip_spacing: Space between consecutive colorstrips. Defaults to
            half of `colorstrip_width`.
        clade_colors: Dictionary containing internal node-color mappings. These
            colors will be used to color all the paths from this node to the
            leaves the provided color.
        internal_node_kwargs: Keyword arguments to pass to `plt.scatter` when
            plotting internal nodes.
        leaf_kwargs: Keyword arguments to pass to `plt.scatter` when
            plotting leaf nodes.
        branch_kwargs: Keyword arguments to pass to `plt.plot` when plotting
            branches.
        colorstrip_kwargs: Keyword arguments to pass to `plt.fill` when plotting
            colorstrips.
        continuous_cmap: Colormap to use for continuous variables. Defaults to
            `viridis`.
        vmin: Value representing the lower limit of the color scale. Only applied
            to continuous variables.
        vmax: Value representing the upper limit of the color scale. Only applied
            to continuous variables.
        categorical_cmap: Colormap to use for categorical variables. Defaults to
            `tab10`.
        value_mapping: An optional dictionary containing string values to their
            integer mappings. These mappings are used to assign colors by
            calling the `cmap` with the designated integer mapping. By default,
            the values are assigned pseudo-randomly (whatever order the set()
            operation returns). Only applied for categorical variables.
        ax: Matplotlib axis to place the tree. If not provided, a new figure is
            initialized.
        random_state: A random state for reproducibility
    Returns:
        If `ax` is provided, `ax` is returned. Otherwise, a tuple of (fig, ax)
        of the newly initialized figure and axis.
    """
    # A numeric `orient` selects polar layout; string orients are rectangular.
    is_polar = isinstance(orient, (float, int))
    # Compute all layout geometry and annotation colors up front.
    (
        node_coords,
        branch_coords,
        node_colors,
        branch_colors,
        colorstrips,
    ) = place_tree_and_annotations(
        tree,
        depth_key,
        meta_data,
        allele_table,
        indel_colors,
        indel_priors,
        orient,
        extend_branches,
        angled_branches,
        add_root,
        colorstrip_width,
        colorstrip_spacing,
        clade_colors,
        continuous_cmap,
        vmin,
        vmax,
        categorical_cmap,
        value_mapping,
        random_state,
    )
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize, tight_layout=True)
        ax.set_axis_off()
    # Plot all nodes
    # First pass: nodes WITHOUT an explicit color (default black styling).
    _leaf_kwargs = dict(x=[], y=[], s=5, c="black")
    _node_kwargs = dict(x=[], y=[], s=0, c="black")
    _leaf_kwargs.update(leaf_kwargs or {})
    _node_kwargs.update(internal_node_kwargs or {})
    for node, (x, y) in node_coords.items():
        if node in node_colors:
            continue
        if is_polar:
            x, y = utilities.polar_to_cartesian(x, y)
        if tree.is_leaf(node):
            _leaf_kwargs["x"].append(x)
            _leaf_kwargs["y"].append(y)
        else:
            _node_kwargs["x"].append(x)
            _node_kwargs["y"].append(y)
    ax.scatter(**_leaf_kwargs)
    ax.scatter(**_node_kwargs)
    # Second pass: nodes WITH explicit colors (e.g. from clade_colors).
    _leaf_colors = []
    _node_colors = []
    _leaf_kwargs.update({"x": [], "y": []})
    _node_kwargs.update({"x": [], "y": []})
    for node, color in node_colors.items():
        x, y = node_coords[node]
        if is_polar:
            x, y = utilities.polar_to_cartesian(x, y)
        if tree.is_leaf(node):
            _leaf_kwargs["x"].append(x)
            _leaf_kwargs["y"].append(y)
            _leaf_colors.append(color)
        else:
            _node_kwargs["x"].append(x)
            _node_kwargs["y"].append(y)
            _node_colors.append(color)
    _leaf_kwargs["c"] = _leaf_colors
    _node_kwargs["c"] = _node_colors
    ax.scatter(**_leaf_kwargs)
    ax.scatter(**_node_kwargs)
    # Plot all branches
    # Same two-pass scheme: default-styled branches first, colored ones after.
    _branch_kwargs = dict(linewidth=1, c="black")
    _branch_kwargs.update(branch_kwargs or {})
    for branch, (xs, ys) in branch_coords.items():
        if branch in branch_colors:
            continue
        if is_polar:
            xs, ys = utilities.polars_to_cartesians(xs, ys)
        ax.plot(xs, ys, **_branch_kwargs)
    for branch, color in branch_colors.items():
        _branch_kwargs["c"] = color
        xs, ys = branch_coords[branch]
        if is_polar:
            xs, ys = utilities.polars_to_cartesians(xs, ys)
        ax.plot(xs, ys, **_branch_kwargs)
    # Colorstrips
    _colorstrip_kwargs = dict(linewidth=0)
    _colorstrip_kwargs.update(colorstrip_kwargs or {})
    for colorstrip in colorstrips:
        # Last element is text, but this can not be shown in static plotting.
        for xs, ys, c, _ in colorstrip.values():
            _colorstrip_kwargs["c"] = c
            if is_polar:
                xs, ys = utilities.polars_to_cartesians(xs, ys)
            ax.fill(xs, ys, **_colorstrip_kwargs)
    # Return (fig, ax) only when this call created the figure.
    return (fig, ax) if fig is not None else ax
def print_order(order: Order, user_id: int = 0):
    """
    Print an order receipt on the shop's cloud printer.

    :param order: the order to print
    :param user_id: id of the operator triggering the print; a print log
        entry is created when the print succeeds and ``user_id >= 0``
    :return: (success, message) tuple from the printer request
    """
    shop_id = order.shop.id
    shop = get_shop_by_shop_id(shop_id)
    receipt_config = get_receipt_by_shop_id(shop_id)
    printer = ylyPrinter()
    # Render the 58mm receipt template with order, shop and receipt config.
    template = jinja2.Template(ORDER_TPL_58)
    body = template.render(
        order=order,
        print_time=make_aware(datetime.datetime.now()).strftime("%Y-%m-%d %H:%M:%S"),
        shop=shop,
        receipt_config=receipt_config,
    )
    printer_config = get_printer_by_shop_id(shop_id)
    if not printer_config:
        return False, "请先添加打印机"
    # NOTE(review): vendor account id and API key are hard-coded in source --
    # these credentials should be moved to configuration/secrets.
    partner = "1693"  # vendor user ID
    apikey = "664466347d04d1089a3d373ac3b6d985af65d78e"  # API key
    timenow = str(int(time.time()))  # current unix timestamp (seconds)
    machine_code = printer_config.code  # printer terminal number, e.g. 520
    mkey = printer_config.key  # printer key, e.g. 110110
    if machine_code and mkey:
        # Request signature: upper-cased MD5 over the concatenated fields,
        # presumably per the printer vendor's signing scheme -- confirm.
        sign = "{}machine_code{}partner{}time{}{}".format(
            apikey, machine_code, partner, timenow, mkey
        )
        sign = hashlib.md5(sign.encode("utf-8")).hexdigest().upper()
    else:
        return False, "打印机配置错误"
    data = {
        "partner": partner,
        "machine_code": machine_code,
        "content": body,
        "time": timenow,
        "sign": sign,
    }
    success, msg = printer.send_request(data, receipt_config.copies)
    if success and user_id >= 0:
        # Record the print action in the order's audit log.
        log_info = {
            "order_num": order.order_num,
            "shop_id": order.shop.id,
            "operator_id": user_id,
            "operate_type": OrderLogType.PRINT,
        }
        create_order_log(log_info)
    return success, msg
def get_IoU_from_matches(match_pred2gt, matched_classes, ovelaps, num_classes=5):
    """
    if given an image, calculate the IoU of the segments in the image
    :param match_pred2gt: maps index of predicted segment to index of ground truth segment
        (a negative value marks an unmatched prediction)
    :param matched_classes: maps index of predicted segment to class number
    :param ovelaps: maps [predicted segment index, gt segment index] to the IoU value of the segments
    :param num_classes: number of classes; defaults to 5 to preserve the
        original hard-coded behaviour
    :return:
        1. IoUs - per-class lists of IoU values for all matched segments
        2. IoUs_classes - (num_classes, 1) array with the mean IoU per class
           (0 where a class has no matched segments)
    """
    IoUs = [[] for _ in range(num_classes)]
    match_pred2gt = match_pred2gt.astype(np.int32)
    for pred, gt in enumerate(match_pred2gt):
        if gt < 0:
            # Unmatched prediction: contributes nothing to any class.
            continue
        IoUs[matched_classes[pred]].append(ovelaps[pred, gt])
    # mean segments's IoU according to classes
    IoUs_classes = np.zeros((num_classes, 1))
    for class_idx, lst in enumerate(IoUs):
        if not lst:
            continue
        IoUs_classes[class_idx] = np.mean(np.array(lst))
    return IoUs, IoUs_classes
def check_audit_unchanged(results, platform):
    """Crash out if the audit in the result doesn't match the one in the
    platform"""
    from krun.audit import Audit
    # A matching audit means the machine is the same; nothing to do.
    if Audit(platform.audit) == results.audit:
        return
    fatal(
        "You have asked Krun to resume an interrupted benchmark. "
        "This is only valid if the machine you are using is "
        "identical to the one on which the last results were "
        "gathered, which is not the case.")
def id_queue(obs_list, prediction_url='http://plants.deep.ifca.es/api', shuffle=False):
    """
    Return a generator of identifications via a buffer.

    The identification query for the next observation is performed while the
    user is still viewing the current information.
    """
    # Call-form print works on both Python 2 and 3; the original used the
    # Python 2 print statement, which is a SyntaxError on Python 3.
    print("Generating the identification buffer ...")
    if shuffle:
        # NOTE(review): `indices` is shuffled but never applied to obs_list,
        # so this branch appears to have no effect -- confirm intent.
        indices = np.arange(len(obs_list))
        np.random.shuffle(indices)
    def gen(obs_list):
        for obs in obs_list:
            yield make_prediction(obs, prediction_url)
    return buffered_gen_threaded(gen(obs_list))
def periodogram(x, nfft=None, fs=1):
    """Compute the periodogram of the given signal, with the given fft size.
    Parameters
    ----------
    x : array-like
        input signal
    nfft : int
        size of the fft to compute the periodogram. If None (default), the
        length of the signal is used. if nfft > n, the signal is 0 padded.
    fs : float
        Sampling rate. By default, is 1 (normalized frequency. e.g. 0.5 is the
        Nyquist limit).
    Returns
    -------
    pxx : array-like
        The psd estimate.
    fgrid : array-like
        Frequency grid over which the periodogram was estimated.
    Notes
    -----
    Only real signals supported for now.
    Returns the one-sided version of the periodogram.
    Discrepency with matlab: matlab compute the psd in unit of power / radian /
    sample, and we compute the psd in unit of power / sample: to get the same
    result as matlab, just multiply the result from talkbox by 2pi"""
    x = np.atleast_1d(x)
    n = x.size
    if x.ndim > 1:
        raise ValueError("Only rank 1 input supported for now.")
    if not np.isrealobj(x):
        raise ValueError("Only real input supported for now.")
    if not nfft:
        nfft = n
    if nfft < n:
        raise ValueError("nfft < signal size not supported yet")
    pxx = np.abs(fft(x, nfft)) ** 2
    # Number of one-sided bins. Use floor division: the original `/` yields a
    # float on Python 3, making `pxx[:pn]` raise TypeError.
    if nfft % 2 == 0:
        pn = nfft // 2 + 1
    else:
        pn = (nfft + 1) // 2
    fgrid = np.linspace(0, fs * 0.5, pn)
    return pxx[:pn] / (n * fs), fgrid
def handle_429(e):
    """Renders full error page for too many site queries"""
    page = render.html("429")
    source = get_ipaddr()
    # Count the rate-limit hit per source address before logging it.
    count_ratelimit.labels(e, source).inc()
    logger.error(f"Error: {e}, Source: {source}")
    return page, 429
def url_path_join(*items):
    """
    Make it easier to build url path by joining every arguments with a '/'
    character.

    Args:
        items (list): Path elements

    Returns:
        str: the elements, stripped of surrounding '/' characters, joined
        by single '/' separators.
    """
    # str.strip("/") is exactly lstrip("/") followed by rstrip("/").
    return "/".join(item.strip("/") for item in items)
def f_match(pattern, string, flags=None):
    """ Match function
    Args:
        pattern (string): regexp (pattern|/pattern/flags)
        string (string): tested string
        flags (int): regexp flags
    Return:
        boolean
    """
    # search() returns a match object or None; compare against None to keep
    # the documented plain True/False return instead of the if/return pair.
    return build_regexp(pattern, flags).search(to_string(string)) is not None
def validate_epoch(val_loader, model, criterion, epoch, args):
    """Perform validation on the validation set.

    Args:
        val_loader: iterable yielding (input, target) batches.
        model: network to evaluate; assumed to already live on the GPU.
        criterion: loss function.
        epoch: current epoch number (kept for interface parity; unused here).
        args: run configuration (kept for interface parity; unused here).

    Returns:
        Tuple of (average loss, average top-1 accuracy) over the set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    data_time = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    # torch.no_grad() replaces the removed `volatile=True` Variable flag, and
    # non_blocking=True replaces `async=True`, which is a SyntaxError on
    # Python >= 3.7 where `async` became a keyword.
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(val_loader):
            data_time.update(time.time() - end)
            target = target.cuda(non_blocking=True)
            input = input.cuda()
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))[0]
            # .item() replaces `loss.data[0]`, which fails on 0-dim tensors
            # in modern PyTorch.
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} '.format(
                batch=batch_idx + 1,
                size=len(val_loader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
            )
            bar.next()
    bar.finish()
    return losses.avg, top1.avg
def test_debugging_when_source_code_is_missing():
    """Test debugging code that its source code is not available.
    Note:
        In this kind of code we cannot stop at an error, so we fall-back to
        simply running this code without interference.
    """
    # Compile a function at runtime via exec() so it has no backing source
    # file; injecting into globals() makes it retrievable below.
    exec("def function(): 1 / 0", locals(), globals())
    func = debug(globals()["function"])
    # With no source, debug() must let the original error propagate untouched.
    with pytest.raises(ArithmeticError):
        func()
def printBoardStatistic(boardname, conn):
    """
    Print Statistic for current board.

    Args:
        boardname: name of the board to look up in the `boards` table.
        conn: open sqlite3 connection.

    Raises:
        Exception: if no board with the given name exists.
    """
    c = conn.cursor()
    # fetchone() returns None when no row matched. The old loop-and-break
    # pattern left `i` unbound on an empty result, raising NameError instead
    # of the intended Exception.
    row = c.execute('SELECT * FROM `boards` WHERE `name` IS ?', (boardname,)).fetchone()
    if row is None:
        raise Exception('SQL Query Failed! in printBoardStatistic()')
    print('name: {0}\ncname: {1}\narchived_posts: {2}\nlatest_post: {3}\nlatest_post_time: {4}'.format(
        row[0], row[1], row[2], row[3],
        time.strftime("%a, %d %b %Y %H:%M:%S %z", time.localtime(row[3]))))
    return
def utf8_german_fix( uglystring ):
    """
    If your string contains ugly characters (like ü, ö, ä or ß) in your source file, run this string through here.
    This adds the German "Umlaute" to your string, making (ÄÖÜäöü߀) compatible for processing.
    \tprint( utf8_german_fix("ü߀") ) == ü߀
    """
    # NOTE(review): in this copy of the source the first and second arguments
    # of every replace() appear identical (a no-op). Presumably the original
    # file mapped mojibake byte pairs (e.g. "Ã¼") to the proper characters and
    # the literals were normalized by a later re-encoding -- verify against
    # the original file's raw bytes before relying on this function.
    uglystring = uglystring.replace('ü','ü')
    uglystring = uglystring.replace('ö','ö')
    uglystring = uglystring.replace('ä','ä')
    uglystring = uglystring.replace('Ä','Ä')
    uglystring = uglystring.replace('Ö','Ö')
    uglystring = uglystring.replace('Ü','Ü')
    uglystring = uglystring.replace('ß','ß')
    # This was born out of necessity, as there were some issues with a certain API not processing German properly.
    # I am always looking for a smarter way to do this.
    nicestring = uglystring.replace('€','€')
    return nicestring
def subtends(a1, b1, a2, b2, units='radians'):
    """ Calculate the angle subtended by 2 positions on a sphere.

    Args:
        a1, b1: longitude-like / latitude-like angles of the first position.
        a2, b2: angles of the second position.
        units: 'radians' (default) or 'degrees' for the input angles.

    Returns:
        The subtended angle as an Angle object (degrees-based).
    """
    if units.lower() == 'degrees':
        a1 = radians(a1)
        b1 = radians(b1)
        a2 = radians(a2)
        b2 = radians(b2)
    # Convert both positions to unit vectors.
    x1 = cos(a1) * cos(b1)
    y1 = sin(a1) * cos(b1)
    z1 = sin(b1)
    x2 = cos(a2) * cos(b2)
    y2 = sin(a2) * cos(b2)
    z2 = sin(b2)
    # Clamp the dot product into [-1, 1]: floating-point rounding can push it
    # slightly outside, making acos() raise a math domain error for identical
    # or antipodal positions.
    dot = max(-1.0, min(1.0, x1 * x2 + y1 * y2 + z1 * z2))
    theta = Angle.fromDegrees(degrees(acos(dot)))
    return theta
def get_pcgr_bin():
    """Return the absolute path of the directory containing the running
    Python interpreter, e.g. ``conda/env/pcgr/bin``.
    """
    executable_path = os.path.realpath(sys.executable)
    return os.path.dirname(executable_path)
def test_response_data():
    """Ensure response with expected data is returned."""
    exception = rest_exceptions.APIException(detail='Dummy detail', code='dummy_code')
    response = handlers.handle_rest_framework_api_exception(exception, {})
    body = str(response.data)
    # Both the error code and the human-readable detail must survive handling.
    assert 'dummy_code' in body
    assert 'Dummy detail' in body
async def test_update(opp, aioclient_mock):
    """Tests switch refreshes status periodically."""
    now = dt_util.utcnow()
    future = now + timedelta(minutes=10)
    # Initial poll response: both relays report off.
    aioclient_mock.get(
        "http://1.1.1.1/status.xml",
        text="<response><relay0>0</relay0><relay1>0</relay1></response>",
    )
    MockConfigEntry(
        domain=DOMAIN, data={"host": "1.1.1.1", "username": "foo", "password": "bar"}
    ).add_to_opp(opp)
    assert await async_setup_component(opp, DOMAIN, {})
    await opp.async_block_till_done()
    state = opp.states.get("switch.relay1")
    assert state.state == "off"
    # Swap the mocked device response: both relays now report on.
    aioclient_mock.clear_requests()
    aioclient_mock.get(
        "http://1.1.1.1/status.xml",
        text="<response><relay0>1</relay0><relay1>1</relay1></response>",
    )
    # Advance time to trigger the periodic refresh and verify the new state.
    async_fire_time_changed(opp, future)
    await opp.async_block_till_done()
    state = opp.states.get("switch.relay1")
    assert state.state == "on"
def get_args(obj):
    """Get a list of argument names for a callable.

    Handles plain functions, bound methods, classes (arguments of
    ``__init__``) and callable instances (arguments of ``__call__``);
    the implicit ``self`` is dropped where applicable.

    Raises:
        TypeError: if ``obj`` is not a supported callable.
    """
    # getfullargspec replaces getargspec, which was removed in Python 3.11;
    # its .args attribute holds the positional parameter names, as before.
    if inspect.isfunction(obj):
        return inspect.getfullargspec(obj).args
    elif inspect.ismethod(obj):
        return inspect.getfullargspec(obj).args[1:]
    elif inspect.isclass(obj):
        return inspect.getfullargspec(obj.__init__).args[1:]
    elif hasattr(obj, '__call__'):
        return inspect.getfullargspec(obj.__call__).args[1:]
    else:
        raise TypeError("Can't inspect signature of '%s' object." % obj)
def reset_cli(api_client, pipeline_id):
    """
    Resets a delta pipeline by truncating tables and creating new checkpoint folders so data is
    reprocessed from scratch.
    Usage:
    databricks pipelines reset --pipeline-id 1234
    """
    _validate_pipeline_id(pipeline_id)
    pipelines_api = PipelinesApi(api_client)
    pipelines_api.reset(pipeline_id)
    click.echo("Reset triggered for pipeline {}".format(pipeline_id))
def write_feedback(section, messages, folder=None):
    """Append messages to the status file.

    Args:
        section: feedback section key to append under.
        messages: iterable of message strings; each is also logged as a
            warning.
        folder: directory holding the status file. Defaults to the current
            working directory at call time (the old ``folder=os.getcwd()``
            default captured the cwd once at import time).
    """
    if folder is None:
        folder = os.getcwd()
    filepath = os.path.join(folder, STATUS_FILE)
    with open(filepath, encoding="utf-8") as stream:
        feedback = json.load(stream)
    feedback.setdefault(section, [])
    for message in messages:
        feedback[section].append(message)
        logging.warning('[%s] %s', section, message)
    with open(filepath, 'w', encoding="utf-8") as stream:
        json.dump(feedback, stream, indent=4)
def get_assumed_role_creds(service_name, assume_role_policy):
    """
    Assume the configured S3 execution role and return its temporary
    credentials (AccessKeyId, SecretAccessKey, SessionToken).
    :param service_name:
    :param assume_role_policy: inline session policy (dict) scoping the role
    """
    region = os.environ["AWS_REGION"]
    sts = boto3.client("sts", region_name=region)
    response = sts.assume_role(
        RoleArn=os.environ["IAMROLE_LMDEXEC_ARN"],
        RoleSessionName="aws-saasfactory-s3",
        Policy=json.dumps(assume_role_policy),
    )
    logger.info("helper.get_assumed_role_creds: assumed_role --> %s", response)
    return response["Credentials"]
def name_value(obj):
    """
    Convert (key, value) pairs to HAR format.
    """
    pairs = []
    # HAR encodes each mapping entry as a {"name": ..., "value": ...} object.
    for key, value in obj.items():
        pairs.append({"name": key, "value": value})
    return pairs
def get_ids(id_type):
    """Get unique article identifiers from the dataset.
    Parameters
    ----------
    id_type : str
        Dataframe column name, e.g. 'pubmed_id', 'pmcid', 'doi'.
    Returns
    -------
    list of str
        List of unique identifiers in the dataset, e.g. all unique PMCIDs.
    """
    global doc_df
    # Lazily load the metadata dataframe on first use.
    if doc_df is None:
        doc_df = get_metadata_df()
    column = doc_df[id_type]
    present = column[~pd.isna(column)]
    return list(present.unique())
def ccw_wkt_from_shapefile(shapefile, out_txt):
    """
    Creates wkt with coordinates oriented counter clockwise for a given
    shapefile.  Shapefiles are oriented clockwise, which is incompatible with
    spatial queries in many database management systems.  Use this to generate
    wkt that you can copy and paste into queries.
    Parameters
    ----------
    shapefile : path to the shpefile to read; string
    out_txt : path to the text file to write the wkt to; string
    Returns
    -------
    Nothing
    """
    # Read in a shapefile of polygon of interest. It must be in CRS 4326
    # First get a fiona collection
    c = fiona.open(shapefile, 'r')
    # try/finally guarantees the collection is closed on every path; the
    # original leaked it when the CRS check failed.
    try:
        if c.crs['init'] != 'epsg:4326':
            print("You need to reproject the shapefile to EPSG:4326")
            return
        # Next make it a shapely polygon object
        poly = shape(c[0]['geometry'])
        # Use LinearRing to determine if coordinates are listed clockwise
        coords = c[0]["geometry"]["coordinates"][0]
        lr = LinearRing(coords)
        if lr.is_ccw == False:
            # Reverse coordinates to make them counter clockwise
            print("Points were clockwise......reversing")
            # Make the polygon's outer ring counter clockwise
            poly = shapely.geometry.polygon.orient(poly, sign=1.0)
        else:
            print("Points were already counter clockwise")
        # Get the well-known text version of the polygon
        wkt = poly.wkt
        # Write WKT to text file
        with open(out_txt, 'w+') as file:
            file.write(wkt)
        print("WKT written to {0}".format(out_txt))
    finally:
        # close the collections
        c.close()
    return
def get_items_info(request):
    """Get a collection of person objects"""
    items = request.dbsession.query(Item).all()
    # Serialise each row to a plain dict for the response payload.
    return [{'id': item.id, 'markup': item.markup} for item in items]
def test_UniformTimeSeries_repr():
    # NOTE(review): the docstring below IS the test (doctest) -- do not edit
    # its content casually. The `In [435]:` / `Out[437]:` lines are IPython
    # transcript syntax, not valid doctest, so the doctest runner ignores
    # them; presumably they are kept as illustrative examples -- confirm.
    """
    >>> t=ts.UniformTime(length=3,sampling_rate=3)
    >>> tseries1 = ts.UniformTimeSeries(data=[3,5,8],time=t)
    >>> t.sampling_rate
    3.0 Hz
    >>> tseries1.sampling_rate
    3.0 Hz
    >>> tseries1 = ts.UniformTimeSeries(data=[3,5,8],sampling_rate=3)
    >>> tseries1.time
    UniformTime([ 0. , 0.33333333, 0.66666667], time_unit='s')
    >>> tseries1.sampling_rate
    3.0 Hz
    >>> tseries1.sampling_interval
    0.33333333333300003 s
    In [435]: a = ts.UniformTime(length=1,sampling_rate=2)
    In [436]: b = ts.UniformTimeSeries(data=[1,2,3],sampling_interval=a.sampling_interval)
    In [437]: b.sampling_rate
    Out[437]: 2.0 Hz
    In [361]: a = ts.UniformTime(length=1,sampling_rate=1)
    In [362]: b = ts.UniformTimeSeries(data=[1,2,3],sampling_interval=a.sampling_interval)
    In [363]: b.sampling_rate
    Out[363]: 1.0 Hz
    """
def _test(value, *args, **keywargs):
    # NOTE(review): the docstring below is a doctest exercising Validator's
    # check-string parsing -- do not edit its content casually. Its
    # `print v.check(...)` lines are Python 2 print statements and will fail
    # under a Python 3 doctest runner; confirm the intended interpreter.
    """
    A function that exists for test purposes.
    >>> checks = [
    ...     '3, 6, min=1, max=3, test=list(a, b, c)',
    ...     '3',
    ...     '3, 6',
    ...     '3,',
    ...     'min=1, test="a b c"',
    ...     'min=5, test="a, b, c"',
    ...     'min=1, max=3, test="a, b, c"',
    ...     'min=-100, test=-99',
    ...     'min=1, max=3',
    ...     '3, 6, test="36"',
    ...     '3, 6, test="a, b, c"',
    ...     '3, max=3, test=list("a", "b", "c")',
    ...     '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
    ...     "test='x=fish(3)'",
    ...     ]
    >>> v = Validator({'test': _test})
    >>> for entry in checks:
    ...     print v.check(('test(%s)' % entry), 3)
    (3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'})
    (3, ('3',), {})
    (3, ('3', '6'), {})
    (3, ('3',), {})
    (3, (), {'test': 'a b c', 'min': '1'})
    (3, (), {'test': 'a, b, c', 'min': '5'})
    (3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'})
    (3, (), {'test': '-99', 'min': '-100'})
    (3, (), {'max': '3', 'min': '1'})
    (3, ('3', '6'), {'test': '36'})
    (3, ('3', '6'), {'test': 'a, b, c'})
    (3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'})
    (3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'})
    (3, (), {'test': 'x=fish(3)'})
    """
    # Echo back exactly what the Validator parsed out of the check string.
    return (value, args, keywargs)
def plot_chromaticity_diagram_CIE1976UCS(
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    show_diagram_colours: Boolean = True,
    show_spectral_locus: Boolean = True,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *CIE 1976 UCS Chromaticity Diagram*.
    Parameters
    ----------
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    show_diagram_colours
        Whether to display the *Chromaticity Diagram* background colours.
    show_spectral_locus
        Whether to display the *Spectral Locus*.
    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.
    Returns
    -------
    :class:`tuple`
        Current figure and axes.
    Examples
    --------
    >>> plot_chromaticity_diagram_CIE1976UCS()  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)
    .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1976UCS.png
        :align: center
        :alt: plot_chromaticity_diagram_CIE1976UCS
    """
    # Delegate to the generic diagram plotter, forcing the CIE 1976 UCS
    # method (overriding any "method" the caller may have passed).
    settings = {**kwargs, "method": "CIE 1976 UCS"}
    return plot_chromaticity_diagram(
        cmfs, show_diagram_colours, show_spectral_locus, **settings
    )
def generateFilter(targetType, left=False):
    """Generate filter function for loaded plugins.

    Args:
        targetType: plugin type to filter against.
        left: if True, keep only plugins of ``targetType``; if False
            (default), remove plugins of ``targetType``.

    Returns:
        A function that takes a plugin list and prunes it in place,
        logging each removal.
    """
    def filter(plugins):
        # Iterate over a snapshot: the original removed items from `plugins`
        # while iterating it directly, which silently skips the element
        # immediately following each removal.
        for pi in list(plugins):
            matches = pi.isThisType(targetType)
            if (left and not matches) or (not left and matches):
                plugins.remove(pi)
                logger.info("Plugin: {} is filtered out by predefined filter"\
                        .format(pi.namePlugin()))
    return filter
def test__prod__homolytic_scission():
    """ test graph.reac.prod_homolytic_scission
    """
    # Build a molecular graph for n-butane from its SMILES string.
    butane_gra = automol.geom.graph(
        automol.inchi.geometry(
            automol.smiles.inchi('CCCC')))
    prod_gras = graph.reac.prod_homolytic_scission(butane_gra)
    print('\n homolyt scission')
    for prod_gra in prod_gras:
        print(prod_gra)
def test_search_no_results(session, search_type, json_data):
    """Assert that a search query with no results returns the expected result."""
    query = SearchRequest.create_from_json(json_data, None)
    query.search()
    # The query is persisted but its response set is empty.
    assert query.id
    assert query.returned_results_size == 0
    assert not query.search_response
def test_compare_full_flash_hex(
    mock_open_temp_html, mock_gen_diff_html, mock_read_flash_hex
):
    """Check that compare_full_flash_hex reads both hex sources and diffs them.

    The hex-file read is patched via mock_open; the flash read, diff
    generation and browser display are patched via the mock_* fixtures.
    """
    file_hex_path = os.path.join("path", "to", "file.hex")
    file_hex_content = "This is the hex file content"
    flash_hex_content = "This is the flash hex content"
    mock_read_flash_hex.return_value = flash_hex_content
    with mock.patch(
        "ubittool.cmds.open", mock.mock_open(read_data=file_hex_content)
    ) as m_open:
        cmds.compare_full_flash_hex(file_hex_path)
    # The hex file is opened exactly once, as UTF-8 text.
    m_open.assert_called_once_with(file_hex_path, encoding="utf-8")
    # Flash is read exactly once, without hex decoding.
    assert mock_read_flash_hex.call_count == 1
    assert mock_read_flash_hex.call_args[1] == {"decode_hex": False}
    # The diff is generated once with (title_a, lines_a, title_b, lines_b).
    assert mock_gen_diff_html.call_count == 1
    assert mock_gen_diff_html.call_args[0] == (
        "micro:bit",
        [flash_hex_content],
        "Hex file",
        [file_hex_content],
    )
    assert mock_open_temp_html.call_count == 1
def load_vel_map(component="u"):
    """
    Load all mean streamwise velocity profiles into a single `DataFrame`
    with `z_H` as the index and `y_R` as columns.
    """
    # Column index of each velocity component in the raw set data files
    component_cols = dict(u=1, v=2, w=3)
    sets_dir = os.path.join("postProcessing", "sets")
    # Use the most recent time directory (lexicographic max of dir names)
    data_dir = os.path.join(sets_dir, max(os.listdir(sets_dir)))
    # Collect z values from the UMean profile file names, highest first
    z_H = sorted(
        (float(f.split("_")[1]) for f in os.listdir(data_dir) if "UMean" in f),
        reverse=True,
    )
    vel = []
    for zi in z_H:
        fname = "profile_{}_UMean.xy".format(zi)
        rawdata = np.loadtxt(os.path.join(data_dir, fname), unpack=True)
        vel.append(rawdata[component_cols[component]])
        y_R = rawdata[0]/R
    z_H = np.array(z_H)/H
    vel = np.array(vel).reshape((len(z_H), len(y_R)))
    return pd.DataFrame(vel, index=z_H, columns=y_R)
def iterate_packages(rospack, mode):
    """
    Yield ``(package_name, directory)`` pairs for packages that contain
    messages/services.

    :param mode: .msg or .srv, ``str``
    """
    if mode == MODE_MSG:
        subdir = 'msg'
    elif mode == MODE_SRV:
        subdir = 'srv'
    else:
        raise ValueError('Unknown mode for iterate_packages: %s'%mode)
    for pkg in rospack.list():
        pkg_subdir = os.path.join(rospack.get_path(pkg), subdir)
        if os.path.isdir(pkg_subdir):
            yield pkg, pkg_subdir
def check_int(es_url, es_index, hash_id):
    """Query Elasticsearch for interferograms with the specified input hash ID.

    Args:
        es_url: Base Elasticsearch URL (trailing slash optional).
        es_index: Index to search.
        hash_id: Value matched against ``metadata.input_hash_id``.

    Returns:
        tuple: (total hit count, ``_id`` of the first hit, or 'NONE' when
        there are no hits).

    Raises:
        requests.HTTPError: If the search request does not return 200.
    """
    query = {
        "query":{
            "bool":{
                "must":[
                    {"term":{"metadata.input_hash_id":hash_id}},
                ]
            }
        }
    }
    if es_url.endswith('/'):
        search_url = '%s%s/_search' % (es_url, es_index)
    else:
        search_url = '%s/%s/_search' % (es_url, es_index)
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code != 200:
        # Bug fix: the original used Python 2 `print >>sys.stderr` statements,
        # which are syntax errors in Python 3 (the rest of this file uses
        # Python 3 f-strings).
        print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
        print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
        print("returned: %s" % r.text, file=sys.stderr)
        r.raise_for_status()
    result = r.json()
    pprint(result)
    total = result['hits']['total']
    # Renamed from `id` to avoid shadowing the builtin.
    hit_id = 'NONE' if total == 0 else result['hits']['hits'][0]['_id']
    return total, hit_id
def format_sample_case(s: str) -> str:
    """Normalize the string s into a good form as a sample case.

    A good form uses LF line endings instead of CRLF, ends with a trailing
    newline, and has no superfluous whitespace around any line.
    """
    stripped = s.strip()
    if not stripped:
        return ''
    return ''.join(line.strip() + '\n' for line in stripped.splitlines())
def replace_floats_with_decimals(obj: Any, round_digits: int = 9) -> Any:
    """Replace every `float` inside `obj` with a rounded `Decimal`.

    Args:
        obj: Input object.
        round_digits: Number of decimal places `Decimal` values are rounded to.

    Returns:
        A copy of `obj` in which every `float` has been replaced by a
        `Decimal` quantized to `round_digits` decimal places.
    """
    exponent = Decimal(10) ** -round_digits

    def _parse_float(raw: Text) -> Decimal:
        # json hands the raw numeric text to parse_float, so no float
        # round-trip precision is lost before quantizing.
        return Decimal(raw).quantize(exponent)

    return json.loads(json.dumps(obj), parse_float=_parse_float)
def _calc_range_mixed_data_columns(data, observation, dtypes):
""" Return range for each numeric column, 0 for categorical variables """
_, cols = data.shape
result = np.zeros(cols)
for col in range(cols):
if np.issubdtype(dtypes[col], np.number):
result[col] = max(max(data[:, col]), observation[col]) - min(min(data[:, col]), observation[col])
return result | 5,329,781 |
def get_from_hdfs(file_hdfs):
    """
    Fetch a file, accepting either an HDFS path or a local path.
    """
    if not file_hdfs.startswith('hdfs'):
        # already local: use the path as-is
        file_local = file_hdfs
    else:
        # copy down to the current directory, replacing any stale copy
        file_local = os.path.split(file_hdfs)[-1]
        if os.path.exists(file_local):
            print(f"rm existing {file_local}")
            os.system(f"rm {file_local}")
        hcopy(file_hdfs, file_local)
    assert os.path.exists(file_local)
    return file_local
def search(request):
    """
    Display search form/results for events (using distance-based search).

    Template: events/search.html

    Context:
        form - ``anthill.events.forms.SearchForm``
        event_list - events in the near future
        searched - True/False based on if a search was done
        search_results - list of results (may be empty)
    """
    upcoming_events = Event.objects.future().select_related()[0:5]
    if request.GET:
        form = SearchForm(request.GET)
        # Bug fix: the result of is_valid() was previously discarded and
        # cleaned_data was read unconditionally, so any invalid submission
        # raised a KeyError (cleaned_data only holds fields that validated).
        if form.is_valid():
            name = form.cleaned_data['name']
            location = form.cleaned_data['location']
            location_range = form.cleaned_data['location_range']
            # only events that haven't happened
            events = Event.objects.future().select_related()
            if name:
                events = events.filter(title__icontains=name)
            if location:
                events = events.search_by_distance(location, location_range)
        else:
            # Invalid input: re-render the bound form (with its errors)
            # and an empty result list.
            events = []
        context = {'form': form, 'searched': True, 'search_results': events,
                   'event_list': upcoming_events}
    else:
        context = {'form': SearchForm(), 'event_list': upcoming_events}
    return render_to_response('events/search.html', context,
                              context_instance=RequestContext(request))
def logout() -> Response:
    """Logout route: log the current user out.

    :return: A redirect to the landing page.
    """
    username: str = current_user.name
    logout_user()
    flash(f'User "{username}" logged out.', 'info')
    return redirect(url_for('root'))
def load_room(name):
    """
    Look up a room object by name in the module globals.

    There is a potential security problem here.
    Who gets to set name? Can that expose a variable?
    """
    module_scope = globals()
    return module_scope.get(name)
def call_ipt_func(ipt_id: str, function_name: str, source, **kwargs):
    """Processes an image/wrapper with an IPT using a function-like syntax.

    :param ipt_id: identifier of the IPT class to instantiate
    :param function_name: name of the method to invoke on the instance
    :param source: passed to the method as the ``wrapper`` keyword
    :param kwargs: forwarded to the IPT constructor
    :return: the method's return value, or None when the class or method
        is unavailable
    """
    ipt_class = get_ipt_class(ipt_id)
    if ipt_class is None:
        return None
    instance = ipt_class(**kwargs)
    method = getattr(instance, function_name, None)
    if not callable(method):
        return None
    return method(wrapper=source)
def is_running(service):
    """
    Checks if service is running using the sysdmanager library.

    :param service: Service to be checked.
    :return: 1 if the service is active, 0 otherwise.
    """
    manager = get_manager()
    return 1 if manager.is_active(service + ".service") else 0
def pipeline_model(model, pipeline_splits):
    """
    Split the model into pipeline stages for IPU execution.

    Each entry of ``pipeline_splits`` names a submodule ('/'-separated
    nesting path); that submodule is wrapped in ``poptorch.BeginBlock`` so
    it and subsequent layers run on the next IPU. Exits the process if a
    split name cannot be found in the model.
    """
    # Log each module name that matches a requested split, for diagnostics.
    for name, modules in model.named_modules():
        name = name.replace('.', '/')
        if name in pipeline_splits:
            logging.info('--------')
            logging.info(name)
    for split_idx, split in enumerate(pipeline_splits):
        split_tokens = split.split('/')
        logging.info(f'Processing pipeline split {split_tokens}')
        parent, node, field_or_idx_str = get_module_and_parent_by_name(model, split_tokens)
        if parent is None:
            # Misnamed split: abort rather than silently mis-partition.
            logging.error(f'Split {split} not found')
            sys.exit()
        else:
            # ipu_id=split_idx+1: layers before the first split stay on IPU 0.
            replace_layer(parent, field_or_idx_str, poptorch.BeginBlock(ipu_id=split_idx+1, layer_to_call=node))
async def setstatus(ctx, status: Option(str, "Set status", choices=["online", "idle", "dnd"])):
    """Change Dalti's status"""
    # Map each choice to its discord presence and the emoji/label fragment
    # shown in the confirmation embed.
    presence_map = {
        "online": (discord.Status.online, "<:daltiOnline:923700141754552353> `Online`"),
        "idle": (discord.Status.idle, "<:daltiIdle:923700173438349383> `Idle`"),
        "dnd": (discord.Status.dnd, "<:daltiDND:923700213389086840> `DND`"),
    }
    try:
        if status in presence_map:
            presence, label = presence_map[status]
            await Dalti.change_presence(status=presence)
            Embed = discord.Embed(description=f"<:daltiSuccess:923699355779731476> Sucessfully changed my status to {label}.", color=discord.Color.from_rgb(67,181,130))
            await ctx.respond(embed=Embed)
    except Exception:
        Embed = discord.Embed(description="<:daltiError:923699414646816768> I was not able to change my status/activity.", colour=discord.Colour.from_rgb(240,74,71))
        await ctx.respond(embed=Embed)
def smoothen(data, kernel):
    """Convolve data with an odd-size kernel, with boundary handling.

    The input is padded on each side with the mean of the nearest
    ``m//2 + 1`` samples (m = half the kernel width), so the output keeps
    the same length as ``data``.

    Args:
        data: 1-D array of samples.
        kernel: 1-D convolution kernel with an odd number of taps.

    Returns:
        1-D array with the same shape as ``data``.
    """
    n, = kernel.shape
    assert n % 2 == 1
    m = (n-1) // 2
    # pad input data
    k = m//2 + 1
    data_padded = np.concatenate([
        np.full(m, data[:k].mean()),
        data,
        np.full(m, data[-k:].mean())
    ])
    smooth = np.convolve(data_padded, kernel, mode='same')
    # Bug fix: with a size-1 kernel m == 0 and the old unconditional slice
    # [m:-m] (i.e. [0:0]) produced an empty array, tripping the assert.
    if m:
        smooth = smooth[m:-m]
    assert smooth.shape == data.shape
    return smooth
def find_node_name(node_id, g):
    """Return the 'label' attribute of the node with the given id."""
    attributes = g.node[node_id]
    return attributes["label"]
def aes_cbc_decrypt(data, key, iv):
    """
    Decrypt with AES in CBC mode.

    @param {int[]} data cipher
    @param {int[]} key 16/24/32-Byte cipher key
    @param {int[]} iv 16-Byte IV
    @returns {int[]} decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
    decrypted_data = []
    # CBC: each plaintext block = AES-decrypted block XOR previous
    # ciphertext block; the IV seeds the chain for the first block.
    previous_cipher_block = iv
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES : (i + 1) * BLOCK_SIZE_BYTES]
        # zero-pad a short final block up to the cipher block size
        block += [0] * (BLOCK_SIZE_BYTES - len(block))
        decrypted_block = aes_decrypt(block, expanded_key)
        decrypted_data += xor(decrypted_block, previous_cipher_block)
        previous_cipher_block = block
    # trim any padding back to the original input length
    decrypted_data = decrypted_data[: len(data)]
    return decrypted_data
def score_concepts(merged_graph: AMR, counts: tuple, concept_alignments: Dict[str, str]) -> Counter:
    """
    Calculate TF-IDF counts for each node(concept) in `merged_graph` according to their aligned words.

    Parameters:
        merged_graph(AMR): Graph which contains the concept to be scored.
        counts(tuple): A tuple returned by the DohareEtAl2018.get_tf_idf() function.
        concept_alignments(dict): A dictionary that maps concepts into a list of words.

    Returns:
        Counter: All TF-IDF scores for each concept. If the concept does not exist, the score is 0.
    """
    # NOTE(review): assumes counts is (fitted vectorizer, tf row matrix,
    # df row matrix, document count) — confirm against get_tf_idf().
    tf_idf, tf_counts, df_counts, num_docs = counts
    # Get score for each node
    concept_scores = dict()
    for c in merged_graph.get_concept_nodes():
        concept = merged_graph.nodes[c]['label']
        if concept in concept_alignments:
            # Sum term/document frequencies over all words aligned to
            # this concept; words missing from the fitted vocabulary
            # simply contribute nothing.
            tf = 0
            df = 0
            for w in concept_alignments[concept]:
                try:
                    tf += tf_counts[0, tf_idf.vocabulary_[w]]
                    df += df_counts[0, tf_idf.vocabulary_[w]]
                except KeyError:
                    pass
            concept_scores[concept] = tf * \
                np.log((num_docs/(df + 1)))  # TF-IDF
    concept_scores = Counter(concept_scores)
    return concept_scores
def test_apply_bda_pre_fs_int_time_bad_quantity_error():
    """Test error for using an incompatible Quantity for pre_fs_int_time."""
    uvd = UVData()
    # pre_fs_int_time deliberately carries length units instead of time
    bad_pre_fs_int_time = 0.1 * units.m
    with pytest.raises(ValueError) as exc_info:
        bda_tools.apply_bda(
            uvd,
            0.1,  # max_decorr
            bad_pre_fs_int_time,
            Angle(20.0, units.degree),  # corr_fov_angle
            30 * units.s,  # max_time
            2 * units.s,  # corr_int_time
        )
    assert str(exc_info.value).startswith(
        "pre_fs_int_time must be a Quantity with units of time"
    )
def test_newline_002(settings, sass_parser):
    """
    Ensure nothing after import newline is wrongly captured
    """
    content = (
        "// Foo\n"
        "@import candidate1\n"
        "@import candidate2\n"
        ".foo{ color: red }"
    )
    assert sass_parser.parse(content) == ["candidate1", "candidate2"]
def timestamp_to_double(sparkdf):
    """
    Utility function to cast columns of type 'timestamp' to type 'double.'
    """
    for column_name, column_type in sparkdf.dtypes:
        if column_type == 'timestamp':
            sparkdf = sparkdf.withColumn(
                column_name, col(column_name).cast(DoubleType())
            )
    return sparkdf
def get_signal(args):
    """Compute signal from a track around a set of sites.

    Splits the BED regions across worker processes, sums the per-site
    signal (or stacks it when ``args.all`` is set), and optionally writes
    an aggregate track plot and text file.
    """
    if not args.out:
        # default output basename: input BED filename without its extension
        args.out = '.'.join(os.path.basename(args.bed).split('.')[0:-1])
    chunks = ChunkList.read(args.bed, strand_col = args.strand)
    params = _signalParams(args.bg, args.sizes, args.up, args.down,args.exp,
                           args.scale, args.positive, args.all)
    # cap the number of chunk sets so small inputs are not over-split
    sets = chunks.split(items = min(args.cores*20,len(chunks)))
    pool = Pool(processes = args.cores)
    tmp = pool.map(_signalHelper, zip(sets,itertools.repeat(params)))
    pool.close()
    pool.join()
    if args.all:
        # keep every per-site track on disk, then aggregate (NaNs as 0)
        mat = np.vstack(tmp)
        np.savetxt(args.out + ".tracks.txt.gz", mat, delimiter = ",", fmt="%1.5g")
        mat[np.isnan(mat)]=0
        result = np.sum(mat, axis = 0)
    else:
        result = sum(tmp)
    if not args.no_agg:
        if args.norm:
            # mean signal per site instead of the raw sum
            result = result / len(chunks)
        fig = plt.figure()
        plt.plot(range(-args.up,args.down+1),result)
        plt.xlabel("Position relative to Site")
        plt.ylabel("Signal Intensity")
        fig.savefig(args.out+'.agg.track.eps')
        plt.close(fig)
        np.savetxt(args.out+'.agg.track.txt',result,delimiter="\t")
def get_entrez_id_from_organism_full_name_batch(organism_full_names: List[str]) -> List[str]:
    """Retrieves the Entrez numeric ID of the given organisms.

    This numeric identifier is necessary for BLAST and NCBI TAXONOMY
    searches.
    This function uses Biopython functions. Returns BLAST-compatible IDs as
    txid + NCBI ID + [ORGN].

    Arguments:
    >organism_full_names: List[str] ~ A list of full names of organisms, e.g. "Xanthomonas
     campestris pv. campesris B100"
    """
    batch_start = 0
    organism_ncbi_ids_result: List[str] = []
    # Query NCBI in batches of NCBI_BATCH_SIZE names joined with OR.
    while batch_start < len(organism_full_names):
        organism_full_names_slice = organism_full_names[batch_start:batch_start+NCBI_BATCH_SIZE]
        query_names = " OR ".join(organism_full_names_slice)
        # An e-mail has to be set, you may change it to yours if you want to
        # be notified if any problems occur.
        Entrez.email = "x@x.x"
        # Set the Entrez search to the NCBI TAXONOMY database.
        handle = Entrez.esearch(db="Taxonomy", term=query_names)
        # Wait in order to not overload the NCBI's server
        time.sleep(WAIT_TIME)
        # Reformat the Entrez search result in order to extract the Entrez ID
        record = Entrez.read(handle)
        # NOTE(review): IdList is reversed here, presumably because Entrez
        # returns IDs in reverse request order — confirm against the API.
        organism_ncbi_ids = record["IdList"][::-1]
        # txid+NUMBER+[ORGN] is the form that is used for NCBI BLASTP searches to restrict a search
        # to an organism using the Entrez query constraint input.
        organism_ncbi_ids_result += ["txid"+x +
                                     "[ORGN]" for x in organism_ncbi_ids]
        batch_start += NCBI_BATCH_SIZE
        time.sleep(WAIT_TIME)
    # Return the retrieved IDs :D
    return organism_ncbi_ids_result
def test_wrong_msgcat(po_file):
    """Test behavior when msgcat is not available on PATH.

    Emptying PATH makes msgcat unresolvable; check_style should then
    exit with code 127 (command not found).
    """
    environ_saved = os.environ["PATH"]
    os.environ["PATH"] = ""
    try:
        with pytest.raises(SystemExit) as sysexit:
            powrap.check_style([po_file])
    finally:
        # Bug fix: restore PATH even when the expected SystemExit is not
        # raised, so a failure here cannot poison every later test.
        os.environ["PATH"] = environ_saved
    assert sysexit.type == SystemExit
    assert sysexit.value.code == 127
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.