| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def Pow_sca(x_e, c_e, g, R0, R1, omega, epM):
"""Calculate the power scattered by an annulus
with a 'circling' electron as exciting source inside and
an electron moving on a slightly curved trajectory outside (vertical)
The trajectory of the electron derives from a straight vertical
trajectory in the ellipse frame.
Output: Resistive losses as a function of omega"""
# epM = 1-64/(omega*(omega+1j*gamma))
# omega = omega*Conv
k0 = omega/3e8
gamma_abs = 1j* np.pi**2 * ep0 * g**2/8 * k0**2
k_n = 1
###Lambda = 4*pi*eps0 in expression for source coefficients
###Calculate lambda according to formula in ELS_slab_crescent.pdf
a_n_s = np.exp(-omega/c_e*x_e)/omega*BesselI(1,omega*g/c_e)
#Calculate expansion coefficients as in ELS_ellipse_annulus.pdf
#This is for the cosine terms
b_c = (a_n_s /((epM-1)**2 * R0**(2) - (epM+1)**2 * R1**(2))\
*( (epM**2-1) * (R1**(2)-R0**(2))\
- 4*epM * R1**(2) * R0**(2) ) * R0**(-2)) - 1*a_n_s
#This is for the sin terms
b_s = (a_n_s/((epM-1)**2 * R0**(2) - (epM+1)**2 * R1**(2))\
*( -(epM**2-1) * (R1**(2)-R0**(2))\
- 4*epM * R1**(2) * R0**(2) ) * R0**(-2)) - 1*a_n_s
return omega/2 * np.imag(gamma_abs * (abs(b_c)**2 + abs(b_s)**2))
| 19,700
|
def read_images_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for image_index in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi"
)
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q"
)[0]
x_y_id_s = read_next_bytes(
fid,
num_bytes=24 * num_points2D,
format_char_sequence="ddq" * num_points2D,
)
xys = np.column_stack(
[
tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3])),
]
)
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id,
qvec=qvec,
tvec=tvec,
camera_id=camera_id,
name=image_name,
xys=xys,
point3D_ids=point3D_ids,
)
return images
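# Usage sketch (illustrative; the path below is a placeholder for a COLMAP
# sparse model written in binary format, not part of the original snippet):
example_images = read_images_binary("sparse/0/images.bin")
for img_id, img in example_images.items():
    print(img_id, img.name, img.camera_id, len(img.point3D_ids))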
| 19,701
|
def index():
"""Render upload page."""
log_cmd('Requested upload.index', 'green')
return render_template('upload.html',
page_title='Upload',
local_css='upload.css',
)
| 19,702
|
def pin_rsconnect(data, pin_name, pretty_pin_name, connect_server, api_key):
"""
Make a pin on RStudio Connect.
Parameters:
data: any object that has a to_json method (eg. pandas DataFrame)
pin_name (str): name of pin, only alphanumeric and underscores
pretty_pin_name (str): display name of pin
connect_server (str): RStudio Connect server address e.g. https://connect.example.com/
api_key (str): API key of a user on RStudio Connect
Return:
    A dict containing the dashboard URL ("dash_url") and the content URL ("content_url")
"""
# Save data
local_dir = tempfile.TemporaryDirectory()
data.to_json(local_dir.name + "/data.txt")
# Create landing page
i = open(local_dir.name + "/index.html", "w")
lines = ["<h1>Python Pin", "\n"]
for line in lines:
i.write(line)
i.close()
# Create Manifest
manifest = {
"version": 1,
"locale": "en_US",
"platform": "3.5.1",
"metadata": {
"appmode": "static",
"primary_rmd": None,
"primary_html": "index.html",
"content_category": "pin",
"has_parameters": False,
},
"packages": None,
"files": None,
"users": None,
}
with open(local_dir.name + "/manifest.json", "w") as manifest_conn:
json.dump(manifest, manifest_conn)
# Turn into tarfile
pins_tf = tempfile.NamedTemporaryFile(delete=False)
with tarfile.open(pins_tf.name, "w:gz") as tar:
tar.add(local_dir.name, arcname=os.path.basename(local_dir.name))
auth = {"Authorization": "Key " + api_key}
content = get_content(pin_name, pretty_pin_name, connect_server, auth)
content_url = connect_server + "/__api__/v1/content/" + content["guid"]
# Upload Bundle
with open(pins_tf.name, "rb") as tf_conn:
bundle = req.post(content_url + "/bundles", headers=auth, data=tf_conn)
bundle_id = bundle.json()["id"]
# Deploy bundle
deploy = req.post(
content_url + "/deploy", headers=auth, json={"bundle_id": bundle_id}
)
return {"dash_url": content["dashboard_url"], "content_url": content["content_url"]}
| 19,703
|
def partial_to_full(dic1, dic2):
"""This function relates partial curves to full curves, according to the distances between them
The inputs are two dictionaries"""
C = []
D = []
F = []
# Calculate the closest full curve for all the partial curves under
# evaluation
for i in tqdm(dic1.keys()):
df = distance_cycle_to_full(i, dic1, dic2)
Distance = df['Distance'][df.index[0]]
Full_cycle = df['Cycle'][df.index[0]]
C.append(i)
D.append(Distance)
F.append(Full_cycle)
D = np.array(D)
C = np.array(C)
F = np.array(F)
return D, C, F
| 19,704
|
def _sample(probabilities, population_size):
"""Return a random population, drawn with regard to a set of probabilities"""
population = []
for _ in range(population_size):
solution = []
for probability in probabilities:
# probability of 1.0: always 1
# probability of 0.0: always 0
if random.uniform(0.0, 1.0) < probability:
solution.append(1)
else:
solution.append(0)
population.append(solution)
return population
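# Quick illustration: a probability of 1.0 always yields a 1 and 0.0 always a 0,
# so only the last position varies across the drawn solutions.
print(_sample([1.0, 0.0, 0.5], population_size=3))
# e.g. [[1, 0, 1], [1, 0, 0], [1, 0, 1]]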
| 19,705
|
def nodal_scoping(node_ids, server = None):
"""Helper function to create a specific ``ansys.dpf.core.Scoping``
associated to a mesh.
Parameters
----------
node_ids : List of int
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
        ``None``, attempts to use the global server.
Returns
-------
scoping : ansys.dpf.core.Scoping
"""
if not isinstance(node_ids, list):
raise dpf_errors.InvalidTypeError("list", "node_ids")
scoping = Scoping(server = server, ids = node_ids, location = locations.nodal)
return scoping
| 19,706
|
def print_func(val):
"""test function"""
print(val)
| 19,707
|
def user_active(request):
"""Prevents auto logout by updating the session's last active time"""
# If auto logout is disabled, just return an empty body.
if not settings.AUTO_LOGOUT_SECONDS:
return HttpResponse(json.dumps({}), content_type="application/json", status=200)
last_active_at = set_session_user_last_active_at(request)
auto_logout_at = last_active_at + timedelta(seconds=settings.AUTO_LOGOUT_SECONDS)
auto_logout_warning_at = auto_logout_at - timedelta(seconds=settings.AUTO_LOGOUT_WARNING_AT_SECONDS_LEFT)
return HttpResponse(
json.dumps(
{
"auto_logout_at": auto_logout_at.isoformat(),
"auto_logout_warning_at": auto_logout_warning_at.isoformat(),
}
),
content_type="application/json",
status=200,
)
| 19,708
|
def decode_labels(labels):
"""Validate labels."""
labels_decode = []
for label in labels:
if not isinstance(label, str):
if isinstance(label, int):
label = str(label)
else:
label = label.decode('utf-8').replace('"', '')
labels_decode.append(label)
return labels_decode
| 19,709
|
def indent(text, num=2):
"""Indent a piece of text."""
lines = text.splitlines()
return '\n'.join(indent_iterable(lines, num=num))
| 19,710
|
def random_lever_value(lever_name):
"""Moves a given lever (lever_name) to a random position between 1 and 3.9"""
rand_val = random.randint(10, 39)/10 # Generate random value between 1 and 3.9
return move_lever([lever_name], [round(rand_val, 2)], costs = True)
| 19,711
|
def create_clean_dataset(input_train_csv_path: str, input_test_csv_path: str,
output_rootdir: str, output_train_csv_file: str, output_test_csv_file: str,
train_fname_prefix: str, test_fname_prefix: str, xforms: Sequence[dg_transform.Transform],
random_state_obj: RandomState) -> None:
"""
Creates a "clean" MNIST dataset, which is a the MNIST dataset (with potential transformations applied),
but no triggers.
:param input_train_csv_path: path to the CSV file containing the training data. The format of the CSV file is
is specified by the mnist_utils.convert() function
:param input_test_csv_path: path to the CSV file containing the test data. The format of the CSV file is
is specified by the mnist_utils.convert() function
:param output_rootdir: the root directory into which the clean data will be stored.
training data will be stored in: output_rootdir/train
test data will be stored in: output_rootdir/test
:param output_train_csv_file: a CSV file of the training data, which specifies paths to files, and their
associated labels
:param output_test_csv_file: a CSV file of the test data, which specifies paths to files, and their
associated labels
:param train_fname_prefix: a prefix to every training filename
:param test_fname_prefix: a prefix to every test filename
:param xforms: a dictionary which contains the necessary transformations to be applied to each input image.
The configuration is validated by _validate_create_clean_dataset_cfgdict(), but at a high level,
the dictionary must contain the 'transforms' key, and that must be a list of transformations to
be applied.
:param random_state_obj: object used to derive random states for each image that is generated
:return: None
"""
# input error checking
if not _validate_create_clean_dataset_cfgdict(xforms):
raise ValueError("mod_cfg argument incorrectly specified!")
# create a fresh version of the directory
    try:
        shutil.rmtree(output_rootdir)
    except OSError:
        pass
X_train, y_train = load_dataset(input_train_csv_path)
X_test, y_test = load_dataset(input_test_csv_path)
train_output_subdir = 'train'
test_output_subdir = 'test'
# make necessary sub-directories
    try:
        os.makedirs(os.path.join(output_rootdir, train_output_subdir))
    except OSError:
        pass
    try:
        os.makedirs(os.path.join(output_rootdir, test_output_subdir))
    except OSError:
        pass
random_state = random_state_obj.get_state()
clean_train_output_list = _df_iterate_store(X_train, y_train,
train_fname_prefix, output_rootdir,
train_output_subdir,
xforms,
random_state_obj,
output_file_start_counter=0)
# reset state to ensure reproducibility regardless of the # of data points generated
random_state_obj.set_state(random_state)
clean_test_output_list = _df_iterate_store(X_test, y_test,
test_fname_prefix, output_rootdir,
test_output_subdir,
xforms,
random_state_obj,
output_file_start_counter=0)
keys = ['file', 'label']
with open(os.path.join(output_rootdir, output_train_csv_file), 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(clean_train_output_list)
with open(os.path.join(output_rootdir, output_test_csv_file), 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(clean_test_output_list)
| 19,712
|
def back(update, context):
"""Кнопка назад."""
user = get_user_or_raise(update.effective_user.id)
update.message.reply_text(
messages.MAIN_MENU_MESSAGE, reply_markup=get_start_keyboard(user)
)
return ConversationHandler.END
| 19,713
|
def patch_redhat_subscription(mocker):
"""
Function used for mocking some parts of redhat_subscription module
"""
mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.redhat_subscription.RegistrationBase.REDHAT_REPO')
mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.redhat_subscription.isfile', return_value=False)
mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.redhat_subscription.unlink', return_value=True)
mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.redhat_subscription.AnsibleModule.get_bin_path',
return_value='/testbin/subscription-manager')
| 19,714
|
def pandas_config():
"""
    Apply pandas config options.
:return:
"""
for key, value in CONFIG['pandas'].items():
pd.set_option(key, value)
| 19,715
|
def get_norm_residuals(vecs, word):
"""
computes normalized residuals of vectors with respect to a word
Args:
vecs (ndarray):
word (ndarray):
Returns:
tuple : (rvecs_n, rvec_flag)
CommandLine:
python -m ibeis.algo.hots.smk.smk_residuals --test-get_norm_residuals
Example:
>>> # ENABLE_DOCTEST
>>> # The case where vecs != words
>>> from ibeis.algo.hots.smk.smk_residuals import * # NOQA
>>> rng = np.random.RandomState(0)
>>> vecs = (hstypes.VEC_MAX * rng.rand(4, 128)).astype(hstypes.VEC_TYPE)
>>> word = (hstypes.VEC_MAX * rng.rand(1, 128)).astype(hstypes.VEC_TYPE)
>>> rvecs_n = get_norm_residuals(vecs, word)
>>> result = ut.numpy_str2(rvecs_n)
>>> print(result)
Example:
>>> # ENABLE_DOCTEST
>>> # The case where vecs == words
>>> from ibeis.algo.hots.smk.smk_residuals import * # NOQA
>>> rng = np.random.RandomState(0)
>>> vecs = (hstypes.VEC_MAX * rng.rand(4, 128)).astype(hstypes.VEC_TYPE)
>>> word = vecs[1]
>>> rvecs_n = get_norm_residuals(vecs, word)
>>> result = ut.numpy_str2(rvecs_n)
>>> print(result)
IGNORE
rvecs_agg8 = compress_normvec_uint8(arr_float)
rvecs_agg16 = compress_normvec_float16(arr_float)
ut.print_object_size(rvecs_agg16, 'rvecs_agg16: ')
ut.print_object_size(rvecs_agg8, 'rvecs_agg8: ')
ut.print_object_size(rvec_flag, 'rvec_flag: ')
%timeit np.isnan(_rvec_sums)
%timeit _rvec_sums == 0
%timeit np.equal(rvec_sums, 0)
%timeit rvec_sums == 0
%timeit np.logical_or(np.isnan(_rvec_sums), _rvec_sums == 0)
"""
# Compute residuals of assigned vectors
#rvecs_n = word.astype(dtype=FLOAT_TYPE) - vecs.astype(dtype=FLOAT_TYPE)
arr_float = np.subtract(word.astype(hstypes.FLOAT_TYPE), vecs.astype(hstypes.FLOAT_TYPE))
    # Faster, but doesn't work with np.norm
#rvecs_n = np.subtract(word.view(hstypes.FLOAT_TYPE), vecs.view(hstypes.FLOAT_TYPE))
vt.normalize_rows(arr_float, out=arr_float)
# Mark null residuals
#_rvec_sums = arr_float.sum(axis=1)
#rvec_flag = np.isnan(_rvec_sums)
# Converts normvec to a smaller type like float16 or int8
rvecs_n = compress_normvec(arr_float)
# IF FLOAT16 WE NEED TO FILL NANS
# (but we should use int8, and in that case it is implicit)
# rvecs_n = np.nan_to_num(rvecs_n)
return rvecs_n
| 19,716
|
def euler():
""" Plots the solution to the differential equation with the given initial conditions to verify they equal sin(x) and cos(x) using Euler's Method
"""
t = np.linspace(0.0, 5*2*np.pi, 1000, dtype = float)
x = np.zeros_like(t)
v = np.zeros_like(t)
x[0]= 1
v[0] = 0
s = len(t)
r = np.zeros((2,s))
    for i in range(s-1):
        x[i+1] = x[i] + (t[i+1]-t[i])*v[i]
        v[i+1] = v[i] + (t[i+1]-t[i])*(-x[i])
        r[0,i] = x[i]
        r[1,i] = v[i]
    # record the final step, which the loop above would otherwise leave at zero
    r[0,-1] = x[-1]
    r[1,-1] = v[-1]
fig = plt.figure(figsize = (12,8))
cartesian = plt.axes()
cartesian.plot(t, r[0], 'b', label = "$u(t)$")
cartesian.plot(t, r[1], 'r', label = "$v(t)$")
cartesian.legend()
plt.show()
| 19,717
|
def user_closed_ticket(request):
"""
Returns all closed tickets opened by user
:return: JsonResponse
"""
columns = _no_priority
if settings.SIMPLE_USER_SHOW_PRIORITY:
columns = _ticket_columns
ticket_list = Ticket.objects.filter(created_by=request.user,
is_closed=True)
dtd = TicketDTD( request, ticket_list, columns )
return JsonResponse(dtd.get_dict())
| 19,718
|
def review_lock_status(review_id):
"""
    Return the lock status of the review's included trials (True if locked, else False).
@param review_id: pmid of review
@return: boolean
"""
conn = dblib.create_con(VERBOSE=True)
cur = conn.cursor()
cur.execute(
"SELECT included_complete FROM systematic_reviews WHERE review_id = %s;",
(review_id,))
locked = cur.fetchone()[0]
conn.close()
return locked
| 19,719
|
def cancel(foreign_id):
"""Cancel all queued tasks for the dataset."""
collection = get_collection(foreign_id)
cancel_queue(collection)
| 19,720
|
def get_placekey_from_address(street_address:str, city:str, state:str, postal_code:str, iso_country_code:str='US',
placekey_api_key: str = None) -> str:
"""
Look up the full Placekey for a given address string.
:param street_address: Street address with suite, floor, or apartment.
:param city: The city.
:param state: Two character state identifier.
:param postal_code: Postal code identifier; typically five numbers.
:param iso_country_code: Two character country identifier. Defaults to "US".
:param placekey_api_key: Placekey API key for making requests.
:return: Placekey string.
"""
# check a couple of things for the parameter inputs
assert len(state) == 2, f'state must be two character identifier, not "{state}".'
assert len(iso_country_code) == 2, 'iso_country_code must be two character identifier, not ' \
f'"{iso_country_code}".'
body = {
"query": {
"street_address": street_address,
"city": city,
"region": state,
"postal_code": postal_code,
"iso_country_code": iso_country_code
}
}
pk = _get_placekey(body, placekey_api_key)
return pk
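# Usage sketch (illustrative; the address and API key are placeholders and the
# request itself is performed by the module's ``_get_placekey`` helper):
example_pk = get_placekey_from_address(
    street_address="1543 Mission St",
    city="San Francisco",
    state="CA",
    postal_code="94105",
    placekey_api_key="MY_PLACEKEY_API_KEY",
)
print(example_pk)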
| 19,721
|
async def upload_image(
request: Request,
file: UploadFile = File(...)
):
"""
    Upload an image (jpg/jpeg/png/gif) to the server and store it locally.
:return: upload result
"""
save_file_path = os.path.join(os.getcwd().split("app")[0], r"app/static/images")
# pic_uuid = str(uuid.uuid4())
file_name = file.filename
endfix = file_name.rpartition(".")[-1]
try:
content = await file.read()
        # compute the MD5 of the file; duplicates do not need to be written again
md5_vercation = md5(content)
md5_hash = md5_vercation.hexdigest()
file_url = f'/{env_api}/api/file/image/{md5_hash}'
file_path = os.path.join(save_file_path, f"{md5_hash}.{endfix}")
with request.pony_session:
file_obj = models.Upload.get(md5_hash=md5_hash)
if file_obj:
file_obj.file_path = file_path
file_obj.file_url = file_url
                if not os.path.exists(file_path):  # if the file is missing on disk, write it again
with open(file_path, "wb") as f:
f.write(content)
orm.commit()
                # the file has already been written before
return {"code": 20000, "msg": "Success, file info updated", "time": time.strftime('%Y-%m-%d %H:%M:%S'), "url": file_url}
            # file attributes
file_dict = {
"file_name": file_name,
"file_path": file_path,
"md5_hash": md5_hash,
"file_url": file_url,
"note": "image"
}
models.Upload(**file_dict)
orm.commit()
with open(file_path, "wb") as f:
print("写入路径", file_path)
f.write(content)
return {"code": 20000, "msg": "Success", "time": time.strftime('%Y-%m-%d %H:%M:%S'), "url": file_url}
except Exception as e:
return {"code": 50000, "msg": str(e), "time": time.strftime('%Y-%m-%d %H:%M:%S'), "url": ""}
| 19,722
|
def _path_to_str(var):
"""Make sure var is a string or Path, return string representation."""
if not isinstance(var, (Path, str)):
raise ValueError("All path parameters must be either strings or "
"pathlib.Path objects. Found type %s." % type(var))
else:
return str(var)
| 19,723
|
def netconf_edit_config(task: Task, config: str, target: str = "running") -> Result:
"""
Edit configuration of device using Netconf
Arguments:
config: Configuration snippet to apply
target: Target configuration store
Examples:
Simple example::
> nr.run(task=netconf_edit_config, config=desired_config)
"""
manager = task.host.get_connection("netconf", task.nornir.config)
manager.edit_config(config, target=target)
return Result(host=task.host)
| 19,724
|
def _cve_id_field_name():
""" Key name for a solr field that contains cve_id
"""
return "cve_id"
| 19,725
|
def mapRuntime(dataFrame1, dataFrame2):
"""
    Add the scraped runtimes of the titles to the viewing activity dataframe.
    Parameters:
    dataFrame1: pandas.DataFrame
    The dataframe to which the user wants to add the runtimes.
    dataFrame2: pandas.DataFrame
    The dataframe containing the runtimes.
    Returns:
    a dataFrame
"""
dataFrame1['Runtime'] = dataFrame1.Title.map(
dataFrame2.set_index('Title')['runtime'].to_dict())
return dataFrame1
| 19,726
|
def main():
"""Mainline"""
parser = argparse.ArgumentParser(
description='Start a scan',
)
parser.add_argument('--smartcheck-host', action='store',
default=os.environ.get('DSSC_SMARTCHECK_HOST', None),
help='The hostname of the Deep Security Smart Check deployment. Example: smartcheck.example.com')
parser.add_argument('--smartcheck-user', action='store',
default=os.environ.get('DSSC_SMARTCHECK_USER', None),
help='The userid for connecting to Deep Security Smart Check')
parser.add_argument('--smartcheck-password', action='store',
default=os.environ.get(
'DSSC_SMARTCHECK_PASSWORD', None),
help='The password for connecting to Deep Security Smart Check')
parser.add_argument('--insecure-skip-tls-verify', action='store_true',
default=os.environ.get(
'DSSC_INSECURE_SKIP_TLS_VERIFY', False),
help='Ignore certificate errors when connecting to Deep Security Smart Check')
parser.add_argument('--image-pull-auth', action='store',
default=os.environ.get('DSSC_IMAGE_PULL_AUTH', None),
help='A JSON object of credentials for authenticating with the registry to pull the image from')
parser.add_argument('--registry-root-cas', action='store',
default=os.environ.get('DSSC_REGISTRY_ROOT_CAS', None),
help='A file containing the root CAs (in PEM format) to trust when connecting to the registry')
parser.add_argument('--insecure-skip-registry-tls-verify', action='store_true',
default=os.environ.get(
'DSSC_INSECURE_SKIP_REGISTRY_TLS_VERIFY', False),
help='Ignore certificate errors from the image registry')
parser.add_argument('--no-wait', action='store_false',
default=os.environ.get('DSSC_NO_WAIT', True),
help='Exit after requesting the scan')
parser.add_argument(
'image', help='The image to scan. Example: registry.example.com/project/image:latest')
args = parser.parse_args()
if args.smartcheck_host is None:
eprint('smartcheck_host is required')
sys.exit(1)
if args.insecure_skip_tls_verify:
import urllib3
urllib3.disable_warnings()
if not args.smartcheck_host.startswith('http'):
args.smartcheck_host = 'https://' + args.smartcheck_host
with get_session(
base=args.smartcheck_host,
user=args.smartcheck_user,
password=args.smartcheck_password,
verify=(not args.insecure_skip_tls_verify),
) as session:
start_scan(
session,
args.image,
image_pull_auth=args.image_pull_auth,
registry_root_cas=args.registry_root_cas,
insecure_skip_registry_tls_verify=args.insecure_skip_registry_tls_verify,
wait=args.no_wait,
)
| 19,727
|
def get_circ_center_2pts_r(p1, p2, r):
"""
Find the centers of the two circles that share two points p1/p2 and a radius.
From algorithm at http://mathforum.org/library/drmath/view/53027.html. Adapted from version at
https://rosettacode.org/wiki/Circles_of_given_radius_through_two_points#Python.
:param p1: First point , tuple (x, y)
:param p2: Second point, tuple (x, y)
:param r: Radius of circle
:return: a list of 2 points that are centers of circles of radius r sharing p1/p2
"""
if r == 0.0:
raise ValueError('No solution due to no radius')
(x1, y1), (x2, y2) = tuple(p1), tuple(p2)
if p1 == p2:
        raise ValueError('Infinite number of solutions')
# Distance in x and y between points
dx = x2 - x1
dy = y1 - y2
# Dist between points
q = sqrt(dx ** 2 + dy ** 2)
if q > (2.0 * r):
raise ValueError('Too much distance between points to fit within radius')
# Halfway point
x3 = (x1 + x2) / 2.0
y3 = (y1 + y2) / 2.0
    # Distance along the mirror (perpendicular bisector) line
    d = sqrt(r ** 2 - ((q / 2.0) ** 2))
    # First circle center
    c1x = x3 + d * dy / q
    c1y = y3 + d * dx / q
    # Second circle center
    c2x = x3 - d * dy / q
    c2y = y3 - d * dx / q
return ((c1x, c1y), (c2x, c2y))
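# Worked example: circles of radius 5 through (0, 0) and (6, 0). The centers lie
# on the perpendicular bisector x = 3, at distance d = sqrt(25 - 9) = 4 from it.
c_a, c_b = get_circ_center_2pts_r((0.0, 0.0), (6.0, 0.0), 5.0)
# c_a == (3.0, 4.0) and c_b == (3.0, -4.0)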
| 19,728
|
def bing_generator(subkey, limit=150, start=0):
"""
Uses the Bing search API to get all crawled Devpost urls. This should be
around 700 in total as of summer 2017.
:param subkey: Bing Web Search subscription key
:param limit: Cap the number of devpost names
:param start: What offset to start at in the Bing search results
:return: Iterator that yields string devpost names
"""
PAGE_TEMPLATE = "https://api.cognitive.microsoft.com/bing/v5.0/search"
pages_read = 0
page_size = 50
total_matches = 100 # Arbitrary number bigger than page size
while pages_read * page_size < min(total_matches, limit):
response = requests.get(PAGE_TEMPLATE, params={"q": "site:devpost.com",
"count": page_size,
"offset": start + pages_read * page_size,
"responseFilter": "Webpages"}, headers={
"Ocp-Apim-Subscription-Key": subkey })
json = response.json()
total_matches = json["webPages"]["totalEstimatedMatches"]
for page in json["webPages"]["value"]:
url = page["displayUrl"]
components = urlparse(page["displayUrl"])
devpost_name = components.netloc.split(".")[0]
            if devpost_name not in ["devpost","secure","help","info","post"]:
yield devpost_name
pages_read += 1
| 19,729
|
def test_load_entity(query_factory):
"""Tests loading a basic query with an entity"""
markup_text = 'When does the {Elm Street|store_name} store close?'
processed_query = markup.load_query(markup_text, query_factory)
assert len(processed_query.entities) == 1
entity = processed_query.entities[0]
assert entity.span.start == 14
assert entity.span.end == 23
assert entity.normalized_text == 'elm street'
assert entity.entity.type == 'store_name'
assert entity.entity.text == 'Elm Street'
| 19,730
|
def crext_MaxFragmentLength(length_exponent):
"""Create a MaxFragmentLength extension.
Allowed lengths are 2^9, 2^10, 2^11, 2^12. (TLS default is 2^14)
`length_exponent` should be 9, 10, 11, or 12, otherwise the extension will
contain an illegal value.
"""
maxlen = (length_exponent-8).to_bytes(1,"big")
return ExtensionType.max_fragment_length.value + lenprefix(maxlen)
| 19,731
|
def display_status_lcd(obj_lcd, *args):
""" Display in LCD display status """
# If getSensorStatus is the function, then call it with objDB as argument
if args[0].__name__ == 'getSensorStatus':
messages = args[0](args[1])
elif type(args) is list:
messages = args
else:
messages = [args]
for item in messages:
obj_lcd.text(item, 8)
| 19,732
|
def perform_intervention(intervention, model, effect_types=('indirect', 'direct')):
"""Perform intervention and return results for specified effects"""
x = intervention.base_strings_tok[0] # E.g. The doctor asked the nurse a question. She
x_alt = intervention.base_strings_tok[1] # E.g. The doctor asked the nurse a question. He
with torch.no_grad():
candidate1_base_prob, candidate2_base_prob = model.get_probabilities_for_examples_multitoken(
x,
intervention.candidates_tok)
candidate1_alt_prob, candidate2_alt_prob = model.get_probabilities_for_examples_multitoken(
x_alt,
intervention.candidates_tok)
candidate1 = ' '.join(intervention.candidates[0]).replace('Ġ', '')
candidate2 = ' '.join(intervention.candidates[1]).replace('Ġ', '')
odds_base = candidate2_base_prob / candidate1_base_prob
odds_alt = candidate2_alt_prob / candidate1_alt_prob
total_effect = (odds_alt - odds_base) / odds_base
results = {
'base_string1': intervention.base_strings[0],
'base_string2': intervention.base_strings[1],
'candidate1': candidate1,
'candidate2': candidate2,
'candidate1_base_prob': candidate1_base_prob,
'candidate2_base_prob': candidate2_base_prob,
'odds_base': odds_base,
'candidate1_alt_prob': candidate1_alt_prob,
'candidate2_alt_prob': candidate2_alt_prob,
'odds_alt': odds_alt,
'total_effect': total_effect,
}
for effect_type in effect_types:
candidate1_probs_head, candidate2_probs_head, candidate1_probs_layer, candidate2_probs_layer,\
candidate1_probs_model, candidate2_probs_model = model.attention_intervention_experiment(
intervention, effect_type)
odds_intervention_head = candidate2_probs_head / candidate1_probs_head
odds_intervention_layer = candidate2_probs_layer / candidate1_probs_layer
odds_intervention_model = candidate2_probs_model / candidate1_probs_model
effect_head = (odds_intervention_head - odds_base) / odds_base
effect_layer = (odds_intervention_layer - odds_base) / odds_base
effect_model = (odds_intervention_model - odds_base) / odds_base
results[effect_type + "_odds_head"] = odds_intervention_head.tolist()
results[effect_type + "_effect_head"] = effect_head.tolist()
results[effect_type + "_effect_layer"] = effect_layer.tolist()
results[effect_type + "_effect_model"] = effect_model
return results
| 19,733
|
def nice_size(
self: complex,
unit: str = 'bytes',
long: bool = False,
lower: bool = False,
precision: int = 2,
sep: str = '-',
omissions: list = 'mono deca hecto'.split(),
):
"""
This should behave well on int subclasses
"""
mag = magnitude(self, omissions)
precision = sredro[mag] if self < 5 else precision
unit = set_case(set_length(mag, unit, long, sep), lower)
val = round(self * 10 ** -(sredro[mag]), precision)
return lasso(val, unit)
| 19,734
|
def from_callback(func: Callable,
mapper: Optional[typing.Mapper] = None
) -> Callable[[], Observable]:
"""Converts a callback function to an observable sequence.
Args:
func: Function with a callback as the last argument to
convert to an Observable sequence.
mapper: [Optional] A mapper which takes the arguments
from the callback to produce a single item to yield on
next.
Returns:
A function, when executed with the required arguments minus
the callback, produces an Observable sequence with a single
value of the arguments to the callback as a list.
"""
from .core.observable.fromcallback import _from_callback
return _from_callback(func, mapper)
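# Usage sketch (illustrative): wrapping a toy callback-style function. Without a
# mapper, the resulting observable emits the callback's single argument and
# completes.
def add_async(a, b, callback):
    callback(a + b)

add = from_callback(add_async)
add(1, 2).subscribe(print)  # prints 3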
| 19,735
|
def EVLAUVFITS(inUV, filename, outDisk, err, compress=False, \
exclude=["AIPS HI", "AIPS AN", "AIPS FQ", "AIPS SL", "AIPS PL"], \
include=[], headHi=False, logfile=""):
"""
Write UV data as FITS file
Write a UV data set as a FITAB format file
History written to header
* inUV = UV data to copy
* filename = name of FITS file, any whitespace characters replaced with underscore
* outDisk = FITS directory number
* err = Python Obit Error/message stack
* exclude = List of table types NOT to copy
NB: "AIPS HI" isn't really a table and gets copied anyway
* include = List of table types to copy (FQ, AN always done )
      Exclude has precedence over include
* headHi = if True move history to header, else leave in History table
returns FITS UV data object
"""
################################################################
mess = "Write Data to FITS UV data "+filename+" on disk "+str(outDisk)
printMess(mess, logfile)
# Checks
if not UV.PIsA(inUV):
raise TypeError("inUV MUST be a Python Obit UV")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Deblank filename
    fn = re.sub(r'\s', '_', filename)
# Set output
outUV = UV.newPFUV("FITS UV DATA", fn, outDisk, False, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating FITS data")
#Compressed?
if compress:
inInfo = UV.PGetList(outUV) #
dim = [1,1,1,1,1]
InfoList.PAlwaysPutBoolean (inInfo, "Compress", dim, [True])
# Copy
UV.PCopy (inUV, outUV, err)
if err.isErr:
OErr.printErrMsg(err, "Error copying UV data to FITS")
# History
inHistory = History.History("inhistory", outUV.List, err)
outHistory = History.History("outhistory", outUV.List, err)
# Add history
outHistory.Open(History.READWRITE, err)
outHistory.TimeStamp(" Start Obit uvtab",err)
outHistory.WriteRec(-1,"uvtab / FITS file "+fn+" disk "+str(outDisk),err)
outHistory.Close(err)
# History in header?
if headHi:
History.PCopy2Header (inHistory, outHistory, err)
OErr.printErrMsg(err, "Error with history")
# zap table
outHistory.Zap(err)
# Copy Tables
UV.PCopyTables (inUV, outUV, exclude, include, err)
return outUV
| 19,736
|
def form_cleaner(querydict):
"""
Hacky way to transform form data into readable data by the model constructor
:param querydict: QueryDict
:return: dict
"""
r = dict(querydict.copy())
    # Delete the CSRF token
del r['csrfmiddlewaretoken']
for key in list(r):
# Take first element of array
r[key] = r[key][0]
# Delete empty fields
if r[key] == '' or r[key] is None:
del r[key]
return r
| 19,737
|
def has_1080p(manifest):
"""Return True if any of the video tracks in manifest have a 1080p profile
available, else False"""
return any(video['width'] >= 1920
for video in manifest['videoTracks'][0]['downloadables'])
| 19,738
|
def visualize_table(filename: str, table: str) -> bool:
"""
Formats the contents of a db table using the texttable package
:param filename: .db file name (String)
:param table: Name of the table to plot (String)
:return: Bool
"""
conn, cursor = get_connection(filename)
table_elements = get_table(filename, table)
if not len(table_elements) > 0:
print("This table is empty")
return False
text_table = Texttable()
allign = ["l" for i in range(len(table_elements[0]))]
vallign = ["m" for i in range(len(table_elements[0]))]
title = eval(query(filename, "tables", "name", table)[0][1])
text_table.set_cols_align(allign)
text_table.set_cols_valign(vallign)
text_table.header(title)
for row in table_elements:
text_table.add_row(row)
print(text_table.draw())
return True
| 19,739
|
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a controller."""
assert (
await async_setup_component(
hass, sensor.DOMAIN, {sensor.DOMAIN: {"platform": "unifi"}}
)
is True
)
assert unifi.DOMAIN not in hass.data
| 19,740
|
def extractIsekaiMahou(item):
"""
# Isekai Mahou Translations!
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Isekai Mahou Chapter' in item['title'] and 'Release' in item['title']:
return buildReleaseMessageWithType(item, 'Isekai Mahou wa Okureteru!', vol, chp, frag=frag, postfix=postfix)
return False
| 19,741
|
def delete_version_from_file(fname, par, ctype=gu.PIXEL_MASK, vers=None, cmt=None, verb=False) :
"""Delete specified version from calibration constants.
Parameters
- fname : full path to the hdf5 file
- par : psana.Event | psana.Env | float - tsec event time
- ctype : gu.CTYPE - enumerated calibration type, e.g.: gu.PIXEL_MASK
- vers : int - calibration version
- cmt : str - comment
    - verb  : bool - verbosity
See :py:class:`DCMethods`
"""
metname = sys._getframe().f_code.co_name
str_ctype = gu.dic_calib_type_to_name[ctype]
    if verb:
        print(' %s.delete_version_from_file: ctype: %s vers: %s' %
              (metname, str_ctype, vers))
if not is_good_fname(fname, verb) : return None
cs = DCStore(fname)
cs.load()
ct = cs.ctypeobj(str_ctype)
if ct is None : return None
#ct.print_obj()
tsec = dcu.par_to_tsec(par)
cr = ct.range_for_tsec(tsec)
if cr is None : return None
v = vers if vers is not None else cr.vnum_last()
vdel = cr.mark_version(vnum=vers, cmt=cmt)
    if verb: log.setPrintBits(0o2)  # 0o377
cs.save()
    if verb:
        print(50*'_', '\nDCStore.print_obj() after delete version %s' % str(vdel))
        cs.print_obj()
return vdel
| 19,742
|
def get_filename(filePath):
"""get filename without file extension from file path
"""
absFilePath = os.path.abspath(filePath)
return os.path.basename(os.path.splitext(absFilePath)[0])
| 19,743
|
def create_midi_file(notes: List[Tuple[int, int]]) -> io.BytesIO:
"""Create a MIDI file from the given list of notes.
Notes are played with piano instrument.
"""
byte_stream = io.BytesIO()
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
for note, t in notes:
track.append(mido.Message('note_on', note=note, velocity=64))
track.append(mido.Message('note_off', note=note, time=t))
mid.save(file=byte_stream)
return io.BytesIO(byte_stream.getvalue())
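# Usage sketch (illustrative): a short ascending arpeggio; note lengths are in
# MIDI ticks at mido's default ticks_per_beat.
example_midi = create_midi_file([(60, 240), (64, 240), (67, 240)])
with open("arpeggio.mid", "wb") as fh:
    fh.write(example_midi.read())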
| 19,744
|
def brainprep_quasiraw(anatomical, mask, outdir, target=None, no_bids=False,
verbose=0):
""" Define quasi-raw pre-processing workflow.
Parameters
----------
anatomical: str
path to the anatomical T1w Nifti file.
mask: str
a binary mask to be applied.
outdir: str
the destination folder.
target: str
a custom target image for the registration.
no_bids: bool
set this option if the input files are not named following the
BIDS hierarchy.
verbose: int
control the verbosity level: 0 silent, [1, 2] verbose.
"""
print_title("Set outputs and default target if applicable...")
if target is None:
resource_dir = os.path.join(
os.path.dirname(brainprep.__file__), "resources")
target = os.path.join(
resource_dir, "MNI152_T1_1mm_brain.nii.gz")
print("set target:", target)
imfile = anatomical
maskfile = mask
targetfile = target
if no_bids:
basename = os.path.basename(imfile).split(".")[0] + "_desc-{0}_T1w"
else:
basename = os.path.basename(imfile).split(".")[0].replace(
"_T1w", "_desc-{0}_T1w")
basefile = os.path.join(outdir, basename + ".nii.gz")
print("use base file name:", basefile)
stdfile = basefile.format("1std")
stdmaskfile = basefile.format("1maskstd")
brainfile = basefile.format("2brain")
scaledfile = basefile.format("3scaled")
bfcfile = basefile.format("4bfc")
regfile = basefile.format("5reg")
regmaskfile = basefile.format("5maskreg")
applyfile = basefile.format("6apply")
print_title("Launch quasi-raw pre-processing...")
brainprep.reorient2std(imfile, stdfile)
brainprep.reorient2std(maskfile, stdmaskfile)
brainprep.apply_mask(stdfile, stdmaskfile, brainfile)
brainprep.scale(brainfile, scaledfile, scale=1)
brainprep.biasfield(scaledfile, bfcfile)
_, trffile = brainprep.register_affine(bfcfile, targetfile, regfile)
brainprep.apply_affine(stdmaskfile, regfile, regmaskfile, trffile,
interp="nearestneighbour")
brainprep.apply_mask(regfile, regmaskfile, applyfile)
| 19,745
|
async def get_programs(request: Request) -> Response:
"""
description: Get a list of all programs
responses:
200:
description: A list of programs.
"""
ow: "OpenWater" = request.app.ow
return ToDictJSONResponse([p.to_dict() for p in ow.programs.store.all])
| 19,746
|
def create_dataframe(dictionary_to_convert, cols):
"""
    From the dictionary and the desired column names passed in, this function
    returns a DataFrame; each value becomes one row and the dictionary keys are dropped.
"""
dataframe_converted = pd.DataFrame.from_dict(dictionary_to_convert, orient='index', columns = cols)
dataframe_converted = dataframe_converted.reset_index()
dataframe_converted = dataframe_converted.drop(columns=['index'])
return dataframe_converted
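# Quick illustration: each dictionary value becomes one row labelled by ``cols``;
# the dictionary keys themselves are dropped.
example_scores = {"alice": [3, 0.8], "bob": [5, 0.6]}
scores_df = create_dataframe(example_scores, cols=["games", "win_rate"])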
| 19,747
|
def version_keyword(dist, attr, value):
"""
Implements the actual version setup() keyword.
"""
if value == "PBR":
from pbr.util import setup_cfg_to_setup_kwargs
path = "setup.cfg"
parser = ConfigParser()
if not os.path.exists(path):
raise ValueError("file '%s' does not exist" % os.path.abspath(path))
parser.read(path)
config = {}
for section in parser.sections():
config[section] = dict(parser.items(section))
attrs = setup_cfg_to_setup_kwargs(config)
version = str(Version(attrs["name"]))
os.environ["PBR_VERSION"] = version
else:
version = str(Version(dist.metadata.get_name()))
dist.metadata.version = version
| 19,748
|
def test_smoke1_reboot():
"""Test reboot"""
return_value, _ = subprocess.getstatusoutput('meshtastic --reboot')
assert return_value == 0
# pause for the radio to reset (10 seconds for the pause, and a few more seconds to be back up)
time.sleep(18)
| 19,749
|
def br_candidates(data, br_remaining, commit):
"""Find candidates not yet included to be added to br(r,*)
Given the list of remaining, that is not yet taken into account
bug fixes, split this list into part that has been created (fixed)
before creation time of given bug report, and those that were
created (fixed) later.
Creation times of bugfix commits, and the date when given bug
report was creates is taken from the augmented combined bugs+fixes
dataset. The list of remaining fixes (as shortened SHA-1
identifiers, which are keys in the dataset to bug report + bug fix
info) needs to be sorted in ascending chronological order of
bugfix commit creation date. Returned lists are also sorted; the
original list is split in two.
Parameters
----------
data : dict | collections.OrderedDict
Combined data about bug reports and bugfix commits, read from
the JSON file.
br_remaining : list
List of remaining keys to data (of shortened SHA-1
        identifiers of bugfix commits), sorted in the bugfix commit
creation time order. This means that the `commit` timestamp
divides this list into two parts: first that has commit
creation date not later than creation date of given bugfix,
and those that are later.
/-- t(r)
|
[c_0, c_1,...,c_i, | c_{i+1},...,c_{N-1}]
where t(c_j) < t_(c_{j+1}) and t(c_i) < t(r) <= t(c_{i+1}).
commit : str
Identifier of the bug report, all bugfix commits added to the
returned list have commit date not later than bug report
creation date.
TODO?: maybe change the name of this parameter.
Returns
-------
(br_new, br_remaining) : (list, list)
First list in returned pair is the list of bugfix commits from
`br_remaining` with creation time earlier than bug report
creation time of `commit`. Because `br_remaining` is assumed
to be sorted this would be some number of elements from the
start of it, and it would also be sorted. Possibly empty.
Second list in returned pair is the list of remaining bugfix
        commits, with creation time later than creation time of the given
bug report. Possibly empty.
These two lists are (br_remaining[:i], br_remaining[i:]) for
some value of i.
"""
this_bug_ts = int(data[commit]['bug_report']['timestamp'])
this_fix_ts = int(data[commit]['commit']['metadata']['timestamp'])
commit_list = []
# DEBUG
#print('commit =%s (bug_ts=%d) / bug_id=%d' %
# (commit, this_bug_ts, int(data[commit]['bug_report']['bug_id'])))
# corner cases
if not br_remaining:
# DEBUG
#print('br_candidates: empty list')
# no candidates
return ([], [])
elif this_bug_ts <= int(data[br_remaining[0]]['commit']['metadata']['timestamp']):
# DEBUG
#print('br_candidates: early return %d < %d' %
# (this_bug_ts, int(data[br_remaining[0]]['commit']['metadata']['timestamp'])))
# all commits are later (newer) than given bug
return ([], br_remaining)
elif int(data[br_remaining[-1]]['commit']['metadata']['timestamp']) < this_bug_ts:
# even last commit is earlier (older) than given bug
# NOTE: should never happen in this code
return (br_remaining, [])
for (i,v) in enumerate(br_remaining):
curr_bug_ts = int(data[v]['bug_report']['timestamp'])
curr_fix_ts = int(data[v]['commit']['metadata']['timestamp'])
if not curr_fix_ts < this_bug_ts:
return (br_remaining[:i], br_remaining[i:])
| 19,750
|
def aggregate_dataframe(mails_per_sender, datetimes_per_sender):
"""Engineer features and aggregate them in a dataframes.
:param dict mails_per_sender: A dictionary with email counts for each sender
:param dict datetimes_per_sender: A dictionary with datetime objects for
each sender
:raises InputError: if at least one of the arguments is an empty dictionary
:returns: A dataframe with aggregated features
:rtype: pandas.DataFrame
"""
    if not mails_per_sender or not datetimes_per_sender:
        raise exceptions.InputError('At least one of the arguments is an '
                                    'empty dictionary!')
average_timestamps = average_timestamps_in_seconds(
datetimes_per_sender)
average_weekdays = weekday_average(datetimes_per_sender)
aggregation = {'Mail Count': mails_per_sender,
'Average Timestamp': average_timestamps,
'Average Weekday': average_weekdays}
return pandas.DataFrame(aggregation)
| 19,751
|
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, parallel=0, aliases=None, **kwargs):
"""Create the test databases.
This function is a copy of the Django setup_databases with one addition.
A Tenant object is created and saved when setting up the database.
"""
test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)
old_names = []
for db_name, aliases in test_databases.values():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
test_db_name = connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get("TEST", {}).get("SERIALIZE", True),
)
try:
tenant, created = Tenant.objects.get_or_create(schema_name=Tenant._TEMPLATE_SCHEMA)
if created:
tenant.save()
tenant.create_schema()
tenant, created = Tenant.objects.get_or_create(schema_name=KokuTestRunner.schema)
if created:
tenant.save()
tenant.create_schema()
customer, __ = Customer.objects.get_or_create(
account_id=KokuTestRunner.account, schema_name=KokuTestRunner.schema
)
with tenant_context(tenant):
for tag_key in OCP_ENABLED_TAGS:
OCPEnabledTagKeys.objects.get_or_create(key=tag_key)
data_loader = NiseDataLoader(KokuTestRunner.schema, customer)
# Obtain the day_list from yaml
read_yaml = UploadAwsTree(None, None, None, None)
tree_yaml = read_yaml.import_yaml(yaml_file_path="scripts/aws_org_tree.yml")
day_list = tree_yaml["account_structure"]["days"]
# Load data
# TODO: COST-444: This NiseDataLoader to be removed and replaced with the commented baker_data_loaders below.
data_loader = NiseDataLoader(KokuTestRunner.schema, customer)
data_loader.load_openshift_data(customer, "ocp_aws_static_data.yml", "OCP-on-AWS")
data_loader.load_aws_data(customer, "aws_static_data.yml", day_list=day_list)
data_loader.load_openshift_data(customer, "ocp_azure_static_data.yml", "OCP-on-Azure")
data_loader.load_azure_data(customer, "azure_static_data.yml")
bakery_data_loader = ModelBakeryDataLoader(KokuTestRunner.schema, customer)
ocp_on_aws_cluster_id = "OCP-on-AWS"
ocp_on_azure_cluster_id = "OCP-on-Azure"
ocp_on_gcp_cluster_id = "OCP-on-GCP"
ocp_on_prem_cluster_id = "OCP-on-Prem"
# TODO: COST-444: uncomment these when the above data_loader is removed
# ocp_on_aws_ocp_provider, ocp_on_aws_report_periods = bakery_data_loader.load_openshift_data(
# ocp_on_aws_cluster_id, on_cloud=True
# )
# ocp_on_azure_ocp_provider, ocp_on_azure_report_periods = bakery_data_loader.load_openshift_data(
# ocp_on_azure_cluster_id, on_cloud=True
# )
# ocp_on_gcp_ocp_provider, ocp_on_gcp_report_periods = bakery_data_loader.load_openshift_data(
# ocp_on_gcp_cluster_id, on_cloud=True
# )
# _, __ = bakery_data_loader.load_openshift_data(ocp_on_prem_cluster_id, on_cloud=False)
# _, aws_bills = bakery_data_loader.load_aws_data(
# linked_openshift_provider=ocp_on_aws_ocp_provider, day_list=day_list
# )
# _, azure_bills = bakery_data_loader.load_azure_data(
# linked_openshift_provider=ocp_on_azure_ocp_provider
# )
_, gcp_bills = bakery_data_loader.load_gcp_data()
# bakery_data_loader.load_openshift_on_cloud_data(
# Provider.PROVIDER_AWS_LOCAL, ocp_on_aws_cluster_id, aws_bills, ocp_on_aws_report_periods
# )
# bakery_data_loader.load_openshift_on_cloud_data(
# Provider.PROVIDER_AZURE_LOCAL,
# ocp_on_azure_cluster_id,
# azure_bills,
# ocp_on_azure_report_periods,
# )
# bakery_data_loader.load_openshift_on_cloud_data(
# Provider.PROVIDER_GCP_LOCAL, ocp_on_gcp_cluster_id, gcp_bills, ocp_on_gcp_report_periods
# )
for account in [("10002", "acct10002"), ("12345", "acct12345")]:
tenant = Tenant.objects.get_or_create(schema_name=account[1])[0]
tenant.save()
tenant.create_schema()
Customer.objects.get_or_create(account_id=account[0], schema_name=account[1])
except Exception as err:
LOG.error(err)
raise err
if parallel > 1:
for index in range(parallel):
connection.creation.clone_test_db(suffix=str(index + 1), verbosity=verbosity, keepdb=keepdb)
else:
connection.creation.set_as_test_mirror(connections[first_alias].settings_dict)
# Configure the test mirrors.
for alias, mirror_alias in mirrored_aliases.items():
connections[alias].creation.set_as_test_mirror(connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names
| 19,752
|
def about_view(request):
"""
This view gives information about the version and software we have loaded.
"""
evaluation = get_session_evaluation(request.session)
definitions = evaluation.definitions
system_info = mathics_system_info(definitions)
return render(
request,
"about.html",
{
"django_version": django_version,
"three_js_version": get_threejs_version(),
"mathics_threejs_backend_version": get_mathics_threejs_backend_version(),
"MathJax_version": get_MathJax_version(),
"mathics_version": mathics_version_info["mathics"],
"mathics_django_version": __version__,
"mpmath_version": mathics_version_info["mpmath"],
"numpy_version": mathics_version_info["numpy"],
"python_version": mathics_version_info["python"],
"sympy_version": mathics_version_info["sympy"],
"SystemID": system_info["$SystemID"],
"SystemTimeZone": system_info["$SystemTimeZone"],
"UserName": system_info["$UserName"],
"BaseDirectory": system_info["$BaseDirectory"],
"HomeDirectory": system_info["$HomeDirectory"],
"InstallationDirectory": system_info["$InstallationDirectory"],
"RootDirectory": system_info["$RootDirectory"],
"TemporaryDirectory": system_info["$TemporaryDirectory"],
"DB_PATH": MATHICS_DJANGO_DB_PATH,
"DOC_DATA_PATH": DOC_USER_HTML_DATA_PATH,
"HTTP_USER_AGENT": request.META.get("HTTP_USER_AGENT", ""),
"REMOTE_USER": request.META.get("REMOTE_USER", ""),
"REMOTE_ADDR": request.META.get("REMOTE_ADDR", ""),
"REMOTE_HOST": request.META.get("REMOTE_HOST", ""),
"MachinePrecision": system_info["MachinePrecision"],
"MemoryAvailable": system_info["MemoryAvailable[]"],
"SystemMemory": system_info["$SystemMemory"],
"Machine": system_info["$Machine"],
"MachineName": system_info["$MachineName"],
"ProcessID": system_info["$ProcessID"],
"ProcessorType": system_info["$ProcessorType"],
"PythonVersion": sys.version,
"user_settings": get_user_settings(evaluation),
},
)
| 19,753
|
def doDeploy():
"""Deploys the lambda package and associated resources, based on the configuration file config.yaml supplied in the root directory"""
packageDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir)
path_to_config_file = os.path.join(packageDir, 'config.yaml')
cfg = read(path_to_config_file, loader=yaml.load)
# build(packageDir)
print('Deploying Lambda function')
deploy(packageDir, perform_install=False)
print('Disabling trigger')
_updateTrigger(cfg, enabled=False)
print('Creating Kinesis stream')
_createKinesis(cfg)
print('Creating DynamoDB table')
_createDynamoDB(cfg)
print('Creating trigger')
_createTrigger(cfg)
print('Enabling trigger')
_updateTrigger(cfg, enabled=True)
| 19,754
|
def is_trace_directory(path: str) -> bool:
"""
Check recursively if a path is a trace directory.
:param path: the path to check
:return: `True` if it is a trace directory, `False` otherwise
"""
path = os.path.expanduser(path)
if not os.path.isdir(path):
return False
return impl.is_trace_directory(path)
| 19,755
|
def traj2points(traj, npoints, OS):
"""
Transform spoke trajectory to point trajectory
Args:
traj: Trajectory with shape [nspokes, 3]
npoints: Number of readout points along spokes
OS: Oversampling
Returns:
array: Trajectory with shape [nspokes, npoints, 3]
"""
[nspokes, ndim] = np.shape(traj)
r = (np.arange(0, npoints))/OS
Gx, Gy, Gz = np.meshgrid(r, np.arange(nspokes), np.arange(ndim))
traj_p = Gx*np.transpose(np.tile(traj, [npoints, 1, 1]), [1, 0, 2])
return traj_p
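# Shape check (illustrative): 3 spokes with 4 readout points and no oversampling
# give an array of shape (3, 4, 3), with the first readout point of every spoke
# at the k-space centre.
example_spokes = np.random.randn(3, 3)
example_points = traj2points(example_spokes, npoints=4, OS=1)
assert example_points.shape == (3, 4, 3) and np.allclose(example_points[:, 0, :], 0)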
| 19,756
|
def parse_headings(raw_contents, file_):
"""Parse contents looking for headings. Return a tuple with number
of TOC elements, the TOC fragment and the number of warnings."""
# Remove code blocks
parsable_contents = re.sub(r"```[\s\S]+?```", "", raw_contents)
# Parse H1,H2,H3
headings = re.findall(r"^(#|##|###)\s+(.*)", parsable_contents,
re.MULTILINE)
toc = "Table of contents:" + os.linesep
tocsize = 0
warnings = 0
count_h1 = 0
count_h2 = 0
for heading in headings:
level = heading[0]
level = (1 if level == "#" else
2 if level == "##" else
3 if level == "###" else None)
assert level is not None
title = heading[1].strip()
if level == 1:
count_h1 += 1
if count_h1 > 1:
warnings += 1
print("WARNING: found more than one H1 in "+file_)
continue
if level == 2:
count_h2 += 1
hash_ = headinghash(title)
indent = ""
if level == 3:
if count_h2:
# If there was no H2 yet then H3 shouldn't have indent.
indent = " " * 2
toc += indent + "* [%s](#%s)" % (title, hash_) + os.linesep
tocsize += 1
if tocsize <= 1:
# If there is only one H2/H3 heading do not create TOC.
toc = ""
tocsize = 0
return tocsize, toc, warnings
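# Usage sketch (illustrative; relies on the module's ``headinghash`` helper):
sample_md = "# Pkg\n## Install\ntext\n## Usage\ntext\n### Advanced\ntext\n"
toc_size, toc_text, toc_warnings = parse_headings(sample_md, "README.md")
# toc_size == 3; toc_text lists Install, Usage and an indented Advanced entry.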
| 19,757
|
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
| 19,758
|
def label_on():
"""Sets the next label to the ON value"""
global NEXT_LABEL
NEXT_LABEL = "1"
| 19,759
|
def model_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
constraint=None,
trainable=True,
collections=None,
**kwargs):
"""
Get or create a model variable.
When the variable is created, it will be added to both `GLOBAL_VARIABLES`
and `MODEL_VARIABLES` collection.
Args:
name: Name of the variable.
shape: Shape of the variable.
dtype: Data type of the variable.
initializer: Initializer of the variable.
regularizer: Regularizer of the variable.
constraint: Constraint of the variable.
trainable (bool): Whether or not the variable is trainable?
collections: In addition to `GLOBAL_VARIABLES` and `MODEL_VARIABLES`,
also add the variable to these collections.
\\**kwargs: Other named arguments passed to :func:`tf.get_variable`.
Returns:
tf.Variable: The variable.
"""
collections = list(set(
list(collections or ()) +
[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.MODEL_VARIABLES]
))
return tf.get_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
constraint=constraint,
trainable=trainable,
collections=collections,
**kwargs
)
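# Usage sketch (illustrative, TF1-style graph mode; the scope name and shape are
# arbitrary):
with tf.variable_scope("dense_layer"):
    kernel = model_variable("kernel", shape=[128, 64],
                            initializer=tf.glorot_uniform_initializer())
# ``kernel`` is now listed in both GLOBAL_VARIABLES and MODEL_VARIABLES.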
| 19,760
|
def default_k_pattern(n_pattern):
""" the default number of pattern divisions for crossvalidation
minimum number of patterns is 3*k_pattern. Thus for n_pattern <=9 this
returns 2. From there it grows gradually until 5 groups are made for 40
patterns. From this point onwards the number of groups is kept at 5.
bootstrapped crossvalidation also uses this function to set k, but scales
n_rdm to the expected proportion of samples retained when bootstrapping
(1-np.exp(-1))
"""
if n_pattern < 12:
k_pattern = 2
elif n_pattern < 24:
k_pattern = 3
elif n_pattern < 40:
k_pattern = 4
else:
k_pattern = 5
return k_pattern
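# Quick check of the thresholds above:
assert default_k_pattern(9) == 2
assert default_k_pattern(20) == 3
assert default_k_pattern(39) == 4
assert default_k_pattern(100) == 5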
| 19,761
|
def test_character_references_336():
"""
Test case 336: (part 4) Entity and numeric character references cannot be used in place of symbols indicating structure in CommonMark documents.
"""
# Arrange
source_markdown = """	foo"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\a	\a\t\afoo:]",
"[end-para:::True]",
]
expected_gfm = """<p>\tfoo</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| 19,762
|
def resize_basinData():
"""
    Read in global data and build a new bt array with a common length.
    This step can be eliminated if we use IBTrACS in future CHAZ development.
"""
basinName = ['atl','wnp','enp','ni','sh']
nd = 0
for iib in range(0,len(basinName),1):
ib = basinName[iib]
f =gv.ipath + 'bt_'+ib+'.nc'
#bt1 = nc.Dataset(f)
bt1 = xr.open_dataset(f)
if iib == 0:
maxCol = bt1['PIslp'].shape[0]
else:
maxCol = np.nanmax([maxCol,bt1['PIslp'].shape[0]])
## in bt1, the time is datenumber start from 1800,01,01,0,0. So if datenumber is 0 means there is no data
nd += bt1['PIslp'].shape[1]
bt = {}
    for iib in range(0,len(basinName),1):
        ib = basinName[iib]
        f = gv.ipath + 'bt_'+ib+'.nc'
        bt1 = xr.open_dataset(f)
for iv in bt1.variables.keys():
if iib == 0:
if np.size(bt1.variables[iv].shape) >1:
bt[iv] = np.zeros([maxCol,bt1.variables[iv].shape[1]])*np.float('nan')
bt[iv][:bt1.variables[iv].shape[0],:] = bt1.variables[iv].values
else:
bt[iv] = bt1.variables[iv].values
else:
if np.size(bt1.variables[iv].shape) >1:
dummy = np.zeros([maxCol,bt1.variables[iv].shape[1]])*np.float('nan')
dummy[:bt1.variables[iv].shape[0],:] = bt1.variables[iv].values
bt[iv] = np.hstack([bt[iv],dummy])
del dummy
else:
bt[iv] = np.hstack([bt[iv],bt1.variables[iv].values])
del bt1
for iv in bt.keys():
if ((np.size(bt[iv].shape) >1) and ('Time' not in iv)):
bt[iv][bt['Time']==0] = np.float('nan')
bt['Time'][bt['Time']!=bt['Time']]=0
return bt
| 19,763
|
def _cast_wf(wf):
"""Cast wf to a list of ints"""
if not isinstance(wf, list):
if str(type(wf)) == "<class 'numpy.ndarray'>":
# see https://stackoverflow.com/questions/2060628/reading-wav-files-in-python
wf = wf.tolist() # list(wf) does not convert int16 to int
else:
wf = list(wf) # fallback
if len(wf) > 0:
assert isinstance(wf[0], int), f"first element of wf wasn't an int, but a {type(wf[0])}"
return wf
| 19,764
|
def encoded_path(root, identifiers, extension = ".enc", depth = 3, digest = True):
"""generate a unique file-accessible path from the given list of identifiers
starting at the given root directory."""
    ident = "_".join(identifiers)
    if digest:
        # hashlib.sha1 replaces the removed ``sha`` module (requires ``import hashlib``)
        ident = hashlib.sha1(ident.encode("utf-8")).hexdigest()
ident = os.path.basename(ident)
tokens = []
for d in range(1, depth):
tokens.append(ident[0:d])
dir = os.path.join(root, *tokens)
verify_directory(dir)
return os.path.join(dir, ident + extension)
| 19,765
|
def block_gen(rows, cols, bs=64, random_flag=False):
"""Generate block indices for reading rasters/arrays as blocks
Return the row (y/i) index, then the column (x/j) index
Args:
rows (int): number of rows in raster/array
cols (int): number of columns in raster/array
bs (int): gdal_common block size (produces square block)
        random_flag (bool): randomize the order of yielded blocks
Yields:
block_i and block_j indices of the raster using the specified block size
Example:
from osgeo import gdal, ogr, osr
import gdal_common as gis
ds = gdal.Open('/home/vitale232/Downloads/ndvi.img')
rows = ds.RasterYSize
cols = ds.RasterXSize
generator = gis.block_gen(rows, cols)
for row, col in generator:
print('Row: {0}'.format(row))
print('Col: {0}\\n'.format(col))
random_generator = gis.block_gen(rows, cols, random_flag=True)
for row, col in random_generator:
print('Row/Col: {0} {1}\n'.format(row, col))
"""
if random_flag:
        ## the full block list is materialized here so it can be shuffled;
        ## iteration is still lazy because the indices are yielded one by one
block_ij_list = list(itertools.product(
range(0, rows, bs), range(0, cols, bs)))
random.shuffle(block_ij_list)
for b_i, b_j in block_ij_list:
yield b_i, b_j
else:
        for block_i in range(0, rows, bs):
for block_j in xrange(0, cols, bs):
yield block_i, block_j
| 19,766
|
def centralize_scene(points):
"""In-place centralize a whole scene"""
assert points.ndim == 2 and points.shape[1] >= 3
points[:, 0:2] -= points[:, 0:2].mean(0)
points[:, 2] -= points[:, 2].min(0)
return points
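
# Hedged usage sketch for centralize_scene: x/y are shifted to be centred on
# their mean and z is shifted so the lowest point sits at 0, all in place.
import numpy as np

_pts = np.array([[2.0, 4.0, 5.0],
                 [4.0, 6.0, 7.0]])
centralize_scene(_pts)
print(_pts)  # [[-1. -1.  0.]
             #  [ 1.  1.  2.]]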
| 19,767
|
def ms(val):
""" Turn a float value into milliseconds as an integer. """
return int(val * 1000)
| 19,768
|
def cancel_member(sess: SQLASession, member: Member, keep_groups: bool = False) -> Collect[None]:
"""
Suspend the user account of a member.
"""
user = unix.get_user(member.uid)
yield unix.enable_user(user, False)
yield bespoke.clear_crontab(member)
yield bespoke.slay_user(member)
# TODO: for server in {"cavein", "doom", "sinkhole"}:
# bespoke.clear_crontab(member); bespoke.slay_user(member)
yield bespoke.archive_website(member)
yield bespoke.ensure_member(sess, member.crsid, member.preferred_name, member.surname,
member.email, MailHandler[member.mail_handler],
member.member, False)
with mysql.context() as cursor:
yield mysql.drop_account(cursor, member)
with pgsql.context() as cursor:
yield pgsql.drop_account(cursor, member)
if not keep_groups:
for society in member.societies:
yield remove_society_admin(sess, member, society, False)
| 19,769
|
def prepare_wordclouds(
clusters: Dict[str, List[int]], test_texts: Union[np.ndarray, pd.Series]
):
"""Pre-render the wordcloud for each cluster, this makes switching the main wordcloud figure faster.
:param clusters: Dictionary of clusters where the values are the lists of dataframe indices for the entries in each cluster.
:param test_texts: The full test corpus.
"""
for cluster in clusters:
_cached_wordclouds[cluster] = gen_wordcloud(test_texts[clusters[cluster]])
| 19,770
|
def render_doc(stig_rule, deployer_notes):
"""Generate documentation RST for each STIG configuration."""
template = JINJA_ENV.get_template('template_doc_rhel7.j2')
return template.render(
rule=stig_rule,
notes=deployer_notes
)
| 19,771
|
def official_evaluate(reference_csv_path, prediction_csv_path):
"""Evaluate metrics with official SED toolbox.
Args:
reference_csv_path: str
prediction_csv_path: str
"""
reference_event_list = sed_eval.io.load_event_list(reference_csv_path,
delimiter='\t', csv_header=False,
fields=['filename','onset','offset','event_label'])
estimated_event_list = sed_eval.io.load_event_list(prediction_csv_path,
delimiter='\t', csv_header=False,
fields=['filename','onset','offset','event_label'])
evaluated_event_labels = reference_event_list.unique_event_labels
files={}
for event in reference_event_list:
files[event['filename']] = event['filename']
evaluated_files = sorted(list(files.keys()))
segment_based_metrics = sed_eval.sound_event.SegmentBasedMetrics(
event_label_list=evaluated_event_labels,
time_resolution=1.0
)
for file in evaluated_files:
reference_event_list_for_current_file = []
for event in reference_event_list:
if event['filename'] == file:
reference_event_list_for_current_file.append(event)
estimated_event_list_for_current_file = []
for event in estimated_event_list:
if event['filename'] == file:
estimated_event_list_for_current_file.append(event)
segment_based_metrics.evaluate(
reference_event_list=reference_event_list_for_current_file,
estimated_event_list=estimated_event_list_for_current_file
)
results = segment_based_metrics.results()
return results
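
# Hedged usage sketch for official_evaluate. The file paths are hypothetical;
# both files must be tab-separated filename/onset/offset/event_label rows with
# no header, as load_event_list above expects. The key layout of results()
# follows the sed_eval segment-based metrics dictionary.
_results = official_evaluate('metadata/groundtruth.tsv', 'submission/predictions.tsv')
print(_results['overall']['f_measure']['f_measure'])    # segment-based F-score
print(_results['overall']['error_rate']['error_rate'])  # segment-based error rate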
| 19,772
|
def get_notebooks():
"""Read `notebooks.yaml` info."""
path = Path("tutorials") / "notebooks.yaml"
with path.open() as fh:
return yaml.safe_load(fh)
| 19,773
|
def getSimData(startDate, endDate, region):
""" Get all boundary condition data needed for a simulation run
Args:
startDate (string): Start date DD.MM.YYYY
(start time is hard coded to 00:00)
endDate (string): End date DD.MM.YYYY
(end day is not in time range, so end date
should be end date + 1 day)
region (string): Location of simulation (determines climate / weather)
Supported regions:
East, West, South, North
Returns:
        tuple: nSteps, time, SLP profiles, hot water profile factor
               (np.float32 array), Weather data and SolarPosition data
"""
data = getSimData_df(startDate, endDate, region)
return (data.time.size, data.time,
data.SLP,
data.HWPfactor.to_numpy(dtype=np.float32),
data.Weather,
data.SolarPosition
)
| 19,774
|
def get_dag_path(pipeline, module=None):
"""
Gets the DAG path.
:@param pipeline: The Airflow Variable key that has the config.
:@type pipeline: str.
:@param module: The module that belongs to the pipeline.
:@type module: str.
:@return: The DAG path of the pipeline.
"""
if module is None:
module = pipeline
config = Variable.get(pipeline, deserialize_json=True)
return pp.join(config['dag_install_path'], '{}_dag.py'.format(module))
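
# Hedged usage sketch for get_dag_path: assuming an Airflow Variable named
# "my_pipeline" holds {"dag_install_path": "/opt/airflow/dags"}, this prints
# "/opt/airflow/dags/ingest_dag.py". Variable name and path are hypothetical.
print(get_dag_path('my_pipeline', module='ingest'))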
| 19,775
|
def blow_up(polygon):
"""Takes a ``polygon`` as input and adds pixels to it according to the following rule. Consider the line between two
adjacent pixels in the polygon (i.e., if connected via an egde). Then the method adds additional equidistand pixels
lying on that line (if the value is double, convert to int), dependent on the x- and y-distance of the pixels.
:param polygon: input polygon that should be blown up
:type polygon: Polygon
:return: blown up polygon
"""
res = Polygon()
for i in range(1, polygon.n_points, 1):
x1 = polygon.x_points[i - 1]
y1 = polygon.y_points[i - 1]
x2 = polygon.x_points[i]
y2 = polygon.y_points[i]
diff_x = abs(x2 - x1)
diff_y = abs(y2 - y1)
# if (x1,y1) = (x2, y2)
if max(diff_x, diff_y) < 1:
if i == polygon.n_points - 1:
res.add_point(x2, y2)
continue
res.add_point(x1, y1)
if diff_x >= diff_y:
for j in range(1, diff_x, 1):
if x1 < x2:
xn = x1 + j
else:
xn = x1 - j
yn = int(round(y1 + (xn - x1) * (y2 - y1) / (x2 - x1)))
res.add_point(xn, yn)
else:
for j in range(1, diff_y, 1):
if y1 < y2:
yn = y1 + j
else:
yn = y1 - j
xn = int(round(x1 + (yn - y1) * (x2 - x1) / (y2 - y1)))
res.add_point(xn, yn)
if i == polygon.n_points - 1:
res.add_point(x2, y2)
return res
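
# Hedged usage sketch for blow_up, assuming the surrounding Polygon class
# exposes add_point()/x_points/y_points/n_points as used above. A two-point
# polygon from (0, 0) to (3, 1) gets intermediate pixels filled in.
_poly = Polygon()
_poly.add_point(0, 0)
_poly.add_point(3, 1)
_dense = blow_up(_poly)
print(list(zip(_dense.x_points, _dense.y_points)))  # [(0, 0), (1, 0), (2, 1), (3, 1)]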
| 19,776
|
def x_sogs_raw(
s: SigningKey,
B: PublicKey,
method: str,
full_path: str,
body: Optional[bytes] = None,
*,
b64_nonce: bool = True,
blinded: bool = False,
timestamp_off: int = 0,
):
"""
Calculates X-SOGS-* headers.
Returns 4 elements: the headers dict, the nonce bytes, timestamp int, and signature bytes.
Use x_sogs(...) instead if you don't need the nonce/timestamp/signature values.
"""
n = x_sogs_nonce()
ts = int(time.time()) + timestamp_off
if blinded:
a = s.to_curve25519_private_key().encode()
k = sodium.crypto_core_ed25519_scalar_reduce(
blake2b(sogs.crypto.server_pubkey_bytes, digest_size=64)
)
ka = sodium.crypto_core_ed25519_scalar_mul(k, a)
kA = sodium.crypto_scalarmult_ed25519_base_noclamp(ka)
pubkey = '15' + kA.hex()
else:
pubkey = '00' + s.verify_key.encode().hex()
to_sign = [B.encode(), n, str(ts).encode(), method.encode(), full_path.encode()]
if body:
to_sign.append(blake2b(body, digest_size=64))
if blinded:
H_rh = sha512(s.encode())[32:]
r = sodium.crypto_core_ed25519_scalar_reduce(sha512([H_rh, kA, *to_sign]))
sig_R = sodium.crypto_scalarmult_ed25519_base_noclamp(r)
HRAM = sodium.crypto_core_ed25519_scalar_reduce(sha512([sig_R, kA, *to_sign]))
sig_s = sodium.crypto_core_ed25519_scalar_add(
r, sodium.crypto_core_ed25519_scalar_mul(HRAM, ka)
)
sig = sig_R + sig_s
else:
sig = s.sign(b''.join(to_sign)).signature
h = {
'X-SOGS-Pubkey': pubkey,
'X-SOGS-Nonce': sogs.utils.encode_base64(n) if b64_nonce else n.hex(),
'X-SOGS-Timestamp': str(ts),
'X-SOGS-Signature': sogs.utils.encode_base64(sig),
}
return h, n, ts, sig
| 19,777
|
def get_param(param, content, num=0):
"""
    Get the value of a given parameter from the response content.
    :param param: the parameter to extract from the API response
    :param content: the API response body
    :param num: when the response contains a list, which element to take
    :return: the extracted parameter value, or None if it is still an unresolved variable
"""
param_val = None
if "." in param:
patt = param.split('.')
param_val = httprunner_extract(content, patt)
return param_val
else:
if isinstance(content, str):
try:
content = json.loads(content)
except:
content = ""
if isinstance(content, dict):
param_val = get_param_response(param, content, num)
if isinstance(content, list):
dict_data = {}
for i in range(len(content)):
try:
dict_data[str(i)] = eval(content[i])
except:
dict_data[str(i)] = content[i]
param_val = get_param_response(param, dict_data, num)
if param_val is None:
return param_val
else:
if "$" + param == param_val:
param_val = None
return param_val
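
# Hedged usage sketch for get_param. Dotted paths are delegated to
# httprunner_extract and plain keys to get_param_response, both project
# helpers assumed to be importable; the response body is illustrative only.
_content = '{"code": 0, "data": {"items": [{"id": 7}, {"id": 9}]}}'
print(get_param("data.items.1.id", _content))  # extracted via the dotted path
print(get_param("code", _content))             # extracted from the parsed dict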
| 19,778
|
def make_thebig_df_from_data(strat_df_list, strat_names):
"""Joins strategy data frames into a single df - **The Big DF** -
Signature of The Big DF:
df(strategy, sim_prefix, exec, node)[metrics]
"""
thebig_df = pd.concat(strat_df_list, axis=0, keys=strat_names)
thebig_df.index.set_names("strategy", level=0, inplace=True)
return thebig_df
| 19,779
|
def first_weekday_date(date):
"""
Filter - returns the date of the first weekday for the date
Usage (in template):
{{ some_date|first_weekday_date }}
"""
week_start = date - datetime.timedelta(days=date.weekday())
return week_start.date()
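
# Hedged usage sketch for first_weekday_date, called directly rather than
# through a template: 2024-05-16 is a Thursday, so the Monday of that week
# is returned as a date object.
import datetime

print(first_weekday_date(datetime.datetime(2024, 5, 16)))  # 2024-05-13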
| 19,780
|
def isLto():
"""*bool* = "--lto" """
return options.lto
| 19,781
|
def runtime():
"""Get the CumulusCI runtime for the current working directory."""
init_logger()
return CliRuntime()
| 19,782
|
def order_json_objects(obj):
"""
    Recursively orders all elements in a JSON object.
Source:
https://stackoverflow.com/questions/25851183/how-to-compare-two-json-objects-with-the-same-elements-in-a-different-order-equa
"""
if isinstance(obj, dict):
return sorted((k, order_json_objects(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(order_json_objects(x) for x in obj)
return obj
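
# Hedged usage sketch for order_json_objects: two JSON-like objects with the
# same elements in a different order compare equal after normalisation.
_a = {"x": [3, 1, 2], "y": {"b": 1, "a": 2}}
_b = {"y": {"a": 2, "b": 1}, "x": [2, 3, 1]}
assert order_json_objects(_a) == order_json_objects(_b)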
| 19,783
|
def _add_var_summary(var, name, collection=None):
""" attaches a lot of summaries to a given tensor"""
with tf.name_scope(name):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean, collections=collection)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev, collections=collection)
tf.summary.scalar('max', tf.reduce_max(var), collections=collection)
tf.summary.scalar('min', tf.reduce_min(var), collections=collection)
tf.summary.histogram('histogram', var, collections=collection)
| 19,784
|
def CalculatePEOEVSA(mol, bins=None):
"""
#################################################################
MOE-type descriptors using partial charges and surface
area contributions.
chgBins=[-.3,-.25,-.20,-.15,-.10,-.05,0,.05,.10,.15,.20,.25,.30]
You can specify your own bins to compute some descriptors
Usage:
result=CalculatePEOEVSA(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
temp = MOE.PEOE_VSA_(mol, bins, force=1)
res = {}
for i, j in enumerate(temp):
res["PEOEVSA" + str(i)] = round(j, 3)
return res
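
# Hedged usage sketch for CalculatePEOEVSA. Assumes RDKit is installed and the
# surrounding package's MOE helper is importable; ethanol is just an example.
from rdkit import Chem

_mol = Chem.MolFromSmiles('CCO')
_desc = CalculatePEOEVSA(_mol)
print(len(_desc), _desc.get('PEOEVSA0'))  # one rounded value per PEOE_VSA bin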
| 19,785
|
def plot_pre_post_errors(
data: Optional[pd.DataFrame] = None,
) -> None:
"""
Input data should be the "flattened" dataset.
"""
# Load default data
if data is None:
data = flatten_access_eval_2021_dataset()
# Make pre post chart with split by contacted
chart = (
alt.Chart(data)
.mark_boxplot()
.encode(
x=DatasetFields.contacted,
y=f"{ComputedFields.avg_errors_per_page_post.name.replace('_post', ''):}:Q",
column=alt.Column(DatasetFields.trial, spacing=30),
color=DatasetFields.contacted,
)
)
# Save
PLOTTING_DIR.mkdir(parents=True, exist_ok=True)
chart.save(str(PLOTTING_DIR / "pre-post-errors.png"))
| 19,786
|
def install_pyheif_from_pip() -> int:
"""
Install the python module pyheif from PIP.
Assumes required libraries already installed
:return: return code from pip
"""
print("Installing Python support for HEIF / HEIC...")
cmd = make_pip_command(
'install {} -U --disable-pip-version-check pyheif'.format(pip_user)
)
return popen_capture_output(cmd)
| 19,787
|
def start_transfer(sip, accession_id, archivematica_id=None):
"""Start the archive process for a sip.
Start the transfer of the sip in asynchronous mode. See
:py:mod:`invenio_archivematica.tasks`
:param sip: the sip to archive
:type sip: :py:class:`invenio_sipstore.api.SIP`
:param str accession_id: the accessioned ID in archivematica. You can
compute it from
:py:func:`invenio_archivematica.factories.create_accession_id`
:param str archivematica_id: the ID in Archivematica
"""
oais_start_transfer.delay(str(sip.id), accession_id, archivematica_id)
| 19,788
|
def adjust_mlb_names(mlb_id, fname, lname):
"""
Adjusts a prospect's first and last name (fname, lname) given their mlb.com player_id for better usage in matching to the professional_prospects table.
"""
player_mapper = {
}
qry = """SELECT wrong_name
, right_fname
, right_lname
FROM NSBL.name_mapper nm
;"""
res = db.query(qry)
for row in res:
wrong, right_fname, right_lname = row
player_mapper[wrong] = [right_fname, right_lname]
if mlb_id in player_mapper:
fname, lname = player_mapper.get(mlb_id)
return fname, lname
else:
return fname, lname
| 19,789
|
def all_predicates(*predicates: Callable[[Any], bool]) -> Callable[[Any], bool]:
"""Takes a set of predicates and returns a function that takes an entity
and checks if it satisfies all the predicates.
>>> even_and_prime = all_predicates(is_even, is_prime)
>>> even_and_prime(2)
True
>>> even_and_prime(4)
False
>>> even_and_prime(3)
False
Added in version: 0.1.0
"""
return lambda entity: all((p(entity) for p in predicates))
| 19,790
|
def centered_rand(l):
"""Sample from U(-l, l)"""
return l*(2.*np.random.rand()-1.)
| 19,791
|
def compute_rays_length(rays_d):
"""Compute ray length.
Args:
rays_d: [R, 3] float tensor. Ray directions.
Returns:
rays_length: [R, 1] float tensor. Ray lengths.
"""
rays_length = torch.norm(rays_d, dim=-1, keepdim=True) # [N_rays, 1]
return rays_length
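
# Hedged usage sketch for compute_rays_length: the norm of each direction
# vector is returned with shape [R, 1]. Assumes PyTorch is installed.
import torch

_rays_d = torch.tensor([[3.0, 4.0, 0.0],
                        [1.0, 0.0, 0.0]])
print(compute_rays_length(_rays_d))  # tensor([[5.], [1.]])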
| 19,792
|
def _repeat_elements(arr, n):
"""
    Repeats the elements in the input array, e.g.
[1, 2, 3] -> [1, 1, 1, 2, 2, 2, 3, 3, 3]
"""
ret = list(itertools.chain(*[list(itertools.repeat(elem, n)) for elem in arr]))
return ret
| 19,793
|
def get_neighbors_table(embeddings, method, ntrees=None):
"""
This is a factory method for cosine distance nearest neighbor methods.
Args:
embeddings (ndarray): The embeddings to index
method (string): The nearest neighbor method to use
ntrees (int): number of trees for annoy
Returns:
Nearest neighbor table
"""
if method == "annoy":
if ntrees is None:
raise ImproperParameterSpecificationException("ntrees must be defined")
table = AnnoyNeighborsTable(embeddings, ntrees)
elif method == "brute":
table = BruteForceNeighborsTable(embeddings)
else:
raise MethodNotImplementedException("{} is not an implemented method".format(method))
return table
| 19,794
|
def _compute_positional_encoding(
attention_type,
position_encoding_layer,
hidden_size,
batch_size,
total_length,
seq_length,
clamp_length,
bi_data,
dtype=tf.float32):
"""Computes the relative position encoding.
Args:
attention_type: str, the attention type. Can be "uni" (directional) or
"bi" (directional).
position_encoding_layer: An instance of `RelativePositionEncoding`.
hidden_size: int, the hidden size.
batch_size: int, the batch size.
total_length: int, the sequence length added to the memory length.
seq_length: int, the length of each sequence.
clamp_length: int, clamp all relative distances larger than clamp_length. -1
means no clamping.
bi_data: bool, whether to use bidirectional input pipeline. Usually set to
True during pretraining and False during finetuning.
dtype: the dtype of the encoding.
Returns:
A Tensor, representing the position encoding.
"""
freq_seq = tf.range(0, hidden_size, 2.0)
if dtype is not None and dtype != tf.float32:
freq_seq = tf.cast(freq_seq, dtype=dtype)
if attention_type == "bi":
beg, end = total_length, -seq_length
elif attention_type == "uni":
beg, end = total_length, -1
else:
raise ValueError("Unknown `attention_type` {}.".format(attention_type))
if bi_data:
forward_position_sequence = tf.range(beg, end, -1.0)
backward_position_sequence = tf.range(-beg, -end, 1.0)
if dtype is not None and dtype != tf.float32:
forward_position_sequence = tf.cast(forward_position_sequence,
dtype=dtype)
backward_position_sequence = tf.cast(backward_position_sequence,
dtype=dtype)
if clamp_length > 0:
forward_position_sequence = tf.clip_by_value(
forward_position_sequence,
-clamp_length,
clamp_length)
backward_position_sequence = tf.clip_by_value(
backward_position_sequence,
-clamp_length,
clamp_length)
if batch_size is not None:
forward_positional_encoding = position_encoding_layer(
forward_position_sequence, batch_size // 2)
backward_positional_encoding = position_encoding_layer(
backward_position_sequence, batch_size // 2)
else:
forward_positional_encoding = position_encoding_layer(
forward_position_sequence, None)
backward_positional_encoding = position_encoding_layer(
backward_position_sequence, None)
relative_position_encoding = tf.concat(
[forward_positional_encoding, backward_positional_encoding], axis=0)
else:
forward_position_sequence = tf.range(beg, end, -1.0)
if dtype is not None and dtype != tf.float32:
forward_position_sequence = tf.cast(
forward_position_sequence, dtype=dtype)
if clamp_length > 0:
forward_position_sequence = tf.clip_by_value(
forward_position_sequence,
-clamp_length,
clamp_length)
relative_position_encoding = position_encoding_layer(
forward_position_sequence, batch_size)
return relative_position_encoding
| 19,795
|
def _get_instance_id(instance_list, identity):
"""
Return instance UUID by name or ID, if found.
"""
for i in instance_list.items:
if identity in (i.properties.name, i.id):
return i.id
return None
| 19,796
|
def DBConnect(dwhName=None):
"""
    Connect to the MySQL data warehouse.
    Parameters
    ----------
    dwhName : str, optional
        Name of the database to connect to (default: None).
    Returns
    -------
    conn, cur
        The open MySQL connection and its cursor.
"""
conn = mysql.connect(host=os.getenv('DBT_MYSQL_HOST'), user="root",
database=dwhName, buffered=True)
cur = conn.cursor()
print("Successfully Connected!")
return conn, cur
| 19,797
|
def isPageWatched(user, trunk):
"""Is the page being watched by the user?"""
result = (models.Subscription.all().
filter('user =', user).
filter('trunk =', trunk).
filter('method !=', models.Subscription.METH_MEH))
return result.count(1) != 0
| 19,798
|
def main():
"""主函数:设置窗口部件,指定按钮点击事件处理函数
"""
window = tk.Tk()
window.geometry("600x480")
    window.title("Simple Image Viewer")
canvas = tk.Canvas(window, width=600, height=440)
canvas.pack(side="bottom")
    button = tk.Button(window, text="Open Image",
                       command=lambda: openimage(canvas))
button.pack()
tk.mainloop()
| 19,799
|