content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def dbrestore(c, keyname="local"):
    """Restore the database from a named s3 key.

    Example: ``fab dbrestore --keyname dev``
    """
    command = f"python manage.py dbrestore {keyname}"
    local(_api_cmd(command))
def normalize_not_found(wrapped):
    """View decorator that rewrites 404 errors with a friendlier message."""
    def wrapper(context, request):
        # Swap the incoming 404 context for one carrying a standard,
        # readable message; the original context is intentionally ignored.
        return wrapped(_standard_not_found(), request)
    return wrapper
def _collect_parallel(worker, all_msgs, cfg):
    """Run `worker(item, cfg=cfg)` over every (topic, msgs) item of `all_msgs`
    on a thread pool and return the list of results (completion order)."""
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        futures = [executor.submit(worker, item, cfg=cfg) for item in all_msgs.items()]
        for future in concurrent.futures.as_completed(futures):
            results.append(future.result())
    return results


def _make_trace(name, color, data, highlight=False):
    """Build a SOFATrace with the common timestamp/duration axes."""
    trace = sofa_models.SOFATrace()
    trace.name = name
    trace.title = name
    trace.color = color
    trace.x_field = 'timestamp'
    trace.y_field = 'duration'
    trace.data = data
    # Only set .highlight when requested, mirroring the original behavior of
    # leaving the attribute untouched on non-highlighted traces.
    if highlight:
        trace.highlight = True
    return trace


def run(cfg, cfg2=None):
    """Start preprocessing.

    Reads the log files generated by ebpf_ros2_* for one (or two) SOFA
    configurations, extracts per-message traces, computes latency breakdowns
    (ROS layer, DDS layer, OS layer, send/recv sides), retransmissions and
    sample drops in parallel, and returns the list of SOFATrace objects.

    :param cfg: primary SOFA_Config (defaults to a fresh SOFA_Config()).
    :param cfg2: optional second config for the receiving side; defaults to
                 ``cfg`` so single-machine runs read one log directory.
    :returns: list of SOFATrace instances.
    """
    # TODO: convert addr and port to uint32, uint16
    read_csv = functools.partial(pd.read_csv, dtype={
        'pid': 'Int32', 'seqnum': 'Int64', 'subscriber': 'Int64', 'publisher': 'Int64'})
    other_log = ['cls_bpf_log.csv']
    if cfg is None:
        cfg = sofa_config.SOFA_Config()
    # BUG FIX: this was `elif`, which left cfg2 as None whenever cfg was None
    # and crashed below when reading cfg2.logdir.
    if cfg2 is None:
        cfg2 = cfg
    with open(os.path.join(cfg.logdir, 'unix_time_off.txt')) as f:
        cfg.unix_time_off = float(f.readlines()[0])
    sofa_print.print_hint('unix time offset:' + str(cfg.unix_time_off) + ' in ' + cfg.logdir)
    with open(os.path.join(cfg2.logdir, 'unix_time_off.txt')) as f:
        # BUG FIX: was assigned to cfg.unix_time_off, leaving cfg2's offset
        # unset (and clobbering cfg's) when the configs differ.
        cfg2.unix_time_off = float(f.readlines()[0])
    sofa_print.print_hint('unix time offset:' + str(cfg2.unix_time_off) + ' in ' + cfg2.logdir)
    send_log = os.path.join(cfg.logdir, cfg.ros2logdir, 'send_log.csv')
    recv_log = os.path.join(cfg2.logdir, cfg2.ros2logdir, 'recv_log.csv')
    print(send_log, recv_log)
    csv_files_others = []
    for log_name in other_log:
        path = os.path.join(cfg.logdir, cfg.ros2logdir, log_name)
        print(path)
        csv_files_others.append((cfg, path))
    if cfg2 is not cfg:
        for log_name in other_log:
            path = os.path.join(cfg2.logdir, cfg2.ros2logdir, log_name)
            print(path)
            csv_files_others.append((cfg2, path))
    df_send = (cfg, read_csv(send_log))
    df_recv = (cfg2, read_csv(recv_log))
    df_others = []
    for cfg_to_pass, csv_file in csv_files_others:
        try:
            df_others.append((cfg_to_pass, read_csv(csv_file)))
        except pd.errors.EmptyDataError:
            print(csv_file + ' is empty')
    all_msgs = extract_individual_rosmsg(df_send, df_recv, *df_others)
    print(all_msgs)
    # TODO: Filter topics
    # Calculate latency breakdowns for all topics, each on its own thread pool.
    res = _collect_parallel(ros_msgs_trace_read, all_msgs, cfg)
    print(res)
    ros_lat_send = _collect_parallel(ros_msgs_trace_read_ros_lat_send, all_msgs, cfg)
    print(ros_lat_send)
    # Time spent in the OS on the send side.
    os_lat_send = _collect_parallel(ros_msgs_trace_read_os_lat_send, all_msgs, cfg)
    print(os_lat_send)
    dds_lat_send = _collect_parallel(ros_msgs_trace_read_dds_lat_send, all_msgs, cfg)
    print(dds_lat_send)
    os_lat_recv = _collect_parallel(ros_msgs_trace_read_os_lat_recv, all_msgs, cfg)
    print(os_lat_recv)
    # This worker returns a (dds_latency, executor_latency) pair per topic.
    dds_lat_recv = []
    ros_executor_recv = []
    for pair in _collect_parallel(ros_msgs_trace_read_dds_ros_lat_recv, all_msgs, cfg):
        print(pair)
        dds_lat_recv.append(pair[0])
        ros_executor_recv.append(pair[1])
    print(dds_lat_recv)
    print(ros_executor_recv)
    retransmissions = _collect_parallel(find_retransmissions, all_msgs, cfg)
    print(retransmissions)
    sample_drop = _collect_parallel(find_sample_drop, all_msgs, cfg)
    print(sample_drop)
    # Build the trace objects. NOTE: the next(color_send)/next(color_recv)
    # calls must stay in this order to reproduce the original color cycling.
    sofatrace = _make_trace('ros2_latency', 'DeepPink', pd.concat(res))  # TODO:
    sofatrace_ros_lat_send = _make_trace('ros2_lat_send', '#D15817', pd.concat(ros_lat_send))
    sofatrace_ros_executor_recv = _make_trace('ros2_executor_recv', next(color_recv),
                                              pd.concat(ros_executor_recv))
    sofatrace_dds_lat_send = _make_trace('dds_send_latency', next(color_send),
                                         pd.concat(dds_lat_send))
    sofatrace_dds_lat_recv = _make_trace('dds_recv_latency', next(color_recv),
                                         pd.concat(dds_lat_recv))
    sofatrace_os_lat_send = _make_trace('os_send_latency', next(color_send),
                                        pd.concat(os_lat_send))
    sofatrace_os_lat_recv = _make_trace('os_recv_latency', next(color_recv),
                                        pd.concat(os_lat_recv))
    sofatrace_retransmissions = _make_trace('retransmissions', 'Crimson',
                                            pd.concat(retransmissions), highlight=True)
    sofatrace_sample_drop = _make_trace('sample_drop', 'DarkCyan',
                                        pd.concat(sample_drop), highlight=True)
    sofatrace_targets = find_outliers(
        [sofatrace, sofatrace_ros_executor_recv,
         sofatrace_dds_lat_send, sofatrace_dds_lat_recv,
         sofatrace_os_lat_send, sofatrace_os_lat_recv], sofatrace)
    return [sofatrace,
            sofatrace_ros_lat_send, sofatrace_ros_executor_recv,
            sofatrace_dds_lat_send, sofatrace_dds_lat_recv,
            sofatrace_os_lat_send, sofatrace_os_lat_recv,
            sofatrace_targets, sofatrace_retransmissions, sofatrace_sample_drop]
def jacobi_sequence(ns, alpha, beta, x):
    """Yield Jacobi polynomials of the requested orders at points x.

    Parameters
    ----------
    ns : iterable
        sorted polynomial orders to return, e.g. [1, 3, 5, 7, ...]
    alpha : float
        first weight parameter
    beta : float
        second weight parameter
    x : numpy.ndarray
        x coordinates to evaluate at

    Returns
    -------
    generator
        equivalent to an array of shape (len(ns), len(x))
    """
    # A generator keeps memory flat and the per-yield overhead negligible
    # relative to the per-element arithmetic; P_n is built from the standard
    # three-term recurrence, yielding only the orders requested in `ns`.
    orders = list(ns)
    idx = 0
    # Degree 0: P_0 == 1 everywhere.
    poly = np.ones_like(x)
    if orders[idx] == 0:
        yield poly
        idx += 1
        if idx == len(orders):
            return
    # Degree 1 from the closed form.
    poly = alpha + 1 + (alpha + beta + 2) * ((x - 1) / 2)
    if orders[idx] == 1:
        yield poly
        idx += 1
        if idx == len(orders):
            return
    prev = poly
    A, B, C = recurrence_abc(1, alpha, beta)
    poly = (A * x + B) * prev - C  # the C * P_0 term, since P_0 == 1
    if orders[idx] == 2:
        yield poly
        idx += 1
        if idx == len(orders):
            return
    highest = orders[-1]
    for degree in range(3, highest + 1):
        prev2, prev = prev, poly
        A, B, C = recurrence_abc(degree - 1, alpha, beta)
        poly = (A * x + B) * prev - C * prev2
        if orders[idx] == degree:
            yield poly
            idx += 1
def process_results(unprocessed, P, R, G):
    """Sort the flat worker-pool output by policy and run.

    results[i][j][k] are the results from policy i on run j on graph k.

    Parameters:
    - unprocessed: Unprocessed results (as returned by the worker pool)
    - P: number of policies
    - R: number of runs
    - G: number of graphs/SCMs/test cases
    """
    grouped = []
    for policy_idx in range(P):
        base = policy_idx * G * R
        # Slice out G consecutive graph results for each of the R runs.
        runs = [unprocessed[base + G * run_idx: base + G * (run_idx + 1)]
                for run_idx in range(R)]
        grouped.append(runs)
    return grouped
def make_inline_table(data):
    """Build a tomlkit inline table populated from the given mapping."""
    inline = tomlkit.inline_table()
    inline.update(data)
    return inline
def copy(drs_uri: str,
         dst: str,
         indicator_type: Indicator=Indicator.notebook_bar if is_notebook() else Indicator.bar,
         workspace_name: Optional[str]=WORKSPACE_NAME,
         workspace_namespace: Optional[str]=WORKSPACE_NAMESPACE):
    """Copy a DRS object to either the local filesystem, or to a Google Storage location if `dst` starts with
    "gs://".
    """
    enable_requester_pays(workspace_name, workspace_namespace)
    destination = dst or "."  # fall back to the current directory
    with DRSCopyClient(raise_on_error=True, indicator_type=indicator_type) as copy_client:
        copy_client.workspace = workspace_name
        copy_client.workspace_namespace = workspace_namespace
        copy_client.copy(drs_uri, destination)
def _mi_dc(x, y, k):
    """
    Mutual information between a continuous matrix x and a discrete class
    vector y, via the k-nearest-neighbour estimator.

    Handles the joint distribution of one or more continuous variables
    (X[:, 1:3]) against a discrete variable (y).
    Thanks to Adam Pocock, the author of the FEAST package for the idea.

    Brian C. Ross, 2014, PLOS ONE
    Mutual Information between Discrete and Continuous Data Sets
    """
    y = y.flatten()
    n_samples = x.shape[0]
    knn = NearestNeighbors(n_neighbors=k)
    # Size of each point's own class.
    class_sizes = [np.sum(y == label) for label in y]
    # Distance from each point to its kth nearest neighbour *within* its class.
    kth_dist = np.empty(n_samples)
    for cls in np.unique(y):
        members = np.where(y == cls)[0]
        knn.fit(x[members, :])
        kth_dist[members] = knn.kneighbors()[0][:, -1]
    # Count points of any class inside each point's in-class radius.
    knn.fit(x)
    in_radius = knn.radius_neighbors(radius=kth_dist, return_distance=False)
    neighbour_counts = [idx.shape[0] for idx in in_radius]
    # Equation 2 in Ross 2014.
    return psi(n_samples) - np.mean(psi(class_sizes)) + psi(k) - np.mean(psi(neighbour_counts))
def get_new_deals_intent_handler(handler_input):
    """
    Purpose:
        Handler for getting new deals from the Slickdeals feed.
    Args:
        handler_input (Dict): Input data from the Alexa Skill
    Return:
        alexa_response (Dict): Response for Alexa Skill to handle
    """
    feed = get_slickdeals_feed(SLICKDEALS_URL)
    deals = get_top_slickdeals(feed)
    if deals:
        speech_text = "There are {0} deals. The first deal is {1}".format(
            len(deals), deals[0]
        )
    else:
        # Guard the empty-feed case; previously deals[0] raised IndexError.
        speech_text = "There are no deals right now."
    return (
        handler_input.response_builder.speak(speech_text)
        .set_card(SimpleCard("Slick Deals", speech_text))
        .set_should_end_session(True)
        .response
    )
def test_trigger_pipeline_build(settings, mocker, version):
    """The correct requests should be made to trigger a pipeline build"""
    job_name = "build-ocw-site"
    # Stub the Concourse GET so the pipeline config reports a single job.
    mock_get = mocker.patch(
        "content_sync.pipelines.concourse.ConcourseApi.get",
        return_value={"config": {"jobs": [{"name": job_name}]}},
    )
    mock_post = mocker.patch("content_sync.pipelines.concourse.ConcourseApi.post")
    # A GitHub-backed starter is required for a ConcourseGithubPipeline.
    website = WebsiteFactory.create(
        starter=WebsiteStarterFactory.create(
            source=STARTER_SOURCE_GITHUB, path="https://github.com/org/repo/config"
        )
    )
    pipeline = ConcourseGithubPipeline(website)
    pipeline.trigger_pipeline_build(version)
    # The config must be fetched once (to discover the job) ...
    mock_get.assert_called_once_with(
        f"/api/v1/teams/{settings.CONCOURSE_TEAM}/pipelines/{version}/config?vars={pipeline.instance_vars}"
    )
    # ... and exactly one build must be POSTed for that job.
    mock_post.assert_called_once_with(
        f"/api/v1/teams/{settings.CONCOURSE_TEAM}/pipelines/{version}/jobs/{job_name}/builds?vars={pipeline.instance_vars}"
    )
def link_nodes(isy, argv) :
    """Start or stop node discovery (linking) on the ISY controller.

    With no arguments, runs the interactive linking flow; otherwise the
    first argument must be START or STOP.
    """
    if len(argv) == 0 :
        do_interactive_link(isy)
        # BUG FIX: must return here — falling through to argv.pop(0)
        # raised IndexError on the empty list.
        return
    cmd = argv.pop(0).upper()
    if cmd in [ "START" ] :
        isy.node_discover()
    elif cmd in [ "STOP" ] :
        # NOTE(review): bare name here; presumably this should be
        # isy.node_discover_cancel() — confirm against the ISY API.
        node_discover_cancel()
    exit(0)
def make_parser():
    """Returns the command-line argument parser for sage-spkg-uninstall."""
    # The module docstring supplies both the summary and the epilog text.
    doc_lines = __doc__.strip().splitlines()
    summary, detail = doc_lines[0], '\n'.join(doc_lines[1:]).strip()
    parser = argparse.ArgumentParser(
        description=summary,
        epilog=detail,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('spkg', type=spkg_type, help='the spkg to uninstall')
    parser.add_argument('sage_local', type=dir_type, nargs='?',
                        default=os.environ.get('SAGE_LOCAL'),
                        help='the SAGE_LOCAL path (default: the $SAGE_LOCAL '
                             'environment variable if set)')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose output showing all files removed')
    parser.add_argument('-k', '--keep-files', action='store_true',
                        help="only delete the package's installation record, "
                             "but do not remove files installed by the "
                             "package")
    # Hidden developer flag.
    parser.add_argument('--debug', action='store_true', help=argparse.SUPPRESS)
    return parser
def analyse_dataset(imgs, lbls, name=None):
    """Analyse a labelled dataset, printing and returning summary statistics.

    # Arguments:
        imgs: ndarray, a set of images
        lbls: ndarray, labels for a set of images
        name: optional dataset name, echoed in the summary
    """
    if name is not None:
        print('Dataset: {}'.format(name))
    unique_lbl, counts = np.unique(lbls, return_counts=True)
    summary = {
        'name': name,
        'n_samples': imgs.shape[0],
        'samples_shape': imgs.shape[1:],
        'n_unique_labels': len(counts),
        'unique_labels': unique_lbl,
        'min_samples': min(counts),
        'max_samples': max(counts),
        'average_samples': round(np.mean(counts), 0),
        'std_dev': round(np.std(counts), 2),
    }
    for key, value in summary.items():
        print('{}: {}'.format(key, value))
    return summary
def transform(doc, *, sort_keys=False):
    """Reorder the document so the heavyweight sections come last."""
    heavy_sections = ("definitions", "schemas", "responses", "parameters", "paths")
    reordered = make_dict()
    # Light keys first, preserving their original order.
    for key, value in doc.items():
        if key not in heavy_sections:
            reordered[key] = value
    # Then the heavy sections in canonical order.
    for key in heavy_sections:
        if key in doc:
            reordered[key] = doc[key]
    if sort_keys:
        reordered = str_dict(reordered)  # side effect
    return reordered
def plot_card(
        box: str,
        title: str,
        data: PackedRecord,
        plot: Plot,
        events: Optional[List[str]] = None,
        commands: Optional[List[Command]] = None,
) -> PlotCard:
    """Create a card displaying a plot.

    Args:
        box: A string indicating how to place this component on the page.
        title: The title for this card.
        data: Data for this card.
        plot: The plot to be displayed in this card.
        events: The events to capture on this card.
        commands: Contextual menu commands for this component.
    Returns:
        A `h2o_wave.types.PlotCard` instance.
    """
    # Thin constructor wrapper; all arguments are forwarded as-is.
    return PlotCard(
        box=box,
        title=title,
        data=data,
        plot=plot,
        events=events,
        commands=commands,
    )
def publish(
    click_context: click.Context, local: bool, remote: bool, push_missing: bool
) -> None:  # pylint: disable=unused-argument
    """Publish the agent to the registry.

    :param click_context: click context; its ``obj`` holds the AEA Context.
    :param local: publish locally only (disables mixed mode when set).
    :param remote: publish to the remote registry instead of locally.
    :param push_missing: also push packages missing from the registry.
    """
    ctx = cast(Context, click_context.obj)
    # Validate private key paths and the agent configuration before publishing.
    _validate_pkp(ctx.agent_config.private_key_paths)
    _validate_config(ctx)
    if remote:
        _publish_agent_remote(ctx, push_missing=push_missing)
    else:
        # When neither --local nor --remote is given, is_mixed is True
        # (local save with remote fallback behavior handled downstream).
        _save_agent_locally(
            ctx, is_mixed=not local and not remote, push_missing=push_missing
        )
def variance():
    """Running sample variance co-routine.
    ``variance`` consumes values and returns the variance with N-1 degrees
    of freedom of the data points seen so far.
    WARNING: The sample variance with N-1 degrees of freedom
    is not defined for a single data point. The first result
    given by ``variance`` will always be a NAN.
    >>> data = [0.25, 0.5, 1.25, 1.25, 1.75, 2.75, 3.5]
    >>> rvar = variance()
    >>> for x in data:
    ...     print(rvar.send(x))
    ...     #doctest: +ELLIPSIS
    nan
    0.03125
    0.27083333333...
    0.265625
    0.375
    0.81041666666...
    1.37202380952...
    """
    # Delegate the running (count, sum-of-squared-deviations) bookkeeping
    # to the Welford accumulator co-routine.
    cr = _welford()
    # Priming yield: the first .send(x) delivers the first data point here.
    x = (yield None)
    n, M2 = cr.send(x)
    assert n == 1 and M2 == 0
    # One data point -> variance undefined, so emit NaN once.
    x = (yield float('nan'))
    n, M2 = cr.send(x)
    while True:
        # Emit sample variance M2/(n-1) and accept the next data point
        # in the same expression.
        n, M2 = cr.send((yield M2/(n-1)))
def generateAllResizedImages(newWidth, newHeight, backgroundColor=None, noBackground=False):
    """
    generateAllResizedImages reads each annotation file in
    /annotation/* and then creates a resized image based on the
    bounding box for that annotation and saves it to the folder
    /images/breed/boxes_newWidth_newHeight
    input:
        newWidth: int, the width for the output image
        newHeight: int, the height for the output image
        backgroundColor: (int, int, int), color for background,
            not providing a background color will resize the image
            and not maintain the original aspect ratio of the bounding
            box
        noBackground: bool, crops the bounding box of the image and
            saves the bounding box to disk with no background color,
            the saved image will have the dimensions of the bounding box
            that fits within an image of newWidth x newHeight
    output:
        returns nothing, but saves images to the corresponding
        breed folders in /images
    """
    count = 0
    # recursively iterate through all the annotation files
    for fname in glob.iglob(ANNOTATION_PATH + '**/*', recursive=True):
        if os.path.isfile(fname):
            annotationDict = getAnnotationDict(fname)
            boxFolderPath = getBoxFolderPathName(annotationDict, newWidth, newHeight)
            # create the 'boxes_newWidth_newHeight' folder if it
            # does not already exist
            if not os.path.exists(boxFolderPath):
                os.makedirs(boxFolderPath)
            # only write a new image if we haven't come across it yet
            # BUG FIX: removed a leftover debug `True or` that forced
            # regeneration of every image, defeating this check.
            if not os.path.exists(getImageFilePathName(annotationDict, newWidth, newHeight)):
                # crop and save the new image file
                cropSaveBoundedBox(annotationDict, newWidth, newHeight, backgroundColor,
                                   noBackground)
                count += 1
                if count % 100 == 0:
                    print('Progress: ' + str(count / float(20580) * 100) + '%')
                    print('Just processed ' + getImageFilePathName(annotationDict, newWidth, newHeight))
    print('Images Resized:', count)
def get_cluster_id(url):
    """
    Google assign a cluster identifier to a group of web documents
    that appear to be the same publication in different places on the web.
    How they do this is a bit of a mystery, but this identifier is
    important since it uniquely identifies the publication.
    """
    query = parse_qs(urlparse(url).query)
    cluster = query.get("cluster", [])
    if len(cluster) == 1:
        return cluster[0]
    # Fall back to the "cites" parameter when no single cluster id exists.
    cites = query.get("cites", [])
    print(cites)
    if len(cites) == 1:
        return cites[0]
    return None
def vol_allocation_factory(covs:List, pres:List=None)->[float]:
    """ Allocate capital between portfolios using either cov or pre matrices

        :param covs: List of covariance matrices
        :param pres: List of precision matrices
        :return: Capital allocation vector
    """
    pres = [] if pres is None else pres
    try:
        # Inverse-variance weights, normalized to sum to one.
        inverse_variances = [1 / vol_portfolio_variance(cov=cov, pre=pre)
                             for cov, pre in zip_longest(covs, pres, fillvalue=None)]
        return normalize(inverse_variances)
    except Exception:
        # Fall back to the diagonal allocation on any failure.
        print('vol allocation failed')
        return diagonal_allocation_factory(covs=covs, pres=pres)
def add_contact_to_room(room_id: str, contact: str):
    """Use the webexteamssdk to add the contact to the room."""
    # You can also use it as a context handler to catch errors and capture
    # checkpoint data.
    with mission.checkpoint(2, "Add the Contact to the Room") as checkpoint:
        # Create the membership via the module-level Webex Teams API client.
        membership = teams.memberships.create(room_id, personEmail=contact)
        # Record the new member's identity on the checkpoint for later steps.
        checkpoint.data["contact_id"] = membership.personId
        checkpoint.data["contact_name"] = membership.personDisplayName
def get_inspection_page(**kwargs):
    """Fetch inspection data, returning (content, encoding) of the response."""
    url = KING_COUNTY_DOMAIN + DATA_PATH
    query = INSPECTION_PARAMS.copy()
    # Only recognized parameter names may override the defaults.
    for name, value in kwargs.items():
        print(name)
        if name in INSPECTION_PARAMS:
            query[name] = value
    response = requests.get(url, params=query)
    response.raise_for_status()
    return response.content, response.encoding
def get_table_count(url, table_id):
    """
    Count the number of rows in an ActivityTable.

    :param url: endpoint that returns the table data (JSON with 'data_count')
    :param table_id: The ActivityTable ID to update count from and return
    :return: count : count of rows from ActivityTable, or None when the
        response carries no 'data_count' key
    """
    # NOTE(review): assumes site_id=1 holds the API token — confirm.
    token = ActivitySites.objects.get(site_id=1)
    if token.activity_tables_token:
        headers = {'content-type': 'application/json',
                   'Authorization': 'Token ' + token.activity_tables_token}
    else:
        # Proceed unauthenticated; the request may still succeed for
        # public tables.
        headers = {'content-type': 'application/json'}
        print("Token Not Found")
    response = requests.get(url, headers=headers, verify=True)
    data = json.loads(response.content)
    count = None
    try:
        count = data['data_count']
        # Persist the fresh count on the matching ActivityTable rows.
        ActivityTable.objects.filter(table_id=table_id)\
            .update(unique_count=count)
    except KeyError:
        # Response without 'data_count': leave count as None.
        pass
    return count
def get_audio_mfcc_features(txt_files, wav_files, n_input, n_context, word_num_map, txt_labels=None):
    """
    Extract MFCC features for a batch of audio files together with their
    transcript vectors.

    :param txt_files: transcript file paths (or None to use txt_labels)
    :param wav_files: audio file paths
    :param n_input: number of MFCC coefficients per frame
    :param n_context: number of context frames on each side
    :param word_num_map: mapping from characters to integer ids
    :param txt_labels: transcript label strings, used when txt_files is None
    :return: (audio_features, audio_features_len, text_vector, text_vector_len)
        as numpy arrays
    """
    audio_features = []
    audio_features_len = []
    text_vector = []
    text_vector_len = []
    if txt_files is not None:
        txt_labels = txt_files
    for txt_obj, wav_file in zip(txt_labels, wav_files):
        # Load the audio and convert it into its MFCC feature matrix.
        features = audiofile_to_input_vector(wav_file, n_input, n_context).astype('float32')
        audio_features.append(features)
        audio_features_len.append(np.int32(len(features)))
        # Load the transcript that accompanies the audio.
        if txt_files is not None:
            # txt_obj is a transcript file path.
            target = trans_text_ch_to_vector(txt_obj, word_num_map)
        else:
            # txt_obj is the label text itself.
            target = trans_text_ch_to_vector(None, word_num_map, txt_obj)
        text_vector.append(target)
        text_vector_len.append(len(target))
    return (np.asarray(audio_features), np.asarray(audio_features_len),
            np.asarray(text_vector), np.asarray(text_vector_len))
def getOffsetsFromPixelFractions(col, row):
    """
    Determine just the fractional (intra-pixel) part of the col,row position
    and convert it into offsets for addressing the interleaved PRF array.

    For example, if (col, row) = (123.4, 987.6), the fractions are (.4, .6),
    which map onto the 9x9 sub-pixel grid.

    Inputs
    ------
    col
        (float) Column position
    row
        (float) Row position.

    Returns
    ------
    (colOffset, rowOffset)
        (int, int) offset necessary for addressing the interleaved PRF array.
    """
    GRID_SIZE = 9  # the PRF is interleaved on a 9x9 sub-pixel grid
    col_frac = np.remainder(float(col), 1)
    row_frac = np.remainder(float(row), 1)
    col_offset = GRID_SIZE - np.round(GRID_SIZE * col_frac) - 1
    row_offset = GRID_SIZE - np.round(GRID_SIZE * row_frac) - 1
    return int(col_offset), int(row_offset)
def all_index(request):
    """
    Inventory Index View.

    Renders a changelist of all Item objects (no filters, up to 200 per
    page) using the inventory_all_index template.
    """
    # build changelist backed by the Item model admin
    item_changelist = HTSChangeList(request, Item,
                                    list_filter=[],
                                    search_fields=[],
                                    list_per_page=200,
                                    model_admin=ItemAdmin(Item, None)
                                    )
    context_dict = {
        'item_changelist': item_changelist,
        'page_name': 'Inventory Index'
    }
    # Merge in the module-level defaults shared by inventory views.
    context_dict.update(INVENTORY_CONTEXT_DEFAULTS)
    return render(request,
                  'inventory/inventory_all_index.html',
                  context_dict)
def calcSeason(ra, time):
    """Calculate the 'season' in the survey for a series of ra/dec/time values of an observation.

    Based only on the RA of the point on the sky, the 'season' is computed from
    when this point would be overhead. To convert to an integer season label,
    take np.floor of the returned float season values.

    Note that seasons should be calculated for a fixed point on the sky, not for
    each pointing that overlaps a point on the sky. For example, bad things might
    happen if you compute the season for observations that overlap RA=0, but
    were centered on RA=359.

    Parameters
    ----------
    ra : float
        The RA (in degrees) of the point on the sky
    time : np.ndarray
        The times of the observations, in MJD

    Returns
    -------
    np.ndarray
        The season values
    """
    # Reference equinox (MJD) anchoring the ra/season calculation: RA=0 is
    # overhead at local midnight at this time, chosen close to the expected
    # survey start. Derived via astropy from the apparent sidereal time at
    # Cerro Pachon on 2023-09-21T00:01:32.33 UTC.
    EQUINOX_MJD = 60208.00106863426
    DAYS_PER_YEAR = 365.25
    # Express the RA as a time-of-year offset in days.
    ra_days = ra / 360 * DAYS_PER_YEAR
    season_zero_start = EQUINOX_MJD + ra_days - 0.5 * DAYS_PER_YEAR
    seasons = (time - season_zero_start) / DAYS_PER_YEAR
    # Shift so the earliest observation falls in season 0.
    return seasons - np.floor(np.min(seasons))
def set_node_event_info(info: NodeEventInfo) -> Item:
    """Encaches an item.

    :param info: Node event information.
    :returns: Item to be cached, keyed by network / event type, and by the
        block (and deploy) hashes when the event type carries them.
    """
    # Block-level monitoring events are keyed by block hash only.
    if info.event_type in (
        EventType.MONIT_CONSENSUS_FINALITY_SIGNATURE,
        EventType.MONIT_BLOCK_FINALIZED,
        EventType.MONIT_BLOCK_ADDED,
    ):
        names = [
            info.block_hash,
        ]
    # Deploy events additionally carry the deploy hash.
    elif info.event_type == EventType.MONIT_DEPLOY_PROCESSED:
        names = [
            info.block_hash,
            info.deploy_hash,
        ]
    else:
        # Other event types are keyed by their path alone.
        names=[]
    return Item(
        item_key=ItemKey(
            paths=[
                info.network,
                COL_EVENT,
                info.event_type.name[6:],  # strip the "MONIT_" prefix
            ],
            names=names,
        ),
        data=info,
        expiration=EXPIRATION_COL_EVENT
    )
def add_cleanup():
    """Generic cleaning helper (generator-style fixture).

    Yields a registration callable; once the generator is resumed
    (teardown), every registered action runs in registration order.
    """
    pending = []
    def register(func, *args, **kwargs):
        """Store the cleaning actions for later."""
        pending.append((func, args, kwargs))
    yield register
    # Teardown phase: execute the recorded actions.
    for func, args, kwargs in pending:
        func(*args, **kwargs)
def move_up(n=1):
    """Moves your cursor up 'n' rows."""
    # TODO: is math correct here ?
    # NOTE(review): the value produced by code.CURSOR_UP(n) is discarded —
    # presumably the call emits the ANSI escape itself; confirm, otherwise
    # this function has no visible effect.
    code.CURSOR_UP(n)
def check_hashtarget(bible_hash, target):
    """Test whether the biblepay hash is valid for the hash target.

    Both arguments are hexadecimal strings; the hash is valid when it is
    numerically lower than the target.

    :return: True when valid; False when higher or when either value
        cannot be parsed as hexadecimal (including None).
    """
    try:
        return int(bible_hash, 16) < int(target, 16)
    except (TypeError, ValueError):
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only parse failures are expected.
        return False
def get_client_secret():
    """
    Prompt the user for their Client Secret.

    :return: None; the secret is stored on ``config.client_secret``
        as a side effect.
    """
    # getpass hides the input; the value lands on the module-level config.
    config.client_secret = getpass.getpass(prompt='Please enter your Client Secret: ')
def fix_anacondapy_pythonw(fname):
    """Fix the shebang line of a script using anaconda python so it runs
    'pythonw' instead of 'python'.

    :param fname: path of the script to rewrite in place. Files whose first
        line is not a python shebang are left untouched.
    """
    with open(fname, 'r') as fh:
        try:
            lines = fh.readlines()
        except IOError:
            # Unreadable content: substitute a placeholder that cannot
            # match a shebang, so the file is left alone.
            lines = ['-']
    # Drop the trailing newline and surrounding whitespace.
    firstline = lines[0][:-1].strip()
    if firstline.startswith('#!') and 'python' in firstline:
        # BUG FIX: the rewrite now uses a context manager so the file is
        # closed even if a write fails (was a bare open/close pair).
        with open(fname, 'w') as out:
            out.write('#!/usr/bin/env pythonw\n')
            out.write("".join(lines[1:]))
def run_pytest(secret_key, db_pwd, db_host, cloud_host, cloud_pwd):
    """ Run the flask test suite under pytest, forwarding credentials
    as pytest command-line parameters. """
    pytest_args = [
        '--cov-config=../.coveragerc',
        '--cov=./app',
        '--cov-report=xml',
        './tests',
        f'--secret_key={secret_key}',
        f'--db_pwd={db_pwd}',
        f'--db_host={db_host}',
        f'--cloud_host={cloud_host}',
        f'--cloud_pwd={cloud_pwd}',
    ]
    pytest.main(pytest_args)
def assert_and_infer_cfg_fl(cfg_fl, args, make_immutable=True, train_mode=True):
    """
    Calls /semantic-segmentation/config.assert_and_infer_cfg and adds additional assertions.

    Copies any command-line overrides from `args` onto `cfg_fl`, infers
    NUM_CLIENTS from CLIENTS_PER_DIST * NUM_DISTRIBUTIONS when needed, and
    returns the updated config. `make_immutable` and `train_mode` are kept
    for interface compatibility and are not used here.
    """
    if args.manual_client_setup:
        cfg_fl.CLIENT.MANUAL = args.manual_client_setup
    if cfg_fl.CLIENT.MANUAL:
        print('-------------------------')
        print('> Clients manual settings')
        print('-------------------------')
        for i in cfg_fl.CLIENT.POPULATION:
            print(i)
    if args.replicate:
        cfg_fl.REPLICATE = args.replicate
    if args.seed:
        cfg_fl.SEED = args.seed
        cfg_fl.TORCH_SEED = args.seed
    if args.task:
        cfg_fl.TASK = args.task
    if args.dataset:
        cfg_fl.DATASET.DATASET_NAME = args.dataset
    if args.clients_per_dist:
        cfg_fl.FEDERATION.CLIENTS_PER_DIST = args.clients_per_dist
    # NOTE(review): NUM_DISTRIBUTIONS is read here but may be overridden by
    # args.num_distributions further down — confirm the intended ordering.
    if cfg_fl.FEDERATION.CLIENTS_PER_DIST is not None and cfg_fl.FEDERATION.NUM_CLIENTS is None:
        cfg_fl.FEDERATION.NUM_CLIENTS = cfg_fl.FEDERATION.CLIENTS_PER_DIST * cfg_fl.FEDERATION.NUM_DISTRIBUTIONS
    if args.num_clients:
        cfg_fl.FEDERATION.NUM_CLIENTS = args.num_clients
    if args.print_logx:
        cfg_fl.LOGX_STDOUT = True
    if args.num_distributions:
        cfg_fl.FEDERATION.NUM_DISTRIBUTIONS = args.num_distributions
    assertion_num_clients = "Either 'clients_per_dist' or 'num_clients' needs to be specified"
    assert cfg_fl.FEDERATION.CLIENTS_PER_DIST or cfg_fl.FEDERATION.NUM_CLIENTS, assertion_num_clients
    # if args.dist_type:
    #     cfg_fl.FEDERATION.DIST_TYPE = args.dist_type
    if args.clustering_method:
        # BUG FIX: was `cfg.FEDERATION...` (undefined name), raising
        # NameError whenever --clustering_method was passed.
        cfg_fl.FEDERATION.CLUSTERING_METHOD = args.clustering_method
    if args.federation_method:
        assert args.federation_method in ['fomo', 'embeddings', 'local', 'fedavg']
        cfg_fl.FEDERATION.METHOD = args.federation_method
        if args.federation_method == 'fedavg':
            cfg_fl.FEDERATION.FED_AVERAGING = True
    if args.random_distributions:
        cfg_fl.FEDERATION.RANDOM_DISTS = args.random_distributions  # True
    if args.federated_averaging:
        cfg_fl.FEDERATION.FED_AVERAGING = True
        cfg_fl.FEDERATION.METHOD = 'fedavg'
    if args.local_train_val_size:
        cfg_fl.FEDERATION.LOCAL_TRAIN_VAL_SIZE = args.local_train_val_size
    if args.federation_epoch:
        cfg_fl.FEDERATION.EPOCH = args.federation_epoch
    if args.num_update_clients:
        cfg_fl.CLIENT_WEIGHT.NUM_UPDATE_CLIENTS = args.num_update_clients
    if args.model_weight_delta:
        cfg_fl.CLIENT_WEIGHT.WEIGHT_DELTA = args.model_weight_delta
    if args.explicit_weight_delta:
        # An explicit delta disables leave-one-out weighting.
        cfg_fl.CLIENT_WEIGHT.WEIGHT_DELTA = args.explicit_weight_delta
        cfg_fl.CLIENT_WEIGHT.LEAVE_ONE_OUT = False
    if args.client_weight_epsilon:
        cfg_fl.CLIENT_WEIGHT.EPSILON = args.client_weight_epsilon
    if args.client_weight_epsilon_decay:
        cfg_fl.CLIENT_WEIGHT.EPSILON_DECAY = args.client_weight_epsilon_decay
    if args.client_weight_method:
        cfg_fl.CLIENT_WEIGHT.METHOD = args.client_weight_method
    if args.update_positive_delta_only:
        cfg_fl.MODEL_WEIGHT.UPDATE_POSITIVE_ONLY = args.update_positive_delta_only
    if args.leave_one_out:
        cfg_fl.CLIENT_WEIGHT.LEAVE_ONE_OUT = args.leave_one_out
    if args.baseline_model:
        cfg_fl.CLIENT_WEIGHT.BASELINE = args.baseline_model
    if args.train_split:
        cfg_fl.CLIENT.TRAIN_SPLIT = args.train_split
        cfg_fl.CLIENT.VAL_SPLIT = 1 - args.train_split
    # Infer the class count from well-known dataset names.
    if args.dataset == 'cifar100':
        args.num_classes = 100
    elif args.dataset == 'cifar10':
        args.num_classes = 10
    elif args.dataset == 'mnist':
        args.num_classes = 10
    return cfg_fl
def replace_service(name,
                    metadata,
                    spec,
                    source,
                    template,
                    old_service,
                    saltenv,
                    namespace="default",
                    **kwargs):
    """
    Replaces an existing service with a new one defined by name and namespace,
    having the specified metadata and spec.

    name: name of the service to replace.
    metadata/spec: desired service definition (or sourced from ``source``
        rendered with ``template``).
    old_service: the currently deployed service as a dict; immutable fields
        are carried over from it (see below).
    saltenv: salt fileserver environment used to resolve ``source``.
    namespace: kubernetes namespace of the service.
    **kwargs: connection options forwarded to _setup_conn.

    Returns the replaced service as a dict, or None when the service no
    longer exists (HTTP 404).
    """
    body = __create_object_body(
        kind="Service",
        obj_class=kubernetes.client.V1Service,
        spec_creator=__dict_to_service_spec,
        name=name,
        namespace=namespace,
        metadata=metadata,
        spec=spec,
        source=source,
        template=template,
        saltenv=saltenv,
    )

    # Some attributes have to be preserved
    # otherwise exceptions will be thrown:
    # the API server rejects a replace that changes clusterIP or omits
    # the current resourceVersion (optimistic concurrency control).
    body.spec.cluster_ip = old_service["spec"]["cluster_ip"]
    body.metadata.resource_version = old_service["metadata"][
        "resource_version"]

    cfg = _setup_conn(**kwargs)

    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.replace_namespaced_service(
            name, namespace, body)

        return api_response.to_dict()
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            # Service vanished between read and replace: report "not found".
            return None
        else:
            log.exception("Exception when calling "
                          "CoreV1Api->replace_namespaced_service")
            raise CommandExecutionError(exc)
    finally:
        # Always clean up temporary connection config (e.g. kubeconfig file).
        _cleanup(**cfg)
def test_add_contributor(staff_client, user, private_channel, attempts):
    """
    Adds a contributor to a channel

    Posts the same contributor ``attempts`` times (presumably parametrized
    by pytest -- confirm) and expects 201 with the same payload every time,
    i.e. the endpoint must behave idempotently for repeated adds.
    """
    url = reverse("contributor-list", kwargs={"channel_name": private_channel.name})
    for _ in range(attempts):
        resp = staff_client.post(
            url, data={"contributor_name": user.username}, format="json"
        )
        assert resp.status_code == status.HTTP_201_CREATED
        assert resp.json() == {
            "contributor_name": user.username,
            "full_name": user.profile.name,
            "email": user.email,
        }
def timesketch_add_manual_event(
        data: Text, timestamp: Optional[int] = 0,
        date_string: Optional[Text] = '',
        timestamp_desc: Optional[Text] = '',
        attributes: Optional[Dict[str, Any]] = None,
        tags: Optional[List[str]] = None) -> Dict[str, str]:
    """Add a manually generated event to the sketch.

    Args:
        data (str): The message string for for the event to be generated.
        timestamp (int): Optional timestamp in either seconds since Epoch or
            microseconds since Epoch.
        date_string (str): An optional date time as a human readable string. If
            neither date_string nor timestamp is provided then the current timestamp
            will be used as the time of the event.
        timestamp_desc (str): Optional timestamp description field.
        attributes (dict): Optional dict which contains extra attributes to add
            to the manual event.
        tags (list): Optional list of tags to add to the manual event.

    Returns:
        Dictionary with query results.
    """
    connect()
    state_obj = state.state()
    sketch = state_obj.get_from_cache('timesketch_sketch')
    if not sketch:
        print('Not able to connect to a sketch.')
        return {}

    # Default timestamp: "now" in UTC, used when neither timestamp nor
    # date_string is supplied.
    date_obj = datetime.datetime.now(datetime.timezone.utc)
    date = date_obj.isoformat()

    if timestamp:
        try:
            date_obj = datetime.datetime.fromtimestamp(
                timestamp, datetime.timezone.utc)
        except ValueError:
            # Out of range for seconds-since-Epoch: interpret the value as
            # microseconds since Epoch instead.
            date_obj = datetime.datetime.fromtimestamp(
                timestamp / 1e6, datetime.timezone.utc)
        date = date_obj.isoformat()

    elif date_string:
        elements = time_elements.TimeElements()
        if 'T' in date_string:
            try:
                elements.CopyFromStringISO8601(date_string)
            except ValueError:
                logging.error(
                    'Unable to convert date string, is it really in ISO 8601 format?')
                return {}
        else:
            # FIX: this branch previously ran unconditionally, so a string
            # that had just been successfully parsed as ISO 8601 was re-fed
            # to CopyFromString, failed, and the whole call returned {}.
            try:
                elements.CopyFromString(date_string)
            except ValueError:
                try:
                    elements.CopyFromStringRFC1123(date_string)
                except ValueError:
                    logging.error(
                        'Unable to convert date string, needs to be in ISO 8601, 1123 or '
                        'in the format YYYY-MM-DD hh:mm:ss.######[+-]##:##')
                    return {}

        date = elements.CopyToDateTimeStringISO8601()

    if not timestamp_desc:
        timestamp_desc = 'Event Logged'

    # Normalize the optional containers so sketch.add_event always receives
    # a list and a dict.
    if not isinstance(tags, (tuple, list)):
        tags = []

    if not isinstance(attributes, dict):
        attributes = {}

    if not date:
        logging.error('Unable to convert date string, please check it.')
        return {}

    return sketch.add_event(
        data, date, timestamp_desc, attributes=attributes, tags=tags)
def filter_vcf_by_sex(vcf_file, data):
    """Post-filter a single sample VCF, handling sex chromosomes.

    Handles sex chromosomes and mitochondrial. Does not try to resolve called
    hets into potential homozygotes when converting diploid to haploid.

    Skips filtering on pooled samples, we still need to implement.

    vcf_file: path to the input VCF (plain or bgzipped).
    data: bcbio sample dictionary, used to look up the configured sex/ploidy.

    Returns the path to the ploidy-fixed VCF (re-bgzipped and indexed when
    the output name ends in .gz), or the input unchanged for multi-sample
    (pooled) VCFs.
    """
    # Pooled/multi-sample input is passed through untouched (see docstring).
    if len(vcfutils.get_samples(vcf_file)) > 1:
        return vcf_file
    _, sexes = _configured_ploidy_sex([data])
    sex = sexes.pop()
    out_file = "%s-ploidyfix%s" % utils.splitext_plus(vcf_file)
    if not utils.file_exists(out_file):
        orig_out_file = out_file
        # Write uncompressed first; compress/index afterwards if needed.
        out_file = orig_out_file.replace(".vcf.gz", ".vcf")
        with file_transaction(out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                with utils.open_gzipsafe(vcf_file) as in_handle:
                    for line in in_handle:
                        if line.startswith("#"):
                            # Header lines copied verbatim.
                            out_handle.write(line)
                        else:
                            # _fix_line_ploidy may return a falsy value to
                            # drop a record entirely.
                            line = _fix_line_ploidy(line, sex)
                            if line:
                                out_handle.write(line)
        if orig_out_file.endswith(".gz"):
            out_file = vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file
def gen_spacer(spacer_char="-", nl=2):
    """Build a visual spacer of `nl` lines, each 60 `spacer_char` characters.

    Each line is terminated with a newline; defaults produce two lines of
    dashes.
    """
    single_line = spacer_char * 60 + "\n"
    return single_line * nl
def repo_config_factory(repo_type, repo_id, repo_label, **kwargs):
    """
    Constructs a repository configuration in form of a
    TTL structure utilizing the TTL templates from
    ./repo_types_template.

    Args:
        repo_type: template name; must be listed in REPO_TYPES.
        repo_id: repository identifier ('-' is normalized to '_').
        repo_label: human-readable repository label.
        **kwargs: template parameters overriding the module defaults.

    Returns:
        The rendered TTL configuration string.

    Raises:
        RepositoryTypeUnknown: when repo_type has no matching template.
    """
    # Check if the repo_type is a known template
    if repo_type not in REPO_TYPES:
        raise RepositoryTypeUnknown
    # Get the path to the template
    template_path = TEMPLATE_FOLDER / '{}{}'.format(repo_type, '.ttl')
    # Open the template file and read it
    with open(template_path) as template_file:
        template = template_file.read()
    # FIX: copy the defaults. `params = DEFAULTS` aliased the module-level
    # dict, so params.update(kwargs) mutated DEFAULTS and kwargs from one
    # call leaked into every subsequent call.
    params = dict(DEFAULTS)
    # Overwrite them with the given kwargs
    params.update(kwargs)
    # Fill the params in the template
    ttl = template.format(repo_id=repo_id.replace('-', '_'), repo_label=repo_label, **params)
    # return the final TTL
    return ttl
def oil_isothermal_density(rho: NDArrayOrFloat, p: NDArrayOrFloat) -> NDArrayOrFloat:
    """Oil density at pressure `p` for a 15.6 degC reference density.

    Implements Batzle & Wang (1992) Equation 18.

    Args:
        rho: Oil reference density (g/cc) at 15.6 degC; may be pre-adjusted
            for dissolved gas via `oil_rho_sat`.
        p: Pressure (MPa).

    Returns:
        Oil density (g/cc) at pressure `p`.
    """
    # Pressure correction term, scaled by the squared offset of the
    # reference density from 1.15 g/cc.
    pressure_term = (0.00277 * p - 1.71e-7 * np.power(p, 3)) * np.power(rho - 1.15, 2)
    return rho + pressure_term + 3.49e-4 * p
def get_waveform_dataset(path):
    """Load the waveform dataset stored at the given path.

    Args:
        path: Path to a .npz archive whose default array ('arr_0') holds the
            waveform chunks.

    Returns:
        The array of waveform chunks.
    """
    archive = np.load(path)
    return archive['arr_0']
def logger(filename: str, name: str) -> logging.Logger:
    """Create a DEBUG-level logger named `name` that appends to `filename`."""
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(name)s %(levelname)s: %(message)s'))
    log.addHandler(handler)
    return log
def verify_signature(pubkey_path, message, signature):
    """
    Use Crypto.Signature.PKCS1_v1_5 to verify the signature on a message.
    Returns True for valid signature.

    pubkey_path: path to the RSA public key file.
    message: the signed payload (str or bytes; coerced to bytes).
    signature: the raw signature bytes to verify.

    NOTE(review): both backends hash with SHA-1 -- presumably kept for
    wire-format compatibility with existing signers; confirm before reuse.
    """
    log.debug("salt.crypt.verify_signature: Loading public key")
    pubkey = get_rsa_pub_key(pubkey_path)
    log.debug("salt.crypt.verify_signature: Verifying signature")
    if HAS_M2:
        # M2Crypto backend: hash manually, then verify the raw digest.
        md = EVP.MessageDigest("sha1")
        md.update(salt.utils.stringutils.to_bytes(message))
        digest = md.final()
        return pubkey.verify(digest, signature)
    else:
        # PyCrypto/pycryptodome backend: PKCS#1 v1.5 verifier over SHA-1.
        verifier = PKCS1_v1_5.new(pubkey)
        return verifier.verify(
            SHA.new(salt.utils.stringutils.to_bytes(message)), signature
        )
def untranslate_module_name(module):
    """Rename module names mention in JSON to names that we can import

    This reverses the translation applied by translate_module_name() to
    a module name available to the current version of Python.
    """
    # On Python 3 both `__builtin__` and `exceptions` were folded into the
    # single `builtins` module.
    if PY3 and module in ('__builtin__', 'exceptions'):
        return 'builtins'
    return module
def RGBRamp(size=256, upperValue=.6666666666666667):
    """Generate an RGB color ramp, values range from 0.0 to 1.0

    size: number of ramp entries (must be > 0).
    upperValue: upper hue bound passed to HSVRamp (2/3 ~= blue), so the
        default ramp runs from red through to blue.
    Returns a (size, 3) Numeric array of RGB triples.
    """
    assert size > 0
    # Build the ramp in HSV space, then convert each entry to RGB.
    hsv = HSVRamp(size, upperValue)
    rgb = Numeric.zeros( (hsv.shape[0], 3), viewerConst.FPRECISION )
    for i in xrange(hsv.shape[0]):
        rgb[i] = ToRGB(hsv[i])
    return rgb
def find_poly_ras_intersect(shape, raster_dir, extension='.tif'):
    """ Finds all the tiles falling within raster object
    the get shape geometry should be seperated from the intesect check,
    currently causes a exit code 139 on unix box

    :param shape: path to a shapefile; the first feature's geometry is used
    :param extension: raster file extension to scan for
    :param raster_dir: directory containing the candidate raster tiles
    :return: list of tile paths whose footprint intersects the shape geometry
    """
    print 'starting shape: {}'.format(shape)
    # get vector geometry
    if not os.path.isfile(shape):
        raise NotImplementedError('Shapefile not found')
    polygon = ogr.Open(shape)
    layer = polygon.GetLayer()
    # NOTE(review): only feature 0 is tested -- confirm multi-feature
    # shapefiles are not expected here.
    feature = layer.GetFeature(0)
    vector_geo = feature.GetGeometryRef()
    # print 'vector geometry: {}'.format(vector_geo)
    tiles = [os.path.join(raster_dir, x) for x in
             os.listdir(os.path.join(raster_dir)) if x.endswith(extension)]
    raster_list = []
    for tile in tiles:
        print tile, srt.tif_proj4_spatial_reference(tile)
        # Only compare footprints when shape and tile share a reference system.
        if srt.check_same_reference_system(shape, tile):
            raster_geo = get_polygon_from_raster(tile)
            if raster_geo.Intersect(vector_geo):
                print 'tile: {} intersects {}'.format(os.path.basename(tile), os.path.basename(shape))
                raster_list.append(tile)
    return raster_list
def delay(seconds) -> None:
    """Block the calling thread for the given number of seconds.

    Thin alias over Python's time.sleep; `seconds` accepts anything
    time.sleep does.
    """
    sleep(seconds)
def Session(
    retries: int = 10,
    backoff_factor: float = 0.3,
    allowed_methods: Iterable[str] = ('HEAD', 'TRACE', 'GET', 'POST', 'PUT', 'OPTIONS', 'DELETE'),
    status_forcelist: Iterable[int] = (408, 429, 500, 502, 503, 504),
    ) -> requests.Session:
    """Build a :py:class:`requests.Session` with full retry behaviour.

    Args:
        retries (int): number of retries
        backoff_factor (float): delay factor between retries (in seconds)
        allowed_methods (iterable): HTTP methods eligible for retry
        status_forcelist (iterable): HTTP status codes that trigger a retry

    Returns:
        :py:class:`requests.Session`: the configured session object
    """
    retry_policy = Retry(
        total=retries,
        connect=retries,
        read=retries,
        redirect=retries,
        allowed_methods=allowed_methods,
        status_forcelist=status_forcelist,
        backoff_factor=backoff_factor,
    )
    adapter = HTTPAdapter(max_retries=retry_policy)
    session = requests.Session()
    for prefix in ('http://', 'https://'):
        session.mount(prefix, adapter)
    return session
def default_plot(times, attendances):
    """Plot attendance over time with a dashed zero reference line."""
    figure, axes = plt.subplots(figsize=(12, 6))
    axes.axhline(y=0, color="k", linestyle="--")
    axes.plot(times, attendances, label=r"$A(t)$")
    axes.set_xlabel("t")
    axes.set_ylabel("A(t)")
    plt.show()
def tp53():
    """Create a TP53 gene fixture.

    Returns a normalized Gene record for HGNC:11998 (tumor protein p53)
    with its cytoband location, aliases, status, and cross-references.
    """
    params = {
        'label': 'tumor protein p53',
        'concept_id': 'hgnc:11998',
        'symbol': 'TP53',
        'location_annotations': [],
        'strand': None,
        # GA4GH ChromosomeLocation: chromosome 17, band p13.1.
        'locations': [
            {
                '_id': 'ga4gh:VCL._Cl_XG2bfBUVG6uwi-jHtCHavOAyfPXN',
                'chr': '17',
                'interval': {
                    'end': 'p13.1',
                    'start': 'p13.1',
                    'type': 'CytobandInterval'
                },
                'species_id': 'taxonomy:9606',
                'type': 'ChromosomeLocation'
            }
        ],
        'previous_symbols': [],
        'aliases': [
            'p53',
            'LFS1'
        ],
        'symbol_status': 'approved',
        # Cross-references to external resources that mention this gene.
        'associated_with': [
            'vega:OTTHUMG00000162125',
            'refseq:NM_000546',
            'cosmic:TP53',
            'omim:191170',
            'ucsc:uc060aur.1',
            'uniprot:P04637',
            'orphanet:120204',
            'ccds:CCDS73968',
            'ccds:CCDS73971',
            'ccds:CCDS73970',
            'ccds:CCDS73969',
            'ccds:CCDS73967',
            'ccds:CCDS73966',
            'ccds:CCDS73965',
            'ccds:CCDS73964',
            'ccds:CCDS73963',
            'ccds:CCDS11118',
            'ccds:CCDS45605',
            'ccds:CCDS45606',
            'ena.embl:AF307851',
            'pubmed:6396087',
            'pubmed:3456488',
            'pubmed:2047879'
        ],
        # Equivalent concepts in other gene nomenclature systems.
        'xrefs': [
            'ensembl:ENSG00000141510',
            'ncbigene:7157'
        ]
    }
    return Gene(**params)
def factorOrder(factors, varOrder):
    """Return an order of factors for sampling given a variable order for sampling

    Args:
        factors: iterable of Factor objects over the variables in varOrder.
        varOrder: sequence assigning each variable id a sampling position.

    Returns:
        list of Factor, one slot per sampling position; slot i holds the
        factor chosen for sampling the i-th variable (an empty Factor if no
        factor has that variable as its last-sampled one).
    """
    pri = [0 for x in varOrder]
    for i, x in enumerate(varOrder):  # first, find position of each var in sampling order
        pri[x] = i
    factorOrder = [Factor() for x in varOrder]  # fill order with blanks initially
    # FIX: `ent` was read below without ever being initialized (NameError on
    # the first collision). Start at -1 so the `< 0` check triggers the lazy
    # entropy computation for the already-stored factor.
    ent = [-1.0 for x in varOrder]  # cached conditional entropy per slot
    for f in factors:
        f_pri = max([pri[x] for x in f.vars])  # get last-sampled variable for this factor
        if factorOrder[f_pri].nvar == 0:
            factorOrder[f_pri] = f  # if first factor for this variable, save it
        else:  # o.w. take one with the lowest conditional entropy:
            if ent[f_pri] < 0:  # (compute previous' if necessary)
                ent[f_pri] = factorOrder[f_pri].entropy() - factorOrder[f_pri].sum([f_pri]).entropy()
            ent_new = f.entropy() - f.sum([f_pri]).entropy()  # (and this factor's)
            if ent_new < ent[f_pri]:  # (keep whichever is lower)
                factorOrder[f_pri] = f
                ent[f_pri] = ent_new
    return factorOrder
def boundary_nodes(graph, nodes):
    # TODO: move to utils
    # TODO: use networkx boundary nodes directly: does the same thing
    """ returns nodes at boundary of G based on edge_boundary from networkx

    graph: (possibly wrapped) graph object.
    nodes: iterable of (possibly wrapped) nodes defining the region.

    Returns the wrapped subset of `nodes` that have at least one edge
    crossing out of the region.
    """
    graph = unwrap_graph(graph)
    nodes = list(nodes)
    nbunch = list(unwrap_nodes(nodes))
    # find boundary
    b_edges = nx.edge_boundary(graph, nbunch)  # boundary edges
    # Boundary edges are oriented (inside, outside); keep the inside ends.
    internal_nodes = [s for (s, t) in b_edges]
    assert(all(n in nbunch for n in internal_nodes))  # check internal
    return wrap_nodes(graph, internal_nodes)
def home(request):
    """Render the homepage.

    Deliberately never cached so the template's translations are resolved
    correctly on every request.
    """
    rendered = loader.get_template('weather/home.html').render({}, request)
    return HttpResponse(rendered)
def plot_histogram(df, path, col_x, ax=None, size=None, save=True, suffix=None,
                   show=False, **kwargs):
    """Geneate a histogram plot.

    Args:
        df (:class:`pandas.DataFrame`): Data frame to plot.
        path (str): Path to data frame to use if ``df`` is None, also used
            as the basis for output path.
        col_x (str): Name of column with values to plot.
        ax (:class:`matplotlib.axes.Axes`): Matplotlib axes; defaults to
            None to generate a new figure with axes.
        size (Sequence[float]): Sequence of ``width, height`` to size the
            figure; defaults to None.
        save (bool): True to save the plot; defaults to True.
        suffix: String to append to output path before extension;
            defaults to None to ignore.
        show: True to display the image; otherwise, the figure will only
            be saved to file, if :attr:``config.savefig`` is set.
            Defaults to False.
        kwargs (Any): Extra arguments to :meth:`decorate_plot`.

    Returns:
        :class:`matplotlib.axes.Axes`: Matplotlib axes.
    """
    # load data frame from CSV unless already given and set up figure
    if df is None:
        df = pd.read_csv(path)
    if ax is None:
        fig, gs = plot_support.setup_fig(1, 1, size)
        ax = plt.subplot(gs[0, 0])

    # generate histogram
    n, bins, patches = ax.hist(df[col_x])
    decorate_plot(ax, **kwargs)

    # save and display plot if indicated
    if save:
        out_path = libmag.make_out_path(path, suffix=suffix)
        plot_support.save_fig(out_path, config.savefig)
    if show: plt.show()
    return ax
def __check_interface_state(duthost, interface, state='up'):
    """
    Check whether a DUT interface is in the expected link state.

    Args:
        duthost: DUT host object
        interface: Interface of DUT
        state: expected state ('up' or 'down')

    Returns:
        Bool confirming whether the interface matches the expected state
    """
    down_ports = duthost.interface_facts(
        up_ports=[interface])['ansible_facts']['ansible_interface_link_down_ports']
    is_down = interface in down_ports
    return is_down if 'down' in state else not is_down
def test_usergroup_delete(conn_args, query_return, mock_login):
    """
    Verify zabbix.usergroup_delete forwards the group id and unwraps the
    API reply's ``usrgrpids`` into the module return value.

    query_submitted = {"params": [13], "jsonrpc": "2.0", "id": 0,
    "auth": "9bad39de2a5a9211da588dd06dad8773", "method": "usergroup.delete"}
    """
    module_return = ["13"]
    # Stub the Zabbix API response the module will receive.
    query_return({"jsonrpc": "2.0", "result": {"usrgrpids": ["13"]}, "id": 0})

    assert zabbix.usergroup_delete("13", **conn_args) == module_return
def install_nbextension(path, overwrite=False, symlink=False, user=False, prefix=None, nbextensions_dir=None, destination=None, verbose=1):
    """Install a Javascript extension for the notebook

    Stages files and/or directories into the nbextensions directory.

    By default, this compares modification time, and only stages files that need updating.
    If `overwrite` is specified, matching files are purged before proceeding.

    Parameters
    ----------

    path : path to file, directory, zip or tarball archive, or URL to install
        By default, the file will be installed with its base name, so '/path/to/foo'
        will install to 'nbextensions/foo'. See the destination argument below to change this.
        Archives (zip or tarballs) will be extracted into the nbextensions directory.
    overwrite : bool [default: False]
        If True, always install the files, regardless of what may already be installed.
    symlink : bool [default: False]
        If True, create a symlink in nbextensions, rather than copying files.
        Not allowed with URLs or archives. Windows support for symlinks requires
        Vista or above, Python 3, and a permission bit which only admin users
        have by default, so don't rely on it.
    user : bool [default: False]
        Whether to install to the user's nbextensions directory.
        Otherwise do a system-wide install (e.g. /usr/local/share/jupyter/nbextensions).
    prefix : str [optional]
        Specify install prefix, if it should differ from default (e.g. /usr/local).
        Will install to ``<prefix>/share/jupyter/nbextensions``
    nbextensions_dir : str [optional]
        Specify absolute path of nbextensions directory explicitly.
    destination : str [optional]
        name the nbextension is installed to.  For example, if destination is 'foo', then
        the source file will be installed to 'nbextensions/foo', regardless of the source name.
        This cannot be specified if an archive is given as the source.
    verbose : int [default: 1]
        Set verbosity level. The default is 1, where file actions are printed.
        set verbose=2 for more output, or verbose=0 for silence.
    """
    nbext = _get_nbext_dir(nbextensions_dir, user, prefix)
    # make sure nbextensions dir exists
    ensure_dir_exists(nbext)

    if isinstance(path, (list, tuple)):
        raise TypeError("path must be a string pointing to a single extension to install; call this function multiple times to install multiple extensions")

    path = cast_unicode_py2(path)

    if path.startswith(('https://', 'http://')):
        # Remote source: download to a temp dir, then recurse on the local copy.
        if symlink:
            raise ValueError("Cannot symlink from URLs")
        # Given a URL, download it
        with TemporaryDirectory() as td:
            filename = urlparse(path).path.split('/')[-1]
            local_path = os.path.join(td, filename)
            if verbose >= 1:
                print("downloading %s to %s" % (path, local_path))
            urlretrieve(path, local_path)
            # now install from the local copy
            install_nbextension(local_path, overwrite=overwrite, symlink=symlink, nbextensions_dir=nbext, destination=destination, verbose=verbose)
    elif path.endswith('.zip') or _safe_is_tarfile(path):
        # Archive source: extracted in place; its internal layout defines
        # the installed names, so destination cannot be overridden.
        if symlink:
            raise ValueError("Cannot symlink from archives")
        if destination:
            raise ValueError("Cannot give destination for archives")
        if verbose >= 1:
            print("extracting %s to %s" % (path, nbext))

        if path.endswith('.zip'):
            archive = zipfile.ZipFile(path)
        elif _safe_is_tarfile(path):
            archive = tarfile.open(path)
        archive.extractall(nbext)
        archive.close()
    else:
        if not destination:
            destination = basename(path)
        destination = cast_unicode_py2(destination)
        full_dest = pjoin(nbext, destination)
        if overwrite and os.path.lexists(full_dest):
            if verbose >= 1:
                print("removing %s" % full_dest)
            if os.path.isdir(full_dest) and not os.path.islink(full_dest):
                shutil.rmtree(full_dest)
            else:
                os.remove(full_dest)

        if symlink:
            path = os.path.abspath(path)
            # NOTE(review): an existing non-overwritten destination is left
            # as-is here (no symlink created, no message) -- confirm intended.
            if not os.path.exists(full_dest):
                if verbose >= 1:
                    print("symlink %s -> %s" % (full_dest, path))
                os.symlink(path, full_dest)
        elif os.path.isdir(path):
            path = pjoin(os.path.abspath(path), '') # end in path separator
            for parent, dirs, files in os.walk(path):
                dest_dir = pjoin(full_dest, parent[len(path):])
                if not os.path.exists(dest_dir):
                    if verbose >= 2:
                        print("making directory %s" % dest_dir)
                    os.makedirs(dest_dir)
                for file in files:
                    src = pjoin(parent, file)
                    # print("%r, %r" % (dest_dir, file))
                    dest_file = pjoin(dest_dir, file)
                    # _maybe_copy stages only files newer than the destination.
                    _maybe_copy(src, dest_file, verbose)
        else:
            src = path
            _maybe_copy(src, full_dest, verbose)
def find_dateTime_in_html(text):
    """Return every dateTime attribute value found in <time> tags of `text`."""
    # re.findall already yields an empty list when nothing matches.
    return findall('<time dateTime="(.*?)">', text)
def get_number_of_voxels_per_class(labels: torch.Tensor) -> torch.Tensor:
    """
    Computes the number of voxels for each class in a one-hot label map.

    :param labels: one-hot label map in shape Batches x Classes x Z x Y x X or Classes x Z x Y x X
    :return: A tensor of shape [Batches x Classes] containing the number of non-zero voxels along Z, Y, X
    :raises ValueError: if labels does not have 4 or 5 dimensions
    """
    # FIX: raise the specific ValueError (subclass of Exception, so existing
    # handlers still catch it) and use the idiomatic `not in`.
    if len(labels.shape) not in (4, 5):
        raise ValueError("labels must have either 4 (Classes x Z x Y x X) "
                         "or 5 dimensions (Batches x Classes x Z x Y x X), found:{}"
                         .format(len(labels.shape)))
    if len(labels.shape) == 4:
        # Promote to a singleton batch so the reduction below is uniform.
        labels = labels[None, ...]
    return torch.count_nonzero(labels, dim=(2, 3, 4))
def get_columns_width(user_width):
    """Return the widths of the three report columns.

    Falls back to [30, 7, 60] when no configuration is supplied or the
    configured value is unusable; each configured width is clamped to a
    minimum of 7.
    """
    default_width = [30, 7, 60]
    if not user_width:
        return default_width
    try:
        widths = []
        for index in range(3):
            value = user_width[index]
            widths.append(7 if value < 7 else value)
        return widths
    except (TypeError, IndexError):
        _LOGGER.error(
            "Invalid configuration for table column widths, default values used %s",
            default_width,
        )
        return default_width
def tx_failure():
    """
    Failed ```tx```.

    Renders the tx template with success=False, echoing the request's
    transmission parameters back into the page, and tags the response with
    an ``Irbox-Success: false`` header so clients can detect failure
    without parsing HTML.
    """
    # Echo back the parameters of the failed transmission attempt.
    message = request.args.get('m')
    protocol = request.args.get('p')
    address = request.args.get('a')
    command = request.args.get('c')
    repeats = request.args.get('r')
    bits = request.args.get('b')
    response = make_response(
        render_template(
            "tx.html",
            success=False,
            message=message,
            protocol=protocol,
            address=address,
            command=command,
            repeats=repeats,
            bits=bits
        )
    )
    response.headers.set('Irbox-Success', 'false')
    return response
def read_docstring(object_):
    """Render `object_`'s pydoc help (minus the FILE section) as a fenced
    markdown code block.
    """
    rendered = pydoc.plain(pydoc.render_doc(object_))
    docs = rendered.split("FILE")[0].rstrip()
    return "```\n{}\n```\n".format(docs)
def _bernoulli_spiral(theta, theta_offset=0., *args, **kwargs):
    """Return Equiangular (Bernoulli's) spiral

    Args:
        theta: array-like, angles from polar coordinates to be converted
        theta_offset: float, angle offset in radians (2*pi = 0)

    Kwargs:
        exp_scale: growth rate of the exponential
    """
    exp_scale = kwargs.pop('exp_scale', 0.1)

    angle = theta + theta_offset
    radius = np.exp(exp_scale * theta)
    x = radius * np.cos(angle)
    y = radius * np.sin(angle)
    # Normalize each axis independently into [-1, 1].
    return x / np.max(np.abs(x)), y / np.max(np.abs(y))
def register_type(item_type, item_creator):
    """Register data type to Pipe class. Check :py:meth:`Pipe.__or__` and
    :py:meth:`Pipe.__ror__` for detail.

    :param item_type: The type of data object which used in pipe cascading.
    :param item_creator: A function to convert data to Pipe object.
    """
    # Registry is shared class state: registration is global for all Pipes.
    Pipe.pipe_item_types[item_type] = item_creator
def _redacted_to_curl(request: httpx.Request) -> str:
    """Pass through to curlify2.to_curl that redacts the authorization in the headers
    """
    # No auth header at all: nothing to redact.
    if (auth_header := request.headers.get('authorization')) is None:
        return curlify2.to_curl(request)
    # Shallow-copy the request but deep-copy the headers so the caller's
    # request object is never mutated by the redaction below.
    req_copy = copy.copy(request)
    req_copy.headers = copy.deepcopy(request.headers)
    if "Bearer" in auth_header:
        # Keep the scheme visible; hide only the token value.
        req_copy.headers['authorization'] = "Bearer [REDACTED]"
    else:
        req_copy.headers['authorization'] = "[REDACTED]"
    return curlify2.to_curl(req_copy)
def get_norm(norm):
    """
    Args:
        norm (str or callable): one of "BN", "SyncBN", "FrozenBN", "GN",
            "nnSyncBN"; an empty string (meaning "no norm"); or an
            already-constructed norm-layer callable, returned unchanged.

    Returns:
        nn.Module or None: the normalization layer
    """
    if isinstance(norm, str):
        if len(norm) == 0:
            return None
        # FIX: validate only string inputs. The assert previously ran before
        # the isinstance check, so the documented callable input and the
        # empty-string -> None branch could never be reached.
        support_norm_type = ['BN', 'SyncBN', 'FrozenBN', 'GN', 'nnSyncBN']
        assert norm in support_norm_type, 'Unknown norm type {}, support norm types are {}'.format(
            norm, support_norm_type)
        norm = {
            "BN": nn.BatchNorm2d,
            "SyncBN": NaiveSyncBatchNorm,
            "FrozenBN": FrozenBatchNorm2d,
            "GN": groupNorm,
            "nnSyncBN": nn.SyncBatchNorm,  # keep for debugging
        }[norm]
    return norm
def parse_scales_line(line):
    """Parse a "Scales:" line into a mapping of dataset name to scale.

    Args:
        line: string containing 'Scales:' followed by comma-separated
            "name: value" pairs, e.g. "Scales: a: 1.5, b: 2".

    Returns:
        scales_dict: dict mapping each stripped dataset name to its float
            scale value.
    """
    def advance_past_token(text, token):
        # Drop everything up to and including the first occurrence of token.
        # (Param renamed from `str`, which shadowed the builtin.)
        return text[text.find(token) + len(token):]

    scales_dict = {}
    remainder = advance_past_token(line, 'Scales:')
    # FIX: the loop variable previously shadowed the list being iterated
    # (`for pair_str in pair_str`), which worked only by accident.
    for pair in remainder.split(','):
        dname, scale = pair.split(':')
        scales_dict[dname.strip()] = float(scale)
    return scales_dict
def test_org_admin_get_own_user_info(org_admin_headers):
    """ services api allows org admin to get its own user info """
    # Org and user come from the same auth headers used for the request,
    # so the admin is asking about itself.
    org = org_admin_headers['CVE-API-ORG']
    user = org_admin_headers['CVE-API-USER']
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{org}/user/{user}',
        headers=org_admin_headers
    )
    assert res.status_code == 200
    # The returned payload must mention the requested user.
    response_contains(res, user)
def parse_remove_configuration(configuration):
    """
    Turns the configuration line of splitting into a name and a set of params.

    Args:
        configuration: iterable of (key, value) pairs describing the remove
            step, or None.

    Returns:
        (name, conf_dict): `name` is 'remove' suffixed with '_<key>' for every
        key except 'weights' and 'boost'; `conf_dict` preserves the pair
        order. Returns ("None", None) when configuration is None.
    """
    if configuration is None:
        return "None", None
    # FIX: removed a leftover debug print of the raw configuration.
    conf_dict = collections.OrderedDict(configuration)
    name = 'remove'
    for key in conf_dict:
        if key not in ('weights', 'boost'):
            name += '_' + key
    return name, conf_dict
def _calc_cost_grad_first(data_input, w, label, features):
    """Calculate the partial cost and gradient.

    Computes the hinge loss and its subgradient for a linear SVM over a
    single data partition.

    Args:
        data_input: stage-file reference readable by read_stage_file.
        w: current weight vector; re-initialized to zeros when its length
           does not match the partition's feature count.
        label: name of the label column (values presumably in {-1, +1} --
           TODO confirm with callers).
        features: list of feature column names.

    Returns:
        ([loss, grad, size_train], [labels, train_data]) -- partial sums
        plus the raw partition arrays.
    """
    train_data = read_stage_file(data_input, features + [label])
    size_train = train_data.shape[0]
    labels = train_data[label].values
    train_data = train_data[features].values
    if size_train > 0:
        dim = train_data.shape[1]
        if dim != len(w):
            w = np.zeros(dim, dtype=float)  # initial
        # Margin of each sample: y_i * <x_i, w>.
        prediction = (labels * np.dot(train_data, w))
        # hinge loss (select negative values)
        idx = np.nonzero((prediction - 1) < 0)
        loss = np.sum(1 - prediction[idx])
        # -y * x for all values lesser than 1
        grad = - np.dot(labels[idx], train_data[idx])
        return [loss, grad, size_train], [labels, train_data]
    else:
        # Empty partition: contributes nothing to cost or gradient.
        return [0, 0, size_train], [labels, train_data]
def mean_over_patches(dataframe, temps):
    """Compute the mean of the module temperatures over all patches of a module.

    Adds one `<agg>_temp` column per aggregate (min/max/mean/median) to
    `dataframe` in place, averaging each track's per-patch values.
    """
    for agg in ("min", "max", "mean", "median"):
        per_track = {track_id: np.mean(patch_values[agg])
                     for track_id, patch_values in temps.items()}
        dataframe["{}_temp".format(agg)] = pd.Series(per_track)
def subtract_dbm(dbm1: float, dbm2: float):
    """Subtracts two decibel-milliwatt values.

    Converts both to watts, subtracts linearly, and converts the difference
    back to dBm. NOTE(review): a non-positive watt difference (dbm2 >= dbm1)
    presumably fails inside watt_to_dbm (log of <= 0) -- confirm.
    """
    watt1 = dbm_to_watt(dbm1)
    watt2 = dbm_to_watt(dbm2)
    return watt_to_dbm(watt1 - watt2)
def _validateConfigFile(configFilePath):
    """
    Test a configuration file path to be sure it is usable in the plugin

    Uses a binary included in the project to test a given configuration file.
    The idea is to fail fast at startup for any configuration file issue.

    :param configFilePath: absolute path to a yaml file
    :return: the config-tester subprocess exit status (0 means valid)
    :raises ValueError: if configFilePath does not point to an existing file
        (invalid yaml content is reported via the return code, not raised)
    """
    if not os.path.isfile(configFilePath):
        raise ValueError('missing configuration file')
    # Delegate content validation to the bundled config-tester binary.
    return subprocess.call(
        [constants.LAYER_ALCHEMY_CONFIGTESTER_BIN, '--config', configFilePath, '--quiet']
    )
def focused_evaluate(board):
    """
    Given a board, return a numeric rating of how good
    that board is for the current player.
    A return value >= 1000 means that the current player has won;
    a return value <= -1000 means that the current player has lost

    Heuristic: 10 points per token in the current player's longest chain,
    plus a center-column preference, overridden by +/-1000 on a finished game.
    """
    score = board.longest_chain(board.get_current_player_id()) * 10
    # Prefer having your pieces in the center of the board.
    # (Connect Four board: 6 rows x 7 columns; column 3 is the center, so
    # abs(3-col) is the distance from center.)
    for row in range(6):
        for col in range(7):
            if board.get_cell(row, col) == board.get_current_player_id():
                score -= abs(3-col)
            elif board.get_cell(row, col) == board.get_other_player_id():
                score += abs(3-col)
    if board.is_game_over():
        # NOTE(review): is_win() is compared to player ids -- presumably it
        # returns the winning player's id; confirm against the board class.
        if int(board.is_win()) == int(board.get_current_player_id()):
            score = +1000;
            # Prefer faster wins: fewer tokens on the board is better.
            score -= board.num_tokens_on_board()
        elif int(board.is_win()) == int(board.get_other_player_id()):
            score = -1000
    return score
def tanh(x, name=None):
    """
    sparse tanh activation, requiring x to be a sparse coo or sparse csr tensor.

    .. math::

        out = tanh(x)

    Parameters:
        x (Tensor): The input Sparse Tensor with data type float32, float64.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Sparse Tensor with the same data type and shape as ``x`` .

    Raises:
        ValueError: if ``x`` is neither a sparse COO nor a sparse CSR tensor.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.fluid.framework import _test_eager_guard

            with _test_eager_guard():
                dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
                sparse_x = dense_x.to_sparse_coo(1)
                out = paddle.sparse.tanh(sparse_x)
    """
    assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"

    # Dispatch to the kernel matching the sparse storage format.
    if x.is_sparse_coo():
        return _C_ops.final_state_sparse_coo_tanh(x)
    elif x.is_sparse_csr():
        return _C_ops.final_state_sparse_csr_tanh(x)
    else:
        raise ValueError(
            "Currently, sparse.tanh only support the input of SparseCooTensor or SparseCsrTensor"
        )
def record(args, filename):
    """Record a snapshot in a json file, as specified by arguments in args.
    Return 0 on success, 1 on failure."""
    LOGGER.debug('In subcommand record.')
    os.chdir(args.project)
    projectpath = os.getcwd()
    # parse addons.make into a list of addons
    addons_list = []
    try:
        with open('addons.make', 'r') as addons_make:
            for line in addons_make.readlines():
                addons_list.append(line.rstrip())
    except IOError as exc:
        if exc.errno == errno.ENOENT:
            LOGGER.debug('No addons.make file found.')
        else:  # pragma: no cover
            raise
    # FIX: was `len(addons_list) is 0` -- identity comparison against an int
    # literal; use equality.
    if len(addons_list) == 0:
        LOGGER.info('No addons found.')
    # search config.make for OF location
    with open('config.make', 'r') as config_make:
        of_path = ''
        for line in config_make.readlines():
            if 'OF_ROOT =' in line:
                of_path = line.split('=', 1)[-1].strip()
                break
        if len(of_path) == 0:
            LOGGER.error('Did not find OF location in config.make in ' +
                         os.getcwd())
            return 1
    LOGGER.info('Processing OF at ' + of_path)
    os.chdir(of_path)
    core_dict = {'path': of_path}
    if validate_git_repo() != 0:
        LOGGER.error('OF git repo could not be validated successfully.')
        return 1
    LOGGER.debug('Recording commit SHA')
    out = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                  universal_newlines=True)
    core_dict['sha'] = out.strip()
    LOGGER.debug('OF commit SHA: ' + core_dict['sha'])
    LOGGER.info('Processing addons')
    addons_path = os.path.join(os.getcwd(), 'addons')
    os.chdir(addons_path)
    # get list of official addons
    official_addons = []
    with open('.gitignore', 'r') as gitignore_file:
        for line in gitignore_file:
            if line.startswith('!ofx'):
                official_addons.append(line[1:].strip())
    # prune official addons (which are in the OF repo already)
    # not very efficient (better with sets),
    # but irrelevant for the small lists we expect
    addons_list = [{'name': x}
                   for x
                   in addons_list
                   if x
                   not in official_addons]
    for addon in addons_list:
        LOGGER.info('Processing addon ' + addon['name'])
        try:
            os.chdir(os.path.join(addons_path, addon['name']))
        except Exception as exc:
            # NOTE(review): assumes the raised exception carries errno
            # (i.e. an OSError) -- confirm narrower except is safe here.
            if exc.errno == errno.ENOENT:
                LOGGER.error(addon['name'] + ' does not exist at ' +
                             addons_path + '.')
                sys.exit('Aborting')
            else:  # pragma: no cover
                raise
        ret = validate_git_repo(strict=False)
        if ret == 0:
            out_string = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                                 universal_newlines=True)
            addon['sha'] = out_string.strip()
        elif ret == 2:
            addon['sha'] = 'non-git'
        else:
            LOGGER.error(addon['name'] +
                         ' git repo could not be validated successfully.')
            return 1
    LOGGER.info('Storing metadata')
    os.chdir(projectpath)
    # Open/initialise metadata file
    try:
        with open(filename, 'r') as metafile:
            json_object = json.load(metafile)
        LOGGER.info('loaded data from ' + filename)
        LOGGER.debug(json_object)
    except IOError as exc:
        if exc.errno == errno.ENOENT:
            LOGGER.info(filename + ' does not exist yet. Creating..')
            open(filename, 'w').close()
            # create new skeleton json_object
            json_object = json.loads('{ "snapshots": [] }')
        else:  # pragma: no cover
            raise
    # Store/update metadata
    # check if snapshot entry already exists
    for entry in json_object['snapshots']:
        if entry['name'] == args.name:
            # FIX: was `args.name is not 'latest'` -- identity comparison
            # against a str literal is unreliable; use `!=`.
            if (args.update is False) and (args.name != 'latest'):
                LOGGER.error(args.name +
                             ': entry with the same name already exists. ' +
                             'Use -u option to overwrite.')
                return 1
            json_object['snapshots'].remove(entry)
    # write updated entry
    temp = {'name': args.name,
            'date': datetime.now().isoformat(),
            'description': args.description,
            'core': core_dict,
            'addons': addons_list}
    json_object['snapshots'].append(temp)
    LOGGER.info('Writing updated data to ' + filename)
    with open(filename, 'w') as metafile:
        json.dump(json_object, metafile, indent=1, sort_keys=True)
    return 0
def collector(monkeypatch):
    """
    Pytest fixture helper: build a SunPowerPVSupervisorCollector whose
    network-facing methods are replaced with stubs that report nothing.
    """
    supervisor = SunPowerPVSupervisorCollector(use_device_data_timestamp=False)
    stub = MagicMock()
    # Stub out every I/O-facing method so the collector yields no metrics
    # and performs no real network calls during tests.
    for method_name in ('connect', 'disconnect', 'info_metrics'):
        getattr(stub, method_name).return_value = []
        monkeypatch.setattr(supervisor, method_name, getattr(stub, method_name))
    return supervisor
def _setorder(req, stores):
"""Pull the password store ordering out of the req object"""
for store in stores.get_all_stores():
stores[store] = int(req.args.get(store.__class__.__name__, 0))
continue | 5,329,079 |
def GetParents_old(con, cur, term):
    """
    Get all the parents of the term in the ontology tree
    input:
    con,cur
    term : str
        The term for which to look for parents
    output:
    err : str
        Error message or empty string if ok
    parents : list of str
        the parents of term
    """
    # termid = dbidval.GetIdFromDescription(con, cur, 'OntologyTable', term)
    # Resolve the term to one or more ontology term ids.
    err, termids = get_term_ids(con, cur, term)
    if err:
        debug(3, err)
        return err, []
    # if termid < 0:
    # No direct match: fall back to the synonym table before giving up.
    if len(termids) == 0:
        err, termid = GetSynonymTermId(con, cur, term)
        if err:
            debug(3, 'ontology term not found for %s' % term)
            return 'ontolgy term %s not found' % term, []
        debug(2, 'converted synonym to termid')
        termids = [termid]
    # plist = [termid]
    # Breadth-first walk up the ontology tree starting from all term ids.
    plist = termids
    parents = [term]
    # Ids already expanded, to avoid revisiting shared ancestors.
    parents_id_set = set()
    while len(plist) > 0:
        cid = plist.pop(0)
        origid = cid
        if cid in parents_id_set:
            continue
        err, cparentids = GetTreeParentsById(con, cur, cid)
        if err:
            # NOTE(review): failed lookups are skipped without marking
            # origid as visited, so such ids may be retried if re-queued.
            continue
        plist.extend(cparentids)
        # NOTE: this loop variable deliberately shadows the outer `cid`;
        # origid preserves the dequeued id for the visited-set below.
        for cid in cparentids:
            err, cparent = dbidval.GetDescriptionFromId(con, cur, 'OntologyTable', cid)
            if err:
                continue
            parents.append(cparent)
        parents_id_set.add(origid)
    debug(2, 'found %d parents' % len(parents))
    return '', parents
def uploadResourceFileUsingSession(url, session, resourceName, fileName, fullPath, scannerId):
    """
    Upload a file for the resource - e.g. a custom lineage csv file.

    Works with csv, zip or dsx files (.csv|.zip|.dsx).

    :param url: base catalog URL
    :param session: authenticated requests session used for the POST
    :param resourceName: name of the catalog resource to attach the file to
    :param fileName: name the file should have on the server
    :param fullPath: local path of the file to upload
    :param scannerId: scanner id passed as the ``scannerid`` form field
    :return: HTTP status code of the upload (200 on success)
    """
    print(
        "uploading file for resource "
        + url
        + " resource="
        + resourceName
    )
    apiURL = url + "/access/1/catalog/resources/" + resourceName + "/files"
    print("\turl=" + apiURL)
    params = {"scannerid": scannerId, "filename": fileName, "optionid": "File"}
    print("\t" + str(params))
    # Pick MIME type / read mode from the file extension (default: csv text).
    mimeType = "text/csv"
    readMode = "rt"
    if fileName.endswith(".zip"):
        mimeType = "application/zip"
        readMode = "rb"
    elif fileName.endswith(".dsx"):
        mimeType = "text/plain"
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open after the request).
    with open(fullPath, readMode) as fileHandle:
        file = {"file": (fileName, fileHandle, mimeType)}
        print(f"\t{file}")
        uploadResp = session.post(
            apiURL,
            data=params,
            files=file,
        )
    print("\tresponse=" + str(uploadResp.status_code))
    if uploadResp.status_code != 200:
        # not valid - dump the response body to help diagnose the failure
        print("\tupload file failed")
        print("\t" + str(uploadResp))
        print("\t" + str(uploadResp.text))
    return uploadResp.status_code
def read_inc_stmt(line: str) -> tuple[Literal["inc"], str] | None:
    """Attempt to read an INCLUDE statement.

    Returns ``("inc", path)`` when the line matches, otherwise ``None``.
    """
    match = FRegex.INCLUDE.match(line)
    if match is None:
        return None
    return "inc", match.group(1)
def _clear_screen():
""" http://stackoverflow.com/questions/18937058/python-clear-screen-in-shell """
if platform.system() == "Windows":
tmp = os.system('cls') #for window
else:
tmp = os.system('clear') #for Linux
return True | 5,329,083 |
def prepare_ssh_command(config_name, server, hostname, client_id, tool, param, view_id=0, username='bluecat', timeout=30,
                        **kwargs):
    """
    Open an SSH session to a remote server and launch a diagnostic tool
    (ping/dig/traceroute) in a background thread.

    :param config_name: configuration name used to index ``management_result``
    :param server: server name used to index ``management_result``
    :param hostname: address of the host to open the SSH connection to
    :param client_id: identifier of the requesting client session
    :param tool: one of ``ping``, ``dig`` or ``traceroute``
    :param param: target passed to the selected tool (hostname or IP)
    :param view_id: DNS view id; when non-zero, ``dig`` binds to the view's
        loopback address if one can be found
    :param username: SSH login name
    :param timeout: SSH connection timeout in seconds
    :return: None; the SSH client and worker thread are stored in the
        global ``management_result`` for later retrieval/cleanup
    :raises Exception: when the key cannot be loaded or the connection fails
    """
    global management_result
    # The private key is shipped alongside this module under ../ssh/key.
    current_path = os.path.dirname(os.path.abspath(__file__))
    key_path = os.path.join(current_path, '../ssh/key')
    try:
        key = RSAKey.from_private_key_file(key_path)
    except SSHException:
        # paramiko only accepts PEM-format keys here.
        g.user.logger.error(
            'Exception trying to connect to server: SSH key must be in PEM format : ssh-keygen -m PEM')
        g.user.logger.error(traceback.format_exc())
        raise Exception('Failed to connect to server!')
    ssh = SSHClient()
    ssh.set_missing_host_key_policy(AutoAddPolicy())
    is_connected = ssh_open_connection(
        ssh, hostname, username, key, timeout, **kwargs)
    if not is_connected:
        raise Exception('Failed to connect to server!')
    dig_cmd = "dig {}"
    # For a specific DNS view, bind dig to that view's loopback address.
    if view_id != 0:
        loop_back_ip = get_loop_back_ip(ssh, view_id)
        if loop_back_ip:
            dig_cmd += ' -b {}'.format(loop_back_ip)
    tool_cmd = {
        "ping": "ping -c 10 {}",
        "dig": dig_cmd,
        "traceroute": "traceroute {}"
    }
    # A ':' in the target suggests an IPv6 address: switch to the v6 tools.
    if ":" in param:
        tool_cmd = {
            "ping": "ping6 -c 10 {}",
            "dig": dig_cmd,
            "traceroute": "traceroute6 {}"
        }
    cmd = tool_cmd.get(tool).format(param)
    # Reset the streamed-result slot before the worker starts writing to it.
    update_result_global_stream_result(
        config_name, server, client_id, tool, stream_result, "False")
    # Run the command asynchronously; output is streamed by exec_command.
    t = Thread(target=exec_command, args=(
        ssh, cmd, config_name, server, client_id, tool))
    t.start()
    # Keep handles so the caller can join the thread / close the session.
    management_result[config_name][server][client_id]['ssh'] = ssh
    management_result[config_name][server][client_id]['thread'] = t
def sent2vec(s, model):
    """
    Transform a sentence to a vector.
    Pre: No parameters may be None.
    Args:
        s: The sentence to transform.
        model: A word2vec model.
    Returns: A vector, representing the given sentence.
    """
    tokens = word_tokenize(s.lower())
    vectors = []
    for token in tokens:
        # Drop stopwords, non-alphabetic tokens and out-of-vocabulary words.
        if token in stop_words or not token.isalpha() or token not in model:
            continue
        vectors.append(model[token])
    if not vectors:
        # Sentence is empty after filtering: fall back to the null vector.
        return model.get_vector('null')
    # L2-normalized sum of the word vectors.
    summed = np.array(vectors).sum(axis=0)
    return summed / np.sqrt((summed ** 2).sum())
def validate_json_object(json_obj, obj):
    """Determines if the json template matches expected Batch object
    :param json_obj: json dictionary from template.
    :param obj: matched Batch object.
    :raises ValueError: when the template has an unknown element or a
        non-list value where the model declares a list.
    """
    # pylint:disable=protected-access
    from enum import Enum
    if issubclass(type(obj), Enum):
        return
    # Build lowercase-key lookups from the model's attribute map.
    attr_by_key = {}
    type_by_key = {}
    for attr, meta in obj._attribute_map.items():
        lowered = meta['key'].lower()
        type_by_key[lowered] = meta['type']
        attr_by_key[lowered] = attr
    for item in json_obj:
        lowered = item.lower()
        if lowered not in type_by_key:
            raise ValueError('Unexpect element {} in template'.format(item))
        declared = type_by_key[lowered]
        # Primitive types (and lists of primitives) need no recursion.
        if declared[0].islower() or (declared[0] == '[' and declared[1].islower()):
            continue
        value = getattr(obj, attr_by_key[lowered])
        if declared[0] == '[':
            if not isinstance(json_obj[item], list):
                raise ValueError('Expect element {} is list in template'.format(item))
            # Validate each list element against the matched model element.
            for idx in range(len(json_obj[item])):
                validate_json_object(json_obj[item][idx], value[idx])
        else:
            validate_json_object(json_obj[item], value)
def subnet_group_present(
    name,
    subnet_ids=None,
    subnet_names=None,
    description=None,
    tags=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure ElastiCache subnet group exists.
    .. versionadded:: 2015.8.0
    name
        The name for the ElastiCache subnet group. This value is stored as a lowercase string.
    subnet_ids
        A list of VPC subnet IDs for the cache subnet group. Exclusive with subnet_names.
    subnet_names
        A list of VPC subnet names for the cache subnet group. Exclusive with subnet_ids.
    description
        Subnet group description.
    tags
        A list of tags.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    exists = __salt__["boto_elasticache.subnet_group_exists"](
        name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile
    )
    # Nothing to do when the group is already there.
    if exists:
        ret["comment"] = "Subnet group present."
        return ret
    # In test mode, only report what would happen.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Subnet group {} is set to be created.".format(name)
        return ret
    created = __salt__["boto_elasticache.create_subnet_group"](
        name=name,
        subnet_ids=subnet_ids,
        subnet_names=subnet_names,
        description=description,
        tags=tags,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if not created:
        ret["result"] = False
        ret["comment"] = "Failed to create {} subnet group.".format(name)
        return ret
    ret["changes"] = {"old": None, "new": name}
    ret["comment"] = "Subnet group {} created.".format(name)
    return ret
def GetTensorFlowVersion(vm):
    """Returns the version of tensorflow installed on the vm.
    Args:
      vm: the target vm on which to check the tensorflow version
    Returns:
      installed python tensorflow version as a string
    """
    # Pipe a tiny probe script into the environment's python interpreter.
    probe_cmd = ('echo -e "import tensorflow\nprint(tensorflow.__version__)" '
                 '| {0} python').format(GetEnvironmentVars(vm))
    stdout, _ = vm.RemoteCommand(probe_cmd)
    return stdout.strip()
def process_step_collect_parse(project, step, process_result, format_args=None):
    """
    Parse report fragments out of a shell step's captured output.

    :type step: structures.project_step.ProjectStep
    :type project: structures.project.Project
    :type process_result: proc.step.step_shell.ProcessStepResult
    :return: list of report strings found between the configured
        start/stop markers, whitespace-stripped; empty list when there is
        no captured output.
    """
    logger.debug(f'parsing output artifacts')
    if not process_result.output:
        logger.warning(f'Empty output received, make sure the field output is set to \n'
                       f'output: log+stdout \n'
                       f'in order to capture output of the shell step')
        return []
    index = 0
    output = process_result.output
    start = step.collect.parse.start
    # BUG FIX: the end marker was previously read from .start as well, so
    # any configuration with distinct start/stop markers never matched.
    stop = step.collect.parse.stop
    ls, le = len(start), len(stop)
    length = len(output)
    reports = list()
    while True:
        # no more reports found
        s = output.find(start, index, length)
        if s == -1:
            break
        # broken report: start marker without a matching end marker
        e = output.find(stop, s + 1, length)
        if e == -1:
            logger.debug(f'could not find end of the report file while parsing the output')
            break
        # resume scanning after the end marker
        index = e + le + 1
        reports.append(output[s + ls:e].strip())
    return reports
async def test_hello_world(client):
    """Test Hello World."""
    resp = await client.get('/hello-world', headers={'Accept': 'application/json'})
    assert resp.status == 200
    assert 'application/json' in resp.headers['Content-Type']
    # The body must be exactly {"Hello": "World"}.
    payload = json.loads(await resp.text())
    assert list(payload) == ['Hello']
    assert payload['Hello'] == 'World'
def datetime_column_evrs():
    """hand-crafted EVRS for datetime columns"""
    fixture_path = file_relative_path(
        __file__, "../fixtures/datetime_column_evrs.json"
    )
    # Preserve key order so the loaded suite matches the fixture exactly.
    with open(fixture_path) as infile:
        payload = json.load(infile, object_pairs_hook=OrderedDict)
    return expectationSuiteValidationResultSchema.load(payload)
def _write_dihedral_information(xml_file, structure, ref_energy):
    """Write dihedrals in the system.
    Parameters
    ----------
    xml_file : file object
        The file object of the hoomdxml file being written
    structure : parmed.Structure
        Parmed structure object
    ref_energy : float, default=1.0
        Reference energy for conversion to reduced units
    """
    unique_dihedral_types = set()
    xml_file.write('<dihedral>\n')
    for dihedral in structure.rb_torsions:
        t1, t2 = dihedral.atom1.type, dihedral.atom2.type,
        t3, t4 = dihedral.atom3.type, dihedral.atom4.type
        # Canonicalize the type name so A-B-C-D and D-C-B-A map to one type,
        # keyed on the sorted order of the two central atoms.
        if [t2, t3] == sorted([t2, t3]):
            types_in_dihedral = '-'.join((t1, t2, t3, t4))
        else:
            types_in_dihedral = '-'.join((t4, t3, t2, t1))
        # Include the RB coefficients and scaling factors in the key so
        # same-named types with different parameters stay distinct.
        dihedral_type = (types_in_dihedral, dihedral.type.c0,
            dihedral.type.c1, dihedral.type.c2, dihedral.type.c3, dihedral.type.c4,
            dihedral.type.c5, dihedral.type.scee, dihedral.type.scnb)
        unique_dihedral_types.add(dihedral_type)
        xml_file.write('{} {} {} {} {}\n'.format(
            dihedral_type[0], dihedral.atom1.idx, dihedral.atom2.idx,
            dihedral.atom3.idx, dihedral.atom4.idx))
    xml_file.write('</dihedral>\n')
    xml_file.write('<dihedral_coeffs>\n')
    xml_file.write('<!-- type k1 k2 k3 k4 -->\n')
    # Convert each unique type from Ryckaert-Bellemans to OPLS form and
    # scale into reduced units before writing the coefficient table.
    for dihedral_type, c0, c1, c2, c3, c4, c5, scee, scnb in unique_dihedral_types:
        opls_coeffs = RB_to_OPLS(c0, c1, c2, c3, c4, c5)
        opls_coeffs /= ref_energy
        xml_file.write('{} {:.5f} {:.5f} {:.5f} {:.5f}\n'.format(
            dihedral_type, *opls_coeffs))
    xml_file.write('</dihedral_coeffs>\n')
def chat_header_args(panel_vars: List[PanelVariable], parsed_args: Dict) -> List:
    """Creates a list of tuples containing the passed in arguments from the chat command.
    Args:
        panel_vars (list(nautobot_plugin_chatops_grafana.models.PanelVariable)): A list of PanelVariable objects.
        parsed_args (dict): Dictionary of parsed arguments from argparse.
    Returns:
        args (List): List of tuples containing the arguments passed into the chat command.
    Examples:
        >>> print(chat_header_args([PanelVariable(name="test")], {"test": "testing", "timespan": "P12M"}))
        [("test", "testing"), ("timespan", "timespan=P12M")]
    """
    collected = []
    # Keep any panel-variable argument that differs from its default response.
    for variable in panel_vars:
        supplied = parsed_args.get(variable.name)
        if supplied and supplied != variable.response:
            collected.append((variable.name, supplied))
    # Do the same for the handler's default parameters, skipping names
    # already collected from the panel variables.
    taken = {entry[0] for entry in collected}
    for def_param, def_value in handler.default_params.items():
        supplied = parsed_args.get(def_param)
        if supplied and supplied != def_value and def_param not in taken:
            collected.append((def_param, f"{def_param}={supplied}"))
    return collected
def pd_df_timeseries():
    """Create a pandas dataframe for testing, with timeseries in one column"""
    # 100 consecutive days starting 2018-01-01 plus random ints in [0, 100).
    frame = pd.DataFrame()
    frame["time"] = pd.date_range(start="1/1/2018", periods=100)
    frame["A"] = np.random.randint(0, 100, size=100)
    return frame
def n_tokens(doc: Union[Doc, Span]):
    """Return number of words in the document."""
    # Counts only the tokens kept by the `_filtered_tokens` extension.
    filtered = doc._._filtered_tokens
    return len(filtered)
def getJobs(numJobs=1):
    """
    Return a list of dictionary data as provided to the plugin `submit` method

    :param numJobs: how many job dictionaries to produce
    :return: list of deep-copied job dicts, each with a unique
        id/jobid/name
    """
    # NOTE: Python 2 code - the `1L` long literals and the `long()` calls
    # below are not valid Python 3 syntax.
    # Template job record mimicking what WMAgent hands to SimpleCondorPlugin.
    job = {'allowOpportunistic': False,
           'bulkid': None,
           'cache_dir': TEST_DIR + '/JobCollection_1_0/job_1',
           'estimatedDiskUsage': 5000000,
           'estimatedJobTime': 28800,
           'estimatedMemoryUsage': 6000.0,
           'gridid': None,
           'id': 1L,
           'inputDataset': '/HLTPhysics/Run2017B-PromptReco-v1/AOD',
           'inputDatasetLocations': ['T2_CH_CERN_HLT', 'T2_CH_CERN'],
           'jobid': 1L,
           'location': 'T2_CH_CERN',
           'name': '934a7f0d-2934-4939-b366-0a9efe0df15e-0',
           'numberOfCores': 8,
           'packageDir': TEST_DIR + '/batch_1-0',
           'plugin': 'SimpleCondorPlugin',
           'possibleSites': [u'T2_CH_CERN', u'T1_US_FNAL'],
           'potentialSites': frozenset([u'T1_US_FNAL', u'T2_CH_CERN']),
           'proxyPath': None,
           'request_name': 'amaltaro_test_submission_180620_105409_2045',
           'retry_count': 0L,
           'sandbox': TEST_DIR + '/Blah-Sandbox.tar.bz2',
           'scramArch': ['slc6_amd64_gcc630'],
           'siteName': u'T2_CH_CERN',
           'site_cms_name': 'T2_CH_CERN',
           'status': None,
           'status_time': None,
           'swVersion': ['CMSSW_9_4_0'],
           'taskPriority': 0L,
           'task_id': 383L,
           'task_name': '/amaltaro_test_submission_180620_105409_2045/Blah_Task',
           'task_type': 'Processing',
           'userdn': '/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=amaltaro/CN=718748/CN=Alan Malta Rodrigues',
           'usergroup': 'unknown',
           'userrole': 'unknown',
           'wf_priority': 420000L}
    jobs = []
    # Give every copy a distinct id/jobid and a fresh UUID-based name;
    # deepcopy so later mutations of one job do not leak into the others.
    for i in range(0, numJobs):
        job.update({'id': long(i), 'jobid': long(i), 'name': makeUUID()})
        jobs.append(deepcopy(job))
    return jobs
def create_test_user():
"""Creates a new user with random username for testing
If two randomly assigned usernames overlap, it will fail
"""
UserModel = get_user_model()
username = '%s_%s' % ('test', uuid4().get_hex()[:10],)
user = UserModel.objects.create(username=username)
return user | 5,329,097 |
def make_logical_or_tests(options):
    """Make a set of tests to do logical_or."""
    # Delegates to the shared logical-op test factory; one TensorFlow
    # failure is expected for this op.
    return _make_logical_tests(tf.logical_or)(options, expected_tf_failures=1)
def pushd(working_directory: Union[os.PathLike, str]) -> Iterator[None]:
    """Change the current working directory for a block of code.

    Restores the previous working directory on exit, even on error.
    """
    previous = os.getcwd()
    try:
        os.chdir(working_directory)
        yield
    finally:
        # Always return to where we started.
        os.chdir(previous)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.