content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def _manually_create_user(username, pw):
    """
    Create an *active* user, its server directory, and return its userdata dictionary.

    :param username: str
    :param pw: str
    :return: dict -- the per-user record stored in ``server.userdata``
    """
    hashed_pw = server._encrypt_password(pw)
    # Build the default on-disk structure and start from the state it reports.
    record = server.init_user_directory(username)
    record[server.USER_IS_ACTIVE] = True
    record[server.PWD] = hashed_pw
    record[server.USER_CREATION_TIME] = server.now_timestamp()
    # Fresh, empty sharing bookkeeping for the new account.
    for share_key in ('shared_with_me', 'shared_with_others', 'shared_files'):
        record[share_key] = {}
    server.userdata[username] = record
    return record
def update_geoscale(df, to_scale):
    """
    Updates df['Location'] based on specified to_scale

    :param df: df, requires Location column
    :param to_scale: str, target geoscale
    :return: df, with 5 digit fips
    """
    # "Location" holds FIPS codes: state = first two digits, zero-padded to 5.
    if to_scale == 'state':
        truncated = df['Location'].apply(lambda loc: str(loc[0:2]))
        df.loc[:, 'Location'] = truncated
        # pad zeros
        df.loc[:, 'Location'] = df['Location'].apply(
            lambda loc: loc if len(loc) >= 5 else loc.ljust(3 + len(loc), '0'))
    elif to_scale == 'national':
        df.loc[:, 'Location'] = US_FIPS
    return df
def plot_throughput_speedup_analysis(plot, input_data):
    """Generate the plot(s) with algorithm:
    plot_throughput_speedup_analysis
    specified in the specification file.
    :param plot: Plot to generate.
    :param input_data: Data to process.
    :type plot: pandas.Series
    :type input_data: InputData
    """
    # Transform the data
    plot_title = plot.get("title", "")
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(plot.get("type", ""), plot_title))
    data = input_data.filter_data(plot)
    if data is None:
        logging.error("No data.")
        return
    # Collect per-test throughput samples keyed by core count ("1"/"2"/"4").
    y_vals = dict()
    y_tags = dict()
    for job in data:
        for build in job:
            for test in build:
                if y_vals.get(test["parent"], None) is None:
                    y_vals[test["parent"]] = {"1": list(),
                                              "2": list(),
                                              "4": list()}
                    y_tags[test["parent"]] = test.get("tags", None)
                try:
                    if test["type"] in ("NDRPDR",):
                        # NDR vs PDR is chosen from the plot title suffix.
                        if "-pdr" in plot_title.lower():
                            ttype = "PDR"
                        elif "-ndr" in plot_title.lower():
                            ttype = "NDR"
                        else:
                            continue
                        if "1C" in test["tags"]:
                            y_vals[test["parent"]]["1"]. \
                                append(test["throughput"][ttype]["LOWER"])
                        elif "2C" in test["tags"]:
                            y_vals[test["parent"]]["2"]. \
                                append(test["throughput"][ttype]["LOWER"])
                        elif "4C" in test["tags"]:
                            y_vals[test["parent"]]["4"]. \
                                append(test["throughput"][ttype]["LOWER"])
                except (KeyError, TypeError):
                    # Tests without throughput data are silently skipped.
                    pass
    if not y_vals:
        logging.warning("No data for the plot '{}'".
                        format(plot.get("title", "")))
        return
    # Average the samples and find the best per-core rate (Mpps/core) used to
    # draw the perfect-speedup reference line.
    y_1c_max = dict()
    for test_name, test_vals in y_vals.items():
        for key, test_val in test_vals.items():
            if test_val:
                avg_val = sum(test_val) / len(test_val)
                # Replace the sample list with (mean, sample count).
                y_vals[test_name][key] = (avg_val, len(test_val))
                ideal = avg_val / (int(key) * 1000000.0)
                if test_name not in y_1c_max or ideal > y_1c_max[test_name]:
                    y_1c_max[test_name] = ideal
    vals = dict()
    y_max = list()
    nic_limit = 0
    lnk_limit = 0
    pci_limit = plot["limits"]["pci"]["pci-g3-x8"]
    for test_name, test_vals in y_vals.items():
        try:
            if test_vals["1"][1]:
                name = "-".join(test_name.split('-')[1:-1])
                # Wrap long legend names with a <br> near the 50-char mark.
                if len(name) > 50:
                    name_lst = name.split('-')
                    name = ""
                    split_name = True
                    for segment in name_lst:
                        if (len(name) + len(segment) + 1) > 50 and split_name:
                            name += "<br>"
                            split_name = False
                        name += segment + '-'
                    name = name[:-1]
                vals[name] = dict()
                y_val_1 = test_vals["1"][0] / 1000000.0
                y_val_2 = test_vals["2"][0] / 1000000.0 if test_vals["2"][0] \
                    else None
                y_val_4 = test_vals["4"][0] / 1000000.0 if test_vals["4"][0] \
                    else None
                vals[name]["val"] = [y_val_1, y_val_2, y_val_4]
                vals[name]["rel"] = [1.0, None, None]
                vals[name]["ideal"] = [y_1c_max[test_name],
                                       y_1c_max[test_name] * 2,
                                       y_1c_max[test_name] * 4]
                vals[name]["diff"] = [(y_val_1 - y_1c_max[test_name]) * 100 /
                                      y_val_1, None, None]
                vals[name]["count"] = [test_vals["1"][1],
                                       test_vals["2"][1],
                                       test_vals["4"][1]]
                try:
                    # NOTE(review): max() of two lists compares them
                    # lexicographically, and the lists may contain None; this
                    # relies on Python-2 None ordering (see .iteritems below)
                    # and would raise TypeError under Python 3 -- confirm.
                    val_max = max(max(vals[name]["val"], vals[name]["ideal"]))
                except ValueError as err:
                    logging.error(err)
                    continue
                if val_max:
                    # Round the axis ceiling up to the next multiple of 10.
                    y_max.append(int((val_max / 10) + 1) * 10)
                if y_val_2:
                    vals[name]["rel"][1] = round(y_val_2 / y_val_1, 2)
                    vals[name]["diff"][1] = \
                        (y_val_2 - vals[name]["ideal"][1]) * 100 / y_val_2
                if y_val_4:
                    vals[name]["rel"][2] = round(y_val_4 / y_val_1, 2)
                    vals[name]["diff"][2] = \
                        (y_val_4 - vals[name]["ideal"][2]) * 100 / y_val_4
        except IndexError as err:
            logging.warning("No data for '{0}'".format(test_name))
            logging.warning(repr(err))
        # Limits:
        # NIC hardware limit inferred from the NIC model in the test name.
        if "x520" in test_name:
            limit = plot["limits"]["nic"]["x520"]
        elif "x710" in test_name:
            limit = plot["limits"]["nic"]["x710"]
        elif "xxv710" in test_name:
            limit = plot["limits"]["nic"]["xxv710"]
        elif "xl710" in test_name:
            limit = plot["limits"]["nic"]["xl710"]
        elif "x553" in test_name:
            limit = plot["limits"]["nic"]["x553"]
        else:
            limit = 0
        if limit > nic_limit:
            nic_limit = limit
        # Link limit: dual-port ("ge2p") tests double the link capacity.
        mul = 2 if "ge2p" in test_name else 1
        if "10ge" in test_name:
            limit = plot["limits"]["link"]["10ge"] * mul
        elif "25ge" in test_name:
            limit = plot["limits"]["link"]["25ge"] * mul
        elif "40ge" in test_name:
            limit = plot["limits"]["link"]["40ge"] * mul
        elif "100ge" in test_name:
            limit = plot["limits"]["link"]["100ge"] * mul
        else:
            limit = 0
        if limit > lnk_limit:
            lnk_limit = limit
    # Sort the tests
    order = plot.get("sort", None)
    if order and y_tags:
        y_sorted = OrderedDict()
        y_tags_l = {s: [t.lower() for t in ts] for s, ts in y_tags.items()}
        for tag in order:
            for test, tags in y_tags_l.items():
                if tag.lower() in tags:
                    name = "-".join(test.split('-')[1:-1])
                    try:
                        y_sorted[name] = vals.pop(name)
                        y_tags_l.pop(test)
                    except KeyError as err:
                        logging.error("Not found: {0}".format(err))
                    finally:
                        # Only the first test matching each tag is taken.
                        break
    else:
        y_sorted = vals
    traces = list()
    annotations = list()
    x_vals = [1, 2, 4]
    # Limits:
    try:
        threshold = 1.1 * max(y_max)  # 10%
    except ValueError as err:
        logging.error(err)
        return
    # Dotted capacity lines (NIC / link / PCIe) are drawn only when they fall
    # within 110% of the tallest measured/ideal value.
    nic_limit /= 1000000.0
    if nic_limit < threshold:
        traces.append(plgo.Scatter(
            x=x_vals,
            y=[nic_limit, ] * len(x_vals),
            name="NIC: {0:.2f}Mpps".format(nic_limit),
            showlegend=False,
            mode="lines",
            line=dict(
                dash="dot",
                color=COLORS[-1],
                width=1),
            hoverinfo="none"
        ))
        annotations.append(dict(
            x=1,
            y=nic_limit,
            xref="x",
            yref="y",
            xanchor="left",
            yanchor="bottom",
            text="NIC: {0:.2f}Mpps".format(nic_limit),
            font=dict(
                size=14,
                color=COLORS[-1],
            ),
            align="left",
            showarrow=False
        ))
        y_max.append(int((nic_limit / 10) + 1) * 10)
    lnk_limit /= 1000000.0
    if lnk_limit < threshold:
        traces.append(plgo.Scatter(
            x=x_vals,
            y=[lnk_limit, ] * len(x_vals),
            name="Link: {0:.2f}Mpps".format(lnk_limit),
            showlegend=False,
            mode="lines",
            line=dict(
                dash="dot",
                color=COLORS[-2],
                width=1),
            hoverinfo="none"
        ))
        annotations.append(dict(
            x=1,
            y=lnk_limit,
            xref="x",
            yref="y",
            xanchor="left",
            yanchor="bottom",
            text="Link: {0:.2f}Mpps".format(lnk_limit),
            font=dict(
                size=14,
                color=COLORS[-2],
            ),
            align="left",
            showarrow=False
        ))
        y_max.append(int((lnk_limit / 10) + 1) * 10)
    pci_limit /= 1000000.0
    if pci_limit < threshold:
        traces.append(plgo.Scatter(
            x=x_vals,
            y=[pci_limit, ] * len(x_vals),
            name="PCIe: {0:.2f}Mpps".format(pci_limit),
            showlegend=False,
            mode="lines",
            line=dict(
                dash="dot",
                color=COLORS[-3],
                width=1),
            hoverinfo="none"
        ))
        annotations.append(dict(
            x=1,
            y=pci_limit,
            xref="x",
            yref="y",
            xanchor="left",
            yanchor="bottom",
            text="PCIe: {0:.2f}Mpps".format(pci_limit),
            font=dict(
                size=14,
                color=COLORS[-3],
            ),
            align="left",
            showarrow=False
        ))
        y_max.append(int((pci_limit / 10) + 1) * 10)
    # Perfect and measured:
    cidx = 0
    # NOTE(review): dict.iteritems() is Python 2 only; under Python 3 this
    # would need .items() -- left unchanged here.
    for name, val in y_sorted.iteritems():
        hovertext = list()
        try:
            for idx in range(len(val["val"])):
                htext = ""
                if isinstance(val["val"][idx], float):
                    htext += "No. of Runs: {1}<br>" \
                             "Mean: {0:.2f}Mpps<br>".format(val["val"][idx],
                                                            val["count"][idx])
                if isinstance(val["diff"][idx], float):
                    htext += "Diff: {0:.0f}%<br>".format(round(val["diff"][idx]))
                if isinstance(val["rel"][idx], float):
                    htext += "Speedup: {0:.2f}".format(val["rel"][idx])
                hovertext.append(htext)
            # Measured throughput: solid line with markers.
            traces.append(plgo.Scatter(x=x_vals,
                                       y=val["val"],
                                       name=name,
                                       legendgroup=name,
                                       mode="lines+markers",
                                       line=dict(
                                           color=COLORS[cidx],
                                           width=2),
                                       marker=dict(
                                           symbol="circle",
                                           size=10
                                       ),
                                       text=hovertext,
                                       hoverinfo="text+name"
                                       ))
            # Perfect linear speedup: dashed line in the same legend group.
            traces.append(plgo.Scatter(x=x_vals,
                                       y=val["ideal"],
                                       name="{0} perfect".format(name),
                                       legendgroup=name,
                                       showlegend=False,
                                       mode="lines",
                                       line=dict(
                                           color=COLORS[cidx],
                                           width=2,
                                           dash="dash"),
                                       text=["Perfect: {0:.2f}Mpps".format(y)
                                             for y in val["ideal"]],
                                       hoverinfo="text"
                                       ))
            cidx += 1
        except (IndexError, ValueError, KeyError) as err:
            logging.warning("No data for '{0}'".format(name))
            logging.warning(repr(err))
    try:
        # Create plot
        logging.info("  Writing file '{0}{1}'.".
                     format(plot["output-file"], plot["output-file-type"]))
        layout = deepcopy(plot["layout"])
        if layout.get("title", None):
            layout["title"] = "<b>Speedup Multi-core:</b> {0}". \
                format(layout["title"])
        layout["annotations"].extend(annotations)
        plpl = plgo.Figure(data=traces, layout=layout)
        # Export Plot
        ploff.plot(plpl,
                   show_link=False, auto_open=False,
                   filename='{0}{1}'.format(plot["output-file"],
                                            plot["output-file-type"]))
    except PlotlyError as err:
        logging.error("  Finished with error: {}".
                      format(str(err).replace("\n", " ")))
    return
def translate(text, from_lang="auto", to_lang="zh-CN"):
    """translate text, return the result as json

    Builds a Google Translate web-endpoint URL, performs an HTTP GET with a
    browser User-Agent, and returns the decoded JSON payload.

    :param text: text to translate
    :param from_lang: source language code ("auto" = detect)
    :param to_lang: target language code
    :return: parsed JSON response
    """
    # FIX: this function used the Python-2-only urllib2 module while the rest
    # of the file targets Python 3; local imports keep the change contained.
    import urllib.parse
    import urllib.request
    url = 'https://translate.googleapis.com/translate_a/single?'
    params = []
    params.append('client=gtx')
    params.append('sl=' + from_lang)
    params.append('tl=' + to_lang)
    params.append('hl=en-US')
    params.append('dt=t')
    params.append('dt=bd')
    params.append('dj=1')
    params.append('source=input')
    params.append(urllib.parse.urlencode({'q': text}))
    url += '&'.join(params)
    req = urllib.request.Request(url)
    browser = "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"
    req.add_header('User-Agent', browser)
    # FIX: context manager guarantees the response is closed even when
    # reading/decoding raises (the original never closed it).
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read().decode('utf8'))
def backproject_to_plane(cam, img_pt, plane):
    """Back-project an image point onto a specified world plane.

    :param cam: camera triple (intrinsics, rotation, translation) as matrices
    :param img_pt: 2-d image point
    :param plane: length-4 array (nx, ny, nz, d) describing the plane
    :return: 3x1 world-space intersection point
    """
    # Lift the pixel to normalized (homogeneous) image coordinates.
    homog_pt = np.array(list(img_pt) + [1.0])
    norm_pt = np.matrix(npl.solve(cam[0], homog_pt))
    rot_t = cam[1].transpose()
    normal = np.matrix(plane[:3]).flatten()
    plane_d = plane.flat[3]
    cam_term = rot_t * cam[2]
    ray = rot_t * norm_pt.transpose()
    # Scale the ray so it lands on the plane, then shift by the camera term.
    return ray * (np.dot(normal, cam_term) - plane_d) / np.dot(normal, ray) - cam_term
def build_base():
    """
    Remotely build base python image with all installed packages on image-factory server
    """
    # Fabric task: upload the requirements file, then docker-build remotely.
    with lcd(env.local_path):
        # Copy the local requirements into the remote build context.
        put('./requirements.txt', '/srv/build/requirements.txt')
        with cd('/srv/build'):
            # The image tag comes from the fabric env configuration.
            run('docker build -t {base_image_name} .'.format(
                base_image_name=env.base_image_name,
            ))
def create_tables(engine):
    """
    Create all tables according to metadata of Base.
    Args:
        engine(instance): _engine.Engine instance
    """
    # Idempotent: SQLAlchemy's create_all skips tables that already exist.
    Base.metadata.create_all(engine)
def test_detail_attributes(factory: APIRequestFactory) -> None:
    """You can update primary data attributes."""
    # Build a JSON:API-shaped PUT payload targeting artist pk=1.
    request = factory.put(
        reverse("artist-detail", kwargs={"pk": 1}),
        {
            "data": {
                "id": "1",
                "type": "artist",
                "attributes": {"firstName": "Art", "lastName": "Blakey"},
            }
        },
    )
    view_detail = ArtistViewSet.as_view({"put": "update"})
    response = view_detail(request, pk=1)
    response.render()
    # The response must use the JSON:API media type and echo the update back.
    assert response["Content-Type"] == "application/vnd.api+json"
    assert json.loads(response.content) == {
        "data": {
            "id": "1",
            "type": "artist",
            "attributes": {"firstName": "Art", "lastName": "Blakey"},
        }
    }
    # The stored object must reflect the camelCase payload as snake_case fields.
    artist = get_artists().get(1)
    assert artist.id == 1
    assert artist.first_name == "Art"
    assert artist.last_name == "Blakey"
def format_cell(cell, datetime_fmt=None):
    """Render a single cell value.

    Datetime values are formatted with *datetime_fmt* when one is given;
    everything else is returned unchanged.
    """
    wants_formatting = bool(datetime_fmt) and isinstance(cell, datetime)
    return cell.strftime(datetime_fmt) if wants_formatting else cell
def optimize(gradients, optim, global_step, summaries, global_norm=None, global_norm_clipped=None, appendix=''):
    """Modified from sugartensor

    Apply *optim* to *gradients* and return the gradient-update op.

    :param gradients: list of (gradient, variable) pairs as produced by
        ``optimizer.compute_gradients``
    :param optim: optimizer instance
    :param global_step: step counter incremented by the update op
    :param summaries: summary names to emit; defaults to
        ["loss", "learning_rate"] when None
    :param global_norm: pre-computed global gradient norm (only used by the
        commented-out summary code below)
    :param global_norm_clipped: clipped global norm (same as above)
    :param appendix: suffix appended to summary tags
    :return: the op returned by ``optim.apply_gradients``
    """
    # Add Summary
    if summaries is None:
        summaries = ["loss", "learning_rate"]
    # if "gradient_norm" in summaries:
    #     if global_norm is None:
    #         tf.summary.scalar("global_norm/gradient_norm" + appendix,
    #                           clip_ops.global_norm(list(zip(*gradients))[0]))
    #     else:
    #         tf.summary.scalar("global_norm/gradient_norm" + appendix,
    #                           global_norm)
    #     if global_norm_clipped is not None:
    #         tf.summary.scalar("global_norm/gradient_norm_clipped" + appendix,
    #                           global_norm_clipped)
    # Add histograms for variables, gradients and gradient norms.
    # NOTE(review): grad_values/var_name below feed only the commented-out
    # summary calls, so this loop is currently dead code, kept for when the
    # summaries are re-enabled.
    for gradient, variable in gradients:
        if isinstance(gradient, ops.IndexedSlices):
            grad_values = gradient.values
        else:
            grad_values = gradient
        if grad_values is not None:
            var_name = variable.name.replace(":", "_")
            # if "gradients" in summaries:
            #     tf.summary.histogram("gradients/%s" % var_name, grad_values)
            # if "gradient_norm" in summaries:
            #     tf.summary.scalar("gradient_norm/%s" % var_name,
            #                       clip_ops.global_norm([grad_values]))
    # Gradient Update OP
    return optim.apply_gradients(gradients, global_step=global_step)
def p_stage_macro(p):
    """stage : MACRO"""
    # NOTE: the docstring above is a PLY/yacc grammar rule, not documentation;
    # it must not be edited for readability.
    # Build a STAGE parse-tree node with the raw macro token as its child.
    p[0] = ParseTreeNode('STAGE')
    p[0].add_child(ParseTreeNode('MACRO', raw=p[1]))
def _get_desired_asg_capacity(region, stack_name):
    """Retrieve the desired capacity of the autoscaling group for a specific cluster."""
    asg_conn = boto3.client("autoscaling", region_name=region)
    # Find the ASG by looking up the tag whose value equals the stack name.
    tags = asg_conn.describe_tags(Filters=[{"Name": "value", "Values": [stack_name]}])
    # NOTE(review): assumes exactly one matching tag; [0] raises IndexError
    # when the stack has no tagged ASG -- confirm callers guarantee this.
    asg_name = tags.get("Tags")[0].get("ResourceId")
    response = asg_conn.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    return response["AutoScalingGroups"][0]["DesiredCapacity"]
def _default_geo_type_precision():
""" default digits after decimal for geo types """
return 4 | 5,330,512 |
def print_prog_bar(iteration, total, prefix='', suffix='',
                   decimals=2, length=90, fill='█'):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required : current iteration (Int)
        total       - Required : total iterations (Int)
        prefix      - Optional : prefix string (Str)
        suffix      - Optional : suffix string (Str)
        decimals    - Optional : number of decimals in percent complete (Int)
        length      - Optional : character length of bar (Int)
        fill        - Optional : bar fill character (Str)
    """
    pct_template = "{0:." + str(decimals) + "f}"
    percent = pct_template.format(100 * (iteration / float(total)))
    # Proportion of the bar that is filled, in whole characters.
    done_chars = int(length * iteration // total)
    bar = fill * done_chars + '-' * (length - done_chars)
    print('\r%s |%s| %s%% %s\r' % (prefix, bar, percent, suffix))
    # Print New Line on Complete
    if iteration == total:
        print()
def load_movietimes(filepath_timestamps, filepath_daq):
    """Load daq and cam time stamps, create muxer

    :param filepath_timestamps: CSV with per-frame 'sample' and 'movie_frame' columns
    :param filepath_daq: HDF5 file with 'systemtime' and 'samplenumber' datasets
    :return: (SampStamp muxer, last valid DAQ sample number, sampling rate in Hz)
    """
    df = pd.read_csv(filepath_timestamps)
    # DAQ time stamps
    with h5py.File(filepath_daq, 'r') as f:
        daq_stamps = f['systemtime'][:]
        daq_sampleinterval = f['samplenumber'][:]
    # remove trailing zeros - may be left over if recording didn't finish properly
    if 0 in daq_stamps:
        last_valid_idx = np.argmax(daq_stamps == 0)
    else:
        last_valid_idx = len(daq_stamps) - 1  # in case there are no trailing zeros
    # Cumulative sample counts, trimmed to the valid range, as a column vector.
    daq_samplenumber = np.cumsum(daq_sampleinterval)[:last_valid_idx, np.newaxis]
    last_sample = daq_samplenumber[-1, 0]
    nb_seconds_per_interval, _ = scipy.stats.mode(np.diff(daq_stamps[:last_valid_idx, 0]))  # seconds - using mode here to be more robust
    nb_seconds_per_interval = nb_seconds_per_interval[0]
    nb_samples_per_interval = np.mean(np.diff(daq_samplenumber[:last_valid_idx, 0]))
    sampling_rate_Hz = np.around(nb_samples_per_interval / nb_seconds_per_interval, -3)  # round to 1000s of Hz
    # ss = SampStamp(sample_times=daq_stamps[:last_valid_idx, 0], frame_times=shutter_times, sample_numbers=daq_samplenumber[:, 0], auto_monotonize=False)
    ss = SampStamp(sample_times=daq_stamps[:last_valid_idx, 0], sample_numbers=daq_samplenumber[:, 0],
                   frame_samples=df['sample'], frame_numbers=df['movie_frame'], auto_monotonize=False)
    # # different refs:
    #
    # # first sample is 0 seconds
    # s0 = ss.sample_time(0)
    # ss = SampStamp(sample_times=daq_stamps[:, 0] - s0, frame_times=cam_stamps[:, 0] - s0, sample_numbers=daq_samplenumber[:, 0])
    #
    # # first frame is 0 seconds - for no-resample-video-data
    # f0 = ss.frame_time(0)
    # ss = SampStamp(sample_times=daq_stamps[:, 0] - f0, frame_times=cam_stamps[:, 0] - f0, sample_numbers=daq_samplenumber[:, 0])
    return ss, last_sample, sampling_rate_Hz
def is_valid_hotkey(hotkey: str) -> bool:
    """Returns True if hotkey string is valid.

    The first two characters are a prefix; the remainder is an
    underscore-joined sequence of alternating mode/button pairs
    (e.g. ``xxclick_left_wheel_up``).

    :param hotkey: encoded hotkey string
    :return: True when every mode/button pair is valid and the final pair
        does not use the "press" mode
    """
    mode_opts = ["press", "click", "wheel"]
    btn_opts = [b.name for b in Button]
    wheel_opts = ["up", "down"]
    hotkeylist = hotkey[2:].split("_")
    # Must contain at least one complete (mode, button) pair.
    if len(hotkeylist) == 0 or len(hotkeylist) % 2 != 0:
        return False
    for i in range(0, len(hotkeylist), 2):
        mode = hotkeylist[i]
        btn = hotkeylist[i + 1]
        if mode not in mode_opts:
            return False
        if mode == "wheel" and btn not in wheel_opts:
            return False
        elif mode in ("press", "click") and btn not in btn_opts:
            return False
    # BUGFIX: the original compared `hotkey[-2]` (a single character) to
    # "press", which is always False; the intent is to reject a trailing
    # "press" mode, i.e. the mode of the final pair.
    if hotkeylist[-2] == "press":
        return False
    return True
def find_range_with_sum(values : list[int], target : int) -> tuple[int, int]:
    """Given a list of positive integers, find a half-open range [i, j)
    which sums to a target value.

    Uses a sliding window, which is valid because all values are positive.

    :param values: list of positive integers
    :param target: desired window sum
    :return: (start, end) indices of the matching window, or (-1, -1)
    """
    i = j = acc = 0
    n = len(values)
    # BUGFIX: the original looped `while j < len(values)`, so a window ending
    # at the last element (e.g. [1, 2] with target 3) was never checked after
    # the final extension. Keep iterating while the window can still grow OR
    # still needs shrinking/checking.
    while j < n or acc >= target:
        if acc == target:
            return i, j
        elif acc < target:
            acc += values[j]
            j += 1
        else:
            acc -= values[i]
            i += 1
    return -1, -1
def output_AR1(outfile, fmri_image, clobber=False):
    """
    Create an output file of the AR1 parameter from the OLS pass of
    fmristat.
    Parameters
    ----------
    outfile :
    fmri_image : ``FmriImageList`` or 4D image
        object such that ``object[0]`` has attributes ``coordmap`` and ``shape``
    clobber : bool
        if True, overwrite previous output
    Returns
    -------
    regression_output : ``RegressionOutput`` instance
    """
    # The output image inherits geometry (coordmap/shape) from the first frame.
    outim = ModelOutputImage(outfile, fmri_image[0].coordmap,
                             fmri_image[0].shape, clobber=clobber)
    return outputters.RegressionOutput(outim, outputters.output_AR1)
def _sanity_check(module):
    """Run sanity checks that don't depend on info from the zone/record.

    Validates library availability and user-supplied parameters; calls
    ``module.fail_json`` (which exits the module) on the first violation.

    :param module: AnsibleModule carrying the task parameters
    """
    overwrite = module.params['overwrite']
    record_name = module.params['record']
    record_type = module.params['type']
    state = module.params['state']
    ttl = module.params['ttl']
    record_data = module.params['record_data']
    # Apache libcloud needs to be installed and at least the minimum version.
    if not HAS_LIBCLOUD:
        module.fail_json(
            msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
            changed=False
        )
    elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
        module.fail_json(
            msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
            changed=False
        )
    # A negative TTL is not permitted (how would they even work?!).
    if ttl < 0:
        module.fail_json(
            msg='TTL cannot be less than zero, got: %d' % ttl,
            changed=False
        )
    # Deleting SOA records is not permitted.
    if record_type == 'SOA' and state == 'absent':
        module.fail_json(msg='cannot delete SOA records', changed=False)
    # Updating SOA records is not permitted.
    if record_type == 'SOA' and state == 'present' and overwrite:
        module.fail_json(msg='cannot update SOA records', changed=False)
    # Some sanity checks depend on what value was supplied.
    if record_data is not None and (state == 'present' or not overwrite):
        # A records must contain valid IPv4 addresses.
        if record_type == 'A':
            for value in record_data:
                try:
                    socket.inet_aton(value)
                except socket.error:
                    module.fail_json(
                        msg='invalid A record value, got: %s' % value,
                        changed=False
                    )
        # AAAA records must contain valid IPv6 addresses.
        if record_type == 'AAAA':
            for value in record_data:
                try:
                    socket.inet_pton(socket.AF_INET6, value)
                except socket.error:
                    module.fail_json(
                        msg='invalid AAAA record value, got: %s' % value,
                        changed=False
                    )
        # CNAME and SOA records can't have multiple values.
        if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
            module.fail_json(
                msg='CNAME or SOA records cannot have more than one value, ' +
                "got: %s" % record_data,
                changed=False
            )
        # Google Cloud DNS does not support wildcard NS records.
        if record_type == 'NS' and record_name[0] == '*':
            module.fail_json(
                msg="wildcard NS records not allowed, got: %s" % record_name,
                changed=False
            )
        # Values for txt records must begin and end with a double quote.
        if record_type == 'TXT':
            for value in record_data:
                # BUGFIX: was `value[0] != '"' and value[-1] != '"'`, which
                # only rejected values missing BOTH quotes; a value quoted on
                # one side only slipped through. The rule above requires both,
                # so either missing quote must fail.
                if value[0] != '"' or value[-1] != '"':
                    module.fail_json(
                        msg='TXT record_data must be enclosed in double quotes, ' +
                        'got: %s' % value,
                        changed=False
                    )
def feature_bit_number(current):
    """Fuzz bit number field of a feature name table header extension."""
    # Constrain fuzzed values to valid unsigned 8-bit integers.
    constraints = UINT8_V
    return selector(current, constraints)
def render_page(context, slot, payload): # pylint: disable=R0201,W0613
    """ Base template slot

    Resolves chapter/module/page from the request query string and renders
    the matching template, falling back to an empty page when rendering
    fails (e.g. the template does not exist).

    :param context: unused (slot signature requirement)
    :param slot: unused (slot signature requirement)
    :param payload: configuration passed to the template as ``config``
    """
    chapter = request.args.get('chapter', '')
    module = request.args.get('module', '')
    page = request.args.get('page', '')
    try:
        if page:
            return render_template(f"{chapter.lower()}/{module.lower()}/{page.lower()}.html", active_chapter=chapter,
                                   config=payload)
        return render_template(f"{chapter.lower()}/{module.lower()}.html", active_chapter=chapter, config=payload)
    # FIX: narrowed from a bare `except:`, which also swallowed SystemExit
    # and KeyboardInterrupt; only genuine errors trigger the fallback now.
    except Exception:
        return render_template("common/empty.html", active_chapter=chapter, config=payload)
def add_stripe_customer_if_not_existing(f):
    """
    Decorator which creates user as a customer if not already existing before making a request to the Stripe API
    """
    @wraps(f)
    def _ensure_customer(user: DjangoUserProtocol, *args, **kwargs):
        # Idempotently register the user as a Stripe customer, then delegate.
        return f(create_customer(user), *args, **kwargs)
    return _ensure_customer
def test_editor_rstrip_keypress(editorbot, input_text, expected_text, keys,
                                strip_all):
    """
    Test that whitespace is removed when leaving a line.

    :param editorbot: (qtbot, editor widget) fixture pair
    :param input_text: text loaded into the editor
    :param expected_text: text expected after replaying the events
    :param keys: sequence of Qt keys, or (button, position) tuples for clicks
    :param strip_all: value for the editor's strip-on-modify option
    """
    qtbot, widget = editorbot
    widget.strip_trailing_spaces_on_modify = strip_all
    widget.set_text(input_text)
    # Start with the cursor at the very end of the document.
    cursor = widget.textCursor()
    cursor.movePosition(QTextCursor.End)
    widget.setTextCursor(cursor)
    for key in keys:
        if isinstance(key, tuple):
            # Mouse event
            button, position = key
            cursor = widget.textCursor()
            cursor.setPosition(position)
            # Synthesize a press at the pixel position of that text cursor.
            xypos = widget.cursorRect(cursor).center()
            widget.mousePressEvent(QMouseEvent(
                QEvent.MouseButtonPress, xypos,
                button, button,
                Qt.NoModifier))
        else:
            qtbot.keyPress(widget, key)
    assert widget.toPlainText() == expected_text
def debug():
    """
    Import the test utils module to be able to:
    - Use the trace tool and get context variables after making a request to Apigee
    """
    # Fresh trace-debug helper bound to the configured proxy name.
    return ApigeeApiTraceDebug(proxy=config.PROXY_NAME)
def FromModuleToDoc(importedMod,filDfltText):
    """
    Returns the doc string of a module as a literal node. Possibly truncated
    so it can be displayed.

    :param importedMod: imported module object (its ``__doc__`` may be None)
    :param filDfltText: fallback file name used when no docstring is found
    :return: NodeLiteral wrapping the chosen display text
    """
    # BUGFIX: docModu is now pre-initialised. Previously, when __doc__ was
    # None nothing in the try block assigned docModu, and the later
    # `if not docModu` raised NameError (the except clause did not cover it).
    docModu = ""
    try:
        docModuAll = importedMod.__doc__
        if docModuAll:
            docModuAll = docModuAll.strip()
            # Take only the first non-empty line, truncated for display.
            for lineDoc in docModuAll.split("\n"):
                if lineDoc:
                    maxLen = 40
                    docModu = lineDoc if len(lineDoc) <= maxLen else lineDoc[0:maxLen] + "..."
                    break
    except Exception:
        # Narrowed from a bare `except:`; any failure falls back to default.
        docModu = ""
    if not docModu:
        # If no doc available, just transform the file name.
        docModu = filDfltText.replace("_"," ").capitalize()
    nodModu = NodeLiteral(docModu)
    return nodModu
def noisify_patternnet_asymmetric(y_train, noise, random_state=None):
    """ mistakes in labelling the land cover classes in PatternNet dataset
    cemetery -> christmas_tree_fram
    harbor <--> ferry terminal
    Den.Res --> costal home
    overpass <--> intersection
    park.space --> park.lot
    runway_mark --> park.space
    costal home <--> sparse Res
    swimming pool --> costal home

    :param y_train: integer class labels (0..37)
    :param noise: probability of flipping each affected class
    :param random_state: seed forwarded to multiclass_noisify
    :return: label array with asymmetric noise applied
    """
    nb_classes = 38
    # Row-stochastic transition matrix: P[true, observed].
    P = np.eye(nb_classes)
    n = noise
    if n>0.0:
        # Class-pair confusions listed in the docstring above.
        P[5,5], P[5,7] = 1.-n, n
        P[9,9], P[9,32] =1.-n, n
        P[11,11], P[11,9] = 1.-n, n
        P[17,17], P[17,12] = 1.-n, n
        P[12,12], P[12,17] = 1.-n, n
        P[18,18], P[18,23] = 1.-n, n
        P[23,23], P[23,18] = 1.-n, n
        P[25,25], P[25, 24] = 1.-n, n
        P[29,29], P[29,25] = 1.-n, n
        P[32,32], P[32,9] = 1.-n, n
        P[34,34], P[34,9] = 1.-n, n
        y_train_noisy = multiclass_noisify(y_train, P=P,
                                           random_state=random_state)
        actual_noise = (y_train_noisy != y_train).mean()
        # NOTE(review): assert is stripped under `python -O`; the sanity check
        # that some labels actually changed would silently disappear.
        assert actual_noise > 0.0
        print('Actual noise %.2f' % actual_noise)
        y_train = y_train_noisy
    return y_train
def group_superset_counts(pred, label):
    """
    Return TP if all label spans appear within pred spans
    :param pred, label: A group, represented as a dict
    :return: A Counts namedtuple with TP, FP and FN counts
    """
    if pred["label"] != label["label"]:
        return Counts(0, 1, 1)
    # Every label span must be fully contained in at least one pred span.
    fully_covered = all(
        any(p_span["start"] <= l_span["start"] and p_span["end"] >= l_span["end"]
            for p_span in pred["spans"])
        for l_span in label["spans"]
    )
    return Counts(1, 0, 0) if fully_covered else Counts(0, 1, 1)
def other():
    """ Queries all of the logged in user's Campaigns
    and plugs them into the campaigns template """
    # NOTE(review): despite the docstring, this queries Entity rows and
    # renders 'other.html', not campaigns -- confirm which is intended.
    entities = db.session.query(Entity)
    entities = [e.to_dict() for e in entities]
    return render_template('other.html', entities=entities)
def test_limited_query():
    """Test that result set limiting works."""
    # Requires EUROPEANA_API_KEY in the environment; performs live API calls.
    reader = EuropeanaSearchReader(os.environ['EUROPEANA_API_KEY'], 'Python', max_records=65)
    count = 0
    for record in reader:
        assert record.id
        count = count + 1
    assert reader.result_count > 0
    # The reader must stop exactly at max_records.
    assert count == 65
def rel_angle(vec_set1, vec_set2):
    """
    Calculate the relative angle between two vector sets

    Args:
        vec_set1(array[array]): an array of two vectors (reference set)
        vec_set2(array[array]): second array of two vectors

    Returns:
        Ratio of the second set's angle to the first set's, minus one
        (0 means the angles are identical).
    """
    angle_second = vec_angle(vec_set2[0], vec_set2[1])
    angle_first = vec_angle(vec_set1[0], vec_set1[1])
    return angle_second / angle_first - 1
def check_position_axes(chgcar1: CHGCAR, chgcar2: CHGCAR) -> bool:
    """Verify two CHGCAR objects share cell vectors and atom positions.

    Parameters
    -----------
    chgcar1, chgcar2: vaspy.CHGCAR

    Returns
    -------
    bool
        Always True; an AssertionError is raised on any mismatch.
    """
    lattice_a = chgcar1.poscar.cell_vecs
    lattice_b = chgcar2.poscar.cell_vecs
    coords_a = np.array(chgcar1.poscar.positions)
    coords_b = np.array(chgcar2.poscar.positions)
    assert np.allclose(lattice_a, lattice_b), "UnitCells are inconsistent. Abort."
    assert np.allclose(coords_a, coords_b), "Atom positions are inconsistent!!! Abort."
    return True
def path_shortest(graph, start):
    """ Pythonic minheap implementation of dijkstra's algorithm

    :param graph: adjacency mapping {node: {neighbor: edge_weight}}
    :param start: source node
    :return: dict mapping each node to its shortest distance from *start*
        (unreachable nodes keep float('infinity'))
    """
    # Initialize all distances to infinity but the start one.
    distances = {node: float('infinity') for node in graph}
    distances[start] = 0
    paths = [(0, start)]
    while paths:
        current_distance, current_node = heap.heappop(paths)
        # IMPROVEMENT: skip stale heap entries. A node can be pushed several
        # times with decreasing distances; only the smallest one still
        # matters, so avoid re-relaxing from outdated entries.
        if current_distance > distances[current_node]:
            continue
        neighbors = graph[current_node].items()
        for neighbor, weight in neighbors:
            distance = current_distance + weight
            if distance < distances[neighbor]:
                distances[neighbor] = distance
                heap.heappush(paths, (distance, neighbor))
    return distances
def test_issue5():
    """https://github.com/nazrulworld/fhir.resources/issues/5"""
    from fhir.resources.codeableconcept import CodeableConcept
    from fhir.resources.coding import Coding
    coding1 = Coding(jsondict={'system': 'http://www.snomed.org/', 'code': '424144002'})
    coding2 = Coding(jsondict={'system': 'https://loinc.org/', 'code': '30525-0'})
    # Construction is expected to raise; the regression being guarded is a
    # specific NameError from the linked issue.
    with pytest.raises(Exception) as e_info:
        CodeableConcept(jsondict={'text': 'Age', 'coding': [coding1, coding2]})
    # Test if already solved the problem
    assert "name 'self' is not defined" not in str(e_info.value)
def request(url, *args, **kwargs):
    """Requests a single JSON resource from the Wynncraft API.
    :param url: The URL of the resource to fetch
    :type url: :class:`str`
    :param args: Positional arguments to pass to the URL
    :param kwargs: Keyword arguments (:class:`str`) to pass to the URL
    :returns: The returned JSON object as a :class:`dict`
    :rtype: :class:`dict`
    """
    parsedArgs = (urllib.parse.quote(a) for a in args)
    parsedKwargs = {k: urllib.parse.quote(v) for k, v in kwargs.items()}
    # FIX: use the response as a context manager so the connection is closed
    # even when reading/parsing raises (the original leaked it on error).
    with urllib.request.urlopen(url.format(*parsedArgs, **parsedKwargs)) as response:
        return json.load(response)
def test_status_success(
    mock_git_use_case: MockerFixture,
    mock_config_manager: MockerFixture,
    runner: CliRunner,
) -> None:
    """It calls status."""
    runner.invoke(git_portfolio.__main__.main, ["status"], prog_name="gitp")
    # The git use case must be invoked once with the configured repo list.
    mock_git_use_case.return_value.execute.assert_called_once_with(
        ["staticdev/omg"], "status", ()
    )
def destagger(var, stagger_dim, meta=False):
    """Return *var* averaged onto the unstaggered grid.

    Each pair of neighbouring values along *stagger_dim* is replaced by
    their mean, shrinking that dimension by one.

    Args:
        var (:class:`xarray.DataArray` or :class:`numpy.ndarray`): A variable
            on a staggered grid.
        stagger_dim (:obj:`int`): The dimension index to destagger. Negative
            values count from the right (-1 is the rightmost dimension).
        meta (:obj:`bool`, optional): Kept for API compatibility; no metadata
            handling is performed here. Default is False.

    Returns:
        Array of the same type with ``shape[stagger_dim]`` reduced by one.
    """
    n_along = var.shape[stagger_dim]
    rank = var.ndim
    # Full slices on every axis, except the two offset ranges on the
    # staggered axis: [0, n-1) paired element-wise with [1, n).
    lower_idx = [slice(None)] * rank
    upper_idx = [slice(None)] * rank
    lower_idx[stagger_dim] = slice(0, n_along - 1, 1)
    upper_idx[stagger_dim] = slice(1, n_along, 1)
    return .5 * (var[tuple(lower_idx)] + var[tuple(upper_idx)])
def sms_whoami(msg):
    """ Check what the username is
    :param msg: A full telerivet message object
    :returns: Username associated with the phone number
    """
    # Look up the account by the sender's phone number, then reply with it.
    # NOTE(review): .get() raises DoesNotExist for unknown numbers -- confirm
    # callers handle that.
    user_id = UserData.objects.get(phone=msg.connections[0].identity).user_id
    msg.respond('User: @{}'.format(get_user_model().objects.get(id=user_id).username))
def predict_pipeline_acceleration(
    data: arr_t, sampling_rate: float, convert_to_g: Optional[bool] = True, **kwargs
) -> Dict[str, Any]:
    """Apply sleep processing pipeline on raw acceleration data.
    This function processes raw acceleration data collected during sleep. The pipeline consists of the following steps:
    * *Activity Count Conversion*: Convert (3-axis) raw acceleration data into activity counts. Most sleep/wake
      detection algorithms use activity counts (as typically provided by Actigraphs) as input data.
    * *Wear Detection*: Detect wear and non-wear periods. Cut data to longest continuous wear block.
    * *Rest Periods*: Detect rest periods, i.e., periods with large physical inactivity. The longest continuous
      rest period (*Major Rest Period*) is used to determine the *Bed Interval*, i.e., the period spent in bed.
    * *Sleep/Wake Detection*: Apply sleep/wake detection algorithm to classify phases of sleep and wake.
    * *Sleep Endpoint Computation*: Compute Sleep Endpoints from sleep/wake detection results and bed interval.
    Parameters
    ----------
    data : array_like with shape (n,3)
        input data. Must be a 3-d acceleration signal
    sampling_rate : float
        sampling rate of recorded data in Hz
    convert_to_g : bool, optional
        ``True`` if input data is provided in :math:`m/s^2` and should be converted in :math:`g`, ``False`` if input
        data is already in :math:`g` and does not need to be converted.
        Default: ``True``
    **kwargs :
        additional parameters to configure sleep/wake detection. The possible parameters depend on the selected
        sleep/wake detection algorithm and are passed to
        :class:`~biopsykit.sleep.sleep_wake_detection.SleepWakeDetection`.
    Returns
    -------
    dict
        dictionary with Sleep Processing Pipeline results; empty when no wear
        block or no sleep endpoints could be determined.
    """
    # TODO: add entries of result dictionary to docstring and add possibility to specify sleep/wake prediction algorithm
    ac = ActivityCounts(sampling_rate)
    wd = WearDetection(sampling_rate=sampling_rate)
    rp = RestPeriods(sampling_rate=sampling_rate)
    sw = SleepWakeDetection("cole_kripke", **kwargs)
    if convert_to_g:
        data = convert_acc_data_to_g(data, inplace=False)
    df_wear = wd.predict(data)
    major_wear_block = wd.get_major_wear_block(df_wear)
    # cut data to major wear block
    data = wd.cut_to_wear_block(data, major_wear_block)
    if len(data) == 0:
        # No continuous wear period found -- nothing to process.
        return {}
    df_ac = ac.calculate(data)
    df_sw = sw.predict(df_ac)
    df_rp = rp.predict(data)
    # The longest rest period bounds the bed interval.
    bed_interval = [df_rp["start"][0], df_rp["end"][0]]
    sleep_endpoints = compute_sleep_endpoints(df_sw, bed_interval)
    if not sleep_endpoints:
        return {}
    major_wear_block = [str(d) for d in major_wear_block]
    dict_result = {
        "wear_detection": df_wear,
        "activity_counts": df_ac,
        "sleep_wake_prediction": df_sw,
        "major_wear_block": major_wear_block,
        "rest_periods": df_rp,
        "bed_interval": bed_interval,
        "sleep_endpoints": sleep_endpoints,
    }
    return dict_result
def create_blueprint(request_manager: RequestManager, cache: Cache,
                     dataset_factory: DatasetFactory):
    """
    Creates an instance of the blueprint.
    """
    blueprint = Blueprint('metadata', __name__, url_prefix='/metadata')

    # Cached workers: the expensive data-frame preparation and aggregation
    # run at most once per cache lifetime; the route handlers below only
    # submit these callables as asynchronous tickets.
    @cache.memoize()
    def _get_method_types_per_approach():
        frame = dataset_factory.get_prepared_data_frame()
        return metadata.get_method_type_count_per_approach(frame)

    @cache.memoize()
    def _get_approach_type_counts():
        frame = dataset_factory.get_prepared_data_frame()
        return metadata.get_approach_type_count(frame)

    # pylint: disable=unused-variable
    @blueprint.route('method/count')
    def get_method_count():
        """
        Triggers calculation of number of method types per approach.
        ---
        response:
          200:
            description: The retrieved result will be a JSON object
              representing the number of different method types per
              approach.
            application/json:
              schema:
                $ref: '#/definitions/RequestResponse'
        """
        # Submit asynchronously; the response reports the ticket's state,
        # not the computed result itself.
        ticket = request_manager.submit_ticketed(
            _get_method_types_per_approach)
        return get_state_response(ticket)

    @blueprint.route('approaches/count')
    def get_approach_count():
        """
        Computes which approach types are present in the available data and how
        many sessions each of them was used in.
        ---
        response:
          200:
            description: The retrieved result will be a JSON object
              representing the number of sessions each approach was
              used in.
            application/json:
              schema:
                $ref: '#/definitions/RequestResponse'
        """
        ticket = request_manager.submit_ticketed(_get_approach_type_counts)
        return get_state_response(ticket)

    return blueprint
def sides(function_ast, parameters, function_callback):
    """
    Parse both sides of a binary expression AST node.

    Returns a ``(left, right)`` tuple of the parsed sub-expressions,
    e.g. for ``b != c`` the parsed forms of ``b`` and ``c``.
    """
    parsed = tuple(
        side(function_ast[key], parameters, function_callback)
        for key in ('leftExpression', 'rightExpression')
    )
    return parsed
def create_path_file(directory):
    """Print the list of paths to image files found under *directory*.

    :param directory: directory to scan for image files
    """
    # Removed a stale commented-out variant that wrote the list to "ds.txt";
    # this function only prints the paths.
    output = utils.list_path_to_files(directory)
    print(output)
def print_url(host, port, datasets):
    """
    Print the available dataset URLs framed by a horizontal rule, or a
    generic server URL plus a warning when no datasets were detected.
    """
    def url(path=None):
        suffix = path if path is not None else ""
        return colored(
            "blue",
            "http://{host}:{port}/{path}".format(
                host=host, port=port, path=suffix))

    rule = colored("green", "—" * 78)
    print()
    print(rule)
    if datasets:
        print(" The following datasets should be available in a moment:")
        for path in sorted(datasets, key=str.casefold):
            print(" • %s" % url(path))
    else:
        print(" Open <%s> in your browser." % url())
        print()
        print(" ", colored("yellow", "Warning: No datasets detected."))
    print(rule)
    print()
def test_ipv6_dns(ip):
    """Test known ip address.

    ``ip`` is a pytest fixture supplying the IPv6 address under test.
    """
    server = Server(ip)
    # The string form of a Server should round-trip to the raw address.
    assert str(server) == ip
    # Port 53 (DNS) over IPv6 must respond; 5 is presumably a timeout in
    # seconds — confirm against Server.check_service_v6's signature.
    assert server.check_service_v6(53, 5)
def load_records(es_app, filename, schema):
    """Try to index records.

    Loads MARC21 fixture data shipped with ``invenio_records``, creates a
    database record for each entry tagged with *schema*, indexes them in
    Elasticsearch, and fetches each document back to confirm indexing.
    """
    indexer = RecordIndexer()
    with es_app.test_request_context():
        # Resolve the fixture file inside the installed invenio_records package.
        data_filename = pkg_resources.resource_filename("invenio_records", filename)
        records_data = load(data_filename)
        records = []
        for item in records_data:
            # Convert the MARC21 record into a plain dict and tag the schema.
            item_dict = dict(marc21.do(item))
            item_dict["$schema"] = schema
            record = Record.create(item_dict)
            records.append(record)
        # Persist all created records before indexing.
        db.session.commit()
        es_records = []
        for record in records:
            es_records.append(indexer.index(record))
        from invenio_search import current_search
        # Round-trip each document from Elasticsearch; raises if missing.
        for record in es_records:
            current_search.client.get(
                index=record["_index"], doc_type=record["_type"], id=record["_id"]
            )
def get_default_sample_path_random(data_path):
    """Return the sample path for *data_path* built from the default
    (non-related-docs) parameter suffix."""
    suffix = get_default_extra_suffix(related_docs=False)
    return get_default_sample_path(data_path, sample_suffix=suffix)
def test_python_parse_error(python_contents):
    """Test that a python parse error yields a ProtocolAnalysisException."""
    # Deliberately invalid Python source; the syntax error sits on line 2.
    proto = """
there's nothing here
"""
    # Patch the content loader so the analyzer sees the broken source.
    with patch.object(contents,
                      "get_protocol_contents", return_value=proto):
        r = analyze._analyze(python_contents)
    assert len(r.errors) == 1
    assert r.errors[0].lineNumber == 2
    assert r.errors[0].description != ""
    assert r.errors[0].type == "SyntaxError"
def VectorShadersAddMaterialDesc(builder, materialDesc):
    """This method is deprecated. Please switch to AddMaterialDesc."""
    # Thin compatibility shim kept for older flatbuffers-generated callers;
    # forwards unchanged to the module-level AddMaterialDesc.
    return AddMaterialDesc(builder, materialDesc)
def phase_randomize(D, random_state=0):
    """Randomly shift signal phases

    For each timecourse (from each voxel and each subject), computes its DFT
    and then randomly shifts the phase of each frequency before inverting
    back into the time domain. This yields timecourses with the same power
    spectrum (and thus the same autocorrelation) as the original timecourses,
    but will remove any meaningful temporal relationships between the
    timecourses.

    This procedure is described in:

    Simony E, Honey CJ, Chen J, Lositsky O, Yeshurun Y, Wiesel A, Hasson U
    (2016) Dynamic reconfiguration of the default mode network during narrative
    comprehension. Nat Commun 7.

    Parameters
    ----------
    D : voxel by time by subject ndarray
        fMRI data to be phase randomized

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.

    Returns
    ----------
    ndarray of same shape as D
        phase randomized timecourses
    """
    random_state = check_random_state(random_state)
    # DFT along the time axis.
    F = fft(D, axis=1)
    if D.shape[1] % 2 == 0:
        # Even length: DC (bin 0) and Nyquist (bin n/2) are left untouched.
        pos_freq = np.arange(1, D.shape[1] // 2)
        neg_freq = np.arange(D.shape[1] - 1, D.shape[1] // 2, -1)
    else:
        # Odd length: only DC (bin 0) is left untouched.
        pos_freq = np.arange(1, (D.shape[1] - 1) // 2 + 1)
        neg_freq = np.arange(D.shape[1] - 1, (D.shape[1] - 1) // 2, -1)
    # One random phase per voxel/frequency/subject, uniform in [0, 2*pi).
    shift = random_state.rand(D.shape[0], len(pos_freq),
                              D.shape[2]) * 2 * math.pi
    # Shift pos and neg frequencies symmetrically, to keep signal real
    F[:, pos_freq, :] *= np.exp(1j * shift)
    F[:, neg_freq, :] *= np.exp(-1j * shift)
    # np.real discards the numerically tiny imaginary residue from ifft.
    return np.real(ifft(F, axis=1))
def test_changing_duration(
    service_registry: Contract, get_accounts: Callable, custom_token: Contract
) -> None:
    """The controller can change the registration period of ServiceRegistry"""
    new_duration = 90 * SECONDS_PER_DAY
    # Change only the registration duration; all other parameters keep
    # their default values.
    service_registry.functions.changeParameters(
        _price_bump_numerator=DEFAULT_BUMP_NUMERATOR,
        _price_bump_denominator=DEFAULT_BUMP_DENOMINATOR,
        _decay_constant=DEFAULT_DECAY_CONSTANT,
        _min_price=DEFAULT_MIN_PRICE,
        _registration_duration=new_duration,
    ).call_and_transact({"from": CONTRACT_DEPLOYER_ADDRESS})
    # make sure that the duration has changed.
    assert service_registry.functions.registration_duration().call() == new_duration
    (A,) = get_accounts(1)
    # Fund account A with enough tokens for two deposits and approve the
    # registry to spend them.
    custom_token.functions.mint(2 * SERVICE_DEPOSIT).call_and_transact({"from": A})
    custom_token.functions.approve(
        service_registry.address, 2 * SERVICE_DEPOSIT
    ).call_and_transact({"from": A})
    service_registry.functions.deposit(SERVICE_DEPOSIT).call_and_transact({"from": A})
    first_expiration = service_registry.functions.service_valid_till(A).call()
    # A second deposit must extend the registration by exactly new_duration.
    service_registry.functions.deposit(SERVICE_DEPOSIT).call_and_transact({"from": A})
    second_expiration = service_registry.functions.service_valid_till(A).call()
    assert second_expiration == first_expiration + new_duration
def monospaced(text):
    """
    Collapse every run of contiguous whitespace in *text* to a single
    space and strip leading/trailing whitespace.

    Parameters
    ----------
    text : str
        Text to be re-spaced.

    Returns
    -------
    str
        Copy of *text* with all contiguous white space replaced with a
        single space " " and outer whitespace removed.
    """
    collapsed = REGEX_SPACE.sub(' ', text)
    return collapsed.strip()
def molecule_block(*args, **kwargs):
    """
    Build the TRIPOS Mol2 block for a molecule and return it as a string.

    All positional and keyword arguments are forwarded unchanged to the
    ``Molecule`` constructor.
    """
    mol = Molecule(*args, **kwargs)
    sections = [mol.molecule_block(), mol.atom_block(), mol.bond_block(), '\n']
    return ''.join(sections)
def setup_test_env(settings_key='default'):
    """Allows easier integration testing by creating RPC and HTTP clients

    :param settings_key: Desired server to use
    :return: Tuple of RPC client, HTTP client, and thrift module
    """
    # `handler` is a module-level RPC handler shared by the test helpers.
    return RpcClient(handler), HttpClient(), load_module(settings_key)
def calculateCentroid(
    pointCloud : List[Tuple[float, float, float]]
    ) -> Tuple[float, float, float]:
    """Calculate centroid of point cloud.

    Arguments
    --------------------------------------------------------------------------
    pointCloud (float 3-tuple list) -- list of xyz coordinates.
        Must be non-empty (an empty cloud raises ZeroDivisionError, as
        before).

    Returns
    --------------------------------------------------------------------------
    centroid (float 3-tuple) -- centroid of points in point cloud.
    """
    numPoints = len(pointCloud)
    # Transpose the cloud into per-axis sequences in a single pass instead
    # of three manual append loops.
    xs, ys, zs = zip(*pointCloud)
    return sum(xs) / numPoints, sum(ys) / numPoints, sum(zs) / numPoints
def timber_load():
    """
    Calculate Timber's IO load since the last call

    Returns a ``(read_count, write_count, read_bytes, write_bytes)`` delta
    tuple, or ``None`` (implicitly) if fetching the counters failed.
    """
    #io(read_count=454556, write_count=3456, read_bytes=110592, write_bytes=0)
    # The previous snapshot lives in the module-level `timber_io_stat`;
    # each successful call replaces it so deltas are call-to-call.
    global timber_io_stat
    try:
        # `p` is a module-level process handle (presumably psutil) — confirm.
        new_stat = p.get_io_counters()
        readCount = new_stat.read_count - timber_io_stat.read_count
        writeCount = new_stat.write_count - timber_io_stat.write_count
        readBytes = new_stat.read_bytes - timber_io_stat.read_bytes
        writeBytes = new_stat.write_bytes - timber_io_stat.write_bytes
        timber_io_stat = new_stat
        return readCount,writeCount,readBytes,writeBytes
    except Exception as e:
        # Best-effort: log and fall through (returns None) on failure.
        debug(e)
        debug("Timber load data pulled failed", error=True)
def get_num_uniq_users(csv_file, userid_col):
    """
    A Helper function to help get the number of unique users.

    :param csv_file: path to CSV file
    :param userid_col: column name holding the user ID
    :return: int, number of distinct user IDs
    """
    # Read the CSV file using pandas
    df = pd.read_csv(csv_file)
    # Use nunique() as the original comment intended, instead of the
    # heavier np.unique round-trip.
    return df[userid_col].nunique()
def generate_graph_batch(n_examples, sample_length):
    """Generate all of the training data.

    Parameters
    ----------
    n_examples: int
        Number of samples.
    sample_length: int
        Length of each sample.
        # TODO we should implement samples of different lens as in the DeepMind example.

    Returns
    -------
    res: tuple
        (input_data, target_data), each of the elements is a list of entities dicts
    """
    inputs = [
        graph_data_from_list(np.random.uniform(size=sample_length))
        for _ in range(n_examples)
    ]
    targets = [
        create_target_data(vertices, edges, connectivity)
        for vertices, edges, connectivity in inputs
    ]
    return inputs, targets
def administration(request):
    """Administration actions ((re)train acton predictor for a new survey)

    Parameters
    ----------
    request:
        POST request

    Returns
    -------
    render:
        django.shortcuts.render (a page to be rendered)
    """
    from zooniverse_web.models import Survey, QuestionResponse, Response, QuestionOption
    from zooniverse_web.utility.survey import generate_new_survey
    message = None
    message_class = None
    if request.method == 'POST':
        next_action = request.POST.get('submit', None)
        if next_action == '(Re)Train Recommender':
            previous_survey = Survey.objects.filter(active=True).order_by('-creation_date').first()
            if not previous_survey:
                # No active survey yet: create the first one unconditionally.
                survey_created = generate_new_survey()
                message_class = 'success'
                message = 'New survey created on {}!'.format(survey_created.creation_date)
            else:
                # Are there any responses for this survey?
                try:
                    for option in QuestionOption.objects.all():
                        QuestionResponse.objects.filter(
                            response=Response.objects.get(
                                status=Response.FINISHED,
                                survey=previous_survey
                            ),
                            answer=option.option
                        )
                    survey_created = generate_new_survey()
                    message_class = 'success'
                    message = 'New survey created on {}!'.format(survey_created.creation_date.date())
                except (QuestionOption.DoesNotExist, QuestionResponse.DoesNotExist, Response.DoesNotExist):
                    message = 'You do not have enough question responses saved yet for the current survey! ' \
                              'Try again later.'
                    message_class = 'warning'
                # FIX: was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                except Exception:
                    message = 'Something went wrong while generating the survey. Please try again. <br />' \
                              'If the problem keeps on occuring, please contact your system administrator.'
                    message_class = 'danger'
    else:
        message = ''
        message_class = ''
    return render(
        request,
        'administration/administration.html',
        {
            'message': message,
            'message_class': message_class,
        }
    )
def main(flow_id, git_tag):
    """
    Build a docker image for a release and push it to the registry.

    :param flow_id: order/workflow id whose publish configuration is fetched
    :param git_tag: name of the git tag to build from
    :return: None
    """
    print('[INFO]: 这部分是用来在编译镜像,并且上传docker仓库')
    config = get_publish_data(flow_id)      # publish configuration for this flow
    builder = BuildImage(config, git_tag)   # image builder bound to the tag
    builder.build_image()
    builder.push_image()
def test_post_payments_error(client, jwt, app, payment_mock_error):
    """Assert that the endpoint returns 400 when the payment service errors.

    The ``payment_mock_error`` fixture makes the downstream payment call
    fail; the API should surface that as a 400 response.
    """
    token = jwt.create_jwt(get_claims(), get_token_header())
    headers = {'content-type': 'application/json', 'Authorization': f'Bearer {token}'}
    rv = client.post('/api/v1/payments', data=json.dumps({
        'feeCode': 'BSH105',
        'userId': 'PB25020',
        'invoiceNumber': 'TEST12345678901',
        'folioNumber': 'TEST1234567890',
        'formNumber': '',
        'quantity': '',
        'rate': '',
        'amount': '',
        'remarks': 'TEST',
        'reduntantFlag': ' '
    }), headers=headers)
    assert rv.status_code == 400
def checkout(skus):
    """
    Calculate the total amount for the checkout based on the SKUs entered in

    :param skus: string, each char is an item
    :return: int, total amount of the cart, including special offers;
             -1 if any SKU is unknown
    """
    total = 0
    counter = Counter(skus)
    # go through the offers (biggest first), and calculate the line total, and any free offers...
    for item in counter:
        # (removed leftover debug print of each item)
        if item not in ITEMS:
            # Unknown SKU invalidates the whole basket.
            return -1
        line_total = 0
        free_offer = 0
        qty = counter[item]
        ordered_offers = sorted(ITEMS[item]['special_offers'], key=lambda k: (k['min_quantity']), reverse=True)
        # does this item have an specials?
        for offer in ordered_offers:
            # how many can we get of the biggest offer
            number_of_offers = qty // offer['min_quantity']
            if 'price' in offer:
                # how many are left, put in qty for next offer...
                number_of_items_in_offer = number_of_offers * offer['min_quantity']
                qty -= number_of_items_in_offer
                # update the line total
                line_total += number_of_offers * offer['price']
            elif 'other_free' in offer:
                if offer['other_free'] in counter:
                    # make sure we have the min required items
                    if counter[item] >= offer['min_quantity']:
                        other_free = offer['other_free']
                        # NOTE(review): the discount uses the free item's full
                        # price — verify this is right when that item is also
                        # covered by a multi-buy price.
                        free_offer = number_of_offers * ITEMS[other_free]['price']
        # add any remaining qty as full price to the line_total
        line_total += qty * ITEMS[item]['price']
        # add the line total, and the free offers to the checkout total
        total += line_total
        total -= free_offer
    return total
def warn_with_traceback(message, category, filename, lineno,
                        file=None, line=None):
    """
    Alternate warning printer that prepends a stack traceback to each warning.

    Install it with ``warnings.showwarning = warn_with_traceback``.
    """
    traceback.print_stack()
    stream = file if hasattr(file, 'write') else sys.stderr
    formatted = warnings.formatwarning(message, category, filename, lineno, line)
    stream.write(formatted)
def func_tradeg(filename, hdulist=None, whichhdu=None):
    """Return the fits header value TELRA in degrees.

    :param filename: path to the FITS file (opened only when *hdulist*
        is not supplied)
    :param hdulist: optional already-open HDU list; if given it is NOT
        closed by this function
    :param whichhdu: unused, kept for interface compatibility
    :return: TELRA converted to degrees
    """
    opened_here = hdulist is None
    hdulist2 = fits.open(filename, 'readonly') if opened_here else hdulist
    try:
        telra = fitsutils.get_hdr_value(hdulist2, 'TELRA')
    finally:
        # FIX: previously the file stayed open if reading the header raised;
        # close it only when this function opened it.
        if opened_here:
            hdulist2.close()
    return spmeta.convert_ra_to_deg(telra)
def minor(ctx, v):
    """ Increase minor version, tag and push """
    try:
        bumped = v.next_minor()
        click.echo(v.omit_prefix(bumped))
    except GitCommandError as e:
        click.echo(str(e))
        ctx.exit(1)
def column_indexes(column_names, row_header):
    """Resolve column positions (項目位置の取得).

    Args:
        column_names (dict): mapping from a key to the column header text.
        row_header (list): header row to search in.

    Returns:
        dict: same keys as *column_names*, mapped to the index of each
        header text within *row_header*.
    """
    return {key: row_header.index(header)
            for key, header in column_names.items()}
def dmx_psrs(caplog):
    """Sample pytest fixture.

    Loads pickled pulsar objects (NG 9-year DMX, DE436 ephemeris) from
    ``datadir`` for each name in the module-level ``psr_names``.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # Silence log output below CRITICAL while unpickling.
    caplog.set_level(logging.CRITICAL)
    psrs = []
    for p in psr_names:
        with open(datadir+'/{0}_ng9yr_dmx_DE436_epsr.pkl'.format(p), 'rb') as fin:
            psrs.append(pickle.load(fin))
    return psrs
def fit_stats(act_map, param, func=KentFunc):
    """Generate fitting statistics from scipy's curve fitting

    Returns the residual map (same shape as *act_map*) and the R^2 of the
    fit of *func* with parameters *param* against *act_map*.
    """
    # Sample grid built from the module-level angle arrays phi_arr/theta_arr;
    # assumes act_map is sampled on that same (theta, phi) grid — confirm.
    phi_grid, theta_grid = meshgrid(phi_arr, theta_arr)
    Xin = np.array([theta_grid.flatten(), phi_grid.flatten()]).T
    fval = act_map.flatten()
    fpred = func(Xin, *param)  # KentFunc
    res = fval - fpred
    # R^2 relative to the variance of the observed activation map.
    rsquare = 1 - (res**2).mean() / fval.var()
    return res.reshape(act_map.shape), rsquare
def prep_dyson(
    opt_in,
):
    """Runs lightspeed calculations from template files and traj object

    Prepares (and optionally submits) ``njobs`` sbatch jobs, each covering a
    slice of the trajectory's frames; one sub-directory is created per frame.

    Params:
        opt_in - [dict], options to override defaults
    """
    options = {
        # all paths must be absolute!
        "submit_input": None,  # sbatch file to edit
        "submit_input2": "sbatch.sh",  # modified sbatch file to execute
        "out_dir": None,  # dir for output
        "submit": False,  # whether to submit or just prep the job
        "traj": None,  # traj object to run (all frames)
        "njobs": 25,  # number of jobs
    }
    for key, val in list(opt_in.items()):
        options[key] = val
    # TODO: throw error messages for invalid combination of options
    # override options
    submit_input = options["submit_input"]
    submit_input2 = options["submit_input2"]
    out_dir = options["out_dir"]
    submit = options["submit"]
    traj = options["traj"]
    njobs = options["njobs"]
    # loop over jobs
    # NOTE(review): slicing below requires an integer; under Python 3 `/`
    # yields a float — confirm this module runs on Python 2 or change to `//`.
    frames_per_job = len(traj.frames) / njobs
    nframes = len(traj.frames)
    for job in range(njobs + 1):
        # setup directories (note: os.chdir mutates process-wide cwd)
        os.chdir(out_dir)
        os.system("mkdir %02djob" % job)
        os.system("cp %s %02djob" % (submit_input, job))
        os.system("cp ../ref/dyson_fomo.py %02djob" % job)
        os.chdir("%02djob" % job)
        # divide frames per job (nframes % njobs goes into last job)
        ind1 = frames_per_job * job
        ind2 = frames_per_job * (job + 1)
        if job == njobs:
            ind2 = nframes
        # Zero-based electronic state index per frame.
        states = [frame.I - 1 for frame in traj.frames[ind1:ind2]]
        nstates = "[ "
        for state in states:
            nstates += str(state) + ", "
        nstates += "]"
        # update submission script
        update_sbatch("%d" % job, submit_input2, ind1=ind1, ind2=ind2)
        update_dyson(nstates=nstates)
        # loop over frames
        for ind, frame in enumerate(traj.frames[ind1:ind2]):
            # set up directory
            os.system("mkdir %04d" % (ind + ind1))
            os.chdir("%04d" % (ind + ind1))
            # modify neutral molpro params
            symbols = [atom_data.atom_symbol_table[N] for N in frame.N]
            write_xyz(frame, symbols, "geom.xyz")
            # create file to identify frame with job
            os.system(
                "echo 'labels: %4d %4d\nt: %12.6f' > %s"
                % (frame.label[0], frame.label[1], frame.t, "%04d_ID.txt" % ind)
            )
            # return to working directory
            os.chdir("../")
        # if requested, submit the job
        if submit == True:
            os.system("sbatch %s" % submit_input2)
        os.chdir("../")
def conv2d_for_hpool_valid_width_wrapper(inputs,filters,strides,padding,**kwargs):
    """
    Wraps tf.layers.conv2d to allow valid convolution across signal width and
    'same' convolution across signal height when padding is set to "valid_time"

    Arguments:
        inputs (TF Tensor): Tensor input.
        filters (TF Tensor): Must have the same type as input.
            A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels]
        strides (int or tuple/list): An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the height and width.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        padding (string): One of `"valid"`, `"same"`, or `"valid_time"` (case-insensitive).
        kwargs (dictionary): Specifies all other arguments required by
            tf.layers.conv2d. Passes these directly to function without modification.
            See Tensorflow documentation for further details.

    Returns:
        (TF Tensor): Output of tf.layers.conv2d.
    """
    #Collects relvant parameters
    size=inputs.get_shape()
    kernel_size = filters.get_shape()
    filter_height = int(kernel_size[0])
    in_height = int(size[1])
    #Calculates according to SAME padding formula
    # (pad so output height == ceil(in_height / stride) — TF's SAME rule)
    if (in_height % strides[0] == 0):
        pad_along_height = max(filter_height - strides[0], 0)
    else:
        pad_along_height = max(filter_height - (in_height % strides[0]), 0)
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    #Pads signal if VALID_TIME is selected and padding is necessary
    #Otherwise, pass inputs through and allow specified convolutioon
    if pad_along_height == 0 or padding.upper() != 'VALID_TIME':
        # "valid_time" degenerates to plain VALID when no height padding needed.
        padding = 'VALID' if padding.upper() == 'VALID_TIME' else padding
        output_tensor = tf.nn.conv2d(inputs,filter=filters,
                                     strides=strides,padding=padding,
                                     **kwargs)
    else:
        #Pads input tensor and moves conv2d to valid padding
        paddings = tf.constant([[0,0],[pad_top, pad_bottom], [0, 0],[0,0]])
        input_padded = tf.pad(inputs,paddings)
        output_tensor=tf.nn.conv2d(input_padded,filter=filters,
                                   strides=strides, padding="VALID",
                                   **kwargs)
    return output_tensor
def resample_cells(tree, params, current_node = 'root', inplace = False):
    """
    Re-run the cell-evolution simulation on a fixed tree topology.

    Starting from *current_node*, each child's cell is re-simulated from a
    reseeded copy of its parent's cell, recursing down the tree. When
    *inplace* is False the input tree is deep-copied first.
    """
    if not inplace:
        tree = copy.deepcopy(tree)
    for child in tree.successors(current_node):
        seed_cell = tree.nodes[current_node]['cell'].deepcopy()
        seed_cell.reset_seed()
        branch_time = tree.nodes[child]['time_to_parent']
        tree.nodes[child]['cell'] = sim.evolve_cell(seed_cell, branch_time, params)
        resample_cells(tree, params, current_node=child, inplace=True)
    return tree
def get_sql_query(table_name:str) -> str:
    """Fetch SQL query file for generation of dim or fact table(s).

    :param table_name: base name of the ``.sql`` file under ``./models/sql``
    :return: the file's contents as a string
    """
    # Context manager guarantees the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(f'./models/sql/{table_name}.sql') as f:
        return f.read()
def run_cli(
    ctx: click.Context,
    pkgs: list[str],
) -> None:
    """A shortcut command to run pytest against a specific set of CLI-based
    integration tests

    It takes one or more test package names in a comma-separated list (PKGS)
    and forwards all other extra arguments and options (PYTEST_ARGS) to
    the underlying pytest command.

    \b
    Available CLI-based integration test package names:
      admin
      user
    """
    # NOTE: the docstring above doubles as the click help text — edit with care.
    # Unrecognised CLI options collected by click are forwarded to pytest.
    pytest_args = ctx.args
    result = subprocess.run([
        sys.executable, '-m', 'pytest',
        '--pyargs',
        *(f'ai.backend.test.cli_integration.{pkg}' for pkg in pkgs),
        *pytest_args,
    ])
    # Propagate pytest's exit status as this command's exit code.
    ctx.exit(result.returncode)
def _map_args(call_node, function):
    """Maps AST call nodes to the actual function's arguments.

    Args:
        call_node: ast.Call
        function: Callable[..., Any], the actual function matching call_node

    Returns:
        Dict[Text, ast.AST], mapping each of the function's argument names to
        the respective AST node.
    """
    positional = call_node.args
    keyword = {node.arg: node.value for node in call_node.keywords}
    return tf_inspect.getcallargs(function, *positional, **keyword)
def sub_bases( motif ):
    """
    Return all possible specifications of a motif with degenerate bases.

    Each IUPAC degenerate symbol is replaced by a regex character class of
    the concrete bases it stands for.
    """
    subs = {"W":"[AT]", \
            "S":"[CG]", \
            "M":"[AC]", \
            "K":"[GT]", \
            "R":"[AG]", \
            "Y":"[CT]", \
            "B":"[CGT]", \
            "D":"[AGT]", \
            "H":"[ACT]", \
            "V":"[ACG]", \
            "N":"[ACGTN]"}
    # FIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() works on both.
    for symbol, sub in subs.items():
        if motif.find(symbol) > -1:
            motif = motif.replace(symbol, sub)
    return motif
def test_PMC_003_pass():
    """
    Test that using df[col] does not result in an error.
    """
    # Three access styles that the checker must all accept.
    statement = "df[col]\ndf.loc[col]\ndf.sum().loc[col]"
    actual = _results(statement)
    # No findings expected for any of the statements.
    expected = set()
    assert actual == expected
def interrogate_host():
    """ CLI entry point for first usage in docstring. """
    info = get_info()
    out_name = '%s.sysinfo.json' % info['hostname']
    with open(out_name, 'w') as out_file:
        json.dump(info, out_file, indent=2)
def parse(input_file_path):
    """
    Parse input file

    :param input_file_path: input file path
    :return: Image list
    """
    verticals = 0
    horizontals = 0
    logging.info("parsing %s", input_file_path)
    images = []
    with open(input_file_path, 'r') as input_file:
        nb = int(input_file.readline())  # declared images nb
        for i, line in enumerate(input_file.readlines()):
            fields = line.rstrip().split(' ')
            orientation = fields[0]
            tags = fields[2:]
            images.append(Image(i, orientation, set(tags)))
            if orientation == 'V':
                verticals += 1
            else:  # H
                horizontals += 1
    logging.info('parsing %s done', input_file_path)
    logging.info('%d images found (%d V,%d H)', nb, verticals, horizontals)
    return images
def DeleteMembership(name, release_track=None):
    """Deletes a membership from the GKE Hub.

    Args:
      name: the full resource name of the membership to delete, e.g.,
        projects/foo/locations/global/memberships/name.
      release_track: the release_track used in the gcloud command,
        or None if it is not available.

    Raises:
      apitools.base.py.HttpError: if the request returns an HTTP error
    """
    client = gkehub_api_util.GetApiClientForTrack(release_track)
    op = client.projects_locations_memberships.Delete(
        client.MESSAGES_MODULE
        .GkehubProjectsLocationsMembershipsDeleteRequest(name=name))
    # The delete is asynchronous: parse the returned long-running operation
    # name and block until the operation completes.
    op_resource = resources.REGISTRY.ParseRelativeName(
        op.name, collection='gkehub.projects.locations.operations')
    waiter.WaitFor(
        waiter.CloudOperationPollerNoResources(
            client.projects_locations_operations), op_resource,
        'Waiting for membership to be deleted')
def char(ctx, number):
    """
    Returns the character specified by a number
    """
    code_point = conversions.to_integer(number, ctx)
    return chr(code_point)
async def get_character_name(gear_url, message):
    """
    It is *sometimes* the case that discord users don't update their username
    to be their character name (eg for alts).

    This method renders the gear_url in an HTML session and parses the page
    to attempt to find the character's name.

    This assumes a specific format of the page: player names are nested in
    an h3 element with css class named 'class-[player class]'

    Returns the character's name if successful, otherwise returns the message
    sender's display name in discord.
    """
    name = message.author.display_name
    if not re.match(SIXTY_UPGRADES_REGEX, gear_url):
        return name
    for i in range(MAX_FETCH_CHARACTER_NAME_RETRIES):
        asession = None
        try:
            asession = AsyncHTMLSession()
            webpage = await asession.get(gear_url)
            await webpage.html.arender()
            query_selector = "h3[class^='class-']"
            name = webpage.html.find(query_selector, first=True).text
            break
        except Exception as e:
            logging.error(e)
        finally:
            # FIX: guard against `asession` being unbound when the session
            # constructor itself raised (previously a NameError here masked
            # the real error).
            if asession is not None:
                await asession.close()
    return name
def get_treant_df(tags, path='.'):
    """Get treants as a Pandas DataFrame

    Args:
      tags: treant tags to identify the treants
      path: the path to search for treants

    Returns:
      a Pandas DataFrame with the treant name, tags and categories

    >>> from click.testing import CliRunner
    >>> from toolz.curried import do
    >>> with CliRunner().isolated_filesystem() as dir_:
    ...     assert pipe(
    ...         dir_,
    ...         dtr.Treant,
    ...         do(lambda x: x.__setattr__('tags', ['atag'])),
    ...         lambda x: x.uuid[:8],
    ...         lambda x: x == get_treant_df(['atag'], path=dir_).uuid[0]
    ...     )
    """
    # Functional pipeline: look up treants by tag, map each treant to its
    # metadata dict, then assemble the dicts into DataFrame rows.
    return pipe(
        tags,
        get_by_tags(path=path),
        lambda x: x.map(get_treant_data),
        pandas.DataFrame,
    )
def gather_keypoints(keypoints_1, keypoints_2, matches):
    """
    Stack matched keypoint coordinates into an (n x 4) array.

    Each row holds [x1, y1, x2, y2]: the coordinates of one matching
    keypoint pair in the first and second image respectively.
    """
    rows = []
    for match in matches:
        x1, y1 = keypoints_1[match.queryIdx].pt
        x2, y2 = keypoints_2[match.trainIdx].pt
        rows.append([x1, y1, x2, y2])
    return np.array(rows)
def plot_dendrogram(
x: Union[navis.TreeNeuron, navis.NeuronList],
heal_neuron: bool = False,
downsample_neuron: float = 0.0,
plot_connectors: bool = True,
connector_confidence: Tuple[float, float] = (0.0, 0.0),
highlight_nodes: Optional = None,
highlight_connectors: Optional = None,
fragment: bool = False,
presyn_color: List[List[float]] = [[0.9, 0.0, 0.0]],
postsyn_color: List[List[float]] = [[0.0, 0.0, 0.9]],
highlight_connector_color: List[List[float]] = [[0.0, 0.9, 0.0]],
highlight_connector_size: int = 20,
presyn_size: float = 0.1,
postsyn_size: float = 0.1,
prog: str = "dot",
):
"""
This function creates a 2-dimensional dendrogram, a 'flattened' version of a neuron.
Dendrograms can be used to visualise the locations of specific partner synapses.
Parameters
----------
x : navis.TreeNeuron
A single navis tree neuron object
heal_neuron : bool
Whether you want to heal the neuron or not. N.B. Navis neurons
should be healed on import, i.e. navis.fetch_skeletons(bodyid, heal = True)
see navis.fetch_skeletons and navis.heal_fragmented_neuron for more details
downsample_neuron: float
A float specifying the downsampling factor used by navis.downsample_neuron()
If 0.0, then no downsampling will occur. If float('inf') then this will reduce
the neuron to branch and end points.
It is recommended to downsample very large neurons when testing out this code
for the first time.
plot_connectors: bool
Whether to plot presynapses and postsynapses on the dendrogram or not.
connector_confidence: tuple
The confidence value used to threshold the synapses.
The first value (connector_confidence[0]) will be used to threshold presynapses
The second value (connector_confidence[1]) will be used to threshold postsynapses
highlight_connectors: optional | np.array | dict
If a numpy array, then this should be an array of the treenodes
connected to the connectors that you want to highlight.
The single color and size will be specified in
highlight_connector_color and highlight_connector_size
If a dictionary, then the key values should be
treenode ids of the connectors you want to highlight
and their values should be the color you want to colour them.
Passing dictionaries to this parameter allow for synapses to be coloured differently
fragment: bool
Whether the neuron object you are passing
is a fragment or not (i.e. does it have a soma or not)
presyn_color: list
A list containing the rgb values that you want to colour the presynapses.
All presynapses will be coloured this color
postsyn_color: list
A list containing the rgb values that you want to colour the postsynapses.
All postsynapses will be coloured this color
highlight_connector_color: list
A list containing the rgb values that you want to color your special synapses
highlight_connector_size: int
The size of the synapses you want to highlight on the dendrogram
presyn_size: int
The size of all presynapses on the dendrogram
postsyn_size: int
The size of all postsynapses on the dendrogrm
prog : str
The layout type used by navis.nx_agraph.graphviz_layout()
Valid programs include [dot, neato or fdp].
The dot program provides a hierarchical layout, this is the fastest program
The neato program creates edges between nodes proportional to their real length.
The neato program takes the longest amount of time, can be ~2hrs for a single neuron!
Returns
-------
fig: a figure of containing the dendrogram
Example
--------
from neuroboom.utils import create_graph_structure
from neuroboom.dendrogram import plot_dendrogram
import navis.interfaces.neuprint as nvneu
from matplotlib import pyplot as plt
test_neuron = nvneu.fetch_skeletons(722817260)
plt.clf()
fig, ax = plt.subplots(figsize = (20,20))
plot_dendrogram(test_neuron, prog = 'dot')
plt.show()
"""
x = check_valid_neuron_input(x)
assert isinstance(
connector_confidence, tuple
), f"Need to pass a tuple for confidence values. You have passed a {type(connector_confidence)}"
assert (
len(connector_confidence) == 2
), """
Need to pass a tuple containing two values for confidence. \n
The first value is the confidence threshold for presynapses. \n
The second value is the confidence threshold for postsynapses. """
valid_progs = ["fdp", "dot", "neato"]
assert (
prog in valid_progs
), f"Invalid program parameter. You need to pass one of {valid_progs}"
start = time.time()
if heal_neuron:
print("Healing Neuron...")
navis.heal_fragmented_neuron(x, inplace=True)
if any(connector_confidence) > 0.0:
print(
"""Thresholding synapses: only considering
presynapses above {} confidence and postsynapses above {}""".format(
connector_confidence[0], connector_confidence[1]
)
)
presyn_included = x.connectors[x.connectors.type == "pre"][
x.connectors.confidence > connector_confidence[0]
].connector_id.tolist()
postsyn_included = x.connectors[x.connectors.type == "post"][
x.connectors.confidence > connector_confidence[1]
].connector_id.tolist()
connectors_included = list(
chain.from_iterable([presyn_included, postsyn_included])
)
x.connectors = x.connectors[x.connectors.connector_id.isin(connectors_included)]
if downsample_neuron > 0:
print("Downsampling neuron, factor = {}".format(downsample_neuron))
nodes_to_keep = []
if highlight_nodes is not None:
node_list = list(highlight_nodes.keys())
nodes_to_keep.append(node_list)
else:
nodes_to_keep.append(x.connectors.node_id.unique().tolist())
nodes_to_keep = list(chain.from_iterable(nodes_to_keep))
x = navis.downsample_neuron(
x,
downsampling_factor=downsample_neuron,
preserve_nodes=nodes_to_keep
)
if "parent_dist" not in x.nodes:
print("Calculating cable length...")
x = calc_cable(x, return_skdata=True)
g, pos = create_graph_structure(x, returned_object="graph_and_positions", prog=prog)
# Plotting tree with the above layout
print("Plotting Tree...")
nx.draw(g, pos, node_size=0, arrows=False, width=0.25)
# Whether to add soma or not
if not fragment:
if type(x.soma) == np.ndarray:
print("Plotting soma")
soma = x.soma[0]
plt.scatter([pos[soma][0]], [pos[soma][1]], s=80, c=[[0, 0, 0]], zorder=1)
else:
print("Plotting soma")
plt.scatter([pos[x.soma][0]], [pos[x.soma][1]], s=80, c=[[0, 0, 0]], zorder=1)
if plot_connectors:
print("Plotting connectors...")
plt.scatter(
[
pos[tn][0]
for tn in x.connectors[x.connectors.type == "pre"].node_id.values
],
[
pos[tn][1]
for tn in x.connectors[x.connectors.type == "pre"].node_id.values
],
c=presyn_color,
zorder=2,
s=presyn_size,
linewidths=1,
)
plt.scatter(
[
pos[tn][0]
for tn in x.connectors[x.connectors.type == "post"].node_id.values
],
[
pos[tn][1]
for tn in x.connectors[x.connectors.type == "post"].node_id.values
],
c=postsyn_color,
zorder=2,
s=postsyn_size,
linewidths=1,
)
if highlight_nodes is not None:
if isinstance(highlight_nodes, dict):
hl_tn_coords = np.array(
[
pos[tn] for tn in highlight_nodes.keys()
]
)
tn_col = [highlight_nodes[i] for i in highlight_nodes.keys()]
plt.scatter(
hl_tn_coords[:, 0],
hl_tn_coords[:, 1],
s=10,
c=tn_col,
zorder=3
)
if highlight_connectors is not None:
if isinstance(highlight_connectors, (list, np.ndarray)):
hl_cn_coords=np.array(
[
pos[tn]
for tn in x.connectors[
x.connectors.connector_id.isin(highlight_connectors)
].node_id
]
)
plt.scatter(
hl_cn_coords[:, 0],
hl_cn_coords[:, 1],
s=highlight_connector_size,
c=highlight_connector_color,
zorder=3,
linewidths=1,
)
elif isinstance(highlight_connectors, dict):
for cn in highlight_connectors:
if cn in highlight_connectors:
if cn is None:
continue
if cn not in x.connectors.connector_id.values:
print(
"Connector {} is not present in the neuron / neuron fragment".format(
cn
)
)
hl_cn_coords = np.array(
[
pos[tn]
for tn in x.connectors[
x.connectors.connector_id == cn
].node_id
]
)
plt.scatter(
hl_cn_coords[:, 0],
hl_cn_coords[:, 1],
s=highlight_connector_size,
color=highlight_connectors[cn],
zorder=3,
)
else:
raise TypeError(
"Unable to highlight connectors from data of type {}".format(
type(highlight_connectors)
)
)
print("Completed in %is" % int(time.time() - start)) | 5,330,581 |
async def stop():
    """Stop playback of any currently playing audio.

    Returns:
        The sound state after stopping.
    """
    Sound.stop()
    current_state = Sound.get_state()
    return current_state
def test_sanitize_content_filename(filename, expected):
    """
    Check inputs whose sanitized result is identical on Windows and
    non-Windows platforms.
    """
    result = sanitize_content_filename(filename)
    assert result == expected
def is_numeric(X, compress=True):
    """
    Determine whether the input array contains only numeric values.

    Parameters
    ----------
    X: numpy.ndarray
        Array whose elements are tested for convertibility to ``float``.
    compress: bool
        If True (default), return a single boolean that is True only when
        every element is numeric. If False, return an elementwise boolean
        array with the same shape as ``X``.

    Returns
    -------
    V: bool if ``compress`` is True, otherwise a numpy boolean array
    """
    def is_float(val):
        # TypeError covers values float() rejects outright (e.g. None or
        # lists); the original only caught ValueError and crashed on these.
        try:
            float(val)
        except (ValueError, TypeError):
            return False
        else:
            return True

    isnumeric = np.vectorize(is_float, otypes=[bool])  # return numpy array
    V = isnumeric(X)
    if compress:
        return bool(np.all(V))
    return V
def figure_ellipse_fitting(img, seg, ellipses, centers, crits, fig_size=9):
    """ show figure with result of the ellipse fitting

    :param ndarray img: grey-scale (2D) image drawn in the background
    :param ndarray seg: segmentation, used only to set the axis limits
    :param [(int, int, int, int, float)] ellipses: each ellipse given as
        (center row, center col, half-height, half-width, rotation)
    :param [(int, int)] centers: center coordinates as (row, col) pairs
    :param [float] crits: per-ellipse fitting criterion shown in the legend
    :param float fig_size: figure size (longer image side, in inches)
    :return: matplotlib Figure

    >>> img = np.random.random((100, 150, 3))
    >>> seg = np.random.randint(0, 2, (100, 150))
    >>> ells = np.random.random((3, 5)) * 25
    >>> centers = np.random.random((3, 2)) * 25
    >>> crits = np.random.random(3)
    >>> fig = figure_ellipse_fitting(img[:, :, 0], seg, ells, centers, crits)
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    assert len(ellipses) == len(centers) == len(crits), \
        'number of ellipses (%i) and centers (%i) and criteria (%i) ' \
        'should match' % (len(ellipses), len(centers), len(crits))
    fig, ax = create_figure_by_image(img.shape[:2], fig_size)
    assert img.ndim == 2, \
        'required image dimension is 2 to instead %s' % repr(img.shape)
    ax.imshow(img, cmap=plt.cm.Greys_r)

    # draw each ellipse perimeter in a cycling color with its criterion
    for i, params in enumerate(ellipses):
        c1, c2, h, w, phi = params
        rr, cc = ellipse_perimeter(int(c1), int(c2), int(h), int(w), phi)
        ax.plot(cc, rr, '.', color=COLORS[i % len(COLORS)],
                label='#%i with crit=%d' % ((i + 1), int(crits[i])))
    ax.legend(loc='lower right')

    # accept any sequence of (row, col) pairs, not only a 2D ndarray,
    # since the docstring documents `centers` as a list of tuples
    centers = np.asarray(centers)
    for i in range(len(centers)):
        ax.plot(centers[i, 1], centers[i, 0], 'o',
                color=COLORS[i % len(COLORS)])

    ax.set_xlim([0, seg.shape[1]])
    ax.set_ylim([seg.shape[0], 0])
    ax.axis('off')
    fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
    return fig
def fmt_bytesize(num: float, suffix: str = "B") -> str:
    """Format a byte count as a human-readable string (e.g. ``1.5 KiB``).

    Args:
        num: number to format
        suffix: unit suffix appended after the binary prefix (Default value = 'B')

    Returns:
        The value formatted with a binary (1024-based) prefix.
    """
    value = num
    for prefix in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
        if abs(value) < 1024.0:
            return f"{value:3.1f} {prefix}{suffix}"
        value /= 1024.0
    # anything past Zi collapses into yobibytes
    return f"{value:.1f} Yi{suffix}"
def get_kpoint_mesh(structure: Structure, cutoff_length: float, force_odd: bool = True):
    """Calculate a reciprocal-space sampling mesh from a real-space cut-off.

    The division count along each reciprocal axis is ceil(2 * cutoff * |b_i|);
    with ``force_odd`` every even count is bumped up to the next odd integer.
    """
    recip_lengths = np.array(
        structure.lattice.reciprocal_lattice_crystallographic.abc
    )
    mesh = np.ceil(2 * cutoff_length * recip_lengths).astype(int)
    if force_odd:
        # even -> next odd; odd values gain (odd+1) % 2 == 0 and stay put
        mesh += (mesh + 1) % 2
    return mesh
def resnet_v1_101(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v1_101', **kwargs):
    """ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
    # (name, base_depth, num_units, stride) per stage; the 23 units in
    # block3 are what distinguish ResNet-101 from ResNet-50.
    stage_specs = [
        ('block1', 64, 3, 2),
        ('block2', 128, 4, 2),
        ('block3', 256, 23, 2),
        ('block4', 512, 3, 1),
    ]
    blocks = [
        resnet_v1_block(name, base_depth=depth, num_units=units, stride=stride)
        for name, depth, units, stride in stage_specs
    ]
    return resnet_v1(inputs, blocks, num_classes, is_training,
                     global_pool=global_pool, output_stride=output_stride,
                     include_root_block=True, spatial_squeeze=spatial_squeeze,
                     reuse=reuse, scope=scope, **kwargs)
def test_l2_vlan_bcast_ucast(npu, dataplane):
    """
    Description:
    VLAN broadcast and known unicast test.
    Verify the broadcast packet reaches all ports in the VLAN and known unicast packet reaches specific port.
    Test scenario:
    1. Create a VLAN 10
    2. Add ports as untagged members to the VLAN
    3. Add MAC for each port
    4. Send untagged broadcast packet from port 1, verify all ports receive the packet
    5. Send untagged unicast packets from port 1 to the rest of the vlan members ports.
    Verify only one port at a time receives the packet and port n does not.
    6. Clean up configuration
    """
    vlan_id = "10"
    # One locally-assigned MAC per member port, index-aligned with npu.port_oids.
    macs = []
    # Create VLAN
    vlan_oid = npu.create(SaiObjType.VLAN, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
    # Move every bridge port from the default VLAN into VLAN 10 as an untagged
    # member, set its PVID so ingress untagged frames are classified into
    # VLAN 10, and install a static FDB entry so the unicast frames are "known".
    for idx, bp_oid in enumerate(npu.dot1q_bp_oids):
        npu.remove_vlan_member(npu.default_vlan_oid, bp_oid)
        npu.create_vlan_member(vlan_oid, bp_oid, "SAI_VLAN_TAGGING_MODE_UNTAGGED")
        npu.set(npu.port_oids[idx], ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
        macs.append("00:00:00:00:00:%02x" %(idx+1))
        npu.create_fdb(vlan_oid, macs[idx], bp_oid)
    try:
        if npu.run_traffic:
            # Step 4: a broadcast frame sent on port 0 must be flooded to
            # every other VLAN member.
            bcast_pkt = simple_tcp_packet(eth_dst='ff:ff:ff:ff:ff:ff',
                                          eth_src='00:00:00:00:00:01',
                                          ip_dst='10.0.0.1',
                                          ip_id=101,
                                          ip_ttl=64)
            expected_ports = []
            for idx in range(1, len(npu.dot1q_bp_oids)):
                expected_ports.append(idx)
            send_packet(dataplane, 0, bcast_pkt)
            verify_packets(dataplane, bcast_pkt, expected_ports)
            # Step 5: a known-unicast frame must egress only the single port
            # whose FDB entry matches the destination MAC.
            for idx in range(1, len(npu.dot1q_bp_oids)):
                ucast_pkt = simple_tcp_packet(eth_dst=macs[idx],
                                              eth_src='00:00:00:00:00:01',
                                              ip_dst='10.0.0.1',
                                              ip_id=101,
                                              ip_ttl=64)
                send_packet(dataplane, 0, ucast_pkt)
                verify_packets(dataplane, ucast_pkt, [idx])
    finally:
        # Step 6: always restore default VLAN membership and PVID for every
        # port, remove the FDB entries, then delete VLAN 10 itself.
        for idx, bp_oid in enumerate(npu.dot1q_bp_oids):
            npu.remove_fdb(vlan_oid, macs[idx])
            npu.remove_vlan_member(vlan_oid, bp_oid)
            npu.create_vlan_member(npu.default_vlan_oid, bp_oid, "SAI_VLAN_TAGGING_MODE_UNTAGGED")
            npu.set(npu.port_oids[idx], ["SAI_PORT_ATTR_PORT_VLAN_ID", npu.default_vlan_id])
        npu.remove(vlan_oid)
def is_package_authorized(package_name):
    """
    get user information if it is authorized user in the package config
    Returns:
        [JSON string]: [user information session]
    """
    admins = get_package_admins(package_name)
    user_info = get_user_info()
    username = j.data.serializers.json.loads(user_info)["username"]
    # if the package doesn't include admins then allow any authenticated user
    if admins:
        allowed = username in admins or username in j.core.identity.me.admins
        if not allowed:
            return abort(403)
    return user_info
def submit_search_query(query_string, query_limit, query_offset,
                        class_resource):
    """
    Submit a search query request to the RETS API
    """
    # coerce the query to a string exactly as '%s' formatting would
    query = '%s' % query_string
    search_result = class_resource.search(query=query,
                                          limit=query_limit,
                                          offset=query_offset)
    return search_result
def extract_site_packages(archive, target_path, compile_pyc=False, compile_workers=0, force=False):
    """Extract the archive's ``site-packages`` tree to a specified path.

    :param ZipFile archive: The zipfile object we are bootstrapping from.
    :param Path target_path: The path to extract our zip to.
    :param bool compile_pyc: Whether we pre-compile .pyc files after extraction.
    :param int compile_workers: Number of pyc compiler worker processes.
    :param bool force: Whether to re-extract even if the target already exists.
    """
    parent = target_path.parent
    tmp_path = Path(parent, target_path.stem + ".tmp")
    lock_path = Path(parent, f".{target_path.stem}_lock")

    # First extraction ever: the parent cache directory may not exist yet.
    if not parent.exists():
        parent.mkdir(parents=True, exist_ok=True)

    with FileLock(lock_path):
        # A prior invocation may have completed bootstrapping while we were
        # waiting on the lock, so re-check whether any work remains.
        if target_path.exists() and not force:
            return

        # Extract only the site-packages portion, restoring the POSIX
        # permissions stored in the upper bits of external_attr.
        for fileinfo in archive.infolist():
            if not fileinfo.filename.startswith("site-packages"):
                continue
            extracted = archive.extract(fileinfo.filename, tmp_path)
            os.chmod(extracted, fileinfo.external_attr >> 16)

        if compile_pyc:
            compileall.compile_dir(tmp_path, quiet=2, workers=compile_workers)

        # `force` requires clearing the existing target before the move.
        if target_path.exists():
            shutil.rmtree(str(target_path))

        # atomic move
        shutil.move(str(tmp_path), str(target_path))
def one_hot_encode(df):
    """
    desc : one hot encodes categorical cols
    args:
        df (pd.DataFrame) : stroke dataframe
    returns:
        df (pd.DataFrame) : stroke dataframe with one_hot_encoded columns
    """
    # Work on a copy so the caller's dataframe is left untouched.
    stroke_data = df.copy()
    # Extract the object-dtype (categorical) columns.
    cat_cols = stroke_data.select_dtypes(include=["object"])
    cat_cols_names = cat_cols.columns
    enc = OneHotEncoder(sparse=False)
    encoded_vals = enc.fit_transform(cat_cols.values)
    encoded_col_names = enc.get_feature_names(cat_cols_names)
    encoded_cols = pd.DataFrame(encoded_vals, columns=encoded_col_names,
                                index=cat_cols.index)
    # Replace the raw categorical columns with their encoded counterparts.
    # (Debug prints of the intermediate frame were removed.)
    stroke_data.drop(columns=cat_cols_names, inplace=True)
    stroke_data = pd.concat([stroke_data, encoded_cols], axis=1)
    return stroke_data
def assert_increasing(a):
    """Enforce that array values are in ascending (non-decreasing) order.

    Intended as the ``post_method`` of a processed property, e.g. to require
    time guesses to be sequential. ``None`` is passed through unchanged.

    Raises:
        ValueError: if any consecutive pair of elements decreases.
    """
    if a is None:
        return a
    if not np.all(np.diff(a) >= 0):
        msg = f"Elements in {a} must be in ascending numerical order."
        raise ValueError(msg)
    return a
def invalidate_users_QBT(user_id):
    """Mark the given user's QBT rows invalid (by deletion)."""
    user_rows = QBT.query.filter(QBT.user_id == user_id)
    user_rows.delete()
    db.session.commit()
def copy_to_device(device,
                   remote_path,
                   local_path='harddisk:',
                   server=None,
                   protocol='http',
                   vrf=None,
                   timeout=300,
                   compact=False,
                   use_kstack=False,
                   fu=None,
                   http_auth=True,
                   **kwargs):
    """Copy a file from a linux server to the device.

    Thin wrapper that delegates all work to ``generic_copy_to_device``.

    Args:
        device (Device): Device object
        remote_path (str): remote file path on the server
        local_path (str): local file path to copy to on the device
            (default: harddisk:)
        server (str): hostname or address of the server (default: None)
        protocol (str): file transfer protocol to be used (default: http)
        vrf (str): vrf to use (optional)
        timeout (int): timeout value in seconds (default: 300)
        compact (bool): compress image option for n9k (default: False)
        use_kstack (bool): use the faster copy variant (default: False);
            not supported with a file transfer protocol prompting for a
            username and password
        fu (obj): FileUtils object to use instead of creating one
            (default: None)
        http_auth (bool): use http authentication (default: True)

    Returns:
        None

    Notes:
        If no server is specified, an HTTP server is spawned on the local
        system to serve the directory of ``remote_path``, and the copy uses
        http. If the device is connected via a CLI proxy (unix jump host)
        that has 'socat' installed, the transfer goes through the proxy
        automatically.
    """
    return generic_copy_to_device(device=device, remote_path=remote_path,
                                  local_path=local_path, server=server,
                                  protocol=protocol, vrf=vrf, timeout=timeout,
                                  compact=compact, use_kstack=use_kstack,
                                  fu=fu, http_auth=http_auth, **kwargs)
def get_credentials():
    """Get valid user credentials from local storage.

    If nothing has been stored, or the stored credentials are invalid,
    the OAuth2 flow is run to obtain fresh credentials, which are then
    persisted under ``~/.gdrive/gdrive.json``.

    Returns:
        Credentials, the obtained credential.
    """
    credential_dir = os.path.join(os.path.expanduser('~'), '.gdrive')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'gdrive.json')

    store = oauth2client.file.Storage(credential_path)
    credentials = store.get()
    if credentials and not credentials.invalid:
        return credentials

    # No usable stored credentials: run the interactive OAuth2 flow.
    flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
    flow.user_agent = APPLICATION_NAME
    credentials = tools.run_flow(flow, store)
    print('Storing credentials to ' + credential_path)
    return credentials
def decode_replay_header(contents):
    """Decode and return the replay header from the contents byte string."""
    header_decoder = VersionedDecoder(contents, protocol.typeinfos)
    return header_decoder.instance(protocol.replay_header_typeid)
def test_disabled_command(pkg):
    """Test command that has been disabled."""
    npmpkg = NPMPackage(pkg, commands=['run-script'])
    # 'install' was not enabled, so attribute access must fail
    with pytest.raises(AttributeError):
        getattr(npmpkg, 'install')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.