| content (string) | id (int64) |
|---|---|
def rgetattr(obj, attr):
"""
    Get a named (possibly dotted) attribute from an object: rgetattr(obj, 'a.a')
    is equivalent to ``obj.a.a``.
- obj: object
- attr: attribute name(s)
>>> class A: pass
>>> a = A()
>>> a.a = A()
>>> a.a.a = 1
>>> rgetattr(a, 'a.a')
1
>>> rgetattr(a, 'a.c')
Traceback (most recent call last):
...
AttributeError: 'A' object has no attribute 'c'
"""
attrs = attr.split(".")
obj = getattr(obj, attrs[0])
for name in attrs[1:]:
obj = getattr(obj, name)
return obj
| 5,343,500
|
def lambda1_plus_lambda2(lambda1, lambda2):
"""Return the sum of the primary objects tidal deformability and the
secondary objects tidal deformability
"""
return lambda1 + lambda2
| 5,343,501
|
def __create_resource_management_client():
"""
Create a ResourceManagementClient object using the subscription ID from environment variables
"""
subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", None)
if subscription_id is None:
return None
return ResourceManagementClient(
credential=__create_service_principal_credentials(),
subscription_id=subscription_id
)
| 5,343,502
|
def create_cut_sht(stockOutline,array,features,partSpacing,margin):
""" """
numParts = len(array)
basePlanes = generate_base_planes_from_array(array)
targetPlanes = create_cut_sht_targets(stockOutline,array,margin,partSpacing)
if targetPlanes == None:
return None
else:
# converts GH branch to python list for a set of features
features = [item for item in features.Branches]
cut_sht = []
for i in range(numParts):
objects = [array[i]]
for item in features[i]:
objects.append(item)
cutPart = reorient_objects(objects,basePlanes[i],targetPlanes[i])
cut_sht.append(cutPart)
return cut_sht
| 5,343,503
|
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False
| 5,343,504
|
def duplicate_item(api_key: str, board_id: str, item_id: str, *args, **kwargs):
"""Duplicate an item.
Parameters
api_key : `str`
The monday.com v2 API user key.
board_id : `str`
The board's unique identifier.
item_id : `str`
The item's unique identifier.
args : `tuple`
The list of item return fields.
kwargs : `dict`
Optional arguments for item.
Returns
data : `dict`
A monday.com item in dictionary form.
Return Fields
assets : `list[moncli.entities.Asset]`
The item's assets/files.
board : `moncli.entities.Board`
The board that contains this item.
column_values : `list[moncli.entities.ColumnValue]`
The item's column values.
created_at : `str`
The item's create date.
creator : `moncli.entities.User`
The item's creator.
creator_id : `str`
        The unique identifier of the item's creator.
group : `moncli.entities.Group`
The group that contains this item.
id : `str`
The item's unique identifier.
name : `str`
The item's name.
state : `str`
The board's state (all / active / archived / deleted)
subscriber : `moncli.entities.User`
The pulse's subscribers.
updated_at : `str`
The item's last update date.
updates : `moncli.entities.Update`
The item's updates.
Optional Arguments
with_updates : `bool`
Duplicate with the item's updates.
"""
    # Merge the required identifiers into any caller-supplied optional
    # arguments (e.g. with_updates) so they are not discarded.
    kwargs['board_id'] = gql.IntValue(board_id)
    kwargs['item_id'] = gql.IntValue(item_id)
    return execute_query(api_key, query_name=DUPLICATE_ITEM, operation_type=gql.OperationType.MUTATION, fields=args, arguments=kwargs)
| 5,343,505
|
def hdparm_secure_erase(disk_name, se_option):
"""
Secure erase using hdparm tool
:param disk_name: disk to be erased
:param se_option: secure erase option
:return: a dict includes SE command exitcode and SE message
"""
# enhance_se = ARG_LIST.e
log_file = disk_name.split("/")[-1] + ".log" # log file for sdx will be sdx.log
log = open(log_file, "a+")
if se_option:
hdparm_option = "--" + se_option
else:
hdparm_option = "--security-erase" # Default is security erase
# Hdparm SE Step1: check disk status
#
# Secure Erase supported output example
# Security:
# Master password revision code = 65534
# supported
# not enabled
# not locked
# not frozen
# not expired: security count
# supported: enhanced erase
# 2min for SECURITY ERASE UNIT. 2min for ENHANCED SECURITY ERASE UNIT.
# Checksum: correct
#
# except for "supported" and "enabled", other items should have "not" before them
if hdparm_option == "--security-erase":
pattern_se_support = re.compile(r'[\s\S]*(?!not)[\s]*supported'
r'[\s]*[\s\S]*enabled[\s]*not[\s]'
r'*locked[\s]*not[\s]*frozen[\s]*not[\s]*expired[\s\S]*')
else:
pattern_se_support = re.compile(r'[\s\S]*(?!not)[\s]*supported[\s]*[\s\S]*enabled[\s]*not'
r'[\s]*locked[\s]*not[\s]*frozen[\s]*not[\s]*expired[\s\S]*'
r'supported: enhanced erase[\s\S]*')
hdparm_check_drive_status(pattern_se_support, disk_name, log)
    # TODO: add section to unlock a disk
# Hdparm SE Step2: set password
command = ["hdparm", "--verbose", "--user-master", "u",
"--security-set-pass", SE_PASSWORD, disk_name]
assert robust_check_call(command, log)["exit_code"] == 0, \
"Failed to set password for disk " + disk_name
# Hdparm SE Step3: confirm disk is ready for secure erase
# both "supported" and "enabled" should have no "not" before them
# other items should still have "not" before them
pattern_se_enabled = re.compile(r'[\s\S]*(?!not)[\s]*supported[\s]*(?!not)[\s]*enabled[\s]*not'
r'[\s]*locked[\s]*not[\s]*frozen[\s]*not[\s]*expired[\s\S]*')
hdparm_check_drive_status(pattern_se_enabled, disk_name, log)
log.close()
# Hdparm SE step4: run secure erase command
command = ["hdparm", "--verbose", "--user-master", "u", hdparm_option, SE_PASSWORD, disk_name]
return secure_erase_base(disk_name, command)
| 5,343,506
|
def compute_loss(retriever_logits, retriever_correct, reader_logits,
reader_correct):
"""Compute loss."""
# []
retriever_loss = marginal_log_loss(retriever_logits, retriever_correct)
# []
reader_loss = marginal_log_loss(
tf.reshape(reader_logits, [-1]), tf.reshape(reader_correct, [-1]))
# []
any_retrieved_correct = tf.reduce_any(retriever_correct)
any_reader_correct = tf.reduce_any(reader_correct)
retriever_loss *= tf.cast(any_retrieved_correct, tf.float32)
reader_loss *= tf.cast(any_reader_correct, tf.float32)
loss = retriever_loss + reader_loss
tf.summary.scalar("num_read_correct",
tf.reduce_sum(tf.cast(reader_correct, tf.int32)))
tf.summary.scalar("reader_loss", tf.reduce_mean(reader_loss))
tf.summary.scalar("retrieval_loss", tf.reduce_mean(retriever_loss))
# []
loss = tf.reduce_mean(loss)
return loss
| 5,343,507
|
def qa_curveofgrowth(ellipsefit, pipeline_ellipsefit=None, png=None,
plot_sbradii=False, cosmo=None, verbose=True):
"""Plot up the curve of growth versus semi-major axis.
"""
from legacyhalos.ellipse import CogModel
if ellipsefit['success'] is False or np.atleast_1d(ellipsefit['r_sma'])[0] == -1:
return
colors = _sbprofile_colors()
fig, ax = plt.subplots(figsize=(9, 7))
bands, refband = ellipsefit['bands'], ellipsefit['refband']
if 'redshift' in ellipsefit.keys():
redshift = ellipsefit['redshift']
smascale = legacyhalos.misc.arcsec2kpc(redshift, cosmo=cosmo) # [kpc/arcsec]
else:
redshift, smascale = None, None
#maxsma = ellipsefit['cog_sma_{}'.format(refband)].max()
maxsma = 0
yfaint, ybright = 0, 50
for filt in bands:
col = next(colors) # iterate here in case we're missing a bandpass
#flux = ellipsefit['apphot_mag_{}'.format(filt)]
#good = np.where( np.isfinite(flux) * (flux > 0) )[0]
#mag = 22.5-2.5*np.log10(flux[good])
cog = ellipsefit['{}_cog_mag'.format(filt)]
cogerr = ellipsefit['{}_cog_magerr'.format(filt)]
chi2 = ellipsefit['{}_cog_params_chi2'.format(filt)]
if np.atleast_1d(cog)[0] == -1 or chi2 == 1e6: # no measurement, or failed
continue
sma = ellipsefit['{}_cog_sma'.format(filt)]
radius = sma**0.25
xlim = (0.9, radius.max()*1.01)
magtot = ellipsefit['{}_cog_params_mtot'.format(filt)]
m0 = ellipsefit['{}_cog_params_m0'.format(filt)]
alpha1 = ellipsefit['{}_cog_params_alpha1'.format(filt)]
alpha2 = ellipsefit['{}_cog_params_alpha2'.format(filt)]
#magtot = np.mean(mag[-5:])
if pipeline_ellipsefit and False:
pipeline_magtot = pipeline_ellipsefit['cog_params_{}'.format(filt)]['mtot']
label = '{}={:.3f} ({:.3f})'.format(filt, magtot, pipeline_magtot)
else:
label = r'${}$'.format(filt)
#label = r'${}_{{\mathrm{{tot}}}}={:.3f}$'.format(filt, magtot)
#label = r'{}={:.3f} ($\chi^2_\nu={:.1f}$)'.format(filt, magtot, chi2)
#ax.plot(sma, cog, label=label)
ax.fill_between(radius, cog-cogerr, cog+cogerr, label=label, color=col)
#facecolor=col, edgecolor='k', lw=2)
#if np.any(np.iscomplex(sma)) or np.any(np.iscomplex(cog)) or np.any(np.iscomplex(cogerr)):
# pdb.set_trace()
if pipeline_ellipsefit and False:
_sma = pipeline_ellipsefit['{}_cog_sma'.format(filt)]
_cog = pipeline_ellipsefit['{}_cog_mag'.format(filt)]
_cogerr = pipeline_ellipsefit['{}_cog_magerr'.format(filt)]
#ax.plot(_sma, _cog, alpha=0.5, color='gray')
ax.fill_between(_sma, _cog-_cogerr, _cog+_cogerr,
facecolor=col, alpha=0.5)#, edgecolor='k', lw=1)
cogmodel = CogModel().evaluate(sma, magtot, m0, alpha1, alpha2)
ax.plot(radius, cogmodel, color='k', lw=2, ls='--', alpha=0.5)
if sma.max() > maxsma:
maxsma = sma.max()
#print(filt, np.mean(mag[-5:]))
#print(filt, mag[-5:], np.mean(mag[-5:])
#print(filt, np.min(mag))
inrange = np.where((radius >= xlim[0]) * (radius <= xlim[1]))[0]
if cog[inrange].max() > yfaint:
yfaint = cog[inrange].max()
if cog[inrange].min() < ybright:
ybright = cog[inrange].min()
#if filt == 'r':
# pdb.set_trace()
#ax.set_xlabel(r'Semi-major axis (arcsec)')
#ax.set_ylabel('Cumulative brightness (AB mag)')
ax.set_xlabel(r'(Semi-major axis $r$)$^{1/4}$ (arcsec)')
ax.set_ylabel('$m(<r)$ (mag)')
if maxsma > 0:
ax.set_xlim(0.9, (maxsma**0.25)*1.01)
#ax.set_xlim(0, maxsma*1.01)
else:
ax.set_xlim(0, 3) # hack!
#ax.margins(x=0)
xlim = ax.get_xlim()
if smascale:
ax_twin = ax.twiny()
ax_twin.set_xlim(xlim[0]*smascale, xlim[1]*smascale)
ax_twin.set_xlabel('Semi-major axis (kpc)')
#ax_twin.margins(x=0)
yfaint += 0.5
ybright += -0.5
ax.set_ylim(yfaint, ybright)
if False:
ax_twin = ax.twinx()
ax_twin.set_ylim(yfaint, ybright)
ax_twin.set_ylabel('Cumulative Flux (AB mag)')#, rotation=-90)
hh, ll = ax.get_legend_handles_labels()
if len(hh) > 0:
leg1 = ax.legend(loc='lower right', fontsize=14)#, ncol=3)
# Plot some threshold radii for the large-galaxy project--
if plot_sbradii:
lline, llabel = [], []
if ellipsefit['radius_sb24'] > 0: #< xlim[1]:
ll = ax.axvline(x=ellipsefit['radius_sb24'], lw=2, color='k', ls='-.')
lline.append(ll), llabel.append('R(24)')
if ellipsefit['radius_sb25'] > 0: #< xlim[1]:
ll = ax.axvline(x=ellipsefit['radius_sb25'], lw=2, color='k', ls='--')
lline.append(ll), llabel.append('R(25)')
if ellipsefit['radius_sb26'] > 0: #< xlim[1]:
ll = ax.axvline(x=ellipsefit['radius_sb26'], lw=2, color='k', ls='-')
lline.append(ll), llabel.append('R(26)')
if False:
ll = ax.axvline(x=ellipsefit['majoraxis'] * ellipsefit['refpixscale'],
lw=2, color='#e41a1c', ls='dotted')
lline.append(ll), llabel.append('Moment Size')
if len(lline) > 0:
leg2 = ax.legend(lline, llabel, loc='lower left', fontsize=14, frameon=False)
ax.add_artist(leg1)
if smascale:
fig.subplots_adjust(left=0.12, bottom=0.15, top=0.85, right=0.95)
#fig.subplots_adjust(left=0.12, bottom=0.15, top=0.85, right=0.88)
else:
fig.subplots_adjust(left=0.12, bottom=0.15, top=0.95, right=0.95)
#fig.subplots_adjust(left=0.12, bottom=0.15, top=0.95, right=0.88)
if png:
#if verbose:
print('Writing {}'.format(png))
fig.savefig(png)
plt.close(fig)
else:
plt.show()
| 5,343,508
|
def is_using_git():
"""True if git checkout is used."""
return os.path.exists(os.path.join(REPO_ROOT, '.git', 'objects'))
| 5,343,509
|
def index() -> render_template:
"""
    The main part of the code that is run when the user visits the address.
Parameters:
covid_data: This is a dictionary of the data returned from the API request.
local_last7days_cases: The number of local cases in the last 7 days.
national_last7days_cases: The number of national cases in the last 7 days.
current_hospital_cases: The number of current hospital cases.
total_deaths: The number of total deaths in The UK.
news: A list of all the news.
update_name: The name of the scheduled update.
update_interval: The time the event will take place.
repeat: Whether the update will repeat.
updating_covid: Whether the update will update the covid data.
updating_news: Whether the update will update the news.
news_to_delete: The title of the news that is to be deleted.
update_to_delete: The title of the update that is to be deleted.
Returns:
A rendered template with the data.
"""
s.run(blocking=False) # stops the scheduler from blocking the server from running
covid_data = covid_API_request()
(local_last7days_cases,
national_last7days_cases,
current_hospital_cases,
total_deaths) = process_covid_data(covid_data)
news = update_news()
update_name = request.args.get("two")
if update_name: # checks if an update has been scheduled
update_interval = request.args.get("update")
repeat = request.args.get("repeat")
updating_covid = request.args.get("covid-data")
updating_news = request.args.get("news")
schedule_covid_updates(update_interval, update_name, repeat, updating_covid, updating_news)
if request.args.get("notif"): # checks if news has been deleted
news_to_delete = request.args.get("notif")
delete_news(news_to_delete)
if request.args.get("update_item"): # checks if an update has been deleted
update_to_delete = request.args.get("update_item")
delete_update(update_to_delete, True)
return render_template('index.html',
title=(title),
news_articles=news,
updates=update,
location=(city),
local_7day_infections=(local_last7days_cases),
nation_location=("United Kingdom"),
national_7day_infections=(national_last7days_cases),
hospital_cases=(f"Hospital Cases: {current_hospital_cases}"),
deaths_total=(f"Total Deaths: {total_deaths}"))
| 5,343,510
|
def r(x):
"""
Cartesian radius of a point 'x' in 3D space
Parameters
----------
x : (3,) array_like
1D vector containing the (x, y, z) coordinates of a point.
Returns
-------
r : float
Radius of point 'x' relative to origin of coordinate system
"""
return np.sqrt((x[0]**2) + (x[1]**2) + (x[2]**2))
| 5,343,511
|
def solve(*args):
"""
Crunch the numbers; solve the problem.
solve(IM A, IM b) -> IM
solve(DM A, DM b) -> DM
solve(SX A, SX b) -> SX
solve(MX A, MX b) -> MX
solve(IM A, IM b, str lsolver, dict opts) -> IM
solve(DM A, DM b, str lsolver, dict opts) -> DM
solve(SX A, SX b, str lsolver, dict opts) -> SX
solve(MX A, MX b, str lsolver, dict opts) -> MX
"""
return _casadi.solve(*args)
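# A minimal usage sketch (not from the original source): solving a small dense
# linear system A x = b with the DM overload listed above.
import casadi

A = casadi.DM([[3, 1], [1, 2]])
b = casadi.DM([9, 8])
x = casadi.solve(A, b)   # dense solve
print(x)                 # -> [2, 3]
# x = casadi.solve(A, b, "lapacklu", {})  # variant with an explicit linear solver plugin (availability depends on the build)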
| 5,343,512
|
def NonNegativeInteger(num):
"""
    Ensures that the number is non-negative
"""
if num < 0:
raise SmiNetValidationError("A non-negative integer is required")
return num
| 5,343,513
|
def to_cartesian(r, ang):
"""Returns the cartesian coordinates of a polar point."""
x = r * np.cos(ang)
y = r * np.sin(ang)
return x, y
| 5,343,514
|
def plot_seqlogo(ax, seq_1hot, sat_score_ti, pseudo_pct=0.05):
""" Plot a sequence logo for the loss/gain scores.
Args:
ax (Axis): matplotlib axis to plot to.
seq_1hot (Lx4 array): One-hot coding of a sequence.
sat_score_ti (L_sm array): Minimum mutation delta across satmut length.
pseudo_pct (float): % of the max to add as a pseudocount.
"""
satmut_len = len(sat_score_ti)
# add pseudocounts
sat_score_ti += pseudo_pct * sat_score_ti.max()
# expand
sat_score_4l = expand_4l(sat_score_ti, seq_1hot)
plots.seqlogo(sat_score_4l, ax)
| 5,343,515
|
def _get_filtered_partially_learnt_topic_summaries(
topic_summaries, topic_ids):
"""Returns a list of summaries of the partially learnt topic ids and the ids
of topics that are no longer present.
Args:
topic_summaries: list(TopicSummary). The list of topic
summary domain objects to be filtered.
        topic_ids: list(str). The ids of the topics corresponding to
the topic summary domain objects.
Returns:
tuple. A 2-tuple whose elements are as follows:
- list(TopicSummary). A filtered list with the summary domain
objects of the partially_learnt topics.
- list(str). The ids of the topics that are no longer present.
"""
nonexistent_partially_learnt_topic_ids = []
filtered_partially_learnt_topic_summaries = []
topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids)
for index, topic_summary in enumerate(topic_summaries):
if topic_summary is None:
nonexistent_partially_learnt_topic_ids.append(topic_ids[index])
else:
topic_id = topic_summary.id
if not topic_rights[index].topic_is_published:
nonexistent_partially_learnt_topic_ids.append(topic_id)
else:
filtered_partially_learnt_topic_summaries.append(topic_summary)
return (
filtered_partially_learnt_topic_summaries,
nonexistent_partially_learnt_topic_ids)
| 5,343,516
|
def build_bar_chart_with_two_bars_per_label(series1, series2, series1_label, series2_label, series1_labels,
series2_labels,
title, x_axis_label, y_axis_label, output_file_name):
"""
This function builds a bar chart that has two bars per label.
:param series1: a list of values containing the data for the first series
:param series2: a list of values containing the data for the second series
:param series1_label: a label to be shown in the legend for the first series
:param series2_label: a label to be shown in the legend for the second series
:param series1_labels: a list of labels for the first series
:param series2_labels: a list of labels for the second series
:param title: string value of the title of the bar chart
:param x_axis_label: the label to show on the x axis
:param y_axis_label: the label to show on the y axis
:param output_file_name: the name and path of the file where the figure is to be exported to
:return: string path of the image that has been saved of the figure
"""
index_series1 = np.arange(len(series1_labels))
index_series2 = np.arange(len(series2_labels))
fig, ax = plt.subplots()
ax.bar(x=index_series1 - 0.4, height=series1, width=0.4, bottom=0, align='center', label=series1_label)
ax.bar(x=index_series2, height=series2, width=0.4, bottom=0, align='center', label=series2_label)
ax.set_xlabel(x_axis_label, fontsize=10)
ax.set_ylabel(y_axis_label, fontsize=10)
ax.set_xticks(index_series1)
ax.set_xticklabels(series1_labels, fontsize=10, rotation=30)
ax.set_title(title)
ax.legend(loc='upper right', frameon=True)
plt.show()
# fig.savefig(output_file_name, dpi=300, bbox_inches='tight')
# return '../{}'.format(output_file_name)
return "{}".format(write_to_image_file(fig, output_file_name, False, 300))
| 5,343,517
|
def calculateDescent():
"""
Calculate descent timestep
"""
global descentTime
global tod
descentTime = myEndTime
line = len(originalTrajectory)
for segment in reversed(originalTrajectory):
flInit = int(segment[SEGMENT_LEVEL_INIT])
flEnd = int(segment[SEGMENT_LEVEL_END])
status = segment[STATUS]
if flInit == flEnd and status == '2':
stop=True
for i in range(1,4):
flInitAux = int(originalTrajectory[line-i][SEGMENT_LEVEL_INIT])
flEndAux = int(originalTrajectory[line-i][SEGMENT_LEVEL_END])
statAux = originalTrajectory[line-i][STATUS]
if flInitAux == flEndAux and statAux == '2': pass
else: stop = False; break
if stop: break
else: descentTime-= TIME_STEP
line-=1
tod = {}
tod['LAT'] = originalTrajectory[line][SEGMENT_LAT_INIT]
tod['LON'] = originalTrajectory[line][SEGMENT_LON_INIT]
tod['ALT'] = originalTrajectory[line][SEGMENT_LEVEL_INIT]
logger(myLogFile,rankMsg,LOG_STD,'Descending starts at time '+str(descentTime)+' [s]')
return line
| 5,343,518
|
def update_tutorial(request,pk):
"""View function for updating tutorial """
tutorial = get_object_or_404(Tutorial, pk=pk)
form = TutorialForm(request.POST or None, request.FILES or None, instance=tutorial)
if form.is_valid():
form.save()
messages.success(request=request, message="Congratulations! Tutorial has been updated.")
return redirect(to="dashboard")
context={
"form":form,
}
return render(request=request, context=context, template_name="dashboard/dashboard_addtutorialseries.html")
| 5,343,519
|
def run(_):
"""Run the command. """
source_promusrc()
promusrc()
| 5,343,520
|
def download_with_options():
"""
Test if the script is able to download images
with specified characteristics
"""
print('Instantiating crawler')
searcher = crawler()
try:
print('Searching')
for key in options:
for option in options[key]:
print('Searching: ', key, option)
searcher.search('testing', **{key: option})
print(searcher.links)
print('Success')
except Exception as e:
print(e.args[0])
finally:
print('Stopping')
searcher.stop()
| 5,343,521
|
def get_transform_dest_array(output_size):
"""
Returns a destination array of the desired size. This is also used to define the
order of points necessary for cv2.getPerspectiveTransform: the order can change, but
it must remain consistent between these two arrays.
:param output_size: The size to make the output image ((width, height) tuple)
:return: The destination array, suitable to feed into cv2.getPerspectiveTransform
"""
bottom_right = [output_size[0] - 1, output_size[1] - 1]
bottom_left = [0, output_size[1] - 1]
top_left = [0, 0]
top_right = [output_size[0] - 1, 0]
return np.array(
[bottom_right, bottom_left, top_left, top_right],
dtype="float32")
| 5,343,522
|
def fetch(url):
"""
    Fetch the web page at the URL given by the ``url`` argument.
    The page encoding is determined from the Content-Type header.
    Returns: the HTML as a str.
"""
f = urlopen(url)
    # Get the encoding from the HTTP headers (fall back to utf-8 if it is not specified).
encoding = f.info().get_content_charset(failobj="utf-8")
    html = f.read().decode(encoding)  # Decode to str using the detected encoding.
return html
| 5,343,523
|
def _is_industrial_user():
"""Checking if industrial user is trying to use relion_it.."""
if not grp:
# We're not on a linux/unix system, therefore not at Diamond
return False
not_allowed = ["m10_valid_users", "m10_staff", "m08_valid_users", "m08_staff"]
uid = os.getegid()
fedid = grp.getgrgid(uid)[0]
groups = str(subprocess.check_output(["groups", str(fedid)]))
return any(group in groups for group in not_allowed)
| 5,343,524
|
def get_rounded_coordinates(point):
"""Helper to round coordinates for use in permalinks"""
return str(round(point.x, COORDINATE_ROUND)) + '%2C' + str(round(point.y, COORDINATE_ROUND))
| 5,343,525
|
def rgb_to_hls(image: np.ndarray, eps: float = 1e-8) -> np.ndarray:
"""Convert a RGB image to HLS. Image data is assumed to be in the range
of [0.0, 1.0].
Args:
image (np.ndarray[B, 3, H, W]):
RGB image to be converted to HLS.
eps (float):
Epsilon value to avoid div by zero.
Returns:
hls (np.ndarray[B, 3, H, W]):
HLS version of the image.
"""
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
| 5,343,526
|
def array_max_dynamic_range(arr):
"""
Returns an array scaled to a minimum value of 0 and a maximum value of 1.
"""
finite_arr = arr[np.isfinite(arr)]
low = np.nanmin(finite_arr)
high = np.nanmax(finite_arr)
return (arr - low)/(high - low)
| 5,343,527
|
def production(*args):
"""Creates a production rule or list of rules from the input.
Supports two kinds of input:
A parsed string of form "S->ABC" where S is a single character, and
ABC is a string of characters. S is the input symbol, ABC is the output
symbols.
Neither S nor ABC can be any of the characters "-", ">" for obvious
reasons.
A tuple of type (S, Seq, ...) where S is the symbol of some hashable
    type and seq is a finite iterable representing the output symbols.
Naturally if you don't want to use characters/strings to represent
symbols then you'll typically need to use the second form.
You can pass multiple inputs to generate multiple production rules,
in that case the result is a list of rules, not a single rule.
    If you pass multiple inputs, the symbols must differ since a simple
L-System only supports one production rule per symbol.
Example:
>>> production("F->Ab[]")
('F', ['A', 'b', '[', ']'])
>>> production("F->Ab[]", ("P", "bAz"), (1, (0,1)))
[('F', ['A', 'b', '[', ']']), ('P', ['b', 'A', 'z']), (1, [0, 1])]
"""
if len(args) < 1:
raise ValueError("missing arguments")
res = []
for a in args:
if issubclass(str, type(a)):
parts = a.split(sep="->", maxsplit=1)
if len(parts) < 2:
raise ValueError("couldn't parse invalid string \"{}\"".format(a))
res.append((parts[0], list(parts[1])))
elif issubclass(tuple, type(a)):
s, to, *vals = a
res.append((s, list(to)))
else:
raise TypeError("sorry don't know what to do with " + str(type(a)))
if len(res) == 1:
return res[0]
return res
| 5,343,528
|
def _unpack_compute(input_place, num, axis):
"""Unpack a tensor into `num` tensors along axis dimension."""
input_shape = get_shape(input_place)
for index, _ in enumerate(input_shape):
input_shape[index] = input_shape[index] if index != axis else 1
output_shape_list = [input_shape for i in range(num)]
offset = 0
out_tensor_list = []
for i, t_shape in enumerate(output_shape_list):
out_tensor = tvm.compute(
t_shape,
lambda *index, t_shape=t_shape:
input_place(*_index_offset(t_shape, axis, offset, *index)),
name='tensor' + str(i))
out_tensor_list.append(out_tensor)
offset = offset + 1
return tuple(out_tensor_list)
| 5,343,529
|
def flatten(items):
"""Convert a sequence of sequences to a single flat sequence.
Works on dictionaries, tuples, lists.
"""
result = []
for item in items:
        if isinstance(item, (list, tuple)):  # flatten nested lists and tuples alike
result += flatten(item)
else:
result.append(item)
return result
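# Quick illustration (not from the original source) of the recursive flattening,
# including the nested-tuple case handled above:
assert flatten([1, [2, (3, 4)], 5]) == [1, 2, 3, 4, 5]
assert flatten({"a": 1, "b": 2}) == ["a", "b"]  # iterating a dict yields its keys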
| 5,343,530
|
def _datum_to_cap(datum: Dict) -> float:
"""Cap value of a datum."""
return _cap_str_to_mln_float(datum["cap"])
| 5,343,531
|
def add_eval_to_game(game: chess.pgn.Game, engine: chess.engine.SimpleEngine, analysis_time: float,
should_re_add_analysis: bool = False) -> chess.pgn.Game:
"""
MODIFIES "game" IN PLACE
"""
current_move = game
while len(current_move.variations):
if "eval" in current_move.comment and not should_re_add_analysis:
continue
score, actual_eval = get_score(current_move.board(), engine, analysis_time=analysis_time)
current_move.comment += f'[%eval {score}]'
if current_move.eval().pov(chess.WHITE) != actual_eval:
# assert not rounding error
assert abs(current_move.eval().pov(chess.WHITE).score() - actual_eval.score()) == 1, \
f"eval's not equal, not rounding error: {current_move.eval().pov(chess.WHITE)} != {actual_eval}"
current_move = current_move.variations[0]
return game
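# Hedged usage sketch (not from the original module): annotate the mainline of a
# PGN with engine evaluations. The Stockfish path is an assumption; get_score is
# the helper this module already relies on above.
import chess.engine
import chess.pgn

with open("game.pgn") as pgn_file:
    game = chess.pgn.read_game(pgn_file)
engine = chess.engine.SimpleEngine.popen_uci("/usr/bin/stockfish")  # assumed binary path
try:
    annotated = add_eval_to_game(game, engine, analysis_time=0.1)
    with open("game_annotated.pgn", "w") as out:
        print(annotated, file=out, end="\n\n")
finally:
    engine.quit()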
| 5,343,532
|
def MC_no(a,b,N,pi,mp):
""" Monte Carlo simulation drawn from beta distribution for the uninsured agents
Args:
        a (float): shape parameter of the beta distribution
        b (float): shape parameter of the beta distribution
        N (int): number of draws
        pi (float): premium (not used for the uninsured agents)
        mp (dict): model parameters; must contain the income level 'y'
Returns:
(numpy float): Monte Carlo integration that computes expected utility for given gamma and premium
"""
x = np.random.beta(a,b,N)
return np.mean(utility(mp['y']-x,mp))
| 5,343,533
|
def gen_even_tree(fanout):
"""This generalization hierarchy is defined according to even fan-out
(average distribution).For large dataset fanout = 5, for small dataset fanout = 4
"""
    treeseed = open('data/treeseed_BMS.txt', 'r')  # the 'rU' mode was removed in Python 3
treefile = open('data/treefile_BMS.txt', 'w')
for line in treeseed:
line = line.strip()
temp = line.split(',')
prefix = ''
str_len = len(temp[0])
if temp[0][0] == 'E' or temp[0][0] == 'V':
prefix = temp[0][0]
temp[0] = temp[0][1:]
temp[1] = temp[1][1:]
str_len -= 1
        bottom = int(temp[0])  # string.atoi was removed in Python 3
        top = int(temp[1])
# get height
temp = top - bottom
height = 0
flag = True
while temp:
            temp //= fanout
height += 1
level_len = []
tree = []
for i in range(height):
level_len = pow(fanout, i)
level_split = []
temp = bottom
while temp <= top:
stemp = ''
if level_len == 1:
stemp = prefix + str(temp).rjust(str_len, '0')
elif temp + level_len - 1 > top:
stemp = prefix + str(temp).rjust(str_len, '0')
stemp += ',' + prefix + str(top).rjust(str_len, '0')
else:
stemp = prefix + str(temp).rjust(str_len, '0')
stemp += ',' + prefix + str(temp + level_len - 1).rjust(str_len, '0')
level_split.append(stemp)
temp += level_len
tree.append(level_split)
for i in range(len(tree[0])):
w_line = ''
temp = i
for index in range(height):
w_line += tree[index][temp] + ';'
                temp //= fanout
w_line += line + ';*\n'
treefile.write(w_line)
treeseed.close()
treefile.close()
| 5,343,534
|
def get_conflicting_types(type, typedef_dict):
"""Finds typedefs defined in the same class that conflict. General algo
is: Find a type definition that is identical to type but for a
different key. If the type definitions is coming from a different
class, neglect it. This is a pretty slow function for large dictionaries."""
conflicting_types = []
if type in typedef_dict:
typedef = typedef_dict[type] # Look for an identical typedef mapped under a different key.
for key, value in typedef_dict.items():
if((typedef == value) and (type != key) and (type.rpartition("::")[0] == key.rpartition("::")[0])):
conflicting_types.append(key)
return conflicting_types
| 5,343,535
|
def extract_entity_type_and_name_from_uri(uri: str) -> Tuple[str, str]:
"""
    Extract the entity type and name from an entity URI.
    :param uri: e.g. http://www.kg.com/kg/ontoligies/ifa#Firm/百度
    :return: ('Firm', '百度')
"""
name_separator = uri.rfind('/')
type_separator = uri.rfind('#')
return uri[type_separator + 1: name_separator], uri[name_separator + 1:]
| 5,343,536
|
def resolve_raw_resource_description(
raw_rd: GenericRawRD, root_path: os.PathLike, nodes_module: typing.Any
) -> GenericResolvedNode:
"""resolve all uris and sources"""
rd = UriNodeTransformer(root_path=root_path).transform(raw_rd)
rd = SourceNodeTransformer().transform(rd)
rd = RawNodeTypeTransformer(nodes_module).transform(rd)
return rd
| 5,343,537
|
def ret_dict() -> dict:
"""
    Returns
    -------
    dict
        An empty dictionary.
    """
return {}
| 5,343,538
|
def load_rokdoc_well_markers(infile):
"""
Function to load well markers exported from RokDoc in ASCII format.
"""
with open(infile, 'r') as fd:
buf = fd.readlines()
marker = []
well = []
md = []
tvdkb = []
twt = []
tvdss = []
x = []
y = []
for line in buf[5:]:
c1, c2, c3, c4, c5 = line.split("'")
c6, c7, c8, c9, c10, c11 = c5.strip().split()
marker.append(c2)
well.append(c4)
md.append(float(c6))
tvdkb.append(float(c7))
twt.append(float(c8))
tvdss.append(float(c9))
x.append(float(c10))
y.append(float(c11))
markers = {}
for each in list(set(well)):
markers[each] = {}
for i in range(len(marker)):
cur_well = well[i]
cur_marker = marker[i]
cur_md = md[i]
cur_tvdkb = tvdkb[i]
cur_tvdss = tvdss[i]
cur_twt = twt[i]
cur_x = x[i]
cur_y = y[i]
markers[cur_well][cur_marker] = {'md': cur_md, 'tvdkb': cur_tvdkb,
'tvdss': cur_tvdss, 'twt': cur_twt,
'x': cur_x, 'y': cur_y}
return markers
| 5,343,539
|
def get_fees():
"""
Returns all information related to fees configured for the institution.
:returns: String containing xml or an lxml element.
"""
return get_anonymous('getFees')
| 5,343,540
|
async def create_db_pool():
"""Connects to the local PostgreSQL server."""
client.pg_con = await asyncpg.create_pool(database='postgres', user='postgres', password='Mehul09!')
| 5,343,541
|
def resample_uv_to_bbox(
predictor_output: DensePoseChartPredictorOutput,
labels: torch.Tensor,
box_xywh_abs: Tuple[int, int, int, int],
) -> torch.Tensor:
"""
Resamples U and V coordinate estimates for the given bounding box
Args:
predictor_output (DensePoseChartPredictorOutput): DensePose predictor
output to be resampled
labels (tensor [H, W] of uint8): labels obtained by resampling segmentation
outputs for the given bounding box
box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
Return:
Resampled U and V coordinates - a tensor [2, H, W] of float
"""
x, y, w, h = box_xywh_abs
w = max(int(w), 1)
h = max(int(h), 1)
u_bbox = F.interpolate(predictor_output.u, (h, w), mode="bilinear", align_corners=False)
v_bbox = F.interpolate(predictor_output.v, (h, w), mode="bilinear", align_corners=False)
uv = torch.zeros([2, h, w], dtype=torch.float32, device=predictor_output.u.device)
for part_id in range(1, u_bbox.size(1)):
uv[0][labels == part_id] = u_bbox[0, part_id][labels == part_id]
uv[1][labels == part_id] = v_bbox[0, part_id][labels == part_id]
return uv
| 5,343,542
|
def run(cmd, capture_output=True):
"""
Run command locally with current user privileges
:returns: command output on success
:raises: LocalExecutionFailed if command failed"""
try:
LOG.debug("Running '%s' locally", cmd)
return api.local(cmd, capture=capture_output)
except (SystemExit, env.abort_exception) as e:
LOG.debug("Command '%s' failed with '%s'", cmd, e.message)
raise LocalExecutionFailed(e.message, e.code)
| 5,343,543
|
def rectangle(ctx, width, height, angle=0):
"""Draws a rectangle at a specified angle.
Parameters
----------
ctx : cairo.Context
Context.
width : float
Width of the rectangle.
height : float
Height of the rectangle.
angle : float, optional
Angle in radians of the rotation of plane from the positive x axis
towards positive y axis.
"""
ctx.rotate(angle)
x, y = ctx.get_current_point()
ctx.rectangle(x, y, width, height)
ctx.rotate(-angle)
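# Hedged usage sketch (not from the original module): draw one rectangle rotated
# by 30 degrees on a small PNG surface; the current point set by move_to is used
# as the rectangle corner inside rectangle() above.
import math
import cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 200)
ctx = cairo.Context(surface)
ctx.move_to(60, 40)
rectangle(ctx, 80, 30, angle=math.pi / 6)
ctx.set_source_rgb(0.2, 0.4, 0.8)
ctx.fill()
surface.write_to_png("rotated_rect.png")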
| 5,343,544
|
def test_proj2geo():
""" Test Map.proj2geo method """
m = pmk.Map()
    m.set_geographic_crs('EPSG:4267') ## NAD27
m.set_projection("EPSG:32023") ## Ohio South FT
## Test singlet integer input
test_coord = (1859916, 728826)
expected = (-83, 40)
actual = m.proj2geo(*test_coord)
assert expected[0] == pytest.approx(actual[0])
assert expected[1] == pytest.approx(actual[1])
## Test list
test_coords = (
[27416968.3248, 20414646.4987, 1606378.3434, 5169978.7942, np.inf],
[15047776.1068, 10772468.3457, -33210736.0296, -26917578.0576, np.inf]
)
expected = (
[22.52, -3.13, -83.1, -77.1, -77.1],
[33.45, 43.80, -31.8, -22.9, -22.9]
)
actual = m.proj2geo(*test_coords)
## Assert output is list
assert isinstance(actual[0], list)
assert isinstance(actual[1], list)
## Test for expected results
for actual_x, actual_y, expected_x, expected_y in zip(*actual, *expected):
assert expected_x == pytest.approx(actual_x)
assert expected_y == pytest.approx(actual_y)
| 5,343,545
|
def get_error_code(output: int,
program: List[int]
) -> int:
"""
Determine what pair of inputs, "noun" and "verb", produces the output.
The inputs should be provided to the program by replacing the values
at addresses 1 and 2. The value placed in address 1 is called the "noun",
and the value placed in address 2 is called the "verb".
It returns the error code: 100 * noun + verb
Implementation options:
- By brute force, looping twice over 0-99
    - Looping over the noun linearly, and using binary search for the verb,
      since the program's values are non-negative integers, which makes the
      output monotonically non-decreasing in the verb (IMPLEMENTED)
- Optimize the possible value intervals for both noun and verb checking
the possible min and max outputs for each pair
"""
# Reset the memory
program = program.copy()
# Linear loop over the noun
for noun in range(0, 100):
program[1] = noun
# Binary search over the verb
verb = binary_search_code(program, output)
# Return the code if found
if verb != -1:
return (100 * noun + verb)
raise ValueError('Code not found!')
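# binary_search_code is defined elsewhere in the original solution; the sketch
# below only illustrates the idea described in the docstring. It assumes a
# hypothetical run_program(program) -> int that executes the Intcode program and
# returns the value left at address 0.
def _binary_search_code_sketch(program: List[int], output: int) -> int:
    low, high = 0, 99
    while low <= high:
        verb = (low + high) // 2
        candidate = program.copy()
        candidate[2] = verb
        result = run_program(candidate)  # hypothetical executor
        if result == output:
            return verb
        if result < output:              # output grows with the verb
            low = verb + 1
        else:
            high = verb - 1
    return -1                            # no verb works for this noun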
| 5,343,546
|
def page(page_id):
"""Gets one page from the database."""
page = Page.objects.get(id=page_id)
return render_template('page.html', page=page)
| 5,343,547
|
def compute_Csigma_from_alphaandC(TT,minT,alphaT,CT,ibrav=4):
"""
    This function calculates the difference between the constant stress heat capacity
:math:`C_{\sigma}` and the constant strain heat capacity :math:`C_{\epsilon}`
from the *V* (obtained from the input lattice parameters *minT*, the thermal
expansion tensor *alphaT* and the elastic constant tensor *CT*, all as a function
of temperature. This is essentially the anisotropic equivalent of the equation
:math:`Cp - Cv = T V beta^2 B0` for the isotropic case (volume only)
and it avoids a further numerical derivation to obtain :math:`C_{\sigma}`.
It is however more complex in the anisotropic case since *minT*, *alphaT* and
    in particular the elastic constant tensor *CT* must be known in principle
including their temperature dependence.
.. Warning::
Still very experimental...
"""
CT = CT / RY_KBAR
Csigma = np.zeros(len(TT))
    for i in range(1,len(TT)):
        V = compute_volume(minT[i],ibrav)
        temp = 0.0
        for l in range(0,6):
            for m in range(0,6):
                # accumulate the full Voigt contraction alpha_l C_lm alpha_m
                temp += alphaT[i,l] * CT[l,m] * alphaT[i,m]
        Csigma[i] = V * TT[i] * temp  # this is C_sigma-C_epsilon at a given T
return Csigma
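# A compact statement of the relation the loop above evaluates (in the same
# notation as the docstring), assuming the accumulation over l, m is intended:
# :math:`C_{\sigma} - C_{\epsilon} = T\,V \sum_{l,m=1}^{6} \alpha_l C_{lm} \alpha_m`
# which reduces to :math:`C_p - C_v = T V \beta^2 B_0` in the isotropic limit.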
| 5,343,548
|
def services(request):
"""
"""
context = {}
services = Service.objects.filter(active=True, hidden=False)
context["services"] = services
context["services_nav"] = True
return render(request, "services.html", context)
| 5,343,549
|
def _create_presigned_url(method, object_name, duration_in_seconds=600):
"""
Create presigned S3 URL
"""
s3_client = boto3.client('s3',
endpoint_url=CONFIG.get('s3', 'url'),
aws_access_key_id=CONFIG.get('s3', 'access_key_id'),
aws_secret_access_key=CONFIG.get('s3', 'secret_access_key'))
if method == 'get':
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket':CONFIG.get('s3', 'bucket'), 'Key': object_name},
ExpiresIn=duration_in_seconds)
except Exception:
logger.critical('Unable to generate presigned url for get')
return None
else:
try:
response = s3_client.generate_presigned_url('put_object',
Params={'Bucket':CONFIG.get('s3', 'bucket'), 'Key':object_name},
ExpiresIn=duration_in_seconds,
HttpMethod='PUT')
except Exception:
logger.critical('Unable to generate presigned url for put')
return None
return response
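# Hedged usage sketch (not from the original module): push a local file through a
# presigned PUT URL; assumes the module-level CONFIG and logger used above are set
# up and that the `requests` package is available.
import requests

put_url = _create_presigned_url('put', 'reports/output.csv', duration_in_seconds=300)
if put_url is not None:
    with open('output.csv', 'rb') as fh:
        resp = requests.put(put_url, data=fh)
    resp.raise_for_status()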
| 5,343,550
|
def read_data(model_parameters, ARGS):
"""Read the data from provided paths and assign it into lists"""
data = pd.read_pickle(ARGS.path_data)
y = pd.read_pickle(ARGS.path_target)['target'].values
data_output = [data['codes'].values]
if model_parameters.numeric_size:
data_output.append(data['numerics'].values)
if model_parameters.use_time:
data_output.append(data['to_event'].values)
return (data_output, y)
| 5,343,551
|
def _is_src(file):
""" Returns true if the file is a source file
Bazel allows for headers in the srcs attributes, we need to filter them out.
Args:
file (File): The file to check.
"""
if file.extension in ["c", "cc", "cpp", "cxx", "C", "c++", "C++"] and \
file.is_source:
return True
return False
| 5,343,552
|
def constructResponseObject(responsePassed):
"""
    Constructs an error Response object, even if the passed-in response is None or incomplete.
"""
if (not (responsePassed is None)):
temp_resp = Response()
temp_resp.status_code = responsePassed.status_code or 404
if((temp_resp.status_code >= 200) and (temp_resp.status_code < 300)):
temp_resp.status_code = 404
temp_resp.reason = 'Bad Request'
details = 'UnexpectedError'
temp_resp.headers = {'Content-Type': 'text/html', 'Warning': details}
else:
temp_resp.reason = responsePassed.reason or 'Bad Request'
details = responsePassed.content or 'UnexpectedError'
temp_resp.headers = {'Content-Type': 'text/html', 'WWW-Authenticate': details}
else:
temp_resp = Response()
temp_resp.reason = 'Bad Request'
temp_resp.status_code = 404
details = 'UnexpectedError'
temp_resp.headers = {'Content-Type': 'text/html', 'WWW-Authenticate': details}
return temp_resp
| 5,343,553
|
async def callback_query_handler(bot: BotAPI, update: Update):
"""Test inline keyboard with callback query and answer_callback_query."""
if update.message is not None and update.message.text is not None:
await bot.send_message(
update.message.chat.id,
'A reply!',
reply_markup=InlineKeyboardMarkup(
inline_keyboard=[
[
InlineKeyboardButton(
text='Button', callback_data='button_pressed'
)
]
]
),
)
if update.callback_query is not None:
if update.callback_query.data == 'button_pressed':
await bot.answer_callback_query(update.callback_query.id, 'Button pressed!')
| 5,343,554
|
def calculate_signal_strength(rssi):
# type: (int) -> int
"""Calculate the signal strength of access point."""
signal_strength = 0
if rssi >= -50:
signal_strength = 100
else:
signal_strength = 2 * (rssi + 100)
return signal_strength
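# Worked examples of the mapping above (linear between -100 dBm -> 0% and -50 dBm -> 100%):
assert calculate_signal_strength(-50) == 100
assert calculate_signal_strength(-75) == 50
assert calculate_signal_strength(-100) == 0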
| 5,343,555
|
async def verify_email(token: str, auth: AuthJWT = Depends()):
"""Verify the user's email with the supplied token"""
# Manually assign the token value
auth._token = token # pylint: disable=protected-access
user = await User.by_email(auth.get_jwt_subject())
if user.email_confirmed_at is not None:
raise HTTPException(400, "Email is already verified")
if user.disabled:
raise HTTPException(400, "Your account is disabled")
user.email_confirmed_at = datetime.now(tz=timezone.utc)
await user.save()
return Response(status_code=200)
| 5,343,556
|
def step_select_from_table(context):
"""
Send select from table.
"""
context.cli.sendline('select * from a;')
| 5,343,557
|
def main():
""" main body """
# constants
n = 200
# r axis
r_axis = np.linspace(0, 2, n)
# x axis
x_fix = np.array([])
# simulate for different r
for enum in enumerate(r_axis):
# x_fix[200 * enum[0]: 200 * (enum[0] + 1)] = stable_point(enum[1])
x_fix = np.hstack((x_fix, stable_point(enum[1])))
print(x_fix)
# repeat each value of r, 200 times for the plot
r_axis= np.repeat(r_axis, 200)
# plot
fig, ax = plt.subplots(1, 1, figsize=(18, 9))
ax.plot(r_axis, x_fix, ls='', marker='o', ms=1)
plt.xlabel('r')
plt.ylabel('x_fix')
plt.title('bifurcation plot')
plt.savefig("bifurcation.jpg", dpi=200, bbox_inches='tight')
plt.show()
| 5,343,558
|
def viz_predictions(
input_: np.ndarray,
output: np.ndarray,
target: np.ndarray,
centerlines: np.ndarray,
city_names: np.ndarray,
idx=None,
show: bool = True,
) -> None:
"""Visualize predicted trjectories.
Args:
input_ (numpy array): Input Trajectory with shape (num_tracks x obs_len x 2)
output (numpy array of list): Top-k predicted trajectories, each with shape (num_tracks x pred_len x 2)
target (numpy array): Ground Truth Trajectory with shape (num_tracks x pred_len x 2)
centerlines (numpy array of list of centerlines): Centerlines (Oracle/Top-k) for each trajectory
city_names (numpy array): city names for each trajectory
show (bool): if True, show
"""
num_tracks = input_.shape[0]
obs_len = input_.shape[1]
pred_len = target.shape[1]
plt.figure(0, figsize=(8, 7))
avm = ArgoverseMap()
for i in range(num_tracks):
plt.plot(
input_[i, :, 0],
input_[i, :, 1],
color="#ECA154",
label="Observed",
alpha=1,
linewidth=3,
zorder=15,
)
plt.plot(
input_[i, -1, 0],
input_[i, -1, 1],
"o",
color="#ECA154",
label="Observed",
alpha=1,
linewidth=3,
zorder=15,
markersize=9,
)
plt.plot(
target[i, :, 0],
target[i, :, 1],
color="#d33e4c",
label="Target",
alpha=1,
linewidth=3,
zorder=20,
)
plt.plot(
target[i, -1, 0],
target[i, -1, 1],
"o",
color="#d33e4c",
label="Target",
alpha=1,
linewidth=3,
zorder=20,
markersize=9,
)
for j in range(len(centerlines[i])):
plt.plot(
centerlines[i][j][:, 0],
centerlines[i][j][:, 1],
"--",
color="grey",
alpha=1,
linewidth=1,
zorder=0,
)
for j in range(len(output[i])):
plt.plot(
output[i][j][:, 0],
output[i][j][:, 1],
color="#007672",
label="Predicted",
alpha=1,
linewidth=3,
zorder=15,
)
plt.plot(
output[i][j][-1, 0],
output[i][j][-1, 1],
"o",
color="#007672",
label="Predicted",
alpha=1,
linewidth=3,
zorder=15,
markersize=9,
)
for k in range(pred_len):
lane_ids = avm.get_lane_ids_in_xy_bbox(
output[i][j][k, 0],
output[i][j][k, 1],
city_names[i],
query_search_range_manhattan=2.5,
)
for j in range(obs_len):
lane_ids = avm.get_lane_ids_in_xy_bbox(
input_[i, j, 0],
input_[i, j, 1],
city_names[i],
query_search_range_manhattan=2.5,
)
[avm.draw_lane(lane_id, city_names[i]) for lane_id in lane_ids]
for j in range(pred_len):
lane_ids = avm.get_lane_ids_in_xy_bbox(
target[i, j, 0],
target[i, j, 1],
city_names[i],
query_search_range_manhattan=2.5,
)
[avm.draw_lane(lane_id, city_names[i]) for lane_id in lane_ids]
plt.axis("equal")
plt.xticks([])
plt.yticks([])
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
if show:
plt.show()
| 5,343,559
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up a National Weather Service entry."""
hass_data = hass.data.setdefault(DOMAIN, {})
station = entry.data[CONF_STATION]
radar = Nexrad(station)
radar_update = Debouncer(
hass, _LOGGER, cooldown=60, immediate=True, function=radar.update
)
await radar_update.async_call()
_LOGGER.debug("layers: %s", radar.layers)
if radar.layers is None:
raise ConfigEntryNotReady
hass_data[entry.entry_id] = {"radar": radar, "radar_update": radar_update}
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
| 5,343,560
|
def lookAtThisMethod(
first_parameter,
second_paramter=None,
third_parameter=32,
fourth_parameter="a short string as default argument",
**kwargs
):
"""The point of this is see how it reformats parameters
It might be fun to see what goes on
Here I guess it should respect this spacing, since we are in a comment.
We are done!
"""
return kwargs["whatever"](
first_parameter * third_parameter,
second_paramter,
fourth_parameter,
"extra string because I want to",
)
| 5,343,561
|
def data_incremental_benchmark(
benchmark_instance: GenericCLScenario,
experience_size: int,
shuffle: bool = False,
drop_last: bool = False,
split_streams: Sequence[str] = ("train",),
custom_split_strategy: Callable[
[ClassificationExperience], Sequence[AvalancheDataset]
] = None,
experience_factory: Callable[
[ClassificationStream, int], ClassificationExperience
] = None,
):
"""
High-level benchmark generator for a Data Incremental setup.
This generator accepts an existing benchmark instance and returns a version
of it in which experiences have been split in order to produce a
Data Incremental stream.
In its base form this generator will split train experiences in experiences
of a fixed, configurable, size. The split can be also performed on other
streams (like the test one) if needed.
The `custom_split_strategy` parameter can be used if a more specific
splitting is required.
Beware that experience splitting is NOT executed in a lazy way. This
means that the splitting process takes place immediately. Consider
optimizing the split process for speed when using a custom splitting
strategy.
Please note that each mini-experience will have a task labels field
equal to the one of the originating experience.
The `complete_test_set_only` field of the resulting benchmark instance
will be `True` only if the same field of original benchmark instance is
`True` and if the resulting test stream contains exactly one experience.
:param benchmark_instance: The benchmark to split.
:param experience_size: The size of the experience, as an int. Ignored
if `custom_split_strategy` is used.
:param shuffle: If True, experiences will be split by first shuffling
instances in each experience. This will use the default PyTorch
random number generator at its current state. Defaults to False.
Ignored if `custom_split_strategy` is used.
:param drop_last: If True, if the last experience doesn't contain
`experience_size` instances, then the last experience will be dropped.
Defaults to False. Ignored if `custom_split_strategy` is used.
:param split_streams: The list of streams to split. By default only the
"train" stream will be split.
:param custom_split_strategy: A function that implements a custom splitting
strategy. The function must accept an experience and return a list
of datasets each describing an experience. Defaults to None, which means
that the standard splitting strategy will be used (which creates
experiences of size `experience_size`).
    A good starting point to understand the mechanism is to look at the
implementation of the standard splitting function
:func:`fixed_size_experience_split_strategy`.
:param experience_factory: The experience factory.
Defaults to :class:`GenericExperience`.
:return: The Data Incremental benchmark instance.
"""
split_strategy = custom_split_strategy
if split_strategy is None:
split_strategy = partial(
fixed_size_experience_split_strategy,
experience_size,
shuffle,
drop_last,
)
stream_definitions: TStreamsUserDict = dict(
benchmark_instance.stream_definitions
)
for stream_name in split_streams:
if stream_name not in stream_definitions:
raise ValueError(
f"Stream {stream_name} could not be found in the "
f"benchmark instance"
)
stream = getattr(benchmark_instance, f"{stream_name}_stream")
split_datasets: List[AvalancheDataset] = []
split_task_labels: List[Set[int]] = []
exp: ClassificationExperience
for exp in stream:
experiences = split_strategy(exp)
split_datasets += experiences
for _ in range(len(experiences)):
split_task_labels.append(set(exp.task_labels))
stream_def = StreamUserDef(
split_datasets,
split_task_labels,
stream_definitions[stream_name].origin_dataset,
False,
)
stream_definitions[stream_name] = stream_def
complete_test_set_only = (
benchmark_instance.complete_test_set_only
and len(stream_definitions["test"].exps_data) == 1
)
return GenericCLScenario(
stream_definitions=stream_definitions,
complete_test_set_only=complete_test_set_only,
experience_factory=experience_factory,
)
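# Hedged usage sketch (not from the original module): split each training
# experience of a SplitMNIST benchmark into mini-experiences of 1000 samples.
# Requires Avalanche and will download MNIST on first use.
from avalanche.benchmarks.classic import SplitMNIST

benchmark = SplitMNIST(n_experiences=5)
data_incremental = data_incremental_benchmark(
    benchmark, experience_size=1000, shuffle=True
)
print(len(data_incremental.train_stream))  # many more, smaller experiences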
| 5,343,562
|
def generate_doc_from_endpoints(
routes: typing.List[tornado.web.URLSpec],
*,
api_base_url,
description,
api_version,
title,
contact,
schemes,
security_definitions,
security
):
"""Generate doc based on routes"""
from tornado_swagger.model import export_swagger_models # pylint: disable=C0415
swagger_spec = {
"openapi": "3.0.0",
"info": {
"title": title,
"description": _clean_description(description),
"version": api_version,
},
"basePath": api_base_url,
"schemes": schemes,
"components": {
"schemas": export_swagger_models(),
},
"paths": _extract_paths(routes),
}
if contact:
swagger_spec["info"]["contact"] = {"name": contact}
if security_definitions:
swagger_spec["securityDefinitions"] = security_definitions
if security:
swagger_spec["security"] = security
return swagger_spec
| 5,343,563
|
def _filter_builds(build: Build) -> bool:
"""
Determine if build should be filtered.
:param build: Build to check.
:return: True if build should not be filtered.
"""
if build.display_name.startswith("!"):
return True
return False
| 5,343,564
|
def test_compute_free_energy(seq, actions, expected):
"""Tests private method _compute_free_energy()"""
env = Lattice2DEnv(seq)
for action in actions:
env.step(action)
result = env._compute_free_energy(env.state)
assert expected == result
| 5,343,565
|
def load_transformers(model_name, skip_model=False):
"""Loads transformers config, tokenizer, and model."""
config = AutoConfig.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(
model_name,
add_prefix_space=True,
additional_special_tokens=('[T]', '[P]'),
)
    # Honour skip_model so callers can load just the config and tokenizer.
    model = None if skip_model else AutoModelForMaskedLM.from_pretrained(model_name, config=config)
return config, tokenizer, model
| 5,343,566
|
def build_test_data(data):
"""
Generates various features needed to predict
the class of the news.
Input: DataFrame
Returns Array of generated features.
"""
data = process(data)
generators = [
CountFeatureGenerator,
TfidfFeatureGenerator,
Word2VecFeatureGenerator,
SentimentFeatureGenerator,
ReadabilityFeatureGenerator
]
    # Call the generators one by one to generate features
features = [feature for generator in generators for feature in generator(data)]
print("Total number of raw features: {}".format(len(features)))
# Stack and return the features
return np.hstack(features)
| 5,343,567
|
def set_password_for_sub_account(account_id, password):
"""
Create a message to set the password for a given sub-account.
:param account_id: Integer representing the ID of the account
:param password: String representing the password for the sub-account
:return: Message (dict)
"""
data = sanitize(account_id=account_id, password=password)
msg = message(method=ACCOUNT_GET_SUB_ACCOUNTS)
params = {"sid": data["account_id"], "password": data["password"]}
return add_params_to_message(params, msg)
| 5,343,568
|
def post(req, api):
"""
Append a story to our rpg
Input:
content: string
Output:
string
"""
api.debug(req.body['content'])
return 'Success'
| 5,343,569
|
def generate_batch(n, batch_size):
""" Generates a set of batch indices
Args:
n: total number of samples in set
batch_size: size of batch
Returns:
batch_index: a list of length batch_size containing randomly sampled indices
"""
    batch_index = random.sample(range(n), batch_size)  # assumes the standard-library random module is imported
return batch_index
| 5,343,570
|
def assert_(
val: bool,
msg: Tuple[scipy.integrate.tests.test_integrate.CoupledDecay, Literal["bdf"]],
):
"""
usage.scipy: 3
"""
...
| 5,343,571
|
def train_model_from_args(args: argparse.Namespace):
"""
Just converts from an `argparse.Namespace` object to string paths.
"""
train_model_from_file(
parameter_filename=args.param_path,
serialization_dir=args.serialization_dir,
overrides=args.overrides,
file_friendly_logging=args.file_friendly_logging,
recover=args.recover,
force=args.force,
node_rank=args.node_rank,
include_package=args.include_package,
dry_run=args.dry_run,
)
| 5,343,572
|
def solve(si, y, infile):
"""Conducts the solution step, based on the dopri5 integrator in scipy
:param si: the simulation info object
:type si: SimInfo
:param y: the solution vector
:type y: np.ndarray
:param infile: the imported infile module
:type infile: imported module
"""
n = ode(f_n).set_integrator('dopri5')
    n.set_initial_value(y0_n(si), si.timer.t0.magnitude)
n.set_f_params(si)
th = ode(f_th).set_integrator('dopri5', nsteps=infile.nsteps)
th.set_initial_value(y0_th(si), si.timer.t0.magnitude)
th.set_f_params(si)
while (n.successful() and
n.t < si.timer.tf.magnitude and
th.t < si.timer.tf.magnitude):
si.timer.advance_one_timestep()
si.db.record_all()
n.integrate(si.timer.current_time().magnitude)
update_n(n.t, n.y, si)
th.integrate(si.timer.current_time().magnitude)
update_th(th.t, n.y, th.y, si)
return si.y
| 5,343,573
|
def update_logo(img):
""""this function update the warrior logo"""
try:
with open(img, "rb") as image_file:
encoded_img = base64.b64encode(image_file.read())
img_path = os.path.join(
BASE_DIR, "wui/core/static/core/images/logo.png")
fh = open(img_path, "wb")
fh.write(base64.b64decode(encoded_img))
fh.close()
except:
create_log(
"Error: Unable to upload logo, please provide the valid image path.")
| 5,343,574
|
def test_annotated_field_also_used_in_filter():
"""
Test that when a field also used in filter needs to get annotated, it really annotates only the field.
See issue https://github.com/preply/graphene-federation/issues/50
"""
@key("id")
class B(ObjectType):
id = ID()
@extend("id")
class A(ObjectType):
id = external(ID())
b = Field(B, id=ID())
class Query(ObjectType):
a = Field(A)
schema = build_schema(query=Query)
graphql_compatibility.assert_schema_is(
actual=schema,
expected_2=FILTER_SCHEMA_2,
expected_3=FILTER_SCHEMA_3,
)
# Check the federation service schema definition language
query = """
query {
_service {
sdl
}
}
"""
result = graphql_compatibility.perform_graphql_query(schema, query)
assert not result.errors
graphql_compatibility.assert_graphql_response_data(
schema=schema,
actual=result.data["_service"]["sdl"].strip(),
expected_2=FILTER_RESPONSE_2,
expected_3=FILTER_RESPONSE_3,
)
| 5,343,575
|
def writeOutput(info,prettyPrint=False):
""" Simple method used to print or dump to a file the output, probably will be included inside of class """
if JSON_OUTPUT:
if prettyPrint:
json_dump = json.dumps(info, indent=4, sort_keys=True)
else:
json_dump = json.dumps(info)
if XML_OUTPUT:
xml_dump = dicttoxml(info, custom_root='output', attr_type=False)
if FILE_OUTPUT:
if JSON_OUTPUT and XML_OUTPUT:
with open('json_'+Output_file_name,'w') as file_:
file_.write(json_dump)
with open('xml_'+Output_file_name,'w') as file_:
file_.write(xml_dump)
elif JSON_OUTPUT:
with open(Output_file_name,'w') as file_:
file_.write(json_dump)
elif XML_OUTPUT:
with open('xml_'+Output_file_name,'w') as file_:
file_.write(xml_dump)
else:
if JSON_OUTPUT and XML_OUTPUT:
print(json_dump)
print("\n\n")
print(xml_dump)
        elif JSON_OUTPUT:  # elif prevents printing the JSON twice when both formats are enabled
print(json_dump)
elif XML_OUTPUT:
print(xml_dump)
| 5,343,576
|
def test_disable():
"""
Test to disable state run
"""
mock = MagicMock(return_value=["C", "D"])
with patch.dict(state.__salt__, {"grains.get": mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {"grains.setval": mock}):
mock = MagicMock(return_value=[])
with patch.dict(state.__salt__, {"saltutil.refresh_modules": mock}):
assert state.disable("C") == {
"msg": "Info: C state " "already disabled.",
"res": True,
}
assert state.disable("Z") == {
"msg": "Info: Z state " "disabled.",
"res": True,
}
| 5,343,577
|
def extend_track(
tator_api: tator.openapi.tator_openapi.api.tator_api.TatorApi,
media_id: int,
state_id: int,
start_localization_id: int,
direction: str,
work_folder: str,
max_coast_frames: int=0,
max_extend_frames: int=None) -> None:
""" Extends the track using the given track's detection using a visual tracker
:param tator_api: Connection to Tator REST API
:param media_id: Media ID associated with the track
:param state_id: State/track ID to extend
:param start_localization_id: Localization/detection to start the track extension with.
The attributes of this detection will be copied over to subsequent detections
created during the extension process.
:param direction: 'forward'|'backward'
:param work_folder: Folder that will contain the images
:param max_coast_frames: Number of coasted frames allowed if the tracker fails to
track in the given frame.
:param max_extend_frames: Maximum number of frames to extend. Track extension will stop if
coasting occurs still or if the start/end of the video has been
reached.
This function will ignore existing detections.
"""
logger.info(f"media_id: {media_id}")
logger.info(f"state_id: {media_id}")
logger.info(f"max_coast_frames: {max_coast_frames}")
logger.info(f"max_extend_frames: {max_extend_frames}")
logger.info(f"direction: {direction}")
# Make sure the provided direction makes sense
if direction.lower() == 'forward':
moving_forward = True
elif direction.lower() == 'backward':
moving_forward = False
else:
raise ValueError("Invalid direction provided.")
# Initialize the visual tracker with the start detection
media = tator_api.get_media(id=media_id)
# Frame buffer that handles grabbing images from the video
frame_buffer = FrameBuffer(
tator_api=tator_api,
media_id=media.id,
media_num_frames=media.num_frames,
moving_forward=moving_forward,
work_folder=work_folder,
buffer_size=200)
logger.info("Frame buffer initialized")
start_detection = tator_api.get_localization(id=start_localization_id)
current_frame = start_detection.frame
image = frame_buffer.get_frame(frame=current_frame)
media_width = image.shape[1]
media_height = image.shape[0]
logger.info(f"media (width, height) {media_width} {media_height}")
logger.info(f"start_detection: {start_detection}")
roi = (
start_detection.x * media_width,
start_detection.y * media_height,
start_detection.width * media_width,
start_detection.height * media_height)
tracker = cv2.legacy.TrackerCSRT_create()
ret = tracker.init(image, roi)
# If the tracker fails to create for some reason, then bounce out of this routine.
if not ret:
        log_msg = 'Tracker init failed. '
        log_msg += f'Localization: {start_detection} State ID: {state_id} Media: {media}'
logger.warning(log_msg)
return
else:
previous_roi = roi
previous_roi_image = image.copy()
logger.info("Tracker initialized")
# Loop over the frames and attempt to continually track
coasting = False
new_detections = []
frame_count = 0
while True:
        # Stop once max_extend_frames frames have been processed (None means no limit)
if frame_count == max_extend_frames:
break
frame_count += 1
# Get the frame number in the right extension direction
current_frame = current_frame + 1 if moving_forward else current_frame - 1
# Stop processing if the tracker is operating outside of the valid frame range
if current_frame < 0 or current_frame >= media.num_frames - 2:
break
# Grab the image
start = time.time()
image = frame_buffer.get_frame(frame=current_frame)
end = time.time()
start = time.time()
if coasting:
# Track coasted the last frame. Re-create the visual tracker using
# the last known good track result before attempting to track this frame.
logging.info("...coasted")
tracker = cv2.legacy.TrackerCSRT_create()
ret = tracker.init(previous_roi_image, previous_roi)
if not ret:
break
# Run the tracker with the current frame image
ret, roi = tracker.update(image)
end = time.time()
if ret:
# Tracker was successful, save off the new detection position/time info
# Also save off the image in-case the tracker coasts the next frame
coasting = False
previous_roi = roi
previous_roi_image = image.copy()
new_detections.append(
SimpleNamespace(
frame=current_frame,
x=roi[0],
y=roi[1],
width=roi[2],
height=roi[3]))
else:
# Tracker was not successful and the track is coasting now.
# If the maximum number of coast frames is reached, we're done
# trying to track.
coast_frames = coast_frames + 1 if coasting else 1
coasting = True
            if coast_frames >= max_coast_frames:
break
# Alter the attributes. If there's a None, put in a ""
# Otherwise, there will be an error when attempting to create these new localizations
attributes = start_detection.attributes.copy()
for key in attributes:
        if attributes[key] is None:
attributes[key] = ""
# Finally, create the new localizations and add them to the state
localizations = []
for det in new_detections:
x = 0.0 if det.x < 0 else det.x / media_width
y = 0.0 if det.y < 0 else det.y / media_height
width = media_width - det.x if det.x + det.width > media_width else det.width
height = media_height - det.y if det.y + det.height > media_height else det.height
width = width / media_width
height = height / media_height
detection_spec = dict(
media_id=start_detection.media,
type=start_detection.meta,
frame=det.frame,
x=x,
y=y,
width=width,
height=height,
version=start_detection.version,
**attributes)
localizations.append(detection_spec)
# These are encapsulated in try/catch blocks to delete newly created localizations
# if something goes awry
created_ids = []
try:
for response in tator.util.chunked_create(
tator_api.create_localization_list,
media.project,
localization_spec=localizations):
created_ids += response.id
except:
for loc_id in created_ids:
tator_api.delete_localization(id=loc_id)
created_ids = []
raise ValueError("Problem creating new localizations")
try:
if len(created_ids) > 0:
tator_api.update_state(id=state_id, state_update={'localization_ids_add': created_ids})
except:
for loc_id in created_ids:
tator_api.delete_localization(id=loc_id)
raise ValueError("Problem updating state with new localizations")
| 5,343,578
|
def base(request, format=None):
"""Informational version endpoint."""
message = f"Welcome to {VERSION} of the Cannlytics API. Available endpoints:\n\n"
for endpoint in ENDPOINTS:
message += f"{endpoint}\n"
return Response({ "message": message}, content_type="application/json")
| 5,343,579
|
def push(ctx, apikey, url, skill_name):
"""
(topic branch) Reassemble a skill and deploy it as a sandbox
Deploys the files in <project_folder>/waw/<skill_name> as a WA skill named
"<gitbranch>__<skill_name>
"""
Sandbox(apikey, url, skill_name).push()
| 5,343,580
|
def discover(discover_system: bool = True) -> Discovery:
"""
Discover Reliably capabilities from this extension.
"""
logger.info("Discovering capabilities from chaostoolkit-reliably")
discovery = initialize_discovery_result(
"chaostoolkit-reliably", __version__, "reliably"
)
discovery["activities"].extend(load_exported_activities())
return discovery
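
# Usage example (assumes the chaostoolkit-reliably package and its
# dependencies are installed so load_exported_activities() can import them):
discovery = discover()
print(len(discovery["activities"]))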
| 5,343,581
|
def test_interval_in_seconds() -> None:
"""Tests the interval_in_seconds function"""
interval = interval_in_seconds("13:00")
assert isinstance(interval, int)
| 5,343,582
|
def global_search_f(event):
"""
Do global search.
To restore the original appearance of the window, type help.
The per-commander @int fts_max_hits setting controls the maximum hits returned.
"""
c = event['c']
if hasattr(g.app,'_global_search'):
g.app._global_search.fts_max_hits = c.config.getInt('fts_max_hits') or 30
# Use the per-commander setting.
g.app._global_search.show()
| 5,343,583
|
def frequent_word(message: str) -> str:
"""get frequent word."""
from collections import Counter
words = Counter(message.split())
result = max(words, key=words.get)
print(result)
return result
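
# Usage example:
frequent_word("spam spam spam eggs")  # prints and returns "spam"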
| 5,343,584
|
def test_column_regex_multiindex():
"""Text that column regex works on multi-index column."""
column_schema = Column(
Int,
Check(lambda s: s >= 0),
name=("foo_*", "baz_*"),
regex=True,
)
dataframe_schema = DataFrameSchema(
{
("foo_*", "baz_*"): Column(
Int, Check(lambda s: s >= 0), regex=True
),
}
)
data = pd.DataFrame(
{
("foo_1", "biz_1"): range(10),
("foo_2", "baz_1"): range(10, 20),
("foo_3", "baz_2"): range(20, 30),
("bar_1", "biz_2"): range(10),
("bar_2", "biz_3"): range(10, 20),
("bar_3", "biz_3"): range(20, 30),
}
)
assert isinstance(column_schema.validate(data), pd.DataFrame)
assert isinstance(dataframe_schema.validate(data), pd.DataFrame)
# Raise an error if tuple column name is applied to a dataframe with a
# flat pd.Index object.
failure_column_cases = (
[f"foo_{i}" for i in range(6)],
pd.MultiIndex.from_tuples(
[(f"foo_{i}", f"bar_{i}", f"baz_{i}") for i in range(6)]
),
)
for columns in failure_column_cases:
data.columns = columns
with pytest.raises(IndexError):
column_schema.validate(data)
with pytest.raises(IndexError):
dataframe_schema.validate(data)
| 5,343,585
|
def parse_bjobs_nodes(output):
"""Parse and return the bjobs command run with
options to obtain node list, i.e. with `-w`.
This function parses and returns the nodes of
a job in a list with the duplicates removed.
:param output: output of the `bjobs -w` command
:type output: str
:return: compute nodes of the allocation or job
:rtype: list of str
"""
nodes = []
lines = output.split("\n")
nodes_str = lines[1].split()[5]
nodes = nodes_str.split(":")
return list(dict.fromkeys(nodes))
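
# Hedged usage sketch: the bjobs output below is fabricated to match the
# layout this parser expects (second line, sixth whitespace-separated field
# holding a colon-separated EXEC_HOST list); real `bjobs -w` output may differ.
sample_output = (
    "JOBID USER STAT QUEUE FROM_HOST EXEC_HOST JOB_NAME SUBMIT_TIME\n"
    "1234 alice RUN normal login01 node001:node002:node001 myjob Oct_10_12:00"
)
print(parse_bjobs_nodes(sample_output))  # ['node001', 'node002']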
| 5,343,586
|
def find_nearest_array(array, array_comparison, tol = 1e-4):
"""
Find nearest array
@ In, array, array-like, the array to compare from
@ In, array_comparison, array-like, the array to compare to
@ In, tol, float, the tolerance
"""
array_comparison = np.asarray(array_comparison)
indeces = np.zeros(len(array), dtype=bool)
notFound = np.zeros(len(array), dtype=bool)
for val in array_comparison:
idx, diff = find_nearest(array, val)
    rel = np.abs(diff / val) if val != 0 else np.abs(diff)
if rel <= tol:
indeces[idx] = True
else:
notFound[idx] = True
return indeces, not np.any(notFound)
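
# Hedged usage sketch: find_nearest is not shown in this snippet; the stand-in
# below (returning the index of the closest element and the signed difference)
# is an assumption made only so the call can be demonstrated.
import numpy as np

def find_nearest(array, value):
    array = np.asarray(array)
    idx = int(np.argmin(np.abs(array - value)))
    return idx, array[idx] - value

mask, all_found = find_nearest_array([0.0, 0.5, 1.0], [0.5000001, 1.0])
print(mask, all_found)  # [False  True  True] True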
| 5,343,587
|
def create_bar_filled_line_fusion_chart(fname, frame_data, chart_dir=''):
"""Create the bar filled line fusion chart from window data"""
path_to_image = os.path.join(chart_dir, ChartType.BAR_FLINE_FUSION.value, "%s.png" % fname)
if not os.path.exists(path_to_image):
fig_obj, ax_fline_obj = plt.subplots()
time_series = convert_to_list(frame_data['Time'])
trading_volume = convert_to_list(frame_data['Trade Volume'])
high_prices = convert_to_list(frame_data['Trade High'])
low_prices = convert_to_list(frame_data['Trade Low'])
mean_prices = ((np.array(high_prices) + np.array(low_prices)) / 2).tolist()
transformed_time_series = list(range(len(time_series)))
ax_bar_obj = draw_fusion_bar_chart(ax_fline_obj, transformed_time_series, trading_volume)
ax_fline_obj.plot(transformed_time_series, high_prices, color='green', linewidth=0.1)
ax_fline_obj.plot(transformed_time_series, low_prices, color='red', linewidth=0.1)
ax_fline_obj.fill_between(transformed_time_series, high_prices, mean_prices, color='green')
ax_fline_obj.fill_between(transformed_time_series, mean_prices, low_prices, color='red')
format_and_save_chart(path_to_image, fig_obj, ax_fline_obj, ax_bar_obj)
return decode_img(path_to_image)
| 5,343,588
|
def test_notebooks(nb):
""" Test that notebooks run fine """
if re.match('.*nbconvert.*', nb) is not None:
# nbconvert leaves files like nbconvert.ipynb
# we don't want to run on those
return
    # Determine whether a converted copy of the notebook already exists;
    # if it does not, the freshly generated one is deleted after the test.
postfl = 6
assert nb[-postfl:] == '.ipynb'
fname_converted = nb[:-postfl] + '.nbconvert.ipynb'
if not os.path.exists(fname_converted):
delete = True
else:
delete = False
try:
cmd = f'jupyter nbconvert --to notebook --execute "{nb}"'
stat = os.system(cmd)
assert stat == 0
finally:
if delete:
try:
os.unlink(fname_converted)
except:
pass
| 5,343,589
|
def create_block_statistics_on_addition(
block_hash: str,
block_hash_parent: str,
chain_name: str,
deploy_cost_total: int,
deploy_count: int,
deploy_gas_price_avg: int,
era_id: int,
height: int,
is_switch_block: bool,
network: str,
size_bytes: str,
state_root_hash: str,
status: int,
timestamp: datetime,
proposer: str,
) -> BlockStatistics:
"""Returns a domain object instance: BlockStatistics.
"""
return BlockStatistics(
block_hash = block_hash,
block_hash_parent = block_hash_parent,
chain_name = chain_name,
deploy_cost_total = deploy_cost_total,
deploy_count = deploy_count,
deploy_gas_price_avg = deploy_gas_price_avg,
era_id = era_id,
height = height,
is_switch_block = is_switch_block,
network = network,
size_bytes = size_bytes,
state_root_hash = state_root_hash,
status = status,
timestamp = timestamp,
proposer = proposer,
)
| 5,343,590
|
def get_arg():
"""解析参数"""
parser = argparse.ArgumentParser(prog='prcdns', description='google dns proxy.')
parser.add_argument('--debug', help='debug model,default NO', default=False)
parser.add_argument('-l', '--listen', help='listening IP,default 0.0.0.0', default='0.0.0.0')
parser.add_argument('-p', '--port', help='listening Port,default 3535', default=3535)
parser.add_argument('-r', '--proxy', help='Used For Query Google DNS,default direct', default=None)
parser.add_argument('-ip', '--myip', help='IP location', default=None)
return parser.parse_args()
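
# Usage example: with no command-line arguments the defaults are returned,
# e.g. `python prcdns.py -l 127.0.0.1 -p 5353` (the script name is an assumption).
args = get_arg()
print(args.listen, args.port, args.proxy, args.myip)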
| 5,343,591
|
def test___get_flux__f() -> None:
"""
Test for `hsr4hci.photometry._get_flux__f`.
"""
# Case 1
x, y = np.meshgrid(np.arange(33), np.arange(33))
gaussian = models.Gaussian2D(
x_mean=17, x_stddev=1, y_mean=17, y_stddev=1, amplitude=1
)
position, flux = _get_flux__f(
frame=gaussian(x, y),
position=(17, 17),
)
assert position == (17, 17)
assert np.isclose(flux, 2 * np.pi * (1 - np.exp(-1 / 8)))
| 5,343,592
|
def text(title='Text Request', label='', parent=None, **kwargs):
"""
Quick and easy access for getting text input. You do not have to have a
QApplication instance, as this will look for one.
:return: str, or None
"""
# -- Ensure we have a QApplication instance
q_app = qApp()
# -- Get the text
name, ok = Qt.QtWidgets.QInputDialog.getText(
parent,
title,
label,
**kwargs
)
if not ok:
return None
return name
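
# Hedged usage sketch: needs a desktop session and a Qt binding that the
# Qt.py shim used above can resolve; the dialog blocks until the user answers.
new_name = text(title='Rename Node', label='New name:')
if new_name is not None:
    print('renaming to', new_name)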
| 5,343,593
|
def get_key_score(chroma_vector, keys, key_index):
"""Returns the score of an approximated key, given the index of the key weights to try out"""
chroma_vector = np.rot90(chroma_vector,3)
chroma_vector = chroma_vector[0,:]
key_vector = keys[key_index,:]
score = np.dot(key_vector,chroma_vector)
return score
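
# Hedged usage sketch: the shapes below are assumptions, since the snippet does
# not show how chroma_vector and keys are built. Here chroma_vector is a 12x1
# column of chroma energies and keys is an (n_keys x 12) matrix of key-profile
# weights (e.g. Krumhansl-Schmuckler profiles).
import numpy as np

chroma_vector = np.random.rand(12, 1)
keys = np.random.rand(24, 12)
print(get_key_score(chroma_vector, keys, key_index=0))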
| 5,343,594
|
def as_moderator(mocker):
"""Enforces that the requesting users is treated as a moderator"""
mock_api = mocker.patch("open_discussions.middleware.channel_api.Api").return_value
mock_api.is_moderator.return_value = True
| 5,343,595
|
def app():
"""Create the test application."""
return flask_app
| 5,343,596
|
def coord_for(n, a=0, b=1):
"""Function that takes 3 parameters or arguments, listed above, and returns a list of the interval division coordinates."""
a=float(a)
b=float(b)
coords = []
inc = (b-a)/ n
for x in range(n+1):
coords.append(a+inc*x)
return coords
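
# Usage examples:
print(coord_for(4))        # [0.0, 0.25, 0.5, 0.75, 1.0]
print(coord_for(2, 1, 3))  # [1.0, 2.0, 3.0]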
| 5,343,597
|
def find_indeces_vector(transect_lons, transect_lats, model_lons, model_lats,
tols={
'NEMO': {'tol_lon': 0.104, 'tol_lat': 0.0388},
'GEM2.5': {'tol_lon': 0.016, 'tol_lat': 0.012},
}):
"""Find all indeces for the given vector
:arg transect_lons: Longitude of point 1.
:type lon1: float or :py:class:`numpy.ndarray`
:arg transect_lats: Latitude of point 1.
:type lat1: float or :py:class:`numpy.ndarray`
:arg model_lons: Longitude of point 2.
:type lon2: float or :py:class:`numpy.ndarray`
:arg model_lats: Latitude of point 2.
:type lat2: float or :py:class:`numpy.ndarray`
:returns: vector of i and j indices associated with the input lons and lats
:rtype: float or :py:class:`numpy.ndarray`
"""
transect_i = np.array([])
transect_j = np.array([])
for k in range(0,len(transect_lons)):
i, j = find_closest_model_point(transect_lons[k], transect_lats[k], model_lons, model_lats,tols=tols)
try:
transect_i = np.append(transect_i, int(i))
transect_j = np.append(transect_j, int(j))
except:
transect_i = np.append(transect_i, np.nan)
transect_j = np.append(transect_j, np.nan)
return transect_i, transect_j
| 5,343,598
|
def _make_prediction_ops(features, hparams, mode, num_output_classes):
"""Returns (predictions, predictions_for_loss)."""
del hparams, mode
logits = tf.layers.dense(
features, num_output_classes, name='logits')
confidences = tf.nn.softmax(logits)
confidence_of_max_prediction = tf.reduce_max(confidences, axis=-1)
predicted_index = tf.argmax(confidences, axis=-1)
predictions = {
'label': predicted_index,
'logits': logits,
'confidences': confidences,
'confidence_of_max_prediction': confidence_of_max_prediction
}
predictions_for_loss = logits
return predictions, predictions_for_loss
| 5,343,599
|