| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def fine_license_ratio(license_data, fine_data, column_name1=None, column_name2=None,year=None):
"""Get ratio of fines to licenses issued in a given year
Parameters:
-----------
license_data: DataFrame
Any subset of the Professional and Occupational Licensing dataframe
fine_data: DataFrame
Any subset of the Disciplinary Actions dataframe
year: int
Year to use to subset your data
    column_name1: str
        Name of the column containing years in the license_data dataset
    column_name2: str
        Name of the column containing years in the fine_data dataset
Returns:
--------
    tuple
        (license_percentage, fine_percentage, license_count, fine_count)
"""
    year = int(year)
    column_name1 = str(column_name1)
    column_name2 = str(column_name2)
    if year not in license_data[column_name1].unique() or year not in fine_data[column_name2].unique():
        raise Exception(str(year) + " not a valid year for this dataset" + "\n----------------------------------------")
else:
license_data = license_data[license_data[column_name1]==year]
fine_data = fine_data[fine_data[column_name2]==year]
try:
license_count = len(license_data)
fine_count = len(fine_data)
fine_percentage = fine_count/license_count * 100
license_percentage = 100 - fine_percentage
return license_percentage, fine_percentage, license_count, fine_count
except ZeroDivisionError:
print("Hmmm...It looks like there is are no licenses yet for the year " + str(year))
| 12,700
|
def import_obj(obj_path, hard=False):
"""
import_obj imports an object by uri, example::
>>> import_obj("module:main")
<function main at x>
:param obj_path: a string represents the object uri.
:param hard: a boolean value indicates whether to raise an exception on
import failures.
"""
try:
# ``__import__`` of Python 2.x could not resolve unicode, so we need
# to ensure the type of ``module`` and ``obj`` is native str.
module, obj = str(obj_path).rsplit(':', 1)
m = __import__(module, globals(), locals(), [obj], 0)
return getattr(m, obj)
except (ValueError, AttributeError, ImportError):
if hard:
raise
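# Hedged usage sketch (added illustration, not part of the original record):
# resolves an object from a "module:attribute" style URI with the function above.
_sqrt = import_obj("math:sqrt")
assert _sqrt is not None and _sqrt(9) == 3.0
# With hard=False (the default) a bad attribute simply yields None instead of raising.
assert import_obj("math:does_not_exist") is None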
| 12,701
|
def Get_Histogram_key(qubitOperator):
"""
Function to obtain histogram key string for Cirq Simulator.
e.g.
PauliWord = QubitOperator('X0 Z2 Y3', 0.5j)
returning: histogram_string = '0,2,3'
Args:
qubitOperator (openfermion.ops._qubit_operator.QubitOperator): QubitOperator
Returns:
histogram_string (str): Returns string corresponding to histogram key (required for Cirq simulator)
"""
qubit_No, PauliStr = zip(*list(*qubitOperator.terms.keys()))
histogram_string = ','.join([str(i) for i in qubit_No])
return histogram_string
| 12,702
|
def get_organizations():
""" Queries API for a list of all basketball organizations registered
with Basketbal Vlaanderen.
:return: list of basketball organizations
:rtype: [Organization]
"""
organizations = []
for organization_data in get_list():
organizations.append(Organization(organization_data))
return list(sorted(organizations, key=lambda o: o.guid))
| 12,703
|
def logout():
"""Logout."""
logout_user()
flash('您已成功登出', 'info')
return redirect(url_for('public.home'))
| 12,704
|
def get_column_labels():
"""
This function generates a list of column names for the extracted features
that are returned by the get_features function.
"""
# list the names of the extracted features
feature_labels = ["amplitude_envelope",
"root_mean_square_energy",
"zero_crossing_rate",
"band_energy_ratio",
"spectral_centroid",
"spectral_bandwidth",
"spectral_contrast",
"spectral_flatness",
"spectral_rolloff",
"spectral_rolloff_99",
"spectral_rolloff_01"]
# list the names of the used descriptive statistics
measure_suffixes = ["_mean", "_min", "_max", "_std"]
# create a list to append the generated column names to
columns = ["row_index"]
# generate some labels and append them to the list
columns.extend([l+s for l in feature_labels for s in measure_suffixes])
# append labels for the distributed AE
columns.extend(["amplitude_envelope_f1",
"amplitude_envelope_f2",
"amplitude_envelope_f3",
"amplitude_envelope_f4",
"amplitude_envelope_f5"])
# append labels for the distributed RMS
columns.extend(["root_mean_square_energy_f0",
"root_mean_square_energy_f1",
"root_mean_square_energy_f2",
"root_mean_square_energy_f3",
"root_mean_square_energy_f4",
"root_mean_square_energy_f5",
"root_mean_square_energy_f6",
"root_mean_square_energy_f7",
"root_mean_square_energy_f8",
"root_mean_square_energy_f9",
"root_mean_square_energy_f10"])
# append labels for the distributed ZCR
columns.extend(["zero_crossing_rate_f0",
"zero_crossing_rate_f1",
"zero_crossing_rate_f2",
"zero_crossing_rate_f3",
"zero_crossing_rate_f4",
"zero_crossing_rate_f5",
"zero_crossing_rate_f6",
"zero_crossing_rate_f7",
"zero_crossing_rate_f8",
"zero_crossing_rate_f9",
"zero_crossing_rate_f10"])
return columns
| 12,705
|
def corr_finder(X, threshold):
""" For each variable, find the independent variables that are equal to
or more highly correlated than the threshold with the curraent variable
Parameters
----------
X : pandas Dataframe
Contains only independent variables and desired index
threshold: float < 1
Minimum level of correlation to search for
Returns
-------
    Dictionary with the keys as independent variable indices and values as a
    list of variables with a correlation greater than or equal to the threshold.
Correlation Matrix
"""
corr_matrix = X.corr(method='kendall') #create the correlation matrix
corr_dic = {}
for row_name, ser in corr_matrix.iterrows(): #search through each row
corr_list = [] #list of variables past/at the threshold
        for idx, val in ser.items(): # search through the entries of each row (iteritems was removed in pandas 2.0)
if (abs(val) > threshold) and (abs(val) != 1): #if the variable correlates past/at the threshold
corr_list.append(idx)
if len(corr_list) > 0:
corr_dic[row_name] = corr_list
return corr_dic, corr_matrix
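# Hedged usage sketch (added illustration, not part of the original record). It
# assumes numpy/pandas are available; columns "a" and "b" are built to be highly
# correlated while "c" is independent noise.
import numpy as np
import pandas as pd

_rng = np.random.RandomState(0)
_a = _rng.rand(100)
_df = pd.DataFrame({"a": _a, "b": _a + 0.01 * _rng.rand(100), "c": _rng.rand(100)})
_corr_dic, _corr_matrix = corr_finder(_df, threshold=0.8)
print(_corr_dic)  # expected to look like {'a': ['b'], 'b': ['a']}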
| 12,706
|
def scrape_url(url):
"""
makes request to input url and passes the response to be scraped and parsed
if it is not an error code response
"""
try:
r = requests.get(url, allow_redirects=True, timeout=TIMEOUT)
except Exception as e:
print('ERROR with URL: {}'.format(url))
return
status_code = r.status_code
if r and r.headers:
content_type = r.headers.get('Content-Type', 'None')
else:
return
if (status_code >= 300 or content_type.__class__.__name__ != 'str' or 'text/html' not in content_type.lower()):
print('ERROR with URL: {}, status: {}, content-type: {}'.format(url, status_code, content_type))
return
parsed_original_url_object = urlparse(url)
original_domain = get_original_domain_from_url(parsed_original_url_object)
emails, social_links = parse_response(original_domain, r)
io.temp_write_updates_to_files(url, emails, social_links)
| 12,707
|
def add(a, b):
"""Compute a + b"""
    return a + b
| 12,708
|
def magic_file(filename):
""" Returns tuple of (num_of_matches, array_of_matches)
arranged highest confidence match first.
:param filename: path to file
:return: list of possible matches, highest confidence first
"""
head, foot = _file_details(filename)
if not head:
raise ValueError("Input was empty")
try:
info = _identify_all(head, foot, ext_from_filename(filename))
except PureError:
info = []
info.sort(key=lambda x: x[3], reverse=True)
return info
| 12,709
|
def plot_sentiment(
df: pd.DataFrame, title: str = None, height: int = 300, label_col: str = "label"
) -> Figure:
"""
Plot the predicted sentiment of the sentences.
Args:
df (pd.DataFrame):
Dataframe with the outputs of a sentiment analysis model.
title (str):
Title of the plot.
height (int):
Height of the plot.
label_col (str):
Column name of the sentiment.
Returns:
Figure:
            Plotly figure with the percentage of sentences per sentiment.
"""
sentiments_count = get_counts(df, label_col=label_col)
labels_order = ["neutro", "positivo", "negativo"]
fig = px.bar(
x=labels_order,
y=[
float(sentiments_count[sentiments_count[label_col] == label].percent)
for label in labels_order
],
title=title,
)
fig.update_traces(
marker_color=["gray", "green", "red"],
hovertemplate="%{y:.1f}%<extra></extra>",
)
fig.update_layout(
xaxis_title="Sentimento",
yaxis_title="Percentagem de frases",
margin=dict(l=0, r=0, b=0, t=0, pad=0),
height=height,
)
return fig
| 12,710
|
def get_participant_output(conversation_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
participant_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetParticipantResult]:
"""
Retrieves a conversation participant.
"""
...
| 12,711
|
def subf(pattern, format, string, count=0, flags=0): # noqa A002
"""Apply `sub` with format style replace."""
is_replace = _is_replace(format)
is_string = isinstance(format, (_util.string_type, _util.binary_type))
if is_replace and not format.use_format:
raise ValueError("Compiled replace is not a format object!")
pattern = compile_search(pattern, flags)
rflags = FORMAT if is_string else 0
return _re.sub(
pattern, (compile_replace(pattern, format, flags=rflags) if is_replace or is_string else format),
string, count, flags
)
| 12,712
|
def curve_comparison_2d(x, y, labels=None, xlabel=None, ylabel=None,
markers=None, colors=None, file_name=None, logy=False,
save=False):
"""
Takes a 1d array of x values and a 3d array of y values and produces a
plot comparing the data sets contained in y such that similar rows have the
same color and similar columns have the same marker
x - 1d array of x values
y - 3d array, or 2d array where each 'element' is a dataset
labels - 2d array of strings such that each element provides a name for
the data set located in the same location in y
xlabel - string giving a label for the x axis
ylabel - string giving a label for the y axis
markers - list of strings representing matplotlib markers with same
length as number of rows in y
colors - list of strings representing valid matplotlib colors with same
length as number of columns in y
file_name - name of file the plot should be saved as
logy - boolean denoting whether y-axis should have log scale
save - boolean denoting whether plot should be saved
"""
labels = labels or [
['Line {}'.format(i * len(y) + j) for j in range(len(y[i]))]
for i in range(len(y))]
xlabel = xlabel or 'x axis'
ylabel = ylabel or 'y axis'
markers = markers or ['o', '^', 's', 'p']
colors = colors or ['#9b8e82', '#7a8e99', ]
file_name = file_name or 'comparison_plot'
# Set up figure
fig = plt.figure(figsize=(12, 6), dpi=80)
frame = gridspec.GridSpec(1, 1, right=0.7)
fig.add_subplot(frame[0])
# Plotting
for category, marker, label_set in zip(y, markers, labels):
for data, color, label in zip(category, colors, label_set):
plt.plot(x, data, '-'+marker, label=label, color=color)
# Formatting plots
if logy:
plt.yscale('log')
plt.xlabel(r'{}'.format(xlabel))
plt.ylabel(r'{}'.format(ylabel))
plt.legend()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Save figure
if save:
plt.savefig(file_name, dpi=300, bbox_inches='tight')
| 12,713
|
def page_file(
self: BaseHTTPRequestHandler, filename: str, mime: str, folder: str = ""
) -> None:
"""Sends an on-disk file to the client, with the given mime type"""
filename = folder + filename
# 404 if the file is not found.
if not os.path.exists(filename):
self.send_error(404, f"File not {filename} found on disk")
return
mod_date: int = 0
if "If-Modified-Since" in self.headers:
mod_date = int(
datetime.datetime.strptime(
str(self.headers["If-Modified-Since"]), "%a, %d %b %Y %H:%M:%S GMT"
).timestamp()
)
with open(filename, "rb") as contents:
# stat(2) the file handle to get the file size.
stat = os.fstat(contents.fileno())
if int(stat.st_mtime) <= mod_date:
send_304(self, stat)
return
if mime == "text/html":
do_replacement(self, contents, mime, stat)
return
# Send the HTTP headers.
self.send_response(200)
self.send_header("Content-Type", mime)
self.send_header("Content-Length", str(stat.st_size))
self.send_header("Last-Modified", self.date_time_string(int(stat.st_mtime)))
self.send_header("Cache-Control", "public; max-age=3600")
self.send_header("Expires", self.date_time_string(int(time.time() + 3600)))
self.end_headers()
# Send the file to the client
shutil.copyfileobj(contents, self.wfile)
| 12,714
|
def independent_connections(fn):
"""Target must support simultaneous, independent database connections."""
# This is also true of some configurations of UnixODBC and probably win32
# ODBC as well.
return _chain_decorators_on(
fn,
no_support('sqlite', 'Independent connections disabled when '
':memory: connections are used'),
exclude('mssql', '<', (9, 0, 0),
'SQL Server 2005+ is required for independent connections'),
)
| 12,715
|
def Route(template, handler):
"""Make a Route whose placeholders accept only allowable map IDs or labels."""
return webapp2.Route(template.replace('>', r':[\w-]+>'), handler)
| 12,716
|
def values_iterator(dictionary):
"""Add support for python2 or 3 dictionary iterators."""
try:
v = dictionary.itervalues() # python 2
    except AttributeError:
v = dictionary.values() # python 3
return v
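# Hedged usage sketch (added illustration, not part of the original record):
# the helper yields the same values on either Python version.
assert sorted(values_iterator({"a": 1, "b": 2})) == [1, 2]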
| 12,717
|
def _standardize_bicluster(bicluster):
"""Standardize a bicluster by subtracting the mean and dividing by standard
deviation.
Ref.:
Pontes, B., Girldez, R., & Aguilar-Ruiz, J. S. (2015). Quality measures
for gene expression biclusters. PloS one, 10(3), e0115497.
Note that UniBic synthetic data was generated with mean 0 and standard
deviation 1, so it is already standardized.
Args:
bicluster (array-like): The bicluster data values.
Returns:
        (numpy.ndarray): The standardized bicluster.
"""
_bicluster = np.copy(bicluster)
row_std = np.std(_bicluster, axis=0)
row_std[row_std == 0] = 1
row_mean = np.mean(_bicluster, axis=0)
return (_bicluster - row_mean) / row_std
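# Hedged usage sketch (added illustration, not part of the original record);
# it relies on the same numpy import the function body already needs.
import numpy as np

_b = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
_z = _standardize_bicluster(_b)
# Each column should now have mean ~0 and standard deviation ~1.
assert np.allclose(_z.mean(axis=0), 0.0) and np.allclose(_z.std(axis=0), 1.0)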
| 12,718
|
def test_nucid_to_xs_with_names():
"""Test the _nucid_to_xs function given a nuc_names dictionary."""
mat_lib = {
"mat:M1": {290630000: 0.058, 290650000: 0.026},
"mat:M2": {10010000: 0.067, 80160000: 0.033},
}
nuc_names = {}
nuc_names[290630000] = "cu63"
nuc_names[290650000] = "cu65"
nuc_names[10010000] = "h1"
nuc_names[80160000] = "o16"
mat_xs_names_expected = {
"mat:M1": {"cu63": 0.058, "cu65": 0.026},
"mat:M2": {"h1": 0.067, "o16": 0.033},
}
mat_xs_names = partisn._nucid_to_xs(mat_lib, nuc_names=nuc_names)
assert mat_xs_names == mat_xs_names_expected
| 12,719
|
def upload_video(video_file, upload_url):
"""Uploads video file to Street View via Upload URL.
Args:
video_file: The video file to upload.
upload_url: The upload URL, provided by SV Publish API in step 1.
Returns:
None.
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
file_size = get_file_size(str(video_file))
try:
curl = pycurl.Curl()
curl.setopt(pycurl.URL, upload_url)
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.CUSTOMREQUEST, "POST")
curl.setopt(pycurl.HTTPHEADER,
get_headers(credentials, file_size, upload_url))
curl.setopt(pycurl.INFILESIZE, file_size)
curl.setopt(pycurl.READFUNCTION, open(str(video_file), "rb").read)
curl.setopt(pycurl.UPLOAD, 1)
curl.perform()
curl.close()
except pycurl.error:
print "Error uploading file %s", video_file
| 12,720
|
def export_globals(function):
"""Add a function's globals to the current globals."""
rootmod = _inspect.getmodule(function)
globals()[rootmod.__name__] = rootmod
for k, v in _inspect.getmembers(rootmod, _inspect.ismodule):
if not k.startswith('__'):
globals()[k] = v
| 12,721
|
def asymmetric(interface):
"""Show asymmetric pfc"""
cmd = 'pfc show asymmetric'
if interface is not None and clicommon.get_interface_naming_mode() == "alias":
interface = iface_alias_converter.alias_to_name(interface)
if interface is not None:
cmd += ' {0}'.format(interface)
run_command(cmd)
| 12,722
|
def get_deployment_polarion_id():
"""
Determine the polarion_id of the deployment or upgrade
Returns:
str: polarion_id of the deployment or upgrade
"""
polarion_config = config.REPORTING.get('polarion')
if polarion_config:
if config.UPGRADE.get('upgrade'):
if config.DEPLOYMENT.get('subscription_plan_approval') == 'Manual':
return polarion_config.get('upgrade_manual_id')
else:
return polarion_config.get('upgrade_auto_id')
else:
return polarion_config.get('deployment_id')
| 12,723
|
def parse_command(message) -> ParsedStatusCommand:
"""Parsing command arguments to arguments list"""
LOGGER.debug('Got message: %s', message)
try:
_, target, *args = shlex.split(message)
return ParsedStatusCommand(target, *args)
except ValueError as ex:
raise CommandParsingError('Incorrect `/status` command') from ex
except TypeError as ex:
raise CommandParsingError('Too many arguments for `/status` command') from ex
| 12,724
|
def mail_on_fail(func: callable):
"""Send an email when something fails. Use this as a decorator."""
@wraps(func)
def _wrap(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
# Handle recursive error handling.
            # This way if a task wrapped in `@mail_on_fail` sends an email, we
            # don't send multiple emails.
if getattr(e, '__email_sent__', False):
raise e
# Get the stack trace
f = io.StringIO()
traceback.print_exc(file=f)
f.seek(0)
# Render the email body
html = render_template(
'mail/error.html',
stack_trace=f.read(),
func_name=getattr(func, '__name__', repr(func))
)
# Send the email
msg = ErrorEmail(html=html)
mail.send(msg)
# Mark as sent
e.__email_sent__ = True
# Raise the error
raise e
return _wrap
| 12,725
|
def compare_gaussian_classifiers():
"""
Fit both Gaussian Naive Bayes and LDA classifiers on both gaussians1 and gaussians2 datasets
"""
for f in ["gaussian1.npy", "gaussian2.npy"]:
# Load dataset
X, y = load_dataset(r'C:\Users\Lenovo\Documents\GitHub\IML.HUJI\datasets//' + f)
# Fit models and predict over training set
my_lda = LDA().fit(X, y)
lda_y_hat = my_lda.predict(X)
lda_acc = accuracy(y, lda_y_hat)
my_gdb = GaussianNaiveBayes().fit(X, y)
gdb_y_hat = my_gdb.predict(X)
gdb_acc = accuracy(y, gdb_y_hat)
        # Plot a figure with two subplots, showing the Gaussian Naive Bayes predictions on the left and LDA predictions
        # on the right. Plot title should specify dataset used and subplot titles should specify algorithm and accuracy
fig = make_subplots(rows=1, cols=2, subplot_titles=(
f[:-4] + ', GaussianNaiveBayes, accuracy: ' + str(gdb_acc), f[:-4] + ', LDA, accuracy: ' + str(lda_acc)))
# Add traces for data-points setting symbols and colors
fig.add_trace(go.Scatter(x=X[:, 0], y=X[:, 1], mode="markers",
marker=dict(color=gdb_y_hat, symbol=y, line=dict(color="black", width=0.5))),
row=1, col=1)
fig.add_trace(go.Scatter(x=X[:, 0], y=X[:, 1], mode="markers",
marker=dict(color=lda_y_hat, symbol=y, line=dict(color="black", width=0.5))),
row=1, col=2)
# Add `X` dots specifying fitted Gaussians' means
fig.add_trace(go.Scatter(mode='markers', x=my_gdb.mu_[:, 0], y=my_gdb.mu_[:, 1],
marker=dict(color='black', size=12, symbol='x')), row=1, col=1)
fig.add_trace(go.Scatter(mode='markers', x=my_lda.mu_[:, 0], y=my_lda.mu_[:, 1],
marker=dict(color='black', size=12, symbol='x')), row=1, col=2)
        # add ellipses
for i in range(len(my_lda.classes_)):
gdb_index = np.where(y == my_lda.classes_[i])
gdb_covv = np.cov(X[gdb_index].transpose())
fig.add_trace(get_ellipse(my_gdb.mu_[i], np.diag(my_gdb.vars_[i])), row=1, col=1)
fig.add_trace(get_ellipse(my_lda.mu_[i], my_lda.cov_), row=1, col=2)
fig.write_html(f + '_plot.html')
| 12,726
|
def generate_confusion_matrix(args, sort_method, dataset_cfg, output_dir, video_predictions, video_labels, tb_writer, shrink=False):
"""save confusion matrix"""
'''
for i in range(10):
print(video_predictions[i].max())
print(video_predictions[i].min())
print(video_labels[i])
print()
'''
class_indices = range(dataset_cfg.num_classes)
class_keys = dataset_cfg.class_keys
pred_labels = np.argmax(video_predictions, axis=1)
cm = confusion_matrix(video_labels, pred_labels, labels = class_indices)
class_frequency_in_train = dataset_cfg.count_train_class_frequency()
num_samples_per_target = cm.sum(axis=1)
if sort_method == 'val_class_frequency':
# sort by class frequency in validation data (video-based)
sort_labels = num_samples_per_target.argsort()[::-1]
elif sort_method == 'train_class_frequency':
# sort by class frequency in training data (video-based)
sort_labels = class_frequency_in_train.argsort()[::-1]
elif sort_method == 'val_per_class_accuracy':
# sort by accuracy per class (video-based)
cm = normalize(cm, axis=1, norm='l1') # row (true labels) will sum to 1.
sort_labels = cm.diagonal().argsort()[::-1]
else:
raise ValueError('Wrong sort_method')
if shrink:
# remove the ones with too few val samples
# whilst keeping the top 20 classes
labels_to_keep = []
for i in sort_labels:
if num_samples_per_target[i] >= 20:
labels_to_keep.append(i)
if len(labels_to_keep) >= 20:
break
class_keys, pred_labels, video_labels, sort_labels, class_frequency_in_train = shrink_labels(dataset_cfg.num_classes, labels_to_keep, class_keys, pred_labels, video_labels, sort_labels, class_frequency_in_train)
##
cm_sorted = confusion_matrix(video_labels, pred_labels, labels = sort_labels)
num_samples_per_target = cm_sorted.sum(axis=1) # class frequency in val
num_correct_pred_per_target = cm_sorted.diagonal()
class_frequency_in_train = class_frequency_in_train[sort_labels]
if args.normalise:
cm_sorted = normalize(cm_sorted, axis=1, norm='l1') # row (true labels) will sum to 1.
# Generate visualisation and summary files
out_dir = os.path.join(output_dir, 'sort_%s' % sort_method)
os.makedirs(out_dir, exist_ok = True)
df_cm = pd.DataFrame(cm_sorted, class_keys[sort_labels],
class_keys[sort_labels])
if False:
# old settings
fig = plt.figure(figsize = (350,250))
ax = fig.add_subplot(111)
# x label on top
ax.xaxis.tick_top()
sn.set(font_scale=10)#for label size
sn_plot = sn.heatmap(df_cm, annot=False, annot_kws={"size": 12}, cmap="YlGnBu", square=True, vmin=0, vmax=1)# font size
plt.xlabel('Predicted', fontsize=300)
plt.ylabel('Target', fontsize=300)
# This sets the yticks "upright" with 0, as opposed to sideways with 90.
plt.yticks(fontsize=50, rotation=0)
plt.xticks(fontsize=50, rotation=90)
# here set the colorbar labelsize by 20
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=20)
fig = dataset_cfg.plot_confusion_matrix(df_cm, shrink=shrink)
shrink_str = 'shrinked_' if shrink else ''
logger.info(f'Saving confusion matrix to {out_dir}')
plt.savefig('%s/%sconfusion.pdf' % (out_dir, shrink_str))
plt.savefig('%s/%sconfusion.png' % (out_dir, shrink_str))
tag = f'{shrink_str}sort_{sort_method}'
logger.info(f'Saving confusion matrix to TensorBoard tagged {tag}')
tb_writer.add_figure(tag, fig)
with open('%s/%sper_class_accuracy.csv' % (out_dir, shrink_str), mode='w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=str(','), quotechar=str('"'), quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(['class_key', 'accuracy (%)', 'num_correct_pred', 'num_samples_in_val', 'num_samples_in_train'])
for class_label, num_correct_pred, num_samples_in_val, num_samples_in_train in zip(class_keys[sort_labels], num_correct_pred_per_target, num_samples_per_target, class_frequency_in_train):
csvwriter.writerow([class_label, float(num_correct_pred) / num_samples_in_val * 100 if num_samples_in_val != 0 else 'NaN', num_correct_pred, num_samples_in_val, num_samples_in_train])
| 12,727
|
def base_conv(num, base):
"""Write a Python program to converting
an Integer to a string in any base"""
_list = []
if num//base == 0:
return str(num%base)
else:
return (base_conv(num//base, base) + str(num%base))
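# Hedged usage sketch (added illustration, not part of the original record).
# Note that digits are concatenated as decimal strings, so bases above 10 are
# not rendered with letters.
assert base_conv(10, 2) == "1010"
assert base_conv(255, 8) == "377"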
| 12,728
|
def handle_log(request):
""" Handle streaming logs to a client """
params = request.match_info
log_dir = py.path.local('data').join(
params['project_slug'],
params['job_slug'],
)
# Handle .log ext for DockCI legacy data
log_path_bare = log_dir.join(params['stage_slug'])
log_path_ext = log_dir.join('%s.log' % params['stage_slug'])
log_path = None
if log_path_bare.check():
log_path = log_path_bare
elif log_path_ext.check():
log_path = log_path_ext
if log_path is None:
return web.Response(status=404)
byte_seek = try_qs_int(request, 'seek')
line_seek = try_qs_int(request, 'seek_lines')
bytes_count = try_qs_int(request, 'count')
lines_count = try_qs_int(request, 'count_lines')
if byte_seek and line_seek:
return web.Response(
body="byte_seek and line_seek are mutually exclusive".encode(),
status=400,
)
if bytes_count and lines_count:
return web.Response(
body="bytes_count and lines_count are mutually exclusive".encode(),
status=400,
)
response = web.StreamResponse(status=200, headers={
'content-type': 'text/plain',
})
yield from response.prepare(request)
with log_path.open('rb') as handle:
if byte_seek is not None:
_seeker_bytes(handle, byte_seek)
if line_seek is not None:
_seeker_lines(handle, line_seek)
if bytes_count is not None:
gen = _reader_bytes(handle, bytes_count)
elif lines_count is not None:
gen = _reader_lines(handle, lines_count)
else:
gen = _reader_bytes(handle)
for data in gen:
response.write(data)
yield from response.drain()
return response
| 12,729
|
def get_scheme(patterns, config):
"""Returns the encoding scheme specified by the given config object
Args:
patterns (list(list)): List of input patterns
config (dict): The config object
"""
assert(type(patterns) == list and len(patterns) > 0)
assert(type(config) == dict)
min_max_values = utils.get_min_max_values(patterns)
pattern_dims = len(patterns[0])
scheme = None
method = config["method"]
if method == "quantize":
bits_per_attr = config["quantize"]["bits_per_attr"]
bits_set_per_attr = config["quantize"]["bits_set_per_attr"]
assert(type(bits_per_attr) == list and len(bits_per_attr) == pattern_dims)
assert(type(bits_set_per_attr) == list and len(bits_set_per_attr) == pattern_dims)
scheme = schemes.QuantizationEncoder(min_max_values, bits_per_attr, bits_set_per_attr)
elif method == "donothing":
bits_set = config["donothing"]["bits_set"]
scheme = schemes.DoNothingEncoder(bits_set)
elif method == "som":
som_path = config["som"]["som_file_path"]
scheme = schemes.SOMEncoder(som_path)
elif method == "baum":
segment_sizes = config["baum"]["segment_sizes"]
scheme = schemes.BaumEncoder(segment_sizes)
else:
raise ValueError("Unrecognized encoding method: " + method)
return scheme
| 12,730
|
def add_sort_to_todo_items(sender, **kwargs):
"""The receiver called before a todo item is saved
to give it a unique sort"""
todo = kwargs['instance']
if not todo.pk:
latest_sort = TodoModel.objects.filter(category=todo.category).count()
todo.sort = latest_sort + 1
| 12,731
|
def date_range(start: Literal["2015-1-1"], end: Literal["2015-1-3"]):
"""
usage.alphalens: 3
"""
...
| 12,732
|
def open_signatures_window(*args):
"""
open_signatures_window() -> TWidget *
Open the signatures window ( 'ui_open_builtin' ).
@return: pointer to resulting window
"""
return _ida_kernwin.open_signatures_window(*args)
| 12,733
|
def get_subscription_id(_ctx=ctx):
"""
Gets the subscription ID from either the node or
the provider context
"""
return get_credentials(_ctx=_ctx).subscription_id
| 12,734
|
def train_one():
"""
train an agent
"""
if not os.path.exists(config.TRAINING_DATA_FILE):
print("==============Start Fetching Data===========")
df = DataDowloader(start_date = config.START_DATE,
end_date = config.END_DATE,
ticker_list = config.TICKER_LIST).fetch_data()
print("==============Start Feature Engineering===========")
df = FeatureEngineer(df,
use_technical_indicator=config.USE_TECHNICAL_INDICATOR,
user_defined_feature=config.USER_DEFINED_FEATURE,
use_turbulence=config.USE_TURBULENCE).preprocess_data()
df.to_csv(config.TRAINING_DATA_FILE)
else:
print("==============Using Saved Data===========")
df = pd.read_csv(config.TRAINING_DATA_FILE)
selected_stocks = df.tic.unique()
    print('Selected stocks: {}'.format(', '.join(selected_stocks)))
# Training & Trade data split
train = data_split(df, config.START_DATE,config.START_TRADE_DATE)
trade = data_split(df, config.START_TRADE_DATE,config.END_DATE)
trade = data_filter(trade, config.MULTIPLE_STOCK_TICKER)
# data normalization
feaures_list = list(train.columns)
#feaures_list.remove('date')
#feaures_list.remove('tic')
#feaures_list.remove('close')
print('features', ', '.join(feaures_list))
#data_normaliser = preprocessing.StandardScaler()
#train[feaures_list] = data_normaliser.fit_transform(train[feaures_list])
#trade[feaures_list] = data_normaliser.fit_transform(trade[feaures_list])
print("==============Enviroiment Setup===========")
train_env_class = StockEnvTrain
trade_env_class = StockEnvTrade
if config.TRADING_POLICY == 'SINGLE_STOCK':
train_env_class = SingleStockEnv
trade_env_class = SingleStockEnv
if config.TRADING_POLICY == 'SINGLE_PORFOLIO':
train_env_class = StockPortfolioEnv
trade_env_class = StockPortfolioEnv
# calculate state action space
# stock_dimension = len(train.tic.unique())
stock_dimension = config.NUMBER_SAMPLE_STOCKS
stock_data_dimension = len(config.STOCK_DATA_COLUMNS)
tech_indicators_dimension = len(config.TECHNICAL_INDICATORS_LIST)
user_defined_dimension = len(config.STOCK_USER_DEFINED_COLUMNS) if config.USER_DEFINED_FEATURE else 0
state_space = 1 + (1 + user_defined_dimension + tech_indicators_dimension + stock_data_dimension)*stock_dimension
print('Stock dimension: {}'.format(stock_dimension))
print('State dimension {}'.format(state_space))
env_setup = EnvSetup(stock_dim = stock_dimension,
population_space = config.NUMBER_OF_STOCKS,
sample_space = config.NUMBER_SAMPLE_STOCKS,
state_space = state_space,
hmax = config.MAXIMUM_STOCKS_PER_COMMIT,
hmin= config.STOCKS_PER_BATCH,
initial_amount = config.INITIAL_AMMOUNT,
transaction_cost_pct = config.TRANSACTION_COST_PCT)
env_train = env_setup.create_env_training(data = train,
env_class = train_env_class,
turbulence_threshold=config.TURBULENCE_THRESHOLD)
agent = DRLAgent(env = env_train)
print("==============Model Training===========")
print("Using Model {}".format(config.ENABLED_MODEL))
now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M')
model_params_tuning=config.SAC_PARAMS
model_name = "SAC_{}".format(now)
model = agent.train_SAC(model_name = model_name, model_params = model_params_tuning)
if config.ENABLED_MODEL == 'ppo':
model_params_tuning=config.PPO_PARAMS
model_name = "PPO_{}".format(now)
model = agent.train_PPO(model_name=model_name, model_params = model_params_tuning)
if config.ENABLED_MODEL == 'a2c':
model_params_tuning=config.A2C_PARAMS
model_name = "A2C_{}".format(now)
model = agent.train_A2C(model_name=model_name, model_params = model_params_tuning)
if config.ENABLED_MODEL == 'ddpg':
model_params_tuning=config.DDPG_PARAMS
model_name = "DDPG_{}".format(now)
model = agent.train_DDPG(model_name=model_name, model_params = model_params_tuning)
if config.ENABLED_MODEL == 'td3':
model_params_tuning=config.TD3_PARAMS
model_name = "TD3_{}".format(now)
model = agent.train_TD3(model_name=model_name, model_params = model_params_tuning)
print("==============Model Testing===========")
backtest(model_name=model_name)
| 12,735
|
def origin_trial_function_call(feature_name, execution_context=None):
"""Returns a function call to determine if an origin trial is enabled."""
return 'RuntimeEnabledFeatures::{feature_name}Enabled({context})'.format(
feature_name=feature_name,
context=execution_context
if execution_context else "execution_context")
| 12,736
|
def plotComputedSolution(n,u,uO):
"""
    Advance the solution n time steps and plot the result.
Parameters
----------
n: Integer
Advance solution n time steps
u : Numpy array
Current solution
uO: Numpy array
Old solution
Returns
-------
None.
"""
for j in range(n):
u[1:xPtsM1] = uO[1:xPtsM1] + dp*(uO[0:xPtsM1-1] -2.0*uO[1:xPtsM1] + uO[2:xPts+1])
# Enforce boundary conditions
u[0] = lBC
u[-1] = rBC
uO, u = u, uO
t = n*dt
s = "\nSolution at time = "+str(t)+" s"
# Plot solution
plt.plot(x,u,color='purple')
plt.title(s)
plt.ylabel("u")
plt.xlabel("x")
plt.grid(True)
plt.text(0.62,8.2,r'$u_t=\alpha\cdot u_{xx}$')
plt.show()
| 12,737
|
def get_bit(byteval, index) -> bool:
"""retrieve bit value from byte at provided index"""
return (byteval & (1 << index)) != 0
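# Hedged usage sketch (added illustration, not part of the original record):
# 0b1010 has bits 1 and 3 set.
assert get_bit(0b1010, 1) is True
assert get_bit(0b1010, 2) is False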
| 12,738
|
def upsert_target(data, analyst):
"""
Add/update target information.
:param data: The target information.
:type data: dict
:param analyst: The user adding the target.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if 'email_address' not in data:
return {'success': False,
'message': "No email address to look up"}
target = Target.objects(email_address__iexact=data['email_address']).first()
is_new = False
if not target:
is_new = True
target = Target()
target.email_address = data['email_address']
bucket_list = False
ticket = False
if 'department' in data:
target.department = data['department']
if 'division' in data:
target.division = data['division']
if 'organization_id' in data:
target.organization_id = data['organization_id']
if 'firstname' in data:
target.firstname = data['firstname']
if 'lastname' in data:
target.lastname = data['lastname']
if 'note' in data:
target.note = data['note']
if 'title' in data:
target.title = data['title']
if 'bucket_list' in data:
bucket_list = data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME)
if 'ticket' in data:
ticket = data.get(form_consts.Common.TICKET_VARIABLE_NAME)
if bucket_list:
target.add_bucket_list(bucket_list, analyst)
if ticket:
target.add_ticket(ticket, analyst)
try:
target.save(username=analyst)
target.reload()
if is_new:
run_triage(target, analyst)
return {'success': True,
'message': "Target saved successfully",
'id': str(target.id)}
    except ValidationError as e:
return {'success': False,
'message': "Target save failed: %s" % e}
| 12,739
|
def test_elem_z009_elem_z009_v(mode, save_output, output_format):
"""
TEST :3.3.2 XML Representation of Element Declaration Schema
Components : Components in A may be indirectly using components from
    C. Let's assume that a type declared in B derives from one in C (which
    is possible because B imports C). Document A can declare elements
    using that type because it includes B. Such use obviously involves
    information from the base type in C as well as the explicit reference
    in B.
"""
assert_bindings(
schema="msData/element/elemZ009.xsd",
instance="msData/element/elemZ009.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 12,740
|
def batch_data(data, batch_size):
"""
data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client)
returns x, y, which are both numpy array of length: batch_size
"""
data_x = data["x"]
data_y = data["y"]
# randomly shuffle data
np.random.seed(100)
rng_state = np.random.get_state()
np.random.shuffle(data_x)
np.random.set_state(rng_state)
np.random.shuffle(data_y)
# loop through mini-batches
batch_data = list()
for i in range(0, len(data_x), batch_size):
batched_x = data_x[i : i + batch_size]
batched_y = data_y[i : i + batch_size]
batched_x = torch.from_numpy(np.asarray(batched_x)).float()
batched_y = torch.from_numpy(np.asarray(batched_y)).long()
batch_data.append((batched_x, batched_y))
return batch_data
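# Hedged usage sketch (added illustration, not part of the original record); it
# assumes numpy and torch are importable, mirroring what the function body uses.
import numpy as np

_client_data = {"x": np.random.rand(10, 3), "y": np.random.randint(0, 2, 10)}
_batches = batch_data(_client_data, batch_size=4)
# 10 samples with batch_size=4 -> mini-batches of 4, 4 and 2 samples.
assert [len(bx) for bx, _ in _batches] == [4, 4, 2]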
| 12,741
|
def ast_parse_node(node):
"""
:param ast.Node node: an ast node representing an expression of variable
:return ast.Node: an ast node for:
_watchpoints_obj = var
if <var is a local variable>:
# watch(a)
_watchpoints_localvar = "a"
elif <var is a subscript>:
# watch(a[3])
_watchpoints_parent = a
_watchpoints_subscr = 3
elif <var is an attribute>:
# watch(a.b)
_watchpoints_parent = a
_watchpoints_attr = "b"
"""
root = ast.Module(
body=[
ast.Assign(
targets=[
ast.Name(id="_watchpoints_obj", ctx=ast.Store())
],
value=node
)
],
type_ignores=[]
)
if type(node) is ast.Name:
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_localvar", ctx=ast.Store())
],
value=ast.Constant(value=node.id)
)
)
elif type(node) is ast.Subscript:
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_parent", ctx=ast.Store())
],
value=node.value
)
)
if sys.version_info.minor <= 8 and type(node.slice) is ast.Index:
value_node = node.slice.value
elif sys.version_info.minor >= 9 and type(node.slice) is not ast.Slice:
value_node = node.slice
else:
raise ValueError("Slice is not supported!")
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_subscr", ctx=ast.Store())
],
value=value_node
)
)
elif type(node) is ast.Attribute:
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_parent", ctx=ast.Store())
],
value=node.value
)
)
root.body.append(
ast.Assign(
targets=[
ast.Name(id="_watchpoints_attr", ctx=ast.Store())
],
value=ast.Constant(value=node.attr)
)
)
ast.fix_missing_locations(root)
return root
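# Hedged usage sketch (added illustration, not part of the original record):
# compile the generated module for a plain variable name and run it against a
# tiny namespace to see the helper assignments it produces.
_expr = ast.parse("a", mode="eval").body          # an ast.Name node
_module = ast_parse_node(_expr)
_ns = {"a": 42}
exec(compile(_module, "<watch>", "exec"), {}, _ns)
assert _ns["_watchpoints_obj"] == 42
assert _ns["_watchpoints_localvar"] == "a"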
| 12,742
|
def simplify_board_name(board_name: str) -> str:
"""Removes the following from board names:
- `x86-`, e.g. `x86-mario`
- `_he`, e.g. `x86-alex_he`
- `&` - e.g. `falco & falco_II`
- ',' - e.g. `hoho, but substitute a dp to vga chip` (why)
Args:
board_name: the board name to simplify
Returns:
str: a simplified board name
"""
if '&' in board_name:
# Always try to extract the first of two. For the time being,
# only legacy devices have this format and the second element
# is always the 'II' one.
board_name = board_name.split('&')[0].strip()
if ',' in board_name:
# hohoho
board_name = board_name.split(',')[0].strip()
return TO_REMOVE.sub('', board_name.lower())
| 12,743
|
def run(config, toml_config, args, _parser, _subparser, file=None):
"""Run project list command."""
config = ProjectListConfig.create(args, config, toml_config)
logger.info("Configuration: %s", config)
logger.info("Listing projects")
result = api.project.list_(
sodar_url=config.project_config.global_config.sodar_server_url,
sodar_api_token=config.project_config.global_config.sodar_api_token,
)
print(json.dumps(cattr.unstructure(result)), file=(file or sys.stdout))
| 12,744
|
def parse_args():
"""
Parse CLI arguments.
Returns
-------
argparse.Namespace
Parsed arguments
"""
parser = argparse.ArgumentParser(
description="Optimize model for inference")
parser.add_argument("-m", "--model",
dest="model_type",
help="Model type",
choices=["large"],
type=str,
default=None,
required=False)
parser.add_argument("--config",
dest="config_override",
help="Path to model config override file",
type=str,
default=None,
required=False)
parser.add_argument("--frvsr-weights",
help="Path to FRVSR weights",
type=str,
default=None,
required=False)
parser.add_argument("--gan-weights",
help="Path to GAN weights",
type=str,
default=None,
required=False)
parser.add_argument("output",
help="Output",
type=str,
default=None)
args = parser.parse_args()
if args.frvsr_weights is None and args.gan_weights is None:
parser.error("should specify FRVSR or GAN weights")
return args
| 12,745
|
def delete_news_site(user_id, news_name):
"""
Delete subscription to user list
Params:
- user_id: The user email
- news_name: The name of news provider
Return: void
"""
user_info = get_user_by_email(user_id)
user_info = user_info.to_dict()
list_news = user_info['news_sites']
if list_news.count(news_name) != 0:
list_news.remove(news_name)
else:
# The user is not subscribed to the currently passed news_name
return True
user_info['news_sites'] = list_news
db.collection('users').document(user_id).update(user_info)
| 12,746
|
def get_time_str(dt: datetime.datetime = None, tz_default=LocalTimeZone):
"""
    @param dt: if None, the current time is used
    @param tz_default: default timezone to assume when dt carries no timezone info
    """
if not dt:
dt = datetime.datetime.now()
dt = convert_zone(dt, tz_default=tz_default)
time_str = dt.isoformat().split('+')[0]
return time_str + 'Z'
| 12,747
|
def plotLikesTablePair( likesTableFNs,
plotFile, nonNormedStats = (),
includeSpecialBins = True,
getio = None ):
"""Visually plot a likes table.
"""
if getio: return dict( depends_on = likesTableFNs,
creates = plotFile,
attrs = dict( piperun_short = True ) )
    likesTable = list( map( LoadLikesTable, likesTableFNs ) )
hitsLikes = [ IDotData( likesTable[ i ].hitsLikes ) for i in range( 2 ) ]
missLikes = [ IDotData( likesTable[ i ].missLikes ) for i in range( 2 ) ]
regionLikes = [ IDotData( likesTable[ i ].regionLikes ) for i in range( 2 ) ]
pp.figure( figsize = ( 16, 18 ) )
stat_start, stat_end, stat_nbins = LoadBins( likesTable[0].likesBins )
stat_start1, stat_end1, stat_nbins1 = LoadBins( likesTable[1].likesBins )
assert( stat_start == stat_start1 )
assert( stat_end == stat_end1 )
assert( stat_nbins == stat_nbins1 )
assert( hitsLikes[0].headings == hitsLikes[1].headings )
assert( missLikes[0].headings == missLikes[1].headings )
assert( regionLikes[0].headings == regionLikes[1].headings )
regionLine = None
for statNum, stat in enumerate( hitsLikes[0].headings ):
rawStep = 1.0 / len( hitsLikes[0].headings ) * 0.93
rawBottom = rawStep * statNum
rawTop = rawBottom + rawStep
r = ( 0.1, 0.05 + rawBottom, 0.8, rawStep * 0.6 )
dbg( 'r' )
pp.axes( r )
pp.title( stat + ( ' (non-normed)' if stat in nonNormedStats else '' ) )
assert len( hitsLikes[0] ) == len( missLikes[0] ) == stat_nbins[ stat ] + CMSBins.maxSpecialBins
binSize = ( stat_end[stat] - stat_start[stat] ) / stat_nbins[stat]
binStarts = [ stat_start[stat] + binSize * i
for i in range( stat_nbins[ stat ] + ( CMSBins.stat_numSpecialBins[ stat ] if includeSpecialBins else 0 ) ) ]
pp.gca().set_xticks( binStarts )
pp.gca().set_xticklabels( [ '%.2f' % b for b in binStarts[: stat_nbins[stat] ] ] +
( list( DictGet( CMSBins.stat_specialBinNames,
stat, () ) ) if includeSpecialBins else [] ),
rotation = 'vertical' )
# pp.gca().set_xticklabels( map( str, binStarts ) + [ 's%d' % i for i in range( CMSBins.stat_numSpecialBins[ stat ] ) ] )
dbg( 'stat binStarts' )
hitsLine = [ None, None ]
missLine = [ None, None ]
regionLine = [ None, None ]
for i, style in ( ( 0, '-' ), ( 1, ':' ) ):
hitsLine[i], = pp.plot( binStarts , hitsLikes[i][ stat ][:len( binStarts )], 'r' + style )
missLine[i], = pp.plot( binStarts , missLikes[i][ stat ][:len( binStarts )], 'g' + style )
regionLine[i], = pp.plot( binStarts, regionLikes[i][ stat ][:len(binStarts)], 'b' + style )
pp.figlegend( filter( None, ( hitsLine[0], missLine[0], regionLine[0],
hitsLine[1], missLine[1], regionLine[1] ) ),
( 'selected SNPs 1', 'neutral SNPs in neutral regions 1', 'region snps 1',
'selected SNPs 2', 'neutral SNPs in neutral regions 2', 'region snps 2',
),
'upper center' )
pp.savefig( plotFile )
| 12,748
|
def test_with_invalid_params(flask_app):
"""Verify that the api responds correctly when invalid/wrong input params supplied."""
# str instead of int in request_id field
request_id = 'abcd'
request_type = 'registration_request'
rv = flask_app.get('{0}?request_id={1}&request_type={2}'.format(CLASSIFICATION_API, request_id, request_type))
assert rv.status_code == 422
data = json.loads(rv.data.decode('utf-8'))
assert data['request_id'] == ["Bad 'request_id':'abcd' argument format. Accepts only integer"]
# int instead of str in request_type field
request_id = 13123123123132324231312
request_type = 3
rv = flask_app.get('{0}?request_id={1}&request_type={2}'.format(CLASSIFICATION_API, request_id, request_type))
assert rv.status_code == 422
data = json.loads(rv.data.decode('utf-8'))
assert data['request_type'] == ["Bad 'request_type':'3' argument format. Accepts only one of ["
"'registration_request', 'de_registration_request']"]
# str other than registration_request/de-registration_request in request_type field
request_type = 'abc_request'
rv = flask_app.get('{0}?request_id={1}&request_type={2}'.format(CLASSIFICATION_API, request_id, request_type))
assert rv.status_code == 422
data = json.loads(rv.data.decode('utf-8'))
assert data['request_type'] == ["Bad 'request_type':'abc_request' argument format. Accepts only one of ["
"'registration_request', 'de_registration_request']"]
# no request_id argument
request_type = 'registration_request'
rv = flask_app.get('{0}?request_type={1}'.format(CLASSIFICATION_API, request_type))
assert rv.status_code == 422
data = json.loads(rv.data.decode('utf-8'))
assert data['error'] == ['request_id is required']
# no request_type argument
request_id = 1
rv = flask_app.get('{0}?request_id={1}'.format(CLASSIFICATION_API, request_id))
assert rv.status_code == 422
data = json.loads(rv.data.decode('utf-8'))
assert data['error'] == ['request_type is required']
| 12,749
|
def test_reload():
"""
Call this function from Jupyter to check that autoreload has been set up correctly!
"""
print('bar')
| 12,750
|
def get_res_details(f):
""" extracts bmaj, bmin, bpa and coordinate increment"""
cmd = "prthd in=%s 2>/dev/null"%(f)
pcmd = os.popen(cmd)
output = pcmd.read()
output = output.split('\n')
#print(output)
for lin in output:
if 'Beam Size' in lin:
print(lin)
bmaj = float(lin.split()[2])
bmin = float(lin.split()[4])
if 'Position ang' in lin:
print(lin.split())
bpa = float(lin.split()[2])
if lin.startswith("RA"):
inc = math.fabs(float(lin.split()[4]))
return bmaj,bmin,bpa,inc
| 12,751
|
def copytree(src, dst):
"""Copy directory, clear the dst if it existed."""
if src != dst:
shutil.rmtree(dst, ignore_errors=True)
shutil.copytree(src, dst)
Config.log.debug('HMI: Copying directory from %s to %s', src, dst)
| 12,752
|
def del_api_msg():
"""
    @api {post} /v1/interfaceapimsg/del InterfaceApiImsg_delete interface info
@apiName interfaceApiImsgDel
@apiGroup Interface
    @apiDescription Delete interface information
    @apiParam {int} apiMsgId interface information id
@apiParamExample {json} Request-Example:
{
"apiMsgId": 1,
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"msg": "删除成功",
"status": 1
}
"""
data = request.json
api_msg_id = data.get('apiMsgId')
jsondata = InterfaceApiMsgBusiness.del_api_msg(api_msg_id)
return jsondata
| 12,753
|
def find_nearest(array, value):
""" Find nearest value of interest in array (used for frequencies,
no double value issues)
Parameters
----------
array: array
Give the array in which you want to find index of value nearest-by
value: int or float
The value of interest
Return
------
idx: int
Index of value nearest by value of interest
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
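# Hedged usage sketch (added illustration, not part of the original record);
# it relies on the same numpy import the function body already needs.
import numpy as np

_freqs = np.array([8.0, 10.0, 12.0, 14.0])
assert find_nearest(_freqs, 11.2) == 2   # 12.0 is the closest frequency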
| 12,754
|
def tube_light_generation_by_func(k, b, alpha, beta, wavelength, w = 400, h = 400):
"""Description:
    This function generates a tube light (light beam) with the given parameters, in which
k and b represent the function y = k*x + b
# TODO:
Test k, b range
Args:
k (int): y = k*x + b
b (int): y = k*x + b
        alpha (float): A value in (0, 1] that denotes the illumination intensity.
        beta (int): Attenuation factor; depends on the attenuation function, currently beta/distance^2.
        wavelength (integer): An integer in (380, 750) that denotes the wavelength of the light.
w (int, optional): Width. Defaults to 400.
h (int, optional): Height. Defaults to 400.
Returns:
tube light: an numpy array with shape (w,h,3)
"""
tube_light = np.zeros((w,h,3))
full_light_end_y = int(math.sqrt(beta) + 0.5)
light_end_y = int(math.sqrt(beta * 20) + 0.5)
c = wavelength_to_rgb(wavelength)
for x in range(w):
for y in range(h):
distance = abs(k*x - y + b) / math.sqrt(1 + k*k)
if distance < 0:
print(distance)
if distance <= full_light_end_y:
tube_light[y,x,0] = c[0] * alpha
tube_light[y,x,1] = c[1] * alpha
tube_light[y,x,2] = c[2] * alpha
elif distance> full_light_end_y and distance <= light_end_y:
attenuation = beta/(distance * distance)
tube_light[y,x,0] = c[0] * alpha * attenuation
tube_light[y,x,1] = c[1] * alpha * attenuation
tube_light[y,x,2] = c[2] * alpha * attenuation
return tube_light
| 12,755
|
async def test_creating_entry_sets_up_climate(hass, discovery, device, setup):
"""Test setting up Gree creates the climate components."""
result = await hass.config_entries.flow.async_init(
GREE_DOMAIN, context={"source": config_entries.SOURCE_USER}
)
# Confirmation form
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
assert len(setup.mock_calls) == 1
| 12,756
|
def upsert_multi_sector_cheby_dict(conn, multi_sector_cheby_dict):
"""
insert/update multi_sector_cheby_dict for a single object
N.B ...
https://stackoverflow.com/questions/198692/can-i-pickle-a-python-dictionary-into-a-sqlite3-text-field
pdata = cPickle.dumps(data, cPickle.HIGHEST_PROTOCOL)
curr.execute("insert into table (data) values (:data)", sqlite3.Binary(pdata))
To insert multiple rows into a table, you use the following form of the INSERT statement:
INSERT INTO table1 (column1,column2 ,..)
VALUES
(value1,value2 ,...),
(value1,value2 ,...),
...
(value1,value2 ,...);
inputs:
-------
conn: Connection object
multi_sector_cheby_dict : dictionary
- see orbit_cheby module for detailed specification
return:
-------
"""
# Extract necessary info from dict
unpacked_designation = multi_sector_cheby_dict['unpacked_designation']
print( multi_sector_cheby_dict.keys() )
    # Explicit loop over sectors to generate insert statement
for sector_dict in multi_sector_cheby_dict['sectors']:
#
pdata = pickle.dumps(tracklet_dict, pickle.HIGHEST_PROTOCOL)
sql = """ INSERT OR REPLACE INTO tracklets(jd,hp,tracklet_name,tracklet)
VALUES(?,?,?,?)
"""
cur = conn.cursor()
cur.execute(sql, (jd, hp, tracklet_name, sqlite3.Binary(pdata),))
conn.commit()
| 12,757
|
def get_book(isbn):
"""
Retrieve a specific book record by it's ISBN
---------------------------------------------
Endpoints:
GET /books/isbn
GET /books/isbn?act=(borrow|handback)
@QueryParams:
act: (optional) specific action on book
Possible values: borrow, handback
@Response:
200: return book record
"""
try:
book = Book.objects.get(isbn=isbn)
if request.args.get("act") == "borrow":
if book["available"] > 0:
book["available"] -= 1
else:
return "This book is unavailable"
elif request.args.get("act") == "handback":
if book["available"] < book["copies"]:
book["available"] += 1
else:
return "You can't adda new copy"
book.save()
return jsonify(book)
except:
return "We don't carry this book"
| 12,758
|
def toint16(i):
""" Convert a number to a hexadecimal string of length 2 """
return f'{i:02x}'
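# Hedged usage sketch (added illustration, not part of the original record):
# values are rendered as two lowercase hex digits (no clamping above 255).
assert toint16(10) == "0a"
assert toint16(255) == "ff"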
| 12,759
|
def save_checkpoint(state, is_best, filename = 'checkpoint.pth.tar', folder = ''):
""" Save the lastest checkpoint and the best model so far.
"""
# make directory if folder doesn't exist
if not os.path.exists(folder):
os.makedirs(folder)
torch.save(state, folder + '/' + filename)
if is_best:
shutil.copyfile(folder + '/' + filename, folder + '/model_best.pth.tar')
| 12,760
|
def call_later(function:Callable, *args, delay=0.001):
"""
Call Your Function Later Even Between Other Operations
(This function uses threading module so be careful about
how, when, and on what object you are going to operate on)
Parameters
----------
function : Callable
this should be your function name
delay : float,int
delay before calling function in seconds, by default 0.001
"""
import threading
thread = threading.Thread(target=lambda: (sleep(delay), function(*args)))
thread.start()
#keyboard.call_later(function, args, delay)
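# Hedged usage sketch (added illustration, not part of the original record):
# the callback fires from a background thread roughly 0.2 s after the call
# returns, while the main flow keeps going immediately.
call_later(print, "fired later", delay=0.2)
print("main flow keeps going")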
| 12,761
|
def is_phone(text):
"""
验证字符串是否是固定电话
:param text: 需要检查的字符串
:return: 符合返回True,不符合返回False
"""
return check_string(text, '\(?0\d{2,3}[) -]?\d{7,8}$')
| 12,762
|
def get_loc(frameInfo, bbox_type):
"""Return GeoJSON bbox."""
    bbox = np.array(frameInfo.getBBox()).astype(float)
    print("get_loc bbox: %s" %bbox)
    if bbox_type == "refbbox":
        bbox = np.array(frameInfo.getReferenceBBox()).astype(float)
coords = [
[ bbox[0,1], bbox[0,0] ],
[ bbox[1,1], bbox[1,0] ],
[ bbox[2,1], bbox[2,0] ],
[ bbox[3,1], bbox[3,0] ],
[ bbox[0,1], bbox[0,0] ],
]
print("get_loc coords : [%s]" %coords)
return {
"type": "Polygon",
"coordinates": [coords]
}
| 12,763
|
def load_df(input_path, fname, ext):
"""Read chain as Pandas DataFrame"""
fname = os.path.join(input_path, fname + ext)
    print('loading %s' % fname)
    assert(os.path.isabs(fname))
    # pd.DataFrame.from_csv was removed in pandas 1.0; read_csv with these options is equivalent
    X = pd.read_csv(fname, index_col=0, parse_dates=True)
return X
| 12,764
|
def _load_order_component(comp_name: str, load_order: OrderedSet,
loading: Set) -> OrderedSet:
"""Recursive function to get load order of components.
Async friendly.
"""
component = get_component(comp_name)
# If None it does not exist, error already thrown by get_component.
if component is None:
return OrderedSet()
loading.add(comp_name)
for dependency in getattr(component, 'DEPENDENCIES', []):
# Check not already loaded
if dependency in load_order:
continue
# If we are already loading it, we have a circular dependency.
if dependency in loading:
_LOGGER.error("Circular dependency detected: %s -> %s",
comp_name, dependency)
return OrderedSet()
dep_load_order = _load_order_component(dependency, load_order, loading)
# length == 0 means error loading dependency or children
if not dep_load_order:
_LOGGER.error("Error loading %s dependency: %s",
comp_name, dependency)
return OrderedSet()
load_order.update(dep_load_order)
load_order.add(comp_name)
loading.remove(comp_name)
return load_order
| 12,765
|
def zheng_he(qid_file_name,path,index):
"""
调用以上的所有函数,整合到一起
Parameters
----------
qid_file_name : str
文件名.
path : str
数据的储存文件路径..
index : int
第几个问题.
Returns
-------
None.
"""
try:
qid = get_qid(qid_file_name)
keywords = question_info_get(qid)
save_answer(qid,keywords,path)
        if index%1000 == 0: # print progress every 1000 questions
percent = index/76589
print(str(index),' 占比 = ' + '{:.4%}'.format(percent))
except:
print(str(qid) + ' error')
pass
# qid = get_qid(qid_file_name)
# keywords = question_info_get(qid)
# save_answer(qid,keywords,path)
# if index%5 == 0:
# percent = index/55
# print(str(index),' 占比 = ' + '{:.0%}'.format(percent))
| 12,766
|
def clean_english_str_tf(input_str):
"""Clean English string with tensorflow oprations."""
# pylint: disable=anomalous-backslash-in-string
string = tf.regex_replace(input_str, r"[^A-Za-z0-9(),!?\'\`<>/]", " ")
string = tf.regex_replace(string, "\'s", " \'s")
string = tf.regex_replace(string, "\'ve", " \'ve")
string = tf.regex_replace(string, "n\'t", " n\'t")
string = tf.regex_replace(string, "\'re", " \'re")
string = tf.regex_replace(string, "\'d", " \'d")
string = tf.regex_replace(string, "\'ll", " \'ll")
string = tf.regex_replace(string, ",", " , ")
string = tf.regex_replace(string, "!", " ! ")
string = tf.regex_replace(string, "\(", " ( ")
string = tf.regex_replace(string, "\)", " ) ")
string = tf.regex_replace(string, "\?", " ? ")
string = tf.regex_replace(string, "\s{2,}", " ")
string = tf.string_strip(string)
string = py_x_ops.str_lower(string)
return string
| 12,767
|
def config_clear_protocol(_) -> None:
"""Clowder config clear protocol command entry point"""
CONSOLE.stdout(' - Clear protocol config value')
config = Config()
config.protocol = None
config.save()
| 12,768
|
def create():
""" Create an admin user """
print ("List of existing users :")
for user in User.all(User):
print (user.id, user.name, user.email)
print ()
print ("New user")
print ('Enter name: ')
name = input()
print ('Enter email: ')
email = input()
password = getpass.getpass()
assert password == getpass.getpass('Password (again):')
new_user = User(email=email, name=name, password=generate_password_hash(password, method='sha256'))
db.session.add(new_user)
db.session.commit()
print ('User added.')
| 12,769
|
def _square_eqt(x, y, x0, y0, angle):
"""simple equation for a square.
this returns: max(np.dstack([abs(x0 - x), abs(y0 -y)]), 2). this should then be compared to the
"radius" of the square (half the width)
the equation comes from this post:
http://polymathprogrammer.com/2010/03/01/answered-can-you-describe-a-square-with-1-equation/
x, y: either one number or arrays of the same size (as returned by meshgrid)
angle: angle in degrees. should lie in [-45, 45)
"""
x = np.array(x)
y = np.array(y)
vals = np.max(np.dstack([np.abs(x0 - x), np.abs(y0 - y)]), 2)
if x.ndim == 2:
# only rotate the image if x is 2d. in that case, we're returning a rotated image of the
# square. if x is 1d, then we just want the distance to the origin (which we don't rotate)
# -- the "radius" of the square will need to be rotated
vals = ndimage.rotate(vals, angle)
vals = _reshape_rotated_image(vals, x.shape)
return vals.reshape(x.shape)
| 12,770
|
def path_command(subparsers):
"""Adds the specific options for the path command"""
from argparse import SUPPRESS
parser = subparsers.add_parser('path', help=path.__doc__)
parser.add_argument('-d', '--directory', dest="directory", default='', help="if given, this path will be prepended to every entry returned (defaults to '%(default)s')")
parser.add_argument('-e', '--extension', dest="extension", default='', help="if given, this extension will be appended to every entry returned (defaults to '%(default)s')")
parser.add_argument('id', nargs='+', type=int, help="one or more file ids to look up. If you provide more than one, files which cannot be found will be omitted from the output. If you provide a single id to lookup, an error message will be printed if the id does not exist in the database. The exit status will be non-zero in such case.")
parser.add_argument('--self-test', dest="selftest", default=False,
action='store_true', help=SUPPRESS)
parser.set_defaults(func=path) # action
| 12,771
|
def multinomial(x, num_samples=1, replacement=False, name=None):
"""
    This OP returns a Tensor filled with random values sampled from a Multinomial
    distribution. The input ``x`` is a tensor with probabilities for generating the
    random number. Each element in ``x`` should be larger than or equal to 0, but not all
    0. ``replacement`` indicates whether it is a replaceable sample. If ``replacement``
    is True, a category can be sampled more than once.
Args:
x(Tensor): A tensor with probabilities for generating the random number. The data type
should be float32, float64.
num_samples(int, optional): Number of samples, default is 1.
replacement(bool, optional): Whether it is a replaceable sample, default is False.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor filled with sampled category index after ``num_samples`` times samples.
Examples:
.. code-block:: python
import paddle
paddle.seed(100) # on CPU device
x = paddle.rand([2,4])
print(x)
# [[0.5535528 0.20714243 0.01162981 0.51577556]
# [0.36369765 0.2609165 0.18905126 0.5621971 ]]
paddle.seed(200) # on CPU device
out1 = paddle.multinomial(x, num_samples=5, replacement=True)
print(out1)
# [[3 3 0 0 0]
# [3 3 3 1 0]]
# out2 = paddle.multinomial(x, num_samples=5)
# InvalidArgumentError: When replacement is False, number of samples
# should be less than non-zero categories
paddle.seed(300) # on CPU device
out3 = paddle.multinomial(x, num_samples=3)
print(out3)
# [[3 0 1]
# [3 1 0]]
"""
assert core.is_compiled_with_rocm() == False, (
"multinomial op is not supported on ROCM yet.")
if in_dygraph_mode():
return _C_ops.multinomial(x, 'num_samples', num_samples, 'replacement',
replacement)
check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")
helper = LayerHelper("multinomial", **locals())
out = helper.create_variable_for_type_inference(
dtype=convert_np_dtype_to_dtype_('int64'))
helper.append_op(
type='multinomial',
inputs={"X": x},
outputs={'Out': out},
attrs={'num_samples': num_samples,
'replacement': replacement})
out.stop_gradient = True
return out
| 12,772
|
def run_experiment(
max_epochs,
log=None,
evaluate=True,
projection=True,
save_directory=".",
save_file=None,
save_interval=1,
**configuration,
):
"""Runs the Proof of Constraint experiment with the given configuration
:param max_epochs: number of epochs to run the experiment
:param log: function to use for logging. None supresses logging
:param evaluate: whether to run the evaluator once over the
training data at the end of an epoch
:param projection: whether to run the projection engine once over the
testing data at the end of an epoch
:param save_directory: optional directory to save checkpoints into. Defaults
to the directory that the main script was called from
:param save_file: base filename for checkpointing. If not provided, then no
checkpointing will be performed
:param save_interval: frequency of saving out model checkpoints. Defaults to
every epoch
:param configuration: kwargs for various settings. See default_configuration
for more details
:returns: the configuration dictionary, a tuple of all engines (first will
be the training engine), and a corresponding tuple of all monitors
"""
# Determine the parameters of the analysis
should_log = log is not None
should_checkpoint = save_file is not None
kwargs = default_configuration()
kwargs.update(configuration)
if should_log:
log(kwargs)
# Get the data
train_dl, test_dl = get_data(kwargs)
# Build the model, optimizer, loss, and constraint
model, opt, proj_opt = build_model_and_optimizer(kwargs)
loss, constraint = get_loss_and_constraint(kwargs)
# Setup Monitors and Checkpoints
training_monitor = TrainingMonitor("training")
evaluation_monitor = TrainingMonitor("evaluation") if evaluate else None
projection_monitor = ProjectionMonitor() if projection else None
prediction_logger = PredictionLogger(model)
if should_checkpoint:
checkpointer = ModelAndMonitorCheckpointer(
save_directory,
save_file,
kwargs,
[training_monitor, evaluation_monitor, projection_monitor],
prediction_logger,
save_interval=save_interval,
)
else:
checkpointer = None
# This is the trainer because we provide the optimizer
trainer = create_engine(
model,
loss,
constraint,
opt,
projection=False,
monitor=training_monitor,
regularization_weight=kwargs["regularization_weight"],
error_fn=kwargs["error_fn"],
device=kwargs["device"],
tolerance=kwargs["tolerance"],
max_iterations=kwargs["max_iterations"],
)
# These are not trainers simply because we don't provide the optimizer
if evaluate:
evaluator = create_engine(
model,
loss,
constraint,
optimizer=None,
projection=False,
monitor=evaluation_monitor,
regularization_weight=kwargs["regularization_weight"],
error_fn=kwargs["error_fn"],
device=kwargs["device"],
tolerance=kwargs["tolerance"],
max_iterations=kwargs["max_iterations"],
)
else:
evaluator = None
if projection:
projector = create_engine(
model,
loss,
constraint,
proj_opt,
projection=True,
monitor=projection_monitor,
regularization_weight=kwargs["regularization_weight"],
error_fn=kwargs["error_fn"],
device=kwargs["device"],
tolerance=kwargs["tolerance"],
max_iterations=kwargs["max_iterations"],
)
else:
projector = None
prediction_logger.attach(trainer, projector)
# Ensure evaluation happens once per epoch
@trainer.on(Events.EPOCH_COMPLETED)
def run_evaluation(trainer):
if training_monitor is not None and should_log:
summary = training_monitor.summarize()
log(
f"Epoch[{trainer.state.epoch:05d}] Training Summary - {summary}"
)
if evaluate:
if should_log:
log(
f"Epoch[{trainer.state.epoch:05d}] - Evaluating on training data..."
)
evaluator.run(train_dl)
if evaluation_monitor is not None and should_log:
summary = evaluation_monitor.summarize()
log(
f"Epoch[{trainer.state.epoch:05d}] Evaluation Summary - {summary}"
)
# Handle projection
if projection:
if should_log:
log(f"Epoch[{trainer.state.epoch:05d}] - Projecting...")
projector.run(test_dl, max_epochs=kwargs["max_iterations"])
if projection_monitor is not None and should_log:
summary = projection_monitor.summarize()
log(
f"Epoch[{trainer.state.epoch:05d}] Generalization Summary - {summary}"
)
if should_checkpoint:
checkpointer(trainer)
# Handle projection summary
if projection:
@projector.on(Events.EPOCH_COMPLETED)
def projection_summary(projector):
if projection_monitor is not None and should_log:
summary = projection_monitor.summarize(during_projection=True)
log(
f"Epoch[{trainer.state.epoch:05d}-{projector.state.epoch:05d}] Projection Summary - {summary}"
)
@projector.on(Events.EPOCH_COMPLETED)
def projection_stop(projector):
if projection_monitor is not None:
if projection_monitor.should_stop_projection(
kwargs["tolerance"]
):
projector.terminate()
@projector.on(Events.COMPLETED)
def projection_unterminate(projector):
# Unblock the projector so it can resume later
projector.should_terminate = False
if should_log:
@trainer.on(Events.ITERATION_COMPLETED)
def log_batch_summary(trainer):
log(
"Epoch[{:05d}] - Total loss: {:.5f}, Data Loss: {:.5f}, Constraint Error: {:.5f}".format(
trainer.state.epoch,
trainer.state.total_loss.cpu().item(),
trainer.state.mean_loss.cpu().item(),
trainer.state.constraints_error.cpu().item(),
)
)
trainer.run(train_dl, max_epochs=max_epochs)
# Save final model and monitors
if should_checkpoint:
checkpointer.retrieve_and_save(trainer)
return (
kwargs,
(trainer, evaluator, projector),
(training_monitor, evaluation_monitor, projection_monitor),
)
| 12,773
|
def compose_decorators(decorators: List[Callable]) -> Callable:
"""Compose multiple decorators into one.
Helper function for combining multiple instrumentation decorators into one.
:param list(Callable) decorators: A list of instrumentation decorators to be
combined into a single decorator.
"""
def composed(func: Callable, **dkwargs) -> Callable:
@wraps(func)
def wrapper(*args, **kwargs):
wrapped_func = func
for decorator in decorators:
wrapped_func = decorator(wrapped_func, **dkwargs)
return wrapped_func(*args, **kwargs)
return wrapper
return composed
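# Hedged usage sketch (illustrative, not from the source): composing two toy
# instrumentation decorators. Each decorator must accept the wrapped function plus the
# shared **dkwargs that `composed` forwards to every decorator in the list. Assumes
# functools.wraps is imported, as it already is for compose_decorators itself.
import time

def _log_calls(func, **dkwargs):
    @wraps(func)
    def inner(*args, **kwargs):
        print(f"calling {func.__name__}")
        return func(*args, **kwargs)
    return inner

def _time_calls(func, **dkwargs):
    @wraps(func)
    def inner(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        print(f"{func.__name__} took {time.perf_counter() - start:.6f}s")
        return result
    return inner

_instrumented = compose_decorators([_log_calls, _time_calls])

@_instrumented
def _add(a, b):
    return a + b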
| 12,774
|
def xyz_to_polar(sphere_points):
"""
(B,3,N) -> theta, phi (B,2,N), r (B)
x = r*cos(theta)*sin(phi)
y = r*sin(theta)*sin(phi)
z = r*cos(phi)
"""
r = torch.sqrt(torch.sum(sphere_points*sphere_points, dim=1))
theta = torch.atan2(sphere_points[:,1,:], sphere_points[:,0,:])
z = sphere_points[:,2,:]/r
z.clamp_(-1.0+1e-5, 1.0-1e-5)
phi = torch.acos(z)
phi = phi.masked_fill(z==1, 0.0)
r = torch.mean(r, dim=-1)
assert(check_values(phi))
assert(check_values(theta))
return torch.stack([theta,phi], dim=1), r
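# Hedged companion sketch (not from the source): inverting xyz_to_polar using the
# equations in its docstring. Exact recovery is only expected when all points of a
# batch element share the same radius, since xyz_to_polar averages r over the points.
def _polar_to_xyz_example(angles, r):
    theta, phi = angles[:, 0, :], angles[:, 1, :]
    x = r[:, None] * torch.cos(theta) * torch.sin(phi)
    y = r[:, None] * torch.sin(theta) * torch.sin(phi)
    z = r[:, None] * torch.cos(phi)
    return torch.stack([x, y, z], dim=1)  # (B, 3, N)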
| 12,775
|
def get_cpu_cores():
"""获取每个cpu核的信息
Returns:
统计成功返回是一个元组:
第一个元素是一个列表存放每个cpu核的信息
第二个元素是列表长度, 也就是计算机中cpu核心的总个数
若统计出来为空, 则返回None
"""
cpu_cores = []
with open('/proc/cpuinfo') as f:
for line in f:
info = line.strip()
if info.startswith('model name'):
model_name = info.split(':')[1].strip()
cpu_cores.append(model_name)
if cpu_cores:
return cpu_cores, len(cpu_cores)
return None
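# Hedged usage sketch (illustrative): summarising the cores reported by get_cpu_cores.
# Only meaningful on Linux hosts that expose /proc/cpuinfo.
from collections import Counter

def _print_cpu_summary():
    result = get_cpu_cores()
    if result is None:
        print("no CPU core information found")
        return
    cores, total = result
    for model, count in Counter(cores).items():
        print("%d x %s" % (count, model))
    print("total cores: %d" % total)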
| 12,776
|
def itk_resample(image: sitk.Image, spacing: Union[float, Tuple[float, float, float]], *,
interpolation: str = "nearest", pad_value: int) -> sitk.Image:
"""
resample sitk image given spacing, pad value and interpolation.
Args:
image: sitk image
spacing: new spacing, either a scalar or a tuple of three scalars.
interpolation: interpolation method, "linear" or "nearest".
pad_value: pad value for out of space pixels.
Returns:
torch.Tensor: affine params in correct shape
"""
if check_scalar(spacing):
spacing: Tuple[float, float, float] = (spacing, spacing, spacing) # noqa
ori_spacing = image.GetSpacing()
ori_size = image.GetSize()
new_size = (round(ori_size[0] * (ori_spacing[0] / spacing[0])),
round(ori_size[1] * (ori_spacing[1] / spacing[1])),
round(ori_size[2] * (ori_spacing[2] / spacing[2])))
interp = {"linear": sitk.sitkLinear, "nearest": sitk.sitkNearestNeighbor, "cosine": sitk.sitkCosineWindowedSinc}[
interpolation]
return sitk.Resample(image, new_size, sitk.Transform(), interp, image.GetOrigin(), spacing, image.GetDirection(),
pad_value, image.GetPixelID())
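# Hedged usage sketch (file paths and the -1024 HU padding value are illustrative
# assumptions): resampling a CT volume to 1 mm isotropic spacing with nearest-neighbour
# interpolation and air used as the padding value for voxels outside the original volume.
def _resample_example(path_in: str, path_out: str) -> None:
    img = sitk.ReadImage(path_in)
    resampled = itk_resample(img, 1.0, interpolation="nearest", pad_value=-1024)
    sitk.WriteImage(resampled, path_out)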
| 12,777
|
def check_local_dir(local_dir_name):
"""本地文件夹是否存在,不存在则创建"""
if not os.path.exists(local_dir_name):
os.makedirs(local_dir_name)
| 12,778
|
def save(object, filename, bin = 1):
"""Saves a compressed object to disk
"""
file = gzip.GzipFile(filename, 'wb')
file.write(pickle.dumps(object, bin))
file.close()
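# Hedged companion sketch (not part of the source): the matching loader for `save`,
# reading the gzip stream back and unpickling it.
def load(filename):
    """Loads a compressed object from disk"""
    with gzip.GzipFile(filename, 'rb') as f:
        return pickle.loads(f.read())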
| 12,779
|
def gen_anchor_targets(
anchors,
image,
bboxes,
labels,
num_classes,
negative_overlap=0.4,
positive_overlap=0.5
):
""" Generate anchor targets for bbox detection.
@author: Eli
This is a version of anchor_targets_bbox that takes tensors for images, bboxes, and labels
to play nice with tensorflow.
Args
        anchors: np.array of anchors of shape (N, 4) for (x1, y1, x2, y2).
        image: the image, used to ignore anchors whose centers fall outside of it.
        bboxes: np.array of ground-truth boxes of shape (n, 4) for (x1, y1, x2, y2).
        labels: np.array of shape (n,) holding the class index of each ground-truth box.
        num_classes: Number of classes to predict.
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).
Returns
labels_target: batch that contains labels & anchor states (np.array of shape (batch_size, N, num_classes + 1),
where N is the number of anchors for an image and the last column defines the anchor state (-1 for ignore, 0 for bg, 1 for fg).
regression_target: batch that contains bounding-box regression targets for an image & anchor states (np.array of shape (batch_size, N, 4 + 1),
where N is the number of anchors for an image, the first 4 columns define regression targets for (x1, y1, x2, y2) and the
last column defines anchor states (-1 for ignore, 0 for bg, 1 for fg).
"""
regression_target = np.zeros(
(anchors.shape[0], 4 + 1), dtype=np.float32)
labels_target = np.zeros(
(anchors.shape[0], num_classes + 1), dtype=np.float32)
# compute labels and regression targets
if bboxes.shape[0]:
# obtain indices of ground truth annotations with the greatest overlap
positive_indices, ignore_indices, argmax_overlaps_inds = utils.anchors.compute_gt_annotations(
anchors, bboxes, negative_overlap, positive_overlap)
labels_target[ignore_indices, -1] = -1
labels_target[positive_indices, -1] = 1
regression_target[ignore_indices, -1] = -1
regression_target[positive_indices, -1] = 1
# compute target class labels
labels_target[positive_indices, labels
[argmax_overlaps_inds[positive_indices]].astype(int)] = 1
regression_target[:, : -1] = utils.anchors.bbox_transform(
anchors, bboxes[argmax_overlaps_inds, :])
# ignore annotations outside of image
anchors_centers = np.vstack(
[(anchors[:, 0] + anchors[:, 2]) / 2,
(anchors[:, 1] + anchors[:, 3]) / 2]).T
outside_indices = np.logical_or(
anchors_centers[:, 0] >= image.shape[1],
anchors_centers[:, 1] >= image.shape[0])
# -1 means ignore
labels_target[outside_indices, -1] = -1
regression_target[outside_indices, -1] = -1
return regression_target, labels_target
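# Hedged usage sketch (all shapes and values below are illustrative assumptions):
# generating targets for a single image with three anchors and one ground-truth box.
def _anchor_targets_example():
    anchors = np.array([[0, 0, 32, 32], [32, 32, 96, 96], [200, 200, 260, 260]], dtype=np.float32)
    image = np.zeros((256, 256, 3), dtype=np.float32)
    bboxes = np.array([[30, 30, 100, 100]], dtype=np.float32)
    labels = np.array([2])
    regression, classification = gen_anchor_targets(anchors, image, bboxes, labels, num_classes=5)
    # regression: (3, 5) -- four regression targets plus the anchor state per anchor
    # classification: (3, 6) -- five one-hot class columns plus the anchor state per anchor
    return regression, classification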
| 12,780
|
def extend_vocab_in_file(
vocab, max_tokens=10000, vocab_path="../models/vocabulary.json"
):
"""Extends JSON-formatted vocabulary with words from vocab that are not
present in the current vocabulary. Adds up to max_tokens words.
Overwrites file in vocab_path.
# Arguments:
        vocab: Vocabulary to be added. MUST have word_counts populated, i.e.
must have run count_all_words() previously.
max_tokens: Maximum number of words to be added.
vocab_path: Path to the vocabulary json which is to be extended.
"""
try:
with open(vocab_path, "r") as f:
current_vocab = json.load(f)
except IOError:
print("Vocabulary file not found, expected at " + vocab_path)
return
extend_vocab(current_vocab, vocab, max_tokens)
# Save back to file
with open(vocab_path, "w") as f:
json.dump(current_vocab, f, sort_keys=True, indent=4, separators=(",", ": "))
| 12,781
|
def parameters_to_weights(parameters: Parameters) -> Weights:
"""Convert parameters object to NumPy weights."""
return [bytes_to_ndarray(tensor) for tensor in parameters.tensors]
| 12,782
|
def get_iou(data_list, class_num, save_path=None):
"""
Args:
data_list: a list, its elements [gt, output]
class_num: the number of label
"""
from multiprocessing import Pool
ConfM = ConfusionMatrix(class_num)
f = ConfM.generateM
pool = Pool()
m_list = pool.map(f, data_list)
pool.close()
pool.join()
for m in m_list:
ConfM.addM(m)
aveJ, j_list, M = ConfM.jaccard()
# print(j_list)
# print(M)
# print('meanIOU: ' + str(aveJ) + '\n')
if save_path:
with open(save_path, 'w') as f:
f.write('meanIOU: ' + str(aveJ) + '\n')
f.write(str(j_list) + '\n')
f.write(str(M) + '\n')
return aveJ, j_list
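# Hedged usage sketch (toy label maps, purely illustrative): evaluating mean IoU over
# a single (ground truth, prediction) pair with three classes. Assumes numpy is
# available in this module alongside ConfusionMatrix.
def _get_iou_example():
    gt = np.array([[0, 0, 1], [1, 2, 2]])
    pred = np.array([[0, 1, 1], [1, 2, 0]])
    mean_iou, per_class_iou = get_iou([[gt, pred]], class_num=3)
    return mean_iou, per_class_iou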
| 12,783
|
def hook_modules(module):
""" Temporarily adds the hooks to a `nn.Module` for tracing """
hooks = []
def register_submodule_tracer(module):
def _submodule_pre_tracer(module, input):
log.debug(f'pre tracer in _submodule_pre_tracer in {type(module).__name__}')
lock(True)
def _submodule_tracer(module, inputs, outputs):
log.debug(f'tracer in _submodule_tracer in {type(module).__name__}')
lock(False)
node = TraceNode(module)
add_forward_node(node, inputs, outputs)
module_unique_name = current_graph().module_unique_name_dict[id(module)]
if module_unique_name in current_graph().traced_modules:
log.debug(f"module {module_unique_name} is traced")
return None
related = False
if id(module) in module_constructor_traced:
if id(module) in module_constructor_lines:
related = True
else:
if type(module) in overridable_modules:
related = True
else:
for m in overridable_modules:
if isinstance(module, m):
related = True
break
if related:
hooks.append(module.register_forward_pre_hook(_submodule_pre_tracer))
hooks.append(module.register_forward_hook(_submodule_tracer))
current_graph().traced_modules.append(module_unique_name)
return None
def _model_pre_tracer(module, inputs):
log.debug('pre tracer in _model_pre_tracer')
for i in inputs:
node = TraceNode(TraceFunction("input"))
add_input_node(node, i)
def _model_tracer(module, inputs, outputs):
log.debug('tracer in _model_tracer')
if type(outputs) == torch.Tensor:
node = TraceNode(TraceFunction("output"))
add_output_node(node, outputs)
elif type(outputs) in (list, tuple):
for i in outputs:
if type(i) == torch.Tensor or (type(i) in (list, tuple) and all((type(x) == torch.Tensor for x in i))):
node = TraceNode(TraceFunction("output"))
add_output_node(node, i)
else:
log.warning(
"Only tensors or list, tuple of tensors are supported when nested in a class, dict, list or tuple")
elif type(outputs) == dict:
for k, v in outputs.items():
if type(v) == torch.Tensor or (type(v) in (list, tuple) and all((type(x) == torch.Tensor for x in v))):
node = TraceNode(TraceFunction("output"))
add_output_node(node, v)
else:
log.warning(
"Only tensors or list, tuple of tensors are supported when nested in a class, dict, list or tuple")
else:
log.warning(f'Output type is not supported: {type(outputs).__name__}, try to extract tensors from it')
for k in outputs.__dir__():
v = getattr(outputs, k)
if type(v) == torch.Tensor or (type(v) in (list, tuple) and all((type(x) == torch.Tensor for x in v))):
node = TraceNode(TraceFunction("output"))
add_output_node(node, v)
log.debug('trace: apply register_submodule_tracer')
module.apply(register_submodule_tracer)
log.debug('trace: add hooks')
hooks.append(module.register_forward_pre_hook(_model_pre_tracer))
hooks.append(module.register_forward_hook(_model_tracer))
yield module
for hook in hooks:
hook.remove()
| 12,784
|
def bridge_forward_delay(brname):
"""Read a bridge device's forward delay timer.
:returns ``int``:
Bridge forward delay timer.
:raises:
OSError, IOError (ENOENT) if the device doesn't exist.
"""
return int(_get_dev_attr(brname, 'bridge/forward_delay'))
| 12,785
|
def test_ins_with_4_bytes_payload_and_user_reject(dongle):
"""
"""
apdu = struct.pack('>BBBBBI', 0x80, 0x3, 0x80, 0x0, 0x0, 0x0)
with dongle.screen_event_handler(ui_interaction.confirm_on_lablel, clicked_labels, "reject"):
with pytest.raises(speculos.CommException) as excinfo:
_ = dongle.exchange(apdu)
assert excinfo.value.sw == 0x6985
| 12,786
|
def main(argv):
"""
Main wrapper for training sound event localization and detection network.
:param argv: expects two optional inputs.
first input: task_id - (optional) To chose the system configuration in parameters.py.
(default) 1 - uses default parameters
second input: job_id - (optional) all the output files will be uniquely represented with this.
(default) 1
"""
if len(argv) != 3:
print('\n\n')
print('-------------------------------------------------------------------------------------------------------')
print('The code expected two optional inputs')
print('\t>> python seld.py <task-id> <job-id>')
print('\t\t<task-id> is used to choose the user-defined parameter set from parameter.py')
print('Using default inputs for now')
print('\t\t<job-id> is a unique identifier which is used for output filenames (models, training plots). '
'You can use any number or string for this.')
print('-------------------------------------------------------------------------------------------------------')
print('\n\n')
# use parameter set defined by user
task_id = '1' if len(argv) < 2 else argv[1]
params = parameter.get_params(task_id)
job_id = 1 if len(argv) < 3 else argv[-1]
train_splits, val_splits, test_splits = None, None, None
if params['mode'] == 'dev':
test_splits = [1, 2, 3, 4]
val_splits = [2, 3, 4, 1]
train_splits = [[3, 4], [4, 1], [1, 2], [2, 3]]
# SUGGESTION: Considering the long training time, major tuning of the method can be done on the first split.
        # Once you finalize the method you can evaluate its performance on the complete cross-validation splits
# test_splits = [1]
# val_splits = [2]
# train_splits = [[3, 4]]
elif params['mode'] == 'eval':
test_splits = [0]
val_splits = [1]
train_splits = [[2, 3, 4]]
avg_scores_val = []
avg_scores_test = []
for split_cnt, split in enumerate(test_splits):
print('\n\n---------------------------------------------------------------------------------------------------')
print('------------------------------------ SPLIT {} -----------------------------------------------'.format(split))
print('---------------------------------------------------------------------------------------------------')
# Unique name for the run
cls_feature_class.create_folder(params['model_dir'])
unique_name = '{}_{}_{}_{}_split{}'.format(
task_id, job_id, params['dataset'], params['mode'], split
)
unique_name = os.path.join(params['model_dir'], unique_name)
model_name = '{}_model.h5'.format(unique_name)
print("unique_name: {}\n".format(unique_name))
# Load train and validation data
print('Loading training dataset:')
data_gen_train = cls_data_generator.DataGenerator(
dataset=params['dataset'], split=train_splits[split_cnt], batch_size=params['batch_size'],
seq_len=params['sequence_length'], feat_label_dir=params['feat_label_dir']
)
print('Loading validation dataset:')
data_gen_val = cls_data_generator.DataGenerator(
dataset=params['dataset'], split=val_splits[split_cnt], batch_size=params['batch_size'],
seq_len=params['sequence_length'], feat_label_dir=params['feat_label_dir'], shuffle=False
)
# Collect the reference labels for validation data
data_in, data_out = data_gen_train.get_data_sizes()
print('FEATURES:\n\tdata_in: {}\n\tdata_out: {}\n'.format(data_in, data_out))
gt = collect_test_labels(data_gen_val, data_out, params['quick_test'])
sed_gt = evaluation_metrics.reshape_3Dto2D(gt[0])
doa_gt = evaluation_metrics.reshape_3Dto2D(gt[1])
# rescaling the reference elevation data from [-180 180] to [-def_elevation def_elevation] for scoring purpose
nb_classes = data_gen_train.get_nb_classes()
def_elevation = data_gen_train.get_default_elevation()
doa_gt[:, nb_classes:] = doa_gt[:, nb_classes:] / (180. / def_elevation)
        print('MODEL:\n\tdropout_rate: {}\n\tCNN: nb_cnn_filt: {}, pool_size: {}\n\trnn_size: {}, fnn_size: {}\n'.format(
params['dropout_rate'], params['nb_cnn2d_filt'], params['pool_size'], params['rnn_size'],
params['fnn_size']))
model = keras_model.get_model(data_in=data_in, data_out=data_out, dropout_rate=params['dropout_rate'],
nb_cnn2d_filt=params['nb_cnn2d_filt'], pool_size=params['pool_size'],
rnn_size=params['rnn_size'], fnn_size=params['fnn_size'],
weights=params['loss_weights'])
best_seld_metric = 99999
best_epoch = -1
patience_cnt = 0
seld_metric = np.zeros(params['nb_epochs'])
tr_loss = np.zeros(params['nb_epochs'])
val_loss = np.zeros(params['nb_epochs'])
doa_metric = np.zeros((params['nb_epochs'], 6))
sed_metric = np.zeros((params['nb_epochs'], 2))
nb_epoch = 2 if params['quick_test'] else params['nb_epochs']
# start training
for epoch_cnt in range(nb_epoch):
start = time.time()
# train once per epoch
hist = model.fit_generator(
generator=data_gen_train.generate(),
steps_per_epoch=2 if params['quick_test'] else data_gen_train.get_total_batches_in_data(),
validation_data=data_gen_val.generate(),
validation_steps=2 if params['quick_test'] else data_gen_val.get_total_batches_in_data(),
epochs=params['epochs_per_fit'],
verbose=2
)
tr_loss[epoch_cnt] = hist.history.get('loss')[-1]
val_loss[epoch_cnt] = hist.history.get('val_loss')[-1]
            # predict once per epoch
pred = model.predict_generator(
generator=data_gen_val.generate(),
steps=2 if params['quick_test'] else data_gen_val.get_total_batches_in_data(),
verbose=2
)
# Calculate the metrics
sed_pred = evaluation_metrics.reshape_3Dto2D(pred[0]) > 0.5
doa_pred = evaluation_metrics.reshape_3Dto2D(pred[1])
# rescaling the elevation data from [-180 180] to [-def_elevation def_elevation] for scoring purpose
doa_pred[:, nb_classes:] = doa_pred[:, nb_classes:] / (180. / def_elevation)
sed_metric[epoch_cnt, :] = evaluation_metrics.compute_sed_scores(sed_pred, sed_gt, data_gen_val.nb_frames_1s())
doa_metric[epoch_cnt, :] = evaluation_metrics.compute_doa_scores_regr(doa_pred, doa_gt, sed_pred, sed_gt)
seld_metric[epoch_cnt] = evaluation_metrics.compute_seld_metric(sed_metric[epoch_cnt, :], doa_metric[epoch_cnt, :])
# Visualize the metrics with respect to epochs
plot_functions(unique_name, tr_loss, val_loss, sed_metric, doa_metric, seld_metric)
patience_cnt += 1
if seld_metric[epoch_cnt] < best_seld_metric:
best_seld_metric = seld_metric[epoch_cnt]
best_epoch = epoch_cnt
model.save(model_name)
patience_cnt = 0
print(
'epoch_cnt: %d, time: %.2fs, tr_loss: %.2f, val_loss: %.2f, '
'ER_overall: %.2f, F1_overall: %.2f, '
'doa_error_pred: %.2f, good_pks_ratio:%.2f, '
'seld_score: %.2f, best_seld_score: %.2f, best_epoch : %d\n' %
(
epoch_cnt, time.time() - start, tr_loss[epoch_cnt], val_loss[epoch_cnt],
sed_metric[epoch_cnt, 0], sed_metric[epoch_cnt, 1],
doa_metric[epoch_cnt, 0], doa_metric[epoch_cnt, 1],
seld_metric[epoch_cnt], best_seld_metric, best_epoch
)
)
if patience_cnt > params['patience']:
break
avg_scores_val.append([sed_metric[best_epoch, 0], sed_metric[best_epoch, 1], doa_metric[best_epoch, 0],
doa_metric[best_epoch, 1], best_seld_metric])
print('\nResults on validation split:')
print('\tUnique_name: {} '.format(unique_name))
print('\tSaved model for the best_epoch: {}'.format(best_epoch))
print('\tSELD_score: {}'.format(best_seld_metric))
print('\tDOA Metrics: DOA_error: {}, frame_recall: {}'.format(doa_metric[best_epoch, 0],
doa_metric[best_epoch, 1]))
print('\tSED Metrics: ER_overall: {}, F1_overall: {}\n'.format(sed_metric[best_epoch, 0],
sed_metric[best_epoch, 1]))
# ------------------ Calculate metric scores for unseen test split ---------------------------------
print('Loading testing dataset:')
data_gen_test = cls_data_generator.DataGenerator(
dataset=params['dataset'], split=split, batch_size=params['batch_size'], seq_len=params['sequence_length'],
feat_label_dir=params['feat_label_dir'], shuffle=False, per_file=params['dcase_output'],
            is_eval=True if params['mode'] == 'eval' else False
)
print('\nLoading the best model and predicting results on the testing split')
model = load_model('{}_model.h5'.format(unique_name))
pred_test = model.predict_generator(
generator=data_gen_test.generate(),
steps=2 if params['quick_test'] else data_gen_test.get_total_batches_in_data(),
verbose=2
)
test_sed_pred = evaluation_metrics.reshape_3Dto2D(pred_test[0]) > 0.5
test_doa_pred = evaluation_metrics.reshape_3Dto2D(pred_test[1])
# rescaling the elevation data from [-180 180] to [-def_elevation def_elevation] for scoring purpose
test_doa_pred[:, nb_classes:] = test_doa_pred[:, nb_classes:] / (180. / def_elevation)
if params['dcase_output']:
# Dump results in DCASE output format for calculating final scores
dcase_dump_folder = os.path.join(params['dcase_dir'], '{}_{}_{}'.format(task_id, params['dataset'], params['mode']))
cls_feature_class.create_folder(dcase_dump_folder)
print('Dumping recording-wise results in: {}'.format(dcase_dump_folder))
test_filelist = data_gen_test.get_filelist()
# Number of frames for a 60 second audio with 20ms hop length = 3000 frames
max_frames_with_content = data_gen_test.get_nb_frames()
# Number of frames in one batch (batch_size* sequence_length) consists of all the 3000 frames above with
# zero padding in the remaining frames
frames_per_file = data_gen_test.get_frame_per_file()
for file_cnt in range(test_sed_pred.shape[0]//frames_per_file):
output_file = os.path.join(dcase_dump_folder, test_filelist[file_cnt].replace('.npy', '.csv'))
dc = file_cnt * frames_per_file
output_dict = evaluation_metrics.regression_label_format_to_output_format(
data_gen_test,
test_sed_pred[dc:dc + max_frames_with_content, :],
test_doa_pred[dc:dc + max_frames_with_content, :] * 180 / np.pi
)
evaluation_metrics.write_output_format_file(output_file, output_dict)
        if params['mode'] == 'dev':
test_data_in, test_data_out = data_gen_test.get_data_sizes()
test_gt = collect_test_labels(data_gen_test, test_data_out, params['quick_test'])
test_sed_gt = evaluation_metrics.reshape_3Dto2D(test_gt[0])
test_doa_gt = evaluation_metrics.reshape_3Dto2D(test_gt[1])
# rescaling the reference elevation from [-180 180] to [-def_elevation def_elevation] for scoring purpose
test_doa_gt[:, nb_classes:] = test_doa_gt[:, nb_classes:] / (180. / def_elevation)
test_sed_loss = evaluation_metrics.compute_sed_scores(test_sed_pred, test_sed_gt, data_gen_test.nb_frames_1s())
test_doa_loss = evaluation_metrics.compute_doa_scores_regr(test_doa_pred, test_doa_gt, test_sed_pred, test_sed_gt)
test_metric_loss = evaluation_metrics.compute_seld_metric(test_sed_loss, test_doa_loss)
avg_scores_test.append([test_sed_loss[0], test_sed_loss[1], test_doa_loss[0], test_doa_loss[1], test_metric_loss])
print('Results on test split:')
print('\tSELD_score: {}, '.format(test_metric_loss))
print('\tDOA Metrics: DOA_error: {}, frame_recall: {}'.format(test_doa_loss[0], test_doa_loss[1]))
print('\tSED Metrics: ER_overall: {}, F1_overall: {}\n'.format(test_sed_loss[0], test_sed_loss[1]))
print('\n\nValidation split scores per fold:\n')
for cnt in range(len(val_splits)):
print('\tSplit {} - SED ER: {} F1: {}; DOA error: {} frame recall: {}; SELD score: {}'.format(cnt, avg_scores_val[cnt][0], avg_scores_val[cnt][1], avg_scores_val[cnt][2], avg_scores_val[cnt][3], avg_scores_val[cnt][4]))
    if params['mode'] == 'dev':
print('\n\nTesting split scores per fold:\n')
for cnt in range(len(val_splits)):
print('\tSplit {} - SED ER: {} F1: {}; DOA error: {} frame recall: {}; SELD score: {}'.format(cnt, avg_scores_test[cnt][0], avg_scores_test[cnt][1], avg_scores_test[cnt][2], avg_scores_test[cnt][3], avg_scores_test[cnt][4]))
| 12,787
|
def RF(X, y, X_ind, y_ind, is_reg=False):
"""Cross Validation and independent set test for Random Forest model
Arguments:
        X (ndarray): Feature data of the training and validation set for cross-validation.
                     m x n matrix, where m is the No. of samples and n is the No. of features.
        y (ndarray): Label data of the training and validation set for cross-validation.
                     m-dimensional vector, where m is the No. of samples.
        X_ind (ndarray): Feature data of the independent test set.
                         It has a similar data structure to X.
        y_ind (ndarray): Label data of the independent test set.
                         It has a similar data structure to y.
        is_reg (bool, optional): define the model for regression (True) or classification (False) (Default: False)
Returns:
cvs (ndarray): cross-validation results. The shape is (m, ), m is the No. of samples.
inds (ndarray): independent test results. It has similar data structure as cvs.
"""
if is_reg:
folds = KFold(5).split(X)
alg = RandomForestRegressor
else:
folds = StratifiedKFold(5).split(X, y)
alg = RandomForestClassifier
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = alg(n_estimators=500, n_jobs=1)
model.fit(X[trained], y[trained])
if is_reg:
cvs[valided] = model.predict(X[valided])
inds += model.predict(X_ind)
else:
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5
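# Hedged usage sketch (the data is synthetic and purely illustrative): 5-fold
# cross-validation plus an independent test for a binary classification task.
def _rf_example():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 8)
    y = (X[:, 0] > 0.5).astype(int)
    X_ind = rng.rand(20, 8)
    y_ind = (X_ind[:, 0] > 0.5).astype(int)
    cvs, inds = RF(X, y, X_ind, y_ind, is_reg=False)
    return cvs, inds  # per-sample probabilities from CV and from the averaged independent test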
| 12,788
|
def test_parameter():
"""Request for dataset returns correct parameter metadata"""
meta = hapi(server,'dataset1')
pklFile = 'test_parameter.pkl'
pklFile = os.path.join(os.path.dirname(os.path.realpath(__file__)),'data',pklFile)
if not os.path.isfile(pklFile):
writepickle(pklFile,meta)
assert True
return
else:
metatest = readpickle(pklFile)
assert DeepDiff(meta,metatest) == {}
| 12,789
|
def keystring2list(s):
"""convert a string of keys to a list of keys."""
if len(s) == 0:
return []
keys = []
i = 0
while i < len(s):
keylength = struct.unpack(data.MESSAGE_KEY_LENGTH_FORMAT, s[i:i + data.MESSAGE_KEY_LENGTH_SIZE])[0]
i += data.MESSAGE_KEY_LENGTH_SIZE
key = s[i:i + keylength]
keys.append(key)
i += keylength
return keys
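# Hedged companion sketch (assumes data.MESSAGE_KEY_LENGTH_FORMAT and
# data.MESSAGE_KEY_LENGTH_SIZE are the same struct format/size used above): the inverse
# operation, packing a list of byte-string keys into the string keystring2list parses.
def list2keystring(keys):
    parts = []
    for key in keys:
        parts.append(struct.pack(data.MESSAGE_KEY_LENGTH_FORMAT, len(key)))
        parts.append(key)
    return b''.join(parts)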
| 12,790
|
def test_random_state_moon():
"""Tests the initialization with a fixed random state in K Nearest Neighbours classifier with moon data."""
clf1 = KNearestNeighbours(random_state=7, data_shape='moon')
clf2 = KNearestNeighbours(random_state=7, data_shape='moon')
data1 = clf1.data_points
data2 = clf2.data_points
assert equal(data1, data2).all()
| 12,791
|
def update_dockerfiles(old_version, new_version):
"""Update dockerfiles if there was a major change."""
if major_minor_change(old_version, new_version):
old_r_major_minor = "r%s.%s" % (old_version.major, old_version.minor)
r_major_minor = "r%s.%s" % (new_version.major, new_version.minor)
print("Detected Major.Minor change.")
print("Updating pattern %s to %s in additional files"
% (old_r_major_minor, r_major_minor))
# Update dockerfiles
replace_string_in_line(old_r_major_minor, r_major_minor, DEVEL_DOCKERFILE)
replace_string_in_line(old_r_major_minor, r_major_minor,
GPU_DEVEL_DOCKERFILE)
replace_string_in_line(old_r_major_minor, r_major_minor,
CPU_MKL_DEVEL_DOCKERFILE)
| 12,792
|
def binary_fmt(num, suffix='B'):
"""A binary pretty-printer."""
if num == 0.0:
return '0 %s' % suffix
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return '%.3g %s%s' % (num, unit, suffix)
num /= 1024.0
return '%.3g %s%s' % (num, 'Yi', suffix)
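# Hedged usage sketch: a few representative inputs and the strings binary_fmt
# produces for them.
def _binary_fmt_examples():
    return [
        binary_fmt(0),            # '0 B'
        binary_fmt(1536),         # '1.5 KiB'
        binary_fmt(3 * 1024**3),  # '3 GiB'
    ]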
| 12,793
|
def transformMatrices(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Calculate both forward and backward matrices for these parameters"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return (
compressMatrices( parentMatrix, T,C,R,SO,S,SO1,C1 ),
compressMatrices( parentMatrix, C,SO, S1, SO1, R1, C1, T1)
)
| 12,794
|
def ulam(u=1,v=2):
"""
Ulam Sequence
Args:
u -- first term
v -- second term
OEIS A002858
"""
T = [u,v]
S = Counter({u+v:1})
yield u
yield v
while True:
new = min([v for v,c in S.items() if c == 1])
yield new
T.append(new)
for term in T[:-1]:
S[new+term] += 1
del S[new]
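# Hedged usage sketch: taking the first few terms of the generator. With the default
# seeds (1, 2) the sequence begins 1, 2, 3, 4, 6, 8, 11, 13, 16, 18 (OEIS A002858).
def _first_ulam_terms(n=10):
    gen = ulam()
    return [next(gen) for _ in range(n)]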
| 12,795
|
async def get_region(country=None, id=None):
"""
`linode_region` provides details about a specific Linode region.
"""
__args__ = dict()
__args__['country'] = country
__args__['id'] = id
__ret__ = await pulumi.runtime.invoke('linode:index/getRegion:getRegion', __args__)
return GetRegionResult(
country=__ret__.get('country'))
| 12,796
|
def entry_type(entry, default):
"""Return the type of and entry"""
if entry.attribute is None:
return default
return entry.attribute.get('entry_type', default)
| 12,797
|
def build_expression(backend, arrays, expr):
"""Build an expression, based on ``expr`` and initial arrays ``arrays``,
that evaluates using backend ``backend``.
"""
return CONVERT_BACKENDS[backend](arrays, expr)
| 12,798
|
def test_main_with_file_debug(clipboard, resource):
"""Test that debug mode for the main entry point has no effect on output."""
sys.argv = ['pygclip', '-s', 'monokai', '-l', 'python', resource('simple.py')]
main()
normal_result = clipboard()
sys.argv = ['pygclip', '-d', '-s', 'monokai', '-l', 'python', resource('simple.py')]
main()
debug_result = clipboard()
assert normal_result == debug_result
| 12,799
|