def write_annotation_image(
parser: SimpleAnnotationParser,
image_size: InputDataSize,
label_color_dict: Dict[str, RGB],
output_image_file: Path,
background_color: Optional[Any] = None,
label_name_list: Optional[List[str]] = None,
):
"""
    Render the annotation information stored in a JSON file as an image.
    The JSON file is expected to be one contained in an annotation zip downloaded from AnnoFab.
    Args:
        parser: parser for the Simple Annotation
        image_size: size of the image as Tuple[width, height]
        label_color_dict: dict mapping label_name to RGB
        output_image_file: path of the output image file
        background_color: background color of the annotation image.
            (ex) "rgb(173, 216, 230)", "#add8e6", "lightgray", (173,216,230)
            The format follows the `ImageColor Module <https://hhsprings.bitbucket.io/docs/programming/examples/python/PIL/ImageColor.html>`_  # noqa: E501
            If not specified, black (rgb(0, 0, 0)) is used.
        label_name_list: label_names to render. If None, all label_names are rendered.
    Examples:
        Read "simple-annotation.json" and generate the image "out.png".
            from pathlib import Path
            from annofabapi.parser import SimpleAnnotationDirParser
            parser = SimpleAnnotationDirParser("simple-annotation.json")
            label_color_dict = {"dog": (255, 0, 0), "bird": (0, 255, 0)}
            write_annotation_image(parser=parser, image_size=(64, 64), label_color_dict=label_color_dict,
                                   output_image_file=Path("out.png"), background_color=(64, 64, 64))
"""
image = PIL.Image.new(mode="RGB", size=image_size, color=background_color)
draw = PIL.ImageDraw.Draw(image)
fill_annotation_list(draw, parser, label_color_dict, label_name_list=label_name_list)
output_image_file.parent.mkdir(parents=True, exist_ok=True)
image.save(output_image_file)
| 25,400
|
def linkgen(osversion, radioversion=None, softwareversion=None, altsw=None, temp=False, sdk=False):
"""
Generate debrick/core/radio links for given OS, radio, software release.
:param osversion: OS version, 10.x.y.zzzz.
:type osversion: str
:param radioversion: Radio version, 10.x.y.zzzz. Can be guessed.
:type radioversion: str
:param softwareversion: Software version, 10.x.y.zzzz. Can be guessed.
:type softwareversion: str
:param altsw: Radio software release, if not the same as OS.
:type altsw: str
:param temp: If file we write to is temporary. Default is False.
:type temp: bool
:param sdk: If we specifically want SDK images. Default is False.
:type sdk: bool
"""
radioversion = return_radio_version(osversion, radioversion)
softwareversion, swc = return_sw_checked(softwareversion, osversion)
del swc
if altsw is not None:
altsw, aswc = return_radio_sw_checked(altsw, radioversion)
del aswc
baseurl = utilities.create_base_url(softwareversion)
oses, cores, radios = textgenerator.url_gen(osversion, radioversion, softwareversion)
if altsw is not None:
del radios
dbks, cors, radios = textgenerator.url_gen(osversion, radioversion, altsw)
del dbks
del cors
avlty = networkutils.availability(baseurl)
oses, cores = linkgen_sdk(sdk, oses, cores)
prargs = (softwareversion, osversion, radioversion, oses, cores, radios, avlty, False, None, temp, altsw)
lthr = threading.Thread(target=textgenerator.write_links, args=prargs)
lthr.start()
| 25,401
|
def reload(name: str) -> types.ModuleType:
"""
Finalize and reload a plugin and any plugins that (transitively) depend on it. We try to run all finalizers in
dependency order, and only load plugins that were successfully unloaded, and whose dependencies have been
successfully reloaded. If a plugin fails to initialize, we run any finalizers it managed to register, and the plugin
is not loaded. Any exceptions raised will be reraised together. Returns the module object of the requested plugin if
successful.
"""
reloads = deps.subgraph_paths_to(name)
logger.info("Reloading {} with dependencies: {}".format(name,
", ".join(dep for dep in reloads.topo_sort_fwd() if dep != name)))
unload_success = set()
reload_success = set()
unload_gen = reloads.topo_sort_fwd()
reload_gen = reloads.topo_sort_bck()
def cont_reload() -> None:
try:
for dep in reload_gen:
if dep == name:
continue
elif dep not in unload_success:
logger.info("Not reloading {} because it was not unloaded properly".format(name))
elif not all(m in reload_success
for m in reloads.edges_from(dep)):
logger.info("Not reloading {} because its dependencies were not reloaded properly".format(name))
else:
importlib.import_module(dep)
reload_success.add(dep)
except:
cont_reload()
raise
def cont_unload() -> types.ModuleType:
try:
for dep in unload_gen:
if dep == name:
continue
unsafe_unload(dep)
unload_success.add(dep)
except:
cont_unload()
raise
try:
unsafe_unload(name)
except:
cont_reload()
raise
try:
ret = importlib.import_module(name)
reload_success.add(name)
finally:
cont_reload()
return ret
return cont_unload()
| 25,402
|
def load_from_config(config_path, **kwargs):
"""Load from a config file. Config options can still be overwritten with kwargs"""
with open(config_path, "r") as config_file:
config = json.load(config_file)
config.update(kwargs)
return TokenizationConfig(**config)
| 25,403
|
def get_tecogan_monitors(monitor):
"""
Create monitors for displaying and storing TECOGAN losses.
"""
monitor_vgg_loss = MonitorSeries(
'vgg loss', monitor, interval=20)
monitor_pp_loss = MonitorSeries(
'ping pong', monitor, interval=20)
monitor_sum_layer_loss = MonitorSeries(
'd layer loss', monitor, interval=20)
monitor_adv_loss = MonitorSeries(
'adversarial loss', monitor, interval=20)
monitor_disc_loss = MonitorSeries(
'discriminator loss', monitor, interval=20)
monitor_tb = MonitorSeries(
'tb', monitor, interval=20)
Monitor_tecogan = collections.namedtuple('Monitor_tecogan',
['monitor_vgg_loss', 'monitor_pp_loss', 'monitor_sum_layer_loss',
'monitor_adv_loss', 'monitor_disc_loss', 'monitor_tb'])
return Monitor_tecogan(monitor_vgg_loss, monitor_pp_loss, monitor_sum_layer_loss, monitor_adv_loss, monitor_disc_loss, monitor_tb)
| 25,404
|
def mock_message_callback(*args):
"""
Test message callback is called
"""
print(*args)
| 25,405
|
def user_based_filtering_recommend(new_user,user_movies_ids,movies_num,n_neighbor,movies_ratings):
""" This function return number of recommended movies based on user based filtering using
cosine similarity to find the most similar users to the new user
it returns movies_num of movies from the top ranked movies of n_neighbour users
who are the most similar to the new user"""
#pivot the dataframe
users_inDB = movies_ratings.pivot_table(index='userId', columns='movieId', values='rating')
list_id_movies = movies_ratings['movieId'].unique()
new_user_vector = pd.DataFrame(new_user, index=list_id_movies).T
#fill Nans with 3 rating
users_inDB = users_inDB.fillna(3.0)
new_user_vector_filled = new_user_vector.fillna(3.0)
    # for cosine similarity, center the ratings so they lie in [-1, 1]
users_inDB = (users_inDB - 3.0)/2.0
new_user = (new_user_vector_filled - 3.0)/2.0
#label the new user that we want to recommend for:
new_user.index=['new_user']
    # add the new user to the original df
users_matrix = pd.concat([users_inDB,new_user])
#calculate cosine similarity
users_similarity_matrix = cosine_similarity(users_matrix)
users_similarity_matrix = pd.DataFrame(users_similarity_matrix,index=users_matrix.index,columns=users_matrix.index)
#we get here (users_num*users_num) similarity matrix
#print(users_matrix_similarity)
# get the new user similarities row: except the last column value(similarity with himself=1)
new_user_similarity = users_similarity_matrix['new_user'].iloc[:-1]
# take the n_neighbors nearest users (N users who have the most similarity with the new user)
similar_users = new_user_similarity.nlargest(n_neighbor).index.values
#print(similar_users)
#we will get (movies_num*n_neighbor*2) movies to choose
recommended_movieIds = []
scores = []
for user in similar_users:
recommended_movieIds.extend(users_inDB.loc[user].nlargest(movies_num*2).index)
scores.extend(users_inDB.loc[user].nlargest(movies_num*2).values)
recommended_movies_dic = {'movie_id':recommended_movieIds,'score':scores}
recommended_movies_df = pd.DataFrame(recommended_movies_dic)
#print(recommended_movies_df)
#Shuffle the movies
recommended_movies_df = sklearn.utils.shuffle(recommended_movies_df)
#Order movies by score
recommended_movies_df = recommended_movies_df.sort_values(by='score',ascending=False)
recommended_movies_ids = recommended_movies_df['movie_id'].unique()
    # get the final recommendation: return movies_num movies which the user hasn't rated
top_recommended_movies = []
for movie_id in recommended_movies_ids:
if (movie_id not in user_movies_ids) and (len(top_recommended_movies) < movies_num) :
top_recommended_movies.append(movie_id)
    # finally return the movie titles
top_recommended_movies = movieId_to_title(top_recommended_movies,movies_ratings)
return top_recommended_movies
| 25,406
|
def test_export_traces_chunks_only():
"""
Testing exporting traces considering the area file
"""
exporter = TraceExporter()
exporter.load_and_export(
flags_to_update=
{
"RM_ROITrace": 3,
"GDM_outputType": "chunks_only",
"GDM_chunkPostStim": 2, # in seconds
"GDM_chunkPreStim": 2, # in seconds
},
file_suffix="_chunks_only",
flags_suffix="_2secPrePostStim"
)
exporter.load_and_export(
flags_to_update=
{
"RM_ROITrace": 3,
"GDM_outputType": "chunks_only",
"GDM_chunkPostStim": 100, # in seconds
"GDM_chunkPreStim": 100, # in seconds
},
file_suffix="_chunks_only",
flags_suffix="_full"
)
| 25,407
|
def query_ps_from_wcs(w):
"""Query PanStarrs for a wcs.
"""
nra,ndec = w.array_shape[1:]
dra,ddec = w.wcs.cdelt[:2]
c = wcs.utils.pixel_to_skycoord(nra/2.,ndec/2.,w)
ddeg = np.linalg.norm([dra*nra/2,ddec*ndec/2])
pd_table = query(c.ra.value,c.dec.value,ddeg)
# Crop sources to those in the cube limits
scat = wcs.utils.skycoord_to_pixel(
SkyCoord(pd_table['raMean'],pd_table['decMean'], unit="deg"),
w,
origin=0,
mode='all'
)
mask = (scat[0] < nra)*(scat[1] < ndec)*(scat[0] > 0)*(scat[1] > 0)
pd_table = pd_table[mask]
pd_table['x'] = scat[0][mask]
pd_table['y'] = scat[1][mask]
return pd_table
| 25,408
|
def permute(x, in_shape='BCD', out_shape='BCD', **kw):
""" Permute the dimensions of a tensor.\n
- `x: Tensor`; The nd-tensor to be permuted.
- `in_shape: str`; The dimension shape of `x`. Can only have characters `'B'` or `'C'` or `'D'`,
which stand for Batch, Channel, or extra Dimensions. The default value `'BCD'` means
    the input tensor `x` should be at least 2-d with shape `(Batch, Channel, Dim0, Dim1, Dim2, ...)`,
where `Dim0, Dim1, Dim2 ...` stand for any number of extra dimensions.
- `out_shape: str or tuple or None`; The dimension shape of returned tensor. Default: `'BCD'`.
If a `str`, it is restricted to the same three characters `'B'`, `'C'` or `'D'` as the `in_shape`.
If a `tuple`, `in_shape` is ignored, and simply `x.permute(out_shape)` is returned.
    If `None`, no permutation will be performed.
- `return: Tensor`; Permuted nd-tensor. """
if (in_shape == out_shape) or (out_shape is None):
return x
if isinstance(out_shape, (list, tuple, torch.Size)):
return x.permute(*out_shape)
if isinstance(in_shape, str) and isinstance(out_shape, str) :
        assert set(in_shape) == set(out_shape) <= {'B', 'C', 'D'}, 'In and out shapes must have the same set of chars among B, C, and D.'
in_shape = in_shape.lower().replace('d', '...')
out_shape = out_shape.lower().replace('d', '...')
return torch.einsum(f'{in_shape}->{out_shape}', x)
return x
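# A minimal usage sketch, not part of the original snippet (assumes only that torch is
# available): move the channel axis of a (Batch, Channel, H, W) tensor to the end via
# the einsum path above ('bc...' -> 'b...c').
import torch
x = torch.randn(2, 3, 8, 8)
y = permute(x, in_shape='BCD', out_shape='BDC')
assert y.shape == (2, 8, 8, 3)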
| 25,409
|
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Parameters
----------
data
A 2D numpy array of shape (N, M).
row_labels
A list or array of length N with the labels for the rows.
col_labels
A list or array of length M with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
cbar_kw
A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
cbarlabel
The label for the colorbar. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=False, bottom=False,
labeltop=True, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=0, ha="center",
rotation_mode="anchor")
# Turn spines off and create white grid.
ax.spines[:].set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", axis="x", color="black", linestyle='--', linewidth=1)
ax.grid(which="minor", axis="y", color="black", linestyle='-', linewidth=3)
ax.tick_params(which="minor", top=False, left=False)
return im
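# A minimal usage sketch, not part of the original snippet (assumes matplotlib and
# numpy, which the function body already relies on as plt and np):
import matplotlib.pyplot as plt
import numpy as np
fig, ax = plt.subplots()
values = np.random.rand(4, 6)
im = heatmap(values, [f"row {i}" for i in range(4)], [f"col {j}" for j in range(6)],
             ax=ax, cmap="viridis")
fig.colorbar(im, ax=ax, label="value")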
| 25,410
|
def assert_is_normal_rpyc(f):
"""
Analyze the structure of a single rpyc file object for correctness.
Does not actually say anything about the _contents_ of that section, just that we were able
to slice it out of there.
    If successful, returns the uncompressed contents of the first storage slot.
"""
f.seek(0)
header = f.read(1024)
f.seek(0)
if header[:10] != "RENPY RPC2":
# either legacy, or someone messed with the header
# assuming legacy, see if this thing is a valid zlib blob
raw_data = f.read()
f.seek(0)
try:
uncompressed = zlib.decompress(raw_data)
except zlib.error:
raise ValueError("Did not find RENPY RPC2 header, but interpretation as legacy file failed")
return uncompressed
else:
        if len(header) < 46:
            # 10 bytes header + 4 * 9 bytes content table
            raise ValueError("File too short")
        # avoid rebinding `f` (the file object) while unpacking the content table
        a, b, c, d, e, f_, g, h, i = struct.unpack("<IIIIIIIII", header[10:46])
        # does the header format match default ren'py generated files?
        if not (a == 1 and b == 46 and d == 2 and (g, h, i) == (0, 0, 0) and b + c == e):
            raise ValueError("Header data is abnormal, did the format gain extra fields?")
f.seek(b)
raw_data = f.read(c)
f.seek(0)
        if len(raw_data) != c:
            raise ValueError("Header data is incompatible with file length")
        try:
            uncompressed = zlib.decompress(raw_data)
        except zlib.error:
            raise ValueError("Slot 1 did not contain a zlib blob")
        if not uncompressed.endswith(b"."):
            raise ValueError("Slot 1 did not contain a simple pickle")
return uncompressed
| 25,411
|
def retrieve_s3_object_contents(s3_obj, bucket=os.environ["ARTIFACTS_BUCKET"]):
"""Retrieve S3 object contents."""
return json.loads(
s3.get_object(Bucket=bucket, Key=s3_obj)["Body"].read().decode("utf-8")
)
| 25,412
|
def test_molchoose_correct():
"""Tests that the correct data is returned from a trial csv document"""
test = [[1,'A01B01','A01','B01','6.5','OC(=O)[C@H](Cc1ccc(O)cc1)NS(=O)(=O)c2ccc(cc2)c3ccccc3',7,12,'>98','Gen-5']]
test_frame = pd.DataFrame(test, columns=['Index','Tag','atag','btag','pIC50_MMP12','Smiles','A_SortMax','B_SortMax','Final QC Purity','Generation-No'])
pd.testing.assert_frame_equal(combine.MolChoose('A01', 'B01', DataSource='tests/test_input.csv'), test_frame)
| 25,413
|
def substract_li(cfg, data, lats, lons, future_exp):
"""Difference between historical and future fields."""
pathlist = data.get_path_list(short_name='pr', exp='historical')
ar_diff_rain = np.zeros((len(lats), len(lons), len(pathlist)))
mism_diff_rain = np.zeros(len(pathlist))
mwp_hist_rain = np.zeros(len(pathlist))
ar_hist_rain = np.zeros((len(lats), len(lons), len(pathlist)))
ar_diff_ua = np.zeros((len(lats), len(lons), len(pathlist)))
ar_diff_va = np.zeros((len(lats), len(lons), len(pathlist)))
datasets = []
for iii, dataset_path in enumerate(pathlist):
        # Subtract the historical experiment from the rcp85 experiment
datasets.append(data.get_info(n.DATASET, dataset_path))
ar_diff_rain[:, :, iii] = (data.get_data(short_name='pr',
exp=future_exp,
dataset=datasets[iii]) -
data.get_data(short_name='pr',
exp='historical',
dataset=datasets[iii])) / \
(data.get_data(short_name='ts',
exp=future_exp, dataset=datasets[iii]) -
data.get_data(short_name='ts',
exp='historical', dataset=datasets[iii]))
        # ISM (60°–95° E, 10°–30° N)
mism_diff_rain[iii] = \
np.mean((ar_diff_rain[:,
get_latlon_index(lons, 60, 95),
iii])[get_latlon_index(lats, 10, 30), :])
ar_hist_rain[:, :, iii] = data.get_data(
short_name='pr', exp='historical', dataset=datasets[iii])
        # Western Pacific (140° E–170° W, 12° S–12° N)
mwp_hist_rain[iii] = \
np.mean((ar_hist_rain[:,
get_latlon_index(lons, 140, 170),
iii])[get_latlon_index(lats, -12, 12), :])
ar_diff_ua[:, :, iii] = (data.get_data(short_name='ua',
exp=future_exp,
dataset=datasets[iii]) -
data.get_data(short_name='ua',
exp='historical',
dataset=datasets[iii])) / \
(data.get_data(short_name='ts',
exp=future_exp, dataset=datasets[iii]) -
data.get_data(short_name='ts',
exp='historical', dataset=datasets[iii]))
ar_diff_va[:, :, iii] = (data.get_data(short_name='va',
exp=future_exp,
dataset=datasets[iii]) -
data.get_data(short_name='va',
exp='historical',
dataset=datasets[iii])) / \
(data.get_data(short_name='ts',
exp=future_exp, dataset=datasets[iii]) -
data.get_data(short_name='ts',
exp='historical', dataset=datasets[iii]))
plot_rain_and_wind(cfg, datasets[iii],
{'ar_diff_rain': ar_diff_rain[:, :, iii],
'ar_diff_ua': ar_diff_ua[:, :, iii],
'ar_diff_va': ar_diff_va[:, :, iii],
'lats': lats, 'lons': lons}, future_exp)
return {
"datasets": datasets,
"ar_diff_rain": ar_diff_rain,
"ar_diff_ua": ar_diff_ua,
"ar_diff_va": ar_diff_va,
"ar_hist_rain": ar_hist_rain,
"mism_diff_rain": mism_diff_rain,
"mwp_hist_rain": mwp_hist_rain
}
| 25,414
|
def identify_jobs_to_update(file_path, jobs):
"""identify jobs to update."""
name_map = {}
for job in jobs:
cluster = get_desired_cluster(file_path, job)
if cluster != job.get("cluster", ""):
name_map[job["name"]] = cluster
return name_map
| 25,415
|
def clear():
"""
Clears the current Cytoscape session, if any.
"""
try:
requests.delete(rest.get_url('session')).ok
except requests.exceptions.ConnectionError:
print("Error: Connection refused: is Cytoscape started?")
raise
| 25,416
|
def bootstrap_storage_bucket(project_id, bucket_name, google_credentials):
"""
Bootstrap the bucket used to store Terraform state for projects.
Args:
project_id:
The ID of the project to create the bucket in.
bucket_name:
The name of the bucket to create.
google_credentials:
The credentials authorizing the creation of the bucket.
Returns:
An object containing information about the bucket.
"""
print(f"Attempting to retrieve existing bucket: {bucket_name}'")
service = googleapiclient.discovery.build(
"storage", "v1", credentials=google_credentials
)
request = service.buckets().get(bucket=bucket_name)
try:
bucket = request.execute()
print("Bucket exists.\n")
return bucket
except googleapiclient.errors.HttpError as e:
if e.resp['status'] != '404':
raise
print("Bucket does not exist yet. Creating it...")
bucket_body = {
"name": bucket_name,
"versioning": {
"enabled": True,
},
}
request = service.buckets().insert(
body=bucket_body,
predefinedAcl="projectPrivate",
predefinedDefaultObjectAcl="projectPrivate",
project=project_id
)
bucket = request.execute()
print("Done.\n")
return bucket
| 25,417
|
def ValidateBucketForCertificateAuthority(bucket_name):
"""Validates that a user-specified bucket can be used with a Private CA.
Args:
bucket_name: The name of the GCS bucket to validate.
Returns:
A BucketReference wrapping the given bucket name.
Raises:
InvalidArgumentException: when the given bucket can't be used with a CA.
"""
messages = storage_util.GetMessages()
client = storage_api.StorageClient(messages=messages)
try:
bucket = client.GetBucket(
bucket_name,
messages.StorageBucketsGetRequest.ProjectionValueValuesEnum.full)
if not _BucketAllowsPublicObjectReads(bucket):
# Show a warning but don't fail, since this could be intentional.
log.warning(
'The specified bucket does not publicly expose new objects by '
'default, so some clients may not be able to access the CA '
'certificate or CRLs. For more details, see '
'https://cloud.google.com/storage/docs/access-control/making-data-public'
)
return storage_util.BucketReference(bucket_name)
except storage_api.BucketNotFoundError:
raise exceptions.InvalidArgumentException(
'gcs-bucket', 'The given bucket does not exist.')
| 25,418
|
def ingest(excel_file, db_name, table_name, db_type="pgsql", schema=None):
"""Ingest the file into the database table."""
logging.info(
f"file = {excel_file}, db = {db_name}, table = {table_name}, db type = {db_type}"
)
# Create database engine
db = db_provider.get(db_type)
engine = db.get_engine()
# Inspect the target table schema
inspector = inspect(engine)
dtypes = {}
for column in inspector.get_columns(table_name, schema=schema):
dtypes[column["name"]] = column["type"]
logging.info(dtypes)
# Load the excel into database
df = pd.read_excel(excel_file, engine="openpyxl")
df.to_sql(
table_name, engine, if_exists="append", chunksize=500, index=False, dtype=dtypes
)
# TODO - Validation
print(f"\nTotal records in {excel_file} - {len(df)}")
for c in df.columns:
print(f"{c} - {df[c].nunique()}")
| 25,419
|
def test_sumcovariancenormsmetric_compute_metric():
"""Test Sum of Covariance Norms compute metric."""
generator = SumofCovarianceNormsMetric()
time = datetime.datetime.now()
# Multiple tracks and truths present at two timesteps
tracks = {Track(states=[GaussianState(state_vector=[[1], [2], [1], [2]], timestamp=time,
covar=np.diag([i, i, i, i])),
GaussianState(state_vector=[[1.5], [2.5], [1.5], [2.5]],
timestamp=time + datetime.timedelta(seconds=1),
covar=np.diag([i+0.5, i+0.5, i+0.5, i+0.5]))])
for i in range(5)}
truths = {GroundTruthPath(states=[GroundTruthState(state_vector=[[0], [1], [0], [1]],
timestamp=time),
GroundTruthState(state_vector=[[0.5], [1.5], [0.5], [1.5]],
timestamp=time + datetime.timedelta(
seconds=1))])}
manager = SimpleManager([generator])
manager.add_data(truths, tracks)
main_metric = generator.compute_metric(manager)
first_association, second_association = main_metric.value
assert main_metric.title == "Sum of Covariance Norms Metric"
assert main_metric.time_range.start_timestamp == time
assert main_metric.time_range.end_timestamp == time + datetime.timedelta(
seconds=1)
assert first_association.title == "Covariance Matrix Norm Sum"
assert first_association.value == 20
assert first_association.timestamp == time
assert first_association.generator == generator
assert second_association.title == "Covariance Matrix Norm Sum"
assert second_association.value == 25
assert second_association.timestamp == time + datetime.timedelta(seconds=1)
assert second_association.generator == generator
| 25,420
|
def write_csv_data(tsk, csv_data, filelabel, spc_array):
""" Write the csv data dictionary into the correct type of csv
or text file
"""
if 'freq' in tsk:
final_csv_data = {}
for key in csv_data['freq']:
final_csv_data[key] = csv_data['freq'][key]
if csv_data['scalefactor']:
final_csv_data['Scale Factor'] = []
for key in csv_data['scalefactor']:
final_csv_data[key + '_scalefactor'] = csv_data['scalefactor'][key]
final_csv_data['Torsional Frequencies'] = []
for key in csv_data['tfreq']:
final_csv_data[key + '_tfreq'] = csv_data['tfreq'][key]
final_csv_data['All RT Harmonic Frequencies'] = []
for key in csv_data['allfreq']:
final_csv_data[key + '_RTFreq'] = csv_data['allfreq'][key]
print(final_csv_data)
ncols = max([len(x) for x in final_csv_data.values()])
df = pandas.DataFrame.from_dict(
final_csv_data, orient='index',
columns=['Path', 'ZPVE [A.U.]', *[''] * (ncols-2)])
df.to_csv(filelabel, float_format='%.5f')
if 'geo' in tsk:
all_data = '\n'.join([spc_data for spc_data in csv_data.values()])
io.write_file(filelabel, all_data)
if 'zma' in tsk:
all_data = '\n'.join([spc_data for spc_data in csv_data.values()])
io.write_file(filelabel, all_data)
if 'ene' in tsk:
df = pandas.DataFrame.from_dict(
csv_data, orient='index',
columns=['Path', 'Energy [A.U.]'])
df.to_csv(filelabel, float_format='%.8f')
if 'enthalpy' in tsk:
df = pandas.DataFrame.from_dict(
csv_data, orient='index',
columns=[
'Path', 'ZPVE+Energy [A.U.]', 'Hf (0 K) [kcal/mol]',
*spc_array])
df.to_csv(filelabel, float_format='%.6f')
if 'coeffs' in tsk:
df = pandas.DataFrame.from_dict(
csv_data, orient='index',
columns=[
*spc_array])
df.to_csv(filelabel, float_format='%.2f')
| 25,421
|
def login_with_invalid_username_and_valid_password(users, i, iterations=10):
"""Login with invalid username and valid password.
"""
with When(f"users try to login with invalid username and valid password #{i}"):
for i in range(iterations):
random_user = dict(users[random.randint(0, len(users)-1)])
random_user["cn"] += randomword(1)
login_and_execute_query(username=random_user["cn"],
password=random_user["userpassword"],
exitcode=4,
message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name",
steps=False)
| 25,422
|
def save_sample(generator, saved_samples, batches_done, sample_path):
"""
Save a sample of generated images.
Args:
generator: The generator model.
saved_samples: The directory to save the sample images.
batches_done: The number of batches done.
sample_path: path where to save the samples
"""
if sample_path is not None:
# Generate inpainted image
gen_imgs = generator(saved_samples["masked"], saved_samples["lowres"])
# Save sample
sample = torch.cat(
(saved_samples["masked"].data, gen_imgs.data, saved_samples["imgs"].data),
-2,
)
save_image(
sample, "%s/%d.png" % (sample_path, batches_done), nrow=5, normalize=True
)
| 25,423
|
def load_train_val(seq_len, batch_size, dataset="hollywood2"):
"""
    This returns two dataloaders corresponding to the train and validation sets. Each
iterator yields tensors of shape (N, 3, L, H, W) where N is the batch size, L is
the sequence length, and H and W are the height and width of the frame.
    The batch size is always 1 in the validation set. The frames are always cropped
    to (160, 160) windows in the training set. The frames in the validation set are
not cropped if they are smaller than 360x480; otherwise, they are cropped so the
maximum returned size is 360x480.
"""
train = DataLoader(VideoDataset(
"%s/train" % dataset,
crop_size=(160, 160),
seq_len=seq_len,
), shuffle=True, num_workers=16, batch_size=batch_size, pin_memory=True)
val = DataLoader(VideoDataset(
"%s/val" % dataset,
crop_size=False,
seq_len=seq_len,
), shuffle=False, batch_size=1, pin_memory=True)
return train, val
| 25,424
|
def SLC_burst_copy(SLC, SLC_par, TOPS_par, SLC_out, SLC_out_par, burst_num, drflg='-', SLC_par2='-', logpath=None):
"""
| Copy selected burst from Sentinel-1 TOPS SLC to a file
| Copyright 2014, Gamma Remote Sensing, v1.3 21-Oct-2014 awi/clw
Parameters
----------
SLC:
(input) Sentinel-1 TOPS mode burst SLC
SLC_par:
(input) SLC parameter file for the TOPS burst SLC
TOPS_par:
(input) TOPS parameter file for the TOPS burst SLC
SLC_out:
(output) SLC file containing a single burst
SLC_out_par:
(output) SLC parameter file for the single burst SLC
burst_num:
burst number of selected burst (1->number of bursts in the SLC)
drflg:
deramp phase flag:
* 0: no modification of the burst SLC phase (default)
* 1: subtract TOPS Doppler phase ramp (deramp)
SLC_par2:
(output) SLC parameter file for the single burst SLC with deramped phase (drflg: 1)
logpath: str or None
a directory to write command logfiles to
"""
process(['/cluster/GAMMA_SOFTWARE-20161207/ISP/bin/SLC_burst_copy', SLC, SLC_par, TOPS_par, SLC_out, SLC_out_par, burst_num, drflg, SLC_par2], logpath=logpath)
| 25,425
|
def update_to_v25(config_dict):
"""Major changes for V2.5:
- Option webpath is now station_url
- Drivers are now in their own package
- Introduction of the station registry
"""
major, minor = get_version_info(config_dict)
if major + minor >= '205':
return
try:
# webpath is now station_url
webpath = config_dict['Station'].get('webpath')
station_url = config_dict['Station'].get('station_url')
if webpath is not None and station_url is None:
config_dict['Station']['station_url'] = webpath
config_dict['Station'].pop('webpath', None)
except KeyError:
pass
# Drivers are now in their own Python package. Change the names.
# --- Davis Vantage series ---
try:
if config_dict['Vantage']['driver'].strip() == 'weewx.VantagePro':
config_dict['Vantage']['driver'] = 'weewx.drivers.vantage'
except KeyError:
pass
# --- Oregon Scientific WMR100 ---
# The section name has changed from WMR-USB to WMR100
if 'WMR-USB' in config_dict:
if 'WMR100' in config_dict:
sys.exit("\n*** Configuration file has both a 'WMR-USB' "
"section and a 'WMR100' section. Aborting ***\n\n")
config_dict.rename('WMR-USB', 'WMR100')
# If necessary, reflect the section name in the station type:
try:
if config_dict['Station']['station_type'].strip() == 'WMR-USB':
config_dict['Station']['station_type'] = 'WMR100'
except KeyError:
pass
# Finally, the name of the driver has been changed
try:
if config_dict['WMR100']['driver'].strip() == 'weewx.wmrx':
config_dict['WMR100']['driver'] = 'weewx.drivers.wmr100'
except KeyError:
pass
# --- Oregon Scientific WMR9x8 series ---
# The section name has changed from WMR-918 to WMR9x8
if 'WMR-918' in config_dict:
if 'WMR9x8' in config_dict:
sys.exit("\n*** Configuration file has both a 'WMR-918' "
"section and a 'WMR9x8' section. Aborting ***\n\n")
config_dict.rename('WMR-918', 'WMR9x8')
# If necessary, reflect the section name in the station type:
try:
if config_dict['Station']['station_type'].strip() == 'WMR-918':
config_dict['Station']['station_type'] = 'WMR9x8'
except KeyError:
pass
# Finally, the name of the driver has been changed
try:
if config_dict['WMR9x8']['driver'].strip() == 'weewx.WMR918':
config_dict['WMR9x8']['driver'] = 'weewx.drivers.wmr9x8'
except KeyError:
pass
# --- Fine Offset instruments ---
try:
if config_dict['FineOffsetUSB']['driver'].strip() == 'weewx.fousb':
config_dict['FineOffsetUSB']['driver'] = 'weewx.drivers.fousb'
except KeyError:
pass
# --- The weewx Simulator ---
try:
if config_dict['Simulator']['driver'].strip() == 'weewx.simulator':
config_dict['Simulator']['driver'] = 'weewx.drivers.simulator'
except KeyError:
pass
if 'StdArchive' in config_dict:
# Option stats_types is no longer used. Get rid of it.
config_dict['StdArchive'].pop('stats_types', None)
try:
# V2.5 saw the introduction of the station registry:
if 'StationRegistry' not in config_dict['StdRESTful']:
stnreg_dict = configobj.ConfigObj(StringIO("""[StdRESTful]
[[StationRegistry]]
# Uncomment the following line to register this weather station.
#register_this_station = True
# Specify a station URL, otherwise the station_url from [Station]
# will be used.
#station_url = http://example.com/weather/
# Specify a description of the station, otherwise the location from
# [Station] will be used.
#description = The greatest station on earth
driver = weewx.restful.StationRegistry
"""), encoding='utf-8')
config_dict.merge(stnreg_dict)
except KeyError:
pass
config_dict['version'] = '2.5.0'
| 25,426
|
def checkIfMeshId(projectPath, mesh, name, meshID):
"""Checks if exists another Object having the same name as the mesh
This function asks the user what to do.
If the object is not a mesh, gets all the children meshes
Args:
projectPath: a str with the path where the exported file will be
mesh: a string that representes one of the mesh in the scene
name: a str with the name of the mesh
meshID: the new name if needed
returns
name: as tr with the new value
abort: is the uses choose to cancel
"""
abort = False
if os.access(projectPath + meshID + ".obj" , os.W_OK):
msg = "There is already an existing Go file [" + name + "] in the Go project folder.\n"
msg = msg + "Do you want to replace it?\n'Yes' to replace, 'No' to rename the mesh " + name + ", 'Cancel' to cancel export"
res = showDialog("Exporting mesh object " + name, msg, "yesNoCancel")
if (res == "ok"):
# replace existing Object
            lx.out(' -> itemID=({0}): set GoMo tag to "{1}"'.format(mesh, name))
meshID = name
lx.eval('select.drop item')
lx.eval('select.item {0} set'.format(mesh))
lx.eval('item.tag string GoMo "{0}"' %(meshID))
if (res == "no"):
#rename the mesh to an unique Object
name = meshID
lx.eval('select.drop item')
lx.eval('select.item {0} set'.format(mesh))
lx.eval('item.name "{0}"'.format(meshID))
lx.eval('item.tag string GoMo "{0}"'.format(meshID))
if (res == "cancel"):
abort = True
return name, meshID, abort
| 25,427
|
def initialize(repo_path):
"""Instance creation"""
global RepoExecs
RepoExecs = ExecutionsStorage(repo_path=repo_path)
| 25,428
|
def read_params_file(config_path: str) -> dict:
    """Read and parse the params.yaml file.
    Args:
        config_path (str): path to the yaml config file
    Returns:
        dict: the parsed yaml contents
"""
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
return config
| 25,429
|
def test_read_xml_files(tmp_path):
"""Test function `_read_xml_files`."""
from clinica.iotools.converters.adni_to_bids.adni_json import _read_xml_files
with pytest.raises(IndexError, match="No ADNI xml files"):
_read_xml_files()
xml_path = tmp_path / "xml_files"
xml_path.mkdir()
os.chdir(xml_path)
clinica_path = xml_path / "Clinica_processed_metadata"
clinica_path.mkdir()
dummy_file = clinica_path / "ADNI_1234.xml"
with dummy_file.open("w") as fp:
fp.write("foo")
assert os.listdir() == ["Clinica_processed_metadata"]
assert _read_xml_files() == ["Clinica_processed_metadata/ADNI_1234.xml"]
subjects = ["01", "02", "06", "12"]
for subj in subjects:
with open(xml_path / f"ADNI_{subj}.xml", "w") as fp:
fp.write(f"foo {subj}")
assert _read_xml_files(subjects, xml_path) == [
str(xml_path / f"ADNI_{subj}.xml") for subj in subjects
]
os.chdir(os.path.dirname(__file__))
| 25,430
|
def fullmatch(regex, string, flags=0):
"""Emulate python-3.4 re.fullmatch()."""
matched = re.match(regex, string, flags=flags)
if matched and matched.span()[1] == len(string):
return matched
return None
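# A minimal usage sketch, not part of the original snippet: like re.fullmatch, a match
# is returned only when the pattern consumes the entire string.
import re
assert fullmatch(r"\d+", "12345") is not None
assert fullmatch(r"\d+", "123a5") is None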
| 25,431
|
def html2text(html: str) -> str:
""" Change HTML to help torenizer and return text """
# Replace <br/> with PERIOD+NEW_LINE
html = re.sub(r'(\s*<br\s?\/\s*>)+', '. \n', html)
html = re.sub(r'<br\s?\/?>', '. \n', html)
html = re.sub(r'\s*(</?em>)\s*', r' \1 ', html)
html = re.sub(r'\s*(</?strong>)\s*', r' \1 ', html)
html = re.sub(r'\s*(</?b>)\s*', r' \1 ', html)
html = re.sub(r'\s*(</?i>)\s*', r' \1 ', html)
soup = bs4.BeautifulSoup(html, 'html5lib')
if REMOVE_TABLES:
for tag in soup('table'):
tag.extract()
if REMOVE_LISTS:
for tag in soup('ul'):
tag.extract()
divs_to_remove = '^(script|noscript|form|style|head|nav)$'
for tag in soup.find_all(re.compile(divs_to_remove)):
tag.extract()
if REMOVE_TIME_TAG:
for tag in soup('time'):
tag.extract()
if REMOVE_LISTS_OF_LINKS:
for ul in soup('ul'):
ul_can_be_removed_flags = []
for li in ul.find_all('li'):
can_be_removed = False
li_is_link = False
a_tags_in_li = li.find_all('a')
if len(a_tags_in_li) == 1:
li_is_link = True
if li_is_link and li.get_text().strip() == \
a_tags_in_li[0].get_text().strip():
can_be_removed = True
ul_can_be_removed_flags.append(li_is_link)
ul_can_be_removed_flags.append(can_be_removed)
if all(ul_can_be_removed_flags):
ul.extract()
# List of html-tags that we consider as BLOCK,
# so there are no sentences that begins in one and ends in another
pattern = re.compile('^(div|p|h1|h2|h3|h4|h5|code|blockquote)$')
for tag in soup.find_all(pattern):
if tag.name == 'div' and tag.find_all(pattern): # skip if has child
continue
tag_text = tag.get_text().strip()
if not tag_text:
continue
if tag_text[-1] == ':' and SKIP_FINALYZING_IF_ENDS_WITH_COLON:
continue
# Adding PERIOD in the end of text tag
if not tag_text[-1] in PARAGRAPH_LAST_CHAR_MUST_BE_IN:
# remove COLON in the end
new_tag = soup.new_tag('p')
if tag_text[-1] == ':':
tag_text = tag_text.rstrip(':')
new_tag.string = '. \n' + tag_text + '. '
tag.replace_with(new_tag)
text = soup.get_text()
# Remove possible period (side-effect) at the start
text = re.sub(r'^\s*\.', r'', text)
# Text..SPACE -> Text.SPACE
text = re.sub(r'([^\.])\.\. ', r'\1. ', text)
    # Remove redundant punctuation: . \n . -> .  TODO: optimize here
text = re.sub(r'\.(\s*\n\s*\.)+', r'. \n', text)
text = re.sub(r'\.\s*\n\.', r'. \n', text)
text = re.sub(r'\n\.', r'. ', text)
text = re.sub(r'\.\s\n\s*\.', r'. \n', text)
text = re.sub(r'\.\s*\.\s\n', r'. \n', text)
text = re.sub(r'\s+\.\s\n', r'. \n', text)
text = re.sub(r'\n\.\s*\n', r'\n', text)
return text
| 25,432
|
def config_settings(event):
""" opens the configuration """
global PROMPTING
shell_telemetry.track_key('F1')
PROMPTING = True
config = azclishell.configuration.CONFIGURATION
answer = ""
questions = {
"Do you want command descriptions": "command_description",
"Do you want parameter descriptions": "param_description",
"Do you want examples": "examples"
}
for question in questions:
while answer.lower() != 'y' and answer.lower() != 'n':
answer = prompt(u'\n%s (y/n): ' % question)
config.set_val('Layout', questions[question], format_response(answer))
answer = ""
PROMPTING = False
print("\nPlease restart the interactive mode for changes to take effect.\n\n")
event.cli.set_return_value(event.cli.current_buffer)
| 25,433
|
def set_dim(
fig: matplotlib.figure.Figure,
width: float = 398.3386,
fraction_of_line_width: float = 1,
ratio: float = (5 ** 0.5 - 1) / 2,
) -> None:
"""Set aesthetic figure dimensions to avoid scaling in latex.
Default width is `src.constants.REPORT_WIDTH`.
Default ratio is golden ratio, with figure occupying full page width.
Args:
fig (matplotlib.figure.Figure): Figure object to resize.
width (float): Textwidth of the report to make fontsizes match.
Defaults to `src.constants.REPORT_WIDTH`.
fraction_of_line_width (float, optional): Fraction of the document width
which you wish the figure to occupy. Defaults to 1.
ratio (float, optional): Fraction of figure width that the figure height
should be. Defaults to (5 ** 0.5 - 1)/2.
Returns:
void; alters current figure to have the desired dimensions
Example:
Here is an example of using this function::
>>> set_dim(fig, fraction_of_line_width=1, ratio=(5 ** 0.5 - 1) / 2)
"""
fig.set_size_inches(
get_dim(width=width, fraction_of_line_width=fraction_of_line_width, ratio=ratio)
)
| 25,434
|
def _is_class(module: Any, member: Type, clazz: Type) -> bool:
"""
Validates if a module member is a class and an instance of a CoreService.
:param module: module to validate for service
:param member: member to validate for service
:param clazz: clazz type to check for validation
:return: True if a valid service, False otherwise
"""
if not inspect.isclass(member):
return False
if not issubclass(member, clazz):
return False
if member.__module__ != module.__name__:
return False
return True
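# A minimal runnable sketch, not part of the original snippet (DemoBase, DemoService and
# demo_module are hypothetical stand-ins): the member must be a class, a subclass of the
# given base, and actually defined in the module being scanned.
import types
class DemoBase:
    pass
demo_module = types.ModuleType("demo_module")
class DemoService(DemoBase):
    pass
DemoService.__module__ = demo_module.__name__
assert _is_class(demo_module, DemoService, DemoBase)
assert not _is_class(demo_module, DemoBase, DemoBase)  # defined here, not in demo_module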
| 25,435
|
def word_boundary(queries, count, degree, parallel=True, **kwargs):
"""
run augmentation on list of sentences
:param queries: sentences to augment
:type queries: list
:param count: number of output for each query
:type count: int
:param degree: degree of augmentation, takes value between 0 and 1
:type degree: float
:param parallel: run in parallel
:type parallel: bool
:param kwargs:
:return:
"""
if parallel:
function = partial(
__word_boundary__,
**kwargs,
degree=degree,
count=count,
)
return run_parallel(queries, function)
else:
return [
__word_boundary__(word, degree=degree, count=count, **kwargs)
for word in queries
]
| 25,436
|
def myFunction(objectIn):
"""What you are supposed to test."""
return objectIn.aMethodToMock() + 2
| 25,437
|
def read_config(filename, section):
""" Reads a section from a .ini file and returns a dict object
"""
parser = ConfigParser()
parser.read(filename)
dic = {}
if parser.has_section(section):
items = parser.items(section)
for item in items:
dic[item[0]] = item[1]
else:
raise Exception('{0} not found in the {1} file'.format(section, filename))
return dic
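# A minimal usage sketch, not part of the original snippet ('config.ini' and its
# [database] section are hypothetical); ConfigParser values come back as strings:
#   read_config('config.ini', 'database') -> {'host': 'localhost', 'port': '5432'}
db_config = read_config('config.ini', 'database')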
| 25,438
|
def get_training_set_count(disc):
"""Returns the total number of training sets of a discipline and all its
child elements.
:param disc: Discipline instance
:type disc: models.Discipline
:return: sum of training sets
:rtype: int
"""
training_set_counter = 0
for child in disc.get_descendants(include_self=True):
training_set_counter += child.training_sets.count()
return training_set_counter
| 25,439
|
def _check_type(var, _type):
"""
Genie returns generally unuseful type errors from the rest api. This
function wraps an "isinstance() -> assert" call with a more helpful
message.
"""
if not isinstance(var, _type):
raise GenieError('Invalid type. Expected %s but got %s: %s', _type,
type(var), var)
| 25,440
|
def test_backends_mixins_history_mixin_clean_history(fs):
"""Tests the clean_history method of the HistoryMixin."""
# pylint: disable=invalid-name
history = HistoryMixin()
# Add history events
events = [
{"command": "foo"},
{"command": "bar"},
{"command": "foo"},
{"command": "lol"},
{"command": "bar"},
]
fs.create_file(HISTORY_FILE, contents=json.dumps(events))
history.clean_history(lambda event: event.get("command") == "foo")
assert history.history == [
{"command": "bar"},
{"command": "lol"},
{"command": "bar"},
]
| 25,441
|
def search_dir(path, dir_name, type):
"""Search directory in certain path"""
target_path = ""
for item in os.listdir(path):
item_path = os.path.join(path, item)
if os.path.isdir(item_path):
if lambda_fun(dir_name, item, type):
target_path = item_path
break
else:
target_path = search_dir(item_path, dir_name, type)
if target_path != "":
break
return target_path
| 25,442
|
def lazy_import(module_name, callback=None):
"""Returns a proxy module object that will lazily import the given module the first
time it is used.
Example usage::
# Lazy version of `import tensorflow as tf`
tf = lazy_import("tensorflow")
# Other commands
# Now the module is loaded
tf.__version__
Args:
module_name: the fully-qualified module name to import
callback (None): a callback function to call before importing the
module
Returns:
a proxy module object that will be lazily imported when first used
"""
return LazyModule(module_name, callback=callback)
| 25,443
|
def _top_k(array, k):
"""Returns top k values and their indices along the last axis of the array.
This function serves the same purpose as jax.lax.top_k, but in a more XLA
friendly manner for TPUs:
(1) On TPUs, we use one-hot matrix multiplications to select the top k values.
This convoluted way of obtaining the top k values is generally faster on
TPUs.
(2) Otherwise, we fall back to jax.lax.top_k (and its underlying scatter op).
Args:
array: Source array.
k: Number of top values to select.
Returns:
- Top k values
- Associated top k indices.
"""
if _favor_one_hot_slices():
top_k_indices = jax.lax.top_k(array, k)[-1]
top_k_values = _take_along_axis(array, top_k_indices, axis=-1)
return top_k_values, top_k_indices
else:
return jax.lax.top_k(array, k)
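# A minimal sketch, not part of the original snippet, of the one-hot gather that a
# helper like _take_along_axis could use on TPUs: the top-k values are selected with a
# one-hot matrix multiplication instead of a gather/scatter op.
import jax.numpy as jnp
def one_hot_take_along_last_axis(array, indices):
    # (..., k, n) one-hot mask marking the selected positions along the last axis
    one_hot = (indices[..., None] == jnp.arange(array.shape[-1])).astype(array.dtype)
    # contract the last axis: equivalent to gathering array[..., indices] row by row
    return jnp.einsum('...kn,...n->...k', one_hot, array)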
| 25,444
|
def time_to_accuracy(raw_metrics, tag, threshold):
"""Calculate the amount of time for accuracy to cross a given threshold.
Args:
raw_metrics: dict mapping TensorBoard tags to list of MetricPoint.
tag: string name of accuracy metric.
threshold: the desired model accuracy.
Returns:
    MetricPoint whose metric_value is the elapsed time in seconds to reach the desired
    accuracy and whose wall_time is the time at which it was reached.
"""
values = raw_metrics.get(tag)
if not values:
raise ValueError('No values found for time to accuracy tag: {}. '
'Possible tags were: {}'.format(tag, raw_metrics.keys()))
# MetricPoints should be sorted by timestamp with earlier events first.
start_wall_time = values[0].wall_time
try:
end_wall_time = next(
v.wall_time for v in values
if v.metric_value >= threshold)
return MetricPoint(end_wall_time - start_wall_time, end_wall_time)
except StopIteration:
max_accuracy = max(v.metric_value for v in values)
raise ValueError(
'Accuracy metric `{}` was never high enough to satisfy the '
'`time_to_accuracy` settings from the config. Max accuracy: {}. '
'Target accuracy: {}. Config for `time_to_accuracy`: {}'.format(
tag, max_accuracy, threshold))
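# A minimal usage sketch, not part of the original snippet (MetricPoint is assumed to be
# the (metric_value, wall_time) tuple suggested by the attribute accesses above):
from collections import namedtuple
MetricPoint = namedtuple('MetricPoint', ['metric_value', 'wall_time'])
raw = {'accuracy': [MetricPoint(0.10, 1000.0),
                    MetricPoint(0.72, 1600.0),
                    MetricPoint(0.91, 2200.0)]}
result = time_to_accuracy(raw, tag='accuracy', threshold=0.9)
# result.metric_value == 1200.0 seconds elapsed; result.wall_time == 2200.0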
| 25,445
|
def AddArguments(parser):
"""Adds command-line arguments for platform fields.
Args:
parser: argparser.ArgumentParser object.
"""
parser.add_argument(
'--os',
help=('For multi-platform manifest lists, specifies the operating '
'system.'))
parser.add_argument(
'--os-version',
help=('For multi-platform manifest lists, specifies the operating system '
'version.'))
parser.add_argument(
'--os-features',
nargs='*',
help=('For multi-platform manifest lists, specifies operating system '
'features.'))
parser.add_argument(
'--architecture',
help=('For multi-platform manifest lists, specifies the CPU '
'architecture.'))
parser.add_argument(
'--variant',
help='For multi-platform manifest lists, specifies the CPU variant.')
parser.add_argument(
'--features',
nargs='*',
help='For multi-platform manifest lists, specifies CPU features.')
| 25,446
|
def create_stencil(image_shape, smooth):
"""The stencil is a mask that will enable a smooth transition between blocks. blocks will be multiplied
by the stencil so that when they are blitted to the image, transition between them are smoothed out.
image 1: 1 1 1 1 1 1 1 , image 2: 2 2 2 2 2 2 2, stencil: .25 .75 1 1 1 .75 .25
image 1 * stencil: .25 .75 1 1 1 .75 .25
image 2 * stencil: .5 1.5 2 2 2 1.5 .5
adding them: .25 .75 1 1 1 1.25 1.75 2 2 2 1.5 .5
"""
stencil = np.ones(image_shape, dtype=np.float32)
# 2 * smooth because we need to blend the inside of the block with the outside of the other block
# for smooth = 4, i1; inside image 1, o1: outside image 1
# o1 o1 o1 o1 | i1 i1 i1 i1
# i1 i1 i1 i1 | o1 o1 o1 o1
factors = np.linspace(0, 1, 2*smooth+1, endpoint=False)[1:]
for i, f in enumerate(factors):
stencil[i, :, :] *= f
stencil[:, i, :] *= f
for i, f in enumerate(factors):
stencil[image_shape[0] - i - 1, :, :] *= f
stencil[:, image_shape[1] - i - 1, :] *= f
return stencil
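# A minimal sketch, not part of the original snippet, showing why overlapping stencil
# edges blend cleanly: for smooth = 2 the edge factors are 0.2, 0.4, 0.6, 0.8, so the
# fading edge of one block and the rising edge of its neighbour always sum to 1.0.
import numpy as np
factors = np.linspace(0, 1, 2 * 2 + 1, endpoint=False)[1:]
assert np.allclose(factors + factors[::-1], 1.0)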
| 25,447
|
def evaluate_drivable(gt_dir: str, result_dir: str) -> None:
"""Evaluate drivable area."""
evaluate_segmentation(gt_dir, result_dir, 3, 17)
| 25,448
|
def a_m_to_P(a, m):
"""Compute the orbital period given the semi-major axis and total mass.
Parameters
----------
{a}
{m}
"""
return 2*np.pi * np.sqrt(a**3 / (G * m))
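# A minimal usage sketch, not part of the original snippet (assumes `G` is the
# gravitational constant in SI units, e.g. 6.674e-11 m^3 kg^-1 s^-2): the Earth-Sun
# system gives a period of roughly one year.
a_earth = 1.496e11          # semi-major axis in metres
m_total = 1.989e30          # total mass in kg (Earth's contribution is negligible)
period = a_m_to_P(a_earth, m_total)   # ~3.16e7 s, i.e. about 365 days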
| 25,449
|
def _async_attr_mapper(attr_name, val):
"""The `async` attribute works slightly different than the other bool
attributes. It can be set explicitly to `false` with no surrounding quotes
according to the spec."""
if val in [False, 'False']:
return ' {}=false'.format(attr_name)
elif val:
return ' {}'.format(attr_name)
else:
return ''
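# A minimal usage sketch, not part of the original snippet, covering the three cases
# described above:
assert _async_attr_mapper('async', False) == ' async=false'
assert _async_attr_mapper('async', True) == ' async'
assert _async_attr_mapper('async', '') == ''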
| 25,450
|
def wrap(val: Any) -> Value:
"""Wraps the given native `val` as Protobuf `Value` message.
Supports converting collection/array of primitives types to `Value` message:
* numpy array of primitives.
* list of primitives.
* generator of finite no. of primitives.
Generally, wrapping only supports wrapping of collection of primitives
if all primitives share the same native primitive types. However, some
native type mixing is allowed as supported by `np.asarray()`, although
doing so is not recommended.
If the given `val` is already a Protobuf `Value` message, returns `val` as is.
Args:
val: The native value to wrap as a protobuf message. The value should
be native primitive, array of primitives.
Returns:
Wrapped `Value` protobuf message.
    Raises:
TypeError: If the given native value is not of a supported type.
"""
# return as is if val is already value protobuf
if isinstance(val, Value):
return val
# try to wrap value as primitive
try:
return wrap_primitive(val)
except TypeError:
pass
# check that we are not trying to convert None
if val is None:
raise TypeError("Wrapping None is Value proto is not supported")
# extract values from if generator
if isgenerator(val):
val = list(val)
# extract flatten list of primitive protos from collect of primitives
val_arr = np.asarray(val)
primitives = [wrap_primitive(v) for v in val_arr.flatten()]
# resolve element data type and build value proto
element_type = primitives[0].data_type.primitive
return Value(
data_type=Type(
array=Type.Array(
dimensions=val_arr.shape,
element_type=element_type,
)
),
array=Value.Array(values=[p.primitive for p in primitives]),
)
| 25,451
|
def to_transform_msg(transform):
"""Convert a `Transform` object to a Transform message."""
msg = geometry_msgs.msg.Transform()
msg.translation = to_vector3_msg(transform.translation)
msg.rotation = to_quat_msg(transform.rotation)
return msg
| 25,452
|
def print_pos_neg(num):
"""Print if positive or negative in polarity level
>>> print_pos_neg(0.8)
'positive'
>>> print_pos_neg(-0.5)
'negative'
"""
if num > 0:
return "positive"
elif num == 0:
return "neutral"
else:
return "negative"
| 25,453
|
def delete_volume_op(name: str, namespace: str):
"""
Creates a kfp.dsl.ContainerOp that deletes a volume (Kubernetes Resource).
Parameters
----------
name : str
namespace : str
Returns
-------
kfp.dsl.ContainerOp
"""
kind = "PersistentVolumeClaim"
return kubernetes_resource_delete_op(
name=f"vol-{name}",
kind=kind,
namespace=namespace,
)
| 25,454
|
def distal(combo):
""" Returns the distal subspecies from a combo
:param combo: int representation of origin combination
:return: int representation of the distal origin
>>> distal(combine(CAS, DOM)) == DOM
True
"""
return combo & _DISTAL_MASK
| 25,455
|
def get_eval_dataset(files, ftDict, axes = [2], splits = None, one_hot = None, moments = None, **kwargs):
"""
Get the preprocessed evaluation dataset
Args:
files (list): list of tfrecords to be used for evaluation
Returns:
A tf.data.Dataset of evaluation data.
"""
dataset = get_dataset(files, ftDict, axes, splits, one_hot, moments, **kwargs)
dataset = dataset.batch(1)
return dataset
| 25,456
|
async def test_ndim_array_indexes(dut):
"""Test getting and setting values of multi-dimensional array indexes."""
cocotb.start_soon(Clock(dut.clk, 1000, "ns").start())
dut.array_2d.value = [[0xF0, 0xE0, 0xD0, 0xC0], [0xB0, 0xA0, 0x90, 0x80]]
await Timer(1000, "ns")
# Check indices
_check_value(tlog, dut.array_2d[1], [0xB0, 0xA0, 0x90, 0x80])
_check_value(tlog, dut.array_2d[0][31], 0xF0)
_check_value(tlog, dut.array_2d[1][29], 0x90)
_check_value(tlog, dut.array_2d[1][28], 0x80)
# Get sub-handles through NonHierarchyIndexableObject.__getitem__
dut.array_2d[1].value = [0xDE, 0xAD, 0xBE, 0xEF]
dut.array_2d[0][31].value = 0x0F
await Timer(1000, "ns")
_check_value(tlog, dut.array_2d[0][31], 0x0F)
_check_value(tlog, dut.array_2d[0][29], 0xD0)
_check_value(tlog, dut.array_2d[1][30], 0xAD)
_check_value(tlog, dut.array_2d[1][28], 0xEF)
| 25,457
|
def connect_to_nr_of_measurements_stream(context, stream):
"""Get the number of measurements server-sent-events."""
context.sse_messages = []
for message in SSEClient(f"{context.base_api_url.format('')}/nr_measurements"): # pragma: no cover-behave
context.sse_messages.append(message)
if stream == "stream":
break
context.execute_steps('when the collector measures "42"')
stream = "stream"
| 25,458
|
def test_scatter_nd_func_small_update(lock):
"""
Feature: ALL To ALL
Description: test cases for bool input of ScatterNdUpdate
Expectation: the result match to numpy implementation
"""
inputx = Tensor(np.array([True, False, True, False, True, True, False, True]), mstype.bool_)
indices = Tensor(np.array([[False], [True], [False], [True]]), mstype.int32)
updates = Tensor(np.array([9, 10, 11, 12]), mstype.bool_)
compare_scatter_nd_func("update", lock, inputx, indices, updates)
| 25,459
|
def getTopApSignals(slot_to_io):
""" HLS simulator requires that there is an ap_done at the top level """
# find which slot has the s_axi_control
for slot, io_list in slot_to_io.items():
if any('s_axi' in io[-1] for io in io_list):
# note the naming convention
ap_done_source = [f'{io[-1]}_in' for io in io_list if 'ap_done' in io[-1]]
ap_start_source = [f'{io[-1]}_out' for io in io_list if 'ap_start' in io[-1]]
top_ap_signals = []
top_ap_signals.append(f'wire ap_done = ' + ' & '.join(ap_done_source) + ';')
top_ap_signals.append('wire ap_idle = ap_done;')
top_ap_signals.append('wire ap_ready = ap_done;')
top_ap_signals.append(f'wire ap_start = {ap_start_source[0]};') # only need 1 ap_start
return top_ap_signals
assert False
| 25,460
|
def get_line_style(image: Image = None) -> int:
"""
Get line style of the specified image.
The line style will be used when drawing lines or shape outlines.
:param image: the target image whose line style is to be gotten. None means it is the target image
(see set_target() and get_target())
:return: line style used by the specified image
"""
image = _get_target_image(image)
return image.get_line_style()
| 25,461
|
def make_polygon_for_earth(lat_bottom_left, lon_bottom_left, lat_top_right, lon_top_right):
"""
    Divides the region into two separate regions (if needed) to handle the cases where the region
    crosses the international date line.
:param lat_bottom_left: float (-90 to 90)
:param lon_bottom_left: float (-180 to 180)
:param lat_top_right: float (-90 to 90)
:param lon_top_right: float (-180 to 180)
:return:
------------ <-----(lon top right, lat top right)
| |
| |
| |
| |
------------
^
|
---- (lon bottom left, lat bottom left)
"""
focus_regions = []
# case where region starts around 180 longitude and then wraps around to -180 longitude (complete cylinder)
# international date line crossed
if lon_bottom_left > lon_top_right: # overlap of latitudes
# we need two polygons.
focus_region1 = Polygon([
[lon_bottom_left, lat_bottom_left],
[lon_bottom_left, lat_top_right],
[180, lat_top_right],
[180, lat_bottom_left]])
focus_region2 = Polygon([
[-180, lat_bottom_left],
[-180, lat_top_right],
[lon_top_right, lat_top_right],
[lon_top_right, lat_bottom_left]])
focus_regions = [focus_region1, focus_region2]
else: # international dateline not crossed
focus_region1 = Polygon([
[lon_bottom_left, lat_bottom_left],
[lon_bottom_left, lat_top_right],
[lon_top_right, lat_top_right],
[lon_top_right, lat_bottom_left]])
focus_regions = [focus_region1]
return focus_regions
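# A minimal usage sketch, not part of the original snippet (assumes the Polygon class
# used above, e.g. shapely's, is importable): a box spanning lon 170 to lon -170
# crosses the date line and is split in two, while an ordinary box is returned whole.
crossing = make_polygon_for_earth(-10, 170, 10, -170)
assert len(crossing) == 2
ordinary = make_polygon_for_earth(-10, -50, 10, 50)
assert len(ordinary) == 1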
| 25,462
|
def train_coral(s_dataloaders, t_dataloaders, val_dataloader, test_dataloader, metric_name, seed, **kwargs):
"""
:param s_dataloaders:
:param t_dataloaders:
:param kwargs:
:return:
"""
s_train_dataloader = s_dataloaders
t_train_dataloader = t_dataloaders
autoencoder = AE(input_dim=kwargs['input_dim'],
latent_dim=kwargs['latent_dim'],
hidden_dims=kwargs['encoder_hidden_dims'],
dop=kwargs['dop']).to(kwargs['device'])
encoder = autoencoder.encoder
target_decoder = MoMLP(input_dim=kwargs['latent_dim'],
output_dim=kwargs['output_dim'],
hidden_dims=kwargs['regressor_hidden_dims'],
out_fn=torch.nn.Sigmoid).to(kwargs['device'])
target_regressor = EncoderDecoder(encoder=encoder,
decoder=target_decoder).to(kwargs['device'])
train_history = defaultdict(list)
# ae_eval_train_history = defaultdict(list)
val_history = defaultdict(list)
s_target_regression_eval_train_history = defaultdict(list)
t_target_regression_eval_train_history = defaultdict(list)
target_regression_eval_val_history = defaultdict(list)
target_regression_eval_test_history = defaultdict(list)
model_optimizer = torch.optim.AdamW(target_regressor.parameters(), lr=kwargs['lr'])
for epoch in range(int(kwargs['train_num_epochs'])):
if epoch % 50 == 0:
print(f'Coral training epoch {epoch}')
for step, s_batch in enumerate(s_train_dataloader):
t_batch = next(iter(t_train_dataloader))
train_history = coral_train_step(model=target_regressor,
s_batch=s_batch,
t_batch=t_batch,
device=kwargs['device'],
optimizer=model_optimizer,
alpha=kwargs['alpha'],
history=train_history)
s_target_regression_eval_train_history = evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=s_train_dataloader,
device=kwargs['device'],
history=s_target_regression_eval_train_history)
t_target_regression_eval_train_history = evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=t_train_dataloader,
device=kwargs['device'],
history=t_target_regression_eval_train_history)
target_regression_eval_val_history = evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=val_dataloader,
device=kwargs['device'],
history=target_regression_eval_val_history)
target_regression_eval_test_history = evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=test_dataloader,
device=kwargs['device'],
history=target_regression_eval_test_history)
save_flag, stop_flag = model_save_check(history=target_regression_eval_val_history,
metric_name=metric_name,
tolerance_count=50)
if save_flag:
torch.save(target_regressor.state_dict(), os.path.join(kwargs['model_save_folder'], f'coral_regressor_{seed}.pt'))
if stop_flag:
break
target_regressor.load_state_dict(
torch.load(os.path.join(kwargs['model_save_folder'], f'coral_regressor_{seed}.pt')))
# evaluate_target_regression_epoch(regressor=target_regressor,
# dataloader=val_dataloader,
# device=kwargs['device'],
# history=None,
# seed=seed,
# output_folder=kwargs['model_save_folder'])
evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=test_dataloader,
device=kwargs['device'],
history=None,
seed=seed,
output_folder=kwargs['model_save_folder'])
return target_regressor, (
train_history, s_target_regression_eval_train_history, t_target_regression_eval_train_history,
target_regression_eval_val_history, target_regression_eval_test_history)
| 25,463
|
def first_nonzero_coordinate(data, start_point, end_point):
"""Coordinate of the first nonzero element between start and end points.
Parameters
----------
data : nD array, shape (N1, N2, ..., ND)
A data volume.
start_point : array, shape (D,)
The start coordinate to check.
end_point : array, shape (D,)
The end coordinate to check.
Returns
-------
coordinates : array of int, shape (D,)
The coordinates of the first nonzero element along the ray, or None.
"""
shape = np.asarray(data.shape)
length = np.linalg.norm(end_point - start_point)
length_int = np.round(length).astype(int)
coords = np.linspace(start_point, end_point, length_int + 1, endpoint=True)
clipped_coords = np.clip(np.round(coords), 0, shape - 1).astype(int)
nonzero = np.flatnonzero(data[tuple(clipped_coords.T)])
if len(nonzero) == 0:
return None
else:
return clipped_coords[nonzero[0]]
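# Hedged usage sketch (added for illustration, not part of the original snippet): assuming
# first_nonzero_coordinate above is in scope, a diagonal ray through a small 2-D volume
# reports the first voxel it hits.
import numpy as np

_demo_volume = np.zeros((5, 5))
_demo_volume[3, 3] = 1.0
_hit = first_nonzero_coordinate(_demo_volume, np.array([0.0, 0.0]), np.array([4.0, 4.0]))
assert np.array_equal(_hit, [3, 3])  # first nonzero voxel along the ray from (0, 0) to (4, 4)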
| 25,464
|
def l1_distance(prediction, ground_truth):
"""L1 distance difference between two vectors."""
if prediction.shape != ground_truth.shape:
prediction, ground_truth = np.squeeze(prediction), np.squeeze(ground_truth)
min_length = min(prediction.size, ground_truth.size)
return np.abs(prediction[:min_length] - ground_truth[:min_length])
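# Hedged usage sketch (added for illustration, not part of the original snippet): mismatched
# lengths are truncated to the shorter vector before the element-wise absolute difference.
import numpy as np

assert np.allclose(l1_distance(np.array([1.0, 2.0, 3.0]), np.array([1.5, 2.0])), [0.5, 0.0])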
| 25,465
|
def decode(match_id: str) -> Match:
"""Decode a match ID and return a Match.
>>> decode("QYkqASAAIAAA")
Match(cube_value=2, cube_holder=<Player.ZERO: 0>, player=<Player.ONE: 1>, crawford=False, game_state=<GameState.PLAYING: 1>, turn=<Player.ONE: 1>, double=False, resign=<Resign.NONE: 0>, dice=(5, 2), length=9, player_0_score=2, player_1_score=4)
"""
match_bytes: bytes = base64.b64decode(match_id)
match_key: str = "".join([format(b, "08b")[::-1] for b in match_bytes])
return Match(
cube_value=2 ** int(match_key[0:4][::-1], 2),
cube_holder=Player(int(match_key[4:6][::-1], 2)),
player=Player(int(match_key[6])),
crawford=bool(int(match_key[7])),
game_state=GameState(int(match_key[8:11][::-1], 2)),
turn=Player(int(match_key[11])),
double=bool(int(match_key[12])),
resign=Resign(int(match_key[13:15][::-1], 2)),
dice=(int(match_key[15:18][::-1], 2), int(match_key[18:21][::-1], 2)),
length=int(match_key[21:36][::-1], 2),
player_0_score=int(match_key[36:51][::-1], 2),
player_1_score=int(match_key[51:66][::-1], 2),
)
| 25,466
|
def _valid_proto_paths(transitive_proto_path):
"""Build a list of valid paths to build the --proto_path arguments for the ScalaPB protobuf compiler
In particular, the '.' path needs to be stripped out. This mirrors a fix in the java proto rules:
https://github.com/bazelbuild/bazel/commit/af3605862047f7b553b7d2c19fa645714ea19bcf
This is explained in this issue: https://github.com/bazelbuild/rules_scala/issues/687
"""
return depset([path for path in transitive_proto_path if path != "."])
| 25,467
|
def update_stats_objecness(obj_stats, gt_bboxes, gt_labels, pred_bboxes, pred_labels, pred_scores, mask_eval=False,
affordance_stats=None, gt_masks=None, pred_masks=None, img_height=None, img_width=None, iou_thres=0.3):
"""
Updates statistics for object classification and affordance detection.
:param obj_stats: accumulated statistics for object classification
:param gt_bboxes: ground truth normalized bounding boxes (batch_size, num_gt_bboxes, 4)
:param gt_labels: ground truth labels for gt_boxes (batch_size, num_gt_bboxes)
:param pred_bboxes: predicted normalized bounding boxes (batch_size, num_pred_bboxes, 4)
:param pred_labels: predicted labels for pred_bboxes (batch_size, num_pred_bboxes)
:param pred_scores: predicted scores for pred_bboxes (batch_size, num_pred_bboxes)
:param mask_eval: True if there are predicted masks, False otherwise
:param affordance_stats: accumulated statistics for affordance evaluation
:param gt_masks: ground truth masks (batch_size, num_gt_bboxes, orig_mask_height, orig_mask_width)
:param pred_masks: predicted masks with prob for each pixel for each class (batch_size, num_pred_bboxes, train_mask_size, train_mask_size, num_affordance_classes)
:param img_height: image height
:param img_width: image width
    :param iou_thres: IoU threshold above which a prediction may count as a true positive
    :returns: dicts with updated statistics for object classification and affordance detection
"""
# create empty mask to accumulate masks for all bboxes in one single mask
final_gt_mask = np.zeros((img_height, img_width))
final_pred_mask = np.zeros((img_height, img_width))
# iou for each pred_bbox wrt each gt_box
iou_map, zero_iou = bbox_utils.generate_iou_map(pred_bboxes, gt_bboxes)
# update stats only if there are some iou that are not 0
if not zero_iou:
# take max iou for each pred_bbox and its corresponding gt_box indices
merged_iou_map = tf.reduce_max(iou_map, axis=-1)
max_indices_each_gt = tf.argmax(iou_map, axis=-1, output_type=tf.int32)
sorted_ids = tf.argsort(merged_iou_map, direction="DESCENDING")
# Add total of true labels for each class to stats
count_holder = tf.unique_with_counts(tf.reshape(gt_labels, (-1,)))
for i, gt_label in enumerate(count_holder[0]):
if gt_label == -1:
continue
# gt_label = int(gt_label)
if int(gt_label) > 0:
gt_label = 1
obj_stats[gt_label]["total"] += int(count_holder[2][i])
for batch_id, m in enumerate(merged_iou_map):
true_labels = []
for i, sorted_id in enumerate(sorted_ids[batch_id]):
pred_label = pred_labels[batch_id, sorted_id]
if pred_label == 0:
continue
iou = merged_iou_map[batch_id, sorted_id]
gt_id = max_indices_each_gt[batch_id, sorted_id]
gt_label = int(gt_labels[batch_id, gt_id])
pred_label = int(pred_label)
score = pred_scores[batch_id, sorted_id]
obj_stats[pred_label]["scores"].append(score)
obj_stats[pred_label]["tp"].append(0)
obj_stats[pred_label]["fp"].append(0)
if int(gt_label) > 0:
gt_label = 1
# correct detection
if iou >= iou_thres and pred_label == gt_label and gt_id not in true_labels:
obj_stats[pred_label]["tp"][-1] = 1
true_labels.append(gt_id)
if mask_eval:
final_gt_mask, final_pred_mask = update_final_masks(final_gt_mask, final_pred_mask, gt_bboxes[batch_id, gt_id],
gt_masks[batch_id, gt_id].numpy(), pred_masks[batch_id, sorted_id],
img_height, img_width)
else:
obj_stats[pred_label]["fp"][-1] = 1
if mask_eval:
affordance_stats = update_stats_affordances(affordance_stats, final_gt_mask, final_pred_mask)
return obj_stats, affordance_stats
| 25,468
|
def load_schema(rel_path: str) -> Dict:
"""
Loads a schema from a relative path of the caller of this function.
:param rel_path: Relative path from the caller. e.g. ../schemas/schema.json
:return: Loaded schema as a `dict`.
"""
caller_path = Path((inspect.stack()[1])[1]).parent
fp = (caller_path / rel_path).resolve()
with open(fp, "r") as fh:
data = json.loads(fh.read())
return data
| 25,469
|
def bquantize(x, nsd=3, abstol=eps, reltol=10 * eps):
"""Bidirectionally quantize a 1D vector ``x`` to ``nsd`` signed digits.
This method will terminate early if the error is less than the specified
tolerances.
The quantizer details are repeated here for the user's convenience:
The quantizer is ideal, producing integer outputs centered about zero.
Quantizers with an even number of levels are of the mid-rise type and
produce outputs which are odd integers. Quantizers with an odd number
of levels are of the mid-tread type and produce outputs which are even
integers.
.. image:: ../doc/_static/quantizer_model.png
:align: center
:alt: Quantizer model
**Parameters:**
x : array_like or sequence
the data to be quantized.
nsd : int, optional
The number of signed digits.
abstol and reltol : floats, optional
If not supplied, the absolute tolerance and the relative
tolerance default to ``eps`` and ``10*eps``, resp.
**Returns:**
y : list
List of objects described below.
``y`` is a list of instances with the same length as ``x`` and the
following attributes:
* ``y[i].val`` is the quantized value in floating-point form,
* ``y[i].csd`` is a 2-by-nsd (or less) matrix containing
the powers of two (first row) and their signs (second row).
.. seealso::
:func:`bunquantize`, :func:`ds_quantize`
"""
n = x.shape[0] if isinstance(x, np.ndarray) else len(x)
    # q = np.zeros((2*n, nsd))  # present in the original source but unused here
y = [empty() for i in range(n)]
offset = -np.log2(0.75)
for i in range(n):
xp = x[i]
y[i].val = 0.
y[i].csd = np.zeros((2, 0), dtype='int16')
for _ in range(nsd):
error = np.abs(y[i].val - x[i])
            if error <= abstol and error <= np.abs(x[i]) * reltol:  # NOTE: the original source combined these tolerances with 'or'
break
p = mfloor(np.log2(np.abs(xp)) + offset)
p2 = 2 ** p
sx = np.sign(xp)
xp = xp - sx * p2
y[i].val = y[i].val + sx * p2
addme = np.array((p, sx)).reshape((2, 1))
y[i].csd = np.concatenate((y[i].csd, addme), axis=1)
return y
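# Hedged usage sketch (added for illustration, not part of the original snippet): assumes the
# module context above (numpy as np, eps, empty, mfloor) is importable; each returned object
# exposes the quantized value and its signed power-of-two decomposition.
for _q in bquantize([0.8, -0.3], nsd=2):
    print(_q.val, _q.csd)  # e.g. 0.8 quantizes to 0.75, represented as 2**0 - 2**-2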
| 25,470
|
def timezone_keys(
*,
# allow_alias: bool = True,
# allow_deprecated: bool = True,
allow_prefix: bool = True,
) -> SearchStrategy[str]:
"""A strategy for :wikipedia:`IANA timezone names <List_of_tz_database_time_zones>`.
As well as timezone names like ``"UTC"``, ``"Australia/Sydney"``, or
``"America/New_York"``, this strategy can generate:
- Aliases such as ``"Antarctica/McMurdo"``, which links to ``"Pacific/Auckland"``.
- Deprecated names such as ``"Antarctica/South_Pole"``, which *also* links to
``"Pacific/Auckland"``. Note that most but
not all deprecated timezone names are also aliases.
- Timezone names with the ``"posix/"`` or ``"right/"`` prefixes, unless
``allow_prefix=False``.
These strings are provided separately from Tzinfo objects - such as ZoneInfo
instances from the timezones() strategy - to facilitate testing of timezone
logic without needing workarounds to access non-canonical names.
.. note::
The :mod:`python:zoneinfo` module is new in Python 3.9, so you will need
to install the :pypi:`backports.zoneinfo` module on earlier versions, and
the :pypi:`importlib_resources` backport on Python 3.6.
``pip install hypothesis[zoneinfo]`` will install these conditional
dependencies if and only if they are needed.
On Windows, you may need to access IANA timezone data via the :pypi:`tzdata`
package. For non-IANA timezones, such as Windows-native names or GNU TZ
strings, we recommend using :func:`~hypothesis.strategies.sampled_from` with
the :pypi:`dateutil` package, e.g. :meth:`dateutil:dateutil.tz.tzwin.list`.
"""
# check_type(bool, allow_alias, "allow_alias")
# check_type(bool, allow_deprecated, "allow_deprecated")
check_type(bool, allow_prefix, "allow_prefix")
if zoneinfo is None: # pragma: no cover
raise ModuleNotFoundError(
"The zoneinfo module is required, but could not be imported. "
"Run `pip install hypothesis[zoneinfo]` and try again."
)
available_timezones = ("UTC",) + tuple(sorted(zoneinfo.available_timezones()))
# TODO: filter out alias and deprecated names if disallowed
# When prefixes are allowed, we first choose a key and then flatmap to get our
# choice with one of the available prefixes. That in turn means that we need
# some logic to determine which prefixes are available for a given key:
def valid_key(key):
return key == "UTC" or _valid_key_cacheable(zoneinfo.TZPATH, key)
# TODO: work out how to place a higher priority on "weird" timezones
# For details see https://github.com/HypothesisWorks/hypothesis/issues/2414
strategy = sampled_from([key for key in available_timezones if valid_key(key)])
if not allow_prefix:
return strategy
def sample_with_prefixes(zone):
keys_with_prefixes = (zone, f"posix/{zone}", f"right/{zone}")
return sampled_from([key for key in keys_with_prefixes if valid_key(key)])
return strategy.flatmap(sample_with_prefixes)
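# Hedged usage sketch (added for illustration, not part of the original snippet): wires the
# strategy into an ordinary Hypothesis test; assumes Python 3.9+ so the stdlib zoneinfo
# module can be imported directly.
import zoneinfo
from hypothesis import given

@given(timezone_keys(allow_prefix=False))
def test_generated_key_resolves(key):
    # every generated key should load as a concrete ZoneInfo instance
    assert zoneinfo.ZoneInfo(key) is not None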
| 25,471
|
def seg_to_bdry(seg, connectivity=1):
"""Given a borderless segmentation, return the boundary map."""
strel = generate_binary_structure(seg.ndim, connectivity)
return maximum_filter(seg, footprint=strel) != \
minimum_filter(seg, footprint=strel)
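# Hedged usage sketch (added for illustration, not part of the original snippet): assumes
# numpy plus the scipy.ndimage helpers imported above; both pixels touching a label change
# are flagged as boundary.
import numpy as np

assert np.array_equal(seg_to_bdry(np.array([[1, 1, 2, 2]])), [[False, True, True, False]])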
| 25,472
|
def depthwise_conv2d(x, filters, strides, padding, data_format="NHWC", dilations=1):
"""Computes a 2-D depthwise convolution given 4-D input x and filters arrays.
Parameters
----------
x
Input image *[batch_size,h,w,d]*.
filters
Convolution filters *[fh,fw,d]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list indicating the per-dimension
paddings.
data_format
NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
Returns
-------
ret
The result of the convolution operation.
"""
return _cur_framework(x).depthwise_conv2d(
x, filters, strides, padding, data_format, dilations
)
| 25,473
|
def OUTA():
"""
The OUTA Operation
"""
control_signal = gen_control_signal_dict()
opcode_addr = gen_opcode_addr_component_dict()
mc_step_addr = gen_microcode_step_addr_component_dict()
input_sig_addr = gen_input_signal_addr_component_dict()
templates = []
# Step 2 - A -> OUT
addresses = rom_programmer.combine_address_components([
mc_step_addr[2],
opcode_addr["OUTA"]
])
data = rom_programmer.combine_data_components([
control_signal["A_OUT"],
control_signal["OUT_IN"]
])
templates.append(rom_programmer.DataTemplate(addresses, data))
# Step 3: Reset microcode step
addresses = rom_programmer.combine_address_components([
mc_step_addr[3],
opcode_addr["OUTA"]
])
data = rom_programmer.combine_data_components([
control_signal["STEP_COUNTER_RESET"]
])
templates.append(rom_programmer.DataTemplate(addresses, data))
return templates
| 25,474
|
def test_fit_returns_self(estimator_instance):
"""Check that fit returns self."""
estimator = estimator_instance
fit_args = _make_args(estimator, "fit")
assert (
estimator.fit(*fit_args) is estimator
), f"Estimator: {estimator} does not return self when calling fit"
| 25,475
|
def incidence_matrices(G, V, E, faces, edge_to_idx):
"""
Returns incidence matrices B1 and B2
:param G: NetworkX DiGraph
:param V: list of nodes
:param E: list of edges
    :param faces: list of faces in G
    :param edge_to_idx: dict mapping each edge (as a node tuple) to its index in E
Returns B1 (|V| x |E|) and B2 (|E| x |faces|)
B1[i][j]: -1 if node i is tail of edge j, 1 if node i is head of edge j, else 0 (tail -> head) (smaller -> larger)
B2[i][j]: 1 if edge i appears sorted in face j, -1 if edge i appears reversed in face j, else 0; given faces with sorted node order
"""
B1 = np.array(nx.incidence_matrix(G, nodelist=V, edgelist=E, oriented=True).todense())
B2 = np.zeros([len(E),len(faces)])
for f_idx, face in enumerate(faces): # face is sorted
edges = [face[:-1], face[1:], [face[0], face[2]]]
e_idxs = [edge_to_idx[tuple(e)] for e in edges]
B2[e_idxs[:-1], f_idx] = 1
B2[e_idxs[-1], f_idx] = -1
return B1, B2
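# Hedged usage sketch (added for illustration, not part of the original snippet): assumes
# networkx and numpy as imported above; a single triangular face gives a 3x3 B1 and 3x1 B2.
import networkx as nx

_E = [(0, 1), (1, 2), (0, 2)]
_G = nx.DiGraph(_E)
_edge_to_idx = {e: i for i, e in enumerate(_E)}
_B1, _B2 = incidence_matrices(_G, [0, 1, 2], _E, [[0, 1, 2]], _edge_to_idx)
print(_B1.shape, _B2.shape)  # (3, 3) (3, 1)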
| 25,476
|
def get_hosts_ram_total(nova, hosts):
"""Get total RAM (free+used) of hosts.
:param nova: A Nova client
:type nova: *
:param hosts: A set of hosts
:type hosts: list(str)
:return: A dictionary of (host, total_ram)
:rtype: dict(str: *)
"""
hosts_ram_total = dict() #dict of (host, total_ram)
for host in hosts:
data = nova.hosts.get(host)
hosts_ram_total[host] = data[0].memory_mb
return hosts_ram_total
| 25,477
|
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-ascii characters,
and converts spaces to hyphens. For use in urls and filenames
From Django's "django/template/defaultfilters.py".
"""
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
if not isinstance(value, unicode):
value = unicode(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(_slugify_strip_re.sub('', value).strip().lower())
return _slugify_hyphenate_re.sub('-', value)
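# Hedged usage sketch (added for illustration, not part of the original snippet): this helper
# targets Python 2 (note the `unicode` built-in); on Python 3 substitute `str` for `unicode`.
# Under Python 2, the following prints "hello-world":
# print(slugify(u"Héllo, Wörld!"))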
| 25,478
|
def wpr(c_close, c_high, c_low, period):
"""
William %R
:type c_close: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
    :rtype: np.ndarray
"""
size = len(c_close)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
e = i + 1
s = e - period
mh = np.max(c_high[s:e])
out[i] = ((mh - c_close[i]) / (mh - np.min(c_low[s:e]))) * -100
return out
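# Hedged usage sketch (added for illustration, not part of the original snippet): assumes
# numpy as np; the first period-1 entries stay NaN, later entries lie in [-100, 0].
import numpy as np

print(wpr(np.array([10.0, 11.0, 12.0]),
          np.array([11.0, 12.0, 13.0]),
          np.array([9.0, 10.0, 11.0]),
          period=2))  # [nan, -33.33..., -33.33...]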
| 25,479
|
def delete_action_log(request, log_id):
"""
    View for deleting an action log.
    This view can only be accessed by superusers and staff.
"""
action = get_object_or_404(ActionLog, id=log_id)
if action.status == 0 or action.status == 1:
messages.error(request, "Cannot delete the Action log that is running or in idle state!")
return redirect('actions')
action.delete()
messages.success(request, "Delete action log successfully!")
return redirect('actions')
| 25,480
|
def hsla_to_rgba(h, s, l, a):
""" 0 <= H < 360, 0 <= s,l,a < 1
"""
h = h % 360
s = max(0, min(1, s))
l = max(0, min(1, l))
a = max(0, min(1, a))
c = (1 - abs(2*l - 1)) * s
x = c * (1 - abs(h/60%2 - 1))
m = l - c/2
if h<60:
r, g, b = c, x, 0
elif h<120:
r, g, b = x, c, 0
elif h<180:
r, g, b = 0, c, x
elif h<240:
r, g, b = 0, x, c
elif h<300:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
return (int((r+m)*255), int((g+m)*255), int((b+m)*255), int(a*255))
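# Hedged usage sketch (added for illustration, not part of the original snippet): pure red and
# a half-transparent green round-trip through the HSL-to-RGB conversion.
assert hsla_to_rgba(0, 1, 0.5, 1) == (255, 0, 0, 255)
assert hsla_to_rgba(120, 1, 0.5, 0.5) == (0, 255, 0, 127)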
| 25,481
|
def prioritize(paths: Dict[int, Path], purpose: str) -> Optional[Path]:
"""Returns highest-priority and existing filepath from ``paths``.
Finds existing configuration or data file in ``paths`` with highest
priority and returns it, otherwise returns ``None``.
"""
for key in sorted(paths.keys(), reverse=True):
if purpose == "config":
if paths[key].exists():
return paths[key]
if purpose == "data":
return paths[key]
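# Hedged usage sketch (added for illustration, not part of the original snippet): the paths
# below are made up; for the "data" purpose the highest-numbered entry wins without touching
# the filesystem, while "config" additionally requires the file to exist.
from pathlib import Path

_paths = {1: Path("/etc/myapp/data"), 2: Path.home() / ".local/share/myapp/data"}
assert prioritize(_paths, "data") == _paths[2]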
| 25,482
|
def traverse(d, path):
"""Return the value at the given path from the given nested dict/list"""
for k in path.split('.'):
if k.isdigit():
k = int(k)
d = d[k]
return d
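# Hedged usage sketch (added for illustration, not part of the original snippet): digit path
# segments index into lists, everything else keys into dicts.
_doc = {"users": [{"name": "ada"}, {"name": "linus"}]}
assert traverse(_doc, "users.1.name") == "linus"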
| 25,483
|
def _bundle_name_with_extension(ctx):
"""Returns the name of the bundle with its extension.
Args:
ctx: The Skylark context.
Returns:
The bundle name with its extension.
"""
return _bundle_name(ctx) + _bundle_extension(ctx)
| 25,484
|
def test_qt_api_ini_config(testdir, monkeypatch, option_api):
"""
Test qt_api ini option handling.
"""
from pytestqt.qt_compat import qt_api
monkeypatch.delenv("PYTEST_QT_API")
testdir.makeini("""
[pytest]
qt_api={option_api}
""".format(option_api=option_api))
testdir.makepyfile('''
import pytest
def test_foo(qtbot):
pass
''')
result = testdir.runpytest_subprocess()
if qt_api.pytest_qt_api.replace('v2', '') == option_api: # handle pyqt4v2
result.stdout.fnmatch_lines([
'* 1 passed in *'
])
else:
try:
ModuleNotFoundError
except NameError:
# Python < 3.6
result.stderr.fnmatch_lines([
'*ImportError:*'
])
else:
# Python >= 3.6
result.stderr.fnmatch_lines([
'*ModuleNotFoundError:*'
])
| 25,485
|
def call(args):
"""
Call args in a subprocess and display output on the fly.
    Return (returncode, stdout, stderr) if the command exits with 0,
    otherwise raise an Exception carrying the same three values.
"""
if TRACE: print('Calling:', ' '.join(args))
with subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding='utf-8'
) as process:
while True:
line = process.stdout.readline()
if not line and process.poll() is not None:
break
if TRACE: print(line.rstrip(), flush=True)
stdout, stderr = process.communicate()
returncode = process.returncode
if returncode == 0:
return returncode, stdout, stderr
else:
raise Exception(returncode, stdout, stderr)
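# Hedged usage sketch (added for illustration, not part of the original snippet): assumes the
# module-level TRACE flag and subprocess import above; a zero exit code returns a tuple,
# a non-zero exit code raises instead.
import sys

_rc, _out, _err = call([sys.executable, "-c", "print('hello')"])
assert _rc == 0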
| 25,486
|
def main():
"""Executa o simulador."""
print("\n------------------------- SIMULADOR -------------------------\n")
custo_max = round(
float(
input(
"""Qual é o custo máximo que você está
disposto a pagar?
::: R$"""
)
)
)
print()
custo_ = float(
input(
f"""Em um intervalo de 0 a {round(custo_max)},
Qual é o custo?
::: R$"""
)
)
print()
beneficio_ = int(
input(
"""Em uma escala de 1 a 5, como você avalia o
benefício esperado?
[ 1 ] baixo
[ 2 ] baixo-moderado
[ 3 ] moderado
[ 4 ] moderado-alto
    [ 5 ] alto
::: """
)
)
print()
while True:
sim_nao = input(
"""Mostrar status do simulador? [s/n]
::: """
).lower()
if sim_nao in "sn":
status = True if sim_nao == "s" else False
break
print()
print(
custo_beneficio_(
custo=custo_,
beneficio=beneficio_,
status_=status,
modelo=lógica_fuzzy(max_=custo_max),
)
)
print("\n---------------------- FIM DO PROGRAMA ----------------------\n")
| 25,487
|
def cvt_axisang_t_o2i(axisang, trans):
"""-correction: t_r, R_rt_r. outer to inner"""
trans -= get_offset(axisang)
return axisang, trans
| 25,488
|
def test_valkeep_without_columns():
"""Testing the ColDrop pipeline stage."""
df = pd.DataFrame([[1, 4], [4, 5], [5, 11]], [1, 2, 3], ['a', 'b'])
res_df = ValKeep([4, 5]).apply(df)
assert 1 not in res_df.index
assert 2 in res_df.index
assert 3 not in res_df.index
| 25,489
|
def processData(dict, valuename, timename='Aika', multiplier=1.0):
"""Process "raw" OData dict and strip only the time and value.
Also convert time to UTC and hydrodynamics model (COHERENS) format.
Parameters
----------
dict: dictionary
Data dictionary as received from OData fetcher
valuename: string
        Name of the value field to process
timename: string
Time field name
multiplier: float
Multiply value with this number. Useful in e.g. unit conversions.
Returns dictionary with processed data.
"""
# Gets valuename field from dict of sites along with timefield and multiplies values by multiplier
# Returns dict of sites with list of values: time, coherenstime, value
tz = pytz.timezone('Europe/Helsinki') # Default data timezone in case it doesn't exist
if numpy.isnan(multiplier):
print("Warning: multiplier ignored (NaN)")
multiplier = 1.0
newdict = {}
for site in dict:
newdata = []
for meas in dict[site]:
time = dateutil.parser.parse(meas[timename])
# If timezone not present, assume local (Finland) timezone
if time.tzinfo is None or time.tzinfo.utcoffset(time) is None:
time = tz.localize(time)
# If timezone is not UTC, convert time to UTC
if time.tzname() != 'UTC':
time = time.astimezone(pytz.utc)
# Convert time from datetime object to COHERENS ASCII format
coherenstime = time.strftime("%Y/%m/%d;%H:%M:%S,000")
value = float(meas[valuename])*multiplier
newdata.append([time, coherenstime, value])
newdict[site] = newdata
return newdict
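# Hedged usage sketch (added for illustration, not part of the original snippet): the site and
# field names are made up, and pytz/dateutil/numpy are assumed imported as above; a naive
# local (Finnish) timestamp is converted to UTC and to the COHERENS ASCII time format.
_raw = {"site1": [{"Aika": "2020-06-01 12:00:00", "Lampotila": "5.0"}]}
print(processData(_raw, "Lampotila")["site1"][0])  # [UTC datetime, 'YYYY/MM/DD;HH:MM:SS,000', 5.0]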
| 25,490
|
def get_current_year():
"""Returns current year
"""
return str(datetime.date.today().year)
| 25,491
|
def test_dict_init():
"""
Tests Config.__init__() using input of dictionary type.
For inner structure check Config.flatten() is used.
"""
#Slashed-structured dictionary initialization
init_dict = {'a' : 1, 'b/c' : 2, 'b/d' : 3}
exp_flat = {'a': 1, 'b/c': 2, 'b/d': 3}
config = Config(init_dict)
assert config.flatten() == exp_flat
#Nested-structured dictionary initialization
init_dict = {'a' : {}, 'b' : {'c' : 2, 'd' : 3}}
exp_flat = {'a': {}, 'b/c': 2, 'b/d': 3}
config = Config(init_dict)
assert config.flatten() == exp_flat
#Mixed-structured dictionary initialization
init_dict = {'a' : None, 'b/c' : 2, 'b' : {'d' : 3}}
exp_flat = {'a': None, 'b/c': 2, 'b/d': 3}
config = Config(init_dict)
assert config.flatten() == exp_flat
#Config-structured dictionary initialization
init_dict = {'a' : Config({'b' : 2})}
exp_flat = {'a/b': 2}
config = Config(init_dict)
assert config.flatten() == exp_flat
| 25,492
|
def parse_ans(text):
"""
Parses the given text as an answer set, i.e., a sequence of predicate
statements. Returns a (possibly empty) tuple of Predicate objects.
"""
return parser.parse_completely(
text,
parser.Rep(PredicateStatement),
devour=devour_asp
)
| 25,493
|
def get_xyz(data):
"""
:param data: 3D data
:return: 3D data coordinates
    The indices along dimensions 1, 2 and 3 increase successively, starting from 1.
"""
nim = data.ndim
if nim == 3:
size_x, size_y, size_z = data.shape
x_arange = np.arange(1, size_x+1)
y_arange = np.arange(1, size_y+1)
z_arange = np.arange(1, size_z+1)
[xx, yy, zz] = np.meshgrid(x_arange, y_arange, z_arange, indexing='ij')
xyz = np.column_stack([zz.flatten(), yy.flatten(), xx.flatten()])
else:
"""
:param data: 2D data
:return: 2D data coordinates
第1,2维数字依次递增
"""
size_x, size_y = data.shape
x_arange = np.arange(1, size_x + 1)
y_arange = np.arange(1, size_y + 1)
[xx, yy] = np.meshgrid(x_arange, y_arange, indexing='ij')
xyz = np.column_stack([yy.flatten(), xx.flatten()])
return xyz
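# Hedged usage sketch (added for illustration, not part of the original snippet): assumes
# numpy as np; for a 2-D input the columns are (column index, row index), both starting at 1.
import numpy as np

_coords = get_xyz(np.zeros((2, 3)))
print(_coords.shape)            # (6, 2)
print(_coords[0], _coords[-1])  # [1 1] [3 2]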
| 25,494
|
async def test_hls_stream(hass, hass_client):
"""
Test hls stream.
Purposefully not mocking anything here to test full
integration with the stream component.
"""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Fetch init
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
init_url = playlist_url + "/init.mp4"
init_response = await http_client.get(init_url)
assert init_response.status == 200
# Fetch segment
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
segment_url = playlist_url + playlist.splitlines()[-1][1:]
segment_response = await http_client.get(segment_url)
assert segment_response.status == 200
# Stop stream, if it hasn't quit already
stream.stop()
# Ensure playlist not accessible after stream ends
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
| 25,495
|
def test_zip_exception_03():
"""
Test zip: zip with tuple of 1 dataset
"""
logger.info("test_zip_exception_03")
data1 = ds.TFRecordDataset(DATA_DIR_1, SCHEMA_DIR_1)
try:
dataz = ds.zip((data1))
dataz = dataz.repeat(2)
num_iter = 0
for _, item in enumerate(dataz.create_dict_iterator()):
logger.info("item[input_mask] is {}".format(item["input_mask"]))
num_iter += 1
logger.info("Number of data in zipped dataz: {}".format(num_iter))
except BaseException as e:
logger.info("Got an exception in DE: {}".format(str(e)))
| 25,496
|
def wc_proximal_gradient(L, mu, gamma, n, verbose=1):
"""
Consider the composite convex minimization problem
.. math:: F_\\star \\triangleq \\min_x \\{F(x) \\equiv f_1(x) + f_2(x)\\},
where :math:`f_1` is :math:`L`-smooth and :math:`\\mu`-strongly convex,
and where :math:`f_2` is closed convex and proper.
This code computes a worst-case guarantee for the **proximal gradient** method (PGM).
That is, it computes the smallest possible :math:`\\tau(n, L, \\mu)` such that the guarantee
.. math :: \\|x_n - x_\\star\\|^2 \\leqslant \\tau(n, L, \\mu) \\|x_0 - x_\\star\\|^2,
is valid, where :math:`x_n` is the output of the **proximal gradient**,
and where :math:`x_\\star` is a minimizer of :math:`F`.
In short, for given values of :math:`n`, :math:`L` and :math:`\\mu`,
:math:`\\tau(n, L, \\mu)` is computed as the worst-case value of
:math:`\\|x_n - x_\\star\\|^2` when :math:`\\|x_0 - x_\\star\\|^2 \\leqslant 1`.
**Algorithm**: Proximal gradient is described by
.. math::
\\begin{eqnarray}
y_t & = & x_t - \\gamma \\nabla f_1(x_t), \\\\
        x_{t+1} & = & \\arg\\min_x \\left\\{f_2(x)+\\frac{1}{2\\gamma}\\|x-y_t\\|^2 \\right\\},
\\end{eqnarray}
    for :math:`t \\in \\{ 0, \\dots, n-1\\}` and where :math:`\\gamma` is a step-size.
**Theoretical guarantee**: It is well known that a **tight** guarantee for PGM is provided by
.. math :: \\|x_n - x_\\star\\|^2 \\leqslant \\max\\{(1-L\\gamma)^2,(1-\\mu\\gamma)^2\\}^n \\|x_0 - x_\\star\\|^2,
    which can be found in, e.g., [1, Theorem 3.1]. It is folk knowledge and the result can be found in many references
    for gradient descent; see, e.g., [2, Section 1.4: Theorem 3], [3, Section 5.1] and [4, Section 4.4].
**References**:
`[1] A. Taylor, J. Hendrickx, F. Glineur (2018). Exact worst-case convergence rates of the proximal gradient
method for composite convex minimization. Journal of Optimization Theory and Applications, 178(2), 455-476.
<https://arxiv.org/pdf/1705.04398.pdf>`_
[2] B. Polyak (1987). Introduction to Optimization. Optimization Software New York.
    `[3] E. Ryu, S. Boyd (2016). A primer on monotone operator methods.
Applied and Computational Mathematics 15(1), 3-43.
<https://web.stanford.edu/~boyd/papers/pdf/monotone_primer.pdf>`_
`[4] L. Lessard, B. Recht, A. Packard (2016). Analysis and design of optimization algorithms via
integral quadratic constraints. SIAM Journal on Optimization 26(1), 57–95.
<https://arxiv.org/pdf/1408.3595.pdf>`_
Args:
L (float): the smoothness parameter.
mu (float): the strong convexity parameter.
gamma (float): proximal step-size.
n (int): number of iterations.
verbose (int): Level of information details to print.
        - -1: No verbose at all.
- 0: This example's output.
- 1: This example's output + PEPit information.
- 2: This example's output + PEPit information + CVXPY details.
Returns:
pepit_tau (float): worst-case value.
theoretical_tau (float): theoretical value.
Example:
>>> pepit_tau, theoretical_tau = wc_proximal_gradient(L=1, mu=.1, gamma=1, n=2, verbose=1)
(PEPit) Setting up the problem: size of the main PSD matrix: 7x7
(PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
(PEPit) Setting up the problem: initial conditions (1 constraint(s) added)
(PEPit) Setting up the problem: interpolation conditions for 2 function(s)
function 1 : 6 constraint(s) added
function 2 : 6 constraint(s) added
(PEPit) Compiling SDP
(PEPit) Calling SDP solver
(PEPit) Solver status: optimal (solver: SCS); optimal value: 0.6560999999942829
*** Example file: worst-case performance of the Proximal Gradient Method in function values***
PEPit guarantee: ||x_n - x_*||^2 <= 0.6561 ||x0 - xs||^2
Theoretical guarantee: ||x_n - x_*||^2 <= 0.6561 ||x0 - xs||^2
"""
# Instantiate PEP
problem = PEP()
# Declare a strongly convex smooth function and a closed convex proper function
f1 = problem.declare_function(SmoothStronglyConvexFunction, mu=mu, L=L)
f2 = problem.declare_function(ConvexFunction)
func = f1 + f2
# Start by defining its unique optimal point xs = x_*
xs = func.stationary_point()
# Then define the starting point x0 of the algorithm
x0 = problem.set_initial_point()
# Set the initial constraint that is the distance between x0 and x^*
problem.set_initial_condition((x0 - xs) ** 2 <= 1)
# Run the proximal gradient method starting from x0
x = x0
for _ in range(n):
y = x - gamma * f1.gradient(x)
x, _, _ = proximal_step(y, f2, gamma)
# Set the performance metric to the distance between x and xs
problem.set_performance_metric((x - xs) ** 2)
# Solve the PEP
pepit_verbose = max(verbose, 0)
pepit_tau = problem.solve(verbose=pepit_verbose)
# Compute theoretical guarantee (for comparison)
theoretical_tau = max((1 - mu*gamma)**2, (1 - L*gamma)**2)**n
# Print conclusion if required
if verbose != -1:
print('*** Example file: worst-case performance of the Proximal Gradient Method in function values***')
print('\tPEPit guarantee:\t ||x_n - x_*||^2 <= {:.6} ||x0 - xs||^2'.format(pepit_tau))
print('\tTheoretical guarantee:\t ||x_n - x_*||^2 <= {:.6} ||x0 - xs||^2 '.format(theoretical_tau))
# Return the worst-case guarantee of the evaluated method ( and the reference theoretical value)
return pepit_tau, theoretical_tau
| 25,497
|
def basic_output_filter(
filtered_prefixes=None,
filtered_patterns=None,
):
"""
Create a line filtering function to help output testing.
:param filtered_prefixes: A list of byte strings representing prefixes that will cause
output lines to be ignored if they start with one of the prefixes. By default lines
starting with the process ID (`'pid'`) and return code (`'rc'`) will be ignored.
:param filtered_patterns: A list of byte strings representing regexes that will cause
output lines to be ignored if they match one of the regexes.
"""
if filtered_prefixes is None:
filtered_prefixes = get_default_filtered_prefixes()
if filtered_patterns is None:
filtered_patterns = get_default_filtered_patterns()
    # compile once into a list so the patterns can be re-checked for every output line
    filtered_patterns = [re.compile(pattern) for pattern in filtered_patterns]
def _filter(output):
filtered_output = []
for line in output.splitlines():
# Filter out stdout that comes from underlying DDS implementation
# Note: we do not currently support matching filters across multiple stdout lines.
if any(line.startswith(prefix) for prefix in filtered_prefixes):
continue
if any(pattern.match(line) for pattern in filtered_patterns):
continue
filtered_output.append(line)
if output.endswith(os.linesep):
filtered_output.append(os.linesep)
return os.linesep.join(filtered_output)
return _filter
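# Hedged usage sketch (added for illustration, not part of the original snippet): the prefix
# and pattern below are made up; lines matching either are dropped from the output.
import os

_filter = basic_output_filter(filtered_prefixes=["pid "], filtered_patterns=[r"^\[DEBUG\]"])
_sample = "pid 1234" + os.linesep + "[DEBUG] noisy" + os.linesep + "hello" + os.linesep
_result = _filter(_sample)
assert "hello" in _result and "1234" not in _result and "noisy" not in _result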
| 25,498
|
def all_asset_types_for_shot(shot, client=default):
"""
Args:
shot (str / dict): The shot dict or the shot ID.
Returns:
list: Asset types from assets casted in given shot.
"""
path = "shots/%s/asset-types" % shot["id"]
return sort_by_name(raw.fetch_all(path, client=client))
| 25,499