content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def test_dask_workers(
    dask_client, # pylint: disable=redefined-outer-name,unused-argument
):
    """Check ``utilities.dask_workers`` against the client's own accounting.

    With ``cores_only=True`` the result must equal the number of workers
    reported by ``Client.ncores``; with ``cores_only=False`` it must equal
    the total thread count summed over ``Client.nthreads``.
    """
    assert utilities.dask_workers(dask_client, cores_only=True) == len(
        dask_client.ncores()) # type: ignore
    assert utilities.dask_workers(dask_client, cores_only=False) == sum(
        item for item in dask_client.nthreads().values())
def dispatch_for_binary_elementwise_apis(x_type, y_type):
  """Decorator to override default implementation for binary elementwise APIs.
  The decorated function (known as the "elementwise api handler") overrides
  the default implementation for any binary elementwise API whenever the value
  for the first two arguments (typically named `x` and `y`) match the specified
  type annotations. The elementwise api handler is called with two arguments:
    `elementwise_api_handler(api_func, x, y)`
  Where `x` and `y` are the first two arguments to the elementwise api, and
  `api_func` is a TensorFlow function that takes two parameters and performs the
  elementwise operation (e.g., `tf.add`).
  The following example shows how this decorator can be used to update all
  binary elementwise operations to handle a `MaskedTensor` type:
  >>> class MaskedTensor(tf.experimental.ExtensionType):
  ...   values: tf.Tensor
  ...   mask: tf.Tensor
  >>> @dispatch_for_binary_elementwise_apis(MaskedTensor, MaskedTensor)
  ... def binary_elementwise_api_handler(api_func, x, y):
  ...   return MaskedTensor(api_func(x.values, y.values), x.mask & y.mask)
  >>> a = MaskedTensor([1, 2, 3, 4, 5], [True, True, True, True, False])
  >>> b = MaskedTensor([2, 4, 6, 8, 0], [True, True, True, False, True])
  >>> c = tf.add(a, b)
  >>> print(f"values={c.values.numpy()}, mask={c.mask.numpy()}")
  values=[ 3 6 9 12 5], mask=[ True True True False False]
  Args:
    x_type: A type annotation indicating when the api handler should be called.
    y_type: A type annotation indicating when the api handler should be called.
  Returns:
    A decorator.
  #### Registered APIs
  The binary elementwise APIs are:
  <<API_LIST>>
  """
  def decorator(handler):
    # Refuse duplicate registrations for the same (x_type, y_type) pair so an
    # earlier handler cannot be silently replaced.
    if (x_type, y_type) in _ELEMENTWISE_API_HANDLERS:
      raise ValueError("A binary elementwise dispatch handler "
                       f"({_ELEMENTWISE_API_HANDLERS[x_type, y_type]}) "
                       f"has already been registered for ({x_type}, {y_type}).")
    _ELEMENTWISE_API_HANDLERS[x_type, y_type] = handler
    # Register the handler with every currently-known binary elementwise API.
    for api in _BINARY_ELEMENTWISE_APIS:
      _add_dispatch_for_binary_elementwise_api(api, x_type, y_type, handler)
    # Return the handler unchanged so the decorated name stays usable.
    return handler
  return decorator
def grid(mat, i, j, k):
    """Return whether the 3x3 subgrid of *mat* anchored at (i, j) contains *k*."""
    cells = [mat[i + row][j + col] for row in range(3) for col in range(3)]
    return lookup(k, cells)
def test_azurecli_binary_isfile(host):
    """
    Tests that the installed ``az`` binary (at PACKAGE_BINARY) is a regular file.

    *host* is the testinfra host fixture.
    """
    assert host.file(PACKAGE_BINARY).is_file
def get_list_channels(sc):
    """Return the list of channels from the Slack API.

    :param sc: Slack client object exposing ``api_call``.
    :return: the ``channels`` list from the API response.

    NOTE(review): ``channels.list`` is a legacy Slack method (superseded by
    ``conversations.list``) — confirm the workspace/token still supports it.
    """
    # https://api.slack.com/methods/channels.list
    response = sc.api_call(
        "channels.list",
    )
    return response['channels']
def error_handler(error):
    """Build a JSON error response from an HTTP error object.

    Expects ``error.description`` to be a dict with a ``'cause'`` key; the
    response body is ``{"cause": ...}`` and the status is ``error.code``.
    (Docstring translated from Japanese: "handler that generates the error
    message".)
    """
    response = jsonify({ 'cause': error.description['cause'] })
    return response, error.code
def display_results(
    matches: Sequence[Match], pattern: str, line_number: bool, files_with_matches: bool,
):
    """Render matches as a colorful two-column table.

    When *files_with_matches* is true, only the unique file names are printed.
    Otherwise each match becomes a msgid/msgstr row; with *line_number* set,
    rows are prefixed with the line number (and the file name when matches
    span more than one file).
    """
    seen_files = {m.file for m in matches}
    if files_with_matches:
        # File-name-only mode: print each file once and stop.
        for name in seen_files:
            print(MAGENTA + name + NO_COLOR)
        return
    prefix_pairs = []
    rows = []
    half_width = (get_terminal_size()[0] - 7) // 2
    for m in matches:
        cell = m.msgid
        if line_number:
            num_prefix = str(m.line) + ":"
            file_prefix = m.file + ":" if len(seen_files) > 1 else ""
            cell = file_prefix + num_prefix + cell
            prefix_pairs.append((num_prefix, file_prefix))
        rows.append(
            [
                fill(cell, width=half_width),
                fill(m.msgstr, width=half_width),
            ]
        )
    print(colorize(tabulate(rows, tablefmt="fancy_grid"), pattern, prefix_pairs))
def test_uuid():
    """Tests that hug's text validator correctly handles UUID values.

    All accepted textual forms (braced, hyphenated, bare hex, URN) must parse
    to the same UUID; malformed values must raise ValueError.
    Examples were taken from https://docs.python.org/3/library/uuid.html"""
    assert hug.types.uuid('{12345678-1234-5678-1234-567812345678}') == UUID('12345678-1234-5678-1234-567812345678')
    # Hyphenated and bare-hex spellings compare equal as UUID objects.
    assert hug.types.uuid('12345678-1234-5678-1234-567812345678') == UUID('12345678123456781234567812345678')
    assert hug.types.uuid('12345678123456781234567812345678') == UUID('12345678-1234-5678-1234-567812345678')
    assert hug.types.uuid('urn:uuid:12345678-1234-5678-1234-567812345678') == \
        UUID('12345678-1234-5678-1234-567812345678')
    # Non-string input is rejected.
    with pytest.raises(ValueError):
        hug.types.uuid(1)
    with pytest.raises(ValueError):
        # Invalid HEX character
        hug.types.uuid('12345678-1234-5678-1234-56781234567G')
    with pytest.raises(ValueError):
        # One character added
        hug.types.uuid('12345678-1234-5678-1234-5678123456781')
    with pytest.raises(ValueError):
        # One character removed
        hug.types.uuid('12345678-1234-5678-1234-56781234567')
def search_records(
    name: str,
    search: TextClassificationSearchRequest = None,
    common_params: CommonTaskQueryParams = Depends(),
    include_metrics: bool = Query(
        False, description="If enabled, return related record metrics"
    ),
    pagination: PaginationParams = Depends(),
    service: TextClassificationService = Depends(
        TextClassificationService.get_instance
    ),
    datasets: DatasetsService = Depends(DatasetsService.get_instance),
    current_user: User = Security(auth.get_user, scopes=[]),
) -> TextClassificationSearchResults:
    """
    Searches data from dataset

    Parameters
    ----------
    name:
        The dataset name
    search:
        The search query request (optional; defaults to an empty request)
    common_params:
        Common query params
    include_metrics:
        Flag to enable include metrics
    pagination:
        The pagination params
    service:
        The dataset records service (injected)
    datasets:
        The dataset service (injected)
    current_user:
        The current request user

    Returns
    -------
    The search results data
    """
    # Default to an empty search/query when the client sends no body.
    search = search or TextClassificationSearchRequest()
    query = search.query or TextClassificationQuery()
    # Resolve the dataset, scoped to the requesting user and workspace.
    dataset = datasets.find_by_name(
        user=current_user, name=name, task=TASK_TYPE, workspace=common_params.workspace
    )
    result = service.search(
        dataset=dataset,
        query=query,
        sort_by=search.sort,
        record_from=pagination.from_,
        size=pagination.limit,
        # The service excludes metrics by default; invert the client flag.
        exclude_metrics=not include_metrics,
    )
    return result
def guess_udic(dic,data):
    """
    Guess parameters of universal dictionary from dic, data pair.

    Parameters
    ----------
    dic : dict
        Dictionary of JCAMP-DX, acqu, proc and spectrum parameters.
    data : ndarray
        Array of NMR data.

    Returns
    -------
    udic : dict
        Universal dictionary of spectral parameters.

    Notes
    -----
    Each parameter is tried from several sources in priority order:
    acqu.par parameters first, then JCAMP-DX header parameters, then (for
    ``sw``) the spectrum axis itself. When nothing works a warning is issued
    and the udic default is left in place.
    """
    # Create an empty universal dictionary
    udic = fileiobase.create_blank_udic(1)
    # Update default parameters; first acqu.par parameters in dic are tried,
    # then JCAMP-DX header parameters.
    # size
    if data is not None:
        udic[0]["size"] = len(data)
    else:
        warn('No data, cannot set udic size')
    # sw (spectral width, Hz)
    try:
        udic[0]['sw'] = float(dic['acqu']['bandwidth']) * 1000
    except KeyError:
        try:
            # JCAMP-DX stores sw in ppm; convert via the base frequency (MHz).
            udic[0]['sw'] = float(dic['dx']['$SW'][0]) * float(dic['dx']['$BF1'][0])
        except KeyError:
            try:
                if dic["spectrum"]["freqdata"]:
                    # Frequency-domain data: width is the axis extent.
                    udic[0]['sw'] = dic["spectrum"]["xaxis"][-1] - dic["spectrum"]["xaxis"][0]
                elif data is not None:
                    # Time-domain data: points / acquisition time.
                    udic[0]['sw'] = len(data) / dic["spectrum"]["xaxis"][-1]
                else:
                    warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
            except KeyError:
                warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
    # obs (observe frequency, MHz)
    try:
        udic[0]['obs'] = float(dic['acqu']['b1Freq'])
    except KeyError:
        try:
            udic[0]['obs'] = float(dic['dx']['$BF1'][0])
        except KeyError:
            warn("Cannot set observe frequency - set manually using: 'udic[0]['obs'] = x' where x is magnetic field in MHz")
    # car (carrier frequency, Hz)
    try:
        udic[0]['car'] = float(dic['acqu']['lowestFrequency']) + (float(dic['acqu']['bandwidth']) * 1000 / 2)
    except KeyError:
        try:
            udic[0]['car'] = (float(dic['dx']['$REFERENCEPOINT'][0]) * -1 ) + (float(dic['dx']['$SW'][0]) * udic[0]['obs'] / 2)
        except KeyError:
            try:
                udic[0]['car'] = (float(dic['dx']['$BF1'][0]) - float(dic['dx']['$SF'][0])) * 1000000
            except KeyError:
                warn("Cannot set carrier - try: 'udic[0]['car'] = x * udic[0]['obs']' where x is the center of the spectrum in ppm")
    # label (observed nucleus)
    try:
        udic[0]['label'] = dic['acqu']['rxChannel']
    except KeyError:
        try:
            label_value = dic['dx'][".OBSERVENUCLEUS"][0].replace("^", "")
            udic[0]["label"] = label_value
        except KeyError:
            warn("Cannot set observed nucleus label")
    # keys left to default:
    # udic[0]['complex']
    # udic[0]['encoding']
    # udic[0]['time'] = True
    # udic[0]['freq'] = False
    return udic
def matchPosAny (msg, pos, rules, subrules):
    """Match `msg` at position `pos` against the first succeeding `subrule`.

    Tries each rule in `subrules` in order and returns the position in `msg`
    just after the first successful match, or -1 when none matches.
    """
    for candidate in subrules:
        end = matchPos(msg, pos, rules, candidate)
        if end != -1:
            return end
    return -1
def _save_rpg(rpg, output_file):
    """Saves the RPG radar file.

    Writes ``rpg.data`` to ``output_file`` as a compressed netCDF-style file
    with time / range / chirp_sequence dimensions, then attaches global
    attributes (title, date, location, history, source) and closes the file.

    Notes:
    """
    # Dimension lengths are taken from the data arrays themselves.
    dims = {'time': len(rpg.data['time'][:]),
            'range': len(rpg.data['range'][:]),
            'chirp_sequence': len(rpg.data['chirp_start_indices'][:])}
    rootgrp = output.init_file(output_file, dims, rpg.data, zlib=True)
    rootgrp.title = f"Radar file from {rpg.location}"
    rootgrp.year, rootgrp.month, rootgrp.day = rpg.date
    rootgrp.location = rpg.location
    rootgrp.history = f"{utils.get_time()} - radar file created"
    rootgrp.source = rpg.source
    rootgrp.close()
async def load(ctx, module: str):
    """Load a cog located in /cogs.

    Loads ``cogs.<module>``, records it in ``loaded_extensions``, and reports
    success or failure both to the log and back to the channel.
    """
    author = str(ctx.message.author)
    module = module.strip()
    try:
        bot.load_extension("cogs.{}".format(module))
        output.info('{} loaded module: {}'.format(author, module))
        loaded_extensions.append(module)
        await bot.say("Successfully loaded {}.py".format(module))
    except Exception as e:
        # Broad catch is deliberate: any load failure is reported to the user
        # rather than crashing the command.
        exc = '{}: {}'.format(type(e).__name__, e)
        output.error('{} attempted to load module \'{}\' but the following '
                     'exception occured;\n\t->{}'.format(author, module, exc))
        await bot.say('Failed to load extension {}\n\t->{}'.format(module, exc))
def test_get_function_code_all():
    """Test get_function_code for every section (start/mid/end).

    Each returned line is expected to gain a four-space indent.
    """
    function = {
        "name": "code_test",
        "code": {"start": ["line 1"], "mid": ["line 2"], "end": ["line 3"]},
    }
    assert functions.get_function_code("start", function) == ["    line 1"]
    assert functions.get_function_code("mid", function) == ["    line 2"]
    assert functions.get_function_code("end", function) == ["    line 3"]
def channelBox(*args, **kwargs):
    """
    This command creates a channel box, which is sensitive to the active list.

    Returns: `string` (the name of the new channel box)

    NOTE: stub signature only — the real implementation is provided by the
    Maya runtime; this body is never executed.
    """
    pass
def compute_accuracy(model, loader):
    """Compute the sample-weighted mean classification accuracy over *loader*.

    :param model: a model which returns (classifier_output, segmentator_output)
    :param loader: data loader yielding ``(x, y, _, _)`` batches
    :return: float accuracy, weighted by batch size
    :raises ValueError: if the loader yields no samples
    """
    import torch  # local import keeps the function self-contained

    model.eval()  # enter evaluation mode
    score_accum = 0
    count = 0
    # Disable autograd: this is pure evaluation, so tracking gradients would
    # only waste time and memory.
    with torch.no_grad():
        for x, y, _, _ in loader:
            classifier_output, _ = model(x)
            batch_size = y.shape[0]
            score_accum += accuracy(classifier_output.data.cpu().numpy(),
                                    y.data.cpu().numpy()) * batch_size
            count += batch_size
    if count == 0:
        # Previously this crashed with an opaque ZeroDivisionError.
        raise ValueError("loader produced no samples")
    return float(score_accum / count)
def create_base_query_grouped_fifo(rse_id, filter_by_rse='destination', session=None):
    """
    Build the sqlalchemy queries to filter relevant requests and to group them in datasets.
    Group requests either by same destination RSE or source RSE.

    :param rse_id: The RSE id.
    :param filter_by_rse: Decide whether to filter by transfer destination or source RSE (`destination`, `source`).
    :param session: The database session.
    :returns: ``(grouped_requests_subquery, filtered_requests_subquery)`` —
        per-dataset aggregates (volume, oldest request, child count) and the
        per-request id/dataset mapping, both restricted to WAITING requests.

    NOTE(review): only mysql/sqlite, postgresql and oracle dialects are
    handled; any other dialect leaves the subqueries as None and the joins
    below would fail — confirm that is acceptable.
    """
    # query DIDs that are attached to a collection and add a column indicating
    # the order of attachment in case of multiple attachments
    attachment_order_subquery = session.query(models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.scope,
                                              func.row_number().over(partition_by=(models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_scope),
                                                                     order_by=models.DataIdentifierAssociation.created_at).label('order_of_attachment'))\
                                       .subquery()
    # query transfer requests and join with according datasets
    filtered_requests_subquery = None
    grouped_requests_subquery = None
    # The NULL-coalescing function differs per backend (ifnull / coalesce /
    # nvl): fall back to the request's own scope/name when it is unattached.
    dialect = session.bind.dialect.name
    if dialect == 'mysql' or dialect == 'sqlite':
        filtered_requests_subquery = session.query(models.Request.id.label('id'),
                                                   func.ifnull(attachment_order_subquery.c.name, models.Request.name).label('dataset_name'),
                                                   func.ifnull(attachment_order_subquery.c.scope, models.Request.scope).label('dataset_scope'))
        combined_attached_unattached_requests = session.query(func.ifnull(attachment_order_subquery.c.scope, models.Request.scope).label('scope'),
                                                              func.ifnull(attachment_order_subquery.c.name, models.Request.name).label('name'),
                                                              models.Request.bytes,
                                                              models.Request.requested_at)
    elif dialect == 'postgresql':
        filtered_requests_subquery = session.query(models.Request.id.label('id'),
                                                   func.coalesce(attachment_order_subquery.c.name, models.Request.name).label('dataset_name'),
                                                   func.coalesce(attachment_order_subquery.c.scope, models.Request.scope).label('dataset_scope'))
        combined_attached_unattached_requests = session.query(func.coalesce(attachment_order_subquery.c.scope, models.Request.scope).label('scope'),
                                                              func.coalesce(attachment_order_subquery.c.name, models.Request.name).label('name'),
                                                              models.Request.bytes,
                                                              models.Request.requested_at)
    elif dialect == 'oracle':
        filtered_requests_subquery = session.query(models.Request.id.label('id'),
                                                   func.nvl(attachment_order_subquery.c.name, models.Request.name).label('dataset_name'),
                                                   func.nvl(attachment_order_subquery.c.scope, models.Request.scope).label('dataset_scope'))
        combined_attached_unattached_requests = session.query(func.nvl(attachment_order_subquery.c.scope, models.Request.scope).label('scope'),
                                                              func.nvl(attachment_order_subquery.c.name, models.Request.name).label('name'),
                                                              models.Request.bytes,
                                                              models.Request.requested_at)
    # Outer-join on the FIRST attachment only (order_of_attachment == 1) so
    # each request maps to at most one dataset.
    filtered_requests_subquery = filtered_requests_subquery.join(attachment_order_subquery, and_(models.Request.name == attachment_order_subquery.c.child_name,
                                                                                                 models.Request.scope == attachment_order_subquery.c.child_scope,
                                                                                                 attachment_order_subquery.c.order_of_attachment == 1), isouter=True)
    combined_attached_unattached_requests = combined_attached_unattached_requests.join(attachment_order_subquery, and_(models.Request.name == attachment_order_subquery.c.child_name,
                                                                                                                       models.Request.scope == attachment_order_subquery.c.child_scope,
                                                                                                                       attachment_order_subquery.c.order_of_attachment == 1), isouter=True)
    # depending if throttler is used for reading or writing
    if filter_by_rse == 'source':
        filtered_requests_subquery = filtered_requests_subquery.filter(models.Request.source_rse_id == rse_id)
        combined_attached_unattached_requests = combined_attached_unattached_requests.filter(models.Request.source_rse_id == rse_id)
    elif filter_by_rse == 'destination':
        filtered_requests_subquery = filtered_requests_subquery.filter(models.Request.dest_rse_id == rse_id)
        combined_attached_unattached_requests = combined_attached_unattached_requests.filter(models.Request.dest_rse_id == rse_id)
    filtered_requests_subquery = filtered_requests_subquery.filter(models.Request.state == RequestState.WAITING).subquery()
    combined_attached_unattached_requests = combined_attached_unattached_requests.filter(models.Request.state == RequestState.WAITING).subquery()
    # group requests and calculate properties like oldest requested_at, amount of children, volume
    grouped_requests_subquery = session.query(func.sum(combined_attached_unattached_requests.c.bytes).label('volume'),
                                              func.min(combined_attached_unattached_requests.c.requested_at).label('oldest_requested_at'),
                                              func.count().label('amount_childs'),
                                              combined_attached_unattached_requests.c.name,
                                              combined_attached_unattached_requests.c.scope)\
                                       .group_by(combined_attached_unattached_requests.c.scope, combined_attached_unattached_requests.c.name)\
                                       .subquery()
    return grouped_requests_subquery, filtered_requests_subquery
def then(state1, state2):
    """
    Sequence two stateful actions, discarding the first action's result.

    Like ``bind``, but instead of a function that returns a stateful action,
    bind a fixed second action. Equivalent to
    ``bind(state1, lambda _: state2)``.
    """
    def _ignore_result(_):
        return state2
    return bind(state1, _ignore_result)
def get_skyregions_collection(run_id: Optional[int]=None) -> Dict[str, Any]:
    """
    Produce the sky-region geometry shapes JSON object for d3-celestial.

    Args:
        run_id (int, optional): Run ID to filter on if not None.

    Returns:
        dict: a FeatureCollection with one MultiLineString outline (and a
        centre "loc" property) per sky region.
    """
    regions = SkyRegion.objects.all()
    if run_id is not None:
        regions = regions.filter(run=run_id)
    feature_list = []
    for region in regions:
        # d3-celestial expects RA in [-180, 180]; wrap values above 180.
        wrap = 360. if region.centre_ra > 180. else 0.
        ra_centre = region.centre_ra - wrap
        dec_centre = region.centre_dec
        half_ra = region.width_ra / 2.
        half_dec = region.width_dec / 2.
        region_id = region.id
        # Closed rectangle: last corner repeats the first.
        outline = [
            [ra_centre + half_ra, dec_centre + half_dec],
            [ra_centre + half_ra, dec_centre - half_dec],
            [ra_centre - half_ra, dec_centre - half_dec],
            [ra_centre - half_ra, dec_centre + half_dec],
            [ra_centre + half_ra, dec_centre + half_dec],
        ]
        feature_list.append(
            {
                "type": "Feature",
                "id": f"SkyRegion{region_id}",
                "properties": {
                    "n": f"{region_id:02d}",
                    "loc": [ra_centre, dec_centre]
                },
                "geometry": {
                    "type": "MultiLineString",
                    "coordinates": [outline]
                }
            }
        )
    return {
        "type": "FeatureCollection",
        "features": feature_list
    }
def construct_sru_query(keyword, keyword_type=None, mat_type=None, cat_source=None):
    """
    Creates readable SRU/CQL query, does not encode white spaces or parenthesis -
    this is handled by the session obj.

    With no keyword_type the keyword is treated as a ready-made SRU query;
    otherwise it is wrapped in the matching index clause. Material type and
    cataloging source add further AND-ed clauses when recognized.
    """
    if keyword is None:
        raise TypeError("query argument cannot be None.")
    # Index name per keyword type.
    keyword_indexes = {
        "ISBN": "srw.bn",
        "UPC": "srw.sn",
        "ISSN": "srw.in",
        "OCLC #": "srw.no",
        "LCCN": "srw.dn",
    }
    # Fixed clause per material type ("any"/None/unknown add nothing).
    mat_type_clauses = {
        "print": 'srw.mt = "bks"',
        "large print": 'srw.mt = "lpt"',
        "dvd": 'srw.mt = "dvv"',
        "bluray": 'srw.mt = "bta"',
    }
    parts = []
    if keyword_type is None:
        # take as straight sru query and pass to sru_query method
        parts.append(keyword.strip())
    elif keyword_type in keyword_indexes:
        parts.append('{} = "{}"'.format(keyword_indexes[keyword_type], keyword))
    if mat_type in mat_type_clauses:
        parts.append(mat_type_clauses[mat_type])
    if cat_source == "DLC":
        parts.append('srw.pc = "dlc"')
    return " AND ".join(parts)
def sitemap_xml():
    """Default Sitemap XML.

    Renders the sitemap template with all show years (oldest first) and
    returns it with the ``text/xml`` MIME type.
    """
    show_years = retrieve_show_years(reverse_order=False)
    sitemap = render_template("sitemaps/sitemap.xml",
                              show_years=show_years)
    return Response(sitemap, mimetype="text/xml")
def test_geo_value():
    """
    Test geo values: write GEO_VALUES to a set, run backup-and-restore, and
    verify the restored records match.
    """
    lib.backup_and_restore(
        lambda context: put_values(lib.SET, "key", GEO_VALUES),
        None,  # no intermediate check between backup and restore
        lambda context: check_values(lib.SET, "key", GEO_VALUES)
    )
def remove_property(product_id, property_id):
    """
    Remove the type property identified by (product_id, property_id).

    :return: the removed property serialized as JSON.
    :raises TypePropertyException: when the property does not exist or the
        delete fails (the session is rolled back in that case).
    """
    # Named "prop" to avoid shadowing the builtin `property`.
    prop = db.db.session.query(TypeProperty).\
        filter_by(product_id = product_id, product_property_id = property_id).first()
    if prop is None:
        # Previously a missing row only surfaced as a TypeError inside
        # session.delete(None); fail explicitly instead.
        raise TypePropertyException("Could not remove type property")
    # Serialize BEFORE deleting: after commit the ORM instance may be expired
    # and no longer readable.
    serialized = prop.to_dict()
    try:
        db.db.session.delete(prop)
        db.db.session.commit()
    except Exception as e:
        db.db.session.rollback()
        print(e)
        raise TypePropertyException("Could not remove type property")
    return(json_util.dumps(serialized))
def plot_result(data, xlabel, ylabel, title, output_filename):
    """plot the results similar to the figures in our paper

    :param data: The input data sets to plots. e.g., {algorithm_epsilon: [(test_epsilon, pvalue), ...]}
    :param xlabel: The label for x axis.
    :param ylabel: The label for y axis.
    :param title: The title of the figure.
    :param output_filename: The output file name.
    :return: None
    """
    # setup the figure
    plt.ylim(0.0, 1.0)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    # colors and markers for each claimed epsilon
    markers = ['s', 'o', '^', 'x', '*', '+', 'p']
    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
              '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
    # add an auxiliary line for p-value=0.05 (the significance threshold)
    plt.axhline(y=0.05, color='black', linestyle='dashed', linewidth=1.2)
    for i, (epsilon, points) in enumerate(data.items()):
        # add an auxiliary vertical line for the claimed privacy
        plt.axvline(x=float(epsilon), color=colors[i % len(
            colors)], linestyle='dashed', linewidth=1.2)
        # plot the (test epsilon, p-value) curve for this claimed epsilon
        x = [item[0] for item in points]
        p = [item[1] for item in points]
        plt.plot(x, p, 'o-',
                 label=f'$\\epsilon_0$ = {epsilon}', markersize=8, marker=markers[i % len(markers)], linewidth=3)
    # plot legends (frameless)
    legend = plt.legend()
    legend.get_frame().set_linewidth(0.0)
    # save the figure and clear the canvas for next draw
    plt.savefig(output_filename, bbox_inches='tight')
    plt.gcf().clear()
def format_solution_table_calc(solution, node_ids_to_nodes):
    """Convert per-color node-id paths into spreadsheet-style cell strings.

    :type solution: dict[int, list[int]]
    :type node_ids_to_nodes: dict[int, int]
    :rtype: dict[int, str]

    Each node id is mapped back to its board index and rendered as
    column-letter + row-digit (e.g. "A0"); a path becomes a space-joined
    string of such cells.

    NOTE(review): relies on a module-level ``width`` (board width) that is
    not defined in this function — confirm it is set before calling.
    """
    new_solution = {}
    for (color, path) in solution.items():
        new_path = []
        for p in path:
            back_p = node_ids_to_nodes[p]
            # column from back_p % width, row from back_p // width
            new_p = "{0}{1}".format(
                chr(back_p % width + ord("A")),
                chr(back_p // width + ord("0")),
            )
            new_path.append(new_p)
        new_solution[color] = " ".join(new_path)
    return new_solution
def hashable(func):
    """Decorator that wraps numpy-array arguments so they become hashable.

    Useful in front of caching decorators such as ``functools.lru_cache``:
    every positional or keyword argument that is a ``np.ndarray`` is wrapped
    in ``HashArray`` before the underlying function is called.

    Example:
        from midgard.math import nputil
        from functools import lru_cache

        @nputil.hashable
        @lru_cache()
        def test_func(a: np.ndarray, b: np.ndarray = None)
            do_something
            return something
    """
    def _wrap(value):
        # Only ndarrays need wrapping; everything else passes through.
        return HashArray(value) if isinstance(value, np.ndarray) else value

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        converted_args = [_wrap(value) for value in args]
        converted_kwargs = {key: _wrap(value) for key, value in kwargs.items()}
        return func(*converted_args, **converted_kwargs)

    return wrapper
def parse_args():
    """
    Parse command-line arguments for the Faster R-CNN demo.

    Recognized options: --gpu (int device id, default 0), --cpu (flag that
    overrides --gpu), --net (network name) and --model (model path).
    """
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0,
                        help='GPU device id to use [0]')
    parser.add_argument('--cpu', dest='cpu_mode', action='store_true',
                        help='Use CPU mode (overrides --gpu)')
    parser.add_argument('--net', dest='demo_net', default='VGGnet_test',
                        help='Network to use [vgg16]')
    parser.add_argument('--model', dest='model', default=' ',
                        help='Model path')
    return parser.parse_args()
def at_export_ex():
    """
    Example wrapper around :func:`at_export() <planar.at_export>`.

    :func:`at_export() <planar.at_export>` can be used to export any
    Numpy ndarray data to a CSV file. This will open a simple GUI dialog box
    asking for the location where the CSV file needs to be saved::

        import arraytool.planar as planar
        planar.at_export()
    """
    planar.at_export()
def _volume_operation(ctx, vca_client, operation):
    """
    Attach or detach a vCloud volume to/from the target VM.

    :param ctx: Cloudify relationship context (source = volume node,
        target = VM node).
    :param vca_client: vCloud API client.
    :param operation: either ``'ATTACH'`` or ``'DETACH'``.
    :raises cfy_exc.NonRecoverableError: when the vCloud task fails or the
        operation name is unknown.
    """
    vdc_name = get_vcloud_config()['vdc']
    vdc = vca_client.get_vdc(vdc_name)
    vmName = get_vapp_name(ctx.target.instance.runtime_properties)
    # External resources carry their name in resource_id; managed volumes in
    # the node's volume properties.
    if ctx.source.node.properties.get('use_external_resource'):
        volumeName = ctx.source.node.properties['resource_id']
    else:
        volumeName = ctx.source.node.properties['volume']['name']
    vapp = vca_client.get_vapp(vdc, vmName)
    # Find the disk reference matching the volume name, then run the
    # requested operation and wait for the vCloud task to finish.
    for ref in vca_client.get_diskRefs(vdc):
        if ref.name == volumeName:
            if operation == 'ATTACH':
                ctx.logger.info("Attach volume node '{0}'."
                                .format(volumeName))
                task = vapp.attach_disk_to_vm(vmName, ref)
                if task:
                    wait_for_task(vca_client, task)
                    ctx.logger.info(
                        "Volume node '{0}' has been attached"
                        .format(volumeName))
                else:
                    raise cfy_exc.NonRecoverableError(
                        "Can't attach disk: '{0}' with error: {1}".
                        format(volumeName, error_response(vapp)))
            elif operation == 'DETACH':
                ctx.logger.info("Detach volume node '{0}'.".format(volumeName))
                task = vapp.detach_disk_from_vm(vmName, ref)
                if task:
                    wait_for_task(vca_client, task)
                    ctx.logger.info(
                        "Volume node '{0}' has been detached.".
                        format(volumeName))
                else:
                    raise cfy_exc.NonRecoverableError(
                        "Can't detach disk: '{0}'. With error: {1}".
                        format(volumeName, error_response(vapp)))
            else:
                raise cfy_exc.NonRecoverableError(
                    "Unknown operation '{0}'".format(operation))
def GetServerSupplicantInfo(TestCaseID):
    """
    Gets the RADIUS Server Information and
    Supplicant name for given test and load them into Env file

    Parameters
    ----------
    TestCaseID : str

    Returns
    -------
    None
        (Results are stored as side effects: ``VarList`` gets the server
        name and ``serverInfo`` gets the supplicant/IP/port/password fields.
        The original "Pass(1)/Fail(-1)" docstring claim did not match the
        code, which returns nothing.)
    """
    # TLS tests use a dedicated server mapping; everything else shares one.
    if dutInfoObject.DUTEAPMethod == "TLS":
        tag = "TLS"
    else:
        tag = "Other"
    serverName = find_Server(TestCaseID, tag)
    VarList.setdefault("RadiusServerName", serverName)
    if dutInfoObject.DUTCategory != -1:
        suppName = find_Supplicant(TestCaseID, "DUT", dutInfoObject.DUTCategory.lower())
        setattr(serverInfo, "Supplicant", suppName)
    staSuppName = find_Supplicant(TestCaseID, "STA", "c2")
    setattr(serverInfo, "STASupplicant", staSuppName)
    setattr(serverInfo, "name", serverName)
    # Server connection details come from the RADIUS server map file.
    setattr(serverInfo, "IP", ReadMapFile(uccPath+RADIUSServer, "%s%s"%(serverName, "IPAddress"), "!"))
    setattr(serverInfo, "Port", ReadMapFile(uccPath+RADIUSServer, "%s%s"%(serverName, "Port"), "!"))
    setattr(serverInfo, "Password", ReadMapFile(uccPath+RADIUSServer, "%s%s"%(serverName, "SharedSecret"), "!"))
    LogMsg(serverInfo)
def object_hash(fd, fmt, repo=None):
    """ Read the content of an open file, build the matching vcs object,
    write it to the vcs directory and return its hash."""
    data = fd.read()
    # choosing constructor on the basis of the object type found in header
    constructors = {
        b'commit': vcsCommit,
        b'tree': vcsTree,
        b'tag': vcsTag,
        b'blob': vcsBlob,
    }
    make = constructors.get(fmt)
    if make is None:
        raise Exception('Unknown type %s!' % fmt)
    return object_write(make(repo, data), repo)
def read_from_pdf(pdf_file):
    """
    Read a PDF file and extract its text content.
    (Docstring translated from Chinese.)

    :param pdf_file: path to the PDF file
    :return: extracted text content of the PDF (also printed to stdout)
    """
    # Open the PDF in binary mode and read its content
    with open(pdf_file, 'rb') as file:
        resource_manage = PDFResourceManager()
        return_str = io.StringIO()
        lap_params = LAParams()
        # Convert the PDF content to text
        device = TextConverter(
            resource_manage,
            return_str,
            laparams = lap_params
        )
        process_pdf(resource_manage, device, file)
        device.close()
        # Retrieve the converted text
        pdf_content = return_str.getvalue()
        print(pdf_content)
        return pdf_content
def _descending(dbus_object):
    """
    Verify levels of variant values always descend by one.

    :param object dbus_object: a dbus object
    :returns: None if there was a failure of the property, otherwise the level
    :rtype: int or NoneType

    None is a better choice than False, for 0, a valid variant level, is always
    interpreted as False.
    """
    # pylint: disable=too-many-return-statements
    if isinstance(dbus_object, dbus.Dictionary):
        # Recurse into both keys and values; any failure below propagates up.
        key_levels = [_descending(x) for x in dbus_object.keys()]
        value_levels = [_descending(x) for x in dbus_object.values()]
        if any(k is None for k in key_levels) or \
           any(v is None for v in value_levels):
            return None
        max_key_level = max(key_levels) if key_levels != [] else 0
        max_value_level = max(value_levels) if value_levels != [] else 0
        max_level = max(max_key_level, max_value_level)
        variant_level = dbus_object.variant_level
        # Level 0 containers just pass the inner maximum through; a non-zero
        # level must be exactly one more than the deepest child.
        if variant_level == 0:
            return max_level
        if variant_level != max_level + 1:
            return None
        else:
            return variant_level
    elif isinstance(dbus_object, (dbus.Array, dbus.Struct)):
        levels = [_descending(x) for x in dbus_object]
        if any(l is None for l in levels):
            return None
        max_level = max(levels) if levels != [] else 0
        variant_level = dbus_object.variant_level
        if variant_level == 0:
            return max_level
        if variant_level != max_level + 1:
            return None
        else:
            return variant_level
    else:
        # Leaf values may only carry level 0 or 1.
        variant_level = dbus_object.variant_level
        return variant_level if variant_level in (0, 1) else None
def _is_avconv():
    """
    Returns truthy if the `ffmpeg` binary is really `avconv`.

    Detection relies on avconv's `ffmpeg -version` output containing the
    word "DEPRECATED". Note: the and-chain may return a falsy non-bool
    (e.g. None/'' from ``_run_command``) rather than strictly False —
    callers should test truthiness only.
    """
    out = _run_command(['ffmpeg', '-version'])
    return out and isinstance(out, strtype) and 'DEPRECATED' in out
def countries(request):
    """
    Returns all valid countries and their country codes as JSON.

    Response shape: ``{"countries": [{"id": code, "name": name}, ...]}``.

    NOTE(review): uses the Python-2 ``unicode`` builtin — this function will
    fail under Python 3 unless ``unicode`` is aliased; confirm the runtime.
    """
    return JsonResponse({
        "countries": [{
            "id": unicode(code),
            "name": unicode(name)
        } for code, name in list(django_countries.countries)]
    })
def get_reads(file, reads=None):
    """
    Collect read counts from *file*, keyed by sample name then read type.

    The sample name is the file's basename up to the first dot. When *reads*
    is given, results are merged into it (the sample's entry is reset first).
    """
    if not reads:
        reads = {}
    # get the sample name from the file
    sample_name = os.path.basename(file).split(".")[0]
    reads[sample_name] = {}
    with open(file) as handle:
        matching_lines = (line for line in handle if READ_COUNT_IDENTIFIER in line)
        for line in matching_lines:
            count, read_type = get_read_count_type(line)
            reads[sample_name][read_type] = count
    return reads
def generate_monomer(species, monomerdict, initlen, initnames, tbobs):
    """
    generate a PySB monomer based on species

    :param species: a Species object
    :param monomerdict: a dictionary with all monomers linked to their species id
    :param initlen: number of the initial species
    :param initnames: names of the initial species
    :param tbobs: collection of species ids for which an Observable is created
    :return: updated monomerdict (the Monomer and Observable are registered
        as PySB side effects)
    """
    # Initial species keep their given names; later species get "sp_<id>".
    if species.id <= initlen:
        name = initnames[species.id - 1]
    else:
        name = 'sp_' + str(species.id)
    sites = ['init']
    m = Monomer(name, sites)
    monomerdict[species.id] = m
    # Only tracked species get an Observable.
    if species.id in tbobs:
        Observable('obs' + name, m(init=None))
    return monomerdict
def convert_bert_tokens(outputs):
    """
    Converts BERT tokens into a readable format for the parser, i.e. using Penn Treebank tokenization scheme.
    Does the heavy lifting for this script.

    For each document, subword pieces are re-joined into words using the
    subtoken map, sentence boundaries are detected via the sentence map, and
    cluster span indices are re-based onto the rebuilt word sequence.

    NOTE(review): output is only emitted when a sentence-boundary change is
    seen inside the loop — confirm the final sentence of each document is
    always followed by such a boundary, otherwise it would be dropped.
    """
    logging.info("Adjusting BERT indices to align with Penn Treebank.")
    mapped_outputs = []  # Will hold the final results: sentences and mapped span indices
    for output in tqdm(outputs):
        comb_text = [word for sentence in output['sentences'] for word in sentence]
        sentence_start_idx = 0
        sent_so_far = []
        word_so_far = []
        sentence_map = output['sentence_map']
        subtoken_map = output['subtoken_map']
        clusters = output['clusters']
        # preds = output['predicted_clusters']
        # top_mentions = output['top_spans']
        for i, subword in enumerate(comb_text):
            if i != 0 and sentence_map[i - 1] != sentence_map[i]:  # New sentence
                # Flush the pending word and emit the completed sentence.
                sent_so_far.append(convert_bert_word(''.join(word_so_far)))
                word_so_far = []
                mapped_outputs.append({'doc_key': output['doc_key'],
                                       'num_speakers': num_speakers(output['speakers']),
                                       'words': sent_so_far,
                                       'clusters': adjust_cluster_indices(clusters, subtoken_map, sentence_start_idx, i - 1)
                                       # 'predicted_clusters': adjust_cluster_indices(preds, subtoken_map, sentence_start_idx, i - 1),
                                       # 'top_mentions': adjust_top_mentions(top_mentions, subtoken_map, sentence_start_idx, i - 1)
                                       })
                sent_so_far = []
                sentence_start_idx = i
            elif i != 0 and subtoken_map[i - 1] != subtoken_map[i]:  # New word
                fullword = ''.join(word_so_far)
                if fullword != '[SEP][CLS]':  # Need this because sentences indices increment at SEP and CLS tokens
                    sent_so_far.append(convert_bert_word(fullword))
                else:
                    sentence_start_idx += 2  # The sentence actually starts two tokens later due to [SEP] and [CLS]
                word_so_far = []
            word_so_far.append(subword)
    return mapped_outputs
def load_proto_message(
    config_path: AnyPath,
    overrides: Sequence[str] = tuple(),
    *,
    msg_class=None,
    extra_include_dirs: Sequence[pathlib.Path] = tuple(),
) -> ProtoMessage:
    """Loads message from the file and applies overrides.

    If message type is not given, will try to guess message type.
    All includes in the loaded messages (root message and includes) will be
    recursively included.

    Composition order:
      * Create empty message of type msg_class
      * Merge includes within the message in config_path
      * Merge the content of config_path
      * Merge includes in overrides
      * Merge scalars in overrides

    Returns the message.
    """
    config_path = pathlib.Path(config_path)
    if msg_class is None:
        msg_class = _guess_message_type(config_path)
    elif hasattr(msg_class, "get_proto_class"):
        msg_class = msg_class.get_proto_class()

    def _resolve_mount(mount):
        if msg_class is ROOT_CFG_CLASS:
            # For convenience, top-level includes do not include name of task, i.e.,
            # `lr=XXX` vs `train_sl.lr=XXX` where train_sl is a name of task. We
            # manually add it.
            return (_get_task_type(config_path) + "." + mount).strip(".")
        else:
            return mount

    # Include search path: the config's own directory, the shared common
    # directory, then any caller-supplied extras.
    include_dirs = []
    include_dirs.append(config_path.resolve().parent)
    include_dirs.append(CONF_ROOT / "common")
    include_dirs.extend(extra_include_dirs)
    include_overides, scalar_overideds = _parse_overrides(overrides, include_dirs=include_dirs)
    logging.debug(
        "Constructing %s from %s with include overrides %s and scalar overrides %s",
        msg_class.__name__,
        config_path,
        include_overides,
        scalar_overideds,
    )
    msg = msg_class()
    # Step 1: Populate message with includes.
    default_includes = _get_config_includes(config_path, msg_class)
    logging.debug("%s defaults %s", msg_class, default_includes)
    for mount, include_msg_path in default_includes.items():
        _apply_include(msg, _resolve_mount(mount), include_msg_path, include_dirs)
    # Step 2: Override the includes with the config content.
    _parse_text_proto_into(config_path, msg)
    if hasattr(msg, INCLUDES_FIELD):
        # The includes were already expanded above; drop the raw field.
        msg.ClearField(INCLUDES_FIELD)
    # Step 3: Override with extra includes.
    for mount, include_msg_path in include_overides.items():
        _apply_include(msg, _resolve_mount(mount), include_msg_path, include_dirs)
    # Step 4: Apply scalar overrides.
    for mount, value in scalar_overideds.items():
        logging.debug(
            "Constructing %s. Applying scalar: mount=%r value=%r",
            msg_class.__name__,
            _resolve_mount(mount),
            value,
        )
        _apply_scalar_override(msg, _resolve_mount(mount), value)
    return msg
def _write_split_notes(dataset_splitter, split, input_file, output_file, metadata_key, group_key):
    """Write the notes assigned to ``split`` from ``input_file`` to ``output_file``.

    Each input line is a JSON note; a note is written out when the splitter
    assigned its group key to the requested split. Files are opened with
    context managers so handles are always closed (the original leaked the
    repeatedly re-opened input file).
    """
    dataset_splitter.set_split(split)
    with open(output_file, 'w') as sink, open(input_file, 'r') as source:
        for line in source:
            note = json.loads(line)
            key = note[metadata_key][group_key]
            if dataset_splitter.check_note(key):
                sink.write(json.dumps(note) + '\n')


def main() -> None:
    """
    Prepare dataset splits - training, validation & testing splits.

    Compute ner distributions in our dataset. Based on this distribution
    and whether we want to keep certain notes grouped (e.g by patient)
    we assign notes to a split, such that the final ner type distribution
    in each split is similar.
    """
    # The following code sets up the arguments to be passed via CLI or via a JSON file
    cli_parser = ArgumentParser(
        description='configuration arguments provided at run time from the CLI',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    cli_parser.add_argument(
        '--input_file',
        type=str,
        required=True,
        help='the jsonl file that contains the notes'
    )
    cli_parser.add_argument(
        '--spans_key',
        type=str,
        default='spans',
        help='the key where the note spans is present in the json object'
    )
    cli_parser.add_argument(
        '--metadata_key',
        type=str,
        default='meta',
        help='the key where the note metadata is present in the json object'
    )
    cli_parser.add_argument(
        '--group_key',
        type=str,
        default='note_id',
        help='the key to group notes by in the json object'
    )
    cli_parser.add_argument(
        '--train_proportion',
        type=int,
        default=70,
        help='ratio of train dataset'
    )
    cli_parser.add_argument(
        '--train_file',
        type=str,
        default=None,
        help='The file to store the train data'
    )
    cli_parser.add_argument(
        '--validation_proportion',
        type=int,
        default=15,
        help='ratio of validation dataset'
    )
    cli_parser.add_argument(
        '--validation_file',
        type=str,
        default=None,
        help='The file to store the validation data'
    )
    cli_parser.add_argument(
        '--test_proportion',
        type=int,
        default=15,
        help='ratio of test dataset'
    )
    cli_parser.add_argument(
        '--test_file',
        type=str,
        default=None,
        help='The file to store the test data'
    )
    cli_parser.add_argument(
        '--margin',
        type=float,
        default=0.3,
        help='margin of error when maintaining proportions in the splits'
    )
    cli_parser.add_argument(
        '--print_dist',
        action='store_true',
        help='whether to print the label distribution in the splits'
    )
    args = cli_parser.parse_args()
    # Assign every grouped note (e.g. all notes of one patient) to a split so
    # the NER type distribution is similar across splits.
    dataset_splitter = DatasetSplitter(
        train_proportion=args.train_proportion,
        validation_proportion=args.validation_proportion,
        test_proportion=args.test_proportion
    )
    dataset_splitter.assign_splits(
        input_file=args.input_file,
        spans_key=args.spans_key,
        metadata_key=args.metadata_key,
        group_key=args.group_key,
        margin=args.margin
    )
    # Write out each requested split; the three loops in the original were
    # identical except for the split name/output path.
    if args.train_proportion > 0:
        _write_split_notes(dataset_splitter, 'train', args.input_file,
                           args.train_file, args.metadata_key, args.group_key)
    if args.validation_proportion > 0:
        _write_split_notes(dataset_splitter, 'validation', args.input_file,
                           args.validation_file, args.metadata_key, args.group_key)
    if args.test_proportion > 0:
        _write_split_notes(dataset_splitter, 'test', args.input_file,
                           args.test_file, args.metadata_key, args.group_key)
    if args.print_dist:
        # Read the dataset and compute the NER type distribution per group key.
        key_counts = Counter()
        ner_distribution = NERDistribution()
        with open(args.input_file, 'r') as source:
            for line in source:
                note = json.loads(line)
                key = note[args.metadata_key][args.group_key]
                key_counts[key] += 1
                ner_distribution.update_distribution(spans=note[args.spans_key], key=key)
        print_distribution = PrintDistribution(ner_distribution=ner_distribution, key_counts=key_counts)
        train_splits = dataset_splitter.get_split('train')
        validation_splits = dataset_splitter.get_split('validation')
        test_splits = dataset_splitter.get_split('test')
        all_splits = train_splits + validation_splits + test_splits
        # Print distribution for each split
        print_distribution.split_distribution(split='total', split_info=all_splits)
        print_distribution.split_distribution(split='train', split_info=train_splits)
        print_distribution.split_distribution(split='validation', split_info=validation_splits)
        print_distribution.split_distribution(split='test', split_info=test_splits)
def calculate_iou(ground_truth_path, prediction_path):
    """Compute the intersection over union (IoU) of two raster images.

    Args:
        ground_truth_path (str): Path to the ground truth raster image.
        prediction_path (str): Path to the prediction raster image.

    Returns:
        float: The intersection over union of the two rasters' first bands,
        treated as boolean masks.
    """
    with rasterio.open(ground_truth_path) as truth_ds, \
            rasterio.open(prediction_path) as pred_ds:
        truth_mask = truth_ds.read(1)
        pred_mask = pred_ds.read(1)
    overlap = np.logical_and(truth_mask, pred_mask)
    combined = np.logical_or(truth_mask, pred_mask)
    return np.sum(overlap) / np.sum(combined)
def bokeh_scatter(x,
                  y=None,
                  *,
                  xlabel='x',
                  ylabel='y',
                  title='',
                  figure=None,
                  data=None,
                  saveas='scatter',
                  copy_data=False,
                  **kwargs):
    """
    Create an interactive scatter plot with bokeh

    :param x: arraylike or key for data for the x-axis
    :param y: arraylike or key for data for the y-axis
    :param data: source for the data of the plot (pandas Dataframe for example)
    :param xlabel: label for the x-axis
    :param ylabel: label for the y-axis
    :param title: title of the figure
    :param figure: bokeh figure (optional), if provided the plot will be added to this figure
    :param saveas: filename used when the plot is saved
    :param copy_data: bool, if True the data argument will be copied

    Kwargs will be passed on to :py:class:`masci_tools.vis.bokeh_plotter.BokehPlotter`.
    If the arguments are not recognized they are passed on to the bokeh function `scatter`
    """
    from bokeh.models import ColumnDataSource

    # Deprecated calling convention: the data source used to be the first
    # positional argument, with column keys given via xdata/ydata kwargs.
    if isinstance(x, (dict, pd.DataFrame, ColumnDataSource)) or x is None:
        warnings.warn(
            'Passing the source as first argument is deprecated. Please pass in source by the keyword data'
            'and xdata and ydata as the first arguments', DeprecationWarning)
        data = x
        x = kwargs.pop('xdata', 'x')
        y = kwargs.pop('ydata', 'y')
    plot_data = process_data_arguments(data=data,
                                       x=x,
                                       y=y,
                                       copy_data=copy_data,
                                       single_plot=True,
                                       same_length=True,
                                       use_column_source=True)
    entry, source = plot_data.items(first=True)
    plot_params.set_defaults(default_type='function', name=entry.y)
    kwargs = plot_params.set_parameters(continue_on_error=True, **kwargs)
    p = plot_params.prepare_figure(title, xlabel, ylabel, figure=figure)
    plot_kwargs = plot_params.plot_kwargs(plot_type='scatter')
    res = p.scatter(x=entry.x, y=entry.y, source=source, **plot_kwargs, **kwargs)
    plot_params.add_tooltips(p, res, entry)
    if plot_params['level'] is not None:
        res.level = plot_params['level']
    plot_params.draw_straight_lines(p)
    plot_params.set_limits(p)
    plot_params.save_plot(p, saveas)
    return p
def rulesActionsHandler(args):
    """ Check rule action and execute associates functions.

    Dispatches on ``args.action`` ('get', 'add', 'update', 'remove', 'link',
    'unlink', 'move'); each branch lazily imports only the helpers it needs.
    Returns the result of the executed helper, or None for an unknown action.

    NOTE(review): this function uses Python 2 ``print`` statement syntax.

    :param args: Rule action
    :return: Return result from the executed functions.
    """
    if 'get' == args.action:
        # get rule arguments :
        # - id:
        #       type: int
        #       args number : 1 or more
        #       required: False
        # - type:
        #       type: str
        #       args number: 1 or more
        #       required: False
        from utils.format import format_output
        if not args.id and not args.type:
            # No filters given: return every rule.
            from utils.rule import get_all_rules
            rules = get_all_rules()
        else:
            if args.type:
                from utils.type import check_type
                check_type(type=args.type)
            from utils.rule import get_rules
            rules = get_rules(type=args.type, id=args.id)
        return format_output(rules)
    if 'add' == args.action:
        # add rule arguments :
        # - type:
        #       type: str
        #       args number : 1
        #       required: True
        # - desc:
        #       type: str
        #       args number: 1
        #       required: True
        # - auditcmd:
        #       type: str
        #       args number: 1 or more
        #       required: False
        #       note: can't be set with auditscript
        # - remedcmd:
        #       type: str
        #       args number: 1 or more
        #       required: False
        #       note: can't be set with remedscript
        # - auditscript:
        #       type: str
        #       args number: 1
        #       required: False
        #       note: can't be set with auditcmd
        # - remedscript:
        #       type: str
        #       args number: 1
        #       required: False
        #       note: can't be set with remedcmd
        from core.exceptions import RuleArgumentsError
        try:
            # Validate mutually exclusive audit options (the remediation
            # checks are deliberately disabled below).
            if args.audit_cmd and args.audit_script:
                raise RuleArgumentsError('Rule cant have auditscript AND auditcmd at the same time')
            # elif args.remed_cmd and args.remed_script:
            #     raise RuleArgumentsError('Rule cant have remedscript AND remedcmd at the same time')
            elif not (args.audit_cmd or args.audit_script):
                raise RuleArgumentsError('Rule must have at least one auditcmd OR one auditscript')
            # elif not (args.remed_cmd or args.remed_script):
            #     raise RuleArgumentsError('Rule must have at least one remedcmd OR one remedscript')
        except RuleArgumentsError as rvd:
            print rvd
            exit(rvd.code)
        from utils.type import update_type, check_type
        from utils.rule import add_rule
        check_type(type=args.type)
        updated_type = add_rule(desc=args.desc, type=args.type, audit_cmd=args.audit_cmd,
                                audit_script=args.audit_script, remed_cmd=args.remed_cmd,
                                remed_script=args.remed_script)
        return update_type(type=updated_type)
    if 'update' == args.action:
        # update rule arguments :
        # - type:
        #       type: str
        #       args number : 1
        #       required: True
        # - id:
        #       type: int
        #       args number : 1
        #       required: True
        # - desc:
        #       type: str
        #       args number: 1
        #       required: False
        # - auditcmd:
        #       type: str
        #       args number: 1 or more
        #       required: False
        #       note: can't be set with auditscript
        # - remedcmd:
        #       type: str
        #       args number: 1 or more
        #       required: False
        #       note: can't be set with remedscript
        # - auditscript:
        #       type: str
        #       args number: 1
        #       required: False
        #       note: can't be set with auditcmd
        # - remedscript:
        #       type: str
        #       args number: 1
        #       required: False
        #       note: can't be set with remedcmd
        from core.exceptions import RuleArgumentsError
        try:
            # if args.audit_cmd and args.audit_script:
            #     raise RuleArgumentsError('Rule cant have auditscript AND auditcmd at the same time')
            # elif args.remed_cmd and args.remed_script:
            #     raise RuleArgumentsError('Rule cant have remedscript AND remedcmd at the same time')
            if not (args.audit_cmd or args.audit_script):
                raise RuleArgumentsError('Rule must have at least one auditcmd OR one auditscript')
        except RuleArgumentsError as rvd:
            print rvd
            exit(rvd.code)
        from utils.rule import update_rule, check_rule
        from utils.type import update_type
        check_rule(type=args.type, id=args.id)
        updated_type = update_rule(desc=args.desc, type=args.type, audit_cmd=args.audit_cmd,
                                   audit_script=args.audit_script, remed_cmd=args.remed_cmd,
                                   remed_script=args.remed_script, id=args.id)
        return update_type(updated_type)
    if 'remove' == args.action:
        # remove rule arguments :
        # - type:
        #       type: str
        #       args number : 1
        #       required: True
        # - id:
        #       type: int
        #       args number : 1
        #       required: True
        # - all
        from core.exceptions import RuleArgumentsError
        try:
            if args.id:
                if args.all:
                    raise RuleArgumentsError("--all option doesn't need an id (all rules will be deleted)")
                else:
                    # Remove a single rule and scrub it from any profiles first.
                    from utils.rule import check_rule
                    check_rule(type=args.type, id=args.id)
                    from utils.profile import clean_profiles
                    clean_profiles(type=args.type, id=args.id)
                    from utils.rule import remove_rule
                    updated_type = remove_rule(type=args.type, id=args.id)
                    from utils.type import update_type
                    return update_type(updated_type)
            else:
                if args.all:
                    # Remove every rule of this type.
                    from utils.profile import clean_profiles
                    clean_profiles(type=args.type)
                    from utils.rule import remove_rule
                    updated_type = remove_rule(type=args.type)
                    from utils.type import update_type
                    return update_type(updated_type)
                else:
                    raise RuleArgumentsError("For removing one rule, id must be set !")
        except RuleArgumentsError as rae:
            print rae
            exit(rae.code)
    if 'link' == args.action:
        # link rule arguments :
        # - profile:
        #       type: str
        #       args number : 1 or more
        #       required: True
        # - type:
        #       type: str
        #       args number : 1
        #       required: True
        # - id:
        #       type: int
        #       args number: 1 or more
        #       required: False
        # - all
        from core.exceptions import RuleArgumentsError
        try:
            if args.all and not args.type:
                raise RuleArgumentsError("--all options can't be used without rule type")
            if args.id:
                if args.all:
                    raise RuleArgumentsError("--all option doesn't need an id (all rules will be added)")
                from utils.rule import check_rule, link_rule
                check_rule(type=args.type, id=args.id)
                return link_rule(profile=args.profile, type=args.type, id=args.id)
            else:
                # id=-1 is the sentinel meaning "all rules of this type".
                from utils.rule import link_rule
                return link_rule(profile=args.profile, type=args.type, id=-1)
        except RuleArgumentsError as rae:
            print rae
            exit(rae.code)
    if 'unlink' == args.action:
        # unlink rule arguments :
        # - profile:
        #       type: str
        #       args number : 1 or more
        #       required: True
        # - type:
        #       type: str
        #       args number : 1
        #       required: True
        # - id:
        #       type: int
        #       args number: 1 or more
        #       required: False
        # - all
        from core.exceptions import RuleArgumentsError
        try:
            if args.id:
                if args.all:
                    raise RuleArgumentsError("--all option doesn't need an id (all rules will be added)")
                else:
                    from utils.rule import unlink_rule, check_rule
                    check_rule(type=args.type, id=args.id)
                    return unlink_rule(profile=args.profile, type=args.type, id=args.id)
            else:
                # id=-1 is the sentinel meaning "all rules of this type".
                from utils.rule import unlink_rule
                return unlink_rule(profile=args.profile, type=args.type, id=-1)
        except RuleArgumentsError as rae:
            print rae
            # NOTE(review): other branches pass rae.code to exit(); passing the
            # exception object itself here is likely a bug — confirm the
            # intended exit status.
            exit(rae)
    if 'move' == args.action:
        # move rule arguments :
        # - type:
        #       type: str
        #       args number : 1
        #       required: True
        # - id:
        #       type: int
        #       args number: 1 or more
        #       required: True
        # - newtype:
        #       type: str
        #       args number : 1
        #       required: True
        # - all
        from utils.type import check_type, update_type
        check_type(args.type)
        check_type(args.newtype)
        from utils.rule import check_rule, move_rule
        check_rule(type=args.type, id=args.id)
        updated_oldtype, updated_newtype = move_rule(oldtype=args.type, id=args.id, newtype=args.newtype)
        update_type(updated_oldtype)
        return update_type(updated_newtype)
    # Unknown action: fall through and return None.
    return
def _check_new_accessions(newfiles: List[Union[NewFiles, PepFiles, SubFiles]]) -> None:
    """Run the secondary-accession checker on new entries and report results.

    Args:
        newfiles: list of NewFiles, PepFiles and/or SubFiles objects.
    """
    for entry in newfiles:
        if not entry:
            click.echo(f"No {str(entry)} to check, skipping accession checks.")
            click.echo("---")
            continue
        click.echo(f"Checking secondary accessions in {str(entry)}...")
        checker = NewAccessionChecker(entry)
        try:
            checker.check()
            if checker.ok:
                click.secho(f"Valid secondary accessions in {str(entry)}.", fg="green")
            else:
                click.secho(
                    f"Found invalid secondary accessions in {str(entry)}.", fg="red"
                )
                for accession, invalid_accessions in checker.entries_with_error:
                    click.echo(
                        f"{accession}: {' '.join(i for i in invalid_accessions)}"
                    )
        except requests.ConnectionError:
            click.secho(
                f"Unable to connect to server: {os.environ['TREMBL_SERVER']}",
                fg="red",
            )
        click.echo("---")
def superposition_training_mnist(model, X_train, y_train, X_test, y_test, num_of_epochs, num_of_tasks, context_matrices, nn_cnn, batch_size=32):
    """
    Train the model on 'num_of_tasks' tasks, where each task is a different
    permutation of the input images, using superposition training, and track
    how the accuracy on the original images changes across tasks.

    :param model: Keras model instance
    :param X_train: train input data
    :param y_train: train output labels
    :param X_test: test input data
    :param y_test: test output labels
    :param num_of_epochs: number of epochs to train the model
    :param num_of_tasks: number of different tasks (permutations of original images)
    :param context_matrices: multidimensional numpy array with random context (binary superposition)
    :param nn_cnn: usage of (convolutional) neural network (possible values: 'nn' or 'cnn')
    :param batch_size: batch size - number of samples per gradient update (default = 32)
    :return: list of test accuracies for 10 epochs for each task
    """
    accuracy_history = []

    # First task: the original (unpermuted) MNIST images.
    history, _, accuracies = train_model(
        model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size,
        validation_share=0.1, mode='superposition', context_matrices=context_matrices, task_index=0)
    accuracy_history.extend(accuracies)
    print_validation_acc(history, 0)

    # Remaining tasks: permuted MNIST data.
    for task_index in range(1, num_of_tasks):
        print("\n\n Task: %d \n" % task_index)

        # Fold this task's context matrices into the current weights for each
        # layer (bias weights are left untouched).
        if nn_cnn == 'nn':
            context_multiplication(model, context_matrices, task_index)
        elif nn_cnn == 'cnn':
            context_multiplication_CNN(model, context_matrices, task_index)

        permuted_X_train = permute_images(X_train)
        history, _, accuracies = train_model(
            model, permuted_X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size,
            validation_share=0.1, mode='superposition', context_matrices=context_matrices, task_index=task_index)
        accuracy_history.extend(accuracies)
        print_validation_acc(history, task_index)

    return accuracy_history
def json2dict(astr: str) -> dict:
    """Deserialize a JSON string into a ``dict``.

    Args:
        astr: JSON-encoded string.

    Returns:
        The decoded ``dict`` data object.
    """
    return json.loads(astr)
def get_con_line(in_path, out_path):
    """Draw a filled contour plot of a 2-D numpy array and save it to a file.

    Parameters:
        in_path: path of the input numpy array (squeezed to 2-D)
        out_path: path of the output figure

    Returns:
        None
    """
    z = np.load(in_path)
    z = np.squeeze(z)
    # x must span the columns (z.shape[1]) and y the rows (z.shape[0]).
    # The original code swapped these, which made contourf fail for
    # non-square arrays because meshgrid(x, y) produces grids of shape
    # (len(y), len(x)) that must match z's shape.
    xlist = np.linspace(0, z.shape[1], z.shape[1])
    ylist = np.linspace(0, z.shape[0], z.shape[0])
    x, y = np.meshgrid(xlist, ylist)
    fig, ax = plt.subplots(1, 1)
    cp = ax.contourf(x, y, z)
    fig.colorbar(cp)  # Add a colorbar to the plot
    ax.set_title('Filled Contours Plot')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    fig.savefig(out_path)
    plt.close(fig)  # release the figure to avoid leaking memory across calls
def any(wanted_type=None):
    """Build an argument matcher that matches on the argument's type (`isinstance`).

    To match *any* type at all, use either `ANY` or `ANY()`.

    Examples::

        when(mock).foo(any).thenReturn(1)
        verify(mock).foo(any(int))
    """
    return Any(wanted_type)
def test_import_python_file_for_first_time(clean_repo, mocker, files_dir: Path):
    """Importing a python file as a module should expose its attributes and
    register the module in sys.modules, even with sys.path emptied."""
    module_name = "some_module"
    module_filename = module_name + ".py"
    func_name = "some_func"

    os.chdir(str(files_dir))
    Repository.initialize()
    clean_repo.activate_root()
    mocker.patch.object(sys, "path", [])

    module = source_utils.import_python_file(module_filename)

    # The function attribute can be fetched from the imported module.
    assert isinstance(getattr(module, func_name), Callable)
    # The module has been loaded into sys.modules.
    assert module_name in sys.modules
    # sys.path was left untouched.
    assert len(sys.path) == 0

    # Cleanup modules for future tests.
    del sys.modules[module_name]
# PLY (yacc) grammar rule: the docstring below IS the grammar production read
# at runtime by the parser generator — its text must not be edited.
def p_html_href(p):
    """html_href : HREF COLON CTESTR snp_href_quad
    | empty"""
def specplot_mel_spec(mel_spec,
                      vmin=-5,
                      vmax=16000,
                      rotate=True,
                      size=512 + 256,
                      **matshow_kwargs):
    """Plot the log magnitude spectrogram of a mel spectrogram."""
    log_magnitude = spectral_ops.compute_logmag_mel_spec(core.tf_float32(mel_spec), size=size)
    if rotate:
        # Rotate so time runs along the x-axis and frequency along the y-axis.
        log_magnitude = np.rot90(log_magnitude)

    # Plotting.
    plt.matshow(log_magnitude,
                vmin=vmin,
                vmax=vmax,
                cmap=plt.cm.magma,
                aspect='auto',
                **matshow_kwargs)
    plt.xticks([])
    plt.yticks([])
    plt.xlabel('Time')
    plt.ylabel('Frequency')
def list_top_level_blob_folders(container_client):
    """
    Return all top-level folders in the ContainerClient object *container_client*.
    """
    folders, _ = walk_container(container_client, max_depth=1, store_blobs=False)
    return folders
def keyword_encipher(message, keyword, wrap_alphabet=KeywordWrapAlphabet.from_a):
    """Encipher a message with a keyword substitution cipher.
    wrap_alphabet controls how the rest of the alphabet is added
    after the keyword.
    0 : from 'a'
    1 : from the last letter in the sanitised keyword
    2 : from the largest letter in the sanitised keyword

    >>> keyword_encipher('test message', 'bayes')
    'rsqr ksqqbds'
    >>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_a)
    'rsqr ksqqbds'
    >>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_last)
    'lskl dskkbus'
    >>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_largest)
    'qspq jsppbcs'
    """
    target_alphabet = keyword_cipher_alphabet_of(keyword, wrap_alphabet)
    translation = str.maketrans(string.ascii_lowercase, target_alphabet)
    return unaccent(message).lower().translate(translation)
def plt_to_img(dummy: any = None, **kwargs) -> PIL.Image.Image:
    """
    Render the current figure as a (PIL) image.
    - The dummy arg supports expression usage `plt_to_img(...)` as well as statement usage `...; plt_to_img()`
    """
    figure_path = plot_to_file(**kwargs)
    return PIL.Image.open(figure_path)
def _escape_char(c, escape_char=ESCAPE_CHAR):
    """Escape a single character as hex byte pairs, each prefixed by ``escape_char``."""
    return ''.join(
        '%s%X' % (escape_char, _ord(byte)) for byte in c.encode('utf8')
    )
def get_syntax_error(error, logging_type="critical"):
    """Log the error (with the caller's location, best-effort) and raise SyntaxError."""
    try:
        caller_info = "Call from %s:%s" % (sys._getframe().f_back.f_code.co_name, sys._getframe(1).f_lineno)
        utils.logs.log(error=caller_info, logging_type=logging_type)
    except Exception:
        # Best-effort: frame introspection may fail; the error itself is still logged.
        pass
    utils.logs.log(error=error, logging_type=logging_type)
    raise SyntaxError("%s" % error)
def error_log_to_html(error_log):
    """Convert an error log into an HTML representation"""
    root = etree.Element('ul')
    for entry in error_log:
        # Skip runtrace noise from the log.
        if entry.message.startswith('<runtrace '):
            continue
        item = etree.Element('li')
        item.attrib['class'] = 'domain_{domain_name} level_{level_name} type_{type_name}'.format(  # NOQA: E501
            domain_name=entry.domain_name,
            level_name=entry.level_name,
            type_name=entry.type_name,
        )
        item.text = '{msg:s} [{line:d}:{column:d}]'.format(
            msg=entry.message,
            line=entry.line,
            column=entry.column,
        )
        root.append(item)
    return root
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
    and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises:
        NotImplementedError: if opt.lr_policy is not a supported policy.
    """
    if opt.lr_policy == "linear":
        def lambda_rule(epoch):
            # 1.0 for the first n_epochs, then linear decay to 0 over n_epochs_decay.
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == "step":
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == "plateau":
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == "cosine":
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
    else:
        # Bug fix: the original *returned* an exception instance (and never
        # interpolated the policy name into the message) instead of raising.
        raise NotImplementedError("learning rate policy [%s] is not implemented" % opt.lr_policy)
    return scheduler
def gen_colors(count, drop_high=True):
    """Generate spread of `count` colors from matplotlib inferno colormap"""
    # Define the norm over one element too many when drop_high is set, then
    # drop the last element before norming, so the brightest end of the
    # color range goes unused. (drop_high is used as an int: True == 1.)
    values = range(0, count + drop_high)
    norm = plt.Normalize(min(values), max(values))
    normed_values = norm(values[0:len(values) - drop_high])
    # Sample the 'inferno' colormap from matplotlib at the normed positions.
    return plt.cm.inferno(normed_values)
def validate_branch():
    """Check the configured release branch against the branches available on
    remote. Returns true if the branch exists on remote. This may be subject
    to false positives, but that should not be an issue."""
    completed = subprocess.run(
        ["/usr/bin/git", "ls-remote", CURRENT_CONFIG['URL']],
        stdout=subprocess.PIPE,
    )
    remote_refs = completed.stdout.decode("utf-8")
    return CURRENT_CONFIG['RELEASE'] in remote_refs
def train_PCA(data, num_components):
    """
    Normalize the face by subtracting the mean image
    Calculate the eigenValue and eigenVector of the training face, in descending order
    Keep only num_components eigenvectors (corresponding to the num_components largest eigenvalues)
    Each training face is represented in this basis by a vector
    Calculate the weight vectors for training images
    Normalized training face = F - mean = w1*u1 + w2*u2 + ... + wk*uk => w = u.T * face
    :param data: M * N^2, each row corresponding to each image, which is reshaped into 1-D vector
    :param num_components: The number of the largest eigenVector to be kept
    :return:
        mean_image: 1 * N^2
        eigenVectors: num_components * N^2 matrix, each row represents each eigenface, in descending order
        weiVec_train: M * K matrix, each row is the weight vectors used to represent the training face
    """
    # Center the data on the mean face.
    mean_image = np.mean(data, axis=0)
    data = data - mean_image
    # `eigen` is a project helper; rows of eigenVectors are assumed to be
    # sorted by descending eigenvalue (per the docstring above).
    eigenValues, eigenVectors = eigen(data)
    eigenVectors = eigenVectors[:num_components]
    # Project each centered face onto the kept eigenfaces: w = face @ u.T
    weiVec_train = np.dot(data, eigenVectors.T)
    return mean_image, eigenVectors, weiVec_train
def test_client():
    """
    A fixture that initialises the Flask application, pushes an application
    context for the duration of a single test, and yields a testing client
    that can be used for making requests to the endpoints exposed by the
    application.
    """
    app = create_app(DATABASE_NAME='test_analysis', TESTING=True)
    client = app.test_client()
    ctx = app.app_context()
    ctx.push()
    yield client
    ctx.pop()
def main():
    """
    example:
    "he made the van this challenge ." vs. "the van made he this challenge ."
    """
    # counterbalance both forms of verb as different forms are the contrast
    past_tense_verbs = [
        'brought',
        'made',
        'built',
        'gave',
        'showed',
    ]

    singular_nouns = get_legal_words(tag='NN')

    animates_raw = (configs.Dirs.legal_words / 'animates.txt').open().read().split()
    animates = find_counterbalanced_subset(animates_raw, min_size=8, max_size=len(animates_raw))

    object_pronouns = ['me', 'him', 'her', 'us', 'them']  # in the objective case
    subject_pronouns = ['i', 'he', 'she', 'we', 'they']  # in the subjective case

    determiners = ['a', 'one', 'this', 'that', 'the', 'my', 'his', 'her']
    vowels = {'a', 'e', 'i', 'o', 'u'}

    while True:
        chosen_vbd = random.choice(past_tense_verbs)  # template 1

        # random choices
        fillers = {
            'nn': random.choice(animates),
            'nn2': random.choice(singular_nouns),
            'det': random.choice(determiners),
            'prp_obj': random.choice(object_pronouns),
            'prp_subj': random.choice(subject_pronouns),
            'vbd': chosen_vbd,
        }

        # "a" -> "an" before a vowel-initial noun
        if fillers['det'] == 'a' and fillers['nn2'][0] in vowels:
            fillers['det'] += 'n'

        yield template1['b'].format(**fillers)  # bad
        yield template1['g'].format(**fillers)  # good
def test_2():
    """
    Situation : This test checks the time complexity of the drop aspect.
                Rows with age > 50 will be dropped, so around half of the
                rows will be dropped.
                The drop aspect should work in O(number_of_rows *
                average_bytes_per_column), and not in O(number_of_rows *
                number_of_rows * average_bytes_per_column).
                This test checks if the slice_table aspect actually works in
                the desired time complexity.
    Args:
    Returns:
    """
    table = pandas.read_csv('data/data_for_test_aspects/test_1.csv')

    # Time the slice call itself.
    start = time.time()
    table = aspects.slice_table(table, [('Age', enums.Filters.LESS_THAN, 51)])
    elapsed = time.time() - start

    print('Execution Time ', elapsed)
    assert elapsed <= 20
def count_revoked_tickets_for_party(party_id: PartyID) -> int:
    """Return the number of revoked tickets for that party."""
    query = db.session.query(DbTicket)
    return query.filter_by(party_id=party_id, revoked=True).count()
def test_limits(client):
    """Requesting resources with a limit should return a slice of the result."""
    for idx in range(100):
        expected_name = "Test User {}".format(idx)
        created = client.post("/api/event/1/badge", json={
            "legal_name": expected_name
        }).json
        assert created['legal_name'] == expected_name

    listed = client.get("/api/event/1/badge", query_string={"limit": 10}).json
    assert len(listed) == 10
def link_cohort_to_partition_group(cohort, partition_id, group_id):
    """
    Create cohort to partition_id/group_id link.
    """
    link = CourseUserGroupPartitionGroup(
        course_user_group=cohort,
        partition_id=partition_id,
        group_id=group_id,
    )
    link.save()
def unzip(zip_file_path, output_dir):
    """
    Unzip the given file into the given directory while preserving file permissions in the process.

    Parameters
    ----------
    zip_file_path : str
        Path to the zip file
    output_dir : str
        Path to the directory where the it should be unzipped to
    """
    with zipfile.ZipFile(zip_file_path, 'r') as archive:
        # Extract every member and restore its permissions if available.
        for member in archive.infolist():
            destination = os.path.join(output_dir, member.filename)
            archive.extract(member.filename, output_dir)
            _set_permissions(member, destination)
def add_service_context(_logger, _method, event_dict):
    """
    Structlog processor that stamps service-environment fields onto the event
    dict, with reasonable defaults when not running in Lambda.
    """
    fallbacks = {
        'region': os.uname().nodename,
        'service': os.path.abspath(__file__),
        'stage': 'dev',
    }
    for field, env_var in (('region', 'REGION'), ('service', 'SERVICE'), ('stage', 'STAGE')):
        event_dict[field] = os.environ.get(env_var, fallbacks[field])
    return event_dict
def get_player_stats():
    """
    Get all the player stats.

    returns dict of dicts: ->
        { player_id: {
            name -> str gamertag,
            discord -> str discord,
            rank -> int rank,
            wins -> int wins,
            losses -> int losses
            is_challenged
            avg_goals_per_challenge -> float goals
            },
          ...
        }
    """
    stats = {}
    logger.info("Retrieving player stats")
    for player_id in get_all_player_ids_ordered():
        # Basic info.
        # TODO: We shouldn't mix up name and gamertag here, needs a refactor
        with Player(player_id) as p:
            stats[p.id] = {
                "name": p.gamertag,
                "discord": p.discord,
                "rank": p.rank,
                "wins": p.wins,
                "losses": p.losses,
                "is_challenged": p.challenged,
            }
        # Now attach the average goals per challenge.
        stats[player_id]["avg_goals_per_challenge"] = get_average_goals_per_challenge(player_id)
    return stats
def get_data(data_x, data_y):
    """
    Split loaded data into train (60%), dev (20%) and test (20%) sets.

    :param data_x: array-like of samples
    :param data_y: array-like of labels
    :return: train_x, train_y, dev_x, dev_y, test_x, test_y arrays
    """
    print('Data X Length', len(data_x), 'Data Y Length', len(data_y))
    print('Data X Example', data_x[0])
    print('Data Y Example', data_y[0])
    # First carve out 40% for evaluation, then split that portion half/half
    # into dev and test (fixed random_state for reproducibility).
    train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.4, random_state=40)
    dev_x, test_x, dev_y, test_y = train_test_split(test_x, test_y, test_size=0.5, random_state=40)
    print('Train X Shape', train_x.shape, 'Train Y Shape', train_y.shape)
    print('Dev X Shape', dev_x.shape, 'Dev Y Shape', dev_y.shape)
    # Fixed label: the first value printed here is the test *X* shape.
    print('Test X Shape', test_x.shape, 'Test Y Shape', test_y.shape)
    return train_x, train_y, dev_x, dev_y, test_x, test_y
async def api_download_profile() -> str:
    """Downloads required files for the current profile."""
    global download_status
    assert core is not None
    # Reset the shared progress map; keys are per-file identifiers.
    download_status = {}

    def update_status(url, path, file_key, done, bytes_downloaded, bytes_expected):
        # Default to 100% when the expected size is unknown or zero so we
        # never divide by zero below.
        bytes_percent = 100
        if (bytes_expected is not None) and (bytes_expected > 0):
            bytes_percent = int(bytes_downloaded / bytes_expected * 100)
        download_status[file_key] = {"done": done, "bytes_percent": bytes_percent}

    await rhasspyprofile.download_files(
        core.profile,
        status_fun=update_status,
        session=get_http_session(),
        ssl_context=ssl_context,
    )
    # Clear the progress map once every download has finished.
    download_status = {}
    return "OK"
def cumulative_segment_wrapper(fun):
    """Wrap a cumulative function such that it can be applied to segments.

    Args:
        fun: The cumulative function

    Returns:
        Wrapped function.
    """
    def wrapped_segment_op(x, segment_ids, **kwargs):
        with tf.compat.v1.name_scope(
                None, default_name=fun.__name__+'_segment_wrapper', values=[x]):
            segments, _ = tf.unique(segment_ids)
            n_segments = tf.shape(segments)[0]
            # infer_shape=False because per-segment results can differ in length.
            output_array = tf.TensorArray(
                x.dtype, size=n_segments, infer_shape=False)

            def loop_cond(i, out):
                return i < n_segments

            def execute_cumulative_op_on_segment(i, out):
                segment_indices = tf.where(tf.equal(segment_ids, segments[i]))
                seg_begin = tf.reduce_min(segment_indices)
                seg_end = tf.reduce_max(segment_indices)
                # Slice the contiguous run of rows belonging to this segment.
                # NOTE(review): assumes each segment's rows are contiguous in x;
                # min/max slicing would silently include foreign rows otherwise — confirm.
                segment_data = x[seg_begin:seg_end+1]
                out = out.write(i, fun(segment_data, **kwargs))
                return i+1, out

            i_end, filled_array = tf.while_loop(
                loop_cond,
                execute_cumulative_op_on_segment,
                loop_vars=(tf.constant(0), output_array),
                parallel_iterations=10,
                swap_memory=True
            )
            # Concatenate per-segment results back into a single tensor and
            # restore the statically-known input shape.
            output_tensor = filled_array.concat()
            output_tensor.set_shape(x.get_shape())
            return output_tensor
    return wrapped_segment_op
def read_pet_types(
    skip: int = 0,
    limit: int = 100,
    db: Session = Depends(deps.get_db),
    current_user: models.User = Depends(deps.get_current_active_superuser)
) -> Any:
    """
    Read pet types (paginated via `skip`/`limit`); superuser only.
    :return: list of pet types
    """
    # NOTE(review): the dependency above already requires an active superuser,
    # so this explicit check looks redundant — confirm before removing it.
    if not crud.user.is_superuser(current_user):
        raise HTTPException(status_code=403, detail="Not enough permissions")
    return crud.pettype.get_all(db=db, skip=skip, limit=limit)
def get_approves_ag_request():
    """Creates the prerequisites for - and then creates and returns an instance of - ApprovesAgRequest."""
    # Creates an access group request and an approver (required to create an instance of ApprovesAgRequest).
    agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING)
    approver = Approver(email="peter@example.org", password="abc123", name="Peter", surname="Parker")
    db.session.add(agr)
    db.session.add(approver)
    # NOTE(review): the objects are added to the session but not committed here —
    # presumably the caller or a test fixture commits; confirm.
    # Returns a ApprovesAgRequest object.
    return ApprovesAgRequest(ag_request=agr, approver=approver)
def currency_history(
    base: str = "USD", date: str = "2020-02-03", api_key: str = ""
) -> pd.DataFrame:
    """
    Historical exchange-rate data from currencyscoop.com
    https://currencyscoop.com/api-documentation

    :param base: The base currency you would like to use for your rates
    :type base: str
    :param date: Specific date, e.g., "2020-02-03"
    :type date: str
    :param api_key: Account -> Account Details -> API KEY (use as password in external tools)
    :type api_key: str
    :return: Rates for the base currency on the given date
    :rtype: pandas.DataFrame
    """
    payload = {"base": base, "date": date, "api_key": api_key}
    url = "https://api.currencyscoop.com/v1/historical"
    r = requests.get(url, params=payload)
    # NOTE(review): no r.raise_for_status() — an HTTP/auth failure surfaces as a
    # KeyError on "response" below; consider checking the status explicitly.
    temp_df = pd.DataFrame.from_dict(r.json()["response"])
    temp_df["date"] = pd.to_datetime(temp_df["date"])
    return temp_df
def compute_aggregate_scores(path_to_results: str, ignore_tasks: List[str] = None) -> None:
    """Computes aggregate scores from a given results file (generated from a previous call to
    `run_senteval`) at `path_to_results`. Tasks can be ignored (e.g. their score will not be computed
    and therefore not contribute to the aggregate score) by passing the task name in the list
    `ignore_tasks`.
    """
    with open(path_to_results, "r") as f:
        results = json.load(f)
    # Delegate the actual aggregation and the reporting to the private helpers.
    aggregate_scores = _compute_aggregate_scores(results, ignore_tasks)
    _print_aggregate_scores(aggregate_scores)
def get_service_legacy(default=None):
    """Helper to get the old {DD,DATADOG}_SERVICE_NAME environment variables
    and output a deprecation warning if they are defined.

    Note that this helper should only be used for migrating integrations which
    use the {DD,DATADOG}_SERVICE_NAME variables to the new DD_SERVICE variable.

    If the environment variables are not in use, no deprecation warning is
    produced and `default` is returned.
    """
    # Check the two legacy variable names in priority order; the first one set
    # wins and triggers the deprecation warning.
    for old_env_key in ["DD_SERVICE_NAME", "DATADOG_SERVICE_NAME"]:
        if old_env_key in os.environ:
            debtcollector.deprecate(
                (
                    "'{}' is deprecated and will be removed in a future version. Please use DD_SERVICE instead. "
                    "Refer to our release notes on Github: https://github.com/DataDog/dd-trace-py/releases/tag/v0.36.0 "
                    "for the improvements being made for service names."
                ).format(old_env_key)
            )
            return os.getenv(old_env_key)
    return default
def test_workload_rbd_cephfs_minimal(
    workload_storageutilization_05p_rbd, workload_storageutilization_05p_cephfs
):
    """
    Similar to test_workload_rbd_cephfs, but using only 5% of total OCS
    capacity. This still test the workload, but it's bit faster and (hopefully)
    without big impact on the cluster itself.

    Mostly usefull as a libtest and regression test for
    https://github.com/red-hat-storage/ocs-ci/issues/1327
    """
    # The test body only logs the fixture results — presumably the fixtures
    # themselves run the storage-utilization workloads; confirm.
    logger.info(workload_storageutilization_05p_rbd)
    logger.info(workload_storageutilization_05p_cephfs)
def get_met_data():
    """
    Download and preprocess the Jena climate dataset (2009-2016).

    Taken from Tensorflow tutorial on time series forecasting:
    https://www.tensorflow.org/tutorials/structured_data/time_series

    Returns a DataFrame with wind converted to (x, y) vector components and
    time encoded as periodic day/year signals.
    """
    zip_path = tf.keras.utils.get_file(
        origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
        fname='jena_climate_2009_2016.csv.zip',
        extract=True)
    csv_path, _ = os.path.splitext(zip_path)
    df = pd.read_csv(csv_path)
    # Slice [start:stop:step], starting from index 5 take every 6th record.
    # (Presumably the raw data is 10-minute samples, making this hourly — confirm.)
    df = df[5::6]
    date_time = pd.to_datetime(df.pop('Date Time'), format='%d.%m.%Y %H:%M:%S')
    # Convert wind from direction/velocity to (x,y) components
    wv = df.pop('wv (m/s)')
    max_wv = df.pop('max. wv (m/s)')
    # Convert to radians.
    wd_rad = df.pop('wd (deg)')*np.pi / 180
    # Calculate the wind x and y components.
    df['Wx'] = wv*np.cos(wd_rad)
    df['Wy'] = wv*np.sin(wd_rad)
    # Calculate the max wind x and y components.
    df['max Wx'] = max_wv*np.cos(wd_rad)
    df['max Wy'] = max_wv*np.sin(wd_rad)
    # Represent time with periodic signals
    timestamp_s = date_time.map(pd.Timestamp.timestamp)
    day = 24*60*60
    year = (365.2425)*day
    df['Day sin'] = np.sin(timestamp_s * (2 * np.pi / day))
    df['Day cos'] = np.cos(timestamp_s * (2 * np.pi / day))
    df['Year sin'] = np.sin(timestamp_s * (2 * np.pi / year))
    df['Year cos'] = np.cos(timestamp_s * (2 * np.pi / year))
    return df
def get_drives():
    """Return the list of accessible drives (empty on non-Windows platforms)."""
    return _get_win_drives() if os.name == "nt" else []
def run_once(sql, dbname, print_rows):
    """Run sql statement once on database

    This is the default run mode for statements

    :param sql: SQL statement to execute
    :param dbname: key of the database connection to use
    :param print_rows: when truthy, pretty-print every returned row
    """
    # `timer` is a context manager that reports elapsed time for this statement.
    with connections[dbname].cursor() as cursor, timer(dbname):
        cursor.execute(sql)
        if print_rows:
            rows = fetch_dicts(cursor)
            for row in rows:
                pprint.pprint(row)
            print("({} rows from {})".format(len(rows), dbname))
def _ref_tier_copy(source_eaf: Type[Eaf] = None,
target_eaf: Type[Eaf] = None,
source_tier_name: str = "",
target_tier_name: str = "",
target_parent_tier_name: str = "",
override_params: Dict[str, str] = {}):
"""
Copy annotations from a ref tier in one EAF to a new ref tier in another EAF
:param source_eaf: The Eaf object to copy from
:param target_eaf: The Eaf object to write to
:param source_tier_name: Name of the tier to get
:param target_tier_name: The name to call this tier in the destination
:param target_parent_tier_name: The name of the parent for the ref tier in the destination object
:param override_params: Use this to change tier params from what the tier has in the source file
:return:
"""
params = override_params if override_params else source_eaf.get_parameters_for_tier(source_tier_name)
target_eaf.add_tier(target_tier_name, ling=params["LINGUISTIC_TYPE_REF"], parent=target_parent_tier_name, tier_dict=params)
annotations = source_eaf.get_ref_annotation_data_for_tier(source_tier_name)
for annotation in annotations:
target_eaf.add_ref_annotation(id_tier=target_tier_name,
tier2=target_parent_tier_name,
time=annotation[0]+1,
value=annotation[2])
return target_eaf | 30,982 |
def push_phy_link():
    """
    Fetch the phyLink and serverNIC tables and push their concatenation into
    meshsr_connection as a single default-flow entry.
    """
    default_flow = []
    cursor.execute("SELECT * FROM phyLink")
    links_dps = cursor.fetchall()
    for link_dp in links_dps:
        # Ensure every entry has 3 fields:
        # link_dp[0]: phyLinkID(int11), link_dp[1]: srcPort(int11), link_dp[2]: dstPort(int11)
        assert len(link_dp) == 3
        src_port = link_dp[1]
        dst_port = link_dp[2]
        # Parameterized queries instead of string interpolation (avoids SQL
        # injection and quoting bugs).
        cnt = cursor.execute("SELECT dpid FROM ports WHERE portID=%s", (src_port,))
        assert cnt == 1
        src_dpid = cursor.fetchone()[0]
        cnt = cursor.execute("SELECT dpid FROM ports WHERE portID=%s", (dst_port,))
        assert cnt == 1
        dst_dpid = cursor.fetchone()[0]
        default_flow.append(dict(bid=src_dpid, eid=dst_dpid, type="dis"))
    cursor.execute("SELECT * FROM serverNIC")
    links_network_card_interface = cursor.fetchall()
    for link_NIC in links_network_card_interface:
        # link_NIC[0]: serNICID(char16), link_NIC[1]: peer(int11), link_NIC[2]: MAC(varchar20)
        assert len(link_NIC) == 3
        serNICID = link_NIC[0]
        peer_port = link_NIC[1]
        cnt = cursor.execute("SELECT dpid FROM ports WHERE portID=%s", (peer_port,))
        assert cnt == 1
        peer_dpid = cursor.fetchone()[0]
        default_flow.append(dict(bid=serNICID, eid=peer_dpid, type="dis"))
    # FIXME the links between dps are bidirection but the dp2server.
    cursor.execute(
        "INSERT INTO meshsr_connection VALUE (NULL, 'default', %s, 'physical links', '')",
        (json.dumps(default_flow),),
    )
    # `print default_flow` was Python 2 syntax (a SyntaxError on Python 3);
    # use the function form, which behaves the same for a single argument.
    print(default_flow)
def test_url_message_init_with_text_must_raise_error():
    """Test the `UrlMessage` type initialization with text must raise error"""
    # A plain sentence is not a valid URL, so validation must reject it.
    with pytest.raises(ValidationError):
        _ = UrlMessage(
            title="URL #1",
            url="This is a text",
        )
def fibonacci(length=10):
    """Get the Fibonacci sequence of the given length.

    Parameters
    ----------
    length : int
        The length of the desired sequence (must be > 0).

    Returns
    -------
    sequence : list of int
        The first `length` Fibonacci numbers, starting 0, 1, 1, 2, ...

    Raises
    ------
    ValueError
        If `length` is < 1.
    """
    if length < 1:
        raise ValueError("Sequence length must be > 0")
    # Build exactly `length` terms directly instead of allocating two extra
    # slots and slicing them off, as the previous implementation did.
    sequence = []
    current, following = 0, 1
    for _ in range(length):
        sequence.append(current)
        current, following = following, current + following
    return sequence
def readForecast(config, stid, model, date, hour_start=6, hour_padding=6, no_hourly_ok=False):
    """
    Return a Forecast object from the main theta-e database for a given model and date. This is specifically designed
    to return a Forecast for a single model and a single day.
    hour_start is the starting hour for the 24-hour forecast period.
    hour_padding is the number of hours on either side of the forecast period to include in the timeseries.

    :param config:
    :param stid: str: station ID
    :param model: str: model name
    :param date: datetime or str: date to retrieve
    :param hour_start: int: starting hour of the day in UTC
    :param hour_padding: int: added hours around the 24-hour TimeSeries
    :param no_hourly_ok: bool: if True, does not raise an error if the hourly timeseries is empty
    :return: Forecast
    """
    # Basic sanity check for hour parameters
    if hour_start < 0 or hour_start > 23:
        raise ValueError('db.readForecast error: hour_start must be between 0 and 23.')
    if hour_padding < 0 or hour_padding > 24:
        raise ValueError('db.readForecast error: hour_padding must be between 0 and 24.')
    # Set the default database configuration; create Forecast
    data_binding = 'forecast'
    if config['debug'] > 9:
        print("db.readForecast: reading forecast from '%s' data binding" % data_binding)
    forecast = Forecast(stid, model, date)
    # The daily forecast part
    table_type = 'DAILY_FORECAST'
    daily = readDaily(config, stid, data_binding, table_type, model, start_date=date, end_date=date)
    # The hourly forecast part
    table_type = 'HOURLY_FORECAST'
    date = date_to_datetime(date)
    # Extend the 24-hour window by hour_padding on both sides.
    start_date = date + timedelta(hours=hour_start - hour_padding)
    end_date = date + timedelta(hours=hour_start + 24 + hour_padding)
    try:
        timeseries = readTimeSeries(config, stid, data_binding, table_type, model, start_date, end_date)
    except MissingDataError:
        # Optionally tolerate a missing hourly series by substituting an empty one.
        if no_hourly_ok:
            timeseries = TimeSeries(stid)
        else:
            raise
    # Assign and return
    forecast.timeseries = timeseries
    forecast.daily = daily
    return forecast
def main() -> None:
    """Read jupyterblack CLI arguments and dispatch them to `run`."""
    # sys.argv[0] is the program name; forward only the actual arguments.
    run(sys.argv[1:])
def get_direct_hit_response(request, query, snuba_params, referrer):
    """
    Checks whether a query is a direct hit for an event, and if so returns
    a response. Otherwise returns None — i.e. when the query is not a valid
    event id, or when it does not match exactly one event.
    """
    event_id = normalize_event_id(query)
    if event_id:
        snuba_args = get_snuba_query_args(
            query=u'id:{}'.format(event_id),
            params=snuba_params)
        results = raw_query(
            selected_columns=SnubaEvent.selected_columns,
            referrer=referrer,
            **snuba_args
        )['data']
        # Only treat it as a direct hit when exactly one event matches.
        if len(results) == 1:
            response = Response(
                serialize([SnubaEvent(row) for row in results], request.user)
            )
            # Marks the response as a direct id lookup — presumably consumed
            # by the caller/frontend; confirm.
            response['X-Sentry-Direct-Hit'] = '1'
            return response
def CXLayer(qc, qreg, order):
    """
    Apply a CX gate between the first two qubits of register `qreg` in
    circuit `qc`. A truthy `order` makes qreg[0] the control and qreg[1]
    the target; a falsy `order` reverses the roles.
    """
    control, target = (qreg[0], qreg[1]) if order else (qreg[1], qreg[0])
    qc.cx(control, target)
def get_env_var(var_name: str) -> Any:
    """Get environment var or raise helpful exception.

    :param var_name: Name of environment variable to get.
    :return: The variable's value.
    :raises: ImproperlyConfigured if environment variable not found.
    """
    # Load variables from a .env file if present (no-op otherwise).
    dotenv.load_dotenv()
    try:
        return os.environ[var_name]
    except KeyError as err:
        error_msg = f"Environment variable {var_name} not set"
        # Chain explicitly so the original KeyError stays visible in tracebacks.
        raise ImproperlyConfigured(error_msg) from err
def _canonicalize_clusters(clusters: List[List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]:
"""
The data might include 2 annotated spans which are identical,
but have different ids. This checks all clusters for spans which are
identical, and if it finds any, merges the clusters containing the
identical spans.
"""
merged_clusters: List[Set[Tuple[int, int]]] = []
for cluster in clusters:
cluster_with_overlapping_mention = None
for mention in cluster:
# Look at clusters we have already processed to
# see if they contain a mention in the current
# cluster for comparison.
for cluster2 in merged_clusters:
if mention in cluster2:
# first cluster in merged clusters
# which contains this mention.
cluster_with_overlapping_mention = cluster2
break
# Already encountered overlap - no need to keep looking.
if cluster_with_overlapping_mention is not None:
break
if cluster_with_overlapping_mention is not None:
# Merge cluster we are currently processing into
# the cluster in the processed list.
cluster_with_overlapping_mention.update(cluster)
else:
merged_clusters.append(set(cluster))
return [list(c) for c in merged_clusters] | 30,991 |
def remove_outliers(matches, keypoints):
    """
    Calculate fundamental matrix between 2 images to remove incorrect matches.
    Return matches with outliers removed. Rejects matches between images if there are < 20.

    :param matches: List of lists of lists where matches[i][j][k] is the kth cv2.DMatch object for images i and j
    :param keypoints: List of lists of cv2.KeyPoint objects. keypoints[i] is list for image i.
    """
    for i in range(len(matches)):
        for j in range(len(matches[i])):
            # Only process each unordered pair once.
            if j <= i:
                continue
            if len(matches[i][j]) < 20:
                matches[i][j] = []
                continue
            kpts_i = []
            kpts_j = []
            for k in range(len(matches[i][j])):
                kpts_i.append(keypoints[i][matches[i][j][k].queryIdx].pt)
                kpts_j.append(keypoints[j][matches[i][j][k].trainIdx].pt)
            kpts_i = np.int32(kpts_i)
            kpts_j = np.int32(kpts_j)
            F, mask = cv2.findFundamentalMat(kpts_i, kpts_j, cv2.FM_RANSAC, ransacReprojThreshold=3)
            # findFundamentalMat can fail and return None; guard BEFORE touching
            # the determinant (previously F=None crashed np.linalg.det).
            if F is None or mask is None:
                matches[i][j] = []
                continue
            # A valid fundamental matrix is rank-2, so its determinant must be ~0.
            if np.linalg.det(F) > 1e-7:
                raise ValueError(f"Bad F_mat between images: {i}, {j}. Determinant: {np.linalg.det(F)}")
            # Keep only the RANSAC inliers.
            matches[i][j] = np.array(matches[i][j])
            matches[i][j] = matches[i][j][mask.ravel() == 1]
            matches[i][j] = list(matches[i][j])
            if len(matches[i][j]) < 20:
                matches[i][j] = []
                continue
    return matches
def err_comp(uh, snap, times_offline, times_online):
    """
    Compute the RMS error norm between the true solution and the NIROM
    solution projected onto the full-dimensional space.

    :param uh: dict of NIROM solutions, one (N, len(times_online)) array per field
    :param snap: dict of true snapshots, one (N, len(times_offline)) array per field
    :param times_offline: times at which the snapshots were taken
    :param times_online: times at which the NIROM solution was evaluated
    :return: dict mapping each field name to its RMS error over time
    """
    # N is the spatial dimension, taken from the first field's snapshot.
    first_key = next(iter(uh))
    N = snap[first_key].shape[0]
    # Map each online time to its index on the offline time grid.
    tstep = np.searchsorted(times_offline, times_online)
    w_rms = {}
    for key, interp in uh.items():
        true = snap[key][:, tstep]
        # Absolute l2 error per time step, then normalize to an RMS value.
        err = np.linalg.norm(true - interp, axis=0)
        w_rms[key] = err / np.sqrt(N)
    return w_rms
def calc_stock_state(portfolio, code: int, date: datetime, stocks, used_days: int):
    """
    Compute the state vector for a stock:
    - the time series of prices / technical indicators / volume
    - total assets and current holdings

    Args:
        portfolio: portfolio with the initial deposit and per-stock positions
        code: stock code to look up
        date: reference date (time-of-day component is ignored)
        stocks: dict of unit share counts plus open/close/high/low/volume data
        used_days: number of days of history to include
    """
    stock_df = stocks[code]['prices']
    date = datetime(date.year, date.month, date.day)  # truncate to midnight for comparison
    try:
        time_series_array = stock_df[stock_df.index <= date][-used_days:].values
    except Exception as e:
        logger.error("datetime comparison error")
        logger.error(e)
        # Re-raise: previously execution fell through and crashed later with an
        # UnboundLocalError that masked the real problem.
        raise
    time_series_array = time_series_array / time_series_array[0]  # normalization
    time_series_list = list(time_series_array.flatten())
    s1 = portfolio.initial_deposit
    s2 = portfolio.stocks[code].total_cost      # total acquisition cost
    s3 = portfolio.stocks[code].current_count   # number of shares currently held
    s4 = portfolio.stocks[code].average_cost    # average acquisition price
    return time_series_list + [s1, s2, s3, s4]
def lowercase_words(words):
    """
    Lowercase every word in a list.

    Parameters
    -----------
    words: list of words to process

    Returns
    -------
    New list with each word converted to lowercase
    """
    return [token.lower() for token in words]
def convert_images_to_arrays_train(file_path, df):
    """
    Converts each image to an array, and appends each array to a new NumPy
    array, based on the image column equaling the image file name.

    INPUT
        file_path: Specified file path for resized test and train images.
            NOTE(review): concatenated directly with the file name below, so it
            must end with a path separator — confirm with callers.
        df: Pandas DataFrame being used to assist file imports.

    OUTPUT
        NumPy array of image arrays.
    """
    lst_imgs = [l for l in df['train_image_name']]
    return np.array([np.array(Image.open(file_path + img)) for img in lst_imgs])
def get_projection_matrix(X_src, X_trg, orthogonal, direction='forward', out=None):
    """
    Learn the linear map between two embedding spaces.

    X_src: ndarray
    X_trg: ndarray
    orthogonal: bool, restrict the solution to an orthogonal matrix (Procrustes)
    direction: str, 'forward' maps X_src onto X_trg; 'backward' the reverse
    out: optional pre-allocated output array passed through to xp.dot
    returns W_src if 'forward', W_trg otherwise
    """
    if direction not in ('forward', 'backward'):
        # Previously an unknown direction fell through and raised a confusing
        # UnboundLocalError on `W`; fail fast with a clear message instead.
        raise ValueError(f"direction must be 'forward' or 'backward', got {direction!r}")
    xp = get_array_module(X_src, X_trg)
    if orthogonal:
        # Orthogonal Procrustes solution via SVD of the cross-covariance.
        if direction == 'forward':
            u, s, vt = xp.linalg.svd(xp.dot(X_trg.T, X_src))
        else:
            u, s, vt = xp.linalg.svd(xp.dot(X_src.T, X_trg))
        W = xp.dot(vt.T, u.T, out=out)
    else:
        # Unconstrained least-squares solution via the pseudo-inverse.
        if direction == 'forward':
            W = xp.dot(xp.linalg.pinv(X_src), X_trg, out=out)
        else:
            W = xp.dot(xp.linalg.pinv(X_trg), X_src, out=out)
    return W
def _standardize_df(data_frame):
"""
Helper function which divides df by std and extracts mean.
:param data_frame: (pd.DataFrame): to standardize
:return: (pd.DataFrame): standardized data frame
"""
return data_frame.sub(data_frame.mean(), axis=1).div(data_frame.std(), axis=1) | 30,998 |
def test_reset_password(client):
    """Test password reset requests."""
    # Create user and login
    USER1 = dict(USER)
    USER1[LABELS['VERIFY']] = False
    r = client.post(config.API_PATH() + '/users/register', json=USER1)
    data = {LABELS['NAME']: 'user1', LABELS['PASSWORD']: 'pwd'}
    r = client.post(config.API_PATH() + '/users/login', json=data)
    token = json.loads(r.data)[LABELS['TOKEN']]
    headers = {HEADER_TOKEN: token}
    r = client.get(config.API_PATH() + '/users/whoami', headers=headers)
    assert r.status_code == 200
    # Reset password for user1. This should also invalidate the access token.
    # NOTE(review): the requests below still succeed (200) with the old token,
    # so token invalidation does not appear to be asserted here — confirm intent.
    data = {LABELS['NAME']: 'user1'}
    url = config.API_PATH() + '/users/password/request'
    r = client.post(url, json=data, headers=headers)
    assert r.status_code == 200
    req_id = json.loads(r.data)[LABELS['REQUEST_ID']]
    data = {LABELS['REQUEST_ID']: req_id, LABELS['PASSWORD']: 'passwd'}
    url = config.API_PATH() + '/users/password/reset'
    r = client.post(url, json=data, headers=headers)
    assert r.status_code == 200
    r = client.get(config.API_PATH() + '/users', headers=headers)
    assert r.status_code == 200
    # The old password is invalid
    data = {LABELS['NAME']: 'user1', LABELS['PASSWORD']: 'pwd'}
    r = client.post(config.API_PATH() + '/users/login', json=data)
    assert r.status_code == 404
    # The new password logs in successfully.
    data = {LABELS['NAME']: 'user1', LABELS['PASSWORD']: 'passwd'}
    r = client.post(config.API_PATH() + '/users/login', json=data)
    assert r.status_code == 200
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.