| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def example_data():
"""Makes some example objects for db testing"""
user = User(username='testo', password='testo', email='testo@test.com',
age=35, gender='nb_gf')
user2 = User(username='boolean', password='bear', email='bb@test.com',
age=32, gender='nb_gf')
user3 = User(username='instance', password='cat', email='ic@test.com',
age=30, gender='nb_gf')
review = Review(user_id=1, game_id=1, score=95, comment=None)
review2 = Review(user_id=2, game_id=1, score=95, comment=None)
critic_review = CriticReview(game_id=1, critic_code='ign', score=100, name="IGN", link="fake")
game = Game(game_id=1, name="Testo", release_date=datetime.datetime.now(), franchise_id=1)
current_game = CurrentGame(user_id=1, game_id=1)
cover = Cover(game_id=1, url="///testo.png", width=360, height=240)
franchise = Franchise(franchise_id=1, name="Testo Stories")
genre = Genre(genre="test")
developer = Developer(name="Testo Games")
platform = Platform(name="Testo360")
screenshot = Screenshot(game_id=1, url="///test.png", width=260, height=240)
db.session.add_all([user, user2, user3, franchise, game])
db.session.commit()
db.session.add_all([review, review2, critic_review, current_game,
cover, genre, developer, platform, screenshot])
db.session.commit()
gameGenre = GameGenre(game_id=1, genre_id=1)
gameDeveloper = GameDeveloper(game_id=1, developer_id=1)
gamePlatform = GamePlatform(game_id=1, platform_id=1)
db.session.add_all([gameGenre, gamePlatform, gameDeveloper])
db.session.commit()
| 11,900
|
def linear_resample(x: Union[ivy.Array, ivy.NativeArray], num_samples: int, axis: int = -1, f: ivy.Framework = None)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Performs linear re-sampling on the input array.
:param x: Input array
:type x: array
:param num_samples: The number of interpolated samples to take.
:type num_samples: int
:param axis: The axis along which to perform the resample. Default is last dimension.
:type axis: int, optional
:param f: Machine learning framework. Inferred from inputs if None.
:type f: ml_framework, optional
:return: The array after the linear resampling.
"""
return _cur_framework(x, f=f).linear_resample(x, num_samples, axis)
| 11,901
|
def mysql_settings():
"""Return a list of dict of settings for connecting to postgresql.
Will return the correct settings, depending on which of the environments it
is running in. It attempts to set variables in the following order, where
later environments override earlier ones.
1. Local
2. Github Actions
"""
if "GITHUB_ACTIONS" in os.environ:
instances = 2
user = password = db = "python_agent"
base_port = 8080
else:
instances = 1
user = db = USER
password = ""
base_port = 3306
settings = [
{
"user": user,
"password": password,
"name": db,
"host": "127.0.0.1",
"port": base_port + instance_num,
"namespace": str(os.getpid()),
}
for instance_num in range(instances)
]
return settings
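# Hedged sketch of the local (non-CI) return value, assuming the module-level USER
# constant holds the current OS user name:
# [{'user': USER, 'password': '', 'name': USER, 'host': '127.0.0.1',
#   'port': 3306, 'namespace': '<pid>'}]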
| 11,902
|
def fill(surface, rect, colour):
"""Fill a single colour.
Takes a standard (r, g, b) tuple.
"""
surface.fill(colour, rect)
| 11,903
|
def update_milestones(repo, username=None, namespace=None):
"""Update the milestones of a project."""
repo = flask.g.repo
form = pagure.forms.ConfirmationForm()
error = False
if form.validate_on_submit():
redirect = flask.request.args.get("from")
milestones = flask.request.form.getlist("milestones")
miles = {}
keys = []
for idx in milestones:
milestone = flask.request.form.get(
"milestone_%s_name" % (idx), None
)
date = flask.request.form.get("milestone_%s_date" % (idx), None)
active = (
True
if flask.request.form.get("milestone_%s_active" % (idx))
else False
)
if milestone and milestone.strip():
milestone = milestone.strip()
if milestone in miles:
flask.flash(
"Milestone %s is present multiple times" % milestone,
"error",
)
error = True
break
miles[milestone] = {
"date": date.strip() if date else None,
"active": active,
}
keys.append(milestone)
if not error:
try:
repo.milestones = miles
repo.milestones_keys = keys
flask.g.session.add(repo)
flask.g.session.commit()
flask.flash("Milestones updated")
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
flask.flash(str(err), "error")
if redirect == "issues":
return flask.redirect(
flask.url_for(
"ui_ns.view_issues",
username=username,
repo=repo.name,
namespace=namespace,
)
)
return flask.redirect(
flask.url_for(
"ui_ns.view_settings",
username=username,
repo=repo.name,
namespace=namespace,
)
+ "#roadmap-tab"
)
| 11,904
|
def installedState(item_pl):
"""Checks to see if the item described by item_pl (or a newer version) is
currently installed
All tests must pass to be considered installed.
Returns 1 if it looks like this version is installed
Returns 2 if it looks like a newer version is installed.
Returns 0 otherwise.
"""
foundnewer = False
if item_pl.get('softwareupdatename'):
availableAppleUpdates = appleupdates.softwareUpdateList()
munkicommon.display_debug2(
'Available Apple updates:\n%s' % availableAppleUpdates)
if item_pl['softwareupdatename'] in availableAppleUpdates:
munkicommon.display_debug1(
'%s is in available Apple Software Updates' %
item_pl['softwareupdatename'])
# return 0 so we're marked as needing to be installed
return 0
else:
munkicommon.display_debug1(
'%s is not in available Apple Software Updates' %
item_pl['softwareupdatename'])
# return 1 so we're marked as not needing to be installed
return 1
# does 'installs' exist and is it non-empty?
if item_pl.get('installs', None):
installitems = item_pl['installs']
for item in installitems:
try:
comparison = compareItemVersion(item)
if comparison in (-1, 0):
return 0
elif comparison == 2:
# this item is newer
foundnewer = True
except munkicommon.Error as errmsg:
# some problem with the installs data
munkicommon.display_error(errmsg)
return 0
# if there is no 'installs' key, then we'll use receipt info
# to determine install status.
elif 'receipts' in item_pl:
receipts = item_pl['receipts']
for item in receipts:
try:
comparison = compareReceiptVersion(item)
if comparison in (-1, 0):
# not there or older
return 0
elif comparison == 2:
foundnewer = True
except munkicommon.Error as errmsg:
# some problem with the receipts data
munkicommon.display_error(errmsg)
return 0
# if we got this far, we passed all the tests, so the item
# must be installed (or we don't have enough info...)
if foundnewer:
return 2
else:
return 1
| 11,905
|
def trapezoid_vectors(t, depth, big_t, little_t):
"""Trapezoid shape, in the form of vectors, for model.
Parameters
----------
t : array_like
Vector of independent values at which to evaluate the trapezoid model.
depth : float
Depth of trapezoid.
big_t : float
Full trapezoid duration.
little_t : float
Ingress/egress duration.
Returns
-------
output : ndarray
Vector of trapezoid model values.
"""
output = np.full_like(t, 1.0)
t = np.abs(t)
big_t_half = big_t * 0.5
little_t_half = little_t * 0.5
one_minus_depth = 1.0 - depth
output = np.where(t <= big_t_half - little_t_half, one_minus_depth, output)
return np.where(
np.logical_and(t > big_t_half - little_t_half,
t < big_t_half + little_t_half),
one_minus_depth + ((depth / little_t) *
(t - big_t_half + little_t_half)),
output)
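# Hedged usage sketch (assumes numpy is imported as np, as the function above requires):
# evaluate the trapezoid model on a small time grid; values dip to 1 - depth inside
# the flat bottom and ramp back to 1.0 through the ingress/egress region.
example_t = np.linspace(-1.0, 1.0, 9)
example_model = trapezoid_vectors(example_t, depth=0.01, big_t=1.0, little_t=0.2)
print(example_model)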
| 11,906
|
def get_rules(clf, class_names, feature_names):
"""
Extracts the rules from a decision tree classifier.
The keyword arguments correspond to the objects returned by
tree.build_tree.
Keyword arguments:
clf: A sklearn.tree.DecisionTreeClassifier.
class_names: A list(str) containing the class names.
feature_names: A list(str) containing the feature names.
Returns:
A list(str) where each element is a rule describing a leaf node.
"""
tree = clf.tree_
rules = traverse(tree, 0, class_names, feature_names, [], [], [], [])
rules = prune_rules(rules, feature_names)
n_rules = len(rules)
print('\tExtracted', n_rules, 'rule' + ('s.' if n_rules > 1 else '.'))
rules_str = []
for (features, thresholds, decisions, class_name) in rules:
rule = lists2rule(features, thresholds, decisions, class_name)
rules_str.append(rule)
return rules_str
| 11,907
|
def save_repo(repo, target="/run/install"):
"""copy a repo to the place where the installer will look for it later."""
newdir = mkdir_seq(os.path.join(target, "DD-"))
log.debug("save_repo: copying %s to %s", repo, newdir)
subprocess.call(["cp", "-arT", repo, newdir])
return newdir
| 11,908
|
def file_parser(localpath = None, url = None, sep = " ", delimiter = "\t"):
"""
DOCSTRING:
INPUT:
> 'localpath' : String (str). Ideally expects a local object with a read() method (such as a file handle or StringIO).
By default, 'localpath=dummy_file' parameter can be passed to auto-detect and parse one of our dummy 'Payments' file in Amazon format. Acceptable input file extensions include .CSV, .TSV and .TXT. Needs to be passed in within quotes, either single or double quotes. Default 'dummy_file' doesn't require additional quotes.
> 'url' : [OPTIONAL] String (str). If supplied with value, 'localpath' needs to be left at default 'None' or else shall output an error message. Expected file type contained within URL should be either in .CSV, .TSV and .TXT format. Needs to be passed in within quotes, either single or double quotes. Default 'url=ur' can be passed w/o additional quotes for fetching dummy data.
> 'sep' : [OPTIONAL] String (str). Optional, and isn't expected to be modified unless critical. Powered by Python’s builtin parsing sniffer tool.
In addition, separators longer than 1 character and different from '\s+' will be interpreted as regular expressions and will also force the use of the Python parsing engine. Note that regex separators are prone to ignoring quoted data. [Regex example: '\r\t'].
> 'delimiter' : [OPTIONAL] String (str). Parameter isn't expected to be modified (Like setting to 'None') unless critical. Alternative argument name for previous argument 'sep', so a careful choice needs to be made.
OUTPUT:
Shall result into a Pandas DataFrame or TextParser for further data processing.
"""
# Checking existence of 'filepath' or 'url' parameter before parsing:
if localpath is None and url is None:
return "Please input EITHER local file path to 'localpath' parameter OR any valid readable URL to 'url' parameter"
elif localpath is not None and url is None:
if localpath.lower().endswith((".txt", ".csv", ".tsv")):
data = pd.read_csv(localpath, sep = sep, delimiter=delimiter, parse_dates=[0], infer_datetime_format=True)
return data
else:
return "This file format is not supported. Kindly refer to our functional flow documentation for further assistance!"
elif localpath is None and url is not None:
data = pd.read_csv(url, sep = sep, delimiter=delimiter, parse_dates=[0], infer_datetime_format=True)
return data
else:
return "Please pass valid input for processing."
| 11,909
|
def is_list(var, *, debug=False):
"""
is this a list or tuple? (DOES NOT include str)
"""
print_debug(debug, "is_list: got type %s" % (type(var)))
return isinstance(var, (list, tuple))
| 11,910
|
def zonotope_minimize(avfun, avdom, avdfun):
"""
Minimize a response surface defined on a zonotope.
:param function avfun: A function of the active variables.
:param ActiveVariableDomain avdom: Contains information about the domain of
`avfun`.
:param function avdfun: Returns the gradient of `avfun`.
:return: ystar, The estimated minimizer of `avfun`.
:rtype: ndarray
:return: fstar, The estimated minimum of `avfun`.
:rtype: float
**See Also**
optimizers.av_minimize
**Notes**
This function wraps the scipy.optimize implementation of SLSQP with linear
inequality constraints derived from the zonotope.
"""
n = avdom.subspaces.W1.shape[1]
logging.getLogger(__name__).debug('Zonotope minimization in {:d} vars.'.format(n))
opts = {'disp':False, 'maxiter':1e4, 'ftol':1e-9}
# a bit of globalization
curr_state = np.random.get_state()
np.random.seed(42)
minf = 1e100
minres = []
for i in range(10):
y0 = np.random.normal(size=(1, n))
cons = avdom.constraints
result = scopt.minimize(avfun, y0, constraints=cons, method='SLSQP', \
jac=avdfun, options=opts)
if not result.success:
raise Exception('SLSQP failed with message: {}.'.format(result.message))
if result.fun < minf:
minf = result.fun
minres = result
logging.getLogger(__name__).debug('\tMinimum {:6.4f}.'.format(minf))
np.random.set_state(curr_state)
ystar, fstar = minres.x, minres.fun
return ystar, fstar
| 11,911
|
def node_to_truncated_gr(node, bin_width=0.1):
"""
Parses truncated GR node to an instance of the
:class: openquake.hazardlib.mfd.truncated_gr.TruncatedGRMFD
"""
# Parse to float dictionary
if not all([node.attrib[key]
for key in ["minMag", "maxMag", "aValue", "bValue"]]):
return None
tgr = dict((key, float_(node.attrib[key])) for key in node.attrib)
return mfd.truncated_gr.TruncatedGRMFD(min_mag=tgr["minMag"],
max_mag=tgr["maxMag"],
bin_width=bin_width,
a_val=tgr["aValue"],
b_val=tgr["bValue"])
| 11,912
|
def nt(node, tag):
""" returns text of the tag or None if the
tag does not exist """
if node.find(tag) is not None and node.find(tag).text is not None:
return node.find(tag).text
else:
return None
| 11,913
|
def set_user_favorites(username, **_):
"""
Sets the user's Favorites
Variables:
username => Name of the user you want to set the favorites for
Arguments:
None
Data Block:
{ # Dictionary of
"alert": [ # favorite types, each holding a list of
{"<name_of_query>": "*:*"}, # named queries: name -> the actual query to run
...
],
...
}
Result example:
{
"success": true # Was saving the favorites successful ?
}
"""
data = request.json
favorites = {
"alert": [],
"search": [],
"signature": [],
"submission": [],
"error": []
}
for key in data:
if key not in favorites:
return make_api_response("", err="Invalid favorite type (%s)" % key, status_code=400)
favorites.update(data)
return make_api_response({"success": STORAGE.user_favorites.save(username, favorites)})
| 11,914
|
def set_kaggle_dir(kaggle_dir, ctx=None):
""" manually set kaggle directory that contains API key """
if not os.path.exists(kaggle_dir):
click.secho("Error: {} does not exist".format(kaggle_dir), fg="red")
exit(1)
if not os.path.isdir(kaggle_dir):
click.secho("Error: {} is not a directory".format(kaggle_dir), fg="red")
exit(1)
config = fo.get_config(ctx)
config["kaggle_dir"] = kaggle_dir
fo.set_config(config, ctx=ctx)
| 11,915
|
def corpus():
"""语料生成器
"""
while True:
f = '/root/data_pretrain/data_shuf.json'
with open(f) as f:
for l in f:
l = json.loads(l)
for text in text_process(l['text']):
yield text
| 11,916
|
def delete_token(token_id):
"""Revoke a specific token in the application auth database.
:type token_id: str
:param token_id: Token identifier
:rtype: tuple
:return: None, status code
"""
client_data = g.client_data
if not valid_token_id(token_id):
raise MalformedTokenIdException
token = current_app.auth_db.lookup_token(token_id)
if token is None:
raise TokenNotFoundException
if not isinstance(token, Token):
raise InternalServerErrorException("auth_db.lookup_token did not return a token object")
if "admin" in client_data.roles:
current_app.auth_db.revoke_token(token_id)
else:
if token.client_id != client_data.client_id:
raise InadequateRolesException("Cannot revoke a token which you do not own")
current_app.auth_db.revoke_token(token_id)
return "", 204
| 11,917
|
async def update_country(identifier: Optional[str] = None, name: Optional[str] = None, capital: Optional[str] = None,
country: UpdateCountryModel = Body(...), current_user: AdminModel = Depends(get_current_user)):
"""
Update a country by name or capital name:
- **current user** should be admin
- **name**: country name
- **capital**: capital name of the country
"""
variables = {'identifier': identifier, 'name': name, 'capital': capital}
options = {'identifier': '_id', 'name': 'name', 'capital': 'capital'}
for key, value in variables.items():
if value is not None:
return await update_object({options[key]: value}, country, 'countries')
raise HTTPException(status_code=404, detail='Set some parameters')
| 11,918
|
def test_api_metrics(mocker, flask_app, api_version):
"""Test Depot ID TBD.
Verify that StatsD is sent statistics about the performance of TAC and IMEI APIs. The
metric name should contain the HTTP status code so that the response times can be
broken down by status code.
"""
# Can't import dirbs.api at top level as it configure logging
import dirbs.api
mocker.patch.object(dirbs.api, 'statsd', auto_spec=True)
rv = flask_app.get(url_for('{0}.imei_api'.format(api_version), imei='0117220037002633'))
assert rv.status_code == 200
dirbs.api.statsd.timing.assert_any_call('dirbs.api.response_time.imei.{0}.GET.200'.format(api_version), mocker.ANY)
dirbs.api.statsd.incr.assert_any_call('dirbs.api.successes.imei.{0}.200'.format(api_version))
dirbs.api.statsd.reset_mock()
rv = flask_app.get(url_for('{0}.tac_api'.format(api_version), tac='12345678'))
assert rv.status_code == 200
dirbs.api.statsd.timing.assert_any_call('dirbs.api.response_time.tac.{0}.GET.200'.format(api_version), mocker.ANY)
dirbs.api.statsd.incr.assert_any_call('dirbs.api.successes.tac.{0}.200'.format(api_version))
for api in ['tac', 'imei']:
dirbs.api.statsd.reset_mock()
rv = flask_app.get(url_for('{0}.{1}_api'.format(api_version, api), **{api: 'aaaaaaaaaaaaaaaaaaaaaaa'}))
assert rv.status_code == 400
dirbs.api.statsd.timing.assert_any_call('dirbs.api.response_time.{0}.{1}.GET.400'.format(api, api_version),
mocker.ANY)
dirbs.api.statsd.incr.assert_any_call('dirbs.api.failures.{0}.{1}.400'.format(api, api_version))
dirbs.api.statsd.reset_mock()
rv = flask_app.post(url_for('{0}.{1}_api'.format(api_version, api), **{api: 'a'}))
assert rv.status_code == 405
dirbs.api.statsd.timing.assert_any_call('dirbs.api.response_time.{0}.{1}.POST.405'.format(api, api_version),
mocker.ANY)
dirbs.api.statsd.incr.assert_any_call('dirbs.api.failures.{0}.{1}.405'.format(api, api_version))
| 11,919
|
def twoSum(nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
"""method-1 time O(n), traverse all, get rest"""
for i in range(len(nums)):
res = target - nums[i]
if res in nums:
return [i, nums.index(res)]
else:
return []
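# Hedged usage sketch: indices of the two numbers that sum to the target, or [] if none.
print(twoSum([2, 7, 11, 15], 9))  # -> [0, 1]
print(twoSum([3, 3], 6))          # -> [1, 0]
print(twoSum([1, 2, 3], 100))     # -> []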
| 11,920
|
def test_model_reset_correctly(tmpdir):
"""Check that model weights are correctly reset after scaling batch size."""
tutils.reset_seed()
model = BatchSizeModel(batch_size=2)
# logger file to get meta
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
before_state_dict = deepcopy(model.state_dict())
trainer.tuner.scale_batch_size(model, max_trials=5)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert torch.all(
torch.eq(before_state_dict[key], after_state_dict[key])
), "Model was not reset correctly after scaling batch size"
assert not any(f for f in os.listdir(tmpdir) if f.startswith(".scale_batch_size"))
| 11,921
|
def transform_image(image, code_vectors):
"""
Quantize image using the code_vectors (aka centroids)
Return a new image by replacing each RGB value in image with the nearest code vector
(nearest in euclidean distance sense)
returns:
numpy array of shape image.shape
"""
assert image.shape[2] == 3 and len(image.shape) == 3, \
'Image should be a 3-D array with size (?,?,3)'
assert code_vectors.shape[1] == 3 and len(code_vectors.shape) == 2, \
'code_vectors should be a 2-D array with size (?,3)'
# TODO
# - replace each pixel (a 3-dimensional point) by its nearest code vector
N, M, D = image.shape
flatten = image.reshape(N*M, D)
# Assign every pixel to its nearest code vector
distances = squared_euclidean_distances(x=flatten, y=code_vectors)
assignment = np.argmin(distances, axis=1)
compressed = code_vectors[assignment].reshape(N, M, D)
return compressed
| 11,922
|
def main():
"""
Main method that collects the ASN from the spine switches using the native model
"""
asn_filter = """
<System xmlns="http://cisco.com/ns/yang/cisco-nx-os-device">
<bgp-items>
<inst-items>
<asn/>
</inst-items>
</bgp-items>
</System>
"""
for device in DEVICES:
with manager.connect(host=device, port=PORT, username=USER,
password=PASS, hostkey_verify=False,
device_params={'name': 'nexus'},
look_for_keys=False, allow_agent=False) as m:
# Add the loopback interface
netconf_response = m.get(('subtree', asn_filter))
# Parse the XML response
xml_data = netconf_response.data_ele
asn = xml_data.find(".//{http://cisco.com/ns/yang/cisco-nx-os-device}asn").text
print("The ASN number for {} {} is {}".format(DEVICE_NAMES[device], device, asn))
| 11,923
|
def create_linking_context_from_compilation_outputs(
*,
actions,
additional_inputs = [],
alwayslink = False,
compilation_outputs,
feature_configuration,
label,
linking_contexts = [],
module_context,
name = None,
swift_toolchain,
user_link_flags = []):
"""Creates a linking context from the outputs of a Swift compilation.
On some platforms, this function will spawn additional post-compile actions
for the module in order to add their outputs to the linking context. For
example, if the toolchain that requires a "module-wrap" invocation to embed
the `.swiftmodule` into an object file for debugging purposes, or if it
extracts auto-linking information from the object files to generate a linker
command line parameters file, those actions will be created here.
Args:
actions: The context's `actions` object.
additional_inputs: A `list` of `File`s containing any additional files
that are referenced by `user_link_flags` and therefore need to be
propagated up to the linker.
alwayslink: If True, any binary that depends on the providers returned
by this function will link in all of the library's object files,
even if some contain no symbols referenced by the binary.
compilation_outputs: A `CcCompilationOutputs` value containing the
object files to link. Typically, this is the second tuple element in
the value returned by `swift_common.compile`.
feature_configuration: A feature configuration obtained from
`swift_common.configure_features`.
label: The `Label` of the target being built. This is used as the owner
of the linker inputs created for post-compile actions (if any), and
the label's name component also determines the name of the artifact
unless it is overridden by the `name` argument.
linking_contexts: A `list` of `CcLinkingContext`s containing libraries
from dependencies.
name: A string that is used to derive the name of the library or
libraries linked by this function. If this is not provided or is a
falsy value, the name component of the `label` argument is used.
module_context: The module context returned by `swift_common.compile`
containing information about the Swift module that was compiled.
Typically, this is the first tuple element in the value returned by
`swift_common.compile`.
swift_toolchain: The `SwiftToolchainInfo` provider of the toolchain.
user_link_flags: A `list` of strings containing additional flags that
will be passed to the linker for any binary that links with the
returned linking context.
Returns:
A tuple of `(CcLinkingContext, CcLinkingOutputs)` containing the linking
context to be propagated by the caller's `CcInfo` provider and the
artifact representing the library that was linked, respectively.
"""
extra_linking_contexts = [
cc_info.linking_context
for cc_info in swift_toolchain.implicit_deps_providers.cc_infos
]
if module_context and module_context.swift:
post_compile_linker_inputs = []
# Ensure that the .swiftmodule file is embedded in the final library or
# binary for debugging purposes.
if should_embed_swiftmodule_for_debugging(
feature_configuration = feature_configuration,
module_context = module_context,
):
post_compile_linker_inputs.append(
ensure_swiftmodule_is_embedded(
actions = actions,
feature_configuration = feature_configuration,
label = label,
swiftmodule = module_context.swift.swiftmodule,
swift_toolchain = swift_toolchain,
),
)
# Invoke an autolink-extract action for toolchains that require it.
if is_action_enabled(
action_name = swift_action_names.AUTOLINK_EXTRACT,
swift_toolchain = swift_toolchain,
):
autolink_file = derived_files.autolink_flags(
actions = actions,
target_name = label.name,
)
register_autolink_extract_action(
actions = actions,
autolink_file = autolink_file,
feature_configuration = feature_configuration,
module_name = module_context.name,
object_files = compilation_outputs.objects,
swift_toolchain = swift_toolchain,
)
post_compile_linker_inputs.append(
cc_common.create_linker_input(
owner = label,
user_link_flags = depset(
["@{}".format(autolink_file.path)],
),
additional_inputs = depset([autolink_file]),
),
)
extra_linking_contexts.append(
cc_common.create_linking_context(
linker_inputs = depset(post_compile_linker_inputs),
),
)
if not name:
name = label.name
return cc_common.create_linking_context_from_compilation_outputs(
actions = actions,
feature_configuration = get_cc_feature_configuration(
feature_configuration,
),
cc_toolchain = swift_toolchain.cc_toolchain_info,
compilation_outputs = compilation_outputs,
name = name,
user_link_flags = user_link_flags,
linking_contexts = linking_contexts + extra_linking_contexts,
alwayslink = alwayslink,
additional_inputs = additional_inputs,
disallow_static_libraries = False,
disallow_dynamic_library = True,
grep_includes = None,
)
| 11,924
|
def test_ClangFormat_long_line():
"""Test that extremely long lines are not wrapped."""
assert (
java.ClangFormat(
"""
public class VeryVeryLongNameeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee
extends VeryVeryLongNameeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeBase {
}
"""
)
== """
public class VeryVeryLongNameeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee \
extends VeryVeryLongNameeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeBase {}
"""
)
| 11,925
|
def wait_until_uds_reachable(uds_path, return_sock=False):
""" Wait until the unix domain socket at `uds_path` is reachable.
Returns
-------
socket.socket
"""
from miniworld.util import ConcurrencyUtil
sock = ConcurrencyUtil.wait_until_fun_returns_true(lambda x: x[0] is True, uds_reachable, uds_path,
return_sock=return_sock)[1]
return sock
| 11,926
|
def get_relation_count_df(
dataset: Dataset,
merge_subsets: bool = True,
add_labels: bool = True,
) -> pd.DataFrame:
"""Create a dataframe with relation counts.
:param dataset:
The dataset.
:param merge_subsets:
Whether to merge subsets, i.e., train/validation/test.
:param add_labels:
Whether to add entity / relation labels to the dataframe.
:return:
A dataframe with columns (relation_id, count, relation_label?, subset?)
"""
return _common(
dataset=dataset,
triple_func=triple_analysis.get_relation_counts,
merge_subsets=merge_subsets,
add_labels=add_labels,
)
| 11,927
|
def set_data(group: h5py.Group, name: str, data: Union[np.ndarray, h5py.Dataset], dtype=None):
"""
Creates a dataset in Group with Name for data that is either an np.ndarray or a h5py.Dataset already (if using a
dataset, it will only create a link to that dataset which is good for saving storage space, but don't do this
if you intend to change the data)
Args:
group (h5py.Group): HDF group to store dataset in
name (str): Name with which to store the dataset (will overwrite existing datasets)
data (Union[np.ndarray, h5py.Dataset]): Data to be stored, can be np.ndarray or h5py.Dataset
Returns:
None
"""
ds = group.get(name, None)
if ds is not None:
# TODO: Do something better here to make sure I'm not just needlessly rewriting data
logger.info(f'Removing dataset {ds.name} with shape {ds.shape} to '
f'replace with data of shape {data.shape}')
del group[name]
group.create_dataset(name, data.shape, dtype, data)
| 11,928
|
def get_tests(run_id):
"""
Endpoint for retrieving information about tests (from a test run).
Output parameter: test_id
"""
client = APIClient('https://testrail.homecred.it')
client.user = 'dmitriy.zverev@homecredit.ru'
client.password = 'Qwerty_22'
tests = client.send_get('get_tests/%s' % run_id)
return tests
| 11,929
|
def create_ffs():
"""
Create a new Powergate Filecoin Filesystem (FFS)
"""
powergate = PowerGateClient(app.config["POWERGATE_ADDRESS"])
ffs = powergate.ffs.create()
creation_date = datetime.now().replace(microsecond=0)
# TODO salt token id
filecoin_file_system = Ffs(
ffs_id=ffs.id, token=ffs.token, creation_date=creation_date, user_id=current_user.id,
)
db.session.add(filecoin_file_system)
# Create new FFS wallet and add entry in log table
address = powergate.ffs.addrs_list(ffs.token)
obj = MessageToDict(address)
wallet = obj["addrs"][0]["addr"]
wallet = Wallets(created=creation_date,
address=wallet,
ffs=ffs.id,
user_id=current_user.id,)
db.session.add(wallet)
db.session.commit()
new_ffs = Ffs.query.filter_by(ffs_id=ffs.id).first()
return new_ffs
| 11,930
|
def cleanup_pools():
"""Removes all storage pools from ScaleIO created by functional tests."""
for pool in StoragePool.all():
if not _is_test_name(pool):
continue
pool.delete()
assert not [p for p in StoragePool.all() if _is_test_name(p)]
| 11,931
|
def store_file(bucket, dataset, key, fileobj):
"""
Store a file on S3
Parameters
----------
bucket : str
dataset : str
key : str
fileobj : fileobj
"""
key = parsed_key(dataset, key)
total_sent = 0
logging.info("store_file: {}/{}".format(bucket, key))
def cb(bytes_sent):
nonlocal total_sent
total_sent += bytes_sent
logging.debug(
"store_file: {}/{} -- transferred {} bytes".format(bucket, key, total_sent)
)
s3_client.upload_fileobj(fileobj, bucket, key, Callback=cb)
| 11,932
|
def other(player):
"""Return the other player, for a player PLAYER numbered 0 or 1.
>>> other(0)
1
>>> other(1)
0
"""
return 1 - player
| 11,933
|
def _explored_parameters_in_group(traj, group_node):
"""Checks if one the parameters in `group_node` is explored.
:param traj: Trajectory container
:param group_node: Group node
:return: `True` or `False`
"""
explored = False
for param in traj.f_get_explored_parameters():
if param in group_node:
explored = True
break
return explored
| 11,934
|
def _execute(query,
data=None,
config_file=DEFAULT_CONFIG_FILE):
"""Execute SQL query on a postgres db"""
# Connect to an existing database.
postgres_db_credentials = postgres_db(config_file)
conn = psycopg2.connect(dbname=postgres_db_credentials["dbname"],
user=postgres_db_credentials["user"],
password=postgres_db_credentials["password"],
host=postgres_db_credentials["host"],
port=postgres_db_credentials["port"])
# Open a cursor to perform database operations.
cur = conn.cursor()
if data is None:
cur.execute(query)
elif isinstance(data, list):
execute_values(cur, query, data, template=None, page_size=100)
else:
cur.execute(query, data)
conn.commit()
if cur.description is None:
result = None
elif len(cur.description) == 1:
result, = cur.fetchone()
else:
result = cur.fetchall()
cur.close()
conn.close()
return result
| 11,935
|
def _construct_out_filename(fname, group_name):
"""
Construct a specifically formatted output filename.
The vrt will be placed adjacent to the HDF5 file, as
such write access is required.
"""
basedir = fname.absolute().parent
basename = fname.with_suffix('.vrt').name.replace(
'wagl',
group_name
)
out_fname = basedir.joinpath(Path(basename))
return out_fname
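# Hedged usage sketch (assumes pathlib.Path is imported, as the function above requires):
# a wagl HDF5 file maps to a sibling VRT named after the group.
example_out = _construct_out_filename(Path("/data/wagl_scene.h5"), "nbar")
print(example_out)  # -> /data/nbar_scene.vrt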
| 11,936
|
def test_add_record():
"""
"""
groups = groups_description
dataset_size = 3
temporal_context = 3
try:
shutil.rmtree('/tmp/fake_memmmaps/')
except:
pass
# build dataset
memmaps = generate_memmaps(dataset_size, three_groups_record_description,
memmaps_description_nested)
fake_dataset = DreemDataset(groups, features_description={}, temporal_context=temporal_context)
for i, memmap in enumerate(memmaps):
fake_dataset.add_record(memmap)
assert len(fake_dataset.records) == i + 1, 'Wrong number of records in the dataset'
assert fake_dataset.records[-1] == memmap, 'The wrong record has been added'
try:
shutil.rmtree('/tmp/fake_memmmaps/')
except:
pass
| 11,937
|
def _deposit_need_factory(name, **kwargs):
"""Generate a JSON argument string from the given keyword arguments.
The JSON string is always generated the same way so that the resulting Need
is equal to any other Need generated with the same name and kwargs.
"""
if kwargs:
# drop None-valued keyword arguments (iterate over a copy so deletion is safe)
for key in list(kwargs):
if kwargs[key] is None:
del kwargs[key]
if not kwargs:
argument = None
else:
argument = json.dumps(kwargs, separators=(',', ':'), sort_keys=True)
return ParameterizedActionNeed(name, argument)
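# Hedged sketch of the canonical-argument idea (assuming the json module is imported,
# as the function above requires): sorted keys and compact separators make equal
# kwargs serialize to identical strings, so the resulting Needs compare equal.
print(json.dumps({"user": 1, "id": 2}, separators=(',', ':'), sort_keys=True))
# -> {"id":2,"user":1}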
| 11,938
|
def not_equal(version1, version2):
"""
Evaluates the expression: version1 != version2.
:type version1: str
:type version2: str
:rtype: bool
"""
return compare(version1, '!=', version2)
| 11,939
|
def test_lf_acc_with_strict_match_with_prefixes_without_agg(
analysis_corpus,
analysis_corpus_y
):
""" Test expected accuracies across below spans:
Spans:
"name_1": Pierre (B-PERSON), Lison (L-PERSON), Pierre(U-PERSON)
"name_2": Pierre (B-PERSON), Lison (L-PERSON)
"org_1": Norwegian (B-ORG), Computing (I-ORG), Center(L-ORG)
"org_2": Norwegian (B-ORG), Computing (L-ORG), Oslo (U-ORG)
"place_1": Norwegian (U-NORP), Oslo (U-GPE)
Accuracy:
name_1:
B-PERSON: 19/19
I-PERSON: 19/19
L-PERSON: 19/19
U-PERSON: 19/19
name_2:
B-PERSON: 19/19
I-PERSON: 19/19
L-PERSON: 19/19
U-PERSON: 18/19 (misclassifies 2nd Pierre)
org_1:
B-ORG: 19/19
I-ORG: 19/19
L-ORG: 19/19
U-ORG: 19/19
org_2:
B-ORG: 19/19
I-ORG: 18/19
L-ORG: 17/19
U-ORG: 18/19
place_1:
B-NORP: 19/19
I-NORP: 19/19
L-NORP: 19/19
U-NORP: 18/19
B-GPE: 19/19
I-GPE: 19/19
L-GPE: 19/19
U-GPE: 19/19
"""
labels = ["O"]
labels += [
"%s-%s"%(p,l) for l in ["GPE", "NORP", "ORG", "PERSON"] for p in "BILU"
]
lf_analysis = LFAnalysis(
analysis_corpus,
labels,
strict_match=True
)
result = lf_analysis.lf_empirical_accuracies(
*analysis_corpus_y,
agg=False
)
assert(result['B-PERSON']['name_1'] == 1.0)
assert(result['I-PERSON']['name_1'] == 1.0)
assert(result['L-PERSON']['name_1'] == 1.0)
assert(result['U-PERSON']['name_1'] == 1.0)
assert(result['B-PERSON']['name_2'] == 1.0)
assert(result['I-PERSON']['name_2'] == 1.0)
assert(result['L-PERSON']['name_2'] == 1.0)
assert(abs(result['U-PERSON']['name_2'] - 18/19) <= 1e-5)
assert(result['B-ORG']['org_1'] == 1.0)
assert(result['I-ORG']['org_1'] == 1.0)
assert(result['L-ORG']['org_1'] == 1.0)
assert(result['U-ORG']['org_1'] == 1.0)
assert(result['B-ORG']['org_2'] == 1.0)
assert(abs(result['I-ORG']['org_2'] - 18/19) <= 1e-5)
assert(abs(result['L-ORG']['org_2'] - 17/19) <= 1e-5)
assert(abs(result['U-ORG']['org_2'] - 18/19) <= 1e-5)
assert(result['B-NORP']['place_1'] == 1.0)
assert(result['I-NORP']['place_1'] == 1.0)
assert(result['L-NORP']['place_1'] == 1.0)
assert(abs(result['U-NORP']['place_1'] - 18/19) <= 1e-5)
assert(result['B-GPE']['place_1'] == 1.0)
assert(result['I-GPE']['place_1'] == 1.0)
assert(result['L-GPE']['place_1'] == 1.0)
assert(result['U-GPE']['place_1'] == 1.0)
| 11,940
|
def get_sp_list():
"""
Gets all tickers from S&P 500
"""
bs = get_soup('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
sp_companies = bs.find_all('a', class_="external text")
return sp_companies
| 11,941
|
def loadNecessaryDatabases():
"""
loads transport and statmech databases
"""
from rmgpy.data.statmech import StatmechDatabase
from rmgpy.data.transport import TransportDatabase
#only load if they are not there already.
try:
getDB('transport')
getDB('statmech')
except DatabaseError:
logging.info("Databases not found. Making databases")
db = RMGDatabase()
db.statmech = StatmechDatabase()
db.statmech.load(os.path.join(settings['database.directory'],'statmech'))
db.transport = TransportDatabase()
db.transport.load(os.path.join(settings['database.directory'],'transport'))
| 11,942
|
def get_params_nowcast(
to, tf,
i, j,
path, nconst,
depthrange='None',
depav=False, tidecorr=tidetools.CorrTides):
"""This function loads all the data between the start and the end date that
contains hourly velocities in the netCDF4 nowcast files in the specified
depth range. Then masks, rotates and unstaggers the time series. The
unstaggering causes the shapes of the returned arrays to be 1 less than
those of the input arrays in the y and x dimensions. Finally it calculates
tidal ellipse parameters from the u and v time series. The shape of the
velocities entered is maintained, only losing the time dimension.
:arg to: The beginning of the date range of interest
:type to: datetime object
:arg tf: The end of the date range of interest
:type tf: datetime object
:arg i: x index, must have at least 2 values for unstaggering, will lose
the first i during the unstaggering in prepare_vel.
:type i: float or list
:arg j: y index, must have at least 2 values for unstaggering, will lose
the first j during the unstaggering in prepare_vel.
:type j: float or list
:arg path: Defines the path used(eg. nowcast)
:type path: string
:arg depthrange: Depth values of interest in meters as a float for a single
depth or a list for a range. A float will find the closest depth that
is <= the value given. Default is 'None' for the whole water column
(0-441m).
:type depthrange: float, string or list.
:arg depav: True will depth average over the whole depth profile given.
Default is False.
:type depav: boolean
:arg depth: depth vector corresponding to the depth of the velocities, only
required if depav=True.
:type depth: :py:class:'np.ndarray' or string
:returns: params, dep
params is dictionary object of the ellipse parameters for each constituent
dep is the depths of the ellipse parameters
"""
u, v, time, dep = ellipse_files_nowcast(
to, tf,
i, j,
path,
depthrange=depthrange)
u_u, v_v = prepare_vel(u, v, depav=depav, depth=dep)
params = get_params(u_u, v_v, time, nconst, tidecorr=tidecorr)
return params, dep
| 11,943
|
def convert_size(size):
""" Helper function to convert ISPMan sizes to readable units. """
return number_to_human_size(int(size)*1024)
| 11,944
|
def get_suppressed_output(
detections,
filter_id: int,
iou: float,
confidence: float,
) -> tuple:
"""Filters detections based on the intersection of union theory.
:param detections: The tensorflow prediction output.
:param filter_id: The specific class to be filtered.
:param iou: The intersection over union threshold.
:param confidence: The confidence threshold.
:returns: tuple of suppressed bbox, suppressed scores and suppressed classes.
"""
detection_masks = (
detections["detection_masks"]
if "detection_masks" in detections
else None
)
detection_boxes = detections["detection_boxes"]
detection_scores = detections["detection_scores"]
detection_classes = detections["detection_classes"]
return (
_non_max_suppress_bbox(
bbox=detection_boxes,
scores=detection_scores,
classes=detection_classes,
filter_class=filter_id,
iou=iou,
confidence=confidence,
)
if detection_masks is None
else _non_max_suppress_mask(
bbox=detection_boxes,
scores=detection_scores,
classes=detection_classes,
masks=detection_masks,
filter_class=filter_id,
iou=iou,
confidence=confidence,
)
)
| 11,945
|
def tf_nan_func(func, **kwargs):
"""
Applies `func` only to the finite values of the tensors passed in `kwargs`,
which is helpful for TF computations that cannot deal with NaN values.
:param func: function to call; it must accept the keyword argument X
:param kwargs: other arguments for func
:return: output of func with NaN values restored at their original positions
"""
mask = tfm.is_finite(kwargs["X"])
empty_t = tf.cast(tf.fill(mask.shape, np.nan), dtype=kwargs["X"].dtype)
for i in kwargs:
# workaround of tf.rank(kwargs[i]) > 0, avoid scalar value in mask
if kwargs[i].shape != ():
# keep only finite
kwargs[i] = tf.boolean_mask(kwargs[i], tfm.is_finite(kwargs[i]))
res_func = func(**kwargs)
full_t = tf.tensor_scatter_nd_update(empty_t, tf.where(mask), res_func)
return full_t
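# Hedged usage sketch (assumes `import tensorflow as tf` and `import numpy as np`
# alongside the tfm alias used above): NaN positions are preserved in the output.
example_x = tf.constant([1.0, np.nan, 4.0])
print(tf_nan_func(lambda X: tf.sqrt(X), X=example_x))  # -> [1.0, nan, 2.0]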
| 11,946
|
def deep_update(original,
new_dict,
new_keys_allowed=False,
allow_new_subkey_list=None,
override_all_if_type_changes=None):
"""Updates original dict with values from new_dict recursively.
If new key is introduced in new_dict, then if new_keys_allowed is not
True, an error will be thrown. Further, for sub-dicts, if the key is
in the allow_new_subkey_list, then new subkeys can be introduced.
Args:
original (dict): Dictionary with default values.
new_dict (dict): Dictionary with values to be updated
new_keys_allowed (bool): Whether new keys are allowed.
allow_new_subkey_list (Optional[List[str]]): List of keys that
correspond to dict values where new subkeys can be introduced.
This is only at the top level.
override_all_if_type_changes(Optional[List[str]]): List of top level
keys with value=dict, for which we always simply override the
entire value (dict), iff the "type" key in that value dict changes.
"""
allow_new_subkey_list = allow_new_subkey_list or []
override_all_if_type_changes = override_all_if_type_changes or []
for k, value in new_dict.items():
if k not in original and not new_keys_allowed:
raise Exception("Unknown config parameter `{}` ".format(k))
# Both original value and new one are dicts.
if isinstance(original.get(k), dict) and isinstance(value, dict):
# Check new type vs old one. If different, override entire value.
if k in override_all_if_type_changes and \
"type" in value and "type" in original[k] and \
value["type"] != original[k]["type"]:
original[k] = value
# Allowed key -> ok to add new subkeys.
elif k in allow_new_subkey_list:
deep_update(original[k], value, True)
# Non-allowed key.
else:
deep_update(original[k], value, new_keys_allowed)
# Original value not a dict OR new value not a dict:
# Override entire value.
else:
original[k] = value
return original
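# Hedged usage sketch: nested dicts are merged key by key; unknown top-level keys
# raise unless new_keys_allowed=True.
example_base = {"model": {"lr": 0.1}, "env": "CartPole-v1"}
print(deep_update(example_base, {"model": {"lr": 0.01}}))
# -> {'model': {'lr': 0.01}, 'env': 'CartPole-v1'}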
| 11,947
|
def queued_archive_jobs():
"""Fetch the info about jobs waiting in the archive queue.
Returns
-------
jobs: dict
"""
jobs = pbs_jobs()
return [
job
for job in jobs
if (job["job_state"] == "Q" and job["queue"] == "archivelong")
]
| 11,948
|
def main():
"""Start execution of the script"""
MiscUtil.PrintInfo("\n%s (RDK v%s; %s): Starting...\n" % (ScriptName, rdBase.rdkitVersion, time.asctime()))
(WallClockTime, ProcessorTime) = MiscUtil.GetWallClockAndProcessorTime()
# Retrieve command line arguments and options...
RetrieveOptions()
# Process and validate command line arguments and options...
ProcessOptions()
# Perform actions required by the script...
GenerateConformers()
MiscUtil.PrintInfo("\n%s: Done...\n" % ScriptName)
MiscUtil.PrintInfo("Total time: %s" % MiscUtil.GetFormattedElapsedTime(WallClockTime, ProcessorTime))
| 11,949
|
def getCustomKernelSolutionObj(kernelName, directory=globalParameters["CustomKernelDirectory"]):
"""Creates the Solution object for a custom kernel"""
kernelConfig = getCustomKernelConfig(kernelName, directory)
for k, v in kernelConfig.items():
if k != "ProblemType":
checkParametersAreValid((k, [v]), validParameters)
kernelConfig["KernelLanguage"] = "Assembly"
kernelConfig["CustomKernelName"] = kernelName
return Solution(kernelConfig)
| 11,950
|
def test_experiment_wide_results():
"""Test the wide format of results."""
# Clone and fit experiment
experiment = clone(EXPERIMENT).fit(DATASETS)
ds_names = experiment.results_wide_tbl_.dataset_name.unique()
clfs_names = experiment.results_wide_tbl_.classifier.unique()
metric_names = experiment.results_wide_tbl_.metric.unique()
assert set(ds_names) == set(experiment.datasets_names_)
assert len(experiment.results_wide_tbl_) == len(ds_names) * len(clfs_names) * len(
metric_names
)
| 11,951
|
def get_public_key_permissions(session, public_key):
# type: (Session, PublicKey) -> List[Permission]
"""Returns the permissions that this public key has. Namely, this the set of permissions
that the public key's owner has, intersected with the permissions allowed by this key's
tags
Returns:
a list of all permissions this public key has
"""
# TODO: Fix circular dependency
from grouper.permissions import permission_intersection
my_perms = user_permissions(session, public_key.user)
for tag in get_public_key_tags(session, public_key):
my_perms = permission_intersection(my_perms, get_public_key_tag_permissions(session, tag))
return list(my_perms)
| 11,952
|
def remove_files(file_paths: List[str] = None) -> None:
"""Remove all files in the list of file paths.
Args:
file_paths (List[str]): A list of file paths to be deleted.
Returns:
None: None
"""
if not file_paths:
LOGGER.write('file_paths was None or empty, so not removing files.')
return
LOGGER.write('Removing files from the target system.')
for file_path in file_paths:
if Path(file_path).exists():
try:
os.remove(file_path)
except Exception as ex:
LOGGER.write(f'Failed to remove {file_path} with error: {str(ex)}')
raise ex
LOGGER.write(f'Removal of {file_path} was successful.')
LOGGER.write('All files were successfully removed.')
| 11,953
|
def _format_breed_name(name):
"""
Format breed name for displaying
INPUT
name: raw breed name, str
OUTPUT
name : cleaned breed name, str
"""
return name.split('.')[1].replace('_', ' ')
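# Hedged usage sketch, assuming raw breed names follow the "016.Beagle" /
# "036.Brussels_griffon" convention (index prefix, underscores for spaces):
print(_format_breed_name("016.Beagle"))             # -> Beagle
print(_format_breed_name("036.Brussels_griffon"))   # -> Brussels griffon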
| 11,954
|
def test_create_receipt_with_no_receipt(session):
"""Try creating a receipt with invoice number."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment.id, payment_account.id)
invoice.save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
PaymentService.update_payment(payment.id, get_payment_request(), 'test')
input_data = {
'corpName': 'Pennsular Coop ',
'filingDateTime': '1999',
'fileName': 'coopser'
}
with pytest.raises(BusinessException) as excinfo:
ReceiptService.create_receipt(payment.id, '', input_data, skip_auth_check=True)
assert excinfo.type == BusinessException
| 11,955
|
def loadIndianPinesData():
"""
Load the Indian Pines data
:return: data, labels
"""
data_path = os.path.join(os.getcwd(), '../Indian Pines')
data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
return data, labels
| 11,956
|
def _get_attribute_accesses_for_control_flow(node, variable):
"""
..code:: python
Pass | Break | Continue
"""
return
yield
| 11,957
|
def _lint_js_and_ts_files(
node_path, eslint_path, files_to_lint, result, verbose_mode_enabled):
"""Prints a list of lint errors in the given list of JavaScript files.
Args:
node_path: str. Path to the node binary.
eslint_path: str. Path to the ESLint binary.
files_to_lint: list(str). A list of filepaths to lint.
result: multiprocessing.Queue. A queue to put results of test.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
start_time = time.time()
num_files_with_errors = 0
num_js_and_ts_files = len(files_to_lint)
if not files_to_lint:
result.put('')
python_utils.PRINT(
'There are no JavaScript or Typescript files to lint.')
return
python_utils.PRINT('Total js and ts files: ', num_js_and_ts_files)
eslint_cmd_args = [node_path, eslint_path, '--quiet']
result_list = []
python_utils.PRINT('Linting JS and TS files.')
for _, filepath in enumerate(files_to_lint):
if verbose_mode_enabled:
python_utils.PRINT('Linting: ', filepath)
proc_args = eslint_cmd_args + [filepath]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_linter_stdout, encoded_linter_stderr = proc.communicate()
linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
linter_stderr = encoded_linter_stderr.decode(encoding='utf-8')
if linter_stderr:
python_utils.PRINT('LINTER FAILED')
python_utils.PRINT(linter_stderr)
sys.exit(1)
if linter_stdout:
num_files_with_errors += 1
result_list.append(linter_stdout)
if num_files_with_errors:
for error in result_list:
result.put(error)
result.put('%s %s JavaScript and Typescript files' % (
_MESSAGE_TYPE_FAILED, num_files_with_errors))
else:
result.put(
'%s %s JavaScript and Typescript files linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_js_and_ts_files,
time.time() - start_time))
python_utils.PRINT('Js and Ts linting finished.')
| 11,958
|
def create_pid_redirected_error_handler():
"""Creates an error handler for `PIDRedirectedError` error."""
def pid_redirected_error_handler(e):
try:
# Check that the source pid and the destination pid are of the same
# pid_type
assert e.pid.pid_type == e.destination_pid.pid_type
# Redirection works only for the item route of the format
# `/records/<pid_value>`
location = url_for(
request.url_rule.endpoint,
pid_value=e.destination_pid.pid_value
)
data = dict(
status=301,
message='Moved Permanently.',
location=location,
)
response = make_response(jsonify(data), data['status'])
response.headers['Location'] = location
return response
except (AssertionError, BuildError, KeyError):
raise e
return pid_redirected_error_handler
| 11,959
|
def is_var_name_with_greater_than_len_n(var_name: str) -> bool:
"""
Given a variable name, return if this is acceptable according to the
filtering heuristics.
Here, we try to discard variable names like X, y, a, b etc.
:param var_name:
:return:
"""
unacceptable_names = {}
if len(var_name) < min_var_name_len:
return False
elif var_name in unacceptable_names:
return False
return True
| 11,960
|
def stacked_L(robot: RobotPlanar, q: list, q_goal: list):
"""
Stacks the L matrices for convenience
"""
LL = []
LLinv = []
Ts_ee = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q))
Ts_goal = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q_goal))
for ee in robot.end_effectors:
T_0_ee = SE2_to_SE3(Ts_ee[ee[0]])
Re = T_0_ee[0:3, 0:3]
T_0_goal = SE2_to_SE3(Ts_goal[ee[0]])
Rd = T_0_goal[0:3, 0:3]
ll, llinv = L(Rd, Re)
LL.append(np.eye(3))
LLinv.append(np.eye(3))
LL.append(ll)
LLinv.append(llinv)
LL = block_diag(*LL)
LLinv = block_diag(*LLinv)
return LL, LLinv
| 11,961
|
async def help() -> Dict:
"""Shows this help message."""
return {
'/': help.__doc__,
'/help': help.__doc__,
'/registration/malaysia': format_docstring(get_latest_registration_data_malaysia.__doc__),
'/registration/malaysia/latest': format_docstring(get_latest_registration_data_malaysia.__doc__),
'/registration/malaysia/{date}': format_docstring(get_registration_data_malaysia.__doc__),
'/vaccination/malaysia': format_docstring(get_latest_vax_data_malaysia.__doc__),
'/vaccination/malaysia/latest': format_docstring(get_latest_vax_data_malaysia.__doc__),
'/vaccination/malaysia/{date}': format_docstring(get_vax_data_malaysia.__doc__),
'/registration/state': format_docstring(get_latest_registration_data_state.__doc__),
'/registration/state/all/latest': format_docstring(get_latest_registration_data_state.__doc__),
'/registration/state/all/{date}': format_docstring(get_registration_data_all_state.__doc__),
'/registration/state/{state}/latest': format_docstring(get_latest_registration_data_for_state.__doc__),
'/registration/state/{state}/{date}': format_docstring(get_registration_data_state.__doc__),
'/vaccination/state': format_docstring(get_latest_vax_data_state.__doc__),
'/vaccination/state/all/latest': format_docstring(get_latest_vax_data_state.__doc__),
'/vaccination/state/all/{date}': format_docstring(get_vax_data_all_state.__doc__),
'/vaccination/state/{state}/latest': format_docstring(get_latest_vax_data_for_state.__doc__),
'/vaccination/state/{state}/{date}': format_docstring(get_vax_data_state.__doc__),
}
| 11,962
|
def eliminate_arrays(clusters, template):
"""
Eliminate redundant expressions stored in Arrays.
"""
mapper = {}
processed = []
for c in clusters:
if not c.is_dense:
processed.append(c)
continue
# Search for any redundant RHSs
seen = {}
for e in c.exprs:
f = e.lhs.function
if not f.is_Array:
continue
v = seen.get(e.rhs)
if v is not None:
# Found a redundant RHS
mapper[f] = v
else:
seen[e.rhs] = f
if not mapper:
# Do not waste time
processed.append(c)
continue
# Replace redundancies
subs = {}
for f, v in mapper.items():
for i in filter_ordered(i.indexed for i in c.scope[f]):
subs[i] = v[f.indices]
exprs = []
for e in c.exprs:
if e.lhs.function in mapper:
# Drop the write
continue
exprs.append(e.xreplace(subs))
processed.append(c.rebuild(exprs))
return processed
| 11,963
|
def data_generator(batch_size):
"""
Args:
batch_size: Size of batch
"""
vocab_size = 20000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
x_train, y_train, x_test, y_test = tf.ragged.constant(x_train), tf.constant(y_train[..., None]), \
tf.ragged.constant(x_test), tf.constant(y_test[..., None])
# Shuffle only train dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) \
.shuffle(batch_size * 100).batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
return train_dataset, test_dataset, vocab_size
| 11,964
|
def now():
""" Get current timestamp
Returns:
str: timestamp string
"""
current_time = datetime.now()
str_date = current_time.strftime("%d %B %Y, %I:%M:%S %p")
return str_date
| 11,965
|
def open_mfdataset(
fname,
convert_to_ppb=True,
mech="cb6r3_ae6_aq",
var_list=None,
fname_pm25=None,
surf_only=False,
**kwargs
):
# Like WRF-chem add var list that just determines whether to calculate sums or not to speed this up.
"""Method to open RFFS-CMAQ dyn* netcdf files.
Parameters
----------
fname : string or list
fname is the path to the file or files. It will accept hot keys in
strings as well.
convert_to_ppb : boolean
If true the units of the gas species will be converted to ppbv
mech: str
Mechanism to be used for calculating sums. Mechanisms supported:
"cb6r3_ae6_aq"
var_list: list
List of variables to include in output. MELODIES-MONET only reads in
variables need to plot in order to save on memory and simulation cost
especially for vertical data. If None, will read in all model data and
calculate all sums.
fname_pm25: string or list
Optional path to the file or files for precalculated PM2.5 sums. It
will accept hot keys in strings as well.
surf_only: boolean
Whether to save only surface data to save on memory and computational
cost (True) or not (False).
Returns
-------
xarray.DataSet
RRFS-CMAQ model dataset in standard format for use in MELODIES-MONET
"""
# Get dictionary of summed species for the mechanism of choice.
dict_sum = dict_species_sums(mech=mech)
if var_list is not None:
# Read in only a subset of variables and only do calculations if needed.
var_list_orig = var_list.copy() # Keep track of the original list before changes.
list_calc_sum = []
list_remove_extra = [] # list of variables to remove after the sum to save in memory.
for var_sum in [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]:
if var_sum in var_list:
if var_sum == "PM25":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
elif var_sum == "PM10":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
else:
var_list.extend(dict_sum[var_sum])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum[var_sum])
var_list.remove(var_sum)
list_calc_sum.append(var_sum)
# append the other needed species.
var_list.append("lat")
var_list.append("lon")
var_list.append("phalf")
var_list.append("tmp")
var_list.append("pressfc")
var_list.append("dpres")
var_list.append("hgtsfc")
var_list.append("delz")
# Remove duplicates just in case:
var_list = list(dict.fromkeys(var_list))
list_remove_extra = list(dict.fromkeys(list_remove_extra))
# Select only those elements in list_remove_extra that are not in var_list_orig
list_remove_extra_only = list(set(list_remove_extra) - set(var_list_orig))
# If variables in pm25 files are included remove these as these are not in the main file
# And will be added later.
for pm25_var in [
"PM25_TOT",
"PM25_TOT_NSOM",
"PM25_EC",
"PM25_NH4",
"PM25_NO3",
"PM25_SO4",
"PM25_OC",
"PM25_OM",
]:
if pm25_var in var_list:
var_list.remove(pm25_var)
# open the dataset using xarray
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)[var_list]
else:
# Read in all variables and do all calculations.
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)
list_calc_sum = [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]
if fname_pm25 is not None:
# Add the processed pm2.5 species.
dset_pm25 = xr.open_mfdataset(fname_pm25, concat_dim="time", combine="nested", **kwargs)
dset_pm25 = dset_pm25.drop(
labels=["lat", "lon", "pfull"]
) # Drop duplicate variables so can merge.
# Slight differences in pfull value between the files, but I assume that these still represent the
# same pressure levels from the model dynf* files.
# Attributes are formatted differently in pm25 file so remove attributes and use those from dynf* files.
dset_pm25.attrs = {}
dset = dset.merge(dset_pm25)
# Standardize some variable names
dset = dset.rename(
{
"grid_yt": "y",
"grid_xt": "x",
"pfull": "z",
"phalf": "z_i", # Interface pressure levels
"lon": "longitude",
"lat": "latitude",
"tmp": "temperature_k", # standard temperature (kelvin)
"pressfc": "surfpres_pa",
"dpres": "dp_pa", # Change names so standard surfpres_pa and dp_pa
"hgtsfc": "surfalt_m",
"delz": "dz_m",
}
) # Optional, but when available include altitude info
# Calculate pressure. This has to go before sorting because ak and bk
# are not sorted as they are in attributes
dset["pres_pa_mid"] = _calc_pressure(dset)
# Adjust pressure levels for all models such that the surface is first.
dset = dset.sortby("z", ascending=False)
dset = dset.sortby("z_i", ascending=False)
    # Note: these altitude calculations always need to go after the resorting above.
# Altitude calculations are all optional, but for each model add values that are easy to calculate.
dset["alt_msl_m_full"] = _calc_hgt(dset)
dset["dz_m"] = dset["dz_m"] * -1.0 # Change to positive values.
# Set coordinates
dset = dset.reset_index(
["x", "y", "z", "z_i"], drop=True
) # For now drop z_i no variables use it.
dset["latitude"] = dset["latitude"].isel(time=0)
dset["longitude"] = dset["longitude"].isel(time=0)
dset = dset.reset_coords()
dset = dset.set_coords(["latitude", "longitude"])
# These sums and units are quite expensive and memory intensive,
# so add option to shrink dataset to just surface when needed
if surf_only:
dset = dset.isel(z=0).expand_dims("z", axis=1)
# Need to adjust units before summing for aerosols
# convert all gas species to ppbv
if convert_to_ppb:
for i in dset.variables:
if "units" in dset[i].attrs:
if "ppmv" in dset[i].attrs["units"]:
dset[i] *= 1000.0
dset[i].attrs["units"] = "ppbv"
# convert "ug/kg to ug/m3"
for i in dset.variables:
if "units" in dset[i].attrs:
if "ug/kg" in dset[i].attrs["units"]:
# ug/kg -> ug/m3 using dry air density
dset[i] = dset[i] * dset["pres_pa_mid"] / dset["temperature_k"] / 287.05535
dset[i].attrs["units"] = r"$\mu g m^{-3}$"
# add lazy diagnostic variables
    # Note: because there are so many species to sum, summing the aerosols slows down the code.
if "PM25" in list_calc_sum:
dset = add_lazy_pm25(dset, dict_sum)
if "PM10" in list_calc_sum:
dset = add_lazy_pm10(dset, dict_sum)
if "noy_gas" in list_calc_sum:
dset = add_lazy_noy_g(dset, dict_sum)
if "noy_aer" in list_calc_sum:
dset = add_lazy_noy_a(dset, dict_sum)
if "nox" in list_calc_sum:
dset = add_lazy_nox(dset, dict_sum)
if "pm25_cl" in list_calc_sum:
dset = add_lazy_cl_pm25(dset, dict_sum)
if "pm25_ec" in list_calc_sum:
dset = add_lazy_ec_pm25(dset, dict_sum)
if "pm25_ca" in list_calc_sum:
dset = add_lazy_ca_pm25(dset, dict_sum)
if "pm25_na" in list_calc_sum:
dset = add_lazy_na_pm25(dset, dict_sum)
if "pm25_nh4" in list_calc_sum:
dset = add_lazy_nh4_pm25(dset, dict_sum)
if "pm25_no3" in list_calc_sum:
dset = add_lazy_no3_pm25(dset, dict_sum)
if "pm25_so4" in list_calc_sum:
dset = add_lazy_so4_pm25(dset, dict_sum)
if "pm25_om" in list_calc_sum:
dset = add_lazy_om_pm25(dset, dict_sum)
    # Change the times to pandas format.
    # unsafe=True just silences the calendar-conversion warning, which arises
    # because the model output uses a Julian calendar.
    dset["time"] = dset.indexes["time"].to_datetimeindex(unsafe=True)
    # Drop extra variables that were part of a sum but are not in the original var_list,
    # to save memory and computational time.
    # This is only relevant if var_list is provided.
if var_list is not None:
if bool(list_remove_extra_only): # confirm list not empty
dset = dset.drop_vars(list_remove_extra_only)
return dset
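# A minimal standalone check (not part of the reader above) of the ug/kg -> ug/m3
# conversion used in the unit-handling block: the mixing ratio is multiplied by
# dry-air density from the ideal gas law, rho = p / (R_d * T) with
# R_d = 287.05535 J kg-1 K-1. All numbers below are illustrative, not model output.
import numpy as np
R_D = 287.05535                                   # specific gas constant for dry air
mixing_ratio_ug_per_kg = np.array([5.0, 12.0])    # hypothetical aerosol mixing ratios
pressure_pa = np.array([101325.0, 85000.0])       # mid-level pressures
temperature_k = np.array([288.15, 275.0])
air_density = pressure_pa / (R_D * temperature_k)             # kg m-3, ~1.2 near the surface
concentration_ug_m3 = mixing_ratio_ug_per_kg * air_density    # same formula as dset[i] * p / T / R_d
print(concentration_ug_m3)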
| 11,966
|
def projection(projection_matrix: tf.Tensor,
flattened_vector: tf.Tensor) -> tf.Tensor:
"""Projects `flattened_vector` using `projection_matrix`.
Args:
projection_matrix: A rank-2 Tensor that specifies the projection.
flattened_vector: A flat Tensor to be projected
Returns:
A flat Tensor returned from projection.
"""
return tf.reshape(
projection_matrix @ (tf.transpose(projection_matrix) @ tf.reshape(
flattened_vector, [-1, 1])), [-1])
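# Illustrative usage sketch (assumes TF2 eager execution; the inputs are made up,
# not taken from the original module): build an orthonormal basis Q for a random
# 2-D subspace of R^5 and check that `projection` is idempotent -- projecting
# twice equals projecting once because Q^T Q = I.
import tensorflow as tf
basis = tf.random.normal([5, 2])
q, _ = tf.linalg.qr(basis)                        # columns of q are orthonormal
v = tf.random.normal([5])
once = projection(q, v)
twice = projection(q, once)
print(tf.reduce_max(tf.abs(once - twice)).numpy())  # ~0 up to float error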
| 11,967
|
def free_vars(e):
"""Get free variables from expression e.
Parameters
----------
e: tvm.relay.Expr
The input expression
Returns
-------
free : List[tvm.relay.Var]
The list of free variables
"""
return _ir_pass.free_vars(e)
| 11,968
|
def evaluate(model, epoch, sess, dev_data, vocab, config):
"""
evaluate model.
"""
src_vocab, tgt_vocab = vocab
print('start evaluate...')
total_acc = 0
gold_num = 0
model.dropout = 0
train_batch_iter = create_batch_iter(dev_data,
config.batch_size,
shuffle=True)
for batch in train_batch_iter:
feature, target, word_list = pair_data_variable(
batch, src_vocab, tgt_vocab, config)
gold_num += len(target)
loss, acc = sess.run([model.loss_op, model.accuracy],
feed_dict={
model.w: feature,
model.gold: target
})
total_acc += acc
accuracy = total_acc / gold_num * 100
print('acc={:.2f}%'.format(accuracy))
    # best_acc/best_epoch are module-level trackers; declare them global so an
    # improvement persists across epochs instead of only updating local copies.
    global best_acc, best_epoch
    if accuracy > best_acc:
        best_acc = accuracy
        best_epoch = epoch
        print('##Update! best_acc={:.2f}% in epoch {}'.format(
            best_acc, best_epoch))
        output_graph_def = convert_variables_to_constants(
            sess, sess.graph_def, output_node_names=['s/logits'])
        with tf.gfile.GFile(config.save_dirs + '/' + config.save_model_path,
                            mode='wb') as f:
            f.write(output_graph_def.SerializeToString())
        print('saved model successfully! in ' + config.save_dirs + '/' +
              config.save_model_path)
    else:
        print('not update, best_acc={:.2f}% in epoch {}'.format(
            best_acc, best_epoch))
| 11,969
|
def image_as_uint(im, bitdepth=None):
""" Convert the given image to uint (default: uint8)
If the dtype already matches the desired format, it is returned
as-is. If the image is float, and all values are between 0 and 1,
the values are multiplied by np.power(2.0, bitdepth). In all other
situations, the values are scaled such that the minimum value
becomes 0 and the maximum value becomes np.power(2.0, bitdepth)-1
(255 for 8-bit and 65535 for 16-bit).
"""
if not bitdepth:
bitdepth = 8
if not isinstance(im, np.ndarray):
raise ValueError("Image must be a numpy array")
if bitdepth == 8:
out_type = np.uint8
elif bitdepth == 16:
out_type = np.uint16
else:
raise ValueError("Bitdepth must be either 8 or 16")
dtype_str1 = str(im.dtype)
dtype_str2 = out_type.__name__
if (im.dtype == np.uint8 and bitdepth == 8) or (
im.dtype == np.uint16 and bitdepth == 16
):
# Already the correct format? Return as-is
return im
if dtype_str1.startswith("float") and np.nanmin(im) >= 0 and np.nanmax(im) <= 1:
_precision_warn(dtype_str1, dtype_str2, "Range [0, 1].")
im = im.astype(np.float64) * (np.power(2.0, bitdepth) - 1) + 0.499999999
elif im.dtype == np.uint16 and bitdepth == 8:
_precision_warn(dtype_str1, dtype_str2, "Losing 8 bits of resolution.")
im = np.right_shift(im, 8)
elif im.dtype == np.uint32:
_precision_warn(
dtype_str1,
dtype_str2,
"Losing {} bits of resolution.".format(32 - bitdepth),
)
im = np.right_shift(im, 32 - bitdepth)
elif im.dtype == np.uint64:
_precision_warn(
dtype_str1,
dtype_str2,
"Losing {} bits of resolution.".format(64 - bitdepth),
)
im = np.right_shift(im, 64 - bitdepth)
else:
mi = np.nanmin(im)
ma = np.nanmax(im)
if not np.isfinite(mi):
raise ValueError("Minimum image value is not finite")
if not np.isfinite(ma):
raise ValueError("Maximum image value is not finite")
if ma == mi:
return im.astype(out_type)
_precision_warn(dtype_str1, dtype_str2, "Range [{}, {}].".format(mi, ma))
# Now make float copy before we scale
im = im.astype("float64")
# Scale the values between 0 and 1 then multiply by the max value
im = (im - mi) / (ma - mi) * (np.power(2.0, bitdepth) - 1) + 0.499999999
assert np.nanmin(im) >= 0
assert np.nanmax(im) < np.power(2.0, bitdepth)
return im.astype(out_type)
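# Minimal sketch of the float path above, using made-up sample data: float images
# in [0, 1] are scaled by 2**bitdepth - 1 and offset by ~0.5 so that the final
# astype() truncation behaves like rounding.
import numpy as np
im_float = np.array([[0.0, 0.5, 1.0]])
scaled = im_float * (2.0 ** 8 - 1) + 0.499999999
print(scaled.astype(np.uint8))                    # [[  0 127 255]]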
| 11,970
|
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator
| 11,971
|
def get_file(
path: str,
skip_hidden: bool = True,
skip_folder: bool = True,
) -> Iterator[str]:
"""
遍历path下的所有文件(不包括文件夹)
:param path: 待遍历的路径
:return: 一个返回文件名的迭代器
"""
for root, ds, fs in os.walk(path):
for file in fs:
file_path = os.path.join(root, file)
if skip_hidden and is_file_hidden(file_path):
continue
yield file_path
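# Possible usage sketch: "/tmp/example_dir" is an illustrative path, and skip_hidden
# is set to False so the example does not depend on the module's is_file_hidden helper.
import os  # needed by get_file's os.walk when the snippet is run standalone
for file_path in get_file("/tmp/example_dir", skip_hidden=False):
    print(file_path)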
| 11,972
|
def _test_sparsify_densify(self, x, default_value):
"""Test roundtrip via Sparsify and Densify."""
numpy_source = in_memory_source.NumpySource(x, batch_size=len(x))()
(sparse_series,) = sparsify.Sparsify(default_value)(numpy_source[1])
(dense_series,) = densify.Densify(default_value)(sparse_series)
cache = {}
sparse_tensor = sparse_series.build(cache)
dense_tensor = dense_series.build(cache)
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
sparse_val, dense_val = sess.run([sparse_tensor, dense_tensor])
coord.request_stop()
coord.join(threads)
if x.dtype.kind not in ["S", "U"] and np.isnan(default_value):
x_values = x[~np.isnan(x)]
x_indexes = np.arange(len(x))[~np.isnan(x)].T.reshape(-1, 1)
else:
x_values = x[x != default_value]
x_indexes = np.arange(len(x))[x != default_value].T.reshape(-1, 1)
if x.dtype.kind in ["S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_x = [item.encode("utf-8") for item in x]
expected_x_values = [item.encode("utf-8") for item in x_values]
else:
expected_x = x
expected_x_values = x_values
np.testing.assert_array_equal(len(x), sparse_val.dense_shape[0])
np.testing.assert_array_equal(expected_x_values, sparse_val.values)
np.testing.assert_array_equal(x_indexes, sparse_val.indices)
np.testing.assert_array_equal(expected_x, dense_val)
| 11,973
|
def compute_cost_with_regularization(A3, Y, parameters, lambd):
"""
Implement the cost function with L2 regularization. See formula (2) above.
Arguments:
A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
parameters -- python dictionary containing parameters of the model
Returns:
cost - value of the regularized loss function (formula (2))
"""
m = Y.shape[1]
W1 = parameters["W1"]
W2 = parameters["W2"]
W3 = parameters["W3"]
cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost
L2_regularization_cost = (1. / m * lambd / 2) * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
cost = cross_entropy_cost + L2_regularization_cost
return cost
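# Standalone numeric check of the L2 term with toy weights (not course data):
# L2_cost = (lambd / (2 * m)) * (sum(W1**2) + sum(W2**2) + sum(W3**2)).
import numpy as np
m, lambd = 4, 0.1
W1 = np.array([[1.0, -1.0]])
W2 = np.array([[2.0]])
W3 = np.array([[0.5]])
l2_cost = (1. / m * lambd / 2) * (np.sum(np.square(W1))
                                  + np.sum(np.square(W2))
                                  + np.sum(np.square(W3)))
print(l2_cost)                                    # 0.1 / 8 * 6.25 = 0.078125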
| 11,974
|
def catalog_area(ra=[], dec=[], make_plot=True, NMAX=5000, buff=0.8, verbose=True):
"""Compute the surface area of a list of RA/DEC coordinates
Parameters
----------
ra, dec : `~numpy.ndarray`
RA and Dec. coordinates in decimal degrees
make_plot : bool
Make a figure.
NMAX : int
If the catalog has more then `NMAX` entries, draw `NMAX` random
samples.
buff : float
Buffer in arcmin to add around each catalog point.
Returns
-------
area : float
Computed catalog area in square arcminutes
fig : `~matplotlib.figure.Figure`
Figure object returned if `make_plot==True`.
"""
import matplotlib.pyplot as plt
from shapely.geometry import Polygon, Point, MultiPolygon, MultiLineString
from scipy import spatial
points = np.array([ra, dec])*1.
center = np.mean(points, axis=1)
points = (points.T - center)*60. # arcmin
points[:, 0] *= np.cos(center[1]/180*np.pi)
hull = spatial.ConvexHull(points)
edge = points[hull.vertices, :]
#pbuff = 1
    if len(ra) > NMAX:
        # Draw NMAX random indices; randint keeps them strictly below len(ra),
        # avoiding the off-by-one that rounding rand()*len(ra) could produce.
        rnd_idx = np.unique(np.random.randint(0, len(ra), size=NMAX))
    else:
        rnd_idx = np.arange(len(ra))
poly = Point(points[rnd_idx[0], :]).buffer(buff)
for i, ix in enumerate(rnd_idx):
if verbose:
print(NO_NEWLINE + '{0} {1}'.format(i, ix))
poly = poly.union(Point(points[ix, :]).buffer(buff))
# Final (multi)Polygon
pjoin = poly.buffer(-buff)
if make_plot:
fig = plt.figure()
ax = fig.add_subplot(111)
if isinstance(pjoin, MultiPolygon):
for p_i in pjoin:
if isinstance(p_i.boundary, MultiLineString):
for s in p_i.boundary:
p = s.xy
ax.plot(p[0], p[1])
else:
p = p_i.boundary.xy
ax.plot(p[0], p[1])
else:
p_i = pjoin
if isinstance(p_i.boundary, MultiLineString):
for s in p_i.boundary:
p = s.xy
ax.plot(p[0], p[1])
else:
p = p_i.boundary.xy
ax.plot(p[0], p[1])
ax.scatter(points[rnd_idx, 0], points[rnd_idx, 1], alpha=0.1, marker='+')
ax.set_xlim(ax.get_xlim()[::-1])
ax.set_xlabel(r'$\Delta$RA ({0:.5f})'.format(center[0]))
ax.set_ylabel(r'$\Delta$Dec. ({0:.5f})'.format(center[1]))
ax.set_title('Total area: {0:.1f} arcmin$^2$'.format(pjoin.area))
ax.grid()
fig.tight_layout(pad=0.1)
return pjoin.area, fig
else:
return pjoin.area
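# Hedged usage sketch with synthetic coordinates (requires numpy, scipy, shapely and
# matplotlib to be installed; verbose=False avoids the module's NO_NEWLINE constant):
import numpy as np
rng = np.random.default_rng(42)
ra = 150.0 + rng.uniform(-0.05, 0.05, size=200)   # decimal degrees
dec = 2.0 + rng.uniform(-0.05, 0.05, size=200)
area = catalog_area(ra, dec, make_plot=False, verbose=False)
print('{0:.1f} arcmin^2'.format(area))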
| 11,975
|
def cli(geotiff_file, meshmask_file, numpy_file, verbosity):
"""Command-line interface for :py:func:`moad_tools.midoss.geotiff_watermask`.
:param str geotiff_file: File path and name of an AIS ship tracks GeoTIFF file to use the
pixel lons/lats from to calculate water mask;
typically a ShipTrackDensityGeoTIFFs/all_yyyy_mm.tif file.
:param str meshmask_file: File path and name of a SalishSeaCast NEMO mesh mask file to use
                              the NEMO grid lons/lats and T-grid water/land mask from to
calculate the water mask.
:param str numpy_file: File path and name of Numpy array file to write the water mask to.
:param str verbosity: Verbosity level of logging messages about the progress of the
transformation.
Choices are :kbd:`debug, info, warning, error, critical`.
:kbd:`warning`, :kbd:`error`, and :kbd:`critical` should be silent
unless something bad goes wrong.
Default is :kbd:`warning`.
"""
logging_level = getattr(logging, verbosity.upper())
logging.basicConfig(
level=logging_level,
format="%(asctime)s geotiff-watermask %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
stream=sys.stdout,
)
watermask = geotiff_watermask(geotiff_file, meshmask_file)
write_numpy_file(watermask, numpy_file)
| 11,976
|
def main():
"""Calculate metrics from collected data, generate plots and tables."""
# metrics and plots for sidechannels
fr = print_sidechannel(
"reload",
start=0,
end=250,
step=5,
threshold=100,
direction="-",
clusters=[(0, 49), (150, 200), (600, 800)],
)
ff = print_sidechannel(
"flush",
start=110,
end=140,
step=1,
threshold=124,
direction="+",
clusters=[(100, 200)],
)
plot_sidechannel(fr, ff)
print()
# metrics for attacks
calc_metrics_prefix("misprediction")
calc_metrics_prefix("ridl")
calc_metrics_prefix("wtf")
calc_metrics_prefix("zombieload")
calc_metrics_prefix("storetoleak")
# tables for attacks, as markdown tables to be copied to the documentation
print("Tables:")
print()
print_table_attack("ridl")
print_table_attack("wtf")
print_table_attack("zombieload")
| 11,977
|
def user_page(num_page=1):
"""Page with list of users route."""
form = SearchUserForm(request.args, meta={'csrf': False})
msg = False
if form.validate():
search_by = int(request.args.get('search_by'))
order_by = int(request.args.get('order_by'))
search_string = str(request.args.get('search'))
if len(search_string) >= MIN_SEARCH_STR:
condition = user_search(search_string, search_by)
else:
condition = ""
if search_string != "":
msg = True
order_list = [User.id, User.role_id, User.delete_date]
order = order_list[order_by]
search_users = db.session.query(User, Role).filter(and_(
User.role_id == Role.id, condition)).order_by(order).paginate(
per_page=PAGINATE_PAGE, page=num_page, error_out=True)
if msg:
flash("Search string is too small", category="danger")
return render_template('user_page.html', form=form, users=search_users,
get="?" + urlencode(request.args))
else:
users = db.session.query(User, Role).filter(
User.role_id == Role.id).order_by(User.id).paginate(
per_page=PAGINATE_PAGE, page=num_page, error_out=True)
return render_template('user_page.html', form=form, users=users,
get="?" + urlencode(request.args))
| 11,978
|
def set_status_label(opened, enabled):
"""update status button label to reflect indicated state."""
if not opened:
label = "closed"
elif enabled:
label = "enabled"
else:
label = "disabled"
status_button.set_label(label)
| 11,979
|
def validate_input(data: ConfigType) -> dict[str, str] | None:
"""Validate the input by the user."""
try:
SIAAccount.validate_account(data[CONF_ACCOUNT], data.get(CONF_ENCRYPTION_KEY))
except InvalidKeyFormatError:
return {"base": "invalid_key_format"}
except InvalidKeyLengthError:
return {"base": "invalid_key_length"}
except InvalidAccountFormatError:
return {"base": "invalid_account_format"}
except InvalidAccountLengthError:
return {"base": "invalid_account_length"}
except Exception as exc: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception from SIAAccount: %s", exc)
return {"base": "unknown"}
if not 1 <= data[CONF_PING_INTERVAL] <= 1440:
return {"base": "invalid_ping"}
return validate_zones(data)
| 11,980
|
def _setup_log():
"""Configure root logger.
"""
import logging
import sys
try:
handler = logging.StreamHandler(stream=sys.stdout)
except TypeError: # pragma: no cover
handler = logging.StreamHandler(strm=sys.stdout)
log = get_log()
log.addHandler(handler)
log.setLevel(logging.WARN)
log.propagate = False
| 11,981
|
def pypi_archive(
name,
package = None,
version = None,
sha256 = None,
strip_prefix = "",
build_file = None,
build_file_content = None,
workspace_file = None,
workspace_file_content = None,
mirrors = None):
"""Downloads and unpacks a PyPI package archive and adds it to the
WORKSPACE as an external.
Example:
Download and use the "foo" package, version 1.2.3, hosted on PyPI at
https://files.pythonhosted.org/packages/source/f/foo/foo-1.2.3.tar.gz.
WORKSPACE:
load("//tools/workspace:pypi.bzl", "pypi_archive")
pypi_archive(
name = "foo",
version = "1.2.3",
build_file = "foo.BUILD",
sha256 = "0123456789abcdef...",
)
foo.BUILD:
load("//tools/skylark:py.bzl", "py_library")
py_library(
name = "foo",
srcs = [
"foo/__init__.py",
"foo/bar.py",
],
visibility = ["//visibility:public"],
)
BUILD:
load("//tools/skylark:py.bzl", "py_binary")
py_binary(
name = "foobar",
deps = ["@foo//:foo"],
srcs = ["foobar.py"],
)
Arguments:
name: A unique name for this rule. This argument will be used for the
package name if the "package" argument is omitted [Name; required].
package: The name of the PyPI package to download. The "name" argument
will be used if this argument is omitted [String; optional].
version: The version of the PyPI package to be downloaded
[String; required].
sha256: The expected SHA-256 hash of the archive to download. This
argument must match the SHA-256 hash of the downloaded archive.
The download will fail if omitted, but the checksum-mismatch error
message will offer a suggestion for the correct value of this
argument [String; required].
strip_prefix: A directory prefix to strip from the extracted files
[String; optional].
build_file: The file to use as the BUILD file for this repository.
This argument is an absolute label. Either build_file or
build_file_content must be specified, but not both
[Label; optional].
build_file_content: The content for the BUILD file for this repository.
Either build_file or build_file_content must be specified, but not
both [Label; optional].
workspace_file: The file to use as the WORKSPACE file for this
repository. This argument is an absolute label. Either
workspace_file or workspace_file_content may be specified, or
neither, but not both [Label; optional].
workspace_file_content: The content for the WORKSPACE file for this
repository. Either workspace_file, workspace_file_content or
neither may be specified, but not both [Label; optional].
mirrors: A dict from string to list-of-string with key "pypi", where
the list-of-strings are URLs to use, formatted using {package},
{version}, and {p} (where {p} is the first letter of {package}).
"""
if not package:
package = name
if not version:
fail("The version argument to pypi_archive is required.")
if not build_file and not build_file_content:
fail("Either the build_file or build_file_content argument to " +
"pypi_archive is required.")
_pypi_archive(
name = name,
package = package,
version = version,
sha256 = sha256,
strip_prefix = strip_prefix,
build_file = build_file,
build_file_content = build_file_content,
workspace_file = workspace_file,
workspace_file_content = workspace_file_content,
mirrors = mirrors,
)
| 11,982
|
def delete_person(person_id):
"""Mark as void a person."""
try:
person_detail = get_object_or_404(RegPerson, pk=person_id)
person_detail.is_void = True
person_detail.save(update_fields=["is_void"])
    except Exception as e:
        raise e
| 11,983
|
def file_and_path_for_module(modulename):
"""Find the file and search path for `modulename`.
Returns:
filename: The filename of the module, or None.
path: A list (possibly empty) of directories to find submodules in.
"""
filename = None
path = []
try:
spec = importlib.util.find_spec(modulename)
except ImportError:
pass
else:
if spec is not None:
if spec.origin != "namespace":
filename = spec.origin
path = list(spec.submodule_search_locations or ())
return filename, path
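# Quick usage sketch; importlib.util is imported here because the helper above
# relies on it being available in the module namespace.
import importlib.util
filename, path = file_and_path_for_module("json")
print(filename)                                   # e.g. .../lib/python3.x/json/__init__.py
print(path)                                       # submodule search path of the json package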
| 11,984
|
def hello_page(request):
"""Simple view to say hello.
It is used to check the authentication system.
"""
text = "Welcome to test_project"
if not request.user.is_anonymous:
text = "Welcome '%s' to test_project" % request.user.username
return HttpResponse(text, content_type='text/plain')
| 11,985
|
def hash_all(bv: Binary_View) -> Dict[str, Function]:
"""
Iterate over every function in the binary and calculate its hash.
:param bv: binary view encapsulating the binary
:return: a dictionary mapping hashes to functions
"""
sigs = {}
for function in bv.functions:
sigs[hash_function(function)] = function
return sigs
| 11,986
|
def while_n():
""" Lower case Alphabet letter 'n' pattern using Python while loop"""
row = 0
while row<4:
col = 0
while col<3:
if row==0 and col in (0,1) or row>0 and col%2==0:
print('*', end = ' ')
else:
print(' ', end = ' ')
col += 1
print()
row += 1
| 11,987
|
def apply_deformation(
deformation_indices: Union[List[bool], np.ndarray], bsf: np.ndarray
) -> np.ndarray:
"""Return Hadamard-deformed bsf at given indices."""
n = len(deformation_indices)
deformed = np.zeros_like(bsf)
if len(bsf.shape) == 1:
if bsf.shape[0] != 2*n:
raise ValueError(
f'Deformation index length {n} does not match '
f'bsf shape {bsf.shape}, which should be {(2*n,)}'
)
for i, deform in enumerate(deformation_indices):
if deform:
deformed[i] = bsf[i + n]
deformed[i + n] = bsf[i]
else:
deformed[i] = bsf[i]
deformed[i + n] = bsf[i + n]
else:
if bsf.shape[1] != 2*n:
raise ValueError(
f'Deformation index length {n} does not match '
f'bsf shape {bsf.shape}, which should be '
f'{(bsf.shape[0], 2*n)}.'
)
for i, deform in enumerate(deformation_indices):
if deform:
deformed[:, i] = bsf[:, i + n]
deformed[:, i + n] = bsf[:, i]
else:
deformed[:, i] = bsf[:, i]
deformed[:, i + n] = bsf[:, i + n]
return deformed
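# Worked example with a made-up 3-qubit Pauli in binary symplectic form
# [x1 x2 x3 | z1 z2 z3]: deforming qubit 0 swaps its X and Z components, which is
# the expected action of a Hadamard on that qubit.
import numpy as np
pauli = np.array([1, 0, 0, 0, 1, 0])              # X on qubit 0, Z on qubit 1
deformed = apply_deformation([True, False, False], pauli)
print(deformed)                                   # [0 0 0 1 1 0] -> Z on qubit 0, Z on qubit 1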
| 11,988
|
def index(request):
"""Return the index.html file"""
return render(request, 'index.html')
| 11,989
|
def execute(cx, stmt, args=(), return_result=False):
"""
Execute query in 'stmt' over connection 'cx' (with parameters in 'args').
Be careful with query statements that have a '%' in them (say for LIKE)
since this will interfere with psycopg2 interpreting parameters.
Printing the query will not print AWS credentials IF the string used
matches "CREDENTIALS '[^']*'".
So be careful or you'll end up sending your credentials to the logfile.
"""
with cx.cursor() as cursor:
executable_statement = mogrify(cursor, stmt, args)
printable_stmt = remove_password(executable_statement.decode())
logger.debug("QUERY:\n%s\n;", printable_stmt) # lgtm[py/clear-text-logging-sensitive-data]
with Timer() as timer:
cursor.execute(executable_statement)
if cursor.rowcount is not None and cursor.rowcount > 0:
logger.debug(
"QUERY STATUS: %s [rowcount=%d] (%s)", cursor.statusmessage, cursor.rowcount, timer
)
else:
logger.debug("QUERY STATUS: %s (%s)", cursor.statusmessage, timer)
if cx.notices and logger.isEnabledFor(logging.DEBUG):
for msg in cx.notices:
logger.debug("QUERY " + msg.rstrip("\n"))
del cx.notices[:]
if return_result:
return cursor.fetchall()
| 11,990
|
def parse_custom_builders(builders: Optional[Iterable[str]]) -> Dict[str, Type[AbstractBuilder]]:
"""
Parse the custom builders passed using the ``--builder NAME`` option on the command line.
:param builders:
"""
custom_builders: Dict[str, Type[AbstractBuilder]] = {}
if builders is None:
return custom_builders
entry_points = get_entry_points()
for builder_name in builders:
if builder_name not in entry_points:
raise click.BadArgumentUsage(
f"Unknown builder {builder_name!r}. \n"
f"Is it registered as an entry point under 'whey.builder'?"
)
else:
custom_builders[builder_name] = entry_points[builder_name].load()
return custom_builders
| 11,991
|
def construct_user_rnn_inputs(document_feature_size=10,
creator_feature_size=None,
user_feature_size=None,
input_reward=False):
"""Returns user RNN inputs.
Args:
document_feature_size: Integer, length of document features.
creator_feature_size: Integer or None, length of creator features. If None,
no features about creators will be input.
user_feature_size: Integer or None, length of user features. If None, no
features about users will be input.
    input_reward: Boolean, whether to input previous reward to RNN layer.
  Returns:
    A tuple (merged_embedding, inputs): the merged input tensor to feed the RNN
    layer, and the list of Keras Input layers it was built from.
  """
# Previous consumed document.
rnn_input_doc_feature = tf.keras.layers.Input(
shape=(None, document_feature_size), name='user_consumed_doc_feature')
merged_embedding = rnn_input_doc_feature
inputs = [rnn_input_doc_feature]
# Previous consumed document-associated creator.
if creator_feature_size is not None:
# This vector includes creator's observable features and/or creator's hidden
# states inferred by creator model.
merged_embedding, inputs = _merge_inputs(
(None, creator_feature_size), 'user_consumed_doc-creator_feature',
merged_embedding, inputs)
# User current context.
if user_feature_size is not None:
merged_embedding, inputs = _merge_inputs(
(None, user_feature_size), 'user_current_feature', merged_embedding,
inputs)
# Previous reward.
if input_reward:
merged_embedding, inputs = _merge_inputs((None, 1), 'user_previous_reward',
merged_embedding, inputs)
return merged_embedding, inputs
| 11,992
|
def numpy_read(DATAFILE, BYTEOFFSET, NUM, PERMISSION, DTYPE):
"""
Read NumPy-compatible binary data.
Modeled after MatSeis function read_file in util/waveread.m.
"""
    with open(DATAFILE, PERMISSION) as f:
        f.seek(BYTEOFFSET, 0)
        data = np.fromfile(f, dtype=np.dtype(DTYPE), count=NUM)
    return data
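# Usage sketch: write a small int32 array to a temporary file (name is illustrative)
# and read the last two values back by seeking past the first 4-byte value.
import os
import tempfile
import numpy as np
tmp = os.path.join(tempfile.gettempdir(), 'numpy_read_demo.bin')
np.array([10, 20, 30], dtype=np.int32).tofile(tmp)
values = numpy_read(tmp, BYTEOFFSET=4, NUM=2, PERMISSION='rb', DTYPE='int32')
print(values)                                     # [20 30]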
| 11,993
|
def _process_pmid(s: str, sep: str = '|', prefix: str = 'pubmed:') -> str:
"""Filter for PubMed ids.
:param s: string of PubMed ids
:param sep: separator between PubMed ids
:return: PubMed id
"""
for identifier in s.split(sep):
identifier = identifier.strip()
if identifier.startswith(prefix):
return identifier
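# Tiny usage sketch: only the pipe-separated entry carrying the pubmed prefix is
# returned; a string without any match falls through and yields None.
print(_process_pmid("doi:10.1000/xyz|pubmed:12345"))  # pubmed:12345
print(_process_pmid("doi:10.1000/xyz"))               # None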
| 11,994
|
def main(known_file, comparison, output_type):
"""
The main function handles the main operations of the script
:param known_file: path to known file
:param comparison: path to look for similar files
:param output_type: type of output to provide
:return: None
"""
# Check output formats
if output_type not in OUTPUT_OPTS:
logger.error(
"Unsupported output format '{}' selected. Please "
"use one of {}".format(
output_type, ", ".join(OUTPUT_OPTS)))
sys.exit(2)
elif output_type == 'csv':
# Special handling for CSV headers
print('"similarity","known_file","known_hash",'
'"comp_file","comp_hash"')
# Check provided file paths
known_file = os.path.abspath(known_file)
comparison = os.path.abspath(comparison)
# Generate ssdeep signature for known file
    if not os.path.exists(known_file):
        logger.error("Error - path {} not found".format(
            known_file))
        sys.exit(1)
known_hash = ssdeep.hash_from_file(known_file)
# Generate and test ssdeep signature for comparison file(s)
if os.path.isdir(comparison):
# Process files in folders
for root, _, files in os.walk(comparison):
for f in files:
file_entry = os.path.join(root, f)
comp_hash = ssdeep.hash_from_file(file_entry)
comp_val = ssdeep.compare(known_hash, comp_hash)
output(known_file, known_hash,
file_entry, comp_hash,
comp_val, output_type)
    elif os.path.isfile(comparison):
        # Process a single file
        comp_hash = ssdeep.hash_from_file(comparison)
        comp_val = ssdeep.compare(known_hash, comp_hash)
        output(known_file, known_hash, comparison, comp_hash,
               comp_val, output_type)
else:
logger.error("Error - path {} not found".format(
comparison))
sys.exit(1)
| 11,995
|
def _classictautstring_TV1(x, w, y, **kwargs):
"""Classic taut string method for TV1 proximity"""
_call(lib.classicTautString_TV1, x, np.size(x), w, y)
| 11,996
|
def _get_import(name, module: ast.Module):
"""
get from import by name
"""
for stm in ast.walk(module):
if isinstance(stm, ast.ImportFrom):
for iname in stm.names:
if isinstance(iname, ast.alias):
if iname.name == name:
return 'from ' + str(stm.module) + ' import ' + name
if isinstance(stm, ast.Import):
pass
return None
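# Usage sketch on a small synthetic module: only `from ... import name` statements
# are resolved, matching the walker above; plain `import x` statements return None.
import ast
source = "from os import path\nimport sys\n"
module = ast.parse(source)
print(_get_import("path", module))                # from os import path
print(_get_import("sys", module))                 # None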
| 11,997
|
def load_FIPS_data():
""" Load FIPS ref table """
directory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filename = os.path.join(directory, 'ref_data', 'FIPS_ref_data.csv')
df = pd.read_csv(filename)
df['fips'] = df['fips'].astype(str)
return df
| 11,998
|
def show_stats(number):
"""
Display stats
@param number number of times we ran the simulation
"""
average = SimInterface.score_total / number
print(f"Average Score: {average:8.3f}")
print(f" Best Score: {SimInterface.minscore:4d}")
print(f" Worst Score: {SimInterface.maxscore:4d}")
print(f" Losses: {SimInterface.losses:4d}")
SimInterface.score_total = 0
SimInterface.minscore = MAX_GUESS_ALLOWED
SimInterface.maxscore = 0
SimInterface.losses = 0
| 11,999
|