| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def xnnpack_unit_test(name, srcs, copts = [], mingw_copts = [], msys_copts = [], deps = []):
"""Unit test binary based on Google Test.
Args:
name: The name of the test target to define.
srcs: The list of source and header files.
copts: The list of additional compiler flags for the target. -I flags
for include/ and src/ directories of XNNPACK are always prepended
before these user-specified flags.
mingw_copts: The list of compiler flags to use with MinGW GCC compilers.
msys_copts: The list of compiler flags to use with MSYS (Cygwin) GCC compilers.
deps: The list of additional libraries to be linked. Google Test library
(with main() function) is always added as a dependency and does not
need to be explicitly specified.
"""
native.cc_test(
name = name,
srcs = srcs,
copts = xnnpack_std_cxxopts() + [
"-Iinclude",
"-Isrc",
] + copts + select({
":windows_x86_64_mingw": mingw_copts,
":windows_x86_64_msys": msys_copts,
"//conditions:default": [],
}),
linkopts = select({
":emscripten": xnnpack_emscripten_test_linkopts(),
"//conditions:default": [],
}),
linkstatic = True,
deps = [
"@com_google_googletest//:gtest_main",
] + deps + select({
":emscripten": xnnpack_emscripten_deps(),
"//conditions:default": [],
}),
)
| 5,339,900
|
def zscore(collection, iteratee=None):
"""Calculate the standard score assuming normal distribution. If iteratee
is passed, each element of `collection` is passed through an iteratee before
the standard score is computed.
Args:
collection (list|dict): Collection to process.
iteratee (mixed, optional): Iteratee applied per iteration.
Returns:
list: Calculated standard score for each element.
Example:
>>> results = zscore([1, 2, 3])
# [-1.224744871391589, 0.0, 1.224744871391589]
.. versionadded:: 2.1.0
"""
array = pyd.map_(collection, iteratee)
avg = mean(array)
sig = std_deviation(array)
return pyd.map_(array, lambda item: (item - avg) / sig)
| 5,339,901
|
def AND(
*logicals: Tuple[func_xltypes.XlExpr]
) -> func_xltypes.XlBoolean:
"""Determine if all conditions in a test are TRUE
https://support.office.com/en-us/article/
and-function-5f19b2e8-e1df-4408-897a-ce285a19e9d9
"""
if not logicals:
raise xlerrors.NullExcelError('logical1 is required')
# Use delayed evaluation to minimize the number of values to evaluate.
for logical in logicals:
val = logical()
for item in xl.flatten([val]):
if func_xltypes.Blank.is_blank(item):
continue
if not bool(item):
return False
return True
| 5,339,902
|
def check_mask(mask):
"""Check if mask is valid by its area"""
area_ratio = np.sum(mask) / float(mask.shape[0] * mask.shape[1])
return (area_ratio > MASK_THRES_MIN) and (area_ratio < MASK_THRES_MAX)
| 5,339,903
|
def supported_estimators():
"""Return a `dict` of supported estimators."""
allowed = {
'LogisticRegression': LogisticRegression,
'RandomForestClassifier': RandomForestClassifier,
'DecisionTreeClassifier': DecisionTreeClassifier,
'KNeighborsClassifier': KNeighborsClassifier,
'MultinomialNB': MultinomialNB,
'GaussianNB': GaussianNB,
'BernoulliNB': BernoulliNB
}
return allowed
| 5,339,904
|
def validate_gateway(gateway):
"""Test that a gateway is correctly set up.
Returns True if successful, or an error message."""
from hiicart.gateway.base import GatewayError
from hiicart.gateway.amazon.gateway import AmazonGateway
from hiicart.gateway.google.gateway import GoogleGateway
from hiicart.gateway.paypal.gateway import PaypalGateway
from hiicart.gateway.paypal2.gateway import Paypal2Gateway
from hiicart.gateway.paypal_adaptive.gateway import PaypalAPGateway
from hiicart.gateway.braintree.gateway import BraintreeGateway
from hiicart.gateway.authorizenet.gateway import AuthorizeNetGateway
from hiicart.gateway.paypal_express.gateway import PaypalExpressCheckoutGateway
from hiicart.gateway.stripe.gateway import StripeGateway
gateways = {
'amazon': AmazonGateway,
'google': GoogleGateway,
'paypal': PaypalGateway,
'paypal2': Paypal2Gateway,
'paypal_adaptive': PaypalAPGateway,
'paypal_express': PaypalExpressCheckoutGateway,
'braintree': BraintreeGateway,
'authorizenet': AuthorizeNetGateway,
'stripe': StripeGateway
}
try:
cls = gateways[gateway]
obj = cls()
return obj._is_valid() or "Authentication Error"
except GatewayError as err:
return str(err)
| 5,339,905
|
def execute_query(db, query):
"""get data from database
"""
result = []
with closing(sqlite3.connect(db)) as conn:
conn.row_factory = sqlite3.Row
cur = conn.cursor()
for row in cur.execute(query):
result.append({name: row[name] for name in row.keys()})
return result
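# A minimal usage sketch for execute_query; the database path, table and column
# names below are hypothetical, and sqlite3/closing are assumed to be imported
# at module level, as in the function above.
rows = execute_query("example.db", "SELECT id, name FROM users")
for row in rows:
    print(row["id"], row["name"])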
| 5,339,906
|
def set_seeds(seed: int, env = None) -> None:
"""
Sets seeds for reproducibility
:param seed: Seed Value
:param env: Optionally pass gym environment to set its seed
:type seed: int
:type env: Gym Environment
"""
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
if env is not None:
env.seed(seed)
| 5,339,907
|
def test_get_optimal_route(get_routes_in_parts):
"""Test of the function get_optimal_route.
Args:
get_routes_in_parts (fixture): Returns each route from the list
separately. Each route is represented by a dictionary of the form
{'Source': ..., 'Transfer': ..., 'Destination': ...}.
"""
for route in get_routes_in_parts:
filtered_flight_direction = get_flights_filtered_direction(
route.get('Source'),
route.get('Destination'),
)
optimal_route = get_optimal_route(
route.get('Source'),
route.get('Destination'),
)
median_time_optimal_route = get_median_time(optimal_route)
median_price_optimal_route = get_median_price(optimal_route)
assert median_time_optimal_route <= get_median_time(
filtered_flight_direction,
)
assert median_price_optimal_route <= get_median_price(
filtered_flight_direction,
)
| 5,339,908
|
def check_for_tool(tool_name: str) -> None:
"""Check if analysis tool is present on the file system."""
if sys.platform in ["win32", "msys", "cygwin"]:
tool_name += ".exe"
if pathlib.Path(f"./resources/ztools/{tool_name}").is_file():
return
sys.stderr.write(
colored(
f"The tool `{tool_name}` was not found in `resources\\ztools`.\n"
"You can download this tool using the `downloader.py` script.\n\n",
"red",
),
)
| 5,339,909
|
def module_list(path):
"""
Return the list containing the names of the modules available in
the given folder.
:param path: folder path
:type path: str
:returns: modules
:rtype: list
"""
if os.path.isdir(path):
folder_list = os.listdir(path)
elif path.endswith('.egg'):
from zipimport import zipimporter
try:
folder_list = [f for f in zipimporter(path)._files]
except Exception:
folder_list = []
else:
folder_list = []
#folder_list = glob.glob(os.path.join(path,'*'))
folder_list = [
p for p in folder_list
if (os.path.exists(os.path.join(path, p, '__init__.py')) or
p[-3:] in {'.py', '.so'} or
p[-4:] in {'.pyc', '.pyo', '.pyd'})]
folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]
return folder_list
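# A minimal usage sketch for module_list: list the modules available in the
# standard library directory (os is assumed to be imported at module level).
stdlib_dir = os.path.dirname(os.__file__)
print(module_list(stdlib_dir))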
| 5,339,910
|
def check_weighting_input(z_matrix, c_method, w_method):
"""
Raise an exception if any argument is inappropriate for the corresponding
weighting method
"""
if w_method.upper() in {"MW", "EM", "SD", "CRITIC", "VIC"}:
if not is_normalized_matrix(z_matrix):
raise ValueError(
"The decision matrix must be normalized in order to apply "
+ "the {} weighting method".format(w_method),
)
if w_method.upper() == "EM":
if (
not np.all(
np.isclose(
np.sum(z_matrix, axis=0),
np.ones(z_matrix.shape[1]),
)
)
):
raise ValueError(
"The columns of the decision matrix must sum to 1 in "
+ "order to apply the EM weighting method",
)
elif w_method.upper() == "CRITIC":
if c_method.upper() not in {"PEARSON", "ABSPEARSON", "DCOR"}:
raise ValueError(
"Unknown compatibility of the CRITIC weighting method "
+ "with the {} correlation method".format(c_method)
)
elif w_method.upper() == "VIC":
if c_method.upper() in {"PEARSON"}:
raise ValueError(
"The VIC weighting method is not compatible with the "
+ "{} correlation method".format(c_method),
)
if c_method.upper() not in {"ABSPEARSON", "DCOR"}:
raise ValueError(
"Unknown compatibility of the VIC weighting method with "
+ "the {} correlation method".format(c_method),
)
else:
raise ValueError("Unknown weighting method ({})".format(w_method))
| 5,339,911
|
def to_mgb_supported_dtype(dtype_):
"""get the dtype supported by megbrain nearest to given dtype"""
if (
dtype.is_lowbit(dtype_)
or dtype.is_quantize(dtype_)
or dtype.is_bfloat16(dtype_)
):
return dtype_
return _detail._to_mgb_supported_dtype(dtype_)
| 5,339,912
|
def get_reset_state_name(t_fsm):
"""
Returns the name of the reset state.
If an .r keyword is specified, that is the name of the reset state.
If the .r keyword is not present, the first state defined
in the transition table is the reset state.
:param t_fsm: blifparser.BlifParser().blif.fsm object
:return str reset_state: name of the reset state
"""
reset_state = None
if t_fsm.r is None:
if len(t_fsm.transtable) > 0:
reset_state = t_fsm.transtable[0][1]
else:
reset_state = t_fsm.r.name
return reset_state
| 5,339,913
|
def gridmake(*arrays):
"""
Expands one or more vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
*arrays : tuple/list of np.ndarray
Tuple/list of vectors to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
Based on the original function ``gridmake`` in the CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
"""
if all([i.ndim == 1 for i in arrays]):
d = len(arrays)
if d == 2:
out = _gridmake2(*arrays)
else:
out = _gridmake2(arrays[0], arrays[1])
for arr in arrays[2:]:
out = _gridmake2(out, arr)
return out
else:
raise NotImplementedError("Come back here")
| 5,339,914
|
def pagerotate(document: vp.Document, clockwise: bool):
"""Rotate the page by 90 degrees.
This command rotates the page by 90 degrees counter-clockwise. If the `--clockwise` option
is passed, it rotates the page clockwise instead.
Note: if the page size is not defined, an error is printed and the page is not rotated.
"""
page_size = document.page_size
if page_size is None:
logging.warning("pagerotate: page size is not defined, page not rotated")
return document
w, h = page_size
if clockwise:
document.rotate(math.pi / 2)
document.translate(h, 0)
else:
document.rotate(-math.pi / 2)
document.translate(0, w)
document.page_size = h, w
return document
| 5,339,915
|
def create_new_deployment(runner: Runner,
args: argparse.Namespace) -> Tuple[str, str]:
"""Create a new Deployment, return its name and Kubernetes label."""
run_id = str(uuid4())
def remove_existing_deployment():
runner.get_kubectl(
args.context, args.namespace, [
"delete",
"--ignore-not-found",
"all",
"--selector=telepresence=" + run_id,
]
)
atexit.register(remove_existing_deployment)
remove_existing_deployment()
command = [
"run",
# This will result in using Deployment:
"--restart=Always",
"--limits=memory=256Mi",
"--requests=memory=64Mi",
args.new_deployment,
"--image=" + TELEPRESENCE_REMOTE_IMAGE,
"--labels=telepresence=" + run_id,
]
for port in args.expose.remote():
command.append("--port={}".format(port))
if args.expose.remote():
command.append("--expose")
# If we're on a local VM we need to use a different nameserver to prevent
# infinite loops caused by sshuttle:
if args.method == "vpn-tcp" and args.in_local_vm:
command.append(
"--env=TELEPRESENCE_NAMESERVER=" + get_alternate_nameserver()
)
if args.needs_root:
override = {
"apiVersion": "extensions/v1beta1",
"spec": {
"template": {
"spec": {
"securityContext": {
"runAsUser": 0
}
}
}
}
}
command.append("--overrides=" + json.dumps(override))
runner.get_kubectl(args.context, args.namespace, command)
return args.new_deployment, run_id
| 5,339,916
|
def inv_send_rheader(r):
""" Resource Header for Send """
if r.representation == "html" and r.name == "send":
record = r.record
if record:
db = current.db
s3db = current.s3db
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
tabs = [(T("Edit Details"), None),
(T("Items"), "track_item"),
]
if settings.get_inv_send_packaging():
tabs.append((T("Packaging"), "send_package"))
if settings.get_inv_document_filing():
tabs.append((T("Documents"), "document"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
stable = s3db.org_site
send_id = record.id
status = record.status
site_id = record.site_id
if site_id:
site = db(stable.site_id == site_id).select(stable.organisation_id,
stable.instance_type,
limitby = (0, 1),
).first()
from .org import org_organisation_logo
logo = org_organisation_logo(site.organisation_id)
instance_table = s3db[site.instance_type]
if "phone1" in instance_table.fields:
site = db(instance_table.site_id == site_id).select(instance_table.phone1,
instance_table.phone2,
limitby = (0, 1),
).first()
phone1 = site.phone1
phone2 = site.phone2
else:
phone1 = None
phone2 = None
else:
logo = ""
phone1 = None
phone2 = None
to_site_id = record.to_site_id
if to_site_id:
site = db(stable.site_id == to_site_id).select(stable.location_id,
limitby = (0, 1),
).first()
address = s3db.gis_LocationRepresent(address_only = True)(site.location_id)
else:
address = NONE
if settings.get_inv_send_req():
req_ref_label = TH("%s: " % table.req_ref.label)
ltable = s3db.inv_send_req
rtable = s3db.inv_req
query = (ltable.send_id == send_id) & \
(ltable.req_id == rtable.id)
rows = db(query).select(rtable.id,
rtable.req_ref,
)
if len(rows) == 1:
row = rows.first()
req_ref_value = TD(inv_ReqRefRepresent(show_link = True)(row.req_ref, row))
else:
# Cache values in class
refs = [row.req_ref for row in rows]
represent = inv_ReqRefRepresent(show_link = True)
represent.bulk(refs, rows, show_link = True)
refs_repr = [s3_str(represent(ref)) for ref in refs]
refs_repr = ", ".join(refs_repr)
req_ref_value = TD(XML(refs_repr))
elif settings.get_inv_send_req_ref():
req_ref_label = TH("%s: " % table.req_ref.label)
#req_ref_value = TD(inv_ReqRefRepresent(show_link = True)(record.req_ref))
req_ref_value = TD(record.req_ref)
else:
req_ref_label = ""
req_ref_value = ""
shipment_details = TABLE(TR(TD(T(settings.get_inv_send_form_name().upper()),
_colspan = 2,
_class = "pdf_title",
),
TD(logo,
_colspan = 2,
),
),
TR(TH("%s: " % table.status.label),
table.status.represent(status),
),
TR(TH("%s: " % table.send_ref.label),
TD(table.send_ref.represent(record.send_ref)),
req_ref_label,
req_ref_value,
),
TR(TH("%s: " % table.date.label),
table.date.represent(record.date),
TH("%s: " % table.delivery_date.label),
table.delivery_date.represent(record.delivery_date),
),
TR(TH("%s: " % table.to_site_id.label),
table.to_site_id.represent(record.to_site_id),
TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
),
TR(TH("%s: " % T("Address")),
TD(address, _colspan=3),
),
TR(TH("%s: " % table.transported_by.label),
table.transported_by.represent(record.transported_by),
TH("%s: " % table.transport_ref.label),
table.transport_ref.represent(record.transport_ref),
),
TR(TH("%s: " % table.sender_id.label),
table.sender_id.represent(record.sender_id),
TH("%s: " % table.recipient_id.label),
table.recipient_id.represent(record.recipient_id),
),
TR(TH("%s: " % T("Complete? Please call")),
phone1 or "",
TH("%s: " % T("Problems? Please call")),
phone2 or phone1 or "",
),
TR(TH("%s: " % table.comments.label),
TD(record.comments or "", _colspan=3)
)
)
rfooter = TAG[""]()
if status != SHIP_STATUS_CANCEL and \
r.method != "form":
if current.auth.s3_has_permission("update", "inv_send",
record_id = record.id,
):
packaging = None
# Don't show buttons unless Items have been added
tracktable = s3db.inv_track_item
query = (tracktable.send_id == send_id)
item = db(query).select(tracktable.id,
limitby = (0, 1),
).first()
if item:
actions = DIV()
jappend = s3.js_global.append
if s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.inv_send_rheader.js" % r.application)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.inv_send_rheader.min.js" % r.application)
if status == SHIP_STATUS_IN_PROCESS:
actions.append(A(ICON("print"),
" ",
T("Picking List"),
_href = URL(args = [record.id,
"pick_list.xls",
]
),
_class = "action-btn",
)
)
if settings.get_inv_send_packaging():
actions.append(A(ICON("print"),
" ",
T("Labels"),
_href = URL(args = [record.id,
"labels.xls",
]
),
_class = "action-btn",
)
)
actions.append(A(T("Send Shipment"),
_href = URL(args = [record.id,
"process",
]
),
_id = "send-process",
_class = "action-btn",
)
)
jappend('''i18n.send_process_confirm="%s"''' % \
T("Do you want to send this shipment?"))
elif status == SHIP_STATUS_RETURNING:
actions.append(A(T("Complete Returns"),
_href = URL(c = "inv",
f = "send",
args = [record.id,
"return_complete",
]
),
_id = "return-process",
_class = "action-btn"
)
)
jappend('''i18n.return_process_confirm="%s"''' % \
T("Do you want to complete the return process?"))
elif status == SHIP_STATUS_SENT:
actions.append(A(T("Manage Returns"),
_href = URL(c = "inv",
f = "send",
args = [record.id,
"return",
],
vars = None,
),
_id = "send-return",
_class = "action-btn",
_title = T("Only use this button to accept back into stock some items that were returned from a delivery.")
)
)
jappend('''i18n.send_return_confirm="%s"''' % \
T("Confirm that some items were returned from a delivery and they will be accepted back into stock."))
actions.append(A(T("Confirm Shipment Received"),
_href = URL(f = "send",
args = [record.id,
"received",
],
),
_id = "send-receive",
_class = "action-btn",
_title = T("Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system.")
)
)
jappend('''i18n.send_receive_confirm="%s"''' % \
T("Confirm that the shipment has been received by a destination which will not record the shipment directly into the system."))
if status != SHIP_STATUS_RECEIVED:
if settings.get_inv_send_packaging():
if status == SHIP_STATUS_IN_PROCESS:
# Insert in front of 'Send Shipment'
index = -1
else:
# Append at end
index = len(actions)
actions.insert(index, A(ICON("print"),
" ",
T("Packing List"),
_href = URL(args = [record.id,
"packing_list.xls",
]
),
_class = "action-btn",
)
)
if settings.get_inv_send_gift_certificate():
if status == SHIP_STATUS_IN_PROCESS:
# Insert in front of 'Send Shipment'
index = -1
else:
# Append at end
index = len(actions)
actions.insert(index, A(ICON("print"),
" ",
T("Gift Certificate"),
_href = URL(c = "inv",
f = "send",
args = [record.id,
"gift_certificate.xls",
]
),
_class = "action-btn"
)
)
if status != SHIP_STATUS_IN_PROCESS:
actions.append(A(T("Cancel Shipment"),
_href = URL(c = "inv",
f = "send",
args = [record.id,
"cancel",
]
),
_id = "send-cancel",
_class = "delete-btn"
)
)
jappend('''i18n.send_cancel_confirm="%s"''' % \
T("Do you want to cancel this sent shipment? The items will be returned to the Warehouse. This action CANNOT be undone!"))
shipment_details.append(TR(TH(actions,
_colspan = 2,
)))
s3.rfooter = rfooter
rheader = DIV(shipment_details,
rheader_tabs,
#rSubdata
)
return rheader
return None
| 5,339,917
|
def gauss_reparametrize(mu, logvar, n_sample=1):
"""Gaussian reparametrization"""
std = logvar.mul(0.5).exp_()
size = std.size()
eps = Variable(std.data.new(size[0], n_sample, size[1]).normal_())
z = eps.mul(std[:, None, :]).add_(mu[:, None, :])
z = torch.clamp(z, -4., 4.)
return z.view(z.size(0)*z.size(1), z.size(2), 1, 1)
| 5,339,918
|
def run_setup_py(cmd, pypath=None, path=None,
data_stream=0, env=None):
"""
Execution command for tests, separate from those used by the
code directly to prevent accidental behavior issues
"""
if env is None:
env = dict()
for envname in os.environ:
env[envname] = os.environ[envname]
# override the python path if needed
if pypath is not None:
env["PYTHONPATH"] = pypath
# override the execution path if needed
if path is not None:
env["PATH"] = path
if not env.get("PATH", ""):
env["PATH"] = _which_dirs("tar").union(_which_dirs("gzip"))
env["PATH"] = os.pathsep.join(env["PATH"])
cmd = [sys.executable, "setup.py"] + list(cmd)
# http://bugs.python.org/issue8557
shell = sys.platform == 'win32'
try:
proc = _Popen(
cmd, stdout=_PIPE, stderr=_PIPE, shell=shell, env=env,
)
if isinstance(data_stream, tuple):
data_stream = slice(*data_stream)
data = proc.communicate()[data_stream]
except OSError:
return 1, ''
# decode the console string if needed
if hasattr(data, "decode"):
# use the default encoding
data = data.decode()
data = unicodedata.normalize('NFC', data)
# communicate calls wait()
return proc.returncode, data
| 5,339,919
|
def test():
"""Run all the tests in the `tests/` directory using pytest """
import pytest
here = os.path.abspath(os.path.dirname(__file__))
pytest.main([os.path.join(here, 'tests')])
| 5,339,920
|
def forward_pass(img, session, images_placeholder, phase_train_placeholder, embeddings, image_size):
"""Feeds an image to the FaceNet model and returns a 128-dimension embedding for facial recognition.
Args:
img: image file (numpy array).
session: The active Tensorflow session.
images_placeholder: placeholder of the 'input:0' tensor of the pre-trained FaceNet model graph.
phase_train_placeholder: placeholder of the 'phase_train:0' tensor of the pre-trained FaceNet model graph.
embeddings: placeholder of the 'embeddings:0' tensor from the pre-trained FaceNet model graph.
image_size: (int) required square image size.
Returns:
embedding: (numpy array) of 128 values after the image is fed to the FaceNet model.
"""
# If there is a human face
if img is not None:
# Normalize the pixel values of the image for noise reduction for better accuracy and resize to desired size
image = load_img(
img=img, do_random_crop=False, do_random_flip=False,
do_prewhiten=True, image_size=image_size
)
# Run forward pass on FaceNet model to calculate embedding
feed_dict = {images_placeholder: image, phase_train_placeholder: False}
embedding = session.run(embeddings, feed_dict=feed_dict)
return embedding
else:
return None
| 5,339,921
|
def restore_tf_variable(tf_sess, target_paras, model_name):
"""restore explorer variable with tf.train.checkpoint"""
reader = tf.train.NewCheckpointReader(model_name)
var_names = reader.get_variable_to_shape_map().keys()
result = dict()
for _name in var_names:
result[_name] = reader.get_tensor(_name)
logging.debug("read variable-{} from model file: {}".format(_name, model_name))
with tf_sess.as_default(): # must work with sess
for var_key in target_paras:
try:
var_key.load(result[var_key.name])
logging.debug("load {} success".format(var_key.name))
except BaseException as err:
raise KeyError("update {} encounter error:{}".format(var_key.name, err))
| 5,339,922
|
def check_url(url):
"""Returns True if the url responds with a status code between 200 and 208
(inclusive), otherwise returns False.
"""
try:
req = urllib.request.Request(url, headers=headers)
response = urllib.request.urlopen(req)
return response.code in range(200, 209)
except Exception:
return False
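# A minimal usage sketch for check_url; the URL is hypothetical and the
# module-level `headers` dict used by the function is assumed to be defined.
if check_url("https://example.com"):
    print("URL is reachable")
else:
    print("URL is unreachable or returned an unexpected status code")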
| 5,339,923
|
def build_permutation_importance(
data,
data_labels,
feature_names,
model,
metrics,
repeats=100,
random_seed=42
):
"""Calculates permutation feature importance."""
pi_results = {}
for metric in metrics:
pi = sklearn.inspection.permutation_importance(
model,
data,
data_labels,
n_repeats=repeats,
scoring=metric,
random_state=random_seed)
pi_results[metric] = []
for feature_id, feature_name in enumerate(feature_names):
pi_results[metric].append((
feature_name,
pi.importances_mean[feature_id],
pi.importances_std[feature_id]
))
# for i in pi.importances_mean.argsort()[::-1]:
# if pi.importances_mean[i] - 2 * pi.importances_std[i] > 0:
# print(f'{feature_name:<8}'
# f'{pi.importances_mean[feature_id]:.3f}'
# f' +/- {pi.importances_std[feature_id]:.3f}')
return pi_results
| 5,339,924
|
def _load_parent(collection, meta):
"""Determine the parent document for the document that is to be
ingested."""
parent = ensure_dict(meta.get("parent"))
parent_id = meta.get("parent_id", parent.get("id"))
if parent_id is None:
return
parent = Document.by_id(parent_id, collection=collection)
if parent is None:
raise BadRequest(
response=jsonify(
{"status": "error", "message": "Cannot load parent document"},
status=400,
)
)
return parent
| 5,339,925
|
def get_latest_sensor_reading(sensor_serial, metric):
"""
Get latest sensor reading from MT sensor
metrics: 'temperature', 'humidity', 'water_detection' or 'door'
"""
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"X-Cisco-Meraki-API-Key": meraki_api_key
}
params = {
"serials[]": sensor_serial,
"metric": metric
}
try:
msg = requests.request('GET',
f"{base_url}/networks/{network_id}/sensors/stats/latestBySensor",
headers=headers, params=params)
if msg.ok:
data = msg.json()
return data
except Exception as e:
print("API Connection error: {}".format(e))
| 5,339,926
|
def border_msg(msg: str):
"""
This function draws a border line above and below the given text.
"""
row = len(msg)
h = ''.join(['+'] + ['-' * row] + ['+'])
return h + "\n" + msg + "\n" + h
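# A minimal usage sketch for border_msg; note that the border width equals
# len(msg), so it is best suited to short single-line messages.
print(border_msg("hello"))
# +-----+
# hello
# +-----+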
| 5,339,927
|
def test_visualization_empty_visu_file(data_file_Fujita,
condition_file_Fujita,
visu_file_Fujita_empty):
"""
Test: An empty visualization specification file should fall back to the same
routine as having no file at all.
"""
plot_data_and_simulation(data_file_Fujita,
condition_file_Fujita,
visu_file_Fujita_empty)
| 5,339,928
|
def create_app(config_name='development'):
"""Returns flask app based on the configuration"""
flask_app = Flask(__name__)
flask_app.config.from_object(app_config[config_name])
flask_app.config['JSON_SORT_KEYS'] = False
flask_app.url_map.strict_slashes = False
flask_app.register_error_handler(400, handle_bad_request)
flask_app.register_error_handler(404, handle_not_found)
flask_app.register_blueprint(v1_bp)
flask_app.register_blueprint(party_bp)
flask_app.register_blueprint(office_bp)
flask_app.register_blueprint(user_bp)
return flask_app
| 5,339,929
|
def test_exists(keys, key, expected_result):
"""
GIVEN keys to add, key to check for and expected exists result
WHEN keys are added to the bucket and exists is called with the key
THEN the expected result is returned.
"""
test_bucket = bucket.Bucket()
for insert_key in keys:
test_bucket.insert(insert_key, "value")
result = test_bucket.exists(key)
assert result == expected_result
| 5,339,930
|
def auto_merge_paths(data, auto_merge_distance, auto_close_paths=True):
"""
This function connects all paths in the given dataset, for which the start or endpoints are closer than
auto_merge_distance.
:param data: Should be a list or tuple containing paths, attributes, svg_attributes.
:param auto_merge_distance: If the start or end point of a pair of paths is closer than this distance in
millimeters, they are automatically merged. If one of the paths has to be reversed to do so, this is done
automatically. A line is added to the path to bridge the gap.
:param auto_close_paths: If set, the paths are automatically closed after the merging operation if the start and
end point of one path are closer than the auto_merge_distance. The path is closed by a line and its closed flag is set.
:return paths, attributes, svg_attributes, iters, numclosed: Modified paths, modified attributes, svg_attributes,
number of pairs connected and number of paths that were closed.
"""
paths, attributes, svg_attributes = data
def fix_first_pair(paths_, attributes_):
"""
Helper function that fixes the next best pair of paths, if they fulfill the condition
:rtype: NoneType in case paths_ is empty. Else fixed paths_ and attributes_.
"""
for i_ in range(len(paths_)):
# Get start and end points
start1 = paths_[i_][0].start
end1 = paths_[i_][-1].end
for j in range(len(paths_)):
if i_ != j:
start2 = paths_[j][0].start
end2 = paths_[j][-1].end
# Calculate all relevant distances for this pair
distance_ = px2mm(np.abs(start2 - end1))
distance_r1 = px2mm(np.abs(start2 - start1))
distance_r2 = px2mm(np.abs(end2 - end1))
# Perform merger
if distance_ < auto_merge_distance or distance_r2 < auto_merge_distance:
first = i_
second = j
else:
first = j
second = i_
if distance_r1 < auto_merge_distance or distance_r2 < auto_merge_distance:
# Reverse paths_[j] if necessary
paths_[j] = svgpathtools.path.Path(
*[svgpathtools.path.bpoints2bezier(segment.bpoints()[::-1]) for segment in paths_[j]])
if min([distance_, distance_r1, distance_r2]) < auto_merge_distance:
# Merge both paths
paths_[first] = svgpathtools.path.Path(*[segment for segment in paths_[first]] + [
svgpathtools.path.Line(paths_[first][-1].end, paths_[second][0].start)] +
[segment for segment in paths_[second]])
return paths_[:second] + paths_[second + 1:], attributes_[:second] + attributes_[second + 1:]
return None
iters = 0
while True:
ret = fix_first_pair(paths, attributes)
if ret is not None:
paths, attributes = ret
iters += 1
else:
break
# Make sure, paths are closed...
numclosed = 0
if auto_close_paths:
for i, path in enumerate(paths):
# Get start and end point distance
start = path[0].start
end = path[-1].end
distance = px2mm(np.abs(start - end))
if distance < auto_merge_distance:
# Close the path
paths[i] = svgpathtools.path.Path(*[segment for segment in path] + [svgpathtools.path.Line(end, start)])
paths[i].closed = True
numclosed += 1
return paths, attributes, svg_attributes, iters, numclosed
| 5,339,931
|
def is_authorized(secure: AccessRestriction):
"""Returns authorization status based on the given access restriction.
:param secure: access restriction
:type secure: AccessRestriction
:return: authorization status (``True`` or ``False``)
"""
if secure == AccessRestriction.ALL:
return True
elif secure == AccessRestriction.STAFF:
return is_staff(get_course())
elif secure == AccessRestriction.STUDENT:
return is_enrolled(get_course())
else:
raise Exception(f"{secure} is not a valid AccessRestriction")
| 5,339,932
|
def create_link_forum(**attrs):
"""Save a new link forum."""
link = build_link_forum(**attrs)
link.save()
return link
| 5,339,933
|
def open_report():
"""Probe Services: Open report
---
parameters:
- in: body
name: open report data
required: true
schema:
type: object
properties:
data_format_version:
type: string
format:
type: string
probe_asn:
type: string
probe_cc:
type: string
software_name:
type: string
software_version:
type: string
test_name:
type: string
test_start_time:
type: string
test_version:
type: string
responses:
'200':
description: Open report confirmation
content:
application/json:
schema:
type: object
properties:
backend_version:
type: string
report_id:
type: string
supported_formats:
type: array
items:
type: string
"""
log = current_app.logger
try:
data = req_json()
except Exception as e:
log.error(e)
return jerror("JSON expected")
log.info("Open report %r", data)
asn = data.get("probe_asn", "AS0").upper()
if len(asn) > 8 or len(asn) < 3 or not asn.startswith("AS"):
asn = "AS0"
try:
asn_i = int(asn[2:])
except ValueError:
asn_i = 0
cc = data.get("probe_cc", "ZZ").upper().replace("_", "")
if len(cc) != 2:
cc = "ZZ"
test_name = data.get("test_name", "").lower().replace("_", "")
ts = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
cid = "1" # collector id TODO read from conf
rand = b64encode(urandom(12), b"oo").decode()
rid = f"{ts}_{test_name}_{cc}_{asn_i}_n{cid}_{rand}"
return jsonify(
backend_version="1.3.5", supported_formats=["yaml", "json"], report_id=rid
)
| 5,339,934
|
def prepare_scan():
"""
Returns a lexical scanner for HTSQL grammar.
"""
# Start a new grammar.
grammar = LexicalGrammar()
# Regular context.
query = grammar.add_rule('query')
# Whitespace characters and comments (discarded).
query.add_token(r'''
SPACE: [\s]+ | [#] [^\0\r\n]*
''', is_junk=True)
# A sequence of characters enclosed in single quotes.
query.add_token(r'''
STRING: ['] ( [^'\0] | [']['] )* [']
''', unquote=(lambda t: t[1:-1].replace("''", "'")))
# An opening quote character without a closing quote.
query.add_token(r'''
BAD_STRING: [']
''', error="cannot find a matching quote mark")
# A number in exponential notation.
query.add_token(r'''
FLOAT: ( [0-9]+ ( [.] [0-9]* )? | [.] [0-9]+ ) [eE] [+-]? [0-9]+
''')
# A number with a decimal point.
query.add_token(r'''
DECIMAL:
[0-9]+ [.] [0-9]* | [.] [0-9]+
''')
# An unsigned integer number.
query.add_token(r'''
INTEGER:
[0-9]+
''')
# A sequence of alphanumeric characters (not starting with a digit).
query.add_token(r'''
NAME: [\w]+
''')
# Operators and punctuation characters. The token code coincides
# with the token value.
query.add_token(r'''
SYMBOL: [~] | [!][~] | [<][=] | [<] | [>][=] | [>] |
[=][=] | [=] | [!][=][=] | [!][=] |
[\^] | [?] | [-][>] | [@] | [:][=] |
[!] | [&] | [|] | [+] | [-] | [*] | [/] |
[(] | [)] | [{] | [}] | [.] | [,] | [:] | [;] | [$]
''', is_symbol=True)
# The `[` character starts an identity constructor.
query.add_token(r'''
LBRACKET:
[\[]
''', is_symbol=True, push='identity')
# An unmatched `]`.
query.add_token(r'''
BAD_RBRACKET:
[\]]
''', error="cannot find a matching '['")
# The input end.
query.add_token(r'''
END: $
''', is_symbol=True, pop=1)
# Identity constructor context.
identity = grammar.add_rule('identity')
# Whitespace characters (discarded).
identity.add_token(r'''
SPACE: [\s]+
''', is_junk=True)
# Start of a nested label group.
identity.add_token(r'''
LBRACKET:
[\[] | [(]
''', is_symbol=True, push='identity')
# End of a label group or the identity constructor.
identity.add_token(r'''
RBRACKET:
[\]] | [)]
''', is_symbol=True, pop=1)
# Label separator.
identity.add_token(r'''
SYMBOL: [.]
''', is_symbol=True)
# Unquoted sequence of alphanumeric characters and dashes.
identity.add_token(r'''
LABEL: [\w-]+
''')
# A sequence of characters enclosed in single quotes.
identity.add_token(r'''
STRING: ['] ( [^'\0] | [']['] )* [']
''', unquote=(lambda t: t[1:-1].replace("''", "'")))
# An opening quote character without a closing quote.
identity.add_token(r'''
BAD_STRING: [']
''', error="cannot find a matching quote mark")
# A reference indicator.
identity.add_token(r'''
REFERENCE:
[$]
''', is_symbol=True, push='name')
# Unexpected end of input.
identity.add_token(r'''
END: $
''', error="cannot find a matching ']'")
# A context for an identifier following the `$` indicator
# in an identity constructor. We need a separate rule because
# `%NAME` and `%LABEL` productions intersect.
name = grammar.add_rule('name')
# Whitespace characters (discarded).
name.add_token(r'''
SPACE: [\s]+
''', is_junk=True)
# An integer number; not expected here, but ensures that the following
# `%NAME` production does not start with a digit.
name.add_token(r'''
INTEGER:
[0-9]+
''', pop=1)
# A sequence of alphanumeric characters (not starting with a digit).
name.add_token(r'''
NAME: [\w]+
''', pop=1)
# Anything else.
name.add_token(r'''
OTHER: ()
''', is_junk=True, pop=1)
# Add a `%DIRSIG` token in front of `+` and `-` direction indicators
# to distinguish them from addition/subtraction operators.
grammar.add_signal('''
DIRSIG: ( `+` | `-` )+ ( `:` | `,` | `;` | `)` | `}` )
''')
# Add `%PIPESIG` in front of `/:` pipe indicator to prevent it from
# being recognized as a division operator.
grammar.add_signal('''
PIPESIG:
`/` `:`
''')
# Add `%LHSSIG` in front of a left-hand side of an assignment expression.
grammar.add_signal('''
LHSSIG: `$`? %NAME ( `.` `$`? %NAME )*
( `(` ( `$`? %NAME ( `,` `$`? %NAME )* `,`? )? `)` )?
`:=`
''')
# Generate and return the scanner.
return grammar()
| 5,339,935
|
def check_arc(val):
"""Check that the argument passed to an inverse trig function is valid and
raise an error otherwise. Inverse trig functions only accept values strictly between -1 and 1.
Args:
val ([int or float])
Raises:
raise error if number is not between -1 and 1
"""
if isinstance(val, np.ndarray):
if not np.all((val > -1) & (val < 1)):
raise ValueError(
f"error raised by undefined: invalid values {val}, which should all be within (-1, 1)")
elif isinstance(val, (int, float)):
if not (val > -1 and val < 1):
raise ValueError(
f"error raised by undefined: invalid value {val}, which should be within (-1, 1)")
| 5,339,936
|
def test_s3_hook_file_delete_404(
client_hook_s3_storage_1: object, s3_client: object, s3_resource: object, s3_bucket: str
) -> None:
"""Testing DELETE resource
Args:
client_hook_s3_storage_1 (fixture): The test client.
s3_client (fixture): A S3 client object.
s3_resource (fixture): A S3 resource object.
s3_bucket (fixture): The s3 bucket name.
"""
key = f'{uuid4()}'
params = {'filename': f'{key}.txt'}
response: Result = client_hook_s3_storage_1.simulate_delete('/middleware', params=params)
assert response.status_code == 404
| 5,339,937
|
def generate_question_answer(data_dir):
"""
Generate a question-answering dataset from the extracted triples.
:return:
"""
# Load the triples as a list (previously exported from a JSON file)
# path = '/Users/admin/Desktop/words2.json'
data = get_data()
relations = ['审计', '子单位', '涉及', '简称', '存在', '审计日期', '篇章', '条款']
qa = []
# with open(path, 'r') as f:
# data = json.load(f)
for one_data in data:
if one_data[1] == '审计':
shen = '[' + one_data[0] + ']' + "审计了哪个单位"
qa_shen = [shen, one_data[2]]
print(qa_shen)
qa.append(qa_shen)
elif one_data[1] == "子单位":
zi = '[' + one_data[0] + ']' + "的子单位是什么"
qa_zi = [zi, one_data[2]]
print(qa_zi)
qa.append(qa_zi)
elif one_data[1] == "涉及":
she = '[' + one_data[0] + ']' + "涉及的资金是多少"
qa_she = [she, one_data[2]]
print(qa_she)
qa.append(qa_she)
elif one_data[1] == "简称":
jian = '[' + one_data[0] + ']' + "的简称是什么"
qa_jian = [jian, one_data[2]]
print(qa_jian)
qa.append(qa_jian)
elif one_data[1] == "存在":
cun = '[' + one_data[0] + ']' + "存在哪些审计问题"
qa_cun = [cun, one_data[2]]
print(qa_cun)
qa.append(qa_cun)
elif one_data[1] == "篇章":
cun = '[' + one_data[0] + ']' + "有哪些篇章"
qa_cun = [cun, one_data[2]]
print(qa_cun)
qa.append(qa_cun)
elif one_data[1] == "条款":
cun = '[' + one_data[0] + ']' + "有哪些条款"
qa_cun = [cun, one_data[2]]
print(qa_cun)
qa.append(qa_cun)
print(f"Size of the generated QA dataset: {len(qa)}")
train_data_num = int(len(qa) * 0.8)
valid_data_num = int(len(qa) * 0.1)
train_data, valid_data, test_data = qa[:train_data_num], qa[train_data_num:train_data_num + valid_data_num], qa[
train_data_num + valid_data_num:]
# Save the splits as JSON files
with open(os.path.join(data_dir, 'train.json'), 'w', encoding='utf-8') as f:
json.dump(train_data, f, ensure_ascii=False)
with open(os.path.join(data_dir, 'valid.json'), 'w', encoding='utf-8') as f:
json.dump(valid_data, f, ensure_ascii=False)
with open(os.path.join(data_dir, 'test.json'), 'w', encoding='utf-8') as f:
json.dump(test_data, f, ensure_ascii=False)
print("Finished generating the QA dataset")
| 5,339,938
|
def get_test_server(ctxt, **kw):
"""Return a Server object with appropriate attributes.
NOTE: The object leaves the attributes marked as changed, such
that a create() could be used to commit it to the DB.
"""
kw['object_type'] = 'server'
get_db_server_checked = check_keyword_arguments(
db_utils.get_test_server)
db_server = get_db_server_checked(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del db_server['id']
server = objects.Server(ctxt, **db_server)
return server
| 5,339,939
|
def main():
"""
Initializes and executes the program.
"""
print("%s\n\n%s %s (%s)\n" % (BANNER, NAME, VERSION, URL))
args = parse_args()
if args.update:
update()
exit()
if args.list:
representations = list_representations()
for _ in representations:
print("- %s" % _)
print("\n")
exit()
inputs = []
params = {}
output = ""
representations = list_representations()
if args.only:
representations = [representation for representation in representations if representation in args.only]
elif args.exclude:
representations = [representation for representation in representations if representation not in args.exclude]
print("%s Loaded %d %s to apply." %
(INFO, len(representations), "representation" if len(representations) == 1 else "representations"))
if args.load_file:
if not isfile(args.load_file):
print("%s could not find the file \"%s\"" %
(WARN, color(args.load_file)))
exit()
_ = sum(1 for line in open(args.load_file, "r"))
if _ < 1:
print("%s the file \"%s\" doesn't contain any valid input." %
(WARN, color(args.load_file)))
exit()
inputs += [line.rstrip('\n') for line in open(args.load_file, "r")]
print("%s Loaded %d input string%s from \"%s\".\n" %
(INFO, _, "s" if _ != 1 else "", color(args.load_file)))
if args.input:
inputs.append(args.input)
if args.params:
params = parseParams(args.params)
print("%s Starting tests at: \"%s\"\n" % (INFO, color(strftime("%X"), BW)))
if not exists(OUTPUT_DIR):
makedirs(OUTPUT_DIR)
modules = load_representations(representations)
for string in inputs:
print("%s\n\n%s applying transformation...\n" % (string, INFO))
for module in modules:
transformation = module.transform(string, params[module.__class__.__name__] if module.__class__.__name__ in params else {}) + "\n"
output += transformation
print(module.__class__.__name__ + ":\n")
print(transformation)
print("==================================\n")
if args.output:
f = open(OUTPUT_DIR + '/' + args.output,'w')
f.write(output)
f.close()
| 5,339,940
|
def create_songs_played_by_user(**kwargs):
"""
This function is used to create data for SongsPlayedByUser Table
Args:
**kwargs: provided kwargs
Examples:
>>> create_songs_played_by_user(song_name='this song', user_name='this user', genre='rock', date_played='2010-01-09')
"""
SongsPlayedByUser.objects.create(**kwargs)
| 5,339,941
|
def keras_model(optimizer="Adamax", activation="softplus", units=32):
"""Function to create model, required for KerasClassifier"""
model = Sequential()
model.add(Dense(units, activation="relu", input_dim=2500))
model.add(Dense(2, activation=activation))
model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
return model
| 5,339,942
|
def start_end_key(custom_cmp):
"""
Compare models with start and end dates.
"""
class K(object):
"""
Define comparison operators.
http://code.activestate.com/recipes/576653-convert-a-cmp-function-to-a-key-function/
"""
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return custom_cmp(self.obj, other.obj) < 0
def __gt__(self, other):
return custom_cmp(self.obj, other.obj) > 0
def __eq__(self, other):
return custom_cmp(self.obj, other.obj) == 0
def __le__(self, other):
return custom_cmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return custom_cmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return custom_cmp(self.obj, other.obj) != 0
return K
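# A minimal usage sketch for start_end_key: wrap a cmp-style comparator so it
# can be used as a sort key. The Span type and comparator here are hypothetical
# stand-ins for models with start and end dates.
from collections import namedtuple

Span = namedtuple("Span", ["start", "end"])

def _cmp_by_start(a, b):
    return (a.start > b.start) - (a.start < b.start)

spans = [Span(3, 5), Span(1, 2), Span(2, 4)]
spans.sort(key=start_end_key(_cmp_by_start))  # sorted by start: 1, 2, 3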
| 5,339,943
|
def get_previous_release_date():
""" Fetch the previous release date (i.e. the release date of the current live database) """
releases = Release.objects.all().order_by('-date')
return str(releases[1].date)
| 5,339,944
|
def init_weather():
"""
This is called only once, when you want to enable the weather system.
"""
weather = create.create_script(WeatherScript)
weather.start()
| 5,339,945
|
def readLensModeParameters(calibfiledir, lensmode='WideAngleMode'):
"""
Retrieve the calibrated lens correction parameters
"""
# For wide angle mode
if lensmode == 'WideAngleMode':
LensModeDefaults, LensParamLines = [], []
with open(calibfiledir, 'r') as fc:
# Read the full file as a line-split string block
calib = fc.read().splitlines()
# Move read cursor back to the beginning
fc.seek(0)
# Scan through calibration file, find and append line indices
# (lind) to specific lens settings
for lind, line in enumerate(fc):
if '[WideAngleMode defaults' in line:
LensModeDefaults.append(lind)
elif '[WideAngleMode@' in line:
LensParamLines.append(lind)
# Specify regular expression pattern for retrieving numbers
numpattern = r'[-+]?\d*\.\d+|[-+]?\d+'
# Read detector settings at specific lens mode
aRange, eShift = [], []
for linum in LensModeDefaults:
# Collect the angular range
aRange = parsenum(
numpattern,
calib,
aRange,
linenumber=linum,
offset=2,
Range='all')
# Collect the eShift
eShift = parsenum(
numpattern,
calib,
eShift,
linenumber=linum,
offset=3,
Range='all')
# Read the list of calibrated Da coefficients at all retardation ratios
rr, aInner, Da1, Da3, Da5, Da7 = [], [], [], [], [], []
for linum in LensParamLines:
# Collect the retardation ratio (rr)
rr = parsenum(
numpattern,
calib,
rr,
linenumber=linum,
offset=0,
Range='all')
# Collect the aInner coefficient
aInner = parsenum(
numpattern,
calib,
aInner,
linenumber=linum,
offset=1,
Range='all')
# Collect Da1 coefficients
Da1 = parsenum(
numpattern,
calib,
Da1,
linenumber=linum,
offset=2,
Range='1:4')
# Collect Da3 coefficients
Da3 = parsenum(
numpattern,
calib,
Da3,
linenumber=linum,
offset=3,
Range='1:4')
# Collect Da5 coefficients
Da5 = parsenum(
numpattern,
calib,
Da5,
linenumber=linum,
offset=4,
Range='1:4')
# Collect Da7 coefficients
Da7 = parsenum(
numpattern,
calib,
Da7,
linenumber=linum,
offset=5,
Range='1:4')
aRange, eShift, rr, aInner = list(map(lambda x: np.asarray(
x, dtype='float').ravel(), [aRange, eShift, rr, aInner]))
Da1, Da3, Da5, Da7 = list(
map(lambda x: np.asarray(x, dtype='float'), [Da1, Da3, Da5, Da7]))
return aRange, eShift, rr, aInner, Da1, Da3, Da5, Da7
else:
print('This mode is currently not supported!')
| 5,339,946
|
def scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.boxes, num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_datas(scaled_boxlist, boxlist)
| 5,339,947
|
def fetch(uri: str, method: str = 'get', token: str = None):
""":rtype: (str|None, int)"""
uri = 'https://api.github.com/{0}'.format(uri)
auth = app.config['GITHUB_AUTH']
headers = {'Accept': 'application/vnd.github.mercy-preview+json'}
json = None
if token:
headers['Authorization'] = 'token {}'.format(token)
auth = None
try:
result = getattr(requests, method.lower())(uri, auth=auth, headers=headers)
result.raise_for_status()
json = result.json() if result.status_code != 204 else None
except requests.HTTPError as e:
app.logger.info(
"Request to {} failed ({}, {}): {}\n{}\n"
.format(result.url, method, e.strerror, result.status_code, result.text)
)
return json, result.status_code
| 5,339,948
|
def test_branch_with_no_atoms():
"""Test SELFIES that have a branch, but the branch has no atoms in it.
Such branches should not be made in the outputted SMILES.
"""
assert is_eq(sf.decoder("[C][Branch1_1][Ring2][Branch1_1]"
"[Branch1_1][Branch1_1][F]"),
"CF")
assert is_eq(sf.decoder("[C][Branch1_1][Ring2][Ring1]"
"[Ring1][Branch1_1][F]"),
"CF")
assert is_eq(sf.decoder("[C][Branch1_2][Ring2][Branch1_1]"
"[C][Cl][F]"),
"C(Cl)F")
# special case: Branch3_3 takes Q_1, Q_2 = [O] and Q_3 = ''. However,
# there are no more symbols in the branch.
assert is_eq(sf.decoder("[C][C][C][C][Branch3_3][O][O]"), "CCCC")
| 5,339,949
|
def nested_tuple(container):
"""Recursively transform a container structure to a nested tuple.
The function understands container types inheriting from the selected abstract base
classes in `collections.abc`, and performs the following replacements:
`Mapping`
`tuple` of key-value pair `tuple`s. The order is preserved in the case of an
`OrderedDict`, otherwise the key-value pairs are sorted if orderable and
otherwise kept in the order of iteration.
`Sequence`
`tuple` containing the same elements in unchanged order.
`Container and Iterable and Sized` (equivalent to `Collection` in python >= 3.6)
`tuple` containing the same elements in sorted order if orderable and otherwise
kept in the order of iteration.
The function recurses into these container types to perform the same replacement,
and leaves objects of other types untouched.
The returned container is hashable if and only if all the values contained in the
original data structure are hashable.
Parameters
----------
container
Data structure to transform into a nested tuple.
Returns
-------
tuple
Nested tuple containing the same data as `container`.
"""
if isinstance(container, OrderedDict):
return tuple(map(nested_tuple, container.items()))
if isinstance(container, Mapping):
return tuple(sorted_if_possible(map(nested_tuple, container.items())))
if not isinstance(container, (str, bytes)):
if isinstance(container, Sequence):
return tuple(map(nested_tuple, container))
if (
isinstance(container, Container)
and isinstance(container, Iterable)
and isinstance(container, Sized)
):
return tuple(sorted_if_possible(map(nested_tuple, container)))
return container
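# A minimal usage sketch for nested_tuple: nested containers become hashable
# nested tuples (sorted_if_possible is assumed to be defined in this module).
result = nested_tuple([1, [2, 3], {"k": 4}])
assert result == (1, (2, 3), (("k", 4),))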
| 5,339,950
|
def _validate_args(func, args, kwargs):
"""Validate customer function args and convert them to kwargs."""
# Positional arguments validate
all_parameters = [param for _, param in signature(func).parameters.items()]
# Implicit parameter are *args and **kwargs
if any(param.kind in {param.VAR_KEYWORD, param.VAR_POSITIONAL} for param in all_parameters):
raise UnsupportedParameterKindError(func.__name__)
all_parameter_keys = [param.name for param in all_parameters]
empty_parameters = {param.name: param for param in all_parameters if param.default is Parameter.empty}
min_num = len(empty_parameters)
max_num = len(all_parameters)
if len(args) > max_num:
raise TooManyPositionalArgsError(func.__name__, min_num, max_num, len(args))
provided_args = OrderedDict({param.name: args[idx] for idx, param in enumerate(all_parameters) if idx < len(args)})
for _k in kwargs.keys():
if _k not in all_parameter_keys:
raise UnexpectedKeywordError(func.__name__, _k, all_parameter_keys)
if _k in provided_args.keys():
raise MultipleValueError(func.__name__, _k)
provided_args[_k] = kwargs[_k]
if len(provided_args) < len(empty_parameters):
missing_keys = empty_parameters.keys() - provided_args.keys()
raise MissingPositionalArgsError(func.__name__, missing_keys)
for pipeline_input_name in provided_args:
data = provided_args[pipeline_input_name]
if data is not None and not isinstance(data, SUPPORTED_INPUT_TYPES):
msg = (
"Pipeline input expected an azure.ai.ml.Input or primitive types (str, bool, int or float), "
"but got type {}."
)
raise UserErrorException(
message=msg.format(type(data)),
no_personal_data_message=msg.format("[type(pipeline_input_name)]"),
)
return provided_args
| 5,339,951
|
def remove_file(file):
""" Deletes file from OS if it exists
Args:
file (str, Path):
a filename or opened readable file
"""
if isinstance(file, (str, Path)) and os.path.exists(file):
os.remove(file)
elif hasattr(file, 'name') and os.path.exists(file.name):
file.truncate(0)
| 5,339,952
|
def _qual_arg(user_value,
python_arg_name,
gblock_arg_name,
allowable):
"""
Construct and sanity check a qualitative argument to
send to gblocks.
user_value: value to try to send to gblocks
python_arg_name: name of python argument (for error string)
gblock_arg_name: name of argument in gblocks
allowable: dictionary of allowable values mapping python to
whatever should be jammed into gblocks
"""
if user_value in allowable.keys():
return "-{}={}".format(gblock_arg_name,allowable[user_value])
else:
err = "\n\n{} '{}' not recognized\n".format(python_arg_name,
user_value)
err += "must be one of:\n"
allowed = list(allowable)
allowed.sort()
for a in allowed:
err += " {}\n".format(a)
raise ValueError(err)
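# A minimal usage sketch for _qual_arg with a hypothetical allowable mapping;
# the flag name mirrors how gblocks options take single-letter values.
allowed_gaps = {"none": "n", "half": "h", "all": "a"}
print(_qual_arg("half", "use_gaps", "b5", allowed_gaps))  # prints "-b5=h"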
| 5,339,953
|
def is_valid_ip(ip_addr):
"""
:param ip_addr: IPv4 address in dotted-quad notation, e.g. "192.168.0.1".
:return: True if the address is valid; otherwise prints a message and exits.
"""
octet_ip = ip_addr.split(".")
int_octet_ip = [int(i) for i in octet_ip]
if (len(int_octet_ip) == 4) and \
(0 <= int_octet_ip[0] <= 255) and \
(0 <= int_octet_ip[1] <= 255) and \
(0 <= int_octet_ip[2] <= 255) and \
(0 <= int_octet_ip[3] <= 255):
return True
else:
print("Invalid IP, closing program... \n")
exit(0)
| 5,339,954
|
def make_replay_buffer(env: gym.Env, size: int) -> ReplayBuffer:
"""Make a replay buffer.
If not ShinEnv:
Returns a ReplayBuffer with ("rew", "done", "obs", "act", "log_prob", "timeout").
If ShinEnv:
Returns a ReplayBuffer with ("rew", "done", "obs", "act", "log_prob", "timeout", "state").
"""
is_shin_env = hasattr(env, "mdp")
if isinstance(env.action_space, gym.spaces.Discrete):
act_type, act_shape = int, 1
elif isinstance(env.action_space, gym.spaces.Box):
act_type, act_shape = float, env.action_space.shape
env_dict = {
"rew": {"dtype": float, "shape": 1},
"done": {"dtype": bool, "shape": 1},
"obs": {"dtype": float, "shape": env.observation_space.shape},
"act": {"dtype": act_type, "shape": act_shape},
"log_prob": {"dtype": float, "shape": act_shape},
"timeout": {"dtype": bool, "shape": 1},
}
if is_shin_env:
env_dict.update({"state": {"dtype": int, "shape": 1}})
return ReplayBuffer(size, env_dict, next_of=("obs", "state"))
return ReplayBuffer(size, env_dict, next_of=("obs",))
| 5,339,955
|
def scale_places(places: int) -> Callable[[decimal.Decimal], decimal.Decimal]:
"""
Returns a function that scales decimal values by ``10**-places``, i.e. moves
the decimal point ``places`` places to the left for positive ``places``.
"""
if not isinstance(places, int):
raise ValueError(
'Argument `places` must be int. Got value {} of type {}.'.
format(places, type(places)),
)
with decimal.localcontext(abi_decimal_context):
scaling_factor = TEN ** -places
def f(x: decimal.Decimal) -> decimal.Decimal:
with decimal.localcontext(abi_decimal_context):
return x * scaling_factor
places_repr = 'Eneg{}'.format(places) if places > 0 else 'Epos{}'.format(-places)
func_name = 'scale_by_{}'.format(places_repr)
f.__name__ = func_name
f.__qualname__ = func_name
return f
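# A minimal usage sketch for scale_places: scale_places(2) multiplies by
# 10**-2, i.e. moves the decimal point two places to the left (TEN and
# abi_decimal_context are assumed to be defined at module level, as above).
shift = scale_places(2)
assert shift(decimal.Decimal("1234")) == decimal.Decimal("12.34")
assert shift.__name__ == "scale_by_Eneg2"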
| 5,339,956
|
def format_dot_y_axis(axes: Axes, bottom: float, top: float) -> None:
"""Draw the ticks, format the labels, and adjust sizing for the day-axis.
Parameters
----------
axes: `Axes`
The Axes object describing the graph
bottom: `float`
Midnight of the earliest day
top: `float`
Dawn of the earliest day
"""
axes.yaxis_date()
axes.set_ylim(bottom=bottom, top=top)
axes.set_ylabel("Time of day", fontdict={"fontsize": 15})
axes.grid(which="major", axis="y", lw=1)
axes.grid(which="minor", axis="y", lw=0.5)
y_loc = HourLocator(interval=2)
y_formatter = DateFormatter("%-I:%M %p")
y_min_loc = HourLocator(interval=1)
y_axis = axes.get_yaxis()
y_axis.set_major_locator(y_loc)
y_axis.set_major_formatter(y_formatter)
y_axis.set_minor_locator(y_min_loc)
# Display morning on top and midnight on bottom. This is different from what
# we did when assigning `y_vals`
axes.invert_yaxis()
| 5,339,957
|
def create_model(params : model_params):
"""
Create ReasoNet model
Args:
params (class:`model_params`): The parameters used to create the model
"""
logger.log("Create model: dropout_rate: {0}, init:{1}, embedding_init: {2}".format(params.dropout_rate, params.init, params.embedding_init))
# Query and Doc/Context/Paragraph inputs to the model
query_seq_axis = Axis('sourceAxis')
context_seq_axis = Axis('contextAxis')
query_sequence = sequence.input(shape=(params.vocab_dim), is_sparse=True, sequence_axis=query_seq_axis, name='query')
context_sequence = sequence.input(shape=(params.vocab_dim), is_sparse=True, sequence_axis=context_seq_axis, name='context')
entity_ids_mask = sequence.input(shape=(1,), is_sparse=False, sequence_axis=context_seq_axis, name='entity_ids_mask')
# embedding
if params.embedding_init is None:
embedding_init = create_random_matrix(params.vocab_dim, params.embedding_dim)
else:
embedding_init = params.embedding_init
embedding = parameter(shape=(params.vocab_dim, params.embedding_dim), init=None)
embedding.value = embedding_init
embedding_matrix = constant(embedding_init, shape=(params.vocab_dim, params.embedding_dim))
if params.dropout_rate is not None:
query_embedding = ops.dropout(times(query_sequence , embedding), params.dropout_rate, name='query_embedding')
context_embedding = ops.dropout(times(context_sequence, embedding), params.dropout_rate, name='context_embedding')
else:
query_embedding = times(query_sequence , embedding, name='query_embedding')
context_embedding = times(context_sequence, embedding, name='context_embedding')
contextGruW = Parameter(_INFERRED + _as_tuple(params.hidden_dim), init=glorot_uniform(), name='gru_params')
queryGruW = Parameter(_INFERRED + _as_tuple(params.hidden_dim), init=glorot_uniform(), name='gru_params')
entity_embedding = ops.times(context_sequence, embedding_matrix, name='constant_entity_embedding')
# Unlike other words in the context, entity vectors are kept fixed as random vectors, so each vector serves only as an identifier for a distinct entity and carries no semantic meaning
full_context_embedding = ops.element_select(entity_ids_mask, entity_embedding, context_embedding)
context_memory = ops.optimized_rnnstack(full_context_embedding, contextGruW, params.hidden_dim, 1, True, recurrent_op='gru', name='context_mem')
query_memory = ops.optimized_rnnstack(query_embedding, queryGruW, params.hidden_dim, 1, True, recurrent_op='gru', name='query_mem')
qfwd = ops.slice(sequence.last(query_memory), -1, 0, params.hidden_dim, name='fwd')
qbwd = ops.slice(sequence.first(query_memory), -1, params.hidden_dim, params.hidden_dim*2, name='bwd')
init_status = ops.splice(qfwd, qbwd, name='Init_Status') # get last fwd status and first bwd status
return attention_model(context_memory, query_memory, init_status, params.hidden_dim, params.attention_dim, max_steps = params.max_rl_steps)
| 5,339,958
|
def test_missing_management_form(live_server, selenium):
"""
    Asserts that ConvenientFormset initialization reports an error message
    when the management form is missing.
"""
# Load webpage for test
params = {'template_name': 'initialization/missing_management_form.html'}
test_url = f'{live_server.url}?{urlencode(params)}'
selenium.get(test_url)
# Assert errors
error_log = selenium.find_element(By.CSS_SELECTOR, '#error-log')
error_messages = [
msg.strip() for msg in error_log.text.split('\n') if msg.strip()
]
assert error_messages == [
'[ConvenientFormset] Management form for formset '
'with prefix `formset` missing or has been tampered with.'
]
| 5,339,959
|
def _process_voucher_data_for_order(cart):
"""Fetch, process and return voucher/discount data from cart."""
vouchers = Voucher.objects.active(date=date.today()).select_for_update()
voucher = get_voucher_for_cart(cart, vouchers)
if cart.voucher_code and not voucher:
msg = pgettext(
'Voucher not applicable',
'Voucher expired in meantime. Order placement aborted.')
raise NotApplicable(msg)
if not voucher:
return {}
increase_voucher_usage(voucher)
return {
'voucher': voucher,
'discount_amount': cart.discount_amount,
'discount_name': cart.discount_name,
'translated_discount_name': cart.translated_discount_name}
| 5,339,960
|
def transform_batch(images,
max_rot_deg,
max_shear_deg,
max_zoom_diff_pct,
max_shift_pct,
experimental_tpu_efficiency=True):
"""Transform a batch of square images with the same randomized affine
transformation.
"""
def clipped_random():
rand = tf.random.normal([1], dtype=tf.float32)
rand = tf.clip_by_value(rand, -2., 2.) / 2.
return rand
batch_size = images.shape[0]
tf.debugging.assert_equal(
images.shape[1],
images.shape[2],
"Images should be square")
DIM = images.shape[1]
channels = images.shape[3]
XDIM = DIM % 2
rot = max_rot_deg * clipped_random()
shr = max_shear_deg * clipped_random()
h_zoom = 1.0 + clipped_random()*max_zoom_diff_pct
w_zoom = 1.0 + clipped_random()*max_zoom_diff_pct
h_shift = clipped_random()*(DIM*max_shift_pct)
w_shift = clipped_random()*(DIM*max_shift_pct)
# GET TRANSFORMATION MATRIX
m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift)
# LIST DESTINATION PIXEL INDICES
    x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) # shape [DIM*DIM]
y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM])
z = tf.ones([DIM*DIM],tf.int32)
    idx = tf.stack( [x,y,z] ) # shape [3, DIM*DIM]
# ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS
idx2 = tf.matmul(m,tf.cast(idx,tf.float32))
idx2 = tf.cast(idx2,tf.int32)
idx2 = tf.clip_by_value(idx2,-DIM//2+XDIM+1,DIM//2)
# FIND ORIGIN PIXEL VALUES
idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )
idx3 = tf.transpose(idx3)
batched_idx3 = tf.tile(idx3[tf.newaxis], [batch_size, 1, 1])
if experimental_tpu_efficiency:
# This reduces excessive padding in the original tf.gather_nd op
idx4 = idx3[:, 0] * DIM + idx3[:, 1]
images = tf.reshape(images, [batch_size, DIM * DIM, channels])
d = tf.gather(images, idx4, axis=1)
return tf.reshape(d, [batch_size,DIM,DIM,channels])
else:
d = tf.gather_nd(images, batched_idx3, batch_dims=1)
return tf.reshape(d,[batch_size,DIM,DIM,channels])
| 5,339,961
|
def prep_seven_zip_path(path, talkative=False):
"""
Print p7zip path on POSIX, or notify if not there.
:param path: Path to use.
:type path: str
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
if path is None:
talkaprint("NO 7ZIP\nPLEASE INSTALL p7zip", talkative)
sentinel = False
else:
talkaprint("7ZIP FOUND AT {0}".format(path), talkative)
sentinel = True
return sentinel
| 5,339,962
|
async def root() -> Dict[str, str]:
"""
Endpoint for basic connectivity test.
"""
logger.debug('root requested')
return {'message': 'OK'}
| 5,339,963
|
def detach_policy(user_name, policy_arn):
"""
Detaches a policy from a user.
:param user_name: The name of the user.
:param policy_arn: The Amazon Resource Name (ARN) of the policy.
"""
try:
iam.User(user_name).detach_policy(PolicyArn=policy_arn)
logger.info("Detached policy %s from user %s.", policy_arn, user_name)
except ClientError:
logger.exception(
"Couldn't detach policy %s from user %s.", policy_arn, user_name)
raise
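

# Hypothetical usage (added for illustration; not part of the original source).
# The user name is made up and the ARN is just an example AWS managed policy.
if __name__ == "__main__":
    detach_policy("example-user", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")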
| 5,339,964
|
def user_create_profile(sender, instance, created, **kwargs):
"""
    Depending on the user_type field of the User model, we want to create
    a specific "type of profile".
"""
logger.info('[entities receiver]')
if created:
# Here we will put the logic of which type of user we will create.
        # Will be dictated by the type of user, given by the user_type field.
logger.info('[entities receiver created]')
if instance.user_type == 1:
Person.objects.create(user=instance)
elif instance.user_type == 2:
Company.objects.create(user=instance)
else:
pass
| 5,339,965
|
def overlapping_community(G, community):
"""Return True if community partitions G into overlapping sets.
"""
community_size = sum(len(c) for c in community)
# community size must be larger to be overlapping
if not len(G) < community_size:
return False
# check that the set of nodes in the communities is the same as G
if not set(G) == set.union(*community):
return False
return True
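

# Usage sketch (added for illustration; not part of the original source).
# Requires networkx; the graph and community partitions are hypothetical.
if __name__ == "__main__":
    import networkx as nx

    G = nx.path_graph(4)                   # nodes 0, 1, 2, 3
    overlapping = [{0, 1, 2}, {2, 3}]      # node 2 appears in both sets
    disjoint = [{0, 1}, {2, 3}]            # covers G but does not overlap
    assert overlapping_community(G, overlapping)
    assert not overlapping_community(G, disjoint)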
| 5,339,966
|
def validate_credential(zone, credential):
"""
    Validate the credential (its token is already calculated) against the access rights of the given zone.
"""
source = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH)
canAccess = source.get_or_create_client_access_rights(credential, zone)
if canAccess:
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
else:
return json.dumps({'success':False}), 403, {'ContentType':'application/json'}
| 5,339,967
|
def test_fast_gradient_method():
"""
Fast gradient method unit test.
"""
input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
label = np.asarray([2], np.int32)
label = np.eye(3)[label].astype(np.float32)
attack = FastGradientMethod(Net())
ms_adv_x = attack.generate(input_np, label)
assert np.any(ms_adv_x != input_np), 'Fast gradient method: generate value' \
' must not be equal to original value.'
| 5,339,968
|
def test_swap_child():
""" """
for test in run_hotswap_test(DEFAULT_TIME+2,
original="""
from enaml.widgets.api import *
enamldef Main(Window): view:
Container:
Label:
text = "child 1"
""",
modified="""
from enaml.widgets.api import *
enamldef Main(Window): view:
Container:
PushButton:
text = "child 1"
""",
initial_state={}):
container = test.view.children[0]
if not test.reloaded:
assert container.children[0].__class__.__name__ == 'Label'
else:
assert len(container.children) == 1
assert container.children[0].__class__.__name__ == 'PushButton'
| 5,339,969
|
def gm(data,g1=0.0,g2=0.0,g3=0.0,inv=False):
"""
Lorentz-to-Gauss Apodization
Functional form:
gm(x_i) = exp(e - g*g)
Where: e = pi*i*g1
g = 0.6*pi*g2*(g3*(size-1)-i)
Parameters:
* data Array of spectral data.
* g1 Inverse exponential width.
* g2 Gaussian broaden width.
* g3 Location of gauss maximum.
* inv Set True for inverse apodization.
"""
size = data.shape[-1]
e = pi*np.arange(size)*g1
g = 0.6*pi*g2*(g3*(size-1) - np.arange(size))
    apod = np.exp(e - g*g).astype(data.dtype)
if inv:
apod = 1/apod
return apod*data
| 5,339,970
|
def exp_t(u, t):
"""Compute exp_t for `u`."""
if t == 1.0:
return torch.exp(u)
else:
return torch.relu(1.0 + (1.0 - t) * u) ** (1.0 / (1.0 - t))
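

# Quick check (added for illustration; not part of the original source).
# At t = 1 the tempered exponential reduces to the ordinary exponential;
# for t < 1 it follows the relu-based power form.
if __name__ == "__main__":
    import torch

    u = torch.tensor([-1.0, 0.0, 2.0])
    assert torch.allclose(exp_t(u, 1.0), torch.exp(u))
    print(exp_t(u, 0.5))  # tensor([0.2500, 1.0000, 4.0000])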
| 5,339,971
|
def decode_json_dict(data):
# type: (Dict) -> Dict
"""Converts str to python 2 unicodes in JSON data."""
return _strify(data)
| 5,339,972
|
def linear_search(lst: list, x: Any) -> int:
"""Return the index of the first element of `lst` equal to `x`, or -1 if no
elements of `lst` are equal to `x`.
Design idea: Scan the list from start to finish.
Complexity: O(n) time, O(1) space.
For an improvement on linear search for sorted lists, see the binary search
function in the decrease_and_conquer module.
"""
for i, y in enumerate(lst):
if x == y:
return i
return -1
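

# Usage examples (added for illustration; not part of the original source).
if __name__ == "__main__":
    assert linear_search([3, 1, 4, 1, 5], 1) == 1   # index of the first match
    assert linear_search([3, 1, 4, 1, 5], 9) == -1  # not found
    assert linear_search([], 7) == -1               # empty list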
| 5,339,973
|
def get_color_cmap(name, n_colors=6):
"""
Return discrete colors from a matplotlib palette.
:param name: Name of the palette. This should be a named matplotlib colormap.
    :type name: str
    :param n_colors: Number of discrete colors in the palette.
    :type n_colors: int
    :return: List of colors as hexadecimal strings.
    :rtype: list
"""
brewer_qual_pals = {"Accent": 8, "Dark2": 8, "Paired": 12,
"Pastel1": 9, "Pastel2": 8,
"Set1": 9, "Set2": 8, "Set3": 12, 'tab20':20, 'tab20b':20}
if name == 'tab20' and n_colors > 19:
second = 'tab20b'
ncolor2 = n_colors - 19
n_colors = 19
else :
second = False
cmap = getattr(cm, name)
if name in brewer_qual_pals:
bins = np.linspace(0, 1, brewer_qual_pals[name])
if 'tab20' == name :
len_bins = len(bins)
bins = [bins[i] for i in range(len_bins) if i != 14][:n_colors]
else :
bins = bins[:n_colors]
else:
bins = np.linspace(0, 1, n_colors + 2)[1:-1]
palette = list(map(tuple, cmap(bins)[:, :3]))
if second :
cmap = getattr(cm, second)
bins = np.linspace(0, 1, brewer_qual_pals[second])[:ncolor2]
palette += list(map(tuple, cmap(bins)[:, :3]))
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors+ncolor2)]
else :
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
return [colors.rgb2hex(rgb) for rgb in palette]
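

# Usage examples (added for illustration; not part of the original source;
# the exact hex values depend on the installed matplotlib colormaps).
if __name__ == "__main__":
    print(get_color_cmap("Set2", 4))         # e.g. ['#66c2a5', '#fc8d62', ...]
    print(len(get_color_cmap("tab20", 25)))  # 25; spills over into tab20b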
| 5,339,974
|
def logkde2entropy(vects, logkde):
"""
    computes the entropy of the KDE
    incorporates vects so that the KDE is properly normalized (transforms it into a truly discrete distribution)
    """
    vol = vects2vol(vects)
    truth = logkde > -np.infty
    return -vol*np.sum(np.exp(logkde[truth])*logkde[truth])
| 5,339,975
|
def issue_2021_02_16():
"""
"""
import psana.pscalib.calib.MDBWebUtils as wu
det_uniqueid = 'epix10ka_3926196238-0175152897-1157627926-0000000000-0000000000-0000000000-0000000000_3926196238-0174824449-0268435478-0000000000-0000000000-0000000000-0000000000_3926196238-0175552257-3456106518-0000000000-0000000000-0000000000-0000000000_3926196238-0176373505-4043309078-0000000000-0000000000-0000000000-0000000000'
calib_const = wu.calib_constants_all_types(det_uniqueid, exp='ueddaq02', run=86)
| 5,339,976
|
def get_deployment_json(
runner: Runner,
deployment_name: str,
context: str,
namespace: str,
deployment_type: str,
run_id: Optional[str] = None,
) -> Dict:
"""Get the decoded JSON for a deployment.
If this is a Deployment we created, the run_id is also passed in - this is
the uuid we set for the telepresence label. Otherwise run_id is None and
the Deployment name must be used to locate the Deployment.
"""
assert context is not None
assert namespace is not None
span = runner.span()
try:
get_deployment = [
"get",
deployment_type,
"-o",
"json",
"--export",
]
if run_id is None:
return json.loads(
runner.get_kubectl(
context,
namespace,
get_deployment + [deployment_name],
stderr=STDOUT
)
)
else:
# When using a selector we get a list of objects, not just one:
return json.loads(
runner.get_kubectl(
context,
namespace,
get_deployment + ["--selector=telepresence=" + run_id],
stderr=STDOUT
)
)["items"][0]
except CalledProcessError as e:
raise SystemExit(
"Failed to find Deployment '{}': {}".format(
deployment_name, str(e.stdout, "utf-8")
)
)
finally:
span.end()
| 5,339,977
|
def test_append_test_for_small(small_linklist):
""" tests to see if node appended to small group"""
assert len(small_linklist) == 4
small_linklist.append(1)
assert len(small_linklist) == 5
| 5,339,978
|
def generate_logo(filename):
"""
Load component images, apply a sinogram, and assemble to form the svmbir logo.
Args:
filename: Name of image file used to generate the sinogram for inclusion in the logo.
Returns:
None
"""
# Load the svmbir image, convert to negative, display, and save
image = read_grey(filename)
image = 1-image
plt.imshow(image, cmap=plt.cm.Greys_r)
plt.title("Original")
plt.show()
svmbir_letters = np.copy(image)
# Apply the radon transform and do gamma correction to improve contrast
theta = np.linspace(0., 360., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=True)
sinogram_scaled = sinogram / np.amax(sinogram)
gamma_corrected = exposure.adjust_gamma(sinogram_scaled, 0.4)
# Display the sinogram and gamma corrected version
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Radon transform\n(Sinogram)")
ax1.set_xlabel("Projection angle (deg)")
ax1.set_ylabel("Projection position (pixels)")
ax1.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(0, 360, 0, sinogram.shape[0]), aspect='auto')
ax2.set_title("Gamma corrected")
ax2.imshow(gamma_corrected, cmap=plt.cm.Greys_r,
extent=(0, 360, 0, sinogram.shape[0]), aspect='auto')
fig.tight_layout()
plt.show()
# Save the gamma-corrected, rotated sinogram
sinogram_int = np.round(gamma_corrected*255)
sinogram_copy = np.copy(sinogram_int)
sinogram_rot = rotate(sinogram_copy, 90)
imsave('sinogram_rot.png', sinogram_rot.astype(np.uint8))
# Do the reconstruction and display
# sinogram_int = imread('sinogram_rot.png')
# sinogram_int = rotate(sinogram_int, -90)
sinogram = exposure.adjust_gamma(sinogram_copy / 255, 2)
image_recov = iradon(sinogram, theta=theta)
image_recov_orig = iradon(np.round(sinogram_scaled * 255) / 255, theta)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Recon from original")
ax1.imshow(image_recov_orig, cmap=plt.cm.Greys_r, aspect='equal')
ax2.set_title("Recon from gamma corrected")
ax2.imshow(image_recov, cmap=plt.cm.Greys_r,aspect='equal')
fig.tight_layout()
plt.show()
# Load the images for the logo
sino = sinogram_rot.astype(float)/255 # read_grey('sinogram_rot.png')
mbir_letters = read_grey('images/mbir.png')
arrow_left = read_grey('images/arrow_left.png', channel=3)
arrow_right = read_grey('images/arrow_right.png', channel=3)
svmbir_text = read_grey('images/text.png', channel=3)
# Set dimensions for logo elements - rescale images as needed
spacer = 5
pad = 10
new_width = sino.shape[1] + svmbir_letters.shape[0]
mbir_letters = rescale(mbir_letters, new_width / mbir_letters.shape[1])
mbir_letters = exposure.adjust_gamma(mbir_letters, 0.2)
mbir_letters[mbir_letters > 0.95] = 1
mbir_letters[mbir_letters < 0.05] = 0
new_height = sino.shape[0] + mbir_letters.shape[0] + spacer
svmbir_text = 1 - rescale(svmbir_text, new_height / svmbir_text.shape[0])
svmbir_text = exposure.adjust_gamma(svmbir_text, 1.5)
svmbir_text[svmbir_text > 0.95] = 1
svmbir_text[svmbir_text < 0.05] = 0
height = sino.shape[0] + mbir_letters.shape[0] + 3 * spacer
width = mbir_letters.shape[1] + svmbir_text.shape[1] + 2 * spacer
# Get the empty image and add the sinogram
logo = np.zeros((height, width))
i = spacer
j = spacer
logo = copy_in(logo, sino, i, j)
# Add the vertical bar and the svmbir letters with arrows
i = spacer
j = sino.shape[1]
white_bar_ver = np.ones((sino.shape[1], spacer))
logo = copy_in(logo, white_bar_ver, i, j)
j = sino.shape[1] + spacer
arrow_left = rescale(arrow_left, (svmbir_letters.shape[1]/2) / arrow_left.shape[1])
logo = copy_in(logo, arrow_left, i, j)
j = j + arrow_left.shape[1]
arrow_right = rescale(arrow_right, (svmbir_letters.shape[1]/2) / arrow_right.shape[1])
logo = copy_in(logo, arrow_right, i, j)
j = sino.shape[1] + spacer
logo = copy_in(logo, svmbir_letters, i, j, method="max")
# Add the svmbir text
i = spacer
j = sino.shape[1] + svmbir_letters.shape[1] + spacer
logo = copy_in(logo, svmbir_text, i, j)
# Add the horizontal bar and the MBIR text
i = spacer + sino.shape[0]
j = spacer
white_bar_hor = np.ones((spacer, sino.shape[1] + spacer + svmbir_letters.shape[1]))
logo = copy_in(logo, white_bar_hor, i, j)
i = i + spacer
logo = copy_in(logo, mbir_letters, i, j)
# Display the logo
plt.imshow(logo, cmap=plt.cm.Greys_r)
plt.show()
imsave('logo.png', logo)
| 5,339,979
|
def is_admin() -> bool:
"""Check does the script has admin privileges."""
import ctypes
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except AttributeError: # Windows only
return None
| 5,339,980
|
def get_firewall_status(gwMgmtIp, api_key):
"""
    Returns the status of the firewall. Calls the op command "show chassis-ready".
    Requires an API key and the IP address of the interface the API request is sent to.
    :param gwMgmtIp: IP address of the firewall management interface
    :param api_key: API key used to authenticate the request
    :return: 'running' if the firewall is ready, otherwise 'down'
"""
global gcontext
# cmd = urllib.request.Request('https://google.com')
cmd = urllib.request.Request(
"https://" + gwMgmtIp + "/api/?type=op&cmd=<show><chassis-ready></chassis-ready></show>&key=" + api_key)
# Send command to fw and see if it times out or we get a response
logger.info('[INFO]: Sending command: {}'.format(cmd))
try:
response = urllib.request.urlopen(cmd, data=None, context=gcontext, timeout=5).read()
logger.info(
"[INFO]:Got http 200 response from FW with address {}. So need to check the response".format(gwMgmtIp))
# Now we do stuff to the gw
except urllib.error.URLError:
logger.info("[INFO]: No response from FW with address {}. So maybe not up!".format(gwMgmtIp))
return 'down'
# sleep and check again?
else:
logger.info("[INFO]: FW is responding!!")
logger.info("[RESPONSE]: {}".format(response))
resp_header = et.fromstring(response)
if resp_header.tag != 'response':
logger.info("[ERROR]: didn't get a valid response from firewall...maybe a timeout")
return 'down'
if resp_header.attrib['status'] == 'error':
logger.info("[ERROR]: Got response header error for the command")
return 'down'
if resp_header.attrib['status'] == 'success':
# The fw responded with a successful command execution
for element in resp_header:
if element.text.rstrip() == 'yes':
# Call config gw command?
logger.info("[INFO]: FW with ip {} is ready ".format(gwMgmtIp))
return 'running'
else:
return 'down'
| 5,339,981
|
def inferDistanceRelations(matcher, reqNode, ego, line):
"""Infer bounds on distances from a requirement."""
distMatcher = lambda node: matcher.matchUnaryFunction('DistanceFrom', node)
allBounds = matcher.matchBounds(reqNode, distMatcher)
for target, bounds in allBounds.items():
if not isinstance(target, Object):
continue
assert target is not ego
if ego is None:
            raise InvalidScenarioError(f'distance w.r.t. unassigned ego on line {line}')
lower, upper = bounds
if lower < 0:
lower = 0
if upper == float('inf'):
continue # skip trivial bounds
rel = DistanceRelation(target, lower, upper)
ego._relations.append(rel)
conv = DistanceRelation(ego, lower, upper)
target._relations.append(conv)
| 5,339,982
|
def test_vgp_unchanged_at_optimum(with_tf_random_seed, vgp_gpr_optim_setup):
"""Test that the update does not change sites at the optimum"""
vgp, _ = vgp_gpr_optim_setup
# ELBO at optimum
optim_elbo = vgp.elbo()
# site update step
vgp.update_sites()
# ELBO after step
new_elbo = vgp.elbo()
with tf.GradientTape() as g:
g.watch(vgp.trainable_variables)
elbo = vgp.classic_elbo()
grad_elbo = g.gradient(elbo, vgp.trainable_variables)
for g in grad_elbo:
if g is not None:
np.testing.assert_allclose(g, tf.zeros_like(g), atol=1e-9)
np.testing.assert_allclose(optim_elbo, new_elbo, atol=1e-9)
| 5,339,983
|
def test_get_map_data():
"""Tests that a SimilarChecker respects the MapReduceMixin interface"""
linter = PyLinter(reporter=Reporter())
# Add a parallel checker to ensure it can map and reduce
linter.register_checker(similar.SimilarChecker(linter))
source_streams = (
str(INPUT / "similar_lines_a.py"),
str(INPUT / "similar_lines_b.py"),
)
expected_linelists = (
(
"",
"",
"",
"",
"",
"",
"def adipiscing(elit):",
'etiam = "id"',
'dictum = "purus,"',
'vitae = "pretium"',
'neque = "Vivamus"',
'nec = "ornare"',
'tortor = "sit"',
"return etiam, dictum, vitae, neque, nec, tortor",
"",
"",
"class Amet:",
"def similar_function_3_lines(self, tellus):",
"agittis = 10",
"tellus *= 300",
"return agittis, tellus",
"",
"def lorem(self, ipsum):",
'dolor = "sit"',
'amet = "consectetur"',
"return (lorem, dolor, amet)",
"",
"def similar_function_5_lines(self, similar):",
"some_var = 10",
"someother_var *= 300",
'fusce = "sit"',
'amet = "tortor"',
"return some_var, someother_var, fusce, amet",
"",
'def __init__(self, moleskie, lectus="Mauris", ac="pellentesque"):',
'metus = "ut"',
'lobortis = "urna."',
'Integer = "nisl"',
'(mauris,) = "interdum"',
'non = "odio"',
'semper = "aliquam"',
'malesuada = "nunc."',
'iaculis = "dolor"',
'facilisis = "ultrices"',
'vitae = "ut."',
"",
"return (",
"metus,",
"lobortis,",
"Integer,",
"mauris,",
"non,",
"semper,",
"malesuada,",
"iaculis,",
"facilisis,",
"vitae,",
")",
"",
"def similar_function_3_lines(self, tellus):",
"agittis = 10",
"tellus *= 300",
"return agittis, tellus",
),
(
"",
"",
"",
"",
"",
"",
"",
"class Nulla:",
'tortor = "ultrices quis porta in"',
'sagittis = "ut tellus"',
"",
"def pulvinar(self, blandit, metus):",
"egestas = [mauris for mauris in zip(blandit, metus)]",
"neque = (egestas, blandit)",
"",
"def similar_function_5_lines(self, similar):",
"some_var = 10",
"someother_var *= 300",
'fusce = "sit"',
'amet = "tortor"',
'iaculis = "dolor"',
"return some_var, someother_var, fusce, amet, iaculis, iaculis",
"",
"",
"def tortor(self):",
"ultrices = 2",
'quis = ultricies * "porta"',
"return ultricies, quis",
"",
"",
"class Commodo:",
"def similar_function_3_lines(self, tellus):",
"agittis = 10",
"tellus *= 300",
'laoreet = "commodo "',
"return agittis, tellus, laoreet",
),
)
data = []
# Manually perform a 'map' type function
for source_fname in source_streams:
sim = similar.SimilarChecker(linter)
with open(source_fname) as stream:
sim.append_stream(source_fname, stream)
# The map bit, can you tell? ;)
data.extend(sim.get_map_data())
assert len(expected_linelists) == len(data)
for source_fname, expected_lines, lineset_obj in zip(
source_streams, expected_linelists, data
):
assert source_fname == lineset_obj.name
# There doesn't seem to be a faster way of doing this, yet.
lines = (line for idx, line in lineset_obj.enumerate_stripped())
assert tuple(expected_lines) == tuple(lines)
| 5,339,984
|
def create_logger(name, logfile, level):
"""
Sets up file logger.
:param name: Logger name
:param logfile: Location of log file
:param level: logging level
:return: Initiated logger
"""
logger = logging.getLogger(name)
handler = logging.FileHandler(logfile)
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level)
return logger
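

# Usage sketch (added for illustration; the logger name and log file path
# below are hypothetical, not from the original source).
if __name__ == "__main__":
    log = create_logger("worker", "/tmp/worker.log", logging.DEBUG)
    log.info("worker started")
    log.debug("details are written to /tmp/worker.log as well")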
| 5,339,985
|
def restore_builtins():
"""Restore the original builtin functions."""
for k, v in builtins.items():
mod, func = k.rsplit('.', 1) # 'os.path.isdir' -> ('os.path', 'isdir')
name_elts = mod.split('.')
top = name_elts.pop(0)
module = globals()[top]
for elt in name_elts:
module = getattr(module, elt)
setattr(module, func, v)
storage.restore_builtins()
| 5,339,986
|
def canonicalize_path(cwd, path, debug):
"""Given a path composed by concatenating two or more parts,
clean up and canonicalize the path."""
# // => /
# foo/bar/../whatever => foo/whatever [done]
# foo/bar/./whatever => foo/whatever [done]
# /foo/bar => /foo/bar [done]
# foo/bar => cwd/foo/bar [done]
# <empty_path> => cwd [done]
# Since we construct cwd from a node_id now, it always ends in /,
# so trim off the last empty string in cwd_parts
cwd_parts = cwd.split('/')[:-1]
path_parts = path.split('/')
new = path_parts if path and path[0] == '/' else cwd_parts + path_parts
if debug:
print("# canonicalize_path(cwd: '" + cwd \
+ "', path: '" + path + "')")
print("# cwd_parts: " + str(cwd_parts))
print("# path_parts: " + str(path_parts))
print("# new: '" + str(new) + "'")
# Now we will do some canonicalization ...
while '..' in new:
where = new.index('..')
new = new[:where-1] + new[where+1:] if where >= 2 else new[where+1:]
while '.' in new:
where = new.index('.')
new = new[:where] + new[where+1:] if where >= 1 else new[where+1:]
# Get rid of trailing slashes
while new and new[-1] == "":
new = new[:-1]
# Get rid of double slashes (an empty string in the middle of new)
while '' in new[1:-1]:
where = new[1:-1].index('')
new = new[:where+1] + new[where+2:]
# Make sure it's not empty
if new and new[0] != '':
new.insert(0, "")
new_path = '/'.join(new)
if not new_path:
new_path = '/'
if debug:
print("# new: '" + str(new) + "'")
print("new_path: '" + new_path + "'")
return new_path
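

# Worked examples (added for illustration; not part of the original source).
# They follow the rewrite rules listed in the comments above.
if __name__ == "__main__":
    assert canonicalize_path("/foo/bar/", "baz/../qux", False) == "/foo/bar/qux"
    assert canonicalize_path("/a/b/", "c", False) == "/a/b/c"
    assert canonicalize_path("/a/b/", "", False) == "/a/b"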
| 5,339,987
|
def context_command(func):
"""
Base options for jobs that can override context variables on the command
line.
The command receives a *context_overrides* argument, a dict ready to be
deep merged in templates contexts.
"""
@click.option('--context', '-c', 'context_vars', multiple=True,
metavar='VAR=VALUE', help='Override context VAR with '
'VALUE; use --context multiple times to override multiple '
'variables. Use dots to target a nested variable: '
'foo.bar=baz')
@functools.wraps(func)
def wrapper(context_vars, **kwargs):
try:
context_overrides = parse_context_vars(context_vars)
except exceptions.MalformedContextVar as exc:
click.secho('Malformed context var in command-line: %s' % exc,
fg='red', bold=True)
click.secho('')
click.secho('Use PATH.TO.VAR=VALUE format.', fg='green')
sys.exit(1)
return func(context_overrides=context_overrides, **kwargs)
return wrapper
| 5,339,988
|
def checksum(number):
"""Calculate the checksum. A valid number should have a checksum of 1."""
check = 0
for n in number:
check = (2 * check + int(10 if n == 'X' else n)) % 11
return check
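

# Quick check (added for illustration; not part of the original source).
# The `(2 * check + digit) % 11` recurrence matches ISO 7064 Mod 11-2, so the
# digits of a valid ORCID (check character included) should reduce to 1.
if __name__ == "__main__":
    assert checksum("0000000218250097") == 1   # ORCID 0000-0002-1825-0097
    assert checksum("0000000218250098") != 1   # corrupted last digit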
| 5,339,989
|
def instanceof(value, type_):
"""Check if `value` is an instance of `type_`.
:param value: an object
:param type_: a type
"""
return isinstance(value, type_)
| 5,339,990
|
def step(y, t, dt):
""" RK2 method integration"""
n = y.shape[0]
buf_f0 = np.zeros((n, ndim+1))
buf_f1 = np.zeros((n, ndim+1))
buf_y1 = np.zeros((n, ndim+1))
buf_f0 = tendencies(y)
buf_y1 = y + dt * buf_f0
buf_f1 = tendencies(buf_y1)
Y = y + 0.5 * (buf_f0 + buf_f1) * dt
return Y
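

# Toy check (added for illustration; not part of the original source).
# `tendencies` is the module-level right-hand side used by `step`; here a
# trivial one, dy/dt = y, is defined just for this demo, so a single Heun
# step from y = 1 with dt = 0.1 gives 1.105 (exact: exp(0.1) ~= 1.10517).
if __name__ == "__main__":
    import numpy as np

    def tendencies(y):  # demo-only right-hand side
        return y

    print(step(np.array([1.0]), 0.0, 0.1))  # [1.105]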
| 5,339,991
|
def sample_deletes(graph_, rgb_img_features, xyz,
delete_scores, num_deletes, threshold,
gc_neighbor_dist, padding_config,
**kwargs):
"""Sample Deletes.
Args:
graph_: a torch_geometric.data.Batch instance with attributes:
- rgb: a [N x C_app] torch.FloatTensor of rgb features
- depth: a [N x 3 x H' x W'] torch.FloatTensor
- mask: a [N x 1 x H' x W'] torch.FloatTensor
- orig_masks: a [N x H x W] torch.FloatTensor of original masks
- crop_indices: a [N, 4] torch.LongTensor. xmin, ymin, xmax, ymax.
rgb_img_features: an OrderedDict of image features. Output of gc.extract_rgb_img_features()
        xyz: a [3, H, W] torch.FloatTensor. 3D point cloud from camera frame of reference
delete_scores: a [N] torch.FloatTensor with values in [0, 1]. Output of
DeleteNetWrapper.delete_scores().
num_deletes: Maximum number of deletes allowed.
threshold: Minimum delete score required to consider the delete.
gc_neighbor_dist: Distance threshold for connecting nodes in new graph
padding_config: a Python dictionary with padding parameters.
Returns:
        boolean of whether the delete operation was successful.
a torch_geometric.data.Data instance.
"""
# Sort scores, consider only the ones above a certain threshold
sorted_scores, score_indices = torch.sort(delete_scores, descending=True)
num_potential_deletes = torch.sum(sorted_scores > threshold)
if num_potential_deletes == 0 and torch.all(~graph_.added): # Nothing to delete
return False, None
score_indices = score_indices[:num_potential_deletes]
delete_inds = torch.zeros(graph_.orig_masks.shape[0]).bool()
# Sample some masks to delete
leftover_delete_scores = delete_scores[score_indices]
leftover_delete_indices = score_indices
while torch.sum(delete_inds) < num_deletes and leftover_delete_indices.shape[0] > 0:
# Sample delete index
sample_idx = torch.multinomial(leftover_delete_scores, 1)
delete_idx = leftover_delete_indices[sample_idx][0]
delete_inds[delete_idx] = True
# Get leftover potential deletes
temp = torch.ones(leftover_delete_scores.shape[0]).bool()
temp[sample_idx] = False
leftover_delete_indices = leftover_delete_indices[temp]
leftover_delete_scores = leftover_delete_scores[temp]
# If the deleting only undoes the potential adds, consider the sampling to be a failure
if torch.all(delete_inds == graph_.added):
return False, None
# Keep the un-deleted masks
new_masks = graph_.orig_masks[~delete_inds]
# Create new graph
new_masks = new_masks[1:] # Get rid of BG mask
new_masks = util_.convert_mask_NHW_to_HW(new_masks.float(), start_label=constants.OBJECTS_LABEL)
new_graph = gc.construct_segmentation_graph(rgb_img_features, xyz, new_masks,
neighbor_dist=gc_neighbor_dist,
padding_config=padding_config)
return True, new_graph
| 5,339,992
|
def make_char(hex_val):
"""
Create a unicode character from a hex value
:param hex_val: Hex value of the character.
:return: Unicode character corresponding to the value.
"""
try:
return unichr(hex_val)
except NameError:
return chr(hex_val)
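

# Usage examples (added for illustration; not part of the original source).
if __name__ == "__main__":
    assert make_char(65) == "A"
    assert ord(make_char(0x263A)) == 0x263A  # WHITE SMILING FACE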
| 5,339,993
|
def add_chemicals_from_file(filename : str):
"""Parses specified file, adding a chemical to the library for each line in the file.
Each line in the file should first contain the chemicals's molar mass, followed by a list of its names.
All words should be separated by spaces. Example file:
58.44 NaCl table_salt sodium_chloride
74.55 KCl potassium_chloride
"""
    if not os.path.isfile(filename):
error_messages.file_not_found(filename)
try:
with open(filename, "r") as file:
lines = file.readlines()
except:
error_messages.file_read_error(filename)
existing_chemical_library = load_chemicals()
new_chemical_names = []
new_chemical_objects = []
for line_number, line in enumerate(lines):
try:
words = line.split()
if len(words) == 0:
continue
elif len(words) < 2:
error_messages.line_too_short_in_chemical_file(line_number)
molar_mass = words[0]
names = words[1:]
new_chemical = make_safe_chemical(molar_mass, names, chemical_library=existing_chemical_library)
for name in names:
if name in new_chemical_names:
error_messages.duplicate_file_entry(name)
new_chemical_names.append(name)
new_chemical_objects.append(new_chemical)
except:
error_messages.add_from_file_termination(line_number, erroneous_line=line.strip("\n"), upper_case_data_type="Chemicals")
with open(chemical_library_file, "a") as file:
for new_chemical in new_chemical_objects:
file.write(str(new_chemical) + "\n")
print("Added the following chemicals to your library:", *new_chemical_names)
| 5,339,994
|
def normalize(features):
"""
    Scales pixel values by 1/255 and normalizes the result using means and stddevs
"""
means, stddevs = compute_moments(features)
normalized = (np.divide(features, 255) - means) / stddevs
return normalized
| 5,339,995
|
def get_args_from_str(input: str) -> list:
"""
Get arguments from an input string.
Args:
input (`str`): The string to process.
Returns:
A list of arguments.
"""
return ARG_PARSE_REGEX.findall(input)
| 5,339,996
|
def get_all_files(repo_root):
"""Get all files from in this repo."""
output = []
for root, _, files in os.walk(repo_root):
for f in files:
if f.lower().endswith(tuple(CPP_SUFFIXES + ['.py'])):
full_name = os.path.join(root, f)[len(repo_root) + 1:]
if not any(n in full_name
for n in ALL_FILES_BLACKLISTED_NAMES):
output.append(full_name)
return output
| 5,339,997
|
def simulate(school: List[int], days: int) -> int:
"""Simulates a school of fish for ``days`` and returns the number of fish."""
school = flatten_school(school)
for day in range(1, days + 1):
school = simulate_day(school)
return sum(school)
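

# Minimal sketch of the helpers assumed above (added for illustration; the
# real flatten_school/simulate_day live elsewhere in the project and may
# differ). flatten_school buckets the individual timers into counts per
# timer value, and simulate_day advances every bucket by one day.
if __name__ == "__main__":
    def flatten_school(school):
        buckets = [0] * 9                 # timers run from 0 to 8
        for timer in school:
            buckets[timer] += 1
        return buckets

    def simulate_day(school):
        spawning = school[0]              # fish whose timer reached 0
        school = school[1:] + [spawning]  # newborns start at timer 8
        school[6] += spawning             # parents reset to timer 6
        return school

    print(simulate([3, 4, 3, 1, 2], 18))  # 26 with these rules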
| 5,339,998
|
def SPTU(input_a, input_b, n_channels: int):
"""Softplus Tanh Unit (SPTU)"""
in_act = input_a+input_b
t_act = torch.tanh(in_act[:, :n_channels, :])
s_act = torch.nn.functional.softplus(in_act[:, n_channels:, :])
acts = t_act * s_act
return acts
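

# Shape check (added for illustration; not part of the original source).
# Both inputs carry 2*n_channels channels: the first half feeds the tanh
# gate and the second half feeds the softplus gate.
if __name__ == "__main__":
    import torch

    a = torch.randn(4, 16, 100)   # [batch, 2*n_channels, time]
    b = torch.randn(4, 16, 100)
    out = SPTU(a, b, n_channels=8)
    print(out.shape)              # torch.Size([4, 8, 100])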
| 5,339,999
|