content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def ensuredir(path):
    """
    Create a folder (and any missing parents) if it doesn't exist.

    :param path: path to the folder to create; an empty path is a no-op
    """
    if not path:
        return
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pair (another process could create
    # the directory between the two calls).
    os.makedirs(path, exist_ok=True)
def get_language_titles():
    """Extract language and title from input file.

    Reads ``resources/events/<args.event>.tsv`` (tab-separated; falls back to
    comma-separated lines) and returns a dict mapping language code -> title.
    Skips the header row (``lang``), languages prefixed with ``%`` (not
    scrapable), and -- when ``args.language`` is set -- every other language.
    """
    language_titles = {}
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open until garbage collection).
    with open("resources/events/%s.tsv" % args.event) as infile:
        lines = infile.readlines()
    for line in sorted(lines):
        try:
            language, title = line.split('\t')[0], line.split('\t')[1].strip()
        except IndexError:
            # Fall back to comma-separated lines.
            language, title = line.split(',')[0], line.split(',')[1].strip()
        if args.language:
            if language != args.language:
                continue
        if language == "lang":
            continue
        if language.startswith("%"):
            continue  # languages with % in front of them can't be scraped.
        language_titles[language] = title
    return language_titles
def _find_additional_age_entities(request, responder):
"""
If the user has a query such as 'list all employees under 30', the notion of age is
implicit rather than explicit in the form of an age entity. Hence, this function is
beneficial in capturing the existence such implicit entities.
Returns a true/false depending on the existence or lack of the combination of
numerical entities and comparators, thereby indicating an implicit age entitiy or
lack of it, respectively.
"""
try:
comparator_entity = [e for e in request.entities if e['type'] == 'comparator'][0]
num_entity = [float(e['value'][0]['value'])
for e in request.entities
if e['type'] == 'sys_number']
# if any token in the text query is numeric that was missed by the num_entity,
# add it to the list
for i in request.text.split():
try:
num_entity.append(float(i))
except ValueError:
continue
except (IndexError, ValueError):
comparator_entity = []
num_entity = []
return True if comparator_entity and num_entity else False | 5,331,402 |
def stop_flops_count(self):
    """Stop computing the mean flops consumption per image.

    A method to stop computing the mean flops consumption per image, which will
    be available after ``add_flops_counting_methods()`` is called on a desired
    net object. It can be called to pause the computation whenever.
    """
    # Detach the batch-counting hook from the network object itself ...
    remove_batch_counter_hook_function(self)
    # ... and the per-module flops-counting hooks from every submodule.
    self.apply(remove_flops_counter_hook_function)
def drawRectangle(x_cor, y_cor, width, height, color):
    """
    Draws a rectangle on the module-level DISPLAY_SURFACE.

    :param x_cor: x coordinate of the rectangle's top-left corner (pixels)
    :param y_cor: y coordinate of the rectangle's top-left corner (pixels)
    :param width: rectangle width in pixels
    :param height: rectangle height in pixels
    :param color: pygame-compatible color (e.g. an RGB tuple)
    """
    pygame.draw.rect(DISPLAY_SURFACE, color, (x_cor, y_cor, width, height))
def metrics():
    """
    selected linecounts:
    356 ascoltami (music player)
    318 curvecalc (curve and expression editor)
    279 keyboardcomposer
    189 dmxserver (hardware output)
    153 subcomposer
    17 wavecurve (create smoothed waveforms from .wav)
    311 light9/curve.py (curve widgets)
    191 light9/FlyingFader.py (enhanced tk.Scale)
    168 light9/Submaster.py
    * 151 light9/zoomcontrol.py
    137 light9/dmxchanedit.py
    40 light9/wavepoints.py
    65 light9/io/parport.c (dmx interface protocol)
    50 light9/io/serport.i (i2c interface to sliders)
    total in project: about 3200 in about 30 files
    """
    # NOTE: this function carries only the line-count notes above; it has no
    # executable body and always returns None.
def merge_local_and_remote_resources(resources_local, service_sync_type, service_id, session):
    """
    Main function to sync resources with remote server.

    :param resources_local: locally known resources for the service
    :param service_sync_type: key into SYNC_SERVICES_TYPES selecting the sync implementation
    :param service_id: id of the service whose resources are being synced
    :param session: database session used for the remote-resource queries
    :return: ``resources_local`` unchanged when the service was never synced,
        otherwise the sorted merge of local and remote resources
    """
    # Without a previous sync there is nothing remote to merge against.
    if not get_last_sync(service_id, session):
        return resources_local
    remote_resources = _query_remote_resources_in_database(service_id, session=session)
    # The sync-service class is instantiated only to read its max_depth setting.
    max_depth = SYNC_SERVICES_TYPES[service_sync_type]("", "").max_depth
    merged_resources = _merge_resources(resources_local, remote_resources, max_depth)
    _sort_resources(merged_resources)
    return merged_resources
def alertmanager():
    """
    Webhook endpoint: appends the alertname of every incoming Alertmanager
    alert, one per line, to the module-level `alertfile` path.

    to test this:
    $ curl -H "Content-Type: application/json" -d '[{"labels":{"alertname":"test-alert"}}]' 172.17.0.2:9093/api/v1/alerts
    or
    $ curl -H "Content-Type: application/json" -d '{"alerts":[{"labels":{"alertname":"test-alert"}}]}' 127.0.0.1:5000/alertmanager
    """
    alert_json=request.get_json()
    #print (alert["alerts"])
    # Append one line per alert, containing only its alertname label.
    with open(alertfile, 'a') as f:
        for alert in alert_json["alerts"]:
            f.write(alert["labels"]["alertname"])
            f.write('\n')
    return ("HTTP 200 received")
def task_build():
    """build code and intermediate packages"""
    # Nothing to build when the CI job is only running tests or docs.
    if C.TESTING_IN_CI or C.DOCS_IN_CI:
        return
    if not (C.RTD or C.CI):
        # Local-only task: favicon regeneration needs imagemagick's `convert`.
        yield dict(
            name="docs:favicon",
            doc="rebuild favicons from svg source, requires imagemagick",
            file_dep=[P.DOCS_ICON],
            targets=[P.LAB_FAVICON],
            actions=[["echo", "... `convert` not found, install imagemagick"]]
            if not shutil.which("convert")
            else [
                lambda: [
                    subprocess.call(
                        [
                            "convert",
                            "-verbose",
                            "-density",
                            "256x256",
                            "-background",
                            "transparent",
                            P.DOCS_ICON,
                            "-define",
                            "icon:auto-resize",
                            "-colors",
                            "256",
                            P.LAB_FAVICON,
                        ]
                    ),
                    None,
                ][-1]  # doit actions must return None/True/str: discard the exit code
            ],
        )
    yield dict(
        name="js:ui-components",
        doc="copy the icon and wordmark to the ui-components package",
        file_dep=[P.DOCS_ICON, P.DOCS_WORDMARK, B.YARN_INTEGRITY],
        targets=[P.LITE_ICON, P.LITE_WORDMARK],
        actions=[
            U.do(
                *C.SVGO,
                P.DOCS_ICON,
                P.DOCS_WORDMARK,
                "-o",
                P.LITE_ICON,
                P.LITE_WORDMARK,
            ),
        ],
    )
    yield dict(
        name="js:lib",
        doc="build .ts files into .js files",
        file_dep=[
            *L.ALL_ESLINT,
            *P.PACKAGE_JSONS,
            B.PYOLITE_WHEEL_TS,
            B.YARN_INTEGRITY,
            P.ROOT_PACKAGE_JSON,
        ],
        actions=[
            U.do("yarn", "build:lib"),
        ],
        targets=[B.META_BUILDINFO],
    )
    # One flit-build task per in-browser (pyolite) python package.
    js_wheels = []
    for py_pkg, version in P.PYOLITE_PACKAGES.items():
        name = py_pkg.name
        wheel = py_pkg / f"dist/{name}-{version}-{C.NOARCH_WHL}"
        js_wheels += [wheel]
        yield dict(
            name=f"js:py:{name}",
            doc=f"build the {name} python package for the browser with flit",
            file_dep=[*py_pkg.rglob("*.py"), py_pkg / "pyproject.toml"],
            actions=[(U.build_one_flit, [py_pkg])],
            # TODO: get version
            targets=[wheel],
        )
    # a temporary environment to reuse build logic for app, for now
    bs_env = dict(os.environ)
    bs_env["PYTHONPATH"] = str(P.MAIN_SRC)
    yield dict(
        name="js:piplite:wheels",
        file_dep=js_wheels,
        actions=[
            (doit.tools.create_folder, [B.PYOLITE_WHEELS]),
            (U.copy_wheels, [B.PYOLITE_WHEELS, js_wheels]),
            U.do(
                *C.PYM, "jupyterlite.app", "pip", "index", B.PYOLITE_WHEELS, env=bs_env
            ),
            (U.make_pyolite_wheel_js),
        ],
        targets=[B.PYOLITE_WHEEL_INDEX, B.PYOLITE_WHEEL_TS],
    )
    # Dependencies shared by every app build, plus the per-app targets/deps
    # accumulated below.
    app_deps = [
        B.META_BUILDINFO,
        P.WEBPACK_CONFIG,
        P.LITE_ICON,
        P.LITE_WORDMARK,
        P.APP_PACKAGE_JSON,
        *[p for p in P.APP_HTMLS if p.name == "index.template.html"],
    ]
    all_app_targets = []
    extra_app_deps = []
    for app_json in P.APP_JSONS:
        app = app_json.parent
        app_build = app / "build"
        app_targets = [
            P.APP / "build" / app.name / "bundle.js",
            app_build / "index.js",
            app_build / "style.js",
        ]
        all_app_targets += app_targets
        extra_app_deps += [
            app / "index.template.js",
            app_json,
        ]
    yield dict(
        name="js:app",
        doc="build JupyterLite with webpack",
        file_dep=[
            *app_deps,
            *extra_app_deps,
            B.PYOLITE_WHEEL_INDEX,
            B.PYOLITE_WHEEL_TS,
        ],
        actions=[
            U.do("yarn", "lerna", "run", "build:prod", "--scope", "@jupyterlite/app")
        ],
        targets=[*all_app_targets],
    )
    yield dict(
        name="js:pack",
        doc="build the JupyterLite distribution",
        file_dep=[
            *all_app_targets,
            *P.APP.glob("*.js"),
            *P.APP.glob("*.json"),
            *P.APP.rglob("*.ipynb"),
            *P.APP.glob("*/*/index.html"),
            # NOTE(review): the ".json" pattern below matches hidden files,
            # not "*.json" -- confirm whether that is intentional.
            *P.APP.glob("*/build/schemas/**/.json"),
            B.PYOLITE_WHEEL_INDEX,
            B.META_BUILDINFO,
            P.APP / "index.html",
            P.APP_NPM_IGNORE,
            P.APP_SCHEMA,
        ],
        actions=[
            (doit.tools.create_folder, [B.DIST]),
            U.do("npm", "pack", "../app", cwd=B.DIST),
        ],
        targets=[B.APP_PACK],
    )
    # Pre-build steps for the main python package: copy README and app pack.
    yield dict(
        name=f"py:{C.NAME}:pre:readme",
        file_dep=[P.README],
        targets=[P.PY_README],
        actions=[(U.copy_one, [P.README, P.PY_README])],
    )
    yield dict(
        name=f"py:{C.NAME}:pre:app",
        file_dep=[B.APP_PACK],
        targets=[B.PY_APP_PACK],
        actions=[
            (U.copy_one, [B.APP_PACK, B.PY_APP_PACK]),
        ],
    )
    # One build task per distributable python package; flit-based packages
    # get their own build action instead of setup.py.
    for py_name, setup_py in P.PY_SETUP_PY.items():
        py_pkg = setup_py.parent
        wheel = (
            py_pkg
            / f"""dist/{py_name.replace("-", "_")}-{D.PY_VERSION}-{C.NOARCH_WHL}"""
        )
        sdist = py_pkg / f"""dist/{py_name.replace("_", "-")}-{D.PY_VERSION}.tar.gz"""
        actions = [U.do("python", "setup.py", "sdist", "bdist_wheel", cwd=py_pkg)]
        file_dep = [
            *P.PY_SETUP_DEPS[py_name](),
            *py_pkg.rglob("src/*.py"),
            *py_pkg.glob("*.md"),
            setup_py,
        ]
        pyproj_toml = py_pkg / "pyproject.toml"
        targets = [wheel, sdist]
        # we might tweak the args
        if pyproj_toml.exists() and "flit_core" in pyproj_toml.read_text(**C.ENC):
            actions = [(U.build_one_flit, [py_pkg])]
            file_dep += [pyproj_toml]
        yield dict(
            name=f"py:{py_name}",
            doc=f"build the {py_name} python package",
            file_dep=file_dep,
            actions=actions,
            targets=targets,
        )
def parse_args():
    """Process arguments

    Builds the argument parser from a declarative spec table and parses
    sys.argv. ``--train`` is the only required option; all other options
    carry defaults shown in the auto-generated help.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # (flags, keyword options) for every supported argument, in help order.
    arg_specs = [
        (('--train', '-t'),
         dict(required=True, type=str, help="Training ProteinNet data")),
        (('--val', '-v'),
         dict(type=str, help="Validation ProteinNet data")),
        (('--no_gpu', '-n'),
         dict(action="store_true",
              help="Prevent GPU usage for ESM1b even when available")),
        (('--threshold', '-r'),
         dict(default=None, type=float,
              help="Perform frequency classification at given threshold")),
        (('--model', '-m'),
         dict(default="esm_top_model.pth", help="Path to save model")),
        (('--epochs', '-e'),
         dict(default=3, type=int, help="Epochs to train for")),
        (('--report_batch', '-p'),
         dict(default=1000, type=int, help="Batch multiple to report at")),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def RefundablePayrollTaxCredit(was_plus_sey_p, was_plus_sey_s,
                               RPTC_c, RPTC_rt,
                               rptc_p, rptc_s, rptc):
    """
    Computes refundable payroll tax credit amounts.

    The credit for each filer is a flat rate (RPTC_rt) of wages plus
    self-employment income, capped at RPTC_c; the combined credit is
    the sum of both filers' credits.
    """
    def _credit(earnings):
        # Rate-times-earnings, capped at the statutory maximum.
        return min(earnings * RPTC_rt, RPTC_c)

    rptc_p = _credit(was_plus_sey_p)
    rptc_s = _credit(was_plus_sey_s)
    rptc = rptc_p + rptc_s
    return (rptc_p, rptc_s, rptc)
def test_load_extra_first():
    """Test that solutions that refer to a requirement with an extra before it is defined correctly
    add the requirement with the extra"""
    # The fixture file lives next to this test module.
    solution_repo = SolutionRepository(
        os.path.join(os.path.dirname(__file__), "extra_only_solution.txt")
    )
    assert solution_repo.solution["extra_only"].metadata.name == "extra_only"
def open_and_swap(filename, mode="w+b", buffering=-1, encoding=None, newline=None):
    """
    Open a file for writing and relink it to the desired path in an atomic
    operation when the file is closed without an exception. This prevents
    the existing file data from being lost if an unexpected failure occurs
    while writing the file.

    :param filename: final destination path
    :param mode: mode passed to open(); a temp file in text or binary mode
        (derived from the presence of 'b') is created in the same directory
    :param buffering, encoding, newline: forwarded to open()
    :yields: the open file object for the caller to write to
    """
    fd, tmppath = tempfile.mkstemp(
        # Same directory as the target so the rename stays on one filesystem.
        dir=os.path.dirname(filename) or ".",
        text="b" not in mode,
    )
    fh = None
    try:
        fh = open(
            fd,
            mode=mode,
            buffering=buffering,
            encoding=encoding,
            newline=newline,
            closefd=False,
        )
        yield fh
        # Flush buffered data so the file is complete at swap time.
        fh.flush()
        # BUG FIX: os.replace (not os.rename) atomically overwrites an
        # existing destination on all platforms; os.rename raises
        # FileExistsError on Windows when `filename` already exists.
        os.replace(tmppath, filename)
        tmppath = None
    finally:
        if fh is not None:
            fh.close()
        os.close(fd)
        # Only unlink when the swap did not happen (error path).
        if tmppath is not None:
            os.unlink(tmppath)
def is_stable(A, domain='z'):
    """Determines if a linear state-space model is stable from eigenvalues of `A`

    Parameters
    ----------
    A : ndarray(n,n)
        state matrix
    domain : str, optional {'z', 's'}
        'z' for discrete-time, 's' for continuous-time state-space models

    Returns
    -------
    bool
        True when every pole is in the stable region for the given domain.
    """
    if domain not in ('z', 's'):
        raise ValueError(f"{domain} wrong. Use 's' or 'z'")
    poles = eigvals(A)
    if domain == 'z':
        # Discrete time: stable iff every pole lies inside the unit circle.
        return not any(abs(poles) > 1)
    # Continuous time: stable iff every pole lies in the left half-plane.
    return not any(np.real(poles) > 0)
def email_sent_ipn(path: str) -> tuple:
    """
    **email_sent_ipn**
    Delivered ipn for mailgun

    :param path: organization_id
    :return: OK, 200
    """
    # NOTE: Delivered ipn will end up here.
    # Recognized mailgun event types; every event (known or not) is
    # acknowledged with the same response, and no per-event processing
    # is implemented yet.
    known_events = ("delivered", "clicks", "opens", "failure", "spam", "unsubscribe")
    if path in known_events:
        pass  # placeholder for future per-event handling
    return "OK", 200
def decode(path: str) -> str:
    """
    Utility to round-trip *path* through the console encoding, dropping any
    characters the console cannot represent.

    :param path: string to sanitize for console output
    :return: the string re-encoded/decoded with unrepresentable characters removed
    """
    # BUG FIX: sys.stdout.encoding is None when stdout is piped or replaced,
    # which made the original raise TypeError; fall back to UTF-8.
    encoding = sys.stdout.encoding or "utf-8"
    return path.encode(encoding, 'ignore').decode(encoding)
def db_manage():
    """Database management commands.

    Intentionally empty: subcommands are attached elsewhere (presumably this
    is a CLI command group -- TODO confirm against the decorator at the
    definition site, which is outside this view).
    """
    pass
def load_posts_view(request):
    """Load posts view, handles asynchronous queries to retrieve more posts.

    GET requests return a JSON object with the retrieved ``posts`` and the
    next ``start`` offset; any other method returns an empty JSON body.
    """
    import json
    if request.method == 'GET':
        results, start = get_more_posts(request.GET)
        json_result = json.dumps({'posts': results,
                                  'start': start
                                  })
        # NOTE(review): `mimetype` was removed from HttpResponse in Django 1.7;
        # newer Django versions require `content_type` -- confirm the project's
        # Django version before upgrading.
        return HttpResponse(json_result, mimetype='application/json')
    else:
        return HttpResponse('', mimetype='application/json')
def create_dataset(m, timestep, var='all', chunks=(10, 300, 300)):
    """
    Create xarray Dataset from binary model data
    for one time step. This also incorporates all model
    grid information and dimensions, regardless of the variable selected.

    Parameters
    ----------
    m : LLCRegion
        Model class generated with LLCRegion()
    timestep : int
        Model time step to read.
    var : str or list, optional
        Variable to be read. Defaults to 'all', but only one variable,
        e.g. 'v', or a list of variables, e.g. ['t', 'v']
        can be selected here instead.
    chunks : tuple, optional
        Chunk size for dask. Defaults to (10, 300, 300)

    Returns
    -------
    ds : xarray Dataset
        Dataset
    """
    # BUG FIX: was `var is 'all'` -- identity comparison with a string literal
    # is unreliable (works only via CPython interning); use equality.
    if var == 'all':
        vars = _model_variables
    else:
        vars = {k: _model_variables[k] for k in var}
    # reduce xc/yc, xg/yg to 1d vector
    xc, yc = _reduce_2d_coords(m.xc, m.yc)
    xg, yg = _reduce_2d_coords(m.xg, m.yg)
    # calculate Zu, Zl, Zp1 (combination of Zu, Zl)
    tmp = m.drf
    tmp = np.insert(tmp, 0, 0)
    Zp1 = np.cumsum(tmp)
    Zl = Zp1[0:-1]
    Zu = Zp1[1::]
    # calculate drc
    drc = np.diff(m.z)
    drc = np.insert(drc, 0, m.z[0])
    drc = np.append(drc, Zp1[-1]-m.z[-1])
    # generate xarray dataset with only grid information first
    ds = xr.Dataset(coords={'xc': (['xc'], xc, {'axis': 'X'}),
                            'yc': (['yc'], yc, {'axis': 'Y'}),
                            'lon': (['xc'], xc, {'axis': 'X'}),
                            'lat': (['yc'], yc, {'axis': 'Y'}),
                            'dxc': (['yc', 'xg'], m.dxc),
                            'dyc': (['yg', 'xc'], m.dxc),
                            'xg': (['xg'], xg, {'axis': 'X', 'c_grid_axis_shift': -0.5}),
                            'yg': (['yg'], yg, {'axis': 'Y', 'c_grid_axis_shift': -0.5}),
                            'dxg': (['yg', 'xc'], m.dxg),
                            'dyg': (['yc', 'xg'], m.dyg),
                            'dxv': (['yg', 'xg'], m.dxv),
                            'dyu': (['yg', 'xg'], m.dyu),
                            # NOTE(review): the duplicated attrs dict below was in
                            # the original ('z' gets a 4th, encoding, element) --
                            # kept as-is; confirm whether it is intentional.
                            'z': (['z'], m.z, {'axis': 'Z'}, {'axis': 'Z'}),
                            'zl': (['zl'], Zl, {'axis': 'Z', 'c_grid_axis_shift': -0.5}),
                            'zu': (['zu'], Zu, {'axis': 'Z', 'c_grid_axis_shift': +0.5}),
                            'zp1': (['zp1'], Zp1, {'axis': 'Z', 'c_grid_axis_shift': (-0.5, 0.5)}),
                            'drc': (['zp1'], drc, {'axis': 'Z'}),
                            'drf': (['z'], m.drf, {'axis': 'Z'}),
                            'ra': (['yc', 'xc'], m.rac),
                            'raz': (['yg', 'xg'], m.raz),
                            'depth': (['yc', 'xc'], m.hb),
                            'hfacc': (['z', 'yc', 'xc'], m.hfacc),
                            'hfacw': (['z', 'yc', 'xg'], m.hfacw),
                            'hfacs': (['z', 'yg', 'xc'], m.hfacs)})
    # define dictionary that will hold dask arrays
    d = {}
    # read all variables into a dict with dask arrays
    for k, v in vars.items():
        filename = m.data_dir+'{}/{:010d}_{}'.format(v, timestep, v)+\
            '_10609.6859.1_936.1062.90'
        # account for funky V file names
        if v == 'V':
            exist = _check_file_exists(filename, verbose=False)
            # BUG FIX: was `if ~exist:` -- bitwise NOT of a bool is -2 or -1,
            # both truthy, so the fallback path always ran; use `not`.
            if not exist:
                filename = m.data_dir+'{}/{:010d}_{}'.format(v, timestep, v)+\
                    '_10609.6858.1_936.1062.90_Neg'
                exist = _check_file_exists(filename)
        d[k] = da.from_delayed(delayed(m.load_3d_data)(filename), (m.Nz, m.Nlat, m.Nlon), m.dtype)
        d[k] = d[k].rechunk(chunks)
    for k, v in d.items():
        ds[k] = (_grid_association[k], v)
    del d
    # add 2d variables
    if var == 'all':
        vars2d = _model_2dvariables
        d = {}
        for k, v in vars2d.items():
            filename = m.data_dir+'{}/{:010d}_{}'.format(v, timestep, v)+\
                '_10609.6859.1_936.1062.1'
            exist = _check_file_exists(filename)
            d[k] = da.from_delayed(delayed(m.load_2d_data)(filename), (m.Nlat, m.Nlon), m.dtype)
            d[k] = d[k].rechunk(chunks[1:])
        for k, v in d.items():
            ds[k] = (_grid_association[k], v)
        del d
    return ds
def fetchnl2bash(m:Manager, shuffle:bool=True)->DRef:
    """
    Fetch the nl2bash corpus (paired natural-language / bash-command files),
    optionally shuffle both sides with the same permutation, and split each
    side 90/10 into train/eval files.

    FIXME: Unhardcode '3rdparty'-based paths
    """
    # Natural-language side of the corpus, pinned by sha256.
    allnl=fetchlocal(m,
        path=join('3rdparty','nl2bash_essence','src','data','bash','all.nl'),
        sha256='1db0c529c350b463919624550b8f5882a97c42ad5051c7d49fbc496bc4e8b770',
        mode='asis',
        output=[promise, 'all.nl'] )
    # Matching bash-command side.
    allcm=fetchlocal(m,
        path=join('3rdparty','nl2bash_essence','src','data','bash','all.cm'),
        sha256='3a72eaced7fa14a0938354cefc42b2dcafb2d47297102f1279086e18c3abe57e',
        mode='asis',
        output=[promise, 'all.cm'] )
    if shuffle:
        # Shuffle both files together so line pairs stay aligned.
        s=lineshuffle(m, src={'allnl':mklens(allnl).output.refpath,
                              'allcm':mklens(allcm).output.refpath})
        allnl_refpath=mklens(s).allnl.refpath
        allcm_refpath=mklens(s).allcm.refpath
    else:
        allnl_refpath=mklens(allnl).output.refpath
        allcm_refpath=mklens(allcm).output.refpath
    # 90/10 train/eval split, applied identically to both sides.
    nlfiles=splitfile(m, src=allnl_refpath,
        fractions=[('train',f'train_nl.txt',0.9),
                   ('eval', f'eval_nl.txt',0.1)])
    cmfiles=splitfile(m, src=allcm_refpath,
        fractions=[('train',f'train_cm.txt',0.9),
                   ('eval', f'eval_cm.txt',0.1)])
    return mknode(m, name='fetchnl2bash', sources={
        'train_input_combined':mklens(nlfiles).train.refpath,
        'train_target_combined':mklens(cmfiles).train.refpath,
        'eval_input_combined':mklens(nlfiles).eval.refpath,
        'eval_target_combined':mklens(cmfiles).eval.refpath
        })
def mummer_cmds_four(path_file_four):
    """Example MUMmer commands (four files).

    Builds the expected nucmer and delta-filter command lines for every
    unordered pair of the four input files, in lexicographic pair order.
    """
    stems = ["file%d" % idx for idx in range(1, 5)]
    nucmer_cmds = []
    filter_cmds = []
    for i, first in enumerate(stems):
        for second in stems[i + 1:]:
            pair = "nucmer_output/%s_vs_%s" % (first, second)
            nucmer_cmds.append(
                "nucmer --mum -p %s %s.fna %s.fna" % (pair, first, second)
            )
            filter_cmds.append(
                "delta_filter_wrapper.py delta-filter -1 "
                "%s.delta %s.filter" % (pair, pair)
            )
    return MUMmerExample(path_file_four, nucmer_cmds, filter_cmds)
def Exponweibull(a=1, c=1, scale=1, shift=0):
    """
    Expontiated Weibull distribution.

    Args:
        a (float, Dist) : First shape parameter
        c (float, Dist) : Second shape parameter
        scale (float, Dist) : Scaling parameter
        shift (float, Dist) : Location parameter

    Returns:
        Dist: the shifted, scaled exponentiated-Weibull distribution.
    """
    # Build the core distribution, then apply the affine location-scale transform.
    dist = cores.exponweibull(a, c)*scale + shift
    # Human-readable representation used when printing the distribution.
    dist.addattr(str="Exponweibull(%s,%s,%s,%s)"%(a, c, scale,shift))
    return dist
def test_run_ml_with_segmentation_model(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test training and testing of segmentation models, when it is started together via run_ml.
    """
    config = DummyModel()
    config.num_dataload_workers = 0
    config.restrict_subjects = "1"  # keep the run tiny: one subject only
    # Increasing the test crop size should not have any effect on the results.
    # This is for a bug in an earlier version of the code where the wrong execution mode was used to
    # compute the expected mask size at training time.
    config.test_crop_size = (75, 75, 75)
    # Run inference on the validation and test sets, but skip the training set.
    config.inference_on_train_set = False
    config.inference_on_val_set = True
    config.inference_on_test_set = True
    config.set_output_to(test_output_dirs.root_dir)
    azure_config = get_default_azure_config()
    azure_config.train = True
    MLRunner(config, azure_config=azure_config).run()
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up device tracker for Keenetic NDMS2 component."""
    router: KeeneticRouter = hass.data[DOMAIN][config_entry.entry_id][ROUTER]
    tracked = set()  # MAC addresses that already have a tracker entity

    @callback
    def update_from_router():
        """Update the status of devices."""
        update_items(router, async_add_entities, tracked)

    update_from_router()

    registry = await entity_registry.async_get_registry(hass)
    # Restore devices that are not a part of active clients list.
    restored = []
    for entity_entry in registry.entities.values():
        if (
            entity_entry.config_entry_id == config_entry.entry_id
            and entity_entry.domain == DEVICE_TRACKER_DOMAIN
        ):
            # unique_id has the form "<mac>_<suffix>"; keep only the MAC.
            mac = entity_entry.unique_id.partition("_")[0]
            if mac not in tracked:
                tracked.add(mac)
                restored.append(
                    KeeneticTracker(
                        Device(
                            mac=mac,
                            # restore the original name as set by the router before
                            name=entity_entry.original_name,
                            ip=None,
                            interface=None,
                        ),
                        router,
                    )
                )
    if restored:
        async_add_entities(restored)
    # Refresh tracker state whenever the router signals new data.
    async_dispatcher_connect(hass, router.signal_update, update_from_router)
def authed_request_for_id(gplus_id, request):
    """Adds the proper access credentials for the specified user and then makes an HTTP request.

    :param gplus_id: Google+ user id whose access token is attached to the request
    :param request: a requests.Request-like object to prepare and send
    :return: the successful (HTTP 200) response
    :raises UnavailableException: on rate limiting (403), auth failure (401),
        or any other non-200 status
    """
    # Helper method to make retry easier
    def make_request(retry=True):
        token = get_access_token_for_id(gplus_id)
        request.headers['Authorization'] = 'Bearer %s' % token
        prepared_request = request.prepare()
        response = session.send(prepared_request, timeout=GOOGLE_API_TIMEOUT)
        if response.status_code == 401:
            # Our access token is invalid. If this is the first failure,
            # try forcing a refresh of the access token.
            if retry:
                Cache.delete(ACCESS_TOKEN_CACHE_KEY_TEMPLATE % gplus_id)
                return make_request(retry=False)
        return response

    response = make_request()
    if response.status_code == 403:
        # Typically used to indicate that Google is rate-limiting the API call.
        # BUG FIX: this previously referenced the undefined name `api_response`,
        # which raised NameError instead of the intended UnavailableException.
        raise UnavailableException('API 403 response: %r' % response.json(), 503)
    elif response.status_code == 401:
        raise UnavailableException('Invalid access token.', 401)
    elif response.status_code != 200:
        raise UnavailableException(
            'Unknown API error (code=%d): %r' % (response.status_code, response.json()), 502)
    return response
def test_suggest_regex_for_string(input, output, expected_exception):
    """Test different strings to represent them as a regex.

    Parametrized test: ``input`` holds the kwargs for
    suggest_regex_for_string, ``output`` the expected regex (None when no
    suggestion is expected), and ``expected_exception`` a context manager
    describing the anticipated error (or a no-op context).
    """
    with expected_exception:
        if output is not None:
            assert suggest_regex_for_string(**input) == output
        else:
            assert suggest_regex_for_string(**input) is None
def _bool_method_SERIES(op, name, str_rep):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.

    Returns a bound-method-style ``wrapper(self, other)`` implementing the
    boolean operator ``op`` with NA-filling and dtype coercion.
    """
    def na_op(x, y):
        # Apply `op` elementwise; on TypeError fall back to object-dtype
        # handling (mixed arrays, lists, or scalar comparands).
        try:
            result = op(x, y)
        except TypeError:
            if isinstance(y, list):
                y = lib.list_to_object_array(y)
            if isinstance(y, (np.ndarray, pd.Series)):
                if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
                    result = op(x, y)  # when would this be hit?
                else:
                    x = com._ensure_object(x)
                    y = com._ensure_object(y)
                    result = lib.vec_binop(x, y, op)
            else:
                try:
                    # let null fall thru
                    if not isnull(y):
                        y = bool(y)
                    result = lib.scalar_binop(x, y, op)
                except:
                    raise TypeError("cannot compare a dtyped [{0}] array with "
                                    "a scalar of type [{1}]".format(
                                        x.dtype, type(y).__name__))
        return result

    def wrapper(self, other):
        is_self_int_dtype = is_integer_dtype(self.dtype)

        # NA fill values chosen per dtype: 0 keeps int semantics, False keeps
        # bool semantics for the result of the boolean op.
        fill_int = lambda x: x.fillna(0)
        fill_bool = lambda x: x.fillna(False).astype(bool)

        if isinstance(other, pd.Series):
            name = _maybe_match_name(self, other)
            # Align `other` on self's index before operating.
            other = other.reindex_like(self)
            is_other_int_dtype = is_integer_dtype(other.dtype)
            other = fill_int(other) if is_other_int_dtype else fill_bool(other)

            # Result stays integer only when BOTH operands are integer.
            filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
            return filler(self._constructor(na_op(self.values, other.values),
                                            index=self.index,
                                            name=name))

        elif isinstance(other, pd.DataFrame):
            # Defer to DataFrame's implementation of the reflected op.
            return NotImplemented

        else:
            # scalars, list, tuple, np.array
            filler = fill_int if is_self_int_dtype and is_integer_dtype(np.asarray(other)) else fill_bool
            return filler(self._constructor(na_op(self.values, other),
                                            index=self.index)).__finalize__(self)
    return wrapper
def get_playlist_by_id(playlist_id):
    """Returns a playlist by playlist id, or None when no match exists."""
    return Playlist.query.filter(Playlist.playlist_id == playlist_id).first()
def section_cfield(xs, x_a, c_field, rmax=60e3):
    """
    Extract a section of a sound speed transect for use in xmission calculation.

    Keeps every range position within [xs, xs + rmax] and the matching
    columns of the sound-speed field.

    :param xs: section start position (same units as x_a)
    :param x_a: 1-D array of range positions
    :param c_field: 2-D field whose columns correspond to x_a
    :param rmax: section length; defaults to 60 km
    :return: (selected positions, selected field columns)
    """
    in_section = (x_a >= xs) & (x_a <= xs + rmax)
    return x_a[in_section], c_field[:, in_section]
def main_cli(ctx, instance_name=None, init=False, settings=False, checkversion=False, history=False, identifier=None, websearch=None):
    """
    Python client for the Dimensions Analytics API.
    More info: https://github.com/digital-science/dimcli
    """
    # Print the banner only when no direct lookup (id/web search) was requested.
    if not (identifier or websearch):
        click.secho("Dimcli - Dimensions API Client (" + VERSION + ")", dim=True)
    if init:
        # Create the credentials folder/file and exit.
        init_config_folder(USER_DIR, USER_CONFIG_FILE_PATH)
        return
    if checkversion:
        print_dimcli_report()
        return
    if identifier:
        # Resolve the ID to a direct Dimensions URL, or fall back to search.
        url = dimensions_url(identifier)
        if not url:
            click.secho("Cannot resolve automatically. Can be a patent, dataset or clinical trial ID. Falling back to search ..")
            url = dimensions_search_url(identifier)
        else:
            click.secho("Got a match: " + url)
        webbrowser.open(url)
        return
    if websearch:
        url = dimensions_search_url(websearch)
        click.secho("Opening url: " + url)
        webbrowser.open(url)
        return
    # Everything below needs credentials; bail out with a hint if missing.
    if not os.path.exists(USER_CONFIG_FILE_PATH):
        click.secho(
            "Credentials file not found - you can create one by typing: `dimcli --init`",
            fg="red",
        )
        click.secho(
            "More info: https://github.com/digital-science/dimcli#credentials-file",
            dim=True,
        )
        return
    if settings:
        preview_contents(USER_CONFIG_FILE_PATH)
        return
    if history:
        if os.path.exists(USER_HISTORY_FILE):
            open_multi_platform(USER_HISTORY_FILE)
        return
    if PROMPT_TOOLKIT_VERSION_OK:
        from .repl import repl
        # try online version check
        test = is_dimcli_outdated()
        if test:
            click.secho("====")
            click.secho("Heads up: there is a newer version of Dimcli available.", bold=True)
            click.secho("Update with `pip install dimcli -U` or, for more info, visit https://pypi.org/project/dimcli .\n====")
        # launch REPL
        repl.run(instance_name)
    else:
        print_warning_prompt_version()
def shared_dropout(shape, use_noise, trng, value):
    """
    Shared dropout mask (pervasive dropout).

    :param shape: symbolic shape of the mask to sample
    :param use_noise: symbolic flag; nonzero selects a freshly sampled
        binomial mask, zero selects the constant expected value instead
    :param trng: Theano random stream used for sampling
    :param value: keep probability of the binomial mask
    :return: symbolic tensor -- the mask or its expectation
    """
    return tensor.switch(use_noise,
                         trng.binomial(shape, p=value, n=1,
                                       dtype=floatX),
                         theano.shared(np.float32(value)))
def setup_flow_assembler(gb, method, data_key=None, coupler=None):
    """Setup a standard assembler for the flow problem for a given grid bucket.

    The assembler will be set up with primary variable name 'pressure' on the
    GridBucket nodes, and mortar_flux for the mortar variables.

    Parameters:
        gb: GridBucket.
        method (EllipticDiscretization).
        data_key (str, optional): Keyword used to identify data dictionary for
            node and edge discretization. Defaults to "flow".
        coupler (EllipticInterfaceLaw): Defaults to RobinCoupling.

    Returns:
        Assembler, ready to discretize and assemble problem.
        ndarray (num_blocks, 5): per-block info columns
            [dim, is_mortar, dof offset, neighbor block 1, neighbor block 2]
            (the two neighbor columns are -1 for non-mortar blocks).
    """
    if data_key is None:
        data_key = "flow"
    if coupler is None:
        coupler = pp.RobinCoupling(data_key, method)

    # Mixed discretizations carry both cell and face degrees of freedom.
    if isinstance(method, pp.MVEM) or isinstance(method, pp.RT0):
        mixed_form = True
    else:
        mixed_form = False

    for g, d in gb:
        if mixed_form:
            d[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1, "faces": 1}}
        else:
            d[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1}}
        d[pp.DISCRETIZATION] = {"pressure": {"diffusive": method}}
    for e, d in gb.edges():
        g1, g2 = gb.nodes_of_edge(e)
        d[pp.PRIMARY_VARIABLES] = {"mortar_flux": {"cells": 1}}
        d[pp.COUPLING_DISCRETIZATION] = {
            "lambda": {
                g1: ("pressure", "diffusive"),
                g2: ("pressure", "diffusive"),
                e: ("mortar_flux", coupler),
            }
        }
        d[pp.DISCRETIZATION_MATRICES] = {"flow": {}}

    assembler = pp.Assembler(gb)
    num_blocks = assembler.full_dof.size
    block_info = np.zeros((num_blocks, 5))
    block_start = np.hstack((0, np.cumsum(assembler.full_dof)))
    # map from grids to block dof index. Will be unique, since there is a single
    # dof per subdomain
    subdom_block_map = {}
    for (g, var), ind in assembler.block_dof.items():
        is_mortar = 0
        if var == "mortar_flux":
            is_mortar = 1
            dim = g[0].dim
        else:
            dim = g.dim
            subdom_block_map[g] = ind
        # BUG FIX: dtype was np.int, a deprecated alias removed in NumPy 1.24;
        # the builtin int is the correct replacement.
        block_info[ind, :3] = np.array([dim, is_mortar, block_start[ind]], dtype=int)
    # Second loop over the blocks. This time, we will fill in the two last
    # columns, on neighboring subdomains.
    for (g, var), ind in assembler.block_dof.items():
        if var == "mortar_flux":
            block_info[ind, 3] = subdom_block_map[g[0]]
            block_info[ind, 4] = subdom_block_map[g[1]]
        else:
            block_info[ind, 3:] = np.array([-1, -1])
    return assembler, block_info
def get_article(article_id: str, db: Session = Depends(deps.get_db),
                current_user: schemas.UserVerify = Depends(
                    deps.get_current_user)) -> JSONResponse:
    """Return Single Article as a JSON response; requires an authenticated user.

    NOTE(review): a missing record is reported with status 500; 404 would be
    more conventional -- confirm before changing the API contract.
    """
    data = crud_articles.get_article(article_id=article_id, db=db)
    if data is None:
        return JSONResponse(status_code=500,
                            content={"message": "No Records Found"})
    json_compatible_item_data = jsonable_encoder(data)
    return JSONResponse(status_code=200, content=json_compatible_item_data)
def _get_sp_instance():
    """Create an spotify auth_manager and check whether the current user has
    a token (has been authorized already). If the user has a token, then they
    are authenticated -- return their spotipy instance. If the user does not have
    a token, then they are not authenticated -- raise an exception.

    :raises SpotifyUserAuthFailure: carrying the authorization URL the user
        must visit to grant access
    """
    auth_manager = _get_auth_manager()
    if auth_manager.get_cached_token():
        return spotipy.Spotify(auth_manager=auth_manager)
    else:
        raise SpotifyUserAuthFailure(get_auth_url(show_dialog=True))
def get_func_global(op_type, dtype):
    """Generate function for global address space

    Used as `generator(op_type, dtype)`.

    :param op_type: name of an atomic operation on ``dppy.atomic`` (e.g. "add")
    :param dtype: unused here; kept for generator-signature compatibility
    :return: a function applying the atomic op at index 0 with operand 1
    """
    op = getattr(dppy.atomic, op_type)

    def f(a):
        op(a, 0, 1)

    return f
def klucb(x, d, div, upperbound, lowerbound=-float("inf"), precision=1e-6):
    """The generic klUCB index computation.

    Bisects on q in [max(x, lowerbound), upperbound] for the largest q with
    div(x, q) <= d, assuming div(x, .) is nondecreasing on that interval.

    Input args.:
        x: empirical mean / reference point
        d: divergence budget
        div: KL divergence to be used, called as div(x, q)
        upperbound: upper end of the search interval
        lowerbound=-float('inf'): lower clamp for the search interval
        precision=1e-6: bisection stops when the bracket is this narrow
    """
    lo = max(x, lowerbound)
    hi = upperbound
    # Standard bisection: shrink [lo, hi] around the divergence boundary.
    while hi - lo > precision:
        mid = (lo + hi) / 2
        if div(x, mid) > d:
            hi = mid
        else:
            lo = mid
    return (lo + hi) / 2
def convert_pk_to_index(pk_tuples, indices):
    """
    For a list of tuples with elements referring to pk's of indices,
    convert pks to 0-index values corresponding to order of queryset

    :param pk_tuples: list of tuples [(row_pk, col_pk), ... ]
    :param indices: list of querysets
    :return: list of tuples [(row_idx, col_idx), ... ]; tuples containing any
        pk outside the index scope are silently dropped
    """
    output_tuples = []
    # One pk -> position map per axis, in queryset order.
    maps = [pk_index_map(idx) for idx in indices]
    for pk_tuple in pk_tuples:
        try:
            idxs = tuple(maps[axis][pk] for axis, pk in enumerate(pk_tuple))
            output_tuples.append(idxs)
        except KeyError:
            # pk may not be in index scope which is fine
            pass
    return output_tuples
def device_path_to_str(path: Union[bytes, str]) -> str:
    """
    Converts a device path as returned by the fido2 library to a string.

    Typically, the path already is a string. Only on Windows, a bytes object
    using an ANSI encoding is used instead. We use the ISO 8859-1 encoding to
    decode the string which should work for all systems.
    """
    if not isinstance(path, bytes):
        return path
    # Latin-1 maps every byte 0-255 to a code point, so decoding cannot fail;
    # errors="ignore" is kept for parity with the original contract.
    return path.decode("iso-8859-1", errors="ignore")
def empirical(X):
    """Compute empirical covariance as baseline estimator.

    :param X: (n_samples, n_features) data matrix
    :return: (covariance, precision) -- the empirical covariance X^T X / n
        and its matrix inverse
    """
    print("Empirical")
    # BUG FIX: the sample count previously came from a module-level global
    # `n_samples`, which could silently disagree with X; derive it from X.
    n_samples = X.shape[0]
    cov = np.dot(X.T, X) / n_samples
    return cov, np.linalg.inv(cov)
def emitir_extrato(contas, numero_conta, movimentacoes, data_inicial):
    """
    Return all movements from <movimentacoes> made by the account with
    <numero_conta> from <data_inicial> onward; returns 0 when the account
    number is unknown.
    """
    historico_movimentacoes = []
    if numero_conta in contas:
        minhas_movimentacoes = movimentacoes_da_conta(numero_conta, movimentacoes)
        inicial = -1
        # Find the first movement on or after the statement's start date.
        for i, movimentacao in enumerate(minhas_movimentacoes):
            data_movimentacao = movimentacao[5]
            # Skip movements dated before the requested start date, i.e. when
            # the statement date is more recent than the movement date.
            if verificar_data_mais_recente(data_inicial, data_movimentacao):
                continue
            inicial = i
            break
        # If any movement exists after the requested date, return the slice
        # from that point onward (movements are assumed date-ordered).
        if(inicial >= 0):
            historico_movimentacoes = minhas_movimentacoes[inicial:]
        return historico_movimentacoes
    else:
        return 0
def wavelength_to_energy(wavelength):
    """
    Converts wavelength (A) to photon energy (keV).

    :param wavelength: photon wavelength in angstroms
    :return: photon energy in keV
    """
    hc_kev_angstrom = 12.39842  # h*c expressed in keV*angstrom
    return hc_kev_angstrom / wavelength
def torch_dataset_download_helper():
    """Call this function if you want to download dataset via PyTorch API.

    Installs a process-wide urllib opener that sends a browser User-agent
    header, because some dataset hosts reject the default Python user agent.
    Note this mutates global urllib state for the rest of the process.
    """
    from six.moves import urllib
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    urllib.request.install_opener(opener)
def calculate_hash_512(filepath, verbose):
    """
    SHA512 Hash Digest

    Streams the file in large blocks so arbitrarily big files can be
    hashed without loading them fully into memory (Python 2 code).

    :param filepath: path of the file to hash
    :param verbose: when truthy, print a progress bar to stdout
    :return: raw (binary) SHA-512 digest of the file contents
    """
    if verbose:
        print 'Calculating hash...'
    sha512_hash = hashlib.sha512()
    with open(filepath, 'rb') as f:
        statinfo = os.stat(filepath)
        block_size = 100 * (2**20) #Magic number: 100 * 1MB blocks
        # Python 2 integer division; the +1 guarantees at least one block.
        nb_blocks = (statinfo.st_size / block_size) + 1
        cnt_blocks = 0
        while True:
            block = f.read(block_size)
            if not block: break
            sha512_hash.update(block)
            cnt_blocks = cnt_blocks + 1
            progress = 100 * cnt_blocks / nb_blocks
            if verbose:
                draw_progress_bar(progress)
        f.close()  # NOTE(review): redundant — the with-statement already closes f
    return sha512_hash.digest()
def _crop_after_rotation(im, angle, xres, yres, surroundings):
    """Crop image to the bounding box of bite's surroundings.
    Arguments:
        im: PIL.Image, rotated map part
        angle: by which the map has been rotated, in degrees (counterclockwise)
        xres: width of one tile in pixels
        yres: height of one tile in pixels
        surroundings: shapely.geometry.polygon.Polygon
    Returns:
        PIL.Image: the crop of `im` covering the rotated surroundings'
        axis-aligned bounding box, with pixel data loaded.
    """
    #before rotation
    x1, y1, x2, y2 = surroundings.bounds
    old_bb_upper_left = Point(x1, y1)
    old_bb_upper_right = Point(x2, y1)
    old_bb_bottom_left = Point(x1, y2)
    old_bb_center = ((x1+x2)/2, (y1+y2)/2)
    #shapely y-axis goes upwards
    shapely_angle = -angle
    #after rotation
    x1, y1, x2, y2 = affinity.rotate(surroundings, shapely_angle, origin=old_bb_center).bounds
    crop_upper_left = Point(x1, y1)
    crop_width = x2 - x1
    crop_height = y2 - y1
    #points where old bounding box of surroundings (i.e. the old image) touches
    #its bounding box after rotation
    tl = None #touch at the left side of the new bounding box
    tt = None #touch at the top side of the new bounding box
    # Which corner of the old box lands on the left/top edge depends on the
    # rotation direction.
    if angle > 0:
        tl = affinity.rotate(old_bb_upper_left, shapely_angle, origin=old_bb_center)
        tt = affinity.rotate(old_bb_upper_right, shapely_angle, origin=old_bb_center)
    else:
        tl = affinity.rotate(old_bb_bottom_left, shapely_angle, origin=old_bb_center)
        tt = affinity.rotate(old_bb_upper_left, shapely_angle, origin=old_bb_center)
    #upper left corner of ther new bounding box
    new_bb_upper_left = Point(tl.x, tt.y)
    #from these we get b: upper left corner of the crop area relative to new_bb_upper_left
    b = (crop_upper_left.x - new_bb_upper_left.x, crop_upper_left.y - new_bb_upper_left.y)
    #crop rectangle in pixels relative to new_bb_upper_left
    # NOTE(review): assumes one map-coordinate unit corresponds to one tile of
    # xres x yres pixels — confirm against the caller's coordinate system.
    crop_box = [int(x) for x in [
        b[0] * xres,
        b[1] * yres,
        (b[0] + crop_width) * xres,
        (b[1] + crop_height) * yres
    ]]
    cropped = im.crop(box=crop_box)
    cropped.load()
    return cropped
def get_custom_scorer(metric, gib=True, needs_proba=False, needs_threshold=False):
    """Get a scorer from a str, func or scorer.
    Scorers used by ATOM have a name attribute.
    Parameters
    ----------
    metric: str, func or scorer
        Name, metric or scorer to get ATOM's scorer from.
    gib: bool, optional (default=True)
        whether the metric is a score function or a loss function,
        i.e. if True, a higher score is better and if False, lower is
        better. Is ignored if the metric is a string or a scorer.
    needs_proba: bool, optional (default=False)
        Whether the metric function requires probability estimates of
        a classifier. Is ignored if the metric is a string or a scorer.
    needs_threshold: bool, optional (default=False)
        Whether the metric function takes a continuous decision
        certainty. Is ignored if the metric is a string or a scorer.
    Returns
    -------
    scorer: scorer
        Custom sklearn scorer with name attribute.
    """
    # Copies are needed to not alter SCORERS
    if isinstance(metric, str):
        metric = metric.lower()
        if metric in SCORERS:
            scorer = copy(SCORERS[metric])
            scorer.name = metric
        elif metric in SCORERS_ACRONYMS:
            scorer = copy(SCORERS[SCORERS_ACRONYMS[metric]])
            scorer.name = SCORERS_ACRONYMS[metric]
        elif metric in CUSTOM_SCORERS:
            # NOTE(review): this wraps a copy of the raw metric function in a
            # new scorer — confirm CUSTOM_SCORERS holds functions, not scorers.
            scorer = make_scorer(copy(CUSTOM_SCORERS[metric]))
            scorer.name = scorer._score_func.__name__
        else:
            raise ValueError(
                "Unknown value for the metric parameter, got "
                f"{metric}. Choose from: {', '.join(SCORERS)}."
            )
    elif hasattr(metric, "_score_func"):  # Scoring is a scorer
        scorer = copy(metric)
        # Some scorers use default kwargs
        default_kwargs = ("precision", "recall", "f1", "jaccard")
        if any(name in scorer._score_func.__name__ for name in default_kwargs):
            if not scorer._kwargs:
                scorer._kwargs = {"average": "binary"}
        # Recover the canonical name by structural comparison with the
        # known scorers; an unmatched scorer keeps no name attribute.
        for key, value in SCORERS.items():
            if scorer.__dict__ == value.__dict__:
                scorer.name = key
                break
    else:  # Scoring is a function with signature metric(y, y_pred)
        scorer = make_scorer(
            score_func=metric,
            greater_is_better=gib,
            needs_proba=needs_proba,
            needs_threshold=needs_threshold,
        )
        scorer.name = scorer._score_func.__name__
    return scorer
def julian_day(t='now'):
    """
    Convert a UTC time specification to a Julian Date via astropy.
    """
    parsed = parse_time(t)
    return Time(parsed).jd
def add_stop_words(dataframe: pd.DataFrame,
                   k_words: int) -> list:
    """
    Build a stop-word candidate list: the k_words most frequent tokens
    across the 'text' column of the dataframe.

    :param dataframe: frame with a 'text' column of whitespace-separated strings
    :param k_words: number of most frequent unique words to keep
    :return: deduplicated list of those words (arbitrary order)
    """
    corpus = " ".join(dataframe['text'].values)
    tokens = corpus.split()
    most_common = Counter(tokens).most_common(k_words)
    candidates = {word for word, _ in most_common}
    return list(candidates)
def get_batch(data_iterator):
    """Fetch one BERT batch, broadcast it across ranks and unpack it."""
    # Fields expected in every sample, all shipped as int64 on the wire.
    keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask']
    datatype = torch.int64
    # Only some ranks own an iterator; the rest receive via broadcast.
    if data_iterator is not None:
        data = next(data_iterator)
    else:
        data = None
    batch = mpu.broadcast_data(keys, data, datatype)
    # Cast each field to the dtype its consumer expects.
    tokens = batch['text'].long()
    types = batch['types'].long()
    lm_labels = batch['labels'].long()
    sentence_order = batch['is_random'].long()
    loss_mask = batch['loss_mask'].float()
    padding_mask = batch['padding_mask'].long()
    return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
def test_interruption(env):
    """With asynchronous interrupts, the victim expects an interrupt
    while waiting for an event, but will process this even if no
    interrupt occurred.
    """
    def victim(env):
        try:
            yield env.timeout(10)
            pytest.fail('Expected an interrupt')
        except simpy.Interrupt as exc:
            assert exc.cause == 'interrupt!'

    def attacker(env):
        target = env.process(victim(env))
        yield env.timeout(5)
        target.interrupt('interrupt!')

    env.process(attacker(env))
    env.run()
def outcomes_by_resected_lobe(directory='L:\\', filename='All_Epilepsy_Ops_CROSSTAB_Statistics_YAY_2019.xlsx',
                              lobes=['T Lx', 'T Lesx']):
    """
    Creates the list of Gold_standard post-operative ILAE 1 at all follow up years MRNs in patients who had only
    specific lobe resections.
    lobes = chose from this list NBOTE THE SPACES:
    ['CCx', 'F Lesx', 'F Lesx ', 'F Lx', 'F T Lx', 'Hx', 'Hx ', 'MST', 'O Lesx', 'O Lx',
    'O P Lx', 'P Lesx', 'P Lx', 'T F Lx', 'T Lesx', 'T Lx', 'T O Lesx', 'T P Lesx', 'T P Lx']
    These are MRNs - not exactly patients (some patients have more than one)

    Returns a 2-tuple of MRN lists: (gold-outcome MRNs, surgery-without-gold-outcome MRNs)
    for the requested lobes.
    NOTE(review): mutable default for `lobes` — safe only because it is never
    mutated here; the hardcoded drive/sheet/columns tie this to one workbook.
    """
    excel_file = os.path.join(directory, filename)
    # Columns 1/4/36 hold hospital numbers, outcome boolean and operation type.
    df_outcomes = pd.read_excel(excel_file, sheet_name = 'Aa_E_Only_All_E_Ops_CROSSTAB', usecols=[1, 4, 36])  # non-indexed
    df_outcomes2 = df_outcomes['Hospital No'].str.split(', ').apply(pd.Series)  # makes 4 columns of hosp no's
    df_outcomes2.index = df_outcomes.set_index(['boolean', 'OP Type']).index  # set index (this weird line so can use deepcopy prior if req'd)
    df_outcomes3 = df_outcomes2.stack().reset_index(['boolean', 'OP Type'])  # now 1,105 non-null row DataFrame
    df_outcomes3.columns = ['Gold_outcome', 'Resected Lobe', 'MRN']  # rename columns
    df_outcomes3.set_index('MRN', inplace=True)  # now have list of 1,105 MRNs(=index) and boolean Gold_outcome as two columns in pd.DataFrame
    # from the above chose the temporal lobe resections:
    df_temporal = df_outcomes3.loc[df_outcomes3['Resected Lobe'].isin(lobes)]  # returns the rows with T Lx or T Lesx
    # now to access all the Gold_outcome True from the temporal lobe resections:
    df_gold_temporal_outcomes = df_temporal.loc[df_temporal.Gold_outcome == True]  # gives a DataFrame of all MRNs and outcome Trues
    temporal_gold_outcomes_MRNs = list(df_gold_temporal_outcomes.index.values)  # list of just MRNs for use in temporal_find_MRN_label_outcomes()
    # the false dataframe index values gives all temporal lobe resected patients who had surgery without gold outcome
    df_temporal_had_surgery = df_temporal.loc[df_temporal.Gold_outcome == False]
    temporal_had_surgery_MRNs = list(df_temporal_had_surgery.index.values)
    return temporal_gold_outcomes_MRNs, temporal_had_surgery_MRNs
def run_experiment_rotated_mnist_SVIGP_Hensman(args, args_dict):
    """
    Function with tensorflow graph and session for SVIGP_Hensman experiments on rotated MNIST data.

    Builds the data pipeline, decoder + SVGP model, ELBO objective and Adam
    optimizer, then trains for ``args.nr_epochs`` epochs, periodically logging
    metrics, conditional-generation MSE on the test set and (optionally)
    checkpoints and plots.

    :param args: parsed command-line namespace (dataset, batch_size, lr, beta,
        nr_epochs, save flags and model hyper-parameters).
    :param args_dict: the same arguments as a plain dict, dumped to JSON when
        checkpointing.
    :return: None; results are printed and optionally written to disk.
    """
    # define some constants
    n = len(args.dataset)
    # NOTE(review): per-dataset split sizes (4050/640/270) appear fixed by the
    # rotated-MNIST data generation — confirm against the dataset pickles.
    N_train = n * 4050
    N_eval = n * 640
    N_test = n * 270
    if args.save:
        # Make a folder to save everything
        extra = args.elbo + "_" + str(args.beta)
        chkpnt_dir = make_checkpoint_folder(args.base_dir, args.expid, extra)
        pic_folder = chkpnt_dir + "pics/"
        res_file = chkpnt_dir + "res/ELBO_pandas"
        res_file_GP = chkpnt_dir + "res/ELBO_GP_pandas"
        print("\nCheckpoint Directory:\n" + str(chkpnt_dir) + "\n")
        # NOTE(review): file handle from open() is never closed explicitly.
        json.dump(args_dict, open(chkpnt_dir + "/args.json", "wt"))
    # Init plots
    if args.show_pics:
        plt.ion()
    graph = tf.Graph()
    with graph.as_default():
        # ====================== 1) import data ======================
        # shuffled data or not
        ending = args.dataset + ".p"
        iterator, training_init_op, eval_init_op, test_init_op, train_data_dict, eval_data_dict, test_data_dict, \
        eval_batch_size_placeholder, test_batch_size_placeholder = import_rotated_mnist(args.mnist_data_path,
                                                                                        ending, args.batch_size,
                                                                                        global_index=True)
        # get the batch
        input_batch = iterator.get_next()
        # ====================== 2) build ELBO graph ======================
        # init VAE object
        VAE = SVIGP_Hensman_decoder(L=args.L)
        beta = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
        # init inducing points
        inducing_points_init = generate_init_inducing_points(args.mnist_data_path + 'train_data' + ending,
                                                             n=args.nr_inducing_points,
                                                             remove_test_angle=None,
                                                             PCA=args.PCA, M=args.M)
        ip_joint = not args.ip_joint
        GP_joint = not args.GP_joint
        # init GP-LVM vectors
        if args.ov_joint:
            if args.PCA:  # use PCA embeddings for initialization of object vectors
                object_vectors_init = pickle.load(open(args.mnist_data_path +
                                                       'pca_ov_init{}.p'.format(args.dataset), 'rb'))
            else:  # initialize object vectors randomly
                object_vectors_init = np.random.normal(0, 1.5,
                                                       len(args.dataset)*400*args.M).reshape(len(args.dataset)*400,
                                                                                             args.M)
        else:
            object_vectors_init = None
        # init SVGP object
        SVGP_ = SVIGP_Hensman(fixed_inducing_points=ip_joint, initial_inducing_points=inducing_points_init,
                              fixed_gp_params=GP_joint, object_vectors_init=object_vectors_init, name='main',
                              jitter=args.jitter, N_train=N_train, L=args.L,
                              K_obj_normalize=args.object_kernel_normalize, dtype=np.float64)
        # forward pass
        elbo, recon_loss, KL_term, inside_elbo, recon_images, \
        inside_elbo_recon, inside_elbo_kl, latent_samples = forward_pass_deep_SVIGP_Hensman(input_batch, vae=VAE, svgp=SVGP_)
        # test loss and predictions
        recon_images_test, recon_loss_test = predict_deep_SVIGP_Hensman(input_batch, vae=VAE, svgp=SVGP_)
        # GP diagnostics
        GP_l, GP_amp, GP_ov, GP_ip = SVGP_.variable_summary()
        # ====================== 3) optimizer ops ======================
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        lr = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
        optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr)
        # minimizing negative elbo
        gradients = tf.gradients(-elbo, train_vars)
        optim_step = optimizer.apply_gradients(grads_and_vars=zip(gradients, train_vars), global_step=global_step)
        # ====================== 4) Pandas saver ======================
        if args.save:
            res_vars = [global_step,
                        elbo,
                        recon_loss,
                        KL_term]
            res_names = ["step",
                         "ELBO",
                         "recon loss",
                         "KL term"]
            res_vars += [inside_elbo,
                         inside_elbo_recon,
                         inside_elbo_kl,
                         latent_samples]
            res_names += ["inside elbo",
                          "inside elbo recon",
                          "inside elbo KL",
                          "latent_samples"]
            res_vars_GP = [GP_l,
                           GP_amp,
                           GP_ov,
                           GP_ip]
            res_names_GP = ['length scale', 'amplitude', 'object vectors', 'inducing points']
            res_saver_GP = pandas_res_saver(res_file_GP, res_names_GP)
            res_saver = pandas_res_saver(res_file, res_names)
        # ====================== 5) print and init trainable params ======================
        print_trainable_vars(train_vars)
        init_op = tf.global_variables_initializer()
        # ====================== 6) saver and GPU ======================
        if args.save_model_weights:
            saver = tf.compat.v1.train.Saver(max_to_keep=3)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.ram)
        # ====================== 7) tf.session ======================
        nr_epochs = args.nr_epochs
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            sess.run(init_op)
            # training loop
            start_time = time.time()
            cgen_test_set_MSE = []
            for epoch in range(nr_epochs):
                # 7.1) train for one epoch
                sess.run(training_init_op)
                elbos, losses = [], []
                start_time_epoch = time.time()
                while True:
                    try:
                        _, g_s_, elbo_, recon_loss_ = sess.run([optim_step, global_step, elbo, recon_loss],
                                                               {beta: args.beta, lr: args.lr})
                        elbos.append(elbo_)
                        losses.append(recon_loss_)
                    except tf.errors.OutOfRangeError:
                        # End of the epoch: the dataset iterator is exhausted.
                        if (epoch + 1) % 10 == 0:
                            print('Epoch {}, mean ELBO per batch: {}'.format(epoch, np.mean(elbos)))
                        MSE = np.sum(losses) / N_train
                        print('MSE loss on train set for epoch {} : {}'.format(epoch, MSE))
                        end_time_epoch = time.time()
                        print("Time elapsed for epoch {}: {}".format(epoch, end_time_epoch - start_time_epoch))
                        break
                # 7.2) save metrics to Pandas df for model diagnostics
                if args.save and (epoch + 1) % 10 == 0:
                    if args.test_set_metrics:
                        sess.run(test_init_op, {test_batch_size_placeholder: args.batch_size})
                    else:
                        sess.run(eval_init_op, {eval_batch_size_placeholder: args.batch_size})
                    new_res = sess.run(res_vars, {beta: args.beta})
                    res_saver(new_res, 1)
                    # save GP params
                    new_res_GP = sess.run(res_vars_GP, {beta: args.beta})
                    res_saver_GP(new_res_GP, 1)
                # 7.3) calculate loss on test set and visualize reconstructed images
                if (epoch + 1) % 10 == 0:
                    # test set: conditional generation
                    # predict test data (in batches)
                    sess.run(test_init_op, {test_batch_size_placeholder: args.batch_size})
                    recon_loss_cgen, recon_images_cgen = [], []
                    while True:
                        try:
                            loss_, pics_ = sess.run([recon_loss_test, recon_images_test])
                            recon_loss_cgen.append(loss_)
                            recon_images_cgen.append(pics_)
                        except tf.errors.OutOfRangeError:
                            break
                    recon_loss_cgen = np.sum(recon_loss_cgen) / N_test
                    recon_images_cgen = np.concatenate(recon_images_cgen, axis=0)
                    # test set: plot generations
                    cgen_test_set_MSE.append((epoch, recon_loss_cgen))
                    print("Conditional generation MSE loss on test set for epoch {}: {}".format(epoch,
                                                                                                recon_loss_cgen))
                    plot_mnist(test_data_dict['images'],
                               recon_images_cgen,
                               title="Epoch: {}. CGEN MSE test set:{}".format(epoch + 1, round(recon_loss_cgen, 4)))
                    if args.show_pics:
                        plt.show()
                        plt.pause(0.01)
                    if args.save:
                        plt.savefig(pic_folder + str(g_s_) + "_cgen.png")
                        with open(pic_folder + "test_metrics.txt", "a") as f:
                            f.write("{},{},{}\n".format(epoch + 1, round(MSE, 4), round(recon_loss_cgen, 4)))
                    # save model weights
                    if args.save and args.save_model_weights:
                        saver.save(sess, chkpnt_dir + "model", global_step=g_s_)
            # log running time
            end_time = time.time()
            print("Running time for {} epochs: {}".format(nr_epochs, round(end_time - start_time, 2)))
            # report best test set cgen MSE achieved throughout training
            best_cgen_MSE = sorted(cgen_test_set_MSE, key=lambda x: x[1])[0]
            print("Best cgen MSE on test set throughout training at epoch {}: {}".format(best_cgen_MSE[0],
                                                                                         best_cgen_MSE[1]))
            # save images from conditional generation
            if args.save:
                with open(chkpnt_dir + '/cgen_images.p', 'wb') as test_pickle:
                    pickle.dump(recon_images_cgen, test_pickle)
def search_file_for_monkeys(file_name, threshold_confidence, wav_folder, model, tidy=True, full_verbose=True, hnm=False, summary_file=False):
    """
    Splits 60-second file into 3-second clips. Runs each through
    detector. If activation surpasses confidence threshold, clip
    is separated.
    If hard-negative mining functionality selected, function
    takes combination of labelled praat file and 60-second wave file,
    runs detector on 3-second clips, and seperates any clips that
    the detector incorrectly identifies as being positives.
    These clips are then able to be fed in as negative examples, to
    improve the discriminatory capability of the network
    Example call: search_file_for_monkeys('5A3AD7A6', 60, '/home/dgabutler/Work/CMEEProject/Data/whinnies/shady-lane/')

    :param file_name: wav file basename (without extension) inside wav_folder
    :param threshold_confidence: percentage (0-100) above which a clip counts as positive
    :param wav_folder: folder containing the 60-second recordings
    :param model: trained Keras-style model with a .predict method
    :param tidy: delete the temporary clip folder afterwards
    :param full_verbose: print per-file progress/summary to stdout
    :param hnm: hard-negative-mining mode (requires matching praat TextGrid)
    :param summary_file: append detections to a per-folder CSV summary
    :return: None; detected clips are copied to the results/mined folders
    """
    audio_folder = wav_folder
    # isolate folder name from path:
    p = pathlib.Path(wav_folder)
    isolated_folder_name = p.parts[2:][-1]
    wav = audio_folder+file_name+'.WAV'
    try:
        wavfile = AudioSegment.from_wav(wav)
    except OSError:
        print("\nerror: audio file",os.path.basename(wav),"at path", os.path.dirname(wav), "cannot be loaded - probably improperly recorded")
        return
    clip_length_ms = 3000
    clips = make_chunks(wavfile, clip_length_ms)
    print("\n-- processing file " + file_name +'.WAV')
    # if hard-negative mining, test for presence of praat file early for efficiency:
    if hnm:
        praat_file_path = '../Data/praat-files/'+file_name+'.TextGrid'
        try:
            labelled_starts = wavtools.whinny_starttimes_from_praatfile(praat_file_path)[1]
        except IOError:
            print('error: no praat file named',os.path.basename(praat_file_path),'at path', os.path.dirname(praat_file_path))
            return
    clip_dir = wav_folder+'clips-temp/'
    # delete temporary clips directory if interuption to previous
    # function call failed to remove it
    if os.path.exists(clip_dir) and os.path.isdir(clip_dir):
        rmtree(clip_dir)
    # create temporary clips directory
    os.makedirs(clip_dir)
    # Export all inviduals clips as wav files
    # print 'clipping 60 second audio file into 3 second snippets to test...\n'
    for clipping_idx, clip in enumerate(clips):
        clip_name = "clip{0:02}.wav".format(clipping_idx+1)
        clip.export(clip_dir+clip_name, format="wav")
    D_test = []
    clipped_wavs = glob.glob(clip_dir+'clip*')
    # numeric sort so clip indices line up with their time offsets below
    clipped_wavs.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
    for clip in clipped_wavs:
        y, sr = librosa.load(clip, sr=48000, duration=3.00)
        ps = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=2048, hop_length=512, win_length=1024, window='hamming')
        # short final clips produce a smaller spectrogram and are skipped
        if ps.shape != (128, 282): continue
        D_test.append(ps)
    # D_test = wavtools.denoise_dataset(D_test)
    call_count = 0
    hnm_counter = 0
    if not os.path.exists('../Data/mined-false-positives/'):
        os.makedirs('../Data/mined-false-positives/')
    # reshape to be correct dimension for CNN input
    # NB. dimensions are: num.samples, num.melbins, num.timeslices, num.featmaps
    # print "...checking clips for monkeys..."
    for idx, clip in enumerate(D_test):
        D_test[idx] = clip.reshape(1,128,282,1)
        predicted = model.predict(D_test[idx])
        # if NEGATIVE:
        if predicted[0][0] <= (threshold_confidence/100.0):  ########## THIS IS SECTION THAT CHANGED BETWEEN 1 node/2 node:
            continue                                         # WAS: if predicted[0][1] <= (threshold_confidence/100.0)
                                                             # furthermore 3 changes (predicted[0][1] -> ..cted[0][0]) below
        else:
            # if POSITIVE
            call_count+=1
            lower_clip_bound = (3*(idx+1))-3
            upper_clip_bound = 3*(idx+1)
            # i.e. clip 3 would be 6-9 seconds into original 60-sec file
            approx_position = str(lower_clip_bound)+'-'+str(upper_clip_bound)
            # regular detector behaviour - not hard negative mining
            if not hnm:
                # suspected positives moved to folder in Results, files renamed 'filename_numcallinfile_confidence.WAV'
                # results_dir = '/media/dgabutler/My Passport/Audio/detected-positives/'+isolated_folder_name+'-results'
                results_dir = '../Results/detected-positives/'+isolated_folder_name+'-results'
                if not os.path.exists(results_dir):
                    os.makedirs(results_dir)
                copyfile(clipped_wavs[idx], results_dir+'/'+file_name+'_'+str(call_count)+'_'+approx_position+'_'+str(int(round(predicted[0][0]*100)))+'.WAV')
                # making summary file
                if summary_file:
                    summary_file_name = '../Results/'+isolated_folder_name+'-results-summary.csv'
                    # obtain datetime from file name if possible
                    try:
                        datetime_of_recording = wavtools.filename_to_localdatetime(file_name)
                        date_of_recording = datetime_of_recording.strftime("%d/%m/%Y")
                        time_of_recording = datetime_of_recording.strftime("%X")
                    # if not possible due to unusual file name,
                    # assign 'na' value to date time
                    except ValueError:
                        date_of_recording = 'NA'
                        time_of_recording = 'NA'
                    # values to be entered in row of summary file:
                    column_headings = ['file name', 'approx. position in recording (secs)', 'time of recording', 'date of recording', 'confidence']
                    csv_row = [file_name, approx_position, time_of_recording, date_of_recording, str(int(round(predicted[0][0]*100)))+'%']
                    # make summary file if it doesn't already exist
                    summary_file_path = pathlib.Path(summary_file_name)
                    if not summary_file_path.is_file():
                        with open(summary_file_name, 'w') as csvfile:
                            filewriter = csv.writer(csvfile, delimiter=',')
                            filewriter.writerow(column_headings)
                            filewriter.writerow(csv_row)
                    # if summary file exists, *append* row to it
                    else:
                        with open(summary_file_name, 'a') as csvfile:
                            filewriter = csv.writer(csvfile, delimiter=',')
                            filewriter.writerow(csv_row)
            else:
                # if hard-negative mining for false positives to enhance training:
                labelled_ends = wavtools.whinny_endtimes_from_praatfile(praat_file_path)[1]
                # praat times are in milliseconds, clip bounds in seconds
                if not any(lower_clip_bound <= starts/1000.0 <= upper_clip_bound for starts in labelled_starts) \
                   and not any(lower_clip_bound <= ends/1000.0 <= upper_clip_bound for ends in labelled_ends):
                    # i.e. if section has not been labelled as containing a call
                    # (therefore a false positive has been detected)
                    hnm_counter+=1
                    copyfile(clipped_wavs[idx], '../Data/mined-false-positives/'+file_name+'_'+str(hnm_counter)+'_'+approx_position+'_'+str(int(round(predicted[0][0]*100)))+'.WAV')
                else: continue
            # if full_verbose:
            #     print 'clip number', '{0:02}'.format(idx+1), '- best guess -', best_guess
    # delete all created clips and temporary clip folder
    if tidy:
        rmtree(clip_dir)
        # NOTE(review): machine-specific, destructive shell call — empties this
        # user's trash to prevent build-up of trashed clips.
        subprocess.call(['rm -rf /home/dgabutler/.local/share/Trash/*'], shell=True)
    # print statements to terminal
    if full_verbose:
        if not hnm:
            print('\nfound', call_count, 'suspected call(s) that surpass %d%% confidence threshold in 60-second file %s.WAV' % (threshold_confidence, file_name))
        else:
            print('\nhard negative mining generated', hnm_counter, 'suspected false positive(s) from file', file_name, 'for further training of network')
def register_metric(cls: Type[Metric]):
    """Registers metric under the list of standard TFMA metrics.

    Returns the class unchanged so the function can also be used as a
    class decorator (``@register_metric``); previously it returned None,
    which would have replaced a decorated class with None.
    """
    _METRIC_OBJECTS[cls.__name__] = cls
    return cls
def pool(sparkdf, start_column, end_column, var):
    """
    Generate pools and calculate maximum var unpooled.
    :param sparkdf: Input Spark dataframe.
    :param start_column: Start time column name.
    :param end_column: End time column name.
    :param var: Variable for which to calculate metric.
    :return: A Spark dataframe with pools (sizes and counts).
    :return: Maximum active metric for var.
    """
    # Sweep-line over sorted start/end events, tracking per-size and overall
    # concurrency peaks.
    starts_dict, ends_dict, starts_sorted, ends_sorted = sorted_dicts(sparkdf, start_column, end_column, var)
    # NOTE(review): `r.size` assumes the selected column is literally named
    # 'size' — confirm var == 'size' or that Row attribute access matches var.
    size_groups = {s:{'current': 0, 'max': 0} for s in [r.size for r in sparkdf.select(var).distinct().collect()]}
    active = {'current': 0, 'max': 0}
    start_index, end_index = 0, 0
    while start_index < len(starts_sorted) or end_index < len(ends_sorted):
        start, end = None, ends_sorted[end_index]
        if start_index < len(starts_sorted):
            start = starts_sorted[start_index]
        if start is None or start > end:
            # An interval closes: shrink its size group and the active total.
            group = size_groups[ends_dict[end]]
            group['current'] -= 1
            active['current'] -= ends_dict[end]
            end_index += 1
        else:
            # An interval opens: grow counts and record new maxima.
            group = size_groups[starts_dict[start]]
            group['current'] += 1
            if group['current'] > group['max']:
                group['max'] = group['current']
            active['current'] += starts_dict[start]
            if active['current'] > active['max']:
                active['max'] = active['current']
            start_index += 1
    pool_counts = [{var: int(s), 'count': int(size_groups[s]['max'])} for s in size_groups.keys()]
    max_unpooled = active['max']
    return pool_counts, max_unpooled
def jwt_response_payload_handler(token, user=None, request=None):
    """
    Custom JWT response payload.

    Bundles the issued token with the serialized user so the client
    receives both in a single login response.

    :param token: encoded JWT string
    :param user: authenticated user instance
    :param request: current request (passed to the serializer context)
    :return: dict with 'token' and 'user' entries
    """
    serialized_user = UserSerializer(user, context={'request': request}).data
    payload = {
        'token': token,
        'user': serialized_user,
    }
    return payload
def query_yelp_lookup(biz_id):
    """Look up a restaurant on Yelp by its business id.

    :param biz_id: Yelp business id string
    :return: decoded JSON response as a dict
    """
    # SECURITY: a bearer token was hard-coded here. Prefer the YELP_API_TOKEN
    # environment variable; the embedded key is kept only as a fallback for
    # backward compatibility and should be rotated/removed.
    token = os.environ.get('YELP_API_TOKEN',
                           'w5JFtwCUKq05GlSpm8cKo51dBYDQ6r9tyzo-qRsKt4wDyB5'
                           '_ro6gW5gnG9hS6bvnNHNxOQLHfw7o_9S1e86nkvgcU7DQI_'
                           'sM6GVt9rqcq_rRYKtagQrexuH0zsU0WXYx')
    headers = {'Authorization': 'Bearer ' + token}
    url = 'https://api.yelp.com/v3/businesses/' + biz_id
    # A timeout keeps a dead network from hanging the caller forever.
    query = requests.get(url, headers=headers, timeout=10)
    return query.json()
def dashboard(request, condition='recent'):
    """Render the admin dashboard: top posts by the chosen criterion,
    the latest comments, and site-wide totals.

    `condition` selects the post ordering; unknown values yield the
    error page.
    """
    post_count = settings.DASHBOARD_POST_COUNT
    comment_count = settings.DASHBOARD_COMMENT_COUNT
    # Map each dashboard tab to its queryset ordering.
    orderings = {
        'recent': '-id',
        'view': '-view_count',
        'like': '-like_count',
        'comment': '-comment_count',
    }
    if condition not in orderings:
        return error_page(request)
    order = orderings[condition]
    posts = Blog.objects.filter(status='1normal').order_by(order)[:post_count]
    comments = Comment.objects.filter(
        status='1normal').order_by('-id')[:comment_count]
    total_posts = Blog.objects.filter(status='1normal').count()
    total_comments = Comment.objects.filter(status='1normal').count()
    total_spams = Comment.objects.filter(status='7spam').count()
    total_users = User.objects.count()
    context = {
        'posts': posts,
        'comments': comments,
        'condition': condition,
        'total_posts': total_posts,
        'total_comments': total_comments,
        'total_spams': total_spams,
        'total_users': total_users,
    }
    return render(request, "blogs/dashboard.html", context)
def read_manifest(path):
    """Read dictionary of workflows from the Packal manifest.xml file."""
    root = ET.parse(path).getroot()
    workflows = {}
    for workflow_node in root:
        data = {"packal": True}
        for field in workflow_node:
            # "short" is renamed to "description"; all other tags map directly.
            if field.tag == "short":
                data["description"] = field.text.strip()
            else:
                data[field.tag] = field.text.strip() if field.text else None
        data["author_url"] = packal_user_url(data["author"])
        if "bundle" in data:
            workflows[data["bundle"]] = data
    return workflows
def _load_v1_txt(path):
    """Parses a SIF V1 text file, returning numpy arrays.
    Args:
      path: string containing the path to the ASCII file.
    Returns:
      A tuple of 6 elements:
        constants: A numpy array of shape (element_count). The constant
          associated with each SIF element.
        centers: A numpy array of shape (element_count, 3). The centers of the
          SIF elements.
        radii: A numpy array of shape (element_count, 3). The axis-aligned
          radii of the gaussian falloffs.
        rotations: A numpy array of shape (element_count, 3). The euler-angle
          rotations of the SIF elements.
        symmetry_count: An integer. The number of elements which are left-right
          symmetric.
        features: A numpy array of shape (element_count, implicit_len). The LDIF
          neural features, if they are present.
    Raises:
      ValueError: If the file is not a v1 SIF file, a row is malformed, or a
        symmetric element follows an asymmetric one.
    """
    lines = file_util.readlines(path)
    if lines[0] != 'SIF':
        raise ValueError(f'Could not parse {path} as a sif txt. First line was {lines[0]}')
    shape_count, version, implicit_len = [int(x) for x in lines[1].split(' ')]
    version += 1
    if version != 1:
        raise ValueError(f'This function can only parse v1 files. This version: {version}.')
    symmetry_count = 0
    last_was_symmetric = True
    constants = []
    centers = []
    radii = []
    rotations = []
    features = []
    for row in lines[2:]:
        elts = row.split(' ')
        if len(elts) != 11 + implicit_len:
            raise ValueError('Failed to parse the following row with '
                             f'implicit_len {implicit_len}: {row}')
        explicit_params = np.array([float(x) for x in elts[:10]], dtype=np.float32)
        is_symmetric = bool(int(elts[10]))
        if is_symmetric:
            symmetry_count += 1
            if not last_was_symmetric:
                raise ValueError(f'File not supported by parser: row {row} is '
                                 'symmetric but follows an asymmetric element.')
        # BUG FIX: track the symmetry of the current row. Previously this flag
        # was never updated, so the "symmetric elements must come first" check
        # above could never trigger.
        last_was_symmetric = is_symmetric
        constants.append(explicit_params[0])
        centers.append(explicit_params[1:4])
        radii.append(explicit_params[4:7])
        rotations.append(explicit_params[7:10])
        if implicit_len > 0:
            implicit_params = np.array([float(x) for x in elts[11:]], dtype=np.float32)
            features.append(implicit_params)
    constants = np.stack(constants)
    centers = np.stack(centers)
    radii = np.stack(radii)
    rotations = np.stack(rotations)
    features = np.stack(features) if features else None
    # Radii have their sqrt stored for GAPS:
    radii = radii * radii
    return constants, centers, radii, rotations, symmetry_count, features
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the sensor platform."""
    _LOGGER.info("setup_platform called for Webcomic")
    sensor = ComicSensor(hass, config)
    add_devices([sensor])
def split_model_name(model):
    """
    Split model names by _
    Takes into account packages with _ and processor types with _
    """
    # Drop the 3-character file extension and normalise dots to underscores.
    model = model[:-3].replace('.', '_')
    # Try the longest processor suffixes first so that e.g. "nertagger"
    # is matched before "tagger".
    suffixes = sorted(ending_to_processor.keys(), key=lambda s: -len(s))
    for suffix in suffixes:
        if model.endswith(suffix):
            model = model[:-(len(suffix) + 1)]
            processor = ending_to_processor[suffix]
            break
    else:
        raise AssertionError(f"Could not find a processor type in {model}")
    lang, package = model.split('_', 1)
    return lang, package, processor
def dg83_setup(
        ghz = 95,
        lat_of_cen = 10,
        cml = 20,
        n_alpha = 10,
        n_E = 10,
        E0 = 0.1,
        E1 = 10.,
        nn_dir = None,
        no_synch = False,
):
    """Create and return a VanAllenSetup object prepared to use the Divine &
    Garrett 1983 model of Jupiter's magnetic field and plasma.
    ghz
      The observing frequency, in GHz.
    lat_of_cen
      The body's latitude-of-center, in degrees.
    cml
      The body's central meridian longitude, in degrees.
    n_alpha
      Number of pitch angles to sample when deriving p/k distribution parameters.
    n_E
      Number of energies to sample when deriving p/k distribution parameters.
    E0
      Low end of energy sampling regime, in MeV.
    E1
      High end of energy sampling regime, in MeV.
    nn_dir
      The directory with the neural-network data used to generate synchrotron
      radiative transfer coefficients.
    no_synch
      If true, ignore `nn_dir` and do not load synchrotron computatation info.
      Makes things faster if you just want to evaluate the DG83 model and not
      actually do any radiative transfer.
    """
    from .divine1983 import JupiterD4Field
    from .distributions import DG83Distribution
    from .integrate import FormalRTIntegrator

    # Angles arrive in degrees; all the geometry below wants radians.
    lat_of_cen *= astutil.D2R
    cml *= astutil.D2R
    coord_xform = ObserverToBodycentric(lat_of_cen, cml)
    bfield = JupiterD4Field()

    # Particle distribution with the requested pitch-angle/energy sampling.
    distrib = DG83Distribution()
    distrib.n_alpha = n_alpha
    distrib.n_E = n_E
    distrib.E0 = E0
    distrib.E1 = E1

    tracer = FormalRayTracer()
    tracer.ne0_cutoff = 1e-6
    integrator = FormalRTIntegrator()

    if no_synch:
        synch_calc = None
    else:
        from .synchrotron import NeuroSynchrotronCalculator
        synch_calc = NeuroSynchrotronCalculator(nn_dir=nn_dir)

    return VanAllenSetup(coord_xform, bfield, distrib, tracer, synch_calc,
                         integrator, cgs.rjup, ghz * 1e9)
def test_shell_cmd_inputs_list_sep_1():
    """providing list as an additional input:, sep, no argstr"""
    inpA_field = (
        "inpA",
        attr.ib(
            type=str,
            metadata={
                "position": 1,
                "help_string": "inpA",
                "sep": ",",
                "argstr": "",
            },
        ),
    )
    my_input_spec = SpecInfo(name="Input", fields=[inpA_field], bases=(ShellSpec,))
    shelly = ShellCommandTask(
        executable="executable", inpA=["aaa", "bbb", "ccc"], input_spec=my_input_spec
    )
    # the three values should be joined with the "," separator
    assert shelly.cmdline == "executable aaa,bbb,ccc"
def jump_to_start(self, line_start, line_end, letter_start, letter_end) -> None:
    """
    Move the chosen cursor indices to the start of the highlighted area.
    """
    if line_start <= line_end:
        # Selection runs top-to-bottom (or sits on a single line):
        # its visual start is the start point.
        self.chosen_LineIndex, self.chosen_LetterIndex = line_start, letter_start
    else:
        # Selection was dragged upwards: its visual start is the end point.
        self.chosen_LineIndex, self.chosen_LetterIndex = line_end, letter_end
def compute() -> int:
    """
    Return the sum of all numbers that equal the sum of the
    factorials of their own digits.
    >>> compute()
    40730
    """
    # Above 7 * 9! the digit-factorial sum can no longer reach the number.
    upper_bound = 7 * factorial(9) + 1
    total = 0
    for candidate in range(3, upper_bound):
        if sum_of_digit_factorial(candidate) == candidate:
            total += candidate
    return total
def dijkstra(matrix, start=None, end=None):
    """
    Implementation of Dijkstra algorithm to find the shortest path between two
    nodes on a nxn grid graph (with 8-neighbourhood).
    NOTE: This is a vertex variant of the problem, i.e. nodes carry weights, not edges.
    :param matrix (np.ndarray [grid_dim, grid_dim]): Matrix of node-costs.
    :param start (tuple or None): Source node; defaults to the top-left (0, 0).
    :param end (tuple or None): Target node. If None, the raw predecessor map
        is returned instead of a path-indicator matrix.
    :return: If ``end`` is None, a dict mapping each improved node to its
        predecessor; otherwise an indicator matrix (np.ndarray
        [grid_dim, grid_dim]) of the nodes on the shortest start->end path.
    """
    if start is None:
        start = (0, 0)

    def neighbors_func(pos):
        # Yield all in-bounds 8-neighbourhood cells of ``pos``.
        pos = np.array(pos)
        neighbors = get_neighbor_pattern(dim=2)
        for off in neighbors:
            new_pos = pos + off
            # BUGFIX: was ``new_pos > 0``, which made row/column 0 unreachable.
            if np.all(new_pos >= 0) and np.all(new_pos < matrix.shape):
                yield tuple(new_pos)

    costs = np.full_like(matrix, 1.0e10)
    costs[start] = matrix[start]
    # BUGFIX: seed the queue with the start node's own cost, not matrix[0][0].
    priority_queue = [(matrix[start], start)]
    certain = set()
    transitions = dict()
    while priority_queue:
        _, (cur_x, cur_y) = heapq.heappop(priority_queue)
        if (cur_x, cur_y) in certain:
            # BUGFIX: was ``pass``, which re-expanded already-settled nodes.
            continue
        # BUGFIX: neighbors_func takes a single position argument, but was
        # called as neighbors_func(cur_x, cur_y) -> TypeError at runtime.
        for x, y in neighbors_func((cur_x, cur_y)):
            if (x, y) not in certain:
                new_cost = matrix[x][y] + costs[cur_x][cur_y]
                if new_cost < costs[x][y]:
                    costs[x][y] = new_cost
                    heapq.heappush(priority_queue, (new_cost, (x, y)))
                    transitions[(x, y)] = (cur_x, cur_y)
        certain.add((cur_x, cur_y))
    if end is None:
        return transitions
    # retrieve the path by walking the predecessor chain back from ``end``
    cur_x, cur_y = end
    on_path = np.zeros_like(matrix)
    # BUGFIX: was hard-coded on_path[-1][-1] = 1 regardless of ``end``.
    on_path[end] = 1
    while (cur_x, cur_y) != start:
        cur_x, cur_y = transitions[(cur_x, cur_y)]
        on_path[cur_x, cur_y] = 1.0
    return on_path
def is_finally_visible_segm(*args):
    """
    is_finally_visible_segm(segment_t s) -> bool

    Thin SWIG wrapper: delegates to the native IDA API to report whether
    the given segment is visible in the final listing.
    """
    return _idaapi.is_finally_visible_segm(*args)
def make_course_dictionary(debug=False):
"""
Make a course dictionary from VIVO contents. Key is course number
such as ABF2010C. Value is URI.
"""
from vivofoundation import vivo_sparql_query
query = """
SELECT ?x ?label ?coursenum
WHERE {
?x a ufVivo:Course .
?x ufVivo:courseNum ?coursenum .
}"""
result = vivo_sparql_query(query)
try:
count = len(result["results"]["bindings"])
except:
count = 0
if debug:
print query, count, result["results"]["bindings"][0],\
result["results"]["bindings"][1]
course_dictionary = {}
i = 0
while i < count:
b = result["results"]["bindings"][i]
coursenum = b['coursenum']['value']
uri = b['x']['value']
course_dictionary[coursenum] = uri
i = i + 1
return course_dictionary | 5,331,467 |
def get_undisbursed_principal(loan):
    """Return the loan principal minus the amount already disbursed."""
    total_principal = frappe.get_value(
        "Microfinance Loan", loan, "loan_principal"
    )
    if not total_principal:
        raise frappe.DoesNotExistError("Loan: {} not found".format(loan))
    already_disbursed = get_disbursed(loan)
    return total_principal - already_disbursed
def northing_and_easting(dictionary):
    """
    Retrieve and return the northing and easting strings to be used as
    dictionary keys.

    Parameters
    ----------
    dictionary : dict

    Returns
    -------
    northing, easting : tuple of str
        ('x', 'y') when the dictionary carries projected coordinates,
        ('latitude', 'longitude') otherwise.
    """
    # BUGFIX: the original condition ``if not 'x' and 'y' in dictionary.keys()``
    # parsed as ``(not 'x') and ...`` which is always False, so the
    # latitude/longitude branch was unreachable.
    if 'x' in dictionary and 'y' in dictionary:
        northing = 'x'
        easting = 'y'
    else:
        northing = 'latitude'
        easting = 'longitude'
    return northing, easting
def update():
    """Re-read the supervisor configuration on the remote host (``supervisorctl update``)."""
    sudo("supervisorctl update")
def test_get_fgco2_fix():
    """Check that fgco2 (CMIP6, GFDL-ESM4, Omon) resolves to the expected fix list."""
    fix = Fix.get_fixes('CMIP6', 'GFDL-ESM4', 'Omon', 'fgco2')
    # Both the variable-specific and the table-wide fix must be returned.
    assert fix == [Fgco2(None), Omon(None)]
def visual_encrypt(fi):
    """
    Encode the selected region with the "visual encrypt" algorithm:
    every byte is XORed with the byte ``key_length`` positions before it,
    so the first ``key_length`` bytes of the selection stay unchanged.
    """
    offset = fi.getSelectionOffset()
    length = fi.getSelectionLength()
    if length > 0:
        data = list(fi.getDocument())
        # Do not show a command prompt window when spawning the dialog
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        # Execute visual_encrypt_dialog.py to show the key-length GUI
        p = subprocess.Popen(["py.exe", "-3", "XOR/visual_encrypt_dialog.py"], startupinfo=startupinfo, stdout=subprocess.PIPE)
        # Read the key length the user chose (the dialog prints it on stdout)
        stdout_data, stderr_data = p.communicate()
        if stdout_data == "":
            # Dialog was cancelled; leave the document untouched
            return
        key_length = int(stdout_data.rstrip())
        # Chain-XOR each byte with the one key_length positions earlier.
        # NOTE(review): if length is not a multiple of key_length the inner
        # loop can index past the selection end — confirm this is intended.
        for i in range(offset + key_length, offset + length, key_length):
            for j in range(0, key_length):
                data[i + j] = chr(ord(data[i + j]) ^ ord(data[i + j - key_length]))
        fi.newDocument("Output of Visual Encrypt", 1)
        fi.setDocument("".join(data))
        fi.setBookmark(offset, length, hex(offset), "#c8ffff")
        if length == 1:
            print("Encoded one byte from offset %s to %s." % (hex(offset), hex(offset)))
        else:
            print("Encoded %s bytes from offset %s to %s." % (length, hex(offset), hex(offset + length - 1)))
        print("Added a bookmark to encoded region.")
def decode_path(name):
    """Decode a byte path using the filesystem's preferred encoding."""
    fs_encoding = sys.getfilesystemencoding()
    return name.decode(fs_encoding)
def save_account(account):
    """
    Serialize *account* with pickle and save it to
    ``./accounts/<name>/<name>``.

    :param account: object with a ``name`` attribute identifying it
    :return: 0 on success
    """
    root_dir = os.path.join("accounts", account.name)
    # exist_ok avoids the race between an exists() check and makedirs()
    os.makedirs(root_dir, exist_ok=True)
    # "wb" suffices: the file is only written, never read back here
    # (the original used "wb+" needlessly).
    with open(os.path.join(root_dir, account.name), "wb") as f:
        pickle.dump(account, f)
    return 0
def create_simulation_job(clientRequestToken=None, outputLocation=None, loggingConfig=None, maxJobDurationInSeconds=None, iamRole=None, failureBehavior=None, robotApplications=None, simulationApplications=None, dataSources=None, tags=None, vpcConfig=None, compute=None):
    """
    Creates an AWS RoboMaker simulation job.

    See also: AWS API Documentation for RoboMaker ``CreateSimulationJob``.

    :type clientRequestToken: string
    :param clientRequestToken: Unique, case-sensitive idempotency token.
        This field is autopopulated if not provided.
    :type outputLocation: dict
    :param outputLocation: S3 location for job output:
        {'s3Bucket': str, 's3Prefix': str}.
    :type loggingConfig: dict
    :param loggingConfig: {'recordAllRosTopics': bool} — whether to record
        all ROS topics ('recordAllRosTopics' is REQUIRED when supplied).
    :type maxJobDurationInSeconds: integer
    :param maxJobDurationInSeconds: [REQUIRED] Maximum simulation job
        duration in seconds (up to 14 days, i.e. 1,209,600 seconds). When
        reached, the job status transitions to Completed.
    :type iamRole: string
    :param iamRole: [REQUIRED] IAM role name that allows the simulation
        instance to call the AWS APIs specified in its associated policies
        on your behalf.
    :type failureBehavior: string
    :param failureBehavior: 'Continue' (restart the job in the same host
        instance) or 'Fail' (stop the job and terminate the instance).
    :type robotApplications: list
    :param robotApplications: Robot application configurations. Each dict
        has 'application' [REQUIRED], optional 'applicationVersion', and a
        'launchConfig' [REQUIRED] containing 'packageName' [REQUIRED],
        'launchFile' [REQUIRED], 'environmentVariables',
        'portForwardingConfig' ('portMappings' of {'jobPort',
        'applicationPort', 'enableOnPublicIp'}) and 'streamUI'.
    :type simulationApplications: list
    :param simulationApplications: Simulation application configurations
        with the same shape as ``robotApplications``.
    :type dataSources: list
    :param dataSources: Read-only S3 data mounted under
        /opt/robomaker/datasources/<name>. Each dict has 'name' [REQUIRED],
        's3Bucket' [REQUIRED] and 's3Keys' [REQUIRED]. Limit: 100 files and
        25GB combined across all DataSourceConfig objects.
    :type tags: dict
    :param tags: Map of tag keys/values attached to the simulation job.
    :type vpcConfig: dict
    :param vpcConfig: VPC settings: 'subnets' [REQUIRED], 'securityGroups',
        'assignPublicIp'. Subnets and security groups must belong to the
        same VPC.
    :type compute: dict
    :param compute: {'simulationUnitLimit': int}; one simulation unit is
        1 vCPU and 2GB of memory, billed up to the supplied limit.

    :rtype: dict
    :return: Description of the created job: 'arn', 'status',
        'lastStartedAt'/'lastUpdatedAt' timestamps, 'failureBehavior',
        'failureCode', 'simulationTimeMillis', the echoed request fields
        (output location, logging config, applications, data sources —
        with 's3Keys' expanded to {'s3Key', 'etag'} — tags, compute) and
        the resolved 'vpcId' inside 'vpcConfig'.

    Raises RoboMaker.Client exceptions: ResourceNotFoundException,
    InvalidParameterException, InternalServerException, ThrottlingException,
    LimitExceededException, IdempotentParameterMismatchException,
    ServiceUnavailableException.
    """
    pass
def affine(p, scale, theta, offset):
    """Scale, rotate (by ``theta`` radians) and translate point ``p``."""
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    new_x = (p.X * cos_t - p.Y * sin_t) * scale.X + offset.X
    new_y = (p.X * sin_t + p.Y * cos_t) * scale.Y + offset.Y
    return arcpy.Point(new_x, new_y)
def test_interface_two_segments_noweek_nomonth(constant_days_two_segments_df: pd.DataFrame):
    """Check that SpecialDaysTransform with both detectors disabled raises ValueError on construction."""
    # NOTE(review): original docstring claimed AssertionError during
    # fit_transform, but the code asserts ValueError at init time.
    with pytest.raises(ValueError):
        _ = SpecialDaysTransform(find_special_weekday=False, find_special_month_day=False)
def GetMarkedPos(slot):
    """
    Get marked position

    @param slot: slot number: 1..1024 if the specifed value is <= 0
                 range, IDA will ask the user to select slot.

    @return: BADADDR - the slot doesn't contain a marked address
             otherwise returns the marked address
    """
    curloc = idaapi.curloc()
    # markedpos() takes the slot number through an int pointer and may
    # update it in place, hence the int_pointer wrapper.
    intp = idaapi.int_pointer()
    intp.assign(slot)
    return curloc.markedpos(intp)
def elslib_CylinderParameters(*args):
    """
    * parametrization P (U, V) = Location + V * ZDirection + Radius * (Cos(U) * XDirection + Sin (U) * YDirection)

    SWIG wrapper: computes the (U, V) surface parameters of point ``P`` on
    the cylinder defined by axis placement ``Pos`` and ``Radius``; U and V
    are returned through the float-reference output arguments.

    :param Pos: axis placement of the cylinder
    :type Pos: gp_Ax3
    :param Radius: cylinder radius
    :type Radius: float
    :param P: point to parametrize
    :type P: gp_Pnt
    :param U: output U parameter
    :type U: float &
    :param V: output V parameter
    :type V: float &
    :rtype: void
    """
    return _ElSLib.elslib_CylinderParameters(*args)
def test_no_output():
    """Makes sure that when no output flag is on, no files are created.
    Should be run first so that other tests haven't created files"""
    # Build a package with no_file=True so generation runs but writes nothing.
    args = Arguments(
        src_dir="src",
        extra_data=["gbextradata"],
        pkg_dir=os.path.join("tests", "gbtestapp"),
        clean=True,
        no_file=True
    )
    package_generator = PackageGenerator(args)
    generated_okay = package_generator.generate()
    # Name the sha file that WOULD have been produced, so we can assert
    # its absence below.
    sha_filename = package_generator.args.formats["sha"].format(
        an=package_generator.args.info['app_name'],
        v=package_generator.args.info['app_version'],
        os=package_generator.args.info['operating_system'],
        m=package_generator.args.info['machine_type']
    )
    # Generation must succeed while none of the usual artifacts exist on disk.
    assert generated_okay == EXIT_OKAY \
        and not os.path.exists(PackageGenerator.INFO_FILE) \
        and not os.path.exists(PackageGenerator.FILES_FILE) \
        and not os.path.exists(PackageGenerator.ENVIRON_SCRIPT
                               + PackageGenerator.ENVIRON_SCRIPT_POSIX_EXT) \
        and not os.path.exists(PackageGenerator.ENVIRON_SCRIPT
                               + PackageGenerator.ENVIRON_SCRIPT_WIN_EXT) \
        and not os.path.exists(sha_filename)
def totaled_no_review_url(cc, sql_time_specification):  # pragma: no cover
    """Counts the number of commits with no review url in a given timeframe

    Args:
      cc(cursor)
      sql_time_specification(str): a sql command to limit the dates of the
          returned results

    Return:
      count(int): a count of all commits with no review_url
      results(list): a list of lists with all tbr'ed commits with no lgtm in
          the format [rietveld_url, git_timestamp, git_subject, git_hash],
          sorted newest first
    """
    cc.execute("""SELECT git_commit.review_url, git_commit.timestamp,
               git_commit.subject, git_commit.hash
               FROM git_commit
               WHERE git_commit.review_url = ''
               AND %s""" % sql_time_specification)
    rows = cc.fetchall()
    formatted_rows = [
        [url, ts.strftime("%Y-%m-%d %H:%M:%S"), subj.replace('-', ' '), sha]
        for url, ts, subj, sha in rows
    ]
    formatted_rows.sort(key=lambda row: row[1], reverse=True)
    return len(rows), formatted_rows
def is_sat(formula, solver_name=None, logic=None, portfolio=None):
    """Return True iff *formula* is satisfiable.

    :param formula: The formula to check satisfiability
    :type formula: FNode
    :param solver_name: Specify the name of the solver to be used
    :type solver_name: string
    :param logic: Specify the logic that is going to be used
    :param portfolio: A list of solver names to perform portfolio solving.
    :type portfolio: An iterable of solver names
    :returns: True when the formula is SAT, False when UNSAT.
    :rtype: bool
    """
    env = get_env()
    manager = env.formula_manager
    if formula not in manager:
        # Formula built in a different environment: normalize it into ours.
        warnings.warn("Warning: Contextualizing formula during is_sat")
        formula = manager.normalize(formula)
    return env.factory.is_sat(
        formula, solver_name=solver_name, logic=logic, portfolio=portfolio
    )
def bounds(*tile):
    """Returns the bounding box of a tile

    Parameters
    ----------
    tile : Tile or tuple
        May be be either an instance of Tile or 3 ints (X, Y, Z).

    Returns
    -------
    LngLatBbox
    """
    xtile, ytile, zoom = _parse_tile_arg(*tile)
    n_tiles = math.pow(2, zoom)
    # Web-mercator inverse projection for the tile's corners.
    west = xtile / n_tiles * 360.0 - 180.0
    north = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * ytile / n_tiles))))
    east = (xtile + 1) / n_tiles * 360.0 - 180.0
    south = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * (ytile + 1) / n_tiles))))
    return LngLatBbox(west, south, east, north)
def make_evo_plots(x_dot, x_dot_train, x_dot_sim,
                   x_true, x_sim, time, t_train, t_test):
    """
    Plots the true evolution of X and Xdot, along with
    the model evolution of X and Xdot, saving figures to Pictures/.

    Parameters
    ----------
    x_dot: 2D numpy array of floats
        (M = number of time samples, r = truncation number of the SVD)
        True Xdot for the entire time range
    x_dot_train: 2D numpy array of floats
        (M_train = number of time samples in training data region,
        r = truncation number of the SVD)
        Model Xdot for the training data (currently unused — the plotting
        line for it is commented out below)
    x_dot_sim: 2D numpy array of floats
        (M_test = number of time samples in the test data region,
        r = truncation number of the SVD)
        Model Xdot forecast for the test data
    x_true: 2D numpy array of floats
        (M_test = number of time samples in the test data region,
        r = truncation number of the SVD)
        The true evolution of the temporal BOD modes
    x_sim: 2D numpy array of floats
        (M_test = number of time samples in the test data region,
        r = truncation number of the SVD)
        The model evolution of the temporal BOD modes
    time: numpy array of floats
        (M = number of time samples)
        Time in microseconds (unused here; only t_train/t_test are plotted)
    t_train: numpy array of floats
        (M_train = number of time samples in the training data region)
        Time in microseconds in the training data region
    t_test: numpy array of floats
        (M_test = number of time samples in the test data region)
        Time in microseconds in the test data region
    """
    r = x_true.shape[1]
    fig, axs = plt.subplots(r, 1, sharex=True, figsize=(7, 9))
    # For r = 6 or 12, re-layout as a 3-row grid instead of a single column.
    if r == 12 or r == 6:
        fig, axs = plt.subplots(3, int(r / 3), figsize=(16, 9))
        axs = np.ravel(axs)
    # Loop over the r temporal Xdot modes that were fit
    for i in range(r):
        axs[i].plot(t_test / 1.0e3, x_dot[t_train.shape[0]:, i], color='k',
                    linewidth=2, label='numerical derivative')
        # axs[i].plot(t_train/1.0e3, x_dot_train[:, i], color='red',
        #            linewidth=2, label='model prediction')
        axs[i].plot(t_test / 1.0e3, x_dot_sim[:, i], color='r',
                    linewidth=2, label='model forecast')
        axs[i].set_yticklabels([])
        axs[i].set_xticklabels([])
        axs[i].tick_params(axis='both', which='major', labelsize=18)
        axs[i].tick_params(axis='both', which='minor', labelsize=18)
        axs[i].grid(True)
    plt.savefig('Pictures/xdot.pdf')
    plt.savefig('Pictures/xdot.eps')
    # Repeat for X
    fig, axs = plt.subplots(r, 1, sharex=True, figsize=(7, 9))
    if r == 12 or r == 6:
        fig, axs = plt.subplots(3, int(r / 3), figsize=(16, 9))
        axs = np.ravel(axs)
    for i in range(r):
        axs[i].plot(t_test / 1.0e3, x_true[:, i], 'k',
                    linewidth=2, label='true simulation')
        axs[i].plot(t_test / 1.0e3, x_sim[:, i], color='r',
                    linewidth=2, label='model forecast')
        axs[i].set_yticklabels([])
        axs[i].set_xticklabels([])
        axs[i].tick_params(axis='both', which='major', labelsize=18)
        axs[i].tick_params(axis='both', which='minor', labelsize=18)
        axs[i].grid(True)
    plt.savefig('Pictures/x.pdf')
    plt.savefig('Pictures/x.eps')
def http(func: str, arg: Tuple[str]) -> int:
    """Invoke an ergo function through the CLI's HTTP entry point.

    Args:
        func (str): Path to a function.
        arg (Tuple[str]): Positional arguments forwarded to the function.

    Returns:
        int: Result of ``ERGO_CLI.http`` — presumably an exit status;
        confirm against the CLI implementation.
    """
    return ERGO_CLI.http(func, *list(arg))
def formatting(session: Session) -> None:
    """Format the code base with black."""
    targets = session.posargs or locations
    session.install("black")
    session.run("black", *targets)
def isMWS_bhb(primary=None, objtype=None,
              gaia=None, gaiaaen=None, gaiadupsource=None, gaiagmag=None,
              gflux=None, rflux=None, zflux=None,
              w1flux=None, w1snr=None, maskbits=None,
              gnobs=None, rnobs=None, znobs=None,
              gfracmasked=None, rfracmasked=None, zfracmasked=None,
              parallax=None, parallaxerr=None):
    """Set bits for BHB Milky Way Survey targets.

    Parameters
    ----------
    see :func:`~desitarget.cuts.set_target_bits` for other parameters.

    Returns
    -------
    mask : array_like.
        True if and only if the object is a MWS-BHB target.

    Notes
    -----
    - Criteria supplied by Sergey Koposov
    - gflux, rflux, zflux, w1flux have been corrected for extinction
      (unlike other MWS selections, which use obs_flux).
    - Current version (03/20/21) is version 1 on `the SV3 wiki`_.
    """
    if primary is None:
        primary = np.ones_like(gaia, dtype='?')
    mws = primary.copy()

    # ADM do not target any objects for which entries are NaN
    # ADM and turn off the NaNs for those entries
    nans = np.isnan(gflux) | np.isnan(rflux) | np.isnan(zflux) | np.isnan(w1flux) | np.isnan(parallax) | np.isnan(gaiagmag)
    w = np.where(nans)[0]
    if len(w) > 0:
        # ADM make copies as we are reassigning values
        rflux, gflux, zflux, w1flux = rflux.copy(), gflux.copy(), zflux.copy(), w1flux.copy()
        parallax = parallax.copy()
        # BUGFIX: was "gaigmag = gaiagmag.copy()" (typo) — the copy was bound
        # to a misspelled name, so the zeroing below mutated the caller's
        # gaiagmag array in place.
        gaiagmag = gaiagmag.copy()
        rflux[w], gflux[w], zflux[w], w1flux[w] = 0., 0., 0., 0.
        parallax[w] = 0.
        gaiagmag[w] = 0.
        mws &= ~nans
        log.info('{}/{} NaNs in file...t = {:.1f}s'
                 .format(len(w), len(mws), time()-start))

    # ADM extinction-corrected magnitudes and colors (fluxes clipped to
    # avoid log10 of non-positive values).
    gmag = 22.5 - 2.5 * np.log10(gflux.clip(1e-7))
    rmag = 22.5 - 2.5 * np.log10(rflux.clip(1e-7))
    zmag = 22.5 - 2.5 * np.log10(zflux.clip(1e-7))
    gmr = gmag-rmag
    rmz = rmag-zmag

    # ADM don't target MWS-like targets in Legacy Surveys mask regions.
    mws &= imaging_mask(maskbits, mwsmask=True)

    # APC must be a Legacy Surveys object that matches a Gaia source
    mws &= gaia
    # APC type must be PSF
    mws &= _psflike(objtype)
    # APC no sources brighter than Gaia G = 10
    mws &= gaiagmag > 10.
    # APC exclude nearby sources by parallax
    mws &= parallax <= 0.1 + 3*parallaxerr

    # APC require clean, positive-flux detections in all three bands.
    mws &= (gfracmasked < 0.5) & (gflux > 0) & (gnobs > 0)
    mws &= (rfracmasked < 0.5) & (rflux > 0) & (rnobs > 0)
    mws &= (zfracmasked < 0.5) & (zflux > 0) & (znobs > 0)

    # APC no gaia duplicated sources
    mws &= ~gaiadupsource
    # APC gaia astrometric excess noise < 3
    mws &= gaiaaen < 3.0

    # APC BHB extinction-corrected color range -0.35 <= gmr <= -0.02
    mws &= (gmr >= -0.35) & (gmr <= -0.02)

    # Coefficients from Sergey Koposov
    bhb_sel = rmz - (1.07163*gmr**5 - 1.42272*gmr**4 + 0.69476*gmr**3 - 0.12911*gmr**2 + 0.66993*gmr - 0.11368)
    mws &= (bhb_sel >= -0.05) & (bhb_sel <= 0.05)

    # APC back out the WISE error = 1/sqrt(ivar) from the SNR = flux*sqrt(ivar)
    w1fluxerr = w1flux/(w1snr.clip(1e-7))
    w1mag_faint = 22.5 - 2.5 * np.log10((w1flux-3*w1fluxerr).clip(1e-7))

    # APC WISE cut (Sergey Koposov)
    mws &= rmag - 2.3*gmr - w1mag_faint < -1.5

    # APC Legacy magnitude limits
    mws &= (rmag >= 16.) & (rmag <= 20.)
    return mws
def bernpoly(n, z):
    """
    Evaluates the Bernoulli polynomial `B_n(z)`.

    The first few Bernoulli polynomials are::

        >>> from sympy.mpmath import *
        >>> mp.dps = 15
        >>> for n in range(6):
        ...     nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n)))
        ...
        [1.0]
        [-0.5, 1.0]
        [0.166667, -1.0, 1.0]
        [0.0, 0.5, -1.5, 1.0]
        [-3.33333e-2, 0.0, 1.0, -2.0, 1.0]
        [0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0]

    At `z = 0`, the Bernoulli polynomial evaluates to a
    Bernoulli number (see :func:`bernoulli`)::

        >>> print bernpoly(12, 0), bernoulli(12)
        -0.253113553113553 -0.253113553113553
        >>> print bernpoly(13, 0), bernoulli(13)
        0.0 0.0

    """
    n = int(n)
    assert n >= 0
    # XXX: optimize
    # Direct evaluation of the defining sum B_n(z) = sum_k C(n,k) B_k z^(n-k).
    total = 0
    for k in xrange(n+1):
        total += binomial(n, k) * bernoulli(k) * z**(n-k)
    return total
def sort(obs, pred):
    """
    Return the observed and predicted time series, each sorted ascending.
    """
    sorted_obs = obs.sort_values(ascending=True)
    sorted_pred = pred.sort_values(ascending=True)
    return sorted_obs, sorted_pred
def random_point_of_triangle(vertices):
    """Compute a random point of the triangle with the given vertices.

    Uses rejection sampling: draw (u, v) from the unit square and keep
    the first pair that lands in the lower-left half, i.e. u + v <= 1.
    """
    origin, b, c = vertices
    edge_ab = b - origin
    edge_ac = c - origin
    while True:
        u = random.random()
        v = random.random()
        if u + v <= 1:
            return origin + edge_ab*u + edge_ac*v
def get_minutes(hour: str) -> int:
    """Convert a 24-hour ``%H:%M`` time string to minutes past midnight.

    Args:
        hour (str): String containing time in 24 hour %H:%M format

    Returns:
        int: Returns total number of minutes
    """
    parsed = time.strptime(hour, '%H:%M')
    # Named struct_time fields instead of the magic indices t[3]/t[4].
    return parsed.tm_hour * 60 + parsed.tm_min
def get_rotation_scale_from_transformation(matrix: np.array) -> Tuple[np.array, np.array]:
    """
    Break the given transformation matrix into a rotation matrix and a
    scale matrix via SVD, as described in "As-Rigid-As-Possible Shape
    Interpolation" by Alexa et al.

    Arguments:
        matrix : Any transformation matrix
    Returns:
        R_gamma: Rotation matrix 3x3
        S: Scale matrix 3x3
    """
    u, singular_values, vt = np.linalg.svd(matrix, full_matrices=True)
    # Promote the 1-D singular values to a diagonal matrix.
    diag = np.eye(3) * singular_values
    rotation = u @ vt
    # Guard against reflections: force a proper rotation with det = +1.
    if np.linalg.det(rotation) < 0:
        rotation[0, :] *= -1
    scale = vt.T @ diag @ vt
    assert is_rotation_matrix(rotation), logging.error("Computed matrix is not a rotation")
    return (rotation, scale)
def test_read_compressed_file(compressed_file):
    """Test that wkr.open can read compressed file formats."""
    with wkr.open(compressed_file, 'rb') as fh:
        contents = fh.read()
    assert isinstance(contents, binary_type)
    assert contents == BINARY_DATA
def cls():
    """
    Purpose
    -------
    Overwrite the current terminal line with whitespace and carriage
    return to the beginning of the line.
    """
    out = sys.stdout
    out.write('\r' + blnk_ln)
    out.flush()
def calc_check_digit(number):
    """Calculate the check digit."""
    weights = (7, 9, 8, 6, 5, 4, 3, 2)
    total = 0
    for weight, digit in zip(weights, number):
        total += weight * int(digit)
    return str((10 - total % 11) % 9 + 1)
def leave_studygroup(request):
    """
    Remove a student from the list of participants of a study group.
    """
    payload = json.loads(request.body)
    # Resolve the student's RCS id from the supplied token.
    rcs = Student.objects.get(token=payload['token']).rcs
    group = Studygroup.objects.get(id=payload['id'])
    # Participants are stored as a JSON-encoded list on the group row.
    members = json.loads(group.participants)
    members.remove(rcs)
    group.participants = json.dumps(members)
    group.save()
    return JsonResponse({'res': 'OK'}, safe=False)
def list_revisions_courses(request_ctx, course_id, url, per_page=None, **request_kwargs):
    """
    List the revisions of a page. Callers must have update rights on the page in order to see page history.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param course_id: (required) ID
    :type course_id: string
    :param url: (required) ID
    :type url: string
    :param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
    :type per_page: integer or None
    :return: List revisions
    :rtype: requests.Response (with array data)
    """
    per_page = request_ctx.per_page if per_page is None else per_page
    # Build the endpoint in a new local instead of reassigning the `url` param.
    endpoint = request_ctx.base_api_url + '/v1/courses/{course_id}/pages/{url}/revisions'.format(course_id=course_id, url=url)
    return client.get(request_ctx, endpoint, payload={'per_page': per_page}, **request_kwargs)
def test():
    """Just print count every five seconds to test progress"""
    counter = 0
    while counter < 10:
        time.sleep(5)
        print(counter)
        counter += 1
def corpus():
    """Read the corpus in an endless loop, yielding one parsed JSON record per line."""
    while True:
        with open(args.train_data_path) as source:
            for raw_line in source:
                yield json.loads(raw_line)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.