content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from salt.cloud.exceptions import SaltCloudException
def avail_images(call=None):
    """
    Return the templates (images) available on UpCloud.

    Returns a dict mapping each template storage's UUID to a dict of the
    attributes actually present on that storage object.
    """
    if call == 'action':
        # Salt cloud convention: listing functions are invoked with -f/--function.
        # Fixed the error message, which previously named avail_locations.
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    manager = _get_manager()
    templates = manager.get_storages(storage_type='template')
    ret = {}
    for storage in templates:
        # Copy every declared attribute the storage object actually carries.
        ret[storage.uuid] = {
            attr: getattr(storage, attr)
            for attr in storage.ATTRIBUTES if hasattr(storage, attr)
        }
    return ret
def batch_dataset(x, batch_size):
    """
    Split dataset ``x`` into ``batch_size`` equally sized partitions.

    Trailing elements that would prevent an even split are dropped first,
    so ``np.split`` is guaranteed to succeed.
    """
    leftover = len(x) % batch_size  # elements that would break an even split
    if leftover:
        x = x[:-leftover]
    return np.split(x, batch_size)
import requests
def _verify_email_upload(transfer_id: str, session: requests.Session) -> str:
    """Prompt for the emailed verification code and submit it.

    Reads the code from standard input, POSTs it to the WeTransfer verify
    endpoint for ``transfer_id`` and returns the parsed JSON response.
    """
    payload = {
        "code": input('Code:'),
        "expire_in": WETRANSFER_EXPIRE_IN,
    }
    response = session.post(
        WETRANSFER_VERIFY_URL.format(transfer_id=transfer_id), json=payload)
    return response.json()
from .error import RunFailed, DownloadFailed, Terminated, error_json
from .task import run_local_task
from .. import parse_document, values_from_json, values_to_json, Walker
import logging
import os
import traceback
def run(cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs) -> str:
    """
    Download the URI and return the local filename.
    kwargs are passed through to ``run_local_task``, so ``run_dir`` and ``logger_prefix`` may be
    useful in particular.

    Raises DownloadFailed on any downloader error (Terminated is re-raised
    directly so cancellation propagates).
    """
    # Resolve the downloader coroutine generator for this URI's scheme.
    gen = _downloader(cfg, uri)
    assert gen
    try:
        # Drive the downloader coroutine; it may hand back an inline WDL task
        # that actually performs the download.
        with compose_coroutines([lambda kwargs: gen(cfg, logger, **kwargs)], {"uri": uri}) as cor:
            recv = next(cor)
            if "task_wdl" in recv:
                # Parse, typecheck and locally run the one-task WDL document
                # supplied by the downloader, with the inputs it provided.
                task_wdl, inputs = (recv[k] for k in ["task_wdl", "inputs"])
                doc = parse_document(task_wdl, version="1.0") # pyre-ignore
                assert len(doc.tasks) == 1 and not doc.workflow
                doc.typecheck()
                Walker.SetParents()(doc)
                task = doc.tasks[0]
                inputs = values_from_json(inputs, task.available_inputs) # pyre-ignore
                subdir, outputs_env = run_local_task(
                    cfg, task, inputs, run_id=("download-" + task.name), **kwargs
                )
                # Feed the task results back to the downloader coroutine.
                recv = cor.send(
                    {"outputs": values_to_json(outputs_env), "dir": subdir} # pyre-ignore
                )
            # The coroutine's final outputs carry the downloaded file path.
            ans = recv["outputs"]["file"]
            assert isinstance(ans, str) and os.path.isfile(ans)
            return ans
    except RunFailed as exn:
        # Propagate cancellation as-is; wrap other task failures.
        if isinstance(exn.__cause__, Terminated):
            raise exn.__cause__ from None
        raise DownloadFailed(uri) from exn.__cause__
    except Exception as exn:
        # Any other error: log the traceback and normalize to DownloadFailed.
        logger.debug(traceback.format_exc())
        logger.error(_("downloader error", uri=uri, **error_json(exn)))
        raise DownloadFailed(uri) from exn | 5d5ac767f8076aef453e4ef52512ddbf63396d90 | 27,203 |
def envelope_generic(pshape, *args, **kwargs):
    """
    Envelope for a given pulse shape at a given time or times.

    Parameters
    ----------
    pshape : str or function object
        Pulse shape type or user-provided function.
        Allowed string values are 'square', 'gauss', 'cos',
        'flattop_gauss', 'flattop_cos'.
    *args and **kwargs
        Positional and keyword arguments to pass on to a pulse shaping
        function.

    Raises
    ------
    ValueError
        If ``pshape`` is neither callable nor one of the known names.

    See Also
    --------
    derenv_generic
    """
    if callable(pshape):
        # A user-supplied shaping function takes precedence over the names.
        return pshape(*args, **kwargs)
    elif pshape == 'square':
        return envelope_square(*args, **kwargs)
    elif pshape == 'gauss':
        return envelope_gauss(*args, **kwargs)
    elif pshape == 'cos':
        return envelope_cos(*args, **kwargs)
    elif pshape == 'flattop_gauss':
        return envelope_flattop_gauss(*args, **kwargs)
    elif pshape == 'flattop_cos':
        return envelope_flattop_cos(*args, **kwargs)
    # Fixed typo ("functin") in the message; the old trailing
    # ``return None`` after this raise was unreachable and is removed.
    raise ValueError(
        '`pshape` must be a function object or one of ' +
        '"square", "gauss", "cos", "flattop_gauss", "flattop_cos"')
def sdb_longitude(longitude):
    """Return an 8 character, zero padded string version of the
    longitude parameter.

    Shifts the longitude by +180 degrees (so -180..180 maps to a
    non-negative range) and scales by 1e5 before formatting.

    **Arguments:**

    * *longitude* -- Longitude.
    """
    shifted = (float(longitude) + 180) * 100000
    return "{:08d}".format(int(shifted))
import os
def run_qemu(kernel, machine='lm3s811evb', dump_file=None, dump_range=None):
    """ Runs qemu on a given kernel file """
    if not has_qemu():
        return ''
    # The kernel image must exist before handing it to qemu:
    assert os.path.isfile(kernel)
    logger.debug('Running qemu with machine=%s and image %s', machine, kernel)
    cmdline = [
        'qemu-system-arm',
        '-M', machine,
        '-m', '16M',
        '-nographic',
        '-kernel', kernel,
    ]
    return qemu(cmdline)
def is_allowed_location(location, allowed_location):
    """
    Returns true if the location is within the allowed_location subtree.

    Args:
        location: location id
        allowed_location: allowed_location id (1 means "everywhere")
    Returns:
        is_allowed(bool): Is location allowed.
    """
    # Sentinel 1 grants access to every location.
    if allowed_location == 1:
        return True
    # Lazily populate the module-level location tree cache on first use.
    # NOTE(review): not thread-safe; assumes single-threaded request handling.
    global allowed_locations_locs
    if allowed_locations_locs is None:
        allowed_locations_locs = get_locations(db.session)
    # Allowed if the queried location is a descendant of allowed_location.
    if is_child(allowed_location, int(location), allowed_locations_locs):
        return True
    return False | e16b8bb15827b34c65b3ee6aa36f710ad2aaeea5 | 27,207 |
def gsl_isinf(*args, **kwargs):
    """gsl_isinf(double x) -> int

    Thin SWIG-style wrapper that forwards all arguments to the underlying
    ``_gslwrap.gsl_isinf`` C binding and returns its int result
    (per GSL convention, nonzero for infinite ``x`` — see GSL docs).
    """
    return _gslwrap.gsl_isinf(*args, **kwargs) | 71b1a114cdb581721eaafc442a038b8183a056d1 | 27,208 |
def abstract_clone(__call__, self, x, *args):
    """Clone an abstract value.

    Wraps ``__call__`` with caching and an optional property shortcut
    (both taken from ``self.state``).  ``__call__`` may be a plain
    function or a generator: a generator must yield exactly once (a
    class to instantiate, or None), receive a constructor back, and then
    return the finished clone.
    """
    def proceed():
        # Cache hit: reuse the previously cloned instance.
        if isinstance(x, AbstractValue) and x in cache:
            return cache[x]
        result = __call__(self, x, *args)
        if not isinstance(result, GeneratorType):
            return result
        # Generator protocol: first send(None) yields the class (or None).
        cls = result.send(None)
        if cls is not None:
            inst = cls.empty()
        else:
            inst = None
        constructor = _make_constructor(inst)
        # Cache the (possibly empty) instance BEFORE finishing construction
        # so recursive clones of self-referential values terminate.
        cache[x] = inst
        try:
            result.send(constructor)
        except StopIteration as e:
            if inst is not None:
                assert e.value is inst
            return e.value
        else:
            raise AssertionError(
                'Generators in abstract_clone must yield once, then return.'
            )
    cache = self.state.cache
    prop = self.state.prop
    if prop:
        # Property shortcut: a previous clone may be memoized on x itself.
        if hasattr(x, prop):
            return getattr(x, prop)
        elif isinstance(x, AbstractValue):
            # check() deciding True means x can be returned unchanged.
            if self.state.check(x, *args):
                res = x
            else:
                res = proceed()
            setattr(x, prop, res)
            return res
        else:
            return proceed()
    elif self.state.check and self.state.check(x, *args):
        return x
    else:
        return proceed() | 4c5f85a710d29f751adb7b2a5cd37ca33136e227 | 27,209 |
from typing import Union
from typing import Dict
from typing import Any
import typing
def Layout(
    align_baseline: bool = None,
    align_center: bool = None,
    align_content_center: bool = None,
    align_content_end: bool = None,
    align_content_space_around: bool = None,
    align_content_space_between: bool = None,
    align_content_start: bool = None,
    align_end: bool = None,
    align_start: bool = None,
    attributes: dict = {},
    children: list = [],
    class_: str = None,
    column: bool = None,
    d_block: bool = None,
    d_contents: bool = None,
    d_flex: bool = None,
    d_grid: bool = None,
    d_inherit: bool = None,
    d_initial: bool = None,
    d_inline: bool = None,
    d_inline_block: bool = None,
    d_inline_flex: bool = None,
    d_inline_grid: bool = None,
    d_inline_table: bool = None,
    d_list_item: bool = None,
    d_none: bool = None,
    d_run_in: bool = None,
    d_table: bool = None,
    d_table_caption: bool = None,
    d_table_cell: bool = None,
    d_table_column: bool = None,
    d_table_column_group: bool = None,
    d_table_footer_group: bool = None,
    d_table_header_group: bool = None,
    d_table_row: bool = None,
    d_table_row_group: bool = None,
    fill_height: bool = None,
    id: str = None,
    justify_center: bool = None,
    justify_end: bool = None,
    justify_space_around: bool = None,
    justify_space_between: bool = None,
    justify_start: bool = None,
    layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {},
    ma_0: bool = None,
    ma_1: bool = None,
    ma_2: bool = None,
    ma_3: bool = None,
    ma_4: bool = None,
    ma_5: bool = None,
    ma_auto: bool = None,
    mb_0: bool = None,
    mb_1: bool = None,
    mb_2: bool = None,
    mb_3: bool = None,
    mb_4: bool = None,
    mb_5: bool = None,
    mb_auto: bool = None,
    ml_0: bool = None,
    ml_1: bool = None,
    ml_2: bool = None,
    ml_3: bool = None,
    ml_4: bool = None,
    ml_5: bool = None,
    ml_auto: bool = None,
    mr_0: bool = None,
    mr_1: bool = None,
    mr_2: bool = None,
    mr_3: bool = None,
    mr_4: bool = None,
    mr_5: bool = None,
    mr_auto: bool = None,
    mt_0: bool = None,
    mt_1: bool = None,
    mt_2: bool = None,
    mt_3: bool = None,
    mt_4: bool = None,
    mt_5: bool = None,
    mt_auto: bool = None,
    mx_0: bool = None,
    mx_1: bool = None,
    mx_2: bool = None,
    mx_3: bool = None,
    mx_4: bool = None,
    mx_5: bool = None,
    mx_auto: bool = None,
    my_0: bool = None,
    my_1: bool = None,
    my_2: bool = None,
    my_3: bool = None,
    my_4: bool = None,
    my_5: bool = None,
    my_auto: bool = None,
    pa_0: bool = None,
    pa_1: bool = None,
    pa_2: bool = None,
    pa_3: bool = None,
    pa_4: bool = None,
    pa_5: bool = None,
    pa_auto: bool = None,
    pb_0: bool = None,
    pb_1: bool = None,
    pb_2: bool = None,
    pb_3: bool = None,
    pb_4: bool = None,
    pb_5: bool = None,
    pb_auto: bool = None,
    pl_0: bool = None,
    pl_1: bool = None,
    pl_2: bool = None,
    pl_3: bool = None,
    pl_4: bool = None,
    pl_5: bool = None,
    pl_auto: bool = None,
    pr_0: bool = None,
    pr_1: bool = None,
    pr_2: bool = None,
    pr_3: bool = None,
    pr_4: bool = None,
    pr_5: bool = None,
    pr_auto: bool = None,
    pt_0: bool = None,
    pt_1: bool = None,
    pt_2: bool = None,
    pt_3: bool = None,
    pt_4: bool = None,
    pt_5: bool = None,
    pt_auto: bool = None,
    px_0: bool = None,
    px_1: bool = None,
    px_2: bool = None,
    px_3: bool = None,
    px_4: bool = None,
    px_5: bool = None,
    px_auto: bool = None,
    py_0: bool = None,
    py_1: bool = None,
    py_2: bool = None,
    py_3: bool = None,
    py_4: bool = None,
    py_5: bool = None,
    py_auto: bool = None,
    reverse: bool = None,
    row: bool = None,
    slot: str = None,
    style_: str = None,
    tag: str = None,
    v_model: Any = "!!disabled!!",
    v_on: str = None,
    v_slots: list = [],
    wrap: bool = None,
    on_align_baseline: typing.Callable[[bool], Any] = None,
    on_align_center: typing.Callable[[bool], Any] = None,
    on_align_content_center: typing.Callable[[bool], Any] = None,
    on_align_content_end: typing.Callable[[bool], Any] = None,
    on_align_content_space_around: typing.Callable[[bool], Any] = None,
    on_align_content_space_between: typing.Callable[[bool], Any] = None,
    on_align_content_start: typing.Callable[[bool], Any] = None,
    on_align_end: typing.Callable[[bool], Any] = None,
    on_align_start: typing.Callable[[bool], Any] = None,
    on_attributes: typing.Callable[[dict], Any] = None,
    on_children: typing.Callable[[list], Any] = None,
    on_class_: typing.Callable[[str], Any] = None,
    on_column: typing.Callable[[bool], Any] = None,
    on_d_block: typing.Callable[[bool], Any] = None,
    on_d_contents: typing.Callable[[bool], Any] = None,
    on_d_flex: typing.Callable[[bool], Any] = None,
    on_d_grid: typing.Callable[[bool], Any] = None,
    on_d_inherit: typing.Callable[[bool], Any] = None,
    on_d_initial: typing.Callable[[bool], Any] = None,
    on_d_inline: typing.Callable[[bool], Any] = None,
    on_d_inline_block: typing.Callable[[bool], Any] = None,
    on_d_inline_flex: typing.Callable[[bool], Any] = None,
    on_d_inline_grid: typing.Callable[[bool], Any] = None,
    on_d_inline_table: typing.Callable[[bool], Any] = None,
    on_d_list_item: typing.Callable[[bool], Any] = None,
    on_d_none: typing.Callable[[bool], Any] = None,
    on_d_run_in: typing.Callable[[bool], Any] = None,
    on_d_table: typing.Callable[[bool], Any] = None,
    on_d_table_caption: typing.Callable[[bool], Any] = None,
    on_d_table_cell: typing.Callable[[bool], Any] = None,
    on_d_table_column: typing.Callable[[bool], Any] = None,
    on_d_table_column_group: typing.Callable[[bool], Any] = None,
    on_d_table_footer_group: typing.Callable[[bool], Any] = None,
    on_d_table_header_group: typing.Callable[[bool], Any] = None,
    on_d_table_row: typing.Callable[[bool], Any] = None,
    on_d_table_row_group: typing.Callable[[bool], Any] = None,
    on_fill_height: typing.Callable[[bool], Any] = None,
    on_id: typing.Callable[[str], Any] = None,
    on_justify_center: typing.Callable[[bool], Any] = None,
    on_justify_end: typing.Callable[[bool], Any] = None,
    on_justify_space_around: typing.Callable[[bool], Any] = None,
    on_justify_space_between: typing.Callable[[bool], Any] = None,
    on_justify_start: typing.Callable[[bool], Any] = None,
    on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None,
    on_ma_0: typing.Callable[[bool], Any] = None,
    on_ma_1: typing.Callable[[bool], Any] = None,
    on_ma_2: typing.Callable[[bool], Any] = None,
    on_ma_3: typing.Callable[[bool], Any] = None,
    on_ma_4: typing.Callable[[bool], Any] = None,
    on_ma_5: typing.Callable[[bool], Any] = None,
    on_ma_auto: typing.Callable[[bool], Any] = None,
    on_mb_0: typing.Callable[[bool], Any] = None,
    on_mb_1: typing.Callable[[bool], Any] = None,
    on_mb_2: typing.Callable[[bool], Any] = None,
    on_mb_3: typing.Callable[[bool], Any] = None,
    on_mb_4: typing.Callable[[bool], Any] = None,
    on_mb_5: typing.Callable[[bool], Any] = None,
    on_mb_auto: typing.Callable[[bool], Any] = None,
    on_ml_0: typing.Callable[[bool], Any] = None,
    on_ml_1: typing.Callable[[bool], Any] = None,
    on_ml_2: typing.Callable[[bool], Any] = None,
    on_ml_3: typing.Callable[[bool], Any] = None,
    on_ml_4: typing.Callable[[bool], Any] = None,
    on_ml_5: typing.Callable[[bool], Any] = None,
    on_ml_auto: typing.Callable[[bool], Any] = None,
    on_mr_0: typing.Callable[[bool], Any] = None,
    on_mr_1: typing.Callable[[bool], Any] = None,
    on_mr_2: typing.Callable[[bool], Any] = None,
    on_mr_3: typing.Callable[[bool], Any] = None,
    on_mr_4: typing.Callable[[bool], Any] = None,
    on_mr_5: typing.Callable[[bool], Any] = None,
    on_mr_auto: typing.Callable[[bool], Any] = None,
    on_mt_0: typing.Callable[[bool], Any] = None,
    on_mt_1: typing.Callable[[bool], Any] = None,
    on_mt_2: typing.Callable[[bool], Any] = None,
    on_mt_3: typing.Callable[[bool], Any] = None,
    on_mt_4: typing.Callable[[bool], Any] = None,
    on_mt_5: typing.Callable[[bool], Any] = None,
    on_mt_auto: typing.Callable[[bool], Any] = None,
    on_mx_0: typing.Callable[[bool], Any] = None,
    on_mx_1: typing.Callable[[bool], Any] = None,
    on_mx_2: typing.Callable[[bool], Any] = None,
    on_mx_3: typing.Callable[[bool], Any] = None,
    on_mx_4: typing.Callable[[bool], Any] = None,
    on_mx_5: typing.Callable[[bool], Any] = None,
    on_mx_auto: typing.Callable[[bool], Any] = None,
    on_my_0: typing.Callable[[bool], Any] = None,
    on_my_1: typing.Callable[[bool], Any] = None,
    on_my_2: typing.Callable[[bool], Any] = None,
    on_my_3: typing.Callable[[bool], Any] = None,
    on_my_4: typing.Callable[[bool], Any] = None,
    on_my_5: typing.Callable[[bool], Any] = None,
    on_my_auto: typing.Callable[[bool], Any] = None,
    on_pa_0: typing.Callable[[bool], Any] = None,
    on_pa_1: typing.Callable[[bool], Any] = None,
    on_pa_2: typing.Callable[[bool], Any] = None,
    on_pa_3: typing.Callable[[bool], Any] = None,
    on_pa_4: typing.Callable[[bool], Any] = None,
    on_pa_5: typing.Callable[[bool], Any] = None,
    on_pa_auto: typing.Callable[[bool], Any] = None,
    on_pb_0: typing.Callable[[bool], Any] = None,
    on_pb_1: typing.Callable[[bool], Any] = None,
    on_pb_2: typing.Callable[[bool], Any] = None,
    on_pb_3: typing.Callable[[bool], Any] = None,
    on_pb_4: typing.Callable[[bool], Any] = None,
    on_pb_5: typing.Callable[[bool], Any] = None,
    on_pb_auto: typing.Callable[[bool], Any] = None,
    on_pl_0: typing.Callable[[bool], Any] = None,
    on_pl_1: typing.Callable[[bool], Any] = None,
    on_pl_2: typing.Callable[[bool], Any] = None,
    on_pl_3: typing.Callable[[bool], Any] = None,
    on_pl_4: typing.Callable[[bool], Any] = None,
    on_pl_5: typing.Callable[[bool], Any] = None,
    on_pl_auto: typing.Callable[[bool], Any] = None,
    on_pr_0: typing.Callable[[bool], Any] = None,
    on_pr_1: typing.Callable[[bool], Any] = None,
    on_pr_2: typing.Callable[[bool], Any] = None,
    on_pr_3: typing.Callable[[bool], Any] = None,
    on_pr_4: typing.Callable[[bool], Any] = None,
    on_pr_5: typing.Callable[[bool], Any] = None,
    on_pr_auto: typing.Callable[[bool], Any] = None,
    on_pt_0: typing.Callable[[bool], Any] = None,
    on_pt_1: typing.Callable[[bool], Any] = None,
    on_pt_2: typing.Callable[[bool], Any] = None,
    on_pt_3: typing.Callable[[bool], Any] = None,
    on_pt_4: typing.Callable[[bool], Any] = None,
    on_pt_5: typing.Callable[[bool], Any] = None,
    on_pt_auto: typing.Callable[[bool], Any] = None,
    on_px_0: typing.Callable[[bool], Any] = None,
    on_px_1: typing.Callable[[bool], Any] = None,
    on_px_2: typing.Callable[[bool], Any] = None,
    on_px_3: typing.Callable[[bool], Any] = None,
    on_px_4: typing.Callable[[bool], Any] = None,
    on_px_5: typing.Callable[[bool], Any] = None,
    on_px_auto: typing.Callable[[bool], Any] = None,
    on_py_0: typing.Callable[[bool], Any] = None,
    on_py_1: typing.Callable[[bool], Any] = None,
    on_py_2: typing.Callable[[bool], Any] = None,
    on_py_3: typing.Callable[[bool], Any] = None,
    on_py_4: typing.Callable[[bool], Any] = None,
    on_py_5: typing.Callable[[bool], Any] = None,
    on_py_auto: typing.Callable[[bool], Any] = None,
    on_reverse: typing.Callable[[bool], Any] = None,
    on_row: typing.Callable[[bool], Any] = None,
    on_slot: typing.Callable[[str], Any] = None,
    on_style_: typing.Callable[[str], Any] = None,
    on_tag: typing.Callable[[str], Any] = None,
    on_v_model: typing.Callable[[Any], Any] = None,
    on_v_on: typing.Callable[[str], Any] = None,
    on_v_slots: typing.Callable[[list], Any] = None,
    on_wrap: typing.Callable[[bool], Any] = None,
) -> Element[ipyvuetify.generated.Layout]:
    """Create a react ``Element`` for an ipyvuetify ``Layout`` widget.

    Auto-generated wrapper: each keyword mirrors a widget trait, and each
    ``on_<name>`` callback observes changes to ``<name>``.  The mutable
    defaults ({} / []) act as "unset" sentinels consumed by
    ``without_default`` below — presumably never mutated here, but this
    relies on ``without_default`` filtering them out (TODO confirm).
    """
    # Keep only the arguments the caller actually supplied (i.e. whose
    # values differ from the declared defaults).
    kwargs: Dict[Any, Any] = without_default(Layout, locals())
    # Accept a plain dict for the ipywidgets layout and coerce it.
    if isinstance(kwargs.get("layout"), dict):
        kwargs["layout"] = w.Layout(**kwargs["layout"])
    widget_cls = ipyvuetify.generated.Layout
    comp = react.core.ComponentWidget(widget=widget_cls)
    return Element(comp, **kwargs) | b20157e22d69e32fc89d5abda243b5e967d1eefe | 27,210 |
def paint_hull(inputfile, hull=None):
    """Launches the emergency hull painting robot with the specified Intcode source file

    Parameters
    ----------
    inputfile: str
        Path/Filename of Intcode source code
    hull : dict<(int,int): int>, optional
        Initial state of the hull; defaults to an empty (all-black) hull.

    Returns
    -------
    dict<(int,int): int>
        Final hull state after the robot halts.
    """
    # The old mutable default (hull={}) was shared and mutated across
    # calls, so repeated runs painted on the same hull; create a fresh
    # dict per call instead.
    if hull is None:
        hull = {}
    robot_pos = (0, 0)
    robot_dir = (0, -1)
    machine = IntcodeVM(inputfile, silent=True)
    machine.run()
    while machine.waiting:
        # Feed the current panel color; receive paint color and turn.
        color, turn = machine.resume([hull.get(robot_pos, BLACK)])
        hull[robot_pos] = color
        robot_dir = TURNS[robot_dir][turn]
        robot_pos = (robot_pos[0] + robot_dir[0], robot_pos[1] + robot_dir[1])
    return hull
from typing import Tuple
from datetime import datetime
def get_token_expiration(parser: ConfigParser, profile: str) -> Tuple[str, str]:
    """Return token expiration date and whether it is expired.

    Parameters
    ----------
    parser : ConfigParser
        Parser with all configuration files.
    profile : str
        Profile name.

    Returns
    -------
    Tuple[str, str]
        ("" , "") when no expiration is recorded; otherwise the formatted
        expiration timestamp and "Y"/"N" for expired / not expired.
    """
    raw = parser.get(profile, 'aws_session_token_expiration', fallback=None)
    if raw is None:
        return "", ""
    moment = datetime.strptime(raw, "%Y-%m-%dT%H:%M:%S%z")
    still_valid = moment > datetime.now(moment.tzinfo)
    return f"{moment:%Y-%m-%d %H:%M:%S}", "N" if still_valid else "Y"
def ensure_format(s: str, n_chars: int = None) -> str:
    """
    Strip every space from ``s`` and optionally enforce its final length.
    ------
    PARAMS
    ------
    1. 's' -> input string
    2. 'n_chars' -> Num characters the cleaned string must have. Defaults to None (no check).
    """
    assert isinstance(s, str), "Input must be a string."
    compact = s.replace(" ", "")  # drop all spaces, not just at the edges
    if n_chars:
        assert len(compact) == n_chars, f"Input must be a payload of {n_chars} characters."
    return compact
def gamma_boundary_condition(gamma=-3):
    """
    Defines boundary condition parameterized by either a scalar or list/iterable.

    In the latter case, piecewise-interpolation on an equispaced grid over
    the interior of (0, 1). In the former, the scalar defines the minimum displacement
    value of the boundary condition.

    Returns a FEniCS ``Expression`` in either case.
    """
    if isinstance(gamma, (int, float)):  # 1-D case (idiomatic tuple isinstance)
        # the function below will have a min at (2/7, gamma) by design
        # (scaling factor 823543/12500 chosen via calculus)
        lam = gamma * 823543 / 12500
        expr = fin.Expression(f"pow(x[1], 2) * pow(1 - x[1], 5) * {lam}", degree=3)
    else:  # Higher-D case: piecewise interpolation of the supplied values
        expr = fin.Expression(piecewise_eval_from_vector(gamma, d=1), degree=1)
    return expr
def toggleDateSyncButton(click):
    """Change the color of on/off date syncing button - for css."""
    # The click count arrives as None before the first click; treat as 0.
    clicks = click or 0
    if clicks % 2 == 0:
        label = "Date Syncing: On"
        css = {**on_button_style, **{"margin-right": "15px"}}
    else:
        label = "Date Syncing: Off"
        css = {**off_button_style, **{"margin-right": "15px"}}
    return css, label
import os
import inspect
def _find_path(image):
    """Searches for the given filename and returns the full path.

    Searches in the directory of the script that called (for example)
    detect_match, then in the directory of that script's caller, etc.
    Absolute paths are returned unchanged; if nothing matches, falls
    back to the path relative to the current working directory.
    """
    if os.path.isabs(image):
        return image
    # Frame 0 is _find_path itself, frame 1 its direct caller
    # (e.g. detect_match); the user's script starts at frame 2.
    for frame_record in inspect.stack()[2:]:
        frame_file = inspect.getframeinfo(frame_record[0]).filename
        candidate = os.path.join(os.path.dirname(frame_file), image)
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    # Fall back to image from cwd, for convenience of the selftests
    return os.path.abspath(image)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
    """
    Partial autocorrelation estimated

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        largest lag for which the pacf is returned
    method : str
        estimator selector:
        - 'yw'/'ywunbiased' : Yule-Walker, bias-corrected acovf (default)
        - 'ywm'/'ywmle' : Yule-Walker without bias correction
        - 'ols' : regression of series on its lags and a constant
        - 'ols-inefficient' : OLS on a single common sample for all lags
        - 'ols-unbiased' : OLS with a bias adjustment
        - 'ld'/'ldunbiased' : Levinson-Durbin, bias-corrected
        - 'ldb'/'ldbiased' : Levinson-Durbin, uncorrected
    alpha : float, optional
        confidence level; when given, confidence intervals (std dev
        1/sqrt(len(x))) are returned alongside the pacf

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF; only returned when ``alpha``
        is given.

    See also
    --------
    sm2.tsa.autocov.acf
    sm2.tsa.autocov.pacf_yw
    sm2.tsa.autocov.pacf_burg
    sm2.tsa.stattools.pacf_ols
    """
    if method in ('ols', 'ols-inefficient', 'ols-unbiased'):
        # GH#5153
        ret = pacf_ols(x, nlags=nlags,
                       efficient='inefficient' not in method,
                       unbiased='unbiased' in method)
    elif method in ('yw', 'ywu', 'ywunbiased', 'yw_unbiased'):
        ret = pacf_yw(x, nlags=nlags, method='unbiased')
    elif method in ('ywm', 'ywmle', 'yw_mle'):
        ret = pacf_yw(x, nlags=nlags, method='mle')
    elif method in ('ld', 'ldu', 'ldunbiased', 'ld_unbiased'):
        acv = acovf(x, unbiased=True, fft=False)
        ret = levinson_durbin(acv, nlags=nlags, isacov=True)[2]
    # FIXME: inconsistent naming with ywmle
    elif method in ('ldb', 'ldbiased', 'ld_biased'):
        acv = acovf(x, unbiased=False, fft=False)
        ret = levinson_durbin(acv, nlags=nlags, isacov=True)[2]
    else:  # pragma: no cover
        raise ValueError('method not available')
    if alpha is None:
        return ret
    varacf = 1. / len(x)  # valid for all lags >= 1
    interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
    confint = np.array(list(zip(ret - interval, ret + interval)))
    confint[0] = ret[0]  # fix confidence interval for lag 0 to varpacf=0
    return ret, confint
def _opt(spc_info, mod_thy_info, geo, run_fs,
         script_str, opt_cart=True, **kwargs):
    """ Run an optimization

    Executes an electronic-structure geometry optimization job for the
    given species/theory via ``es_runner`` and returns
    (optimized geometry or None, full runner return value).

    :param opt_cart: optimize in Cartesian coordinates when True,
        otherwise convert to a Z-matrix first.
    """
    # Optimize displaced geometry
    geom = geo if opt_cart else automol.geom.zmatrix(geo)
    success, ret = es_runner.execute_job(
        job=elstruct.Job.OPTIMIZATION,
        script_str=script_str,
        run_fs=run_fs,
        geo=geom,
        spc_info=spc_info,
        thy_info=mod_thy_info,
        zrxn=None,
        overwrite=True,
        **kwargs,
    )
    if success:
        # Parse the optimized geometry out of the program's output text.
        inf_obj, _, out_str = ret
        prog = inf_obj.prog
        ret_geo = elstruct.reader.opt_geometry(prog, out_str)
    else:
        ret_geo = None
    return ret_geo, ret | b06a97dd33d66a3bc6dc755f48e010f03eb96832 | 27,218 |
def fi(x, y, z, i):
    """The f1, f2, f3, f4, and f5 functions from the specification.

    Selector ``i`` (0..4) picks the round function; any other value is a
    caller bug and trips the final assertion.
    """
    if i == 0:
        return x ^ y ^ z
    if i == 1:
        return (x & y) | (~x & z)
    if i == 2:
        return (x | ~y) ^ z
    if i == 3:
        return (x & z) | (y & ~z)
    if i == 4:
        return x ^ (y | ~z)
    assert False
import torch
def th_accuracy(pad_outputs, pad_targets, ignore_label):
    """Calculate accuracy.

    Args:
        pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
        pad_targets (LongTensor): Target label tensors (B, Lmax).
        ignore_label (int): Ignore label id.

    Returns:
        float: Accuracy value (0.0 - 1.0).
    """
    batch, length = pad_targets.size(0), pad_targets.size(1)
    # Fold the flat predictions back to (B, Lmax) label ids.
    preds = pad_outputs.view(batch, length, pad_outputs.size(1)).argmax(2)
    keep = pad_targets != ignore_label
    hits = torch.sum(preds.masked_select(keep) == pad_targets.masked_select(keep))
    total = torch.sum(keep)
    return float(hits) / float(total)
def staff_level():
    """ Staff Levels Controller

    REST controller for staff levels; denies access entirely when the
    session's HRM mode is set (non-None).
    """
    # HRM mode stashed on the session by the HR module.
    mode = session.s3.hrm.mode
    def prep(r):
        # NOTE(review): any non-None mode is rejected outright — confirm
        # this matches the intended access policy for this controller.
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = prep
    output = s3_rest_controller()
    return output | f665c822c002a9b27e2f8132213c7c2b8841611d | 27,221 |
def aa_status_string (status):
    """usage: str return = aa_status_string(int status)

    Translate an Aardvark status code into its human-readable string via
    the native binding.
    """
    # NOTE(review): when the native library failed to load this returns the
    # AA_INCOMPATIBLE_LIBRARY error code where callers expect a string —
    # confirm callers handle that sentinel.
    if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
    # Call API function
    return api.py_aa_status_string(status) | eabe0b6016b269a749e86e88e3eb5aab8e844215 | 27,222 |
def echo(context, args):
    """ echo text

    Echo back the following text."""
    # Emit the text through the surrounding framework's info() channel,
    # then return the context unchanged so the command chain continues.
    info(args)
    return context | 887e49ce9ff95c7eabf0499756e1cfce418ccd59 | 27,223 |
import time
def wait_for_visibility(element, wait_time=1):
    """Wait until an element is visible before scrolling.

    Polls ``element.visible`` until it becomes true or ``wait_time``
    seconds elapse.

    Args:
        element (ElementAPI): The splinter element to be waited on.
        wait_time (int): The time in seconds to wait.

    Returns:
        bool: True if the element became visible in time, else False.
    """
    deadline = time.time() + wait_time
    while True:
        # Check at least once, so a wait_time of 0 still observes an
        # already-visible element (the old loop could skip the check).
        if element and element.visible:
            return True
        if time.time() >= deadline:
            return False
        # Sleep briefly between polls instead of busy-spinning the CPU.
        time.sleep(0.05)
import asyncio
async def meta(request):
    """Return ffprobe metadata

    Streams the JSON output of ``ffprobe -show_format -show_streams`` for
    the media URL given in the ``url`` query parameter.
    """
    async def stream_fn(response):
        # Bound concurrent ffprobe subprocesses with the module semaphore.
        async with sem:
            cmd = ['ffprobe',
                   '-v',
                   'quiet',
                   '-i',
                   request.args.get('url'),
                   '-print_format',
                   'json',
                   '-show_format',
                   '-show_streams'
                   ]
            proc = await asyncio.create_subprocess_exec(*cmd,
                                                        stdout=asyncio.subprocess.PIPE
                                                        )
            # Relay ffprobe stdout to the HTTP response chunk by chunk.
            while True:
                chunk = await proc.stdout.read(PYTHUMBIO_CHUNKSIZE)
                if not chunk:
                    break
                response.write(chunk)
    return stream(stream_fn, content_type='application/json') | 65bbedf428196ac8317716287550ee868bb6b99e | 27,225 |
from typing import Tuple
import ctypes
def tpictr(
    sample: str, lenout: int = _default_len_out, lenerr: int = _default_len_out
) -> Tuple[str, int, str]:
    """
    Given a sample time string, create a time format picture
    suitable for use by the routine timout.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tpictr_c.html

    :param sample: A sample time string.
    :param lenout: The length for the output picture string.
    :param lenerr: The length for the output error string.
    :return:
        A format picture that describes sample,
        Flag indicating whether sample parsed successfully,
        Diagnostic returned if sample cannot be parsed
    """
    sample = stypes.string_to_char_p(sample)
    # Passing an int here allocates empty output buffers of that size
    # (stypes helper behavior — presumably; confirm against support module).
    pictur = stypes.string_to_char_p(lenout)
    errmsg = stypes.string_to_char_p(lenerr)
    lenout = ctypes.c_int(lenout)
    lenerr = ctypes.c_int(lenerr)
    ok = ctypes.c_int()
    # CSPICE fills pictur/ok/errmsg in place.
    libspice.tpictr_c(sample, lenout, lenerr, pictur, ctypes.byref(ok), errmsg)
    return stypes.to_python_string(pictur), ok.value, stypes.to_python_string(errmsg) | 73f098aa71b796d1a586f9e8666e4277339b7c0d | 27,226 |
def survey_page(request):
    """View extracts the data that a volunteer fills out from the monthly survey
    and updates the data on app side accordingly

    GET: validates the volunteer's tokenized link and renders the survey.
    POST: handles unsubscribe requests or stores the submitted availability.
    """
    if request.method == 'GET':
        # Stash the link's query parameters for the subsequent POSTs.
        request.session['vol_id'] = request.GET.get('id')
        request.session['vol_email'] = request.GET.get('email')
        request.session['vol_token'] = request.GET.get('token')
        request.session['survey_month'] = request.GET.get('month')
        request.session['survey_year'] = request.GET.get('year')
        # Number of days in the surveyed month (handles leap years).
        days = monthrange(int(request.session['survey_year']), int(request.session['survey_month']))[1]
        vol_id = request.session['vol_id']
        vol_token = request.session['vol_token']
        # NOTE(review): `not (vol_id or vol_token)` only rejects when BOTH
        # are missing — likely intended `not (vol_id and vol_token)`; confirm.
        if not (vol_id or vol_token):
            return render(request, "scheduling_application/bad_link.html", {})
        volunteer = Volunteer.objects.get(id=vol_id)
        month = request.session['survey_month']
        context = {
            'month': month,
            'num_days': range(days)
        }
        # Validate the token inside of the URL
        if vol_token != volunteer.survey_token:
            return render(request, "scheduling_application/bad_link.html", {})
        else:
            return render(request, "scheduling_application/survey_sending/survey_page.html", context=context)
    if request.method == 'POST' and 'unsubscribe' in request.POST:
        # First unsubscribe click: show the confirmation page.
        vol_id = request.session['vol_id']
        return render(request, "scheduling_application/unsubscribe.html", context={})
    elif request.method == 'POST' and 'confirm_unsubscribe' in request.POST:
        comms = request.POST.get('comms')
        everything = request.POST.get('everything')
        vol_id = request.session['vol_id']
        # Unsubscribe SMS/Email
        if comms:
            volunteer = Volunteer.objects.get(id=vol_id)
            unsub_comms(volunteer)
        # Unsubscribe from entire service
        if everything:
            volunteer = Volunteer.objects.get(id=vol_id)
            unsub_all(volunteer)
        return render(request, "scheduling_application/survey_sending/survey_complete.html", {})
    elif request.method == 'POST':
        # Survey submission: replace this month's availability wholesale.
        vol_id = request.session['vol_id']
        option_list = request.POST.getlist('survey-value')
        volunteer = Volunteer.objects.get(id=vol_id)
        # Zero-pad the month so the MM/DD/YYYY regex below matches.
        if int(request.session['survey_month']) < 10:
            month_string = "0" + request.session['survey_month']
        else:
            month_string = request.session['survey_month']
        regex = r'((' + month_string + r')[/]\d\d[/](' + request.session['survey_year'] + r'))'
        # Drop previously recorded days for this month before re-adding.
        volunteer.Days.all().filter(date__regex=regex).delete()
        read_survey_data(option_list, volunteer, request.session['survey_month'], request.session['survey_year'])
        return render(request, "scheduling_application/survey_sending/survey_complete.html", {}) | fa9ac1a31782c0a5639d0d20d98cc41772b1ce09 | 27,227 |
def chebyu(n, monic=0):
    """Return nth order Chebyshev polynomial of second kind, Un(x). Orthogonal
    over [-1,1] with weight function (1-x**2)**(1/2).

    :param monic: when truthy, return the monic (leading coefficient 1) form.
    """
    # U_n coincides with the Jacobi polynomial P_n^(1/2,1/2) up to scaling.
    base = jacobi(n,0.5,0.5,monic=monic)
    if monic:
        # Monic form needs no rescaling.
        return base
    # Rescale the Jacobi polynomial to the standard U_n normalization.
    factor = sqrt(pi)/2.0*_gam(n+2) / _gam(n+1.5)
    base._scale(factor)
    return base | b71c947e8f988fe3500339a10bd783ed8561da77 | 27,228 |
def spm(name, path, size, bos= -1, eos= -1, unk= 0, coverage= 0.9995):
    """-> SentencePieceProcessor

    trains a sentence piece model of `size` from text file on `path`
    and saves with `name`.

    :param bos: begin-of-sentence id (-1 disables the symbol)
    :param eos: end-of-sentence id (-1 disables the symbol)
    :param unk: unknown-token id
    :param coverage: character coverage target passed to the trainer
    """
    # Single CLI-style argument string; the embedded whitespace/backslashes
    # are part of the value SentencePiece parses, so leave them untouched.
    SentencePieceTrainer.train(
        "--model_prefix={name} \
        --input={path} \
        --vocab_size={size} \
        --bos_id={bos} \
        --eos_id={eos} \
        --unk_id={unk} \
        --unk_surface=☹ \
        --character_coverage={coverage}".format(
            coverage= coverage
            , unk= unk
            , eos= eos
            , bos= bos
            , size= size
            , path= path
            , name= name))
    # Load and return the freshly trained model.
    return load_spm(name + ".model") | df22367462839192bcd55093ddf8a2c5b15085f6 | 27,229 |
def list_reshape_bywindow(longlist, windowlen, step=1):
    """Slice a long list into fixed-length windows.

    Each window is a sublist of length ``windowlen``; consecutive windows
    start ``step`` elements apart.  With ``step == 1`` windows overlap;
    with ``step == windowlen`` they tile the list without overlap.
    Trailing elements that do not fill a whole window are dropped.

    Parameters:
    ------------
    longlist: original long list
    windowlen: window length
    step: offset between consecutive window starts (default 1)

    Returns:
    --------
    windows: list of windows (sublists)

    Example:
    --------
    >>> list_reshape_bywindow([1, 2, 3, 4, 5], windowlen=2, step=2)
    [[1, 2], [3, 4]]
    """
    windows = []
    start = 0
    while start + windowlen <= len(longlist):
        windows.append(longlist[start:start + windowlen])
        start += step
    return windows
def generate_test_repo() -> Repository:
    """Build a minimal PyGithub Repository object suitable for tests."""
    # A dummy requester with blank credentials; nothing is fetched.
    requester = Requester(
        login_or_token="",
        retry=False,
        password=None,
        jwt=None,
        base_url="https://github.com/yaleman/github_linter/",
        timeout=30,
        pool_size=10,
        per_page=100,
        user_agent="",
        verify=False,
    )  # type: ignore
    return Repository(
        requester,
        {},
        attributes={"full_name": "testuser/test1", "name": "test1"},
        completed=True,
    )
from ucsmsdk.mometa.comm.CommSyslogConsole import CommSyslogConsoleConsts


def syslog_local_console_exists(handle, **kwargs):
    """
    Check whether the syslog local console exists, is enabled, and matches
    the given properties.

    Args:
        handle (UcsHandle)
        **kwargs: key-value pair of managed object(MO) property and value, Use
                  'print(ucscoreutils.get_meta_info(<classid>).config_props)'
                  to get all configurable properties of class

    Returns:
        (True/False, MO/None)

    Example:
        syslog_local_console_exists(handle, severity="alerts")
    """
    dn = _syslog_dn + "/console"
    mo = handle.query_dn(dn)
    if not mo:
        return False, None
    # The console only counts as "existing" when its admin state is enabled,
    # so force that property into the comparison set.
    kwargs['admin_state'] = CommSyslogConsoleConsts.ADMIN_STATE_ENABLED
    mo_exists = mo.check_prop_match(**kwargs)
    return (mo_exists, mo if mo_exists else None)
import numpy
def filter_inputs(inputlist, minimum_members, number_of_families):
    """
    Remove functions that have fewer than ``minimum_members`` different
    hashes and return the variants of ``number_of_families`` randomly
    chosen (but deterministic, seed 0) remaining functions.

    Args:
        inputlist: iterable of (hash, symbol, simhash) triples.
        minimum_members: minimum number of variants a symbol must have.
        number_of_families: how many symbols to keep.

    Returns:
        List of (hash, symbol, simhash) triples for the chosen symbols.
    """
    grouped = defaultdict(list)
    for entry in inputlist:
        grouped[entry[1]].append((entry[0], entry[2]))
    # Drop all functions with an insufficient number of variants.
    grouped = {sym: variants for sym, variants in grouped.items()
               if len(variants) >= minimum_members}
    # Sort the keys because Python dict/key iteration order must not
    # influence the random draw below.
    keys = sorted(grouped)
    # Deterministic seed for reproducibility.
    numpy.random.seed(0)
    subset = numpy.random.choice(keys, number_of_families, replace=False)
    result = []
    for key in subset:
        result.extend((simhash[0], key, simhash[1]) for simhash in grouped[key])
    return result
import sys
def get_dataloader_workers():
    """Return the data-loading worker count: 0 on Windows, 4 elsewhere."""
    if sys.platform.startswith('win'):
        return 0
    return 4
def is_phone_in_call_video_tx_enabled(log, ad):
    """Return whether the phone is in a tx_enabled video call.

    Args:
        log: log object.
        ad: android device object.

    Returns:
        True if the phone is in a tx_enabled video call on its outgoing
        voice subscription.
    """
    sub_id = get_outgoing_voice_sub_id(ad)
    return is_phone_in_call_video_tx_enabled_for_subscription(log, ad, sub_id)
from typing import Sequence
def fit_t1_results(times: Sequence[float], z_expectations: Sequence[float],
                   z_std_errs: Sequence[float] = None, param_guesses: tuple = (1.0, 15, 0.0)) \
        -> ModelResult:
    """
    Wrapper for fitting the results of a T1 experiment for a single qubit; simply extracts key
    parameters and passes on to the standard fit.

    The estimate for T1 can be found in the returned fit.params['decay_constant']

    :param times: the times at which the z_expectations were measured. The units of the time
        determine the units of the T1 estimate, decay_constant. Here we set the default guess to
        O(10) which corresponds to the times being given in units of microseconds.
    :param z_expectations: expectation of Z at each time for a qubit initialized to 1
    :param z_std_errs: std_err of the Z expectation, optionally used to weight the fit.
    :param param_guesses: guesses for the (amplitude, decay_constant, offset) parameters. Here
        the default decay_constant of 15 assumes that times are given in units of microseconds.
    :return: a ModelResult fit with estimates of the Model parameters, including the T1
        'decay_constant'
    """
    z_expectations = np.asarray(z_expectations)
    if z_std_errs is not None:
        # Convert the <Z> moments and variance into P(measure 1) moments.
        # The -1 factor presumably accounts for |1> having <Z> = -1 —
        # confirm against transform_pauli_moments_to_bit's convention.
        probability_one, var = transform_pauli_moments_to_bit(np.asarray(-1 * z_expectations),
                                                              np.asarray(z_std_errs)**2)
        err = np.sqrt(var)
        non_zero = [v for v in err if v > 0]
        if len(non_zero) == 0:
            # All std errs are zero: an unweighted fit is the only option.
            weights = None
        else:
            # TODO: does this handle 0 var appropriately?
            # Other possibility is to use unbiased prior into std_err estimate.
            # Zero-variance points would otherwise get infinite weight, so
            # substitute the smallest positive std err before inverting.
            min_non_zero = min(non_zero)
            non_zero_err = np.asarray([v if v > 0 else min_non_zero for v in err])
            weights = 1 / non_zero_err
    else:
        # No error estimates supplied: compute probabilities only, unweighted.
        probability_one, _ = transform_pauli_moments_to_bit(np.asarray(-1 * z_expectations), 0)
        weights = None
    return fit_decay_time_param_decay(np.asarray(times), probability_one, weights,
                                      param_guesses)
def find_mapping_net_assn(context, network_id, host_id):
    """
    Look up the network association that maps a neutron network to a host.

    Returns the association that ties ``network_id`` to ``host_id``, or
    None when no association exists.

    :param context: the context used to call the dom API
    :param network_id: the neutron network id
    :param host_id: the host id of the host for which we want the
                    association
    :returns: the associated Network Association; may be None
    """
    # Note the argument order: the dom API takes host first, then network.
    return dom_api.network_association_find(context, host_id, network_id)
import os
def get_emergency_passphrase():
    """Return the emergency passphrase from the environment.

    Raises:
        MissingVariableError: when the environment variable is not set.
    """
    value = os.environ.get(EMERGENCY_PASSPHRASE_VARIABLE)
    if value is None:
        raise MissingVariableError(EMERGENCY_PASSPHRASE_VARIABLE)
    return value
def bubble_sort(seq):
    """Inefficiently sort the mutable sequence ``seq`` in place (bubble sort).

    ``seq`` MUST BE A MUTABLE SEQUENCE (e.g. a list).  The same sequence is
    also returned for convenience.
    """
    n = len(seq)
    changed = True
    while changed:
        changed = False
        # Each pass bubbles the largest remaining element to the end.
        # (Original used Python-2-only `xrange`, a NameError on Python 3.)
        for i in range(n - 1):
            if seq[i] > seq[i + 1]:
                seq[i], seq[i + 1] = seq[i + 1], seq[i]
                changed = True
        # The tail element is now in its final place; shrink the scan range.
        n -= 1
    return seq
def numba_cuda_DeviceNDArray(xd_arr):
    """Return a numba DeviceNDArray view of an xnd.xnd on a CUDA device.

    (Original docstring said "cupy.ndarray" — presumed copy-paste; the
    conversion target is the numba DeviceNDArray helper.  Confirm.)
    """
    # DERIVED: route through the pyarrow CUDA buffer representation and
    # reuse the existing buffer-to-DeviceNDArray conversion.
    cuda_buf = pyarrow_cuda_buffer(xd_arr)
    return pyarrow_cuda_buffer_as.numba_cuda_DeviceNDArray(cuda_buf)
def deg2rad(dd):
    """Convert an angle from decimal degrees to radians."""
    return dd * pi / 180
def createView(database, view_name, map_func):
    """
    Create a Cloudant view named ``view_name`` with map function
    ``map_func`` inside the ``_design/names`` design document, and
    return the View object.
    """
    ddoc = design_document.DesignDocument(database, "_design/names")
    ddoc.add_view(view_name, map_func)
    return view.View(ddoc, view_name, map_func)
def find_modifiable_states(state_data):
    """Find dwell states whose chip counts may be modified.

    Args:
        state_data (ndarray): States array, in the form returned by
            cmd_states.get_cmd_states.fetch_states

    Returns:
        (ndarray): Numeric indices of states that represent dwells with
            modifiable chip counts (NPNT, clocking, fep == ccd, fep < 4)
        (list): All 0/1 permutations; each tuple is one case giving the
            number of chips to add per indexed state.  The first element
            is all zeros — the baseline case.
    """
    eligible = ((state_data['pcad_mode'] == 'NPNT')
                & (state_data['clocking'] == 1)
                & (state_data['fep_count'] == state_data['ccd_count'])
                & (state_data['fep_count'] < 4))
    states_ind = np.where(eligible)[0]
    cases = list(product([0, 1], repeat=len(states_ind)))
    return states_ind, cases
import urllib
def get_tool_def( trans, hda ):
    """ Returns definition of an interactive tool for an HDA.

    Returns None when the producing job or tool cannot be found, when the
    tool has no Trackster configuration, or when no parameter is
    interactively modifiable; otherwise returns a dict with the tool
    'name' and a 'params' list pre-filled with the job's values.

    NOTE(review): `urllib.quote` implies this is Python 2 code
    (`urllib.parse.quote` on Python 3) — confirm the target runtime.
    """
    job = get_dataset_job( hda )
    # TODO: could use this assertion to provide more information.
    # assert job is not None, 'Requested job has not been loaded.'
    if not job:
        return None
    tool = trans.app.toolbox.get_tool( job.tool_id )
    # TODO: could use this assertion to provide more information.
    # assert tool is not None, 'Requested tool has not been loaded.'
    if not tool:
        return None
    # Tool must have a Trackster configuration.
    if not tool.trackster_conf:
        return None
    # Get list of tool parameters that can be interactively modified.
    tool_params = []
    # Parameters are stored as strings on the job; convert them back to
    # their typed values, ignoring any that no longer parse.
    tool_param_values = dict( [ ( p.name, p.value ) for p in job.parameters ] )
    tool_param_values = tool.params_from_strings( tool_param_values, trans.app, ignore_errors=True )
    for name, input in tool.inputs.items():
        if type( input ) == IntegerToolParameter or type( input ) == FloatToolParameter:
            # Numeric parameter: expose its value plus optional min/max bounds.
            param_dict = { 'name' : name, 'label' : input.label, \
                           'value' : tool_param_values.get( name, input.value ), \
                           'type' : 'number', 'init_value' : input.value,
                           'html' : urllib.quote( input.get_html() ) }
            if input.min:
                param_dict['min'] = input.min
            if input.max:
                param_dict['max'] = input.max
            tool_params.append( param_dict )
        elif type( input ) == SelectToolParameter and type( input.options ) != DynamicOptions:
            # Static select list only; dynamic options depend on other state.
            tool_params.append( { 'name' : name, 'label' : input.label, 'type' : 'select', \
                                  'value' : tool_param_values.get( name, None ), \
                                  'html' : urllib.quote( input.get_html() ) } )
    # If tool has parameters that can be interactively modified, return tool.
    if len( tool_params ) != 0:
        return { 'name' : tool.name, 'params' : tool_params }
    return None
import os
import hashlib
def create_multi_file_info(directory,
                           files,
                           piece_length):
    """
    Return dictionary with the following keys:
      - pieces: concatenated 20-byte-sha1-hashes
      - name: basename of the directory (default name of all torrents)
      - files: a list of dictionaries with the following keys:
        - length: size of the file in bytes
        - md5sum: md5 sum of the file
        - path: path to the file, relative to the initial directory,
                given as list.
                Examples:
                -> ["dir1", "dir2", "file.ext"]
                -> ["just_in_the_initial_directory_itself.ext"]

    @see: BitTorrent Metainfo Specification.
    @note: md5sum actually is optional

    NOTE(review): the byte accumulation via `data = ''` and `+=` implies
    Python 2 byte strings — on Python 3 this would need b''.join-style
    handling.  Confirm the target runtime.
    """
    assert os.path.isdir(directory), "not a directory"
    # Concatenated 20byte sha1-hashes of all the torrent's pieces.
    info_pieces = ''
    #
    info_files = []
    # This bytearray will be used for the calculation of info_pieces.
    # At some point, every file's data will be written into data. Consecutive
    # files will be written into data as a continuous stream, as required
    # by info_pieces' BitTorrent specification.
    data = ''
    for f in files:
        path = os.path.join(directory, f)
        # File's byte count.
        length = 0
        # File's md5sum.
        md5 = hashlib.md5()
        # printv is a module-level verbose-print helper.
        printv("Processing file '%s'... " % os.path.relpath(path, directory))
        fh = open(path,'rb')
        while True:
            filedata = fh.read(piece_length)
            if len(filedata) == 0:
                break
            length += len(filedata)
            data += filedata
            # Hash and flush every full piece as soon as it is available.
            if len(data) >= piece_length:
                info_pieces += sha1_20(data[:piece_length])
                data = data[piece_length:]
            md5.update(filedata)
        fh.close()
        # Build the current file's dictionary.
        # NOTE(review): this uses relpath(f, directory) although `f` already
        # appears to be relative to `directory` (it was joined above) —
        # verify that callers pass paths in the form this expects.
        fdict = {
            'length': length,
            'md5sum': md5.hexdigest(),
            'path': split_path(relpath(f,directory))
        }
        info_files.append(fdict)
    # Don't forget to hash the last piece.
    # (Probably the piece that has not reached the regular piece size.)
    if len(data) > 0:
        info_pieces += sha1_20(data)
    # Build the final dictionary.
    info = {
        'pieces': info_pieces,
        'name': os.path.basename(directory.strip("/\\")),
        'files': info_files
    }
    return info
def get_datasource_bounding_box(datasource_uri):
    """Get datasource bounding box where coordinates are in projected units.

    Args:
        datasource_uri (string): a uri to an OGR datasource

    Returns:
        bounding_box (list):
            [upper_left_x, upper_left_y, lower_right_x, lower_right_y] in
            projected coordinates
    """
    datasource = ogr.Open(datasource_uri)
    layer = datasource.GetLayer(0)
    extent = layer.GetExtent()
    # OGR's extent order is (xmin, xmax, ymin, ymax); reindex into
    # upper-left / lower-right corner coordinates.
    bounding_box = [extent[0],
                    extent[3],
                    extent[1],
                    extent[2]]
    # Original returned the undefined name `bounding_boxz` (NameError).
    return bounding_box
def instance_id(instance):
    """
    Return the id of ``instance`` formatted as lowercase hex (no '0x').
    Helps in logs/debugging/development troubleshooting.
    """
    return format(id(instance), 'x')
def distributions_to_lower_upper_bounds(model, negative_allowed=[], ppf=(0.05,0.95), save_to_model=False):
    """
    Convert distributions to [lower, upper] bounds by taking the specified
    ppf quantiles.

    Args:
        model: The model object (uses ``parameter_distributions`` and
            ``species_distributions`` mappings of name -> scipy distribution).
        negative_allowed: list of params which are allowed to be negative.
        ppf: Quantile range of the pdf to take.  Default 5th and 95th.
        save_to_model: Boolean.  True will save lower and upper bounds in
            place of the scipy distributions.

    Returns:
        List of [lower, upper] bounds (parameters first, then species).
    """
    def bounds_for(name, distribution):
        # One-sided search: take the upper quantile directly, then walk the
        # lower quantile up in 1%-steps until check_not_neg accepts it
        # (i.e. it is non-negative unless this name allows negatives).
        upper = distribution.ppf(ppf[1])
        lower = None
        lower_ppf = ppf[0]
        while not check_not_neg(lower, name, negative_allowed):
            lower = distribution.ppf(lower_ppf)
            lower_ppf += 0.01
        return lower, upper

    bounds = []
    # The parameter and species loops were previously duplicated verbatim;
    # iterate both mappings with the shared helper instead.
    for dist_map in (model.parameter_distributions, model.species_distributions):
        for name, distribution in dist_map.items():
            lower, upper = bounds_for(name, distribution)
            bounds.append([lower, upper])
            if save_to_model:
                dist_map[name] = [lower, upper]
    return bounds
def refresh_database(engine, server, jobs, source_obj, container_obj):
    """
    Perform the refresh of one VDB.  Python 2 code (uses print statements).

    engine: engine configuration dict (only "hostname" is read here)
    server: Engine object
    jobs: dict of running jobs, keyed by container
    source_obj: source object used to refresh from snapshot or timeflow
    container_obj: VDB container

    Returns server.last_job when a refresh job was started (None otherwise,
    e.g. when the object is skipped).
    """
    #Sanity check to make sure our source object has a reference
    if source_obj.reference:
        #We can only refresh VDB's
        if source_obj.virtual != True:
            print_warning(engine["hostname"] + ": " + container_obj.name +
                          " is not a virtual object. Skipping.")
        #Ensure this source is not a staging database. We can't act upon those.
        elif source_obj.staging == True:
            print_warning(engine["hostname"] + ": " + container_obj.name +
                          " is a staging database. Skipping.")
        #Ensure the source is enabled. We can't refresh disabled databases.
        elif source_obj.runtime.enabled == "ENABLED" :
            source_db = database.get(server, container_obj.provision_container)
            # NOTE(review): when the lookup fails this only logs an error and
            # then still dereferences source_db.name below — confirm whether
            # an early return was intended here.
            if not source_db:
                print_error(engine["hostname"] +
                            ":Was unable to retrieve the source container for "
                            + container_obj.name)
            print_info(engine["hostname"] + ": Refreshing " +
                       container_obj.name + " from " + source_db.name)
            print_debug(engine["hostname"] + ": Type: " + source_obj.type )
            print_debug(engine["hostname"] + ":" + source_obj.type)
            #If the vdb is a Oracle type, we need to use a
            # OracleRefreshParameters
            if str(container_obj.reference).startswith("ORACLE"):
                refresh_params = OracleRefreshParameters()
            else:
                refresh_params = RefreshParameters()
            try:
                refresh_params.timeflow_point_parameters = set_timeflow_point(
                    engine, server,
                    source_db)
                print_debug(engine["hostname"] + ":" + str(refresh_params))
                #Sync it
                database.refresh(server, container_obj.reference,
                                 refresh_params)
                jobs[container_obj] = server.last_job
            except RequestError as e:
                print '\nERROR: Could not set timeflow point:\n%s\n' % (
                    e.message.action)
                sys.exit(1)
            except DlpxException as e:
                print 'ERROR: Could not set timeflow point:\n%s\n' % (e.message)
                sys.exit(1)
            #return the job object to the calling statement so that we can
            # tell if a job was created or not (will return None, if no job)
            return server.last_job
        #Don't do anything if the database is disabled
        else:
            print_warning(engine["hostname"] + ": " + container_obj.name +
                          " is not enabled. Skipping sync")
def create_feature_indices(header):
    """
    Map each unique feature name to its value type and column indices.

    Args:
        header (list[str]): description of each feature's possible values;
            entries are either plain names (continuous) or
            "name->value" pairs (discrete).  "mask->X" entries become
            separate discrete "presence_X" features.

    Returns:
        feature_indices (dict): unique feature names as keys; each value is
            a list of ["discrete"|"continuous", idx, idx, ...] giving the
            data column indices where the feature is present.
    """
    feature_indices = {}
    for column, head in enumerate(header):
        parts = head.split("->")
        name = parts[0].replace(" ", "_")
        if parts[0] == "mask":
            # Mask columns indicate presence of another feature.
            presence_key = "presence_" + parts[1].replace(" ", "_")
            feature_indices[presence_key] = ["discrete", column]
        elif name in feature_indices:
            # Additional column for an already-seen discrete feature.
            feature_indices[name].append(column)
        else:
            kind = "discrete" if len(parts) > 1 else "continuous"
            feature_indices[name] = [kind, column]
    return feature_indices
def lower(word):
    """Return *word* with every character converted to its lowercase form."""
    return word.lower()
def band_pass(data, scale_one, scale_two):
    """
    Band-pass filter via a difference of two Gaussians:
    G(data, scale_one) - G(data, scale_two).
    """
    first = gaussian(data, scale=scale_one)
    second = gaussian(data, scale=scale_two)
    return first - second
def _in_dir(obj, attr):
"""Simpler hasattr() function without side effects."""
return attr in dir(obj) | f95e265d278e3014e8e683a872cd3b70ef6133c9 | 27,253 |
import os
def file_exists(work_dir, path):
    """
    Check whether ``path`` refers to an existing regular file, resolved
    relative to ``work_dir``.

    type: (string, string) -> bool
    """
    prev_dir = os.getcwd()
    try:
        os.chdir(work_dir)
        # isfile() already implies existence and returns False (rather than
        # raising) for a missing path.
        return os.path.isfile(path)
    except OSError:
        # work_dir itself is missing or inaccessible.
        return False
    finally:
        # Always restore the caller's working directory.  The original code
        # placed the restore after `return`, so it never ran on success.
        os.chdir(prev_dir)
import wave
def save_speech(data, p):
    """ Saves mic data to temporary WAV file. Returns filename of saved
    file.

    data: sequence of raw audio frame buffers (as returned by a PyAudio
          stream read); joined into one contiguous byte string.
          NOTE(review): ''.join implies Python 2 byte strings — on
          Python 3 this would need b''.join.  Confirm the runtime.
    p: a pyaudio.PyAudio instance (used only for the sample size lookup).
    """
    filename = 'output'
    # writes data to WAV file
    data = ''.join(data)
    wf = wave.open(filename + '.wav', 'wb')
    # Mono, 16-bit samples at 16 kHz.
    wf.setnchannels(1)
    wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
    wf.setframerate(16000) # TODO make this value a function parameter?
    wf.writeframes(data)
    wf.close()
    return filename + '.wav'
def load_stat_features_others_windows(patient_list, data_path="statistic_features.csv",
                                      statistics_list=["std_x", "std_y", "std_z"], n_others_windows=40):
    """
    Build feature vectors from a window plus its surrounding context windows.

    NOTE: ``statistics_list`` is a mutable default argument — safe only as
    long as callers never mutate it.
    NOTE(review): the label slicing below assumes ``n_others_windows`` is a
    positive even number (the default 40); n_others_windows == 0 would
    produce an empty label slice.  Confirm intended use.

    Returns:
    X_all_data - ndarray of shape(n_records, n_new_features), feature-vector consist of features of current window and several others ( n_others_windows // 2 before current window and n_others_windows // 2 after it)
    y_all_data - ndarray of shape(n_records,)
    """
    statistics_df = pd.read_csv(data_path)
    X_all_data = []
    y_all_data = []
    for patient in patient_list:
        # Per-patient feature matrix and sleep-stage labels.
        X = np.array(statistics_df.loc[statistics_df.id == patient, statistics_list])
        y = np.array(statistics_df.loc[statistics_df.id == patient, "sleep_stage"])
        # Each new row concatenates n_others_windows + 1 consecutive windows.
        X_new = np.zeros((X.shape[0]-n_others_windows, X.shape[1]*(n_others_windows+1)))
        for i in range(0, X.shape[0]-n_others_windows):
            X_buff = X[i]
            for j in range(1, n_others_windows+1):
                X_buff = np.concatenate((X_buff, X[i+j]))
            X_new[i] = X_buff
        # Keep the label of the central window of each concatenated group.
        y = y[(n_others_windows//2): -(n_others_windows//2)]
        #y_test_new = y_test[previous:]
        X_all_data.append(X_new)
        y_all_data.append(y)
    X_all_data = np.concatenate(X_all_data, axis=0)
    y_all_data = np.concatenate(y_all_data, axis=0)
    return X_all_data, y_all_data
def map_format(value, pattern):
    """
    Apply printf-style string formatting to ``value``:

    .. sourcecode:: jinja

        {{ "Hello?"|map_format("%s - Foo!") }}
            -> Hello? - Foo!

    ``pattern`` is the %-format string; ``value`` supplies its argument.
    (The previous docstring showed the builtin ``format`` filter's calling
    convention, which does not match this function.)
    """
    return soft_unicode(pattern) % (value)
def Cadzow(Xk, K, N, tol_ratio=10000, max_iter=10):
    """
    Implement Cadzow denoising.

    Assumes `svd` and `toeplitz` (scipy.linalg) and `np` are available at
    module level.

    Parameters
    ----------
    Xk : signal to denoise
    K : number of most significant members to take
    N : number of samples in the signal
    tol_ratio : min ratio of (K+1)th singular value / Kth singular value
                to stop iterations
    max_iter : maximum number of iterations to run

    Returns
    -------
    X : denoised signal
    """
    X = Xk.copy()
    ratio = 0
    iters = 0
    while (ratio < tol_ratio and iters < max_iter):
        iters += 1
        # perform svd of the Toeplitz matrix built from X
        U, s, Vh = svd(toeplitz(X[K:],X[np.arange(K,-1,-1)]))
        # update ratio of singular values for cutoff
        ratio = s[K-1] / s[K]
        # build S' : rectangular matrix holding only the first K singular
        # values on its diagonal (rank-K truncation)
        s_ = s[:K]
        sz1 = U.shape[1]
        sz2 = Vh.shape[0]
        S_ = np.zeros(shape=(sz1, sz2))
        for elem,(i,j) in enumerate(zip(np.arange(K),np.arange(K))):
            S_[i,j] = s_[elem]
        # least squares approx. for A (rank-K reconstruction)
        A_ = U @ S_ @ Vh
        # denoised Xk is the average of the (anti-ordered) diagonals
        for idx, off in enumerate(np.arange(K,K-N,-1)):
            temp = np.mean(np.diagonal(A_,offset=off))
            X[idx] = temp
    return X
def toposortGroup(candidateIrefs):
    """Return the given IRefs toposorted by the include graph.

    IRefs within the same topological level are sorted for determinism.
    """
    graph = {iRef: set(includePaths(iRef)) for iRef in candidateIrefs}
    candidates = set(candidateIrefs)
    ordered = []
    for level in toposort.toposort(graph):
        # Only keep candidate IRefs; includes may reference outside nodes.
        ordered.extend(sorted(level & candidates))
    return ordered
def make_template(template):
    """Given an OpenSearch template, return a Template instance for it.

    >>> template = make_template('http://localhost/search?q={term}')
    >>> template.substitute(term='opensearch syntax')
    'http://localhost/search?q=opensearch+syntax'
    >>>
    """
    return Template(template, decompose_template(template))
def search(T, dist, w, i=0):
    """Search trie T for w[i:] using exactly ``dist`` edit operations.

    Returns the matched word (as a string) or None.  Edits are tried in
    order: exact match, then per-letter insertion and substitution, then
    deletion; the first hit wins.
    """
    if i == len(w):
        # Word consumed: success only at a word node with the edit budget
        # fully spent.
        return "" if (T is not None and T.is_word and dist == 0) else None
    if T is None:
        return None
    # Exact character match first.
    exact = search(T.s[w[i]], dist, w, i + 1)
    if exact is not None:
        return w[i] + exact
    if dist == 0:
        return None
    for letter in ascii_letters:
        # Insertion: consume a trie edge without consuming w[i].
        inserted = search(T.s[letter], dist - 1, w, i)
        if inserted is not None:
            return letter + inserted
        # Substitution: consume both, with a different letter.
        substituted = search(T.s[letter], dist - 1, w, i + 1)
        if substituted is not None:
            return letter + substituted
    # Deletion: skip w[i] entirely.
    return search(T, dist - 1, w, i + 1)
def get_weighted_embeddings(embeddings, weights):
    """Multiply a sequence of word embeddings by their per-word weights.

    :param embeddings: word embeddings from embedding_lookup, shape
        [batch_size, seq_len, embed_dim]
    :param weights: per-word weights, shape [batch_size, seq_len]
    :return: weighted word embeddings, shape
        [batch_size, seq_len, embed_dim]
    """
    # Move embed_dim next to batch so the [batch, 1, seq_len] weights
    # broadcast across it, then restore the original layout.
    transposed = tf.transpose(embeddings, perm=[0, 2, 1])
    expanded_weights = tf.expand_dims(weights, 1)
    weighted = tf.multiply(transposed, expanded_weights)
    return tf.transpose(weighted, perm=[0, 2, 1])
def _adjust_values(females, males):
"""
Adjusting the values as the man moves in with the woman
"""
females = females.copy()
males = males.copy()
males.loc[:,"hid"] = females["hid"].tolist()
males.loc[:,"east"] = females["east"].tolist()
males.loc[:,"hhweight"] = females["hhweight"].tolist()
males.loc[:,"in_couple"] = 1
females.loc[:,"in_couple"] = 1
females.loc[:, "hhv"] = 0 # Make women the head of household
males.loc[:, "hhv"] = 1 # Men are "only" partner
return females, males | d139869b73e06fb917f843e86d135d1d9db3f4e3 | 27,263 |
def create_other_features(data):
    """Add a yes/no ('sì'/'no') column for each extracted feature.

    Mutates ``data`` in place and returns it.
    """
    feature_names = ['Fibra_ottica', 'Cancello_elettrico', 'Cantina',
                     'Impianto_di_allarme', 'Mansarda', 'Taverna',
                     'Cablato', 'Idromassaggio', 'Piscina']
    for name in feature_names:
        present = data['Altre_caratteristiche'].apply(lambda x: name in x)
        data[name] = np.where(present, 'sì', 'no')
    return data
def handler_good():
    """Event-handler stub that always reports success."""
    return True
def form_gov():
    """
    Collects the data from the government form and
    redirects them to the appropriate results page to report
    the final results.

    On GET (or on any handled error) the form page is re-rendered.
    """
    # collected_data index map:
    #   0: "Government", 1: state, 2: location, 3: land available,
    #   4: renewable-energy goal (may be ''), 5: NREL API key
    collected_data = []
    form_gov = InputData_gov()
    if request.method == "POST":
        try:
            collected_data.append("Government")
            collected_data.append(request.form.get("state"))
            collected_data.append(request.form.get("location"))
            collected_data.append(request.form.get("land_ava"))
            collected_data.append(request.form.get("goal_renewable"))
            collected_data.append(request.form.get("api_key"))
            # run "build_config.py" to build file for accessing NREL data
            build_hscfg.config_file(collected_data[5])
            # input data to wrapper function
            wtk = h5pyd.File("/nrel/wtk-us.h5", "r")
            # Only pass the goal when the user supplied one.
            if collected_data[4] == '':
                results = wrapper.wrapper(
                    wtk, collected_data[2], collected_data[1],
                    float(collected_data[3]))
            else:
                results = wrapper.wrapper(
                    wtk, collected_data[2], collected_data[1],
                    float(collected_data[3]), goal=int(collected_data[4]))
            return redirect(url_for("results_gov", gov_results=results))
        except IndexError:
            # Raised by the wrapper when the location lookup finds nothing.
            flash("ERROR: Check spelling of 'City/Town' or try a nearby city")
            return render_template("form_gov.html", form_gov=form_gov)
        except OSError:
            # h5pyd raises OSError when the API key is rejected.
            flash("ERROR: API key not accepted")
            return render_template("form_gov.html", form_gov=form_gov)
        except ValueError:
            # float(...) failed on the land-available field.
            flash("Error: Land available must be a number")
            return render_template("form_gov.html", form_gov=form_gov)
    return render_template("form_gov.html", form_gov=form_gov)
from io import StringIO
def convert_to_grayscale(buffer):
    """Converts the image in the given buffer to grayscale.

    Args:
        buffer: a file-like object holding the original image bytes.
            Must be in RGB mode.

    Returns:
        BytesIO: The grayscale version of the original image.

    Raises:
        ValueError: If the provided image is not in RGB mode.
    """
    # Image data is binary: a text StringIO (as previously used) rejects
    # the byte writes PIL performs on save, so use BytesIO instead.
    from io import BytesIO
    original = Image.open(buffer)
    grayscale = BytesIO()
    _convert_to_grayscale(original).save(grayscale, format=original.format)
    return grayscale
def line_visible(plaza_geometry, line, delta_m):
    """Check whether ``line`` is "visible", i.e. crosses the plaza
    unobstructed."""
    inside_part = plaza_geometry.intersection(line)
    # Visible iff the portion inside the plaza is as long as the whole
    # line, within a tolerance of delta_m (converted to degrees).
    tolerance = meters_to_degrees(delta_m)
    return abs(line.length - inside_part.length) <= tolerance
def bad_gateway(message="Bad gateway"):
    """
    A shortcut for creating a :class:`~aiohttp.web.Response` object with a
    ``502`` status and the JSON body ``{"message": "Bad gateway"}``.

    :param message: text to send instead of 'Bad gateway'
    :type message: str

    :return: the response
    :rtype: :class:`aiohttp.Response`
    """
    payload = {"id": "bad_gateway", "message": message}
    return json_response(payload, status=502)
def chromatic_induction_factors(n: FloatingOrArrayLike) -> NDArray:
    """
    Return the chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`.

    Parameters
    ----------
    n
        Function of the luminance factor of the background :math:`n`.

    Returns
    -------
    :class:`numpy.ndarray`
        Chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`,
        stacked along the last axis.

    Examples
    --------
    >>> chromatic_induction_factors(0.2)  # doctest: +ELLIPSIS
    array([ 1.000304,  1.000304])
    """
    n = as_float_array(n)
    with sdiv_mode():
        # N_bb = N_cb = 0.725 * (1 / n) ** 0.2, via the library's division
        # and power helpers (sdiv_mode presumably guards n == 0 — confirm
        # the sdiv/spow semantics).
        N_bb = N_cb = as_float(0.725) * spow(sdiv(1, n), 0.2)
    N_bbcb = tstack([N_bb, N_cb])
    return N_bbcb
def get_correctly_labeled_entries(all_entries):
    """Return entries that are both labeled (col 9) and evaluated as
    correct (col 10)."""
    correct = []
    for entry in all_entries:
        if convert_to_bool(entry[9]) and convert_to_bool(entry[10]):
            correct.append(entry)
    return correct
def data_context_connectivity_context_connectivity_serviceuuid_latency_characteristictraffic_property_name_delete(uuid, traffic_property_name):  # noqa: E501
    """data_context_connectivity_context_connectivity_serviceuuid_latency_characteristictraffic_property_name_delete

    removes tapi.topology.LatencyCharacteristic # noqa: E501

    :param uuid: Id of connectivity-service
    :type uuid: str
    :param traffic_property_name: Id of latency-characteristic
    :type traffic_property_name: str

    :rtype: None
    """
    # Auto-generated server stub (e.g. swagger-codegen); replace this
    # placeholder body with the real deletion logic.
    return 'do some magic!'
def tag_state_quantities(blocks, attributes, labels, exception=False):
    """ Take a stream states dictionary, and return a tag dictionary for stream
    quantities. This takes a dictionary (blocks) that has state block labels as
    keys and state blocks as values. The attributes are a list of attributes to
    tag. If an element of the attribute list is list-like, the first element is
    the attribute and the remaining elements are indexes. Labels provides a list
    of attribute labels to be used to create the tag. Tags are blk_key + label
    for the attribute.

    Args:
        blocks (dict): Dictionary of state blocks. The key is the block label to
            be used in the tag, and the value is a state block.
        attributes (list-like): A list of attributes to tag. It is okay if a
            particular attribute does not exist in a state block. This allows
            you to mix state blocks with different sets of attributes. If an
            attribute is indexed, the attribute can be specified as a list or
            tuple where the first element is the attribute and the remaining
            elements are indexes.
        labels (list-like): These are attribute labels. The order corresponds to
            the attribute list. They are used to create the tags. Tags are in
            the form blk_key + label. If None, labels are derived from the
            attribute specs ("attr", "attr[idx]" or "attr[(i, j, ...)]").
        exception (bool): If True, raise exceptions related to invalid or
            missing indexes. If False missing or bad indexes are ignored and
            None is used for the table value. Setting this to False allows
            tables where some state blocks have the same attributes with
            different indexing. (default is False)

    Return:
        (dict): Dictionary where the keys are tags and the values are model
            attributes, usually Pyomo component data objects.
    """
    if labels is None:
        # Derive default labels from the attribute specs.  (The original
        # code assigned to a misspelled variable here, so `labels` stayed
        # None and the function crashed when labels was omitted.)
        labels = []
        for a in attributes:
            if isinstance(a, (tuple, list)):
                if len(a) == 2:
                    labels.append(f"{a[0]}[{a[1]}]")
                elif len(a) > 2:
                    labels.append(f"{a[0]}[{a[1:]}]")
                else:
                    # Degenerate one-element spec: label is the attribute.
                    labels.append(a[0])
            else:
                labels.append(a)
    tags = {}
    for key, s in blocks.items():
        for i, a in enumerate(attributes):
            j = None
            if isinstance(a, (list, tuple)):
                # First element is the attribute; the rest are indexes.
                if len(a) == 2:
                    j = a[1]  # catch user supplying list-like of indexes
                elif len(a) > 2:
                    j = a[1:]
                # A one-element list-like is allowed: unindexed attribute.
                a = a[0]
            v = getattr(s, a, None)
            if j is not None and v is not None:
                try:
                    v = v[j]
                except KeyError:
                    if not exception:
                        v = None
                    else:
                        _log.error(f"{j} is not a valid index of {a}")
                        raise KeyError(f"{j} is not a valid index of {a}")
            try:
                # Probe whether a numeric value can be produced at all.
                value(v, exception=False)
            except TypeError:
                if not exception:
                    v = None
                else:
                    _log.error(
                        f"Cannot calculate value of {a} (may be subscriptable)")
                    raise TypeError(
                        f"Cannot calculate value of {a} (may be subscriptable)")
            except ZeroDivisionError:
                pass  # this one is okay
            if v is not None:
                tags[f"{key}{labels[i]}"] = v
    return tags
import argparse
def optional_list():
    """Return an OptionalList action."""
    class OptionalList(argparse.Action):
        """An argparse action that supports an optional list of arguments.

        This is the list analogue of supplying a const value with
        nargs='?' (which only allows a single optional value): when the
        option is present but no values follow it, the configured
        ``const`` is stored instead of the empty list.
        """

        def __call__(self, parser, namespace, values, option_string=None):
            # Fall back to the configured const when no values were given.
            chosen = values or self.const
            setattr(namespace, self.dest, chosen)

    return OptionalList
import os
def test_exercise_2():
    """Solution for exercise 2 (fit of a nonlinear consumption function)."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    df = pd.read_pickle(f"{base_dir}/material/data-consumption-function.pkl")

    def construct_predicted_values(income, alpha, beta, gamma):
        # Nonlinear consumption function: C = alpha + beta * Y^gamma.
        return alpha + beta * income ** gamma

    # Pre-computed parameter estimates (alpha, beta, gamma).
    mock_rslt = [-91.1933, 0.5691, 1.0204]
    df["realcons_pred"] = construct_predicted_values(df["realgdp"].values, *mock_rslt)

    years = df.index.get_level_values("Year")
    fig, ax = plt.subplots()
    ax.plot(years, df["realcons_pred"], label="Predicted")
    ax.plot(years, df["realcons"], label="Observed")
def instancesUserLookup(query=None, query_type=None):
    """
    Return a list of sites to which the requested user belongs
    Display on /search

    query_type selects the lookup field ('username' or 'email_address');
    query is matched case-insensitively against every per-role user list.
    Returns a list of (site_id, site_path, role) tuples.
    """
    # Build one case-insensitive $regex $or query spanning every known role
    # for the chosen lookup field.
    # NOTE(review): if query_type is neither 'username' nor 'email_address',
    # kwargs is never bound and the call below raises NameError — confirm
    # callers always pass a valid query_type.
    if query_type == 'username':
        kwargs = {'$or': [{'users.username.site_owner': {'$regex': query, '$options': 'i'}}, {'users.username.site_editor': {'$regex': query, '$options': 'i'}}, {'users.username.form_manager': {'$regex': query, '$options': 'i'}}, {'users.username.edit_my_content': {'$regex': query, '$options': 'i'}}, {
            'users.username.content_editor': {'$regex': query, '$options': 'i'}}, {'users.username.configuration_manager': {'$regex': query, '$options': 'i'}}, {'users.username.campaign_manager': {'$regex': query, '$options': 'i'}}, {'users.username.access_manager': {'$regex': query, '$options': 'i'}}]}
    elif query_type == 'email_address':
        # Handle mixed case in email addresses
        kwargs = {'$or': [{'users.email_address.site_owner': {'$regex': query, '$options': 'i'}}, {'users.email_address.site_editor': {'$regex': query, '$options': 'i'}}, {'users.email_address.form_manager': {'$regex': query, '$options': 'i'}}, {'users.email_address.edit_my_content': {'$regex': query, '$options': 'i'}}, {
            'users.email_address.content_editor': {'$regex': query, '$options': 'i'}}, {'users.email_address.configuration_manager': {'$regex': query, '$options': 'i'}}, {'users.email_address.campaign_manager': {'$regex': query, '$options': 'i'}}, {'users.email_address.access_manager': {'$regex': query, '$options': 'i'}}]}
    results, totalItems = getAllResults(atlasType='statistics', **kwargs)
    # Get list of all instances
    instanceList = []
    for r in results:
        # Resolve the site record the statistics row points at.
        instance = get_internal(
            'sites', **{'_id': r['site']})[0]['_items'][0]
        # NOTE(review): dict.iteritems() is Python 2 only — this module
        # cannot run unchanged on Python 3.
        for role, user in r['users'][query_type].iteritems():
            # Handle mixed case in email addresses
            if query.lower() in lowerList(user):
                instanceList.append((instance['_id'], instance['path'], role))
    return instanceList
def volume_kerucut_melingkar(radius: float, tinggi: float) -> float:
    """
    Return the volume of a right circular cone with the given base radius
    and height (tinggi): V = pi * r^2 * h / 3.

    Reference: https://en.wikipedia.org/wiki/Cone

    >>> volume_kerucut_melingkar(2, 3)
    12.566370614359172
    """
    # Same evaluation order as pi * r^2 * h / 3 to stay bit-identical.
    return pi * radius ** 2 * tinggi / 3.0
def vgg19(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration "E").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights replace any random initialization, so skip it.
        kwargs['init_weights'] = False
    features = make_layers(cfg['E'])
    model = VGG(features, **kwargs)
    if pretrained:
        state_dict = load_url(model_urls['vgg19'])
        model.load_state_dict(state_dict)
    return model
def xmprv_from_seed(seed: Octets, version: Octets, decode: bool = True) -> bytes:
    """Derive the BIP32 master extended private key from the seed.

    The seed is run through HMAC-SHA512 keyed with b"Bitcoin seed"; the
    left 32 bytes become the master private key and the right 32 bytes the
    chain code.  The key is serialized (version, depth 0, zero parent
    fingerprint, zero child index, chain code, 0x00-prefixed key) and
    Base58Check-encoded.
    """
    if isinstance(version, str):  # hex string
        version = bytes.fromhex(version)
    if version not in PRV:
        raise ValueError(f"invalid private version ({version})")
    if isinstance(seed, str):  # hex string
        seed = bytes.fromhex(seed)
    hd = HMAC(b"Bitcoin seed", seed, sha512).digest()
    chain_code = hd[32:]
    mprv = int_from_octets(hd[:32])
    serialized = b"".join([
        version,                 # 4-byte version
        b'\x00',                 # depth 0: master key
        b'\x00\x00\x00\x00',     # parent pubkey fingerprint (none)
        b'\x00\x00\x00\x00',     # child index (none)
        chain_code,
        b'\x00' + mprv.to_bytes(32, 'big'),  # 0x00-prefixed private key
    ])
    encoded = base58.encode_check(serialized)
    return encoded.decode('utf-8') if decode else encoded
def load_empty_config_setup() -> DictConfig:
    """Return a dictionary containing all the MLOQ setup config values set to None."""
    # Loads the bundled template at setup_yml.src; presumably its values all
    # default to None — confirm against the shipped setup.yml.
    return OmegaConf.load(setup_yml.src)
from typing import Dict
from typing import Tuple
def apply_dfg(dfg: Dict[Tuple[str, str], int], start_activities: Dict[str, int], end_activities: Dict[str, int], activities: Dict[str, int], parameters=None, variant=DEFAULT_VARIANT_DFG) -> Tuple[PetriNet, Marking, Marking]:
    """
    Apply the chosen IM algorithm to a DFG graph, obtaining a Petri net
    along with an initial and final marking.

    Parameters
    -----------
    dfg
        Directly-Follows graph
    variant
        Variant of the algorithm to apply, possible values:
        - Variants.IMd
    parameters
        Parameters of the algorithm, including:
            Parameters.ACTIVITY_KEY -> attribute of the log to use as
            activity name (default concept:name)

    Returns
    -----------
    net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    """
    # Resolve the concrete algorithm module for the requested variant,
    # then delegate with all arguments forwarded unchanged.
    algorithm = exec_utils.get_variant(variant)
    return algorithm.apply_dfg(
        dfg,
        start_activities=start_activities,
        end_activities=end_activities,
        activities=activities,
        parameters=parameters,
    )
def make_extrap_log_func(func, extrap_x_l=None):
    """Generate a version of func that extrapolates to infinitely many gridpoints.

    Extrapolation here is done on the *log* of the function result, so this
    will fail if any returned values are < 0; it does, however, tend to be
    better behaved for SFS calculation.

    func: A function whose last argument is the number of
        Numerics.default_grid points to use in calculation and that returns
        a single scalar or array.
    extrap_x_l: An explicit list of x values to use for extrapolation.  If
        not provided, the extrapolation routine will look for '.extrap_x'
        attributes on the results of func.  (The method Spectrum.from_phi
        adds an extrap_x attribute to resulting Spectra, equal to the
        x-value of the first non-zero grid point.)  An explicit list is
        useful if you want to override this behavior for testing.

    Returns a new function whose last argument is a list of numbers of grid
    points and that returns a result extrapolated to infinitely many grid
    points.
    """
    # Delegate to the generic builder, forcing log-space extrapolation.
    return make_extrap_func(func, extrap_x_l=extrap_x_l, extrap_log=True)
def get_logger(name):
"""每次调用,都是一个新的
"""
return LogCollector(name) | 81cb8ad13bbf54ada444cc624b7fc521b2cb8943 | 27,283 |
from re import T
# NOTE(review): the 're' module has no callable helper 'T' — re.T is the
# TEMPLATE regex flag and calling it below (T("Back")) cannot work.  'T'
# looks like it was meant to be a gettext/web2py-style translator imported
# from another module — confirm and fix the import source.
def ShowString(name, msg):
    """Return a html page listing a file and a 'back' button

    name: rendered (via xml_name) as the page title and heading.
    msg:  file contents; HTML-escaped and shown inside a <pre> block.
    """
    return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<html>
<head>
<title>%s</title>
</head>
<body>
<FORM><INPUT TYPE="BUTTON" VALUE="%s" ONCLICK="history.go(-1)"></FORM>
<h3>%s</h3>
<code><pre>%s</pre></code>
</body>
</html>
""" % (
        xml_name(name),
        T("Back"),
        xml_name(name),
        escape(msg),
    )
def game_loop(screen, buttons, items, music, sound, g_settings, particles=None, percentthing=None):
    """Run the event/update cycle until a gamestate transition occurs.

    Each iteration polls the event queue; if event handling yields a
    GameState, it is returned so the caller can switch screens.  Otherwise
    everything on screen is redrawn and the loop continues.
    """
    while True:
        # Poll and handle any pending events; may produce a new gamestate.
        transition = gf.check_events(buttons, music, sound, g_settings)
        if isinstance(transition, gf.GameState):
            # Propagate the gamestate change to the caller.
            return transition
        # No transition this frame: redraw all screen elements.
        gf.update_screen(screen, buttons, items, g_settings, particles, percentthing)
def fake_request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a (`connect timeout, read timeout
        <user/advanced.html#timeouts>`_) tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    session = sessions.Session()
    response = session.request(method=method, url=url, **kwargs)
    # By explicitly closing the session, we avoid leaving sockets open which
    # can trigger a ResourceWarning in some cases, and look like a memory leak
    # in others.
    session.close()
    # begin test patch
    # NOTE(review): Python 2 syntax (print statements) below — this module
    # cannot run on Python 3 as written.
    inject_flag_file='./clientinject.dat'
    if os.path.exists(inject_flag_file):
        # open, read and inject data
        replace_data=True
        with open(inject_flag_file) as infile:
            # NOTE(review): yaml.load without an explicit Loader executes
            # arbitrary constructors and is unsafe on untrusted input —
            # confirm the flag file is trusted, or use yaml.safe_load.
            fake_response_data = yaml.load(infile)
        if fake_response_data['url']: # we want to re.match the given url
            replace_data=False # only replace on match
            if re.match(fake_response_data['url'], response.url):
                replace_data=True
        if replace_data:
            # replace resp[value] w/ the fake data
            print "Fake response data: %s" % fake_response_data
            for key, value in fake_response_data['data'].items():
                setattr(response, key, value)
            print 'Altered response values:'
            for key, value in vars(response).items():
                print "%s: %s" %(key, value)
    # end test patch
    return response
from typing import List
from typing import Dict
from typing import Any
from typing import OrderedDict
def assert_step_match(
    step: Step,
    expected_step_func: str,
    expected_step_arguments: List[Dict[str, Any]],
    step_registry: StepRegistry,
):
    """Assert that the Step correctly matches in the Registry

    Matches ``step`` against ``step_registry`` and verifies that it
    resolved to the Step Implementation function named
    ``expected_step_func`` with the arguments listed in
    ``expected_step_arguments``.  Prints a progress line and, on mismatch,
    a failure report.  Returns True on success, False otherwise.
    """
    print(
        "{} STEP '{}' SHOULD MATCH {}".format(
            cf.orange(">>"),
            cf.deepSkyBlue3("{} {}".format(step.keyword, step.text)),
            cf.deepSkyBlue3(expected_step_func),
        ),
        end=" ",
        flush=True,
    )
    # match the step text from the config with one from the registry
    try:
        matcher.match_step(step, step_registry)
    except StepImplementationNotFoundError:
        print_failure(None, ["Expected Step Text didn't match any Step Implementation"])
        return False
    # check if Step matches the correct Step Implementation Function
    matched_step_func = step.step_impl.func
    if matched_step_func.__name__ != expected_step_func:
        print_failure(
            matched_step_func,
            [
                "Expected Step Text matched {} instead of {}".format(
                    matched_step_func.__name__, expected_step_func
                )
            ],
        )
        return False
    # check if the Step has a match with the correct arguments
    if expected_step_arguments:
        # merge the Step's keyword and positional arguments into one dict
        args, kwargs = step.step_impl_match.evaluate()
        actual_step_arguments = utils.get_func_pos_args_as_kwargs(
            matched_step_func, args
        )
        actual_step_arguments.update(kwargs)
        # turn the list of single-item-dicts to a multi-item dict
        # -> [{1: 2}, {3: 4}] --> {1: 2, 3: 4}
        # NOTE(TF) for Python 3.5 test reproducibility we need an OrderedDict -.^
        expected_step_arguments = OrderedDict(
            (
                argpair
                for argpairs in expected_step_arguments
                for argpair in argpairs.items()
            )
        )
        errors = assert_step_arguments(actual_step_arguments, expected_step_arguments)
        if errors:
            print_failure(matched_step_func, errors)
            return False
    print(cf.bold_forestGreen("✔"))
    return True
from typing import List
from typing import Dict
import requests
def get_grafana_dashboards_url(admin: bool) -> List[Dict]:
    """
    Get a list of dashboards available to the tenant.

    :admin (bool) A boolean representing admin status.

    Return a list of dashboard dictionaries whose 'url' fields have been
    rewritten to frontend-resolvable URLs.
    """
    search_request = format_grafana_admin_request('/api/search?query=[Grafonnet]')
    dashboards = requests.get(search_request).json()
    visible = []
    for dashboard in dashboards:
        folder = dashboard.get('folderTitle')
        # Non-admins may not see dashboards filed under the 'admin' folder.
        if admin or not folder or folder != 'admin':
            dashboard['url'] = format_grafana_frontend_request(dashboard['url'])
            visible.append(dashboard)
    return visible
def validate_comma_separated_list(argument):
    """Convert *argument* to a list, splitting its last entry on commas.

    A non-list value is first wrapped in a single-element list.  The last
    element is then removed, split on ',', and each piece stripped of
    surrounding whitespace; non-empty pieces are appended back.  The
    (possibly mutated) list is returned.
    """
    if not isinstance(argument, list):
        argument = [argument]
    tail = argument.pop()
    for piece in tail.split(u','):
        piece = piece.strip(u' \t\n')
        if piece:
            argument.append(piece)
    return argument
def get_view(brain):
    """Setup for view persistence test

    Sets a known parallel_scale on the brain's first figure and returns
    (parallel_scale, view, roll).  Under the 'test' (null) mlab backend the
    scene cannot be manipulated, so the function returns early — note this
    implicitly returns None rather than a tuple.
    """
    fig = brain._figures[0][0]
    if mlab.options.backend == 'test':
        # Null backend: no real scene to configure; bail out (returns None).
        return
    fig.scene.camera.parallel_scale = 50
    # Sanity-check that the camera accepted the value before reading it back.
    assert fig.scene.camera.parallel_scale == 50
    view, roll = brain.show_view()
    return fig.scene.camera.parallel_scale, view, roll
def model_description(formula):
    """Interpret a model formula and obtain a model description.

    Parameters
    ----------
    formula: string
        A string with a model description in formula language.

    Returns
    ----------
    An object of class ModelTerms with an internal description of the model
    interpreted from the formula.
    """
    # Scan -> parse -> resolve pipeline, one stage at a time.
    tokens = Scanner(formula).scan()
    parse_tree = Parser(tokens).parse()
    return Resolver(parse_tree).resolve()
import argparse
def setup():
"""
Parse command line arguments
Returns parsed arguments
"""
parser = argparse.ArgumentParser(description='Search Reddit Thing')
parser.add_argument(
'subreddit',
help="Enter the name of the subreddit to search.")
parser.add_argument(
'query',
help=("Enter search query. See {} for information on how to form search"
" queries.".format("https://www.reddit.com/wiki/search")))
parser.add_argument(
'-p', '--print', action="store_true", default=False,
help=("Pass this argument to print out detailed data."))
parser.add_argument(
'-t', '--title', action="store_true", default=False,
help=("Pass this argument to include post titles in data."))
parser.add_argument(
'-l', '--limit', action="store", type=int, default=None,
metavar='AMOUNT',
help=("Number of posts to grab. Default is as many as possible."))
parser.add_argument(
'-e', '--export', action="store", type=str, default=None,
metavar='FILENAME',
help=("Filename to export data to."))
parser.add_argument(
'-g', '--graph', action="store", type=str,
metavar='FILENAME',
help=("Export a graph of the data.")
)
return parser.parse_args() | 49748a64532fcedf3fcc96c8a56de224e6daac43 | 27,292 |
def eintragen_kaeufe(kliste, id_zu_objekt, id_zu_profil):
    """ Receives a list of dicts, each holding the content of one row of the
    registration table of the old db, plus a mapping from the old db's
    produkt_id to model instances of the new db.  Creates the corresponding
    Kauf (purchase) records.

    NOTE(review): the original docstring claimed a dict produkt_id -> model
    instance is returned, but the function returns nothing — confirm intent.

    The tricky part is mapping `format` to `art`; depending on the product
    type, the old db contains the following purchase formats:

    scholie: PDF, Kindle, ePub, Druck
    antiquariat: Druck
    programm: ''
    seminar: '', vorOrt
    salon: '', vorOrt, vor Ort, Stream
    media-salon: '', Stream
    media-vortrag: ''
    media-vorlesung: '' """
    def reg_zu_kauf(kauf):
        """ Takes one row of kliste and returns (objekt, art) for the Kauf
        to be created """
        objekt, type_alt = id_zu_objekt[kauf['event_id']]
        if type_alt in ['programm', 'seminar']:
            art = 'teilnahme'
        elif type_alt == 'antiquariat':
            art = 'kaufen'
        elif type_alt in ['scholie', 'buch']:
            # 'spam' is a sentinel for an empty format, resolved below by
            # quantity: single copies were PDFs, multiples were print runs.
            art = {'Druck': 'druck',
                   'PDF': 'pdf',
                   '': 'spam',
                   'ePub': 'epub',
                   'Kindle': 'mobi'}[kauf['format']]
            if art=='spam':
                if kauf['quantity']==1:
                    art = 'pdf'
                else:
                    art = 'druck'
        elif type_alt[:5] == 'media':
            art = 'aufzeichnung'
        elif type_alt == 'salon':
            art = 'aufzeichnung' # wrong, but kept to the customer's benefit
        return objekt, art
    # All purchases are written in one transaction: either every row of the
    # old table is migrated or none is.
    with transaction.atomic():
        for kauf in kliste:
            # Zeroed timestamps from the old db get a recognizable placeholder.
            if kauf['reg_datetime'][0] == '0':
                datum = '1111-11-11 11:11:11'
            else:
                datum = kauf['reg_datetime']
            if kauf['reg_notes']:
                kommentar = "Alte reg_id %s, notes %s" % (
                    kauf['reg_id'], kauf['reg_notes'])
            else:
                kommentar = "Aus alter DB mit reg_id %s" % kauf['reg_id']
            objekt, art = reg_zu_kauf(kauf)
            kunde = id_zu_profil[kauf['user_id']]
            menge = kauf['quantity']
            neu = Kauf.neuen_anlegen(objekt, art, menge, kunde, kommentar)
            # Overwrite the auto-set creation time with the historical one.
            neu.zeit = datum
            neu.save()
            print('Kauf von %s durch %s angelegt' % (objekt, kunde.user))
from datetime import datetime, timezone
def update_status(payload: Something, context: EventContext) -> Something:
    """
    Updates status of payload to PROCESSED and puts previous status in history.

    Fix: `timezone` was referenced below but never imported, which made the
    `Status(...)` construction raise NameError at runtime.

    :param payload: Something, object updated in place
    :param context: EventContext
    :return: the same payload, with the previous status (if any) appended to
        its history and its status set to PROCESSED with a current UTC
        timestamp
    """
    logger.info(context, "updating something status", extra=extra(something_id=payload.id))
    if payload.status:
        # Preserve the outgoing status before replacing it.
        payload.history.append(payload.status)
    payload.status = Status(
        ts=datetime.now(timezone.utc),
        type=StatusType.PROCESSED
    )
    return payload
def get_holdout_set(train, target_column):
    """Sample callable demonstrating how the Environment's `holdout_dataset` is evaluated.

    A holdout callable receives the train_dataset (pandas.DataFrame) and the
    target_column name (string) and must return two DataFrames: a modified
    train_dataset and a holdout_dataset — typically by splitting a portion of
    the rows off the former.  This example simply copies train_dataset, which
    is a VERY BAD IDEA in practice.  Don't actually do this.
    """
    holdout = train.copy()
    return train, holdout
def field_type(value):
    """Return the field's type name: "RefValue" for Ref instances, "StringValue" otherwise."""
    return "RefValue" if isinstance(value, Ref) else "StringValue"
import torch
def test_read_covars_manual_input(tmp_observe_class,
                                  covar_details_mapped_covar_mapped_names_tmp_observe_class,
                                  additional_text, monkeypatch):
    """
    test reading of covars from manual input by user. Monkeypatches reliance on function 'input'

    additional_text is parametrized elsewhere: a str value exercises the
    success path, any other type must trigger the AssertionError path with
    a specific message.
    """
    covariates = [1.1, 2.2, 200, -1.7]
    # temp class to execute the test
    cls = tmp_observe_class
    # add attribute 'initial_guess' required for '_read_covars_manual'
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    covar_tensor = torch.tensor([covariates], dtype=torch.double, device=device)
    cls.initial_guess = covar_tensor
    # add proposed_X attributed required for '_read_covars_manual'
    cls.proposed_X = covar_tensor
    # add attributes defining the covariate expectation
    cls.covar_details = covar_details_mapped_covar_mapped_names_tmp_observe_class[0]
    cls.covar_mapped_names = covar_details_mapped_covar_mapped_names_tmp_observe_class[1]
    cls.sorted_pandas_columns = covar_details_mapped_covar_mapped_names_tmp_observe_class[2]
    # monkeypatch: replace builtins.input so the method reads our fixed
    # comma-separated covariate string instead of blocking on stdin
    def mock_input(x):  # mock function to replace 'input' for unit testing purposes
        return ", ".join([str(x) for x in covariates])
    monkeypatch.setattr("builtins.input", mock_input)
    # run the test
    # different tests for cases where it's supposed to pass vs fail
    if isinstance(additional_text, str):
        covars_candidate_float_tensor = cls._read_covars_manual_input(additional_text)
        print(covars_candidate_float_tensor)
        # assert that the right elements are returned in 'covars_candidate_float_tensor'
        for i in range(covars_candidate_float_tensor.size()[1]):
            assert covars_candidate_float_tensor[0, i].item() == covariates[i]
    # cases where type of additonal_text should make test fail
    else:
        with pytest.raises(AssertionError) as e:
            covars_candidate_float_tensor = cls._read_covars_manual_input(additional_text)
        assert str(e.value) == "greattunes._observe._read_covars_manual_input: wrong datatype of parameter 'additional_text'. Was expecting 'str' but received " + str(type(additional_text))
def remove_overlapping_squares_v2(squares_dict, array_type):
    """
    Remove squares whose min_x and min_y both coincide with another square's,
    then assign the survivors to uniform row/column groups.

    :param squares_dict: dict with overlapping squares
    :param array_type: "Air_100" is the only one currently supported
    :return: (dict of squares, dataframe with x and y col/row assignments)
    """
    coords = make_df_with_minx_miny(squares_dict)
    deduped = coords.drop_duplicates(subset=['min_x', 'min_y'])
    # Group squares sharing a row (x), then a column (y).
    with_rows = assign_x_in_same_rows(deduped.sort_values('min_x'))
    with_cols = assign_y_in_same_columns(with_rows)
    ordered = with_cols.sort_values(by=['x_groups', 'y_groups'])
    squares, df = make_uniform_squares(ordered, array_type=array_type)
    return squares, df
def depth_to_space(x, scale, use_default=False):
    """Depth-to-space: rearrange channel data of an NHWC tensor into spatial
    blocks, upscaling height and width by `scale`.

    When `use_default` is True the built-in tf.depth_to_space is used;
    otherwise the equivalent reshape/transpose/reshape sequence is applied.
    """
    if use_default:
        return tf.depth_to_space(x, scale)
    b, h, w, c = (int(dim) for dim in x.shape)
    y = tf.reshape(x, [b, h, w, scale, scale, -1])
    y = tf.transpose(y, [0, 1, 3, 2, 4, 5])
    return tf.reshape(y, [b, h * scale, w * scale, -1])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.