content
stringlengths 22
815k
| id
int64 0
4.91M
|
|---|---|
def test_stat_check(string, exception):
    """Verify stat_check() parses the entry under the supplied exception context."""
    with exception:
        parsed = stat_check(string)
        print(parsed)
| 5,335,300
|
def git_hook():
    """Run pylama over the files staged for the current git commit."""
    from .main import check_files
    _, files_modified, _ = run("git diff-index --cached --name-only HEAD")
    options = parse_options()
    setup_logger(options)
    staged = [str(path) for path in files_modified]
    check_files(staged, options)
| 5,335,301
|
def get_lockfile_path(repo_name: str) -> Path:
    """Return the path of the lockfile used to lock the given git repo.

    Ensures the lockfile directory exists before returning.

    :param repo_name: name of the repository the lock belongs to
    :return: path to ``<repo_name>_lock_file.lock`` inside the lock directory
    """
    # exist_ok=True avoids the TOCTOU race of the old is_dir()/mkdir() pair;
    # parents=True also handles a missing parent directory.
    _lockfile_path.mkdir(parents=True, exist_ok=True)
    return _lockfile_path / f"{repo_name}_lock_file.lock"
| 5,335,302
|
def path_to_graph(hypernym_list, initialnoun):
    """Build a linear directed graph out of a hypernym chain.

    :param hypernym_list: list of hypernyms for a word as obtained from wordnet
    :type hypernym_list: [str]
    :param initialnoun: the initial noun (we need this to mark it as leaf in the tree)
    :type initialnoun: str
    :return: the linear directed graph of the chain
    :rtype: :class:`networkx.DiGraph`
    """
    chain = nx.DiGraph()
    # The original word is tagged as 'seed' so 'importance' can be tracked later.
    chain.add_node(initialnoun, seed=True)
    current = initialnoun
    for hypernym in reversed(hypernym_list):
        name = hypernym.name()
        # Edge weights are placeholders; distances are handled later.
        chain.add_edge(current, name, similarity=1.0, distance=1.0)
        chain.nodes[name]["seed"] = False
        current = name
    return chain
| 5,335,303
|
def make_3d_grid():
    """Generate a 3d grid of evenly spaced integer points (result shape 3x21x21x5)."""
    x_span, y_span, z_span = slice(0, 21), slice(0, 21), slice(0, 5)
    return np.mgrid[x_span, y_span, z_span]
| 5,335,304
|
def test_main_reboot(capsys, reset_globals):
    """Ensure --reboot drives Node.reboot() on the connected radio."""
    sys.argv = ['', '--reboot']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    # Print a marker so the call can be detected in captured stdout.
    mocked_node.reboot.side_effect = lambda: print('inside mocked reboot')
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        main()
        out, err = capsys.readouterr()
        assert err == ''
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'inside mocked reboot', out, re.MULTILINE)
        mo.assert_called()
| 5,335,305
|
def rho(flag, F, K, t, r, sigma):
    """Return the Black rho of an option.
    :param flag: 'c' or 'p' for call or put.
    :type flag: str
    :param F: underlying futures price
    :type F: float
    :param K: strike price
    :type K: float
    :param t: time to expiration in years
    :type t: float
    :param r: annual risk-free interest rate
    :type r: float
    :param sigma: volatility
    :type sigma: float
    :returns: float
    ::
    ==========================================================
    The text book analytical formula does not multiply by .01,
    but in practice rho is defined as the change in price
    for each 1 percent change in r, hence we multiply by 0.01.
    ==========================================================
    >>> F = 49
    >>> K = 50
    >>> r = .05
    >>> t = 0.3846
    >>> sigma = 0.2
    >>> flag = 'c'
    >>> v1 = rho(flag, F, K, t, r, sigma)
    >>> v2 = -0.0074705380059582258
    >>> abs(v1-v2) < .000001
    True
    >>> flag = 'p'
    >>> v1 = rho(flag, F, K, t, r, sigma)
    >>> v2 = -0.011243286001308292
    >>> abs(v1-v2) < .000001
    True
    """
    price = black(flag, F, K, t, r, sigma)
    # Scale by 0.01: rho is quoted per 1% move in r (see docstring note).
    return -t * price * .01
| 5,335,306
|
async def novel_series(id: int, endpoint: PixivEndpoints = Depends(request_client)):
    """
    ## Name: `novel_series`
    > Fetch information about a novel series.
    ---
    ### Required:
    - ***int*** **`id`**
        - Description: ID of the novel series
    """
    return await endpoint.novel_series(id=id)
| 5,335,307
|
def to_raw(
    y: np.ndarray,
    low: np.ndarray,
    high: np.ndarray,
    eps: float = 1e-4
) -> np.ndarray:
    """Scale the input y from [-1, 1] onto [low, high]."""
    ones = np.ones_like(y)
    # Warn if the argument is out of bounds; this should not happen.
    within = np.all(y >= -ones - eps) and np.all(y <= ones + eps)
    if not within:
        logger.warning(f"argument out of bounds, {y}, {low}, {high}")
    # Clip in case the warning above is ignored.
    clipped = np.clip(y, -ones, ones)
    # Affine map from [-1, 1] to [low, high].
    return (clipped * (high - low) + (high + low)) / 2.
| 5,335,308
|
def etched_lines(image):
    """
    Filter the given image into a drawing-like representation via a
    blur/divide blend followed by an Adaptive Gaussian Threshold.
    """
    BLOCK_SIZE = 61
    C_OFFSET = 41
    MEDIAN_KSIZE = 7
    MAX_VALUE = 255
    # NOTE(review): appears to expect a single-channel image; the grayscale
    # conversion was left commented out here — confirm against callers.
    # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    smoothed = cv2.GaussianBlur(image, (21, 21), 0, 0)
    blended = cv2.divide(image, smoothed, scale=256)
    denoised = cv2.medianBlur(blended, MEDIAN_KSIZE)
    return cv2.adaptiveThreshold(
        denoised, MAX_VALUE, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY, BLOCK_SIZE, C_OFFSET)
| 5,335,309
|
def init(obj, yaml_file):
    """Initialize a new dashboard YAML file from interactive prompts."""
    name = click.prompt('Dashboard name', default='Example dashboard')
    alert_teams = click.prompt('Alert Teams (comma separated)', default='Team1, Team2')
    user = obj.config.get('user', 'unknown')
    teams = [team.strip() for team in alert_teams.split(',')]
    dashboard = {
        'id': '',
        'name': name,
        'last_modified_by': user,
        'alert_teams': teams,
        'tags': [],
        'view_mode': 'FULL',
        'shared_teams': [],
        'widget_configuration': [],
    }
    yaml_file.write(dump_yaml(dashboard).encode('utf-8'))
    ok()
| 5,335,310
|
def signal(sig, action): # real signature unknown; restored from __doc__
    """
    signal(sig, action) -> action
    Set the action for the given signal. The action can be SIG_DFL,
    SIG_IGN, or a callable Python object. The previous action is
    returned. See getsignal() for possible return values.
    *** IMPORTANT NOTICE ***
    A signal handler function is called with two arguments:
    the first is the signal number, the second is the interrupted stack frame.
    """
    # Stub only: the real implementation lives in the C extension; this
    # definition exists so IDEs/tools can see the documented signature.
    pass
| 5,335,311
|
def get_last_position(fit, warmup=False):
    """Parse last position from fit object
    Parameters
    ----------
    fit : StanFit4Model
    warmup : bool
        If True, returns the last warmup position, when warmup has been done.
        Otherwise function returns the first sample position.
    Returns
    -------
    list
        list contains a dictionary of last draw from each chain.
    """
    fit._verify_has_samples()
    extracted = fit.extract(permuted=False, pars=fit.model_pars, inc_warmup=warmup)
    # Default to the final draw; with warmup, index the last warmup draw instead.
    draw_idx = -1
    if warmup:
        draw_idx += max(1, fit.sim["warmup"])
    n_chains = fit.sim["chains"]
    return [
        {name: samples[draw_idx, chain] for name, samples in extracted.items()}
        for chain in range(n_chains)
    ]
| 5,335,312
|
def find_fast_route(objective, init, alpha=1, threshold=1e-3, max_iters=1e3):
    """
    Optimize a FastRoute objective with Newton's method to find a fast route
    between the start and finish points.
    Arguments:
        objective : an initialized FastRoute object with preset start and finish
            points, velocities and initialization vector.
        init : (N-1,) numpy array, initial guess for the crossing points
        alpha : step size for the NewtonOptimizer
        threshold : stopping criterion |x_(k+1) - x_k| < threshold
        max_iters : maximal number of iterations (stopping criterion)
    Return:
        route_time : scalar
        route : (N-1,) numpy array with x coordinates of the optimal route,
            i.e., crossing points (not including start and finish point)
        num_iters : number of iterations
    """
    optimizer = NewtonOptimizer(func=objective, alpha=alpha, init=init)
    # optimize() already yields (route_time, route, num_iters).
    return optimizer.optimize(threshold=threshold, max_iters=max_iters)
| 5,335,313
|
def main():
    """Interact with the teragested parser and a shell script."""
    # Placeholder entry point; no behavior implemented yet.
    pass
| 5,335,314
|
def get_version(filename="telepresence/__init__.py"):
    """Extract the ``__version__`` string from the given file.

    Returns None implicitly when no version assignment is found.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    version_pattern = re.compile("__version__ *= *['\"](.*)['\"]")
    with open(os.path.join(base_dir, filename)) as initfile:
        for line in initfile:
            match = version_pattern.match(line)
            if match:
                return match.group(1)
| 5,335,315
|
def find_child_files(path, searchRecursively=False, wildCardPattern="."):
    """Find files in the given directory.

    :param path: directory to search
    :param searchRecursively: if True, also search subdirectories
    :param wildCardPattern: only match files with this ".xxx" extension;
        ".", ".*", "*" or "" match every file
    :return: list of matching file paths; empty if *path* is not a directory
    """
    all_search_list = ['.', '.*', '*', '']
    matches = []
    if not exists_as_dir(path):
        return matches
    root = get_full_path_with_ext(path)
    # Normalize the pattern once, outside the loop.
    match_all = wildCardPattern in all_search_list
    if not match_all and not wildCardPattern.startswith('.'):
        wildCardPattern = '.' + wildCardPattern
    for fpath, _, fnames in os.walk(root):
        # Bug fix: the original compared paths with `is not` (object identity),
        # which is True for distinct-but-equal strings and broke non-recursive
        # searches. Compare by value instead.
        if fpath != root and not searchRecursively:
            break
        for filename in fnames:
            if not match_all:
                # Bug fix: `wildCardPattern is not '.'` also used identity.
                if not filename.endswith(wildCardPattern) and wildCardPattern != '.':
                    continue
            matches.append(os.path.join(fpath, filename))
    return matches
| 5,335,316
|
def check_tx_success(result):
    """
    Check whether :meth:`UcanServer.write_can_msg_ex` wrote all CAN message(s).
    :param ReturnCode result: Error code of the function.
    :return: True if CAN message(s) was(were) written successfully, otherwise False.
    :rtype: bool
    """
    succeeded = result.value == ReturnCode.SUCCESSFUL
    return succeeded
| 5,335,317
|
def cache_contains_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable,
                        query_id: Optional[int] = None) -> 'APIResult':
    """
    Returns a value indicating whether all given keys are present in cache.
    :param connection: connection to Ignite server,
    :param cache_info: cache meta info,
    :param keys: a list of keys or (key, type hint) tuples,
    :param query_id: a value generated by client and returned as-is
    in response.query_id. When the parameter is omitted, a random value
    is generated,
    :return: API result data object. Contains zero status and a bool value
    retrieved on success: `True` when all keys are present, `False` otherwise,
    non-zero status and an error description on failure.
    """
    # Thin public wrapper; all protocol work happens in the private helper.
    return __cache_contains_keys(connection, cache_info, keys, query_id)
| 5,335,318
|
def add_hosts_to_cluster(cluster_host_list):
    """
    Add hosts to cluster
    :param cluster_host_list: List of hosts to add to the cluster
    :return:
    """
    # NOTE(review): these module-level flags are *strings* ('True'/'False'),
    # not booleans — presumably other code compares against the string values;
    # confirm before changing the type.
    global host_add_failure, api_failure_message
    host_add_failure = 'False'
    body = cm_client.ApiHostRefList(cluster_host_list)
    try:
        # clusters_api, cluster_name and debug are module-level globals.
        api_response = clusters_api.add_hosts(cluster_name, body=body)
        if debug == 'True':
            pprint(api_response)
    except ApiException as e:
        # Record the failure in the global flag rather than raising.
        print('Exception when calling ClustersResourceApi->add_hosts: {}\n'.format(e))
        host_add_failure = 'True'
| 5,335,319
|
def discount_cumsum_trun(x, discount, length):
    """
    Compute discounted cumulative sums of vectors, truncated per segment.
    :param x:
        vector x,
        [x0,
         x1,
         x2,
         x3,
         x4]
    :param discount: discount factor applied per step within a segment
    :param length:
        vector of segment lengths,
        [3,
         2]
    :return:
        discounted cumsums restarted at each segment boundary:
        [x0 + discount * x1 + discount^2 * x2,
         x1 + discount * x2,
         x2,
         x3 + discount * x4,
         x4]
    """
    ret_arr = x.copy()
    offset = 0
    # Apply the discounted cumsum within each segment independently.
    # (Loop variable renamed from `len`, which shadowed the builtin.)
    for seg_len in length:
        segment = ret_arr[offset:offset + seg_len]
        ret_arr[offset:offset + seg_len] = discount_cumsum(segment, discount)
        offset += seg_len
    return ret_arr
| 5,335,320
|
def wg_install_scripts(names):
    """Queue the space-separated script names, then start the installer."""
    global wg_scripts_to_install
    wg_scripts_to_install.extend(names.split(" "))
    wg_install_next_script()
| 5,335,321
|
def test_scale_merged_intensities_phenix(hewl_merged, ref_hewl, mean_intensity_method):
    """
    Compare phenix.french_wilson to scale_merged_intensities(). Current
    test criteria are that >95% of I, SigI, F, and SigF are within 2%.
    """
    mtz = hewl_merged.dropna()
    scaled = scale_merged_intensities(
        mtz, "IMEAN", "SIGIMEAN", mean_intensity_method=mean_intensity_method
    )
    # No reflections may be dropped.
    assert len(scaled) == len(ref_hewl)
    # Input intensities must pass through unchanged.
    assert np.array_equal(scaled["IMEAN"].to_numpy(), ref_hewl["I"].to_numpy())
    assert np.array_equal(scaled["SIGIMEAN"].to_numpy(), ref_hewl["SIGI"].to_numpy())

    def fraction_close(col, ref_col):
        # Fraction of entries within 2% relative tolerance of the reference.
        ours = scaled[col].to_numpy()
        theirs = ref_hewl[ref_col].to_numpy()
        return np.isclose(ours, theirs, rtol=0.02).sum() / len(scaled)

    for col, ref_col in (("FW-I", "I"), ("FW-SIGI", "SIGI"),
                         ("FW-F", "F"), ("FW-SIGF", "SIGF")):
        assert fraction_close(col, ref_col) >= 0.95
| 5,335,322
|
def get_canonical(flop):
    """
    Returns the canonical version of the given flop.
    Canonical flops are sorted. The first suit is 'c' and, if applicable,
    the second is 'd' and the third is 'h'.
    Args:
        flop (tuple): three pokertools.Card objects
    Returns
        A tuple of three pokertools.Card objects which represent
        the canonical version of the given flop.
    >>> flop = (CARDS['Ks'], CARDS['2c'], CARDS['3s'])
    >>> get_canonical(flop)
    (<Card: 2c>, <Card: 3d>, <Card: Kd>)
    """
    # Sort by rank first, then remap the suit *pattern* (AAA/AAB/ABA/ABB/ABC)
    # onto the fixed suits c, d, h.
    card1, card2, card3 = sorted(flop)
    A, B, C = "cdh"
    if card1.suit == card2.suit == card3.suit:
        # Pattern AAA: monotone flop.
        return (
            CARDS[card1.rank + A],
            CARDS[card2.rank + A],
            CARDS[card3.rank + A],
        )
    elif card1.suit == card2.suit != card3.suit:
        # Pattern AAB.
        return (
            CARDS[card1.rank + A],
            CARDS[card2.rank + A],
            CARDS[card3.rank + B],
        )
    elif card1.suit == card3.suit != card2.suit:
        # Pattern ABA.
        # Special case: if the 2nd and 3rd cards are a pair e.g. the flop is
        # [Jc, Qd, Qc], then our suit changes have resulted in an
        # unsorted flop! The correct canonical form is [Jc, Qc, Qd].
        return tuple(sorted([
            CARDS[card1.rank + A],
            CARDS[card2.rank + B],
            CARDS[card3.rank + A],
        ]))
    elif card1.suit != card2.suit == card3.suit:
        # Pattern ABB.
        # Special case: if the 1st and 2nd cards are a pair e.g. flop is
        # [2c, 2d, 8d], that is isomorphic with those cards being switched
        # e.g. [2d, 2c, 8d] -- which forms the suit pattern already
        # covered above: 'ABA'. Thus, it can be transformed to [2c, 2d, 8c].
        # This version has higher priority lexicographically -- it has more
        # clubs! To make this change we can simply change the suit of the
        # third card to 'c'.
        if card1.rank == card2.rank:
            return (
                CARDS[card1.rank + A],
                CARDS[card2.rank + B],
                CARDS[card3.rank + A],
            )
        return (
            CARDS[card1.rank + A],
            CARDS[card2.rank + B],
            CARDS[card3.rank + B],
        )
    elif card1.suit != card2.suit != card3.suit:
        # Pattern ABC: rainbow flop (chained comparison; earlier branches have
        # already removed the ABA case).
        return (
            CARDS[card1.rank + A],
            CARDS[card2.rank + B],
            CARDS[card3.rank + C],
        )
| 5,335,323
|
def normalize_path(path):
    """Return the absolute, normalized form of *path*.

    Expands user symbols like ``~`` and resolves relative components.
    """
    expanded = os.path.expanduser(os.path.normpath(path))
    return os.path.abspath(expanded)
| 5,335,324
|
def _version(lib_name):
    """
    Return the version string of a package.
    Falls back to "available" when the version cannot be determined.
    """
    module = importlib.import_module(lib_name)
    return getattr(module, "__version__", "available")
| 5,335,325
|
def DeltaDeltaP(y, treatment, left_mask):
    """Absolute difference between ATEs of two groups."""
    ate_left = ATE(y[left_mask], treatment[left_mask])
    ate_right = ATE(y[~left_mask], treatment[~left_mask])
    return np.abs(ate_left - ate_right)
| 5,335,326
|
def test_recycling(pool):
    """
    Test no errors are raised for multiple rounds of getting and putting. Kind
    of a "catch all" to make sure no errors crop up when resources are
    recycled.
    """
    # Single-threaded: drain the whole pool, then release in reverse order.
    for _ in range(5):
        resources = [pool.get_resource() for _ in range(pool.maxsize)]
        for resource in reversed(resources):
            resource.close()

    # Multi-threaded: several workers recycling concurrently.
    def worker(p):
        for _ in range(5):
            p.get_resource().close()

    workers = [threading.Thread(target=worker, args=(pool, )) for _ in range(5)]
    for thread in workers:
        thread.start()
    for thread in workers:
        thread.join()
    assert pool._available == pool.size == pool.capacity
| 5,335,327
|
def p_assignment_operator(p):
    """assignment_operator : '='
    | MUL_ASSIGN
    | DIV_ASSIGN
    | MOD_ASSIGN
    | ADD_ASSIGN
    | SUB_ASSIGN
    | LEFT_ASSIGN
    | RIGHT_ASSIGN
    | AND_ASSIGN
    | XOR_ASSIGN
    | OR_ASSIGN
    """
    # NOTE: the docstring above is the yacc grammar rule consumed by PLY —
    # it is part of the program's behavior, not documentation; do not edit it.
    # Pass the matched operator token through as this production's value.
    p[0] = p[1]
| 5,335,328
|
def test_source_int_name_remote():
    """
    test when file_client remote and
    source_interface_name is set and
    interface is down
    """
    # Interface fixture: bond0.1234 is configured but administratively down.
    interfaces = {
        "bond0.1234": {
            "hwaddr": "01:01:01:d0:d0:d0",
            "up": False,
            "inet": [
                {
                    "broadcast": "111.1.111.255",
                    "netmask": "111.1.0.0",
                    "label": "bond0",
                    "address": "111.1.0.1",
                }
            ],
        }
    }
    opts = salt.config.DEFAULT_MINION_OPTS.copy()
    with patch.dict(
        opts,
        {
            "ipv6": False,
            "master": "127.0.0.1",
            "master_port": "4555",
            "file_client": "remote",
            "source_interface_name": "bond0.1234",
            "source_ret_port": 49017,
            "source_publish_port": 49018,
        },
    ), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
        # With the interface down, resolve_dns must keep the configured source
        # ports but not bind a source IP from the downed interface.
        assert salt.minion.resolve_dns(opts) == {
            "master_ip": "127.0.0.1",
            "source_ret_port": 49017,
            "source_publish_port": 49018,
            "master_uri": "tcp://127.0.0.1:4555",
        }
| 5,335,329
|
def flow_cli(ctx):
    """
    Fate Flow Client
    """
    # Shared click context object carries the resolved server settings.
    ctx.ensure_object(dict)
    # `flow init` must run without any existing configuration.
    if ctx.invoked_subcommand == 'init':
        return
    with open(os.path.join(os.path.dirname(__file__), "settings.yaml"), "r") as fin:
        config = yaml.safe_load(fin)
    if not config.get('api_version'):
        raise ValueError('api_version in config is required')
    ctx.obj['api_version'] = config['api_version']
    is_server_conf_exist = False
    if config.get("server_conf_path"):
        conf_path = Path(config["server_conf_path"])
        is_server_conf_exist = conf_path.exists()
    # Preferred source: the server conf file, optionally overridden by a
    # sibling "local.<name>" file.
    if is_server_conf_exist:
        server_conf = yaml.safe_load(conf_path.read_text())["fateflow"]
        local_conf_path = conf_path.with_name(f"local.{conf_path.name}")
        if local_conf_path.exists():
            server_conf.update(yaml.safe_load(local_conf_path.read_text()).get("fateflow", {}))
        ctx.obj["ip"] = server_conf["host"]
        ctx.obj["http_port"] = server_conf["http_port"]
        ctx.obj["server_url"] = f"http://{ctx.obj['ip']}:{ctx.obj['http_port']}/{ctx.obj['api_version']}"
        if server_conf.get('http_app_key') and server_conf.get('http_secret_key'):
            ctx.obj['app_key'] = server_conf['http_app_key']
            ctx.obj['secret_key'] = server_conf['http_secret_key']
    # Fallback: explicit ip/port from settings.yaml.
    elif config.get("ip") and config.get("port"):
        ctx.obj["ip"] = config["ip"]
        ctx.obj["http_port"] = int(config["port"])
        ctx.obj["server_url"] = f"http://{ctx.obj['ip']}:{ctx.obj['http_port']}/{config['api_version']}"
        if config.get('app_key') and config.get('secret_key'):
            ctx.obj['app_key'] = config['app_key']
            ctx.obj['secret_key'] = config['secret_key']
    else:
        raise ValueError("Invalid configuration file. Did you run 'flow init'?")
    # Truthy when either configuration source supplied a usable server address.
    ctx.obj["init"] = is_server_conf_exist or (config.get("ip") and config.get("port"))
| 5,335,330
|
def start(isdsAppliance, serverID='directoryserver', check_mode=False, force=False):
    """
    Restart the specified appliance server.

    No-op (unchanged return object) unless forced or the precheck passes;
    in check mode only reports that a change would happen.
    """
    needed = force is True or _check(isdsAppliance, serverID, action='start') is True
    if not needed:
        return isdsAppliance.create_return_object()
    if check_mode is True:
        return isdsAppliance.create_return_object(changed=True)
    return isdsAppliance.invoke_post("Restarting the service " + serverID,
                                     "/widgets/server/start/" + serverID,
                                     {})
| 5,335,331
|
def test_and_get_codenode(codenode, expected_code_type, use_exceptions=False):
    """
    Pass a code node and an expected code (plugin) type. Check that the
    code exists, is unique, and return the Code object.
    :param codenode: the name of the code to load (in the form label@machine)
    :param expected_code_type: a string with the plugin that is expected to
      be loaded. In case no plugins exist with the given name, show all existing
      plugins of that type
    :param use_exceptions: if True, raise a ValueError exception instead of
      calling sys.exit(1)
    :return: a Code object
    """
    import sys
    from aiida.common.exceptions import NotExistent
    from aiida.orm import Code
    try:
        if codenode is None:
            raise ValueError
        # NOTE(review): despite the docstring, codenode is used directly as the
        # Code object here (no lookup by label) — confirm against callers.
        code = codenode
        if code.get_input_plugin_name() != expected_code_type:
            raise ValueError
    except (NotExistent, ValueError):
        # Failure path: list all codes matching the expected plugin type to
        # help the user pick a valid one.
        from aiida.orm.querybuilder import QueryBuilder
        qb = QueryBuilder()
        qb.append(Code,
                  filters={'attributes.input_plugin':
                               {'==': expected_code_type}},
                  project='*')
        valid_code_labels = ["{}@{}".format(c.label, c.get_computer().name)
                             for [c] in qb.all()]
        if valid_code_labels:
            msg = ("Pass as further parameter a valid code label.\n"
                   "Valid labels with a {} executable are:\n".format(
                       expected_code_type))
            msg += "\n".join("* {}".format(l) for l in valid_code_labels)
            if use_exceptions:
                raise ValueError(msg)
            else:
                print(msg)#, file=sys.stderr)
                sys.exit(1)
        else:
            msg = ("Code not valid, and no valid codes for {}.\n"
                   "Configure at least one first using\n"
                   "    verdi code setup".format(
                       expected_code_type))
            if use_exceptions:
                raise ValueError(msg)
            else:
                print(msg)#, file=sys.stderr)
                sys.exit(1)
    return code
| 5,335,332
|
def reverse_crop(
    im_arr: np.ndarray, crop_details: dict
) -> Dict[str, Tuple[Image.Image, int]]:
    """Return the recovered image and the number of annotated pixels per
    lat_view. If the lat_view annotation has no annotations, nothing is added
    for that image."""
    width = 360
    height = 720  # TODO: add this variable to crop details and read it from there
    recovered_images = {}
    for lat_view, details in crop_details.items():
        lat_view_arr = slice_arr_by_lat_view(im_arr, lat_view, width)
        # Negative values are treated as annotation markers and mapped to 255.
        lat_view_arr[lat_view_arr < 0] = 255
        annotated_pixels = np.count_nonzero(lat_view_arr)
        if annotated_pixels == 0:
            continue  # no annotations on this lat_view -> skip
        # Undo the transforms recorded in crop_details, in reverse order:
        # flip first, then rotation (negative k reverses rot90).
        if details["flip"]:
            lat_view_arr = np.flip(lat_view_arr, axis=0)
        if details["rotation"] != 0:
            lat_view_arr = np.rot90(lat_view_arr, k=-details["rotation"])
        # Paste the slice back into a blank full-size canvas at its crop rows.
        new_arr = np.zeros(shape=[height, width], dtype=np.uint8)
        new_arr[details["crop_start"] : details["crop_end"], :] = lat_view_arr
        new_im = Image.fromarray(new_arr)
        recovered_images[lat_view] = (new_im, annotated_pixels)
    return recovered_images
| 5,335,333
|
def get_dates_keyboard(dates):
    """
    Build the inline keyboard of entry dates (two rows: first three buttons,
    then the rest).
    """
    buttons = [
        InlineKeyboardButton(
            text=entry['entry_date'],
            callback_data=date_callback.new(date_str=entry['entry_date'], entry_date=entry['entry_date'])
        )
        for entry in dates
    ]
    return InlineKeyboardMarkup(inline_keyboard=[buttons[:3], buttons[3:]])
| 5,335,334
|
def test_match_fn():
    """Test that dns_matches() reports a hit for a matching regexp."""
    domain = "duckduckgo.com"
    regexp = "duck"
    # `== True` is an anti-pattern; assert truthiness directly.
    assert dns_matches(domain, regexp)
| 5,335,335
|
def fetch_price(zone_key='FR', session=None, target_datetime=None,
                logger=logging.getLogger(__name__)):
    """Requests the last known power price of a given country
    Arguments:
    ----------
    zone_key: used in case a parser is able to fetch multiple countries
    session: request session passed in order to re-use an existing session
    target_datetime: the datetime for which we want production data. If not
      provided, we should default it to now. If past data is not available,
      raise a NotImplementedError. Beware that the provided target_datetime is
      UTC. To convert to local timezone, you can use
      `target_datetime = arrow.get(target_datetime).to('America/New_York')`.
      Note that `arrow.get(None)` returns UTC now.
    logger: an instance of a `logging.Logger` that will be passed by the
      backend. Information logged will be publicly available so that correct
      execution of the logger can be checked. All Exceptions will automatically
      be logged, so when something's wrong, simply raise an Exception (with an
      explicit text). Use `logger.warning` or `logger.info` for information
      that can useful to check if the parser is working correctly. A default
      logger is used so that logger output can be seen when coding / debugging.
    Returns:
    --------
    If no data can be fetched, any falsy value (None, [], False) will be
    ignored by the backend. If there is no data because the source may have
    changed or is not available, raise an Exception.
    A dictionary in the form:
    {
      'zoneKey': 'FR',
      'currency': EUR,
      'datetime': '2017-01-01T00:00:00Z',
      'price': 0.0,
      'source': 'mysource.com'
    }
    """
    if target_datetime:
        raise NotImplementedError(
            'This parser is not yet able to parse past dates')
    r = session or requests.session()
    url = 'https://api.someservice.com/v1/price/latest'
    response = r.get(url)
    # Bug fix: the status check previously ran on the *session* object before
    # any request was made. It must inspect the response after the GET, and an
    # explicit exception survives `python -O` (assert would be stripped).
    if response.status_code != 200:
        raise Exception(
            'Failed to fetch price: HTTP {}'.format(response.status_code))
    obj = response.json()
    data = {
        'zoneKey': zone_key,
        'currency': 'EUR',
        'price': obj['price'],
        'source': 'someservice.com',
    }
    # Parse the datetime and return a python datetime object
    data['datetime'] = arrow.get(obj['datetime']).datetime
    return data
| 5,335,336
|
async def help_test_update_with_json_attrs_not_dict(
    hass, mqtt_mock_entry_with_yaml_config, caplog, domain, config
):
    """Test attributes get extracted from a JSON result.
    This is a test helper for the MqttAttributes mixin.

    Publishes a JSON *list* (not a dict) to the attributes topic and asserts
    that no attribute is set and the non-dict warning is logged.
    """
    # Add JSON attributes settings to config (deep copy so the caller's
    # config template is not mutated).
    config = copy.deepcopy(config)
    config[domain]["json_attributes_topic"] = "attr-topic"
    assert await async_setup_component(
        hass,
        domain,
        config,
    )
    await hass.async_block_till_done()
    await mqtt_mock_entry_with_yaml_config()
    # A JSON list is valid JSON but not a dictionary of attributes.
    async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
    state = hass.states.get(f"{domain}.test")
    assert state.attributes.get("val") is None
    assert "JSON result was not a dictionary" in caplog.text
| 5,335,337
|
def test_vw_load_paused():
    """
    Verify no process is spawned when the predictor starts from a paused model.
    """
    paused_model = DeployedModel(**get_vw_payload(1))
    paused_model.status = ModelStatus.paused
    predictor = VWPredictor(model=paused_model, verify_on_load=False)
    assert predictor.process is None
| 5,335,338
|
def cmd_asydns(url, generate, revoke, verbose):
    """Requests a DNS domain name based on public and private
    RSA keys using the AsyDNS protocol https://github.com/portantier/asydns
    Example:
    \b
    $ habu.asydns -v
    Generating RSA key ...
    Loading RSA key ...
    {
        "ip": "181.31.41.231",
        "name": "07286e90fd6e7e6be61d6a7919967c7cf3bbfb23a36edbc72b6d7c53.a.asydns.org"
    }
    \b
    $ dig +short 07286e90fd6e7e6be61d6a7919967c7cf3bbfb23a36edbc72b6d7c53.a.asydns.org
    181.31.41.231
    """
    if verbose:
        logging.basicConfig(level=logging.INFO, format='%(message)s')
    #homedir = Path(pwd.getpwuid(os.getuid()).pw_dir)
    homedir = Path(os.path.expanduser('~'))
    dotdir = homedir / '.asydns'
    dotdir.mkdir(exist_ok=True)
    pub_file = dotdir / 'rsa.pub'
    key_file = dotdir / 'rsa.key'
    # Generate a fresh keypair on request, or on first run.
    if generate or not key_file.is_file():
        logging.info('Generating RSA key ...')
        key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend()
        )
        pub = key.public_key()
        key_pem = key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()
        )
        pub_key = pub.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        with key_file.open('w') as k:
            k.write(key_pem.decode())
        with pub_file.open('w') as p:
            p.write(pub_key.decode())
    logging.info('Loading RSA key ...')
    # NOTE(review): these `with` statements rebind key_file/pub_file from Path
    # objects to file handles — it works, but the names are shadowed below.
    with key_file.open("rb") as key_file:
        key = serialization.load_pem_private_key(
            key_file.read(),
            password=None,
            backend=default_backend()
        )
    with pub_file.open("r") as pub_file:
        pub_pem = pub_file.read()
    # Fetch the server challenge, sign it, then register or revoke the name.
    r = requests.get(url + '/api')
    if r.status_code != 200:
        logging.error('Error')
        logging.error(r.content.decode())
        return False
    j = r.json()
    challenge = base64.b64decode(j['challenge'])
    response = key.sign(
        challenge,
        padding.PKCS1v15(
        ),
        hashes.SHA224()
    )
    response = base64.b64encode(response).decode()
    if revoke:
        r = requests.delete(url + '/api', json={'pub': pub_pem, 'challenge' : j['challenge'], 'response': response})
    else:
        r = requests.post(url + '/api', json={'pub': pub_pem, 'challenge' : j['challenge'], 'response': response})
    if r.status_code != 200:
        logging.error('Error')
        logging.error(r.content.decode())
        return False
    print(json.dumps(r.json(), indent=4))
    return True
| 5,335,339
|
def get_stats(method='histogram', save=True, train=True):
    """
    Computes statistics, histogram, dumps the object to file and returns it

    Returns the cached pickled stats when FILE_STATS exists, otherwise trains
    a new model when *train* is True, else returns None.
    """
    # Cached stats take precedence over retraining.
    if os.path.exists(FILE_STATS):
        # NOTE(review): os.path.join() with a single argument is a no-op, and
        # the file handle is never closed explicitly here.
        return pickle.load(open(os.path.join(FILE_STATS), "rb"))
    elif train:
        dataset = _get_dataset()
        print("Computing statistics")
        stats = Stats()
        stats.fill_stats(dataset)
        return _train_model(stats, method, save)
    else:
        return None
| 5,335,340
|
def merge_synset(wn, synsets, reason, lexfile, ssid=None, change_list=None):
    """Create a new synset merging all the facts from other synsets.

    :param wn: the wordnet object
    :param synsets: synsets to merge; part of speech is taken from the first
    :param reason: reason for the merge (not used within this function)
    :param lexfile: lexicographer file for the new synset
    :param ssid: ID for the new synset; generated from the first definition
        when not given
    :param change_list: optional change tracker notified of the new synset
    :return: the newly created Synset
    """
    pos = synsets[0].part_of_speech.value
    if not ssid:
        ssid = new_id(wn, pos, synsets[0].definitions[0].text)
    ss = Synset(ssid, "in",
                PartOfSpeech(pos), lexfile)
    ss.definitions = [d for s in synsets for d in s.definitions]
    ss.examples = [x for s in synsets for x in s.examples]
    members = {}
    wn.add_synset(ss)
    for s in synsets:
        # Add all relations, skipping duplicates already on the new synset.
        for r in s.synset_relations:
            if not any(r == r2 for r2 in ss.synset_relations):
                add_relation(
                    wn, ss, wn.synset_by_id(
                        r.target), r.rel_type, change_list)
        # Add members
        for m in wn.members_by_id(s.id):
            if m not in members:
                members[m] = add_entry(wn, ss, m, change_list)
            # NOTE(review): add_entry is invoked again unconditionally below,
            # mirroring the original code — possibly redundant; confirm before
            # removing.
            add_entry(wn, ss, m, change_list)
            e = [e for e in [wn.entry_by_id(e2) for e2 in wn.entry_by_lemma(m)]
                 if e.lemma.part_of_speech.value == pos][0]
            for f in e.forms:
                # Bug fix: the generator previously read `for f in ...`, which
                # shadowed the outer `f` and left `f2` undefined (NameError on
                # any member with existing forms).
                if not any(f2 == f for f2 in members[m].forms):
                    members[m].add_form(f)
    # syn behaviours - probably fix manually for the moment
    if change_list:
        change_list.change_synset(ss)
    return ss
| 5,335,341
|
def __correlate_uniform(im, size, output):
    """
    Uses repeated scipy.ndimage.filters.correlate1d() calls to compute a uniform filter. Unlike
    scipy.ndimage.filters.uniform_filter() this just uses ones(size) instead of ones(size)/size.
    """
    # TODO: smarter handling of in-place convolutions?
    # get_ndimage_module/get_array_module pick the backend matching `im`
    # (e.g. CPU vs GPU arrays).
    ndi = get_ndimage_module(im)
    weights = get_array_module(im).ones(size)
    # Separable filter: one 1-D correlation per axis; after the first pass the
    # input is the output buffer, so subsequent passes operate in place.
    for axis in range(im.ndim):
        ndi.correlate1d(im, weights, axis, output)
        im = output
    return output
| 5,335,342
|
def clean_all(record):
    """Coerce citeproc data into the citeproc format.

    A catch-all pass over the record: per-field cleanup first, then fixups for
    fields that must be arrays.
    """
    cleaned = clean_fields(record)
    # Fields whose values must be wrapped in arrays.
    for arrayed in ['ISSN']:
        if arrayed in cleaned:
            cleaned = clean_arrayed(cleaned, arrayed)
    return cleaned
| 5,335,343
|
def _create_model() -> Model:
    """Setup code: Load a program minimally"""
    # load=False defers full loading; the approximate engine is attached
    # before the model is returned.
    model = Model(initial_program, [], load=False)
    engine = ApproximateEngine(model, 1, geometric_mean)
    model.set_engine(engine)
    return model
| 5,335,344
|
def candidate_elimination(trainingset):
    """Compute the version space containing all hypotheses
    from H that are consistent with the examples in the training set.

    Returns the pair (G, S): the maximally general and maximally specific
    boundary sets.
    """
    general = {("?", "?", "?", "?", "?", "?")}    # maximally general h in H
    specific = {("0", "0", "0", "0", "0", "0")}   # maximally specific h in H
    for example in trainingset:
        update_vs(general, specific, example)
    return general, specific
| 5,335,345
|
def mkdir(path_str):
    """
    Create a directory, including any missing parents; no error if it exists.
    """
    target = Path(path_str)
    return target.mkdir(parents=True, exist_ok=True)
| 5,335,346
|
def j2_raise(s):
    """Jinja2 custom filter that raises an error with the given message."""
    message = s
    raise Exception(message)
| 5,335,347
|
def create_project(name=None, description=None, source=None, artifacts=None, environment=None, serviceRole=None, timeoutInMinutes=None, encryptionKey=None, tags=None):
    """
    Creates a build project.
    See also: AWS API Documentation
    :example: response = client.create_project(
        name='string',
        description='string',
        source={
            'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3',
            'location': 'string',
            'buildspec': 'string',
            'auth': {
                'type': 'OAUTH',
                'resource': 'string'
            }
        },
        artifacts={
            'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',
            'location': 'string',
            'path': 'string',
            'namespaceType': 'NONE'|'BUILD_ID',
            'name': 'string',
            'packaging': 'NONE'|'ZIP'
        },
        environment={
            'type': 'LINUX_CONTAINER',
            'image': 'string',
            'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',
            'environmentVariables': [
                {
                    'name': 'string',
                    'value': 'string'
                },
            ]
        },
        serviceRole='string',
        timeoutInMinutes=123,
        encryptionKey='string',
        tags=[
            {
                'key': 'string',
                'value': 'string'
            },
        ]
    )
    :type name: string
    :param name: [REQUIRED]
    The name of the build project.
    :type description: string
    :param description: A description that makes the build project easy to identify.
    :type source: dict
    :param source: [REQUIRED]
    Information about the build input source code for the build project.
    type (string) -- [REQUIRED]The type of repository that contains the source code to be built. Valid values include:
    CODECOMMIT : The source code is in an AWS CodeCommit repository.
    CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.
    GITHUB : The source code is in a GitHub repository.
    S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.
    location (string) --Information about the location of the source code to be built. Valid values include:
    For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline will ignore it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.
    For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec (for example, ``https://git-codecommit.*region-ID* .amazonaws.com/v1/repos/repo-name `` ).
    For source code in an Amazon Simple Storage Service (Amazon S3) input bucket, the path to the ZIP file that contains the source code (for example, `` bucket-name /path /to /object-name .zip`` )
    For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec. Also, you must connect your AWS account to your GitHub account. To do this, use the AWS CodeBuild console to begin creating a build project, and follow the on-screen instructions to complete the connection. (After you have connected to your GitHub account, you do not need to finish creating the build project, and you may then leave the AWS CodeBuild console.) To instruct AWS CodeBuild to then use this connection, in the source object, set the auth object's type value to OAUTH .
    buildspec (string) --The build spec declaration to use for the builds in this build project.
    If this value is not specified, a build spec must be included along with the source code to be built.
    auth (dict) --Information about the authorization settings for AWS CodeBuild to access the source code to be built.
    This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly (unless the build project's source type value is GITHUB ).
    type (string) -- [REQUIRED]The authorization type to use. The only valid value is OAUTH , which represents the OAuth authorization type.
    resource (string) --The resource value that applies to the specified authorization type.
    :type artifacts: dict
    :param artifacts: [REQUIRED]
    Information about the build output artifacts for the build project.
    type (string) -- [REQUIRED]The type of build output artifact. Valid values include:
    CODEPIPELINE : The build project will have build output generated through AWS CodePipeline.
    NO_ARTIFACTS : The build project will not produce any build output.
    S3 : The build project will store build output in Amazon Simple Storage Service (Amazon S3).
    location (string) --Information about the build output artifact location, as follows:
    If type is set to CODEPIPELINE , then AWS CodePipeline will ignore this value if specified. This is because AWS CodePipeline manages its build output locations instead of AWS CodeBuild.
    If type is set to NO_ARTIFACTS , then this value will be ignored if specified, because no build output will be produced.
    If type is set to S3 , this is the name of the output bucket.
    path (string) --Along with namespaceType and name , the pattern that AWS CodeBuild will use to name and store the output artifact, as follows:
    If type is set to CODEPIPELINE , then AWS CodePipeline will ignore this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.
    If type is set to NO_ARTIFACTS , then this value will be ignored if specified, because no build output will be produced.
    If type is set to S3 , this is the path to the output artifact. If path is not specified, then path will not be used.
    For example, if path is set to MyArtifacts , namespaceType is set to NONE , and name is set to MyArtifact.zip , then the output artifact would be stored in the output bucket at MyArtifacts/MyArtifact.zip .
    namespaceType (string) --Along with path and name , the pattern that AWS CodeBuild will use to determine the name and location to store the output artifact, as follows:
    If type is set to CODEPIPELINE , then AWS CodePipeline will ignore this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.
    If type is set to NO_ARTIFACTS , then this value will be ignored if specified, because no build output will be produced.
    If type is set to S3 , then valid values include:
    BUILD_ID : Include the build ID in the location of the build output artifact.
    NONE : Do not include the build ID. This is the default if namespaceType is not specified.
    For example, if path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , then the output artifact would be stored in MyArtifacts/*build-ID* /MyArtifact.zip .
    name (string) --Along with path and namespaceType , the pattern that AWS CodeBuild will use to name and store the output artifact, as follows:
    If type is set to CODEPIPELINE , then AWS CodePipeline will ignore this value if specified. This is because AWS CodePipeline manages its build output names instead of AWS CodeBuild.
    If type is set to NO_ARTIFACTS , then this value will be ignored if specified, because no build output will be produced.
    If type is set to S3 , this is the name of the output artifact object.
    For example, if path is set to MyArtifacts , namespaceType is set to BUILD_ID , and name is set to MyArtifact.zip , then the output artifact would be stored in MyArtifacts/*build-ID* /MyArtifact.zip .
    packaging (string) --The type of build output artifact to create, as follows:
    If type is set to CODEPIPELINE , then AWS CodePipeline will ignore this value if specified. This is because AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild.
    If type is set to NO_ARTIFACTS , then this value will be ignored if specified, because no build output will be produced.
    If type is set to S3 , valid values include:
    NONE : AWS CodeBuild will create in the output bucket a folder containing the build output. This is the default if packaging is not specified.
    ZIP : AWS CodeBuild will create in the output bucket a ZIP file containing the build output.
    :type environment: dict
    :param environment: [REQUIRED]
    Information about the build environment for the build project.
    type (string) -- [REQUIRED]The type of build environment to use for related builds.
    image (string) -- [REQUIRED]The ID of the Docker image to use for this build project.
    computeType (string) -- [REQUIRED]Information about the compute resources the build project will use. Available values include:
    BUILD_GENERAL1_SMALL : Use up to 3 GB memory and 2 vCPUs for builds.
    BUILD_GENERAL1_MEDIUM : Use up to 7 GB memory and 4 vCPUs for builds.
    BUILD_GENERAL1_LARGE : Use up to 15 GB memory and 8 vCPUs for builds.
    environmentVariables (list) --A set of environment variables to make available to builds for this build project.
    (dict) --Information about an environment variable for a build project or a build.
    name (string) -- [REQUIRED]The name or key of the environment variable.
    value (string) -- [REQUIRED]The value of the environment variable.
    :type serviceRole: string
    :param serviceRole: The ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
    :type timeoutInMinutes: integer
    :param timeoutInMinutes: How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait until timing out any build that has not been marked as completed. The default is 60 minutes.
    :type encryptionKey: string
    :param encryptionKey: The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
    You can specify either the CMK's Amazon Resource Name (ARN) or, if available, the CMK's alias (using the format ``alias/alias-name `` ).
    :type tags: list
    :param tags: A set of tags for this build project.
    These tags are available for use by AWS services that support AWS CodeBuild build project tags.
    (dict) --A tag, consisting of a key and a value.
    This tag is available for use by AWS services that support tags in AWS CodeBuild.
    key (string) --The tag's key.
    value (string) --The tag's value.
    :rtype: dict
    :return: {
        'project': {
            'name': 'string',
            'arn': 'string',
            'description': 'string',
            'source': {
                'type': 'CODECOMMIT'|'CODEPIPELINE'|'GITHUB'|'S3',
                'location': 'string',
                'buildspec': 'string',
                'auth': {
                    'type': 'OAUTH',
                    'resource': 'string'
                }
            },
            'artifacts': {
                'type': 'CODEPIPELINE'|'S3'|'NO_ARTIFACTS',
                'location': 'string',
                'path': 'string',
                'namespaceType': 'NONE'|'BUILD_ID',
                'name': 'string',
                'packaging': 'NONE'|'ZIP'
            },
            'environment': {
                'type': 'LINUX_CONTAINER',
                'image': 'string',
                'computeType': 'BUILD_GENERAL1_SMALL'|'BUILD_GENERAL1_MEDIUM'|'BUILD_GENERAL1_LARGE',
                'environmentVariables': [
                    {
                        'name': 'string',
                        'value': 'string'
                    },
                ]
            },
            'serviceRole': 'string',
            'timeoutInMinutes': 123,
            'encryptionKey': 'string',
            'tags': [
                {
                    'key': 'string',
                    'value': 'string'
                },
            ],
            'created': datetime(2015, 1, 1),
            'lastModified': datetime(2015, 1, 1)
        }
    }
    :returns:
    CODECOMMIT : The source code is in an AWS CodeCommit repository.
    CODEPIPELINE : The source code settings are specified in the source action of a pipeline in AWS CodePipeline.
    GITHUB : The source code is in a GitHub repository.
    S3 : The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.
    """
    # NOTE(review): this looks like an auto-generated boto3 documentation
    # stub -- the docstring describes the CodeBuild create_project call
    # shape, while the real implementation lives in the service client.
    pass
| 5,335,348
|
def get_image_with_projected_bbox3d(img, proj_bbox3d_pts=None, width=0, color=Color.White):
    """
    Draw the outline of a 3D bbox on the image.

    Input:
        img: PIL image to draw on (modified in place and also returned)
        proj_bbox3d_pts: (8,2) array of projected vertices, or None/empty to
            draw nothing
        width: line width in pixels (ImageDraw.line semantics)
        color: line colour
    Returns:
        img, with the 12 box edges drawn when vertices were supplied.
    """
    # Fix: the default was a mutable `[]` and the guard `!= []` is unreliable
    # for numpy arrays (elementwise comparison); use None + length check.
    if proj_bbox3d_pts is None or len(proj_bbox3d_pts) == 0:
        return img
    v = proj_bbox3d_pts
    draw = ImageDraw.Draw(img)
    # Vertex ordering reference:
    # http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
    for k in range(0, 4):
        i, j = k, (k + 1) % 4  # bottom-face edge
        draw.line([(v[i, 0], v[i, 1]), (v[j, 0], v[j, 1])], fill=color, width=width)
        i, j = k + 4, (k + 1) % 4 + 4  # top-face edge
        draw.line([(v[i, 0], v[i, 1]), (v[j, 0], v[j, 1])], fill=color, width=width)
        i, j = k, k + 4  # vertical edge
        draw.line([(v[i, 0], v[i, 1]), (v[j, 0], v[j, 1])], fill=color, width=width)
    return img
| 5,335,349
|
def process_spectrogram_params(fs, nfft, frequency_range, window_start, datawin_size):
    """Build the frequency axis and per-window sample indices for a spectrogram.

    Arguments:
        fs (float): sampling frequency in Hz -- required
        nfft (int): length of signal to calculate fft on -- required
        frequency_range (list): 1x2 list - [<min frequency>, <max frequency>] -- required
        window_start (1xm np.array): array of timestamps representing the beginning
            time for each window -- required
        datawin_size (float): seconds in one window -- required
    Returns:
        window_idxs (nxm np array): indices of timestamps for each window
            (n=number of windows, m=datawin_size)
        stimes (1xt np array): times at the center of each spectral bin
        sfreqs (1xf np array): frequency bins of the spectrogram
        freq_inds (1d np array): boolean mask over the full 0..fs (step fs/nfft)
            axis selecting the analyzed frequencies
    """
    # Full FFT frequency axis, then restrict it to the requested band.
    freq_step = fs / nfft
    all_freqs = np.arange(0, fs, freq_step)
    freq_inds = (all_freqs >= frequency_range[0]) & (all_freqs <= frequency_range[1])
    sfreqs = all_freqs[freq_inds]
    # Timestamp of each window's centre sample, converted to seconds.
    stimes = (window_start + round(datawin_size / 2)) / fs
    # Sample indices covered by each window (one row per window).
    window_idxs = np.atleast_2d(window_start).T + np.arange(0, datawin_size, 1)
    window_idxs = window_idxs.astype(int)
    return [window_idxs, stimes, sfreqs, freq_inds]
| 5,335,350
|
def Hellwig2022_to_XYZ(
    specification: CAM_Specification_Hellwig2022,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    Y_b: FloatingOrArrayLike,
    surround: Union[
        InductionFactors_CIECAM02, InductionFactors_Hellwig2022
    ] = VIEWING_CONDITIONS_HELLWIG2022["Average"],
    discount_illuminant: Boolean = False,
) -> NDArray:
    """
    Convert from *Hellwig and Fairchild (2022)* specification to *CIE XYZ*
    tristimulus values.
    Parameters
    ----------
    specification : CAM_Specification_Hellwig2022
        *Hellwig and Fairchild (2022)* colour appearance model specification.
        Correlate of *Lightness* :math:`J`, correlate of *chroma* :math:`C` or
        correlate of *colourfulness* :math:`M` and *hue* angle :math:`h` in
        degrees must be specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of the
        light source and :math:`L_b` is the luminance of the background. For
        viewing images, :math:`Y_b` can be the average :math:`Y` value for the
        pixels in the entire image, or frequently, a :math:`Y` value of 20,
        approximate an :math:`L^*` of 50 is used.
    surround
        Surround viewing conditions.
    discount_illuminant
        Discount the illuminant.
    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.
    Raises
    ------
    ValueError
        If neither *C* or *M* correlates have been defined in the
        ``CAM_Specification_Hellwig2022`` argument.
    Notes
    -----
    +-------------------------------------+-----------------------+-----------\
----+
    | **Domain** | **Scale - Reference** | **Scale - \
1** |
    +=====================================+=======================+===========\
====+
    | ``CAM_Specification_Hellwig2022.J`` | [0, 100] | [0, 1] \
|
    +-------------------------------------+-----------------------+-----------\
----+
    | ``CAM_Specification_Hellwig2022.C`` | [0, 100] | [0, 1] \
|
    +-------------------------------------+-----------------------+-----------\
----+
    | ``CAM_Specification_Hellwig2022.h`` | [0, 360] | [0, 1] \
|
    +-------------------------------------+-----------------------+-----------\
----+
    | ``CAM_Specification_Hellwig2022.s`` | [0, 100] | [0, 1] \
|
    +-------------------------------------+-----------------------+-----------\
----+
    | ``CAM_Specification_Hellwig2022.Q`` | [0, 100] | [0, 1] \
|
    +-------------------------------------+-----------------------+-----------\
----+
    | ``CAM_Specification_Hellwig2022.M`` | [0, 100] | [0, 1] \
|
    +-------------------------------------+-----------------------+-----------\
----+
    | ``CAM_Specification_Hellwig2022.H`` | [0, 360] | [0, 1] \
|
    +-------------------------------------+-----------------------+-----------\
----+
    | ``XYZ_w`` | [0, 100] | [0, 1] \
|
    +-------------------------------------+-----------------------+-----------\
----+
    +-----------+-----------------------+---------------+
    | **Range** | **Scale - Reference** | **Scale - 1** |
    +===========+=======================+===============+
    | ``XYZ`` | [0, 100] | [0, 1] |
    +-----------+-----------------------+---------------+
    References
    ----------
    :cite:`Fairchild2022`, :cite:`Hellwig2022`
    Examples
    --------
    >>> specification = CAM_Specification_Hellwig2022(J=41.731207905126638,
    ...                                               C=0.025763615829912909,
    ...                                               h=217.06795976739301)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> Hellwig2022_to_XYZ(specification, XYZ_w, L_A, Y_b)
    ... # doctest: +ELLIPSIS
    array([ 19.01...,  20...     ,  21.78...])
    """
    J, C, h, _s, _Q, M, _H, _HC = astuple(specification)
    J = to_domain_100(J)
    C = to_domain_100(C)
    h = to_domain_degrees(h)
    M = to_domain_100(M)
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = vector_dot(MATRIX_16, XYZ_w)
    # Computing degree of adaptation :math:`D`.
    # D is clamped to [0, 1] unless the illuminant is fully discounted.
    D = (
        np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
        if not discount_illuminant
        else ones(L_A.shape)
    )
    F_L, z = viewing_conditions_dependent_parameters(Y_b, Y_w, L_A)
    D_RGB = (
        D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w
        + 1
        - D[..., np.newaxis]
    )
    RGB_wc = D_RGB * RGB_w
    # Applying forward post-adaptation non-linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_wc, F_L
    )
    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw)
    # Step 1
    # Derive colourfulness M from chroma C when only C was supplied
    # (inverse of the forward relation C = 35 * M / A_w).
    if has_only_nan(M) and not has_only_nan(C):
        M = (C * A_w) / 35
    elif has_only_nan(M):
        raise ValueError(
            'Either "C" or "M" correlate must be defined in '
            'the "CAM_Specification_Hellwig2022" argument!'
        )
    # Step 2
    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)
    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)
    # Computing *P_p_1* to *P_p_2*.
    P_p_n = P_p(surround.N_c, e_t, A)
    P_p_1, P_p_2 = tsplit(P_p_n)
    # Step 3
    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    ab = opponent_colour_dimensions_inverse(P_p_1, h, M)
    a, b = tsplit(ab)
    # Step 4
    # Applying post-adaptation non-linear response compression matrix.
    RGB_a = matrix_post_adaptation_non_linear_response_compression(P_p_2, a, b)
    # Step 5
    # Applying inverse post-adaptation non-linear response compression.
    RGB_c = post_adaptation_non_linear_response_compression_inverse(
        RGB_a + 0.1, F_L
    )
    # Step 6
    RGB = RGB_c / D_RGB
    # Step 7
    XYZ = vector_dot(MATRIX_INVERSE_16, RGB)
    return from_range_100(XYZ)
| 5,335,351
|
def add_variants_task(req):
    """Perform the actual task of adding variants to the database after
    receiving an add request.

    Accepts:
        req(flask.request): POST request received by server
    """
    database = current_app.db
    payload = req.json
    dataset_id = payload.get("dataset_id")
    samples = payload.get("samples", [])
    assembly = payload.get("assemblyId")
    # Restrict VCF parsing to gene intervals when a gene list was provided.
    filter_intervals = None
    genes = payload.get("genes")
    if genes:
        filter_intervals = compute_filter_intervals(payload)
    # The first pass only counts records; it exhausts the iterator, so the
    # VCF is opened a second time for the actual load.
    vcf_obj = extract_variants(
        vcf_file=payload.get("vcf_path"), samples=samples, filter=filter_intervals
    )
    nr_variants = count_variants(vcf_obj)
    vcf_obj = extract_variants(
        vcf_file=payload.get("vcf_path"), samples=samples, filter=filter_intervals
    )
    added = variants_loader(
        database=database,
        vcf_obj=vcf_obj,
        samples=set(samples),
        assembly=assembly,
        dataset_id=dataset_id,
        nr_variants=nr_variants,
    )
    if added > 0:
        # Keep the dataset object in sync with the newly inserted samples.
        update_dataset(database=database, dataset_id=dataset_id, samples=samples, add=True)
        LOG.info(f"Number of inserted variants for samples:{samples}:{added}")
| 5,335,352
|
def build_exec_file_name(graph: str,
                         strt: str,
                         nagts: int,
                         exec_id: int,
                         soc_name: str = None):
    """Builds the execution file name of id `exec_id` for the given patrolling
    scenario `{graph, nagts, strt}`.

    Args:
        graph: graph name.
        strt: strategy name.
        nagts: number of agents.
        exec_id: execution id embedded in the file name.
        soc_name: society name; derived from strategy and agent count when
            empty or None.
    """
    # Fall back to a generated society name when none was given.
    if not soc_name:
        soc_name = misc.build_soc_name(strategy=strt, nagts=nagts)
    file_name = "{}-{}-{}-{}-{}.json".format(
        strt, graph, soc_name, str(nagts), str(exec_id))
    return regularise_path(file_name)
| 5,335,353
|
def setup_comp_deps(hass, mock_device_tracker_conf):
    """Set up component dependencies (zone and group) for the test, then yield."""
    for component in ('zone', 'group'):
        mock_component(hass, component)
    yield
| 5,335,354
|
def set_ticks(ax, tick_locs, tick_labels=None, axis='y'):
    """Place ticks at the given numeric locations on one axis of *ax*.

    Locations are mapped through an AxTransformer fitted to the axes;
    labels default to the raw locations. Returns the axes for chaining.
    """
    labels = tick_locs if tick_labels is None else tick_labels
    transformer = AxTransformer()
    transformer.fit(ax, axis=axis)
    positions = transformer.transform(tick_locs)
    getattr(ax, f'set_{axis}ticks')(positions)
    getattr(ax, f'set_{axis}ticklabels')(labels)
    ax.tick_params(axis=axis, which='both', bottom=True, top=False, labelbottom=True)
    return ax
| 5,335,355
|
def prefix_parameter(par, prefix):
    # type: (Parameter, str) -> Parameter
    """
    Return a copy of the parameter with its name and id prefixed.

    The input parameter is left unmodified.
    """
    new_par = copy(par)
    new_par.name = prefix + par.name
    new_par.id = prefix + par.id
    # Bug fix: the copy was built but never returned (function returned None).
    return new_par
| 5,335,356
|
def extract_discovery(value:str) -> List[dict]:
    """Parse the output of `show discovery` / `show onu discovered`.

    Args:
        value (str): raw text returned by the command.

    Returns:
        List[dict]: one dict per discovered ONU, keyed by the table's column
        titles ('No', 'OnuType', 'PhyId', ...) plus 'SLOT' and 'PON'
        extracted from the table banner. All values are stripped strings.
    """
    # Example input:
    # ----- ONU Unauth Table, SLOT = 4, PON = 8, ITEM = 1 -----
    # No    OnuType        PhyId        PhyPwd     LogicId                  LogicPwd     Why
    # ---   -------------- ------------ ---------- ------------------------ ------------ ---
    #   1   HG6243C        FHTT91fbc5e8 fiberhome  fiberhome                fiberhome    1
    slotPortExp = re.compile('SLOT = (\d+), PON = (\d+)')
    titleExp = re.compile('(No)\s+(OnuType)\s+(PhyId)\s+(PhyPwd)\s+(LogicId)\s+(LogicPwd)\s+(Why)\s*')
    valueExp = re.compile('([\d\s]{3,3})\s([\w\s-]{14,14})\s([\w\s]{12,12})\s([\w\s]{10,10})\s([\w\s]{24,24})\s([\w\s]{12,12})\s([\d\s]{1,3})')
    ret = []
    titles = None
    slot, port = None, None
    for line in value.splitlines():
        # Track the current SLOT/PON from the table banner lines.
        match = slotPortExp.search(line)
        if match:
            slot, port = match.groups()
        if titles is None:
            # Skip everything until the column-title row has been seen.
            match = titleExp.match(line)
            if match:
                titles = match.groups()
            continue
        match = valueExp.match(line)
        if match:
            # Bug fix: the original called `value(k)` / `value(v)` -- invoking
            # the *str* argument as a function, a TypeError on every row.
            # Fixed-width columns are padded, so strip the captured fields.
            entry = {k.strip(): v.strip() for k, v in zip(titles, match.groups())}
            entry['SLOT'] = slot
            entry['PON'] = port
            ret.append(entry)
    return ret
| 5,335,357
|
def dict_to_networkx(data):
    """Build an undirected networkx graph from dictionary data.

    Args:
        data: data in dictionary type
    Returns: networkx graph
    """
    # Validate the payload first; data_checker is expected to raise on
    # malformed input before the graph is constructed.
    data_checker(data)
    return nx.Graph(data)
| 5,335,358
|
def to_url_slug(string):
    """Percent-encode *string* for safe use in a URL (spaces become '+')."""
    return urllib.parse.quote_plus(string)
| 5,335,359
|
def test_readme():
    """README contains doctests, they all pass."""
    outcome = doctest.testfile('../README.rst')
    # Guard against a silently empty README: at least one example must run.
    assert outcome.attempted != 0
    assert outcome.failed == 0
| 5,335,360
|
def _jmi23_helper(model_ref, jmi2, ids):
    """
    A helper function to recursively convert from JMI type 2 to JMI type 3.

    Mutates ``jmi2`` in place: each element listed in ``ids`` is moved into
    its parent's ``contains`` mapping and deleted from the top level. Parents
    whose children are now all fully nested are collected and folded into
    *their* parents by a recursive call, until only the roots remain flat.

    Args:
        model_ref: mapping of element id -> element dict (lookup only).
        jmi2: the JMI type 2 model being restructured in place.
        ids: iterable of element ids to fold into their parents this pass.
    """
    # Create array for lowest level elements
    empties = []
    # Loop through each id
    for i in ids:
        element = model_ref[i]
        # Get the parent ID
        # (the parent field may be either a reference dict or a bare id)
        if isinstance(element['parent'], dict):
            parentID = element['parent']['id']
        else:
            parentID = element['parent']
        # If parent is none, nothing to do
        if parentID is None:
            continue
        # Otherwise set the parent
        parent = jmi2[parentID]
        # Move element to its parent's contains field
        # and remove the element from the JMI2 object
        parent['contains'][i] = element
        if i in jmi2.keys():
            del jmi2[i]
        # Get the ID of the parent's parent
        # NOTE(review): the `is not None` clause is redundant after
        # isinstance(..., dict) -- a dict is never None.
        if (isinstance(parent['parent'], dict) and parent['parent'] is not None):
            parentsParent = jmi2[parent['parent']['id']]
        elif parent['parent'] is not None:
            parentsParent = jmi2[parent['parent']]
        else:
            parentsParent = None
        # If all of the items in contains are objects, the parent is lowest level
        # (a child still carrying an 'id' key means it has not been nested yet)
        is_lowest = True
        for k in parent['contains'].keys():
            el = parent['contains'][k]
            if isinstance(el, dict) and 'id' not in el.keys():
                is_lowest = False
        # If we're at the lowest level and there is a grandparent, we need to
        # process another layer, add the parent to the empties list.
        if is_lowest and parentsParent is not None:
            empties.append(parentID)
    # If there are still lowest level elements, recursively call function
    if len(empties) > 0:
        _jmi23_helper(model_ref, jmi2, empties)
| 5,335,361
|
def unique_everseen(iterable, key=None):
    """Return iterator of unique elements ever seen with preserving order.

    From: https://docs.python.org/3/library/itertools.html#itertools-recipes

    Parameters
    ----------
    iterable : iterable
        Source of elements.
    key : callable, optional
        When given, uniqueness is decided on ``key(element)`` instead of the
        element itself.

    Examples
    --------
    >>> from pygimli.utils import unique_everseen
    >>> s1 = 'AAAABBBCCDAABBB'
    >>> s2 = 'ABBCcAD'
    >>> list(unique_everseen(s1))
    ['A', 'B', 'C', 'D']
    >>> list(unique_everseen(s2, key=str.lower))
    ['A', 'B', 'C', 'D']

    See also
    --------
    unique, unique_rows
    """
    # Cleanup: the Python 2 `ifilterfalse` fallback (guarded by over-broad
    # `except BaseException`) was dead code on Python 3 and has been removed.
    from itertools import filterfalse
    seen = set()
    seen_add = seen.add  # local alias avoids repeated attribute lookup
    if key is None:
        for element in filterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element
| 5,335,362
|
def is_wrapping(wrapper):
    """Determines if the given callable is a wrapper for another callable.

    Returns True when *wrapper* carries the marker attribute whose name is
    held in the module-level ``__WRAPPED`` constant (set elsewhere in this
    module by the wrapping machinery).
    """
    return hasattr(wrapper, __WRAPPED)
| 5,335,363
|
def sequence_view(sequence_id):
    """
    Get a sequence based on the ID and show the details for this sequence.

    :param sequence_id: ID of the sequence
    """
    from conekt.models.relationships.sequence_go import SequenceGOAssociation

    sequence = Sequence.query.get_or_404(sequence_id)
    # Collapse duplicate GO annotations sharing the same id/evidence/source.
    go_associations = sequence.go_associations.group_by(
        SequenceGOAssociation.go_id,
        SequenceGOAssociation.evidence,
        SequenceGOAssociation.source).all()
    # Fetch all relations up front and hand them to the template, so
    # rendering never triggers long-running count queries.
    return render_template(
        'sequence.html',
        sequence=sequence,
        go_associations=go_associations,
        interpro_associations=sequence.interpro_associations.all(),
        families=sequence.families.all(),
        expression_profiles=sequence.expression_profiles.all(),
        network_nodes=sequence.network_nodes.all(),
        coexpression_clusters=sequence.coexpression_clusters.all(),
        ecc_query_associations=sequence.ecc_query_associations.all())
| 5,335,364
|
def horner(n, c, x0):
    """Evaluate a degree-n polynomial and its derivative at x0 (Horner's scheme).

    Parameters
    ----------
    n : integer
        degree of the polynomial.
    c : sequence of float
        coefficients, c[0] the constant term up to c[n] the leading term.
    x0 : float
        point where the polynomial is evaluated.

    Returns
    -------
    y : float
        the value of the polynomial at x0.
    z : float
        the value of the derivative at x0.
    """
    poly_val = c[n]
    deriv_val = c[n]
    # Synthetic division: fold each coefficient into the value, and the
    # running value into the derivative.
    for idx in range(n - 1, 0, -1):
        poly_val = x0 * poly_val + c[idx]
        deriv_val = x0 * deriv_val + poly_val
    poly_val = x0 * poly_val + c[0]  # final step yields b0 = p(x0)
    return poly_val, deriv_val
| 5,335,365
|
def test_conv2d_kernel_size_larger_than_stride_and_split_h():
    """
    Feature: same mode, stride < kernel_size, need exchange
    Description: split n/c-in/c-out/h
    Expectation: compile success
    """
    # 32 devices, semi-auto parallel from rank 0.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=0)
    # NOTE(review): strategy tuples appear to give per-dimension shard counts
    # for (input, weight) and the following op respectively -- confirm against
    # the Net definition.
    strategy1 = ((2, 2, 4, 1), (2, 2, 1, 1))
    strategy2 = ((2, 2, 4, 1),)
    net = Net(_w2, out_channel=8, kernel_size=3, pad_mode="same", stride=1, strategy1=strategy1, strategy2=strategy2)
    compile_net(net)
| 5,335,366
|
def init_app(app):
    """
    Register database teardown and CLI commands on the Flask application.

    app.teardown_appcontext() tells Flask to call that function when cleaning
    up after returning the response.
    app.cli.add_command() adds a new command that can be called with the flask
    command.
    """
    # Close the database connection at the end of each request/app context.
    app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
    app.cli.add_command(create_btc_pay_client)
| 5,335,367
|
def start():
    """Starts the puzzle game.

    Initialises the puzzle, then keeps prompting for and applying moves
    until the tiles are in sorted order.
    """
    init_puzzle()
    while not is_sorted():
        prompt_move()
        update_puzzle()
    print("We have a winner!")
| 5,335,368
|
def linlin(x, smi, sma, dmi, dma):
    """Linearly map *x* from the source range to the destination range.

    Arguments:
        x {float} -- value to remap
        smi {float} -- source range minimum
        sma {float} -- source range maximum
        dmi {float} -- destination range minimum
        dma {float} -- destination range maximum

    Returns:
        float -- x rescaled from [smi, sma] into [dmi, dma]
    """
    fraction = (x - smi) / (sma - smi)
    return fraction * (dma - dmi) + dmi
| 5,335,369
|
def _compression_safe_opener(fname):
"""Determine whether to use *open* or *gzip.open* to read
the input file, depending on whether or not the file is compressed.
"""
f = gzip.open(fname, "r")
try:
f.read(1)
opener = gzip.open
except IOError:
opener = open
finally:
f.close()
return opener
| 5,335,370
|
def gen_headers(value_type, value, header_type="PacketFilter", direction=None, notFilter=False):
    """
    Construct a JSON header dict usable for fwdApi.

    Parameters
    ----------
    value_type : string
        a string of header formats. Most commonly used are:
        ipv4_src | ipv4_dst | ipv6_src | ipv6_dst | mac_src | mac_dst |
        tp_src | tp_dst | eth_type | vlan_vid | ip_proto
    value : string
        the value of the corresponding value_type.
    header_type : string, optional
        "PacketFilter" (default) or "PacketAliasFilter" (needs a
        corresponding alias set).
    direction : string, optional
        Either "src" or "dst"; omitted from the result when None.
    notFilter : boolean, optional
        When True, wraps the header in a NotFilter to negate it.

    Returns
    -------
    dict
        constructed header dict usable for fwdApi.
    """
    header = {'type': header_type}
    if header_type == "PacketFilter":
        header['values'] = {str(value_type): [str(value)]}
    elif header_type == "PacketAliasFilter":
        header['value'] = value
    else:
        sys.exit("header_type is either 'PacketFilter' or 'PacketAliasFilter'")
    if direction:
        header['direction'] = direction
    if notFilter:
        # Negate the whole clause by wrapping it in a NotFilter.
        return {'type': "NotFilter", 'clause': header}
    return header
| 5,335,371
|
def assert_token(
    client: FlaskClient,
    opaque_token: str,
    expected_token: Dict[str, Any],
):
    """
    Provided an opaque token, this function translates it to an
    internal token and asserts on it's content.

    Flow: the opaque token is presented as a cookie (as a browser would),
    the forward-auth endpoint exchanges it for an internal token header,
    and the inspect endpoint decodes that header for the assertion.

    :param client: Flask test client to issue the requests with.
    :param opaque_token: opaque token value stored in the cookie.
    :param expected_token: decoded token payload expected from /token/inspect.
    :return: None; raises AssertionError on mismatch.
    """
    client.set_cookie(
        server_name=TOKEN_COOKIE_DOMAIN,
        key=TOKEN_COOKIE_NAME,
        value=opaque_token,
        secure=True,
        httponly=TOKEN_COOKIE_HTTP_ONLY,
        samesite='Strict',
    )
    r_forwardauth = client.get('/token/forward-auth')
    # Forward the internal token returned in the response header.
    r_inspect = client.get(
        path='/token/inspect',
        headers={TOKEN_HEADER_NAME: r_forwardauth.headers[TOKEN_HEADER_NAME]}
    )
    assert r_inspect.status_code == 200
    assert r_inspect.json == {'token': expected_token}
| 5,335,372
|
def read_payload(payload: str) -> OneOf[Issue, List[FileReport]]:
    """Transform an eslint payload to a list of `FileReport` instances.

    Args:
        payload: The raw payload from eslint.
    Returns:
        A `OneOf` containing an `Issue` or a list of `FileReport` instances.
    """
    # NOTE(review): `json` here appears to be a project module exposing
    # `parse_json` (the stdlib json module has no such function) -- confirm
    # the import at the top of the file.
    # The one_of/comprehension pattern short-circuits: if parse_json yields
    # an Issue, the mapping below is skipped and the Issue is propagated.
    return one_of(lambda: [
        [
            FileReport(
                file_path=error['filePath'],
                violations=[
                    Violation(
                        msg['ruleId'],
                        msg['message'],
                        msg['line'],
                        msg['column'],
                        error['filePath'],
                    )
                    for msg in error['messages']
                ],
            )
            for error in json_payload
        ]
        for json_payload in json.parse_json(payload)
    ])
| 5,335,373
|
def _ssim_per_channel(img1, img2, img3, max_val=1.0, mode='test', compensation=1):
    """Computes SSIM index between img1 and img2 per color channel.
    This function matches the standard SSIM implementation from:
    Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image
    quality assessment: from error visibility to structural similarity. IEEE
    transactions on image processing.
    Details:
      - 11x11 Gaussian filter of width 1.5 is used.
      - k1 = 0.01, k2 = 0.03 as in the original paper.
    Args:
      img1: First image batch (reference).
      img2: Second image batch.
      img3: Third image batch, combined with img1/img2 by _my_ssim_helper.
      max_val: The dynamic range of the images (i.e., the difference between
        the maximum the and minimum allowed values).
      mode: 'debug' additionally computes the standard SSIM of (img1, img2)
        for comparison; any other value uses _my_ssim_helper directly.
      compensation: compensation factor forwarded to the SSIM helpers.
    Returns:
      In 'debug' mode: (lm_gt, cs_gt, lm, cs, ssim_val_gt) tensors.
      Otherwise: a pair of tensors containing the channel-wise SSIM and
      contrast-structure values, shape [..., channels].
    """
    filter_size = constant_op.constant(11, dtype=dtypes.int32)
    filter_sigma = constant_op.constant(1.5, dtype=img1.dtype)
    # Bug fix: the first `shape_n([img1, img2])` call was dead (immediately
    # overwritten) and the third Assert validated shape2 instead of shape3,
    # so img3's spatial dimensions were never checked against the filter.
    shape1, shape2, shape3 = array_ops.shape_n([img1, img2, img3])
    checks = [
        control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
            shape1[-3:-1], filter_size)), [shape1, filter_size], summarize=8),
        control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
            shape2[-3:-1], filter_size)), [shape2, filter_size], summarize=8),
        control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
            shape3[-3:-1], filter_size)), [shape3, filter_size], summarize=8)]
    # Enforce the check to run before computation.
    with ops.control_dependencies(checks):
        img1 = array_ops.identity(img1)
    # TODO(sjhwang): Try to cache kernels and compensation factor.
    kernel = _fspecial_gauss(filter_size, filter_sigma)
    kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1])
    # The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`,
    # but to match MATLAB implementation of MS-SSIM, we use 1.0 instead.
    #compensation = 1.0
    # TODO(sjhwang): Try FFT.
    # TODO(sjhwang): Gaussian kernel is separable in space. Consider applying
    # 1-by-n and n-by-1 Gaussain filters instead of an n-by-n filter.
    def reducer(x):
        # Apply the Gaussian window via a depthwise convolution, preserving
        # any leading batch dimensions.
        shape = array_ops.shape(x)
        x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0))
        y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
        return array_ops.reshape(y, array_ops.concat([shape[:-3],
                                                      array_ops.shape(y)[1:]], 0))
    if mode == 'debug':
        # Reference SSIM of (img1, img2) alongside the custom helper on
        # difference images, for side-by-side inspection.
        luminance_gt, cs_gt = _ssim_helper(img1, img2, reducer, max_val, compensation)
        luminance, cs = _my_ssim_helper(img1, img1-img2, img1-img3, reducer, max_val, compensation, mode='train')
    else:
        luminance, cs = _my_ssim_helper(img1, img2, img3, reducer, max_val, compensation, mode)
    if mode == 'debug':
        axes = constant_op.constant([-3, -2], dtype=dtypes.int32)
        ssim_val_gt = math_ops.reduce_mean(luminance_gt * cs_gt, axes)
        lm_gt = math_ops.reduce_mean(luminance_gt, axes)
        cs_gt = math_ops.reduce_mean(cs_gt, axes)
        lm = math_ops.reduce_mean(luminance, axes)
        cs = math_ops.reduce_mean(cs, axes)
        return lm_gt, cs_gt, lm, cs, ssim_val_gt
    else:
        # Average over the second and the third from the last: height, width.
        axes = constant_op.constant([-3, -2], dtype=dtypes.int32)
        # NOTE(review): luminance + cs (not the standard product) -- appears
        # intentional for this custom helper; confirm against training code.
        ssim_val = math_ops.reduce_mean(luminance + cs, axes)
        print('ssim_shape', ssim_val.shape)  # debug output
        cs = math_ops.reduce_mean(cs, axes)
        return ssim_val, cs
| 5,335,374
|
def prepend(
    iterable: Iterable[Any],
    value: Any,
    *,
    times: int = 1,
) -> Iterator[Any]:
    """Yield *value* (``times`` copies) followed by the items of *iterable*.

    Arguments:
        iterable: the iterable to which the value is to be prepended
        value: the value to prepend to the iterable

    Keyword Arguments:
        times: how many copies of the value to prepend (default 1)

    Returns:
        iterator producing the prepended value(s), then the iterable's items

    Examples:
        >>> list(prepend(range(5), -1))
        [-1, 0, 1, 2, 3, 4]
        >>> list(prepend(['off to work we go'], 'hi ho', times=2))
        ['hi ho', 'hi ho', 'off to work we go']
    """
    return itertools.chain(itertools.repeat(value, times), iterable)
| 5,335,375
|
def set_passive_link_state(fw_conn):
    """Configure the HA passive link state to 'auto' on the device.

    Args:
        fw_conn (PanDevice): A panos object for device
    """
    xpath = (
        "/config/devices/entry[@name='localhost.localdomain']"
        "/deviceconfig/high-availability/group/mode/active-passive"
    )
    element = "<passive-link-state>auto</passive-link-state>"
    fw_conn.xapi.set(xpath=xpath, element=element)
| 5,335,376
|
def list_platform_versions(Filters=None, MaxRecords=None, NextToken=None):
    """
    Lists the platform versions available for your account in an AWS Region,
    with summary information about each. Compare to DescribePlatformVersion,
    which returns full details for a single platform version.

    See the AWS Elastic Beanstalk Platforms Glossary for definitions of
    platform version and related terms.

    :type Filters: list
    :param Filters: Criteria restricting the resulting list. Each filter is a
        dict with 'Type' (e.g. PlatformName, PlatformVersion, PlatformStatus,
        PlatformBranchName, PlatformLifecycleState, PlatformOwner,
        SupportedTier, SupportedAddon, ProgrammingLanguageName,
        OperatingSystemName), 'Operator' (= | != | < | <= | > | >= |
        contains | begins_with | ends_with) and 'Values' (list of strings;
        only one value is supported for all current operators). Filters are
        combined with logical AND.

    :type MaxRecords: integer
    :param MaxRecords: Maximum number of platform version values returned in
        one call.

    :type NextToken: string
    :param NextToken: Pagination token from a previous response page; all
        other parameters must match the initial request. Omit to fetch the
        first page.

    :rtype: dict
    :return: {'PlatformSummaryList': [...], 'NextToken': 'string'} where each
        summary dict carries PlatformArn, PlatformOwner, PlatformStatus
        ('Creating'|'Failed'|'Ready'|'Deleting'|'Deleted'), PlatformCategory,
        OperatingSystemName, OperatingSystemVersion, SupportedTierList,
        SupportedAddonList, PlatformLifecycleState ('recommended' or empty),
        PlatformVersion, PlatformBranchName and PlatformBranchLifecycleState
        ('beta'|'supported'|'deprecated'|'retired'). A non-null NextToken can
        be passed to a subsequent request for the next page.

    Exceptions:
        ElasticBeanstalk.Client.exceptions.InsufficientPrivilegesException
        ElasticBeanstalk.Client.exceptions.ElasticBeanstalkServiceException
    """
    pass
| 5,335,377
|
def apply_transform(transform, source, target,
                    fill_value=None, propagate_mask=False):
    """Warp ``source`` into the frame of ``target`` using ``transform``.

    The output image has the same shape as ``target``.

    Args:
        transform: A scikit-image ``SimilarityTransform`` object.
        source (numpy array): 2D source image to transform; may also be an
            object exposing its pixels via a ``.data`` ndarray attribute
            (and optionally a ``.mask``).
        target (numpy array): 2D target image (or object with ``.data``);
            only its shape is used.
        fill_value (float): A value to fill into ``aligned_image`` wherever
            ``footprint`` is True.
        propagate_mask (bool): Whether to propagate ``source.mask`` onto the
            returned footprint.

    Return:
        A tuple ``(aligned_image, footprint)``: the transformed source, and a
        boolean 2D array marking regions with no pixel information.
    """
    from skimage.transform import warp

    def _raw(img):
        # Unwrap NDData-like objects that carry their pixels in ``.data``.
        if hasattr(img, 'data') and isinstance(img.data, _np.ndarray):
            return img.data
        return img

    source_data = _raw(source)
    target_shape = _raw(target).shape

    aligned_image = warp(source_data, inverse_map=transform.inverse,
                         output_shape=target_shape, order=3, mode='constant',
                         cval=_np.median(source_data), clip=False,
                         preserve_range=True)

    # Warp an all-zero image with cval=1.0: output pixels sampled from
    # outside the source come back > 0.4 and are flagged as missing.
    blank = _np.zeros(source_data.shape, dtype='float32')
    footprint = warp(blank, inverse_map=transform.inverse,
                     output_shape=target_shape, cval=1.0) > 0.4

    if hasattr(source, 'mask') and propagate_mask:
        source_mask = _np.array(source.mask)
        if source_mask.shape == source_data.shape:
            warped_mask = warp(source_mask.astype('float32'),
                               inverse_map=transform.inverse,
                               output_shape=target_shape,
                               cval=1.0) > 0.4
            footprint = footprint | warped_mask

    if fill_value is not None:
        aligned_image[footprint] = fill_value
    return aligned_image, footprint
| 5,335,378
|
def astra(tomo, center, recon, theta, **kwargs):
    """
    Reconstruct object using the ASTRA toolbox
    Extra options
    ----------
    method : str
        ASTRA reconstruction method to use.
    num_iter : int, optional
        Number of algorithm iterations performed.
    proj_type : str, optional
        ASTRA projector type to use (see ASTRA docs for more information):
            - 'cuda' (for GPU algorithms)
            - 'line', 'linear', or 'strip' (for CPU algorithms)
    gpu_list : list, optional
        List of GPU indices to use
    extra_options : dict, optional
        Extra options for the ASTRA config (i.e. those in cfg['option'])
    Example
    -------
    >>> import tomopy
    >>> obj = tomopy.shepp3d() # Generate an object.
    >>> ang = tomopy.angles(180) # Generate uniformly spaced tilt angles.
    >>> sim = tomopy.project(obj, ang) # Calculate projections.
    >>>
    >>> # Reconstruct object:
    >>> rec = tomopy.recon(sim, ang, algorithm=tomopy.astra,
    >>>       options={'method':'SART', 'num_iter':10*180,
    >>>       'proj_type':'linear',
    >>>       'extra_options':{'MinConstraint':0}})
    >>>
    >>> # Show 64th slice of the reconstructed object.
    >>> import pylab
    >>> pylab.imshow(rec[64], cmap='gray')
    >>> pylab.show()
    """
    # Lazy import ASTRA so the module loads even when ASTRA is not installed.
    import astra as astra_mod
    # Unpack arguments
    nslices = tomo.shape[0]
    num_gridx = kwargs['num_gridx']
    num_gridy = kwargs['num_gridy']
    opts = kwargs['options']
    # Check options: required keys must be supplied by the caller; missing
    # optional keys fall back to the module-level defaults table.
    for o in needed_options['astra']:
        if o not in opts:
            logger.error("Option %s needed for ASTRA reconstruction." % (o,))
            raise ValueError()
    for o in default_options['astra']:
        if o not in opts:
            opts[o] = default_options['astra'][o]
    niter = opts['num_iter']
    proj_type = opts['proj_type']
    # Create ASTRA geometries
    vol_geom = astra_mod.create_vol_geom((num_gridx, num_gridy))
    # Number of GPUs to use
    if proj_type == 'cuda':
        if opts['gpu_list'] is not None:
            import concurrent.futures as cf
            gpu_list = opts['gpu_list']
            ngpu = len(gpu_list)
            # Split the slice stack into one contiguous chunk per GPU.
            _, slcs = mproc.get_ncore_slices(nslices, ngpu)
            # execute recon on a thread per GPU
            with cf.ThreadPoolExecutor(ngpu) as e:
                for gpu, slc in zip(gpu_list, slcs):
                    e.submit(astra_rec_cuda, tomo[slc], center[slc], recon[slc],
                             theta, vol_geom, niter, proj_type, gpu, opts)
        else:
            astra_rec_cuda(tomo, center, recon, theta, vol_geom, niter,
                           proj_type, None, opts)
    else:
        astra_rec_cpu(tomo, center, recon, theta, vol_geom, niter,
                      proj_type, opts)
    # NOTE(review): the result appears to be written into ``recon`` in place
    # and the function returns None — confirm callers rely on that.
| 5,335,379
|
def _with_extension(base: str, extension: str) -> str:
"""
Adds an extension to a base name
"""
if "sus" in base:
return f"{extension}{base}"
else:
return f"{base}{extension}"
| 5,335,380
|
def test_demo_inexecutable_script(storage, monkeypatch, capsys):
    """Test error message when user script is not executable."""
    # Run from the tests directory so the relative config paths resolve.
    monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
    # NamedTemporaryFile is created without the executable bit set, which is
    # exactly the failure mode under test.
    script = tempfile.NamedTemporaryFile()
    orion.core.cli.main(
        [
            "hunt",
            "--config",
            "./orion_config.yaml",
            script.name,
            "--config",
            "script_config.yaml",
        ]
    )
    # The CLI is expected to report the problem on stderr rather than raise.
    captured = capsys.readouterr().err
    assert "User script is not executable" in captured
| 5,335,381
|
def putin_rfid_no_order_api():
    """
    Stock-in without a pre-existing order: automatically create an order
    (xtype 'produce') and put the posted lines in against it.
    post req: withlock
    {
        lines: [{line_id:~, qty, location, lpn='', sku,
                 rfid_list[rfid1, rfid2, rfid3...],
                 rfid_details[{rfid1, weight, gross_weight, qty_inner}, {rfid2}, {rfid3}...}],
                }...]
        w_user_code,
        w_user_name
    }
    sample:{
        lines: [
            {qty, sku, location:~, rfid_details[{rfid1, weight, gross_weight, qty_inner}, ], }
        ]
    }
    """
    if request.method == 'POST':
        is_overcharge = ('overcharge' in request.path) or g.owner.is_overcharge
        is_enable_fast_stockin_qty_inner = g.owner.is_enable_fast_stockin_qty_inner
        data = request.json.pop('lines', [])  # [{line_id, qty, location, lpn=''}...]
        w_user_code = request.json.pop('w_user_code', None)
        w_user_name = request.json.pop('w_user_name', None)
        # When a single RFID is stocked in at a time, check whether that RFID
        # has already been stocked in and reject the request if so.
        if len(data) == 1:
            r_details = data[0].get('rfid_details', [])
            if len(r_details) == 1:
                rfid0 = r_details[0]['rfid']
                inv0 = InvRfid.query.t_query.filter_by(rfid=rfid0).first()
                if inv0 and inv0.qty == 1:
                    return json_response({'status': 'fail', 'msg': u'已经入库过了'})
        # Create the backing stock-in order (type 'produce') on the fly.
        ok, order = StockinAction.create_stockin({'xtype': 'produce'}, g)
        db.session.add(order)
        db.session.flush()
        action = StockinAction(order)
        for xd in data:
            d = DictNone(xd)
            if d.get('qty', 0) <= 0:
                continue
            # Populate rfid_list from rfid_details when only the latter was
            # supplied by the client.
            rfid_details = {}
            if not d.get('rfid_list', None) and d.get('rfid_details', None):
                r_details = d.get('rfid_details', [])
                rfid_list = [r['rfid'] for r in r_details]
                d['rfid_list'] = rfid_list
                rfid_details = {r['rfid']: r for r in r_details}
            # ('spec','brand','unit','style','color','size','level')
            ld = DictNone()
            ld.sku = d.sku
            # In fast stock-in mode, qty is forced to 1 per line.
            ld.qty = 1 if is_enable_fast_stockin_qty_inner else (d.qty or 1)
            ld.location_code = d.location or ''
            ld.batch_code = d.batch_code or ''
            ld.spec = d.spec or ''
            ld.style = d.style or ''
            ld.color = d.color or ''
            ld.size = d.size or ''
            ld.level = d.level or ''
            ld.twisted = d.twisted or ''
            line = StockinAction.create_stockin_line(ld, order, poplist=None, is_add=True)
            db.session.add(line)
            db.session.flush()
            # line_id, qty, location, lpn='', line=None
            is_overcharge, qty_off, qty_real = action.putin(line_id=None, line=line, qty=ld.qty, location=(ld.location_code or 'STAGE'), \
                rfid_list=d['rfid_list'], rfid_details=rfid_details, \
                w_user_code=w_user_code, w_user_name=w_user_name, is_overcharge=is_overcharge)
            d['qty_real'] = qty_real
        order.state = 'all'
        db.session.flush()
        finish = True
        for line in order.lines:
            if line.qty_real < line.qty:  # any line short of its expected qty means the order is not finished
                finish = False
            # Touch weight/qty properties so they are computed and cached.
            _ = line.weight, line.gross_weight, line.qty_inner
        # Touch the order-level weight/qty properties as well.
        _ = order.weight, order.gross_weight, order.qty_inner
        order.state = 'all' if finish else 'part'
        if order.state == 'all':
            order.finish()
        db.session.commit()
        return json_response({'status': 'success', 'msg': u'ok', 'data': data})
| 5,335,382
|
def GuessLanguage(filename):
    """Attempt to guess the language of `filename` from its extension.

    Essentially a ``filename.rsplit('.', 1)`` followed by a lookup into a
    dictionary of known extensions.

    Args:
        filename: path whose trailing '.ext' determines the language.

    Returns:
        The language name ('c' or 'python').

    Raises:
        ValueError: if the filename has no extension.
        KeyError: if the extension is not a known one.
    """
    try:
        (_, extension) = filename.rsplit('.', 1)
    except ValueError:
        # BUG FIX: the original message was a backslash-continued string
        # literal, which embedded a long run of indentation spaces into the
        # user-visible text.
        raise ValueError(
            "Could not guess language as '%s' does not have an extension"
            % filename)
    return {'c': 'c',
            'py': 'python'}[extension]
| 5,335,383
|
def extract_data_size(series, *names):
    """
    Determines series data size from the first checked property that holds
    direct values as a list, tuple or NumPy array.

    Args:
        series: perrot.Series
            Series from which to extract data size.
        names: (str,)
            Sequence of property names to check, in order.

    Returns:
        int or None
            Length of the first sequence-valued property, or None if no
            property yields one.
    """
    for prop_name in names:
        # Skip properties the series does not define at all.
        if not series.has_property(prop_name):
            continue
        value = series.get_property(prop_name, native=True)
        # Only direct sequences determine the size; other values are ignored.
        if isinstance(value, (list, tuple, numpy.ndarray)):
            return len(value)
    return None
| 5,335,384
|
def uintToQuint(v, length=2):
    """Turn any integer into a proquint with fixed length."""
    # The value must fit into `length` 16-bit words.
    assert 0 <= v < 2 ** (length * 16)
    # Encode each 16-bit word, least-significant first, then reverse so the
    # most-significant word leads.
    words = [u16ToQuint((v >> (16 * i)) & 0xffff) for i in range(length)]
    return '-'.join(reversed(words))
| 5,335,385
|
def alphanumeric_hash(s: str, size=5):
    """Short alphanumeric string derived from hash of given string"""
    import base64
    import hashlib
    # MD5 is used only as a stable digest here, not for security.
    digest = hashlib.md5(s.encode('ascii')).digest()
    encoded = base64.b32encode(digest)
    # Base32 output is alphanumeric; keep the first `size` chars, lowercased.
    return encoded[:size].decode('ascii').lower()
| 5,335,386
|
def fake_execute_default_reply_handler(*_args, **_kwargs):
    """Fallback reply handler for commands without a registered reply.

    Accepts and ignores any arguments; returns empty strings for stdout
    and stderr.
    """
    return '', ''
| 5,335,387
|
def process_pulled_tweets(tw_api, mongo_client):
    """Loop through pulled_list collection, if a tweet is valid, post it
    and save it in posted_list, else move it to discard_list.
    - Checks to pass to move to posted_list:
        - tweet is not a retweeted one.
        - tweet id is not found in posted_list.
        - tweet id is not found in discard_list.
        - language code is 'en'.
        - full_text does not match with another tweet in posted_list to avoid duplicate action.
    - TODO: check the context using ML model.
    """
    db = mongo_client[config.MONGO_DB]
    pulled_list_coll = db[config.MONGO_COLL_PULLED_LIST]
    posted_list_coll = db[config.MONGO_COLL_POSTED_LIST]
    discard_list_coll = db[config.MONGO_COLL_DISCARD_LIST]
    # process all that are retweets and remove them
    pulled_list = pulled_list_coll.find(
        {"retweeted_status": {"$exists": True}})
    for doc in pulled_list:
        pulled_list_coll.delete_one({"id_str": doc["id_str"]})
    pulled_list = pulled_list_coll.find({})
    for doc in pulled_list:
        # NOTE(review): can_process_tweets presumably gates on an API quota;
        # when it returns False the tweet stays in pulled_list for a later
        # run — confirm against its implementation.
        if can_process_tweets(mongo_client):
            # Check tweet id in posted_list
            if in_the_list(mongo_client, doc, config.MongoDocLists.POSTED_LIST):
                pulled_list_coll.delete_one({"id_str": doc["id_str"]})
                continue
            # Check tweet id in discard_list
            if in_the_list(mongo_client, doc, config.MongoDocLists.DISCARD_LIST):
                pulled_list_coll.delete_one({"id_str": doc["id_str"]})
                continue
            # Check the language code
            if not (doc["metadata"]["iso_language_code"] == 'en'):
                pulled_list_coll.delete_one({"id_str": doc["id_str"]})
                continue
            # Check the full_text with existing documents' full_text in posted_list
            if posted_list_coll.find_one({"full_text": doc["full_text"]}):
                pulled_list_coll.delete_one({"id_str": doc["id_str"]})
                continue
            # retweet and add to posted_list
            try:
                retweet_status = tw_api.retweet(doc["id"])
                inc_api_call_counters(
                    mongo_client, config.TWEET_RETWEET_ID, config.TWEET_RETWEET_LIMIT_WINDOW)
                posted_list_coll.insert_one(doc)
                pulled_list_coll.delete_one({"id_str": doc["id_str"]})
            except tweepy.TweepyException as e:
                # On API failure the tweet is discarded rather than retried.
                discard_list_coll.insert_one(doc)
                pulled_list_coll.delete_one({"id_str": doc["id_str"]})
                # FIXME: Below indexing is not right.
                #config.logger.error(
                #    "During retweet for ID: {}; message: {}".format(doc["id_str"], e.args[0][0]['message']))
            # Sleep a randomized interval between retweets.
            retweet_delay_time = config.get_rand_sleep_time()
            config.logger.info(
                "\tNext retweet attempt in {} s".format(retweet_delay_time))
            time.sleep(retweet_delay_time)
| 5,335,388
|
def require(section: str = "install") -> List[str]:
    """Parse ``requirements.txt`` into per-section package lists.

    The file is split into sections by comment headers of the form
    ``# <name>``; each non-empty, non-comment line is recorded under the
    most recent header's name.

    Args:
        section: section name to return (default ``"install"``).

    Returns:
        List of requirement strings for ``section``; empty when the file or
        the section is missing.
    """
    # Path(".").parent is just Path("."), so this resolves relative to CWD.
    require_txt = Path("requirements.txt")
    if not require_txt.is_file():
        return []
    requires = defaultdict(list)  # type: Dict[str, List[str]]
    key = ""  # section named by the most recent '# ...' header
    # Open in text mode instead of reading bytes and decoding by hand.
    with open(str(require_txt), encoding="utf-8") as fh:
        for line in fh.read().split("\n"):
            if not line.strip():
                # Skip blank lines. (The original marked these branches with
                # bare string literals, which are statements, not comments.)
                continue
            if line[0] == "#":
                # Section header: '# name' -> key 'name'.
                key = line[2:]
                continue
            # Actual package requirement.
            requires[key].append(line.strip())
    return requires[section]
| 5,335,389
|
def find_pkgutil_ns_hints(tree):
    """
    Analyze an AST for hints that we're dealing with a Python module that defines a pkgutil-style namespace package.
    :param tree:
        The result of :func:`ast.parse()` when run on a Python module (which is
        assumed to be an ``__init__.py`` file).
    :returns:
        A :class:`set` of strings where each string represents a hint (an
        indication) that we're dealing with a pkgutil-style namespace module. No
        single hint can definitely tell us, but a couple of unique hints taken
        together should provide a reasonable amount of confidence (at least this
        is the idea, how well this works in practice remains to be seen).
    """
    hints = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Attribute):
            if node.attr == "extend_path":
                logger.debug("Found hint! ('extend_path' reference)")
                hints.add("extend_path")
        elif isinstance(node, ast.Import) and any(alias.name == "pkgutil" for alias in node.names):
            logger.debug("Found hint! (import pkg_util)")
            hints.update(("import", "pkgutil"))
        elif (
            isinstance(node, ast.ImportFrom)
            and node.module == "pkgutil"
            and any(alias.name == "extend_path" for alias in node.names)
        ):
            logger.debug("Found hint! (from pkg_util import extend_path)")
            hints.update(("import", "pkgutil", "extend_path"))
        elif isinstance(node, ast.Name):
            if node.id == "extend_path":
                logger.debug("Found hint! ('extend_path' reference)")
                hints.add("extend_path")
            elif node.id == "pkgutil":
                logger.debug("Found hint! ('pkgutil' reference)")
                hints.add("pkgutil")
            elif node.id == "__import__":
                logger.debug("Found hint! ('__import__' reference)")
                hints.add("import")
            elif node.id == "__name__":
                logger.debug("Found hint! ('__name__' reference)")
                hints.add("__name__")
            elif node.id == "__path__":
                logger.debug("Found hint! ('__path__' reference)")
                hints.add("__path__")
        else:
            # BUG FIX: ast.Str was deprecated in Python 3.8 and removed in
            # 3.12; string literals are ast.Constant nodes on modern
            # interpreters. Handle both representations.
            literal = None
            if hasattr(ast, "Constant") and isinstance(node, ast.Constant):
                if isinstance(node.value, str):
                    literal = node.value
            elif hasattr(ast, "Str") and isinstance(node, ast.Str):
                literal = node.s
            if literal in ("pkgutil", "extend_path"):
                logger.debug("Found hint! ('%s' string literal)", literal)
                hints.add(literal)
    return hints
| 5,335,390
|
def announce_voter_cast_ballot(voter_number: int, registrar_url: str, key: str):
    """
    Notify the registrar that a voter has cast their ballot and voted.

    Args:
        voter_number: integer voter number from registrar
        registrar_url: url to contact registrar
        key: valid Fernet key, shared with registrar, used to encrypt payload
    """
    payload = dump_encrypt_encode_dict({"voter_number": voter_number}, key)
    post(urljoin(registrar_url, "/voter/voted"), data=payload)
| 5,335,391
|
def string_in_list_of_dicts(key, search_value, list_of_dicts):
    """
    Return True if ``search_value`` appears under ``key`` in any of the given
    dictionaries. Comparison is delegated to ``equals`` (case-insensitive and
    ignoring leading/trailing whitespace).

    :return: True if found, else False
    """
    return any(equals(entry[key], search_value) for entry in list_of_dicts)
| 5,335,392
|
def get_data(station_id, elements=None, update=True, as_dataframe=False):
    """Retrieves data for a given station.
    Parameters
    ----------
    station_id : str
        Station ID to retrieve data for.
    elements : ``None``, str, or list of str
        If specified, limits the query to given element code(s).
    update : bool
        If ``True`` (default), new data files will be downloaded if they are
        newer than any previously cached files. If ``False``, then previously
        downloaded files will be used and new files will only be downloaded if
        there is not a previously downloaded file for a given station.
    as_dataframe : bool
        If ``False`` (default), a dict with element codes mapped to value dicts
        is returned. If ``True``, a dict with element codes mapped to equivalent
        pandas.DataFrame objects will be returned. The pandas dataframe is used
        internally, so setting this to ``True`` is a little bit faster as it
        skips a serialization step.
    Returns
    -------
    site_dict : dict
        A dict with element codes as keys, mapped to collections of values. See
        the ``as_dataframe`` parameter for more.
    """
    # NOTE(review): this function is Python-2-era code (basestring, xrange,
    # iteritems) and relies on old pandas behavior — porting requires care.
    if isinstance(elements, basestring):
        elements = [elements]
    # Fixed-width column specs for the GHCN .dly format:
    # (name, start, end, converter).
    start_columns = [
        ('year', 11, 15, int),
        ('month', 15, 17, int),
        ('element', 17, 21, str),
    ]
    value_columns = [
        ('value', 0, 5, float),
        ('mflag', 5, 6, str),
        ('qflag', 6, 7, str),
        ('sflag', 7, 8, str),
    ]
    # Each record carries 31 day slots of 8 characters; expand value_columns
    # into value1..value31, mflag1..mflag31, etc. at their file offsets.
    columns = list(itertools.chain(start_columns, *[
        [(name + str(n), start + 13 + (8 * n), end + 13 + (8 * n), converter)
         for name, start, end, converter in value_columns]
        for n in xrange(1, 32)
    ]))
    station_file_path = _get_ghcn_file(
        station_id + '.dly', check_modified=update)
    # -9999 is the GHCN sentinel for missing values.
    station_data = util.parse_fwf(station_file_path, columns, na_values=[-9999])
    dataframes = {}
    for element_name, element_df in station_data.groupby('element'):
        if not elements is None and element_name not in elements:
            continue
        element_df['month_period'] = element_df.apply(
            lambda x: pandas.Period('%s-%s' % (x['year'], x['month'])),
            axis=1)
        element_df = element_df.set_index('month_period')
        monthly_index = element_df.index
        # here we're just using pandas' builtin resample logic to construct a daily
        # index for the timespan
        daily_index = element_df.resample('D').index.copy()
        # XXX: hackish; pandas support for this sort of thing will probably be
        # added soon
        month_starts = (monthly_index - 1).asfreq('D') + 1
        dataframe = pandas.DataFrame(
            columns=['value', 'mflag', 'qflag', 'sflag'], index=daily_index)
        # Scatter the dayN columns of each monthly record onto the matching
        # calendar dates of the daily-indexed frame.
        for day_of_month in range(1, 32):
            dates = [date for date in (month_starts + day_of_month - 1)
                     if date.day == day_of_month]
            if not len(dates):
                continue
            months = pandas.PeriodIndex([pandas.Period(date, 'M') for date in dates])
            for column_name in dataframe.columns:
                col = column_name + str(day_of_month)
                dataframe[column_name][dates] = element_df[col][months]
        dataframes[element_name] = dataframe
    if as_dataframe:
        return dataframes
    else:
        return dict([
            (key, util.dict_from_dataframe(dataframe))
            for key, dataframe in dataframes.iteritems()
        ])
| 5,335,393
|
def display_stats(pelican_obj):
    """
    Called when Pelican is (nearly) done to display the number of GPX files
    processed.

    Args:
        pelican_obj: the Pelican instance (unused; required by the signal API).
    """
    # NOTE: ``global`` is only needed for assignment; this function merely
    # reads the module-level counter, so the needless declaration was removed.
    plural = "" if gpx_count == 1 else "s"
    print("%s Processed %s GPX file%s." % (LOG_PREFIX, gpx_count, plural))
| 5,335,394
|
def add_block(
    matrix, block, block_i, block_j, factor, banded
):  # pylint: disable=too-many-arguments
    """Add (in place) ``factor * block`` into ``matrix`` at block position
    (block_i, block_j), zero-based.

    If ``banded`` is True, the target is a banded matrix in the
    LAPACK/scipy.linalg.eig_banded layout (``lower=False``): only entries on
    or above the diagonal are stored, with the diagonal in the bottom row of
    the band. Entries below the diagonal are skipped. Otherwise the target is
    a full matrix.

    Matrices must already be initialised with the correct size; the block
    size is deduced from the shape of ``block``.

    :param matrix: a numpy array with the matrix to fill
    :param block: a numpy square block
    :para block_i: the row coordinate of the block to add
    :para block_y: the column coordinate of the block to add
    :param factor: a factor to multiply by before adding (factor=1 just adds)
    :param banded: if True, use the banded layout described above; otherwise
        fill a full matrix.
    """
    n_rows, n_cols = block.shape
    assert n_rows == n_cols, "Only square blocks allowed"
    size = n_rows
    if not banded:
        row_span = slice(size * block_i, size * (block_i + 1))
        col_span = slice(size * block_j, size * (block_j + 1))
        matrix[row_span, col_span] += factor * block
        return
    # Banded layout: element (gi, gj) with gi <= gj is stored at
    # row (u + gi - gj), column gj of the band array.
    u = size
    for r in range(size):
        for c in range(size):
            gi = block_i * size + r
            gj = block_j * size + c
            if gi > gj:
                continue  # below the diagonal: not stored in upper band
            matrix[u + gi - gj, gj] += factor * block[r, c]
| 5,335,395
|
def Find_Peaks(profile, scale, **kwargs):
    """
    Pulls out the peaks from a radial profile.

    Inputs:
        profile : array, intensity profile of the diffraction pattern
        scale   : array, d-spacing value for each profile sample
    Keyword args:
        max_numpeaks : int, maximum number of peaks to keep (default 75)
        dspace_range : [lo, hi], d-spacing window searched (default [0.5, 6])
        filter_size  : int, smoothing window for the peak voter (default is
                       derived from the windowed profile length, minimum 3)
    Outputs:
        peak_locs : dictionary with "d_spacing" and "vec" arrays for the
                    peak locations found in the profile
        peaks_h   : handle of the diagnostic peak plot
    """
    max_numpeaks = kwargs.get('max_numpeaks', 75)
    scale_range = kwargs.get('dspace_range', [0.5, 6])
    # Boolean mask restricting the search to the requested d-spacing window.
    squished_scale = [True if x < scale_range[1] and x > scale_range[0] else False for x in scale]
    print(squished_scale)
    filter_size_default = max(int(scale[squished_scale].shape[0] / 50), 3)
    print(filter_size_default)
    kwargs['filter_size'] = kwargs.get('filter_size', filter_size_default)
    print('filter size')
    print(kwargs['filter_size'])
    # find the location of the peaks in pixel space
    peaks = pfnd.vote_peaks(profile[squished_scale], **kwargs)
    peaks_d = scale[squished_scale][peaks > 0]
    thresh = 0
    orig_length = len(peaks_d)
    if len(peaks_d) > max_numpeaks:
        print(len(peaks_d))
        # BUG FIX: .format() previously bound only to the last of the
        # concatenated string literals, so the '{}' placeholder was printed
        # verbatim; format the joined message instead.
        print(("WARNING: {} peaks were detected,"
               " some of the peaks will be trimmed."
               "\nFor best results. Please check calibration or run manual peak detection.").format(len(peaks_d)))
        # Raise the vote threshold until at most max_numpeaks survive.
        srt_peaks = np.sort(peaks[peaks > 0])
        thresh = srt_peaks[len(peaks_d) - max_numpeaks]
        if len(scale[squished_scale][peaks > thresh]) == 0 and thresh > 0:
            thresh -= 1
        peaks_d = scale[squished_scale][peaks > thresh]
        print(len(peaks_d))
        print(thresh)
        print(srt_peaks)
        if len(peaks_d) == orig_length:
            print("WARNING: reduction based on votes unsuccessful. try other parameters")
        elif len(peaks_d) > max_numpeaks:
            print("WARNING: partial reduction to {} peaks.".format(len(peaks_d)))
    peak_locs = {"d_spacing": scale[squished_scale][peaks > thresh],
                 "vec": [int(round((x - .5) * 164)) - 1 for x in peaks_d]
                 }
    # Display the data
    peaks_h = pfnd.plot_peaks(profile[squished_scale], scale[squished_scale], peaks, thresh, **kwargs)
    if len(peak_locs['vec']) <= 4:
        # BUG FIX: same misplaced .format() as above.
        print(("WARNING: only {} peaks were detected,"
               " this is lower than the recommended 4+ peaks needed"
               "\nFor best results. Please check calibration.").format(len(peaks_d)))
    return peak_locs, peaks_h
| 5,335,396
|
def empty_items(item_list, total):
    """
    Returns a range of placeholder entries padding ``item_list`` up to
    ``total`` items, or '' when no padding is needed. Useful when you want to
    always show n results and you have a list of < n.
    """
    missing = int(total) - len(item_list)
    if missing:
        # Note: when the list is longer than total this is an empty range,
        # matching the original behavior.
        return range(0, missing)
    return ''
| 5,335,397
|
def _read_string(fp):
"""Read the next sigproc-format string in the file.
Parameters
----------
fp : file
file object to read from.
Returns
-------
str
read value from the file
"""
strlen = struct.unpack("I", fp.read(struct.calcsize("I")))[0]
return fp.read(strlen).decode()
| 5,335,398
|
def add_optional_parameters(detail_json, detail, rating, rating_n, popularity,
                            current_popularity, time_spent, detailFromGoogle=None):
    """
    Check for optional return parameters and add them to the result json.

    :param detail_json: dict to populate (mutated in place and returned)
    :param detail: place-detail dict; may supply "rating" and
        "international_phone_number"
    :param rating: explicit rating; takes precedence over detail["rating"]
    :param rating_n: number of ratings
    :param popularity: raw popularity data, expanded via
        get_popularity_for_day when present
    :param current_popularity: live popularity value
    :param time_spent: typical time-spent value
    :param detailFromGoogle: extra Google detail dict merged in when it has a
        "name" key (default: empty)
    :return: the populated detail_json
    """
    # BUG FIX: the default was a mutable dict literal ({}), which is shared
    # across calls; use None and create a fresh dict per call instead.
    if detailFromGoogle is None:
        detailFromGoogle = {}
    if rating:
        detail_json["rating"] = rating
    elif "rating" in detail:
        detail_json["rating"] = detail["rating"]
    if rating_n:
        detail_json["rating_n"] = rating_n
    if "international_phone_number" in detail:
        detail_json["international_phone_number"] = detail["international_phone_number"]
    if current_popularity:
        detail_json["current_popularity"] = current_popularity
    if popularity:
        popularity, wait_times = get_popularity_for_day(popularity)
        detail_json["populartimes"] = popularity
        if wait_times:
            detail_json["time_wait"] = wait_times
    if time_spent:
        detail_json["time_spent"] = time_spent
    if "name" in detailFromGoogle:
        detail_json.update(detailFromGoogle)
    return detail_json
| 5,335,399
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.