| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def UnlinkKongregate(request, callback, customData = None, extraHeaders = None):
"""
Unlinks the related Kongregate identifier from the user's PlayFab account
https://docs.microsoft.com/rest/api/playfab/client/account-management/unlinkkongregate
"""
if not PlayFabSettings._internalSettings.ClientSessionTicket:
raise PlayFabErrors.PlayFabException("Must be logged in to call this method")
def wrappedCallback(playFabResult, error):
if callback:
callback(playFabResult, error)
PlayFabHTTP.DoPost("/Client/UnlinkKongregate", request, "X-Authorization", PlayFabSettings._internalSettings.ClientSessionTicket, wrappedCallback, customData, extraHeaders)
| 5,338,200
|
def validate_options(options):
"""Validate supplied options to ensure basic idea is ok.
This doesnt perform a fine-grained check, just whether or not
the arguments seem consistent or complete so we can fail fast.
"""
for configurator in CONFIGURATOR_LIST:
configurator.validate_options(options)
| 5,338,201
|
def menu(request):
"""
A View to return the menu.html
where all menu images are returned
in a carousel.
"""
menus = MenuImages.objects.all()
context = {
'menus': menus
}
return render(request, 'menu/menu.html', context)
| 5,338,202
|
def wav_process(PATH, i):
"""
音频处理,在路径下读取指定序号的文件进行处理
Args:
PATH (str): 音频文件路径
i (int): 指定序号
Returns:
float: 计算得到的音源角度(单位:°)
"""
# read the data
wav, sr = read_wav(PATH, i + 1)
# apply noise reduction
wav_rn = reduce_noise(wav)
# estimate the angles
angle_list = estimate_angle(wav_rn, sr)
# determine the reference direction
angle_13, angle_24 = angle_list[1], angle_list[4]
theta13p, theta13n = (180 + angle_13) % 360, 180 - angle_13
theta24p, theta24n = (270 + angle_24) % 360, 270 - angle_24
if angle_13 > 15 and angle_13 < 165:
if ((theta24p > theta13p - 10 and theta24p < theta13p + 10) or
(theta24p + 360 > theta13p - 10 and theta24p + 360 < theta13p + 10)
or (theta24n > theta13p - 10 and theta24n < theta13p + 10)
or (theta24n + 360 > theta13p - 10
and theta24n + 360 < theta13p + 10)):
scope_mid = theta13p
else:
scope_mid = theta13n
else:
if ((theta13p > theta24p - 10 and theta13p < theta24p + 10) or
(theta13p + 360 > theta24p - 10 and theta13p + 360 < theta24p + 10)
or (theta13n > theta24p - 10 and theta13n < theta24p + 10)
or (theta13n + 360 > theta24p - 10
and theta13n + 360 < theta24p + 10)):
scope_mid = theta24p
else:
scope_mid = theta24n
angle_base = [135, 180, 225, 225, 270, 315]
processed_angle = []
sum = 0
weights = 0
for i, elem in enumerate(angle_list):
if elem > 15 and elem < 165:
# weighted computation of the final angle
if elem > 65 and elem < 115:
weight = 100
else:
weight = 1
ap = (angle_base[i] + elem + 360) % 360
an = (angle_base[i] - elem + 360) % 360
if ap > scope_mid - 10 and ap < scope_mid + 10:
processed_angle.append(ap)
sum = sum + ap * weight
weights = weights + weight
else:
processed_angle.append(an)
sum = sum + an * weight
weights = weights + weight
return sum / weights
| 5,338,203
|
def extract_vuln_id(input_string):
"""
Function to extract a vulnerability ID from a message
"""
if 'fp' in input_string.lower():
wordlist = input_string.split()
vuln_id = wordlist[-1]
return vuln_id
else:
return None
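# Illustrative usage sketch (the messages are hypothetical; assumes extract_vuln_id above):
assert extract_vuln_id("please mark as FP finding 1234") == "1234"
assert extract_vuln_id("no keyword here") is None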
| 5,338,204
|
def britannica_search(text):
"""
Search on Britannica (https://www.britannica.com)
Parameters
-----------
text: The query you want to search for (str)
"""
britannica=f"https://www.britannica.com/search?query={text}"
open(britannica)
| 5,338,205
|
def _test():
"""Self-test."""
print("Self-test for the Package modul")
import random
apt_pkg.init()
progress = apt.progress.text.OpProgress()
cache = apt.Cache(progress)
pkg = cache["apt-utils"]
print("Name: %s " % pkg.name)
print("ID: %s " % pkg.id)
print("Priority (Candidate): %s " % pkg.candidate.priority)
print("Priority (Installed): %s " % pkg.installed.priority)
print("Installed: %s " % pkg.installed.version)
print("Candidate: %s " % pkg.candidate.version)
print("CandidateDownloadable: %s" % pkg.candidate.downloadable)
print("CandidateOrigins: %s" % pkg.candidate.origins)
print("SourcePkg: %s " % pkg.candidate.source_name)
print("Section: %s " % pkg.section)
print("Summary: %s" % pkg.candidate.summary)
print("Description (formatted) :\n%s" % pkg.candidate.description)
print("Description (unformatted):\n%s" % pkg.candidate.raw_description)
print("InstalledSize: %s " % pkg.candidate.installed_size)
print("PackageSize: %s " % pkg.candidate.size)
print("Dependencies: %s" % pkg.installed.dependencies)
print("Recommends: %s" % pkg.installed.recommends)
for dep in pkg.candidate.dependencies:
print(",".join("%s (%s) (%s) (%s)" % (o.name, o.version, o.relation,
o.pre_depend) for o in dep.or_dependencies))
print("arch: %s" % pkg.candidate.architecture)
print("homepage: %s" % pkg.candidate.homepage)
print("rec: ", pkg.candidate.record)
print(cache["2vcard"].get_changelog())
for i in True, False:
print("Running install on random upgradable pkgs with AutoFix: ", i)
for pkg in cache:
if pkg.is_upgradable:
if random.randint(0, 1) == 1:
pkg.mark_install(i)
print("Broken: %s " % cache._depcache.broken_count)
print("InstCount: %s " % cache._depcache.inst_count)
print()
# get a new cache
for i in True, False:
print("Randomly remove some packages with AutoFix: %s" % i)
cache = apt.Cache(progress)
for name in cache.keys():
if random.randint(0, 1) == 1:
try:
cache[name].mark_delete(i)
except SystemError:
print("Error trying to remove: %s " % name)
print("Broken: %s " % cache._depcache.broken_count)
print("DelCount: %s " % cache._depcache.del_count)
| 5,338,206
|
def ensure_paths_for_args(args):
"""
Ensure all path arguments are absolute, with home shortcuts expanded
Just applies os.path.abspath & os.path.expanduser
:param args: the arguments given from argparse
:returns: an updated args
"""
args.seqs_of_interest = os.path.abspath(
os.path.expanduser(args.seqs_of_interest))
args.assembly_dir = os.path.abspath(os.path.expanduser(args.assembly_dir))
if args.output is not None:
args.output = os.path.abspath(os.path.expanduser(args.output))
if args.cons is not None:
args.cons = os.path.abspath(os.path.expanduser(args.cons))
if args.index_file is not None:
args.index_file = os.path.abspath(os.path.expanduser(args.index_file))
if args.existing_data is not None:
args.existing_data = os.path.abspath(os.path.expanduser(args.existing_data))
return args
| 5,338,207
|
def compare_images(img1, img2):
"""Expects strings of the locations of two images. Will return an integer representing their difference"""
with Image.open(img1) as img1, Image.open(img2) as img2:
# Calculate a difference image that is the difference between the two images.
diff = ImageChops.difference(img1, img2)
return sum(_unpack_image(diff.getdata())[1])
| 5,338,208
|
def bytes_to_nodes(buf):
""" Return a list of ReadNodes corresponding to the bytes in buf.
@param bytes buf: a bytes object
@rtype: list[ReadNode]
>>> bytes_to_nodes(bytes([0, 1, 0, 2]))
[ReadNode(0, 1, 0, 2)]
"""
lst = []
for i in range(0, len(buf), 4):
l_type = buf[i]
l_data = buf[i+1]
r_type = buf[i+2]
r_data = buf[i+3]
lst.append(ReadNode(l_type, l_data, r_type, r_data))
return lst
| 5,338,209
|
def require_captcha(function, *args, **kwargs):
"""Return a decorator for methods that require captchas."""
raise_captcha_exception = kwargs.pop('raise_captcha_exception', False)
captcha_id = None
# Get a handle to the reddit session
if hasattr(args[0], 'reddit_session'):
reddit_session = args[0].reddit_session
else:
reddit_session = args[0]
while True:
try:
if captcha_id:
captcha_answer = _get_captcha(reddit_session, captcha_id)
# When the method is being decorated, all of its default
# parameters become part of this *args tuple. This means that
# *args currently contains a None where the captcha answer
# needs to go. If we put the captcha in the **kwargs,
# we get a TypeError for having two values of the same param.
func_args = _make_func_args(function)
if 'captcha' in func_args:
captcha_index = func_args.index('captcha')
args = list(args)
args[captcha_index] = captcha_answer
else:
kwargs['captcha'] = captcha_answer
return function(*args, **kwargs)
except errors.InvalidCaptcha as exception:
if raise_captcha_exception or \
not hasattr(sys.stdin, 'closed') or sys.stdin.closed:
raise
captcha_id = exception.response['captcha']
| 5,338,210
|
def list_unnecessary_loads(app_label=None):
"""
Scan the project directory tree for template files and process each and
every one of them.
:app_label: String; app label supplied by the user
:returns: has_issues flag (also outputs messages to the console)
"""
if app_label:
app = get_app(app_label)
else:
app = None
dt_engines = get_djangotemplates_engines()
for dt_engine in dt_engines:
has_issues = False
templates = []
# Get the locations of installed packages
pkg_locations = get_package_locations()
# Get template directories located within the project
for directory in dt_engine.template_dirs:
templates += get_templates(directory, pkg_locations, app)
if templates:
for template in templates:
status = process_template(template, dt_engine.engine)
if status:
has_issues = status
if not has_issues:
output_message(reason=3)
else:
output_message(reason=1)
return has_issues
| 5,338,211
|
async def test_subscribe_idempotence(
event_log: EventLog, first_topic, second_topic
) -> None:
"""Check that multiple subscriptions does not result in multiple notifications."""
called = asyncio.Event()
async def call(low: Timestamp, high: Timestamp, events: List[Event]) -> None:
nonlocal called
assert not called.is_set() # checks only called once
called.set()
assert len(events) == 1
assert events[0].topic_name == pname("A")
# same subscriber - double subscribe
event_log.subscribe(first_topic, call)
event_log.subscribe(second_topic, call)
event_log.append_event(pname("A"), "waiting")
await asyncio.wait_for(
called.wait(), timeout=1
) # wait for subscribers to get called
assert called.is_set() is True
| 5,338,212
|
def _element(
html_element: str,
html_class: str,
value: Any,
is_visible: bool,
**kwargs,
) -> dict:
"""
Template to return container with information for a <td></td> or <th></th> element.
"""
if "display_value" not in kwargs:
kwargs["display_value"] = value
return {
"type": html_element,
"value": value,
"class": html_class,
"is_visible": is_visible,
**kwargs,
}
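# Minimal usage sketch (illustrative values, not real styler output; assumes _element above):
cell = _element("td", "data", 3.14, is_visible=True)
assert cell["display_value"] == 3.14  # defaults to the raw value when not overridden
assert cell["type"] == "td" and cell["class"] == "data"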
| 5,338,213
|
def create_app(test_config=None):
"""
This method creates a flask app object with a
given configuration.
Args:
test_config (dict): Defaults to None.
Returns:
app (Flask): Flask app object.
"""
app = Flask(__name__)
Bootstrap(app)
# check environment variables to see which config to load
env = os.environ.get("FLASK_ENV", "dev")
if test_config:
app.config.from_mapping(**test_config)
else:
# config dict is from config.py
app.config.from_object(config[env])
# create database for development and testing
if env != "prod":
db_url = app.config["SQLALCHEMY_DATABASE_URI"]
if not database_exists(db_url):
create_database(db_url)
db.init_app(app)
Migrate(app, db)
app.register_blueprint(views.flask_app)
return app
| 5,338,214
|
def lovasz_hinge_loss(pred, target, crop_masks, activation='relu', map2inf=False):
"""
Binary Lovasz hinge loss
pred: [P] Variable, logits at each prediction (between -\infty and +\infty)
target: [P] Tensor, binary ground truth labels (0 or 1)
"""
losses = []
for m, p, t in zip(crop_masks, pred, target): # > imgs
num_objs = t.size()[0]
loss = t.new_tensor(0.0)
for i in range(num_objs):
if len(p[i]) > 0:
loss += lovasz_hinge_loss_single(p[i][m[i]].view(-1),
t[i][m[i]].view(-1),
activation=activation,
map2inf=map2inf)
if num_objs > 0:
loss /= num_objs
losses.append(loss)
losses = torch.stack(losses)
return losses
| 5,338,215
|
def get_gene_summary(gene):
"""Gets gene summary from a model's gene."""
return {
gene.id: {
"name": gene.name,
"is_functional": gene.functional,
"reactions": [{rxn.id: rxn.name} for rxn in gene.reactions],
"annotation": gene.annotation,
"notes": gene.notes,
}
}
| 5,338,216
|
def prompt_choice_list(msg, a_list, default=1, help_string=None):
"""Prompt user to select from a list of possible choices.
:param msg: A message displayed to the user before the choice list
:type msg: str
:param a_list: The list of choices (list of strings or list of dicts with 'name' & 'desc')
:type a_list: list
:param default: The default option that should be chosen if user doesn't enter a choice
:type default: int
:returns: The list index of the item chosen.
"""
verify_is_a_tty()
options = "\n".join(
[
" [{}] {}{}".format(
i + 1,
x["name"] if isinstance(x, dict) and "name" in x else x,
" - " + x["desc"] if isinstance(x, dict) and "desc" in x else "",
)
for i, x in enumerate(a_list)
]
)
allowed_vals = list(range(1, len(a_list) + 1))
while True:
val = _input(
"{}\n{}\nPlease enter a choice [Default choice({})]: ".format(msg, options, default)
)
if val == "?" and help_string is not None:
print(help_string)
continue
if not val:
val = "{}".format(default)
try:
ans = int(val)
if ans in allowed_vals:
# array index is 0-based, user input is 1-based
return ans - 1
raise ValueError
except ValueError:
logger.warning("Valid values are %s", allowed_vals)
| 5,338,217
|
def random_permutation_matrix(size):
"""Random permutation matrix.
Parameters
----------
size : int
The dimension of the random permutation matrix.
Returns
-------
random_permutation : array, shape (size, size)
An identity matrix with its rows randomly shuffled.
"""
identity = np.identity(size)
index = np.arange(0, size)
np.random.shuffle(index)
random_permutation = identity[index]
return random_permutation
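# Quick illustration (assumes numpy imported as np and random_permutation_matrix above):
# multiplying by a random permutation matrix reorders the entries of a vector.
P = random_permutation_matrix(4)
v = np.array([10.0, 20.0, 30.0, 40.0])
permuted = P @ v
assert np.array_equal(np.sort(permuted), np.sort(v))  # same entries, possibly new order
assert np.allclose(P @ P.T, np.identity(4))           # permutation matrices are orthogonal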
| 5,338,218
|
def test_pyramid_handles_none_and_empty_inputs():
"""Test that pyramid handles none and empty inputs."""
assert watch_pyramid_from_the_side(None) is None
assert watch_pyramid_from_above(None) is None
assert count_visible_characters_of_the_pyramid(None) == -1
assert count_all_characters_of_the_pyramid(None) == -1
assert watch_pyramid_from_the_side('') == ''
assert watch_pyramid_from_above('') == ''
assert count_visible_characters_of_the_pyramid('') == -1
assert count_all_characters_of_the_pyramid('') == -1
| 5,338,219
|
def matched(captured: Optional[Capture], groups_count: int) -> MatchedType:
"""
Construct the matched strings by traversing\
a given captured structure.
The passed Capture holds the last captured char,\
so the sequence is traversed in reverse.
Sub-matches are put in their group index
Repeating sub-matches (i.e: ``(a)*``) are put\
into a nested sequence of their group index
:param captured: The last capture or None
:param groups_count: number of groups
:return: matched strings
:private:
"""
match = collections.defaultdict(lambda: [])
curr_groups = []
while captured:
if captured.char == Symbols.GROUP_END:
curr_groups.append(captured)
if captured.is_repeated:
match[captured.index].append([])
captured = captured.prev
continue
if captured.char == Symbols.GROUP_START:
curr_groups.pop()
captured = captured.prev
continue
for g in curr_groups:
if g.is_repeated:
match[g.index][-1].append(captured.char)
else:
match[g.index].append(captured.char)
captured = captured.prev
assert not curr_groups
return tuple(
_join_reversed(match[g])
if g in match
else None
for g in range(groups_count))
| 5,338,220
|
def tle_fmt_float(num,width=10):
""" Return a left-aligned signed float string, with no leading zero left of the decimal """
digits = (width-2)
ret = "{:<.{DIGITS}f}".format(num,DIGITS=digits)
if ret.startswith("0."):
return " " + ret[1:]
if ret.startswith("-0."):
return "-" + ret[2:]
| 5,338,221
|
def pack(name=None, prefix=None, output=None, format='infer',
arcroot='', dest_prefix=None, verbose=False, force=False,
compress_level=4, n_threads=1, zip_symlinks=False, zip_64=True,
filters=None, ignore_editable_packages=False):
"""Package an existing conda environment into an archive file.
Parameters
----------
name : str, optional
The name of the conda environment to pack.
prefix : str, optional
A path to a conda environment to pack.
output : str, optional
The path of the output file. Defaults to the environment name with a
``.tar.gz`` suffix (e.g. ``my_env.tar.gz``).
format : {'infer', 'zip', 'tar.gz', 'tgz', 'tar.bz2', 'tbz2', 'tar'}, optional
The archival format to use. By default this is inferred by the output
file extension.
arcroot : str, optional
The relative path in the archive to the conda environment.
Defaults to ''.
dest_prefix : str, optional
If present, prefixes will be rewritten to this path before packaging.
In this case the ``conda-unpack`` script will not be generated.
verbose : bool, optional
If True, progress is reported to stdout. Default is False.
force : bool, optional
Whether to overwrite any existing archive at the output path. Default
is False.
compress_level : int, optional
The compression level to use, from 0 to 9. Higher numbers decrease
output file size at the expense of compression time. Ignored for
``format='zip'``. Default is 4.
zip_symlinks : bool, optional
Symbolic links aren't supported by the Zip standard, but are supported
by *many* common Zip implementations. If True, store symbolic links in
the archive, instead of the file referred to by the link. This can
avoid storing multiple copies of the same files. *Note that the
resulting archive may silently fail on decompression if the ``unzip``
implementation doesn't support symlinks*. Default is False. Ignored if
format isn't ``zip``.
n_threads : int, optional
The number of threads to use. Set to -1 to use the number of cpus on
this machine. If a file format doesn't support threaded packaging, this
option will be ignored. Default is 1.
zip_64 : bool, optional
Whether to enable ZIP64 extensions. Default is True.
filters : list, optional
A list of filters to apply to the files. Each filter is a tuple of
``(kind, pattern)``, where ``kind`` is either ``'exclude'`` or
``'include'`` and ``pattern`` is a file pattern. Filters are applied in
the order specified.
ignore_editable_packages : bool, optional
By default conda-pack will error in the presence of editable packages.
Set to True to skip these checks.
Returns
-------
out_path : str
The path to the archived environment.
"""
if name and prefix:
raise CondaPackException("Cannot specify both ``name`` and ``prefix``")
if verbose:
print("Collecting packages...")
if prefix:
env = CondaEnv.from_prefix(prefix,
ignore_editable_packages=ignore_editable_packages)
elif name:
env = CondaEnv.from_name(name, ignore_editable_packages=ignore_editable_packages)
else:
env = CondaEnv.from_default(ignore_editable_packages=ignore_editable_packages)
if filters is not None:
for kind, pattern in filters:
if kind == 'exclude':
env = env.exclude(pattern)
elif kind == 'include':
env = env.include(pattern)
else:
raise CondaPackException("Unknown filter of kind %r" % kind)
return env.pack(output=output, format=format, arcroot=arcroot,
dest_prefix=dest_prefix,
verbose=verbose, force=force,
compress_level=compress_level, n_threads=n_threads,
zip_symlinks=zip_symlinks, zip_64=zip_64)
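# Usage sketch based only on the parameters documented above; "my_env" is a
# hypothetical environment name and the call requires conda-pack plus an
# existing environment of that name:
out_path = pack(name="my_env", output="my_env.tar.gz",
                filters=[("exclude", "*.pyc")], force=True)
print(out_path)  # path to the created archive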
| 5,338,222
|
def rasterize(points):
""" Return (array, no_data_value) tuple.
Rasterize the indices of the points in an array at the highest quadtree
resolution. Note that points of larger squares in the quadtree also just
occupy one cell in the resulting array, the rest of the cells get the
no_data_value.
"""
points = np.asarray(points, dtype=float)
x, y = points.transpose()
xs, ys = analyze(x, y)
x1, y2 = x.min(), y.max()
# get indices to land each point index in its own array cell
j = np.int64(np.zeros_like(x) if xs is None else (x - x1) / xs)
i = np.int64(np.zeros_like(y) if ys is None else (y2 - y) / ys)
index = i, j
no_data_value = len(points)
ids = np.arange(no_data_value)
values = np.full((i.max() + 1, j.max() + 1), no_data_value)
values[index] = ids
return values, no_data_value
| 5,338,223
|
def parse_aedge_layout_attrs(aedge, translation=None):
"""
parse graphviz splineType
"""
if translation is None:
translation = np.array([0, 0])
edge_attrs = {}
apos = aedge.attr['pos']
# logger.info('apos = %r' % (apos,))
end_pt = None
start_pt = None
# if '-' in apos:
# import utool
# utool.embed()
def safeadd(x, y):
if x is None or y is None:
return None
return x + y
strpos_list = apos.split(' ')
strtup_list = [ea.split(',') for ea in strpos_list]
ctrl_ptstrs = [ea for ea in strtup_list if ea[0] not in 'es']
end_ptstrs = [ea[1:] for ea in strtup_list[0:2] if ea[0] == 'e']
start_ptstrs = [ea[1:] for ea in strtup_list[0:2] if ea[0] == 's']
assert len(end_ptstrs) <= 1
assert len(start_ptstrs) <= 1
if len(end_ptstrs) == 1:
end_pt = np.array([float(f) for f in end_ptstrs[0]])
if len(start_ptstrs) == 1:
start_pt = np.array([float(f) for f in start_ptstrs[0]])
ctrl_pts = np.array([tuple([float(f) for f in ea]) for ea in ctrl_ptstrs])
adata = aedge.attr
ctrl_pts = ctrl_pts
edge_attrs['pos'] = apos
edge_attrs['ctrl_pts'] = safeadd(ctrl_pts, translation)
edge_attrs['start_pt'] = safeadd(start_pt, translation)
edge_attrs['end_pt'] = safeadd(end_pt, translation)
edge_attrs['lp'] = safeadd(parse_point(adata.get('lp', None)), translation)
edge_attrs['label'] = adata.get('label', None)
edge_attrs['headlabel'] = adata.get('headlabel', None)
edge_attrs['taillabel'] = adata.get('taillabel', None)
edge_attrs['head_lp'] = safeadd(parse_point(adata.get('head_lp', None)), translation)
edge_attrs['tail_lp'] = safeadd(parse_point(adata.get('tail_lp', None)), translation)
return edge_attrs
| 5,338,224
|
def test_decrypt_v2_good_commitment():
"""Tests forwards compatibility with message serialization format v2."""
client = aws_encryption_sdk.EncryptionSDKClient()
provider = StaticRawMasterKeyProvider(
wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING,
encryption_key_type=EncryptionKeyType.SYMMETRIC,
key_bytes=b"\00" * 32,
)
provider.add_master_key("KeyId")
ciphertext = VALUES["ciphertext_v2_good_commitment"]
plaintext, _ = client.decrypt(source=ciphertext, key_provider=provider)
assert plaintext == b"GoodCommitment"
| 5,338,225
|
def load_class_by_path(taskpath):
""" Given a taskpath, returns the main task class. """
return getattr(importlib.import_module(re.sub(r"\.[^.]+$", "", taskpath)), re.sub(r"^.*\.", "", taskpath))
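# Quick check using a standard-library path as a stand-in for a real task path
# (assumes load_class_by_path above and its importlib/re imports):
from collections import OrderedDict
assert load_class_by_path("collections.OrderedDict") is OrderedDict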
| 5,338,226
|
def run_http_parser_req(req: bytes):
"""Run the stream.HTTPParser on the specified request."""
parser = HTTPParser()
parser.process(req)
| 5,338,227
|
def test_build_models(name, build_model, params):
"""
Smoke test: ensure we can build each model without anything crashing.
"""
model = build_model(params["default"])
assert type(model) is StratifiedModel
| 5,338,228
|
def test_atomic_g_month_day_enumeration_3_nistxml_sv_iv_atomic_g_month_day_enumeration_4_5(mode, save_output, output_format):
"""
Type atomic/gMonthDay is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/gMonthDay/Schema+Instance/NISTSchema-SV-IV-atomic-gMonthDay-enumeration-4.xsd",
instance="nistData/atomic/gMonthDay/Schema+Instance/NISTXML-SV-IV-atomic-gMonthDay-enumeration-4-5.xml",
class_name="NistschemaSvIvAtomicGMonthDayEnumeration4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,338,229
|
def simplex_edge_tensors(dimensions, # type: int
centers_in, # type: List[List[int]]
centers_out, # type: List[List[int]]
surrounds_in, # type: List[List[int]]
surrounds_out, # type: List[List[int]]
attractor_function=__euclid_function_generator,
# type: Callable[[Real], Callable[[Real], Real]]
flip=None # type: Optional[int]
):
""" Generates the minimum number of edge_orientation_detector tensors needed to represent all orientations of
boundaries in n-dimensional space, with positive values only. This results in one more tensor than when negative
values are allowed.
:param dimensions: number of dimensions.
:param centers_in: list of colors added together on points on the edge_orientation_detector.
:param centers_out: list of colors outputted on points on the edge_orientation_detector.
:param surrounds_in: list of colors subtracted together on points off the edge_orientation_detector.
:param surrounds_out: list of colors outputted on points off the edge_orientation_detector.
:param attractor_function: function that takes in the number of dimensions and outputs a function that takes in
distances and returns positive values for small distances and negative values for large distances.
:param flip: optional axis along which the simplex coordinates are flipped before building the tensors.
:return: a list of tensors for finding all orientations of boundaries.
"""
simplex = __simplex_coordinates(dimensions)
if flip is not None:
simplex = np.flip(simplex, flip)
return [edge_tensor(simplex_vector, center_in, center_out, surround_in, surround_out, attractor_function)
for simplex_vector, center_in, center_out, surround_in, surround_out
in zip(simplex, centers_in, centers_out, surrounds_in, surrounds_out)]
| 5,338,230
|
def do_token_submit_password(sc, args):
"""
Submit this token to set or update your password.
"""
json_data = {'password': args.password}
_token_submit(sc, args, json_data)
| 5,338,231
|
def hashtoaddress(PARAMETER):
"""
Converts a 160-bit hash to an address.
[PARAMETER] is required and should be an address hash.
"""
d = urllib2.urlopen(blockexplorer('hashtoaddress') + '/' + str(PARAMETER))
return d.read()
| 5,338,232
|
def marks_plot(ppg, marks, title='PPG Signal with Marks', label_ppg='PPG Signal',
label_marks='Marks', y_axis='Amplitude', x_axis=None, figure_path=None):
"""
Plots PPG signals with marks.
Parameters
----------
ppg : pandas.Series or ndarray
The PPG signal.
marks : ndarray
Marks to be plotted against the PPG signal.
title : str, optional
Title of the plot, by default 'PPG Signal'.
label_ppg : str, optional
Label for the PPG signal, by default 'PPG Signal'.
label_marks : str, optional
Label for the marks, by default 'Marks'.
y_axis : str, optional
Y axis name, by default 'Amplitude'.
x_axis : str, optional
X axis name, by default None.
figure_path : str, optional
The path for the plot to be saved, by default None.
"""
_configure_plot(title, y_axis, x_axis)
plt.plot(ppg, color='#e8335e')
for mark in marks:
if isinstance(ppg, pd.core.series.Series):
plt.plot(ppg.index[mark], ppg.iloc[mark], marker='X', markersize=8, color='#6233E8')
elif isinstance(ppg, np.ndarray):
plt.plot(mark, ppg[mark], marker='X', markersize=8, color='#6233E8')
plt.legend([label_ppg, label_marks])
if figure_path:
plt.savefig(figure_path, dpi=300, bbox_inches='tight', format="pdf")
plt.show()
plt.close()
| 5,338,233
|
def get_all_objects(line: str, frame: types.FrameType) -> ObjectsInfo:
"""Given a (partial) line of code and a frame,
obtains a dict containing all the relevant information about objects
found on that line so that they can be formatted as part of the
answer to "where()" or they can be used during the analysis
of the cause of the exception.
The dict returned has five keys.
The first three, 'locals', 'globals', 'builtins',
each containing a list of tuples, each tuple being of the form
(name, repr(obj), obj) where name --> obj.
The fourth key, 'expressions', contains a list of tuples of the form
('name', obj). It is only occasionally used to help make
suggestions regarding the cause of some exception.
The fifth key, 'name, obj', collects (name, obj) pairs for every object
found in any of the scopes above.
"""
objects: ObjectsInfo = {
"locals": [],
"globals": [],
"builtins": [],
"expressions": [],
"name, obj": [],
}
scopes = (
("locals", frame.f_locals), # always have locals before globals
("globals", frame.f_globals),
)
names = set()
tokens = token_utils.get_significant_tokens(line)
if not tokens:
return objects
for tok in tokens:
if tok.is_identifier():
name = tok.string
if name in names:
continue
for scope, scope_dict in scopes:
if name in scope_dict:
names.add(name)
obj = scope_dict[name]
objects[scope].append((name, repr(obj), obj))
objects["name, obj"].append((name, obj))
break
else:
if name in dir(builtins):
names.add(name)
obj = getattr(builtins, name)
objects["builtins"].append((name, repr(obj), obj))
objects["name, obj"].append((name, obj))
line = line.strip()
if line.startswith(("def", "if", "while", "class", "for")) and line.endswith(":"):
line += " pass"
try:
atok = ASTTokens(line.strip(), parse=True)
except SyntaxError as e:
if "unexpected EOF" not in str(e):
debug_helper.log(f"problem with ASTTokens: {e}")
debug_helper.log(f"line: {line}")
return objects
if atok is not None:
evaluator = Evaluator.from_frame(frame)
for nodes, obj in group_expressions(
pair for pair in evaluator.find_expressions(atok.tree)
):
name = atok.get_text(nodes[0])
if not name or name in names:
continue
names.add(name)
objects["name, obj"].append((name, obj))
try:
# We're not interested in showing literals in the list of variables
ast.literal_eval(name)
except Exception: # noqa
objects["expressions"].append((name, obj))
return objects
| 5,338,234
|
def magenta(msg):
"""Return colorized <msg> in magenta"""
return __fore(msg, 'magenta')
| 5,338,235
|
def request_latest_news():
"""
This method queries the last item of the database and converts it to a string.
:return: A string with the last item of the database
"""
article = News.query.order_by(News.id.desc()).first()
return format_latest_article(article, request.content_type)
| 5,338,236
|
def test_values_for_tag(mock_s3, mock_get_url):
"""
Args:
mock_s3:
mock_get_url:
"""
mock_s3.return_value = _get_mock_s3()
mock_get_url.return_value = "s3://bucket/grid.zinc"
with get_provider("shaystack.providers.url", {}) as provider:
result = provider.values_for_tag("col")
assert result == [1.0, 2.0]
result = provider.values_for_tag("id")
assert result == [Ref("id1"), Ref("id2")]
| 5,338,237
|
def is_vulgar(words, sentence):
"""Checks if a given line has any of the bad words from the bad words list."""
for word in words:
if word in sentence:
return 1
return 0
| 5,338,238
|
def edges_cross(graph, nodes1, nodes2):
"""
Finds edges between two sets of disjoint nodes.
Running time is O(len(nodes1) * len(nodes2))
Args:
graph (nx.Graph): an undirected graph
nodes1 (set): set of nodes disjoint from `nodes2`
nodes2 (set): set of nodes disjoint from `nodes1`.
"""
return {e_(u, v) for u in nodes1
for v in nodes2.intersection(graph.adj[u])}
| 5,338,239
|
def basket_view(func):
""" Returns rendered page for basket """
@jinja2_view('basket.html', template_lookup=[TEMPLATES_DIR])
def _basket_view_call(*args, **kwargs):
func(*args, **kwargs)
return {'col_mapping': COLUMN_MAPPING, 'product_list': _format_products_for_web(get_basket_products())}
return _basket_view_call
| 5,338,240
|
def py2to3(target_path,
interpreter_command_name="python",
is_transform=False,
is_del_bak=False,
is_html_diff=False,
is_check_requirements=False):
"""
The main entry point for the 2to3 functionality, exposing a series of options.
The main functions are as follows:
1. Whether to enable automatic conversion of Python2 code to Python3
2. Determine whether to keep a backup of Python2 code
3. Determine whether to open the conversion code text comparison
4. Determine whether the version of the library that the project
depends on is suitable for the current Python environment.
:param target_path:
str, project path
:param interpreter_command_name:
str, interpreter command name, default "python"
Please make sure that the Python terminal environment
has been configured successfully
:param is_transform:
bool, default False
:param is_del_bak:
bool, default False
:param is_html_diff:
bool, default False
:param is_check_requirements:
bool, default False
:return: bool, ignore
"""
# Whether to enable automatic conversion of Python2 code to Python3
if is_transform:
files_transform(
target_path=target_path,
interpreter_command_name=interpreter_command_name
)
# Determine whether to keep a backup of Python2 code
if is_del_bak:
bak_files_clear(target_path=target_path)
# Determine whether to open the conversion code text comparison
if is_html_diff:
html_diff_generate(target_path=target_path)
# Determine whether the version of the library that the project
# depends on is suitable for the current Python environment.
if is_check_requirements:
libraries_detect_and_recommend(target_path=target_path)
return True
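# Hedged usage sketch; "./legacy_project" is a hypothetical path and the call
# assumes the helper functions referenced above are importable:
py2to3(
    target_path="./legacy_project",
    interpreter_command_name="python3",
    is_transform=True,  # convert Python 2 sources in place
    is_html_diff=True,  # emit a text comparison of the converted files
)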
| 5,338,241
|
def _norm_path(path):
"""
Decorator function intended to normalize the output of a path retrieval function. Useful for
fixing the slash/backslash windows cases.
"""
def normalize_path(*args):
return os.path.normpath(path(*args))
return normalize_path
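# Sketch of the intended decorator usage (the decorated function is hypothetical;
# assumes the os import used by _norm_path):
@_norm_path
def build_config_path(name):
    return "configs/" + name + "/../defaults.yaml"
# build_config_path("dev") -> "configs/defaults.yaml" (backslashes on Windows)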
| 5,338,242
|
def solve(puzzle_class, output_stream, settings):
"""Find and record all solutions to a puzzle. Report on `output_stream`."""
start = datetime.now()
puzzle = puzzle_class(settings.start_position)
solver = puzzler.exact_cover_modules[settings.algorithm].ExactCover(
puzzle.matrix)
try:
try:
print >>output_stream, ('solving %s:\n'
% puzzle.__class__.__name__)
print >>output_stream, puzzle.start_position, '\n'
for solution in solver.solve():
puzzle.record_solution(
solution, solver, stream=output_stream)
if ( settings.stop_after
and solver.num_solutions == settings.stop_after):
break
except KeyboardInterrupt:
print >>output_stream, 'Session interrupted by user.'
sys.exit(1)
finally:
end = datetime.now()
duration = end - start
print >>output_stream, (
'%s solution%s, %s searches, duration %s'
% (solver.num_solutions, ('s', '')[solver.num_solutions == 1],
solver.num_searches, duration))
| 5,338,243
|
def _get_hash_aliases(name):
"""
internal helper used by :func:`lookup_hash` --
normalize arbitrary hash name to hashlib format.
if name not recognized, returns dummy record and issues a warning.
:arg name:
unnormalized name
:returns:
tuple with 2+ elements: ``(hashlib_name, iana_name|None, ... 0+ aliases)``.
"""
# normalize input
orig = name
if not isinstance(name, str):
name = to_native_str(name, 'utf-8', 'hash name')
name = re.sub("[_ /]", "-", name.strip().lower())
if name.startswith("scram-"): # helper for SCRAM protocol (see passlib.handlers.scram)
name = name[6:]
if name.endswith("-plus"):
name = name[:-5]
# look through standard names and known aliases
def check_table(name):
for row in _known_hash_names:
if name in row:
return row
result = check_table(name)
if result:
return result
# try to clean name up some more
m = re.match(r"(?i)^(?P<name>[a-z]+)-?(?P<rev>\d)?-?(?P<size>\d{3,4})?$", name)
if m:
# roughly follows "SHA2-256" style format, normalize representation,
# and checked table.
iana_name, rev, size = m.group("name", "rev", "size")
if rev:
iana_name += rev
hashlib_name = iana_name
if size:
iana_name += "-" + size
if rev:
hashlib_name += "_"
hashlib_name += size
result = check_table(iana_name)
if result:
return result
# not found in table, but roughly recognize format. use names we built up as fallback.
log.info("normalizing unrecognized hash name %r => %r / %r",
orig, hashlib_name, iana_name)
else:
# just can't make sense of it. return something
iana_name = name
hashlib_name = name.replace("-", "_")
log.warning("normalizing unrecognized hash name and format %r => %r / %r",
orig, hashlib_name, iana_name)
return hashlib_name, iana_name
| 5,338,244
|
def dirac_2d_v_and_h(direction, G_row, vec_len_row, num_vec_row,
G_col, vec_len_col, num_vec_col,
a, K, noise_level, max_ini, stop_cri):
"""
used to run the reconstructions along horizontal and vertical directions in parallel.
"""
if direction == 0: # row reconstruction
c_recon, min_error, b_recon, ini = \
recon_2d_dirac_vertical(G_row, vec_len_row, num_vec_row,
a, K, noise_level, max_ini, stop_cri)
else: # column reconstruction
c_recon, min_error, b_recon, ini = \
recon_2d_dirac_vertical(G_col, vec_len_col, num_vec_col,
a, K, noise_level, max_ini, stop_cri)
return c_recon, min_error, b_recon, ini
| 5,338,245
|
def Matrix(*args, **kwargs):
"""*Funktion zur Erzeugung von Matrizen mit beliebiger Dimension"""
h = kwargs.get("h")
if h in (1, 2, 3):
matrix_hilfe(h)
return
elif isinstance(h, (Integer, int)):
matrix_hilfe(1)
return
Vektor = importlib.import_module('agla.lib.objekte.vektor').Vektor
# Create a SymPy matrix in the usual way
if iterable(args) and not isinstance(args[0], Vektor):
m = SympyMatrix(*args, **kwargs)
for i in range(m.rows):
for j in range(m.cols):
try:
m[i, j] = nsimplify(m[i, j])
except RecursionError:
pass
return m
# Create a SymPy matrix from the column vectors
try:
if not args:
raise AglaError('mindestens zwei Vektoren angeben')
if isinstance(args[0], (tuple, Tuple, list, set)):
vektoren = args[0]
if not type(vektoren) == list:
vektoren = list(vektoren)
else:
vektoren = list(args)
if not all(isinstance(v, Vektor) for v in vektoren):
raise AglaError('Vektoren angeben')
if not all(v.dim == vektoren[0].dim for v in vektoren):
raise AglaError('die Vektoren haben unterschiedliche Dimension')
except AglaError as e:
print('agla:', str(e))
liste = [ [k for k in v.komp] for v in vektoren ]
m, n = vektoren[0].dim, len(vektoren)
zeilen = [ [liste[i][j] for i in range(n)] for j in range(m) ]
M = SympyMatrix(zeilen)
return M
| 5,338,246
|
def afw_word_acceptance(afw: dict, word: list) -> bool:
""" Checks if a **word** is accepted by input AFW, returning
True/False.
The word w is accepted by an AFW if there exists at least one
accepting run on w. A run for an AFW is a tree, and
an alternating automaton can have multiple runs on a given
input.
A run is accepting if all the leaf nodes are accepting states.
:param dict afw: input AFW;
:param list word: list of symbols ∈ afw['alphabet'].
:return: *(bool)*, True if the word is accepted, False otherwise.
"""
return __recursive_acceptance(afw, afw['initial_state'], word)
| 5,338,247
|
def get_following():
"""
endpoint: /release/following
method: GET
param:
"[header: Authorization] Token": str - Token received from firebase
response_type: array
response:
id: 1
created: 123456789
vol: 1
chapter: 1
title: Chapter titles
url: /chapter/1
manga:
title: manga title
url: /manga/1/manga-title
cover: manga_cover_url
error:
404:
code: 404
message: There are no new chapters available
"""
list_manga = UsersManga.query.filter(and_(
UsersManga.user_uid.like(g.uid),
UsersManga.favorited.is_(True),
)).all()
list_manga_id = [x.mangas.id for x in list_manga]
chapters = (
Chapter.query
.filter(Chapter.manga_id.in_(list_manga_id))
.order_by(Chapter.manga_id)
.distinct(Chapter.manga_id)
.from_self()
.order_by(Chapter.created.desc())
.limit(10).all()
)
if not chapters:
return jsonify({
'code': 404,
'message': 'There are no new chapters available'
})
return jsonify(chapters_schema.dump(chapters).data)
| 5,338,248
|
def matchNoSpaces(value):
"""Match strings with no spaces."""
if re.search(r'\s', value):
return False
return True
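# Quick check (assumes the re import used above):
assert matchNoSpaces("no_spaces_here")
assert not matchNoSpaces("has a space")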
| 5,338,249
|
def test_invalid_driver():
"""
Test invalid driver.
"""
code = 'Tiff'
with pytest.raises(ValueError, match = '.*driver.*') as info:
results = drivers.validate(code)
| 5,338,250
|
def test_dirac():
"""
Feature: Test dirac initializer.
Description: Initialize input tensor with the Dirac delta function.
Expectation: The Tensor is correctly initialized.
"""
tensor3_1 = initializer(Dirac(groups=1), [6, 2, 3], mindspore.float32)
tensor3_2 = initializer(Dirac(groups=2), [6, 2, 3], mindspore.float32)
tensor3_3 = initializer(Dirac(groups=3), [6, 2, 3], mindspore.float32)
tensor4_1 = initializer(Dirac(groups=1), [6, 4, 3, 3], mindspore.float32)
tensor4_2 = initializer(Dirac(groups=2), [6, 4, 3, 3], mindspore.float32)
tensor4_3 = initializer(Dirac(groups=3), [6, 4, 3, 3], mindspore.float32)
tensor5_1 = initializer(Dirac(groups=1), [6, 2, 3, 3, 3], mindspore.float32)
tensor5_2 = initializer(Dirac(groups=2), [6, 2, 3, 3, 3], mindspore.float32)
tensor5_3 = initializer(Dirac(groups=3), [6, 2, 3, 3, 3], mindspore.float32)
expectation3_1 = np.array([[[0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.]],
[[0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.]]], dtype=np.float32)
expectation3_2 = np.array([[[0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.]],
[[0., 0., 0.], [0., 0., 0.]],
[[0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.]],
[[0., 0., 0.], [0., 0., 0.]]], dtype=np.float32)
expectation3_3 = np.array([[[0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.]],
[[0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.]],
[[0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.]]], dtype=np.float32)
expectation4_1 = np.array([[[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]], dtype=np.float32)
expectation4_2 = np.array([[[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]], dtype=np.float32)
expectation4_3 = np.array([[[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]], dtype=np.float32)
expectation5_1 = np.array([[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]]], dtype=np.float32)
expectation5_2 = np.array([[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]]], dtype=np.float32)
expectation5_3 = np.array([[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]],
[[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]]], dtype=np.float32)
assert (tensor3_1.asnumpy() == expectation3_1).all()
assert (tensor3_2.asnumpy() == expectation3_2).all()
assert (tensor3_3.asnumpy() == expectation3_3).all()
assert (tensor4_1.asnumpy() == expectation4_1).all()
assert (tensor4_2.asnumpy() == expectation4_2).all()
assert (tensor4_3.asnumpy() == expectation4_3).all()
assert (tensor5_1.asnumpy() == expectation5_1).all()
assert (tensor5_2.asnumpy() == expectation5_2).all()
assert (tensor5_3.asnumpy() == expectation5_3).all()
| 5,338,251
|
def collect_inline_comments(list_of_strings,begin_token=None,end_token=None):
"""Reads a list of strings and returns all of the inline comments in a list.
Output form is ['comment',line_number,string_location] returns None if there are none or tokens are set to None"""
if begin_token in [None] and end_token in [None]:
return None
match=re.compile('{0}(?P<inline_comments>.*){1}'.format(re.escape(begin_token),re.escape(end_token)))
inline_comment_list=[]
for index,line in enumerate(list_of_strings):
comment_match=re.search(match,line)
if comment_match:
inline_comment_list.append([comment_match.group('inline_comments'),index,comment_match.start()])
if inline_comment_list:
return inline_comment_list
else:
return None
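# Small worked example (assumes collect_inline_comments above and its re import):
lines = ["int x = 1; /* counter */", "return x;"]
comments = collect_inline_comments(lines, begin_token="/*", end_token="*/")
assert comments == [[" counter ", 0, 11]]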
| 5,338,252
|
def get_df_ads():
"""
"""
#| - get_df_ads
# #####################################################
# import pickle; import os
path_i = os.path.join(
os.environ["PROJ_irox_oer"],
"dft_workflow/job_analysis/collect_collate_dft_data",
"out_data/df_ads.pickle")
# with open(path_i, "rb") as fle:
# df_ads = pickle.load(fle)
# # #####################################################
my_file = Path(path_i)
if my_file.is_file():
with open(path_i, "rb") as fle:
df_ads = pickle.load(fle)
else:
print("Couldn't read df_ads")
print(path_i)
print("")
df_ads = pd.DataFrame()
return(df_ads)
#__|
| 5,338,253
|
def is_categorical_dtype(arr_or_dtype: List[int]):
"""
usage.seaborn: 1
"""
...
| 5,338,254
|
def cli_list(apic, args):
"""Implement CLI command `list`.
"""
# pylint: disable=unused-argument
instances = apic.get_instances()
if instances:
print('\n'.join(apic.get_instances()))
return 0
| 5,338,255
|
def is_symmetric(a: np.array):
"""
Check whether the matrix is symmetric
:param a:
:return:
"""
tol = 1e-10
return (np.abs(a - a.T) <= tol).all()
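# Quick sanity check (assumes numpy imported as np):
assert is_symmetric(np.array([[1.0, 2.0], [2.0, 3.0]]))
assert not is_symmetric(np.array([[1.0, 2.0], [0.0, 3.0]]))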
| 5,338,256
|
def get_number(line, position):
"""Searches for the end of a number.
Args:
line (str): The line in which the number was found.
position (int): The starting position of the number.
Returns:
str: The number found.
int: The position after the number found.
"""
word = ""
for pos, char in enumerate(line[position:]):
if char.isdigit() or char == ".": word += char
else: return word, position + pos
return word, len(line)
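# Worked example (assumes get_number above): the scan starts at index 4, where
# "3.14" begins, and stops at the ";".
assert get_number("x = 3.14;", 4) == ("3.14", 8)
# when the number runs to the end of the line, the line length is returned
assert get_number("width 42", 6) == ("42", 8)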
| 5,338,257
|
def load(f: TextIO) -> Config:
"""Load a configuration from a file-like object f"""
config = yaml.safe_load(f)
if isinstance(config["diag_table"], dict):
config["diag_table"] = DiagTable.from_dict(config["diag_table"])
return config
| 5,338,258
|
def levelize_smooth_or_improve_candidates(to_levelize, max_levels):
"""Turn parameter in to a list per level.
Helper function to preprocess the smooth and improve_candidates
parameters passed to smoothed_aggregation_solver and rootnode_solver.
Parameters
----------
to_levelize : {string, tuple, list}
Parameter to preprocess, i.e., levelize and convert to a level-by-level
list such that entry i specifies the parameter at level i
max_levels : int
Defines the maximum number of levels considered
Returns
-------
to_levelize : list
The parameter list such that entry i specifies the parameter choice
at level i.
Notes
--------
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that entry
[i] in that list is the parameter choice for level i.
The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
parameter setting at every level. If to_levelize is initially a list
and its length is less than max_levels, the last entry in the
list defines that parameter for all subsequent levels.
Examples
--------
>>> from pyamg.util.utils import levelize_smooth_or_improve_candidates
>>> improve_candidates = ['gauss_seidel', None]
>>> levelize_smooth_or_improve_candidates(improve_candidates, 4)
['gauss_seidel', None, None, None]
"""
# handle default value (mutable)
# improve_candidates=(('block_gauss_seidel',
# {'sweep': 'symmetric', 'iterations': 4}),
# None)
# -> make it a list
if isinstance(to_levelize, tuple):
if isinstance(to_levelize[0], tuple):
to_levelize = list(to_levelize)
if isinstance(to_levelize, (str, tuple)):
to_levelize = [to_levelize for i in range(max_levels)]
elif isinstance(to_levelize, list):
if len(to_levelize) < max_levels:
mlz = max_levels - len(to_levelize)
toext = [to_levelize[-1] for i in range(mlz)]
to_levelize.extend(toext)
elif to_levelize is None:
to_levelize = [(None, {}) for i in range(max_levels)]
return to_levelize
| 5,338,259
|
def get_max_num_context_features(model_config):
"""Returns maximum number of context features from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the max number of context features if the model
config contains context_config, None otherwise
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.max_num_context_features
| 5,338,260
|
def hashname(name, secsalt):
"""Obtain a sha256 hash from a name."""
m = hashlib.sha256()
m.update((name + secsalt).encode("utf-8"))
return m.hexdigest()
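# Minimal usage sketch ("pepper" is a hypothetical salt; assumes the hashlib import above):
digest = hashname("alice", secsalt="pepper")
assert digest == hashlib.sha256("alicepepper".encode("utf-8")).hexdigest()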
| 5,338,261
|
def test_combinations3_task( infiles, outfile,
prefices,
subpath,
subdir):
"""
Test combinations with k-tuple = 3
"""
with open(outfile, "w") as outf:
outf.write(prefices + ",")
| 5,338,262
|
def company_detail(request, stock_quote: int) -> HttpResponse:
""" Return a view to Company details """
try:
company = Company.objects.get(quote=str(stock_quote))
# TODO(me): Implement company detail view logic
except Company.DoesNotExist:
raise Http404("Company with releated quote does not exist")
return render(request, 'company_detail.html', { 'company': company })
| 5,338,263
|
def parse_encoding_header(header):
"""
Break up the `HTTP_ACCEPT_ENCODING` header into a dict of the form,
{'encoding-name':qvalue}.
"""
encodings = {'identity':1.0}
for encoding in header.split(","):
if(encoding.find(";") > -1):
encoding, qvalue = encoding.split(";")
encoding = encoding.strip()
qvalue = qvalue.split('=', 1)[1]
if(qvalue != ""):
encodings[encoding] = float(qvalue)
else:
encodings[encoding] = 1
else:
encodings[encoding] = 1
return encodings
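# Worked example for a typical Accept-Encoding value (no spaces after the commas,
# since the parser above does not strip whitespace around plain tokens):
assert parse_encoding_header("gzip;q=0.8,deflate") == {
    "identity": 1.0,
    "gzip": 0.8,
    "deflate": 1,
}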
| 5,338,264
|
def _plot_results(novelty_dict, narr_predictor_names, test_index,
top_output_dir_name):
"""Plots results of novelty detection.
:param novelty_dict: Dictionary created by
`novelty_detection.do_novelty_detection`.
:param narr_predictor_names: length-C list of predictor names.
:param test_index: Array index. The [i]th-most novel test example will be
plotted, where i = `test_index`.
:param top_output_dir_name: Name of top-level output directory. Figures
will be saved here.
"""
num_predictors = len(narr_predictor_names)
try:
example_plotting.get_wind_indices(narr_predictor_names)
plot_wind_barbs = True
except ValueError:
plot_wind_barbs = False
image_matrix_actual = novelty_dict[
novelty_detection.NOVEL_IMAGES_ACTUAL_KEY][test_index, ...]
image_matrix_upconv = novelty_dict[
novelty_detection.NOVEL_IMAGES_UPCONV_KEY][test_index, ...]
this_combined_matrix = numpy.concatenate(
(image_matrix_actual, image_matrix_upconv), axis=0)
these_min_colour_values = numpy.array([
numpy.percentile(this_combined_matrix[..., k], 1)
for k in range(num_predictors)
])
these_max_colour_values = numpy.array([
numpy.percentile(this_combined_matrix[..., k], 99)
for k in range(num_predictors)
])
if plot_wind_barbs:
this_figure_object, _ = example_plotting.plot_many_predictors_with_barbs(
predictor_matrix=image_matrix_actual,
predictor_names=narr_predictor_names,
cmap_object_by_predictor=[MAIN_COLOUR_MAP_OBJECT] * num_predictors,
min_colour_value_by_predictor=these_min_colour_values,
max_colour_value_by_predictor=these_max_colour_values)
else:
this_figure_object, _ = example_plotting.plot_many_predictors_sans_barbs(
predictor_matrix=image_matrix_actual,
predictor_names=narr_predictor_names,
cmap_object_by_predictor=[MAIN_COLOUR_MAP_OBJECT] * num_predictors,
min_colour_value_by_predictor=these_min_colour_values,
max_colour_value_by_predictor=these_max_colour_values)
base_title_string = '{0:d}th-most novel example'.format(test_index + 1)
this_title_string = '{0:s}: actual'.format(base_title_string)
this_figure_object.suptitle(this_title_string)
this_file_name = '{0:s}/actual_images/actual_image{1:04d}.jpg'.format(
top_output_dir_name, test_index)
file_system_utils.mkdir_recursive_if_necessary(file_name=this_file_name)
    print('Saving figure to file: "{0:s}"...'.format(this_file_name))
pyplot.savefig(this_file_name, dpi=FIGURE_RESOLUTION_DPI)
pyplot.close()
if plot_wind_barbs:
this_figure_object, _ = example_plotting.plot_many_predictors_with_barbs(
predictor_matrix=image_matrix_upconv,
predictor_names=narr_predictor_names,
cmap_object_by_predictor=[MAIN_COLOUR_MAP_OBJECT] * num_predictors,
min_colour_value_by_predictor=these_min_colour_values,
max_colour_value_by_predictor=these_max_colour_values)
else:
this_figure_object, _ = example_plotting.plot_many_predictors_sans_barbs(
predictor_matrix=image_matrix_upconv,
predictor_names=narr_predictor_names,
cmap_object_by_predictor=[MAIN_COLOUR_MAP_OBJECT] * num_predictors,
min_colour_value_by_predictor=these_min_colour_values,
max_colour_value_by_predictor=these_max_colour_values)
this_title_string = r'{0:s}: upconvnet reconstruction'.format(
base_title_string)
this_title_string += r' ($\mathbf{X}_{up}$)'
this_figure_object.suptitle(this_title_string)
this_file_name = '{0:s}/upconv_images/upconv_image{1:04d}.jpg'.format(
top_output_dir_name, test_index)
file_system_utils.mkdir_recursive_if_necessary(file_name=this_file_name)
    print('Saving figure to file: "{0:s}"...'.format(this_file_name))
pyplot.savefig(this_file_name, dpi=FIGURE_RESOLUTION_DPI)
pyplot.close()
novelty_matrix = (
image_matrix_upconv -
novelty_dict[novelty_detection.NOVEL_IMAGES_UPCONV_SVD_KEY][
test_index, ...]
)
these_max_colour_values = numpy.array([
numpy.percentile(numpy.absolute(image_matrix_upconv[..., k]), 99)
for k in range(num_predictors)
])
these_min_colour_values = -1 * these_max_colour_values
if plot_wind_barbs:
this_figure_object, _ = (
example_plotting.plot_many_predictors_with_barbs(
predictor_matrix=novelty_matrix,
predictor_names=narr_predictor_names,
cmap_object_by_predictor=
[NOVELTY_COLOUR_MAP_OBJECT] * num_predictors,
min_colour_value_by_predictor=these_min_colour_values,
max_colour_value_by_predictor=these_max_colour_values)
)
else:
this_figure_object, _ = (
example_plotting.plot_many_predictors_sans_barbs(
predictor_matrix=novelty_matrix,
predictor_names=narr_predictor_names,
cmap_object_by_predictor=
[NOVELTY_COLOUR_MAP_OBJECT] * num_predictors,
min_colour_value_by_predictor=these_min_colour_values,
max_colour_value_by_predictor=these_max_colour_values)
)
this_title_string = r'{0:s}: novelty'.format(
base_title_string)
this_title_string += r' ($\mathbf{X}_{up} - \mathbf{X}_{up,svd}$)'
this_figure_object.suptitle(this_title_string)
this_file_name = '{0:s}/novelty_images/novelty_image{1:04d}.jpg'.format(
top_output_dir_name, test_index)
file_system_utils.mkdir_recursive_if_necessary(file_name=this_file_name)
    print('Saving figure to file: "{0:s}"...'.format(this_file_name))
pyplot.savefig(this_file_name, dpi=FIGURE_RESOLUTION_DPI)
pyplot.close()
| 5,338,265
|
def test_files():
"""Test a shellfunction that specifies positional CLI arguments that are interpolated by the ``kwargs``."""
content_a = 'content_a'
content_b = 'content_b'
files = {
'file_a': SinglefileData(io.StringIO(content_a)),
'file_b': SinglefileData(io.StringIO(content_b)),
}
arguments = ['{file_a}', '{file_b}']
results, node = launch_shell_job('cat', arguments=arguments, files=files)
assert node.is_finished_ok
assert results['stdout'].get_content().strip() == content_a + content_b
| 5,338,266
|
def opf_consfcn(x, om, Ybus, Yf, Yt, ppopt, il=None, *args):
"""Evaluates nonlinear constraints and their Jacobian for OPF.
Constraint evaluation function for AC optimal power flow, suitable
for use with L{pips}. Computes constraint vectors and their gradients.
@param x: optimization vector
@param om: OPF model object
@param Ybus: bus admittance matrix
@param Yf: admittance matrix for "from" end of constrained branches
@param Yt: admittance matrix for "to" end of constrained branches
@param ppopt: PYPOWER options vector
@param il: (optional) vector of branch indices corresponding to
branches with flow limits (all others are assumed to be
unconstrained). The default is C{range(nl)} (all branches).
C{Yf} and C{Yt} contain only the rows corresponding to C{il}.
@return: C{h} - vector of inequality constraint values (flow limits)
    limit^2 - flow^2, where the flow can be apparent power, real power, or
current, depending on value of C{OPF_FLOW_LIM} in C{ppopt} (only for
constrained lines). C{g} - vector of equality constraint values (power
balances). C{dh} - (optional) inequality constraint gradients, column
j is gradient of h(j). C{dg} - (optional) equality constraint gradients.
@see: L{opf_costfcn}, L{opf_hessfcn}
@author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
Autonoma de Manizales)
@author: Ray Zimmerman (PSERC Cornell)
"""
##----- initialize -----
## unpack data
ppc = om.get_ppc()
baseMVA, bus, gen, branch = \
ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]
vv, _, _, _ = om.get_idx()
## problem dimensions
nb = bus.shape[0] ## number of buses
nl = branch.shape[0] ## number of branches
ng = gen.shape[0] ## number of dispatchable injections
nxyz = len(x) ## total number of control vars of all types
## set default constrained lines
if il is None:
il = arange(nl) ## all lines have limits by default
nl2 = len(il) ## number of constrained lines
## grab Pg & Qg
Pg = x[vv["i1"]["Pg"]:vv["iN"]["Pg"]] ## active generation in p.u.
Qg = x[vv["i1"]["Qg"]:vv["iN"]["Qg"]] ## reactive generation in p.u.
## put Pg & Qg back in gen
gen[:, PG] = Pg * baseMVA ## active generation in MW
gen[:, QG] = Qg * baseMVA ## reactive generation in MVAr
## rebuild Sbus
Sbus = makeSbus(baseMVA, bus, gen) ## net injected power in p.u.
## ----- evaluate constraints -----
## reconstruct V
Va = x[vv["i1"]["Va"]:vv["iN"]["Va"]]
Vm = x[vv["i1"]["Vm"]:vv["iN"]["Vm"]]
V = Vm * exp(1j * Va)
## evaluate power flow equations
mis = V * conj(Ybus * V) - Sbus
##----- evaluate constraint function values -----
## first, the equality constraints (power flow)
g = r_[ mis.real, ## active power mismatch for all buses
mis.imag ] ## reactive power mismatch for all buses
## then, the inequality constraints (branch flow limits)
if nl2 > 0:
flow_max = (branch[il, RATE_A] / baseMVA)**2
flow_max[flow_max == 0] = Inf
if ppopt['OPF_FLOW_LIM'] == 2: ## current magnitude limit, |I|
If = Yf * V
It = Yt * V
h = r_[ If * conj(If) - flow_max, ## branch I limits (from bus)
It * conj(It) - flow_max ].real ## branch I limits (to bus)
else:
## compute branch power flows
## complex power injected at "from" bus (p.u.)
Sf = V[ branch[il, F_BUS].astype(int) ] * conj(Yf * V)
## complex power injected at "to" bus (p.u.)
St = V[ branch[il, T_BUS].astype(int) ] * conj(Yt * V)
if ppopt['OPF_FLOW_LIM'] == 1: ## active power limit, P (Pan Wei)
h = r_[ Sf.real**2 - flow_max, ## branch P limits (from bus)
St.real**2 - flow_max ] ## branch P limits (to bus)
else: ## apparent power limit, |S|
h = r_[ Sf * conj(Sf) - flow_max, ## branch S limits (from bus)
St * conj(St) - flow_max ].real ## branch S limits (to bus)
else:
h = zeros((0,1))
##----- evaluate partials of constraints -----
## index ranges
iVa = arange(vv["i1"]["Va"], vv["iN"]["Va"])
iVm = arange(vv["i1"]["Vm"], vv["iN"]["Vm"])
iPg = arange(vv["i1"]["Pg"], vv["iN"]["Pg"])
iQg = arange(vv["i1"]["Qg"], vv["iN"]["Qg"])
iVaVmPgQg = r_[iVa, iVm, iPg, iQg].T
## compute partials of injected bus powers
dSbus_dVm, dSbus_dVa = dSbus_dV(Ybus, V) ## w.r.t. V
## Pbus w.r.t. Pg, Qbus w.r.t. Qg
neg_Cg = sparse((-ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))
## construct Jacobian of equality constraints (power flow) and transpose it
dg = lil_matrix((2 * nb, nxyz))
blank = sparse((nb, ng))
dg[:, iVaVmPgQg] = vstack([
## P mismatch w.r.t Va, Vm, Pg, Qg
hstack([dSbus_dVa.real, dSbus_dVm.real, neg_Cg, blank]),
## Q mismatch w.r.t Va, Vm, Pg, Qg
hstack([dSbus_dVa.imag, dSbus_dVm.imag, blank, neg_Cg])
], "csr")
dg = dg.T
if nl2 > 0:
## compute partials of Flows w.r.t. V
if ppopt['OPF_FLOW_LIM'] == 2: ## current
dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
dIbr_dV(branch[il, :], Yf, Yt, V)
else: ## power
dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
dSbr_dV(branch[il, :], Yf, Yt, V)
if ppopt['OPF_FLOW_LIM'] == 1: ## real part of flow (active power)
dFf_dVa = dFf_dVa.real
dFf_dVm = dFf_dVm.real
dFt_dVa = dFt_dVa.real
dFt_dVm = dFt_dVm.real
Ff = Ff.real
Ft = Ft.real
## squared magnitude of flow (of complex power or current, or real power)
df_dVa, df_dVm, dt_dVa, dt_dVm = \
dAbr_dV(dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft)
## construct Jacobian of inequality constraints (branch limits)
## and transpose it.
dh = lil_matrix((2 * nl2, nxyz))
dh[:, r_[iVa, iVm].T] = vstack([
hstack([df_dVa, df_dVm]), ## "from" flow limit
hstack([dt_dVa, dt_dVm]) ## "to" flow limit
], "csr")
dh = dh.T
else:
dh = None
return h, g, dh, dg
| 5,338,267
|
def iou3d_kernel(gt_boxes, pred_boxes):
"""
Core iou3d computation (with cuda)
Args:
gt_boxes: [N, 7] (x, y, z, w, l, h, rot) in Lidar coordinates
pred_boxes: [M, 7]
Returns:
iou3d: [N, M]
"""
intersection_2d = rotate_iou_gpu_eval(gt_boxes[:, [0, 1, 3, 4, 6]], pred_boxes[:, [0, 1, 3, 4, 6]], criterion=2)
gt_max_h = gt_boxes[:, [2]] + gt_boxes[:, [5]] * 0.5
gt_min_h = gt_boxes[:, [2]] - gt_boxes[:, [5]] * 0.5
pred_max_h = pred_boxes[:, [2]] + pred_boxes[:, [5]] * 0.5
pred_min_h = pred_boxes[:, [2]] - pred_boxes[:, [5]] * 0.5
max_of_min = np.maximum(gt_min_h, pred_min_h.T)
min_of_max = np.minimum(gt_max_h, pred_max_h.T)
inter_h = min_of_max - max_of_min
inter_h[inter_h <= 0] = 0
#inter_h[intersection_2d <= 0] = 0
intersection_3d = intersection_2d * inter_h
gt_vol = gt_boxes[:, [3]] * gt_boxes[:, [4]] * gt_boxes[:, [5]]
pred_vol = pred_boxes[:, [3]] * pred_boxes[:, [4]] * pred_boxes[:, [5]]
union_3d = gt_vol + pred_vol.T - intersection_3d
#eps = 1e-6
#union_3d[union_3d<eps] = eps
iou3d = intersection_3d / union_3d
return iou3d
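
# --- Illustrative sketch (not part of the original snippet) ---
# The vertical term above intersects the [z - h/2, z + h/2] extents of every
# GT/prediction pair; the two single-box arrays below overlap by 1 m in height.
import numpy as np

gt_z, gt_h = np.array([[1.0]]), np.array([[2.0]])        # vertical extent [0, 2]
pred_z, pred_h = np.array([[2.0]]), np.array([[2.0]])    # vertical extent [1, 3]
inter_h = np.minimum(gt_z + gt_h * 0.5, (pred_z + pred_h * 0.5).T) \
    - np.maximum(gt_z - gt_h * 0.5, (pred_z - pred_h * 0.5).T)
inter_h[inter_h <= 0] = 0
assert inter_h[0, 0] == 1.0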
| 5,338,268
|
def get_final_metrics(raw_metrics, summarized=False):
"""
Calculates final metrics from all categories.
:param summarized: True if the result should contain only final metrics (precision recall, f1 and f0.5)
False if the result should contain all the per category metrics too.
:param raw_metrics: A dictionary of tp, fp and fn values for each category
:return: a dictionary with the precision, recall, f1 and f0.5 metrics, as well as the input metrics data.
"""
tp = 0
fp = 0
fn = 0
num_values = 0
num_samples = 0
final_metrics = dict()
for category in raw_metrics:
category_tp = raw_metrics[category]['TP']
category_fp = raw_metrics[category]['FP']
category_fn = raw_metrics[category]['FN']
final_metrics[category] = {}
if category_tp > 0:
final_metrics[category]['precision'] = category_tp / (category_tp + category_fp)
final_metrics[category]['recall'] = category_tp / (category_tp + category_fn)
final_metrics[category]['f1'] = f_beta(final_metrics[category]['precision'],
final_metrics[category]['recall'], 1
)
if 'num_values' in raw_metrics[category]:
final_metrics[category]['num_values'] = raw_metrics[category]['num_values']
if 'num_samples' in raw_metrics[category]:
final_metrics[category]['num_samples'] = raw_metrics[category]['num_samples']
tp += category_tp
fp += category_fp
fn += category_fn
        num_values += final_metrics[category].get('num_values', 0)
        num_samples += final_metrics[category].get('num_samples', 0)
if (tp + fp) > 0:
final_metrics['precision'] = tp / (tp + fp)
else:
final_metrics['precision'] = np.nan
if (tp + fn) > 0:
final_metrics['recall'] = tp / (tp + fn)
else:
final_metrics['recall'] = np.nan
final_metrics['f1'] = f_beta(final_metrics['precision'], final_metrics['recall'], 1)
final_metrics['f0.5'] = f_beta(final_metrics['precision'], final_metrics['recall'], 0.5)
final_metrics['num_values'] = num_values
final_metrics['num_samples'] = num_samples
if summarized:
return summarize_metrics(final_metrics)
else:
return final_metrics
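
# --- Illustrative sketch (not part of the original snippet) ---
# The expected input is a per-category dict of TP/FP/FN counts (the
# 'num_values'/'num_samples' keys are optional); the category names and counts
# below are made up. With these counts the micro-averaged totals are tp=12,
# fp=4, fn=6, giving precision 12/16 = 0.75 and recall 12/18 ~ 0.667. f_beta is
# assumed to be the standard F-beta formula.
raw_metrics_example = {
    "spelling": {"TP": 8, "FP": 1, "FN": 2, "num_values": 11, "num_samples": 5},
    "grammar": {"TP": 4, "FP": 3, "FN": 4, "num_values": 11, "num_samples": 5},
}
tp_total = sum(m["TP"] for m in raw_metrics_example.values())  # 12
fp_total = sum(m["FP"] for m in raw_metrics_example.values())  # 4
fn_total = sum(m["FN"] for m in raw_metrics_example.values())  # 6
precision = tp_total / (tp_total + fp_total)                   # 0.75
recall = tp_total / (tp_total + fn_total)                      # ~0.667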
| 5,338,269
|
def get_health_feed():
"""
Parse BBC news health feed and remove articles not related to COVID-19.
"""
feed = parse("http://feeds.bbci.co.uk/news/health/rss.xml")
# log parsed feed for debugging purposes
logger.debug(pprint(feed.entries))
logger.debug(f"Feed items before removal: {len(feed.entries)}.")
    # Remove all feed items not related to COVID-19. Iterate in reverse so that
    # popping an item does not shift the indices of entries still to be checked.
    for index in reversed(range(len(feed.entries))):
        if findall(r'Coronavirus|COVID|Covid|Covid-19', feed.entries[index].title, IGNORECASE):
            continue
        logger.debug(f"Removing item at index: {index}.")
        feed.entries.pop(index)
return feed
| 5,338,270
|
def get_data_meta_path(either_file_path: str) -> tuple:
"""get either a meta o rr binary file path and return both as a tuple
Arguments:
either_file_path {str} -- path of a meta/binary file
Returns:
[type] -- (binary_path, meta_path)
"""
file_stripped = '.'.join(either_file_path.split('.')[:-1])
return tuple([file_stripped + ext for ext in ['.bin', '.meta']])
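
# --- Illustrative usage sketch (not part of the original snippet) ---
# Whichever extension is passed in, both sibling paths come back as a tuple;
# the file name below is made up.
assert get_data_meta_path("runs/model.meta") == ("runs/model.bin", "runs/model.meta")
assert get_data_meta_path("runs/model.bin") == ("runs/model.bin", "runs/model.meta")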
| 5,338,271
|
def group(name):
"""
    Allow creating a group with a default click context and a class for Click's
    ``did-you-mean`` suggestions without having to repeat it for every group.
"""
return click.group(
name=name,
context_settings=CLICK_CONTEXT_SETTINGS,
cls=AliasedGroup)
| 5,338,272
|
def get_files(target_files, config):
"""Retrieve files associated with the potential inputs.
"""
out = []
find_fn = _find_file(config)
for fname in target_files.keys():
remote_fname = find_fn(fname)
if remote_fname:
out.append(remote_fname)
return out
| 5,338,273
|
def test_version_type_yyyyw():
"""
    Test the yyyyw version type.
"""
version_type = version_recommend.VersionTypeYyyyW()
maintain_version = version_type.maintain_version(YYYY_W_VERSION_LIST, YYYY_W_VERSION_LIST[2], 0)
assert maintain_version == "2020.3"
latest_version = version_type.latest_version(YYYY_W_VERSION_LIST)
assert latest_version == "2020.15"
| 5,338,274
|
def cmd(func, *args, **kwargs):
"""Takes a function followed by its arguments"""
def command(*a, **ka):
return func(*args, **kwargs)
return command
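
# --- Illustrative usage sketch (not part of the original snippet) ---
# cmd freezes a function call: the returned callable ignores its own arguments
# and always invokes func with the originally supplied ones.
say_hi = cmd(print, "hi")
say_hi()                 # prints "hi"
say_hi("ignored", x=1)   # still prints "hi"; the new arguments are discarded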
| 5,338,275
|
def flow_accumulation(receiver_nodes, baselevel_nodes, node_cell_area=1.0,
runoff_rate=1.0, boundary_nodes=None):
"""Calculate drainage area and (steady) discharge.
Calculates and returns the drainage area and (steady) discharge at each
node, along with a downstream-to-upstream ordered list (array) of node IDs.
Examples
--------
>>> import numpy as np
>>> from landlab.components.flow_accum import flow_accumulation
>>> r = np.array([2, 5, 2, 7, 5, 5, 6, 5, 7, 8])-1
>>> b = np.array([4])
>>> a, q, s = flow_accumulation(r, b)
>>> a
array([ 1., 3., 1., 1., 10., 4., 3., 2., 1., 1.])
>>> q
array([ 1., 3., 1., 1., 10., 4., 3., 2., 1., 1.])
>>> s
array([4, 1, 0, 2, 5, 6, 3, 8, 7, 9])
"""
s = make_ordered_node_array(receiver_nodes, baselevel_nodes)
#Note that this ordering of s DOES INCLUDE closed nodes. It really shouldn't!
#But as we don't have a copy of the grid accessible here, we'll solve this
#problem as part of route_flow_dn.
a, q = find_drainage_area_and_discharge(s, receiver_nodes, node_cell_area,
runoff_rate, boundary_nodes)
return a, q, s
| 5,338,276
|
def extract_ids(response_content):
"""Given a result's content of a research, returns a list of all ids. This method is meant to work with PubMed"""
ids = str(response_content).split("<Id>")
ids_str = "".join(ids)
ids = ids_str.split("</Id>")
ids.remove(ids[0])
ids.remove(ids[len(ids) - 1])
for i in range(len(ids)):
ids[i] = int(ids[i][2:])
return ids
| 5,338,277
|
def gatorosc(candles: np.ndarray, sequential=False) -> GATOR:
"""
Gator Oscillator by Bill M. Williams
:param candles: np.ndarray
:param sequential: bool - default=False
:return: float | np.ndarray
"""
if not sequential and len(candles) > 240:
candles = candles[-240:]
jaw = shift(smma(candles, period=13, sequential=True), 8)
teeth = shift(smma(candles, period=8, sequential=True), 5)
lips = shift(smma(candles, period=5, sequential=True), 3)
upper = np.abs(jaw - teeth)
lower = -np.abs(teeth - lips)
upper_change = talib.MOM(upper, timeperiod=1)
lower_change = -talib.MOM(lower, timeperiod=1)
if sequential:
return GATOR(upper, lower, upper_change, lower_change)
else:
return GATOR(upper[-1], lower[-1], upper_change[-1], lower_change[-1])
| 5,338,278
|
def program_item(prog_hash):
"""
GET,DELETE /programs/<prog_hash>: query programs
:prog_hash: program checksum/identifier
:returns: flask response
"""
if request.method == 'GET':
with client.client_access() as c:
prog = c.user_programs.get(prog_hash)
return respond_json(prog.properties) if prog else respond_error(404)
else:
raise NotImplementedError
| 5,338,279
|
def lambda_handler(event, context):
"""
Federate Token Exchange Lambda Function
"""
if not "body" in event:
return helper.build_response(
{"message": "You do not have permission to access this resource."}, 403
)
    input_json = json.loads(event["body"])
# verify the client_id and redirect_uri
if not "client_id" in input_json or not "redirect_uri" in input_json:
return helper.build_response(
{"message": "You do not have permission to access this resource."}, 403
)
response_type = "code"
if "response_type" in input_json:
response_type = input_json["response_type"]
client_id = input_json["client_id"]
redirect_uri = input_json["redirect_uri"]
_, msg = helper.verify_client_id_and_redirect_uri(
user_pool_id=USER_POOL_ID, client_id=client_id, redirect_uri=redirect_uri
)
if msg != None:
logging.info(msg)
return helper.build_response({"message": msg}, 403)
federate_account = None
platform = input_json["platform"].lower()
platform_login_data = dict()
platform_login_data["platform"] = platform
# register the federate record in the user table
if (
"id_token" in input_json
or "access_token" in input_json
or "platform_code" in input_json
):
if "platform_code" in input_json:
platform_code = input_json["platform_code"]
secret_client = boto3.client("secretsmanager", region_name="ap-southeast-1")
if platform == "linkedin":
secret = secret_client.get_secret_value(SecretId=LINKEDIN_SECRET_ARN)
secret_dict = json.loads(secret["SecretString"])
platform_client_id = secret_dict["client_id"]
platform_client_secret = secret_dict["client_secret"]
if "platform_redirect_uri" not in input_json:
return helper.build_response(
{
"message": "You do not have permission to access this resource."
},
403,
)
platform_redirect_uri = input_json["platform_redirect_uri"]
resp, msg = federate.linkedin_code_to_access_token(
linkedin_client_id=platform_client_id,
linkedin_client_secret=platform_client_secret,
linkedin_redirect_uri=platform_redirect_uri,
code=platform_code,
)
if msg != None:
logging.info(msg)
return helper.build_response({"message": msg}, 403)
platform_login_data["access_token"] = resp["access_token"]
elif platform == "facebook":
secret = secret_client.get_secret_value(SecretId=FACEBOOK_SECRET_ARN)
secret_dict = json.loads(secret["SecretString"])
platform_client_id = secret_dict["client_id"]
platform_client_secret = secret_dict["client_secret"]
resp, msg = federate.facebook_code_to_access_token(
facebook_client_id=platform_client_id,
facebook_client_secret=platform_client_secret,
code=platform_code,
)
if msg != None:
logging.info(msg)
return helper.build_response({"message": msg}, 403)
platform_login_data["access_token"] = resp["access_token"]
elif platform == "google":
secret = secret_client.get_secret_value(SecretId=GOOGLE_SECRET_ARN)
secret_dict = json.loads(secret["SecretString"])
platform_client_id = secret_dict["client_id"]
platform_client_secret = secret_dict["client_secret"]
resp, msg = federate.google_code_to_access_token(
google_client_id=platform_client_id,
google_client_secret=platform_client_secret,
code=platform_code,
)
if msg != None:
logging.info(msg)
return helper.build_response({"message": msg}, 403)
platform_login_data["access_token"] = resp["access_token"]
if "id_token" in input_json:
platform_login_data["id_token"] = input_json["id_token"]
if "access_token" in input_json:
platform_login_data["access_token"] = input_json["access_token"]
federate_account, msg = federate.verify_federate_and_register_or_get_user(
user_table_name=USER_TABLE_NAME,
platform_login_data=platform_login_data,
mode="get",
)
if msg != None:
logging.info(msg)
return helper.build_response({"message": msg}, 403)
token_response = dict()
token_response["platform"] = platform
if "id_token" in platform_login_data:
token_response["platform_id_token"] = platform_login_data["id_token"]
if "access_token" in platform_login_data:
token_response["platform_access_token"] = platform_login_data["access_token"]
    if federate_account is not None:
        # if the 3rd-party access token validated correctly, generate our own
        # tokens using the CUSTOM_AUTH challenge
password = ""
resp, msg = helper.initiate_auth(
USER_POOL_ID,
federate_account["cognito_email"],
password,
client_id,
auth_flow="CUSTOM_AUTH",
)
# cognito error message check
if msg != None:
logger.info(msg)
return helper.build_response({"message": msg}, 403)
logger.info("CHALLENGE PASSED")
if "AuthenticationResult" in resp:
formatted_authentication_result = helper.format_authentication_result(resp)
if response_type == "code":
# get the authorization code
auth_code, msg = helper.store_token_to_dynamodb_and_get_auth_code(
auth_code_table_name=AUTH_CODE_TABLE_NAME,
client_id=client_id,
redirect_uri=redirect_uri,
token_set=formatted_authentication_result,
)
if msg != None:
logging.info(msg)
return helper.build_response({"message": msg}, 403)
# return the authorization code
return helper.build_response({"code": auth_code}, 200)
elif response_type == "token":
token_response["access_token"] = formatted_authentication_result[
"access_token"
]
token_response["id_token"] = formatted_authentication_result["id_token"]
token_response["refresh_token"] = formatted_authentication_result[
"refresh_token"
]
token_response["expires_in"] = formatted_authentication_result[
"expires_in"
]
token_response["token_type"] = formatted_authentication_result[
"token_type"
]
else:
return helper.build_response(
{"message": "Unsupported response type."}, 403
)
logger.info(token_response)
return helper.build_response(token_response, 200)
| 5,338,280
|
def find_domain_field(fields: List[str]):
"""Find and return domain field value."""
field_index = 0
for field in fields:
if field == "query:":
field_value = fields[field_index + 1]
return field_value
field_index += 1
return None
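
# --- Illustrative usage sketch (not part of the original snippet) ---
# The function scans whitespace-split log fields for the literal token
# "query:" and returns the field right after it; the log line is made up.
fields = "Jan 01 12:00:00 dnsmasq[42]: query: example.com from 10.0.0.5".split()
assert find_domain_field(fields) == "example.com"
assert find_domain_field(["no", "query", "here"]) is None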
| 5,338,281
|
def placeValueOf(num: int, place: int) -> int:
"""
Get the value on the place specified.
:param num: The num
:param place: The place. 1 for unit place, 10 for tens place, 100 for hundreds place.
:return: The value digit.
"""
return lastDigitOf(num // place)
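
# --- Illustrative usage sketch (not part of the original snippet) ---
# lastDigitOf is not shown above; a stand-in returning n % 10 is assumed here
# purely so the example runs.
def lastDigitOf(num: int) -> int:
    return num % 10

assert placeValueOf(4872, 1) == 2     # units place
assert placeValueOf(4872, 10) == 7    # tens place
assert placeValueOf(4872, 100) == 8   # hundreds place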
| 5,338,282
|
def prepare_polygon_coords_for_bokeh(countries):
"""Prepares the country polygons for plotting with Bokeh.
To plot series of polygons, Bokeh needs two lists of lists (one for x coordinates, and another
for y coordinates). Each element in the outer list represents a single polygon, and each
element in the inner lists represents the coordinate for a single point in given polygon.
This function takes a GeoDataFrame with a given set of countries, and returns Bokeh-friendly
lists of x coordinates and y coordinates for those countries.
PARAMETERS:
-----------
countries: GeoDataFrame with a given set of countries.
OUTPUTS:
--------
x_coords, y_coords: Bokeh-friendly lists of x and y coordinates for those countries.
"""
# Simplify shapes (to resolution of 10000 meters), convert polygons to multipolygons.
list_of_polygons = []
for raw_poly in countries['geometry']:
raw_poly = raw_poly.simplify(10000, preserve_topology=False)
if isinstance(raw_poly, Polygon):
raw_poly = MultiPolygon([raw_poly])
for poly in list(raw_poly):
list_of_polygons.append(poly)
# Create lists of lists.
x_coords = [list(poly.exterior.coords.xy[0]) for poly in list_of_polygons]
y_coords = [list(poly.exterior.coords.xy[1]) for poly in list_of_polygons]
return x_coords, y_coords
| 5,338,283
|
def get_incident_ids_as_options(incidents):
"""
Collect the campaign incidents ids form the context and return them as options for MultiSelect field
:type incidents: ``list``
:param incidents: the campaign incidents to collect ids from
:rtype: ``dict``
:return: dict with the ids as options for MultiSelect field e.g {"hidden": False, "options": ids}
"""
try:
ids = [str(incident['id']) for incident in incidents]
ids.sort(key=lambda incident_id: int(incident_id))
ids.insert(0, ALL_OPTION)
return {"hidden": False, "options": ids}
except KeyError as e:
raise DemistoException(NO_ID_IN_CONTEXT) from e
| 5,338,284
|
def get_result(dir_path: str) -> List[float]:
"""試合のログ(csv)から勝敗データを抽出する
Args:
file_path (str): 抽出したい試合のログが格納されているパス
Returns:
List[float]: 勝率データ
"""
files = glob.glob(dir_path + "*.csv")
result = []
for file in files:
csv_file = open(file, "r")
csv_data = csv.reader(csv_file, delimiter=",", doublequote=True,
lineterminator="\r\n", quotechar='"', skipinitialspace=True)
win = 0
lose = 0
for data in csv_data:
if int(data[1]) >= int(data[2]):
win += 1
else:
lose += 1
result.append(win/(win+lose))
return result
| 5,338,285
|
def get_internal_energies(
compounds: dict, qrrho: bool = True, temperature: float = 298.15
):
"""Obtain internal energies for compounds at a given temperature.
Parameters
----------
compounds : dict-like
A descriptor of the compounds.
Mostly likely, this comes from a parsed input file.
See `overreact.io.parse_model`.
qrrho : bool, optional
Apply the quasi-rigid rotor harmonic oscillator (QRRHO) approximation of
M. Head-Gordon and others (see
[*J. Phys. Chem. C* **2015**, 119, 4, 1840–1850](http://dx.doi.org/10.1021/jp509921r))
on top of the classical RRHO.
temperature : array-like, optional
Absolute temperature in Kelvin.
Returns
-------
array-like
Examples
--------
>>> import overreact as rx
>>> from overreact import _constants as constants
>>> model = rx.parse_model("data/ethane/B97-3c/model.k")
>>> internal_energies = get_internal_energies(model.compounds)
>>> (internal_energies - internal_energies.min()) / constants.kcal
array([0. , 2.20053981])
"""
compounds = rx.io._check_compounds(compounds)
internal_energies = []
for name in compounds:
logger.info(f"calculate internal energy: {name}")
# TODO(schneiderfelipe): inertia might benefit from caching
moments, _, _ = coords.inertia(
compounds[name].atommasses, compounds[name].atomcoords
)
internal_energy = rx.thermo.calc_internal_energy(
energy=compounds[name].energy,
degeneracy=compounds[name].mult,
moments=moments,
vibfreqs=compounds[name].vibfreqs,
qrrho=qrrho,
temperature=temperature,
)
internal_energies.append(internal_energy)
return np.array(internal_energies)
| 5,338,286
|
def load_folder(folder: str) -> Tuple[Dict[str, Iterable[List[str]]], Dict[str, Any]]:
"""
Loads data from the folder output using neurips_crawler
output/data_<year>/papers_data.jsons
output/data_<year>/pdfs/<files>
where
- <year> is a 4 digits year associated to the year of the Neurips conference.
- papers_data.json is a metadata file for each paper in this conference
- <files> are the raw PDF file for this conference
"""
year_data = {}
with open(os.path.join(folder, 'papers_data.jsons'), 'r') as f:
for line in f.readlines():
paper_data = json.loads(line.strip())
year_data[paper_data['pdf_name']] = paper_data
files = {}
for file in os.listdir(os.path.join(folder, 'pdfs')):
files[file] = pdf_parser.get_text(os.path.join(folder, 'pdfs', file), local=True)
return files, year_data
| 5,338,287
|
def get_local_info(hass):
"""Get HA's local location config."""
latitude = hass.config.latitude
longitude = hass.config.longitude
timezone = str(hass.config.time_zone)
elevation = hass.config.elevation
return latitude, longitude, timezone, elevation
| 5,338,288
|
def if_present_phrase(src_str_tokens, phrase_str_tokens):
"""
:param src_str_tokens: a list of strings (words) of source text
:param phrase_str_tokens: a list of strings (words) of a phrase
    :return: (match_flag, match_pos_idx) - whether the phrase occurs as a
        contiguous span in the source tokens, and the start index of the first
        match (-1 if not found)
    """
    match_flag = False
    match_pos_idx = -1
for src_start_idx in range(len(src_str_tokens) - len(phrase_str_tokens) + 1):
match_flag = True
# iterate each word in target, if one word does not match, set match=False and break
for seq_idx, seq_w in enumerate(phrase_str_tokens):
src_w = src_str_tokens[src_start_idx + seq_idx]
if src_w != seq_w:
match_flag = False
break
if match_flag:
match_pos_idx = src_start_idx
break
return match_flag, match_pos_idx
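
# --- Illustrative usage sketch (not part of the original snippet) ---
# The phrase is matched as a contiguous sub-sequence of source tokens; the
# sentences below are made up.
src = "we propose a novel graph attention network for text".split()
assert if_present_phrase(src, ["graph", "attention", "network"]) == (True, 4)
assert if_present_phrase(src, ["graph", "neural", "network"]) == (False, -1)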
| 5,338,289
|
def fit_2D_xanes_non_iter(img_xanes, eng, spectrum_ref, error_thresh=0.1):
"""
Solve equation of Ax=b, where:
Inputs:
----------
    A: reference spectrum (2-column array: xray_energy vs. absorption_spectrum)
X: fitted coefficient of each ref spectrum
b: experimental 2D XANES data
Outputs:
----------
fit_coef: the 'x' in the equation 'Ax=b': fitted coefficient of each ref spectrum
cost: cost between fitted spectrum and raw data
"""
num_ref = len(spectrum_ref)
spec_interp = {}
comp = {}
A = []
s = img_xanes.shape
for i in range(num_ref):
tmp = interp1d(
spectrum_ref[f"ref{i}"][:, 0], spectrum_ref[f"ref{i}"][:, 1], kind="cubic"
)
A.append(tmp(eng).reshape(1, len(eng)))
spec_interp[f"ref{i}"] = tmp(eng).reshape(1, len(eng))
comp[f"A{i}"] = spec_interp[f"ref{i}"].reshape(len(eng), 1)
comp[f"A{i}_t"] = comp[f"A{i}"].T
# e.g., spectrum_ref contains: ref1, ref2, ref3
# e.g., comp contains: A1, A2, A3, A1_t, A2_t, A3_t
# A1 = ref1.reshape(110, 1)
# A1_t = A1.T
A = np.squeeze(A).T
M = np.zeros([num_ref + 1, num_ref + 1])
for i in range(num_ref):
for j in range(num_ref):
M[i, j] = np.dot(comp[f"A{i}_t"], comp[f"A{j}"])
M[i, num_ref] = 1
M[num_ref] = np.ones((1, num_ref + 1))
M[num_ref, -1] = 0
# e.g.
# M = np.array([[float(np.dot(A1_t, A1)), float(np.dot(A1_t, A2)), float(np.dot(A1_t, A3)), 1.],
# [float(np.dot(A2_t, A1)), float(np.dot(A2_t, A2)), float(np.dot(A2_t, A3)), 1.],
# [float(np.dot(A3_t, A1)), float(np.dot(A3_t, A2)), float(np.dot(A3_t, A3)), 1.],
# [1., 1., 1., 0.]])
M_inv = np.linalg.inv(M)
b_tot = img_xanes.reshape(s[0], -1)
B = np.ones([num_ref + 1, b_tot.shape[1]])
for i in range(num_ref):
B[i] = np.dot(comp[f"A{i}_t"], b_tot)
x = np.dot(M_inv, B)
x = x[:-1]
x[x < 0] = 0
x_sum = np.sum(x, axis=0, keepdims=True)
x = x / x_sum
cost = np.sum((np.dot(A, x) - b_tot) ** 2, axis=0) / s[0]
cost = cost.reshape(s[1], s[2])
x = x.reshape(num_ref, s[1], s[2])
# cost = compute_xanes_fit_cost(img_xanes, x, spec_interp)
mask = compute_xanes_fit_mask(cost, error_thresh)
mask = mask.reshape(s[1], s[2])
mask_tile = np.tile(mask, (x.shape[0], 1, 1))
x = x * mask_tile
cost = cost * mask
return x, cost
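
# --- Illustrative sketch (not part of the original snippet) ---
# The core idea above is solving Ax = b per pixel, where the columns of A are
# the reference spectra interpolated onto the measurement energies and b is
# the measured spectrum. The synthetic 60/40 mixture below recovers its
# fractions with a plain least-squares solve (the real routine additionally
# enforces the sum-to-one constraint through the bordered matrix M).
import numpy as np

eng = np.linspace(8330, 8360, 31)            # made-up energy grid (eV)
ref1 = 1 / (1 + np.exp(-(eng - 8340)))       # synthetic reference edge 1
ref2 = 1 / (1 + np.exp(-(eng - 8350)))       # synthetic reference edge 2
A = np.column_stack([ref1, ref2])
b = 0.6 * ref1 + 0.4 * ref2                  # known mixture
x, *_ = np.linalg.lstsq(A, b, rcond=None)
assert np.allclose(x, [0.6, 0.4])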
| 5,338,290
|
def cli(env, crt, csr, icc, key, notes):
"""Add and upload SSL certificate details."""
template = {
'intermediateCertificate': '',
'certificateSigningRequest': '',
'notes': notes,
}
with open(crt, encoding="utf-8") as file_crt:
template['certificate'] = file_crt.read()
with open(key, encoding="utf-8") as file_key:
template['privateKey'] = file_key.read()
with open(csr, encoding="utf-8") as file_csr:
if csr:
body = file_csr.read()
template['certificateSigningRequest'] = body
with open(icc, encoding="utf-8") as file_icc:
if icc:
body = file_icc.read()
template['intermediateCertificate'] = body
manager = SoftLayer.SSLManager(env.client)
manager.add_certificate(template)
| 5,338,291
|
def plot_sse_comparison(clustering_data, max_n):
""" Plots a comparison of homogeneity, completeness, or v-measure.
This plot is very specific to present the results in the paper in a clean way. Please adapt for further use.
Clustering_alg is a list of dicts of which each dict contains information on:
"name": the name of the algorithm
"distance": the distance measure to be used (either euclidean or jaccard)
"alg": any sklearn cluster algorithm, e.g. AgglomerativeClustering
"n": number of clusters
"group": Int, in case this clustering belongs to a group of clustering
:param clustering_data: list of dicts as described above
:param max_n:
:return:
"""
# load values
values = np.zeros((2, max_n - 2))
for i, alg_data in enumerate(clustering_data):
values[0, alg_data["n"] - 2] = alg_data["sse_centroid"]
values[1, alg_data["n"] - 2] = alg_data["sse_core"]
# plot SSE and to core and centroid
cmap = [get_cmap("tab20b").colors[12], get_cmap("tab20b").colors[13],
get_cmap("tab20b").colors[14], get_cmap("tab20b").colors[15]]
cmapoffset = 0
markers = ['.', "x", 'x', '+']
style = ["solid", "dotted", "solid", "dotted"]
steps = 4
plt.figure(num=None, figsize=(5, 3.5), dpi=200, facecolor='w', edgecolor='k')
for i in range(2):
plt.plot(range(2, max_n, steps), values[i, 0:max_n:steps], c=cmap[i + cmapoffset], marker=markers[i],
linestyle=style[i])
plt.xlabel("number of clusters")
plt.title("SSE to centroid and core per number of clusters".format())
plt.tight_layout()
# set legend
handles = list()
handles.append(mlines.Line2D([], [], marker=markers[0], color=cmap[cmapoffset + 0], linestyle=style[0],
label='SSE to centroid'))
handles.append(mlines.Line2D([], [], marker=markers[1], color=cmap[cmapoffset + 1], linestyle=style[1],
label='SSE to core'))
plt.legend(handles=handles)
plt.show()
| 5,338,292
|
def get_cmap(n_fg):
"""Generate a color map for visualizing foreground objects
Args:
n_fg (int): Number of foreground objects
Returns:
cmaps (numpy.ndarray): Colormap
"""
cmap = cm.get_cmap('Set1')
cmaps = []
for i in range(n_fg):
cmaps.append(np.asarray(cmap(i))[:3])
cmaps = np.vstack(cmaps)
return cmaps
| 5,338,293
|
def GetBasinOutlines(DataDirectory, basins_fname):
"""
This function takes in the raster of basins and gets a dict of basin polygons,
where the key is the basin key and the value is a shapely polygon of the basin.
IMPORTANT: In this case the "basin key" is usually the junction number:
this function will use the raster values as keys and in general
the basin rasters are output based on junction indices rather than keys
Args:
DataDirectory (str): the data directory with the basin raster
basins_fname (str): the basin raster
Returns:
        dict mapping basin keys (raster values) to shapely polygons of the basins
Author: FJC
"""
# read in the basins raster
this_fname = basins_fname.split('.')
print(basins_fname)
OutputShapefile = this_fname[0]+'.shp'
# polygonise the raster
BasinDict = LSDMap_IO.PolygoniseRaster(DataDirectory, basins_fname, OutputShapefile)
return BasinDict
| 5,338,294
|
def get_ventilation_status():
"""
Command: 0x00 0xCD
"""
status_data = {"IntakeFanActive": {0: False, 1: True}}
packet = create_packet([0x00, 0xCD])
data = serial_command(packet)
debug_data(data)
try:
if data is None:
warning_msg("get_ventilation_status function could not get serial data")
else:
ReturnAirLevel = int(data[13], 16)
SupplyAirLevel = int(data[14], 16)
FanLevel = int(data[15], 16) - 1
IntakeFanActive = status_data["IntakeFanActive"][int(data[16], 16)]
publish_message(
msg=ReturnAirLevel, mqtt_path="house/attic/wtw/return_air_level"
)
publish_message(
msg=SupplyAirLevel, mqtt_path="house/attic/wtw/supply_air_level"
)
publish_message(
msg=FanLevel, mqtt_path="house/attic/wtw/ventilation_level"
)
publish_message(
msg=IntakeFanActive, mqtt_path="house/attic/wtw/intake_fan_active"
)
debug_msg(
"ReturnAirLevel: {}, SupplyAirLevel: {}, FanLevel: {}, IntakeFanActive: {}".format(
ReturnAirLevel, SupplyAirLevel, FanLevel, IntakeFanActive
)
)
except IndexError:
warning_msg("get_ventilation_status ignoring incomplete message")
| 5,338,295
|
def detect_park(frame, hsv):
"""
Expects: HSV image of any shape + current frame
Returns: TBD
"""
#hsv = cv2.cvtColor(frame, cfg.COLOUR_CONVERT) # convert to HSV CS
# filter
mask = cv2.inRange(hsv, lower_green_park, upper_green_park)
# operations
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,iterations=1)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel,iterations=1)
img = cv2.bitwise_and(frame,frame,mask = mask)
# logic
height, width = mask.shape[:2]
contours, _ = cv2.findContours(mask[0:int(height/2), 0:width], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
area = cv2.contourArea(cnt) # calculate area of the contour
x,y,w,h = cv2.boundingRect(cnt) # create a rectangle around the contour
#roi = frame[y:y+h, x:x+w] # select an ROI out of the frame
# check if the ROI is in allowed area
vr = valid_range(x,y,w,h,frame)
if not vr:
continue
# calculate ratio of sides - anything not square is not worth checking
sr = is_squarish(h, w)
if not sr:
continue
# check the area size (too small ignore, too big ignore)
if cfg.AREA_SIZE_PARK < area < cfg.MAX_AREA_SIZE: #and ( w / h < 1.0):
if cfg.DEMO_MODE:
cv2.rectangle(frame, (x,y), (x+w, y+h), (127,255,127), 2)
cv2.putText(frame, "PARK", (x,y), cfg.FONT, 2, (127,255,127))
return "park"
return None
| 5,338,296
|
def normalize(mx):
"""Row-normalize sparse matrix"""
mx = np.array(mx)
rowsum = mx.sum(axis=1)
r_inv = np.power(rowsum, -1.0).flatten() #use -1.0 as asym matrix
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = np.diag(r_inv)
a = np.dot(r_mat_inv, mx)
#a = np.dot(a, r_mat_inv) #skip for asym matrix
#return a #normalized matrix
return mx
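
# --- Illustrative sketch (not part of the original snippet) ---
# Row normalization scales each row so it sums to 1 (rows summing to 0 are
# left untouched). Note that the function above computes the normalized
# matrix `a` but, as written, returns the input `mx` unchanged.
import numpy as np

m = np.array([[1.0, 3.0], [2.0, 2.0]])
row_sums = m.sum(axis=1, keepdims=True)
row_normalized = m / row_sums
assert np.allclose(row_normalized, [[0.25, 0.75], [0.5, 0.5]])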
| 5,338,297
|
def th_allclose(x, y):
"""
    Determine whether two torch tensors have (approximately) the same values.
    Roughly mimics np.allclose by checking that the total absolute difference is below 1e-5.
"""
return th.sum(th.abs(x-y)) < 1e-5
| 5,338,298
|
def _check_h5_installed(strict=True):
"""Aux function."""
try:
import h5py
return h5py
except ImportError:
if strict is True:
raise RuntimeError('For this functionality to work, the h5py '
'library is required.')
else:
return False
| 5,338,299
|