| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def calculate_linear_classifier_output_shapes(operator):
"""
This operator maps an input feature vector into a scalar label if
the number of outputs is one. If two outputs appear in this
operator's output list, we should further generate a tensor storing
all classes' probabilities.
Allowed input/output patterns are
    1. [N, C] ---> [N, 1], a sequence of maps
"""
_calculate_linear_classifier_output_shapes(operator)
| 17,400
|
def npm_package_bin(tool = None, package = None, package_bin = None, data = [], outs = [], args = [], output_dir = False, **kwargs):
"""Run an arbitrary npm package binary (e.g. a program under node_modules/.bin/*) under Bazel.
It must produce outputs. If you just want to run a program with `bazel run`, use the nodejs_binary rule.
This is like a genrule() except that it runs our launcher script that first
links the node_modules tree before running the program.
This is a great candidate to wrap with a macro, as documented:
https://docs.bazel.build/versions/master/skylark/macros.html#full-example
Args:
data: similar to [genrule.srcs](https://docs.bazel.build/versions/master/be/general.html#genrule.srcs)
may also include targets that produce or reference npm packages which are needed by the tool
outs: similar to [genrule.outs](https://docs.bazel.build/versions/master/be/general.html#genrule.outs)
output_dir: set to True if you want the output to be a directory
Exactly one of `outs`, `output_dir` may be used.
If you output a directory, there can only be one output, which will be a directory named the same as the target.
args: Command-line arguments to the tool.
Subject to 'Make variable' substitution.
Can use $(location) expansion. See https://docs.bazel.build/versions/master/be/make-variables.html
You may also refer to the location of the output_dir with the special `$@` replacement, like genrule.
            If output_dir=True then $@ will refer to the output directory for this package.
package: an npm package whose binary to run, like "terser". Assumes your node_modules are installed in a workspace called "npm"
package_bin: the "bin" entry from `package` that should be run. By default package_bin is the same string as `package`
tool: a label for a binary to run, like `@npm//terser/bin:terser`. This is the longer form of package/package_bin.
Note that you can also refer to a binary in your local workspace.
"""
if not tool:
if not package:
fail("You must supply either the tool or package attribute")
if not package_bin:
package_bin = package
tool = "@npm//%s/bin:%s" % (package, package_bin)
_npm_package_bin(
data = data,
outs = outs,
args = args,
output_dir = output_dir,
tool = tool,
**kwargs
)
| 17,401
|
def get_mask_results(probs, boxes, im_w, im_h, pixil_score_th=0.25):
"""
Args:
probs (Tensor)
boxes (ImageContainer)
Returns:
rles (list[string])
mask_pixel_scores (Tensor)
"""
device = probs.device
N, _, H, W = probs.shape
num_chunks = N if device.type == "cpu" else int(np.ceil(N * int(im_h * im_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
    assert num_chunks <= N, "Default GPU_MEM_LIMIT is too small; try increasing it"
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
im_masks = torch.zeros(N, im_h, im_w, device=device, dtype=torch.bool)
im_masks_tl = torch.zeros(N, im_h, im_w, device=device, dtype=torch.bool)
im_masks_th = torch.zeros(N, im_h, im_w, device=device, dtype=torch.bool)
for i in chunks:
masks_chunk, spatial_inds = _do_paste_mask(probs[i], boxes[i], im_h, im_w, skip_empty=device.type == "cpu")
im_masks[(i,) + spatial_inds] = (masks_chunk >= 0.5).to(dtype=torch.bool)
im_masks_tl[(i,) + spatial_inds] = (masks_chunk >= pixil_score_th).to(dtype=torch.bool)
im_masks_th[(i,) + spatial_inds] = (masks_chunk >= (1 - pixil_score_th)).to(dtype=torch.bool)
mask_pixel_scores = (torch.sum(im_masks_th, dim=(1, 2)).to(dtype=torch.float32)
/ torch.sum(im_masks_tl, dim=(1, 2)).to(dtype=torch.float32).clamp(min=1e-6))
rles = []
for i in range(N):
# Too slow.
# Get RLE encoding used by the COCO evaluation API
rle = mask_util.encode(np.array(im_masks[i].unsqueeze(2).cpu(), dtype=np.uint8, order='F'))[0]
# For dumping to json, need to decode the byte string.
# https://github.com/cocodataset/cocoapi/issues/70
rle['counts'] = rle['counts'].decode('ascii')
rles.append(rle)
return rles, mask_pixel_scores
| 17,402
|
def openapi() -> Dict[str, Any]:
"""
>>> client = app.test_client()
>>> response = client.get("/openapi.json")
>>> response.get_json()['openapi']
'3.0.0'
>>> response.get_json()['info']['title']
'Chapter 13. Example 2'
"""
# See dominoes_openapi.json for full specification
return jsonify(OPENAPI_SPEC)
| 17,403
|
def get_config_keys(config, prefix):
"""Get configuration options matching given prefix"""
for key, value in config.items():
if key.startswith(prefix):
yield key[len(prefix):].upper(), value
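# Minimal usage sketch for get_config_keys above; the config dict and prefix are
# hypothetical and only illustrate the prefix stripping and upper-casing.
config = {"db_host": "localhost", "db_port": "5432", "cache_ttl": "60"}
print(dict(get_config_keys(config, "db_")))  # {'HOST': 'localhost', 'PORT': '5432'}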
| 17,404
|
def koliko_izdelkov_v_skladiscu():
"""
    Returns the number of distinct products in the warehouse.
>>> koliko_izdelkov_v_skladiscu()
18
"""
poizvedba = """
SELECT COUNT(*)
FROM izdelki
WHERE kolicina IS NOT null
"""
st, = conn.execute(poizvedba).fetchone()
return st
| 17,405
|
def create_spline(curve_data, s_type='NURBS', len_nodes=100, spline_id=0, splines_count=1, bud_position=None):
"""
Create a spline of given type with n nodes to form a path made of sin and cos
"""
spline = curve_data.splines.new(type=s_type)
# Regular spline points need xyz + weight
got_points = 1
co_dimension = 4
pts = spline.points
if s_type == 'BEZIER':
got_points = 2
# Bezier control points accept only xyz
co_dimension = 3
# Left and right handles are not handled here
pts = spline.bezier_points
# This is the len for numpy arrays
len_nodes = len_nodes - got_points + 1
# Every spline already has got point(s) when created
# This was compensated with got_points
pts.add(len_nodes - 1)
if bud_position is None:
bud_position = np.random.rand(co_dimension) * 1000
# Below is a play with random, sin and cos just for demo.
# Replace with your own data and if you have none, it's pretty easy
# to generate a bunch of points in space with Sverchok or Animation Nodes
radii = np.random.rand(len_nodes) + 1
radii *= radii**4 / 10
dir_walk = np.arange(len_nodes) / 10 + np.random.rand(len_nodes)
pi_walk = (np.arange(len_nodes)+1) * int(math.pi / len_nodes * 100)/(100+len_nodes)
pi_walk += random.random()*math.pi
nodes = np.random.rand(len_nodes, co_dimension)
nodes[:, 0] += bud_position[0]
nodes[:, 1] += bud_position[1]
nodes[:, 2] += bud_position[2]
rf1 = int(random.random()*3 + 1)
rf2 = int(random.random()*3 + 1)
nodes[:, 0] += np.sin(np.cos(pi_walk)) * random.random()*300+200
nodes[:, 1] += (np.cos(np.sin(pi_walk)**rf1) + np.sin(pi_walk*rf2)) * random.random()*300+200
nodes[:, 2] += np.sin(pi_walk*rf2) * np.cos(pi_walk*rf1) * random.random()*300+200
nodes [:, 0] += np.random.rand(len_nodes) * (random.random()*20+20)
nodes [:, 1] += np.random.rand(len_nodes) * (random.random()*20+20)
nodes [:, 2] += np.random.rand(len_nodes) * (random.random()*20+20)
#nodes[:, 0] += np.sin(pi_walk*random.random())*(random.random()*10+10)**2
#nodes[:, 1] += np.sin(pi_walk*random.random())*(random.random()*100+100)
#nodes[:, 2] += np.cos(pi_walk*random.random())*(random.random()*100+100)
nodes [:, :] *= (random.random()*2+0.5)
# Dummy data for key and value properties, play with HairInfo.Key and HairInfo.Value in your shader!
keys = np.arange(len_nodes) + np.random.rand(len_nodes)
values = np.random.rand(len_nodes)
pts.foreach_set('co', nodes.ravel())
pts.foreach_set('radius', radii.ravel())
pts.foreach_set('key', keys.ravel())
pts.foreach_set('value', values.ravel())
if s_type == 'BEZIER':
handle_fac = 100
lefts = nodes.copy()
lefts[:, 0] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
lefts[:, 1] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
lefts[:, 2] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
rights = nodes.copy()
rights[:, 0] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
rights[:, 1] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
rights[:, 2] += np.random.rand(len_nodes)* handle_fac - handle_fac/2
pts.foreach_set('handle_left', lefts.ravel())
pts.foreach_set('handle_right', rights.ravel())
spline.use_endpoint_u = True
# Spline resolution defaults to 12 but is too much for this use-case
spline.resolution_u = 3
return spline
| 17,406
|
def _check_dir(repository_ctx, directory):
"""Checks whether the directory exists and fail if it does not"""
if not repository_ctx.path(directory).exists:
_auto_configure_fail("Cannot find dir: %s" % directory)
| 17,407
|
def getImgN(path):
"""
    Load all of the images in the given folder.
    [in] path:
    [out] list of the loaded images
"""
if not os.path.isdir(path):
print('path not found:', path)
exit(1)
from os.path import join as opj
return np.array([cv2.imread(opj(path, f), IMG.getCh(0))
for f in os.listdir(path) if IMG.isImgPath(opj(path, f))])
| 17,408
|
def split(
texts_path,
summaries_path,
target_dir=Path("."),
ratio=TRAIN_VAL_TEST_SPLIT,
):
"""
    Splits texts and summaries into train/val/test
Args:
texts_path: path to the file with texts
summaries_path: path to the file with summaries
        target_dir: dir to save the split texts and summaries
(default: Path("."))
ratio: ratio of train/val/test split
(default: look at phramer/phramer/config.py)
"""
articles_num_lines = count_lines(texts_path)
summaries_num_lines = count_lines(summaries_path)
assert articles_num_lines == summaries_num_lines, (
"The number of articles and summaries must be the same, "
+ "got {} and {}".format(articles_num_lines, summaries_num_lines)
)
target_dir = Path(target_dir)
paths = [
[
target_dir / "{}.{}".format(tag, Path(data_path).name)
for data_path in [texts_path, summaries_path]
]
for tag in [TEST_TAG, VAL_TAG, TRAIN_TAG]
]
train, val, _ = np.asarray(ratio) / np.sum(ratio) * articles_num_lines
thresholds = [articles_num_lines, train + val, train]
with open(texts_path, "rb") as articles, open(
summaries_path, "rb"
) as summaries:
with tqdm(total=articles_num_lines, desc="Lines") as pbar:
threshold = thresholds.pop()
target_articles_path, target_summaries_path = paths.pop()
for line_idx, (text, summary) in enumerate(
zip(articles, summaries)
):
if line_idx >= threshold:
target_articles_path, target_summaries_path = paths.pop()
threshold = thresholds.pop()
                # Open in append mode: these files are reopened for every line,
                # and write mode ("wb") would truncate them on each iteration.
                with open(target_articles_path, "ab") as target_texts, open(
                    target_summaries_path, "ab"
                ) as target_summaries:
                    target_texts.write(text)
                    target_summaries.write(summary)
pbar.update()
| 17,409
|
def get_composed_jumps(jumps, levels, win, verbose=0):
"""
Take the output of get_jumps (from landmarks)
Compose the jumps, return them as an array of array.
If intermediate=True, we return the jumps for intermediary levels,
not just the requested one.
We use a temporary sqlite3 connection to work.
"""
assert len(levels) > 0
maxlevel = max(levels)
assert maxlevel >= 1, 'level 1 min, it means jumps between two landmarks'
# verbose
if verbose>0:
t1 = time.time()
# open temporary connection
# IT IS FAST!
# timeit.Timer("import sqlite3; conn = sqlite3.connect(':memory:'); conn.close()").timeit(10000)
# Out[35]: 0.49553799629211426
conn = sqlite3.connect(':memory:')
# special case: level = 1
if maxlevel == 1:
add_nlmk2_jumps_to_db(conn, jumps, nocopy=True)
q = "SELECT * FROM jumps_level1"
res = conn.execute(q)
composed_jumps = res.fetchall()
conn.close()
if verbose > 0:
            print('Composed jumps (max lvl = %d) obtained in %f seconds.' % (maxlevel, time.time() - t1))
return composed_jumps
# enters level1 jumps
add_nlmk2_jumps_to_db(conn, jumps)
# do upper levels
for lvl in range(2, maxlevel+1):
compose_jumps(conn, win, level=lvl)
# what do we return?
composed_jumps = []
for lvl in levels:
q = "SELECT * FROM jumps_level" + str(lvl)
res = conn.execute(q)
composed_jumps.extend(res.fetchall())
# done
conn.close()
# verbose
if verbose > 0:
        print('Composed jumps (max lvl = %d) obtained in %f seconds.' % (maxlevel, time.time() - t1))
return composed_jumps
| 17,410
|
def calculate_agreement_stv(agreement_dictionary, turker_accuracies):
"""
Inter agreement with most accurate chair vote
Args:
agreement_dictionary: holding sentence annotation records - 9 from non-experts and 1 expert
sentence -> list of annotations (size settings.RESPONSE_COUNT + 1)
turker_accuracies: accuracy for each turker used for the chair vote
Returns:
The accuracies from combined agreement from one to nine non-experts with the expert
"""
sequence = list(range(settings.RESPONSE_COUNT))
combinations = []
for i in range(settings.RESPONSE_COUNT + 1):
combinations.append(list(itertools.combinations(sequence, i)))
print(combinations)
accuracies = [0]
standard_deviations = [0]
for i in range(1, settings.RESPONSE_COUNT + 1):
current_combinations = combinations[i]
combination_accuracies = []
for combination in current_combinations:
correct = 0
incorrect = 0
for sentence in agreement_dictionary.keys():
expert_annotations = agreement_dictionary[sentence][-1][1]
chosen_annotations = [agreement_dictionary[sentence][x][1] for x in combination]
votes = np.sum(chosen_annotations, axis=0)
chair = 0
if len(combination) > 0 and len(combination) % 2 == 0:
max_accuracy = 0
for judgement_index in combination:
turker = agreement_dictionary[sentence][judgement_index][0]
turker_accuracy = turker_accuracies[turker][0][1]
if turker_accuracy > max_accuracy:
max_accuracy = turker_accuracy
chair = judgement_index
result_votes = [0] * len(votes)
for j in range(len(votes)):
if votes[j] < len(chosen_annotations) / 2:
result_votes[j] = 0
elif votes[j] > len(chosen_annotations) / 2:
result_votes[j] = 1
else:
result_votes[j] = agreement_dictionary[sentence][chair][1][j]
for j in range(len(votes)):
if expert_annotations[j] == result_votes[j]:
correct += 1
else:
incorrect += 1
combination_accuracy = correct / (correct + incorrect)
combination_accuracies.append(combination_accuracy)
standard_deviation = np.std(combination_accuracies)
standard_deviations.append(standard_deviation)
accuracy = sum(combination_accuracies) / len(combination_accuracies)
accuracies.append(accuracy)
return accuracies, standard_deviations
| 17,411
|
def count_num_peps(filename):
"""
Count the number of peptide sequences in FASTA file.
"""
with open(filename) as f:
counter = 0
for line in f:
if line.startswith(">"):
counter += 1
return counter
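# Tiny illustrative run of count_num_peps above; the FASTA content written here is hypothetical.
with open("sample_peps.fasta", "w") as f:
    f.write(">pep1\nMKV\n>pep2\nGGT\n")
print(count_num_peps("sample_peps.fasta"))  # 2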
| 17,412
|
def get_variables(examples):
"""Convert a code string to a list of variables.
We assume a variable is a 'word' with only alphanumeric characters in it."""
variables = [" ".join(re.split(r"\W+", text)) for text in examples["text"]]
return {"variables": variables}
| 17,413
|
def _stored_data_paths(wf, name, serializer):
"""Return list of paths created when storing data"""
metadata = wf.datafile(".{}.alfred-workflow".format(name))
datapath = wf.datafile(name + "." + serializer)
return [metadata, datapath]
| 17,414
|
def output_0_to_label_click(n):
"""
output_0_label click handler
Goes to first step of the output.
"""
if output_0_label['cursor'] == 'fleur':
load_output_step(n)
| 17,415
|
def ascii_to_walls(char_matrix):
"""
A parser to build a gridworld from a text file.
Each grid has ONE start and goal location.
A reward of +1 is positioned at the goal location.
    :param char_matrix: Matrix of characters ('#' for walls, ' ' for empty cells).
    :return: The wall locations and the empty cell locations.
"""
grid_size = len(char_matrix[0])
assert(len(char_matrix) == grid_size), 'Mismatch in the columns.'
for row in char_matrix:
assert(len(row) == grid_size), 'Mismatch in the rows.'
# ...
wall_locs = []
empty = []
for r in range(grid_size):
for c in range(grid_size):
char = char_matrix[r][c]
if char == '#':
wall_locs.append((r, c))
elif char == ' ':
empty.append((r, c))
else:
raise ValueError('Unknown character {} in grid.'.format(char))
# Attempt to make the desired gridworld.
return wall_locs, empty
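# Hypothetical 3x3 grid demonstrating ascii_to_walls above ('#' = wall, ' ' = empty).
char_matrix = [
    "###",
    "# #",
    "###",
]
walls, empty = ascii_to_walls(char_matrix)
print(empty)       # [(1, 1)]
print(len(walls))  # 8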
| 17,416
|
def test_opening_pickles():
"""Pickle open ok?"""
for n in [3, 4, 5]:
yield check_pickle_opened, n
| 17,417
|
def save_to_hdf5(db_location, patches, coords, file_name, hdf5_file):
""" Saves the numpy arrays to HDF5 files. A sub-group of patches from a single WSI will be saved
to the same HDF5 file
- db_location folder to save images in
- patches numpy images
- coords x, y tile coordinates
- file_name number, like 1, 2,
- hdf5_file one hdf5 wil contain many datasets
"""
# Save patches into hdf5 file.
subgrp='t'+file_name
    grp = hdf5_file.create_group(subgrp)
dataset = grp.create_dataset("img", np.shape(patches),
dtype=np.uint8, data=patches)
# dtype=np.uint8, data=patches, compression="szip")
# , compression_opts=9)
dataset2 = grp.create_dataset("coord", np.shape(coords),
dtype=np.int64, data=coords)
| 17,418
|
def mock_sd(nresp=1):
"""Fake Stackdriver Monitoring API response for the ListTimeSeries endpoint.
Args:
nresp (int): Number of responses to add to response.
Returns:
ChannelStub: Mocked gRPC channel stub.
"""
timeserie = load_fixture('time_series_proto.json')
response = {'next_page_token': '', 'time_series': [timeserie]}
return mock_grpc_stub(
response=response,
proto_method=metric_service_pb2.ListTimeSeriesResponse,
nresp=nresp)
| 17,419
|
def cohesion_separation(chroms, doc):
"""Measure balancing both cohesion and separation of clusters."""
coh = cohesion(chroms, doc)
sep = separation(chroms, doc)
return (1 + sigmoid(coh)) ** sep
| 17,420
|
def shellCall(shellCmd, stdin='', stderr=False, env=None, encoding=None):
"""Call a single system command with arguments, return its stdout.
Returns stdout, stderr if stderr is True.
    Handles simple pipes, passing stdin to shellCmd (pipes are untested
    on Windows). Accepts a string or list as the first argument.
Parameters
----------
shellCmd : str, or iterable
The command to execute, and its respective arguments.
stdin : str, or None
Input to pass to the command.
stderr : bool
Whether to return the standard error output once execution is finished.
env : dict
The environment variables to set during execution.
encoding : str
The encoding to use for communication with the executed command.
This argument will be ignored on Python 2.7.
Notes
-----
We use ``subprocess.Popen`` to execute the command and establish
`stdin` and `stdout` pipes.
Python 2.7 always opens the pipes in text mode; however,
Python 3 defaults to binary mode, unless an encoding is specified.
To unify pipe communication across Python 2 and 3, we now provide an
`encoding` parameter, enforcing `utf-8` text mode by default.
This parameter is present from Python 3.6 onwards; using an older
Python 3 version will raise an exception. The parameter will be ignored
when running Python 2.7.
"""
if encoding is None:
encoding = locale.getpreferredencoding()
if type(shellCmd) == str:
# safely split into cmd+list-of-args, no pipes here
shellCmdList = shlex.split(shellCmd)
elif type(shellCmd) == bytes:
# safely split into cmd+list-of-args, no pipes here
shellCmdList = shlex.split(shellCmd.decode('utf-8'))
elif type(shellCmd) in (list, tuple): # handles whitespace in filenames
shellCmdList = shellCmd
else:
msg = 'shellCmd requires a string or iterable.'
raise TypeError(msg)
cmdObjects = []
for obj in shellCmdList:
if type(obj) != bytes:
cmdObjects.append(obj)
else:
cmdObjects.append(obj.decode('utf-8'))
# Since Python 3.6, we can use the `encoding` parameter.
if PY3:
if sys.version_info.minor >= 6:
proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding=encoding, env=env)
else:
msg = 'shellCall() requires Python 2.7, or 3.6 and newer.'
raise RuntimeError(msg)
else:
proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
stdoutData, stderrData = proc.communicate(stdin)
del proc
if stderr:
return stdoutData.strip(), stderrData.strip()
else:
return stdoutData.strip()
| 17,421
|
def get_number_location(
input : str,
):
# endregion get_number_location header
# region get_number_location docs
"""
    get the string indices of all numbers that occur in the string
format example: [ ( 0, 1 ), ( 4, 6 ), ( 9, 9 ) ]
both begin and end are inclusive, in contrast with the way the std_lib does it
which is begin(inclusive), end(exclusive)
"""
# endregion get_number_location docs
# region get_number_location implementation
locations = []
    for match in re.finditer(r"\d+", input):
# match start is inclusive
position_start = match.start()
# match end is exclusive
position_end = match.end() - 1
locations.append((position_start, position_end))
...
return locations
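# Illustrative call to get_number_location above (which relies on `import re`);
# the sample string is hypothetical.
import re
print(get_number_location("ab12cd345e6"))  # [(2, 3), (6, 8), (10, 10)]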
| 17,422
|
def update_object(obj, new_values):
"""update an object attributes from a supplied dictionary"""
# avoiding obj.__dict__.update(new_values) as it will set a new attribute if it doesn't exist
for k, v in new_values.items():
if hasattr(obj, k):
try:
setattr(obj, k, v)
except AttributeError: # in case of read only attribute
log(f"update_object(): can't update property: {k}, with value: {v}")
except Exception as e:
log(f'update_object(): error, {e}, property: {k}, value: {v}')
return obj
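# Minimal sketch of update_object above on a plain object; the Config class and its
# fields are hypothetical (the `log` helper is only reached on errors).
class Config:
    def __init__(self):
        self.host = "localhost"
        self.port = 80

cfg = update_object(Config(), {"port": 8080, "unknown": "ignored"})
print(cfg.host, cfg.port)  # localhost 8080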
| 17,423
|
async def luck_cownd(client, message):
""" /luck an @animatedluck """
rep_mesg_id = message.message_id
if message.reply_to_message:
rep_mesg_id = message.reply_to_message.message_id
await client.send_dice(
chat_id=message.chat.id,
emoji=TRY_YOUR_LUCK,
disable_notification=True,
reply_to_message_id=rep_mesg_id
)
| 17,424
|
def subpathNeedsRefresh(modTimes, ufoPath, *subPath):
"""
Determine if a file needs to be refreshed.
Returns True if the file's latest modification time is different
from its previous modification time.
"""
previous = modTimes.get(subPath[-1])
if previous is None:
return True
latest = subpathGetModTime(ufoPath, *subPath)
return latest != previous
| 17,425
|
def resxy_(x: float, y: float, /) -> Resolution:
"""Construct resolution from X,Y order."""
return Resolution(x=x, y=y)
| 17,426
|
def copy_static_files(template_dir_path, dst_dir_path):
"""Copies the static files used by the resulting rendered site
Arguments:
template_dir_path -- path to the template directory
dst_dir_path -- destination directory
"""
subdirectories = ['/css', '/js', '/img']
for subdirectory in subdirectories:
if os.path.exists(dst_dir_path + subdirectory):
shutil.rmtree(dst_dir_path + subdirectory)
shutil.copytree(template_dir_path + subdirectory, dst_dir_path + subdirectory, ignore=shutil.ignore_patterns('*.pyc', '*.py'))
| 17,427
|
def group_toggle_modules(request, group):
"""Enable or disable modules.
"""
if request.method != 'POST':
raise Http404
referer = request.META.get('HTTP_REFERER', None)
next = SITE_ROOT if referer is None else referer
username = request.user.username
group_wiki = request.POST.get('group_wiki', 'off')
if group_wiki == 'on':
enable_mod_for_group(group.id, MOD_GROUP_WIKI)
messages.success(request, _('Successfully enable "Wiki".'))
else:
disable_mod_for_group(group.id, MOD_GROUP_WIKI)
        if referer and referer.find('wiki') > 0:
next = reverse('group_info', args=[group.id])
messages.success(request, _('Successfully disable "Wiki".'))
return HttpResponseRedirect(next)
| 17,428
|
async def set_key_metadata(wallet_handle: int,
verkey: str,
metadata: str) -> None:
"""
    Saves/replaces the meta information for the given key in the wallet.
:param wallet_handle: Wallet handle (created by open_wallet).
:param verkey: the key (verkey, key id) to store metadata.
:param metadata: the meta information that will be store with the key.
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug("set_key_metadata: >>> wallet_handle: %r, verkey: %r, metadata: %r",
wallet_handle,
verkey,
metadata)
if not hasattr(set_key_metadata, "cb"):
logger.debug("set_key_metadata: Creating callback")
set_key_metadata.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_wallet_handle = c_int32(wallet_handle)
c_verkey = c_char_p(verkey.encode('utf-8'))
c_metadata = c_char_p(metadata.encode('utf-8'))
await do_call('indy_set_key_metadata',
c_wallet_handle,
c_verkey,
c_metadata,
set_key_metadata.cb)
logger.debug("create_key: <<<")
| 17,429
|
def bilinear_sampler(imgs, coords):
"""
Construct a new image by bilinear sampling from the input image.
Args:
imgs: source image to be sampled from [batch, height_s, width_s, channels]
coords: coordinates of source pixels to sample from [batch, height_t,
Returns:
A new sampled image [batch, height_t, width_t, channels]
"""
def _repeat(x, n_repeats):
rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])
rep = tf.cast(rep, 'float32')
x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
return tf.reshape(x, [-1])
coords_x, coords_y = tf.split(coords, [1, 1], axis=3)
inp_size = imgs.get_shape()
coord_size = coords.get_shape()
out_size = coords.get_shape().as_list()
out_size[3] = imgs.get_shape().as_list()[3]
coords_x = tf.cast(coords_x, 'float32')
coords_y = tf.cast(coords_y, 'float32')
y_max = tf.cast(tf.shape(imgs)[1] - 1, 'float32')
x_max = tf.cast(tf.shape(imgs)[2] - 1, 'float32')
zero = tf.zeros([1], dtype='float32')
eps = tf.constant([0.5], tf.float32)
coords_x = tf.clip_by_value(coords_x, eps, x_max - eps)
coords_y = tf.clip_by_value(coords_y, eps, y_max - eps)
x0 = tf.floor(coords_x)
x1 = x0 + 1
y0 = tf.floor(coords_y)
y1 = y0 + 1
x0_safe = tf.clip_by_value(x0, zero, x_max)
y0_safe = tf.clip_by_value(y0, zero, y_max)
x1_safe = tf.clip_by_value(x1, zero, x_max)
y1_safe = tf.clip_by_value(y1, zero, y_max)
wt_x0 = x1_safe - coords_x
wt_x1 = coords_x - x0_safe
wt_y0 = y1_safe - coords_y
wt_y1 = coords_y - y0_safe
# indices in the flat image to sample from
dim2 = tf.cast(inp_size[2], 'float32')
dim1 = tf.cast(inp_size[2] * inp_size[1], 'float32')
base = tf.reshape(_repeat(tf.cast(tf.range(coord_size[0]), 'float32') * dim1,
coord_size[1] * coord_size[2]),
[out_size[0], out_size[1], out_size[2], 1])
base_y0 = base + y0_safe * dim2
base_y1 = base + y1_safe * dim2
idx00 = tf.reshape(x0_safe + base_y0, [-1])
idx01 = x0_safe + base_y1
idx10 = x1_safe + base_y0
idx11 = x1_safe + base_y1
## sample from imgs
imgs_flat = tf.reshape(imgs, tf.stack([-1, inp_size[3]]))
imgs_flat = tf.cast(imgs_flat, 'float32')
im00 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx00, 'int32')), out_size)
im01 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx01, 'int32')), out_size)
im10 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx10, 'int32')), out_size)
im11 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx11, 'int32')), out_size)
w00 = wt_x0 * wt_y0
w01 = wt_x0 * wt_y1
w10 = wt_x1 * wt_y0
w11 = wt_x1 * wt_y1
output = tf.add_n([
w00 * im00, w01 * im01,
w10 * im10, w11 * im11
])
return output
| 17,430
|
def wrap_array(typingctx, data_ptr, shape_tup):
"""create an array from data_ptr with shape_tup as shape
"""
assert isinstance(data_ptr, types.CPointer), "invalid data pointer"
assert (isinstance(shape_tup, types.UniTuple)
and shape_tup.dtype == np.intp), "invalid shape tuple"
dtype = data_ptr.dtype
arr_typ = types.Array(dtype, shape_tup.count, 'C')
def codegen(context, builder, sig, args):
assert(len(args) == 2)
data = args[0]
shape = args[1]
# XXX: unnecessary allocation and copy, reuse data pointer
shape_list = cgutils.unpack_tuple(builder, shape, shape.type.count)
ary = _empty_nd_impl(context, builder, arr_typ, shape_list)
cgutils.raw_memcpy(builder, ary.data, data, ary.nitems, ary.itemsize, align=1)
# clean up image buffer
fnty = lir.FunctionType(lir.VoidType(), [lir.IntType(8).as_pointer()])
fn_release = builder.module.get_or_insert_function(fnty, name="cv_delete_buf")
builder.call(fn_release, [data])
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
# # cgutils.printf(builder, "%d", shape)
# retary = context.make_array(arr_typ)(context, builder)
# itemsize = context.get_abi_sizeof(context.get_data_type(dtype))
# shape_list = cgutils.unpack_tuple(builder, shape, shape.type.count)
# strides = [context.get_constant(types.intp, itemsize)]
# for dimension_size in reversed(shape_list[1:]):
# strides.append(builder.mul(strides[-1], dimension_size))
# strides = tuple(reversed(strides))
# #import pdb; pdb.set_trace()
# context.populate_array(retary,
# data=data,
# shape=shape,
# strides=strides,
# itemsize=itemsize,
# meminfo=None)
# return retary._getvalue()
return signature(arr_typ, data_ptr, shape_tup), codegen
| 17,431
|
def change(port, password, limit):
"""修改用户"""
port, password, limit = str(port), str(password), str(limit)
ret = subprocess.check_output([SS_ADMIN, 'change', port, password, limit], stderr=subprocess.STDOUT)
return ret
| 17,432
|
def reshape_resting_ecg_to_tidy(
sample_id: Union[int, str], folder: Optional[str] = None, tmap: TensorMap = DEFAULT_RESTING_ECG_SIGNAL_TMAP,
) -> pd.DataFrame:
"""Wrangle resting ECG data to tidy.
Args:
sample_id: The id of the ECG sample to retrieve.
folder: The local or Cloud Storage folder under which the files reside.
tmap: The TensorMap to use for ECG input.
Returns:
A pandas dataframe in tidy format or print a notebook-friendly error and return an empty dataframe.
"""
if folder is None:
folder = get_resting_ecg_hd5_folder(sample_id)
data: Dict[str, Any] = {'lead': [], 'raw': [], 'ts_reference': [], 'filtered': [], 'filtered_1': [], 'filtered_2': []}
with tempfile.TemporaryDirectory() as tmpdirname:
sample_hd5 = str(sample_id) + '.hd5'
local_path = os.path.join(tmpdirname, sample_hd5)
try:
tf.io.gfile.copy(src=os.path.join(folder, sample_hd5), dst=local_path)
except (tf.errors.NotFoundError, tf.errors.PermissionDeniedError) as e:
print(f'''Warning: Resting ECG not available for sample {sample_id} in folder {folder}.
Use the folder parameter to read HD5s from a different directory or bucket.\n\n{e.message}''')
return pd.DataFrame(data)
with h5py.File(local_path, mode='r') as hd5:
try:
signals = tmap.tensor_from_file(tmap, hd5)
except (KeyError, ValueError) as e:
print(f'''Warning: Resting ECG TMAP {tmap.name} not available for sample {sample_id}.
Use the tmap parameter to choose a different TMAP.\n\n{e}''')
_examine_available_keys(hd5)
return pd.DataFrame(data)
for (lead, channel) in ECG_REST_LEADS.items():
signal = signals[:, channel]
signal_length = len(signal)
data['raw'].extend(signal)
data['lead'].extend([lead] * signal_length)
data['ts_reference'].extend(np.array([i*1./(SAMPLING_RATE+1.) for i in range(0, signal_length)]))
filtered, _, _ = filter_signal(
signal=signal,
ftype='FIR',
band='bandpass',
order=int(0.3 * SAMPLING_RATE),
frequency=[.9, 50],
sampling_rate=SAMPLING_RATE,
)
data['filtered'].extend(filtered)
filtered_1, _, _ = filter_signal(
signal=signal,
ftype='FIR',
band='bandpass',
order=int(0.3 * SAMPLING_RATE),
frequency=[.9, 20],
sampling_rate=SAMPLING_RATE,
)
data['filtered_1'].extend(filtered_1)
filtered_2, _, _ = filter_signal(
signal=signal,
ftype='FIR',
band='bandpass',
order=int(0.3 * SAMPLING_RATE),
frequency=[.9, 30],
sampling_rate=SAMPLING_RATE,
)
data['filtered_2'].extend(filtered_2)
signal_df = pd.DataFrame(data)
# Convert the raw signal to mV.
signal_df['raw_mV'] = signal_df['raw'] * RAW_SCALE
signal_df['filtered_mV'] = signal_df['filtered'] * RAW_SCALE
signal_df['filtered_1_mV'] = signal_df['filtered_1'] * RAW_SCALE
signal_df['filtered_2_mV'] = signal_df['filtered_2'] * RAW_SCALE
# Reshape to tidy (long format).
tidy_signal_df = signal_df.melt(
id_vars=['lead', 'ts_reference'],
value_vars=['raw_mV', 'filtered_mV', 'filtered_1_mV', 'filtered_2_mV'],
var_name='filtering', value_name='signal_mV',
)
# The leads have a meaningful order, apply the order to this column.
lead_factor_type = pd.api.types.CategoricalDtype(
categories=[
'strip_I', 'strip_aVR', 'strip_V1', 'strip_V4',
'strip_II', 'strip_aVL', 'strip_V2', 'strip_V5',
'strip_III', 'strip_aVF', 'strip_V3', 'strip_V6',
],
ordered=True,
)
tidy_signal_df['lead'] = tidy_signal_df.lead.astype(lead_factor_type)
return tidy_signal_df
| 17,433
|
def geometric_project_derivatives(
molecule: Molecule,
conformer: torch.Tensor,
internal_coordinates_indices: Dict[str, torch.Tensor],
reference_gradients: torch.Tensor,
reference_hessians: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""A helper method to project a set of gradients and hessians into internal
coordinates using ``geomTRIC``.
Args:
molecule: The molecule of interest
conformer: The conformer of the molecule with units of [A] and shape=(n_atoms, 3)
internal_coordinates_indices: The indices of the atoms involved in each type
of internal coordinate.
reference_gradients: The gradients to project.
reference_hessians: The hessians to project.
Returns:
The projected gradients and hessians.
"""
from geometric.internal import Angle, Dihedral, Distance, OutOfPlane
from geometric.internal import PrimitiveInternalCoordinates as GeometricPRIC
from geometric.internal import (
RotationA,
RotationB,
RotationC,
TranslationX,
TranslationY,
TranslationZ,
)
from geometric.molecule import Molecule as GeometricMolecule
geometric_molecule = GeometricMolecule()
geometric_molecule.Data = {
"resname": ["UNK"] * molecule.n_atoms,
"resid": [0] * molecule.n_atoms,
"elem": [atom.element.symbol for atom in molecule.atoms],
"bonds": [(bond.atom1_index, bond.atom2_index) for bond in molecule.bonds],
"name": molecule.name,
"xyzs": [conformer.detach().numpy()],
}
geometric_coordinates = GeometricPRIC(geometric_molecule)
geometric_coordinates.Internals = [
internal
for internal in geometric_coordinates.Internals
if not isinstance(
internal,
(TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC),
)
]
# We need to re-order the internal coordinates to generate those produced by
# smirnoffee.
ic_by_type = defaultdict(list)
ic_type_to_name = {
Distance: "distances",
Angle: "angles",
Dihedral: "dihedrals",
OutOfPlane: "out-of-plane-angles",
}
for internal_coordinate in geometric_coordinates.Internals:
ic_by_type[ic_type_to_name[internal_coordinate.__class__]].append(
internal_coordinate
)
ordered_internals = []
for ic_type in internal_coordinates_indices:
ic_by_index = {
_geometric_internal_coordinate_to_indices(ic): ic
for ic in ic_by_type[ic_type]
}
for ic_indices in internal_coordinates_indices[ic_type]:
ic_indices = tuple(int(i) for i in ic_indices)
if ic_indices[-1] > ic_indices[0]:
ic_indices = tuple(reversed(ic_indices))
ordered_internals.append(ic_by_index[ic_indices])
geometric_coordinates.Internals = ordered_internals
reference_gradients = reference_gradients.numpy().flatten()
reference_hessians = reference_hessians.numpy().reshape(molecule.n_atoms * 3, -1)
xyz = conformer.detach().numpy()
return (
geometric_coordinates.calcGrad(xyz, reference_gradients),
geometric_coordinates.calcHess(xyz, reference_gradients, reference_hessians),
)
| 17,434
|
def api_get_script(request):
"""POST - Frida Get Script."""
if not request.POST.getlist('scripts[]'):
return make_api_response(
{'error': 'Missing Parameters'}, 422)
resp = tests_frida.get_script(request, True)
if resp['status'] == 'ok':
return make_api_response(resp, 200)
return make_api_response(resp, 500)
| 17,435
|
def is_valid_slug(slug):
"""Returns true iff slug is valid."""
VALID_SLUG_RE = re.compile(r"^[a-z0-9\-]+$")
return VALID_SLUG_RE.match(slug)
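# Quick illustrative checks for is_valid_slug above (relies on `import re`); inputs are hypothetical.
import re
print(bool(is_valid_slug("my-post-42")))  # True
print(bool(is_valid_slug("My Post!")))    # False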
| 17,436
|
def KMeans_GPU(x, K=10, Niter=10, verbose=True):
"""Implements Lloyd's algorithm for the Euclidean metric."""
start = time.time()
N, D = x.shape # Number of samples, dimension of the ambient space
c = x[:K, :].clone() # Simplistic initialization for the centroids
x_i = LazyTensor(x.view(N, 1, D)) # (N, 1, D) samples
c_j = LazyTensor(c.view(1, K, D)) # (1, K, D) centroids
# K-means loop:
# - x is the (N, D) point cloud,
# - cl is the (N,) vector of class labels
# - c is the (K, D) cloud of cluster centroids
for i in range(Niter):
# E step: assign points to the closest cluster -------------------------
D_ij = ((x_i - c_j) ** 2).sum(-1) # (N, K) symbolic squared distances
cl = D_ij.argmin(dim=1).long().view(-1) # Points -> Nearest cluster
# M step: update the centroids to the normalized cluster average: ------
# Compute the sum of points per cluster:
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
# Divide by the number of points per cluster:
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl # in-place division to compute the average
if verbose: # Fancy display -----------------------------------------------
if torch.cuda.is_available():
torch.cuda.synchronize()
end = time.time()
print(
f"K-means for the Euclidean metric with {N:,} points in dimension {D:,}, K = {K:,}:"
)
print(
"Timing for {} iterations: {:.5f}s = {} x {:.5f}s\n".format(
Niter, end - start, Niter, (end - start) / Niter
)
)
return cl, c
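# Hedged usage sketch for KMeans_GPU above; it assumes `torch`, `time` and pykeops'
# LazyTensor are importable in the defining module, as in the original snippet.
import time
import torch
from pykeops.torch import LazyTensor

x = torch.randn(10_000, 2)
cl, c = KMeans_GPU(x, K=5, Niter=10, verbose=False)
print(cl.shape, c.shape)  # torch.Size([10000]) torch.Size([5, 2])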
| 17,437
|
def make_flat_roof(bm, faces, thick, outset, **kwargs):
"""Create a basic flat roof
Args:
bm (bmesh.types.BMesh): bmesh from current edit mesh
faces (bmesh.types.BMFace): list of user selected faces
thick (float): Thickness of the roof
        outset (float): How much the roof overhangs
        **kwargs: Extra kwargs from RoofProperty
Returns:
list(bmesh.types.BMFace): Resulting top face
"""
ret = bmesh.ops.extrude_face_region(bm, geom=faces)
bmesh.ops.translate(bm, vec=(0, 0, thick), verts=filter_geom(ret["geom"], BMVert))
top_face = filter_geom(ret["geom"], BMFace)[-1]
link_faces = [f for e in top_face.edges for f in e.link_faces if f is not top_face]
bmesh.ops.inset_region(bm, faces=link_faces, depth=outset, use_even_offset=True)
bmesh.ops.recalc_face_normals(bm, faces=bm.faces)
bmesh.ops.delete(bm, geom=faces, context="FACES")
new_faces = list({f for e in top_face.edges for f in e.link_faces})
return bmesh.ops.dissolve_faces(bm, faces=new_faces).get("region")
| 17,438
|
def make_comma_separated_list_fiter(filter_name, field_expression):
"""
Create a filter which uses a comma-separated list of values to filter the queryset.
:param str filter_name: the name of the query param to fetch values
:param str field_expression: the field expression to filter the queryset, like `categories__in`
"""
def filter_queryset(instance, request, queryset, view):
values = request.query_params.get(filter_name)
if not values:
return queryset
values = [v.strip() for v in values.split(",")]
return queryset.filter(**{field_expression: values})
attrs = {}
attrs.setdefault("filter_queryset", filter_queryset)
return type(str("CommaSeparatedIDListFilter"), (filters.BaseFilterBackend,), attrs)
| 17,439
|
def get_dashboard(id_, token_info=None, user=None):
"""Get a single dashboard by ID
:param id: ID of test dashboard
:type id: str
:rtype: Dashboard
"""
dashboard = Dashboard.query.get(id_)
if not dashboard:
return "Dashboard not found", 404
if dashboard and dashboard.project and not project_has_user(dashboard.project, user):
return "Forbidden", 403
return dashboard.to_dict()
| 17,440
|
def get_recorder(execution_cmd, ml_names):
"""
The helper function for generating a recorder object
"""
if not execution_cmd.record_progress:
return DummyRecorder()
root_dir_path = Path(__file__).parent.parent
log_dir_path = root_dir_path.joinpath(
"games", execution_cmd.game_name, "log")
game_params_str = [str(p) for p in execution_cmd.game_params]
filename_prefix = (
"manual" if execution_cmd.game_mode == GameMode.MANUAL else "ml")
if game_params_str:
filename_prefix += "_" + "_".join(game_params_str)
return Recorder(ml_names, log_dir_path, filename_prefix)
| 17,441
|
def eval_epoch(data_iterator, model, optimizer, args,
update=False,
log_split='',
split_name='',
n_iter=0,
epoch=0,
writer=None,
sample_path='',
debug=0,
verbose=False,
obj_classes=[],
weights=[],
clip=1.0,
most_freq=0,
log_path=''):
"""Pass one epoch over the data split.
"""
if update:
model.train()
else:
model.eval()
task = args.task
verbose = args.verbose
num_classes = model.n_actions if model.n_actions > 1 else 2
confusion = torch.zeros(num_classes, num_classes).float()
if task == 'task1':
matrix_labels = []
for dir in DIRECTIONS[args.direction]:
for obj in obj_classes:
matrix_labels += [dir + '_' + obj]
elif task == 'task2':
matrix_labels = DIRECTIONS[args.direction]
elif task in set(['task3', 'task4']):
matrix_labels = ['not present', 'present']
else:
raise NotImplementedError()
n_updates = len(data_iterator) if not debug else debug
if verbose:
pbar = tqdm(data_iterator)
else:
pbar = data_iterator
total_loss = []
total_f1 = []
total_acc = []
zeros_acc = []
ones_acc = []
random_acc = []
mostfreq_acc = []
f1_binary_loss = F1_Binary_Loss().cuda()
cce_loss = nn.CrossEntropyLoss(weight=torch.tensor(weights)).cuda()
for bid, (batch_lat,
batch_lng,
batch_images,
batch_boxes,
batch_feats,
batch_queries,
batch_query_strings,
batch_targets,
batch_most_freq) in enumerate(pbar):
if debug and bid == n_updates:
break
out = model({
'latitude': batch_lat,
'longitude': batch_lng,
'im_batch': batch_images,
'obj_boxes': batch_boxes,
'obj_feats': batch_feats,
'queries': batch_queries,
'query_strings': batch_query_strings})
preds = out['action_logits']
if task in set(['task1', 'task3', 'task4']):
binary_preds = (preds > 0.5).float()
binary_preds.requires_grad = True
if task == 'task1':
w = 10000.0
weight_rebal = torch.ones_like(
batch_targets) / w + (1.0 - 1.0 / w) * batch_targets
loss_fn = nn.BCELoss(weight=weight_rebal)
else:
loss_fn = torch.nn.functional.binary_cross_entropy_with_logits
loss = loss_fn(preds, batch_targets)
f1_score = f1_binary_loss(binary_preds, batch_targets)
acc = ((preds > 0.5).int() == batch_targets).float().mean()
zero_acc = (torch.zeros_like(batch_targets)
== batch_targets).float().mean()
one_acc = (torch.ones_like(batch_targets)
== batch_targets).float().mean()
r_acc = (torch.empty(batch_targets.size()).random_(
2).cuda() == batch_targets).float().mean()
total_f1.append(f1_score.item())
total_acc.append(acc.item())
zeros_acc.append(zero_acc.item())
ones_acc.append(one_acc.item())
random_acc.append(r_acc.item())
binary_preds = (preds > 0.5).long()
binary_preds = Counter(['{}'.format(bp.item()) for bp in binary_preds])
binary_preds = json.dumps(binary_preds)
log_string = 'f1: {:3.3f} mean-acc: {:3.3f} 0-acc: {:3.3f} 1-acc: {:3.3f} r-acc: {:3.3f} preds : {:10s}'.format(
np.mean(total_f1),
np.mean(total_acc),
np.mean(zeros_acc),
np.mean(ones_acc),
np.mean(random_acc),
binary_preds)
if task == 'task1':
pred_indices = [[] for bb in range(preds.size(0))]
for pair in (preds > 0.5).nonzero(as_tuple=False):
pred_indices[pair[0]].append(pair[1].item())
elif task in set(['task3', 'task4']):
pred_indices = (preds > 0.5).long()
elif task == 'task2':
loss = cce_loss(preds, batch_targets.squeeze(1).long())
acc = compute_precision_with_logits(
preds, batch_targets.long())
total_acc.append(acc.item())
r_acc = (torch.empty(batch_targets.size()).random_(
num_classes).cuda() == batch_targets).float().mean()
random_acc.append(r_acc.item())
mfreq_acc = (batch_most_freq == batch_targets).float().mean()
mostfreq_acc.append(mfreq_acc.item())
log_string = 'acc: {:3.3f} mfreq-acc: {:3.3f} r-acc: {:3.3f}'.format(
np.mean(total_acc), np.mean(mostfreq_acc), np.mean(random_acc))
_, pred_indices = torch.max(preds, 1)
else:
raise NotImplementedError()
total_loss.append(loss.item())
if update:
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
log_string = 'mean-loss: {:3.3f} '.format(np.mean(total_loss)) + log_string
log_split_batch = log_split + ' B{:4d}|{:4d}'.format(bid+1, n_updates)
log_final = ' '.join([log_split_batch, log_string])
if verbose:
pbar.set_description(log_final)
else:
print(log_final)
if batch_targets.size(1) == 1:
targets = [[batch_targets[bb].long().item()]
for bb in range(preds.size(0))]
else:
targets = [[] for bb in range(preds.size(0))]
for pair in batch_targets.nonzero(as_tuple=False):
targets[pair[0]].append(pair[1].item())
for bb in range(preds.size(0)):
for t in targets[bb]:
confusion[t, pred_indices[bb]] += 1
    total_score = total_f1 if task in set(['task1', 'task3', 'task4']) else total_acc
writer.add_scalar('{}_{}'.format(
split_name, 'batch_score'), total_score[-1], n_iter)
writer.add_scalar('{}_{}'.format(
split_name, 'batch_loss'), total_loss[-1], n_iter)
n_iter += 1
if (n_iter+1) % 100 == 0 and update:
model_name = os.path.join(
args.exp_dir, args.prefix + '/model.{}_{}.pt'.format(epoch, n_iter))
print('\n saving model', model_name)
torch.save(model, model_name)
if verbose:
pbar.close()
writer.add_scalar('{}_{}'.format(
split_name, 'epoch_score'), np.mean(total_score), epoch)
writer.add_scalar('{}_{}'.format(
split_name, 'epoch_loss'), np.mean(total_loss), epoch)
img_conf = get_confusion_matrix_image(
matrix_labels, confusion / confusion.sum(), '')
writer.add_image('Confusion Matrix', img_conf, epoch)
with open(log_path, "a") as log_file:
log_file.write(log_final+'\n')
return {'loss': np.mean(total_loss),
'accuracy': np.mean(total_score)}, n_iter
| 17,442
|
def get_customer_key():
""" Reutrn the key of the sample customer from file """
customer_file = open("sample_customer", "r")
customer_key = customer_file.readline().rstrip("\n")
customer_file.close()
return customer_key
| 17,443
|
def init_db() -> None:
"""Initialize database."""
connection = get_db()
with current_app.open_resource("schema.sql") as schema:
connection.executescript(schema.read().decode("utf8"))
| 17,444
|
def main() -> None:
"""Main function that sets up game and runs main game loop"""
game_resources = GameResources(testing, bless, path)
information = Information(game_resources)
game_resources.draw()
game_panel = Panel(game_resources.level.to_string())
layout = PanelLayout.make_layout(start=False)
layout["main_game"].update(game_panel)
# Panels to update
layout["tree"].update(
Panel(game_resources.node.display_node(), title="Map")
)
layout['inventory'].update(Panel('', title="Inventory"))
layout['info'].update(information.display_enemy_panel())
layout["player_health"].update(
(Panel(Text('♥'*10 + " | You have: 100HP", style="bold red"), title='Health')))
start_screen()
with Live(layout, refresh_per_second=10, screen=False): # True prevents re-render
while game_resources.player.playing:
run_game(layout, game_resources, information)
if game_resources.won_game:
game_resources.player.playing = False
if not game_resources.won_game:
end_screen(layout)
if game_resources.won_game:
layout = Layout(name="win")
with Live(layout, refresh_per_second=1, screen=False):
win_screen(layout)
| 17,445
|
def tau_tex(tex, tau0_):
"""
Eq. (15) Goldsmith et al. (2012)
"""
g = gu/gl
return tau0_*(1. - np.exp(-tstar/tex))/(1. + g*np.exp(-tstar/tex))
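Written out, the relation implemented above (Eq. 15 of Goldsmith et al. 2012, with tstar standing in for T* and g = gu/gl) reads, as a sketch of the notation:

\tau(T_\mathrm{ex}) = \tau_0\,\frac{1 - e^{-T^{*}/T_\mathrm{ex}}}{1 + (g_u/g_l)\,e^{-T^{*}/T_\mathrm{ex}}}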
| 17,446
|
def cached(f):
"""Decorator to cache result of property."""
@wraps(f)
def inner(self):
name = '_{}'.format(f.__name__)
if getattr(self, name, None) is None:
setattr(self, name, f(self))
return getattr(self, name)
return inner
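# Hedged usage sketch for the cached decorator above (which needs `from functools import wraps`);
# Report and its data property are hypothetical.
from functools import wraps

class Report:
    @property
    @cached
    def data(self):
        print("computing once")
        return [1, 2, 3]

r = Report()
r.data  # prints "computing once" and stores the result on r._data
r.data  # served from the cache, no recomputation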
| 17,447
|
def thesaurus(*args, sort=False) -> dict:
"""Формирует словарь, в котором ключи — первые буквы слов,
а значения — списки, содержащие слова, начинающиеся с соответствующей буквы
:param *args: перечень слов
:param sort: признак необходимости сортировки словаря по алфавиту (True - сортировать, False - не сортировать)
:return: словарь слов по первым буквам"""
if sort:
args = sorted(list(args)) # Changed in version 3.7: Dictionary order is guaranteed to be insertion order
dict_out = {}
for word in args:
dict_value = dict_out.setdefault(word[0], list())
if word not in dict_value:
dict_value.append(word)
dict_out[word[0]] = dict_value
return dict_out
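# Illustrative call to thesaurus above; the words are hypothetical.
print(thesaurus("banana", "avocado", "apple", "apple", sort=True))
# {'a': ['apple', 'avocado'], 'b': ['banana']}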
| 17,448
|
def calculate_ranking(imbalanced_results):
"""Calculate the ranking of oversamplers for
any combination of datasets, classifiers and
metrics."""
wide_optimal = calculate_wide_optimal(imbalanced_results)
ranking_results = wide_optimal.apply(
lambda row: _return_row_ranking(
row[3:], SCORERS[row[2].replace(' ', '_').lower()]._sign
),
axis=1,
)
ranking = pd.concat([wide_optimal.iloc[:, :3], ranking_results], axis=1)
return ranking
| 17,449
|
def extract鏡像翻訳(item):
"""
Parser for '鏡像翻訳'
"""
if 'anime' in str(item['tags']).lower():
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
tagmap = [
('sodachi fiasco', 'Orokamonogatari - Sodachi Fiasco', 'translated'),
('karen ogre', 'Wazamonogatari - Karen Ogre', 'translated'),
('shinobu mustard', 'Shinobumonogatari - Shinobu Mustard', 'translated'),
('tsubasa sleeping', 'Wazamonogatari - Tsubasa Sleeping', 'translated'),
('acerola bon appetit', 'Wazamonogatari - Acerola Bon Appetit', 'translated'),
('tsudzura human', 'Musubimonogatari - Tsudzura Human', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
titlemap = [
('jinrui saikyou no netsuai', 'Jinrui Saikyou no Netsuai', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| 17,450
|
def config(live_server, django_user_model):
"""Create a user and return an auth_token config matching that user."""
user = django_user_model.objects.create(
email='jathan@localhost', is_superuser=True, is_staff=True
)
data = {
'email': user.email,
'secret_key': user.secret_key,
'auth_method': 'auth_token',
'url': live_server.url + '/api',
# 'api_version': API_VERSION,
'api_version': '1.0', # Hard-coded.
}
return data
| 17,451
|
def test_failure_immutable_copy():
"""Ensures that Failure returns it self when passed to copy function."""
nothing = _Nothing()
assert nothing is copy(nothing)
| 17,452
|
def combine_data_by_key(
combined_outputs: Dict[str, Union[List[Any], Any]],
output: Dict[str, Union[List[Any], Any]],
) -> Dict[str, Union[List[Any], Any]]:
"""
Combine lists in two multimaps
Args:
combined_outputs: Initial multimap to combine, presumably already combined
output: New multimap to add to initial multimap
Returns:
Combined multimaps (does not modify initial or new data)
"""
combined_keys = combine_keys(combined_outputs, output)
return OrderedDict(
(key, combine_datas(combined_outputs.get(key, []), output.get(key, [])))
for key in combined_keys
)
| 17,453
|
def arango_connection() -> ArangoClient:
"""Connecting to arango."""
host = os.getenv("ARANGO_HOST")
port = os.getenv("ARANGO_PORT")
arango_client = ArangoClient(hosts=f"http://{host}:{port}")
return arango_client
| 17,454
|
def default_scores(X_train, y_train, X_val, y_val):
"""
"""
svc_model = LinearSVC(random_state=0).fit(X_train,y_train)
y_pred = svc_model.predict(X_val)
    print('SVC loss:', cost_score(y_pred, y_val))
rf_model = RandomForestClassifier(random_state=0).fit(X_train,y_train)
y_pred = rf_model.predict(X_val)
    print('Random Forest loss:', cost_score(y_pred, y_val))
nb_model = GaussianNB().fit(X_train,y_train)
y_pred = nb_model.predict(X_val)
    print('Naive Bayes loss:', cost_score(y_pred, y_val))
return
| 17,455
|
def in_bazel() -> bool:
"""Return whether running under bazel."""
return os.environ.get("TEST_WORKSPACE", "") != ""
| 17,456
|
def run_batch(args, batch_name=None):
"""Wrapper around creating, running, and then closing a Batch run.
:param args: Parsed args from the ArgumentParser created via the init_arg_parser method
:param batch_name: (optional) batch label which will show up in the Batch web UI
Usage:
with run_batch(args) as batch:
... batch job definitions ...
"""
if args.local:
backend = (
hb.LocalBackend()
if args.raw
else hb.LocalBackend(gsa_key_file=args.gsa_key_file)
)
else:
backend = hb.ServiceBackend(
billing_project=args.batch_billing_project, bucket=args.batch_temp_bucket
)
try:
batch = hb.Batch(backend=backend, name=batch_name)
batch.batch_utils_temp_bucket = args.batch_temp_bucket
yield batch # returned to with ... as batch:
# run on end of with..: block
batch.run(dry_run=args.dry_run, verbose=args.verbose)
finally:
if isinstance(backend, hb.ServiceBackend):
backend.close()
| 17,457
|
def acceptCode(request):
"""Redeems a code to accept invitation cash"""
params = request.get_params(schemas.AcceptCodeSchema())
device = get_device(request)
customer = device.customer
access_token = get_wc_token(request, customer)
postParams = {
'code': params['code']
}
response = wc_contact(
request, 'POST', 'wallet/accept-code', params=postParams,
access_token=access_token).json()
if response.get('error'):
return { 'error': response.get('error')}
elif response.get('invalid'):
return { 'invalid': response.get('invalid')}
else:
return response
| 17,458
|
def update_integration_response(ApiId=None, ContentHandlingStrategy=None, IntegrationId=None, IntegrationResponseId=None, IntegrationResponseKey=None, ResponseParameters=None, ResponseTemplates=None, TemplateSelectionExpression=None):
"""
Updates an IntegrationResponses.
See also: AWS API Documentation
Exceptions
:example: response = client.update_integration_response(
ApiId='string',
ContentHandlingStrategy='CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
IntegrationId='string',
IntegrationResponseId='string',
IntegrationResponseKey='string',
ResponseParameters={
'string': 'string'
},
ResponseTemplates={
'string': 'string'
},
TemplateSelectionExpression='string'
)
:type ApiId: string
:param ApiId: [REQUIRED]\nThe API identifier.\n
:type ContentHandlingStrategy: string
:param ContentHandlingStrategy: Supported only for WebSocket APIs. Specifies how to handle response payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following behaviors:\nCONVERT_TO_BINARY: Converts a response payload from a Base64-encoded string to the corresponding binary blob.\nCONVERT_TO_TEXT: Converts a response payload from a binary blob to a Base64-encoded string.\nIf this property is not defined, the response payload will be passed through from the integration response to the route response or method response without modification.\n
:type IntegrationId: string
:param IntegrationId: [REQUIRED]\nThe integration ID.\n
:type IntegrationResponseId: string
:param IntegrationResponseId: [REQUIRED]\nThe integration response ID.\n
:type IntegrationResponseKey: string
:param IntegrationResponseKey: The integration response key.
:type ResponseParameters: dict
:param ResponseParameters: A key-value map specifying response parameters that are passed to the method response from the backend. The key is a method response header parameter name and the mapped value is an integration response header value, a static value enclosed within a pair of single quotes, or a JSON expression from the integration response body. The mapping key must match the pattern of method.response.header.{name}, where name is a valid and unique header name. The mapped non-static value must match the pattern of integration.response.header.{name}or integration.response.body.{JSON-expression}, where {name}is a valid and unique response header name and {JSON-expression}is a valid JSON expression without the $ prefix.\n\n(string) --\n(string) --A string with a length between [1-512].\n\n\n\n\n
:type ResponseTemplates: dict
:param ResponseTemplates: The collection of response templates for the integration response as a string-to-string map of key-value pairs. Response templates are represented as a key/value map, with a content-type as the key and a template as the value.\n\n(string) --\n(string) --A string with a length between [0-32768].\n\n\n\n\n
:type TemplateSelectionExpression: string
:param TemplateSelectionExpression: The template selection expression for the integration response. Supported only for WebSocket APIs.
:rtype: dict
ReturnsResponse Syntax
{
'ContentHandlingStrategy': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'IntegrationResponseId': 'string',
'IntegrationResponseKey': 'string',
'ResponseParameters': {
'string': 'string'
},
'ResponseTemplates': {
'string': 'string'
},
'TemplateSelectionExpression': 'string'
}
Response Structure
(dict) --
Success
ContentHandlingStrategy (string) --
Supported only for WebSocket APIs. Specifies how to handle response payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following behaviors:
CONVERT_TO_BINARY: Converts a response payload from a Base64-encoded string to the corresponding binary blob.
CONVERT_TO_TEXT: Converts a response payload from a binary blob to a Base64-encoded string.
If this property is not defined, the response payload will be passed through from the integration response to the route response or method response without modification.
IntegrationResponseId (string) --
The integration response ID.
IntegrationResponseKey (string) --
The integration response key.
ResponseParameters (dict) --
A key-value map specifying response parameters that are passed to the method response from the backend. The key is a method response header parameter name and the mapped value is an integration response header value, a static value enclosed within a pair of single quotes, or a JSON expression from the integration response body. The mapping key must match the pattern of method.response.header.{name}, where name is a valid and unique header name. The mapped non-static value must match the pattern of integration.response.header.{name} or integration.response.body.{JSON-expression}, where name is a valid and unique response header name and JSON-expression is a valid JSON expression without the $ prefix.
(string) --
(string) --
A string with a length between [1-512].
ResponseTemplates (dict) --
The collection of response templates for the integration response as a string-to-string map of key-value pairs. Response templates are represented as a key/value map, with a content-type as the key and a template as the value.
(string) --
(string) --
A string with a length between [0-32768].
TemplateSelectionExpression (string) --
The template selection expression for the integration response. Supported only for WebSocket APIs.
Exceptions
ApiGatewayV2.Client.exceptions.NotFoundException
ApiGatewayV2.Client.exceptions.TooManyRequestsException
ApiGatewayV2.Client.exceptions.BadRequestException
ApiGatewayV2.Client.exceptions.ConflictException
:return: {
'ContentHandlingStrategy': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'IntegrationResponseId': 'string',
'IntegrationResponseKey': 'string',
'ResponseParameters': {
'string': 'string'
},
'ResponseTemplates': {
'string': 'string'
},
'TemplateSelectionExpression': 'string'
}
:returns:
ApiGatewayV2.Client.exceptions.NotFoundException
ApiGatewayV2.Client.exceptions.TooManyRequestsException
ApiGatewayV2.Client.exceptions.BadRequestException
ApiGatewayV2.Client.exceptions.ConflictException
"""
pass
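    # --- Usage sketch (not part of the stub above). The documented parameters
    # match the ApiGatewayV2 update_integration_response operation; the ApiId
    # and other identifiers below are hypothetical placeholder values.
    #
    #   import boto3
    #   client = boto3.client('apigatewayv2')
    #   client.update_integration_response(
    #       ApiId='a1b2c3d4',
    #       IntegrationId='integ-id',
    #       IntegrationResponseId='intresp-id',
    #       ResponseTemplates={'application/json': '{"ok": true}'},
    #   )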
| 17,459
|
def log_performance(item, value):
"""
    Print a performance item with a pre-defined format to the console.
:param item: performance item name
:param value: performance value
"""
performance_msg = "[Performance][{}]: {}".format(item, value)
Utility.console_log(performance_msg, "orange")
# update to junit test report
current_junit_case = TinyFW.JunitReport.get_current_test_case()
current_junit_case.stdout += performance_msg + "\r\n"
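# Minimal usage sketch; assumes the surrounding IDF test harness (Utility,
# TinyFW) is importable and a junit test case is currently active:
#
#   log_performance("flash_write_speed", "1.2 MB/s")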
| 17,460
|
def _find_available_share_drive_letter(share_ignores: List[str] = None) -> str:
"""Find an available drive letter for a share.
    This function iterates backwards through the ASCII uppercase letters and
    checks each one against the current net use drive mappings. Once it finds
an available drive letter, it passes that back to the caller. If an
available drive letter is not found, a RuntimeError is raised.
Args:
share_ignores (List[str]): A list of share letters to ignore.
Returns:
        str: An available drive letter (e.g., 'Z:') for a network share.
Raises:
RuntimeError
"""
LOGGER.write('Looking for an available share letter.')
drive_mapping = _get_current_drive_mapping()
# Iterate backwards through letters to see if they've already been used.
available_letter = ''
for letter in reversed(ascii_uppercase):
        # Guard against the default of None before doing a membership test.
        if share_ignores and letter in share_ignores:
continue
letter = f'{letter}:'
if letter not in drive_mapping:
available_letter = letter
break
if not available_letter:
raise RuntimeError('Unable to find a free drive letter to map to!')
return available_letter
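# Usage sketch (Windows-only; relies on this module's LOGGER and
# _get_current_drive_mapping helpers, which parse `net use` output):
#
#   free = _find_available_share_drive_letter(share_ignores=['Z', 'Y'])
#   # e.g. 'X:' if Z: and Y: are excluded and X: is not currently mapped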
| 17,461
|
def integrate(sde=None, *, q=None, sources=None, log=False, addaxis=False):
"""Decorator for Ito Stochastic Differential Equation (SDE)
integration.
Decorates a function representing the SDE or SDEs into the corresponding
``sdepy`` integrator.
Parameters
----------
sde : function
Function to be wrapped. Its signature and values should be
as expected for the ``sde`` method of the ``sdepy.SDE`` or
``sdepy.SDEs`` classes.
q : int
Number of equations. If ``None``, attempts a test evaluation
of ``sde`` to find out. ``q=0`` indicates a single equation.
sources : set
Stochasticity sources used in the equation. If ``None``,
attempts a test evaluation of ``sde`` to find out.
log : bool
Sets the ``log`` attribute for the wrapping class.
addaxis : bool
Sets the ``addaxis`` attribute for the wrapping class.
Returns
-------
A subclass of ``sdepy.SDE`` or ``sdepy.SDEs`` as appropriate,
and of ``sdepy.integrator``, with the given ``sde``
cast as its ``sde`` method.
Notes
-----
To prevent a test evaluation of ``sde``, explicitly provide
the intended ``q`` and ``sources`` as keyword arguments to ``integrate()``.
The test evaluation is attempted as ``sde()`` and, upon failure,
again as ``sde(1., 1.)``.
Examples
--------
>>> from sdepy import integrate
>>> @integrate
... def my_process(t, x, theta=1., k=1., sigma=1.):
... return {'dt': k*(theta - x), 'dw': sigma}
>>> P = my_process(x0=1, sigma=0.5, paths=100*1000, steps=100)
>>> x = P(timeline=(0., 0.5, 1.))
>>> x.shape
(3, 100000)
"""
if sde is None:
def decorator(sde):
return integrate(sde, q=q, sources=sources,
log=log, addaxis=addaxis)
return decorator
else:
SDE_class = _SDE_from_function(sde, q=q, sources=sources,
log=log, addaxis=addaxis)
class sde_integrator(SDE_class, integrator):
pass
return sde_integrator
| 17,462
|
def write_generate_component(file_ptr):
"""Write generate statements in VHDL code."""
file_ptr.write("\t a_s <= std_ulogic_vector(resize(unsigned(a_i), a_s'length));\n")
file_ptr.write("\t b_s <= std_ulogic_vector(resize(unsigned(b_i), b_s'length));\n\n")
file_ptr.write("\t GEN_INPUT_A: for i in 0 to X-1 generate\n")
file_ptr.write("\t\t digit_a_s(i) <= a_s((M*(i+1)-1) downto M*i);\n")
file_ptr.write("\t end generate;\n\n")
file_ptr.write("\t GEN_INPUT_B: for i in 0 to Y-1 generate\n")
file_ptr.write("\t\t digit_b_s(i) <= b_s((n*(i+1)-1) downto n*i);\n")
file_ptr.write("\t end generate;\n\n")
file_ptr.write("\t GEN_DSP_I: for i in 0 to X-1 generate\n")
file_ptr.write("\t\t GEN_DSP_J: for j in 0 to Y-1 generate\n")
file_ptr.write("\t\t\t M1: DSP_MUL\n")
file_ptr.write("\t\t\t port map (\n")
file_ptr.write("\t\t\t\t clk => clk,\n")
file_ptr.write("\t\t\t\t p_o => ab(i)(j),\n")
file_ptr.write("\t\t\t\t a_i => digit_a_s(i),\n")
file_ptr.write("\t\t\t\t b_i => digit_b_s(j)\n")
file_ptr.write("\t\t\t );\n")
file_ptr.write("\t\t end generate GEN_DSP_J;\n")
file_ptr.write("\t end generate GEN_DSP_I;\n\n")
| 17,463
|
def calculate_statistical_inefficiency_runs(traj_l):
"""
Using fast autocorrelation calculation to estimate statistical inefficiency. This code wraps
a function from pymbar.
References
----------
[1] Shirts MR and Chodera JD. Statistically optimal analysis of samples from
multiple equilibrium states. J. Chem. Phys. 129:124105, 2008
http://dx.doi.org/10.1063/1.2978177
[2] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
histogram analysis method for the analysis of simulated and parallel tempering simulations.
JCTC 3(1):26-41, 2007.
"""
try:
import pymbar
except ImportError as err:
err.args = (err.args[0] + "\n You need to install pymbar to use this function.",)
raise
iinv = np.array([pymbar.timeseries.statisticalInefficiency_fft(tra) for tra in traj_l])
return (iinv - 1.0) / 2.0
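# Illustrative call (requires pymbar; the trajectories here are synthetic
# smoothed noise, not project data, chosen to give a non-trivial
# autocorrelation time):
def _example_statistical_inefficiency():
    rng = np.random.default_rng(0)
    noise = rng.standard_normal((2, 5000))
    traj_l = [np.convolve(n, np.ones(20) / 20.0, mode="valid") for n in noise]
    return calculate_statistical_inefficiency_runs(traj_l)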
| 17,464
|
def dataio_prep(hparams):
"""Creates the datasets and their data processing pipelines"""
# 1. define tokenizer and load it
modelpath = download_to_dir(hparams["tok_mdl_file"], hparams["save_folder"])
download_to_dir(hparams["tok_voc_file"], hparams["save_folder"])
tokenizer = SentencePiece(
model_dir=hparams["save_folder"],
vocab_size=hparams["output_neurons"],
model_type=hparams["token_type"],
character_coverage=hparams["character_coverage"],
)
tokenizer.sp.load(modelpath)
if (tokenizer.sp.eos_id() + 1) == (tokenizer.sp.bos_id() + 1) == 0 and not (
hparams["eos_index"]
== hparams["bos_index"]
== hparams["blank_index"]
== hparams["unk_index"]
== 0
):
raise ValueError(
"Desired indexes for special tokens do not agree "
"with loaded tokenizer special tokens !"
)
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes(hparams["input_type"])
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("words")
@sb.utils.data_pipeline.provides("tokens_bos", "tokens_eos", "tokens")
def text_pipeline(words):
tokens_list = tokenizer.sp.encode_as_ids(words)
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
# 4. Create datasets
data = {}
for dataset in ["train", "valid", "test"]:
data[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=hparams[f"{dataset}_annotation"],
replacements={"data_root": hparams["data_folder"]},
dynamic_items=[audio_pipeline, text_pipeline],
output_keys=["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
)
if dataset != "train":
data[dataset] = data[dataset].filtered_sorted(sort_key="length")
# Sort train dataset and ensure it doesn't get un-sorted
if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending":
data["train"] = data["train"].filtered_sorted(
sort_key="length", reverse=hparams["sorting"] == "descending",
)
hparams["dataloader_options"]["shuffle"] = False
elif hparams["sorting"] != "random":
raise NotImplementedError(
"Sorting must be random, ascending, or descending"
)
return data, tokenizer
| 17,465
|
def get_superlative_type(question_normal):
"""What TV series was Mark Harmon the star of that ran the least amount of time on TV ?"""
result = 'argmax'
question_normal = question_normal.lower()
superlative_serialization_list = superlative_serialization(question=question_normal)
for element in superlative_serialization_list:
if element in ['argmax', 'argmin']:
result = element
break
return result
| 17,466
|
def coord_net_spec(ks=3, stride=1, pad=0, pool=2, dstride=2, dpad=0):
"""
Define net spec for simple conv-pool-deconv pattern common to all
coordinate mapping tests.
"""
n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[2, 1, 100, 100]))
n.aux = L.Input(shape=dict(dim=[2, 1, 20, 20]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=ks, stride=stride, pad=pad)
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=pool, stride=pool, pad=0)
    # for upsampling, the kernel size is 2x the stride
    try:
        deconv_ks = [s*2 for s in dstride]
    except TypeError:
        # dstride is a scalar, not a sequence
        deconv_ks = dstride*2
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=deconv_ks, stride=dstride, pad=dpad)
return n
| 17,467
|
def note_favorite(note):
"""
    Get the favorite status of the note.
    Returns True if the note is marked as a favorite,
    False otherwise.
"""
if 'favorite' in note:
return note['favorite']
return False
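# Minimal example (plain dicts stand in for notes):
#
#   note_favorite({'favorite': True})    # True
#   note_favorite({'content': 'hello'})  # False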
| 17,468
|
def _crossproduct(template: CheckListTemplate):
"""
Takes the output of editor.template and does the cross product of contexts and qas
"""
ret = []
ret_labels = []
for instance in template.data:
cs = instance["contexts"]
qas = instance["qas"]
d = list(itertools.product(cs, qas))
ret.append([(x[0], x[1][0]) for x in d])
ret_labels.append([x[1][1] for x in d])
template.data = ret
template.labels = ret_labels
return template
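# Worked example with a stand-in object: SimpleNamespace mimics the .data and
# .labels attributes of a CheckListTemplate here, which is an assumption made
# for illustration rather than the real checklist API.
def _crossproduct_example():
    from types import SimpleNamespace
    tmpl = SimpleNamespace(
        data=[{"contexts": ["c1", "c2"], "qas": [("q1", "a1")]}],
        labels=None,
    )
    tmpl = _crossproduct(tmpl)
    # tmpl.data   == [[("c1", "q1"), ("c2", "q1")]]
    # tmpl.labels == [["a1", "a1"]]
    return tmpl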
| 17,469
|
def test_enqueue15(client):
"""Tests enqueue stores IP Address for request using request address as fallback."""
pytest.skip("Not implemented")
| 17,470
|
def thread(function):
"""Runs the decorated function within a concurrent thread,
taking care of the result and error management.
Decorated functions will return a concurrent.futures.Future object
once called.
"""
@wraps(function)
def wrapper(*args, **kwargs):
future = Future()
launch_thread(_function_handler, function, args, kwargs, future)
return future
return wrapper
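# The decorator relies on two helpers from its original module. The commented
# stand-ins below are assumptions about their behavior, shown only to make the
# wiring concrete: launch_thread starts a daemon thread, and _function_handler
# runs the wrapped callable and completes the Future.
#
#   import threading
#
#   def launch_thread(target, *args):
#       t = threading.Thread(target=target, args=args, daemon=True)
#       t.start()
#       return t
#
#   def _function_handler(function, args, kwargs, future):
#       try:
#           future.set_result(function(*args, **kwargs))
#       except BaseException as error:
#           future.set_exception(error)
#
#   @thread
#   def add(a, b):
#       return a + b
#
#   add(2, 3).result()  # -> 5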
| 17,471
|
def collapse_range(arg, value_delimiter=',', range_delimiter='-'):
"""
Collapses a list of values into a range set
:param arg: The list of values to collapse
:param value_delimiter: The delimiter that separates values
:param range_delimiter: The delimiter that separates a value range
:return: An array of collapsed string values
:rtype: list
"""
values = list()
expanded = arg.split(value_delimiter)
range_start = None
for v1, v2 in lookahead(expanded):
if v2:
v1 = int(v1)
v2 = int(v2)
if (v1 + 1) == v2:
if not range_start:
range_start = v1
elif range_start:
item = '{}{}{}'.format(range_start, range_delimiter, v1)
values.extend([item])
range_start = None
else:
values.extend([v1])
elif range_start:
item = '{}{}{}'.format(range_start, range_delimiter, v1)
values.extend([item])
range_start = None
else:
values.extend([v1])
return [str(x) for x in values]
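# collapse_range depends on a `lookahead` helper that is not shown in this
# snippet. The definition below is an assumed implementation (yield each item
# with its successor, None for the last item); the real helper may differ.
def lookahead(iterable):
    items = list(iterable)
    for i, item in enumerate(items):
        yield item, (items[i + 1] if i + 1 < len(items) else None)

# With that assumption: collapse_range('1,2,3,5,7,8') -> ['1-3', '5', '7-8']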
| 17,472
|
def test_number_of_channel_avg_mapping_tuples():
"""
Verify that FSPConfiguration fails if there are an invalid number of
entries in the channel average mapping argument.
Since this test was originally written we allow fewer than 20 entries
"""
# create a partially applied sn.FSPConfiguration constructor to save having
# to type the arguments each time
fsp_constructor = functools.partial(
FSPConfiguration, 1, FSPFunctionMode.CORR, 1, 140, 0
)
# test for 21 tuples
channel_avg_map = list(zip(itertools.count(1, 744), 21 * [0]))
with pytest.raises(ValueError):
_ = fsp_constructor(channel_avg_map)
| 17,473
|
def compute_diag_mog_params(M=int(4), snr=3.):
"""Returns diagonal mixture of Gaussian target distribution settings for d=2
Args:
M: (Optional) Integer, number of components
snr: (Optional) Scaling of the means
"""
d = int(2)
weights = np.ones(M)
weights /= np.sum(weights)
# change this to set the means apart
means = np.zeros((M, d))
if M == 3:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.]])
if M == 4:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.]])
if M == 6:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.], [0, 2.], [-2, 0.]])
if M == 8:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.], [0, 2.], [-2, 0.], [2, 0.], [0, -2.]])
covs = np.ones(M)
# compute the expected value of E[||X-Y||^2] for X, Y iid from P
mean_sqdist = 0.
for i in range(M):
for j in range(M):
temp = npl.norm(means[i])**2 + npl.norm(means[j])**2 - 2 * np.dot(means[i], means[j])
temp += d*(covs[i]+ covs[j])
mean_sqdist += weights[i] * weights[j] * temp
params_p = {"name": "diag_mog",
"weights": weights,
"means": means,
"covs": covs,
"d": int(d),
"mean_sqdist" : mean_sqdist,
"saved_samples": False,
"flip_Pnmax": False
}
return(params_p)
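# Example: build a 4-component target and inspect the derived quantities
# (np/npl are the module-level numpy and numpy.linalg imports used above).
def _diag_mog_example():
    params = compute_diag_mog_params(M=4, snr=3.)
    # params["means"].shape == (4, 2); the weights sum to 1; and
    # params["mean_sqdist"] is E||X - Y||^2 for X, Y iid from the mixture.
    return params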
| 17,474
|
def has_ifm2(npu_op: NpuBlockOperation) -> bool:
"""Checks if op has non-scalar IFM2"""
return npu_op.ifm2 is not None and npu_op.ifm2_scalar is None
| 17,475
|
def ilsvrc_fix_args(args):
"""
Update the args with fixed parameter in ilsvrc
"""
args.ds_name="ilsvrc"
    args.num_classes = 1000
# GPU will handle mean std transformation to save CPU-GPU communication
args.do_mean_std_gpu_process = True
args.input_type = 'uint8'
args.mean = get_augmented_data.ilsvrc_mean
args.std = get_augmented_data.ilsvrc_std
#assert args.do_mean_std_gpu_process and args.input_type == 'uint8'
#assert args.mean is not None and args.std is not None
decay_power = args.batch_size / float(ILSVRC_DEFAULT_BATCH_SIZE)
args.batch_norm_decay=0.9**decay_power # according to Torch blog
return args
| 17,476
|
def get_mock_adapter() -> Adapter:
"""Get a requests-mock Adapter with some URLs mocked by default"""
adapter = Adapter()
adapter.register_uri(
ANY_METHOD,
MOCKED_URL,
headers={'Content-Type': 'text/plain'},
text='mock response',
status_code=200,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_HTTPS,
headers={'Content-Type': 'text/plain'},
text='mock https response',
status_code=200,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_JSON,
headers={'Content-Type': 'application/json'},
json={'message': 'mock json response'},
status_code=200,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_REDIRECT,
headers={'Content-Type': 'text/plain', 'Location': MOCKED_URL_REDIRECT_TARGET},
text='mock redirect response',
status_code=302,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_REDIRECT_TARGET,
headers={'Content-Type': 'text/plain'},
text='mock redirected response',
status_code=200,
)
return adapter
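# Usage sketch: mount the mock adapter on a plain requests.Session so that the
# MOCKED_URL_* constants (defined elsewhere in this test module) resolve to
# canned responses instead of real HTTP calls.
def _session_with_mock_adapter():
    import requests
    session = requests.Session()
    adapter = get_mock_adapter()
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session

# _session_with_mock_adapter().get(MOCKED_URL).text  ->  'mock response'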
| 17,477
|
def ext_force_bend_from_input(ext_force_bend_list, o_molsys):
"""
Creates bend coordinate with external force
Parameters
----------
ext_force_bend_list : list
each entry is a list of 3 atoms (indexed from 1), followed by a formula
o_molsys : molsys.Molsys
optking molecular system
"""
logger = logging.getLogger(__name__)
for B in ext_force_bend_list:
if len(B) != 4:
raise OptError("Num. of entries in ext. force bend should be 4.")
eBend = bend.Bend(B[0] - 1, B[1] - 1, B[2] - 1)
f = check_fragment(eBend.atoms, o_molsys)
try:
I = o_molsys.fragments[f].intcos.index(eBend)
o_molsys.fragments[f].intcos[I].ext_force = B[3]
except ValueError:
logger.info("External force bend not present, so adding it.\n")
eBend.ext_force = B[3]
o_molsys.fragments[f].intcos.append(eBend)
| 17,478
|
def eqtls_weights_summing(eqtl_occurrence_log_likelihood, ens_gene_id, target_species_hit, converted_eqtls, gtex_weights_dict, chr_start, chr_end, gtex_variants, tf_len, gene_len):
"""
Identify if any of the eQTLs associated with this gene overlap this predicted TFBS.
Retrieve the log-likelihood scores for all of them.
Fix.
"""
eqtl_weights = []
if len(converted_eqtls) > 0:
# determine the weight score for likelihood of this magnitude eQTL.
# ref-point
motif_start = target_species_hit[4]
motif_end = target_species_hit[5]
for converted_eqtl in converted_eqtls:
converted_eqtl_start = converted_eqtl[0]
converted_eqtl_end = converted_eqtl[1]
converted_eqtl_score_mag = abs(converted_eqtl[2])
overlap = overlap_range([motif_start, motif_end], [converted_eqtl_start, converted_eqtl_end])
if len(overlap) > 0:
eqtl_weight = gtex_weights_dict[converted_eqtl_score_mag]
eqtl_weights.append(eqtl_weight + eqtl_occurrence_log_likelihood)
eqtl_weights_sum = sum(eqtl_weights)
return eqtl_weights_sum
| 17,479
|
def get_file_chunks_in_range(context, filediff, interfilediff,
first_line, num_lines):
"""
A generator that yields chunks within a range of lines in the specified
filediff/interfilediff.
This is primarily intended for use with templates. It takes a
RequestContext for looking up the user and for caching file lists,
in order to improve performance and reduce lookup times for files that have
already been fetched.
Each returned chunk is a dictionary with the following fields:
============= ========================================================
Variable Description
============= ========================================================
``change`` The change type ("equal", "replace", "insert", "delete")
``numlines`` The number of lines in the chunk.
``lines`` The list of lines in the chunk.
``meta`` A dictionary containing metadata on the chunk
============= ========================================================
Each line in the list of lines is an array with the following data:
======== =============================================================
Index Description
======== =============================================================
0 Virtual line number (union of the original and patched files)
1 Real line number in the original file
2 HTML markup of the original file
3 Changed regions of the original line (for "replace" chunks)
4 Real line number in the patched file
5 HTML markup of the patched file
6 Changed regions of the patched line (for "replace" chunks)
7 True if line consists of only whitespace changes
======== =============================================================
"""
def find_header(headers):
for header in reversed(headers):
if header[0] < first_line:
return {
'line': header[0],
'text': header[1],
}
interdiffset = None
key = "_diff_files_%s_%s" % (filediff.diffset.id, filediff.id)
if interfilediff:
key += "_%s" % (interfilediff.id)
interdiffset = interfilediff.diffset
if key in context:
files = context[key]
else:
assert 'user' in context
request = context.get('request', None)
files = get_diff_files(filediff.diffset, filediff, interdiffset,
request=request)
populate_diff_chunks(files, get_enable_highlighting(context['user']),
request=request)
context[key] = files
if not files:
        # PEP 479: return ends the generator cleanly (raising StopIteration
        # inside a generator becomes a RuntimeError on Python 3.7+).
        return
assert len(files) == 1
last_header = [None, None]
for chunk in files[0]['chunks']:
if ('headers' in chunk['meta'] and
(chunk['meta']['headers'][0] or chunk['meta']['headers'][1])):
last_header = chunk['meta']['headers']
lines = chunk['lines']
if lines[-1][0] >= first_line >= lines[0][0]:
start_index = first_line - lines[0][0]
if first_line + num_lines <= lines[-1][0]:
last_index = start_index + num_lines
else:
last_index = len(lines)
new_chunk = {
'lines': chunk['lines'][start_index:last_index],
'numlines': last_index - start_index,
'change': chunk['change'],
'meta': chunk.get('meta', {}),
}
if 'left_headers' in chunk['meta']:
left_header = find_header(chunk['meta']['left_headers'])
right_header = find_header(chunk['meta']['right_headers'])
del new_chunk['meta']['left_headers']
del new_chunk['meta']['right_headers']
if left_header or right_header:
header = (left_header, right_header)
else:
header = last_header
new_chunk['meta']['headers'] = header
yield new_chunk
first_line += new_chunk['numlines']
num_lines -= new_chunk['numlines']
assert num_lines >= 0
if num_lines == 0:
break
| 17,480
|
def urlread(url):
"""Return the contents of a url. Raises IOError if couldn't read url."""
try:
urlfile = urllib.request.urlopen(url)
return urlfile.read()
except IOError as e:
print("[!] Error reading url:", url)
        print(e)
sys.exit(1)
| 17,481
|
def parse_metric(y_train, goal):
"""
Parse the metric to the dictionary
"""
y_array = np.array(y_train, dtype=np.float64)
if goal == api_pb2.MINIMIZE:
y_array *= -1
return y_array
| 17,482
|
def erfc(x):
"""Complementary error function (via `http://bit.ly/zOLqbc`_)"""
z = abs(x)
t = 1. / (1. + z / 2.)
r = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (
0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * (
0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * (
-0.82215223 + t * 0.17087277
)))
)))
)))
return 2. - r if x < 0 else r
| 17,483
|
def request_until_success(url, max_attempts=5, wait=5):
"""Makes a request a few times in case of a 500 error.
Should use exponential backoff?
"""
req = urllib.request.Request(url)
success = False
num_tries = 0
while not success:
try:
num_tries += 1
response = urllib.request.urlopen(req)
success = response.getcode() == 200
except urllib.request.HTTPError as e:
logging.error(e)
logging.error("Error on url {}".format(url))
if e.code == 500 and num_tries < max_attempts:
logging.error("trying again soon")
time.sleep(wait)
else:
logging.error(e.reason)
raise e
return json.loads(response.read().decode('UTF-8'))
| 17,484
|
def uuid(name, value) -> "Optional[str]":
"""Validate that the value is a UUID
Args:
name (str): Name of the argument
value (any): A UUID string value
Returns:
The value, or None if value is None
Raises:
InvalidParameterValue: if the value is not a valid UUID
"""
if value is None:
return
if not uuidutils.is_uuid_like(value):
raise InvalidParameterValue(f"Expected UUID for {name}: {value}")
return value
| 17,485
|
def _encode_object_json_aided(obj, name, zipfile):
"""
Encodes composed objects with the help of JSON.
Parameters
----------
obj: PyFar-type
The object, usually values from **objs, see `io.write`.
name: str
The object's name, usually keys from **objs, see `io.write`.
zipfile: zipfile
The zipfile where we'd like to write data.
"""
try:
obj_dict = _encode(obj._encode(), name, zipfile)
type_hint = f'${type(obj).__name__}'
zipfile.writestr(
f'{name}/{type_hint}',
json.dumps(obj_dict))
except AttributeError:
raise NotImplementedError(
            f'You must implement `{type(obj).__name__}._encode` first.')
| 17,486
|
def expandall(text):
"""
Search for abbreviations in text using re_abbr (defined in utils.get_res).
For each abbreviation, find likely full term. Replace each instance of the
abbreviation in the text with the full term.
Parameters
----------
text : str
Text to search for abbreviations.
Returns
-------
text: str
Text with expanded abbreviations.
Examples
----------
>>> text = 'This is a test string (TS). I hope it is informative (inf).'
>>> expanded = expandall(text)
>>> print(expanded)
This is a test string (test string). I hope it is informative (informative).
"""
re_abbr, _ = get_res()
f = re.finditer(re_abbr, text)
for match in f:
if match is not None:
abb = str(match.group(1))
# Very long abbreviations will break regex.
if len(abb) < 9:
abR = make_abbr_regex(match)
fullterm = re.search(abR, text)
if fullterm is not None:
index = fullterm.group(0).find(' (')
fullterm = str(fullterm.group(0)[:index]).strip()
text = replace(text, abb, fullterm)
else:
logger.info('No full term detected for '
'abbreviation {0}'.format(abb))
else:
logger.warning('Abbreviation detection regex returned None.')
return text
| 17,487
|
def get_source_fields(client, source_table):
"""
Gets column names of a table in bigquery
:param client: BigQuery client
    :param source_table: fully qualified table name
    :return: list of column names
    """
    return [field.name for field in client.get_table(source_table).schema]
| 17,488
|
def has_user_based_permission(obj, user, allow_superuser=True, allow_staff=False):
"""
Based on obj.get_user(), checks if provided user is that user.
Accounts for superusers and staff.
"""
    # Fall back to None so the comparison below cannot raise NameError.
    obj_user = obj.get_user() if hasattr(obj, "get_user") else None
# User is logged in
if user.is_authenticated:
# If staff or superuser or share a common group, then yes.
if (allow_staff and user.is_staff) \
or (allow_superuser and user.is_superuser) \
or obj_user == user:
return True
return False
| 17,489
|
def trace_stack_top(trace_stack_var: ContextVar) -> Optional[Any]:
"""Return the element at the top of a trace stack."""
trace_stack = trace_stack_var.get()
return trace_stack[-1] if trace_stack else None
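# Example with a fresh ContextVar holding a list-based stack:
def _trace_stack_top_example():
    from contextvars import ContextVar
    stack: ContextVar = ContextVar("trace_stack", default=None)
    assert trace_stack_top(stack) is None      # unset/empty stack
    stack.set(["root", "child"])
    assert trace_stack_top(stack) == "child"   # top of the stack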
| 17,490
|
def fbconnect():
"""This allows users to use facebook account to sign in."""
if request.args.get("state") != login_session["state"]:
response = make_response(json.dumps("Invalid state parameter."), 401)
response.headers["Content-Type"] = "application/json"
return response
access_token = request.data
print "access token received %s " % access_token
app_id = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_id"]
app_secret = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_secret"]
url = ("https://graph.facebook.com/v2.8/oauth/access_token?"
"grant_type=fb_exchange_token&client_id=%s&client_secret=%s"
"&fb_exchange_token=%s") % (app_id, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
token = data["access_token"]
# Use token to get user info from API
userinfo_url = "https://graph.facebook.com/v2.8/me"
url = userinfo_url + "?access_token=%s&fields=name,id,email" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
    print(data)
login_session["provider"] = "facebook"
login_session["username"] = data["name"]
login_session["email"] = data["email"]
login_session["facebook_id"] = data["id"]
login_session["access_token"] = token
# Get user picture
url = userinfo_url + \
"/picture?access_token=%s&redirect=0&height=200&width=200" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
login_session["picture"] = data["data"]["url"]
# see if user exists
user_id = getUserID(login_session["email"])
if not user_id:
user_id = createUser(login_session)
login_session["user_id"] = user_id
output = ""
output += "<h1>Welcome, "
output += login_session["username"]
output += "!</h1>"
output += "<img src='"
output += login_session["picture"]
output += ("""'style='width: 300px; height: 300px;border-radius: 150px;
-webkit-border-radius: 150px;-moz-border-radius: 150px;'>""")
flash("Now logged in as %s" % login_session["username"])
return output
| 17,491
|
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
| 17,492
|
def init_module_(module, method, **kwargs):
"""
Initialize a module using the specified method.
Args:
module (:obj:`nn.Module`): The module to be initialized.
method (str): The initialization method. Expected methods include
``'constant'``, ``'normal'``, ``'uniform'``, ``'xavier'``,
``'kaiming'``.
"""
assert method in INITIALIZERS
INITIALIZERS.get(method)(module, **kwargs)
| 17,493
|
def naiveMP(tsA, m, tsB=None):
"""
Calculate the Matrix Profile using the naive all-pairs calculation.
Parameters
----------
tsA: Time series containing the queries for which to calculate the Matrix Profile.
m: Length of subsequence to compare.
tsB: Time series to compare the query against. Note that, if no value is provided, tsB = tsA by default.
"""
return _matrixProfile(tsA, m, order.linearOrder, distanceProfile.naiveDistanceProfile, tsB)
| 17,494
|
def markdown_list(
handle: Jira,
jql_text: str,
column_fields=None,
list_type: str = 'ul',
data: Mapping[str, Union[object, Iterable, Sized]] = None,
) -> str:
"""Yes we can ... document later."""
if data is None:
data = query(handle, jql_text, column_fields)
if data.get('error', ''):
return json.dumps(data, indent=2)
if not data['rows']:
if laskea.STRICT:
message = f'WARNING: received 0 results for JQL ({jql_text}) and {list_type}'
if not laskea.DRY_RUN:
print(message, file=sys.stderr)
return message
else:
return ''
items = []
for slot, record in enumerate(data['rows']):
k, v = '', ''
for key, cell in record.items():
if key.lower() not in ('key', 'summary'):
continue
if key.lower() == 'key':
k = f'[{cell}]({BASE_URL.strip("/")}/browse/{cell})'
else:
v = cell
items.append((k, v))
if list_type in ('ol', 'ul'):
lt = '-' if list_type == 'ul' else '1.' # implicit 'ol'
xl = tuple(f'{lt} {key} - {summary}' for key, summary in items)
the_list = '\n'.join(xl) + '\n'
return the_list.replace('\r', '') if BASE_LF_ONLY else the_list
elif list_type == 'dl':
# 'Term'
# ':definition of term'
#
xl = tuple(f'{key}\n:{summary}\n' for key, summary in items)
the_list = '\n'.join(xl) + '\n'
return the_list.replace('\r', '') if BASE_LF_ONLY else the_list
else:
return f'Unexpected list type ({list_type}) in markdown_list not in ({("dl", "ol", "ul")})' + '\n'
| 17,495
|
def render_html(data):
"""
"""
data.setdefault('domain', DOMAIN)
template = '''
<table border="1" cellspacing="0" cellpadding="0">
<tr><td>类型</td><td>{type}</td></tr>
<tr><td>团队</td><td>{team}</td></tr>
<tr><td>项目</td><td>{project}</td></tr>
<tr><td>名称</td><td>{name}</td></tr>
<tr><td>接口</td><td>{interface[total]}个</td></tr>
<tr><td>断言</td><td>{interface[verify]}个</td></tr>
<tr><td>成功率</td><td>{interface[percent]}%</td></tr>
<tr><td>开始时间</td><td>{start}</td></tr>
<tr><td>结束时间</td><td>{end}</td></tr>
<tr><td>报告地址</td><td><a href="{domain}/report/detail?id={id}">测试报告-{id}</a></td></tr>
</table>
'''.format(**data)
return template
| 17,496
|
def docs(session: Session) -> None:
"""Build and serve the documentation with live reloading on file changes."""
args = session.posargs or ["--open-browser", "docs", "docs/_build"]
session.install(".")
session.install("sphinx", "sphinx-autobuild", "sphinx-click", "furo", "sphinx-inline-tabs")
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-autobuild", *args)
| 17,497
|
def segment_cells(image, max_cell_size):
"""Return segmented cells."""
image = identity(image)
wall = threshold_adaptive_median(image, block_size=101)
seeds = remove_small_objects(wall, min_size=100)
seeds = dilate_binary(seeds)
seeds = invert(seeds)
seeds = remove_small_objects(seeds, min_size=5)
seeds = connected_components(seeds, background=0)
segmentation = watershed_with_seeds(-image, seeds=seeds)
segmentation = remove_large_segments(segmentation, max_cell_size)
return segmentation, wall
| 17,498
|
def encrypt(message, key):
"""
>>> encrypt("Hello world",12)
'Tqxxa iadxp'
>>> encrypt("We are Penn State!!!",6)
'Ck gxk Vktt Yzgzk!!!'
>>> encrypt("We are Penn State!!!",5)
'Bj fwj Ujss Xyfyj!!!'
>>> encrypt(5.6,3)
'error'
>>> encrypt('Hello',3.5)
'error'
>>> encrypt(5.6,3.15)
'error'
"""
# --- YOU CODE STARTS HERE
# decide whether it is the right data type
if type(message) == str and type(key) == int:
# define a list that have the ascii number of character
words = string.ascii_letters
# use the for loop to transfer characters with keys
lowerchr = [chr((i - 97) % 26 + 97) for i in range(97 + key, 123 + key)]
capchr = [chr((i - 65) % 26 + 65) for i in range(65 + key, 91 + key)]
# join the lower and upper characters together
asc = ''.join(lowerchr) + ''.join(capchr)
# use the translate and maketrans function to transfer the ascii code to string
return message.translate(str.maketrans(words, asc))
# if the value type is not correct return "error"
return "error"
| 17,499
|