content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import tqdm
def convNodeToProblems(graphList,vecGraphList,masterEncoder,genre=["C","V"],targetParams=CF.targetParams):
    """Convert each eligible node of the vectorized graphs into a prediction problem.

    Args:
        graphList: list of graphs (before vectorization).
        vecGraphList: list of vectorized graphs (parallel to graphList).
        masterEncoder: encoder passed through to convGraphToProblem.
        genre: node categories to turn into problems: "C" compound,
            "V" numeric value, "O" others (the "O" mode may not work).
        targetParams: parameter names whose neighboring nodes are eligible.

    Returns:
        tuple: (list of problem-type vectorized graphs,
                list of answer vectors,
                list of parameter names of the answers).
    """
    probVecGraphList=[]
    targetList=[]
    neighborNodeNameList=[]
    print("converting nodes to problems")
    # BUG FIX: ``tqdm`` is imported as a module, so the progress wrapper is
    # tqdm.tqdm(...); calling the module object raised TypeError.
    for graphID,vecGraph in tqdm.tqdm(enumerate(vecGraphList),total=len(vecGraphList)):
        for node in vecGraph.nodes:
            nodeLabel=graphList[graphID].nodes[node]["label"]
            if str(nodeLabel).startswith("C_"):
                category="C"
            elif is_num(nodeLabel):
                category="V"
            else:
                category="O"
            # BUG FIX: reset per node; previously a node without neighbors
            # reused the name left over from the previous node (or raised
            # NameError on the very first node).
            neighborNodeName=None
            flg=False
            # find nodes of target parameters among the neighbors
            for neighborNode in graphList[graphID].neighbors(node):
                neighborNodeName=graphList[graphID].nodes[neighborNode]["label"]
                if CF.targetParamMode and neighborNodeName in targetParams:
                    flg=True
                    break
            # when target-parameter filtering is disabled, every node qualifies
            # NOTE(review): kept the original ``== False`` comparison on purpose;
            # semantics for non-bool falsy values of targetParamMode are unclear.
            if CF.targetParamMode==False:
                flg=True
            if category in genre and flg:
                g,target=convGraphToProblem(vecGraph,node,masterEncoder)
                probVecGraphList.append(g)
                neighborNodeNameList.append(neighborNodeName)
                if genre==["V"]:
                    # numeric-only mode: the answer is the last vector slot
                    targetList.append([target[-1]])
                else:
                    targetList.append(target[CF.CATEGORY_DIM:])
    return probVecGraphList,targetList,neighborNodeNameList
import SimpleITK as sitk
import os
def create_white_edge_cost_image(t1_file, t2_file, gm_proba_file, out_file):
    """
    Build a cost image for white-matter edge detection and write it to disk.

    The cost is the summed T1/T2 gradient magnitude weighted by the
    complement of the gray-matter probability, so likely gray-matter
    voxels are suppressed.

    :param t1_file: path to the T1-weighted image.
    :param t2_file: path to the T2-weighted image.
    :param gm_proba_file: path to the gray-matter probability image
        (voxel values presumably in [0, 1] -- TODO confirm with caller).
    :param out_file: output path for the cost image.
    :return: absolute path of the written cost image.
    """
    gm_proba = sitk.ReadImage(gm_proba_file)
    # Complement of the GM probability: high where gray matter is unlikely.
    negative_gm_proba = 1 - gm_proba
    t1 = sitk.ReadImage(t1_file)
    t2 = sitk.ReadImage(t2_file)
    t1_gradient = sitk.GradientMagnitude(t1)
    t2_gradient = sitk.GradientMagnitude(t2)
    # Cast so the multiplication below operates on matching pixel types.
    multi_modal_gradient = sitk.Cast(
        (t1_gradient + t2_gradient), negative_gm_proba.GetPixelID()
    )
    cost_image = multi_modal_gradient * negative_gm_proba
    out_file = os.path.abspath(out_file)
    sitk.WriteImage(cost_image, out_file)
    return out_file | edc64e6c602b01b33dc99fb4a724bbc35f523063 | 3,638,501 |
import math
def getDewPoint(temp, humidity):
    """Compute the dew point: the temperature to which the air must be
    cooled for its water vapor to condense into water.

    Only valid for: 1) temperatures between 0C and 60C, 2) relative
    humidity between 1% and 100%, and 3) dew points between 0C and 50C.

    @param temp: temperature in degrees Celsius
    @param humidity: percentage relative humidity
    """
    if not 0 < temp < 60:
        raise InvalidDewPoint("Temperature out of range.")
    if not 1 < humidity < 100:
        raise InvalidDewPoint("Humidity is out of range.")
    # Magnus approximation constants.
    a = 17.271
    b = 237.7
    gamma_value = (a * temp) / (b + temp) + math.log(humidity / 100.0)
    dewPoint = (b * gamma_value) / (a - gamma_value)
    if dewPoint < 0:
        raise InvalidDewPoint("Computed dew point is too low.")
    if dewPoint > 50:
        raise InvalidDewPoint("Computed dew point is too high.")
    return dewPoint
import json
def get_result_handler(rc_value, sa_file=None):
    """Build the result-handler configuration dict.

    Accepts either a JSON object string or the shorthand
    ``<project>.<table>`` BigQuery spec (backwards compatible).

    rc_value (str): Result config argument specified.
    sa_file (str): SA path argument specified.
    """
    try:
        handler_config = json.loads(rc_value)
    except json.decoder.JSONDecodeError:
        parts = rc_value.split(".", 1)
        if len(parts) != 2:
            raise ValueError(f"Unable to parse result handler config: `{rc_value}`")
        handler_config = {
            "type": "BigQuery",
            "project_id": parts[0],
            "table_id": parts[1],
        }
    if sa_file:
        handler_config["google_service_account_key_path"] = sa_file
    return handler_config
def speedup_experiment_ts(args, model_iter_fn, model, example_inputs):
    """
    Measure baseline performance (without using TorchDynamo) of TorchScript
    and optimize_for_inference.  Writes to ./baseline_ts.csv
    """
    variants = [
        ("eager", model),
        ("ts", try_script(model, example_inputs)),
        (
            "ofi",
            backends.ofi(try_script(model, example_inputs), example_inputs),
        ),
        # Disabled baselines, kept for reference:
        # ("nnc", backends.nnc(try_script(model, example_inputs), example_inputs)),
        # ("nvfuser", backends.nvfuser(try_script(model, example_inputs), example_inputs)),
    ]
    return baselines(variants, model_iter_fn, example_inputs, args)
from typing import Tuple
def my_polyhedron_to_label(
    rays: Rays_Base, dists: ArrayLike, points: ArrayLike, shape: Tuple[int, ...]
) -> npt.NDArray[np.int_]:
    """Convenience function: wrap 1-d dists/points into single-item batches
    and forward them to polyhedron_to_label."""
    # Clip distances away from zero so the polyhedron stays non-degenerate.
    clipped_dists = np.clip(dists, 1e-3, None)
    batched_dists = np.expand_dims(clipped_dists, axis=0)
    batched_points = np.expand_dims(points, axis=0)
    return polyhedron_to_label(
        batched_dists, batched_points, rays, shape, verbose=False
    )
import tqdm
from sys import stdout
from sys import path
def order_files_by_ranges(root_path: str, dest_path: str, date_ranges: list, *, save_unsorted: bool = True) -> list:
    """Copies all files (including subdirectories)
    from given path to destination path
    without any loss of data
    and groups them into given subdirectories.

    Args:
        root_path: directory tree to scan for images.
        dest_path: destination root; one subfolder per named range.
        date_ranges: list of (start, end, folder_name) entries; the dates
            are "DD.MM.YYYY" strings and ranges may overlap.
        save_unsorted: when True, images matching no range go to "Unsorted".

    Returns:
        List of file paths that could not be processed.
    """
    # BUG FIX: the module-level ``from sys import path`` shadows os.path
    # (sys.path has no ``join``) and ``import tqdm`` binds the module, not
    # the callable; rebind the needed names locally so the function works.
    from os import path, walk, listdir, mkdir
    from datetime import date
    from shutil import copy2
    from tqdm import tqdm
    t = tqdm(range(get_file_number(root_path)), unit=' img', desc='Progress', file=stdout)
    error_file_list = []
    if save_unsorted:
        size_checked_dirs = {'Unsorted': 1}
    else:
        size_checked_dirs = {}
    for dirpath, dirnames, filenames in walk(root_path):
        for filename in filenames:
            # BUG FIX: assign before the try so the except handler below can
            # never hit an unbound ``tmp_path``.
            tmp_path = path.join(dirpath, filename)
            try:
                # Only files with a permitted (jpg-like) extension.
                if not filename.lower().endswith(permitted_ext):
                    continue
                t.update(1)
                t.refresh()
                # EXIF tag 36867 is DateTimeOriginal, "YYYY:MM:DD HH:MM:SS".
                img = Image.open(tmp_path)
                exif_data = img._getexif()
                year, month, day = exif_data[36867][:10].split(':')
                # Check every range (not for/else): user-selected ranges may
                # overlap each other, so track the first successful copy.
                copied = False
                for n in date_ranges:
                    try:
                        d1 = n[0].split('.')
                        d1 = date(int(d1[2]), int(d1[1]), int(d1[0]))
                        d2 = n[1].split('.')
                        d2 = date(int(d2[2]), int(d2[1]), int(d2[0]))
                        if d1 <= date(int(year), int(month), int(day)) <= d2:
                            dir_path = path.join(dest_path, n[2])
                            # Estimate zfill width from the folder size.
                            # NOTE(review): uses ``dirpath`` (source dir), not
                            # the destination -- confirm this is intended.
                            if dir_path not in size_checked_dirs:
                                size_checked_dirs[dir_path] = [1, len(str(len(listdir(dirpath))))]
                            # Create folder if it doesn't exist yet.
                            if not path.isdir(dir_path):
                                mkdir(dir_path)
                            photo_id = str(size_checked_dirs[dir_path][0]).zfill(size_checked_dirs[dir_path][1])
                            copy2(tmp_path, path.join(dir_path, f'{year}-{month}-{day} - {photo_id}.jpg'))
                            size_checked_dirs[dir_path][0] += 1
                            copied = True
                            break
                    except Exception:
                        # Malformed range entry or unparsable date: try next.
                        continue
                if save_unsorted and not copied:
                    dir_path = path.join(dest_path, 'Unsorted')
                    if not path.isdir(dir_path):
                        mkdir(dir_path)
                    photo_id = str(size_checked_dirs['Unsorted']).zfill(5)
                    copy2(tmp_path, path.join(dir_path, f'{year}-{month}-{day} - {photo_id}.jpg'))
                    size_checked_dirs['Unsorted'] += 1
            except Exception:
                # Unreadable image / missing EXIF / copy failure: record it.
                error_file_list.append(tmp_path)
                continue
    t.close()
    return error_file_list
from typing import Optional
def get_incident_comment(incident_comment_id: Optional[str] = None,
                         incident_id: Optional[str] = None,
                         operational_insights_resource_provider: Optional[str] = None,
                         resource_group_name: Optional[str] = None,
                         workspace_name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIncidentCommentResult:
    """
    Represents an incident comment
    API Version: 2019-01-01-preview.

    :param str incident_comment_id: Incident comment ID
    :param str incident_id: Incident ID
    :param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    __args__ = {
        'incidentCommentId': incident_comment_id,
        'incidentId': incident_id,
        'operationalInsightsResourceProvider': operational_insights_resource_provider,
        'resourceGroupName': resource_group_name,
        'workspaceName': workspace_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    invoke_result = pulumi.runtime.invoke(
        'azure-nextgen:securityinsights:getIncidentComment',
        __args__, opts=opts, typ=GetIncidentCommentResult).value
    return AwaitableGetIncidentCommentResult(
        author=invoke_result.author,
        created_time_utc=invoke_result.created_time_utc,
        etag=invoke_result.etag,
        id=invoke_result.id,
        last_modified_time_utc=invoke_result.last_modified_time_utc,
        message=invoke_result.message,
        name=invoke_result.name,
        type=invoke_result.type)
def getOrElseUpdate(dictionary, key, opr):
    """Fetch dictionary[key], computing and caching it via opr when absent.

    If the key is already present the stored value is returned unchanged;
    otherwise opr(key) is stored under the key and returned.  A None
    dictionary is ignored: the value is computed but not cached.

    >>> d = dict()
    >>> getOrElseUpdate(d, 1, lambda _: _ + 1)
    2
    >>> print(d)
    {1: 2}

    @type dictionary: dictionary of A => B
    @param dictionary: the dictionary
    @type key: A
    @param key: the key
    @type opr: function of A => B
    @param opr: the function to compute new value from keys
    @rtype: B
    @return: the value associated with the key
    """
    if dictionary is None:
        return opr(key)
    if key in dictionary:
        return dictionary[key]
    computed = opr(key)
    dictionary[key] = computed
    return computed
from operator import add
from operator import mul
def gs_norm(f, g, q):
    """
    Compute the squared Gram-Schmidt norm of the NTRU matrix generated by f, g.
    This matrix is [[g, - f], [G, - F]].
    This algorithm is equivalent to line 9 of algorithm 5 (NTRUGen).

    NOTE(review): ``add``/``mul`` are expected to be polynomial (ring)
    operations alongside ``adj``/``div``/``sqnorm``; the module-level
    ``from operator import add, mul`` would not add polynomials
    represented as lists -- confirm the intended helpers are in scope.
    """
    # Squared norm of the first matrix row (g, -f).
    sqnorm_fg = sqnorm([f, g])
    # ffgg = f * adj(f) + g * adj(g): denominator of the GS projection.
    ffgg = add(mul(f, adj(f)), mul(g, adj(g)))
    Ft = div(adj(g), ffgg)
    Gt = div(adj(f), ffgg)
    # Second row norm scales with q^2.
    sqnorm_FG = (q ** 2) * sqnorm([Ft, Gt])
    return max(sqnorm_fg, sqnorm_FG) | da30e1bac41cba3a6c051ba0159234aac5e6e3cc | 3,638,509 |
from phaser import substructure
def find_anomalous_scatterers(*args, **kwds):
    """
    Wrapper for the corresponding method in phaser.substructure, used only
    when the phaser module is available and configured.
    """
    if libtbx.env.has_module("phaser"):
        return substructure.find_anomalous_scatterers(*args, **kwds)
    # Phaser missing: optionally report on the caller-supplied log stream.
    if "log" in kwds:
        print("Phaser not available", file=kwds["log"])
    return None
def OpChr(ea, n):
    """Display the given operand as a character constant (thin wrapper
    around ``idaapi.op_chr``).

    @param ea: linear address
    @param n: number of operand
        - 0 - the first operand
        - 1 - the second, third and all other operands
        - -1 - all operands
    @return: result of ``idaapi.op_chr`` (presumably a success flag --
        TODO confirm against the IDA SDK).
    """
    return idaapi.op_chr(ea, n) | 39c2716ed7344fccd85edda2d27b7a7f305cb14b | 3,638,511 |
def check_access(func):
    """
    Decorator: allow the wrapped endpoint only for policy owners.

    When the route kwargs contain a ``policy_id``, the current user (from
    ``kwargs['user']``) must belong to the ``policy-<id>-owners`` Keycloak
    group; otherwise a 403 problem response is returned.  Requests without
    a ``policy_id`` pass through unchanged.
    """
    from functools import wraps

    @wraps(func)  # IDIOM FIX: preserve the wrapped function's metadata
    def inner(*args, **kwargs):
        if 'policy_id' in kwargs:
            # PERF FIX: only contact Keycloak when a check is actually needed
            # (previously get_keycloak() ran for every request).
            keycloak = get_keycloak()
            current_user = kwargs['user']
            group_name = f'policy-{kwargs["policy_id"]}-owners'
            group_list = keycloak.user_group_list(current_user)
            groups = {group['name']: group for group in group_list}
            if group_name in groups:
                # User has access to delete/edit policy
                return func(*args, **kwargs)
            # User does not have access to delete/edit policy
            return problem(403, 'Forbidden', 'You do not own this policy')
        return func(*args, **kwargs)
    return inner
def score_ranking(score_dict):
    """
    Group-rank dataset scores with pandas.

    :param score_dict: dict {'591_sum_test_0601': 13.1, '591_b_tpg7': 13.1, '591_tdw_ltpg6': 14.14}
    :return: DataFrame
        pd.DataFrame([['591_sum_test_0601', 13.10, 2.0, 0.6667],
                      ['591_b_tpg7', 13.10, 2.0, 0.6667],
                      ['591_tdw_ltpg6', 14.14, 3.0, 1.0]],
                     columns=['dataset_id', 'score', 'ranking', 'ranking_perct'])
    """
    ranked_items = sorted(score_dict.items(), key=lambda item: item[1])
    frame = pd.DataFrame(ranked_items, columns=["dataset_id", "score"])
    # "max" gives tied scores the highest rank of the tie group.
    frame["ranking"] = frame["score"].rank(method="max")
    frame["ranking_perct"] = frame["ranking"] / len(frame)
    return frame
def dock_widget(widget, label="DockWindow", area="right", floating=False):
    """Dock the given widget properly for both M2016 and 2017+.

    :param widget: widget (or object convertible via utils.to_qwidget).
    :param label: window/tab title and basis for the generated object name.
    :param area: docking area; only "right" gets the tab-to-control logic.
    :param floating: when True, show as a floating window instead of docking.
    :return: the (possibly converted) widget.
    """
    # convert widget to Qt if needed
    if not issubclass(widget.__class__, QObject):
        widget = utils.to_qwidget(widget)
    # make sure our widget has a name
    name = widget.objectName()
    if not name:
        name, num = label + "_mainWindow", 1
        # Probe until an unused Maya control name is found.
        while cmds.control(name, exists=True):
            name = label + "_mainWindow" + str(num)
            num += 1
        # NOTE(review): the probed unique ``name`` is discarded here -- the
        # object name set is always label + "_mainWindow"; confirm intended.
        widget.setObjectName(label + "_mainWindow")
    # if `floating` is True, return with `widget.show()`
    if floating is True:
        if not widget.windowTitle():
            widget.setWindowTitle(label)
        widget.show()
        return widget
    # make sure the workspaceControl doesn't exist yet
    control = name + "_WorkspaceControl"
    if cmds.control(control, exists=True):
        cmds.deleteUI(control)
    # create workspaceControl (only works with Maya 2017+)
    flags = {"dockToControl": ["ToolBox", "right"]}
    if area == "right":
        # If the ChannelBox is not visible, fallback on the AttributeEditor.
        _control = "ChannelBoxLayerEditor"
        if not cmds.workspaceControl(_control, query=True, visible=True):
            _control = "AttributeEditor"
        flags = {"tabToControl": [_control, -1]}
    control = cmds.workspaceControl(control)
    cmds.workspaceControl(control, edit=True, label=label, r=True, **flags)
    # Convert workspace to Qt and add the widget into its layout.
    workspace = utils.to_qwidget(control)
    layout = workspace.layout()
    layout.addWidget(widget)
    return widget | 80ef6bde493585e0010a497dfb179600aae04e9e | 3,638,514 |
def compute_benjamin_feir_index(bandwidth, steepness, water_depth, peak_wavenumber):
    """Compute Benjamin-Feir index (BFI) from bandwidth and steepness estimates.

    Reference:
        Serio, Marina, et al. "On the Computation of the Benjamin-Feir Index."
        Nuovo Cimento Della Societa Italiana Di Fisica C, vol. 28, Nov. 2005,
        pp. 893-903.  ResearchGate, doi:10.1393/ncc/i2005-10134-1.
    """
    kd = peak_wavenumber * water_depth
    if kd > 100:
        # Deep-water limit: sidesteps numerical overflow in sinh/cosh.
        nu = alpha = beta = 1
    else:
        sinh_2kd = np.sinh(2 * kd)
        nu = 1 + 2 * kd / sinh_2kd
        alpha = -nu ** 2 + 2 + 8 * kd ** 2 * \
            np.cosh(2 * kd) / sinh_2kd ** 2
        beta = (
            (np.cosh(4 * kd) + 8 - 2 * np.tanh(kd) ** 2) / (8 * np.sinh(kd) ** 4)
            - (2 * np.cosh(kd) ** 2 + 0.5 * nu) ** 2 /
            (sinh_2kd ** 2 * (kd / np.tanh(kd) - (nu / 2) ** 2))
        )
    # Clamp the ratio at zero so the square root never sees a negative.
    return steepness / bandwidth * nu * np.sqrt(np.maximum(beta / alpha, 0))
import sys
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).

    NOTE(review): the mapping is inverted relative to the classic recipe --
    a "yes" answer returns False and "no" returns True; confirm callers
    rely on this before changing it.

    Raises ValueError if default is not "yes", "no" or None.
    """
    valid = {"yes": False, "y": False, "ye": False,
             "no": True, "n": True}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        # BUG FIX: raw_input() is Python 2 only; input() is the Python 3 name.
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def plaintext_property_map(name: str) -> Mapper:
    """
    Build a property map converting between Notion values and plain text.

    Arguments
    ---------
    name : str
        Name of the property.

    Returns
    -------
    Mapper
        Property map wired with the plaintext <-> Notion converters and
        markdown rendering disabled.

    See Also
    --------
    property_map
    """
    return property_map(
        name,
        python_to_api=plaintext_to_notion,
        api_to_python=notion_to_plaintext,
        markdown=False,
    ) | 9b909de0eba2d8f55375896bb2acbbb53c6d759f | 3,638,517 |
def pooling_layer(net_input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1)):
    """
    TensorFlow max-pooling layer with VALID padding (no implicit padding,
    so spatial dimensions shrink).

    :param net_input: Input tensor (presumably NHWC layout -- TODO confirm
        with callers)
    :param ksize: kernel size of pooling
    :param strides: stride of pooling
    :return: Tensor after pooling
    """
    return tf.nn.max_pool(net_input, ksize=ksize, strides=strides, padding='VALID') | 4de6b7bdb5860cfa235975f799204522e77b9299 | 3,638,518 |
from typing import OrderedDict
def set_standard_attrs(da):
    """Attach standard (CF/CMOR-style) metadata to the lat, lon,
    depth_coord and time coordinates of an xarray DataArray, plus the
    time-units encoding, and return the modified array."""
    coord_attrs = {
        "lat": [
            ("standard_name", "latitude"),
            ("units", "degrees_north"),
            ("axis", "Y"),
            ("long_name", "latitude"),
            ("out_name", "lat"),
            ("stored_direction", "increasing"),
            ("type", "double"),
            ("valid_max", "90.0"),
            ("valid_min", "-90.0"),
        ],
        "lon": [
            ("standard_name", "longitude"),
            ("units", "degrees_east"),
            ("axis", "X"),
            ("long_name", "longitude"),
            ("out_name", "lon"),
            ("stored_direction", "increasing"),
            ("type", "double"),
            ("valid_max", "180.0"),
            ("valid_min", "-180.0"),
        ],
        "depth_coord": [
            ("standard_name", "depth"),
            ("units", "m"),
            ("axis", "Z"),
            ("long_name", "ocean depth coordinate"),
            ("out_name", "lev"),
            ("positive", "down"),
            ("stored_direction", "increasing"),
            ("valid_max", "12000.0"),
            ("valid_min", "0.0"),
        ],
        "time": [
            ("standard_name", "time"),
            ("axis", "T"),
            ("long_name", "time"),
            ("out_name", "time"),
            ("stored_direction", "increasing"),
        ],
    }
    # Assign each coordinate an OrderedDict built from its (key, value) pairs.
    for coord_name, attr_pairs in coord_attrs.items():
        da.coords[coord_name].attrs = OrderedDict(attr_pairs)
    da.coords["time"].encoding["units"] = "days since '1900-01-01'"
    return da
import os
def find_git_repos(folder):
    """
    Return a list of all git repositories found anywhere under the given
    ancestor folder (any directory containing a ``.git`` subfolder).
    """
    repos = []
    for current_dir, child_dirs, _files in os.walk(folder):
        if '.git' in child_dirs:
            repos.append(current_dir)
    return repos
def isnotebook():
    """
    Utility function to detect if the code being run is within a jupyter
    notebook. Useful to change progress indicators for example.

    Returns
    -------
    isnotebook : bool
        True if the function is being called inside a notebook, False otherwise.
    """
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # Plain Python interpreter: get_ipython does not exist.
        return False
    # Only the ZMQ-based shell corresponds to a notebook/qtconsole; a
    # terminal IPython shell (or anything else) does not count.
    return shell_name == "ZMQInteractiveShell"
from typing import Optional
from typing import Tuple
from typing import List
from typing import Dict
def get_poagraph(dagmaf: DAGMaf.DAGMaf,
                 fasta_provider: missings.FastaProvider,
                 metadata: Optional[msa.MetadataCSV]) -> \
        Tuple[List[graph.Node], Dict[msa.SequenceID, graph.Sequence]]:
    """Gets poagraph from given dagmaf using fasta_provider and metadata.

    Args:
        dagmaf: DagMaf that will be converted to Poagraph.
        fasta_provider: Provider of symbols missing in DagMaf.
        metadata: MetadataCSV.

    Returns:
        Tuple of poagraph elements (nodes list, sequences dict).
    """
    sequences_in_dagmaf = _get_sequences_ids(dagmaf)
    build_state = _BuildState(initial_nodes=[],
                              initial_sequences=_init_sequences(sequences_in_dagmaf, metadata),
                              initial_edges=_init_free_edges(sequences_in_dagmaf),
                              seqs_info=_get_seqs_info(dagmaf, sequences_in_dagmaf),
                              initial_column_id=graph.ColumnID(-1),
                              fasta_provider=fasta_provider)
    _complement_starting_nodes(build_state)
    # IDIOM FIX: the enumerate index was never used.
    for mafnode in dagmaf.dagmaf_nodes:
        _process_block(build_state, mafnode)
    return build_state.nodes, build_state.sequences
def const_bool(value):
    """Create an expression representing the given boolean value.

    Non-boolean values are coerced by truthiness, so const_bool(1)
    is equivalent to const_bool(True).
    """
    digit = '1' if value else '0'
    return ['constant', 'bool', [digit]]
import struct
def long_to_bytes(n, blocksize=0):
    """Convert an integer to a big-endian byte string.

    If the optional ``blocksize`` is provided and greater than zero,
    the byte string is padded with binary zeros (on the front) so that
    the total length of the output is a multiple of blocksize.

    If ``blocksize`` is zero or not provided, the byte string will be of
    minimal length -- at least one byte; ``n <= 0`` yields a single zero
    byte, matching the historical behaviour of this helper.
    """
    n = int(n)
    if n <= 0:
        # The original struct-based loop produced a lone NUL byte here.
        s = b'\x00'
    else:
        # MODERNIZATION: int.to_bytes replaces the manual pack/strip loop
        # (the original docstring itself recommended this).
        s = n.to_bytes((n.bit_length() + 7) // 8, 'big')
    # Front-pad with zeros up to the next multiple of blocksize.
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\x00' + s
    return s
def vector_matrix_mul(v, M):
    """
    Return the product of vector v and matrix M.

    Iterates only over M's stored (sparse) entries via M.f and uses
    bracket access v[...] on the vector, which avoids some sparsity bugs.
    """
    assert M.D[0] == v.D
    accumulator = dict.fromkeys(M.D[1], 0)
    for row_key, col_key in M.f:
        accumulator[col_key] += M[row_key, col_key] * v[row_key]
    return Vec(M.D[1], accumulator)
def peek(library, session, address, width):
    """Read an 8, 16, 32 or 64-bit value from the specified address.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param address: Source address to read the value.
    :param width: Number of bits to read.
    :return: Data read from bus.
    :rtype: bytes
    """
    # Dispatch table keyed by bit width.
    readers = {8: peek_8, 16: peek_16, 32: peek_32, 64: peek_64}
    if width not in readers:
        raise ValueError('%s is not a valid size. Valid values are 8, 16, 32 or 64' % width)
    return readers[width](library, session, address)
import pkg_resources
import textwrap
import os
import io
def create_text_image(text, image_export=False, **kwargs):
    """
    Create a jpg with given text and return in bytes format

    The text is centered on a fixed 720x744 white canvas.  A binary search
    (find_optimal_size) picks the largest font size and line width that
    still fit, the wrapped lines are drawn centered, and the rendered JPEG
    is returned as raw bytes.

    :param text: text to render; may contain newlines.
    :param image_export: when True, additionally save the image under the
        trace directory (debugging side effect).
    :return: JPEG-encoded image bytes.
    """
    text_canvas_w = 720
    text_canvas_h = 744
    text_canvas_bg = 'white'
    text_canvas_fg = 'black'
    text_canvas_font_name = 'open_sans_emoji.ttf'
    def load_font(size):
        # Load the bundled TTF at the requested size from package data.
        return ImageFont.truetype(pkg_resources.resource_stream(__name__, text_canvas_font_name), size)
    def find_optimal_size(msg, min_size=20, max_size=400, min_line_w=1, max_line_w=80, padding=0):
        """
        Find optimal font size and line width for a given text
        """
        if min_line_w >= max_line_w:
            raise Exception("illegal arguments, min_line_w < max_line_w needed")
        def line_width(font_size, line_padding=70):
            # Inner binary search: widest character count per line that fits
            # the canvas width at this font size (probed with runs of '1').
            l = min_line_w
            r = max_line_w
            font = load_font(font_size)
            while l < r:
                n = floor((l + r) / 2)
                t = ''.join([char * n for char in '1'])
                font_w, font_h = font.getsize(t)
                font_w = font_w + (2 * line_padding)
                if font_w >= text_canvas_w:
                    r = n - 1
                    pass
                else:
                    l = n + 1
                    pass
            return n
        # Outer binary search over font size: grow until the wrapped text no
        # longer fits the canvas height.
        size_l = min_size
        size_r = max_size
        last_line_w = 0
        last_size = 0
        while size_l < size_r:
            size = floor((size_l + size_r) / 2.0)
            last_size = size
            line_w = line_width(size)
            last_line_w = line_w
            lines = []
            for line in msg.splitlines():
                cur_lines = textwrap.wrap(line, width=line_w)
                for cur_line in cur_lines:
                    lines.append(cur_line)
            font = load_font(size)
            # NOTE(review): the line height is measured on the whole message,
            # not on a single wrapped line -- confirm this is intended.
            total_w, line_h = font.getsize(msg)
            tot_height = len(lines) * line_h
            if tot_height + (2 * padding) < text_canvas_h:
                start_y = (text_canvas_h - tot_height) / 2
            else:
                start_y = 0
            if start_y == 0:
                size_r = size - 1
            else:
                # does fit
                size_l = size + 1
        return last_size, last_line_w
    def center_y(lines, font_h):
        # Vertical offset that centers the text block (0 if it overflows).
        tot_height = len(lines) * font_h
        if tot_height < text_canvas_h:
            return (text_canvas_h - tot_height) // 2
        else:
            return 0
    size, line_w = find_optimal_size(text, padding=50)
    logger.debug(f'using font with size: {size}, width: {line_w}')
    font = load_font(size)
    font_w, font_h = font.getsize(text)
    lines = []
    for line in text.splitlines():
        cur_lines = textwrap.wrap(line, width=line_w)
        for cur_line in cur_lines:
            lines.append(cur_line)
    text_y_start = center_y(lines, font_h)
    canvas = Image.new('RGB', (text_canvas_w, text_canvas_h), text_canvas_bg)
    draw = ImageDraw.Draw(canvas)
    for line in lines:
        width, height = font.getsize(line)
        # Center each wrapped line horizontally.
        draw.text(((text_canvas_w - width) // 2, text_y_start), line,
                  font=font,
                  fill=text_canvas_fg,
                  embedded_color=True)
        text_y_start += (height)
    if image_export:
        name = strftime("postcard_creator_export_%Y-%m-%d_%H-%M-%S_text.jpg", gmtime())
        path = os.path.join(_get_trace_postcard_sent_dir(), name)
        logger.info('exporting image to {} (image_export=True)'.format(path))
        canvas.save(path)
    img_byte_arr = io.BytesIO()
    canvas.save(img_byte_arr, format='jpeg')
    return img_byte_arr.getvalue() | 1cff45768f43c0c1737f599c38c377ae0366534b | 3,638,527 |
def mtf_from_psf(psf, dx=None):
    """Compute the MTF from a given PSF.

    Parameters
    ----------
    psf : `prysm.RichData` or `numpy.ndarray`
        object with data property having 2D data containing the psf,
        or the array itself
    dx : `float`
        sample spacing of the data

    Returns
    -------
    RichData
        container holding the MTF, ready for plotting or slicing.
    """
    spectrum, df = transform_psf(psf, dx)
    center_row, center_col = (int(np.ceil(dim / 2)) for dim in spectrum.shape)
    magnitude = abs(spectrum)
    # Normalize so the DC (center) sample equals 1.
    magnitude /= magnitude[center_row, center_col]
    return RichData(data=magnitude, dx=df, wavelength=None)
def check_intersection(vertical_line: Line, other_line: Line) -> bool:
    """
    Check for intersection between two line segments.

    :param vertical_line: The first line segment. Guaranteed to be vertical.
    :param other_line: The second line segment.
    :return: Whether or not they intersect (truthiness of the intersection
        point returned by get_intersection_point).
    """
    intersection = get_intersection_point(vertical_line, other_line)
    # IDIOM FIX: bool(...) is the idiomatic spelling of ``not not ...``.
    return bool(intersection)
from krun.scheduler import ManifestManager
from krun.platform import detect_platform
def get_session_info(config):
    """Gets information about the session (for --info)

    Overwrites any existing manifest file.
    Separated from print_session_info for ease of testing

    :param config: krun config object used to build the manifest.
    :return: dict with process-execution / in-process iteration counts and
        the skipped and non-skipped benchmark keys.
    """
    platform = detect_platform(None, config)
    # new_file=True deliberately overwrites any existing manifest.
    manifest = ManifestManager(config, platform, new_file=True)
    return {
        "n_proc_execs": manifest.total_num_execs,
        "n_in_proc_iters": manifest.get_total_in_proc_iters(config),
        "skipped_keys": manifest.skipped_keys,
        "non_skipped_keys": manifest.non_skipped_keys,
    } | 25729c3838fc7b600600dd74da44a3be9fd7b46d | 3,638,530 |
def rotate(x, y, a):
    """Rotate vector (x, y) by an angle a (radians)."""
    cos_a = np.cos(a)
    sin_a = np.sin(a)
    return x * cos_a + y * sin_a, -x * sin_a + y * cos_a
def filt_all(list_, func):
    """Like filter but reverse arguments and returns list"""
    return list(filter(func, list_))
def auth_user_logout(payload,
                     override_authdb_path=None,
                     raiseonfail=False,
                     config=None):
    """Logs out a user.
    Deletes the session token from the session store. On the next request
    (redirect from POST /auth/logout to GET /), the frontend will issue a new
    one.
    The frontend MUST unset the cookie as well.
    Parameters
    ----------
    payload : dict
        The payload dict should contain the following keys:
        - session_token: str
        - user_id: int
        In addition to these items received from an authnzerver client, the
        payload must also include the following keys (usually added in by a
        wrapping function):
        - reqid: int or str
        - pii_salt: str
    override_authdb_path : str or None
        The SQLAlchemy database URL to use if not using the default auth DB.
    raiseonfail : bool
        If True, and something goes wrong, this will raise an Exception instead
        of returning normally with a failure condition.
    config : SimpleNamespace object or None
        An object containing systemwide config variables as attributes. This is
        useful when the wrapping function needs to pass in some settings
        directly from environment variables.
    Returns
    -------
    dict
        Returns a dict containing the result of the logout request
        (success flag, user_id, messages, and failure_reason on failure).
    """
    # Wrapper-provided metadata must exist before anything can be logged
    # with PII hashing.
    for key in ('reqid', 'pii_salt'):
        if key not in payload:
            LOGGER.error(
                "Missing %s in payload dict. Can't process this request." % key
            )
            return {
                'success': False,
                'failure_reason': (
                    "invalid request: missing '%s' in request" % key
                ),
                'user_id': None,
                'messages': ["Invalid user logout request."],
            }
    # Client-provided items required to identify the session to delete.
    for key in ('session_token', 'user_id'):
        if key not in payload:
            LOGGER.error(
                '[%s] Invalid user logout request, missing %s.' %
                (payload['reqid'], key)
            )
            return {
                'success': False,
                'failure_reason': (
                    "invalid request: missing '%s' in request" % key
                ),
                'messages': ["Invalid user logout request. "
                             "No %s provided." % key],
            }
    # check if the session token exists
    session = auth_session_exists(
        {'session_token': payload['session_token'],
         'reqid': payload['reqid'],
         'pii_salt': payload['pii_salt']},
        override_authdb_path=override_authdb_path,
        raiseonfail=raiseonfail)
    if session['success']:
        # check the user ID claimed by the caller against the session's owner
        if payload['user_id'] == session['session_info']['user_id']:
            deleted = auth_session_delete(
                {'session_token': payload['session_token'],
                 'reqid': payload['reqid'],
                 'pii_salt': payload['pii_salt']},
                override_authdb_path=override_authdb_path,
                raiseonfail=raiseonfail
            )
            if deleted['success']:
                LOGGER.info(
                    "[%s] User logout request successful for "
                    "session_token: %s, user_id: %s. " %
                    (payload['reqid'],
                     pii_hash(payload['session_token'],
                              payload['pii_salt']),
                     pii_hash(payload['user_id'],
                              payload['pii_salt']))
                )
                return {
                    'success': True,
                    'user_id': session['session_info']['user_id'],
                    'messages': ["Logout successful."]
                }
            else:
                # Session delete failed at the DB layer.
                # NOTE(review): this log text (and the two below) is
                # copy-pasted from the user-id-mismatch branch and does not
                # describe this failure -- consider distinct messages.
                LOGGER.error(
                    "[%s] User logout request failed for "
                    "session_token: %s, user_id: %s. "
                    "Invalid user_id provided for "
                    "corresponding session token." %
                    (payload['reqid'],
                     pii_hash(payload['session_token'],
                              payload['pii_salt']),
                     pii_hash(payload['user_id'],
                              payload['pii_salt']))
                )
                return {
                    'success': False,
                    'failure_reason': (
                        "delete session failed"
                    ),
                    'user_id': payload['user_id'],
                    'messages': ["Logout failed. Invalid "
                                 "session_token for user_id."]
                }
        else:
            # The caller's user_id does not own this session.
            LOGGER.error(
                "[%s] User logout request failed for "
                "session_token: %s, user_id: %s. "
                "Invalid user_id provided for "
                "corresponding session token." %
                (payload['reqid'],
                 pii_hash(payload['session_token'],
                          payload['pii_salt']),
                 pii_hash(payload['user_id'],
                          payload['pii_salt']))
            )
            return {
                'success': False,
                'failure_reason': (
                    "user does not exist"
                ),
                'user_id': payload['user_id'],
                'messages': [
                    "Logout failed. Invalid session_token for user_id."
                ]
            }
    else:
        # No such session token in the session store.
        LOGGER.error(
            "[%s] User logout request failed for "
            "session_token: %s, user_id: %s. "
            "Invalid user_id provided for "
            "corresponding session token." %
            (payload['reqid'],
             pii_hash(payload['session_token'],
                      payload['pii_salt']),
             pii_hash(payload['user_id'],
                      payload['pii_salt']))
        )
        return {
            'success': False,
            'failure_reason': (
                "session does not exist"
            ),
            'user_id': payload['user_id'],
            'messages': ["Logout failed. Invalid "
                         "session_token for user_id."]
        } | 1f468a53f82a58f8c5c3f5397d6f026276a93f05 | 3,638,533 |
def rx_observer(on_next: NextHandler, on_error: ErrorHandler = default_error, on_completed: CompleteHandler = default_on_completed) -> Observer:
    """Return an observer.

    The underlying implementation uses a named tuple (ObserverDefinition).

    Args:
        on_next (NextHandler): on_next handler which processes items
        on_error (ErrorHandler): on_error handler (defaults to default_error,
            which raises the exception)
        on_completed (CompleteHandler): on_completed handler (defaults to a noop)

    Returns:
        (Observer): an Observer
    """
    return ObserverDefinition(on_next=on_next, on_error=on_error, on_completed=on_completed) | 2ebfd3c6b4e5ed854fdc89e76ac006fddd20ad0b | 3,638,534 |
def _rav_setval_ ( self , value ) :
"""Assign the valeu for the variable
>>> var = ...
>>> var.value = 10
"""
value = float ( value )
self.setVal ( value )
return self.getVal() | 80ad7ddec68d5c97f72ed63dd6ba4a1101de99cb | 3,638,535 |
import scipy
def import_matrix_as_anndata(matrix_path, barcodes_path, genes_path):
    """Load a matrix-market file plus barcode/gene tables as an Anndata object.

    :param matrix_path: path to the matrix ec file
    :type matrix_path: str
    :param barcodes_path: path to the barcodes txt file
    :type barcodes_path: str
    :param genes_path: path to the genes txt file
    :type genes_path: str
    :return: a new Anndata object
    :rtype: anndata.Anndata
    """
    # Barcodes: one barcode per row, used as the observation index.
    barcodes = pd.read_csv(
        barcodes_path, index_col=0, header=None, names=['barcode']
    )
    # Genes: tab-separated, gene ids used as the variable index.
    genes = pd.read_csv(
        genes_path, header=None, index_col=0, names=['gene_id'], sep='\t'
    )
    matrix = scipy.io.mmread(matrix_path).tocsr()
    return anndata.AnnData(X=matrix, obs=barcodes, var=genes)
import torch
import tqdm
def get_representations(dataset, pretrained_model, alphabet, batch_size=128):
    """Embed every sequence in *dataset* with a pretrained ESM model.

    Args:
        dataset: dataset providing (tokens, labels, seqs) batches and a
            ``collate_fn`` attribute.
        pretrained_model: pretrained ESM model (moved to GPU when available).
        alphabet: ESM alphabet used to build the batch converter.
        batch_size: number of sequences per forward pass.

    Returns: N x 1280 numpy array of per-sequence <cls>-token embeddings.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pretrained_model = pretrained_model.to(device)
    dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=False, collate_fn=dataset.collate_fn
    )
    batch_converter = alphabet.get_batch_converter()
    sequence_representations = []
    # BUG FIX: `tqdm` is imported as a module here, so calling `tqdm(...)`
    # invoked the module itself (TypeError). Use the `tqdm.tqdm` wrapper.
    progress_bar = tqdm.tqdm(dataloader, ascii=True)
    for tokens, labels, seqs in progress_bar:
        esm_batch = list(zip(labels, seqs))
        batch_labels, batch_strs, batch_tokens = batch_converter(esm_batch)
        batch_tokens = batch_tokens.to(device)
        with torch.no_grad():
            results = pretrained_model(
                batch_tokens, repr_layers=[33], return_contacts=True
            )
        token_representations = results["representations"][33]
        outputs = token_representations[:, 0]  # embedding of the <cls> token
        sequence_representations.append(outputs.cpu().numpy())
    return np.vstack(sequence_representations)
def rem_hap_cands():
    """JSON endpoint that sets the haplotype-candidate designation to
    false for a sample or set of samples."""
    requested_samples = flask.request.form['samples']
    return mds.remove_hap_cands(requested_samples)
from pathlib import Path
def construct_subdirExample(str_dirname):
    """Build a sample directory tree for the examples and return its file list.

    Creates the following layout under *str_dirname* (idempotent — existing
    directories/files are kept)::

        DirOrigem/
        ├── dir01
        │   ├── arq01.dat
        │   ├── arq02.dat
        │   ├── f001.txt
        │   ├── f002.txt
        │   └── f003.txt
        └── dir02
            ├── a001.dat
            ├── a002.dat
            └── f.zip

        2 directories, 8 files

    (The original docstring showed ``a001.txt``/``a002.txt`` under dir02,
    but the code has always created ``.dat`` files — the tree above matches
    the actual behavior.)

    Args:
        str_dirname: name (relative or absolute) of the root directory.

    Returns:
        list[str]: paths (as strings) of every file found under the root.
    """
    dir_origem = Path('.') / Path(str_dirname)
    subdir_01 = Path(str_dirname) / 'dir01'
    subdir_02 = Path(str_dirname) / 'dir02'
    # Create the directory skeleton; mkdir with exist_ok is safe on reruns.
    for directory in (dir_origem, subdir_01, subdir_02):
        directory.mkdir(parents=True, exist_ok=True)
    # Sample files per directory, created only if missing.
    sample_files = (
        (subdir_01, ('f001.txt', 'f002.txt', 'f003.txt', 'arq01.dat', 'arq02.dat')),
        (subdir_02, ('a001.dat', 'a002.dat', 'f.zip')),
    )
    for parent, names in sample_files:
        for name in names:
            path_file = Path(parent, name)
            if not path_file.is_file():
                path_file.parent.mkdir(parents=True, exist_ok=True)
                path_file.touch()
    lst_files = []
    # Walk the tree, describing every entry as the original example did.
    for item in dir_origem.glob('**/*'):
        if item.is_file():
            # Relative path + file name
            print(item)
            # File name
            print(item.name)
            # File extension
            print(item.suffix)
            # Absolute path (from the filesystem root)
            print(item.resolve())
            # File size in bytes
            print(item.stat().st_size)
            # Store the relative path as a string
            lst_files.append(str(item))
        if item.is_dir():
            print(item.parent)
    print(' CRIADO O DIRETÓRIO DE EXEMPLO COM SUCESSO! ')
    return lst_files
import json
import pandas as pd
from urllib.request import urlopen
import os
def geojson_to_df(in_geojson, encoding="utf-8", drop_geometry=True):
    """Converts a GeoJSON object to a pandas DataFrame.

    Args:
        in_geojson (str | dict): The input GeoJSON file path, URL, or dict.
        encoding (str, optional): The encoding of the GeoJSON object. Defaults to "utf-8".
        drop_geometry (bool, optional): Whether to drop the geometry column. Defaults to True.

    Raises:
        FileNotFoundError: If the input GeoJSON file could not be found.
        TypeError: If in_geojson is neither a str nor a dict.

    Returns:
        pd.DataFrame: A pandas DataFrame containing the GeoJSON features.
    """
    if isinstance(in_geojson, str):
        if in_geojson.startswith("http"):
            with urlopen(in_geojson) as f:
                data = json.load(f)
        else:
            in_geojson = os.path.abspath(in_geojson)
            if not os.path.exists(in_geojson):
                raise FileNotFoundError("The provided GeoJSON file could not be found.")
            with open(in_geojson, encoding=encoding) as f:
                data = json.load(f)
    elif isinstance(in_geojson, dict):
        data = in_geojson
    else:
        # BUG FIX: other input types previously fell through with `data`
        # unbound, raising a confusing NameError below.
        raise TypeError("in_geojson must be a file path, URL, or dict.")
    df = pd.json_normalize(data["features"])
    # Strip the "properties." prefix json_normalize adds to feature attributes.
    df.columns = [col.replace("properties.", "") for col in df.columns]
    if drop_geometry:
        df = df[df.columns.drop(list(df.filter(regex="geometry")))]
    return df
def UndistortImage(image, image_size,
                   image_rotation=None, image_center=None,
                   out_xs=None, out_ys=None,
                   direction='fwd', regenerate_grids=True,
                   **kwargs):
    """Interpolate *image* onto the affine output grid for *direction*.

    Remember the recipe for fixing gwyddion image orientation:
    ``image0 = image0.T[:, ::-1]``.
    """
    global grids
    out_xs = default_out_xs if out_xs is None else out_xs
    out_ys = default_out_ys if out_ys is None else out_ys
    # (Re)build the cached affine grids from the feature points for this direction.
    if grids is None or regenerate_grids:
        src = source_pts[direction]
        dst = destination_pts[direction]
        grids = numrec.AffineGridsFromFeaturePoints(dst, [src], xs=out_xs, ys=out_ys)
    xgrid, ygrid = getXYGrids(image.shape, image_size,
                              rotation=image_rotation, center=image_center)
    undistorted = numrec.InterpolateImageToAffineGrid(image,
                                                      grid_pts=grids['grid_pts'][0],
                                                      image_xgrid=xgrid,
                                                      image_ygrid=ygrid,
                                                      **kwargs)
    return AWA(undistorted, axes=[out_xs, out_ys])
def quartic_oscillator(grids, k=1.):
    """Quartic-oscillator potential evaluated on a grid.

    Args:
        grids: numpy array of grid points for evaluating the 1d potential,
            shape (num_grids,).
        k: strength constant for the potential.

    Returns:
        Potential values on the grid, shape (num_grids,).
    """
    return 0.5 * k * grids ** 4
def compute_row_similarities(A):
    """
    Compute pairwise (cosine) similarities between the rows of a binary sparse matrix.
    Parameters
    ----------
    A: scipy csr_matrix, shape (rows, cols)
        Binary matrix.
    Returns
    -------
    sim: scipy sparse matrix, shape (rows, rows)
        Pairwise row similarities (dot products of L2-normalized rows).
    """
    # normalize A in row-axis
    # 1) compute per-row norm
    norm = np.sqrt(A.sum(axis=1)) # Y is binary: \sum 1^2 = \sum 1
    norm = sparse.csr_matrix(norm) # save as sparse
    # 2) build left-multiplying norm (https://stackoverflow.com/questions/16043299/substitute-for-numpy-broadcasting-using-scipy-sparse-csc-matrix)
    # summary: sparse arrays don't broadcast and something like
    # np.where(norm[:, na]==0., 0., A/norm[:, na]) wouldn't work
    # we need to use the left-multiplying trick to achieve that
    # The (data, indices, indptr) triple below builds a diagonal matrix with
    # 1/norm on the diagonal for rows whose norm is nonzero; all-zero rows get
    # no diagonal entry at all, so they remain zero after the multiplication
    # (avoiding division by zero).
    data = 1. / norm.data
    indices = np.where(np.diff(norm.indptr) != 0)[0]
    indptr = norm.indptr
    rows = A.shape[0]
    left_norm = sparse.csr_matrix((data, indices, indptr), shape=(rows, rows))
    # 3) compute row-wise normalized version of A
    A_norm = left_norm.dot(A)
    # compute pairwise row similarities
    sim = A_norm.dot(A_norm.T)
    return sim
def sha9(R, S):
    """Shape functions for a 9-noded (Lagrangian) quadrilateral element.

    Parameters
    ----------
    R : float
        Natural coordinate in the first direction, in [-1, 1].
    S : float
        Natural coordinate in the second direction, in [-1, 1].

    Returns
    -------
    N : ndarray, shape (2, 18)
        Matrix of interpolation functions: the 9 shape functions occupy
        the even columns of row 0 (x-dofs) and the odd columns of row 1
        (y-dofs).

    Examples
    --------
    At the element center only the ninth (center) node is active, and the
    shape functions form a partition of unity:

    >>> N = sha9(0, 0)
    >>> bool(np.isclose(N[0, 16], 1.0)) and bool(np.isclose(N[0].sum(), 1.0))
    True

    At the corner (1, 1) only the third node is active:

    >>> bool(np.isclose(sha9(1, 1)[0, 4], 1.0))
    True
    """
    N = np.zeros((2, 18))
    SN = np.zeros((9))
    ONE = 1.0
    QUART = 0.25
    HALF = 0.5
    RP = ONE + R
    RM = ONE - R
    RMS = ONE - R * R
    SP = ONE + S
    SM = ONE - S
    SMS = ONE - S * S
    # Center "bubble" function, then the four mid-side functions corrected
    # by half the bubble so the hierarchy stays interpolatory.
    SN[8] = RMS * SMS
    SN[7] = HALF * SMS * RM - HALF * SN[8]
    SN[6] = HALF * RMS * SP - HALF * SN[8]
    SN[5] = HALF * SMS * RP - HALF * SN[8]
    SN[4] = HALF * RMS * SM - HALF * SN[8]
    # Corner functions, corrected by the two adjacent mid-side functions and
    # a quarter of the center bubble.
    SN[0] = QUART * RM * SM - HALF * SN[7] - HALF * SN[4] - QUART * SN[8]
    SN[1] = QUART * RP * SM - HALF * SN[5] - HALF * SN[4] - QUART * SN[8]
    SN[2] = QUART * RP * SP - HALF * SN[5] - HALF * SN[6] - QUART * SN[8]
    SN[3] = QUART * RM * SP - HALF * SN[7] - HALF * SN[6] - QUART * SN[8]
    # Interleave the shape functions into the (2, 18) interpolation matrix.
    N[0, ::2] = SN
    N[1, 1::2] = SN
    return N
def padding(seq, size, mode):
    """Pad (or trim) *seq* to length *size*.

    Parameters
    ----------
    seq: np.array
        The sequence to be padded.
    size: int
        Target length.
    mode: str
        Padding mode, one of {"zero", "repeat"}.

    Returns
    -------
    seq: np.ndarray

    Raises
    ------
    ValueError
        If *mode* is not a supported padding mode.
    """
    if mode == "zero":
        return np.array(trimmer(seq, size, filler=0))
    if mode == "repeat":
        return np.array(repeat_padding(seq, size))
    # BUG FIX: an unrecognized mode previously returned the input unchanged,
    # silently skipping the padding step.
    raise ValueError("Unknown padding mode: %r (expected 'zero' or 'repeat')" % (mode,))
def get_dense_span_ends_from_starts(dense_span_starts,
                                    dense_span_ends):
  """For every mention start position, find the corresponding end position."""
  seq_len = tf.shape(dense_span_starts)[0]
  start_positions = tf.cast(tf.where(tf.equal(dense_span_starts, 1)), tf.int32)
  end_positions = tf.cast(
      tf.squeeze(tf.where(tf.equal(dense_span_ends, 1)), 1), tf.int32)
  # Scatter each end index into the slot of its matching start position.
  result = tf.zeros(seq_len, dtype=tf.int32)
  result = tf.tensor_scatter_nd_add(result, start_positions, end_positions)
  return result
def bb_to_plt_plot(x, y, w, h):
    """Convert a bounding box (x, y, w, h) into the X/Y coordinate lists
    expected by ``plt.plot(X, Y)`` for drawing its outline with pyplot.
    """
    left, right = x, x + w
    bottom, top = y, y + h
    X = [left, left, right, right, left]
    Y = [bottom, top, top, bottom, bottom]
    return X, Y
def handler404(request, *args):
    """
    Renders the 404 page.

    :param request: the request object used
    :type request: HttpRequest
    """
    response = render(request, '404.html', status=404)
    return response
def merge_dimensions(z, axis, sizes):
    """Merge dimensions of a tensor into one dimension. This operation is the opposite
    of :func:`split_dimension`.
    Args:
        z (tensor): Tensor to merge.
        axis (int): Axis to merge into.
        sizes (iterable[int]): Sizes of dimensions to merge.
    Returns:
        tensor: Reshaped version of `z`.
    """
    shape = B.shape(z)
    # The indexing below will only be correct for positive `axis`, so resolve the index.
    axis = resolve_axis(z, axis)
    # The merged group occupies axes [axis - len(sizes) + 1, axis]; collapse
    # them into a single axis of length prod(sizes), keeping leading and
    # trailing dimensions untouched.
    return B.reshape(
        z,
        *shape[: axis - len(sizes) + 1],
        np.prod(sizes),
        *shape[axis + 1 :],
    )
import collections
import re
def get_assignment_map_replaced(init_ckpt,
                                name_replacement_dict={},
                                list_vars=None):
    """Build a checkpoint assignment map with renamed variable paths.

    name_replacement_dict = { old_name_str_chunk: new_name_str_chunk }
    """
    if list_vars is None:
        list_vars = tf.global_variables()

    # Map graph variable names (with the trailing ':0' stripped) to variables.
    name_to_variable = collections.OrderedDict()
    for var in list_vars:
        var_name = var.name
        match = re.match("^(.*):\\d+$", var_name)
        if match is not None:
            var_name = match.group(1)
        name_to_variable[var_name] = var

    ckpt_vars = tf.train.list_variables(init_ckpt)

    assignment_map = collections.OrderedDict()
    for entry in ckpt_vars:
        ckpt_name = entry[0]
        # Apply the first matching chunk replacement, if any.
        renamed = None
        for old_chunk, new_chunk in name_replacement_dict.items():
            if old_chunk in ckpt_name:
                renamed = ckpt_name.replace(old_chunk, new_chunk)
                break
        # Skip checkpoint variables with no replacement or no graph counterpart.
        if renamed is None or renamed not in name_to_variable:
            continue
        assignment_map[ckpt_name] = renamed
        print("name_old: %s" % ckpt_name)
        print("name_new: %s" % renamed)

    return assignment_map
def RULE110():
    """RULE 110 cellular automata node.

    .. code::

        000 : 0
        001 : 1
        010 : 1
        011 : 1
        100 : 0
        101 : 1
        110 : 1
        111 : 0
    """
    truth_table = [0, 1, 1, 1, 0, 1, 1, 0]
    return BooleanNode.from_output_list(outputs=truth_table, name="RULE 110")
def search_cut(sentence):
    """
    Segment *sentence* using jieba's search-engine mode (HMM-based cut).

    :param sentence: text to segment
    :return: list of tokens produced by ``jieba.lcut_for_search``
    """
    return jieba.lcut_for_search(sentence)
from typing import Counter
def count_items(column_list:list):
    """Count the distinct values in a list and how often each occurs.

    Args:
        column_list (list): list of values (any hashable types).

    Returns:
        tuple[list, list]: the distinct values and, aligned with them,
        the number of occurrences of each value.
    """
    counter = Counter(column_list)
    item_types = list(counter.keys())
    # Renamed from `count_items` — the old local shadowed the function itself.
    item_counts = list(counter.values())
    return item_types, item_counts
import cloudpickle
def py_call(obj, inputs=(), direct_args=()):
    """Create a task that calls Python code.

    Example:
        >>> def hello(x):
                return b"Hello " + x.read()
        >>> a = tasks.const("Loom")
        >>> b = tasks.py_call((a,), hello)
        >>> client.submit(b)
        b'Hello Loom'
    """
    call_task = Task()
    call_task.task_type = PY_CALL
    # The callable object is always the first input, followed by its data inputs.
    call_task.inputs = (obj,) + tuple(inputs)
    call_task.config = cloudpickle.dumps(tuple(direct_args))
    call_task.resource_request = cpu1
    return call_task
def to_graph(grid):
    """Build an adjacency-list representation of a grid graph.

    Land cells (value "1") are connected when they are vertically or
    horizontally adjacent.

    Args:
        grid: 2-D list (rows of cells) where the string "1" marks land.

    Returns:
        dict: maps each land cell's (row, col) tuple to the list of its
        adjacent land cells, in up, down, left, right order.
    """
    # BUG FIX: an empty grid previously raised IndexError on grid[0].
    if not grid or not grid[0]:
        return {}
    adj_list = {}
    n_rows, n_cols = len(grid), len(grid[0])
    land_val = "1"
    # Neighbor order matches the original checks: up, down, left, right.
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1))
    for i in range(n_rows):
        for j in range(n_cols):
            if grid[i][j] != land_val:
                continue
            neighbors = []
            for di, dj in offsets:
                ni, nj = i + di, j + dj
                if 0 <= ni < n_rows and 0 <= nj < n_cols and grid[ni][nj] == land_val:
                    neighbors.append((ni, nj))
            adj_list[(i, j)] = neighbors
    return adj_list
def validate_doc(doc):
    """
    Return True when the given document is a valid dictionary, i.e. its
    content consists of exactly one definition list.
    """
    content = doc.content
    return len(content) == 1 and isinstance(content[0], pf.DefinitionList)
def is_renderable(obj, quiet=True):
    """
    Checks if object is renderable
    Args:
        obj (unicode): Name of object to verify
        quiet (bool): If the function should keep quiet (default=True)
    Returns:
        (bool) if its renderable or not
    """
    # unit test
    # make sure we are not working with components/attributes
    # (ls with objectsOnly resolves the name to actual object paths)
    obj = cmds.ls(obj, objectsOnly=True, l=True)
    if isinstance(obj, list) or isinstance(obj, tuple):
        if len(obj) == 1:
            obj = obj[0]
        else:
            # Ambiguous (or empty) resolution — cannot decide, treat as not renderable.
            LOG.error("isRenderable - {0} cannot be checked".format(obj))
            return False
    if not cmds.objExists(obj):
        if not quiet:
            LOG.error("{0} does not exist, skipping it".format(obj))
        return False
    # doIt
    # Template objects are display-only and never render.
    if cmds.getAttr("{0}.template".format(obj)):
        if not quiet:
            LOG.error("{0} is a template object, skipping it".format(obj))
        return False
    if not cmds.getAttr("{0}.visibility".format(obj)):
        # Let's check if it has any in-connection (its animated)
        # An incoming connection means visibility may be keyed on later,
        # so an off value right now is not conclusive.
        if not cmds.listConnections("{0}.visibility".format(obj)):
            if not quiet:
                LOG.error("{0} is not visible, skipping it".format(obj))
            return False
    if not cmds.getAttr("{0}.lodVisibility".format(obj)):
        # Let's check if it has any in-connection (its animated)
        if not cmds.listConnections("{0}.lodVisibility".format(obj)):
            if not quiet:
                LOG.error("{0} has no lodVisibility, skipping it".format(obj))
            return False
    # TODO Display layer override check
    renderable = True
    # check parents
    # Recurse up the DAG: a hidden/template ancestor hides this object too.
    parent = cmds.listRelatives(obj, parent=True, path=True)
    if parent:
        renderable = renderable and is_renderable(parent[0])
    return renderable
import traceback
import traceback
import traceback
from datetime import datetime
import requests
import io
import operator
import csv
import re
def _refresh_database(bot, force=False, prune=True, callback=None, background=False, db=None):
    """
    Actual implementation of refresh_database.
    Refreshes the database of starsystems.  Also rebuilds the bloom filter.
    :param bot: Bot instance
    :param force: True to force refresh
    :param prune: True to prune non-updated systems.  Keep True unless performance testing.
    :param callback: Optional function that is called as soon as the system determines a refresh is needed.
    :param background: If True and a refresh is needed, it is submitted as a background task rather than running
        immediately.
    :param db: Database handle
    Note that this function executes some raw SQL queries (among other voodoo).  This is for performance reasons
    concerning the insanely large dataset being handled, and should NOT serve as an example for implementation
    elsewhere.
    :returns: False when no refresh was needed; a Future when scheduled in the
        background; True after a completed refresh.
    """
    eddb_url = bot.config.ratbot.edsm_url or "https://eddb.io/archive/v5/systems.csv"
    chunked = bot.config.ratbot.chunked_systems
    # Should really implement this, but until then
    if chunked:
        raise NotImplementedError("Chunked system loading is not implemented yet.")
    status = get_status(db)
    eddb_maxage = float(bot.config.ratbot.edsm_maxage or (7*86400))  # Once per week = 604800 seconds
    # Refresh only when forced, never refreshed before, or the data is stale.
    # NOTE(review): `datetime.datetime.now` assumes `import datetime`; the
    # visible import here is `from datetime import datetime` — confirm which
    # one the real module uses.
    if not (
        force or
        not status.starsystem_refreshed or
        (datetime.datetime.now(tz=datetime.timezone.utc) - status.starsystem_refreshed).total_seconds() > eddb_maxage
    ):
        # No refresh needed.
        # print('not force and no refresh needed')
        return False
    if callback:
        callback()
    if background:
        print('Scheduling background refresh of starsystem data')
        return bot.memory['ratbot']['executor'].submit(
            _refresh_database, bot, force=True, callback=None, background=False
        )
    conn = db.connection()
    # Now in actual implementation beyond background scheduling
    # Counters for stats
    # All times in seconds
    stats = {
        'load': 0,  # Time spent retrieving the CSV file(s) and dumping it into a temptable in the db.
        'prune': 0,  # Time spent removing non-update updates.
        'systems': 0,  # Time spent merging starsystems into the db.
        'prefixes': 0,  # Time spent merging starsystem prefixes into the db.
        'stats': 0,  # Time spent (re)computing system statistics
        'bloom': 0,  # Time spent (re)building the system prefix bloom filter.
        'optimize': 0,  # Time spent optimizing/analyzing tables.
        'misc': 0,  # Miscellaneous tasks (total time - all other stats)
        'total': 0,  # Total time spent.
    }
    def log(fmt, *args, **kwargs):
        # Timestamped progress logging for the long-running refresh.
        print("[{}] ".format(datetime.datetime.now()) + fmt.format(*args, **kwargs))
    overall_timer = TimedResult()
    log("Starsystem refresh started")
    if chunked:
        # FIXME: Needs to be reimplemented.  (Dead code: `chunked` raised
        # NotImplementedError above, so only the else branch is reachable.)
        log("Retrieving starsystem index at {}", eddb_url)
        with timed() as t:
            response = requests.get(eddb_url)
            response.raise_for_status()
            urls = list(urljoin(eddb_url, chunk["SectorName"]) for chunk in response.json())
        stats['index'] += t.seconds
        log("{} file(s) queued for starsystem refresh. (Took {}}", len(urls), format_timestamp(t.delta))
    else:
        urls = [eddb_url]
    # Session-temporary staging table that new/changed systems are copied into
    # before being merged into the real starsystem table.
    temptable = sa.Table(
        '_temp_new_starsystem', sa.MetaData(),
        sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
        sa.Column('eddb_id', sa.Integer),
        sa.Column('name_lower', sa.Text(collation="C")),
        sa.Column('name', sa.Text(collation="C")),
        sa.Column('first_word', sa.Text(collation="C")),
        sa.Column('word_ct', sa.Integer),
        sa.Column('xz', SQLPoint),
        sa.Column('y', sa.Numeric),
        # sa.Index('_temp_id_ix', 'eddb_id'),
        prefixes=['TEMPORARY'], postgresql_on_commit='DROP'
    )
    temptable.create(conn)
    # Table-name substitutions applied to every raw SQL template via exec().
    sql_args = {
        'sp': StarsystemPrefix.__tablename__,
        's': Starsystem.__tablename__,
        'ts': temptable.name,
        'tsp': '_temp_new_prefixes'
    }
    buffer = io.StringIO()  # Temporary IO buffer for COPY FROM
    columns = ['eddb_id', 'name_lower', 'name', 'first_word', 'word_ct', 'xz', 'y']  # Columns to copy to temptable
    getter = operator.itemgetter(*columns)
    total_flushed = 0  # Total number of flushed items so far
    pending_flush = 0  # Number of items waiting to flush
    def exec(sql, *args, **kwargs):
        # Execute a raw SQL template after filling in table names/args; log on failure.
        try:
            conn.execute(sql.format(*args, **kwargs, **sql_args))
        except Exception as ex:
            log("Query failed.")
            traceback.print_exc()
            raise
    def flush():
        # Bulk-load the buffered rows into the temptable via PostgreSQL COPY.
        nonlocal buffer, total_flushed, pending_flush
        if not pending_flush:
            return
        log("Flushing system(s) {}-{}", total_flushed + 1, total_flushed + pending_flush)
        buffer.seek(0)
        cursor = conn.connection.cursor()
        cursor.copy_from(buffer, temptable.name, sep='\t', null='', columns=columns)
        buffer = io.StringIO()
        # systems = []
        total_flushed += pending_flush
        pending_flush = 0
    with timed() as t:
        for url in urls:
            log("Retrieving starsystem data at {}", url)
            try:
                response = requests.get(url, stream=True)
                reader = csv.DictReader(io.TextIOWrapper(response.raw))
                for row in reader:
                    # Parse and reformat system info from CSV
                    name, word_ct = re.subn(r'\s+', ' ', row['name'].strip())
                    name_lower = name.lower()
                    first_word, *_ = name_lower.split(" ", 1)
                    word_ct += 1
                    if all((row['x'], row['y'], row['z'])):
                        xz = "({x},{z})".format(**row)
                        y = row['y']
                    else:
                        xz = y = ''
                    system_raw = {
                        'eddb_id': str(row['id']),
                        'name_lower': name_lower,
                        'name': name,
                        'first_word': first_word,
                        'xz': xz,
                        'y': y,
                        'word_ct': str(word_ct)
                    }
                    pending_flush += 1
                    buffer.write("\t".join(getter(system_raw)))
                    buffer.write("\n")
                    if pending_flush >= FLUSH_THRESHOLD:
                        flush()
            except ValueError:
                pass
            except Exception:
                log("Failed to retrieve data")
                traceback.print_exc()
        flush()
        log("Creating index")
        exec("CREATE INDEX ON {ts}(eddb_id)")
    stats['load'] += t.seconds
    with timed() as t:
        log("Removing possible duplicates")
        exec("DELETE FROM {ts} WHERE eddb_id NOT IN(SELECT MAX(id) AS id FROM {ts} GROUP BY eddb_id)")
        # No need for the temporary 'id' column at this point.
        exec("ALTER TABLE {ts} DROP id CASCADE")
        # Making this a primary key (or even just a unique key) apparently affects query planner performance vs the
        # non-existing unique key.
        exec("ALTER TABLE {ts} ADD PRIMARY KEY(eddb_id)")
        if prune:
            log("Removing non-updates to existing systems")
            # If a starsystem has been updated, at least one of 'name', 'xz' or 'y' are guaranteed to have changed.
            # (A change that effects word_ct would effect name as well, for instance.)
            # Delete any temporary systems that exist in the real table with matching attributes.
            exec("""
                DELETE FROM {ts} AS t USING {s} AS s
                WHERE s.eddb_id=t.eddb_id
                AND ROW(s.name, s.y) IS NOT DISTINCT FROM ROW(t.name, t.y)
                AND ((s.xz IS NULL)=(t.xz IS NULL)) AND (s.xz~=t.xz OR s.xz IS NULL)
            """)
        else:
            log("Skipping non-update removal phase")
    stats['prune'] += t.seconds
    with timed() as t:
        log("Building list of distinct prefixes")
        # Create list of unique prefixes in this batch
        exec("""
            CREATE TEMPORARY TABLE {tsp} ON COMMIT DROP
            AS SELECT DISTINCT first_word, word_ct FROM {ts}
        """)
        # Insert new prefixes
        exec("""
            INSERT INTO {sp} (first_word, word_ct)
            SELECT t.first_word, t.word_ct
            FROM
                {tsp} AS t
                LEFT JOIN {sp} AS sp ON sp.first_word=t.first_word AND sp.word_ct=t.word_ct
            WHERE sp.first_word IS NULL
        """)
    stats['prefixes'] += t.seconds
    with timed() as t:
        log("Updating existing systems.")
        exec("""
            UPDATE {s} AS s
            SET name_lower=t.name_lower, name=t.name, first_word=t.first_word, word_ct=t.word_ct, xz=t.xz, y=t.y
            FROM {ts} AS t
            WHERE s.eddb_id=t.eddb_id
        """)
        log("Inserting new systems.")
        exec("""
            INSERT INTO {s} (eddb_id, name_lower, name, first_word, word_ct, xz, y)
            SELECT t.eddb_id, t.name_lower, t.name, t.first_word, t.word_ct, t.xz, t.y
            FROM {ts} AS t
            LEFT JOIN {s} AS s ON s.eddb_id=t.eddb_id
            WHERE s.eddb_id IS NULL
        """)
    stats['systems'] += t.seconds
    with timed() as t:
        log('Computing prefix statistics')
        # Recompute per-prefix frequency ratios (plain and cumulative) using
        # window functions, restricted to prefixes touched by this batch.
        exec("""
            UPDATE {sp} SET ratio=t.ratio, cume_ratio=t.cume_ratio
            FROM (
                SELECT
                    t.first_word, t.word_ct, ct/(SUM(ct) OVER w) AS ratio,
                    (SUM(ct) OVER p)/(SUM(ct) OVER w) AS cume_ratio
                FROM (
                    SELECT sp.*, COUNT(s.eddb_id) AS ct
                    FROM
                        {sp} AS sp
                        LEFT JOIN {s} AS s USING (first_word, word_ct)
                    WHERE sp.first_word IN(SELECT first_word FROM {tsp})
                    GROUP BY sp.first_word, sp.word_ct
                    HAVING COUNT(*) > 0
                ) AS t
                WINDOW
                    w AS (PARTITION BY t.first_word ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING),
                    p AS (PARTITION BY t.first_word ORDER BY t.word_ct ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
            ) AS t
            WHERE {sp}.first_word=t.first_word AND {sp}.word_ct=t.word_ct
        """)
    stats['stats'] += t.seconds
    with timed() as t:
        log("Analyzing tables")
        exec("ANALYZE {sp}")
        exec("ANALYZE {s}")
    stats['optimize'] += t.seconds
    log("Starsystem database update complete")
    # Update refresh time
    try:
        status = get_status(db)
        status.starsystem_refreshed = sql.func.clock_timestamp()
        db.add(status)
        db.commit()
    except Exception:
        traceback.print_exc()
        raise
    log("Starsystem database update committed")
    with timed() as t:
        log("Rebuilding bloom filter")
        refresh_bloom(bot)
        stats['bloom'] += t.seconds
    overall_timer.stop()
    stats['misc'] = overall_timer.seconds - sum(stats.values())
    stats['total'] = overall_timer.seconds
    bot.memory['ratbot']['stats']['starsystem_refresh'] = stats
    log("Starsystem refresh finished")
    return True
def V_bandpass(V, R_S, C, L, R_L, f):
    """
    Band-pass filter output voltage: the input voltage minus the current
    times the source impedance.
    """
    circuit_current = V / (R_S + Z_bandpass(C, L, R_L, f))
    return V - circuit_current * R_S
import os
def get_server_url():
    """
    Return the current server url; does not work in a task.
    """
    host = os.environ.get('HTTP_X_FORWARDED_HOST')
    if not host:
        host = os.environ['HTTP_HOST']
    return u'%s://%s' % (os.environ['wsgi.url_scheme'], host)
from admiral.celery import celery
def celery():
    """Celery app test fixture.

    BUG FIX: the module-level ``from admiral.celery import celery`` is
    shadowed by this function's own name, so ``return celery`` used to
    return the fixture function itself rather than the Celery app.
    Importing locally under an alias returns the actual app.
    """
    from admiral.celery import celery as celery_app
    return celery_app
def perm_cache(func):
    """
    Cache permission-check results keyed by user + request parameters.

    Only queries (GET requests) are cached; all other methods always run
    the wrapped check.
    """
    def _deco(self, request, view):
        if request.method != "GET":
            return func(self, request, view)
        username = request.user.username
        view_kwargs = "_".join("{}:{}".format(key, value) for key, value in list(view.kwargs.items()))
        cache_key = "{}__{}__{}".format(username, view.action, view_kwargs)
        cached = cache.get(cache_key)
        if cached is not None:
            return cached
        # Cache miss: evaluate the permission and keep it for 60 seconds.
        result = func(self, request, view)
        cache.set(cache_key, result, 60)
        return result
    return _deco
def ast_walker(handler):
    """
    A generic AST walker decorator.
    Decorates either a function or a class (if dispatching based on node type is required).
    ``handler`` will be wrapped in a :py:class:`~peval.Dispatcher` instance;
    see :py:class:`~peval.Dispatcher` for the details of the required class structure.
    Returns a callable with the signature::
        def walker(state, node, ctx=None)
    :param state: a dictionary with the state which will be passed to every handler call.
        It will be converted into a :class:`~peval.tools.immutableadict` object
        at the start of the traversal.
        Handlers can update it by returning a modified version.
    :param node: an ``ast.AST`` object to traverse.
    :param ctx: a dictionary with the global context which will be passed to every handler call.
        It will be converted into a :class:`~peval.tools.immutableadict` object
        at the start of the traversal.
    :returns: a tuple ``(state, new_node)``, where ``state`` is the same object which was passed
        as the corresponding parameter.
        Does not mutate ``node``.
    ``handler`` will be invoked for every node during the AST traversal (depth-first, pre-order).
    The ``handler`` function, if it is a function, or its static methods, if it is a class
    must have the signature::
        def handler([state, node, ctx, prepend, visit_after, visiting_after,
            skip_fields, walk_field,] **kwds)
    The names of the arguments must be exactly as written here,
    but their order is not significant (they will be passed as keywords).
    If ``handler`` is a class, the default handler is a "pass-through" function
    that does not change the node or the state.
    :param state: the (supposedly immutable) state object passed during the initial call.
    :param node: the current node
    :param ctx: the (supposedly immutable) dictionary with the global context
        passed during the initial call.
        In addition to normal dictionary methods, its values can be alternatively
        accessed as attributes (e.g. either ``ctx['value']`` or ``ctx.value``).
    :param prepend: a function ``prepend(lst)`` which, when called, prepends the list
        of ``ast.AST`` objects to whatever is returned by the handler of the closest
        statement block that includes the current node.
        These nodes are not traversed automatically.
    :param visit_after: a function of no arguments, which, when called,
        schedules to call the handler again on this node when all of its fields are traversed
        (providing that after calling it, the handler returns an ``ast.AST`` object
        and not a list or ``None``).
        During the second call this parameter is set to ``None``.
    :param visiting_after: set to ``False`` during the normal (pre-order) visit,
        and to ``True`` during the visit caused by ``visit_after()``.
    :param skip_fields: a function of no arguments, which, when called,
        orders the walker not to traverse this node's fields.
    :param walk_field: a function
        ``walk_field(state, value, block_context=False) -> (new_state, new_value)``,
        which traverses the given field value.
        If the value contains a list of statements, ``block_context`` must be set to ``True``,
        so that ``prepend`` could work correctly.
    :returns: must return a tuple ``(new_state, new_node)``, where ``new_node`` is one of:
        * ``None``, in which case the corresponding node will be removed from the parent list
          or the parent node field.
        * The passed ``node`` (unchanged).
          By default, its fields will be traversed (unless ``skip_fields()`` is called).
        * A new ``ast.AST`` object, which will replace the passed ``node`` in the AST.
          By default, its fields will not be traversed,
          and the handler must do it manually if needed
          (by calling ``walk_field()``).
        * If the current node is an element of a list,
          a list of ``ast.AST`` objects can be returned,
          which will be spliced in place of the node.
          Same as in the previous case, these new nodes
          will not be automatically traversed.
    """
    # Delegate to the generic _Walker with both transform and inspect enabled.
    return _Walker(handler, transform=True, inspect=True)
def html_escape(text):
    """Produce entities within text."""
    # Characters without a table entry pass through unchanged.
    return "".join(html_escape_table.get(ch, ch) for ch in text)
def qs_without_parameter(arg1, arg2):
    """
    Removes an argument from the get URL.

    Use:
        {{ request|url_without_parameter:'page' }}

    Args:
        arg1: request
        arg2: parameter to remove
    """
    kept = {}
    for key, value in arg1.items():
        # Skip the removed parameter and keys we've already kept.
        if key == arg2 or kept.get(key, None) is not None:
            continue
        try:
            kept[key] = value[0]
        except IndexError:
            kept[key] = value
    return "&".join(k + "=" + v for k, v in kept.items())
def find_power_graph(I, J, w_intersect=10, w_difference=1):
    """Compute the power graph of a graph with edges I, J.

    Returns routing edges (Ir, Jr) and power edges (Ip, Jp). The graph is
    treated as undirected; edges are internally converted to undirected
    form if not already.
    """
    num_nodes = int(max(max(I), max(J)) + 1)
    Ir, Jr, Ip, Jp = cpp.routing_swig(num_nodes, I, J, w_intersect, w_difference)
    return Ir, Jr, Ip, Jp
import string
def getcomments(pyObject):
    """Get lines of comments immediately preceding an object's source code.

    For a module this is the ``#`` block at the top of the file (after an
    optional shebang line); for any other object it is the ``#`` block
    directly above its definition, at the same indentation.

    Returns None when source can't be found.
    """
    # NOTE(review): this is Python-2-era code using the ``string`` module
    # functions (string.strip/lstrip/join/...) and relies on helpers
    # ``findsource``/``ismodule``/``indentsize`` defined elsewhere.
    try:
        lines, lnum = findsource(pyObject)
    except (IOError, TypeError):
        return None
    if ismodule(pyObject):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1  # skip a shebang line
        # Skip leading blank lines and bare '#' lines.
        while start < len(lines) and string.strip(lines[start]) in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(string.expandtabs(lines[end]))
                end = end + 1
            return string.join(comments, '')
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [string.lstrip(string.expandtabs(lines[end]))]
            if end > 0:
                # Walk upward, prepending comment lines at the same indent.
                end = end - 1
                comment = string.lstrip(string.expandtabs(lines[end]))
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = string.lstrip(string.expandtabs(lines[end]))
            # Trim bare '#' lines from both ends of the collected block.
            while comments and string.strip(comments[0]) == '#':
                comments[:1] = []
            while comments and string.strip(comments[-1]) == '#':
                comments[-1:] = []
            return string.join(comments, '')
import time
import requests
import io
def crack_captcha(headers):
    """
    Crack a captcha: the complete demo pipeline.

    Fetches a fresh captcha image from the remote endpoint, denoises and
    binarizes it, splits it into single-character tiles, and classifies each
    tile with a pre-trained SVM model.

    :param headers: HTTP headers to send with the captcha request.
    :return: the recognized captcha text (a string of digits).
    """
    currentTime = str(int(time.time())*1000)
    # Request the captcha image from the endpoint (timestamp busts caching).
    rand_captcha_url = 'http://59.49.77.231:81/getcode.asp?t=' + currentTime
    res = requests.get(rand_captcha_url, stream=True,headers=headers)
    f = io.BytesIO()
    for chunk in res.iter_content(chunk_size=1024):
        if chunk: # filter out keep-alive new chunks
            f.write(chunk)
            f.flush()
    img = Image.open(f) # captcha image fetched over the network, kept in memory
    bin_clear_img = get_clear_bin_image(img) # denoised, binarized image
    child_img_list = get_crop_imgs(bin_clear_img) # split into per-character tiles (e.g. a 4-digit captcha yields 4 children), kept in memory
    # Load the SVM model and classify each character tile.
    svm_model_name = 'svm_model_file'
    model_path = data_root + '/svm_train/' + svm_model_name
    model = svm_load_model(model_path)
    img_ocr_name = ''
    for child_img in child_img_list:
        img_feature_list = get_feature(child_img) # feature extraction / dimensionality reduction of the tile
        yt = [0] # dummy test label
        # xt = [{1: 1, 2: 1}] # example of the expected input-vector shape
        xt = convert_feature_to_vector(img_feature_list) # convert features into a single standardized SVM feature vector
        p_label, p_acc, p_val = svm_predict(yt, xt, model)
        img_ocr_name += ('%d' % p_label[0]) # append this character's prediction
    #uuid_tag = str(uuid.uuid1()) # generate a random uuid string (trivial to re-enable)
    #img_save_folder = data_root + '/crack_img_res'
    #img.save(img_save_folder + '/' + img_ocr_name + '__' + uuid_tag + '.png')
    # e.g.: __0067__77b10a28f73311e68abef0def1a6bbc8.png
    f.close()
    return img_ocr_name
from . import sill
from clawpack.pyclaw.util import check_diff
import numpy as np
from clawpack.pyclaw.util import gen_variants
from itertools import chain
def test_2d_sill():
    """test_2d_sill

    Tests against expected classic solution of shallow water equations over
    a sill.  Yields one test per solver variant produced by ``gen_variants``.
    """
    def verify_expected(expected):
        # Build a verifier closure comparing total water mass to *expected*.
        def sill_verify(claw):
            q0 = claw.frames[0].state.get_q_global()
            qfinal = claw.frames[claw.num_output_times].state.get_q_global()
            if q0 is not None and qfinal is not None:
                dx, dy = claw.solution.domain.grid.delta
                # Total mass = cell area * L1 norm of the depth field q[0].
                total_mass = dx * dy * np.linalg.norm(qfinal[0,:,:].reshape(-1), 1)
                return check_diff(expected, total_mass, reltol=1e-3)
            else:
                # q arrays can be None (presumably on non-root ranks in
                # parallel runs) — skip verification then. TODO confirm.
                return
        return sill_verify
    classic_tests = gen_variants(sill.setup, verify_expected(3.7439),
                                 kernel_languages=["Fortran"],
                                 solver_type='classic', outdir=None)
    for test in chain(classic_tests):
        yield test
def draw_box(
    canvas,
    layout,
    box_width=None,
    box_alpha=0,
    color_map=None,
    show_element_id=False,
    show_element_type=False,
    id_font_size=None,
    id_font_path=None,
    id_text_color=None,
    id_text_background_color=None,
    id_text_background_alpha=1,
):
    """Draw the layout region on the input canvas(image).
    Args:
        canvas (:obj:`~np.ndarray` or :obj:`~PIL.Image.Image`):
            The canvas to draw the layout boxes.
        layout (:obj:`Layout` or :obj:`list`):
            The layout of the canvas to show.
        box_width (:obj:`int`, optional):
            Set to change the width of the drawn layout box boundary.
            Defaults to None, when the boundary is automatically
            calculated as the the :const:`DEFAULT_BOX_WIDTH_RATIO`
            * the maximum of (height, width) of the canvas.
        box_alpha (:obj:`float`, optional):
            A float range from 0 to 1. Set to change the alpha of the
            drawn layout box.
            Defaults to 0 - the layout box will be fully transparent.
        color_map (dict, optional):
            A map from `block.type` to the colors, e.g., `{1: 'red'}`.
            You can set it to `{}` to use only the
            :const:`DEFAULT_OUTLINE_COLOR` for the outlines.
            Defaults to None, when a color palette is is automatically
            created based on the input layout.
        show_element_id (bool, optional):
            Whether to display `block.id` on the top-left corner of
            the block.
            Defaults to False.
        show_element_type (bool, optional):
            Whether to display `block.type` on the top-left corner of
            the block.
            Defaults to False.
        id_font_size (int, optional):
            Set to change the font size used for drawing `block.id`.
            Defaults to None, when the size is set to
            :const:`DEFAULT_FONT_SIZE`.
        id_font_path (:obj:`str`, optional):
            Set to change the font used for drawing `block.id`.
            Defaults to None, when the :const:`DEFAULT_FONT_OBJECT` is used.
        id_text_color (:obj:`str`, optional):
            Set to change the text color used for drawing `block.id`.
            Defaults to None, when the color is set to
            :const:`DEFAULT_TEXT_COLOR`.
        id_text_background_color (:obj:`str`, optional):
            Set to change the text region background used for drawing `block.id`.
            Defaults to None, when the color is set to
            :const:`DEFAULT_TEXT_BACKGROUND`.
        id_text_background_alpha (:obj:`float`, optional):
            A float range from 0 to 1. Set to change the alpha of the
            drawn text.
            Defaults to 1 - the text box will be solid.
    Returns:
        :obj:`PIL.Image.Image`:
            A Image object containing the `layout` draw upon the input `canvas`.
    Raises:
        ValueError: if `box_alpha` or `id_text_background_alpha` is outside [0, 1].
    """
    # Validate with explicit raises: the original used
    # ``assert cond, ValueError(msg)``, which raises AssertionError (the
    # ValueError instance was just the message) and vanishes under ``-O``.
    if not 0 <= box_alpha <= 1:
        raise ValueError(
            f"The box_alpha value {box_alpha} is not within range [0,1]."
        )
    if not 0 <= id_text_background_alpha <= 1:
        raise ValueError(
            f"The id_text_background_alpha value {id_text_background_alpha} is not within range [0,1]."
        )
    draw = ImageDraw.Draw(canvas, mode="RGBA")
    id_text_background_color = id_text_background_color or DEFAULT_TEXT_BACKGROUND
    id_text_color = id_text_color or DEFAULT_TEXT_COLOR
    if box_width is None:
        box_width = _calculate_default_box_width(canvas)
    if show_element_id or show_element_type:
        font_obj = _create_font_object(id_font_size, id_font_path)
    if color_map is None:
        # Build a palette covering every block type present in the layout.
        all_types = set([b.type for b in layout if hasattr(b, "type")])
        color_map = _create_color_palette(all_types)
    for idx, ele in enumerate(layout):
        if isinstance(ele, Interval):
            ele = ele.put_on_canvas(canvas)
        outline_color = (
            DEFAULT_OUTLINE_COLOR
            if not isinstance(ele, TextBlock)
            else color_map.get(ele.type, DEFAULT_OUTLINE_COLOR)
        )
        _draw_box_outline_on_handler(draw, ele, outline_color, box_width)
        _draw_transparent_box_on_handler(draw, ele, outline_color, box_alpha)
        if show_element_id or show_element_type:
            text = ""
            if show_element_id:
                # ``is not None`` so a valid id of 0 is not replaced by idx
                # (the original ``ele.id or idx`` dropped falsy ids).
                ele_id = ele.id if ele.id is not None else idx
                text += str(ele_id)
            if show_element_type:
                text = str(ele.type) if not text else text + ": " + str(ele.type)
            start_x, start_y = ele.coordinates[:2]
            # NOTE(review): ``getsize`` was removed in Pillow 10; confirm the
            # pinned Pillow version or migrate to ``getbbox``/``getlength``.
            text_w, text_h = font_obj.getsize(text)
            text_box_object = Rectangle(
                start_x, start_y, start_x + text_w, start_y + text_h
            )
            # Add a small background for the text
            _draw_transparent_box_on_handler(
                draw,
                text_box_object,
                id_text_background_color,
                id_text_background_alpha,
            )
            # Draw the ids
            draw.text(
                (start_x, start_y),
                text,
                fill=id_text_color,
                font=font_obj,
            )
    return canvas
def login(request):
    """Home view: render the login page."""
    template = 'duck/login.html'
    return render(request, template)
def make_user_role_table(table_name='user', id_column_name='id'):
    """
    Build the user-role association table so that it correctly references
    your own UserMixin subclass.

    :param table_name: name of the user table to reference
    :param id_column_name: primary-key column of that table
    :return: the ``fp_user_role`` association table
    """
    user_fk = '{}.{}'.format(table_name, id_column_name)
    return db.Table(
        'fp_user_role',
        db.Column('user_id', db.Integer, db.ForeignKey(user_fk)),
        db.Column('role_id', db.Integer, db.ForeignKey('fp_role.id')),
        extend_existing=True,
    )
import re
def _remove_comments_inline(text):
"""Removes the comments from the string 'text'."""
if 'auto-ignore' in text:
return text
if text.lstrip(' ').lstrip('\t').startswith('%'):
return ''
match = re.search(r'(?<!\\)%', text)
if match:
return text[:match.end()] + '\n'
else:
return text | 463e29e1237a88e91c13a58ffea1b2ccdafd4a1d | 3,638,573 |
def wide_to_tall(df: pd.DataFrame) -> pd.DataFrame:
    """Reshape a wide table into a tall (long) table.

    Stacks the columns into the index, drops missing cells, and turns the
    resulting index levels back into regular columns.

    Args:
        df (pd.DataFrame): wide table

    Returns:
        pd.DataFrame: tall table
    """
    stacked = df.unstack()
    stacked = stacked.dropna()
    return stacked.reset_index()
def is_pj_player_plus(value):
    """
    Return whether *value* looks like a PJ Player+ entry (a list of 3 or 4
    elements).

    :param value: The value to be checked
    :type value: Any
    :return: whether or not the value is a PJ Player+
    :rtype: bool
    """
    # The original ``isinstance(...) and len(...) == 4 or len(...) == 3``
    # parsed as ``(A and B) or C``: non-list values of length 3 passed, and
    # unsized values raised TypeError. Check list-ness before taking len().
    return isinstance(value, list) and len(value) in (3, 4)
def pd_fuzz_partial_token_sort_ratio(col1, col2):
    """ Calculate "partial token sort" ratio (`fuzz.partial_token_sort_ratio`) between two text columns.

    Args:
        col1 (Spark Column): 1st text column
        col2 (Spark Column): 2nd text column

    Returns:
        Spark Column (IntegerType): result of `fuzz.partial_token_sort_ratio` calculation.
    """
    ratios = [
        fuzz.partial_token_sort_ratio(left, right)
        for left, right in zip(col1.astype(str), col2.astype(str))
    ]
    return pd.Series(ratios)
def looterCanReinforce(mine: Game) -> bool:
    """
    Tell whether, in the given game, the looter (the attacker) may reinforce
    at this moment — regardless of whether it would be the first or the
    second time.
    """
    status = getLooterReinforcementStatus(mine)
    return status != 0
def _get_normed_sym_np(X_, _eps=DEFAULT_EPS):
    """
    Compute the normalized and symmetrized probability matrix from
    relative probabilities X_, where X_ is a numpy array.

    Parameters
    ----------
    X_ : 2-d array_like (N, N)
        asymmetric probabilities. For instance, X_(i, j) = P(i|j)
    _eps : float
        small constant added to column sums to avoid division by zero

    Returns
    -------
    P : 2-d array_like (N, N)
        symmetric probabilities, making the assumption that P(i|j) = P(j|i).
        Diagonals are all 0s.
    """
    batch_size = X_.shape[0]
    zero_diags = 1.0 - np.identity(batch_size)
    # Zero the diagonal WITHOUT mutating the caller's array — the original
    # used in-place ``X_ *= zero_diags``, silently modifying the input.
    X_ = X_ * zero_diags
    # Column-normalize, then symmetrize by averaging with the transpose.
    norm_facs = np.sum(X_, axis=0, keepdims=True)
    X_ = X_ / (norm_facs + _eps)
    X_ = 0.5 * (X_ + np.transpose(X_))
    return X_
def load_encoding_model():
    """Model that encodes an image as a length-4096 vector taken from the
    second-to-last ('fc2') layer of VGG16."""
    vgg = VGG16(weights='imagenet', include_top=True)
    fc2_output = vgg.get_layer('fc2').output
    return Model(inputs=vgg.input, outputs=fc2_output)
import math
def get_geohash_radius_approximation(latitude, longitude, radius, precision, georaptor_flag=False, minlevel=1, maxlevel=12):
    """
    Get the list of geohashes that approximate a circle
    :param latitude: Float the longitude to get the radius approximation for
    :param longitude: Float the latitude to get the radius approximation for
    :param radius: Integer Radius coverage in meters
    :param precision: Integer the geohash precision level
    :param georaptor_flag: Do you want to compress it with georaptor
    :param minlevel: minimal precision level possible
    :param maxlevel: maximal precision level possible
    :return: A list of geohashes
    """
    # (x, y) is the circle center in a local metric frame (meters); the grid
    # is walked over one quadrant only and mirrored into the other three.
    x = 0.0
    y = 0.0
    points = []
    geohashes = []
    # Approximate geohash cell dimensions in meters, indexed by precision-1
    # (precision 1 ~ 5009km x 4993km ... precision 12 ~ 3.7cm x 2cm).
    grid_width = [5009400.0, 1252300.0, 156500.0, 39100.0, 4900.0, 1200.0, 152.9, 38.2, 4.8, 1.2, 0.149, 0.0370]
    grid_height = [4992600.0, 624100.0, 156000.0, 19500.0, 4900.0, 609.4, 152.4, 19.0, 4.8, 0.595, 0.149, 0.0199]
    # Step by half a cell in each direction.
    height = (grid_height[precision - 1]) / 2
    width = (grid_width[precision - 1]) / 2
    lat_moves = int(math.ceil(radius / height)) # 4
    lon_moves = int(math.ceil(radius / width)) # 2
    for i in range(0, lat_moves):
        temp_lat = y + height * i
        for j in range(0, lon_moves):
            temp_lon = x + width * j
            if in_circle_check(temp_lat, temp_lon, y, x, radius):
                x_cen, y_cen = get_centroid(temp_lat, temp_lon, height, width)
                # Mirror the cell centroid into all four quadrants and
                # convert each metric offset back to lat/lon.
                lat, lon = convert_to_latlon(y_cen, x_cen, latitude, longitude)
                points += [[lat, lon]]
                lat, lon = convert_to_latlon(-y_cen, x_cen, latitude, longitude)
                points += [[lat, lon]]
                lat, lon = convert_to_latlon(y_cen, -x_cen, latitude, longitude)
                points += [[lat, lon]]
                lat, lon = convert_to_latlon(-y_cen, -x_cen, latitude, longitude)
                points += [[lat, lon]]
    for point in points:
        geohashes += [pgh.encode(point[0], point[1], precision)]
    if georaptor_flag:
        # Optionally compress adjacent geohashes into coarser cells.
        georaptor_out = georaptor.compress(set(geohashes), int(minlevel), int(maxlevel))
        return list(georaptor_out)
    else:
        # Deduplicate: quadrant mirroring can produce the same cell twice.
        return list(set(geohashes))
def manage_products(request, category_id, template_name="manage/category/products.html"):
    """Render the products-management inline for the given category,
    including the page-size ("amount") options.
    """
    category = Category.objects.get(pk=category_id)
    inline = products_inline(request, category_id, True)
    # Page-size options; mark the one currently stored in the session.
    selected_amount = request.session.get("category-products-amount")
    amount_options = [
        {"value": value, "selected": value == selected_amount}
        for value in (10, 25, 50, 100)
    ]
    context = RequestContext(request, {
        "category": category,
        "products_inline": inline,
        "amount_options": amount_options,
    })
    return render_to_string(template_name, context)
def random_indices(X, size=None, p=None, sort_indices=True, **kwargs):
    """ Get indices for a random subset of the data.

    Parameters
    ----------
    X : sized sequence
        * the data to sample indices from (only ``len(X)`` is used)
    size: int
        * integer size to sample (required if p=None)
    p: float
        * threshold percentage (0-100) to keep (required if size=None)
    sort_indices: bool
        * if True (default), return the indices in ascending order

    Returns
    -------
    indices: np.ndarray
        * indices of samples in the data set

    Raises
    ------
    ValueError
        if neither ``size`` nor ``p`` is given.
    """
    # Explicit raise instead of ``assert`` so validation survives ``python -O``.
    if size is None and p is None:
        raise ValueError("either 'size' or 'p' must be provided")
    # convert p (i.e., percentage of points) to integer size
    if size is None:
        size = int(p / 100. * len(X))
    # Sample without replacement directly from range(len(X)).
    indices = np.random.choice(len(X), int(size), replace=False)
    if sort_indices:
        indices = np.sort(indices)
    return indices
def get_facts(F5, uri):
    """
    Issue a GET of the URI specified to the F5 appliance and return the result as facts.

    A leading slash is prepended to the URI if missing.  The ``items`` key is
    renamed ``bigip_items`` because in Ansible 2.2 custom facts named
    ``items`` clash with built-ins, see
    http://stackoverflow.com/questions/40281706/cant-read-custom-facts-with-list-array-of-items

    :param F5: appliance client exposing ``genericGET(uri) -> (status, payload)``
    :param uri: REST URI to fetch
    :return: tuple of (status, result dict carrying an ``ansible_facts`` key)
    """
    result = { 'ansible_facts': {} }
    if uri[0] != "/":
        uri = "/" + uri
    status, result["ansible_facts"] = F5.genericGET(uri)
    try:
        # replace key name of 'items' with 'bigip_items'
        result["ansible_facts"]["bigip_items"] = result["ansible_facts"].pop("items")
    except (KeyError, AttributeError, TypeError):
        # No 'items' key, or the payload is not dict-like: fall back to {}.
        # (Narrowed from a bare ``except:`` that also swallowed
        # KeyboardInterrupt/SystemExit.)
        result["ansible_facts"]["bigip_items"] = dict()
    return status, result
from typing import Optional
from typing import Sequence
def parse_args(args: Optional[Sequence[str]] = None) -> Namespace:
    """
    Parses args and validates the consistency of origin/target using the
    generator.

    :param args: argument list to parse (defaults to ``sys.argv[1:]``)
    :return: the parsed namespace; exits via ``parser.error`` on invalid
        environments or a transfer the generator refuses.
    """
    parser = ArgumentParser(
        prog="python -m luh3417.transfer",
        description="Transfers a WordPress to one location to the other",
    )
    parser.add_argument(
        "-g",
        "--settings-generator",
        help="A Python script that handles the transitions",
        type=generator_validator,
        required=True,
    )
    parser.add_argument("origin", help="Origin environment")
    parser.add_argument("target", help="Target environment")
    parsed = parser.parse_args(args)
    # Make sure the generator recognizes both environment names.
    for env in ["origin", "target"]:
        env_name = getattr(parsed, env)
        try:
            parsed.settings_generator.get_source(env_name)
        except UnknownEnvironment as e:
            parser.error(
                f'Environment "{env_name}" not recognized by generator: {e.message}'
            )
    # The generator also has the final say on the origin -> target direction.
    if not parsed.settings_generator.allow_transfer(parsed.origin, parsed.target):
        parser.error(
            f"Generator does not allow transfer from {parsed.origin} to {parsed.target}"
        )
    return parsed
def parse_papers_plus_json(data):
    """ Parse the papers_plus Solr JSON response into a pandas DataFrame.

    Drops the Solr bookkeeping columns ``_version_`` and ``id`` (silently
    ignored when absent).

    Solr Field definition shown below:
    <!-- Citing paper fields: papers, metadata, arxiv_metadata -->
    <!-- Papers -->
    <field name="sentencenum" type="pint" indexed="true" stored="true" multiValued="false"/>
    <field name="sentence" type="text_classic" indexed="true" stored="true" multiValued="false"/>
    <field name="arxiv_identifier" type="string" indexed="true" stored="true" multiValued="false"/>
    <!-- arxiv metadata-->
    <field name="arxiv_url" type="string" indexed="true" stored="true" multiValued="false"/>
    <field name="authors" type="text_classic" indexed="true" stored="true" multiValued="false"/>
    <field name="title" type="text_classic" indexed="true" stored="true" multiValued="false"/>
    <field name="published_date" type="pdate" indexed="true" stored="true" multiValued="false"/>
    <field name="revision_dates" type="string" indexed="true" stored="true" multiValued="false"/>
    <!-- meta field: dblp_url-->
    <field name="dblp_url" type="string" indexed="true" stored="true" multiValued="false"/>

    :param data: decoded Solr JSON response (``data['response']['docs']``)
    :return: pandas DataFrame of the documents
    """
    docs = data['response']['docs']
    docs_df = pd.DataFrame(docs)
    # ``errors="ignore"`` keeps this working when a response omits the
    # bookkeeping columns (the original raised KeyError in that case).
    docs_df = docs_df.drop(columns=['_version_', 'id'], errors='ignore')
    return docs_df
from pathlib import Path
from typing import Optional
def get_credential(config_file: Path, credential_key: str = 'api_key') -> Optional[str]:
    """
    Get a single credential from a yaml file. Usual case is 'api_key'.

    :param config_file: path of the credentials yaml file
    :param credential_key: key to look up under the 'credentials' section
    :return: the credential value, or None when the file, section or key
        is missing
    """
    config = load_credentials(config_file)
    if not config:
        return None
    return config.get('credentials', {}).get(credential_key, None)
import functools
def authenticate_secondarily(endpoint):
"""Proper authentication for function views."""
@functools.wraps(endpoint)
def wrapper(request: HttpRequest):
if not request.user.is_authenticated:
try:
auth_result = PersonalAPIKeyAuthentication.authenticate(request)
if isinstance(auth_result, tuple) and auth_result[0].__class__.__name__ == "User":
request.user = auth_result[0]
else:
raise AuthenticationFailed("Authentication credentials were not provided.")
except AuthenticationFailed as e:
return JsonResponse({"detail": e.detail}, status=401)
return endpoint(request)
return wrapper | ac7a5b63c2b556e1bb42986db8110a922485b96d | 3,638,587 |
def gather_emails_GUIDs(mailbox, search, folder):
    """ Download UIDs of the messages matching *search* in *folder*.

    :param mailbox: imap client exposing ``folder.set`` and ``uids``
    :param search: search criteria passed through to ``mailbox.uids``
    :param folder: mailbox folder to select before searching
    :return: iterable of message UIDs
    """
    mailbox.folder.set(folder)
    # The original wrapped this in an identity generator expression
    # ``(email for email in ...)``; return the UID collection directly.
    return mailbox.uids(search)
def pyrolite_meltsutil_datafolder(subfolder=None):
    """
    Returns the path of the pyrolite-meltsutil data folder.

    Parameters
    -----------
    subfolder : :class:`str`
        Subfolder within the pyrolite data folder.

    Returns
    -------
    :class:`pathlib.Path`
    """
    folder = get_module_datafolder(module="pyrolite_meltsutil", subfolder=subfolder)
    return folder
from typing import List
def get_resource_record_set_cloud_formation_dict_list(hosted_zone: ResourceRecordSetList,
                                                      with_soa: str,
                                                      client: botocore.client.BaseClient, zone_id: str,
                                                      type_counter_aws_resource_record_set: dict) -> List[dict]:
    """
    Provide a dict representation of a resource record set that can
    be used to dump a cloud formation formatted YAML file.

    SOA and NS records are skipped unless *with_soa* is truthy.  Results are
    paginated: as long as the API response carries ``next_record_name``,
    further pages are fetched from *client*.  The per-type tally in
    *type_counter_aws_resource_record_set* is updated as a side effect.

    :return: a dict in the form:
        {
            "Name": str,
            "Type": str,
            "TTL": str,
            "ResourceRecord": [str],
            "AliasTarget": {
                "DNSName": str,
                "HostedZoneId": str
            }
        }
    """
    resource_record_set_cloud_formation_dict_list = []
    # Loop over result pages; ``hosted_zone`` is replaced by the next page
    # (or None) at the bottom of the loop.
    while hosted_zone is not None:
        for resource_record_set in hosted_zone.resource_record_sets:
            # Skip SOA/NS records unless explicitly requested via with_soa.
            if ((resource_record_set.type != "SOA" and resource_record_set.type != "NS")
                    or (with_soa and (resource_record_set.type == "SOA" or resource_record_set.type == "NS"))):
                resource_record_values = [resource_record.value
                                          for resource_record in resource_record_set.resource_records]
                resource_record_set_cloud_formation_dict = {
                    "Name": resource_record_set.name,
                    "Type": resource_record_set.type
                }
                update_type_counter_aws_resource_record_set(type_counter_aws_resource_record_set,
                                                            resource_record_set.type)
                # TTL/ResourceRecords/AliasTarget are optional keys: alias
                # records carry an AliasTarget instead of TTL + records.
                if resource_record_set.ttl:
                    resource_record_set_cloud_formation_dict['TTL'] = resource_record_set.ttl
                if resource_record_values:
                    resource_record_set_cloud_formation_dict['ResourceRecords'] = resource_record_values
                if resource_record_set.alias_target:
                    resource_record_set_cloud_formation_dict['AliasTarget'] = {
                        "DNSName": resource_record_set.alias_target.dns_name,
                        "HostedZoneId": resource_record_set.alias_target.hosted_zone_id
                    }
                resource_record_set_cloud_formation_dict_list.append(resource_record_set_cloud_formation_dict)
        # Fetch the next page, if any.
        next_record_name = hosted_zone.next_record_name
        if next_record_name:
            hosted_zone = ResourceRecordSetList(client.list_resource_record_sets(HostedZoneId=zone_id,
                                                                                 StartRecordName=next_record_name))
        else:
            hosted_zone = None
    return resource_record_set_cloud_formation_dict_list
import aiohttp
async def make_async_request(
        url: str, method: str = 'GET', **kwargs) -> dict:
    """
    Perform an asynchronous HTTP request to the given URL and return the
    parsed JSON response as a dict.

    Keyword Args:
        headers: Request HTTP Headers
        params: URI HTTP request params
        data: POST HTTP data
        timeout: Timeout requests

    Raises:
        HttpError: network or web-server error
        IncorrectJsonError: the response is not valid JSON (or not JSON at all)
        FoundLocation: the server answered with a redirect Location header
    """
    try:
        async with aiohttp.client.ClientSession() as session:
            async with session.request(method, url, **kwargs) as response:
                logger.debug(f'Status {response.status} from {method} request to {url} with {kwargs}')
                response.raise_for_status()
                # Surface redirects to the caller instead of following them.
                if response.status in (200, 302) and response.headers.get('Location'):
                    raise FoundLocation(location=response.headers.get('Location'))
                elif not response.content_type.startswith('application/json'):
                    text = await response.text()
                    logger.error(f'{response.content_type} -> {text}')
                    raise esia_client.exceptions.IncorrectJsonError(
                        f'Invalid content type -> {response.content_type}'
                    )
                return await response.json()
    except aiohttp.client.ClientError as e:
        logger.error(e, exc_info=True)
        raise esia_client.exceptions.HttpError(e)
    except ValueError as e:
        # response.json() raises ValueError on malformed JSON bodies.
        logger.error(e, exc_info=True)
        raise esia_client.exceptions.IncorrectJsonError(e)
def _prepare_line(edges, nodes):
"""prepare a plotly scatter3d line plot so that a set of disconnected edges
can be drawn as a single line.
`edges` are values associated with each edge (that get mapped to colors
through a colorscale). `nodes` are pairs of (source, target) node indices
for each edge.
the color of a line segment in plotly is a mixture of the colors associated
with the points it connects. Moreover, segments that begin or end at a
point whose value is `null` are not drawn.
given edges = [eab, ecd, eef] and nodes = [(a, b), (c, d), (e, f)], this
function returns:
path_edges: eab eab 0 ecd ecd 0 eef eef 0
path_nodes: a b 0 c d 0 e f 0
moreover the javascript code replaces every third element (the '0' in the
lists above) with `null`, so only the a-b, c-d, and e-f segments will get
plotted, and their colors are correct because both their start and end
points are associated with the same value.
"""
path_edges = np.zeros(len(edges) * 3, dtype=int)
path_edges[::3] = edges
path_edges[1::3] = edges
path_nodes = np.zeros(len(nodes) * 3, dtype=int)
path_nodes[::3] = nodes[:, 0]
path_nodes[1::3] = nodes[:, 1]
return path_edges, path_nodes | be95f58a3938b628c89639d3311799eb359c19d2 | 3,638,592 |
import getpass
def validate_password( password:str ) -> bool:
    """ Validates the password against a password policy, then interactively
    asks the user to retype it for confirmation (via ``getpass``).

    Args:
        password ( str, required ):
            password to verify.
    Returns:
        valid ( bool ):
            True if the password meets validity requirements and the retyped
            confirmation matches.
    """
    # Minimum requirements: strength score, entropy bits and length.
    policy = PasswordPolicy.from_names(
        strength=0.20,
        entropybits=10,
        length=6,
    )
    if not password:
        return False
    tested_pass = policy.password(password)
    # test() returns the list of failed policy constraints (empty == pass).
    result = tested_pass.test()
    if len(result) > 0:
        # NOTE(review): ``colored`` is called without a color argument here —
        # confirm this is intended.
        print(colored('Password not strong enough. Try increasing the length of the password or the password complexity'))
        return False
    password_verification = getpass.getpass("Retype your password: ")
    if password != password_verification:
        print("Passwords do not match")
        return False
    return True
from typing import Iterable
from typing import List
from typing import Dict
from typing import Any
def build_webhooks(
        handlers_: Iterable[handlers.WebhookHandler],
        *,
        resources: Iterable[references.Resource],
        name_suffix: str,
        client_config: reviews.WebhookClientConfig,
        persistent_only: bool = False,
) -> List[Dict[str, Any]]:
    """
    Construct the content for ``[Validating|Mutating]WebhookConfiguration``.

    This function concentrates all conventions how Kopf manages the webhook.

    :param handlers_: the admission handlers to expose as webhooks.
    :param resources: resources to match each handler's selector against.
    :param name_suffix: appended to each handler id to form the webhook name.
    :param client_config: how the apiserver should reach the webhook server.
    :param persistent_only: when True, skip non-persistent handlers.
    :return: one webhook dict per (eligible) handler.
    """
    return [
        {
            'name': _normalize_name(handler.id, suffix=name_suffix),
            'sideEffects': 'NoneOnDryRun' if handler.side_effects else 'None',
            'failurePolicy': 'Ignore' if handler.ignore_failures else 'Fail',
            'matchPolicy': 'Equivalent',
            'rules': [
                {
                    'apiGroups': [resource.group],
                    'apiVersions': [resource.version],
                    # Subresource handlers match 'plural/subresource'.
                    'resources': (
                        [resource.plural] if handler.subresource is None else
                        [f'{resource.plural}/{handler.subresource}']
                    ),
                    'operations': ['*'] if handler.operation is None else [handler.operation],
                    'scope': '*',  # doesn't matter since a specific resource is used.
                }
                for resource in resources
                if handler.selector is not None  # None is used only in sub-handlers, ignore here.
                if handler.selector.check(resource)
            ],
            'objectSelector': _build_labels_selector(handler.labels),
            # The handler id is injected so the server can route the review.
            'clientConfig': _inject_handler_id(client_config, handler.id),
            'timeoutSeconds': 30,  # a permitted maximum is 30.
            'admissionReviewVersions': ['v1', 'v1beta1'],  # only those understood by Kopf itself.
        }
        for handler in handlers_
        if not persistent_only or handler.persistent
    ]
def ndo_real(data, n):
    """Read *n* consecutive reals from *data* (mimic of gmx_fio_ndo_real in gromacs).

    :param data: unpacker object exposing ``unpack_real()``
    :param n: number of reals to read
    :return: list of the *n* unpacked values, in read order
    """
    # ``_`` for the intentionally unused loop variable.
    return [data.unpack_real() for _ in range(n)]
from typing import Union
from typing import Optional
def get_field_type(field: Union[syntax.Field, syntax.Command], idl_file: syntax.IDLParsedSpec,
                   idl_file_path: str) -> Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]]:
    """Resolve and get field type of a field from the IDL file.

    On resolution errors, dumps the collected parser errors to output and
    returns the (possibly None) resolved type anyway.
    """
    parser_ctxt = errors.ParserContext(idl_file_path, errors.ParserErrorCollection())
    field_type = idl_file.spec.symbols.resolve_field_type(parser_ctxt, field, field.name,
                                                          field.type)
    if parser_ctxt.errors.has_errors():
        parser_ctxt.errors.dump_errors()
    return field_type
import logging
from datetime import datetime
def query(context: models.Context, query_str: str) -> TimeSeriesCollection:
    """Do a monitoring query in the specified project.

    Note that the project can be either the project where the monitored resources
    are, or a workspace host project, in which case you will get results for all
    associated monitored projects.

    Results from all projects in *context* are merged into one collection.
    Paginated responses are followed to completion; HTTP 502 responses
    (monitoring query timeouts) are logged and skipped, other API errors
    are re-raised as ``utils.GcpApiError``.
    """
    time_series = TimeSeriesCollection()
    for project_id in context.projects:
        mon_api = apis.get_api('monitoring', 'v3', project_id)
        try:
            request = mon_api.projects().timeSeries().query(name='projects/' +
                                                            project_id,
                                                            body={'query': query_str})
            logging.info('executing monitoring query (project: %s)', project_id)
            logging.debug('query: %s', query_str)
            pages = 0
            start_time = datetime.datetime.now()
            # Follow pagination until query_next() returns no further request.
            while request:
                pages += 1
                response = request.execute()
                time_series.add_api_response(response)
                request = mon_api.projects().timeSeries().query_next(
                    previous_request=request, previous_response=response)
                if request:
                    logging.info('still executing monitoring query (project: %s)',
                                 project_id)
            end_time = datetime.datetime.now()
            logging.debug('query run time: %s, pages: %d', end_time - start_time,
                          pages)
        except googleapiclient.errors.HttpError as err:
            gcp_err = utils.GcpApiError(err)
            # Ignore 502 because we get that when the monitoring query times out.
            if gcp_err.status in [502]:
                logging.warning('error executing monitoring query: %s',
                                str(gcp_err.message))
            else:
                raise utils.GcpApiError(err) from err
    return time_series
def get_status():
    """Get the node status and return it as a data payload."""
    payload = {}
    return data(payload)
import tokenize
def read_orc(path, columns=None, storage_options=None, **kwargs):
    """Read cudf dataframe from ORC file(s).

    Note that this function is mostly borrowed from upstream Dask.

    One dask task is created per ORC stripe; all files must share the same
    schema.

    Parameters
    ----------
    path: str or list(str)
        Location of file(s), which can be a full URL with protocol specifier,
        and may include glob character if a single string.
    columns: None or list(str)
        Columns to load. If None, loads all.
    storage_options: None or dict
        Further parameters to pass to the bytes backend.

    Returns
    -------
    cudf.DataFrame
    """
    # NOTE(review): ``tokenize`` here should be ``dask.base.tokenize``; the
    # module-level ``import tokenize`` (stdlib) would shadow it incorrectly —
    # verify the imports of this module.
    storage_options = storage_options or {}
    fs, fs_token, paths = get_fs_token_paths(
        path, mode="rb", storage_options=storage_options
    )
    # First pass: read each file's metadata, validate a single common schema
    # and count stripes per file.
    schema = None
    nstripes_per_file = []
    for path in paths:
        with fs.open(path, "rb") as f:
            o = orc.ORCFile(f)
            if schema is None:
                schema = o.schema
            elif schema != o.schema:
                raise ValueError(
                    "Incompatible schemas while parsing ORC files"
                )
            nstripes_per_file.append(o.nstripes)
    schema = _get_pyarrow_dtypes(schema, categories=None)
    if columns is not None:
        ex = set(columns) - set(schema)
        if ex:
            raise ValueError(
                "Requested columns (%s) not in schema (%s)" % (ex, set(schema))
            )
    else:
        columns = list(schema)
    # Read stripe 0 of the first file to obtain the meta (empty prototype).
    with fs.open(paths[0], "rb") as f:
        meta = cudf.read_orc(f, stripe=0, columns=columns, **kwargs)
    name = "read-orc-" + tokenize(fs_token, path, columns, **kwargs)
    # Build the task graph: one task per (file, stripe) pair.
    dsk = {}
    N = 0
    for path, n in zip(paths, nstripes_per_file):
        for stripe in range(n):
            dsk[(name, N)] = (
                _read_orc_stripe,
                fs,
                path,
                stripe,
                columns,
                kwargs,
            )
            N += 1
    # Divisions are unknown (None) for every partition boundary.
    divisions = [None] * (len(dsk) + 1)
    return dd.core.new_dd_object(dsk, name, meta, divisions)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.