| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def p_express_arg_8(p):
"""
express_arg : express_arg ARITOP express_arg
"""
p[0] = Expression(p[2], p[1], p[3])
| 5,340,100
|
def xy_to_array_origin(image):
"""Return view of image transformed from Cartesian to array origin."""
return rgb_transpose(image[:, ::-1])
| 5,340,101
|
def test_start_test_program():
"""
>>> write('t.py',
... '''
... import time
... time.sleep(1)
... open('x', 'w').close()
... time.sleep(99)
... ''')
>>> write('conf',
... '''
... <runner>
... program %s t.py
... start-test-program cat x
... </runner>
... ''' % sys.executable)
>>> import os, time
>>> start = time.time()
>>> system("./zdaemon -Cconf start")
. .
daemon process started, pid=21446
>>> os.path.exists('x')
True
>>> system("./zdaemon -Cconf stop")
<BLANKLINE>
daemon process stopped
"""
| 5,340,102
|
def read_csv(infile, delimiter=',', encoding='utf-8', named=False):
"""Reads a csv as a list of lists (unnamed) or a list of named tuples (named)
Args:
string infile: the file to read in
OPTIONAL:
string delimiter: the delimiter used (default ',')
encoding encoding: the encoding of the file (default 'utf-8')
        boolean named: if True, loads rows as named tuples;
            if False (the default), loads rows as lists
Returns list of lists or named tuples"""
with open(infile, encoding=encoding) as f:
reader = csv.reader(f, delimiter=delimiter)
if named:
headers = next(reader)
# strip spaces and annoying things from headers
names = [identifier.replace('-', '_').replace(' ', '_').lower()
for identifier in headers]
Data = namedtuple("Data", names)
named_rows = map(Data._make, reader)
return [row for row in named_rows]
else:
return list(list(row for row in reader))
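# Usage sketch (hedged): 'people.csv' is a hypothetical file, and the host module is assumed
# to import `csv` and `collections.namedtuple`, which read_csv relies on. A header such as
# "First Name" is normalized to the attribute name `first_name`.
rows = read_csv('people.csv', named=True)   # list of namedtuples, one per data row
plain = read_csv('people.csv')              # list of lists, header included as row 0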
| 5,340,103
|
def publish_ims_legacy(f='ims_legacy'):
""" create public version of data by deletion of all email addresses """
out_dir = global_vars['published_files_directory']
data = {}
dls = []
infile = open(f + '.txt')
instring = infile.read()
##instring = instring.replace(':description',' :citation') ## to be deprecated soon
instring = unicode(instring,'utf-8')
instring = instring.replace('$time_stamp$', time_stamp)
lines = instring.split('\n')
lines = [line for line in lines if not line.find('Email') >= 0 ]
txt = '\n'.join(lines) + '\n'
out_fname = out_dir + 'imslegacy_data'
#outfile = open('/accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/imslegacy_data.txt','w')
outfile = open(out_fname + '.txt','w')
outfile.write(txt.encode('utf-8'))
outfile.close()
print ' wrote to ' + out_fname + '.txt'
##print ' published at http://bibserver.berkeley.edu/tmp/imslegacy/imslegacy_data.txt'
#data = read_ims_legacy('/accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/imslegacy_data')
data = read_ims_legacy(out_fname)
#outfile = open('/accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/imslegacy_data.json','w')
outfile = open(out_fname + '.json','w')
outfile.write(json.dumps(data,indent=4).encode('utf-8'))
outfile.close()
print ' wrote to ' + out_fname + '.json'
#print ' wrote to /accounts/projects/jpopen/apache/htdocs/tmp/imslegacy/imslegacy_data.json'
#print ' published at http://bibserver.berkeley.edu/tmp/imslegacy/imslegacy_data.json'
| 5,340,104
|
def is_ivy_enabled(ctx):
"""Determine if the ivy compiler should be used to by the ng_module.
Args:
ctx: skylark rule execution context
Returns:
Boolean, Whether the ivy compiler should be used.
"""
# Check the renderer flag to see if Ivy is enabled.
# This is intended to support a transition use case for google3 migration.
# The `_renderer` attribute will never be set externally, but will always be
# set internally as a `string_flag()` with the allowed values of:
# "view_engine" or "ivy".
if ((hasattr(ctx.attr, "_renderer") and
ctx.attr._renderer[BuildSettingInfo].value == "ivy")):
return True
# This attribute is only defined in google's private ng_module rule and not
# available externally. For external users, this is effectively a no-op.
if hasattr(ctx.attr, "ivy") and ctx.attr.ivy == True:
return True
if ctx.var.get("angular_ivy_enabled", None) == "True":
return True
# Enable Angular targets extracted by Kythe Angular indexer to be compiled with the Ivy compiler architecture.
# TODO(ayazhafiz): remove once Ivy has landed as the default in g3.
if ctx.var.get("GROK_ELLIPSIS_BUILD", None) != None:
return True
# Return false to default to ViewEngine compiler
return False
| 5,340,105
|
def get_directions_id(destination):
"""Get place ID for directions, which is place ID for associated destination, if an event"""
if hasattr(destination, 'destination'):
# event with a related destination; use it for directions
if destination.destination:
return destination.destination.id
else:
# event without a destination
return None
else:
# not an event
return destination.id
| 5,340,106
|
def decode_token(params, token_field=None):
"""
    Decode the JWT token into the data that was used to generate it.
    Args:
        params: json data received with the request
        token_field: name of the field the token can be found in
    Returns:
        resulting data from the token decode process
"""
try:
if not token_field:
token = params[TOKEN_FIELD]
else:
token = params[token_field]
# token_use_details = find_token_use(session_obj, token)
# check_token_validate_period(session_obj, token_use_details)
account_details = jwt.decode(token, _SECRET, algorithms=ALGORITHM)
# check_login_access_revoked(
# session_obj, account_details, token_use_details
# )
# extend_token_validity(session_obj, token_use_details)
return account_details
except orm_exc.NoResultFound:
raise exc.LoggedOutError()
| 5,340,107
|
def gas_arrow(ods, r, z, direction=None, snap_to=numpy.pi / 4.0, ax=None, color=None, pad=1.0, **kw):
"""
Draws an arrow pointing in from the gas valve
:param ods: ODS instance
:param r: float
R position of gas injector (m)
:param z: float
Z position of gas injector (m)
:param direction: float
Direction of injection (radians, COCOS should match ods.cocos). None = try to guess.
:param snap_to: float
Snap direction angle to nearest value. Set snap to pi/4 to snap to 0, pi/4, pi/2, 3pi/4, etc. No in-between.
:param ax: axes instance into which to plot (default: gca())
:param color: matplotlib color specification
:param pad: float
Padding between arrow tip and specified (r,z)
"""
from matplotlib import pyplot
def pick_direction():
"""Guesses the direction for the arrow (from injector toward machine) in case you don't know"""
dr = ods['equilibrium']['time_slice'][0]['global_quantities']['magnetic_axis']['r'] - r
dz = ods['equilibrium']['time_slice'][0]['global_quantities']['magnetic_axis']['z'] - z
theta = numpy.arctan2(dz, -dr)
if snap_to > 0:
theta = snap_to * round(theta / snap_to)
return theta
if direction is None:
direction = pick_direction()
else:
direction = cocos_transform(ods.cocos, 11)['BP'] * direction
if ax is None:
ax = pyplot.gca()
shaft_len = 3.5 * (1 + pad) / 2.
da = numpy.pi / 10 # Angular half width of the arrow head
x0 = numpy.cos(-direction) * pad
y0 = numpy.sin(-direction) * pad
head_mark = [
(x0, y0),
(x0 + numpy.cos(-direction + da), y0 + numpy.sin(-direction + da)),
(x0 + numpy.cos(-direction), y0 + numpy.sin(-direction)),
(x0 + shaft_len * numpy.cos(-direction), y0 + shaft_len * numpy.sin(-direction)),
(x0 + numpy.cos(-direction), y0 + numpy.sin(-direction)),
(x0 + numpy.cos(-direction - da), y0 + numpy.sin(-direction - da)),
]
kw.pop('marker', None) # Ignore this
return ax.plot(r, z, marker=head_mark, color=color, markersize=100 * (pad + shaft_len) / 5, **kw)
| 5,340,108
|
def get_word_vector(text, model, num):
"""
:param text: list of words
:param model: word2vec model in Gensim format
:param num: number of the word to exclude
:return: average vector of words in text
"""
# Creating list of all words in the document which are present in the model
excl_word = text[num]
words = [w for w in text if w in model and w != excl_word]
lexicon = list(set(words))
lw = len(lexicon)
if lw < 1:
print('Empty lexicon in', text, file=sys.stderr)
return np.zeros(model.vector_size)
vectors = np.zeros((lw, model.vector_size)) # Creating empty matrix of vectors for words
for i in list(range(lw)): # Iterate over words in the text
word = lexicon[i]
vectors[i, :] = model[word] # Adding word and its vector to matrix
semantic_fingerprint = np.sum(vectors, axis=0) # Computing sum of all vectors in the document
semantic_fingerprint = np.divide(semantic_fingerprint, lw) # Computing average vector
return semantic_fingerprint
| 5,340,109
|
def find_storage_pool_type(apiclient, storagetype='NetworkFileSystem'):
"""
@name : find_storage_pool_type
@Desc : Returns true if the given storage pool type exists
@Input : type : type of the storage pool[NFS, RBD, etc.,]
@Output : True : if the type of storage is found
False : if the type of storage is not found
FAILED In case the cmd failed
"""
cmd = listStoragePools.listStoragePoolsCmd()
cmd_out = apiclient.listStoragePools(cmd)
if validateList(cmd_out)[0] != PASS:
return FAILED
for storage_pool in cmd_out:
        # Compare case-insensitively; the default 'NetworkFileSystem' is mixed case
        if storage_pool.type.lower() == storagetype.lower():
return True
return False
| 5,340,110
|
def test_feed_from_annotations_item_guid(factories):
"""Feed items should use the annotation's HTML URL as their GUID."""
annotation = factories.Annotation(
created=datetime.datetime(year=2015, month=3, day=11)
)
feed = rss.feed_from_annotations(
[annotation], _annotation_url(), mock.Mock(), "", "", ""
)
assert feed["entries"][0]["guid"] == ("tag:hypothes.is,2015-09:" + annotation.id)
| 5,340,111
|
def parse_year(candidate: Any) -> int:
"""Parses the given candidate as a year literal. Raises a ValueError
when the candidate is not a valid year."""
if candidate is not None and not isinstance(candidate, int):
raise TypeError("Argument year is expected to be an int, "
"but is {}".format(type(candidate)))
return cast(int, candidate)
| 5,340,112
|
def dropout2d(tensor: Tensor, p: float = 0.2) -> Tensor:
"""
    Performs 2D channel-wise dropout on an autograd tensor.
    :param tensor: (Tensor) Input tensor of shape (batch, channels, height, width)
    :param p: (float) Probability that an activation channel is set to zero
:return: (Tensor) Output tensor
"""
# Check argument
assert 0.0 <= p <= 1.0, 'Parameter p must be in the range of [0, 1].'
    # Apply dropout: sample one keep/drop decision per channel and broadcast over (N, C, H, W)
    mask = (np.random.uniform(0.0, 1.0, size=tensor.shape[1]) > p).astype(float).reshape(1, -1, 1, 1)
output = tensor.data * mask
# Check if grad is needed
requires_grad = tensor.requires_grad
# Add grad function
dependencies = [Dependency(tensor, lambda grad: grad * mask)] if requires_grad else None
return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies)
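# A minimal numpy sketch of the channel-wise masking used above (standalone; the custom
# Tensor/Dependency classes are assumed to come from the surrounding autograd framework):
import numpy as np
x = np.ones((2, 4, 3, 3))                                   # (batch, channels, H, W)
p = 0.5
mask = (np.random.uniform(0.0, 1.0, size=x.shape[1]) > p)   # one keep/drop flag per channel
out = x * mask.astype(float).reshape(1, -1, 1, 1)           # zeroed channels are zero everywhere
print(mask, out.sum(axis=(0, 2, 3)))                        # dropped channels sum to 0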
| 5,340,113
|
def svn_repos_finish_report(*args):
"""svn_repos_finish_report(void * report_baton, apr_pool_t pool) -> svn_error_t"""
return _repos.svn_repos_finish_report(*args)
| 5,340,114
|
def step(
read_path: Path,
write_path: Path,
overrides
) -> None:
"""
Read config, apply overrides and write out.
"""
step_config(read_path, write_path, overrides)
| 5,340,115
|
def problem451():
"""
Consider the number 15.
There are eight positive numbers less than 15 which are coprime to 15: 1,
2, 4, 7, 8, 11, 13, 14.
The modular inverses of these numbers modulo 15 are: 1, 8, 4, 13, 2, 11,
7, 14
because
1*1 mod 15=1
2*8=16 mod 15=1
4*4=16 mod 15=1
7*13=91 mod 15=1
11*11=121 mod 15=1
14*14=196 mod 15=1
Let I(n) be the largest positive number m smaller than n-1 such that the
modular inverse of m modulo n equals m itself.
So I(15)=11.
Also I(100)=51 and I(7)=1.
Find ∑I(n) for 3≤n≤2·10^7
"""
LIMIT = 20000000
# Build table of smallest prime factors
smallestprimefactor = array.array("L", itertools.repeat(0, LIMIT + 1))
end = eulerlib.sqrt(len(smallestprimefactor) - 1)
for i in range(2, len(smallestprimefactor)):
if smallestprimefactor[i] == 0:
smallestprimefactor[i] = i
if i <= end:
for j in range(i * i, len(smallestprimefactor), i):
if smallestprimefactor[j] == 0:
smallestprimefactor[j] = i
# Returns all the solutions (in ascending order) such that
# for each k, 1 <= k < n and k^2 = 1 mod n.
def get_solutions(n):
if smallestprimefactor[n] == n: # n is prime
return (1, n - 1)
else:
temp = []
p = smallestprimefactor[n]
sols = solutions[n // p]
for i in range(0, n, n // p):
for j in sols:
k = i + j
if k * k % n == 1:
temp.append(k)
return tuple(temp)
# Process every integer in range
solutions = [(), (), (1,)]
ans = 0
for i in range(3, LIMIT + 1):
sols = get_solutions(i)
if i <= LIMIT // 2:
solutions.append(sols)
ans += sols[-2] # Second-largest solution
return ans
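# A brute-force sketch of I(n) for small n, matching the examples in the docstring
# (I(15)=11, I(100)=51, I(7)=1); useful only as a sanity check, not for the full limit.
def I_bruteforce(n):
    for m in range(n - 2, 0, -1):   # largest m < n-1 first
        if m * m % n == 1:          # m is its own modular inverse mod n
            return m
    return 1
assert I_bruteforce(15) == 11 and I_bruteforce(100) == 51 and I_bruteforce(7) == 1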
| 5,340,116
|
def collate_molgraphs(data):
"""Batching a list of datapoints for dataloader.
Parameters
----------
data : list of 3-tuples or 4-tuples.
Each tuple is for a single datapoint, consisting of
a SMILES, a DGLGraph, all-task labels and optionally
a binary mask indicating the existence of labels.
Returns
-------
smiles : list
List of smiles
bg : BatchedDGLGraph
Batched DGLGraphs
labels : Tensor of dtype float32 and shape (B, T)
Batched datapoint labels. B is len(data) and
T is the number of total tasks.
masks : Tensor of dtype float32 and shape (B, T)
Batched datapoint binary mask, indicating the
existence of labels. If binary masks are not
provided, return a tensor with ones.
"""
assert len(data[0]) in [3, 4], \
'Expect the tuple to be of length 3 or 4, got {:d}'.format(len(data[0]))
if len(data[0]) == 3:
smiles, graphs, labels = map(list, zip(*data))
masks = None
else:
smiles, graphs, labels, masks = map(list, zip(*data))
bg = dgl.batch(graphs)
bg.set_n_initializer(dgl.init.zero_initializer)
bg.set_e_initializer(dgl.init.zero_initializer)
labels = torch.stack(labels, dim=0)
if masks is None:
masks = torch.ones(labels.shape)
else:
masks = torch.stack(masks, dim=0)
return smiles, bg, labels, masks
| 5,340,117
|
def os_path_abspath():
"""将相对路径转换为绝对路径"""
os.chdir("C:\\")
paths = [
".",
"..",
"/one/two/three",
"./one/two",
]
for path in paths:
print("{!r:>21}: {!r}".format(path, os.path.abspath(path)))
| 5,340,118
|
def reshape_practice(x):
"""
Given an input tensor of shape (24,), return a reshaped tensor y of shape
(3, 8) such that
y = [
[x[0], x[1], x[2], x[3], x[12], x[13], x[14], x[15]],
[x[4], x[5], x[6], x[7], x[16], x[17], x[18], x[19]],
[x[8], x[9], x[10], x[11], x[20], x[21], x[22], x[23]],
]
You must construct y by performing a sequence of reshaping operations on x
(view, t, transpose, permute, contiguous, reshape, etc). The input tensor
should not be modified.
Input:
- x: A tensor of shape (24,)
Returns:
- y: A reshaped version of x of shape (3, 8) as described above.
"""
y = None
#############################################################################
# TODO: Implement this function #
#############################################################################
# Replace "pass" statement with your code
y = x.contiguous().view(2, 3 , 4)
y = y.transpose(0,1)
y = y.contiguous().view(3, -1)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return y
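# Quick sanity check of reshape_practice: with x = arange(24), row 0 of y should be
# [0, 1, 2, 3, 12, 13, 14, 15] as described in the docstring (sketch, assumes PyTorch is installed).
import torch
x = torch.arange(24)
y = reshape_practice(x)
print(y.shape)   # torch.Size([3, 8])
print(y[0])      # tensor([ 0,  1,  2,  3, 12, 13, 14, 15])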
| 5,340,119
|
def xnnpack_cc_library(
name,
srcs = [],
psimd_srcs = [],
x86_srcs = [],
aarch32_srcs = [],
aarch64_srcs = [],
asmjs_srcs = [],
wasm_srcs = [],
wasmsimd_srcs = [],
copts = [],
gcc_copts = [],
msvc_copts = [],
mingw_copts = [],
msys_copts = [],
gcc_x86_copts = [],
msvc_x86_32_copts = [],
msvc_x86_64_copts = [],
aarch32_copts = [],
aarch64_copts = [],
asmjs_copts = [],
wasm_copts = [],
wasmsimd_copts = [],
optimized_copts = ["-O2"],
hdrs = [],
defines = [],
includes = [],
deps = [],
visibility = []):
"""C/C++/assembly library with architecture-specific configuration.
Define a static library with architecture- and instruction-specific
source files and/or compiler flags.
Args:
name: The name of the library target to define.
srcs: The list of architecture-independent source files.
psimd_srcs: The list of psimd-specific source files.
x86_srcs: The list of x86-specific source files.
aarch32_srcs: The list of AArch32-specific source files.
aarch64_srcs: The list of AArch64-specific source files.
asmjs_srcs: The list of Asm.js-specific source files.
wasm_srcs: The list of WebAssembly/MVP-specific source files.
wasmsimd_srcs: The list of WebAssembly/SIMD-specific source files.
copts: The list of compiler flags to use in all builds. -I flags for
include/ and src/ directories of XNNPACK are always prepended
before these user-specified flags.
gcc_copts: The list of compiler flags to use with GCC-like compilers.
msvc_copts: The list of compiler flags to use with MSVC compiler.
mingw_copts: The list of compiler flags to use with MinGW GCC compilers.
msys_copts: The list of compiler flags to use with MSYS (Cygwin) GCC compilers.
gcc_x86_copts: The list of GCC-like compiler flags to use in x86 (32-bit and 64-bit) builds.
msvc_x86_32_copts: The list of MSVC compiler flags to use in x86 (32-bit) builds.
msvc_x86_64_copts: The list of MSVC compiler flags to use in x86 (64-bit) builds.
aarch32_copts: The list of compiler flags to use in AArch32 builds.
aarch64_copts: The list of compiler flags to use in AArch64 builds.
asmjs_copts: The list of compiler flags to use in Asm.js builds.
wasm_copts: The list of compiler flags to use in WebAssembly/MVP builds.
wasmsimd_copts: The list of compiler flags to use in WebAssembly/SIMD
builds.
optimized_copts: The list of compiler flags to use in optimized builds.
Defaults to -O2.
hdrs: The list of header files published by this library to be textually
included by sources in dependent rules.
      defines: List of predefined macros to be added to the compile line.
includes: List of include dirs to be added to the compile line.
deps: The list of other libraries to be linked.
visibility: The list of packages that can depend on this target.
"""
native.cc_library(
name = name,
srcs = srcs + select({
":linux_k8": psimd_srcs + x86_srcs,
":linux_aarch64": psimd_srcs + aarch64_srcs,
":linux_armhf": psimd_srcs + aarch32_srcs,
":macos_x86_64": psimd_srcs + x86_srcs,
":windows_x86_64_clang": psimd_srcs + x86_srcs,
":windows_x86_64_mingw": psimd_srcs + x86_srcs,
":windows_x86_64_msys": psimd_srcs + x86_srcs,
":windows_x86_64": x86_srcs,
":android_armv7": psimd_srcs + aarch32_srcs,
":android_arm64": psimd_srcs + aarch64_srcs,
":android_x86": psimd_srcs + x86_srcs,
":android_x86_64": psimd_srcs + x86_srcs,
":ios_armv7": psimd_srcs + aarch32_srcs,
":ios_arm64": psimd_srcs + aarch64_srcs,
":ios_arm64e": psimd_srcs + aarch64_srcs,
":ios_x86": psimd_srcs + x86_srcs,
":ios_x86_64": psimd_srcs + x86_srcs,
":watchos_armv7k": psimd_srcs + aarch32_srcs,
":watchos_arm64_32": psimd_srcs + aarch64_srcs,
":watchos_x86": psimd_srcs + x86_srcs,
":watchos_x86_64": psimd_srcs + x86_srcs,
":tvos_arm64": psimd_srcs + aarch64_srcs,
":tvos_x86_64": psimd_srcs + x86_srcs,
":emscripten_asmjs": asmjs_srcs,
":emscripten_wasm": wasm_srcs,
":emscripten_wasmsimd": psimd_srcs + wasmsimd_srcs,
"//conditions:default": [],
}),
copts = [
"-Iinclude",
"-Isrc",
] + copts + select({
":linux_k8": gcc_x86_copts,
":linux_aarch64": aarch64_copts,
":linux_armhf": aarch32_copts,
":macos_x86_64": gcc_x86_copts,
":windows_x86_64_clang": ["/clang:" + opt for opt in gcc_x86_copts],
":windows_x86_64_mingw": mingw_copts + gcc_x86_copts,
":windows_x86_64_msys": msys_copts + gcc_x86_copts,
":windows_x86_64": msvc_x86_64_copts,
":android_armv7": aarch32_copts,
":android_arm64": aarch64_copts,
":android_x86": gcc_x86_copts,
":android_x86_64": gcc_x86_copts,
":ios_armv7": aarch32_copts,
":ios_arm64": aarch64_copts,
":ios_arm64e": aarch64_copts,
":ios_x86": gcc_x86_copts,
":ios_x86_64": gcc_x86_copts,
":watchos_armv7k": aarch32_copts,
":watchos_arm64_32": aarch64_copts,
":watchos_x86": gcc_x86_copts,
":watchos_x86_64": gcc_x86_copts,
":tvos_arm64": aarch64_copts,
":tvos_x86_64": gcc_x86_copts,
":emscripten_asmjs": asmjs_copts,
":emscripten_wasm": wasm_copts,
":emscripten_wasmsimd": wasmsimd_copts,
"//conditions:default": [],
}) + select({
":windows_x86_64_clang": ["/clang:" + opt for opt in gcc_copts],
":windows_x86_64_mingw": gcc_copts,
":windows_x86_64_msys": gcc_copts,
":windows_x86_64": msvc_copts,
"//conditions:default": gcc_copts,
}) + select({
":optimized_build": optimized_copts,
"//conditions:default": [],
}),
defines = defines,
deps = deps,
includes = ["include", "src"] + includes,
linkstatic = True,
linkopts = select({
":linux_k8": ["-lpthread"],
":linux_aarch64": ["-lpthread"],
":linux_armhf": ["-lpthread"],
":android": ["-lm"],
"//conditions:default": [],
}),
textual_hdrs = hdrs,
visibility = visibility,
)
| 5,340,120
|
def recouvrement_view(request, id):
"""
    Detail view: show a single Recouvrement belonging to the current user.
"""
user = request.user
recouvrement = Recouvrement.objects.filter(user=user).get(id=id)
context = {
'recouvrement': recouvrement,
}
template_name = 'pages/recouvrement/recouvrement_view.html'
return render(request, template_name, context)
| 5,340,121
|
def getFileServicesNames(fileServices=None, verbose=True):
"""
Returns the names and description of the fileServices available to the user.
:param fileServices: a list of FileService objects (dictionaries), as returned by Files.getFileServices(). If not set, then an extra internal call to Jobs.getFileServices() is made.
:param verbose: boolean parameter defining whether warnings will be printed (set to True) or not (set to False).
:return: an array of dicts, where each dict has the name and description of a file service available to the user.
:raises: Throws an exception if the user is not logged into SciServer (use Authentication.login for that purpose). Throws an exception if the HTTP request to the RACM API returns an error.
:example: fileServiceNames = Files.getFileServicesNames();
.. seealso:: Files.getFileServices
"""
if fileServices is None:
        fileServices = getFileServices(verbose)
    fileServiceNames = []
for fileService in fileServices:
fileServiceNames.append({"name":fileService.get('name'),"description":fileService.get('description')})
return fileServiceNames
| 5,340,122
|
def load_demo_data_from_scratch(*args, **kwargs):
"""Loads demo data for testing purpose. Do not use this in production"""
data_files_1 = os.path.join(BASE_DIR, 'data/data/setup/*.json')
data_files_2 = os.path.join(BASE_DIR, 'data/data/admin_units/*.json')
data_files_3 = os.path.join(BASE_DIR, 'data/data/v2_data/*.json')
data_files_4 = os.path.join(BASE_DIR, 'data/data/demo/*.json')
data_files_5 = os.path.join(BASE_DIR, 'data/data/facilities/*.json')
data_files_6 = os.path.join(BASE_DIR, 'data/data/geocodes/*.json')
data_files_7 = os.path.join(BASE_DIR, 'data/data/approvals/*.json')
data_files_8 = os.path.join(BASE_DIR, 'data/data/last/*.json')
manage('bootstrap', data_files_1)
manage('bootstrap', data_files_2)
manage('bootstrap', data_files_3)
manage('bootstrap', data_files_4)
manage('bootstrap', data_files_5)
manage('load_groups')
# Needs to occur after base setup data has been loaded
load_gis_data()
manage('bootstrap', data_files_6)
manage('bootstrap', data_files_7)
manage('bootstrap', data_files_8)
manage("createinitialrevisions")
| 5,340,123
|
def username_in_path(username, path_):
"""Checks if a username is contained in URL"""
if username in path_:
return True
return False
| 5,340,124
|
def str_parse_as_utf8(content) -> str:
"""Returns the provided content decoded as utf-8."""
return content.decode('utf-8')
| 5,340,125
|
def IGet(InterfaceItemPath):
"""
Returns the current value of a ZBrush or ZScript interface item
Output: The item value
"""
pass
| 5,340,126
|
def create_wiki_titles():
""" Read global variable DATA and get all available articles titles.
After update global variable wiki_articles_titles with them.
:return: None
:rtype: None
"""
wiki_articles_titles = []
for article_title, question, answer, article in DATA:
wiki_articles_titles.append(article_title)
global WIKI_ARTICLES_TITLES_SET
WIKI_ARTICLES_TITLES_SET = sorted(set(wiki_articles_titles))
| 5,340,127
|
def TypeProviderClient(version):
"""Return a Type Provider client specially suited for listing types.
Listing types requires many API calls, some of which may fail due to bad
user configurations which show up as errors that are retryable. We can
alleviate some of the latency and usability issues this causes by tuning
the client.
Args:
version: DM API version used for the client.
Returns:
A Type Provider API client.
"""
main_client = apis.GetClientInstance('deploymentmanager', version.id)
main_client.num_retries = 2
return main_client.typeProviders
| 5,340,128
|
def create_indicator(
pattern: str,
pattern_type: str,
created_by: Optional[Identity] = None,
name: Optional[str] = None,
description: Optional[str] = None,
valid_from: Optional[datetime] = None,
kill_chain_phases: Optional[List[KillChainPhase]] = None,
labels: Optional[List[str]] = None,
confidence: Optional[int] = None,
object_markings: Optional[List[MarkingDefinition]] = None,
x_opencti_main_observable_type: Optional[str] = None,
x_opencti_score: Optional[int] = None,
) -> STIXIndicator:
"""Create an indicator."""
custom_properties: Dict[str, Any] = {X_OPENCTI_SCORE: DEFAULT_X_OPENCTI_SCORE}
if x_opencti_score is not None:
custom_properties[X_OPENCTI_SCORE] = x_opencti_score
if x_opencti_main_observable_type is not None:
custom_properties[
X_OPENCTI_MAIN_OBSERVABLE_TYPE
] = x_opencti_main_observable_type
return STIXIndicator(
id=_create_random_identifier("indicator"),
created_by_ref=created_by,
name=name,
description=description,
pattern=pattern,
pattern_type=pattern_type,
valid_from=valid_from,
kill_chain_phases=kill_chain_phases,
labels=labels,
confidence=confidence,
object_marking_refs=object_markings,
custom_properties=custom_properties,
)
| 5,340,129
|
def write_filtered_mtx(
raw_barcode_filename,
raw_matrix_filename,
filtered_barcode_filename,
filtered_matrix_filename,
):
"""wrapper for filter_mtx that will save the filtered matrix
    We read the raw barcodes, raw matrix, and filtered barcodes
and then yield the rows from the raw matrix file to only include
the barcodes in the filtered barcode list and rewrite the index values
to match the filtered barcode list.
Parameters
----------
raw_barcode_filename : string
filename of file containing a list of cell barcode labels for the
raw matrix.
raw_matrix_filename : string
        filename of a Matrix Market (.mtx) file
filtered_barcode_filename : string
file name containing a subset of the cell barcodes contained in the
raw_barcode_filename with which the raw_matrix will be
resized to only contain those indices.
filtered_matrix_filename : string
filename to write the filtered matrix to
"""
with open(filtered_matrix_filename, "wt") as outstream:
for line in filter_mtx(
raw_barcode_filename, raw_matrix_filename, filtered_barcode_filename
):
outstream.write(line)
| 5,340,130
|
def import_data(
path_to_csv: str,
response_colname: str,
standards_colname: str,
header: int = 0,
nrows: int = None,
skip_rows: int = None,
) -> pd.DataFrame:
"""Import standard curve data from a csv file.
Args:
path_to_csv: Refer to pd.read_csv docs.
response_colname: Name of column with response data.
standards_colname: Name of column with standard concentrations.
header: Refer to pd.read_csv().
nrows: Refer to pd.read_csv().
skip_rows: Skips the first n rows when reading data.
# kwargs: Additional arguments to parse to pd.read_csv().
Returns:
Formatted data as a dataframe.
Raises:
ValueError: If response_colname or standards_colname not in data.columns
"""
data = pd.read_csv(path_to_csv, header=header, nrows=nrows)
if skip_rows:
data = data.iloc[skip_rows:, :]
data.dropna(axis=1, how="all", inplace=True)
data.dropna(inplace=True)
data.rename({response_colname: "response", standards_colname: "standard_concentrations"}, axis=1, inplace=True)
try:
return data.loc[:, ["standard_concentrations", "response"]]
except KeyError:
raise ValueError("Check `response_colname` and `standards_colname` values are valid column names.")
| 5,340,131
|
def plus(x: np.ndarray, y: np.ndarray) -> np.ndarray:
""" 矩阵相加"""
if x.shape == y.shape:
return x + y
| 5,340,132
|
def add_noise(wave, noise, fs, snr, start_time, duration, wave_power):
"""Add a noise to wave.
"""
noise_power = np.dot(noise, noise) / noise.shape[0]
scale_factor = np.sqrt(10**(-snr/10.0) * wave_power / noise_power)
noise = noise * scale_factor
offset = int(start_time * fs)
add_length = min(wave.shape[0] - offset, int(duration * fs), noise.shape[0])
if add_length > 0:
wave[offset: offset + add_length] += noise[0: add_length]
return wave
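# Usage sketch for add_noise (all arrays are synthetic; a real call would pass recorded audio):
import numpy as np
fs = 16000
wave = np.random.randn(fs * 2)                    # 2 s of "speech"
noise = np.random.randn(fs)                       # 1 s of noise
wave_power = np.dot(wave, wave) / wave.shape[0]   # matches the power definition used inside
mixed = add_noise(wave.copy(), noise, fs, snr=10, start_time=0.5, duration=1.0, wave_power=wave_power)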
| 5,340,133
|
def _sort_factors(factors, **args):
"""Sort low-level factors in increasing 'complexity' order."""
def order_if_multiple_key(factor):
f, n = factor
return len(f), n, default_sort_key(f)
def order_no_multiple_key(f):
return len(f), default_sort_key(f)
if args.get('multiple', True):
return sorted(factors, key=order_if_multiple_key)
else:
return sorted(factors, key=order_no_multiple_key)
| 5,340,134
|
def get_affix(text):
"""
This method gets the affix information
:param str text: Input text.
"""
return " ".join(
[word[-4:] if len(word) >= 4 else word for word in text.split()])
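# Example: each word is reduced to its last four characters, words shorter than four are kept whole.
assert get_affix("playing handed cat") == "ying nded cat"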
| 5,340,135
|
async def async_setup_entry(hass, config_entry):
"""Konfigurowanie integracji na podstawie wpisu konfiguracyjnego."""
_LOGGER.info("async_setup_entry " + str(config_entry))
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
return True
| 5,340,136
|
def analyse_branching(geom,ordering_system,conversionFactor,voxelSize):
""" Does a branching analysis on the tree defined by 'geom'
Inputs:
          - geom: A geometry structure consisting of element list, node locations and radii/lengths
          - ordering_system: the ordering system to be used in analysis (e.g. 'strahler', 'horsfield')
          - conversionFactor, voxelSize: scaling factors passed through to the branch angle calculation
       Returns: the updated geom structure; also prints to screen a table of branching properties (one per generation, one per order) and overall summary statistics
"""
elem_cnct = pg_utilities.element_connectivity_1D(geom['nodes'], geom['elems'])
orders = evaluate_orders(geom['nodes'], geom['elems'])
# Find Results
branchGeom = arrange_by_branches(geom, elem_cnct['elem_up'], orders[ordering_system],orders['generation'])
[geom, branchGeom] = find_branch_angles(geom, orders, elem_cnct, branchGeom, voxelSize, conversionFactor)
major_minor_results=major_minor(geom, elem_cnct['elem_down']) #major/minor child stuff
# tabulate data
generation_summary_statistics(geom, orders, major_minor_results)
summary_statistics(branchGeom, geom, orders, major_minor_results,'strahler')
return geom
| 5,340,137
|
def remove_start(s: str) -> str:
"""
    Remove a leading '-' from the string, if present.
    :param s: input string
    :return: the string without a leading '-'
"""
return s[1:] if s.startswith('-') else s
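# Example: a leading '-' is stripped, everything else is left untouched.
assert remove_start('-verbose') == 'verbose' and remove_start('verbose') == 'verbose'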
| 5,340,138
|
async def get_user_requests(user, groups):
"""Get requests relevant to a user.
A user sees requests they have made as well as requests where they are a
secondary approver
"""
dynamo_handler = UserDynamoHandler(user)
all_requests = await dynamo_handler.get_all_requests()
query = {
"domains": config.get("dynamo.get_user_requests.domains", []),
"filters": [
{
"field": "extendedattributes.attributeName",
"values": ["secondary_approvers"],
"operator": "EQUALS",
},
{
"field": "extendedattributes.attributeValue",
"values": groups + [user],
"operator": "EQUALS",
},
],
"size": 500,
}
approver_groups = await auth.query_cached_groups(query=query)
approver_groups = [g["name"] for g in approver_groups]
requests = []
for req in all_requests:
if user == req.get("username", ""):
requests.append(req)
continue
group = req.get("group")
if group is None:
continue
if group in approver_groups + [user]:
requests.append(req)
return requests
| 5,340,139
|
async def test_import_unknown_exception(opp):
"""Test we handle unknown exceptions from import."""
await setup.async_setup_component(opp, "persistent_notification", {})
with patch(
"openpeerpower.components.foscam.config_flow.FoscamCamera",
) as mock_foscam_camera:
mock_foscam_camera.side_effect = Exception("test")
result = await opp.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=VALID_CONFIG,
)
await opp.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
| 5,340,140
|
def list_dirs(path):
"""遍历文件夹下的文件夹,并返回文件夹路径列表
:param path:
:return:
"""
if not os.path.exists(path):
os.mkdir(path)
_path_list = []
for lists in os.listdir(path):
sub_path = os.path.join(path, lists)
        # If it is a directory
if os.path.isdir(sub_path):
_path_list.append(sub_path)
return _path_list
| 5,340,141
|
def main(reddit_client, subreddit):
"""
Execute the logic of the bot. Run after init() is successful.
:param reddit_client: PRAW Reddit Object
:param subreddit: String name of the subreddit to check
:return: Nothing
"""
logging.info('Getting ' + str(config.reddit_commentsPerCheck) + ' comments from r/' + subreddit)
for comment in reddit_client.subreddit(subreddit).comments(limit=config.reddit_commentsPerCheck):
match = re.findall('(![Cc]limb|[Cc]limb:) (.*)', comment.body) # gives a list of tuples
# (because there are two groups in the regex)
if match:
logging.info('Found command ' + str(match) + ' in comment: ' + comment.id + ' ; ' + comment.permalink)
query = match[0][1] # take the first Tuple in the List, and the second regex group from the Tuple
if not check_already_commented(comment.id):
logging.info('Comment ID has not been processed yet: ' + comment.id)
logging.debug('vars(comment): ' + str(vars(comment)))
# check for '!climb area' or 'climb: area'
area_match = re.findall('[Aa]rea (.*)', query)
if area_match:
query = area_match[0]
logging.info('Found Area command in comment: ' + comment.id)
logging.debug('Searching MP for Area query: ' + query)
current_area = findmparea(query)
if current_area:
logging.info('Posting reply to comment: ' + comment.id)
comment.reply(current_area.redditstr() + config.bot_footer)
logging.info('Reply posted to comment: ' + comment.id)
record_comment(comment.id)
else:
logging.error('ERROR RETRIEVING AREA LINK AND INFO FROM MP. Comment: ' + comment.id
+ '. Body: ' + comment.body)
else:
# check for Route command, otherwise assume we are handling a route.
route_match = re.findall('[Rr]oute (.*)', query)
if route_match:
query = route_match[0]
logging.info('Found Route command in comment: ' + comment.id)
else:
logging.info('No additional command found; processing as Route command')
# find the MP route link
logging.debug('Searching MP for Route query: ' + query)
current_route = findmproute(query)
if current_route:
logging.info('Posting reply to comment: ' + comment.id)
comment.reply(current_route.redditstr() + config.bot_footer)
# TODO does PRAW return the comment ID of the reply we just submitted? Log permalink
logging.info('Reply posted to comment: ' + comment.id)
record_comment(comment.id)
else:
logging.error('ERROR RETRIEVING ROUTE LINK AND INFO FROM MP. Comment: ' + comment.id
+ '. Body: ' + comment.body)
else:
logging.info('Already visited comment: ' + comment.id + ' ...no reply needed.')
| 5,340,142
|
def seaborn(data, row_labels=None, col_labels=None, **args):
"""Heatmap based on seaborn.
Parameters
----------
data : numpy array
data array.
row_labels
A list or array of length N with the labels for the rows.
col_labels
A list or array of length M with the labels for the columns.
**args : TYPE
        Various options that are passed directly as input to seaborn.
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.pcolor.html
Returns
-------
fig.
"""
assert not isinstance(data, pd.DataFrame), print('[imagesc] >data input must be numpy array')
try:
import seaborn as sns
    except ImportError:
print('[imagesc] >Error: seaborn is missing! Try to: pip install seaborn')
# Set defaults
args, args_im = _defaults(args)
# Linewidth if required
args['linewidth'] = _check_input(data, args['linewidth'], args_im)
# Normalize
data = _normalize(data, args_im)
# Cleaning args
try:
args.pop('standard_scale')
args.pop('distance')
args.pop('linkage')
except:
pass
# Set row and col labels
row_labels, col_labels = set_labels(data.shape, row_labels, col_labels)
# Make dataframe
df = pd.DataFrame(data=data, index=row_labels, columns=col_labels)
sns.set(color_codes=True)
sns.set(font_scale=1.2)
# sns.set_style({"savefig.dpi": args_im['dpi']})
# Set figsize based on data shape
# args_im['figsize']=_set_figsize(data.shape, args_im['figsize'])
[fig, ax] = plt.subplots(figsize=args_im['figsize'])
# Make heatmap
ax = sns.heatmap(df, **args)
# Set labels
ax.set_xlabel(args_im['xlabel'])
ax.set_ylabel(args_im['ylabel'])
if args_im['title'] is not None:
ax.set_title(args_im['title'])
if args_im['label_orientation'] == 'above':
ax.xaxis.tick_top()
# Rotate labels
ax.set_xticklabels(col_labels, rotation=args_im['xtickRot'], ha='center', minor=False)
ax.set_yticklabels(row_labels, rotation=args_im['ytickRot'], ha='right', minor=False)
# set the x-axis labels on the top
# # fix for mpl bug that cuts off top/bottom of seaborn viz
# b, t = plt.ylim() # discover the values for bottom and top
# b += 0.5 # Add 0.5 to the bottom
# t -= 0.5 # Subtract 0.5 from the top
# plt.ylim(b, t) # update the ylim(bottom, top) values
# Plot
# ax.tight_layout()
plt.show()
# Return
fig = ax.get_figure()
return(fig)
| 5,340,143
|
def info(context: AnonAPIContext):
"""Show batch in current directory"""
logger.info(context.get_batch().to_string())
| 5,340,144
|
def get_artist_title(path):
""" Return artist & title information from filename """
directory, filename = os.path.split(path)
name, extension = os.path.splitext(filename)
# Splitting out artist & title with regular expression
    result = re.search(r"^([\w\s\.\',\+\-&]+?) - ([\(\)\w\s\.\',\-\!&]+)", name)
if result is None:
raise ValueError("Could not detect artist & title for '%s'." % filename)
else:
artist = result.group(1)
title = result.group(2)
if filter_regex is not None:
artist = filter_regex.sub('', artist)
title = filter_regex.sub('', title)
return artist.strip(), title.strip()
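# Usage sketch: the function reads a module-level `filter_regex` (set to None here to disable
# filtering) and relies on `os` and `re` being imported; the path below is a made-up example.
filter_regex = None
print(get_artist_title('/music/Daft Punk - Harder Better Faster Stronger.mp3'))
# -> ('Daft Punk', 'Harder Better Faster Stronger')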
| 5,340,145
|
def normalize_column(df_column, center_at_zero=False):
"""Converts an unnormalized dataframe column to a normalized
1D numpy array
Default: normalizes between [0,1]
(center_at_zero == True): normalizes between [-1,1] """
normalized_array = np.array(df_column, dtype="float64")
amax, amin = np.max(normalized_array), np.min(normalized_array)
normalized_array -= amin
if center_at_zero:
normalized_array *= 2.0 / (amax - amin)
normalized_array -= 1.0
else:
normalized_array *= 1.0 / (amax - amin)
return normalized_array
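# Quick check of normalize_column with a plain list (pandas columns work the same way):
import numpy as np
col = [2.0, 4.0, 6.0]
print(normalize_column(col))                       # [0.  0.5 1. ]
print(normalize_column(col, center_at_zero=True))  # [-1.  0.  1.]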
| 5,340,146
|
def get_tests():
"""Grab all of the tests to provide them to setup.py"""
start_dir = os.path.dirname(__file__)
return unittest.TestLoader().discover(start_dir, pattern='*.py')
| 5,340,147
|
def apply_mask(image, mask):
"""Apply the given mask to the image.
"""
image = image.astype(np.uint8)
image = np.array(image)
for c in range(3):
image[:, :, c] = np.where(mask == 1,
cv2.blur(image[:, :, c],(40,40)),
image[:, :, c])
return image
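# Usage sketch: blur the masked half of a random image (synthetic data; real callers pass
# an image and a 0/1 segmentation mask of matching height and width).
import numpy as np
import cv2
image = np.random.randint(0, 256, size=(80, 80, 3), dtype=np.uint8)
mask = np.zeros((80, 80), dtype=np.uint8)
mask[:, 40:] = 1                      # blur only the right half
blurred = apply_mask(image, mask)     # left half unchanged, right half blurred with a 40x40 kernel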
| 5,340,148
|
def pairCorrelationFunction_3D(x, y, z, S, rMax, dr):
"""Compute the three-dimensional pair correlation function for a set of
spherical particles contained in a cube with side length S. This simple
function finds reference particles such that a sphere of radius rMax drawn
around the particle will fit entirely within the cube, eliminating the need
to compensate for edge effects. If no such particles exist, an error is
returned. Try a smaller rMax...or write some code to handle edge effects! ;)
Arguments:
x an array of x positions of centers of particles
y an array of y positions of centers of particles
z an array of z positions of centers of particles
S length of each side of the cube in space
rMax outer diameter of largest spherical shell
dr increment for increasing radius of spherical shell
Returns a tuple: (g, radii, interior_indices)
g(r) a numpy array containing the correlation function g(r)
radii a numpy array containing the radii of the
spherical shells used to compute g(r)
reference_indices indices of reference particles
"""
from numpy import zeros, sqrt, where, pi, mean, arange, histogram
# Find particles which are close enough to the cube center that a sphere of radius
# rMax will not cross any face of the cube
bools1 = x > (-S/2 +rMax)
bools2 = x < (S/2 - rMax)
bools3 = y > (-S/2 +rMax)
bools4 = y < (S/2 - rMax)
bools5 = z > (-S/2 + rMax)
bools6 = z < (S/2 - rMax)
interior_indices, = where(bools1 * bools2 * bools3 * bools4 * bools5 * bools6)
num_interior_particles = len(interior_indices)
if num_interior_particles < 1:
raise RuntimeError ("No particles found for which a sphere of radius rMax\
will lie entirely within a cube of side length S. Decrease rMax\
or increase the size of the cube.")
edges = arange(0., rMax + 1.1 * dr, dr)
num_increments = len(edges) - 1
g = zeros([num_interior_particles, num_increments])
radii = zeros(num_increments)
numberDensity = len(x) / S**3
# Compute pairwise correlation for each interior particle
for p in range(num_interior_particles):
index = interior_indices[p]
d = sqrt((x[index] - x)**2 + (y[index] - y)**2 + (z[index] - z)**2)
d[index] = 2 * rMax
        (result, bins) = histogram(d, bins=edges, density=False)  # 'normed' was removed in newer numpy
g[p,:] = result / numberDensity
# Average g(r) for all interior particles and compute radii
g_average = zeros(num_increments)
for i in range(num_increments):
radii[i] = (edges[i] + edges[i+1]) / 2.
rOuter = edges[i + 1]
rInner = edges[i]
g_average[i] = mean(g[:, i]) / (4.0 / 3.0 * pi * (rOuter**3 - rInner**3))
return (g_average, radii, interior_indices)
# Number of particles in shell/total number of particles/volume of shell/number density
# shell volume = 4/3*pi(r_outer**3-r_inner**3)
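# Usage sketch: uniformly random points in a cube of side S centred at the origin (the
# convention assumed by the interior-particle test above); for such an ideal gas the
# returned g(r) should fluctuate around 1.
import numpy as np
S, N = 10.0, 2000
x, y, z = (np.random.uniform(-S / 2, S / 2, N) for _ in range(3))
g_r, radii, interior = pairCorrelationFunction_3D(x, y, z, S, rMax=2.0, dr=0.1)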
| 5,340,149
|
def handle(req):
"""handle a request to the function
Args:
req (str): request body
"""
dir_contents = os.listdir(SECRETS_DIR)
print(dir_contents)
with open(SECRETS_DIR+"my-new-secret") as fptr:
print(fptr.read())
return dir_contents
| 5,340,150
|
def _find_endpoints_of_skeleton(binary_image_matrix):
"""Finds endpoints of skeleton.
:param binary_image_matrix: M-by-N numpy array of integers in 0...1. If
binary_image_matrix[i, j] = 1, grid cell [i, j] is part of the skeleton.
:return: binary_endpoint_matrix: M-by-N numpy array of integers in 0...1.
If binary_endpoint_matrix[i, j] = 1, grid cell [i, j] is an endpoint of
the skeleton.
"""
if numpy.sum(binary_image_matrix) == 1:
return copy.deepcopy(binary_image_matrix)
filtered_image_matrix = numpy.pad(
binary_image_matrix, pad_width=2, mode='constant', constant_values=0)
filtered_image_matrix = cv2.filter2D(
filtered_image_matrix.astype(numpy.uint8), -1,
KERNEL_MATRIX_FOR_ENDPOINT_FILTER)
filtered_image_matrix = filtered_image_matrix[2:-2, 2:-2]
endpoint_flag_matrix = numpy.full(binary_image_matrix.shape, 0, dtype=int)
endpoint_flag_matrix[
filtered_image_matrix == FILTERED_VALUE_AT_ENDPOINT] = 1
return endpoint_flag_matrix
| 5,340,151
|
def set_up():
"""
"""
reset_config()
config['app']['considering_candles'] = [('Sandbox', 'BTC-USD')]
store.reset()
store.trades.init_storage()
| 5,340,152
|
def get_today_timestamp():
"""
Get the formatted timestamp for today
"""
today = dt.datetime.today()
stamp = today.strftime("%d") + today.strftime("%b") + today.strftime("%Y")
return stamp
| 5,340,153
|
def interpolate_to_mesh(
old_mesh, new_mesh, params_to_interp=["VSV", "VSH", "VPV", "VPH"]
):
"""
Maps both meshes to a sphere and interpolate values
from old mesh to new mesh for params to interp.
Returns the original coordinate system
Values that are not found are given zero
"""
# store original point locations
orig_old_elliptic_mesh_points = np.copy(old_mesh.points)
orig_new_elliptic_mesh_points = np.copy(new_mesh.points)
# Map both meshes to a sphere
map_to_sphere(old_mesh)
map_to_sphere(new_mesh)
vals = interpolate_to_points(old_mesh, new_mesh.points, params_to_interp)
for i, param in enumerate(params_to_interp):
new_element_nodal_vals = vals[:, i][new_mesh.connectivity]
new_mesh.element_nodal_fields[param][:] = new_element_nodal_vals
# Restore original coordinates
old_mesh.points = orig_old_elliptic_mesh_points
new_mesh.points = orig_new_elliptic_mesh_points
| 5,340,154
|
def massAvg(massList, method='weighted', weights=None):
"""
Compute the average mass of massList according to method.
If method=weighted but weights were not properly defined,
switch method to harmonic.
If massList contains a zero mass, switch method to mean.
:parameter method: possible values: harmonic, mean, weighted
:parameter weights: weights of elements (only for weighted average)
"""
if not massList:
return massList
if massList.count(massList[0]) == len(massList):
return massList[0]
if method == 'weighted' and (not weights or len(weights) != len(massList)):
method = 'harmonic'
flatList = [ mass / GeV for mass in _flattenList(massList)]
if method == 'harmonic' and 0. in flatList:
method = 'mean'
for mass in massList:
if len(mass) != len(massList[0]) \
or len(mass[0]) != len(massList[0][0]) \
or len(mass[1]) != len(massList[0][1]):
logger.error('Mass shape mismatch in mass list:\n' + str(mass) +
' and ' + str(massList[0]))
raise SModelSError()
avgmass = copy.deepcopy(massList[0])
for ib, branch in enumerate(massList[0]):
for ival in enumerate(branch):
vals = [ float(mass[ib][ival[0]] / GeV) for mass in massList]
if method == 'mean':
avg = np.mean(vals)
elif method == 'harmonic':
avg = stats.hmean(vals)
elif method == 'weighted':
weights = [ float(weight) for weight in weights ]
avg = np.average(vals,weights=weights)
avgmass[ib][ival[0]] = float(avg)*GeV
return avgmass
| 5,340,155
|
def listListenerPortsOnServer(nodeName, serverName):
"""List all of the Listener Ports on the specified Node/Server."""
m = "listListenerPortsOnServer:"
sop(m,"nodeName = %s, serverName = %s" % (nodeName, serverName))
cellName = getCellName() # e.g. 'xxxxCell01'
lPorts = _splitlines(AdminControl.queryNames("type=ListenerPort,cell=%s,node=%s,process=%s,*" % (cellName, nodeName, serverName)))
sop(m,"returning %s" % (lPorts))
return lPorts
| 5,340,156
|
def forward_propagation_with_dropout(X, parameters, keep_prob=0.5):
"""
实现具有随机舍弃节点的前向传播。
LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.
参数:
X - 输入数据集,维度为(2,示例数)
parameters - 包含参数“W1”,“b1”,“W2”,“b2”,“W3”,“b3”的python字典:
W1 - 权重矩阵,维度为(20,2)
b1 - 偏向量,维度为(20,1)
W2 - 权重矩阵,维度为(3,20)
b2 - 偏向量,维度为(3,1)
W3 - 权重矩阵,维度为(1,3)
b3 - 偏向量,维度为(1,1)
keep_prob - 随机删除的概率,实数
返回:
A3 - 最后的激活值,维度为(1,1),正向传播的输出
cache - 存储了一些用于计算反向传播的数值的元组
"""
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = reg_utils.relu(Z1)
    # Steps 1-4 below correspond to Steps 1-4 described above.
    D1 = np.random.rand(A1.shape[0], A1.shape[1])  # Step 1: initialize matrix D1 = np.random.rand(..., ...)
    D1 = D1 < keep_prob  # Step 2: convert the entries of D1 to 0 or 1 (using keep_prob as the threshold)
    A1 = A1 * D1  # Step 3: shut down some nodes of A1 (set their values to 0 or False)
    A1 = A1 / keep_prob  # Step 4: scale the values of the kept (non-zero) nodes
"""
    # If this is unclear, run the code below to see what happens.
import numpy as np
np.random.seed(1)
A1 = np.random.randn(1,3)
D1 = np.random.rand(A1.shape[0],A1.shape[1])
keep_prob=0.5
D1 = D1 < keep_prob
print(D1)
A1 = 0.01
A1 = A1 * D1
A1 = A1 / keep_prob
print(A1)
    The result is
[[ True False True]]
[[0.02 0. 0.02]]
"""
Z2 = np.dot(W2, A1) + b2
A2 = reg_utils.relu(Z2)
    # Steps 1-4 below correspond to Steps 1-4 described above.
    D2 = np.random.rand(A2.shape[0], A2.shape[1])  # Step 1: initialize matrix D2 = np.random.rand(..., ...)
    D2 = D2 < keep_prob  # Step 2: convert the entries of D2 to 0 or 1 (using keep_prob as the threshold)
    A2 = A2 * D2  # Step 3: shut down some nodes of A2 (set their values to 0 or False)
    A2 = A2 / keep_prob  # Step 4: scale the values of the kept (non-zero) nodes
Z3 = np.dot(W3, A2) + b3
A3 = reg_utils.sigmoid(Z3)
cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
return A3, cache
| 5,340,157
|
def getPotInstance(pot_name):
""" Try to get an instance of a give pot_name
Return
----------
pot_module: module object of pot_name, if it is a combined list, return None
pot_instance: module instance, if it is not 3D or not available, return None
"""
pot_module = None
pot_instance=None
if (pot_name in dir(galpy.potential)) & ('Potential' in pot_name):
pot_module = galpy.potential.__getattribute__(pot_name)
if (type(pot_module) == list):
pot_instance = pot_module
pot_module = None
elif (type(pot_module) == type):
# get instance
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
pot_instance = pot_module()
except (ValueError, TypeError, AttributeError, RuntimeWarning):
pot_module = None
pot_instance = None
else:
pot_instance = pot_module
pot_module = type(pot_module)
if (pot_instance != None):
# remove 2D models
if (galpy.potential._dim(pot_instance)!=3):
pot_instance = None
# remove potential without c support
if (not _check_c(pot_instance)):
pot_instance = None
return pot_module, pot_instance
| 5,340,158
|
def upload_processed_files(job_context: Dict) -> Dict:
"""Uploads the processed files and removes the temp dir for the job.
If job_context contains a "files_to_upload" key then only those
files will be uploaded. Otherwise all files will be uploaded.
If job_context contains a "job_dir_prefix" key then that will be
passed through to the file methods as the `dir_name` parameter.
"""
if "files_to_upload" in job_context:
files = job_context["files_to_upload"]
else:
files = File.objects.filter(batch__in=job_context["batches"])
if "job_dir_prefix" in job_context:
job_dir_prefix = job_context["job_dir_prefix"]
else:
job_dir_prefix = None
try:
for file in files:
file.upload_processed_file(job_dir_prefix)
except Exception:
logger.exception("Exception caught while uploading processed file %s",
batch=files[0].batch.id,
processor_job=job_context["job_id"])
job_context["job"].failure_reason = "Exception caught while uploading processed file."
job_context["success"] = False
return job_context
finally:
# Whether or not uploading was successful, the job is over so
# clean up the temp directory.
files[0].remove_temp_directory(job_dir_prefix)
return job_context
| 5,340,159
|
def update_hook(branch, from_rev, to_rev, installdir):
""" Function to be called from the update hook """
if from_rev == gitinfo.NULL_COMMIT:
from_rev = gitinfo.START_COMMIT
changeset_info = githook.UpdateGitInfo(branch, from_rev, to_rev)
hooks_ok = run_hooks(changeset_info, installdir)
messages_ok = message_check.check_messages(changeset_info.commit_messages())
return hooks_ok and messages_ok
| 5,340,160
|
def definition():
"""
Most recent student numbers and fees by set
(i.e. by year, costcentre and set category.),
aggregated by fee, aos code, seesion and fee_category.
"""
sql = """
select s.set_id, s.acad_year, s.costc,
s.set_cat_id,
fsc.description as set_cat_description,
fs.fee_cat_id as fee_cat,
cc.default_aos_code,
n.aos_code, n.session,
o.description as origin_description, o.origin_id,
SUM(n.student_count) as student_count,
a.fee_scheme_id,
SUM(f.gross_fee-f.waiver) as individual_fee,
SUM(n.student_count*(f.gross_fee-f.waiver)) as net_fee
FROM s_number n
INNER JOIN v_s_instance_mri i ON i.instance_Id = n.instance_id
INNER JOIN f_set s ON s.set_id = i.set_id
INNER JOIN fs_cost_centre cc ON cc.costc = s.costc
INNER JOIN f_set_cat fsc ON fsc.set_cat_id = s.set_cat_id
INNER JOIN s_fee_status fs ON fs.fee_status_id = n.fee_status_id
INNER JOIN c_aos_code a ON a.aos_code = n.aos_code
INNER JOIN s_fee f ON f.acad_year = s.acad_year
AND f.fee_cat_id = fs.fee_cat_id
AND f.fee_scheme_id = a.fee_scheme_id
AND f.session = n.session
INNER JOIN s_origin o ON o.origin_id = n.origin_id
GROUP BY s.acad_year, s.costc, s.set_cat_id,
fs.fee_cat_id,
n.aos_code, n.session,
a.fee_scheme_id, fsc.description,
o.description, s.set_id,
cc.default_aos_code,
o.origin_id
"""
return sql
| 5,340,161
|
def ssh_connection():
"""Returns the current tmate SSH connection string."""
result = subprocess.run(['tmate', 'display', '-p', "#{tmate_ssh}"],
text=True,
capture_output=True)
if result.returncode != 0:
raise TmateException('Failed to interact with tmate, are you in a tmate session?')
ssh_string: str = result.stdout
if not ssh_string.startswith('ssh'):
raise TmateException('No ssh string returned, is the tmate session active?')
return ssh_string.split(' ')[1].strip()
| 5,340,162
|
def get_geo_distance(p1, p2, signed=False):
"""Returns distance (meters) between to lat/lon points"""
d = geopy.distance.vincenty(p1, p2).m # .m for meters
return -d if (p2[0] < p1[0] or p2[1] > p1[1]) else d
| 5,340,163
|
def bot_info(sub_bots, cfg):
"""Returns a description for this TweetCredReviewer
:param sub_bots: a list of bot items used by this TweetCredReviewer
:param cfg: config options
:returns: a `TweetCredReviewer` item
:rtype: dict
"""
result = {
'@context': ci_context,
'@type': 'TweetCredReviewer',
'additionalType': content.super_types('TweetCredReviewer'),
'name': 'ESI Tweet Credibility Reviewer',
'description': 'Reviews the credibility of a tweet by reviewing the sentences in the tweet and the (textual) documents linked by the tweet',
'author': bot_describer.esiLab_organization(),
'dateCreated': '2020-04-02T18:00:00Z',
'applicationCategory': ['Disinformation Detection'],
'softwareRequirements': ['python', 'nltk', 'Cogito'],
'softwareVersion': version,
'executionEnvironment': bot_describer.inspect_execution_env(),
'isBasedOn': sub_bots,
'launchConfiguration': {},
'taskConfiguration': {}
}
return {
**result,
'identifier': hashu.hash_dict(dictu.select_keys(
result, content.ident_keys(result)))}
| 5,340,164
|
async def metrics_upload(websocket, client_set: ClientSet, msg, *_):
"""
Save the performance metrics sent by the client to disk.
"""
metrics, route_id = msg['metrics'], msg['metrics']['route_id']
logging.info(f"Received performance metrics for routeID '{route_id}'")
save_json(route_id, metrics, config.model_type, 'modelPerformance.json')
| 5,340,165
|
def in_yelling(channel):
"""
checks that channel is #yelling
exists for test mocking
"""
chan = bot.channels.get(channel)
return chan and chan.name == "yelling"
| 5,340,166
|
def draw_adjacency_list():
"""Solution to exercise R-14.4.
Draw an adjacency list representation of the undirected graph shown in
Figure 14.1.
---------------------------------------------------------------------------
Solution:
---------------------------------------------------------------------------
I will re-use the edge labels from Exercise R-14.3:
Snoeyink --- Goodrich a
Garg --- Goodrich b
Garg --- Tamassia c
Goldwasser --- Goodrich d
Goldwasser --- Tamassia e
Goodrich --- Tamassia f
Goodrich --- Vitter g
Goodrich --- Chiang h
Tamassia --- Tollis i
Tamassia --- Vitter j
Tamassia --- Preparata k
Tamassia --- Chiang l
Tollis --- Vitter m
Vitter --- Preparata n
Preparata --- Chiang o
The adjacency list V is a list of vertices v that each point to a
collection I(v) that contains the incident edges of v.
Snoeyink --> {a}
Garg --> {b, c}
Goldwasser --> {d, e}
Goodrich --> {a, b, d, f, g, h}
Tamassia --> {c, e, f, i, j, k, l}
Vitter --> {g, j, m, n}
Chiang --> {h, l, o}
Tollis --> {i, m}
Preparata --> {k, n, o}
Note that each edge appears twice in the adjacency list, for a total of
2*m = 2*15 = 30 edges.
"""
return True
| 5,340,167
|
def some_path(directory) -> str:
"""Generate unique path in the directory."""
return os.path.join(directory, str(uuid()))
| 5,340,168
|
def get_user_categories(user_id, public=False):
"""Get a user's categories.
Arguments: user_id as int, Boolean 'public' (optional).
Returns list of Category objects. Either all private or all public.
"""
return db_session.query(Category).filter((Category.user_id==user_id)&(Category.public==public)).all()
| 5,340,169
|
def dict_to_one(dp_dict):
"""Input a dictionary, return a dictionary that all items are set to one.
Used for disable dropout, dropconnect layer and so on.
Parameters
----------
dp_dict : dictionary
The dictionary contains key and number, e.g. keeping probabilities.
Examples
--------
    >>> dp_dict = dict_to_one( network.all_drop )
>>> feed_dict.update(dp_dict)
"""
return {x: 1 for x in dp_dict}
| 5,340,170
|
def handle_500(request, response, exception):
"""Default handler for 500 status code"""
logging.exception(exception)
response.set_status(500)
| 5,340,171
|
def file_io_read_img_slice(path, slicing, axis, is_label, normalize_spacing=True, normalize_intensities=True, squeeze_image=True,adaptive_padding=4):
"""
:param path: file path
:param slicing: int, the nth slice of the img would be sliced
:param axis: int, the nth axis of the img would be sliced
:param is_label: the img is label
:param normalize_spacing: normalized the spacing
:param normalize_intensities: normalized the img
:param squeeze_image:
    :param adaptive_padding: pad the img so each dimension is divisible by this number (default 4, which is favored by cuda fft)
:return:
"""
normalize_intensities = False if is_label else normalize_intensities
im, hdr, spacing, normalized_spacing = fileio.ImageIO().read(path, normalize_intensities, squeeze_image,adaptive_padding)
if normalize_spacing:
spacing = normalized_spacing
if axis == 1:
slice = im[slicing]
slicing_spacing = spacing[1:]
elif axis == 2:
slice = im[:,slicing,:]
slicing_spacing = np.asarray([spacing[0], spacing[2]])
elif axis == 3:
slice = im[:,:,slicing]
slicing_spacing = spacing[:2]
else:
raise ValueError("slicing axis exceed, should be 1-3")
info = { 'spacing':slicing_spacing, 'img_size': slice.shape}
return slice, info
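# Hedged usage sketch: the file name and slice index below are hypothetical,
# and the `fileio` module used inside the function is assumed to be importable
# from the same package as this snippet.
slice_2d, info = file_io_read_img_slice(
    'brain_img.nii.gz',  # hypothetical 3D image file
    slicing=80,          # take the 80th slice
    axis=1,              # slice along the first image axis
    is_label=False,
)
print(info['spacing'], info['img_size'])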
| 5,340,172
|
def get_document_path(workspace_name: str, document_name: str) -> Path:
"""
    Return the path to a document inside the given workspace, raising a
    404 HTTPException if it does not exist.
"""
path = (
Path(".")
/ Directory.DATA.value
/ Directory.WORKSPACES.value
/ workspace_name
/ document_name
)
if not path.exists():
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Document with name <<{document_name}>>"
+ f" doesn't exist in workspace <<{workspace_name}>>",
)
return path
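# Hedged usage sketch: a hypothetical FastAPI route that serves a stored
# document, relying on get_document_path to raise a 404 for missing files.
from fastapi import FastAPI
from fastapi.responses import FileResponse

app = FastAPI()

@app.get("/workspaces/{workspace_name}/documents/{document_name}")
async def read_document(workspace_name: str, document_name: str) -> FileResponse:
    return FileResponse(get_document_path(workspace_name, document_name))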
| 5,340,173
|
def upload_lambda_zip(bosslet_config, path):
"""
Upload a multilambda.domain.zip to the S3 bucket. Useful when
developing and small changes need to be made to a lambda function, but a full
rebuild of the entire zip file isn't required.
"""
s3 = bosslet_config.session.client('s3')
with open(path, 'rb') as in_file:
resp = s3.put_object(Bucket=bosslet_config.LAMBDA_BUCKET,
Key=os.path.basename(path),
Body=in_file)
print(resp)
| 5,340,174
|
def is_replaced_image(url):
"""
>>> is_replaced_image('https://rss.anyant.com/123.jpg?rssant=1')
True
"""
return url and RSSANT_IMAGE_TAG in url
| 5,340,175
|
def version(*args, **kwargs): # real signature unknown
"""
Returns a tuple of major, minor, and patch release numbers of the
underlying DB library.
"""
pass
| 5,340,176
|
def notification_server():
"""
Starts a HeyU notifier. The specific notifier is specified as a
subcommand.
"""
pass
| 5,340,177
|
def get_raw_segment(fast5_fn, start_base_idx, end_base_idx, basecall_group='Basecall_1D_000',
basecall_subgroup='BaseCalled_template'):
"""
    Get the raw signal segment given the start and end base indexes of the sequence.
    fast5_fn: input fast5 file name.
    start_base_idx: start index of the sequence (0-based).
    end_base_idx: end index of the sequence (this index is included).
    basecall_group: group name to search for base information.
    basecall_subgroup: sub group name to search for base information.
    e.g.
        get_raw_segment('test.fast5', 0, 10)
    will return the signal corresponding to bases 0-10 (the 0th and 10th bases are both included).
"""
with h5py.File(fast5_fn, 'r') as root:
        base = root['Analyses/{}/{}'.format(basecall_group, basecall_subgroup)]
fastq = base['Fastq'].value.split()[2]
seg = fastq[start_base_idx:end_base_idx]
event_h = base['Events']
events = event_h.value
raw_h = list(root['/Raw/Reads'].values())
raw = raw_h[0]['Signal']
start_time = None
if (type(events[0][1]) is np.float64) or (type(events[0][1]) is np.float32):
start_time = event_h.attrs['start_time']
pos = list()
pos_idx = 0
for event in events:
pos_idx += event[5]
pos.append(pos_idx)
start_idx = next(x[0] for x in enumerate(pos) if x[1] >= start_base_idx)
end_idx = next(x[0] - 1 for x in enumerate(pos) if x[1] > end_base_idx)
if start_time is None:
raw_start = events[start_idx][1]
raw_end = events[end_idx][1]
else:
raw_start = int((events[start_idx][1] - start_time) / 0.00025)
raw_end = int((events[end_idx][1] - start_time) / 0.00025)
seg_raw = raw[raw_start:raw_end]
return seg_raw, seg
| 5,340,178
|
def center_distance(gt_box: EvalBox, pred_box: EvalBox) -> float:
"""
L2 distance between the box centers (xy only).
:param gt_box: GT annotation sample.
:param pred_box: Predicted sample.
:return: L2 distance.
"""
return np.linalg.norm(np.array(pred_box.translation[:2]) - np.array(gt_box.translation[:2]))
| 5,340,179
|
def set_specs(override_spec_data):
""" Override Resource Specs """
excludes = []
includes = []
# Extract the exclude list from the override file
if 'ExcludeResourceTypes' in override_spec_data:
excludes = override_spec_data.pop('ExcludeResourceTypes')
if 'IncludeResourceTypes' in override_spec_data:
includes = override_spec_data.pop('IncludeResourceTypes')
for region, spec in RESOURCE_SPECS.items():
# Merge override spec file into the AWS Resource specification
if override_spec_data:
RESOURCE_SPECS[region] = merge_spec(override_spec_data, spec)
# Grab a list of all resources
all_resources = list(RESOURCE_SPECS[region]['ResourceTypes'].keys())[:]
resources = []
        # Keep only the resources matched by the includes
if includes:
for include in includes:
regex = re.compile(include.replace('*', '(.*)') + '$')
matches = [string for string in all_resources if re.match(regex, string)]
resources.extend(matches)
else:
resources = all_resources[:]
# Remove unsupported resources using the excludes
if excludes:
for exclude in excludes:
regex = re.compile(exclude.replace('*', '(.*)') + '$')
matches = [string for string in resources if re.match(regex, string)]
for match in matches:
resources.remove(match)
# Remove unsupported resources
for resource in all_resources:
if resource not in resources:
del RESOURCE_SPECS[region]['ResourceTypes'][resource]
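# Hedged usage sketch: a made-up override that keeps only S3 resource types
# except bucket policies. Wildcards are turned into regexes by set_specs,
# which mutates the module-level RESOURCE_SPECS in place.
override_spec_data = {
    'IncludeResourceTypes': ['AWS::S3::*'],
    'ExcludeResourceTypes': ['AWS::S3::BucketPolicy'],
}
set_specs(override_spec_data)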
| 5,340,180
|
def Dc(z, unit, cosmo):
"""
Input:
z: redshift
unit: distance unit in kpc, Mpc, ...
        cosmo: astropy cosmology object
    Output:
        res: comoving distance in the unit defined by the variable 'unit'
"""
res = cosmo.comoving_distance(z).to_value(unit) #*cosmo.h
return res
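# Hedged usage sketch: comoving distance to z = 1 in Mpc, using an astropy
# cosmology object (Planck18 is just an example choice, not mandated above).
from astropy.cosmology import Planck18

print(Dc(1.0, 'Mpc', Planck18))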
| 5,340,181
|
def _get_all_answer_ids(
column_ids,
row_ids,
questions,
):
"""Maps lists of questions with answer coordinates to token indexes."""
answer_ids = [0] * len(column_ids)
found_answers = set()
all_answers = set()
for question in questions:
for answer in question.answer.answer_coordinates:
all_answers.add((answer.column_index, answer.row_index))
for index in _get_cell_token_indexes(column_ids, row_ids,
answer.column_index,
answer.row_index):
found_answers.add((answer.column_index, answer.row_index))
answer_ids[index] = 1
missing_count = len(all_answers) - len(found_answers)
return answer_ids, missing_count
| 5,340,182
|
def transduce(source, transducer) -> ObservableBase:
"""Execute a transducer to transform the observable sequence.
Keyword arguments:
:param Transducer transducer: A transducer to execute.
:returns: An Observable sequence containing the results from the
transducer.
:rtype: Observable
"""
def subscribe(observer, scheduler=None):
xform = transducer(Observing(observer))
def on_next(value):
try:
xform.step(observer, value)
except Exception as exn:
observer.on_error(exn)
def on_completed():
xform.complete(observer)
return source.subscribe_(on_next, observer.on_error, on_completed)
return AnonymousObservable(subscribe)
| 5,340,183
|
def readfile(filename):
""" Read candidate json trigger file and return dict
TODO: add file lock?
"""
with open(filename, 'r') as fp:
dd = json.load(fp)
return dd
| 5,340,184
|
def isoformViewer():
"""
Make a "broken" horizontal bar plot, ie one with gaps
"""
fig = pylab.figure()
ax = fig.add_subplot(111)
ax.broken_barh([ (110, 30), (150, 10) ] , (10, 5), facecolors=('gray','blue')) # (position, length) - top row
ax.broken_barh([ (10, 50), (100, 20), (130, 10)] , (20, 5),
facecolors=('red', 'yellow', 'green')) # (position, length) - next row down
### Straight line
pylab.plot((140,150),(12.5,12.5),lw=2,color = 'red') ### x coordinates of the line, y-coordinates of the line, line-thickness - iterate through a list of coordinates to do this
### Curved line
verts = [
(140, 15), # P0
(145, 20), # (x coordinate, half distance to this y coordinate)
(150, 15), # P2
]
codes = [Path.MOVETO,
Path.CURVE4,
Path.CURVE4,
]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='none', lw=2, edgecolor = 'green')
ax.add_patch(patch)
#midpt = cubic_bezier(pts, .5)
ax.text(142, 17.7, '25 reads')
### End-curved line
ax.set_ylim(5,35)
ax.set_xlim(0,200)
ax.set_xlabel('Transcript Exons')
ax.set_yticks([15,25])
ax.set_yticklabels(['isoform A', 'isoform B'])
ax.grid(True)
"""
ax.annotate('alternative splice site', (61, 25),
xytext=(0.8, 0.9), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.05),
fontsize=16,
horizontalalignment='right', verticalalignment='top')
"""
pylab.show()
| 5,340,185
|
def get_phonopy_gibbs(
energies,
volumes,
force_constants,
structure,
t_min,
t_step,
t_max,
mesh,
eos,
pressure=0,
):
"""
Compute QHA gibbs free energy using the phonopy interface.
Args:
        energies (list): list of total energies, one per volume
        volumes (list): list of cell volumes
        force_constants (list): list of force constant matrices, one per volume
structure (Structure):
t_min (float): min temperature
t_step (float): temperature step
t_max (float): max temperature
mesh (list/tuple): reciprocal space density
eos (str): equation of state used for fitting the energies and the volumes.
options supported by phonopy: vinet, murnaghan, birch_murnaghan
pressure (float): in GPa, optional.
Returns:
(numpy.ndarray, numpy.ndarray): Gibbs free energy, Temperature
"""
# quasi-harmonic approx
phonopy_qha = get_phonopy_qha(
energies,
volumes,
force_constants,
structure,
t_min,
t_step,
t_max,
mesh,
eos,
pressure=pressure,
)
# gibbs free energy and temperature
max_t_index = phonopy_qha._qha._len
G = phonopy_qha.get_gibbs_temperature()[:max_t_index]
T = phonopy_qha._qha._temperatures[:max_t_index]
return G, T
| 5,340,186
|
def forgot():
"""
    Allows an administrator to state they forgot their password,
    triggering an email with further instructions on how to reset it.
"""
if current_user.is_authenticated:
return redirect(url_for('index'))
form = RequestResetPasswordForm()
if form.validate_on_submit():
user = Administrator.query.filter_by(email=form.email.data).first()
if user:
send_password_reset_email(user)
flash('Check your email for the instructions to reset your password')
return redirect(url_for('auth.login'))
return render_template('forgot_password.html', title='Reset Password', form=form)
| 5,340,187
|
def install_enterprise(ansible_client):
"""
Install OpenShift Container Platform from RPMs.
:param ansible_client: Ansible client
"""
install_openshift(ansible_client, 'enterprise')
| 5,340,188
|
def test_s3_path_for_public_key():
"""
Should get s3 path for public key
"""
# When: I get S3 path for public key
s3path = provider._s3_path('default')
# Then: Expected path is returned
eq_(s3path, 'totem/keys/default-pub.pem')
| 5,340,189
|
def test_tell_total_vaccinated_booster():
"""Tests whether results returned by icl.tell_total_vaccinated with dose="booster" and with ranging for date options make sense (i.e. the number of total vaccinated individuals is equal to the sum of the number of vaccinated individuals in three periods into which the whole vaccination timeline is divided)."""
try:
total_vaccinated_ever = icl.tell_total_vaccinated("booster")
vaccinated_first_group = icl.tell_total_vaccinated("booster", stop_date="2020-10-20")
vaccinated_second_group = icl.tell_total_vaccinated("booster", start_date="2020-10-21", stop_date="2020-10-22")
vaccinated_third_group = icl.tell_total_vaccinated("booster", start_date="2020-10-23")
assert total_vaccinated_ever == vaccinated_first_group+vaccinated_second_group+vaccinated_third_group
except Exception as e:
assert isinstance(e, icl_e.ItaCovidLibConnectionError)
| 5,340,190
|
def diffractionAngle(inc):
"""Return the diffraction angle for the UV yaw system
Input graze angle in degrees
Output diffraction graze angle in degrees"""
alpha0 = np.sin((90.-inc)*np.pi/180)
alpha1 = alpha0 - 266e-9/160e-9
dang = 90 - np.arcsin(np.abs(alpha1))*180/np.pi
return dang
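# Hedged usage sketch: diffraction graze angle for a 1.5 degree incidence
# graze angle. The 266 nm wavelength and 160 nm groove spacing are the
# constants hard-coded in the function above.
print(diffractionAngle(1.5))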
| 5,340,191
|
def check_conf(conf_filepath):
"""Wrap haproxy -c -f.
Args:
conf_filepath: Str, path to an haproxy configuration file.
    Returns:
      A tuple (valid_config, error_output): valid_config is True if the
      configuration passed parsing; error_output holds haproxy's output on
      failure and is otherwise None.
"""
try:
subprocess.check_output(['haproxy', '-c', '-f', conf_filepath],
stderr=subprocess.STDOUT)
valid_config = True
error_output = None
except subprocess.CalledProcessError as e:
valid_config = False
error_output = e.output
return (valid_config, error_output)
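# Hedged usage sketch: the config path is hypothetical, and the haproxy
# binary must be on PATH for the subprocess call to succeed.
valid, errors = check_conf('/etc/haproxy/haproxy.cfg')
if not valid:
    print(errors)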
| 5,340,192
|
def get_subject(email):
"""
Takes an email Message object and returns the Subject as a string,
decoding base64-encoded subject lines as necessary.
"""
subject = email.get('Subject', '')
result = decode_header(subject)
    subject, charset = result[0]
    if isinstance(subject, str):
        return subject
    else:
        return subject.decode(charset or 'utf-8', errors='replace')
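# Hedged usage sketch: parse a made-up raw message with the standard library
# and extract its subject (decode_header is assumed to be imported from
# email.header in this snippet's module).
from email import message_from_string

msg = message_from_string("Subject: Quarterly report\nFrom: a@example.com\n\nHi.")
print(get_subject(msg))  # -> 'Quarterly report'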
| 5,340,193
|
def distort(dist_mat, mat):
"""Apply distortion matrix to lattice vectors or sites.
Coordinates are assumed to be Cartesian."""
array = np.array(mat)
for i in range(len(mat)):
array[i, :] = np.array([np.sum(dist_mat[0, :]*mat[i, :]),
np.sum(dist_mat[1, :]*mat[i, :]),
np.sum(dist_mat[2, :]*mat[i, :])])
return array
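# Hedged usage sketch: apply a small shear to the Cartesian lattice vectors
# of a unit cube; the identity matrix would leave them unchanged.
import numpy as np

lattice = np.eye(3)
shear = np.array([[1.0, 0.1, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0]])
print(distort(shear, lattice))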
| 5,340,194
|
def sample(
sampler: Union[typing.RelativeTime, Observable[Any]],
scheduler: Optional[abc.SchedulerBase] = None,
) -> Callable[[Observable[_T]], Observable[_T]]:
"""Samples the observable sequence at each interval.
.. marble::
:alt: sample
---1-2-3-4------|
[ sample(4) ]
----1---3---4---|
Examples:
>>> res = sample(sample_observable) # Sampler tick sequence
>>> res = sample(5.0) # 5 seconds
Args:
sampler: Observable used to sample the source observable **or** time
interval at which to sample (specified as a float denoting
seconds or an instance of timedelta).
scheduler: Scheduler to use only when a time interval is given.
Returns:
An operator function that takes an observable source and
returns a sampled observable sequence.
"""
from ._sample import sample_
return sample_(sampler, scheduler)
| 5,340,195
|
def test_positive_judgement(module_headers, observable, observable_type):
"""Perform testing for enrich observe observables endpoint to get
judgement for observable from CyberCrime Tracker
ID: CCTRI-844-4e53f335-d803-4c3e-a582-af02c94cf727
Steps:
1. Send request to enrich deliberate observable endpoint
Expectedresults:
1. Check that data in response body contains expected judgement for
observable from CyberCrime Tracker
Importance: Critical
"""
observables = [{'type': observable_type, 'value': observable}]
response_from_all_modules = enrich_observe_observables(
payload=observables,
**{'headers': module_headers}
)
response_from_cybercrime_module = get_observables(
response_from_all_modules, MODULE_NAME)
assert response_from_cybercrime_module['module'] == MODULE_NAME
assert response_from_cybercrime_module['module_instance_id']
assert response_from_cybercrime_module['module_type_id']
judgements = response_from_cybercrime_module['data']['judgements']
assert len(judgements['docs']) > 0
for judgement in judgements['docs']:
assert judgement['valid_time']['start_time']
assert judgement['valid_time']['end_time']
assert judgement['type'] == 'judgement'
assert judgement['schema_version']
assert judgement['source'] == MODULE_NAME
assert judgement['disposition'] == 2
assert judgement['observable'] == observables[0]
assert judgement['source_uri'] == (f'{CYBER_CRIME_URL}'
f'/index.php?search={observable}')
assert judgement['disposition_name']
assert judgement['priority'] == 90
assert judgement['id'].startswith('transient:judgement-')
assert judgement['severity'] == 'Medium'
assert judgement['confidence'] == 'Low'
assert judgements['count'] == len(judgements['docs']) <= CTR_ENTITIES_LIMIT
| 5,340,196
|
def get_genome_dir(infra_id, genver=None, annver=None, key=None):
"""Return the genome directory name from infra_id and optional arguments."""
dirname = f"{infra_id}"
if genver is not None:
dirname += f".gnm{genver}"
if annver is not None:
dirname += f".ann{annver}"
if key is not None:
dirname += f".{key}"
return dirname
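# Usage sketch with made-up identifiers, showing that only the provided
# parts are appended to the directory name.
print(get_genome_dir('glyma.Wm82', genver=2, annver=1, key='RVB6'))  # glyma.Wm82.gnm2.ann1.RVB6
print(get_genome_dir('glyma.Wm82', genver=2))                        # glyma.Wm82.gnm2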
| 5,340,197
|
def stream_with_context(func: Callable) -> Callable:
"""Share the current request context with a generator.
This allows the request context to be accessed within a streaming
generator, for example,
.. code-block:: python
@app.route('/')
def index() -> AsyncGenerator[bytes, None]:
@stream_with_context
async def generator() -> bytes:
yield request.method.encode()
yield b' '
yield request.path.encode()
return generator()
"""
request_context = _request_ctx_stack.top.copy()
@wraps(func)
async def generator(*args: Any, **kwargs: Any) -> Any:
async with request_context:
async for data in func(*args, **kwargs):
yield data
return generator
| 5,340,198
|
def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5, weight=0.5, plot=False):
"""
Event Detection (silence removal)
ARGUMENTS:
- signal: the input audio signal
- sampling_rate: sampling freq
- st_win, st_step: window size and step in seconds
    - smooth_window: (optional) smoothing window (in seconds)
    - weight: (optional) weight factor (0 < weight < 1);
      the higher, the more strict
    - plot: (optional) True if results are to be plotted
    RETURNS:
    - seg_limits: list of segment limits in seconds (e.g. [[0.1, 0.9],
      [1.4, 3.0]] means that the resulting segments are
      (0.1 - 0.9) seconds and (1.4 - 3.0) seconds)
"""
if weight >= 1:
weight = 0.99
if weight <= 0:
weight = 0.01
# Step 1: feature extraction
# signal = audioBasicIO.stereo_to_mono(signal)
st_feats, _ = feature_extraction(signal, sampling_rate,
st_win * sampling_rate,
st_step * sampling_rate)
# Step 2: train binary svm classifier of low vs high energy frames
# keep only the energy short-term sequence (2nd feature)
st_energy = st_feats[1, :]
en = np.sort(st_energy)
# number of 10% of the total short-term windows
st_windows_fraction = int(len(en) / 10)
# compute "lower" 10% energy threshold
low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15
# compute "higher" 10% energy threshold
high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15
# get all features that correspond to low energy
low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]
# get all features that correspond to high energy
high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]
# form the binary classification task and ...
features = [low_energy.T, high_energy.T]
# normalize and train the respective svm probabilistic model
# (ONSET vs SILENCE)
features_norm, mean, std = normalize_features(features)
svm = train_svm(features_norm, 1.0)
# Step 3: compute onset probability based on the trained svm
prob_on_set = []
for index in range(st_feats.shape[1]):
# for each frame
cur_fv = (st_feats[:, index] - mean) / std
# get svm probability (that it belongs to the ONSET class)
prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])
prob_on_set = np.array(prob_on_set)
# smooth probability:
prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)
# Step 4A: detect onset frame indices:
prog_on_set_sort = np.sort(prob_on_set)
# find probability Threshold as a weighted average
# of top 10% and lower 10% of the values
nt = int(prog_on_set_sort.shape[0] / 10)
threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +
weight * np.mean(prog_on_set_sort[-nt::]))
max_indices = np.where(prob_on_set > threshold)[0]
# get the indices of the frames that satisfy the thresholding
index = 0
seg_limits = []
time_clusters = []
# Step 4B: group frame indices to onset segments
while index < len(max_indices):
# for each of the detected onset indices
cur_cluster = [max_indices[index]]
if index == len(max_indices)-1:
break
while max_indices[index+1] - cur_cluster[-1] <= 2:
cur_cluster.append(max_indices[index+1])
index += 1
if index == len(max_indices)-1:
break
index += 1
time_clusters.append(cur_cluster)
seg_limits.append([cur_cluster[0] * st_step,
cur_cluster[-1] * st_step])
# Step 5: Post process: remove very small segments:
min_duration = 0.2
seg_limits_2 = []
for s_lim in seg_limits:
if s_lim[1] - s_lim[0] > min_duration:
seg_limits_2.append(s_lim)
return seg_limits_2
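# Hedged usage sketch: the wav file is hypothetical and the signal is assumed
# to already be mono; the helper functions used inside silence_removal
# (feature_extraction, normalize_features, train_svm, smooth_moving_avg)
# must be importable in this module.
from scipy.io import wavfile

sampling_rate, signal = wavfile.read('speech.wav')
segments = silence_removal(signal, sampling_rate, st_win=0.05, st_step=0.05)
print(segments)  # e.g. [[0.1, 0.9], [1.4, 3.0]]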
| 5,340,199
|