| content (stringlengths 22–815k) | id (int64 0–4.91M) |
|---|---|
def Ignore(el, scopes, sc):
"""Literal RegExpLiteral Directive EmptyStatement DebuggerStatement ThrowStatement UpdateExpression
ImportExpression TemplateLiteral ContinueStatement BreakStatement ThisExpression ObjectPattern ArrayPattern"""
# I assume that template strings will be used only for strings, not for DOM elements.
| 16,700
|
def history_save_to_txt(path, data):
""" Cette fonction permet de sauvegarder l'historique dans un fichier txt.
Args:
path (str): Fichiers TXT correspondants
data (dict): Donnees à déposer dans le fichier
"""
count_round = len(data["history"])
success = False
out = False
give_up = False
draw = False
history = ""
difficulty = str(data["difficulty"])
croupier_premier_round = str(data["croupier_premier_round"])
croupier_value_final = str(data["croupier_value_final"])
if count_round >= 1:
        success = bool(data["success"])
        out = bool(data["out"])
        give_up = bool(data["give_up"])
        draw = bool(data["draw"])
for key, items in data["history"].items():
history = history + str(key) + ":" + str(items) + ","
string = str(
count_round
) + "," + difficulty + "," + croupier_premier_round + "," + croupier_value_final + "," + history + str(
success) + "," + str(out) + "," + str(give_up) + "," + str(
draw) + "\n"
with open(path, 'a+') as f:
f.write(string)
f.close()
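# Usage sketch (hypothetical data): the dict keys below mirror the ones the
# function reads; the values and file name are illustrative only.
example_data = {
    "history": {1: "hit", 2: "stand"},
    "difficulty": 2,
    "croupier_premier_round": 10,
    "croupier_value_final": 19,
    "success": True,
    "out": False,
    "give_up": False,
    "draw": False,
}
history_save_to_txt("history.txt", example_data)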
| 16,701
|
def prod(a, axis=None, dtype=None, out=None):
"""
Product of array elements over a given axis.
Parameters
----------
a : array_like
Elements to multiply.
axis : None or int or tuple of ints, optional
Axis or axes along which a multiply is performed.
        The default (`axis` = `None`) is to perform a multiply over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a multiply is performed on multiple
axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
        elements are multiplied. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Array into which the output is placed. By default, a new array is
created. If `out` is given, it must be of the appropriate shape
(the shape of `a` with `axis` removed, i.e.,
``numpy.delete(a.shape, axis)``). Its type is preserved. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
    product_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
Examples
--------
>>> np.prod([0.5, 1.5])
2.0
>>> np.prod([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.prod([[0, 1], [0, 5]])
6
>>> np.prod([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.prod([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).prod(dtype=np.int8)
-128
"""
if not bhary.check(a) and not bhary.check(out):
return numpy.prod(a, axis=axis, dtype=dtype, out=out)
else:
if dtype is not None:
a = array_create.array(a, dtype=dtype)
return ufuncs.multiply.reduce(a, axis=axis, out=out)
| 16,702
|
def demo(printer, **kwargs):
"""
Prints demos. Called when CLI is passed `demo`. This function
uses the DEMO_FUNCTIONS dictionary.
:param printer: A printer from escpos.printer
:param kwargs: A dict with a key for each function you want to test. It's
in this format since it usually comes from argparse.
"""
for demo_choice in kwargs.keys():
command = getattr(
printer,
demo_choice.replace("barcodes_a", "barcode").replace(
"barcodes_b", "barcode"
),
)
for params in DEMO_FUNCTIONS[demo_choice]:
command(**params)
printer.cut()
| 16,703
|
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Initialize dict with all pages with count 0
pr_sample = dict([(page, 0) for page in corpus])
sample_page = None
# Iterate over n samples and increment page each time it is selected
for i in range(n):
if sample_page:
transition_dist = transition_model(corpus, sample_page, damping_factor)
sample_page = random.choices(list(transition_dist.keys()), weights=list(transition_dist.values()), k=1)[0]
else:
sample_page = random.choice(list(pr_sample.keys()))
# Record sample selection for each time it is chosen
pr_sample[sample_page] += 1
# Apply overall percentage by dividing each page count by n
for page in pr_sample:
pr_sample[page] /= n
return pr_sample
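# Minimal usage sketch. `transition_model` is assumed to exist elsewhere in the
# module; the stub below is a simplified stand-in (damping split between
# outgoing links and all pages) so the snippet runs on its own.
import random

def transition_model(corpus, page, damping_factor):
    links = corpus[page] or set(corpus)  # dangling pages link to every page
    dist = {p: (1 - damping_factor) / len(corpus) for p in corpus}
    for p in links:
        dist[p] += damping_factor / len(links)
    return dist

toy_corpus = {"a": {"b"}, "b": {"a", "c"}, "c": {"a"}}
ranks = sample_pagerank(toy_corpus, damping_factor=0.85, n=10000)
print(ranks)  # estimated values sum to 1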
| 16,704
|
def guess_layout_cols_lr(mr,
buf,
alg_prefix,
layout_alg_force=None,
verbose=False):
"""
Assume bits are contiguous in columns
wrapping around at the next line
Least significant bit at left
    Can either start in the very upper left of the bit column and go right
    Or can start in the upper right of the bit column and go left
Related permutations are handled by flipx, rotate, etc
"""
# Must be able to divide input
txtw, _txth = mr.txtwh()
if txtw % mr.word_bits() != 0:
verbose and "guess_layout_cols_lr: bad width"
return
bit_cols = txtw // mr.word_bits()
# upper left start moving right
def ul_oi2cr(offset, maski):
bitcol = offset % bit_cols
col = maski * bit_cols + bitcol
row = offset // bit_cols
return (col, row)
name = "cols-right"
if layout_alg_force is None or layout_alg_force == name:
yield try_oi2cr(mr, ul_oi2cr, buf), alg_prefix + name
# upper right start moving left
def ur_oi2cr(offset, maski):
bitcol = bit_cols - 1 - offset % bit_cols
col = maski * bit_cols + bitcol
row = offset // bit_cols
return (col, row)
name = "cols-left"
if layout_alg_force is None or layout_alg_force == name:
yield try_oi2cr(mr, ur_oi2cr, buf), alg_prefix + name
# Used in TMS320C15
# even bits start from left side, odd bits from right
# Basically alternating cols-right and cols-left
# they move towards each other and then start again on the next line
if mr.word_bits() % 2 == 0:
def squeeze_lr_oi2cr(offset, maski):
left_bit = maski & 0xFFFE
if maski % 2 == 0:
# cols-right
bitcol = offset % bit_cols
else:
# cols-left (offset by left_bit)
bitcol = 2 * bit_cols - 1 - offset % bit_cols
col = left_bit * bit_cols + bitcol
row = offset // bit_cols
return (col, row)
name = "squeeze-lr"
if layout_alg_force is None or layout_alg_force == name:
yield try_oi2cr(mr, squeeze_lr_oi2cr, buf), alg_prefix + name
| 16,705
|
def decrease(rse_id, account, files, bytes, session=None):
"""
Decreases the specified counter by the specified amount.
:param rse_id: The id of the RSE.
:param account: The account name.
:param files: The amount of files.
:param bytes: The amount of bytes.
:param session: The database session in use.
"""
return increase(rse_id=rse_id, account=account, files=-files, bytes=-bytes, session=session)
| 16,706
|
def write_vec_flt(file_or_fd, v, key=''):
""" write_vec_flt(f, v, key='')
Write a binary kaldi vector to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_flt(filename, vec)
Example of writing arkfile:
    with open(ark_file,'wb') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_flt(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if v.dtype == 'float32': fd.write('FV '.encode())
elif v.dtype == 'float64': fd.write('DV '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % v.dtype)
# Dim,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, v.shape[0])) # dim
# Data,
fd.write(v.tobytes())
finally:
if fd is not file_or_fd : fd.close()
| 16,707
|
def get_import_error(import_error_id, session):
"""
Get an import error
"""
error = session.query(ImportError).filter(ImportError.id == import_error_id).one_or_none()
if error is None:
raise NotFound("Import error not found")
return import_error_schema.dump(error)
| 16,708
|
def payment_reversal(**kw):
""" 冲正交易 """
req_packet = py8583.Iso8583(IsoSpec=py8583spec.IsoSpec1987BCD())
req_packet.MTI('0400')
req_packet.TPDU('6005810000')
req_packet.HEADER('603100000000')
req_packet.FieldData(3, '190000') # 交易处理码
req_packet.FieldData(4, kw.get('amount', '1').rjust(12, '0')) # 交易金额
req_packet.FieldData(11, kw.get('TraceNo', None)) # 原交易流水
req_packet.FieldData(14, '2912') # 卡有效期
req_packet.FieldData(22, '051') # 服务点输入方式
req_packet.FieldData(23, kw.get('CardOrder', '000')) # 卡序列号
req_packet.FieldData(25, '81') # 服务点条件码
req_packet.FieldData(35, kw.get('Track2', '6212142000000000012=29122206899031006')) # 二磁道数据
req_packet.FieldData(36, kw.get('Track3', None)) # 三磁道数据
req_packet.FieldData(38, kw.get('AuthNo', None)) # 原交易授权码
req_packet.FieldData(39, '96') # 冲正原因
req_packet.FieldData(41, kw.get('TerminalNo', '52010009')) # 终端代码
req_packet.FieldData(42, kw.get('MerchantNo', '898520154110004')) # 商户代码
req_packet.FieldData(48, 'KP77SG0C26323520140909356184 70000000201809#') #行业特定信息
req_packet.FieldData(49, '156') # 交易货币代码
req_packet.FieldData(55, kw.get('ICData', None)) # IC卡数据域
req_packet.FieldData(60, kw.get('Field60', None))
req_packet.FieldData(61, kw.get('Field61', None)) # 原始交易信息
req_packet.FieldData(64, '0000000000000000')
req_packet.FieldData(64, calc_mac_cbc(MAB=req_packet.BuildIso()[11:-8])) # 报文鉴别码
print("冲正交易: ")
req_packet.PrintMessage()
data = req_packet.BuildIso()
data = struct.pack('!H', len(data)) + data
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
py8583.MemDump("Sending: ", data)
sock.send(data)
data = sock.recv(4096)
py8583.MemDump('Received: ', data)
sock.close()
parse_package(data)
| 16,709
|
def get_violations(nsi_uuid=None):
"""Returns info on all SLA violations.
:param nsi_uuid: (Default value = None) uuid of a service instance.
:returns: A list. [0] is a bool with the result. [1] is a list of
SLA violations associated to a service instance.
"""
url = env.sl_violations_api
if nsi_uuid:
url = env.sl_violations_api + '/service/' + nsi_uuid
# get current list of violations
resp = requests.get(url, timeout=env.timeout)
if resp.status_code != 200:
LOG.debug("Request returned with " + (str(resp.status_code)))
error = resp.text
return False, error
violations = json.loads(resp.text)
return True, violations
| 16,710
|
def roll_dice():
""" simulate roll dice """
results = []
for num in range(times):
result = randint(1, sides)
results.append(result)
return results
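# Usage sketch: the function reads `times`, `sides` and `randint` from module
# scope, so they must be defined before it is called (values here are examples).
from random import randint

times, sides = 5, 6
print(roll_dice())  # e.g. [3, 6, 1, 4, 2]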
| 16,711
|
def _read_calib_SemKITTI(calib_path):
"""
:param calib_path: Path to a calibration text file.
:return: dict with calibration matrices.
"""
calib_all = {}
with open(calib_path, 'r') as f:
for line in f.readlines():
if line == '\n':
break
key, value = line.split(':', 1)
calib_all[key] = np.array([float(x) for x in value.split()])
# reshape matrices
calib_out = {}
calib_out['P2'] = calib_all['P2'].reshape(3, 4) # 3x4 projection matrix for left camera
calib_out['Tr'] = np.identity(4) # 4x4 matrix
calib_out['Tr'][:3, :4] = calib_all['Tr'].reshape(3, 4)
return calib_out
| 16,712
|
def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
"""Fast CUDA implementation of `bias_act()` using custom ops.
"""
# Parse arguments.
assert clamp is None or clamp >= 0
spec = activation_funcs[act]
alpha = float(alpha if alpha is not None else spec.def_alpha)
gain = float(gain if gain is not None else spec.def_gain)
clamp = float(clamp if clamp is not None else -1)
# Lookup from cache.
key = (dim, act, alpha, gain, clamp)
if key in _bias_act_cuda_cache:
return _bias_act_cuda_cache[key]
# Forward op.
class BiasActCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, b): # pylint: disable=arguments-differ
ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
x = x.contiguous(memory_format=ctx.memory_format)
b = b.contiguous() if b is not None else _null_tensor
y = x
if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
ctx.save_for_backward(
x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
y if 'y' in spec.ref else _null_tensor)
return y
@staticmethod
def backward(ctx, dy): # pylint: disable=arguments-differ
dy = dy.contiguous(memory_format=ctx.memory_format)
x, b, y = ctx.saved_tensors
dx = None
db = None
if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
dx = dy
if act != 'linear' or gain != 1 or clamp >= 0:
dx = BiasActCudaGrad.apply(dy, x, b, y)
if ctx.needs_input_grad[1]:
db = dx.sum([i for i in range(dx.ndim) if i != dim])
return dx, db
# Backward op.
class BiasActCudaGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
ctx.save_for_backward(
dy if spec.has_2nd_grad else _null_tensor,
x, b, y)
return dx
@staticmethod
def backward(ctx, d_dx): # pylint: disable=arguments-differ
d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
dy, x, b, y = ctx.saved_tensors
d_dy = None
d_x = None
d_b = None
d_y = None
if ctx.needs_input_grad[0]:
d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
if spec.has_2nd_grad and ctx.needs_input_grad[2]:
d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
return d_dy, d_x, d_b, d_y
# Add to cache.
_bias_act_cuda_cache[key] = BiasActCuda
return BiasActCuda
| 16,713
|
def root_nodes(g: Mapping):
"""
>>> g = dict(a='c', b='ce', c='abde', d='c', e=['c', 'z'], f={})
>>> sorted(root_nodes(g))
['f']
    Note that `f` is present: isolated nodes are considered to be both
    root and leaf nodes.
"""
nodes_having_parents = set(chain.from_iterable(g.values()))
return set(g) - set(nodes_having_parents)
| 16,714
|
def _GetRemoteFileID(local_file_path):
"""Returns the checked-in hash which identifies the name of file in GCS."""
hash_path = local_file_path + '.sha1'
with open(hash_path, 'rb') as f:
return f.read(1024).rstrip()
| 16,715
|
def stress_stress(
bond_array_1, c1, etypes1, bond_array_2, c2, etypes2, sig, ls, r_cut, cutoff_func
):
"""2-body multi-element kernel between two partial stress components
accelerated with Numba.
Args:
bond_array_1 (np.ndarray): 2-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 2-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
sig (float): 2-body signal variance hyperparameter.
ls (float): 2-body length scale hyperparameter.
r_cut (float): 2-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Return:
        np.ndarray: 6x6 matrix of 2-body partial-stress kernel values.
"""
kernel_matrix = np.zeros((6, 6))
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
ls3 = ls2 * ls2
sig2 = sig * sig
for m in range(bond_array_1.shape[0]):
ri = bond_array_1[m, 0]
e1 = etypes1[m]
for n in range(bond_array_2.shape[0]):
e2 = etypes2[n]
# check if bonds agree
if (c1 == c2 and e1 == e2) or (c1 == e2 and c2 == e1):
rj = bond_array_2[n, 0]
r11 = ri - rj
D = r11 * r11
s1 = 0
for d1 in range(3):
ci = bond_array_1[m, d1 + 1]
B = r11 * ci
fi, fdi = cutoff_func(r_cut, ri, ci)
for d2 in range(d1, 3):
coordinate_1 = bond_array_1[m, d2 + 1] * ri
s2 = 0
for d3 in range(3):
cj = bond_array_2[n, d3 + 1]
A = ci * cj
C = r11 * cj
fj, fdj = cutoff_func(r_cut, rj, cj)
for d4 in range(d3, 3):
coordinate_2 = bond_array_2[n, d4 + 1] * rj
force_kern = force_helper(
A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, sig2
)
kernel_matrix[s1, s2] += (
force_kern * coordinate_1 * coordinate_2
)
s2 += 1
s1 += 1
return kernel_matrix / 4
| 16,716
|
def update_plugins_secondary_variables_on_tracer_solver(ctx: "void*") -> "int":
"""
**c++ signature** : ``HOOK_UPDATE_PLUGINS_SECONDARY_VARIABLES_ON_TRACER_SOLVER(void* ctx)``
Internal simulator hook to update plugin's secondary variables in the Tracer Solver scope.
Tracer Solver is used to solve the tracer transport equation.
This is called as the last step on ALFAsim's Tracer Solver update variables workflow.
:param ctx: ALFAsim's plugins context
:returns: Return OK if successful or anything different if failed
Example of usage:
.. code-block:: c++
:linenos:
:emphasize-lines: 1
HOOK_UPDATE_PLUGINS_SECONDARY_VARIABLES_ON_TRACER_SOLVER(ctx)
{
const char* plugin_id = get_plugin_id()
int errcode = -1;
int size_t = -1;
int size_p_var = -1;
int liq_id = -1;
errcode = alfasim_sdk_api.get_field_id(
ctx, &oil_id, "oil");
double* tracer_mass_fraction;
VariableScope global_OnCenters = {
GridScope::FACE,
MultiFieldDescriptionScope::FIELD,
TimestepScope::CURRENT
}
// Tracer information
void* tracer_ref;
errcode = alfasim_sdk_api.get_tracer_ref_by_name(
ctx,
&tracer_ref,
"my_tracer", // Added by User interface
plugin_id);
int tracer_id = -1;
errcode = alfasim_sdk_api.get_tracer_id(
ctx, &tracer_id, tracer_ref);
double *tracer_mass_fraction
errcode = alfasim_sdk_api.get_simulation_tracer_array(
ctx,
&tracer_mass_fraction,
(char*) "phi",
global_OnCenters,
tracer_id,
0, // GLOBAL
&size_t);
// Plugin secondary variable array
double* plugin_var;
errcode = alfasim_sdk_api.get_plugin_variable(
ctx,
(void**) (&plugin_var),
name,
0, // GLOBAL
TimestepScope::CURRENT,
&size_p_var);
if (size_t != size_p_var){
return OUT_OF_BOUNDS;
}
for (int i =0; i < size_t; ++i){
// Do some calculations with plugin_var
// using tracer_mass_fraction values
}
return OK;
}
Note that functions like :cpp:func:`get_tracer_ref_by_name`, :cpp:func:`get_tracer_id` and
:cpp:func:`get_simulation_tracer_array` were used to obtain information related to tracers.
"""
| 16,717
|
def is_color_rgb(color):
"""Is a color in a valid RGB format.
Parameters
----------
color : obj
The color object.
Returns
-------
bool
True, if the color object is in RGB format.
False, otherwise.
Examples
--------
>>> color = (255, 0, 0)
>>> is_color_rgb(color)
True
>>> color = (1.0, 0.0, 0.0)
>>> is_color_rgb(color)
True
>>> color = (1.0, 0, 0)
>>> is_color_rgb(color)
False
>>> color = (255, 0.0, 0.0)
>>> is_color_rgb(color)
False
>>> color = (256, 0, 0)
>>> is_color_rgb(color)
False
"""
if isinstance(color, (tuple, list)):
if len(color) == 3:
if all(isinstance(c, float) for c in color):
if all(c >= 0.0 and c <= 1.0 for c in color):
return True
elif all(isinstance(c, int) for c in color):
if all(c >= 0 and c <= 255 for c in color):
return True
return False
| 16,718
|
def get_databases(
catalog_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None
) -> Iterator[Dict[str, Any]]:
"""Get an iterator of databases.
Parameters
----------
catalog_id : str, optional
The ID of the Data Catalog from which to retrieve Databases.
If none is provided, the AWS account ID is used by default.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Iterator[Dict[str, Any]]
Iterator of Databases.
Examples
--------
>>> import awswrangler as wr
>>> dbs = wr.catalog.get_databases()
"""
client_glue: boto3.client = _utils.client(service_name="glue", session=boto3_session)
paginator = client_glue.get_paginator("get_databases")
response_iterator = paginator.paginate(**_catalog_id(catalog_id=catalog_id))
for page in response_iterator:
for db in page["DatabaseList"]:
yield db
| 16,719
|
def generate_html(collections):
"""This function is used to generate the HTML file to browse the photo
    gallery.
"""
print('generating HTML:')
collection_info = list()
# Sorting collections for sorted display
collections.sort()
for collection in collections:
in_path = os.path.join(GALLERY_PATH, collection)
        suffixes = ('*.jpg', '*.jpeg')
photos_origin = list()
for suffix in suffixes:
path = glob.glob(in_path + '/' + suffix)
photos_origin.extend(path)
photos_origin.sort()
photos_info = list()
for photo in photos_origin:
path, file = os.path.split(photo)
            # TODO: Use a more elegant way to get exif information
exif_info = Image.open(photo)._getexif()
exif_description = DEFAULT_IMG_DESC
if exif_info is None:
print('Image ' + file + ' has no Exif information')
else:
# Exif: user comment
if 37510 in exif_info.keys():
# removing leading \x00's where ever this come from
exif_decoded = exif_info[37510].decode('utf-8').lstrip('\x00')
if exif_decoded:
exif_description = exif_decoded
else:
print('Image ' + file +
' has no description, using default')
# Exif: image description
elif 270 in exif_info.keys():
# removing leading \x00's where ever this come from
exif_decoded = exif_info[270].decode('utf-8').lstrip('\x00')
if exif_decoded:
exif_description = exif_decoded
else:
print('Image ' + file +
' has no description, using default')
else:
print('Image ' + file +
' has no description, using default')
info = {'origin': photo,
'thumbnail': os.path.join(path, 'thumbnails', file),
'description': exif_description
}
photos_info.append(info)
collection_info.append({'name': collection, 'photos': photos_info})
template_base = os.path.join('themes', THEME)
loader = jinja2.FileSystemLoader(template_base)
env = jinja2.Environment(loader=loader)
template = env.get_template('index.html')
render = template.render(version=VERSION,
sitename=SITENAME,
thumbnail_size=SIZE,
collections=collection_info)
with open(os.path.join(OUTPUT_PATH, 'index.html'), 'w') as file_writer:
file_writer.write(render)
print(' {:>80}'.format('[Done]'))
| 16,720
|
def edit_post_svc(current_user, id, content):
"""
Updates post content.
:param current_user:
:param id:
:param content:
:return:
"""
post = single_post_svc(id)
if post is None or post.user_id != current_user:
return None
post.content = content
db.session.commit()
return True
| 16,721
|
def free_free_absorp_coefPQ(n_e,n_i,T,f):
"""Returns a physical quantity for the free-free absorption coefficient
given the electron density, ion density, kinetic temperature and frequency
as physical quantities. From Shklovsky (1960) as quoted by Kraus (1966)."""
value = 9.8e-13 * n_e.inBaseUnits().value * n_i.inBaseUnits().value \
* M.pow(T.inBaseUnits().value,-1.5) * M.pow(f.inBaseUnits().value,-2) \
* (19.8 + M.log(M.pow(T.inBaseUnits().value,1.5)/f.inBaseUnits().value))
return P.pq(value,'1/m')
| 16,722
|
def module_version(module_lint_object, module):
"""
Verifies that the module has a version specified in the ``modules.json`` file
It checks whether the module has an entry in the ``modules.json`` file
containing a commit SHA. If that is true, it verifies that there are no
newer version of the module available.
"""
modules_json_path = os.path.join(module_lint_object.dir, "modules.json")
# Verify that a git_sha exists in the `modules.json` file for this module
try:
module_entry = module_lint_object.modules_json["repos"][module_lint_object.modules_repo.name][
module.module_name
]
git_sha = module_entry["git_sha"]
module.git_sha = git_sha
module.passed.append(("git_sha", "Found git_sha entry in `modules.json`", modules_json_path))
# Check whether a new version is available
try:
module_git_log = nf_core.modules.module_utils.get_module_git_log(module.module_name)
if git_sha == module_git_log[0]["git_sha"]:
module.passed.append(("module_version", "Module is the latest version", module.module_dir))
else:
module.warned.append(("module_version", "New version available", module.module_dir))
except UserWarning:
module.warned.append(("module_version", "Failed to fetch git log", module.module_dir))
except KeyError:
module.failed.append(("git_sha", "No git_sha entry in `modules.json`", modules_json_path))
| 16,723
|
def test_profile_init():
"""
Test that the Profile module initializes correctly.
Expected result: profile is initialized
"""
profile = Profile(os.path.join(os.path.dirname(__file__), "profile.yaml"))
assert profile.profile
| 16,724
|
def plot_intensities(spikes, lambda0, W, theta, path):
"""Plot the intensity given event data and fitted parameters."""
seconds = pd.date_range(start='01-02-2013 10:30:00', end='01-02-2013 14:59:59', freq='S')
hours = pd.date_range(start='01-02-2013 10:30:00', end='01-02-2013 14:59:59', freq='H')
def plot_intensity_pair(Y, pair, label):
i, j = pair
plt.fill_between(seconds, y1=0, y2=Y[:, i], alpha=0.5, color='C0')
plt.fill_between(seconds, y1=0, y2=-Y[:, j], alpha=0.5, color='C3')
plt.ylabel(label, fontsize=8)
plt.yticks(fontsize=8)
plt.xticks(hours, [h.strftime('%H:%M:%S') for h in hours], fontsize=8)
plt.xlim([seconds[0], seconds[-1]])
# plt.legend(['Bids', 'Asks'], fontsize=8)
# Make a model
N, = lambda0.shape
params = {'bias': lambda0, 'weights': W, 'impulse': theta}
model = DiscreteNetworkPoisson(N=N, L=L, B=B, dt=dt, params=params)
# Compute intensity
T, _ = spikes.shape
convolved = model.convolve(spikes)
Lambda = model.calculate_intensity(spikes, convolved)
# Plot
plt.subplot(3, 2, 1)
plot_intensity_pair(Lambda, (0, 3), label='ADDS, Level 0')
plt.subplot(3, 2, 3)
plot_intensity_pair(Lambda, (1, 4), label='ADDS, Level 1')
plt.subplot(3, 2, 5)
plot_intensity_pair(Lambda, (2, 5), label='ADDS, Level 2')
plt.subplot(3, 2, 2)
plot_intensity_pair(Lambda, (6, 7), label='CANCELS, Level 1')
plt.subplot(3, 2, 4)
plot_intensity_pair(Lambda, (8, 9), label='CANCELS, Level 2')
plt.subplot(3, 2, 6)
plot_intensity_pair(Lambda, (10, 11), label='EXECUTES')
# Save figures
plt.tight_layout()
# plt.savefig(path + 'intensity_{}.eps'.format(date))
# plt.savefig(path + 'intensity_{}.pdf'.format(date))
plt.show()
plt.clf()
| 16,725
|
def train_net_alter(network, imdb_train_s, roidb_train_s, imdb_train_ws, roidb_train_ws, imdb_test, roidb_test, \
output_dir, pretrained_model=None, \
max_iters=80000, s_start_iter=0, s_end_iter=80000, ws_start_iter=0, ws_end_iter=80000, \
opt='adam', lr=5e-04, lr_scheduling='const', vis=False):
"""Train a Faster R-CNN using alternating mini-batches each from supervised and weakly supervised sets"""
#with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sw = SolverWrapper(sess, network, imdb_train_s, roidb_train_s, imdb_train_ws, roidb_train_ws, \
imdb_test, roidb_test, output_dir, pretrained_model=pretrained_model, \
opt=opt, lr=lr, lr_scheduling=lr_scheduling, vis=vis)
print 'Solving...'
sw.train_model_alter(sess, max_iters, s_start_iter, s_end_iter, ws_start_iter, ws_end_iter)
print 'done solving'
| 16,726
|
def setupBinaries(options):
"""Ensure that Cactus's C/C++ components are ready to run, and set up the environment."""
if options.latest:
os.environ["CACTUS_USE_LATEST"] = "1"
if options.binariesMode is not None:
# Mode is specified on command line
mode = options.binariesMode
else:
# Might be specified through the environment, or not, in which
# case the default is to use Docker.
mode = os.environ.get("CACTUS_BINARIES_MODE", "docker")
os.environ["CACTUS_BINARIES_MODE"] = mode
if mode == "docker":
# Verify Docker exists on the target system
from distutils.spawn import find_executable
if find_executable('docker') is None:
raise RuntimeError("The `docker` executable wasn't found on the "
"system. Please install Docker if possible, or "
"use --binariesMode local and add cactus's bin "
"directory to your PATH.")
# If running without Docker, verify that we can find the Cactus executables
elif mode == "local":
from distutils.spawn import find_executable
if find_executable('cactus_caf') is None:
raise RuntimeError("Cactus isn't using Docker, but it can't find "
"the Cactus binaries. Please add Cactus's bin "
"directory to your PATH (and run `make` in the "
"Cactus directory if you haven't already).")
if find_executable('ktserver') is None:
raise RuntimeError("Cactus isn't using Docker, but it can't find "
"`ktserver`, the KyotoTycoon database server. "
"Please install KyotoTycoon "
"(https://github.com/alticelabs/kyoto) "
"and add the binary to your PATH, or use the "
"Docker mode.")
else:
assert mode == "singularity"
jobStoreType, locator = Toil.parseLocator(options.jobStore)
if jobStoreType == "file":
# if not using a local jobStore, then don't set the `SINGULARITY_CACHEDIR`
# in this case, the image will be downloaded on each call
if options.containerImage:
imgPath = os.path.abspath(options.containerImage)
os.environ["CACTUS_USE_LOCAL_SINGULARITY_IMG"] = "1"
else:
# When SINGULARITY_CACHEDIR is set, singularity will refuse to store images in the current directory
if 'SINGULARITY_CACHEDIR' in os.environ:
imgPath = os.path.join(os.environ['SINGULARITY_CACHEDIR'], "cactus.img")
else:
imgPath = os.path.join(os.path.abspath(locator), "cactus.img")
os.environ["CACTUS_SINGULARITY_IMG"] = imgPath
| 16,727
|
def pixel_gain_mode_statistics(gmaps):
"""returns statistics of pixels in defferent gain modes in gain maps
gr0, gr1, gr2, gr3, gr4, gr5, gr6 = gmaps
"""
arr1 = np.ones_like(gmaps[0], dtype=np.int32)
return [np.sum(np.select((gr,), (arr1,), default=0)) for gr in gmaps]
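# Illustrative call with three boolean gain maps for a 2x2 sensor; real
# detector data would typically supply seven maps (gr0..gr6) as in the docstring.
import numpy as np

example_gmaps = (
    np.array([[True, False], [False, False]]),
    np.array([[False, True], [True, False]]),
    np.array([[False, False], [False, True]]),
)
print(pixel_gain_mode_statistics(example_gmaps))  # -> [1, 2, 1]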
| 16,728
|
def run_parallel(ds1, ds2):
""" Run the calculation using multiprocessing.
:param ds1: list with points
:param ds2: list with points
:return: list of distances
"""
pool = mp.Pool(processes=mp.cpu_count())
result = pool.starmap(euclidian_distance, [(p1, p2) for p1 in ds1 for p2 in ds2])
pool.close()
return result
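# Usage sketch; `euclidian_distance` and the `mp` alias are assumed to exist in
# this module, so both are stubbed here for a self-contained run.
import math
import multiprocessing as mp

def euclidian_distance(p1, p2):
    return math.dist(p1, p2)

if __name__ == "__main__":
    ds1 = [(0, 0), (1, 1)]
    ds2 = [(3, 4)]
    print(run_parallel(ds1, ds2))  # -> [5.0, 3.605...]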
| 16,729
|
def to_latin(name):
"""Convert all symbols to latin"""
symbols = (u"іїєабвгдеёжзийклмнопрстуфхцчшщъыьэюяІЇЄАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ",
u"iieabvgdeejzijklmnoprstufhzcss_y_euaIIEABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUA")
tr = {ord(a): ord(b) for a, b in zip(*symbols)}
translated_name = name.translate(tr)
translated_name = re.sub("[^A-Za-z0-9]", "_", translated_name)
return translated_name
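# Example: Cyrillic input is transliterated, then any remaining non-alphanumeric
# characters become underscores (`re` is the function's only dependency).
import re

print(to_latin(u"Привет мир"))  # -> "Privet_mir"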
| 16,730
|
def network_instance_create(network, host, attrs=None):
"""
    Creates a network_instance on the given network and host, configuring it with the given attributes.
    Parameter *network*:
        The parameter *network* must be a string identifying the network for
        which the network_instance is created.
    Parameter *host*:
The parameter *host* must be a string giving a host for the network_instance.
Parameter *attrs*:
The attributes of the network_instance can be given as the parameter *attrs*.
This parameter must be a dict of attributes if given. Attributes can
later be changed using :py:func:`network_instance_modify`.
Return value:
The return value of this method is the info dict of the new network_instance as
returned by :py:func:`resource_info`.
"""
if not attrs: attrs = {}
attrs = dict(attrs)
attrs.update(host=host, network=network)
res = NetworkInstance.create(attrs)
return res.info()
| 16,731
|
def update_get():
"""Fetches the state of the latest update job.
Returns:
On success, a JSON data structure with the following properties:
status: str describing the status of the job. Can be one of
["NOT_RUNNING", "DONE", "IN_PROGRESS"].
Example:
{
"status": "NOT_RUNNING"
}
Returns error object on failure.
"""
status, error = update.status.get()
if error:
return json_response.error(error), 500
return json_response.success({'status': str(status)})
| 16,732
|
def create_github_url(metadata, is_file=False):
"""Constrói a URL da API
Constrói a URL base da API do github a partir
dos dados presentes no metadata.
Args:
metadata: JSON com informações acerca do dataset.
is_file: FLAG usada pra sinalizar se o dataset é apenas um elemento.
"""
url_params = metadata['url'].split('/')
server_idx = url_params.index('github.com')
username = url_params[server_idx + 1]
repo = url_params[server_idx + 2]
data_path = metadata['path']
return ("https://raw.githubusercontent.com/{}/{}/master{}" if is_file else "https://api.github.com/repos/{}/{}/contents{}").format(username, repo, data_path)
| 16,733
|
def kge_2012(obs, sim, missing="drop", weighted=False, max_gap=30):
"""Compute the (weighted) Kling-Gupta Efficiency (KGE).
Parameters
----------
sim: pandas.Series
Series with the simulated values.
obs: pandas.Series
Series with the observed values.
missing: str, optional
string with the rule to deal with missing values. Only "drop" is
supported now.
weighted: bool, optional
Weight the values by the normalized time step to account for
irregular time series. Default is False.
max_gap: int, optional
maximum allowed gap period in days to use for the computation of the
weights. All time steps larger than max_gap are replace with the
max_gap value. Default value is 30 days.
Notes
-----
The (weighted) Kling-Gupta Efficiency [kling_2012]_ is computed as follows:
    .. math:: \\text{KGE} = 1 - \\sqrt{(r-1)^2 + (\\beta-1)^2 + (\\gamma-1)^2}
where :math:`\\beta = \\bar{x} / \\bar{y}` and :math:`\\gamma =
\\frac{\\bar{\\sigma}_x / \\bar{x}}{\\bar{\\sigma}_y / \\bar{y}}`. If
weighted equals True, the weighted mean, variance and pearson
correlation are used.
References
----------
.. [kling_2012] Kling, H., Fuchs, M., and Paulin, M. (2012). Runoff
conditions in the upper Danube basin under an ensemble of climate
change scenarios. Journal of Hydrology, 424-425:264 - 277.
"""
if missing == "drop":
obs = obs.dropna()
sim = sim.reindex(obs.index).dropna()
# Return nan if the time indices of the sim and obs don't match
if sim.index.size == 0:
logger.warning("Time indices of the sim and obs don't match.")
return nan
r = pearsonr(obs=obs, sim=sim, weighted=weighted, max_gap=max_gap)
mu_sim = mean(sim, weighted=weighted, max_gap=max_gap)
mu_obs = mean(obs, weighted=weighted, max_gap=max_gap)
beta = mu_sim / mu_obs
gamma = (std(sim, weighted=weighted, max_gap=max_gap) / mu_sim) / \
(std(obs, weighted=weighted, max_gap=max_gap) / mu_obs)
kge = 1 - sqrt((r - 1) ** 2 + (beta - 1) ** 2 + (gamma - 1) ** 2)
return kge
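# Minimal runnable sketch: the module-level helpers (pearsonr, mean, std with
# weighted/max_gap arguments, sqrt, nan, logger) are stubbed here with simple
# unweighted equivalents purely for illustration.
import logging
import pandas as pd
from numpy import sqrt, nan  # noqa: F401  (used inside kge_2012)

logger = logging.getLogger(__name__)

def pearsonr(obs, sim, weighted=False, max_gap=30):
    return obs.corr(sim)

def mean(series, weighted=False, max_gap=30):
    return series.mean()

def std(series, weighted=False, max_gap=30):
    return series.std()

idx = pd.date_range("2020-01-01", periods=5, freq="D")
obs_series = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0], index=idx)
sim_series = pd.Series([1.1, 1.9, 3.2, 3.8, 5.1], index=idx)
print(kge_2012(obs=obs_series, sim=sim_series))  # close to 1 for a good fit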
| 16,734
|
async def test_entity_device_info_update(opp, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
opp, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
)
| 16,735
|
def flatten(items):
"""Yield items from any nested iterable; see REF."""
for x in items:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
yield from flatten(x)
else:
yield x
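# Example: strings and bytes are treated as atomic and are not flattened further
# (the function expects `Iterable` to be imported, e.g. from collections.abc).
from collections.abc import Iterable

print(list(flatten([1, [2, (3, 4)], "ab"])))  # -> [1, 2, 3, 4, 'ab']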
| 16,736
|
def hms_to_dms(h, m, s):
"""
    Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
    tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s))
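# Usage sketch; hms_to_degrees / degrees_to_dms normally come from the same
# angle-utility module as this helper and are stubbed here with straightforward
# conversions so the example runs on its own.
import math

def hms_to_degrees(h, m, s):
    return 15.0 * (h + m / 60.0 + s / 3600.0)  # 24 h == 360 deg

def degrees_to_dms(d):
    sign = math.copysign(1.0, d)
    d = abs(d)
    degree = int(d)
    minute = int((d - degree) * 60)
    second = (d - degree - minute / 60.0) * 3600.0
    return (sign * degree, minute, second)

print(hms_to_dms(1, 30, 0))  # 1.5 h = 22.5 deg -> (22.0, 30, 0.0)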
| 16,737
|
def main(
date: datetime = YESTERDAY,
telescope: str = "LST1",
config: Path = DEFAULT_CFG,
simulate: bool = False,
):
"""Plot theta2 histograms for each source from a given date."""
log.setLevel(logging.INFO)
log.debug(f"Config: {config.resolve()}")
# Initial setup of global parameters
options.date = date
flat_date = date_to_dir(date)
options.tel_id = telescope
options.prod_id = get_prod_id()
options.dl2_prod_id = get_dl2_prod_id()
options.directory = analysis_path(options.tel_id)
dl2_directory = Path(cfg.get('LST1', 'DL2_DIR'))
highlevel_directory = destination_dir("HIGH_LEVEL", create_dir=True)
host = cfg.get('WEBSERVER', 'HOST')
cuts = toml.load(SELECTION_CUTS_FILE)
sources = get_source_list(date)
log.info(f"Sources: {sources}")
for source in sources:
df = pd.DataFrame()
runs = sources[source]
log.info(f"Source: {source}, runs: {runs}")
if simulate:
continue
for run in runs:
input_file = (
dl2_directory / flat_date / options.prod_id / options.dl2_prod_id /
f"dl2_LST-1.Run{run:05d}.h5"
)
df = pd.concat(
[df, pd.read_hdf(input_file, key=dl2_params_lstcam_key)]
)
selected_events = event_selection(data=df, cuts=cuts)
try:
true_source_position = extract_source_position(
data=selected_events,
observed_source_name=source
)
off_source_position = [element * -1 for element in true_source_position]
theta2_on = np.array(
compute_theta2(selected_events, true_source_position)
)
theta2_off = np.array(
compute_theta2(selected_events, off_source_position)
)
hist_on, hist_off, bin_edges_on, bin_edges_off, bin_center = create_hist(
theta2_on, theta2_off, cuts
)
text, box_color = lima_significance(
hist_on=hist_on,
hist_off=hist_off,
bin_edges_on=bin_edges_on,
bin_edges_off=bin_edges_off,
eff_time=get_effective_time(df)[0],
cuts=cuts
)
pdf_file = plot_theta2(
bin_center=bin_center,
hist_on=hist_on,
hist_off=hist_off,
legend_text=text,
box_color=box_color,
source_name=source,
date_obs=date,
runs=runs,
highlevel_dir=highlevel_directory,
cuts=cuts
)
if not simulate:
dest_directory = directory_in_webserver(
host=host,
datacheck_type="HIGH_LEVEL",
date=flat_date,
prod_id=options.prod_id
)
cmd = ["scp", pdf_file, f"{host}:{dest_directory}/."]
subprocess.run(cmd, capture_output=True, check=True)
except astropy.coordinates.name_resolve.NameResolveError:
log.warning(f"Source {source} not found in the catalog. Skipping.")
# TODO: get ra/dec from the TCU database instead
| 16,738
|
def color_calibration(
src_imgs,
src_color_space,
src_is_linear,
ref_imgs,
ref_color_space,
ref_is_linear,
verbose=False,
distance="de00",
):
"""Function that does color calibration for a given target image according to a given reference image
STEP1: load the colorcheckers from the src and ref images
STEP2: TODO: linearize the src and ref color checkers if necessary
STEP3: TODO: convert the src and ref color checkers into the same color space (usually the color space of the ref image)
STEP4: optimize the CCM to minimize the CIE2000 distance between the ref and calibrated target color checkers
    STEP5: compute the calibrated image with the optimized CCM
    Args:
        src_imgs (list): target images
        src_color_space (enum color_space): color space of the target images
        src_is_linear (bool): indicates whether the target images are linearized (sRGB or RGB)
        ref_imgs (list): reference images
        ref_color_space (enum color_space): color space of the reference images
        ref_is_linear (bool): indicates whether the reference images are linearized (sRGB or RGB)
"""
    # Parameters of the standard color checker with aruco tags
col_n = 6
row_n = 4
aruco_type = "DICT_5X5_50"
# load the colorcheckers from the src and ref images
src_colorchecker = None
ref_colorchecker = None
for img in src_imgs:
try:
color_checker = detect_colorchecker(img, row_n, col_n, aruco_type, verbose=verbose)
except SystemExit:
continue
if src_colorchecker is None:
src_colorchecker = color_checker
src_colorchecker = (src_colorchecker + color_checker) / 2
for img in ref_imgs:
try:
color_checker = detect_colorchecker(img, row_n, col_n, aruco_type, verbose=verbose)
except SystemExit:
continue
if ref_colorchecker is None:
ref_colorchecker = color_checker
ref_colorchecker = (ref_colorchecker + color_checker) / 2
# TODO: if the src has a different color space than the ref image, unify their color spaces
# use CCM_3x3 to find the optimized CCM, which brings src closer to ref
ccm = CCM_3x3(src_colorchecker, ref_colorchecker, distance=distance)
ccm_3x3, error = ccm.value()
calibrated_images = []
for img in src_imgs:
img = ccm.infer_image(img)
calibrated_images.append(img)
if verbose:
cv2.imshow("image after calibration", img)
cv2.imwrite("imgs/output_infered.png", img)
if verbose:
cv2.waitKey(0)
cv2.destroyAllWindows()
return ccm_3x3, error, calibrated_images
| 16,739
|
def getChiv6ch(mol):
"""
Chiv6h related to ring 6
"""
return getChivnch(mol, 6)
| 16,740
|
def test_trend_prec(tmpdir):
"""A Test Function the plot of the precipitation trend
Author: Raphael Tasch """
df_cities = pd.read_csv(cfg.world_cities)
dfi = df_cities.loc[df_cities.Name.str.contains('innsbruck', case=False,
na=False)].iloc[0]
df = core.get_cru_timeseries(dfi.Lon, dfi.Lat)
fig = graphics.plot_trend_prec(df)
# Check that the text is found in figure
ref = 'Precipitation trend at location (11.25°, 47.25°)'
test = [ref in t.get_text() for t in fig.findobj(mpl.text.Text)]
assert np.any(test)
# Check that figure is created
fpath = str(tmpdir.join('prec_trend.png'))
graphics.plot_trend_prec(df, filepath=fpath)
assert os.path.exists(fpath)
plt.close()
| 16,741
|
def is_outside_of_range(type_key: CLTypeKey, value: int) -> bool:
"""Returns flag indicating whether a value is outside of numeric range associated with the CL type.
"""
constraints = NUMERIC_CONSTRAINTS[type_key]
return value < constraints.MIN or value > constraints.MAX
| 16,742
|
def test_generate_circuit_raises():
"""Test raising errors in circuit generator."""
with pytest.raises(TypeError):
cirq.Circuit(generate_circuit_from_pauli_string(0, 'theta_0'))
with pytest.raises(TypeError):
cirq.Circuit(generate_circuit_from_pauli_string(
openfermion.FermionOperator('4^ 0 3^ 1', 1.0),
10))
| 16,743
|
def read_skel(dset, path):
"""
:param dset: name of dataset, either 'ntu-rgbd' or 'pku-mmd'
:param path: path to the skeleton file
:return:
"""
if dset == 'ntu-rgbd':
file = open(path, 'r')
lines = file.readlines()
num_lines = len(lines)
num_frames = int(lines[0])
# print(num_lines, num_frames)
line_id = 1
data = []
for i in range(num_frames):
num_skels = int(lines[line_id])
# print(num_skels)
joints = []
for _ in range(num_skels):
num_joints = int(lines[line_id+2])
# print(num_joints)
joint = []
for k in range(num_joints):
tmp = lines[line_id+3+k].rstrip().split(' ')
x_3d, y_3d, z_3d, x_depth, y_depth, x_rgb, y_rgb, orientation_w,\
orientation_x, orientation_y, orientation_z = list(
map(float, tmp[:-1]))
joint.append([x_3d, y_3d, z_3d])
joints.append(joint)
line_id += 2+num_joints
joints = np.array(joints)
data.append(joints)
line_id += 1
assert line_id == num_lines
elif dset == 'pku-mmd':
file = open(path, 'r')
lines = file.readlines()
# num_lines = len(lines)
data = []
for line in lines:
joints = list(map(float, line.rstrip().split(' ')))
joints = np.array(joints).reshape(2, -1, 3)
if not np.any(joints[1]):
joints = joints[0][np.newaxis, :, :]
data.append(joints)
elif dset == 'cad-60':
f = open(path, 'r')
lines = f.readlines()
data = []
# Last line is "END"
for line in lines[:-1]:
# fist item is frame number, last item is empty
row = line.split(',')[1:-1]
row = list(map(float, row))
joints = []
for i in range(15):
if i < 11:
# First 11 joints
index = 14 * i + 10
else:
# Joint 12 ~ 15
index = 11 * 14 + (i - 11) * 4
joint = row[index: index+3]
joints.append(joint)
joints = np.array(joints) / 1000.0 # millimeter to meter
joints = joints[np.newaxis, :, :] # To match ntu-rgb format
data.append(joints)
else:
raise NotImplementedError
return data
| 16,744
|
def cmd_start(message):
"""Старт диалога с ботом"""
mainmenu(message)
| 16,745
|
def test_safe_failure():
"""Ensures that safe decorator works correctly for Failure case."""
failed = _function(0)
assert isinstance(failed.failure(), ZeroDivisionError)
| 16,746
|
def test_rambo(sqrts=7e3, max_n=8):
"""Check that rambo produces the right type of phase space"""
for n in range(2, max_n):
auto_test_rambo_massless(n, sqrts)
# Check that it also accepts a variable input energy
events = 13
variable_sqrts = tf.random.uniform((13,), dtype=DTYPE) * sqrts
auto_test_rambo_massless(n, variable_sqrts, n_events=events)
| 16,747
|
def humanize_date(date_string):
""" returns dates as in this form: 'August 24 2019' """
return convert_date(date_string).strftime("%B %d %Y")
| 16,748
|
def delete_quota(module, blade):
"""Delete Filesystem User Quota"""
changed = True
if not module.check_mode:
try:
if module.params["uid"]:
blade.quotas_users.delete_user_quotas(
file_system_names=[module.params["name"]],
uids=[module.params["uid"]],
)
else:
blade.quotas_users.delete_user_quotas(
file_system_names=[module.params["name"]],
user_names=[module.params["uname"]],
)
except Exception:
if module.params["uid"]:
module.fail_json(
msg="Failed to delete quota for UID {0} on filesystem {1}.".format(
module.params["uid"], module.params["name"]
)
)
else:
module.fail_json(
msg="Failed to delete quota for username {0} on filesystem {1}.".format(
module.params["uname"], module.params["name"]
)
)
module.exit_json(changed=changed)
| 16,749
|
def get_checkpoint(checkpoint_path, requested_step=None, basename='checkpoint'):
"""
    Reload the model from a checkpoint.
    """
    if requested_step is not None:
        model_checkpoint_path = '%s/%s-%s' % (checkpoint_path, basename, requested_step)
        if not os.path.exists(model_checkpoint_path):
            print('No checkpoint file found at [%s]' % checkpoint_path)
            exit(-1)
        print(model_checkpoint_path)
        return model_checkpoint_path, requested_step
ckpt = tf.train.get_checkpoint_state(checkpoint_path)
if ckpt and ckpt.model_checkpoint_path:
# Restore checkpoint as described in top of this program
print(ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
return ckpt.model_checkpoint_path, global_step
else:
print('No checkpoint file found at [%s]' % checkpoint_path)
exit(-1)
| 16,750
|
def handle_error(exception: Exception, request: Request=None):
"""
If an exception is thrown, deal with it and present an error page.
"""
if request is None:
request = {'_environ': {'PATH_INFO': ''}}
if not getattr(exception, 'hide_traceback', False):
(e_type, e_value, e_tb) = sys.exc_info()
message = f"{exception.__class__} occurred on {request._environ['PATH_INFO']!r}: {exception}" \
f"\n{''.join(traceback.format_exception(e_type, e_value, e_tb))}"
request._environ['wsgi.errors'].write(message)
if isinstance(exception, RequestError):
status = getattr(exception, 'status', 404)
else:
status = 500
if status in ERROR_HANDLERS:
return ERROR_HANDLERS[status](request, exception)
return not_found(request, exception)
| 16,751
|
def train_argument_from_bp(params, extractor):
"""
:type params: dict
:type extractor: nlplingo.nn.extractor.Extractor
"""
print('################ train_argument_from_bp ###############')
bp_filepath = params['bp_file']
labels = create_sequence_labels(extractor.domain.event_roles.keys())
train_docs = prepare_docs(params['data']['train']['filelist'], dict(), params)
""":type: list[nlplingo.text.text_theory.Document]"""
annotations = get_frame_annotations_from_bp(bp_filepath)
""":type: dict[str, list[list[nlplingo.text.text_span.LabeledTextFrame]]]"""
model = SequenceModel(params, extractor.extractor_params, extractor.domain, dict(), None, labels)
example_generator = SequenceExampleGenerator(extractor.domain, params, extractor.extractor_params, None)
feature_generator = SequenceFeatureGenerator(extractor.extractor_params, labels, model.tokenizer)
examples = example_generator.generate_frames_for_training(train_docs, annotations)
""":type: list[nlplingo.tasks.sequence.example.SequenceExample]"""
for example in examples: # populate each example with features
feature_generator.generate_example(example, model.event_domain.sequence_types, model.tokenizer)
global_step, tr_loss = model.train(examples)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
model.save_model()
| 16,752
|
def tree_sanity_check(tree: Node) -> bool:
"""
Sanity check for syntax trees: One and the same node must never appear
    twice in the syntax tree. Frozen Nodes (EMPTY_NODE, PLACEHOLDER)
should only exist temporarily and must have been dropped or eliminated
before any kind of tree generation (i.e. parsing) or transformation
is finished.
:param tree: the root of the tree to be checked
:returns: `True`, if the tree is "sane", `False` otherwise.
"""
node_set = set() # type: Set[Node]
for node in tree.select_if(lambda nd: True, include_root=True):
if not isinstance(node, Node) or node in node_set or isinstance(node, FrozenNode):
return False
node_set.add(node)
return True
| 16,753
|
def update_database(path=None, encoding="cp1252", **kwargs):
"""
Update the reference composition database.
Notes
------
This will take all csv files from the geochem/refcomp pyrolite data folder
and construct a document-based JSON database.
"""
if path is None:
path = __dbfile__
with TinyDB(str(path)) as db:
db.truncate()
for f in get_reference_files():
C = Composition(f, encoding=encoding, **kwargs)
db.insert(
{"name": C.name, "composition": C._df.T.to_json(force_ascii=False)}
)
db.close()
| 16,754
|
def fileCompare( filename1, filename2, folder1=None, folder2=None, printFlag=True, exitCount:int=10 ):
"""
Compare the two utf-8 files.
"""
filepath1 = Path( folder1, filename1 ) if folder1 else filename1
filepath2 = Path( folder2, filename2 ) if folder2 else filename2
if verbosityLevel > 1:
if filename1==filename2:
vPrint( 'Quiet', debuggingThisModule, "Comparing {!r} files in folders {!r} and {!r}…".format( filename1, folder1, folder2 ) )
else: vPrint( 'Quiet', debuggingThisModule, "Comparing files {!r} and {!r}…".format( filename1, filename2 ) )
# Do a preliminary check on the readability of our files
if not os.access( filepath1, os.R_OK ):
logging.error( f"fileCompare: File1 {filepath1!r} is unreadable" )
return None
if not os.access( filepath2, os.R_OK ):
logging.error( f"fileCompare: File2 {filepath2!r} is unreadable" )
return None
# Read the files into lists
lineCount, lines1 = 0, []
with open( filepath1, 'rt', encoding='utf-8' ) as file1:
for line in file1:
lineCount += 1
if lineCount==1 and line[0]==chr(65279): #U+FEFF
if printFlag and verbosityLevel > 2:
vPrint( 'Quiet', debuggingThisModule, " fileCompare: Detected Unicode Byte Order Marker (BOM) in file1" )
line = line[1:] # Remove the Unicode Byte Order Marker (BOM)
if line and line[-1]=='\n': line=line[:-1] # Removing trailing newline character
if not line: continue # Just discard blank lines
lines1.append( line )
lineCount, lines2 = 0, []
with open( filepath2, 'rt', encoding='utf-8' ) as file2:
for line in file2:
lineCount += 1
if lineCount==1 and line[0]==chr(65279): #U+FEFF
if printFlag and verbosityLevel > 2:
vPrint( 'Quiet', debuggingThisModule, " fileCompare: Detected Unicode Byte Order Marker (BOM) in file2" )
line = line[1:] # Remove the Unicode Byte Order Marker (BOM)
if line and line[-1]=='\n': line=line[:-1] # Removing trailing newline character
if not line: continue # Just discard blank lines
lines2.append( line )
# Compare the length of the lists/files
len1, len2 = len(lines1), len(lines2 )
equalFlag = True
if len1 != len2:
if printFlag: vPrint( 'Quiet', debuggingThisModule, "Count of lines differ: file1={}, file2={}".format( len1, len2 ) )
equalFlag = False
# Now compare the actual lines
diffCount = 0
for k in range( min( len1, len2 ) ):
if lines1[k] != lines2[k]:
if printFlag:
vPrint( 'Quiet', debuggingThisModule, " {}a:{!r} ({} chars)\n {}b:{!r} ({} chars)" \
.format( k+1, lines1[k], len(lines1[k]), k+1, lines2[k], len(lines2[k]) ) )
equalFlag = False
diffCount += 1
if diffCount > exitCount:
if printFlag and verbosityLevel > 1:
vPrint( 'Quiet', debuggingThisModule, "fileCompare: stopped comparing after {} mismatches".format( exitCount ) )
break
return equalFlag
| 16,755
|
def compute_std_error(g,theta,W,Omega,Nobs,Nsim=1.0e+10,step=1.0e-5,args=()):
""" calculate standard errors from minimum-distance type estimation
g should return a vector with:
data moments - simulated moments as a function of theta
Args:
g (callable): moment function (return vector of length J)
theta (np.ndarray): parameter vector (length K)
        W (np.ndarray): weighting matrix (dim J-by-J)
        Omega (np.ndarray): covariance matrix of empirical moments (dim J-by-J)
Nobs (scalar): number of observations
Nsim (scalar,optional): number of simulations
step (scalar,optional): finite step in numerical gradients
        args (tuple, optional): additional arguments passed to g
"""
# a. dimensions
K = len(theta)
J = len(W[0])
# b. numerical gradient.
grad = np.empty((J,K))
for p in range(K):
theta_now = theta.copy()
step_now = np.zeros(K)
step_now[p] = np.fmax(step,step*np.abs(theta_now[p]))
g_forward = g(theta_now + step_now,*args)
g_backward = g(theta_now - step_now,*args)
grad[:,p] = (g_forward - g_backward)/(2.0*step_now[p])
# c. asymptotic variance
GW = grad.T @ W
GWG = GW @ grad
Avar = np.linalg.inv(GWG) @ ( GW @ Omega @ GW.T ) @ np.linalg.inv(GWG)
# d. return asymptotic standard errors
fac = (1.0 + 1.0/Nsim)/Nobs
std_error = np.sqrt(fac*np.diag(Avar))
return std_error
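# Toy usage sketch (all names below are illustrative): two moments, two
# parameters, an identity weighting matrix and a small moment covariance.
import numpy as np

data_moments = np.array([1.0, 2.0])

def g_toy(theta):
    # data moments minus the "simulated" moments of a linear toy model
    simulated = np.array([theta[0], theta[0] + theta[1]])
    return data_moments - simulated

theta_hat = np.array([1.0, 1.0])
W = np.eye(2)             # weighting matrix
Omega = 0.01 * np.eye(2)  # covariance of the empirical moments
print(compute_std_error(g_toy, theta_hat, W, Omega, Nobs=1000))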
| 16,756
|
def p_error(parse):
"""
Error rule for Syntax Errors handling and reporting.
"""
if parse is None:
print("Error! Unexpected end of input!")
else:
print("Syntax error! Line: {}, position: {}, character: {}, "
"type: {}".format(parse.lineno, parse.lexpos, parse.value,
parse.type))
parser.errok()
| 16,757
|
def target_thread(sess):
"""a thread for target nets in DQN"""
global obstacles
while True:
## get current state
for camera_sensor, lane_invasion, obj_collision in zip(cameras, lane_invasions, obj_collisions):
img = camera_sensor.get()
img = img[int(FLAGS.img_height*2.3//5):, :, :] ## corp the ROI
img = cv2.resize(img, dsize=(FLAGS.net_img_height, FLAGS.net_img_width))
# # cv2.imshow('test', img)
# imgs.append(img)
img_1 = cv2.resize(img, dsize=(FLAGS.net_img_height, FLAGS.net_img_width))
s_hm = produce_heat_map(egopilots[0], obstacles, hm_size=(FLAGS.net_img_height, FLAGS.net_img_width),
h_type='safe', consider_range=15)
a_hm = produce_heat_map(egopilots[0], obstacles, hm_size=(FLAGS.net_img_height, FLAGS.net_img_width),
h_type='attentive', consider_range=15)
d_hm = produce_heat_map(egopilots[0], obstacles, hm_size=(FLAGS.net_img_height, FLAGS.net_img_width),
h_type='danger', consider_range=15)
img_2 = np.uint8(np.minimum(np.stack([a_hm, s_hm, d_hm], axis=-1) * 255, 255))
img = np.concatenate([img_1, img_2], axis=-1)
lane_invasion.clear()
obj_collision.clear()
# s_hm = produce_heat_map(egopilots[0], obstacles, hm_size=(FLAGS.net_img_height, FLAGS.net_img_width), h_type='safe')
# a_hm = produce_heat_map(egopilots[0], obstacles, hm_size=(FLAGS.net_img_height, FLAGS.net_img_width), h_type='attentive')
# d_hm = produce_heat_map(egopilots[0], obstacles, hm_size=(FLAGS.net_img_height, FLAGS.net_img_width), h_type='danger')
# img = np.uint8(np.minimum(np.stack([a_hm, s_hm, d_hm], axis=-1)*255, 255))
# cv2.imshow('test', img)
current_img_state = np.array([img])
current_img_state = current_img_state*2./255. - 1.
## get current action and control the egopilots
current_action, current_step = sess.run([max_action_index_online, global_step], feed_dict={online_img_state: current_img_state})
## control the egopilots ##
i = 0
for egopilot, c_a in zip(egopilots, current_action):
## e-greedy
current_action[i] = c_a
steer = action_index_2_steer(c_a)
throttle = 0.5
brake = 0.
ego_v = egopilot.get_velocity()
ego_v = math.sqrt(ego_v.x ** 2 + ego_v.y ** 2 + ego_v.z ** 2)
if ego_v > 8. and throttle > 0.5:
throttle = 0.5 ## avoid velocity too big
## apply control
egopilot.apply_control(carla.VehicleControl(throttle=throttle, steer=steer, brake=brake))
i += 1
# cv2.waitKey(30)
time.sleep(0.5) ## sleep for a while, let the action control the egopilots to next state
## check whether end.
for egopilot, lane_invasion, obj_collision in zip(egopilots, lane_invasions, obj_collisions):
on_collision = obj_collision.get()
on_invasion = lane_invasion.get()
if on_invasion or on_collision:
destroy(obstacles)
respawn_actor_at(world, egopilot, transform=init_point)
obstacles = random_spawn_obstacles_in_specific_area(world)
| 16,758
|
def lhs(paramList, trials, corrMat=None, columns=None, skip=None):
"""
Produce an ndarray or DataFrame of 'trials' rows of values for the given parameter
list, respecting the correlation matrix 'corrMat' if one is specified, using Latin
Hypercube (stratified) sampling.
The values in the i'th column are drawn from the ppf function of the i'th parameter
from paramList, and each columns i and j are rank correlated according to corrMat[i,j].
:param paramList: (list of rv-like objects representing parameters) Only requirement
on parameter objects is that they must implement the ppf function.
:param trials: (int) number of trials to generate for each parameter.
:param corrMat: a numpy matrix representing the correlation between the parameters.
corrMat[i,j] should give the correlation between the i'th and j'th
entries of paramlist.
:param columns: (None or list(str)) Column names to use to return a DataFrame.
    :param skip: (list of params) Parameters to process later because they are
dependent on other parameter values (e.g., they're "linked"). These
cannot be correlated.
:return: ndarray or DataFrame with `trials` rows of values for the `paramList`.
"""
ranks = genRankValues(len(paramList), trials, corrMat) if corrMat is not None else None
samples = np.zeros((trials, len(paramList))) # @UndefinedVariable
skip = skip or []
for i, param in enumerate(paramList):
if param in skip:
continue # process later
values = param.ppf(getPercentiles(trials)) # extract values from the RV for these percentiles
if corrMat is None:
# Sequence is a special case for which we don't shuffle (and we ignore stratified sampling)
if param.param.dataSrc.distroName != 'sequence':
np.random.shuffle(values) # randomize the stratified samples
else:
indices = ranks[:, i] - 1 # make them 0-relative
values = values[indices] # reorder to respect correlations
samples[:, i] = values
return DataFrame(samples, columns=columns) if columns else samples
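# A minimal, standalone sketch of the stratified-sampling idea used above, not of
# the full function: it uses plain scipy frozen distributions instead of the
# parameter objects (and the genRankValues/getPercentiles helpers) that lhs()
# expects, and it ignores correlations.
import numpy as np
from scipy import stats

def lhs_sketch(rvs, trials, seed=0):
    rng = np.random.default_rng(seed)
    percentiles = (np.arange(trials) + 0.5) / trials   # midpoint of each equal-probability stratum
    samples = np.empty((trials, len(rvs)))
    for i, rv in enumerate(rvs):
        values = rv.ppf(percentiles)   # one draw per stratum
        rng.shuffle(values)            # shuffle so columns are not spuriously correlated
        samples[:, i] = values
    return samples

print(lhs_sketch([stats.norm(0, 1), stats.uniform(0, 10)], trials=5))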
| 16,759
|
def add_account(account):
    """
    Add an account for an LDAP entry.
    """
    account_type = 'USER'
    try:
        client.get_account(account)
        print("Account '" + account + "' is already registered as a Rucio account")
    except exception.AccountNotFound:
        client.add_account(account, account_type, None)
    except exception.InvalidObject as e:
        print(e.args[0][0])
| 16,760
|
def percentError(mean, sd, y_output, logits):
""" Calculates the percent error between the prediction and real value.
The percent error is calculated with the formula:
100*(|real - predicted|)/(real)
    The real and predicted values are un-normalized so the error reflects how
    accurate the true predictions are. This metric is created in the name scope "percentError".
Input:
* mean: The mean of the original output distribution
* sd: The standard deviation of the original output distribution
* y_output: The y_output symbolic output from the iterator
        * logits: The symbolic prediction output from the neural network
Returns:
* percentErr: An operation which calculates the percent error when
* used in a training or validation run of the network
"""
with tf.name_scope("percentError", values=[y_output, logits]):
predictions= tf.exp(tf.reduce_sum(logits, axis=-1)*sd + mean)
actualValue = tf.exp(y_output*sd + mean)
percentErr = tf.reduce_mean(abs((actualValue-predictions)*100/(actualValue)))
tf.summary.scalar('Percent_Error', percentErr)
return(percentErr)
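# A small NumPy sketch of the same computation with made-up numbers, to show the
# un-normalization step: outputs were standardized in log space, so exp(x*sd + mean)
# recovers the original scale before the percent error is taken. (The TF version
# above additionally sums the logits over their last axis.)
import numpy as np

mean, sd = 2.0, 0.5                     # statistics of the original log-space outputs
y_output = np.array([0.3, -0.1, 1.2])   # normalized log-space targets
logits = np.array([0.25, 0.0, 1.1])     # normalized log-space predictions

predictions = np.exp(logits * sd + mean)
actual = np.exp(y_output * sd + mean)
percent_err = np.mean(np.abs(actual - predictions) * 100 / actual)
print(percent_err)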
| 16,761
|
def get_file_names(directory, prefix='', suffix='', nesting=True):
"""
Returns list of all files in directory
Args:
directory (str): the directory of interest
prefix (str): if provided, files returned must start with this
suffix (str): if provided, files returned must end with this
nesting (bool): if True, looks in all subdirectories of dir. If false, only looks at top-level.
"""
l = []
for path, subdirs, files in os.walk(directory):
for name in files:
if name.startswith(prefix) and name.endswith(suffix) and (nesting or (path == directory)):
l.append(os.path.join(path, name))
return l
| 16,762
|
def parse_multiple_files(*actions_files):
"""Parses multiple files. Broadly speaking, it parses sequentially all
files, and concatenates all answers.
"""
return parsing_utils.parse_multiple_files(parse_actions, *actions_files)
| 16,763
|
def frequent_combinations(spark_df: DataFrame, n=10, export=True):
"""
    takes a dataframe containing visitor logs and computes the n most frequent visitor-visitee pairs
    :param spark_df: Spark DataFrame
    :param n: number of most frequent pairs to return
    :param export: if True, persist the result to CSV
    :return: pandas dataframe of visitor-visitee pairs with their visit counts
"""
# compute aggregate and convert to pandas for visualization
freq_pairs = spark_df.groupBy(['VISITOR_NAME', 'VISITEE_NAME']).agg( \
count('APPT_START_DATE').alias('Visits')
). \
orderBy('Visits', ascending=False). \
limit(n). \
toPandas()
print(freq_pairs)
# persist
if export:
freq_pairs.to_csv(catalog['business/frequent_pairs'], index=False)
return freq_pairs
| 16,764
|
def main(blogname, timesleep=0):
""" 多线程处理下载图片部分"""
url_sub = find_latest_page(blogname)
image = ImagesDownloader()
text = TextWriter(blogname)
with futures.ThreadPoolExecutor(max_workers=PROCESSES*2) as ex:
while url_sub:
            # If you need to set proxies, configure them in the call on the next line
item, url_sub = page_download_n_parse(url_sub, proxies=None,
timeout=TIMEOUT)
image_urls = item.get('image_urls')
if image_urls:
                print('Trying to download images %s' % image_urls)
filename = item.get('text')
if filename:
filename = filename if len(filename) < 10 else filename[:10]
text.process_item(item)
ex.submit(image.image_download, image_urls, filename, blogname,
proxies=item['proxies'], timeout=TIMEOUT)
time.sleep(timesleep)
text.close()
| 16,765
|
def reservoir_sampling(items, k):
"""
    Reservoir sampling algorithm for a large sample space or a list/stream of
    unknown length.
    See <http://en.wikipedia.org/wiki/Reservoir_sampling> for details.
    Type: ([a] * Int) -> [a]
    Precondition: k is positive and items contains at least k items
    Postcondition: the returned list has length k
    """
    sample = items[0:k]
    for i in range(k, len(items)):
        j = random.randrange(0, i + 1)
        if j < k:
            sample[j] = items[i]
return sample
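# A quick usage sketch with hypothetical data: keep a uniform sample of 5 items
# from a list of 100. Seeding the module-level RNG makes the run reproducible.
import random

random.seed(42)
print(reservoir_sampling(list(range(100)), 5))   # a list of 5 items drawn from 0..99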
| 16,766
|
def create_db_engine(app: Flask) -> Engine:
"""Create and return an engine instance based on the app's database configuration."""
url = URL(
drivername=app.config['DATABASE_DRIVER'],
username=app.config['DATABASE_USER'],
password=app.config['DATABASE_PASSWORD'],
host=app.config['DATABASE_HOST'],
port=app.config['DATABASE_PORT'],
database=app.config['DATABASE_DB']
)
return create_engine(
url,
json_serializer=lambda obj: json.dumps(obj, default=json_serialize_default)
)
| 16,767
|
def wgs84_to_gcj02(lat, lng):
"""
    Convert WGS84 coordinates to GCJ-02 ("Mars" coordinate system).
    :param lat: latitude in WGS84
    :param lng: longitude in WGS84
    :return: [lat, lng] in GCJ-02
"""
dlat = _transformlat(lng - 105.0, lat - 35.0)
dlng = _transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglat, mglng]
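# A usage sketch (illustrative coordinates only). It assumes the module-level
# constants `a`, `ee`, `pi` and the `_transformlat`/`_transformlng` helpers that
# this function relies on are defined elsewhere in the module.
lat_wgs, lng_wgs = 39.9042, 116.4074            # roughly central Beijing
lat_gcj, lng_gcj = wgs84_to_gcj02(lat_wgs, lng_wgs)
print(lat_gcj, lng_gcj)                         # shifted by a few hundred metres from the input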
| 16,768
|
def check_dict(data: dict, dict_name: str, check_str_values=True):
"""
Check that a given top-level element in 'data' dict is a simple str-to-str
dict.
"""
assert dict_name in data, \
f"Top-level dict must contain key '{dict_name}'."
d = data[dict_name]
assert isinstance(d, dict), \
f"Top-level sub-element '{dict_name}' must be a dict."
assert all(isinstance(key, str) for key in d), \
f"Keys of top-level dict '{dict_name}' must be strings."
assert all(key != "" for key in d), \
f"Keys of top-level dict '{dict_name}' must not be empty strings."
if check_str_values:
assert all(isinstance(value, str) for value in d.values()), \
f"All values of top-level dict '{dict_name}' must be strings."
| 16,769
|
def save_raw_csv(raw_frames: Iterable[Frame], target_path: str) -> None:
""" Saves raw_frames as a csv file. """
fields = Frame.get_fields()
with open(target_path, "w") as file:
wr = csv.writer(file, quoting=csv.QUOTE_NONE)
wr.writerow(fields)
for frame in raw_frames:
wr.writerow(getattr(frame, field) for field in fields)
| 16,770
|
async def test_reload(hass):
"""Verify we can reload."""
respx.get("http://localhost") % 200
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: [
{
"resource": "http://localhost",
"method": "GET",
"verify_ssl": "false",
"timeout": 30,
"sensor": [
{
"name": "mockrest",
},
],
}
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("sensor.mockrest")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"rest/configuration_top_level.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
"rest",
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("sensor.mockreset") is None
assert hass.states.get("sensor.rollout")
assert hass.states.get("sensor.fallover")
| 16,771
|
def test_unsigned_byte_enumeration002_1804_unsigned_byte_enumeration002_1804_v(mode, save_output, output_format):
"""
TEST :Facet Schemas for string : facet=enumeration and value=0 and
document value=0
"""
assert_bindings(
schema="msData/datatypes/Facets/unsignedByte/unsignedByte_enumeration002.xsd",
instance="msData/datatypes/Facets/unsignedByte/unsignedByte_enumeration002.xml",
class_name="Test",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 16,772
|
def _normalize_kwargs(kwargs, kind='patch'):
"""Convert matplotlib keywords from short to long form."""
# Source:
# github.com/tritemio/FRETBursts/blob/fit_experim/fretbursts/burst_plot.py
if kind == 'line2d':
long_names = dict(c='color', ls='linestyle', lw='linewidth',
mec='markeredgecolor', mew='markeredgewidth',
mfc='markerfacecolor', ms='markersize',)
elif kind == 'patch':
long_names = dict(c='color', ls='linestyle', lw='linewidth',
ec='edgecolor', fc='facecolor',)
for short_name in long_names:
if short_name in kwargs:
kwargs[long_names[short_name]] = kwargs.pop(short_name)
return kwargs
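# A quick worked example: short matplotlib-style aliases are expanded in place.
kw = _normalize_kwargs({'c': 'red', 'lw': 2, 'alpha': 0.5}, kind='line2d')
print(kw)   # e.g. {'alpha': 0.5, 'color': 'red', 'linewidth': 2}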
| 16,773
|
def fprime_to_jsonable(obj):
"""
Takes an F prime object and converts it to a jsonable type.
:param obj: object to convert
:return: object in jsonable format (can call json.dump(obj))
"""
# Otherwise try and scrape all "get_" getters in a smart way
anonymous = {}
getters = [attr for attr in dir(obj) if attr.startswith("get_")]
for getter in getters:
# Call the get_ functions, and call all non-static methods
try:
func = getattr(obj, getter)
item = func()
            # If there is a property named "args" it needs to be handled specially, unless obj is an incoming command
if (
getter == "get_args"
and not "fprime_gds.common.data_types.cmd_data.CmdData"
in str(type(obj))
):
args = []
for arg_spec in item:
arg_dict = {
"name": arg_spec[0],
"description": arg_spec[1],
"value": arg_spec[2].val,
"type": str(arg_spec[2]),
}
if arg_dict["type"] == "Enum":
arg_dict["possible"] = arg_spec[2].keys()
args.append(arg_dict)
# Fill in our special handling
item = args
anonymous[getter.replace("get_", "")] = item
except TypeError:
continue
return anonymous
| 16,774
|
def subsequent_mask(size: int) -> Tensor:
"""
Mask out subsequent positions (to prevent attending to future positions)
Transformer helper function.
:param size: size of mask (2nd and 3rd dim)
:return: Tensor with 0s and 1s of shape (1, size, size)
"""
mask = np.triu(np.ones((1, size, size)), k=1).astype("uint8")
return torch.from_numpy(mask) == 0
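# A worked example for size=3: each position may attend only to itself and to
# earlier positions (True = allowed).
print(subsequent_mask(3))
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])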
| 16,775
|
def replace(tree: bpy.types.NodeTree, node: 'ArmLogicTreeNode'):
"""Replaces the given node with its replacement."""
# the node can either return a NodeReplacement object (for simple replacements)
# or a brand new node, for more complex stuff.
response = node.get_replacement_node(tree)
if isinstance(response, arm_nodes.ArmLogicTreeNode):
newnode = response
# some misc. properties
copy_basic_node_props(from_node=node, to_node=newnode)
elif isinstance(response, list): # a list of nodes:
for newnode in response:
copy_basic_node_props(from_node=node, to_node=newnode)
elif isinstance(response, NodeReplacement):
replacement = response
# if the returned object is a NodeReplacement, check that it corresponds to the node (also, create the new node)
if node.bl_idname != replacement.from_node or node.arm_version != replacement.from_node_version:
raise LookupError("The provided NodeReplacement doesn't seem to correspond to the node needing replacement")
# Create the replacement node
newnode = tree.nodes.new(response.to_node)
if newnode.arm_version != replacement.to_node_version:
tree.nodes.remove(newnode)
raise LookupError("The provided NodeReplacement doesn't seem to correspond to the node needing replacement")
# some misc. properties
copy_basic_node_props(from_node=node, to_node=newnode)
# now, use the `replacement` to hook up the new node correctly
# start by applying defaults
for prop_name, prop_value in replacement.property_defaults.items():
setattr(newnode, prop_name, prop_value)
for input_id, input_value in replacement.input_defaults.items():
input_socket = newnode.inputs[input_id]
if isinstance(input_socket, arm.logicnode.arm_sockets.ArmCustomSocket):
if input_socket.arm_socket_type != 'NONE':
input_socket.default_value_raw = input_value
elif input_socket.type != 'SHADER':
# note: shader-type sockets don't have a default value...
input_socket.default_value = input_value
# map properties
for src_prop_name, dest_prop_name in replacement.property_mapping.items():
setattr(newnode, dest_prop_name, getattr(node, src_prop_name))
# map inputs
for src_socket_id, dest_socket_id in replacement.in_socket_mapping.items():
src_socket = node.inputs[src_socket_id]
dest_socket = newnode.inputs[dest_socket_id]
if src_socket.is_linked:
# an input socket only has one link
datasource_socket = src_socket.links[0].from_socket
tree.links.new(datasource_socket, dest_socket)
else:
if isinstance(dest_socket, arm.logicnode.arm_sockets.ArmCustomSocket):
if dest_socket.arm_socket_type != 'NONE':
dest_socket.default_value_raw = src_socket.default_value_raw
elif dest_socket.type != 'SHADER':
# note: shader-type sockets don't have a default value...
dest_socket.default_value = src_socket.default_value
# map outputs
for src_socket_id, dest_socket_id in replacement.out_socket_mapping.items():
dest_socket = newnode.outputs[dest_socket_id]
for link in node.outputs[src_socket_id].links:
tree.links.new(dest_socket, link.to_socket)
else:
print(response)
tree.nodes.remove(node)
| 16,776
|
def process_entries(components):
"""Process top-level entries."""
data = {}
for index, value in enumerate(STRUCTURE):
label = value[0]
mandatory = value[1]
# Raise error if mandatory elements are missing
if index >= len(components):
if mandatory is True:
raise ValueError('UNH header is missing mandatory entry for {label}'.format(label=label))
else:
break
# Process
if len(value) == LENGTH_OF_A_SIMPLE_ENTRY:
data[label] = components[index]
elif len(value) == LENGTH_OF_A_NESTED_ENTRY:
data[label] = process_subentries(components, index)
else:
raise ValueError('unexpected structure')
return data
| 16,777
|
def test_render_changelog_unsupported_type(config):
"""Test that unsupported types are excluded from the changelog content."""
cz = discover_this(config)
parser = cz.commit_parser
changelog_pattern = cz.bump_pattern
gitcommits = [
git.GitCommit(rev='003', title='feat: started commitizen', body=''),
git.GitCommit(rev='002', title='Chg: some legacy commit with changes', body=''),
git.GitCommit(rev='001', title='NotLegacy: unsupported change type', body=''),
]
tree = changelog.generate_tree_from_commits(
commits=gitcommits,
tags=[],
commit_parser=parser,
changelog_pattern=changelog_pattern,
change_type_map=cz.change_type_map,
)
expected = '\n'.join([
'\n## Unreleased',
'\n### Feat',
'\n- started commitizen',
'\n### Change (Old)',
'\n- some legacy commit with changes',
'',
])
result = changelog.render_changelog(tree)
assert result == expected
| 16,778
|
def normalize_input_vector(trainX: np.ndarray, testX: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize the input vector
Args:
trainX (np.ndarray): train embedding array.
testX (np.ndarray): test embedding array.
Returns:
np.ndarray, np.ndarray: normalized train and test arrays.
"""
in_encoder = Normalizer(norm='l2')
trainX = in_encoder.transform(trainX)
testX = in_encoder.transform(testX)
return trainX, testX
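# A usage sketch with hypothetical embeddings (assumes the sklearn Normalizer
# import used above is available): after the transform every row has unit L2 norm.
import numpy as np

trainX = np.array([[3.0, 4.0], [1.0, 0.0]])
testX = np.array([[0.0, 2.0]])
trainX_n, testX_n = normalize_input_vector(trainX, testX)
print(trainX_n)                            # [[0.6 0.8] [1.  0. ]]
print(np.linalg.norm(trainX_n, axis=1))    # all ~1.0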
| 16,779
|
def update_cache(cache_data, new_data, key):
"""
Add newly collected data to the pre-existing cache data
Args:
cache_data (dict): Pre-existing chip data
new_data (dict): Newly acquired chip data
key (str): The chip UL coordinates
Returns:
"""
if key in cache_data.keys():
cache_data[key].update(new_data[key])
else:
cache_data[key] = new_data[key]
return cache_data
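# A quick usage sketch with hypothetical chip data keyed by UL coordinates.
cache = {"(0, 0)": {"2019": "done"}}
new = {"(0, 0)": {"2020": "done"}, "(0, 5)": {"2020": "pending"}}
cache = update_cache(cache, new, "(0, 0)")
print(cache)   # {'(0, 0)': {'2019': 'done', '2020': 'done'}}
cache = update_cache(cache, new, "(0, 5)")
print(cache)   # now also contains '(0, 5)': {'2020': 'pending'}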
| 16,780
|
def test_empty_data(empty_data):
"""Test that we gracefully get an empty list of data if we feed an empty
string to the function
"""
yaml_parser.read_yaml(empty_data)
| 16,781
|
def status(command, **keys):
"""Run a subprogram capturing it's output and return the exit status."""
return _captured_output(command, **keys).status
| 16,782
|
def normalize(flow: Tensor) -> Tensor:
"""Re-scales the optical flow vectors such that they correspond to motion on the normalized pixel coordinates
in the range [-1, 1] x [-1, 1].
Args:
flow: the optical flow tensor of shape (B, 2, H, W)
Returns:
The optical flow tensor with flow vectors rescaled to the normalized pixel coordinate system.
"""
# flow: (B, 2, H, W)
assert flow.size(1) == 2
h, w = flow.shape[-2:]
return scale(flow, (2.0 / max(w - 1, 1), 2.0 / max(h - 1, 1)))
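# A small sketch of the intended effect, assuming the `scale` helper used above
# multiplies the x- and y-flow channels by the given factors (it is defined
# elsewhere): pixel-unit flow on a 5x4 image is mapped into [-1, 1] coordinates.
import torch

flow = torch.zeros(1, 2, 4, 5)     # (B, 2, H, W), flow in pixels
flow[:, 0] = 2.0                   # 2-pixel horizontal motion everywhere
flow[:, 1] = 1.0                   # 1-pixel vertical motion everywhere

h, w = flow.shape[-2:]
factors = torch.tensor([2.0 / max(w - 1, 1), 2.0 / max(h - 1, 1)]).view(1, 2, 1, 1)
flow_norm = flow * factors         # what normalize() computes via scale()
print(flow_norm[0, :, 0, 0])       # tensor([1.0000, 0.6667])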
| 16,783
|
def to_graph6_bytes(G, nodes=None, header=True):
"""Convert a simple undirected graph to bytes in graph6 format.
Parameters
----------
G : Graph (undirected)
nodes: list or iterable
Nodes are labeled 0...n-1 in the order provided. If None the ordering
given by ``G.nodes()`` is used.
header: bool
If True add '>>graph6<<' bytes to head of data.
Raises
------
NetworkXNotImplemented
If the graph is directed or is a multigraph.
ValueError
If the graph has at least ``2 ** 36`` nodes; the graph6 format
is only defined for graphs of order less than ``2 ** 36``.
Examples
--------
>>> nx.to_graph6_bytes(nx.path_graph(2))
b'>>graph6<<A_\\n'
See Also
--------
from_graph6_bytes, read_graph6, write_graph6_bytes
Notes
-----
The returned bytes end with a newline character.
The format does not support edge or node labels, parallel edges or
self loops. If self loops are present they are silently ignored.
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
if nodes is not None:
G = G.subgraph(nodes)
H = nx.convert_node_labels_to_integers(G)
nodes = sorted(H.nodes())
return b"".join(_generate_graph6_bytes(H, nodes, header))
| 16,784
|
def test_inbound_bad_provider():
"""Test the inbound user filter with bad provider throws error"""
with pytest.raises(TypeError):
inbound_user_filter({"id": "123-456-abs3"}, "potato")
| 16,785
|
def get_arrays_from_img_label(img, label, img_mode=None):
"""Transform a SimpleITK image and label map into numpy arrays, and
optionally select a channel.
Parameters:
img (SimpleITK.SimpleITK.Image): image
label (SimpleITK.SimpleITK.Image): label map
img_mode (int or None): optional mode channel, so output is 3D
Returns:
(numpy.ndarray, numpy.ndarray): image and label in numpy format
"""
img_np = sitk.GetArrayFromImage(img)
if img_mode is not None:
img_np = img_np[img_mode]
label_np = sitk.GetArrayFromImage(label)
return img_np, label_np.astype(int)
| 16,786
|
def _get_error_code(exception):
    """Get the most specific error code for the exception class via its MRO."""
    for cls in exception.mro():
        try:
            return error_codes[cls]
        except KeyError:
            continue
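# A worked example with a hypothetical error_codes mapping: the MRO walk returns
# the most specific code available, so KeyError falls back to LookupError's code.
error_codes = {LookupError: 404, Exception: 500}
print(_get_error_code(KeyError))     # 404 (KeyError -> LookupError)
print(_get_error_code(ValueError))   # 500 (falls back to Exception)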
| 16,787
|
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
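# A usage sketch with a hypothetical `pieces` dict. The usual versioneer
# plus_or_dot helper is reproduced here so the example runs standalone.
def plus_or_dot(pieces):
    return "." if "+" in pieces.get("closest-tag", "") else "+"

pieces = {"closest-tag": "1.2.0", "distance": 3, "dirty": True,
          "branch": "feature-x", "short": "abc1234"}
print(render_pep440_branch(pieces))   # 1.2.0.dev0+3.gabc1234.dirty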
| 16,788
|
def read_json_file(file_name: str, encoding: str = "utf-8") -> dict:
"""Reads a json file
:param file_name: path
:param encoding: encoding to use
:return: dict content
"""
with open(file_name, "r", encoding=encoding) as json_file:
return json.load(json_file)
| 16,789
|
def pretty_print_row(col_full_widths, row, max_field_size):
"""
pretty print a row such that each column is padded to have the widths in the col_full_widths vector
"""
start = "| "
if len(row) == len(col_full_widths):
end = " |"
else:
end = "|"
return start + "|".join(pretty_print_field(full_width, field, max_field_size) for full_width, field in zip(col_full_widths, row)) + end
| 16,790
|
def check_embedding(X, Y, score=0.76):
"""Compares TSNE embedding trustworthiness, NAN and verbosity"""
nans = np.sum(np.isnan(Y))
trust = trustworthiness(X, Y)
print("Trust = ", trust)
assert trust > score
assert nans == 0
| 16,791
|
def register(username, password, email):
"""Register a new user"""
auth.register(username, email, password)
| 16,792
|
def _get_data_for_agg(new_svarcube, new_tacube):
"""Reshape data for use in iris aggregator based on two cubes."""
dims_to_collapse = set()
dims_to_collapse.update(new_svarcube.coord_dims('air_pressure'))
untouched_dims = set(range(new_svarcube.ndim)) -\
set(dims_to_collapse)
dims = list(untouched_dims) + list(dims_to_collapse)
unrolled_data = np.moveaxis(new_tacube.data, dims,
range(new_svarcube.ndim))
return unrolled_data
| 16,793
|
def refs_changed_by_other_cc(current_user):
"""
    Return a dictionary mapping reference ids to log entries of changes made by other cooperative centers
"""
current_user_cc = current_user.profile.get_attribute('cc')
result_list = defaultdict(list)
# get last references of current user cooperative center
refs_from_cc = Reference.objects.filter(cooperative_center_code=current_user_cc).order_by('-id')[:100]
for reference in refs_from_cc:
# get correct class (source our analytic)
c_type = reference.get_content_type_id
# filter by logs of current reference, change type and made by other users
log_list = LogEntry.objects.filter(object_id=reference.id, content_type=c_type, action_flag=2) \
.exclude(user=current_user).order_by('-id')
if log_list:
# exclude from list all changes that was already reviewed (logreview is created)
log_list = log_list.exclude(logreview__isnull=False)
# create list of log users of same cc
exclude_user_list = []
for log in log_list:
log_user_cc = log.user.profile.get_attribute('cc')
if log_user_cc == current_user_cc:
exclude_user_list.append(log.user)
# exclude from log list users from same cc as current user
if exclude_user_list:
log_list = log_list.exclude(user__in=exclude_user_list)
if log_list:
# group result by id (one line for each reference)
for log in log_list:
result_list[log.object_id] = log
return result_list
| 16,794
|
def chain_head(head: int, child: int, heads: Dict[int, int]):
"""
>>> chain_head(0, 2, {1: 2, 2: 3, 3: 0})
True
>>> chain_head(2, 0, {1: 2, 2: 3, 3: 0})
False
"""
curr_child = child
while curr_child != -1:
if curr_child == head:
return True
curr_child = heads.get(curr_child, -1)
return False
| 16,795
|
def read_conll(fp):
"""
Generator that returns a list of Row tuples for each document
:param fp: handle to CoNLL tsv file with columns: token tag token doc_id start stop sentence
:return: list of Row tuples
"""
reader = csv.reader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
first_row = next(reader)
if not first_row:
raise CoNLLReaderException("csv reader cannot read first line of {}".format(fp.name))
if first_row[0].lower() in ['token', 'tok']:
raise CoNLLReaderException("Reader does not handle file with header: {}".format(fp.name))
fp.seek(0)
# take token from conll-full file
token_index = 2
tag_index = 1
doc_id_index = 3
offsets_indexes = (4, 5)
sent_id_index = 6
rows = []
current_doc_id = None
for row in reader:
if len(row) < 6:
# sentence breaks are ignored
continue
if not row[tag_index]:
raise RuntimeError("Bad conll format data: {}".format(row))
if current_doc_id is None:
current_doc_id = row[doc_id_index]
if row[doc_id_index] != current_doc_id:
yield rows
rows = []
current_doc_id = row[doc_id_index]
start = int(row[offsets_indexes[0]])
stop = int(row[offsets_indexes[1]])
sent_id = int(row[sent_id_index].split('-')[1])
rows.append(Row(row[token_index], row[tag_index], row[doc_id_index], (start, stop), sent_id))
yield rows
| 16,796
|
def UpgradeFile(file_proto):
"""In-place upgrade a FileDescriptorProto from v2[alpha\d] to v3alpha.
Args:
file_proto: v2[alpha\d] FileDescriptorProto message.
"""
# Upgrade package.
file_proto.package = UpgradedType(file_proto.package)
# Upgrade imports.
for n, d in enumerate(file_proto.dependency):
file_proto.dependency[n] = UpgradedPath(d)
# Upgrade comments.
for location in file_proto.source_code_info.location:
location.leading_comments = UpgradedComment(location.leading_comments)
location.trailing_comments = UpgradedComment(location.trailing_comments)
for n, c in enumerate(location.leading_detached_comments):
location.leading_detached_comments[n] = UpgradedComment(c)
# Upgrade services.
for s in file_proto.service:
UpgradeService(s)
# Upgrade messages.
for m in file_proto.message_type:
UpgradeMessage(m)
for e in file_proto.enum_type:
UpgradeEnum(e)
return file_proto
| 16,797
|
def _get_ip_block(ip_block_str):
""" Convert string into ipaddress.ip_network. Support both IPv4 or IPv6
addresses.
Args:
ip_block_str(string): network address, e.g. "192.168.0.0/24".
Returns:
ip_block(ipaddress.ip_network)
"""
try:
ip_block = ipaddress.ip_network(ip_block_str)
except ValueError:
logging.error("Invalid IP block format: %s", ip_block_str)
return None
return ip_block
| 16,798
|
def mul_ntt(f_ntt, g_ntt, q):
"""Multiplication of two polynomials (coefficient representation)."""
assert len(f_ntt) == len(g_ntt)
deg = len(f_ntt)
return [(f_ntt[i] * g_ntt[i]) % q for i in range(deg)]
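# A tiny worked example (q = 7): the product is taken pointwise modulo q, which
# is how polynomial multiplication looks in the NTT domain.
print(mul_ntt([1, 2, 3], [4, 5, 6], 7))   # [4, 3, 4]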
| 16,799
|