| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def _metric_list_for_check(maas_store, entity, check):
"""
Computes the metrics list for a given check.
Remote checks return a metric for each monitoring zone and
each type of metric for the check type. Agent checks return
a metric for each metric type on the check type. Check types
that Mimic doesn't know about generate an empty list.
"""
if check.type not in maas_store.check_types:
return []
if REMOTE_CHECK_TYPE_REGEX.match(check.type):
return [{'name': '{0}.{1}'.format(mz, metric.name),
'type': metric.type,
'unit': metric.unit}
for metric in maas_store.check_types[check.type].metrics
for mz in check.monitoring_zones_poll]
return [{'name': metric.name,
'type': metric.type,
'unit': metric.unit}
for metric in maas_store.check_types[check.type].metrics]
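
# A minimal, self-contained sketch of the metric expansion described in the
# docstring above. The SimpleNamespace stand-ins for Mimic's store/check
# objects and the 'remote.' naming convention are illustrative assumptions.
from types import SimpleNamespace

_metric = SimpleNamespace(name='duration', type='I', unit='milliseconds')
_store = SimpleNamespace(check_types={'remote.ping': SimpleNamespace(metrics=[_metric])})
_check = SimpleNamespace(type='remote.ping', monitoring_zones_poll=['mzord', 'mzdfw'])

# A remote check yields one entry per (monitoring zone, metric) pair:
print([{'name': '{0}.{1}'.format(mz, m.name), 'type': m.type, 'unit': m.unit}
       for m in _store.check_types[_check.type].metrics
       for mz in _check.monitoring_zones_poll])
# -> [{'name': 'mzord.duration', ...}, {'name': 'mzdfw.duration', ...}]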
| 5,342,900
|
def plot_MA_values(t,X,**kwargs):
"""
    Takes the time array (t), a numpy.ndarray of shape (N,), and the state-space numpy.ndarray (X) of shape (2,N), (4,N), or (8,N), and plots the moment arm values of the two muscles versus time and along the moment arm functions.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**kwargs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1) InputString - must be a string. Used to alter the figure Title. Default is None.
"""
import matplotlib.pyplot as plt
import numpy as np
    assert isinstance(X, np.ndarray) \
        and (np.shape(X)[0] in [2, 4, 8]) \
        and (np.shape(X)[1] == len(t)), \
        "X must be a (2,N), (4,N), or (8,N) numpy.ndarray, where N is the length of t."
    assert isinstance(t, np.ndarray) and np.shape(t) == (len(t),), "t must be a (N,) numpy.ndarray."
    InputString = kwargs.get("InputString", None)
    assert InputString is None or type(InputString) == str, "InputString must either be a string or None."
    if InputString is None:
        DescriptiveTitle = "Moment arm equations"
    else:
        DescriptiveTitle = "Moment arm equations\n(" + InputString + " Driven)"
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2,figsize=(8,6))
plt.subplots_adjust(left = 0.15,hspace=0.1,bottom=0.1)
plt.suptitle(DescriptiveTitle)
ax1.plot(np.linspace(0,np.pi*(160/180),1001),\
np.array(list(map(lambda x1: R1([x1]),np.linspace(0,np.pi*(160/180),1001)))),\
'0.70')
ax1.plot(np.linspace(min(X[0,:]),max(X[0,:]),101),\
np.array(list(map(lambda x1: R1([x1]),np.linspace(min(X[0,:]),max(X[0,:]),101)))),\
'g',lw=3)
ax1.set_xticks([0,np.pi/4,np.pi/2,3*np.pi/4,np.pi])
ax1.set_xticklabels([""]*len(ax1.get_xticks()))
ax1.set_ylabel("Moment Arm for\n Muscle 1 (m)")
"""
Note: Need to Transpose X in order for Map to work.
"""
ax2.plot(t,np.array(list(map(lambda X: R1(X),X.T))),'g')
ax2.set_ylim(ax1.get_ylim())
ax2.set_yticks(ax1.get_yticks())
ax2.set_yticklabels([""]*len(ax1.get_yticks()))
ax2.set_xticklabels([""]*len(ax2.get_xticks()))
ax3.plot(np.linspace(0,np.pi*(160/180),1001),\
np.array(list(map(lambda x1: R2([x1]),np.linspace(0,np.pi*(160/180),1001)))),\
'0.70')
ax3.plot(np.linspace(min(X[0,:]),max(X[0,:]),101),\
np.array(list(map(lambda x1: R2([x1]),np.linspace(min(X[0,:]),max(X[0,:]),101)))),\
'r',lw=3)
ax3.set_xticks([0,np.pi/4,np.pi/2,3*np.pi/4,np.pi])
ax3.set_xticklabels([r"$0$",r"$\frac{\pi}{4}$",r"$\frac{\pi}{2}$",r"$\frac{3\pi}{4}$",r"$\pi$"])
ax3.set_xlabel("Joint Angle (rads)")
ax3.set_ylabel("Moment Arm for\n Muscle 2 (m)")
ax4.plot(t,np.array(list(map(lambda X: R2(X),X.T))),'r')
ax4.set_ylim(ax3.get_ylim())
ax4.set_yticks(ax3.get_yticks())
ax4.set_yticklabels([""]*len(ax3.get_yticks()))
ax4.set_xlabel("Time (s)")
return(fig,[ax1,ax2,ax3,ax4])
| 5,342,901
|
def filter_pdf_files(filepaths):
""" Returns a filtered list with strings that end with '.pdf'
Keyword arguments:
filepaths -- List of filepath strings
"""
return [x for x in filepaths if x.endswith('.pdf')]
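
# Hedged usage sketch: note the match is case-sensitive, so '.PDF' is dropped.
print(filter_pdf_files(['report.pdf', 'notes.txt', 'scan.PDF']))  # ['report.pdf']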
| 5,342,902
|
def index_file(path: str) -> dict:
"""
    Indexes the files and directories under a given directory
Arguments:
path {str} - the path of the DIRECTORY to index
Return:
    {dict} - structure of the indexed directory
"""
structure = {} # Represents the directory structure
for dirpath, directory, files in os.walk(path):
all_files = {}
for file in files:
all_files.update(get_file_info(path, dirpath, file))
node_info = get_directory_info(path, dirpath, all_files, directory)
structure.update({dirpath: node_info})
return structure
| 5,342,903
|
def hue_quadrature(h: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the hue quadrature from given hue :math:`h` angle in degrees.
Parameters
----------
h
Hue :math:`h` angle in degrees.
Returns
-------
:class:`numpy.floating` or :class:`numpy.ndarray`
Hue quadrature.
Examples
--------
>>> hue_quadrature(196.3185839) # doctest: +ELLIPSIS
237.6052911...
"""
h = as_float_array(h)
h_i = HUE_DATA_FOR_HUE_QUADRATURE["h_i"]
e_i = HUE_DATA_FOR_HUE_QUADRATURE["e_i"]
H_i = HUE_DATA_FOR_HUE_QUADRATURE["H_i"]
    # :math:`h_p` = :math:`h_z` + 360 if :math:`h_z` <= :math:`h_1`, i.e. h_i[0]
h[h <= h_i[0]] += 360
# *np.searchsorted* returns an erroneous index if a *nan* is used as input.
h[np.asarray(np.isnan(h))] = 0
i = as_int_array(np.searchsorted(h_i, h, side="left") - 1)
h_ii = h_i[i]
e_ii = e_i[i]
H_ii = H_i[i]
h_ii1 = h_i[i + 1]
e_ii1 = e_i[i + 1]
H = H_ii + (
(100 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii + (h_ii1 - h) / e_ii1)
)
return as_float(H)
| 5,342,904
|
def AddMigCreateStatefulIPsFlags(parser):
"""Adding stateful IPs flags to the parser."""
stateful_internal_ips_help = textwrap.dedent(
"""
Internal IPs considered stateful by the instance group. {}
Use this argument multiple times to make more internal IPs stateful.
At least one of the following is required:
{}
{}
Additional arguments:
{}
""".format(STATEFUL_IPS_HELP_BASE,
STATEFUL_IP_ENABLED_ARG_HELP,
STATEFUL_IP_INTERFACE_NAME_ARG_WITH_ENABLED_HELP,
STATEFUL_IP_AUTO_DELETE_ARG_HELP))
parser.add_argument(
'--stateful-internal-ip',
type=arg_parsers.ArgDict(
allow_key_only=True,
spec={
'enabled': None,
'interface-name': str,
'auto-delete': AutoDeleteFlag.ValidatorWithFlagName(
'--stateful-internal-ip'),
}),
action='append',
help=stateful_internal_ips_help,
)
stateful_external_ips_help = textwrap.dedent(
"""
External IPs considered stateful by the instance group. {}
Use this argument multiple times to make more external IPs stateful.
At least one of the following is required:
{}
{}
Additional arguments:
{}
""".format(STATEFUL_IPS_HELP_BASE,
STATEFUL_IP_ENABLED_ARG_HELP,
STATEFUL_IP_INTERFACE_NAME_ARG_WITH_ENABLED_HELP,
STATEFUL_IP_AUTO_DELETE_ARG_HELP))
parser.add_argument(
'--stateful-external-ip',
type=arg_parsers.ArgDict(
allow_key_only=True,
spec={
'enabled': None,
'interface-name': str,
'auto-delete': AutoDeleteFlag.ValidatorWithFlagName(
'--stateful-external-ip'),
}),
action='append',
help=stateful_external_ips_help,
)
| 5,342,905
|
def compare(inputpath1: str,
inputpath2: str,
output: str,
verbose: bool):
""" Parse two RST-trees (or two sets of RST-tree pairs), \
from INPUTPATH1 and INPUTPATH2 respectively, compare their annotated \
relations, and create comparison tables. If INPUTPATH1 and INPUTPATH2 \
both point to files then both single files will be compared with \
each other. If INPUTPATH1 and INPUTPATH2 both point to directories \
then all '.rs3' files in both directories will be compared with each other. \
If '-o' is set, then the results will be written to OUTPUTPATH. Otherwise,
the results will be printed back on the command line. """
if isFile(inputpath1) and isFile(inputpath2):
compareTwoFiles(inputpath1, inputpath2, output, verbose)
elif isDirectory(inputpath1) and isDirectory(inputpath2):
compareTwoFolders(inputpath1, inputpath2, output, verbose)
else:
print("Error: INPUTPATH1 and INPUTPATH2 must either both point to files or \
both to directories. -> Abort")
pass
| 5,342,906
|
def aa_find_devices_ext (devices, unique_ids):
"""usage: (int return, u16[] devices, u32[] unique_ids) = aa_find_devices_ext(u16[] devices, u32[] unique_ids)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# devices pre-processing
__devices = isinstance(devices, int)
if __devices:
(devices, num_devices) = (array_u16(devices), devices)
else:
(devices, num_devices) = isinstance(devices, ArrayType) and (devices, len(devices)) or (devices[0], min(len(devices[0]), int(devices[1])))
if devices.typecode != 'H':
raise TypeError("type for 'devices' must be array('H')")
# unique_ids pre-processing
__unique_ids = isinstance(unique_ids, int)
if __unique_ids:
(unique_ids, num_ids) = (array_u32(unique_ids), unique_ids)
else:
(unique_ids, num_ids) = isinstance(unique_ids, ArrayType) and (unique_ids, len(unique_ids)) or (unique_ids[0], min(len(unique_ids[0]), int(unique_ids[1])))
if unique_ids.typecode != 'I':
raise TypeError("type for 'unique_ids' must be array('I')")
# Call API function
(_ret_) = api.py_aa_find_devices_ext(num_devices, num_ids, devices, unique_ids)
# devices post-processing
if __devices: del devices[max(0, min(_ret_, len(devices))):]
# unique_ids post-processing
if __unique_ids: del unique_ids[max(0, min(_ret_, len(unique_ids))):]
return (_ret_, devices, unique_ids)
| 5,342,907
|
def decode_varint_in_reverse(byte_array, offset, max_varint_length=9):
"""
    This function moves backwards through a byte array trying to decode a varint in reverse. An InvalidVarIntError
    will be raised if a varint is not found by the algorithm used in this function. The calling logic should check
    for this case, since it is likely to be encountered in the context of carving.
Note: This cannot determine if the field being parsed was originally a varint or not and may give false positives.
Please keep this in mind when calling this function.
Note: If the array runs out of bytes while parsing in reverse, the currently determined varint will be returned.
Note: Since the parsing starts from the left of the offset specified, the resulting byte string that represents
this varint can be determined by byte_array[varint_relative_offset:offset]. The length of the varint
in bytes can be determined likewise either from the len() of the above or offset - varint_relative_offset.
:param byte_array: bytearray The byte array to parse for the varint in reverse.
:param offset: int The offset to move backwards from. The offset specified is not included in the parsing and the
algorithm starts with the last byte of the varint at offset - 1. If you want to start at the
end of the byte array then the offset should be the length of the byte array (where the offset
would refer to a non-existing index in the array).
:param max_varint_length: int The maximum number of varint bytes to go back in reverse. The default is 9 since
this is the maximum number of bytes a varint can be.
:return:
:raise: InvalidVarIntError: If a varint is not determined while parsing the byte array in reverse using the
algorithm in this function. This error is not logged as an error but rather a
debug statement since it is very likely to occur during carving and should be handled
appropriately.
"""
if offset > len(byte_array):
log_message = "The offset: {} is greater than the size of the byte array: {} for the bytes: {}."
log_message = log_message.format(offset, len(byte_array), hexlify(byte_array))
getLogger(LOGGER_NAME).error(log_message)
raise ValueError(log_message)
unsigned_integer_value = 0
varint_inverted_relative_offset = 0
varint_byte = ord(byte_array[offset - 1 - varint_inverted_relative_offset:offset - varint_inverted_relative_offset])
varint_byte &= 0x7f
unsigned_integer_value |= varint_byte
varint_inverted_relative_offset += 1
while offset - varint_inverted_relative_offset - 1 >= 0:
if varint_inverted_relative_offset > max_varint_length:
"""
            Since this exception is not considered an important exception to log as an error, it will be logged
as a debug statement. There is a good chance of this use case occurring and is even expected during
carving.
"""
log_message = "A varint was not determined from byte array: {} starting at offset: {} in reverse."
log_message = log_message.format(byte_array, offset)
getLogger(LOGGER_NAME).debug(log_message)
            raise InvalidVarIntError(log_message)
varint_byte = ord(byte_array[offset - 1 - varint_inverted_relative_offset:
offset - varint_inverted_relative_offset])
msb_set = varint_byte & 0x80
if msb_set:
varint_byte &= 0x7f
varint_byte <<= (7 * varint_inverted_relative_offset)
unsigned_integer_value |= varint_byte
varint_inverted_relative_offset += 1
else:
break
varint_relative_offset = offset - varint_inverted_relative_offset
return unsigned_integer_value, varint_relative_offset
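
# A self-contained sketch of the reverse walk on the two-byte varint
# b'\x82\x2c' (value 300); it re-does the arithmetic inline so it runs
# without the module's logger or InvalidVarIntError class.
data = bytearray(b'\x82\x2c')
offset = len(data)                      # start just past the last varint byte
value, back = data[offset - 1] & 0x7F, 1
while offset - back - 1 >= 0 and data[offset - back - 1] & 0x80:
    value |= (data[offset - back - 1] & 0x7F) << (7 * back)
    back += 1
print(value, offset - back)             # 300 0  -> (value, varint_relative_offset)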
| 5,342,908
|
def manager_version(request):
"""
Context processor to add the rhgamestation-manager version
"""
# Tricky way to know the manager version because its version lives out of project path
root = imp.load_source('__init__', os.path.join(settings.BASE_DIR, '__init__.py'))
return {'manager_version': root.__version__}
| 5,342,909
|
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
#print ('lll: ', blobs['rois'])
return blobs, im_scale_factors
| 5,342,910
|
def string_limiter(text, limit):
    """
    Truncates the string at the first space at or after the character limit.
    If no such space exists, the string is returned unchanged.
    Arguments:
    text -- The string to reduce the length of
    limit -- The number of characters that are allowed in the string
    """
    for i in range(len(text)):
        if i >= limit and text[i] == " ":
            return text[:i]
    return text
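
# Hedged usage sketch: the cut happens at the first space at or after `limit`.
print(string_limiter("the quick brown fox jumps", 10))  # 'the quick brown'
print(string_limiter("short", 10))                      # 'short' (unchanged)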
| 5,342,911
|
def in_ellipse(xy_list,width,height,angle=0,xy=[0,0]):
"""
Find data points inside an ellipse and return index list
Parameters:
    xy_list: Points to be detected.
width: Width of the ellipse
height: Height of the ellipse
angle: anti-clockwise rotation angle in degrees
xy: the origin of the ellipse
"""
if isinstance(xy_list,list):
xy_list = np.array(xy_list)
if not isinstance(xy_list,np.ndarray):
raise Exception(f"Unrecoginzed data type: {type(xy_list)}, \
should be list or np.ndarray")
new_xy_list = xy_list.copy()
new_xy_list = new_xy_list - xy
#------------ define coordinate conversion matrix----------
theta = angle/180*np.pi # degree to radians
con_mat = np.zeros((2,2))
con_mat[:,0] = [np.cos(theta),np.sin(theta)]
con_mat[:,1] = [np.sin(theta),-np.cos(theta)]
tmp = np.matmul(con_mat,new_xy_list.T)
con_xy_list = tmp.T
#------------ check one by one ----------------------------
idxs = []
for i,[x,y] in enumerate(con_xy_list):
if ((x/(width/2))**2+(y/(height/2))**2) < 1:
idxs.append(i)
return idxs
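
# Hedged usage sketch (numpy imported as np, as above): a 4-wide, 2-high
# ellipse at the origin rotated by 90 degrees, so its long axis lies along y.
pts = [[0.0, 0.0], [1.5, 0.0], [0.0, 1.5], [3.0, 0.0]]
print(in_ellipse(pts, width=4, height=2, angle=90))  # [0, 2]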
| 5,342,912
|
def test39():
"""
test fetching a priority class list
"""
res = PriorityClassList.listPriorityClass()
assert res.obj
assert isinstance(res.obj, PriorityClassList)
assert len(res.obj.items) > 0
| 5,342,913
|
def get_real_images(dataset,
num_examples,
split=None,
failure_on_insufficient_examples=True):
"""Get num_examples images from the given dataset/split.
Args:
dataset: `ImageDataset` object.
num_examples: Number of images to read.
split: Split of the dataset to use. If None will use the default split for
eval defined by the dataset.
    failure_on_insufficient_examples: If True, raise an exception if the
      dataset/split does not contain enough images. Otherwise log an error and
      return fewer images.
Returns:
4-D NumPy array with images with values in [0, 256].
Raises:
    DatasetOutOfRangeError: If the dataset/split does not contain the requested
      number of images and `failure_on_insufficient_examples` is True.
"""
logging.info("Start loading real data.")
with tf.Graph().as_default():
ds = dataset.eval_input_fn(split=split)
# Get real images from the dataset. In the case of a 1-channel
# dataset (like MNIST) convert it to 3 channels.
next_batch = ds.make_one_shot_iterator().get_next()[0]
shape = [num_examples] + next_batch.shape.as_list()
is_single_channel = shape[-1] == 1
if is_single_channel:
shape[-1] = 3
real_images = np.empty(shape, dtype=np.float32)
with tf.Session() as sess:
for i in range(num_examples):
try:
b = sess.run(next_batch)
b *= 255.0
if is_single_channel:
b = np.tile(b, [1, 1, 3])
real_images[i] = b
except tf.errors.OutOfRangeError:
logging.error("Reached the end of dataset. Read: %d samples.", i)
real_images = real_images[:i]
break
if real_images.shape[0] != num_examples:
if failure_on_insufficient_examples:
raise DatasetOutOfRangeError("Not enough examples in the dataset %s: %d / %d" %
(dataset, real_images.shape[0], num_examples))
else:
logging.error("Not enough examples in the dataset %s: %d / %d", dataset,
real_images.shape[0], num_examples)
logging.info("Done loading real data.")
return real_images
| 5,342,914
|
def save_opt_state(opt, # optimizer
epoch): # epoch to save the optimizer state of
""" Save optimizer state to temporary directory and then log it with mlflow.
Args:
opt: Optimizer
epoch: Epoch to save the optimizer state of
"""
# create temporary directory
with tempfile.TemporaryDirectory() as temp_dir:
# compute optimizer checkpoint path
opt_checkpoint_path = os.path.join(temp_dir, "opt_epoch_{}.pt".format(str(epoch)))
# save optimizer state to checkpoint path
torch.save(opt.state_dict(), opt_checkpoint_path)
# log checkpoint file as artifact
mlflow.log_artifact(opt_checkpoint_path, artifact_path="model_checkpoints")
| 5,342,915
|
def wait_complete():
"""
    Wait for the current audio to finish playing.
    :param: none
    :returns: 0 on success, any other value on failure
:raises OSError: EINVAL
"""
pass
| 5,342,916
|
async def call(fn: Callable, *args, **kwargs) -> Any:
"""
Submit function `fn` for remote execution with arguments `args` and `kwargs`
"""
async with websockets.connect(WS_SERVER_URI) as websocket:
task = serialize((fn, args, kwargs))
await websocket.send(task)
message = await websocket.recv()
results = deserialize(message)
if isinstance(results, TaskExecutionError):
raise results
return results
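
# Hedged usage sketch: WS_SERVER_URI, serialize and deserialize come from the
# surrounding module, and a worker is assumed to be listening on that socket.
import asyncio

def add(a, b):
    return a + b

if __name__ == "__main__":
    print(asyncio.run(call(add, 2, 3)))  # -> 5, computed remotely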
| 5,342,917
|
def assert_frame_equal(
left: pandas.core.frame.DataFrame,
right: pandas.core.frame.DataFrame,
check_dtype: bool,
):
"""
usage.dask: 1
"""
...
| 5,342,918
|
def wrap_array_func(func):
"""
Returns a version of the function func() that works even when
func() is given a NumPy array that contains numbers with
uncertainties.
func() is supposed to return a NumPy array.
This wrapper is similar to uncertainties.wrap(), except that it
handles an array argument instead of float arguments.
func -- version that takes and returns a single NumPy array.
"""
@uncertainties.set_doc("""\
Version of %s(...) that works even when its first argument is a NumPy
array that contains numbers with uncertainties.
Warning: elements of the first argument array that are not
AffineScalarFunc objects must not depend on uncertainties.Variable
objects in any way. Otherwise, the dependence of the result in
uncertainties.Variable objects will be incorrect.
Original documentation:
%s""" % (func.__name__, func.__doc__))
def wrapped_func(arr, *args):
# Nominal value:
arr_nominal_value = nominal_values(arr)
func_nominal_value = func(arr_nominal_value, *args)
# The algorithm consists in numerically calculating the derivatives
# of func:
# Variables on which the array depends are collected:
variables = set()
for element in arr.flat:
# floats, etc. might be present
if isinstance(element, uncertainties.AffineScalarFunc):
variables |= set(element.derivatives.iterkeys())
# If the matrix has no variables, then the function value can be
# directly returned:
if not variables:
return func_nominal_value
# Calculation of the derivatives of each element with respect
# to the variables. Each element must be independent of the
# others. The derivatives have the same shape as the output
# array (which might differ from the shape of the input array,
# in the case of the pseudo-inverse).
derivatives = numpy.vectorize(lambda _: {})(func_nominal_value)
for var in variables:
# A basic assumption of this package is that the user
# guarantees that uncertainties cover a zone where
# evaluated functions are linear enough. Thus, numerical
# estimates of the derivative should be good over the
# standard deviation interval. This is true for the
# common case of a non-zero standard deviation of var. If
# the standard deviation of var is zero, then var has no
# impact on the uncertainty of the function func being
# calculated: an incorrect derivative has no impact. One
# scenario can give incorrect results, however, but it
# should be extremely uncommon: the user defines a
# variable x with 0 standard deviation, sets y = func(x)
# through this routine, changes the standard deviation of
# x, and prints y; in this case, the uncertainty on y
# might be incorrect, because this program had no idea of
# the scale on which func() is linear, when it calculated
# the numerical derivative.
# The standard deviation might be numerically too small
# for the evaluation of the derivative, though: we set the
# minimum variable shift.
shift_var = max(var._std_dev/1e5, 1e-8*abs(var._nominal_value))
# An exceptional case is that of var being exactly zero.
# In this case, an arbitrary shift is used for the
# numerical calculation of the derivative. The resulting
# derivative value might be quite incorrect, but this does
# not matter as long as the uncertainty of var remains 0,
# since it is, in this case, a constant.
if not shift_var:
shift_var = 1e-8
# Shift of all the elements of arr when var changes by shift_var:
shift_arr = array_derivative(arr, var)*shift_var
# Origin value of array arr when var is shifted by shift_var:
shifted_arr_values = arr_nominal_value + shift_arr
func_shifted = func(shifted_arr_values, *args)
numerical_deriv = (func_shifted-func_nominal_value)/shift_var
# Update of the list of variables and associated
# derivatives, for each element:
for (derivative_dict, derivative_value) in (
zip(derivatives.flat, numerical_deriv.flat)):
if derivative_value:
derivative_dict[var] = derivative_value
        # numbers with uncertainties are built from the result:
return numpy.vectorize(uncertainties.AffineScalarFunc)(
func_nominal_value, derivatives)
# It is easier to work with wrapped_func, which represents a
# wrapped version of 'func', when it bears the same name as
# 'func' (the name is used by repr(wrapped_func)).
wrapped_func.__name__ = func.__name__
return wrapped_func
| 5,342,919
|
def fx_cmd_line_args_clone_prefix():
"""
Before: adds args_to_add to cmd line so can be accessed by ArgParsers
Sets 2 args for clone, a workflow to clone and a new type for the workflow
After: Set the cmd line args back to its original value
"""
original_cmd_line = copy.deepcopy(sys.argv)
args_to_add = [
"clone",
"-w", "mock_workflow_two", "mock_workflow_one",
"-m", "fn_main_mock_integration",
"-f", "mock_function_one",
"--rule", "Mock Manual Rule", "Mock: Auto Rule", "Mock Task Rule", "Mock Script Rule", "Mock Manual Rule Message Destination",
"-pre", "v2"
]
_add_to_cmd_line_args(args_to_add)
yield
sys.argv = original_cmd_line
| 5,342,920
|
def checkCrash(player, upperPipes, lowerPipes):
"""returns True if player collides with base or pipes."""
pi = player['index']
player['w'] = fImages['player'][0].get_width()
player['h'] = fImages['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= nBaseY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = fImages['pipe'][0].get_width()
pipeH = fImages['pipe'][0].get_height()
for uPipe in upperPipes:
# pipe rect
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
# player and pipe hitmasks
pHitMask = fHitMask['player'][pi]
uHitmask = fHitMask['pipe'][0]
# if bird collided with pipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
if uCollide:
# for fury mode we want to break the pipe so we
# must return which pipe is colliding (lower or upper)
if bFuryMode:
return [True, False, True, uPipe]
# normal mode
return [True, False]
for lPipe in lowerPipes:
# pipe rect
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and pipe hitmasks
pHitMask = fHitMask['player'][pi]
lHitmask = fHitMask['pipe'][0]
# if bird collided with pipe
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if lCollide:
# for fury mode we want to break the pipe so we
# must return which pipe is colliding (lower or upper)
if bFuryMode:
return [True, False, False, lPipe]
# normal mode
return [True, False]
return [False, False]
| 5,342,921
|
def alter_subprocess_kwargs_by_platform(**kwargs):
"""
Given a dict, populate kwargs to create a generally
useful default setup for running subprocess processes
on different platforms. For example, `close_fds` is
set on posix and creation of a new console window is
disabled on Windows.
This function will alter the given kwargs and return
the modified dict.
"""
kwargs.setdefault('close_fds', os.name == 'posix')
if os.name == 'nt':
CONSOLE_CREATION_FLAGS = 0 # Default value
# See: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863%28v=vs.85%29.aspx
CREATE_NO_WINDOW = 0x08000000
# We "or" them together
CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW
kwargs.setdefault('creationflags', CONSOLE_CREATION_FLAGS)
return kwargs
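
# Hedged usage sketch: the adjusted kwargs feed straight into subprocess.
import subprocess
import sys

_kwargs = alter_subprocess_kwargs_by_platform(stdout=subprocess.PIPE)
_proc = subprocess.Popen([sys.executable, '-c', 'print("hello")'], **_kwargs)
print(_proc.communicate()[0])  # the child's output; no console window on Windows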
| 5,342,922
|
def convert_gwp(context, qty, to):
"""Helper for :meth:`convert_unit` to perform GWP conversions."""
# Remove a leading 'gwp_' to produce the metric name
metric = context.split('gwp_')[1] if context else context
# Extract the species from *qty* and *to*, allowing supported aliases
species_from, units_from = extract_species(qty[1])
species_to, units_to = extract_species(to)
try:
# Convert using a (magnitude, unit) tuple with only units, and explicit
# input and output units
result = iam_units.convert_gwp(metric, (qty[0], units_from),
species_from, species_to)
except (AttributeError, ValueError):
# Missing *metric*, or *species_to* contains invalid units. pyam
# promises UndefinedUnitError in these cases. Use a subclass (above) to
# add a usage hint.
raise UndefinedUnitError(species_to) from None
except pint.DimensionalityError:
# Provide an exception with the user's inputs
raise pint.DimensionalityError(qty[1], to) from None
# Other exceptions are not caught and will pass up through convert_unit()
if units_to:
# Also convert the units
result = result.to(units_to)
else:
# *to* was only a species name. Provide units based on input and the
# output species name.
to = iam_units.format_mass(result, species_to, spec=':~')
return result, to
| 5,342,923
|
def appdataPath(appname):
"""
Returns the generic location for storing application data in a cross
platform way.
:return <str>
"""
# determine Mac OS appdata location
if sys.platform == 'darwin':
# credit: MHL
try:
from AppKit import NSSearchPathForDirectoriesInDomains
# NSApplicationSupportDirectory = 14
# NSUserDomainMask = 1
# True for expanding the tilde into a fully qualified path
basepath = NSSearchPathForDirectoriesInDomains(14, 1, True)
return os.path.join(basepath[0], appname)
except (ImportError, AttributeError, IndexError):
basepath = os.path.expanduser("~/Library/Application Support")
return os.path.join(basepath, appname)
# determine Windows OS appdata location
elif sys.platform == 'win32':
return os.path.join(os.environ.get('APPDATA'), appname)
# determine Linux OS appdata location
else:
return os.path.expanduser(os.path.join('~', '.' + appname))
| 5,342,924
|
def getAction(board, policy, action_set):
"""
return action for policy, chooses max from classifier output
"""
# if policy doesn't exist yet, choose action randomly, else get from policy model
if policy == None:
valid_actions = [i for i in action_set if i[0] > -1]
if len(valid_actions) == 0:
return (-1,-1,0)
rand_i = random.randint(0, len(valid_actions)-1)
# du_policy = [-12.63, 6.60, -9.22,-19.77,-13.08,-10.49,-1.61, -24.04]
# action = nextInitialMove(du_policy, board)
action = valid_actions[rand_i]
else:
piece = [0]*7 # one hot encode piece
piece[board.currentShape.shape -1] = 1
tot_features = np.append(board.getFeatures(), [piece])
action_scores = policy.predict([tot_features])
best_scores = np.argwhere(action_scores == np.amax(action_scores)).flatten().tolist()
max_score = np.random.choice(best_scores)
action = action_set[max_score]
return action
| 5,342,925
|
def f_assert_must_between(value_list, args):
"""
    Checks whether each element of the list is an integer/float (or numeric string) and lies within the range given by args.
    :param value_list: the list to check
    :param args: the range as a two-element list [min, max]
    :return: the original list (raises FeatureProcessError otherwise)
example:
:value_list [2, 2, 3]
:args [1,3]
:value_list ['-2', '-3', 3]
:args ['-5',3]
"""
assert len(args) == 2
for value in value_list:
if not (str(value).count('.') <= 1 and str(value).replace('.', '').lstrip('-').isdigit()
and float(args[0]) <= float(value) <= float(args[1])):
raise FeatureProcessError('%s f_assert_must_between %s Error' % (value_list, args))
return value_list
| 5,342,926
|
def annotate_muscle_zscore(raw, threshold=4, ch_type=None, min_length_good=0.1,
filter_freq=(110, 140), n_jobs=1, verbose=None):
"""Create annotations for segments that likely contain muscle artifacts.
Detects data segments containing activity in the frequency range given by
``filter_freq`` whose envelope magnitude exceeds the specified z-score
threshold, when summed across channels and divided by ``sqrt(n_channels)``.
False-positive transient peaks are prevented by low-pass filtering the
resulting z-score time series at 4 Hz. Only operates on a single channel
type, if ``ch_type`` is ``None`` it will select the first type in the list
``mag``, ``grad``, ``eeg``.
See :footcite:`Muthukumaraswamy2013` for background on choosing
``filter_freq`` and ``threshold``.
Parameters
----------
raw : instance of Raw
Data to estimate segments with muscle artifacts.
threshold : float
The threshold in z-scores for marking segments as containing muscle
activity artifacts.
ch_type : 'mag' | 'grad' | 'eeg' | None
The type of sensors to use. If ``None`` it will take the first type in
``mag``, ``grad``, ``eeg``.
min_length_good : float | None
The shortest allowed duration of "good data" (in seconds) between
adjacent annotations; shorter segments will be incorporated into the
        surrounding annotations. ``None`` is equivalent to ``0``.
Default is ``0.1``.
filter_freq : array-like, shape (2,)
The lower and upper frequencies of the band-pass filter.
Default is ``(110, 140)``.
%(n_jobs)s
%(verbose)s
Returns
-------
annot : mne.Annotations
Periods with muscle artifacts annotated as BAD_muscle.
scores_muscle : array
Z-score values averaged across channels for each sample.
References
----------
.. footbibliography::
"""
from scipy.stats import zscore
from scipy.ndimage import label
raw_copy = raw.copy()
if ch_type is None:
raw_ch_type = raw_copy.get_channel_types()
if 'mag' in raw_ch_type:
ch_type = 'mag'
elif 'grad' in raw_ch_type:
ch_type = 'grad'
elif 'eeg' in raw_ch_type:
ch_type = 'eeg'
else:
raise ValueError('No M/EEG channel types found, please specify a'
' ch_type or provide M/EEG sensor data')
logger.info('Using %s sensors for muscle artifact detection'
% (ch_type))
if ch_type in ('mag', 'grad'):
raw_copy.pick_types(meg=ch_type, ref_meg=False)
else:
ch_type = {'meg': False, ch_type: True}
raw_copy.pick_types(**ch_type)
raw_copy.filter(filter_freq[0], filter_freq[1], fir_design='firwin',
pad="reflect_limited", n_jobs=n_jobs)
raw_copy.apply_hilbert(envelope=True, n_jobs=n_jobs)
data = raw_copy.get_data(reject_by_annotation="NaN")
nan_mask = ~np.isnan(data[0])
sfreq = raw_copy.info['sfreq']
art_scores = zscore(data[:, nan_mask], axis=1)
art_scores = art_scores.sum(axis=0) / np.sqrt(art_scores.shape[0])
art_scores = filter_data(art_scores, sfreq, None, 4)
scores_muscle = np.zeros(data.shape[1])
scores_muscle[nan_mask] = art_scores
art_mask = scores_muscle > threshold
# return muscle scores with NaNs
scores_muscle[~nan_mask] = np.nan
# remove artifact free periods shorter than min_length_good
min_length_good = 0 if min_length_good is None else min_length_good
min_samps = min_length_good * sfreq
comps, num_comps = label(art_mask == 0)
for com in range(1, num_comps + 1):
l_idx = np.nonzero(comps == com)[0]
if len(l_idx) < min_samps:
art_mask[l_idx] = True
annot = _annotations_from_mask(raw_copy.times,
art_mask, 'BAD_muscle',
orig_time=raw.info['meas_date'])
_adjust_onset_meas_date(annot, raw)
return annot, scores_muscle
| 5,342,927
|
def circ_dist2(a, b):
"""Angle between two angles
"""
phi = np.e**(1j*a) / np.e**(1j*b)
ang_dist = np.arctan2(phi.imag, phi.real)
return ang_dist
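
# Quick check of the wrap-around behaviour (numpy imported as np, as above):
# 350 deg and 10 deg are 20 deg apart, not 340 deg.
print(np.degrees(circ_dist2(np.radians(350), np.radians(10))))  # ~ -20.0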
| 5,342,928
|
def print_hugo_install_instructions():
"""Prints out instructions on how to install Hugo
"""
click.secho(
'''
It appears that Hugo isn't installed, which is needed to
launch the preview server.
If you have Homebrew, you can install Hugo using "brew install hugo".
Otherwise, you can get the binary from here:
https://gohugo.io/getting-started/installing/.
After you have Hugo installed and can run "hugo -h" successfully, try
running "journal preview" again. If you run into any issues, reach out on
GitHub and we'll be happy to help.''',
fg='yellow')
| 5,342,929
|
def hinge_loss(positive_scores, negative_scores, margin=1.0):
"""
Pairwise hinge loss [1]:
loss(p, n) = \sum_i [\gamma - p_i + n_i]_+
[1] http://yann.lecun.com/exdb/publis/pdf/lecun-06.pdf
:param positive_scores: (N,) Tensor containing scores of positive examples.
:param negative_scores: (N,) Tensor containing scores of negative examples.
:param margin: Margin.
:return: Loss value.
"""
hinge_losses = tf.nn.relu(margin - positive_scores + negative_scores)
loss = tf.reduce_sum(hinge_losses)
return loss
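
# Hedged usage sketch (TensorFlow 2.x eager mode assumed): both pairs beat the
# margin of 1.0, so the summed hinge loss is zero.
pos = tf.constant([3.0, 2.5])
neg = tf.constant([1.0, 1.0])
print(hinge_loss(pos, neg))  # tf.Tensor(0.0, ...)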
| 5,342,930
|
def build(req):
"""Builder for this format.
Args:
req: flask request
Returns:
Json containing the creative data
"""
errors = []
v = {}
tdir = "/tmp/" + f.get_tmp_file_name()
index = get_html()
ext = f.get_ext(req.files["videofile"].filename)
if ext != "mp4":
return {"errors": ["Only mp4 files allowed"]}
f.save_file(req.files["videofile"], tdir + "/video.mp4")
v["backgroundColor"] = f.get_param("background_color")
v["autoclose"] = str(f.get_int_param("autoclose"))
return {"errors": errors, "dir": tdir, "index": index, "vars": v}
| 5,342,931
|
def add_s3(command_table, session, **kwargs):
"""
This creates a new service object for the s3 plugin. It sends the
old s3 commands to the namespace ``s3api``.
"""
utils.rename_command(command_table, 's3', 's3api')
command_table['s3'] = S3(session)
| 5,342,932
|
def reflect(old, centroid, new):
"""Reflect the old point around the centroid into the new point on the sphere.
Parameters
----------
old : SPHER_T
centroid : SPHER_T
new : SPHER_T
"""
x = old['x']
y = old['y']
z = old['z']
ca = centroid['cosaz']
sa = centroid['sinaz']
cz = centroid['coszen']
sz = centroid['sinzen']
new['x'] = (
2*ca*cz*sz*z
+ x*(ca*(-ca*cz**2 + ca*sz**2) - sa**2)
+ y*(ca*sa + sa*(-ca*cz**2 + ca*sz**2))
)
new['y'] = (
2*cz*sa*sz*z
+ x*(ca*sa + ca*(-cz**2*sa + sa*sz**2))
+ y*(-ca**2 + sa*(-cz**2*sa + sa*sz**2))
)
new['z'] = 2*ca*cz*sz*x + 2*cz*sa*sz*y + z*(cz**2 - sz**2)
fill_from_cart(new)
| 5,342,933
|
def search(query, data, metric='euclidean', verbose=True):
"""
do search, return ranked list according to distance
metric: hamming/euclidean
query: one query per row
dat: one data point per row
"""
#calc dist of query and each data point
if metric not in ['euclidean', 'hamming']:
print 'metric must be one of (euclidean, hamming)'
sys.exit(0)
#b=time.clock()
dist=scipy.spatial.distance.cdist(query,data,metric)
sorted_idx=np.argsort(dist,axis=1)
#e=time.clock()
if verbose:
#calc avg dist to nearest 200 neighbors
nearpoints=sorted_idx[:,0:200]
d=[np.mean(dist[i][nearpoints[i]]) for i in range(nearpoints.shape[0])]
sys.stdout.write('%.4f, '% np.mean(d))
#print 'search time %.4f' % (e-b)
return sorted_idx
| 5,342,934
|
def create_circle_widget(canvas: Canvas, x: int, y: int, color: str, circle_size: int):
"""create a centered circle on cell (x, y)"""
# in the canvas the 1st axis is horizontal and the 2nd is vertical
# we want the opposite so we flip x and y for the canvas
# to create an ellipsis, we give (x0, y0) and (x1, y1) that define the containing rectangle
pad = (CELL_SIZE - circle_size) / 2
i0 = 5 + y * CELL_SIZE + pad + 1
j0 = 5 + x * CELL_SIZE + pad + 1
i1 = 5 + (y + 1) * CELL_SIZE - pad
j1 = 5 + (x + 1) * CELL_SIZE - pad
return canvas.create_oval(i0, j0, i1, j1, fill=color, outline="")
| 5,342,935
|
def _format_program_counter_relative(state):
"""Program Counter Relative"""
program_counter = state.program_counter
operand = state.current_operand
if operand & 0x80 == 0x00:
near_addr = (program_counter + operand) & 0xFFFF
else:
near_addr = (program_counter - (0x100 - operand)) & 0xFFFF
return '${:04X}'.format(near_addr)
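
# Worked example: the operand is an 8-bit two's-complement branch offset, so
# 0xFB from PC 0x8000 lands 5 bytes back. SimpleNamespace stands in for the
# real state object here.
from types import SimpleNamespace

print(_format_program_counter_relative(
    SimpleNamespace(program_counter=0x8000, current_operand=0xFB)))  # '$7FFB'
print(_format_program_counter_relative(
    SimpleNamespace(program_counter=0x8000, current_operand=0x05)))  # '$8005'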
| 5,342,936
|
def compile_program(
program: PyTEAL, mode: Mode = Mode.Application, version: int = 5
) -> bytes:
"""Compiles a PyTEAL smart contract program to the TEAL binary code.
Parameters
----------
program
A function which generates a PyTEAL expression, representing an Algorand program.
mode
The mode with which to compile the supplied PyTEAL program.
version
The version with which to compile the supplied PyTEAL program.
Returns
-------
bytes
The TEAL compiled binary code.
"""
source = compileTeal(program(), mode=mode, version=version)
return _compile_source(source)
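
# Hedged usage sketch (assumes pyteal is installed; whatever _compile_source
# needs to produce bytecode, e.g. an algod client, is set up elsewhere in the
# module as above):
from pyteal import Approve, Expr

def approval_program() -> Expr:
    return Approve()  # trivially approve every transaction

binary = compile_program(approval_program, mode=Mode.Application, version=5)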
| 5,342,937
|
def fetch_url(url):
""" Fetches a URL and returns contents - use opener to support HTTPS. """
# Fetch and parse
logger.debug(u'Fetching %s', url)
# Use urllib2 directly for enabled SSL support (LXML doesn't by default)
timeout = 30
try:
opener = urllib2.urlopen(url, None, timeout)
# Fetch HTTP data in one batch, as handling the 'file-like' object to
# lxml results in thread-locking behaviour.
htmldata = opener.read()
    except (urllib2.URLError, urllib2.HTTPError):
        # These types of errors are non-fatal - but *should* be logged.
        logger.exception(u'HTTP Error for %s, returning None.',
url
)
return None
return htmldata
| 5,342,938
|
def predict(yolo_outputs, image_shape, anchors, class_names, obj_threshold, nms_threshold, max_boxes = 1000):
"""
Process the results of the Yolo inference to retrieve the detected bounding boxes,
the corresponding class label, and the confidence score associated.
The threshold value 'obj_threshold' serves to discard low confidence predictions.
The 'nms_threshold' value is used to discard duplicate boxes for a same object (IoU metric).
"""
# Init
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
total_boxes = []
total_box_scores = []
input_shape = tf.shape(yolo_outputs[0])[1 : 3] * 32
# Process output tensors
for i in range(len(yolo_outputs)):
# Get bboxes and associated scores
detected_boxes, box_scores = boxes_and_scores(yolo_outputs[i], anchors[anchor_mask[i]], len(class_names), input_shape, image_shape)
# Append bboxes and level of confidence to list
total_boxes.append(detected_boxes)
total_box_scores.append(box_scores)
# Concatenate results
total_boxes = tf.concat(total_boxes, axis=0)
total_box_scores = tf.concat(total_box_scores, axis=0)
#print('------------------------------------')
#print('Boxe scores', box_scores)
# Mask to filter out low confidence detections
    mask = total_box_scores >= obj_threshold
# Set boxes limit
max_boxes_tensor = tf.constant(max_boxes, dtype = tf.int32)
boxes_ = []
scores_ = []
classes_ = []
items_ = []
for c in range(len(class_names)):
# Get boxes labels
class_boxes = tf.boolean_mask(total_boxes, mask[:, c])
# Get associated score
class_box_scores = tf.boolean_mask(total_box_scores[:, c], mask[:, c])
# Concatenate label and score
item = [class_boxes, class_box_scores]
# Filter out duplicates when multiple boxes are predicted for a same object
nms_index = tf.image.non_max_suppression(class_boxes, class_box_scores, max_boxes_tensor, iou_threshold = nms_threshold)
# Remove the duplicates from the list of classes and scores
class_boxes = tf.gather(class_boxes, nms_index)
class_box_scores = tf.gather(class_box_scores, nms_index)
# Multiply score by class type
classes = tf.ones_like(class_box_scores, 'int32') * c
# Append results to lists
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
# Concatenate results
boxes_ = tf.concat(boxes_, axis = 0)
scores_ = tf.concat(scores_, axis = 0)
classes_ = tf.concat(classes_, axis = 0)
return boxes_, scores_, classes_
| 5,342,939
|
def get_event_details(entry, workday_user, demisto_user, days_before_hire_to_sync, days_before_hire_to_enable_ad,
deactivation_date_field, display_name_to_user_profile, email_to_user_profile,
employee_id_to_user_profile, source_priority):
"""
This function detects the event type and creates a dictionary which holds the event details.
If the event should not be created, None is returned.
Args:
entry: The employee's report entry.
workday_user: Workday user in XSOAR format.
demisto_user: The user profile in XSOAR.
deactivation_date_field: Deactivation date field - "lastdayofwork" or "terminationdate".
days_before_hire_to_sync: Number of days before hire date to sync hires, -1 if should sync instantly.
days_before_hire_to_enable_ad: Number of days before hire date to enable Active Directory account,
-1 if should sync instantly.
display_name_to_user_profile: A dictionary that maps display names to user profile indicators in XSOAR.
email_to_user_profile: A dictionary that maps email addresses to user profile indicators in XSOAR.
employee_id_to_user_profile: A dictionary that maps employee ids to user profile indicators in XSOAR.
source_priority: The source priority number.
Returns:
event: The event details.
"""
user_email = workday_user.get(EMAIL_ADDRESS_FIELD)
changed_fields = get_profile_changed_fields_str(demisto_user, workday_user)
demisto.debug(f'{changed_fields=}') # type: ignore
if not has_reached_threshold_date(days_before_hire_to_sync, workday_user) \
or new_hire_email_already_taken(workday_user, demisto_user, email_to_user_profile) \
or is_report_missing_required_user_data(workday_user) \
or not is_valid_source_of_truth(demisto_user, source_priority) \
or is_event_processed(demisto_user):
return None
if is_new_hire_event(demisto_user, workday_user, deactivation_date_field):
event_type = NEW_HIRE_EVENT_TYPE
event_details = 'The user has been hired.'
elif is_ad_activation_event(demisto_user, workday_user, days_before_hire_to_enable_ad):
event_type = ACTIVATE_AD_EVENT_TYPE
event_details = 'Active Directory user account was enabled.'
elif is_ad_deactivation_event(demisto_user, workday_user, days_before_hire_to_enable_ad, source_priority):
event_type = DEACTIVATE_AD_EVENT_TYPE
event_details = 'Active Directory user account was disabled due to hire date postponement.'
elif is_rehire_event(demisto_user, workday_user, changed_fields):
event_type = REHIRE_USER_EVENT_TYPE
event_details = 'The user has been rehired.'
elif is_termination_event(workday_user, demisto_user, deactivation_date_field):
event_type = TERMINATE_USER_EVENT_TYPE
event_details = 'The user has been terminated.'
elif is_update_event(workday_user, changed_fields):
event_type = UPDATE_USER_EVENT_TYPE
event_details = f'The user has been updated:\n{changed_fields}'
workday_user[OLD_USER_DATA_FIELD] = demisto_user
if demisto_user.get(SOURCE_PRIORITY_FIELD) != source_priority:
workday_user[CONVERSION_HIRE_FIELD] = True
event_details = f'A conversion hire was detected:\n{changed_fields}'
else:
demisto.debug(f'Could not detect changes in report for user with email address {user_email} - skipping.')
return None
if is_tufe_user(demisto_user) and event_type != REHIRE_USER_EVENT_TYPE:
return None
if is_display_name_already_taken(demisto_user, workday_user, display_name_to_user_profile) \
and event_type in [NEW_HIRE_EVENT_TYPE, REHIRE_USER_EVENT_TYPE, UPDATE_USER_EVENT_TYPE]:
event_details = f'Detected an "{event_type}" event, but display name already exists. Please review.'
if changed_fields:
event_details += f'\n{changed_fields}'
event_type = DEFAULT_INCIDENT_TYPE
entry[USER_PROFILE_INC_FIELD] = workday_user
return {
'name': user_email,
'rawJSON': json.dumps(entry),
'type': event_type,
'details': event_details
}
| 5,342,940
|
def test_qamats_gadol_next_accent():
"""`qamats` with first accent on syllable is `qamats-gadol` (qamats-gadol-next-accent)"""
word = r"אָז֩" # az (Leviticus 26:34)
parts = ["alef", "qamats-gadol", "zayin"]
assert parts == Parser().parse(word).flat()
| 5,342,941
|
def kill_test_logger(logger):
"""Cleans up a test logger object by removing all of its handlers.
Args:
logger: The logging object to clean up.
"""
for h in list(logger.handlers):
logger.removeHandler(h)
if isinstance(h, logging.FileHandler):
h.close()
| 5,342,942
|
def ae_model(inputs, train=True, norm=True, **kwargs):
"""
AlexNet model definition as defined in the paper:
https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf
You will need to EDIT this function. Please put your AlexNet implementation here.
Note:
1.) inputs['images'] is a [BATCH_SIZE x HEIGHT x WIDTH x CHANNELS] array coming
from the data provider.
2.) You will need to return 'output' which is a dictionary where
- output['pred'] is set to the output of your model
- output['conv1'] is set to the output of the conv1 layer
- output['conv1_kernel'] is set to conv1 kernels
- output['conv2'] is set to the output of the conv2 layer
- output['conv2_kernel'] is set to conv2 kernels
- and so on...
The output dictionary should include the following keys for AlexNet:
['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'pool1',
'pool2', 'pool5', 'fc6', 'fc7', 'fc8']
as well as the respective ['*_kernel'] keys for the kernels
3.) Set your variable scopes to the name of the respective layers, e.g.
with tf.variable_scope('conv1'):
outputs['conv1'] = ...
outputs['pool1'] = ...
and
with tf.variable_scope('fc6'):
outputs['fc6'] = ...
and so on.
4.) Use tf.get_variable() to create variables, while setting name='weights'
for each kernel, and name='bias' for each bias for all conv and fc layers.
For the pool layers name='pool'.
These steps are necessary to correctly load the pretrained alexnet model
from the database for the second part of the assignment.
"""
# propagate input targets
outputs = inputs
# dropout = .5 if train else None
input_to_network = inputs['images']
outputs['input'] = input_to_network
with tf.variable_scope('conv'):
outputs['relu'], outputs['conv_kernel'] = get_conv(input_to_network,[7,7,3,64],16)
with tf.variable_scope('deconv'):
outputs['deconv'] = get_deconv(outputs['relu'],[12,12,3,64],12,input_to_network.shape)
# shape = input_to_network.get_shape().as_list()
# stride = 16
# hidden_size = 2
# deconv_size = 12
# ### YOUR CODE HERE
# outputs['input'] = input_to_network
# conv_layer = K.layers.Conv2D(64,7,strides=(stride,stride),
# padding='same',
# kernel_initializer='glorot_normal')
# outputs['conv_kernel'] = conv_layer
# outputs['conv'] = conv_layer(input_to_network)
# outputs['relu'] = K.layers.Activation('relu')(outputs['conv'])
# outputs['deconv'] = K.layers.Conv2DTranspose(3,deconv_size,
# deconv_size,padding='valid',
# kernel_initializer='glorot_normal')(outputs['relu'])
### END OF YOUR CODE
for k in ['deconv']:
assert k in outputs, '%s was not found in outputs' % k
return outputs, {}
| 5,342,943
|
def filter_by_country(data, country=DEFAULT_COUNTRY):
"""
Filter provided data by country (defaults to Czechia).
data: pandas.DataFrame
country: str
"""
# Filter data by COUNTRY
return data[data[COLUMN_FILTER] == country]
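
# Hedged usage sketch (pandas imported as pd; COLUMN_FILTER is the module
# constant naming the country column):
_df = pd.DataFrame({COLUMN_FILTER: ['Czechia', 'Germany'], 'cases': [10, 20]})
print(filter_by_country(_df, country='Czechia'))  # keeps only the Czechia row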
| 5,342,944
|
def draw_soil_maps():
"""make four-panel map of soil COS fluxes and resulting drawdown
The four panels show soil COS surface fluxes (top row) and the
resulting STEM-simulated drawdowns (bottom row) for both the
Whelan-Kettle "hybrid" fluxes (left panels) as well as Kettle et
al (2002) fluxes (right panels).
"""
maps_soil = MapPanel(nrows=2, ncols=2)
fcos_mean, fcos_total = map_grid.get_JulAug_total_flux(
'fCOS', ('Fsoil_Kettle', 'Fsoil_Hybrid5Feb'))
all_vals = np.vstack((fcos_mean['Fsoil_Kettle'],
fcos_mean['Fsoil_Hybrid5Feb'])).flatten()
vmin, vmax = np.nanpercentile(all_vals, (1, 99))
cb_midpt = 0.0
maps_soil.draw_map(fcos_mean['Fsoil_Kettle'],
map_axis_idx=(0, 0),
label_lat=True,
vmin=vmin,
vmax=vmax,
midpoint=cb_midpt,
bands_above_mdpt=5,
bands_below_mdpt=5,
cmap=plt.get_cmap('BrBG_r'),
cbar_t_str='COS [pmol m$^{-2}$ s$^{-1}$]',
extend='both',
panel_lab='a')
maps_soil.draw_map(fcos_mean['Fsoil_Hybrid5Feb'],
map_axis_idx=(0, 1),
vmin=vmin, vmax=vmax,
midpoint=cb_midpt,
bands_above_mdpt=5,
bands_below_mdpt=5,
cmap=plt.get_cmap('BrBG_r'),
cbar_t_str='COS [pmol m$^{-2}$ s$^{-1}$]',
extend='both',
panel_lab='b')
aqc, dd_soil_whelan, raw_data = get_COS_concentration('Fsoil_Hybrid5Feb')
aqc, dd_soil_kettle, raw_data = get_COS_concentration('Fsoil_Kettle')
all_vals = np.vstack((dd_soil_whelan,
dd_soil_kettle)).flatten()
vmin, vmax = np.nanpercentile(all_vals, (1, 99))
cb_midpt = 0.0
maps_soil.draw_map(dd_soil_kettle,
map_axis_idx=(1, 0),
label_lon=True,
vmin=vmin, vmax=vmax,
midpoint=cb_midpt,
bands_above_mdpt=5,
bands_below_mdpt=5,
cmap=plt.get_cmap('PuOr'),
cbar_t_str='COS [ppt]',
extend='both',
panel_lab='c')
maps_soil.draw_map(dd_soil_whelan,
map_axis_idx=(1, 1),
label_lat=True,
label_lon=True,
vmin=vmin,
vmax=vmax,
midpoint=cb_midpt,
bands_above_mdpt=5,
bands_below_mdpt=5,
cmap=plt.get_cmap('PuOr'),
cbar_t_str='COS [ppt]',
extend='both',
panel_lab='d')
maps_soil.save(fname=os.path.join(os.getenv('HOME'),
'plots', 'map_soil.pdf'))
| 5,342,945
|
def get_instance_id() -> str:
"""Returns the AWS instance id where this is running or "local"."""
global INSTANCE_ID
if INSTANCE_ID is None:
if get_env_variable("RUNNING_IN_CLOUD") == "True":
@retry(stop_max_attempt_number=3)
def retrieve_instance_id():
return requests.get(os.path.join(METADATA_URL, "instance-id")).text
INSTANCE_ID = retrieve_instance_id()
else:
INSTANCE_ID = "local"
return INSTANCE_ID
| 5,342,946
|
def test_array__sized_dump_too_small__unsized_iterable():
"""Crash if writing a generator with too few values."""
field = fields.Array(fields.Int32(), count=100)
with pytest.raises(errors.ArraySizeError) as err:
field.to_bytes(x for x in range(6))
assert err.value.n_expected == 100
assert err.value.n_given == 6
| 5,342,947
|
def scans_from_csvs(*inps, names=None):
"""
Read from csvs.
:param inps: file names of the csvs
:param names: names of the Scans
:return: list of Scans
"""
ns, temp_vals, heat_flow_vals = read_csvs(inps)
names = ns if names is None else names
return [Scan(*vals) for vals in zip(temp_vals, heat_flow_vals, names)]
| 5,342,948
|
def part_b(puzzle_input):
"""
Calculate the answer for part_b.
Args:
puzzle_input (list): Formatted as the provided input from the website.
Returns:
string: The answer for part_b.
"""
return str(collect_letters(puzzle_input)[1])
| 5,342,949
|
def main():
"""Driver Function."""
parser = argparse.ArgumentParser(
description="ARMA Plotting Script for EPRI Data"
)
parser.add_argument(
"-t",
"--add-tables",
action="store_true",
help="Add descriptive tables to plot margin.",
)
parser.add_argument(
"-e",
"--embed-xml",
action="store_true",
help="Embed current xml file into image output."
)
parser.add_argument(
'xml',
type=Path,
help="Path to RAVEN input file"
)
# Add subcommands to this tool
subparsers = parser.add_subparsers(dest="subparser")
single_plot_parser = subparsers.add_parser("single-plot")
single_plot_parser.add_argument("function", type=str)
single_plot_parser.add_argument("--year", required=False, default=2025, type=int)
extract_xml_parser = subparsers.add_parser("extract-xml")
extract_xml_parser.add_argument("png", type=Path)
args = parser.parse_args()
# Add parent directory so we can use it throughout the script.
args.parent_xml = args.xml.resolve().parent
if args.subparser == "extract-xml":
extract_xml(args.png)
elif args.subparser == "single-plot":
create_single_plot(args)
else:
create_multiyear_plots(args)
| 5,342,950
|
def data_encoder(data):
"""
Encode all categorical values in the dataframe into numeric values.
@param data: the original dataframe
@return data: the same dataframe with all categorical variables encoded
"""
le = preprocessing.LabelEncoder()
cols = data.columns
numcols = data._get_numeric_data().columns
catecols = list(set(cols) - set(numcols))
data[catecols] = data[catecols].astype(str).apply(le.fit_transform)
return data
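
# Hedged usage sketch (pandas imported as pd; sklearn.preprocessing imported
# as in the function above): only the categorical column is re-coded.
_df = pd.DataFrame({'color': ['red', 'blue', 'red'], 'size': [1, 2, 3]})
print(data_encoder(_df))  # 'color' becomes integer codes, 'size' is untouched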
| 5,342,951
|
def _search_settings(method_settings_keys, settings):
"""
We maintain a dictionary of dimensionality reduction methods
in dim_settings_keys where each key (method) stores another
dictionary (md) holding that method's settings (parameters).
The keys of md are component ids and the values are parameter
names that will be passed to dim_reduce.
For example, dim_settings_keys['dim-PCA'] holds a dictionary
dim_pca_settings_keys = {
'dim-PCA-n-components': 'n_components',
'dim-PCA-whiten': 'whiten',
'dim-PCA-solver': 'svd_solver',
'dim-PCA-random-state': 'random_state'
}
where the keys (dim-PCA-key) is the widget id and the value is the
parameter name to pass to sklearn's PCA.
Parameters
__________
method_settings_keys: dict
        Dictionary holding setting ids and parameter names.
settings: tuple of list of dicts of ...
Holds all children in the method-settings elements. This
is a mixture of lists, tuples, and dicts. We recursively search
this element to find the children with id's as determined by
dim_pca_settings_keys[dim_method]. By doing this we avoid having
to write new Input elements into our callbacks every time we add
a new setting. All that needs to be done is add the setting's
id into the settings_keys dict and it will be parsed automatically.
"""
kwargs = {}
for key in method_settings_keys:
child = next(_recur_search(settings, key))
# if there exists a component with 'key' as its 'id'
# then child should never be None. 'value' may be missing
# if not manually specified when constructing the widget.
if child is None or 'value' not in child:
raise InternalError("'value' key not found in child.")
kwargs[method_settings_keys[key]] = child['value']
return kwargs
| 5,342,952
|
def svn_opt_resolve_revisions(*args):
"""
svn_opt_resolve_revisions(svn_opt_revision_t peg_rev, svn_opt_revision_t op_rev,
svn_boolean_t is_url, svn_boolean_t notice_local_mods,
apr_pool_t pool) -> svn_error_t
"""
return _core.svn_opt_resolve_revisions(*args)
| 5,342,953
|
def write_midi(
path: Union[str, Path],
music: "Music",
backend: str = "mido",
**kwargs: Any
):
"""Write a Music object to a MIDI file.
Parameters
----------
path : str or Path
Path to write the MIDI file.
music : :class:`muspy.Music`
Music object to write.
backend: {'mido', 'pretty_midi'}
Backend to use. Defaults to 'mido'.
"""
if backend == "mido":
return write_midi_mido(path, music, **kwargs)
if backend == "pretty_midi":
return write_midi_pretty_midi(path, music)
raise ValueError("`backend` must by one of 'mido' and 'pretty_midi'.")
| 5,342,954
|
def find_star_column(file, column_type, header_length) :
""" For an input .STAR file, search through the header and find the column numbers assigned to a given column_type (e.g. 'rlnMicrographName', ...)
"""
with open(file, 'r') as f :
line_num = 0
for line in f :
line_num += 1
# extract column number for micrograph name
if column_type in line :
                # join all digit characters so multi-digit columns (e.g. #12) parse correctly
                column_num = int(''.join(c for c in line.split()[1] if c in string.digits))
# search header and no further to find setup values
if line_num >= header_length :
if VERBOSE:
# print("Read though header (%s lines total)" % header_length)
print("Column value for %s is %d" % (column_type, column_num))
return column_num
| 5,342,955
|
def recalculate_bb(df, customization_dict, image_dir):
"""After resizing images, bb coordinates are recalculated.
Args:
df (Dataframe): A df for image info.
customization_dict (dict): Resize dict.
image_dir (list): Image path list
Returns:
Dataframe: Updated dataframe.
"""
img = cv2.imread(image_dir[0])
h, w, _ = img.shape
new_width = customization_dict['width']
new_height = customization_dict['height']
w_ratio = new_width/w
h_ratio = new_height/h
df['x_min'] = df['x_min']*w_ratio
df['x_max'] = df['x_max']*w_ratio
df['y_min'] = df['y_min']*h_ratio
df['y_max'] = df['y_max']*h_ratio
df.x_min = df.x_min.astype("int16")
df.x_max = df.x_max.astype("int16")
df.y_min = df.y_min.astype("int16")
df.y_max = df.y_max.astype("int16")
return df
| 5,342,956
|
def free_port():
"""Returns a free port on this host
"""
return get_free_port()
| 5,342,957
|
def gen_treasure_img():
""" Get National Treasure values and Tweet its image """
if len(db_schedule.get(
'date == "{}" AND category == "TD"'.format(
datetime.today().strftime('%d/%m/%Y')))) == 0:
status, treasure = Treasure().buy()
if status == 200:
image = TreasureImg()
image.add_title(
100, 'Confira os preços e taxas de hoje no tesouro direto')
img_status = image.render(treasure)
if img_status == 200:
tweet.send_media(
'./assets/treasure.png',
'Confira as taxas do Tesouro Direto'
)
db_schedule.insert(
datetime.today().strftime('%d/%m/%Y'),
'TD',
)
| 5,342,958
|
def test_unregister_non_existing_file_obj(psql_fixture): # noqa: F811
"""Test unregistering not existing file object and expect corresponding error."""
non_existing_file_obj = psql_fixture.non_existing_file_infos[0]
with pytest.raises(DrsObjectNotFoundError):
psql_fixture.database.unregister_drs_object(non_existing_file_obj.file_id)
| 5,342,959
|
async def websocket_lovelace_update_card(hass, connection, msg):
"""Receive lovelace card config over websocket and save."""
error = None
try:
await hass.async_add_executor_job(
update_card, hass.config.path(LOVELACE_CONFIG_FILE),
msg['card_id'], msg['card_config'], msg.get('format', FORMAT_YAML))
message = websocket_api.result_message(
msg['id']
)
except FileNotFoundError:
error = ('file_not_found',
'Could not find ui-lovelace.yaml in your config dir.')
except UnsupportedYamlError as err:
error = 'unsupported_error', str(err)
except CardNotFoundError as err:
error = 'card_not_found', str(err)
except HomeAssistantError as err:
error = 'save_error', str(err)
if error is not None:
message = websocket_api.error_message(msg['id'], *error)
connection.send_message(message)
| 5,342,960
|
def justify_to_box(
boxstart: float,
boxsize: float,
itemsize: float,
just: float = 0.0) -> float:
"""
    Justifies an item within a box: returns the start coordinate of an item of
    size itemsize placed in a box that starts at boxstart and has size boxsize,
    where just = 0.0 aligns to the start, 0.5 centres, and 1.0 aligns to the end.
"""
return boxstart + (boxsize - itemsize) * just
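# Quick worked examples (illustrative): place an item of size 20 in a box that
# starts at 10 and is 100 wide.
assert justify_to_box(10, 100, 20, just=0.0) == 10.0   # flush with the box start
assert justify_to_box(10, 100, 20, just=0.5) == 50.0   # centred: 10 + 80 * 0.5
assert justify_to_box(10, 100, 20, just=1.0) == 90.0   # flush with the box end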
| 5,342,961
|
def newton(oracle, x_0, tolerance=1e-5, max_iter=100,
line_search_options=None, trace=False, display=False):
"""
Newton's optimization method.
Parameters
----------
oracle : BaseSmoothOracle-descendant object
Oracle with .func(), .grad() and .hess() methods implemented for computing
function value, its gradient and Hessian respectively. If the Hessian
returned by the oracle is not positive-definite method stops with message="newton_direction_error"
x_0 : np.array
Starting point for optimization algorithm
tolerance : float
Epsilon value for stopping criterion.
max_iter : int
Maximum number of iterations.
line_search_options : dict, LineSearchTool or None
Dictionary with line search options. See LineSearchTool class for details.
trace : bool
If True, the progress information is appended into history dictionary during training.
Otherwise None is returned instead of history.
display : bool
If True, debug information is displayed during optimization.
Returns
-------
x_star : np.array
The point found by the optimization procedure
message : string
'success' or the description of error:
- 'iterations_exceeded': if after max_iter iterations of the method x_k still doesn't satisfy
the stopping criterion.
- 'newton_direction_error': in case of failure of solving linear system with Hessian matrix (e.g. non-invertible matrix).
- 'computational_error': in case of getting Infinity or None value during the computations.
history : dictionary of lists or None
Dictionary containing the progress information or None if trace=False.
Dictionary has to be organized as follows:
- history['time'] : list of floats, containing time passed from the start of the method
- history['func'] : list of function values f(x_k) on every step of the algorithm
            - history['grad_norm'] : list of Euclidean norms ||g(x_k)|| of the gradient on every step of the algorithm
- history['x'] : list of np.arrays, containing the trajectory of the algorithm. ONLY STORE IF x.size <= 2
Example:
--------
>> oracle = QuadraticOracle(np.eye(5), np.arange(5))
>> x_opt, message, history = newton(oracle, np.zeros(5), line_search_options={'method': 'Constant', 'c': 1.0})
>> print('Found optimal point: {}'.format(x_opt))
Found optimal point: [ 0. 1. 2. 3. 4.]
"""
if tolerance <= 0.:
tolerance = 1e-32
history = defaultdict(list) if trace else None
line_search_tool = get_line_search_tool(line_search_options)
x = np.copy(x_0)
start_time = time.time()
grad_norm_0 = np.linalg.norm(oracle.grad(x))
def get_alpha(x, d):
if line_search_tool.is_correct(oracle, x, d, 1.):
return 1.
return line_search_tool.line_search(oracle, x, d)
def has_nans(*args):
for arg in args:
if np.isnan(arg).any() or np.isinf(arg).any():
return True
return False
for _ in range(max_iter):
func = oracle.func(x)
grad = oracle.grad(x)
hess = oracle.hess(x)
grad_norm = np.linalg.norm(grad)
if has_nans(func, grad, hess):
return x, 'computational_error', history
if grad_norm ** 2 <= tolerance * (grad_norm_0 ** 2):
            _log_if_needed(display, "Newton's method done, x =", x, 'f(x) =', func)
_fill_history_if_needed(history, func, grad_norm, x, start_time)
return x, 'success', history
# could be saddle point, and we can try to use solve_saddle implemented above
try:
c, low = cho_factor(hess)
if has_nans(c, low):
return x, 'computational_error', history
d = cho_solve((c, low), -grad)
        except Exception:
_log_if_needed(display, 'Failure of solving linear system with Hessian matrix')
return x, 'newton_direction_error', history
alpha = get_alpha(x, d)
if alpha is None:
return x, 'computational_error', history
_fill_history_if_needed(history, func, grad_norm, x, start_time)
x = x + alpha * d
_fill_history_if_needed(history, oracle.func(x), np.linalg.norm(oracle.grad(x)), x, start_time)
return do_check_result(oracle, x, tolerance, grad_norm_0, history, display)
| 5,342,962
|
def generate_hostname(domain, hostname):
"""If hostname defined, returns FQDN.
If not, returns FQDN with base32 timestamp.
"""
# Take time.time() - float, then:
# - remove period
# - truncate to 17 digits
    # - if the last digits happen to be 0 they are dropped from the string
    #   representation, making it shorter, so pad it with 0.
#
# The result ensures that timestamp is 17 char length and is increasing.
timestamp = str(time.time()).replace('.', '')[:17].ljust(17, '0')
b32time = aws.int2str(number=int(timestamp), base=32)
if hostname[-1] == '-':
hostname = '{}{}'.format(hostname, '{time}')
return '{}.{}'.format(hostname.format(time=b32time), domain)
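# Hedged illustration (behaviour inferred from the comment above, not from the
# original tests): the timestamp string is always normalised to exactly 17 digits
# before being base32-encoded via aws.int2str.
import time
ts = str(time.time()).replace('.', '')[:17].ljust(17, '0')
assert len(ts) == 17 and ts.isdigit()
# e.g. generate_hostname('example.com', 'node-') -> 'node-<b32 timestamp>.example.com'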
| 5,342,963
|
def symmetrize(M):
"""Return symmetrized version of square upper/lower triangular matrix."""
return M + M.T - np.diag(M.diagonal())
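# Quick check (illustrative): an upper-triangular matrix becomes symmetric while
# the diagonal is kept only once.
import numpy as np
U = np.array([[1., 2.],
              [0., 3.]])
assert np.array_equal(symmetrize(U), np.array([[1., 2.],
                                               [2., 3.]]))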
| 5,342,964
|
def process_checksums_get(storage_share, hash_type, url):
"""Run StorageShare get_object_checksum() method to get checksum of file/object.
Run StorageShare get_object_checksum() method to get the requested type of
checksum for file/object whose URL is given.
    The client also needs to specify the type of checksum and the URL of the file/object.
If the StorageShare does not support the method, the client will get an
error message and the process will exit.
Arguments:
storage_share -- dynafed_storagestats StorageShare object.
    hash_type -- string that indicates the type of hash requested.
url -- string containing url to the desired file/object.
Returns:
String containing checksum or 'None'.
"""
try:
_checksum = storage_share.get_object_checksum(hash_type, url)
except dynafed_storagestats.exceptions.ChecksumWarningMissingChecksum as WARN:
_logger.warning("[%s]%s", storage_share.id, WARN.debug)
return None
except AttributeError as ERR:
_logger.error(
"[%s]Checksum GET operation not supported for %s. %s",
storage_share.id,
storage_share.storageprotocol,
ERR
)
print(
"[ERROR][%s]Checksum GET operation not supported %s. %s" % (
storage_share.id,
storage_share.storageprotocol,
ERR
), file=sys.stderr
)
sys.exit(1)
else:
return _checksum
| 5,342,965
|
def train_model(model_path, epoch):
"""
Train the specified model.
:param model_path: The path to save model.
:param epoch: Number of iterations to train model.
:return: Trained model.
"""
train_path = input("Please input the path of training data: ")
if train_path[-1] != '/':
train_path += '/'
test_path = input("Please input the path of test data: ")
if test_path[-1] != '/':
test_path += '/'
writer = SummaryWriter(comment="ResNet")
# get training Dataset
dataset = get_dataset(train_path)
# get training DataLoader
train_loader = torch.utils.data.dataloader.DataLoader(dataset, batch_size=64)
# get test Dataset
dataset = get_dataset(test_path)
# get test DataLoader
test_loader = torch.utils.data.dataloader.DataLoader(dataset, batch_size=256)
criterion = nn.CrossEntropyLoss().cuda()
model = resnet_34().cuda()
# the input of tensorboard
temp = torch.rand(32, 3, 224, 224).cuda()
writer.add_graph(model, (temp,))
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
best_accuracy = 0.0
step = 0
for i in range(epoch):
print("epoch:", i)
model.train()
for j, data in enumerate(train_loader):
x, y = data
x = x.cuda()
y = y.cuda()
            # torch.autograd.Variable is deprecated; tensors can be fed to the model directly
            prediction = model(x)
            loss = criterion(prediction, y.long())
optimizer.zero_grad()
loss.backward()
optimizer.step()
# save loss and epoch
writer.add_scalar("Loss", loss, step)
step += 1
print('--------Validation--------')
correct = torch.zeros(1).squeeze().cuda()
total = torch.zeros(1).squeeze().cuda()
model.eval()
with torch.no_grad():
for j, data in enumerate(test_loader):
x, y = data
x = x.cuda()
y = y.cuda()
output = model(x)
prediction = torch.argmax(output, 1)
correct += (prediction == y.long()).sum().float()
total += len(y)
accuracy = (correct / total).cpu().item()
writer.add_scalar("Accuracy", accuracy, i)
if accuracy > best_accuracy:
# save best model
torch.save(model, "".join(model_path.split('.')[:-1]) + "_best.pth")
best_accuracy = accuracy
torch.save(model, model_path)
writer.close()
| 5,342,966
|
def test_parser_nested_let_assign():
"""
To validate the parser solves nested let.
"""
parser = Parser(Lexer(Scanner("let:\n let:\n a <- 1 + 2\n b <- a * 3\n\n")))
assign = parser()
assert assign.name == NodeType.Let
assert str(assign) == "Let(Let(MutableAssign(Name(a), Add(Num(1), Num(2)))), MutableAssign(Name(b), Mul(Name(a), Num(3))))"
parser = Parser(Lexer(Scanner("let:\n let:\n a <- 1 + 2\n\n b <- a * 3\n\n")))
assign = parser()
assert assign.name == NodeType.Let
assert str(assign) == "Let(Let(MutableAssign(Name(a), Add(Num(1), Num(2)))), MutableAssign(Name(b), Mul(Name(a), Num(3))))"
| 5,342,967
|
def test_fake_corrupt_json_file(tmpdir):
""" Creates a bad JSON file and tests the code responds properly"""
try:
d = tmpdir.mkdir("./testdir")
bad_json = d.join("bad_json.txt")
bad_json.write('{"test": }')
filename = os.path.join(bad_json.dirname, bad_json.basename)
json.load(open(filename))
except json.decoder.JSONDecodeError as exc:
print('[INFO] Corrupt JSON file found.')
pass
| 5,342,968
|
def prep_public_water_supply_fraction() -> pd.DataFrame:
"""calculates public water supply deliveries for the commercial and industrial sectors individually
as a ratio to the sum of public water supply deliveries to residential end users and thermoelectric cooling.
Used in calculation of public water supply demand to commercial and industrial sectors.
:return: DataFrame of public water supply ratios for commercial and industrial sector.
"""
# read in data
df = prep_water_use_1995(variables=['FIPS', 'State', 'PS-DelDO', 'PS-DelPT', 'PS-DelCO', 'PS-DelIN'])
df_loc = prep_water_use_2015() # prepared list of 2015 counties with FIPS codes
# calculate ratio of commercial pws to sum of domestic and thermoelectric cooling pws
df['com_pws_fraction'] = np.where((df['PS-DelDO'] + df['PS-DelPT'] <= 0),
np.nan, (df['PS-DelCO'] / (df['PS-DelDO'] + df['PS-DelPT'])))
# calculate ratio of industrial pws to sum of domestic and thermoelectric cooling pws
df["ind_pws_fraction"] = np.where(((df['PS-DelDO'] + df['PS-DelPT']) <= 0),
np.nan, df['PS-DelIN'] / (df['PS-DelDO'] + df['PS-DelPT']))
# reduce dataframe
df = df[['FIPS', 'State', 'com_pws_fraction', 'ind_pws_fraction']]
# fill counties with 0 commercial or industrial public water supply ratios with state averages
df_mean = df.groupby('State', as_index=False).mean()
rename_list = df_mean.columns[1:].to_list()
for col in rename_list:
new_name = f"{col}_state"
df_mean = df_mean.rename(columns={col: new_name})
df_mean_all = pd.merge(df, df_mean, how='left', on=['State'])
    # fill counties with missing consumption fractions using the state average
rep_list = df.columns[2:].to_list()
for col in rep_list:
mean_name = f"{col}_state"
df_mean_all[col].fillna(df_mean_all[mean_name], inplace=True)
# reduce dataframe to required output
df_output = df_mean_all[['FIPS', 'State', 'com_pws_fraction', 'ind_pws_fraction']]
# merge with full list of counties from 2015 water data
df_output = pd.merge(df_loc, df_output, how='left', on=['FIPS', 'State'])
return df_output
| 5,342,969
|
def box3d_overlap_kernel(boxes,
qboxes,
rinc,
criterion=-1,
z_axis=1,
z_center=1.0):
"""
z_axis: the z (height) axis.
z_center: unified z (height) center of box.
"""
N, K = boxes.shape[0], qboxes.shape[0]
for i in range(N):
for j in range(K):
if rinc[i, j] > 0:
min_z = min(
boxes[i, z_axis] + boxes[i, z_axis + 3] * (1 - z_center),
qboxes[j, z_axis] + qboxes[j, z_axis + 3] * (1 - z_center))
max_z = max(
boxes[i, z_axis] - boxes[i, z_axis + 3] * z_center,
qboxes[j, z_axis] - qboxes[j, z_axis + 3] * z_center)
iw = min_z - max_z
if iw > 0:
area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
inc = iw * rinc[i, j]
if criterion == -1:
ua = (area1 + area2 - inc)
elif criterion == 0:
ua = area1
elif criterion == 1:
ua = area2
else:
ua = 1.0
rinc[i, j] = inc / ua
else:
rinc[i, j] = 0.0
| 5,342,970
|
def set_matchq_in_constraint(a, cons_index):
"""
Takes care of the case, when a pattern matching has to be done inside a constraint.
"""
lst = []
res = ''
if isinstance(a, list):
if a[0] == 'MatchQ':
s = a
optional = get_default_values(s, {})
r = generate_sympy_from_parsed(s, replace_Int=True)
r, free_symbols = add_wildcards(r, optional=optional)
free_symbols = sorted(set(free_symbols)) # remove common symbols
r = sympify(r, locals={"Or": Function("Or"), "And": Function("And"), "Not":Function("Not")})
pattern = r.args[1].args[0]
cons = r.args[1].args[1]
pattern = rubi_printer(pattern, sympy_integers=True)
pattern = setWC(pattern)
res = ' def _cons_f_{}({}):\n return {}\n'.format(cons_index, ', '.join(free_symbols), cons)
res += ' _cons_{} = CustomConstraint(_cons_f_{})\n'.format(cons_index, cons_index)
res += ' pat = Pattern(UtilityOperator({}, x), _cons_{})\n'.format(pattern, cons_index)
res += ' result_matchq = is_match(UtilityOperator({}, x), pat)'.format(r.args[0])
return "result_matchq", res
else:
for i in a:
if isinstance(i, list):
r = set_matchq_in_constraint(i, cons_index)
lst.append(r[0])
res = r[1]
else:
lst.append(i)
return lst, res
| 5,342,971
|
def training_main():
""" main api to train a model.
"""
training_config = parse_training_args()
# get training and validation sample names
with open(training_config.train_fnames_path, "r", encoding="utf-8") as f:
train_base_fnames = [line.strip() for line in f]
if training_config.val_fnames_path is not None and os.path.exists(training_config.val_fnames_path):
with open(training_config.val_fnames_path, "r", encoding="utf-8") as f:
val_base_fnames = [line.strip() for line in f]
else:
val_base_fnames = []
n_train, n_val = len(train_base_fnames), len(val_base_fnames)
# if steps are set to 0, all the samples will be used
if training_config.steps_per_epoch == 0:
training_config.steps_per_epoch = n_train // training_config.batch_size
if training_config.steps_per_epoch_val == 0:
training_config.steps_per_epoch_val = n_val // training_config.batch_size
print(">>>> training configurations:")
pprint(training_config.__dict__)
model = SemanticSegmentationModel(model_name=training_config.model_name,
input_shape=(training_config.image_height,
training_config.image_width,
training_config.image_channel),
n_class=training_config.n_class,
encoder_name=training_config.encoder_name,
encoder_weights=training_config.encoder_weights,
init_filters=net_config.init_filters,
dropout=net_config.dropout,
weight_decay=net_config.weight_decay,
kernel_initializer=net_config.kernel_initializer,
bn_epsilon=net_config.bn_epsilon,
bn_momentum=net_config.bn_momentum,
upscaling_method=net_config.upsampling_method)
# load or build model
if os.path.exists(training_config.load_model_name):
print(">>>> load model from ", training_config.load_model_name)
model.load_weights(training_config.load_model_name)
else:
print(">>>> build new model: ", training_config.save_model_name)
plot_model(model, training_config.save_model_name.replace(".h5", ".png"), show_shapes=True)
if training_config.model_summary:
model.summary()
model.compile(loss=training_config.loss, optimizer=training_config.optimizer, metrics=[training_config.metric])
print("+ " * 80)
print("+ training data size = %d" % n_train)
print("+ validation data size = %d" % n_val)
print("+ training iteration/epoch = %d" % training_config.steps_per_epoch)
print("+ validation iteration/epoch = %d" % training_config.steps_per_epoch_val)
print("+ model save path: %s" % training_config.save_model_name)
print("+ " * 80)
train_datagen = ImageDataGenerator(channel_shift_range=augment_config.channel_shift_range,
horizontal_flip=augment_config.horizontal_flip,
vertical_flip=augment_config.vertical_flip
# TODO: include all the augmentations here
)
val_datagen = ImageDataGenerator()
if n_val == 0:
print("%s starting training without validation..." % datetime.datetime.now().strftime("%y-%m-%d %H:%M:%S"))
model.fit_generator(
generator=train_datagen.flow_from_directory(
base_fnames=train_base_fnames,
image_dir=training_config.image_dir,
image_suffix=training_config.image_suffix,
image_color_mode=training_config.image_color_mode,
label_dir=training_config.label_dir,
label_suffix=training_config.label_suffix,
n_class=training_config.n_class,
feed_onehot_label=training_config.feed_onehot_label,
cval=training_config.cval,
label_cval=training_config.label_cval,
crop_mode=training_config.crop_mode,
target_size=(training_config.image_height, training_config.image_width),
batch_size=training_config.batch_size,
shuffle=True,
debug=training_config.debug,
dataset_name=training_config.dataset_name
),
steps_per_epoch=training_config.steps_per_epoch,
validation_steps=training_config.steps_per_epoch_val,
epochs=training_config.epoch,
callbacks=training_config.callbacks,
verbose=training_config.verbose
)
else:
print("%s starting training and validation..." % datetime.datetime.now().strftime("%y-%m-%d %H:%M:%S"))
model.fit_generator(
generator=train_datagen.flow_from_directory(
base_fnames=train_base_fnames,
image_dir=training_config.image_dir,
image_suffix=training_config.image_suffix,
image_color_mode=training_config.image_color_mode,
label_dir=training_config.label_dir,
label_suffix=training_config.label_suffix,
n_class=training_config.n_class,
feed_onehot_label=training_config.feed_onehot_label,
cval=training_config.cval,
label_cval=training_config.label_cval,
crop_mode=training_config.crop_mode,
target_size=(training_config.image_height, training_config.image_width),
batch_size=training_config.batch_size,
shuffle=True,
debug=training_config.debug,
dataset_name=training_config.dataset_name
),
validation_data=val_datagen.flow_from_directory(
base_fnames=val_base_fnames,
image_dir=training_config.image_dir,
image_suffix=training_config.image_suffix,
image_color_mode=training_config.image_color_mode,
label_dir=training_config.label_dir,
label_suffix=training_config.label_suffix,
n_class=training_config.n_class,
feed_onehot_label=training_config.feed_onehot_label,
cval=training_config.cval,
label_cval=training_config.label_cval,
crop_mode=training_config.crop_mode,
target_size=(training_config.image_height, training_config.image_width),
batch_size=training_config.batch_size,
shuffle=False),
steps_per_epoch=training_config.steps_per_epoch,
validation_steps=training_config.steps_per_epoch_val,
epochs=training_config.epoch,
callbacks=training_config.callbacks,
verbose=training_config.verbose
)
print("%s training success!" % datetime.datetime.now().strftime("%y-%m-%d %H:%M:%S"))
| 5,342,972
|
def istype(klass, object):
"""Return whether an object is a member of a given class."""
try: raise object
except klass: return 1
except: return 0
| 5,342,973
|
def call(results):
"""Call results.func on the attributes of results
:params result: dictionary-like object
:returns: None
"""
results = vars(results)
places = Places(config=results.pop('config'),
messages=results.pop('messages'))
func = results.pop('func')
func(places, **results)
| 5,342,974
|
def _embed_from_mapping(mapping: Mapping[str, Any], ref: str) -> mapry.Embed:
"""
Parse the embed from the mapping.
All the fields are parsed except the properties, which are parsed
in a separate step.
:param mapping: to be parsed
:param ref: reference to the embeddable structure in the mapry schema
:return: embeddable structure without the properties
"""
return mapry.Embed(
name=mapping['name'], description=mapping['description'], ref=ref)
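# Hedged usage sketch ('SomeEmbed' and the ref string are hypothetical; the mapping
# keys mirror the ones accessed in the code above).
embed = _embed_from_mapping(
    {'name': 'SomeEmbed', 'description': 'An embeddable structure.'},
    ref='#/embeds/SomeEmbed')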
| 5,342,975
|
def generate_csv_from_pnl(pnl_file_name):
"""在.pnl文件的源路径下新生成一个.csv文件. 拷贝自export_to_csv函数. pnl_file_name需包含路径. """
pnlc = alib.read_pnl_from_file(pnl_file_name)
pnl = pnlc[1]
if pnl is None:
        print('pnl file {} does not exist!'.format(pnl_file_name))
pdb.set_trace()
csv_file_name = pnl_file_name[:-4] + '.csv'
outf = open(csv_file_name, 'w')
outf.write(alib.pnl_columns + '\n')
f = ','.join(['%g'] * 14) + ',%d,%d,%d'
for d in pnl:
outf.write((f + '\n') % d)
outf.close()
return csv_file_name
| 5,342,976
|
def parse_number(text, allow_to_fail):
"""
    Convert text to an integer; raise ASMSyntaxError if parsing fails.
    :param text: Number as text (decimal, hex or binary)
    :param allow_to_fail: If True, return 0 instead of raising on a parse failure
    :return: Integer value
"""
try:
if text in defines:
return parse_number(defines.get(text), allow_to_fail)
return to_number(text)
except ValueError:
if allow_to_fail:
return 0
else:
raise ASMSyntaxError(f'Invalid number format: {text}')
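# Hedged usage sketch (assumes the module-level `defines` dict and `to_number`
# helper used above; 'START_ADDR' is a hypothetical assembler symbol).
defines['START_ADDR'] = '0x100'
print(parse_number('START_ADDR', allow_to_fail=False))   # expected 256 if to_number parses hex
print(parse_number('not a number', allow_to_fail=True))  # expected 0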
| 5,342,977
|
def pick_vis_func(options: EasyDict):
"""Pick the function to visualize one batch.
:param options:
:return:
"""
importlib.invalidate_caches()
vis_func = getattr(
import_module("utils.vis.{}".format(options.vis.name[0])),
"{}".format(options.vis.name[1])
)
return vis_func
| 5,342,978
|
def scale_quadrature(quad_func, order, lower, upper, **kwargs):
"""
Scale quadrature rule designed for unit interval to an arbitrary interval.
Args:
quad_func (Callable):
Function that creates quadrature abscissas and weights on the unit
interval.
order (int):
The quadrature order passed to the quadrature function.
lower (float):
The new lower limit for the quadrature function.
upper (float):
The new upper limit for the quadrature function.
kwargs (Any):
Extra keyword arguments passed to `quad_func`.
Returns:
Same as ``quad_func(order, **kwargs)`` except scaled to a new interval.
Examples:
>>> def my_quad(order):
... return (numpy.linspace(0, 1, order+1)[numpy.newaxis],
... 1./numpy.full(order+1, order+2))
>>> my_quad(2)
(array([[0. , 0.5, 1. ]]), array([0.25, 0.25, 0.25]))
>>> scale_quadrature(my_quad, 2, lower=0, upper=2)
(array([[0., 1., 2.]]), array([0.5, 0.5, 0.5]))
>>> scale_quadrature(my_quad, 2, lower=-0.5, upper=0.5)
(array([[-0.5, 0. , 0.5]]), array([0.25, 0.25, 0.25]))
"""
abscissas, weights = quad_func(order=order, **kwargs)
assert numpy.all(abscissas >= 0) and numpy.all(abscissas <= 1)
assert numpy.sum(weights) <= 1+1e-10
assert numpy.sum(weights > 0)
weights = weights*(upper-lower)
abscissas = (abscissas.T*(upper-lower)+lower).T
return abscissas, weights
| 5,342,979
|
def ask_for_region(self):
"""ask user for region to select (2-step process)"""
selection = ["BACK"]
choices = []
while "BACK" in selection:
response = questionary.select(
"Select area by (you can go back and combine these choices):",
choices=["continents", "regions", "countries"],
).ask()
selection_items = getattr(self, response)
if response == "regions":
choices = (
[Choice(r) for r in selection_items if "EU" in r]
+ [Separator()]
+ [Choice(r) for r in selection_items if "EU" not in r]
)
else:
choices = [Choice(r) for r in selection_items.keys()]
# preselect previous choices
for choice in choices:
if choice.value in selection:
choice.checked = True
current_selection = questionary.checkbox("Please select", choices=choices).ask()
selection = selection + current_selection
if "BACK" not in current_selection:
selection = clean_results(selection)
print(f"Selection: {clean_results(selection)}")
selection = list(set(clean_results(selection)))
return self._extract_countries(selection)
| 5,342,980
|
def peakAlign(refw,w):
""" Difference between the maximum peak positions of the signals.
    This function returns the difference, in samples, between the peak positions
    of the signals. If the reference signal has several peaks, the one closest
    to the middle of the signal is used; if the other signal also has several
    peaks, the one closest to the reference peak is used.
The first signal introduced is the reference signal.
Parameters
----------
refw: array-like
the input reference signal.
w: array-like
the input signal.
Returns
-------
al: int
the difference between the two events position
Example
-------
>>> peakAlign([5,7,3,20,13,5,7],[5,1,8,4,3,10,3])
1
See also: maxAlign(), minAlign(), peakNegAlign(), infMaxAlign(), infMinAlign()
"""
p_mw = array ( peaks(array(refw),min(refw)) )
p_w = array ( peaks(array(w),min(w)) )
if (len(p_mw)>1):
min_al = argmin(abs( (len(refw)/2) - p_mw)) #to choose the peak closer to the middle of the signal
p_mw=p_mw[min_al]
if (list(p_w) == [] ):
p_w = p_mw
elif (len(p_w)>1):
min_al = argmin(abs(p_w - p_mw)) #to choose the peak closer to the peak of the reference signal
p_w=p_w[min_al]
return int(array(p_mw-p_w))
| 5,342,981
|
def get_games(by_category, n_games):
"""
    This function loads the dataframe of the most popular games and returns lists of game
    names and image URLs of length 'n_games', ordered by 'by_category'. Valid options for 'by_category': rank, num_user_ratings
"""
df = pd.read_csv('../data/popular_games_with_image_url.csv', index_col = 0)
if by_category == 'rank':
ascending = True
elif by_category == 'num_user_ratings':
ascending = False
df = df.sort_values(by_category, ascending = ascending)
df = df.head(n_games)
game_list = []
image_list = []
for row in df.iterrows():
#game_name = row[1]['name'] + ' (' + str(row[1]['year_published']) + ')'
game_name = row[1]['name']
game_list.append(game_name)
image_url = row[1]['image_url']
image_list.append(image_url)
return game_list, image_list
| 5,342,982
|
def download_images(url_list, internal_id, auth):
"""
Download all images in a url list.
The files are saved in a directory
named with the uuid of the article.
    @param url_list: list of file dicts, each in format
    {"filename" : "x.tiff", "url" : "y"}
    @type url_list: list of dict
@param internal_id: uuid of article
@type internal_id: string
"""
target_dir = create_directory(internal_id)
for url in url_list:
path = os.path.join(target_dir, url["filename"])
img_data = requests.get(url["url"], auth=auth).content
with open(path, 'wb') as handler:
handler.write(img_data)
| 5,342,983
|
def test_close_a_project():
"""Close a created project -> create a zip file and delete the project folder"""
runner = CliRunner()
project_path = Path("temp/test1")
# Run test isolated
with runner.isolated_filesystem():
create_dummy_project(runner, project_path)
# Close the project
result = runner.invoke(
project_meta, ["close", "--password=abcd", str(project_path)]
)
assert result.exit_code == os.EX_OK
# Check if the zip file exist
assert Path(f"{project_path}.zip").is_file()
# Check if the project initial files are still present
assert not project_path.is_dir()
| 5,342,984
|
def clean_hook(conduit):
"""
This function cleans the plugin cache file if exists. The function is called
when C{yum [options] clean [plugins | all ]} is executed.
"""
global hostfilepath
if hostfilepath and hostfilepath[0] != '/':
hostfilepath = conduit._base.conf.cachedir + '/' + hostfilepath
if os.path.exists(hostfilepath):
conduit.info(2, "Cleaning up list of fastest mirrors")
try:
os.unlink(hostfilepath)
        except Exception as e:
conduit.info(2, "Cleanup failed: %s" % e)
| 5,342,985
|
def diurnalPDF( t, amplitude=0.5, phase=pi8 ):
"""
"t" must be specified in gps seconds
we convert the time in gps seconds into the number of seconds after the most recent 00:00:00 UTC
return (1 + amplitude*sin(2*pi*t/day - phase))/day
"""
if amplitude > 1:
raise ValueError("amplitude cannot be larger than 1")
t = gps2relativeUTC(t)
return (1 + amplitude*np.sin(twopi*t/day - phase))/day
| 5,342,986
|
def imap_workers(workers, size=2, exception_handler=None):
"""Concurrently converts a generator object of Workers to
a generator of Responses.
:param workers: a generator of worker objects.
:param size: Specifies the number of workers to make at a time. default is 2
    :param exception_handler: Callback function, called when an exception occurred. Params: Worker, Exception
"""
pool = Pool(size)
def start(r):
return r.start()
for worker in pool.imap_unordered(start, workers):
if worker.response is not None:
yield worker.response
elif exception_handler:
exception_handler(worker, worker.exception)
pool.join()
| 5,342,987
|
def output_time(time_this:float=None,end:str=" | ")->float:
"""输入unix时间戳,按格式输出时间。默认为当前时间"""
if not time_this:
time_this=time.time()-TIMEZONE
print(time.strftime('%Y-%m-%d %H:%M:%S',time.gmtime(time_this)),end=end)
#
return time_this
| 5,342,988
|
def blend_color(color1, color2, blend_ratio):
"""
Blend two colors together given the blend_ration
:param color1: pygame.Color
:param color2: pygame.Color
:param blend_ratio: float between 0.0 and 1.0
:return: pygame.Color
"""
r = color1.r + (color2.r - color1.r) * blend_ratio
g = color1.g + (color2.g - color1.g) * blend_ratio
b = color1.b + (color2.b - color1.b) * blend_ratio
a = color1.a + (color2.a - color1.a) * blend_ratio
return pygame.Color(int(r), int(g), int(b), int(a))
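# Quick worked example (illustrative): an even blend of opaque red and blue.
import pygame
mid = blend_color(pygame.Color(255, 0, 0, 255), pygame.Color(0, 0, 255, 255), 0.5)
assert (mid.r, mid.g, mid.b, mid.a) == (127, 0, 127, 255)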
| 5,342,989
|
def delete_video(video_id):
"""Permanently delete a video."""
_video_request('vimeo.videos.delete', 'POST', video_id=video_id,
error_msg=('Error deleting video {video_id}: <{code} {msg}> '
'{expl}'))
| 5,342,990
|
def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False):
"""Returns a list of images by a given tag from Flick25k dataset,
it will download Flickr25k from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`_
at the first time you use it.
Parameters
------------
tag : string or None
If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`_.
If you want to get all images, set to ``None``.
path : string
        The path that the data is downloaded to, default is ``data/flickr25k/``.
    n_threads : int, number of threads used to read images.
    printable : bool, print information when reading images, default is ``False``.
Examples
-----------
- Get images with tag of sky
>>> images = tl.files.load_flickr25k_dataset(tag='sky')
- Get all images
>>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True)
"""
path = os.path.join(path, 'flickr25k')
filename = 'mirflickr25k.zip'
url = 'http://press.liacs.nl/mirflickr/mirflickr25k/'
## download dataset
if folder_exists(path+"/mirflickr") is False:
print("[*] Flickr25k is nonexistent in {}".format(path))
maybe_download_and_extract(filename, path, url, extract=True)
del_file(path+'/'+filename)
## return images by the given tag.
# 1. image path list
folder_imgs = path+"/mirflickr"
path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
path_imgs.sort(key=natural_keys)
# print(path_imgs[0:10])
# 2. tag path list
folder_tags = path+"/mirflickr/meta/tags"
path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False)
path_tags.sort(key=natural_keys)
# print(path_tags[0:10])
# 3. select images
if tag is None:
print("[Flickr25k] reading all images")
else:
print("[Flickr25k] reading images with tag: {}".format(tag))
images_list = []
for idx in range(0, len(path_tags)):
tags = read_file(folder_tags+'/'+path_tags[idx]).split('\n')
# print(idx+1, tags)
if tag is None or tag in tags:
images_list.append(path_imgs[idx])
images = visualize.read_images(images_list, folder_imgs, n_threads=n_threads, printable=printable)
return images
| 5,342,991
|
def solve_a_star(start_id: str, end_id: str, nodes, edges):
"""
    Get the shortest path between two nodes using the A* algorithm.
    :param start_id: ID of the start node
    :param end_id: ID of the end node
    :param nodes: mapping of node ID to (y, x) coordinates
    :param edges: mapping of node ID to a list of (neighbour ID, distance) tuples
    :return: tuple of (distance, explored segments, elapsed seconds, path), or None if no path exists
"""
solution_t_start = perf_counter()
solution = []
associations = {start_id: None}
closed = set() # Nodes that have been resolved
fringe = [] # Min-heap that holds nodes to check (aka. fringe)
start_y, start_x = nodes[start_id]
end_y, end_x = nodes[end_id]
start_node = (0 + calc_distance(start_y, start_x, end_y, end_x), 0, start_id)
heappush(fringe, start_node)
while len(fringe) > 0:
c_node = heappop(fringe)
c_f, c_distance, c_id = c_node
c_y, c_x = nodes[c_id]
if c_id == end_id:
return c_distance, solution, perf_counter() - solution_t_start, associations_to_path(associations, c_id,
nodes)
if c_id not in closed:
closed.add(c_id)
for child_id, c_to_child_distance in edges[c_id]:
if child_id not in closed:
# Add to solution path
if child_id not in associations:
associations[child_id] = c_id
child_distance = c_distance + c_to_child_distance # Cost function
child_y, child_x = nodes[child_id]
child_node = (
child_distance + calc_distance(child_y, child_x, end_y, end_x), child_distance, child_id)
heappush(fringe, child_node)
solution.append(((c_y, c_x), (child_y, child_x)))
return None
| 5,342,992
|
def initialize_components():
"""Initializes external interfaces and saved calibration data"""
analysis.setup_calibration()
interfaces.setup_interfaces()
| 5,342,993
|
def get_dhcp_relay_statistics(dut, interface="", family="ipv4", cli_type="", skip_error_check=True):
"""
API to get DHCP relay statistics
Author Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
:param dut:
:type dut:
:param interface:
:type interface:
"""
cli_type = st.get_ui_type(dut, cli_type=cli_type)
if cli_type in ['click', 'klish']:
ip_val = "ip" if family == "ipv4" else "ipv6"
if interface:
command = "show {} dhcp-relay statistics {}".format(ip_val, interface)
else:
command = "show {} dhcp-relay statistics".format(ip_val)
return st.show(dut, command, type=cli_type, skip_error_check=skip_error_check)
elif cli_type in ['rest-patch', 'rest-put']:
return _get_rest_dhcp_relay_statistics(dut, interface=interface, family=family)
else:
st.error("Unsupported CLI_TYPE: {}".format(cli_type))
return False
| 5,342,994
|
def _runcmd(cmd, proc):
"""Run a command"""
cmdstr = proc.template(cmd,
**proc.envs).render(dict(proc=proc, args=proc.args))
logger.info('Running command from pyppl_runcmd ...', proc=proc.id)
logger.debug(' ' + cmdstr, proc=proc.id)
cmd = cmdy.bash(c=cmdstr, _raise=False).iter
for line in cmd:
logger.cmdout(line, proc=proc.id)
cmd.wait()
if cmd.rc == 0:
return True
for line in cmd.stderr.splitlines():
logger.cmderr(line, proc=proc.id)
return False
| 5,342,995
|
def graph_to_raw(g, raw_directory, lines_per_file = 1000):
"""
Fills a directory with gzip files corresponding to the raw format.
:param g: the graph
:param raw_directory: the destination
:param lines_per_file: how many lines per file (per gzip file)
"""
# ensure the directory is there
Path(raw_directory).mkdir(parents = True, exist_ok = True)
coll = []
file_number = 1
for i, e in enumerate(g.edges()):
entity = {
"Device": "",
"IP": "",
"Identity": "",
"Location": "",
"Cookie": ""
}
u, v = e
source = g.nodes()[u]
target = g.nodes()[v]
entity[source["label"]] = source["id"]
entity[target["label"]] = target["id"]
entity = {str.lower(k): entity[k] for k in entity}
line = create_raw_line(**entity)
coll.append(line)
if i % lines_per_file == 0 and i > 0:
file_path = os.path.join(raw_directory, f"data{file_number}.gz")
file_number += 1
with gzip.open(file_path, 'wb') as f:
f.write("\n".join(coll).encode())
coll = []
if len(coll) > 0:
file_path = os.path.join(raw_directory, f"data{file_number}.gz")
with gzip.open(file_path, 'wb') as f:
f.write("\n".join(coll).encode())
print(f"Written {file_number} file(s) in '{raw_directory}'.")
| 5,342,996
|
def check_rule_for_Azure_ML(rule):
"""Check if the ports required for Azure Machine Learning are open"""
required_ports = ['29876', '29877']
if check_source_address_prefix(rule.source_address_prefix) is False:
return False
if check_protocol(rule.protocol) is False:
return False
if check_direction(rule.direction) is False:
return False
if check_provisioning_state(rule.provisioning_state) is False:
return False
if rule.destination_port_range is not None:
if check_ports_in_destination_port_ranges(
required_ports,
[rule.destination_port_range]) is False:
return False
else:
if check_ports_in_destination_port_ranges(
required_ports,
rule.destination_port_ranges) is False:
return False
return True
| 5,342,997
|
def text_cleanup(text: str) -> str:
"""
A simple text cleanup function that strips all new line characters and
substitutes consecutive white space characters by a single one.
:param text: Input text to be cleaned.
:return: The cleaned version of the text
"""
    text = text.replace('\n', '')
return re.sub(r'\s{2,}', ' ', text)
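# Quick check (illustrative) of the intended cleanup behaviour.
assert text_cleanup("hello\n  world,   again") == "hello world, again"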
| 5,342,998
|
def geocode():
"""
Call a Geocoder service
"""
if "location" in request.vars:
location = request.vars.location
else:
session.error = T("Need to specify a location to search for.")
redirect(URL(r=request, f="index"))
if "service" in request.vars:
service = request.vars.service
else:
# @ToDo: service=all should be default
service = "google"
if service == "google":
return s3base.GoogleGeocoder(location, db).get_kml()
if service == "yahoo":
return s3base.YahooGeocoder(location, db).get_xml()
| 5,342,999
|