content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def register_new_face(face_encoding, face_image):
    """Add a new person to our list of known faces.

    Appends the encoding to the module-level ``known_face_encodings`` list and
    a matching metadata dict (visit bookkeeping) to ``known_face_metadata``.

    :param face_encoding: face embedding vector for the new person
    :param face_image: image crop of the face to store alongside the metadata
    """
    known_face_encodings.append(face_encoding)
    # Capture a single timestamp so all three "seen" fields agree exactly;
    # calling datetime.now() three times could yield slightly different values.
    now = datetime.now()
    # Matching metadata entry: tracks how many times a person has visited,
    # when we last saw them, etc.
    known_face_metadata.append({
        "first_seen": now,
        "first_seen_this_interaction": now,
        "last_seen": now,
        "seen_count": 1,
        "seen_frames": 1,
        "face_image": face_image,
    })
def get_files_recurse(path: Path) -> Set:
    """Return the set of all files found recursively under *path*.

    Directories are skipped; everything else (regular files, symlinks, ...)
    is included, matching ``Path.rglob("*")`` minus directories.
    """
    return {entry for entry in path.rglob("*") if not entry.is_dir()}
def img_after_ops(img: List[str], ops: List[int]) -> List[str]:
    """Return *img* after applying the rotation/flip *ops*, in order.

    The input image is not modified; a transformed copy is returned.
    """
    result = list(img)
    for operation in ops:
        if operation == Tile.ROTATE:
            # Rotate 90 degrees: transpose rows/columns, then reverse each row.
            result = [cat(column)[::-1] for column in zip(*result)]
        elif operation == Tile.FLIP:
            # Mirror horizontally.
            result = [row[::-1] for row in result]
    return result
def authenticated(method):
    """Decorate methods with this to require that the user be logged in.
    Fix the redirect url with full_url.
    Tornado use uri by default.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        user = self.current_user
        if not user:
            if self.request.method == "GET":
                url = self.get_login_url()
                if "?" not in url:
                    # Append the current full URL as the post-login "next"
                    # target.  NOTE(review): urllib.urlencode is Python 2
                    # only; Python 3 needs urllib.parse.urlencode — confirm
                    # the target interpreter.
                    url += "?" + urllib.urlencode(dict(next=self.request.full_url()))
                self.redirect(url)
                return
            # Non-GET requests cannot safely be redirected to a login page.
            raise HTTPError(403)
        #self._current_user = user
        return method(self, *args, **kwargs)
    return wrapper
def ring_samp_ranges(zma, rng_atoms):
    """ Set sampling range for ring dihedrals.
        :param zma: Z-Matrix
        :type zma: automol.zmat object
        :param rng_atoms: idxs for atoms inside rings
        :type rng_atoms: list
    """
    # Each dihedral may be sampled within +/- pi/4 of its current value.
    quarter_turn = math.pi / 4
    return {coord: [dih - quarter_turn, dih + quarter_turn]
            for coord, dih in ring_dihedrals(zma, rng_atoms).items()}
def users_key(group='default'):
    """Build and return the datastore ancestor key for the given user *group*."""
    return db.Key.from_path('users', group)
def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,
         random_shuffler=None, shuffle=False, sort_within_batch=False, bucket_size= 1000):
    """Sort within buckets, then batch, then shuffle batches.

    Partitions data into chunks of size 100*batch_size, optionally sorts
    examples within each chunk using *key*, batches each chunk, and yields
    the batches (shuffled when *shuffle* is set).
    """
    if random_shuffler is None:
        # NOTE(review): random.shuffle shuffles in place and returns None;
        # the shuffle branch below presumably expects a shuffler that
        # *returns* the shuffled list — confirm callers pass one.
        random_shuffler = random.shuffle
    for bucket in batch(data, batch_size * 100, batch_size_fn):
        if sort_within_batch:
            bucket_batches = batch(sorted(bucket, key=key), batch_size, batch_size_fn)
        else:
            bucket_batches = batch(bucket, batch_size, batch_size_fn)
        if shuffle:
            yield from random_shuffler(list(bucket_batches))
        else:
            yield from list(bucket_batches)
def movefiles(names, dest):
    """Copy each path listed in *names* into the directory *dest*.

    Despite the name, files are copied (not moved).  Entries in *names* may
    carry trailing newlines (e.g. lines read from a file list); these are
    stripped before use.

    :param names: iterable of source file paths (possibly newline-terminated)
    :param dest: destination directory path
    """
    for name in names:
        # Strip once and reuse — the original stripped the same string twice.
        src = name.strip('\n')
        # Keep only the basename when building the destination path.
        copyfile(src, dest + '/' + src.split("/")[-1])
    return
def fit_classifiers():
    """
    API to get fit classifiers for training *Language independent
    :raise: Exception containing:
    message:
    - "OK" for success
    status_code:
    - 200 for success
    """
    # Optional 'lang' query parameter; None lets the service pick its default.
    language = request.args.get('lang', None)
    status_code, message = writer_service.fit_classifiers(language)
    # NOTE(review): even success is reported by raising ExceptionHandler —
    # presumably the framework converts it into an HTTP response; confirm.
    raise ExceptionHandler(message=message.value, status_code=status_code.value)
def VolumetricFlow(self):
    """Volumetric flow (m^3/hr)."""
    stream, mol = self.data
    amount = mol[0]
    # Zero (or falsy) molar amount means zero volumetric flow.
    if not amount:
        return 0.
    compound = self.name  # self.name holds the compound object
    # Sync the compound's thermodynamic state with the stream before
    # reading its molar volume.
    compound.T = stream.T
    compound.P = stream.P
    compound.phase = stream._phase
    return compound.Vm * amount * 1000
def build_queue_adapter(workflow_client, logger=None, **kwargs):
    """Construct a queue adapter matching the incoming workflow client type.

    Parameters
    ----------
    workflow_client : object ("distributed.Client", "fireworks.LaunchPad")
        A object wrapper for different distributed workflow types
    logger : logging.Logger, Optional. Default: None
        Logger to report to
    **kwargs
        Additional kwargs for the Adapter

    Returns
    -------
    ret : Adapter
        Returns a valid Adapter for the selected computational queue
    """
    # Dispatch on the fully-qualified class name so the heavy backend
    # packages never need to be imported here.
    client_cls = type(workflow_client)
    adapter_type = client_cls.__module__ + "." + client_cls.__name__
    if adapter_type == "parsl.dataflow.dflow.DataFlowKernel":
        return parsl_adapter.ParslAdapter(workflow_client, logger=logger)
    if adapter_type == "distributed.client.Client":
        return dask_adapter.DaskAdapter(workflow_client, logger=logger)
    if adapter_type == "fireworks.core.launchpad.LaunchPad":
        return fireworks_adapter.FireworksAdapter(workflow_client, logger=logger)
    raise KeyError("QueueAdapter type '{}' not understood".format(adapter_type))
def configure_parser(parser):
    """Configure parser for this action.

    Adds the shared worktree and toolchain option groups, plus the
    positional name of the package to remove.
    """
    qisys.parsers.worktree_parser(parser)
    qitoolchain.parsers.toolchain_parser(parser)
    parser.add_argument("package_name", metavar='NAME',
                        help="The name of the package to remove")
def pad_sents(sents, pad_token, return_tensor = False):
    """ Pad list of sentences according to the longest sentence in the batch.
    The paddings should be at the end of each sentence.
    @param sents (list[list[str]]): list of sentences, where each sentence
    is represented as a list of words
    @param pad_token (str): padding token
    @returns sents_padded (list[list[str]]): list of sentences where sentences shorter
    than the max length sentence are padded out with the pad_token, such that
    each sentences in the batch now has equal length.
    """
    # NOTE(review): despite the docstring's list[list[str]] contract, the code
    # treats each sentence as a torch tensor (.cpu().numpy()) and moves the
    # results to CUDA — confirm callers pass tensors and a GPU is available.
    sents_padded = []
    maxLen = 0
    ### YOUR CODE HERE (~6 Lines)
    # First pass: length of the longest sentence in the batch.
    for i in sents:
        maxLen = max(len(i), maxLen)
    # Second pass: append pad_token to the end of each shorter sentence.
    for i in range(len(sents)):
        sen = sents[i].cpu().numpy().tolist()
        for j in range(maxLen - len(sen)):
            sen.append(pad_token)
        sen = torch.tensor(sen, dtype=torch.long).cuda()
        sents_padded.append(sen)
    if return_tensor:
        # Stack the padded sentences into one (batch, maxLen) long tensor.
        t = torch.zeros(len(sents), maxLen).long()
        for i in range(len(sents)):
            t[i] = sents_padded[i]
        sents_padded = t.cuda()
    return sents_padded
def _take_along_axis(array, indices,
                     axis):
    """Takes values from the input array by matching 1D index and data slices.
    This function serves the same purpose as jax.numpy.take_along_axis, except
    that it uses one-hot matrix multiplications under the hood on TPUs:
    (1) On TPUs, we use one-hot matrix multiplications to select elements from the
    array.
    (2) Otherwise, we fall back to jax.numpy.take_along_axis.
    Notes:
    - To simplify matters in case (1), we only support slices along the second
    or last dimensions.
    - We may wish to revisit (1) for very large arrays.
    Args:
    array: Source array.
    indices: Indices to take along each 1D slice of array.
    axis: Axis along which to take 1D slices.
    Returns:
    The indexed result.
    """
    if array.ndim != indices.ndim:
        raise ValueError(
            "indices and array must have the same number of dimensions; "
            f"{indices.ndim} vs. {array.ndim}.")
    if (axis != -1 and axis != array.ndim - 1 and  # Not last dimension
            axis != 1 and axis != -array.ndim + 1):  # Not second dimension
        raise ValueError(
            "Only slices along the second or last dimension are supported; "
            f"array.ndim = {array.ndim}, while axis = {axis}.")
    if _favor_one_hot_slices():
        one_hot_length = array.shape[axis]
        one_hot_indices = jax.nn.one_hot(indices, one_hot_length, axis=axis)
        if axis == -1 or array.ndim == 1:
            # Take i elements from last dimension (s).
            # We must use HIGHEST precision to accurately reproduce indexing
            # operations with matrix multiplications.
            result = jnp.einsum(
                "...s,...is->...i",
                array,
                one_hot_indices,
                precision=jax.lax.Precision.HIGHEST)
        else:
            # Take i elements from second dimension (s). We assume here that we always
            # want to slice along the second dimension.
            # We must use HIGHEST precision to accurately reproduce indexing
            # operations with matrix multiplications.
            result = jnp.einsum(
                "ns...,nis...->ni...",
                array,
                one_hot_indices,
                precision=jax.lax.Precision.HIGHEST)
        # The einsum promotes to the one-hot dtype; cast back to the input's.
        return jax.lax.convert_element_type(result, array.dtype)
    else:
        return jnp.take_along_axis(array, indices, axis=axis)
def send_envelope(
    adfs_host: str,
    envelope: str,
) -> requests.Response:
    """Send an envelope to the target ADFS server.

    Arguments:
        adfs_host: target ADFS server
        envelope: envelope to send
    Returns:
        ADFS server response, or None if the request failed (the error is
        logged).
    """
    # NOTE(review): plain HTTP sends the SOAP body unencrypted; confirm the
    # target endpoint is really served over http rather than https.
    url = f"http://{adfs_host}/adfs/services/policystoretransfer"
    headers = {"Content-Type": "application/soap+xml"}
    response = None
    try:
        response = requests.post(url, data=envelope, headers=headers)
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP-level failures are
        # expected here; programming errors should propagate.
        logging.error(e)
    return response
def display_main(choice):
    """
    Link option To main board
    """
    # Delegate the selected option straight to the main board handler.
    return main(choice)
def test_html_blocks_extra_03a():
    """
    Test case extra 03: variation of 3 with LRD
    """
    # Arrange
    # HTML block opened inside a list item; the link reference definition is
    # split by the list's lazy-continuation rules.
    source_markdown = """- <script>
[foo]:
/url
</script>
"""
    expected_tokens = [
        "[ulist(1,1):-::2:: ]",
        "[html-block(1,3)]",
        "[text(1,3):<script>\n[foo]::]",
        "[end-html-block:::True]",
        "[end-ulist:::True]",
        "[para(3,1):\n]",
        "[text(3,1):/url\n::\n]",
        "[raw-html(4,1):/script]",
        "[end-para:::True]",
        "[BLANK(5,1):]",
    ]
    expected_gfm = """<ul>
<li>
<script>
[foo]:
</li>
</ul>
<p>/url
</script></p>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
def generate_network_table(seed=None):
    """
    Generates a table associating MAC and IP addressed to be distributed by our virtual network adapter via DHCP.
    """
    # Seeding makes the generated table reproducible across runs.
    if seed is not None:
        random.seed(seed)
    # number of IPs per network is 253 (2-254)
    # Draw random MAC suffixes until we have 253 unique addresses.
    macs: set[str] = set()
    while len(macs) < 253:
        suffix = to_byte(random.randint(0, 255)) + ":" + to_byte(random.randint(0, 255))
        macs.add("48:d2:24:bf:" + suffix)
    # Pair each MAC with the next sequential host address, starting at .2.
    return {mac: "192.168.150." + str(ip) for ip, mac in enumerate(macs, start=2)}
def loss_mGLAD(theta, S):
    """Graphical-lasso objective used as the meta-learning loss for GLAD.

    loss-meta = 1/B * sum_b( -log|theta_b| + <S_b, theta_b> )

    Args:
        theta (tensor 3D): precision matrix BxDxD
        S (tensor 3D): covariance matrix BxDxD (dim=D)
    Returns:
        loss (tensor 1D): the loss value of the obj function
    """
    batch_size = S.shape[0]
    neg_logdet = -torch.logdet(theta)
    # <S, theta> as a trace: batched matmul followed by a batched trace.
    products = torch.einsum("bij, bjk -> bik", S, theta)
    traces = torch.einsum('jii->j', products)
    # Average the per-sample objectives over the batch.
    return torch.sum(neg_logdet + traces) / batch_size
def parse_remote_path(remote_path):
    """Split *remote_path* into (bucket, key), enforcing the AWS S3 protocol."""
    scheme, bucket, key = utils.parse_remote_path(remote_path)
    # Only s3:// paths are accepted by this wrapper.
    assert scheme == "s3:", "Mismatched protocol (expected AWS S3)"
    return bucket, key
def version1_check(info):
    """ Creates a report for SAP HANA instances running on version 1 """
    found = {}
    # Collect every instance whose manifest reports release 1.00.
    for instance in info.instances:
        if _manifest_get(instance.manifest, 'release') == '1.00':
            _add_hana_details(found, instance)
    if found:
        detected = _create_detected_instances_list(found)
        # Emit an inhibitor report: the upgrade must not proceed while
        # unsupported HANA 1 instances are present.
        reporting.create_report([
            reporting.Title('Found SAP HANA 1 which is not supported with the target version of RHEL'),
            reporting.Summary(
                ('SAP HANA 1.00 is not supported with the version of RHEL you are upgrading to.\n\n'
                 'The following instances have been detected to be version 1.00:\n'
                 '{}'.format(detected))
            ),
            reporting.Severity(reporting.Severity.HIGH),
            reporting.RemediationHint((
                'In order to upgrade RHEL, you will have to upgrade your SAP HANA 1.0 software to '
                '{supported}.'.format(supported=SAP_HANA_MINIMAL_VERSION_STRING))),
            reporting.ExternalLink(url='https://launchpad.support.sap.com/#/notes/2235581',
                                   title='SAP HANA: Supported Operating Systems'),
            reporting.Tags([reporting.Tags.SANITY]),
            reporting.Flags([reporting.Flags.INHIBITOR]),
            reporting.Audience('sysadmin')
        ])
def operations(func: Callable) -> Callable:
    """Allows developers to specify operations which
    should not be called in the fuzzing process.
    Examples:
        Ignoring operations specified by operation ids in lists
        >>> @fuzz_lightyear.exclude.operations
        ... def b():
        ...     return ['get_pets', 'get_store_inventory']
        Ignoring operations specified by "tag.operation_id" in lists
        >>> @fuzz_lightyear.exclude.operations
        ... def c():
        ...     return ['pets.get_pets', 'store.get_store_inventory']
    """
    get_operations_fn = _get_formatted_operations(func)
    # Register the exclusions immediately, at decoration time.
    get_excluded_operations().update(get_operations_fn())
    return func
def _gen_dfa_table(t: UxsdComplex) -> str:
    """Generate a 2D C++ array representing DFA table from an UxsdComplex's DFA.
    The array is indexed by the state and input token value, such that table[state][input]
    gives the next state.
    """
    assert isinstance(t.content, UxsdDfa)
    dfa = t.content.dfa
    out = ""
    out += "constexpr int NUM_%s_STATES = %d;\n" % (t.cpp.upper(), len(dfa.states))
    out += "constexpr const int NUM_%s_INPUTS = %d;\n" % (t.cpp.upper(), len(dfa.alphabet))
    out += "constexpr int gstate_%s[NUM_%s_STATES][NUM_%s_INPUTS] = {\n" % (t.cpp, t.cpp.upper(), t.cpp.upper())
    # NOTE(review): iteration covers 0..max(dfa.states) while NUM_STATES is
    # len(dfa.states); these only agree if states are numbered contiguously
    # from 0 — confirm the DFA builder guarantees that.
    for i in range(0, max(dfa.states)+1):
        state = dfa.transitions[i]
        # Missing transitions are encoded as -1 (reject).
        row = [str(state[x]) if state.get(x) is not None else "-1" for x in dfa.alphabet]
        out += "\t{%s},\n" % ", ".join(row)
    out += "};\n"
    return out
def get_heat_capacity_derivative(Cv, temperature_list, plotfile='dCv_dT.pdf'):
    """
    Fit a heat capacity vs T dataset to cubic spline, and compute derivatives
    :param Cv: heat capacity data series
    :type Cv: Quantity or numpy 1D array
    :param temperature_list: List of temperatures used in replica exchange simulations
    :type temperature: Quantity or numpy 1D array
    :param plotfile: path to filename to output plot
    :type plotfile: str
    :returns:
          - dC_v_out ( 1D numpy array (float) ) - 1st derivative of heat capacity, from a cubic spline evaluated at each point in Cv)
          - d2C_v_out ( 1D numpy array (float) ) - 2nd derivative of heat capacity, from a cubic spline evaluated at each point in Cv)
          - spline_tck ( scipy spline object (tuple) ) - knot points (t), coefficients (c), and order of the spline (k) fit to Cv data
    """
    xdata = temperature_list
    ydata = Cv
    # Strip units off quantities:
    if type(xdata[0]) == unit.quantity.Quantity:
        xdata_val = np.zeros((len(xdata)))
        xunit = xdata[0].unit
        for i in range(len(xdata)):
            xdata_val[i] = xdata[i].value_in_unit(xunit)
        xdata = xdata_val
    if type(ydata[0]) == unit.quantity.Quantity:
        ydata_val = np.zeros((len(ydata)))
        yunit = ydata[0].unit
        for i in range(len(ydata)):
            ydata_val[i] = ydata[i].value_in_unit(yunit)
        ydata = ydata_val
    # Fit cubic spline to data, no smoothing
    spline_tck = interpolate.splrep(xdata, ydata, s=0)
    # Dense grid for smooth plotting of the spline and its derivatives.
    xfine = np.linspace(xdata[0], xdata[-1], 1000)
    yfine = interpolate.splev(xfine, spline_tck, der=0)
    dCv = interpolate.splev(xfine, spline_tck, der=1)
    d2Cv = interpolate.splev(xfine, spline_tck, der=2)
    # Derivatives evaluated at the original data points are what gets returned.
    dCv_out = interpolate.splev(xdata, spline_tck, der=1)
    d2Cv_out = interpolate.splev(xdata, spline_tck, der=2)
    # Three stacked panels: Cv, dCv/dT, d2Cv/dT2 sharing the T axis.
    figure, axs = plt.subplots(
        nrows=3,
        ncols=1,
        sharex=True,
    )
    axs[0].plot(
        xdata,
        ydata,
        'ok',
        markersize=4,
        fillstyle='none',
        label='simulation data',
    )
    axs[0].plot(
        xfine,
        yfine,
        '-b',
        label='cubic spline',
    )
    axs[0].set_ylabel(r'$C_{V} (kJ/mol/K)$')
    axs[0].legend()
    axs[1].plot(
        xfine,
        dCv,
        '-r',
        label=r'$\frac{dC_{V}}{dT}$',
    )
    axs[1].legend()
    axs[1].set_ylabel(r'$\frac{dC_{V}}{dT}$')
    axs[2].plot(
        xfine,
        d2Cv,
        '-g',
        label=r'$\frac{d^{2}C_{V}}{dT^{2}}$',
    )
    axs[2].legend()
    axs[2].set_ylabel(r'$\frac{d^{2}C_{V}}{dT^{2}}$')
    axs[2].set_xlabel(r'$T (K)$')
    plt.tight_layout()
    plt.savefig(plotfile)
    plt.close()
    return dCv_out, d2Cv_out, spline_tck
def do_mon_show(cs, args):
    """Shows details info of a mon."""
    # Resolve the mon reference from the CLI args, then pretty-print it.
    mon = _find_mon(cs, args.mon)
    _print_mon(mon)
def cfgfile_cl1(db200_static_file):
    """Return path to configfile for compliance level 1.
    This fixture not only configures the server, but also
    creates a mock database with 200 factoids in the temporary
    directory where the configfile is written to.
    As compliance level 1 is read only by default, we can set scope to session.
    """
    # Start from the shared base config so this fixture can't mutate it.
    cfg = copy.deepcopy(BASE_CFG)
    # Place the config file next to the mock database file.
    cfgfile = os.path.join(os.path.dirname(db200_static_file), 'papi.toml')
    cfg['connector']['filename'] = db200_static_file
    cfg['api']['complianceLevel'] = 1
    with open(cfgfile, 'w') as fh:
        fh.write(toml.dumps(cfg))
        fh.flush()
    yield cfgfile
def get_gradients_of_activations(model, x, y, layer_names=None, output_format='simple', nested=False):
    """
    Get gradients of the outputs of the activation functions, regarding the loss.
    Intuitively, it shows how your activation maps change over a tiny modification of the loss.
    :param model: keras compiled model or one of ['vgg16', 'vgg19', 'inception_v3', 'inception_resnet_v2',
    'mobilenet_v2', 'mobilenetv2'].
    :param x: Model input (Numpy array). In the case of multi-inputs, x should be of type List.
    :param y: Model target (Numpy array). In the case of multi-inputs, y should be of type List.
    :param layer_names: (optional) Single name of a layer or list of layer names for which activations should be
    returned. It is useful in very big networks when it is computationally expensive to evaluate all the layers/nodes.
    :param output_format: Change the output dictionary key of the function.
    - 'simple': output key will match the names of the Keras layers. For example Dense(1, name='d1') will
    return {'d1': ...}.
    - 'full': output key will match the full name of the output layer name. In the example above, it will
    return {'d1/BiasAdd:0': ...}.
    - 'numbered': output key will be an index range, based on the order of definition of each layer within the model.
    :param nested: (optional) If set, will move recursively through the model definition to retrieve nested layers.
    Recursion ends at leaf layers of the model tree or at layers with their name specified in layer_names.
    E.g., a model with the following structure
    -layer1
        -conv1
        ...
        -fc1
    -layer2
        -fc2
    ... yields a dictionary with keys 'layer1/conv1', ..., 'layer1/fc1', 'layer2/fc2'.
    If layer_names = ['layer2/fc2'] is specified, the dictionary will only hold one key 'layer2/fc2'.
    The layer names are generated by joining all layers from top level to leaf level with the separator '/'.
    :return: Dict {layer_names (specified by output_format) -> activation of the layer output/node (Numpy array)}.
    """
    # Select the output nodes first, then evaluate their gradients w.r.t. the loss.
    nodes = _get_nodes(model, output_format, nested=nested, layer_names=layer_names)
    return _get_gradients(model, x, y, nodes)
def gJoin(gl1, gl2):
    """Lazily yield all items of *gl1* followed by all items of *gl2*.

    Equivalent to gl1+gl2 for lists, i.e. [gl1[0],...,gl1[n],gl2[0],...].
    Only useful when gl1 is finite (gl2 is never reached otherwise).
    """
    yield from gl1
    yield from gl2
def string_to_screens_and_lines(source, allowed_width, allowed_height, f, pixels_between_lines = None, end_screens_with = (), do_not_include = ()):
    """
    Convert a string to screens and lines.
    Pygame does not allow line breaks ("\n") when rendering text. The purpose
    of this function is to break a string into lines and screens given a font
    and screen dimensions.
    The following two assumptions are made:
    1. Line breaks ("\n") in source denote the start of a new paragraph.
       Therefore, to have an actual blank line (i.e., an empty string)
       appear in the returned array, add another "\n" immediately
       following the first.
    2. Spaces denote the end of a word.
    Parameters:
        source: the string to divide into screens and lines.
        allowed_width: the width, in pixels, permitted for lines; can be a
            number of pixels or a proportion of the active screen's width.
        allowed_height: same as allowed_width but for the height of a single
            screen.
        f: the font with which source is measured.
    Keyword Parameters:
        pixels_between_lines: blank pixel rows between lines of text; defaults
            to None, in which case it is obtained from f.
        end_screens_with: a restricted set of characters that may end a
            screen; defaults to an empty tuple, in which case any character
            ending a word can end a screen.
        do_not_include: words that are exceptions to the end_screens_with
            words (e.g., "Mrs." ends in a period but should not end a screen)
    Returns:
        screens: a multidimensional list of screens and lines.
    """
    # Check if allowed_height and allowed_width need to be set:
    if 0 < allowed_width <= 1 and 0 < allowed_height <= 1:
        allowed_width, allowed_height = screen_dimensions()
    elif 0 < allowed_width <= 1 or 0 < allowed_height <= 1:
        raise ValueError("Both or neither of allowed_width and allowed_height can be between 0 and 1.")
    # Check if pixels_between_lines needs to be set:
    if not pixels_between_lines:
        pixels_between_lines = f.get_linesize()
    else:
        assert pixels_between_lines > 0, "pixels_between_lines must be positive."
    # Make sure that allowed_height can accommodate the tallest word in
    # source:
    assert f.size(source)[1] <= allowed_height, "allowed_height cannot accommodate source."
    screens = []
    # Break source into paragraphs and paragraphs into single words:
    paragraphs = source.split("\n")
    single_words = []
    for paragraph in paragraphs:
        individual_words = paragraph.split(" ")
        # While here, verify that the longest word fits:
        widest_word, pixels = longest_string_to_render(individual_words, f)
        assert pixels < allowed_width, "{:s} in source is too long for allowed_width.".format(widest_word)
        single_words.append(individual_words)
    # The function branches next, depending on whether restrictions have been
    # placed on where screen breaks can occur.
    if not end_screens_with:
        # Screen breaks can occur following any word.
        # Break single_words into lines without regard to screens:
        lines_of_text, total_height = wrap_text(
            single_words,
            allowed_width,
            f,
            return_height = True,
            line_height = pixels_between_lines
        )
        if total_height <= allowed_height:
            # Everything fits on one screen.
            screens.append(lines_of_text)
        else:
            # There will be at least two screens.
            # Initialize the first screen and a height counter:
            screen = []
            screen_height = 0
            for line in lines_of_text:
                line_height = f.size(line)[1]
                screen_height = screen_height+line_height+pixels_between_lines
                if screen_height < allowed_height:
                    # line fits on the current screen.
                    screen.append(line)
                elif screen_height == allowed_height or screen_height-pixels_between_lines < allowed_height:
                    # line fits, but no more will.
                    screen.append(line)
                    screens.append(screen)
                    screen = []
                    screen_height = 0
                else:
                    # line doesn't fit.
                    screens.append(screen)
                    screen = [line]
                    screen_height = line_height+pixels_between_lines
            # Check for a remaining screen:
            # (A stray line-continuation backslash after this append fused it
            # with the following `else:` in the original — fixed here.)
            if screen:
                screens.append(screen)
    else:
        # Screens can only end following specific strings.
        # These strings do not need to be end-of-sentence characters, but it
        # is difficult to imagine what else they would be. Therefore, I refer
        # to the resulting strings as sentences, acknowledging that this may
        # be incorrect terminology.
        # Break paragraphs into sentences:
        sentences = []
        for paragraph in paragraphs:
            if sentences:
                # This is not the first line, so start the paragraph on a new
                # line:
                sentences.append("")
            if paragraph:
                # paragraph is not a blank line.
                # Break it into sentences:
                paragraph_as_sentences = text_to_sentences(
                    paragraph,
                    terminators = end_screens_with,
                    exclude = do_not_include
                )
                sentences = sentences+paragraph_as_sentences
            else:
                # paragraph is a blank line.
                sentences.append("")
        # Initialize the first screen:
        screen = []
        for sentence in sentences:
            # Determine whether sentence starts on a new line or continues
            # from the current line:
            if screen:
                # If the last line in screen is blank, then sentence starts on
                # a new line.
                last_line = screen[-1]
                if last_line:
                    next_line = False
                else:
                    next_line = True
            else:
                # This screen is blank.
                # Arbitrarily set next_line to False:
                next_line = False
            # Try adding sentence to the current screen:
            possible_screen, screen_height = wrap_text(
                sentence,
                allowed_width,
                f,
                old_text = screen,
                start_new_line = next_line,
                return_height = True,
                line_height = pixels_between_lines
            )
            if screen_height <= allowed_height:
                # Update the current screen:
                screen = possible_screen
            else:
                # This sentence does not fit.
                # If screen is currently blank, it means that sentence needs
                # to be broken across screens (i.e., it will not fit on a
                # single screen).
                if screen:
                    # This is not an issue.
                    # Save screen:
                    screens.append(screen)
                    # Initialize the next screen with sentence:
                    screen, current_height = wrap_text(
                        sentence,
                        allowed_width,
                        f,
                        return_height = True,
                        line_height = pixels_between_lines
                    )
                    if current_height > allowed_height:
                        # sentence needs to be broken across screens.
                        # This can be accomplished by calling the present
                        # function without restrictions on screen endings.
                        # However, the text currently on screen is needed too.
                        text_to_add = ""
                        for line in screen:
                            text_to_add = text_to_add+line+""
                        text_to_add = text_to_add+sentence
                        # NOTE(review): `screen` keeps the oversized wrap
                        # result after this recursion, so the final
                        # `if screen` below may re-append it — confirm.
                        multiple_screens = string_to_screens_and_lines(
                            text_to_add,
                            allowed_width,
                            allowed_height,
                            f,
                            pixels_between_lines = pixels_between_lines
                        )
                        for s in multiple_screens:
                            screens.append(s)
                else:
                    # screen is empty, but sentence will not fit.
                    # Call the present function to get this sentence's
                    # screens:
                    multiple_screens = string_to_screens_and_lines(
                        sentence,
                        allowed_width,
                        allowed_height,
                        f,
                        pixels_between_lines = pixels_between_lines
                    )
                    for s in multiple_screens:
                        screens.append(s)
        # Check if a final screen needs to be added:
        if screen:
            screens.append(screen)
    return screens
def trim_spectrum(self, scouse, flux):
    """Return *flux* restricted to the trim indices stored on *scouse*."""
    # The trim indices were computed up front by the scouse object.
    return flux[scouse.trimids]
def test_sin_2ndord_2vars():
    """
    Function testing 2nd order derivative for sin with two-variable input
    """
    x, y = fwd.Variable(), fwd.Variable()
    f = fwd.sin(x/y)
    # Closed-form reference derivative used for the final comparison.
    df_dxdy = lambda x, y: -(y*np.cos(x/y) - x*np.sin(x/y))/y**3
    # (x, x) as a pair should match the shorthand single-variable form.
    assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2),
                  f.derivative_at( x,     {x: 1.5, y:2.5}, order=2))
    # Mixed partials should be symmetric in the order of differentiation.
    assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2),
                  f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))
    assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2),
                  df_dxdy(1.5, 2.5))
def test_bitwise_and(a, b):
    """
    >>> test_bitwise_and(0b01, 0b10)
    0L
    >>> test_bitwise_and(0b01, 0b11)
    1L
    >>> test_bitwise_and(0b01, 2.0)
    Traceback (most recent call last):
        ...
    NumbaError: 27:15: Expected an int, or object, or bool
    >>> test_bitwise_and(2.0, 0b01)
    Traceback (most recent call last):
        ...
    NumbaError: 27:11: Expected an int, or object, or bool
    """
    # The doctests above pin both the integer results and numba's
    # type-inference errors for float operands; do not alter them.
    return a & b
def main():
    """Post reminders about open CodeCommit pull requests to slack."""
    # Fetch the open PRs, then forward them to the slack posting helper.
    open_pull_requests = get_open_pull_requests()
    post_message(open_pull_requests)
def flagFunction(method, name=None):
    """
    Determine whether a function is an optional handler for a I{flag} or an
    I{option}.
    A I{flag} handler takes no additional arguments. It is used to handle
    command-line arguments like I{--nodaemon}.
    An I{option} handler takes one argument. It is used to handle command-line
    arguments like I{--path=/foo/bar}.
    @param method: The bound method object to inspect.
    @param name: The name of the option for which the function is a handle.
    @type name: L{str}
    @raise UsageError: If the method takes more than one argument.
    @return: If the method is a flag handler, return C{True}. Otherwise return
        C{False}.
    """
    if _PY3:
        # inspect.signature on a bound method does not count `self`.
        reqArgs = len(inspect.signature(method).parameters)
        if reqArgs > 1:
            raise UsageError('Invalid Option function for %s' %
                             (name or method.__name__))
        if reqArgs == 1:
            return False
    else:
        # Python 2: getargspec counts `self`, hence the off-by-one thresholds.
        reqArgs = len(inspect.getargspec(method).args)
        if reqArgs > 2:
            raise UsageError('Invalid Option function for %s' %
                             (name or method.__name__))
        if reqArgs == 2:
            return False
    return True
def azure_blob(Config, file_name, dump_path, db):
    """
    Upload a backup dump file to an Azure blob container.

    Parameters:
        Config (configparser.ConfigParser): application configuration; the
            ARCHIVE_AZURE section holds the storage account credentials
        file_name (str): local file name, to set as blob name
        dump_path (str): local backup dump file path to upload
        db (str): backup config details from configparser
    """
    azureBlobConf = Config["ARCHIVE_AZURE"]
    account_name = azureBlobConf.get('account_name')
    account_key = azureBlobConf.get('account_key')
    container_name = azureBlobConf.get('container_name')
    logging.debug('Initiating archive to Azure blob storage')
    logging.debug("Uploading the backup file - '{}'".format(dump_path))
    result = AzureBlobStorage(
        account_name,
        account_key,
        container_name,
        file_name,
        dump_path).upload()
    # upload() returns True on success, otherwise an error value; compare
    # with `is True` so a truthy error payload is never mistaken for success.
    if result is True:
        logging.debug("Successfully uploaded the backup '{}' to blob container '{}'".format(file_name, container_name))
    else:
        logging.error('Upload Failed..!')
        logging.error('Error Message: {}'.format(result))
        # Only failures trigger a slack notification.
        notification_slack(Config, db=db, task='Archive', status='Failed')
def envnotfound(env):
    """`'Env "my-venv" not found. Did you mean "./my-venv"?'`"""
    message = f'Env "{env}" not found.'
    # Suggest the relative form when a bare name matches an existing path.
    if arg_is_name(env) and Path(env).exists():
        message += f'\nDid you mean "./{env}"?'
    return message
def random_translation_along_x(gt_boxes, points, offset_range):
    """Translate boxes and points along x by one shared random offset.

    Args:
        gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
        points: (M, 3 + C),
        offset_range: [min max]]
    Returns:
        The (modified in place) gt_boxes and points.
    """
    shift = np.random.uniform(offset_range[0], offset_range[1])
    # Apply the same shift to both point cloud and box centers.
    gt_boxes[:, 0] += shift
    points[:, 0] += shift
    # if gt_boxes.shape[1] > 7:
    #     gt_boxes[:, 7] += offset
    return gt_boxes, points
def predband(xd,yd,a,b,conf=0.95,x=None):
    """
    Calculates the prediction band of the linear regression model at the desired confidence
    level, using analytical methods.
    Clarification of the difference between confidence and prediction bands:
    "The 2sigma confidence interval is 95% sure to contain the best-fit regression line.
    This is not the same as saying it will contain 95% of the data points. The prediction bands are
    further from the best-fit line than the confidence bands, a lot further if you have many data
    points. The 95% prediction interval is the area in which you expect 95% of all data points to fall."
    (from http://graphpad.com/curvefit/linear_regression.htm)
    Arguments:
    - conf: desired confidence level, by default 0.95 (2 sigma)
    - xd,yd: data arrays
    - a,b: linear fit parameters as in y=ax+b
    - x: (optional) array with x values to calculate the confidence band. If none is provided, will
      by default generate 100 points in the original x-range of the data.
    Usage:
    >>> lpb,upb,x=nemmen.predband(all.kp,all.lg,a,b,conf=0.95)
    calculates the prediction bands for the given input arrays
    >>> pylab.fill_between(x, lpb, upb, alpha=0.3, facecolor='gray')
    plots a shaded area containing the prediction band
    :returns: Sequence (lpb,upb,x) with the arrays holding the lower and upper confidence bands
        corresponding to the [input] x array.
    References:
    1. `Introduction to Simple Linear Regression, Gerard
    E. Dallal, Ph.D. <http://www.JerryDallal.com/LHSP/slr.htm>`_
    """
    alpha=1.-conf  # significance
    n=xd.size  # data sample size
    if x is None: x=numpy.linspace(xd.min(),xd.max(),100)
    # Predicted values (best-fit model)
    y=a*x+b
    # Auxiliary definitions
    sd=scatterfit(xd,yd,a,b)  # Scatter of data about the model
    sxd=numpy.sum((xd-xd.mean())**2)
    sx=(x-xd.mean())**2  # array
    # Quantile of Student's t distribution for p=1-alpha/2
    q=scipy.stats.t.ppf(1.-alpha/2.,n-2)
    # Prediction band half-width: grows away from the data's mean x.
    dy=q*sd*numpy.sqrt( 1.+1./n + sx/sxd )
    upb=y+dy  # Upper prediction band
    lpb=y-dy  # Lower prediction band
    return lpb,upb,x
def orient_edges(G):
    """Orient remaining edges after colliders have been oriented.
    :param G: partially oriented graph (colliders oriented)
    :returns: maximally oriented DAG
    """
    undir_list = [edge for edge in G.edges() if G.is_undir_edge(edge)]
    undir_len = len(undir_list)
    idx = 0
    # Repeatedly apply the orientation rule to every remaining undirected
    # edge until a full pass makes no progress.
    # NOTE(review): idx is bounded by the *initial* count of undirected
    # edges, capping the number of passes — confirm this is intended rather
    # than tracking the shrinking list length.
    while idx < undir_len:
        success = False
        for edge in undir_list:
            if can_orient(G,edge):
                # Orient by removing the reverse direction.
                G.remove_edge(*edge[::-1])
                success = True
        if success:
            # Something changed: recompute the remaining undirected edges.
            undir_list = [edge for edge in G.edges() if G.is_undir_edge(edge)]
            idx += 1
        else:
            # Fixpoint reached; no rule applies anymore.
            break
    return G
def _create_deserialize_fn(attributes: dict, globals: dict, bases: tuple[type]) -> str:
    """
    Create a deserialize function for binary struct from a buffer
    The function will first deserialize parent classes, then the class attributes

    :param attributes: mapping of attribute name -> annotation for this class
    :param globals: globals dict handed to the generated function
    :param bases: base classes whose deserialize (if any) runs first
    :returns: the generated ``deserialize(self, buf)`` function
    """
    lines = []
    # For this class bases
    for parent in bases:
        if not _is_parent_fn_callable(parent, 'deserialize'):
            continue
        # Let the parent consume its slice, then advance past its fixed size.
        lines.append(f'{parent.__name__}.deserialize(self, buf)')
        lines.append(f'buf = buf[{parent.__name__}._bs_size(self):]')
    # For this class attributes
    for name, annotation in attributes.items():
        annotation_type = _get_annotation_type(annotation)
        if annotation_type == AnnotationType.TYPED_BUFFER:
            # A typed buffer consumes the rest of the buffer; no advance needed
            # (it must therefore be the last attribute).
            lines.append(f'self.{name}.deserialize(buf)')
        else:
            # Fixed-size attribute: hand it exactly its own slice, then advance.
            lines.append(f'self.{name}.deserialize(buf[:self.{name}.size_in_bytes])')
            lines.append(f'buf = buf[self.{name}.size_in_bytes:]')
    return _create_fn('deserialize', ['self, buf'], lines + ['return self'], globals)
def blck_repeat(preprocessor: Preprocessor, args: str, contents: str) -> str:
    """The repeat block.
    usage: repeat <number>
    renders its contents one and copies them number times

    :param preprocessor: the running preprocessor (used for errors/parsing)
    :param args: block argument; must be a positive integer literal
    :param contents: raw block contents, parsed once then duplicated
    :returns: the rendered contents repeated ``number`` times
    """
    args = args.strip()
    if not args.isnumeric():
        preprocessor.send_error("invalid-argument", "invalid argument. Usage: repeat [uint > 0]")
    # NOTE(review): if send_error does not abort execution, int(args) below
    # still raises ValueError for non-numeric args -- confirm send_error
    # semantics (presumably it raises or exits).
    number = int(args)
    if number <= 0:
        preprocessor.send_error("invalid-argument", "invalid argument. Usage: repeat [uint > 0]")
    preprocessor.context.update(preprocessor.current_position.end, "in block repeat")
    # Parse once, then duplicate the rendered result.
    contents = preprocessor.parse(contents)
    preprocessor.context.pop()
    return contents * number
def is_prime(n: int) -> bool:
    """Determine whether the natural number *n* is prime.

    Uses trial division by 2, 3 and then candidates of the form 6k +/- 1 up
    to sqrt(n), which is sufficient because every prime > 3 is congruent to
    1 or 5 mod 6.

    :param n: number to test
    :returns: True if n is prime, False otherwise
    """
    # simple test for small n: 2 and 3 are prime, but 1 (and 0, negatives) are not
    if n <= 3:
        return n > 1
    # check if multiple of 2 or 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    # math.isqrt gives the exact integer square root; int(math.sqrt(n)) can be
    # off by one for very large n due to float rounding.
    max_factor = math.isqrt(n)
    # search for subsequent prime factors around multiples of 6
    for i in range(5, max_factor + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True
def annotate(number_of_scans): # annotation starting from scan 1
    """
    Annotate scans 1..number_of_scans of the 3Dircad1 dataset.

    For each scan the annotator marks slices in a sed3 viewer; the seeds'
    slice indices become marks 1-4.  The user is asked whether the liver
    ended before the kidney started, which decides the order of marks 2
    and 3.  Collected labels are written to 'tabulka.xlsx'.

    :param number_of_scans: number of scans to annotate, starting from scan 1
    """
    columns = [
        "Ircad ID",
        "Mark 1 slice id",
        "Mark 2 slice id",
        "Mark 3 slice id",
        "Mark 4 slice id",
    ]
    rows = []
    for i in range(number_of_scans):
        dr = io3d.DataReader()
        pth = io3d.datasets.join_path(
            "medical/orig/3Dircadb1.{}/".format(i + 1), get_root=True
        )
        datap = dr.Get3DData(pth + "PATIENT_DICOM/", dataplus_format=True)
        datap_labelled = dr.Get3DData(pth + "MASKS_DICOM/liver", dataplus_format=True)
        ed = sed3.sed3(
            datap["data3d"], contour=datap_labelled["data3d"], windowW=400, windowC=40
        )
        ed.show()
        # Slice indices (axis 0) of all seeds placed by the annotator.
        nz = np.nonzero(ed.seeds)
        ids = np.unique(nz[0])
        order = input("Did liver end before kidney started? (y/n)")
        if order == "y":
            mark2, mark3 = ids[1], ids[2]
        elif order == "n":
            # Liver and kidney overlap: marks 2 and 3 swap roles.
            mark2, mark3 = ids[2], ids[1]
        else:
            print("ERROR")
            break
        rows.append(
            {
                "Ircad ID": i + 1,
                "Mark 1 slice id": ids[0],
                "Mark 2 slice id": mark2,
                "Mark 3 slice id": mark3,
                "Mark 4 slice id": ids[3],
            }
        )
    # DataFrame.append was removed in pandas 2.0; collect rows and build once.
    df = pd.DataFrame(rows, columns=columns)
    df.to_excel("tabulka.xlsx", sheet_name="List1", index=False)
def test_envset_returns_false():
    """Env set returns false if env>set doesn't exist.."""
    # Deliberately have 1 pre-existing $ENV to update, and 1 unset so can
    # create it anew as part of test
    os.environ['ARB_DELETE_ME2'] = 'arb from pypyr context ARB_DELETE_ME2'
    env_payload = {
        'blah': {
            'ARB_DELETE_ME1': 'blah blah {key2} and {key1} goes here.'
        }
    }
    context_values = {
        'key1': 'value1',
        'key2': 'value2',
        'key3': 'value3',
        'env': env_payload,
    }
    context = Context(context_values)
    # env_set should report False: there is no env>set key in the context.
    assert not pypyr.steps.env.env_set(context)
def get_word_prob():
    """Returns the probabilities of all the words in the mechanical turk video labels.

    Reads the pickled label data configured in ``constants.datafile`` and
    computes each word's relative frequency over all label tokens.

    :returns: dict mapping word -> probability (frequencies sum to 1)
    """
    # NOTE(review): `cPickle` is Python 2 only -- this function will not run
    # under Python 3 without switching to `pickle`.
    import constants as c
    import cPickle
    data = cPickle.load(open(c.datafile)) # Read in the words from the labels
    wordcount = dict()
    totalcount = 0
    # Count occurrences of every word across all labels.
    for label in data:
        for word in label:
            totalcount += 1
            if word in wordcount:
                wordcount[word] += 1
            else:
                wordcount[word] = 1
    # Normalize counts to probabilities.
    wordprob = dict([(word, float(wc)/totalcount) for word, wc in wordcount.items()])
    return wordprob
def angDistance(ra, dec, df, raCol='fieldRA', decCol='fieldDec'):
    """Return the row of *df* whose (raCol, decCol) position has the smallest
    angular separation from (ra, dec).

    A temporary 'dist' column is added to *df* and removed before returning;
    the returned row (a copy) still carries its 'dist' value.
    """
    df['dist'] = angSep(ra, dec, df[raCol], df[decCol])
    nearest_row = df.loc[df['dist'].idxmin()]
    # Clean the scratch column back out of the caller's DataFrame.
    df.drop('dist', axis=1, inplace=True)
    return nearest_row
def test_login(test_client):
    """Make sure login and logout works.

    Exercises three cases against the login helper: unknown email, known
    email with wrong password, and a successful login.
    """
    # Unknown account.
    response = login(test_client, 'aa@gmail.com', 'b')
    assert b"Email does not exist." in response.data
    # Known account, wrong password.
    response = login(test_client, 'jay@gmail.com', 'b')
    assert b"Incorrect password, try again." in response.data
    # Correct credentials.
    response = login(test_client, 'jay@gmail.com', 'asdfasdf')
    assert b"Logged in successfully!" in response.data
    assert response.status_code == 200
def offer_in_influencing_offers(offerId, influencing_offers):
    """
    Find if a passed offerId is in the influencing_offers list
    Parameters
    ----------
    offerId: Offer Id from portfolio dataframe.
    influencing_offers : List of offers found for a customer
    Returns
    -------
    1 if offer is found 0 if not found
    """
    # Conditional expression replaces the verbose if/else block.
    return 1 if offerId in influencing_offers else 0
def setupVortexServer(portNum=8345, wsPortNum=8344):
    """ Setup Site
    Sets up the web site to listen for connections and serve the site.
    Supports customisation of resources based on user details

    :param portNum: TCP port for the HTTP site
    :param wsPortNum: TCP port for the websocket vortex server
    @return: Port object
    """
    defer.setDebugging(True)
    # Register the test tuple
    from vortex.test import TestTuple
    TestTuple.__unused = False # Crap code
    from vortex.test import VortexJSTupleLoaderTestHandler
    VortexJSTupleLoaderTestHandler.__unused = False # Crap code
    rootResource = TestRootResource()
    # Both transports serve the same "default" vortex.
    VortexFactory.createServer("default", rootResource)
    VortexFactory.createWebsocketServer("default", wsPortNum)
    # rootResource.putChild(b"vortex", VortexResource())
    site = server.Site(rootResource)
    site.protocol = HTTPChannel
    port = reactor.listenTCP(portNum, site).port
    # ip = subprocess.getoutput("/sbin/ifconfig").split("\n")[1].split()[1][5:]
    logger.info('VortexServer test is alive and listening on port %s', port)
    logger.info('VortexServerWebsocket test is alive and listening on port %s', wsPortNum)
    logger.debug("Logging debug messages enabled")
    # Starts the periodic tuple-update notifications used by the test.
    NotifyTestTimer.startTupleUpdateNotifyer()
    return port
def identity_block(filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.
    # Arguments
        filters: integer, used for first and second conv layers, third conv layer double this value
        stage: integer, current stage label, used for generating layer names
        block: integer, current block label, used for generating layer names
    # Returns
        Output layer for the block.
    """
    def layer(input_tensor):
        # Shared conv / batch-norm settings and per-block layer names.
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
        # 1x1 reduce -> 3x3 grouped conv -> 1x1 expand (bottleneck pattern).
        x = Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(input_tensor)
        x = BatchNormalization(name=bn_name + '1', **bn_params)(x)
        x = Activation('relu', name=relu_name + '1')(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = GroupConv2D(filters, (3, 3), conv_params, conv_name + '2')(x)
        x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
        x = Activation('relu', name=relu_name + '2')(x)
        x = Conv2D(filters * 2, (1, 1), name=conv_name + '3', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '3', **bn_params)(x)
        # Identity shortcut: add the unmodified input back in.
        x = Add()([x, input_tensor])
        x = Activation('relu', name=relu_name)(x)
        return x
    return layer
def renderscene():
    """This function litterally prints the scene.

    GLUT display callback: for each of the 26 cubelets (3x3x3 minus the
    center), advances any in-progress rotation by one step, applies the view
    and cubelet orientation, draws it, then draws the HUD text.
    """
    global xrot, yrot
    global X, Y, Z
    global quater
    global rotx, roty, rotz
    global maxx, maxy, maxz
    global mesg1
    global mesg2
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    glPushMatrix()
    indic = -1
    for i in range(-1,2,1):
        for j in range(-1,2,1):
            for k in range(-1,2,1):
                # Skip the invisible center cubelet at (0,0,0).
                if (i==j==k==0)==False:
                    indic = indic + 1
                    # Step any pending rotation about the cubelet's local X axis:
                    # update the quaternion and re-derive the other two axes.
                    if rotx[indic]!=maxx[indic]:
                        speed = find_dir(rotx[indic],maxx[indic])
                        rotx[indic]=rotx[indic]+speed
                        update_cur_mov()
                        quater[indic]=quat.quaternion(X[indic], angl=speed)*quater[indic]
                        Y[indic]=np.transpose(utl.Rq(speed*np.pi/180.,X[indic])*np.transpose(np.matrix(Y[indic])))
                        Z[indic]=np.transpose(utl.Rq(speed*np.pi/180.,X[indic])*np.transpose(np.matrix(Z[indic])))
                    # Same for the local Y axis.
                    if roty[indic]!=maxy[indic]:
                        speed = find_dir(roty[indic],maxy[indic])
                        roty[indic]=roty[indic]+speed
                        update_cur_mov()
                        quater[indic]=quat.quaternion(Y[indic], angl=speed)*quater[indic]
                        X[indic]=np.transpose(utl.Rq(speed*np.pi/180.,Y[indic])*np.transpose(np.matrix(X[indic])))
                        Z[indic]=np.transpose(utl.Rq(speed*np.pi/180.,Y[indic])*np.transpose(np.matrix(Z[indic])))
                    # Same for the local Z axis.
                    if rotz[indic]!=maxz[indic]:
                        speed = find_dir(rotz[indic],maxz[indic])
                        rotz[indic]=rotz[indic]+speed
                        update_cur_mov()
                        quater[indic]=quat.quaternion(Z[indic], angl=speed)*quater[indic]
                        Y[indic]=np.transpose(utl.Rq(speed*np.pi/180.,Z[indic])*np.transpose(np.matrix(Y[indic])))
                        X[indic]=np.transpose(utl.Rq(speed*np.pi/180.,Z[indic])*np.transpose(np.matrix(X[indic])))
                    # Compose camera rotation, cubelet orientation and offset,
                    # then draw this cubelet.
                    glLoadIdentity()
                    rotation_x = quat.quaternion([1.,0.,0.], angl=-xrot)
                    rotation_y = quat.quaternion([0.,1.,0.], angl=-yrot)
                    glMultMatrixf(rotation_x.matrix)
                    glMultMatrixf(rotation_y.matrix)
                    glMultMatrixf((quater[indic]).matrix)
                    glTranslatef(i*0.5,j*0.5,k*0.5)
                    trace(i*0.5, j*0.5, k*0.5)
    # HUD text.  NOTE(review): mesg3/mesg4/mesg5 are read here but are not in
    # the `global` declarations above -- reads work without `global`, but
    # confirm they are module-level names.
    glLoadIdentity()
    glColor3ub(0, 0, 100)
    glRasterPos2f(-1.15, -1.5)
    glutBitmapString(GLUT_BITMAP_HELVETICA_18, mesg1+" "+mesg2)
    glRasterPos2f(-1., 1.75)
    glutBitmapString(GLUT_BITMAP_HELVETICA_18, mesg3)
    glRasterPos2f(-1.35, 1.5)
    glutBitmapString(GLUT_BITMAP_HELVETICA_18, mesg4)
    glRasterPos2f(-1.35, -1.75)
    glutBitmapString(GLUT_BITMAP_HELVETICA_18, mesg5)
    glPopMatrix()
    glutSwapBuffers()
    glutPostRedisplay()
def filebeat_service_running():
    """
    Checks if the filebeat service is currently running on the OS.
    :return: True if filebeat service detected and running, False otherwise.
    """
    try:
        service_info = psutil.win_service_get('filebeat').as_dict()
    except psutil.NoSuchProcess:
        # No filebeat service registered on this machine.
        return False
    return service_info['status'] == 'running'
def unhandled_request_message(request, cassette):
    """Generate exception for unhandled requests."""
    details = {
        'url': request.url,
        'cassette_file_path': cassette.cassette_name,
        'cassette_record_mode': cassette.record_mode,
        'cassette_match_options': cassette.match_options,
    }
    return UNHANDLED_REQUEST_EXCEPTION.format(**details)
def logout(request):
    """
    Logs out the user and displays 'You are logged out' message.
    """
    # Only non-GET requests may log out (CSRF-style protection).
    if request.method == 'GET':
        return _new_api_403()
    from django.contrib.auth import logout as auth_logout
    auth_logout(request)
    # NOTE(review): the success path returns None -- a Django view normally
    # must return an HttpResponse; confirm a middleware/decorator handles this.
def compara_dv(cpf, primeiro_dv, segundo_dv):
    """Return "válido" when the computed check digits equal digits 10 and 11
    of *cpf* (indices 9 and 10), otherwise "inválido"."""
    digits_match = primeiro_dv == int(cpf[9]) and segundo_dv == int(cpf[10])
    if digits_match:
        return "válido"
    return "inválido"
def write_file(file_name, data, line_length):
    """ Writes the results to a text file using a name based on file_name.

    The output name inserts '.OUT' before the last extension, e.g.
    'seqs.fasta' -> 'seqs.OUT.fasta'.  Each (name, sequence) pair is written
    as the name line followed by the sequence wrapped at line_length
    characters (0 means the whole sequence on one line).

    input: string, list of (name, sequence) tuples, int
    returns: int -- number of records written
    """
    # NOTE(review): rfind returns -1 for names without a dot, which mangles
    # fn_o -- presumably callers always pass an extension; confirm.
    pos = file_name.rfind('.')
    fn_o = file_name[:pos] + '.OUT' + file_name[pos:]
    # `with` guarantees the handle is closed even if a write fails.
    with open(fn_o, "w") as f:
        for fsn, sequence in data:
            f.write(fsn + '\n')
            l_length = len(sequence) if line_length == 0 else line_length
            # Guard against a zero range step when line_length == 0 and the
            # sequence is empty (previously raised ValueError).
            if l_length == 0:
                l_length = 1
            for p in range(0, len(sequence), l_length):
                f.write(sequence[p:p+l_length] + '\n')
    return len(data)
def handle_duplicates(df, cutoff=5, agg_source_col='multiple'):
    """Aggregates duplicate measurements in a DataFrame.
    Parameters
    ----------
    df : pandas DataFrame
        DataFrame with required columns: 'smiles', 'solvent', 'peakwavs_max'
    cutoff : int
        Wavelength cutoff in nm. Duplicate measurements of the same smiles-solvent
        pair with standard deviation less than cutoff are averaged. Those with
        standard deviation greater than cutoff are dropped.
    agg_source_col : str
        'multiple' labels merged rows "multiple"; 'random' picks one source
        at random (seeded for reproducibility).
    Returns
    -------
    df : pandas DataFrame
        An updated DataFrame with duplicates aggregated or removed
    """
    col_names = ['smiles', 'solvent'] + target_names + ['source']
    cols = [x for x in df.columns if x not in col_names]
    agg_dict = {}
    for prop in target_names:
        agg_dict[prop] = ['mean','std']
    if agg_source_col=='multiple':
        # BUGFIX: a trailing comma here previously turned the value into a
        # 1-tuple (lambda,), which is not a valid aggregation function.
        agg_dict['source'] = lambda x: 'multiple' if len(x) > 1 else x
    elif agg_source_col=='random':
        np.random.seed(0)
        agg_dict['source'] = np.random.choice
    for col in cols:
        agg_dict[col] = 'mean'
    # For all smiles+solvent pairs, find mean and std of target property/properties
    # If std > cutoff, drop; elif std <= cutoff, take mean
    df = df.groupby(['smiles','solvent']).agg(agg_dict).reset_index()
    for prop in target_names:
        high_std_idx = df[df[prop]['std']>cutoff].index
        df.drop(index=high_std_idx, inplace=True)
    # Collapse the MultiIndex columns back to flat names.
    df.drop(columns='std', level=1, inplace=True)
    df.columns = df.columns.get_level_values(0)
    return df
def splitData(features, target, trainFraction=0.25):
    """
    Split the data into test and train data
    Inputs:
      > features: the model feature data (DataFrame)
      > target: the target data (Series)
      > trainFraction (0.25 by default): fraction of events to use for training
    Outputs:
      > Training feature data (DataFrame), Testing feature data (DataFrame), Training target data (Series), Testing target data (Series)
    """
    # Fixed random_state keeps the split reproducible across runs.
    test_fraction = 1 - trainFraction
    return train_test_split(features, target, test_size=test_fraction, random_state=42)
def poisson_moment( k, n):
    """
    Return the n-th raw moment E[X**n] of a Poisson distribution with mean k.

    Computed algorithmically via the Touchard polynomial identity
    E[X**n] = sum_j S(n, j) * k**j, where S(n, j) are Stirling numbers of
    the second kind.  This replaces the previous hard-coded expressions
    (which had a typo at n == 8: '1050*k*85' instead of '1050*k**5') and
    works for any non-negative integer n.

    :param k: Poisson mean (number, or anything supporting * and **)
    :param n: moment order, non-negative integer
    :raises NotImplementedError: if n is negative
    """
    if n < 0:
        raise NotImplementedError('moments are only defined for n >= 0')
    if n == 0:
        return 1
    # Build row n of the Stirling triangle with the recurrence
    # S(m, j) = j*S(m-1, j) + S(m-1, j-1), starting from S(0, 0) = 1.
    row = [1]
    for m in range(1, n + 1):
        new_row = [0] * (m + 1)
        for j in range(1, m + 1):
            above = row[j] if j < len(row) else 0
            new_row[j] = j * above + row[j - 1]
        row = new_row
    # S(n, 0) == 0 for n >= 1, so the j == 0 term contributes nothing.
    return sum(coeff * k**j for j, coeff in enumerate(row) if coeff)
def save_results(annot_bundles_df, intensity_matrix, params, rel_points):
    """
    Function: Save results to a pickle file
    Inputs:
    - annot_bundles_df : annotation DataFrame; its first row supplies the
      category/time/sample/region identifiers
    - intensity_matrix
    - params
    - rel_points
    Outputs: output_data as a dictionary saved in the output folder
    - components of output_data: category_ID, time_ID, sample_ID, region_ID,
      slice_type, intensity_matrix, parameter, relative_positions,
      bundle_nos, annot_csv, analysis_params_general
    """
    analysis_params_general = settings.analysis_params_general
    paths = settings.paths
    category_id = annot_bundles_df.iloc[0]['CategoryID']
    time_id = annot_bundles_df.iloc[0]['TimeID']
    sample_id = annot_bundles_df.iloc[0]['SampleID']
    region_id = annot_bundles_df.iloc[0]['RegionID']
    bundles_list = annot_bundles_df.index
    output_data = {
        'category_ID': category_id,
        'time_ID':time_id,
        'sample_ID': sample_id,
        'region_ID': region_id,
        'slice_type': analysis_params_general.slice_type,
        'intensity_matrix': intensity_matrix,
        'parameter': params,
        'relative_positions': rel_points,
        'bundle_nos': list(bundles_list),
        'annot_csv': paths.annot_name,
        'analysis_params_general':analysis_params_general
    }
    # Date-stamp the output file name, e.g. ..._v2023115.pickle
    now = datetime.datetime.now()
    date_info = str(now.year)+str(now.month)+str(now.day)
    output_name = f'{category_id}_{time_id}hrs_sample{sample_id}_region{region_id}_slice{analysis_params_general.slice_type}_v{date_info}.pickle'
    # Ensure <data_out>/<category_id>/ exists, then write the pickle once.
    # (The previous version duplicated this whole save block and wrote twice.)
    output_dir = os.path.join(paths.data_out_folder_path)
    my_help.check_dir(output_dir)
    output_dir = os.path.join(output_dir, category_id)
    my_help.check_dir(output_dir)
    output_path = os.path.join(output_dir, output_name)
    with open(output_path, "wb") as pickle_out:
        pickle.dump(output_data, pickle_out)
def drug_encoder(input_smiles):
    """
    Drug Encoder
    Args:
        input_smiles: input drug sequence.
    Returns:
        v_d: padded drug sequence.
        temp_mask_d: masked drug sequence.
    """
    # BPE-tokenize the SMILES string into sub-word units.
    temp_d = drug_bpe.process_line(input_smiles).split()
    try:
        idx_d = np.asarray([drug_idx[i] for i in temp_d])
    except:
        # NOTE(review): bare except -- presumably meant to catch KeyError for
        # out-of-vocabulary tokens, but it also hides any other error by
        # collapsing the sequence to a single 0 index.
        idx_d = np.array([0])
    flag = len(idx_d)
    # Pad (mask 0) or truncate to the fixed length D_MAX; mask is 1 on real tokens.
    if flag < D_MAX:
        v_d = np.pad(idx_d, (0, D_MAX - flag), 'constant', constant_values=0)
        temp_mask_d = [1] * flag + [0] * (D_MAX - flag)
    else:
        v_d = idx_d[:D_MAX]
        temp_mask_d = [1] * D_MAX
    return v_d, np.asarray(temp_mask_d)
def resp_graph(dataframe, image_name, dir='./'):
    """Response time graph for bucketed data
    :param pandas.DataFrame dataframe: dataframe containing all data
    :param str image_name: the output file name
    :param str dir: the output directory
    :return: None
    """
    fig = pygal.TimeLine(x_title='Elapsed Time In Test (secs)',
                         y_title='Response Time (secs)',
                         x_label_rotation=25,
                         js=('scripts/pygal-tooltip.min.js',))

    def column_series(column):
        # One (time, value) point per row; NaN becomes None so pygal gaps it.
        return [(get_local_time(index), row[column] if pd.notnull(row[column]) else None)
                for index, row in dataframe.iterrows()]

    fig.add('AVG', column_series('mean'))
    fig.add('90%', column_series('90%'))
    fig.add('80%', column_series('80%'))
    fig.render_to_file(filename=os.path.join(dir, image_name))
def load_mapping_files():
    """Load local and remote mapping files."""
    local = ["properties", "countries", "professions",
             "latin_countries", "latin_languages"]
    remote = ["selibr"]
    mappings = {}
    # Local mappings are JSON files in the MAPPINGS directory.
    for title in local:
        json_path = os.path.join(MAPPINGS, '{}.json'.format(title))
        mappings[title] = utils.load_json(json_path)
    # Remote mappings are fetched from Wikidata via the property id that the
    # local "properties" mapping declares for them.
    for title in remote:
        prop = mappings["properties"][title]
        mappings[title] = utils.get_wd_items_using_prop(prop)
    print("Loaded local mappings: {}.".format(", ".join(local)))
    print("Loaded remote mappings: {}.".format(", ".join(remote)))
    return mappings
def optimise_acqu_func_mledr(acqu_func, bounds, X_ob, func_gradient=True, gridSize=10000, n_start=5):
    """
    Optimise acquisition function built on GP- model with learning dr
    :param acqu_func: acquisition function
    :param bounds: input space bounds
    :param X_ob: observed input data
    :param func_gradient: whether to use the acquisition function gradient in optimisation
    :param gridSize: random grid size
    :param n_start: the top n_start points in the random grid search from which we do gradient-based local optimisation
    :return np.array([opt_location]): global optimum input
    :return f_opt: global optimum
    """
    # Turn the acquisition function to be - acqu_func for minimisation
    target_func = lambda x: - acqu_func._compute_acq(x)
    # Define a new function combingin the acquisition function and its derivative
    def target_func_with_gradient(x):
        acqu_f, dacqu_f = acqu_func._compute_acq_withGradients(x)
        return -acqu_f, -dacqu_f
    # Define bounds for the local optimisers based on the optimal dr
    # NOTE(review): the `bounds` parameter is overwritten here and never used
    # as passed -- confirm whether the argument is intentionally ignored.
    nchannel = acqu_func.model.nchannel
    d = acqu_func.model.opt_dr
    d_vector = int(acqu_func.model.opt_dr ** 2 * nchannel)
    bounds = np.vstack([[-1, 1]] * d_vector)
    # Project X_ob to optimal dr learnt
    h_d = int(X_ob.shape[1] / acqu_func.model.nchannel)
    X_ob_d_r = downsample_projection(acqu_func.model.dim_reduction, X_ob, int(d ** 2), h_d, nchannel=nchannel,
                                     align_corners=True)
    # Create grid for random search but split the grid into n_batches to avoid memory overflow
    good_results_list = []
    random_starts_candidates_list = []
    n_batch = 5
    gridSize_sub = int(gridSize / n_batch)
    for x_grid_idx in range(n_batch):
        # Uniform random candidates inside the [-1, 1]^d_vector box.
        Xgrid_sub = np.tile(bounds[:, 0], (gridSize_sub, 1)) + np.tile((bounds[:, 1] - bounds[:, 0]),
                                                                       (gridSize_sub, 1)) * np.random.rand(gridSize_sub,
                                                                                                           d_vector)
        # Include the projected observations in the first batch so known good
        # points are always evaluated.
        if x_grid_idx == 0:
            Xgrid_sub = np.vstack((Xgrid_sub, X_ob_d_r))
        results = target_func(Xgrid_sub)
        # Keep the best 5 candidates of this batch.
        top_candidates_sub = results.flatten().argsort()[:5]  # give the smallest n_start values in the ascending order
        random_starts_candidates = Xgrid_sub[top_candidates_sub]
        good_results = results[top_candidates_sub]
        random_starts_candidates_list.append(random_starts_candidates)
        good_results_list.append(good_results)
    # Find the top n_start candidates from random grid search to perform local optimisation
    results = np.vstack(good_results_list)
    X_random_starts = np.vstack(random_starts_candidates_list)
    top_candidates_idx = results.flatten().argsort()[
                         :n_start]  # give the smallest n_start values in the ascending order
    random_starts = X_random_starts[top_candidates_idx]
    f_min = results[top_candidates_idx[0]]
    opt_location = random_starts[0]
    # Perform multi-start gradient-based optimisation
    for random_start in random_starts:
        if func_gradient:
            x, f_at_x, info = fmin_l_bfgs_b(target_func_with_gradient, random_start, bounds=bounds,
                                            approx_grad=False, maxiter=5000)
        else:
            x, f_at_x, info = fmin_l_bfgs_b(target_func, random_start, bounds=bounds,
                                            approx_grad=True, maxiter=5000)
        if f_at_x < f_min:
            f_min = f_at_x
            opt_location = x
    # Flip the sign back: we minimised -acqu_func.
    f_opt = -f_min
    return np.array([opt_location]), f_opt
def get_path_from_pc_name(pc_name):
    """Find out path of a template
    Parameters
    ----------
    pc_name : string
        Name of template.
    Returns
    -------
    tplPath : string
        Path of template
    """
    tplPath = pc_name + '.json'
    # If a template with this basename lives in a subdirectory, return its
    # full relative path; otherwise fall back to the bare '<name>.json'.
    for candidate in pcTplEnv.list_templates(filter_func=filter_func):
        if candidate.split('/')[-1] == tplPath:
            return candidate
    return tplPath
def eq(*, alpha=None, omega):
    """Define dyadic comparison function equal to.
    Dyadic case:
        3 = 2 3 4
        0 1 0
    """
    # APL booleans are the integers 0/1.
    return 1 if alpha == omega else 0
def generate_days(year):
    """Generates all tuples (YYYY, MM, DD) of days in a year
    """
    cal = calendar.Calendar()
    seen = set()
    for month in range(1, 13):
        for day_tuple in cal.itermonthdays3(year, month):
            # itermonthdays3 pads with days of neighbouring months (and, at
            # the year boundary, the neighbouring year) -- keep only this year.
            if day_tuple[0] == year:
                seen.add(day_tuple)
    return sorted(seen)
def nounClassifier(word):
    """Classifies noun as actor o object
    Parameters
    ----------
    word : str
        Lematized noun to be classified (case-insensitive).

    Returns
    -------
    IGElement.ACTOR if any matching sense's domain is in ACTOR_DOMAINS,
    IGElement.OBJECT otherwise, or None when no exact-lemma sense is found.
    """
    word = word.lower()
    # Query the senses API for noun senses of this lemma.
    response_raw = requests.get(
        f"{API_URL}senses/search?lemma={word}&&&partOfSpeech=noun&&&&&&"
    )
    response = json.loads(response_raw.content)
    # Keep only exact lemma matches (the API search may return near matches).
    response = [
        item for item in response["content"] if item["lemma"]["word"].lower() == word
    ]
    if len(response) == 0:
        return None
    # The domain name's suffix after the last '_' is the domain keyword.
    if any(
        item["domain"]["name"][item["domain"]["name"].rfind("_") + 1 :] in ACTOR_DOMAINS
        for item in response
    ):
        return IGElement.ACTOR
    else:
        return IGElement.OBJECT
def svn_repos_post_lock_hook(*args):
    """svn_repos_post_lock_hook(svn_repos_t repos, apr_pool_t pool) -> char"""
    # SWIG-generated wrapper: delegates directly to the native _repos module.
    return _repos.svn_repos_post_lock_hook(*args)
def delete_folder(path):
    """
    Deletes a whole test folder.

    :param path: location handed to file_operation (working context for the
        command).
    """
    # NOTE(review): the command removes the module-level TEST_DIR, not `path`
    # -- `path` only scopes where file_operation runs. Confirm this is
    # intentional rather than a bug.
    command = ['rm', '-rf', TEST_DIR]
    file_operation(path, command)
def libdmtx_function(fname, restype, *args):
    """Returns a foreign function exported by `libdmtx`.
    Args:
        fname (:obj:`str`): Name of the exported function as string.
        restype (:obj:): Return type - one of the `ctypes` primitive C data
            types.
        *args: Arguments - a sequence of `ctypes` primitive C data types.
    Returns:
        cddl.CFunctionType: A wrapper around the function.
    """
    prototype = CFUNCTYPE(restype, *args)
    # Instantiating the prototype with (name, dll) looks the symbol up in the
    # loaded shared library.
    return prototype((fname, load_libdmtx()))
def make_plot(title: str, safe_ratios: Dict[AgentType, List[float]]):
    """Generate a plot for a single BenchResult"""
    plt.figure()
    for agent_type, ratios in safe_ratios.items():
        # Plot each agent type as a percentage curve in its assigned colour.
        percentages = [ratio * 100 for ratio in ratios]
        plt.plot(percentages, color=AGENT_COLORS[agent_type.name])
    plt.title(title)
    plt.xlabel("Time")
    plt.ylabel("Safe agents")
    plt.yticks(range(0, 101, 10))
    plt.grid(True)
    plt.legend()
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, config_entry: ConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for a config entry."""
    control_unit: ControlUnit = hass.data[DOMAIN][config_entry.entry_id]
    # Redact sensitive fields (per REDACT_CONFIG) before exposing the config.
    diag: dict[str, Any] = {
        "config": async_redact_data(config_entry.as_dict(), REDACT_CONFIG)
    }
    # Per-platform entity counts and the device types known to the unit.
    platform_stats, device_types = control_unit.async_get_entity_stats()
    diag["platform_stats"] = platform_stats
    diag["devices"] = device_types
    return diag
def test_stage_function_one_function_stage_two_output_success():
    """
    Feature: test servable_config.py stage
    Description: Test stage with one input, two outputs
    Expectation: Serving server work ok.
    """
    # Servable definition executed by the server: one Python stage that maps
    # one input tensor to two outputs (x1+1, x1-1).
    servable_content = r"""
import numpy as np
from mindspore_serving.server import register
tensor_add = register.declare_model(model_file="tensor_add.mindir", model_format="MindIR", with_batch_dim=True)
def test_concat(x1):
    return x1 + 1, x1-1
@register.register_method(output_names=["y1", "y2"])
def predict(x1):
    y1, y2 = register.add_stage(test_concat, x1, outputs_count=2)
    return y1, y2
"""
    base = start_serving_server(servable_content, model_file="tensor_add.mindir")
    # Client: build three instances and the expected outputs for each.
    instances = []
    y1s = []
    y2s = []
    x1s = []
    x1s.append(np.array([[101.1, 205.2], [41.3, 62.4]], np.float32))
    x1s.append(np.array([[41.3, 32.2], [4.1, 3.9]], np.float32))
    x1s.append(np.array([[11.1, 21.2], [41.9, 61.8]], np.float32))
    for i in range(3):
        instances.append({"x1": x1s[i]})
        y1s.append(x1s[i] + 1)
        y2s.append(x1s[i] - 1)
    client = create_client("localhost:5500", base.servable_name, "predict")
    result = client.infer(instances)
    print("result", result)
    # Every instance must return both outputs exactly.
    assert (result[0]["y1"] == y1s[0]).all()
    assert (result[1]["y1"] == y1s[1]).all()
    assert (result[2]["y1"] == y1s[2]).all()
    assert (result[0]["y2"] == y2s[0]).all()
    assert (result[1]["y2"] == y2s[1]).all()
    assert (result[2]["y2"] == y2s[2]).all()
async def instance_set_name_inurl(cluster_id: str, vm_uuid: str, new_name: str):
    """ Set Instance (VM/Template) Name

    :param cluster_id: key of the Xen cluster in the configured cluster map
    :param vm_uuid: UUID of the VM/template to rename
    :param new_name: new display name
    :returns: {"success": bool} -- False when the VM is not found
    :raises HTTPException: 400 for an unknown cluster id, or mapped from
        XenAPI / transport failures
    """
    try:
        try:
            session = create_session(
                _id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
            )
        except KeyError as key_error:
            # Unknown cluster id in the URL path.
            raise HTTPException(
                status_code=400, detail=f"{key_error} is not a valid path"
            )
        _vm: VM = VM.get_by_uuid(session=session, uuid=vm_uuid)
        if _vm is not None:
            ret = dict(success=_vm.set_name(new_name))
        else:
            ret = dict(success=False)
        session.xenapi.session.logout()
        return ret
    except Fault as xml_rpc_error:
        # Surface XenAPI faults with their own status code.
        raise HTTPException(
            status_code=int(xml_rpc_error.faultCode),
            detail=xml_rpc_error.faultString,
        )
    except RemoteDisconnected as rd_error:
        raise HTTPException(status_code=500, detail=rd_error.strerror)
def key_released(key):
    """
    Takes a key, that's either a keycode or a character,
    and says if it was released this frame.
    """
    keycode = _to_keycode(key)
    was_held = keycode in last_frame_held_buttons
    still_held = keycode in current_frame_held_buttons
    # Released == held last frame but not this frame.
    return was_held and not still_held
def main():
    """ Entry point.

    Delegates to unittest's command-line test runner.
    """
    unittest.main()
def trim_snakes_old(lcs,ref,Nb,Ne,dat,Mb,Me):
    """Previously found matches can cause problems if they are not optimal.
    In such a case sticking with the matches as found prevents subsequent
    more advanced diff routines from recovering from an early sub-optimal
    choice. To counter this all snakes and pseudo-snakes are trimmed down
    such that they involve whole lines only.
    The process is:
    1. Merge subsequent snakes to build a list in which each pair of
       snakes is separated by a non-empty section of mismatching tokens.
    2. Trim each snake by increasing the starting point to the first token
       on the next line, and decreasing the end point to the last token on
       the previous line. If as a result the begin token exceeds the end
       token then eliminate the snake.
    The routine returns the revised snake list.

    :param lcs: list of snakes (xi1, yj1, xi2, yj2, itype) in token indices
    :param ref: reference token data (used for token<->line conversions)
    :param Nb, Ne: reference range bounds (unused here)
    :param dat: data token data (used for token<->line conversions)
    :param Mb, Me: data range bounds (unused here)
    :returns: the merged-and-trimmed snake list
    """
    #
    # Collapse the snake list by merging adjacent snakes.
    #
    nsnake = len(lcs)
    isnake = 0
    if nsnake > 0:
        lcs_tmp = []
        (xi1,yj1,xi2,yj2,itype) = lcs[isnake]
        isnake = isnake + 1
        while (isnake < nsnake):
            (xi3,yj3,xi4,yj4,itype) = lcs[isnake]
            isnake = isnake + 1
            if (xi2+1 == xi3 and yj2+1 == yj3):
                #
                # This snake continues from the previous one so merge the two.
                #
                xi2 = xi4
                yj2 = yj4
                #
            else:
                #
                # This snake is separated from the previous one so store the
                # previous one and restart the merge procedure.
                #
                lcs_tmp.append((xi1,yj1,xi2,yj2,itype))
                xi1 = xi3
                yj1 = yj3
                xi2 = xi4
                yj2 = yj4
        #
        # Store the last snake.
        #
        lcs_tmp.append((xi1,yj1,xi2,yj2,itype))
        lcs = lcs_tmp
    #
    # Trim the snakes to precisely matching lines.
    #
    nsnake = len(lcs)
    isnake = 0
    lcs_tmp = []
    # txi/tyj track the furthest token already emitted, to reject
    # degenerate snakes that would not advance.
    txi = 0
    tyj = 0
    while (isnake < nsnake):
        (xi1,yj1,xi2,yj2,itype) = lcs[isnake]
        isnake = isnake + 1
        #
        # Move the starting point to the first token on the next line unless
        # the token is the first token on the current line.
        #
        lxi1 = toldiff_tokens.tokenno2lineno(dat,xi1)
        txi1 = toldiff_tokens.lineno2tokenno(dat,lxi1)
        lyj1 = toldiff_tokens.tokenno2lineno(ref,yj1)
        tyj1 = toldiff_tokens.lineno2tokenno(ref,lyj1)
        if txi1 != xi1 or tyj1 != yj1:
            xi1 = toldiff_tokens.lineno2tokenno(dat,lxi1+1)
            yj1 = toldiff_tokens.lineno2tokenno(ref,lyj1+1)
        #
        # Move the end point to the last token on the previous line unless
        # the token is the last token on the current line.
        #
        lxi2 = toldiff_tokens.tokenno2lineno(dat,xi2)
        txi2 = toldiff_tokens.lineno2tokenno(dat,lxi2+1)-1
        lyj2 = toldiff_tokens.tokenno2lineno(ref,yj2)
        tyj2 = toldiff_tokens.lineno2tokenno(ref,lyj2+1)-1
        if txi2 != xi2 or tyj2 != yj2:
            xi2 = toldiff_tokens.lineno2tokenno(dat,lxi2)-1
            yj2 = toldiff_tokens.lineno2tokenno(ref,lyj2)-1
        if xi1-1 <= xi2 and yj1-1 <= yj2 and (xi1 > txi or yj1 > tyj):
            #
            # There is a non-empty snake remaining so store it.
            #
            lcs_tmp.append((xi1,yj1,xi2,yj2,itype))
            txi = max(xi1,xi2)
            tyj = max(yj1,yj2)
    #
    lcs = lcs_tmp
    return lcs
def render_mov(fname, steps, fps=30):
    """
    Load a saved SafeLifeGame state and render it as an animated gif.
    Parameters
    ----------
    fname : str
        Path of the saved game state; the gif is written next to it with
        the extension replaced by '.gif'.
    steps : int
        The number of steps to evolve the game state. This is the same
        as the number of frames that will be rendered.
    fps : float
        Frames per second for gif animation.
    """
    game = GameState.load(fname)
    # Strip only the final extension.  The previous
    # '.'.join(fname.split('.')[:-1]) returned '' for extension-less names,
    # producing a bare '.gif' output file.
    bare_fname = fname.rsplit('.', 1)[0]
    frames = []
    for _ in range(steps):
        frames.append(render_game(game))
        game.advance_board()
    imageio.mimwrite(bare_fname + '.gif', frames,
                     duration=1/fps, subrectangles=True)
def test_getitem(posterior):
    """Getitem performs as expected.

    Indexing the posterior (first, last, full slice) must match indexing its
    underlying states for both mean and covariance.
    """
    np.testing.assert_allclose(posterior[0].mean, posterior.states[0].mean)
    np.testing.assert_allclose(posterior[0].cov, posterior.states[0].cov)
    np.testing.assert_allclose(posterior[-1].mean, posterior.states[-1].mean)
    np.testing.assert_allclose(posterior[-1].cov, posterior.states[-1].cov)
    np.testing.assert_allclose(posterior[:].mean, posterior.states[:].mean)
    np.testing.assert_allclose(posterior[:].cov, posterior.states[:].cov)
async def start(actual_coroutine):
    """
    Start the testing coroutine and wait 2 seconds for it to complete.
    :raises asyncio.CancelledError when the coroutine fails to finish its work
        in time.
    :returns: the return value of the actual_coroutine.
    :rtype: Any
    """
    # NOTE(review): wait_for cancels the inner coroutine on timeout; the
    # swallowed CancelledError makes this return None in that case -- confirm
    # callers expect None rather than an exception.
    try:
        return await asyncio.wait_for(actual_coroutine, 2)
    except asyncio.CancelledError:
        pass
def breadthIterArgs(limit=1000, testFn='<function isIterable>', *args):
    """
    iterator doing a breadth first expansion of args
    """
    # NOTE(review): auto-generated stub (note the stringified function default
    # for testFn); no implementation is present -- calling it returns None.
    pass
def get_node(path):
    """Returns a :class:`Node` instance at ``path`` (relative to the current site) or ``None``.

    The matched node gets two private attributes set: ``_path`` (the input
    path) and ``_subpath`` (the remainder past the node, always starting with
    '/', with the input's trailing slash preserved).
    """
    try:
        current_site = Site.objects.get_current()
    except Site.DoesNotExist:
        current_site = None
    # NOTE(review): an empty ``path`` raises IndexError here -- confirm
    # callers never pass "".
    trailing_slash = False
    if path[-1] == '/':
        trailing_slash = True
    try:
        node, subpath = Node.objects.get_with_path(path, root=getattr(current_site, 'root_node', None), absolute_result=False)
    except Node.DoesNotExist:
        return None
    if subpath is None:
        subpath = ""
    subpath = "/" + subpath
    # Preserve the caller's trailing slash on the subpath.
    if trailing_slash and subpath[-1] != "/":
        subpath += "/"
    node._path = path
    node._subpath = subpath
    return node
def new_post(blog_id, username, password, post, publish):
    """
    metaWeblog.newPost(blog_id, username, password, post, publish)
    => post_id

    Creates a Zinnia ``Entry`` from the metaWeblog ``post`` structure and
    returns its primary key. ``publish`` selects PUBLISHED over DRAFT, but
    only when the authenticated user has the ``can_change_status``
    permission; otherwise the entry keeps the model's default status.
    """
    # Checks credentials plus the 'zinnia.add_entry' permission;
    # presumably raises an XML-RPC fault on failure -- behavior lives in
    # authenticate().
    user = authenticate(username, password, 'zinnia.add_entry')
    if post.get('dateCreated'):
        # Parse only "YYYY-MM-DDTHH:MM:SS" (the first 18 chars), dropping
        # any timezone designator carried by the XML-RPC DateTime value.
        creation_date = datetime.strptime(
            post['dateCreated'].value[:18], '%Y-%m-%dT%H:%M:%S')
        if settings.USE_TZ:
            creation_date = timezone.make_aware(
                creation_date, timezone.utc)
    else:
        creation_date = timezone.now()
    # Map metaWeblog/WordPress extension fields onto Entry fields; the
    # mt_*/wp_* keys are optional extensions of the basic struct.
    entry_dict = {'title': post['title'],
                  'content': post['description'],
                  'excerpt': post.get('mt_excerpt', ''),
                  'publication_date': creation_date,
                  'creation_date': creation_date,
                  'last_update': creation_date,
                  'comment_enabled': post.get('mt_allow_comments', 1) == 1,
                  'pingback_enabled': post.get('mt_allow_pings', 1) == 1,
                  'trackback_enabled': post.get('mt_allow_pings', 1) == 1,
                  'featured': post.get('sticky', 0) == 1,
                  'tags': 'mt_keywords' in post and post['mt_keywords'] or '',
                  'slug': 'wp_slug' in post and post['wp_slug'] or slugify(
                      post['title']),
                  'password': post.get('wp_password', '')}
    # Only privileged users may set the publication status directly.
    if user.has_perm('zinnia.can_change_status'):
        entry_dict['status'] = publish and PUBLISHED or DRAFT
    entry = Entry.objects.create(**entry_dict)
    author = user
    # Privileged users may attribute the entry to another author.
    if 'wp_author_id' in post and user.has_perm('zinnia.can_change_author'):
        if int(post['wp_author_id']) != user.pk:
            author = Author.objects.get(pk=post['wp_author_id'])
    entry.authors.add(author)
    entry.sites.add(Site.objects.get_current())
    if 'categories' in post:
        # Create any missing categories on the fly.
        entry.categories.add(*[
            Category.objects.get_or_create(
                title=cat, slug=slugify(cat))[0]
            for cat in post['categories']])
    return entry.pk
def upload(userid, filedata):
    """
    Creates a preview-size copy of an uploaded image file for a new avatar
    selection file.

    Returns True when file data was supplied (and a resized media item was
    linked), False when the existing avatar link was cleared instead.
    """
    if not filedata:
        # No upload supplied: drop the current avatar link.
        # NOTE(review): uploads write the 'avatar-source' link but clearing
        # targets 'avatar' -- confirm this asymmetry is intentional.
        orm.UserMediaLink.clear_link(userid, 'avatar')
        return False
    resized = media.make_resized_media_item(filedata, (600, 500), 'FileType')
    orm.UserMediaLink.make_or_replace_link(userid, 'avatar-source', resized)
    return True
def print_warning(port: int) -> None:
    """Prints a message on screen to run an app or api on the specific port.

    Writes the hint to stdout, leaves it visible for five seconds, then
    flushes and rewinds the cursor with a carriage return so the line can
    be overwritten.

    Args:
        port: Port number.
    """
    hint = f'\rRun an application on the port {port} to start tunneling.'
    sys.stdout.write(hint)
    sleep(5)
    sys.stdout.flush()
    sys.stdout.write('\r')
def autoconfig(
    rewrite_asserts=default,
    magics=default,
    clean=default,
    addopts=default,
    run_in_thread=default,
    defopts=default,
    display_columns=default,
):
    """Configure ``ipytest`` with reasonable defaults.
    Specifically, it sets:
    {defaults_docs}
    See :func:`ipytest.config` for details.
    """
    # NOTE: ``{defaults_docs}`` above is a placeholder filled in elsewhere
    # via str.format() on ``__doc__`` -- do not remove it.
    # Presumably collects this call's argument values by name (e.g. via
    # frame/signature inspection) -- TODO confirm against collect_args().
    args = collect_args()
    # For every known config key, keep the caller's value unless it is the
    # ``default`` sentinel, in which case substitute the packaged default.
    config(
        **{
            key: replace_with_default(default, args[key], defaults.get(key))
            for key in current_config
        }
    )
def get_account_2_json(usr, pwd):
    """
    Assemble the account/password pairs read from environment variables
    into a JSON-like dict.

    :param usr: substring used to grep username-bearing env var names.
    :param pwd: substring used to grep password-bearing env var names.
    :return: dict mapping each username key to
        ``{"email": <username>, "password": <password>}``.
    """
    # NOTE(review): shelling out to ``env | grep`` is fragile (substring
    # matches, whitespace-containing values, injection via usr/pwd);
    # iterating os.environ directly would be safer -- kept as-is here.
    username = os.popen("env | grep {}".format(usr))
    password = os.popen("env | grep {}".format(pwd))
    # Each matching line is "NAME=value"; split() yields one entry per var.
    username_list = username.read().split()
    password_list = password.read().split()
    # str2dict presumably parses "k=v;k2=v2" strings into a dict -- TODO
    # confirm against its definition.
    username_dict = str2dict(";".join(username_list))
    password_dict = str2dict(";".join(password_list))
    account_dict = {}
    # Pair usernames with passwords by dict iteration order.
    for usr_key, pwd_key in zip(username_dict,password_dict):
        account_dict[usr_key] = {"email": username_dict[usr_key], "password": password_dict[pwd_key]}
    return account_dict
def browse(bot, update):
    """/browse command. Displays a keyboard/buttons UI for navigation.

    Shows the top-level (main) keyboard of the catalog-like UI so the user
    can browse through the pictogram categories.

    Args:
        bot(object): the bot instance.
        update(object): the message sent by the user.
    """
    # 1st level keyboard: main keyboard
    main_keyboard = keyboards['KB_MAIN']
    bot.sendMessage(
        chat_id=update.message.chat_id,
        text=main_keyboard['text'],
        reply_markup=build_keyboard(main_keyboard['layout']),
    )
def time_dconv_bn_nolinear(nb_filter, nb_row, nb_col,
                           stride=(2, 2), activation="relu"):
    """
    Create a time-distributed "deconvolution" block for decoders:
    unpooling -> reflection padding -> convolution -> batch norm ->
    activation, each layer wrapped in ``TimeDistributed``.

    Parameters:
    ---------
    nb_filter : int
        number of filters to use in the convolution layer.
    nb_row : int
        kernel height (rows); also determines the reflection padding.
    nb_col : int
        kernel width (columns); also determines the reflection padding.
    stride : tuple of int
        upsampling factor passed to ``UnPooling2D`` (default ``(2, 2)``).
    activation : str
        activation function name (default ``"relu"``).
    Returns:
    ---------
    _dconv_bn : callable
        function applying the block to a (time-distributed) tensor.
    """
    def _dconv_bn(x):
        # Upsample, then reflection-pad by half the kernel size so the
        # 'valid' convolution below preserves the spatial dimensions.
        x = TimeDistributed(UnPooling2D(size=stride))(x)
        x = TimeDistributed(ReflectionPadding2D(padding=(int(nb_row/2),
                                                         int(nb_col/2))))(x)
        # NOTE(review): ``reg_weights`` is read from enclosing/module scope
        # -- confirm it is defined before this closure runs.
        x = TimeDistributed(Conv2D(nb_filter, (nb_row, nb_col),
                                   padding='valid',
                                   kernel_regularizer=regularizers.
                                   l2(reg_weights)))(x)
        x = TimeDistributed(BatchNormalization())(x)
        x = TimeDistributed(Activation(activation))(x)
        return x
    return _dconv_bn
def ___generate_random_row_major_GM___(i, j, s=None):
    """Make a random row major sparse matrix of shape (i,j) at sparsity=s.

    :param i: number of rows of the global matrix.
    :param j: number of columns of the global matrix.
    :param s: sparsity/density passed to ``scipy.sparse.random``; when
        None a random value in [0, 0.1) is drawn, and any value below
        0.02 collapses to 0 (an all-zero matrix).
    :return: a ``GlobalMatrix`` flagged as regularly row-distributed.
    """
    if s is None:
        s = random.uniform(0, 0.1)
    # Very sparse requests collapse to an all-zero matrix.
    if s < 0.02: s = 0
    if rAnk == mAster_rank:
        # Master shuffles all row indices and partitions them as evenly as
        # possible across the MPI ranks: the first ``i % sIze`` ranks get
        # one extra row.
        random_list = random.sample(range(0, i), i)
        distribution = [i // sIze + (1 if x < i % sIze else 0) for x in range(sIze)]
        no_empty_rows = list()
        _ = 0
        for r in range(sIze):
            no_empty_rows.append(random_list[_:_+distribution[r]])
            _ += distribution[r]
    else:
        no_empty_rows = None
    # Each rank receives its own list of assigned row indices.
    no_empty_rows = cOmm.scatter(no_empty_rows, root=mAster_rank)
    # NOTE(review): every rank draws an independent random csr matrix here
    # and keeps only its assigned rows, so row content is rank-local random
    # data -- confirm this is the intended semantics.
    _ = spspa.random(i, j, s, format='csr')
    A = spspa.lil_matrix((i,j))
    A[no_empty_rows,:] = _[no_empty_rows,:]
    A = A.tocsr()
    A = GlobalMatrix(A)
    A.IS.regularly_distributed = 'row'
    A.___PRIVATE_self_regularity_checker___()
    return A
def link_control_policy_maker(intersight_api_key_id,
                              intersight_api_key,
                              policy_name,
                              admin_state="Enabled",
                              mode="Normal",
                              policy_description="",
                              organization="default",
                              intersight_base_url="https://www.intersight.com/api/v1",
                              tags=None,
                              preconfigured_api_client=None
                              ):
    """This is a function used to make a Link Control Policy on Cisco
    Intersight.
    Args:
        intersight_api_key_id (str):
            The ID of the Intersight API key.
        intersight_api_key (str):
            The system file path of the Intersight API key.
        policy_name (str):
            The name of the policy to be created.
        admin_state (str):
            Optional; The administrative state of Unidirectional Link
            Detection (UDLD). The accepted values are "Enabled" and
            "Disabled". The default value is "Enabled".
        mode (str):
            Optional; The Unidirectional Link Detection (UDLD) mode. The
            accepted values are "Normal" and "Aggressive". The default
            value is "Normal".
        policy_description (str):
            Optional; The description of the policy to be created. The
            default value is an empty string ("").
        organization (str):
            Optional; The Intersight account organization of the policy.
            The default value is "default".
        intersight_base_url (str):
            Optional; The base URL for Intersight API paths. The default
            value is "https://www.intersight.com/api/v1". This value
            typically only needs to be changed if using the Intersight
            Virtual Appliance.
        tags (dict):
            Optional; The Intersight account tags that will be assigned to
            the policy. The default value is None.
        preconfigured_api_client ("ApiClient"):
            Optional; An ApiClient class instance which handles Intersight
            client-server communication through the use of API keys. The
            default value is None. If a preconfigured_api_client argument
            is provided, empty strings ("") or None can be provided for
            the intersight_api_key_id, intersight_api_key, and
            intersight_base_url arguments.
    """
    # Assemble the Link Control Policy object, then drive its creation on
    # Intersight. Failures are reported (not raised) so a batch of policy
    # makers can continue past a single bad configuration.
    link_control_policy = LinkControlPolicy(
        intersight_api_key_id=intersight_api_key_id,
        intersight_api_key=intersight_api_key,
        policy_name=policy_name,
        policy_description=policy_description,
        organization=organization,
        intersight_base_url=intersight_base_url,
        tags=tags,
        preconfigured_api_client=preconfigured_api_client,
        admin_state=admin_state,
        mode=mode
        )
    try:
        # object_maker() performs the actual Intersight API configuration.
        link_control_policy.object_maker()
    except Exception:
        print("\nA configuration error has occurred!\n")
        print("The builder function failed to configure the "
              f"{link_control_policy.object_type} settings.")
        print("Please check the provided arguments for the "
              f"{link_control_policy.object_type} settings.\n")
        print("Exception Message: ")
        traceback.print_exc()
def set_level(self, level):
    """Set the logging level.

    A falsy ``level`` (None, 0, "") resets the level to 0 (NOTSET);
    otherwise the value is normalized through ``_get_level_number`` before
    being applied.
    """
    numeric_level = 0 if not level else _get_level_number(level)
    self.setLevel(numeric_level)
def parse_tuple(value: Tuple[Any, ...]) -> RGBA:
    """
    Parse a tuple or list as a color.

    Accepts either (r, g, b) or (r, g, b, alpha); any other length raises
    ``ColorError``.
    """
    length = len(value)
    if length not in (3, 4):
        raise ColorError(reason='tuples must have length 3 or 4')
    r, g, b = (parse_color_value(component) for component in value[:3])
    alpha = parse_float_alpha(value[3]) if length == 4 else None
    return RGBA(r, g, b, alpha)
def find_path(
    start_path: pathlib.Path = pathlib.Path("."),
) -> Optional[pathlib.Path]:
    """Traverse the file system looking for the config file .craftier.ini.
    It will stop earlier at the user's home directory, if it encounters a Git or
    Mercurial directory, or if it traversed too deep.
    """
    home_dir = pathlib.Path.home()
    resolved = start_path.resolve()
    # Walk upward from the resolved start, bounded by the search depth.
    for directory in [resolved, *resolved.parents][:_MAX_SEARCH_DEPTH]:
        candidate = directory / CONFIG_FILENAME
        if candidate.is_file():
            return candidate
        # A VCS root or the home directory ends the search unsuccessfully.
        if any((directory / stop).is_dir() for stop in _STOP_SEARCH_ON_DIRS):
            return None
        if directory == home_dir:
            return None
    return None
def crop_point_data_to_base_raster(raster_name, raster_directory, csv_file, EPSG_code = 0):
    """
    This function create a new csv file cropped to the base raster. It can lower the processing time if your point data is on a significantly larger area than the base raster.

    :param raster_name: base name of the raster (no extension); the ENVI
        header ``raster_name + ".hdr"`` is read for extent information.
    :param raster_directory: directory containing the ``.hdr`` file.
    :param csv_file: path to a csv with ``latitude``/``longitude`` columns.
    :param EPSG_code: EPSG code of the raster's projected CRS.
        NOTE(review): the default 0 is not a valid EPSG code, so callers
        presumably must always supply one -- confirm.
    :return: path of the newly written, filtered csv file.
    """
    print("ok let me load your dataset and your hdr file")
    # Read the file
    df = bamboo_bears.read_csv(csv_file)  # bamboo_bears is the pandas alias
    # Read and sort the csv_info
    with open(raster_directory+raster_name+".hdr","r") as hdr_file:
        print("I got these")
        for line in hdr_file:
            # ENVI "map info" line: projection name, tie point, origin
            # (x_min / y_max), pixel sizes, UTM zone and hemisphere.
            if(line[0:8] == "map info"):
                info = line[12:-2]
                info = info.split(",")
                x_min = float(info[3])
                y_max = float(info[4])
                x_res = float(info[5])
                y_res = float(info[6])
                utm_zone = int(info[7])
                utm_hemisphere = info[8]
            else:
                if(line[0:7] == "samples"):
                    # "samples = N" -> number of columns.
                    num_col = line.replace(" ","").split("=")[1]
                    print("there are " + str(num_col) + " columns")
                    num_col = int(num_col)
                else:
                    if(line[0:5] == "lines"):
                        # "lines = N" -> number of rows.
                        num_lines = line.replace(" ","").split("=")[1]
                        print("there are " + str(num_lines) + " lines")
                        num_lines = int(num_lines)
    # Now I calculate the size of the dem
    x_max = x_min + x_res*num_col
    y_min = y_max - y_res*num_lines
    # Conversion UTM to lat/long
    # NOTE(review): Proj(init=...) is the deprecated pyproj 1.x syntax;
    # newer pyproj prefers Proj("epsg:...") / Transformer -- kept as-is.
    inProj = Proj(init='epsg:'+str(EPSG_code))
    outProj = Proj(init='epsg:4326')
    long_min,lat_min = transform(inProj,outProj,x_min,y_min)
    long_max,lat_max = transform(inProj,outProj,x_max,y_max)
    # data sorting: keep only points strictly inside the raster bounds.
    df = df[df.longitude<long_max]
    df = df[df.latitude<lat_max]
    df = df[df.latitude>lat_min]
    df = df[df.longitude>long_min]
    df.to_csv(csv_file[:-4]+"_"+raster_name+"_filtered.csv", index = False)
    #return the name of the new csv file
    return csv_file[:-4]+"_"+raster_name+"_filtered.csv"
def test_gen_ipv6_3():
    """Generate a IPv6 address with custom prefix."""
    address = gen_ipaddr(ipv6=True, prefix=['e2d3'])
    # A full IPv6 address has eight colon-separated groups, and the
    # requested prefix must lead the result.
    assert len(address.split(':')) == 8
    assert address.startswith('e2d3:')
def generate_schedule_report_data(pools_info, pools_allocated_mem):
    """
    Generate the schedule report data.
    :param pools_info: (dict) The information about the configuration and statistics of the pool participating
                       in the scheduling.
    :param pools_allocated_mem: (dict) The allocated memory of the pool participating in the scheduling.
    :return: (DataFrame) A DataFrame object of report data.
    """
    # Column order defines the report layout.
    columns = [ReportColumn.RESOURCE_POOL,
               ReportColumn.MEM_BEFORE_SCHEDULE,
               ReportColumn.MEM_AFTER_SCHEDULE,
               ReportColumn.MEM_MOVED,
               ReportColumn.MEM_USED,
               ReportColumn.MEM_LACK,
               ReportColumn.QUERY_NUMBER,
               ReportColumn.WORK_TIME,
               ReportColumn.QUEUED_TIME,
               ReportColumn.WEIGHT,
               ReportColumn.MIN_MEM,
               ReportColumn.MAX_MEM]
    # One row per pool. Pools absent from pools_allocated_mem fall back to
    # their current memory, i.e. "after schedule" == "before schedule" and
    # the moved amount is 0.
    data = [[pool_info.pool_name,
             int(convert_mem_unit(pool_info.current_mem)),
             int(convert_mem_unit(pools_allocated_mem.get(pool_info.pool_name, pool_info.current_mem))),
             int(convert_mem_unit(pools_allocated_mem.get(pool_info.pool_name, pool_info.current_mem)
                                  - pool_info.current_mem)),
             # Used memory: if nothing waited, report the averaged usage;
             # otherwise the pool was saturated, so report its full size.
             int(convert_mem_unit(pool_info.pool_stat.used_mem_avg)) \
                 if int(convert_mem_unit(pool_info.pool_stat.wait_mem_avg)) == 0 \
                 else int(convert_mem_unit(pool_info.current_mem)),
             int(convert_mem_unit(pool_info.pool_stat.wait_mem_avg)),
             pool_info.pool_stat.query_total,
             int(pool_info.pool_stat.run_secs),
             int(pool_info.pool_stat.wait_secs),
             pool_info.weight,
             int(convert_mem_unit(pool_info.min_mem)),
             int(convert_mem_unit(pool_info.max_mem))]
            for pool_info in list(pools_info.values())]
    return pd.DataFrame(data, columns=columns)
def create_feature_rule_json(device, feature="foo", rule="json"):
    """Creates a Feature/Rule Mapping and Returns the rule.

    The ComplianceFeature is created on demand; the returned ComplianceRule
    is saved with JSON config type for the device's platform.
    """
    # NOTE: the ``rule`` parameter is unused (the original shadowed it
    # immediately); kept for signature compatibility with callers.
    compliance_feature, _ = ComplianceFeature.objects.get_or_create(
        slug=feature, name=feature
    )
    compliance_rule = ComplianceRule(
        feature=compliance_feature,
        platform=device.platform,
        config_type=ComplianceRuleTypeChoice.TYPE_JSON,
        config_ordered=False,
    )
    compliance_rule.save()
    return compliance_rule
def test_returns_specified_plugin(application):
    """Verify we get the plugin we want.

    ``formatter_for`` must hand back the requested plugin's execute
    callable without logging any warning.
    """
    desired = mock.Mock()
    application.formatting_plugins = {
        'default': mock.Mock(),
        'desired': desired,
    }
    with mock.patch.object(app.LOG, 'warning') as warning:
        result = application.formatter_for('desired')
        assert result is desired.execute
        assert not warning.called
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.