content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def plot_profiled_all_images_table(method_list):
    """Plot profiling results (memory, CPU, success) for several benchmark images.

    :param method_list: list of method names; the list order must match the
        per-method order of ``PlotResult(...).method_exec_info``.
    """
    # One result file per benchmark image, all under the same "result" directory.
    image_list = ['high_dpi', 'rich_texture', 'text']
    exec_info_list = [
        PlotResult("result", "%s.json" % image).method_exec_info
        for image in image_list
    ]
    # Collect per-method series (one value per image) for each metric.
    mem_compare_dict, cpu_compare_dict, succeed_compare_dict = {}, {}, {}
    for index, method in enumerate(method_list):
        mem_list, cpu_list, succeed_list = [], [], []
        for exec_info in exec_info_list:
            current_method_exec_info = exec_info[index]
            mem_list.append(round(current_method_exec_info["mem_max"], 2))  # MB
            cpu_list.append(round(current_method_exec_info["cpu_max"], 2))
            succeed_list.append(bool(current_method_exec_info["result"]))
        mem_compare_dict[method] = mem_list
        cpu_compare_dict[method] = cpu_list
        succeed_compare_dict[method] = succeed_list
    color_list = get_color_list(method_list)
    # Two curve plots plus one result table.
    # BUG FIX: mem_max is recorded in MB (the /1024 GB conversion was commented
    # out), yet the original title said "GB"; label the values as MB.
    plot_compare_curves(image_list, method_list, color_list, mem_compare_dict, "Title: Memory (MB)", 311)
    plot_compare_curves(image_list, method_list, color_list, cpu_compare_dict, "Title: CPU (%)", 312)
    plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Title: Result", 313)
    plt.show()
def test_name(request):
"""Returns (module_name, function_name[args]) for a given test"""
return (
request.module.__name__,
request._parent_request._pyfuncitem.name, # pylint: disable=protected-access
) | 31,401 |
def inputwrap(x, ARG_indented: bool = False, ARG_end_with: str = " "):
    """Textwrapping for regular 'input' commands.

    Parameters
    ----------
    x
        The prompt text to be wrapped (to 70 columns).
    ARG_indented : bool (default is 'False')
        Whether continuation lines of the wrapped prompt are indented.
    ARG_end_with : str (default is ' ')
        The string that the textwrapped prompt will end with.

    Returns
    -------
    str
        User input.
    """
    # The two original branches differed only in subsequent_indent, so
    # compute the indent once and make a single input() call.
    indent = " " if ARG_indented else ""
    prompt = textwrap.fill(x, width=70, subsequent_indent=indent)
    return input(prompt + ARG_end_with)
def test_reproject_geometry(landpoly):
    """Reproject geometry.

    Exercises reproject_geometry() across several CRS pairs (WGS84 to
    Spherical Mercator / LAEA / itself), on full-globe bounds with and
    without clip errors, on empty geometries, and with CRS given as int
    or str EPSG codes.  ``landpoly`` is a fixture path to a polygon
    vector file readable by Fiona.
    """
    with fiona.open(landpoly, "r") as src:
        for feature in src:
            # WGS84 to Spherical Mercator
            out_geom = reproject_geometry(
                shape(feature["geometry"]), CRS(src.crs), CRS().from_epsg(3857)
            )
            assert out_geom.is_valid
            # WGS84 to LAEA
            out_geom = reproject_geometry(
                shape(feature["geometry"]), CRS(src.crs), CRS().from_epsg(3035)
            )
            assert out_geom.is_valid
            # WGS84 to WGS84
            out_geom = reproject_geometry(
                shape(feature["geometry"]), CRS(src.crs), CRS().from_epsg(4326)
            )
            assert out_geom.is_valid
    # WGS84 bounds to Spherical Mercator (must not raise without error_on_clip)
    big_box = box(-180, -90, 180, 90)
    reproject_geometry(big_box, CRS().from_epsg(4326), CRS().from_epsg(3857))
    # WGS84 bounds to Spherical Mercator raising clip error
    with pytest.raises(RuntimeError):
        reproject_geometry(
            big_box, CRS().from_epsg(4326), CRS().from_epsg(3857), error_on_clip=True
        )
    # Latitudes above the Mercator limit must still yield a valid geometry.
    outside_box = box(-180, 87, 180, 90)
    assert reproject_geometry(
        outside_box,
        CRS().from_epsg(4326),
        CRS().from_epsg(3857),
    ).is_valid
    # empty geometry stays empty, with or without an actual reprojection
    assert reproject_geometry(
        Polygon(), CRS().from_epsg(4326), CRS().from_epsg(3857)
    ).is_empty
    assert reproject_geometry(
        Polygon(), CRS().from_epsg(4326), CRS().from_epsg(4326)
    ).is_empty
    # CRS parameter may be an int or str EPSG code; both spellings must agree.
    big_box = box(-180, -90, 180, 90)
    assert reproject_geometry(big_box, 4326, 3857) == reproject_geometry(
        big_box, "4326", "3857"
    )
    with pytest.raises(TypeError):
        reproject_geometry(big_box, 1.0, 1.0)
async def save_token(token):
    """
    Persist the oauth token for re-use instead of logging in again.

    The token is written to ``TESLA_API_TOKEN_FILE`` with 0600 permissions.
    NOTE(review): the original docstring said the token is stored in the
    vault cubbyhole secrets engine, but this implementation writes a local
    file — confirm which is intended.
    """
    # Clear the umask so the 0o600 mode passed to os.open applies exactly.
    os.umask(0)
    with open(
        os.open(TESLA_API_TOKEN_FILE, os.O_CREAT | os.O_WRONLY, 0o600), "w"
    ) as fh:
        fh.write(token)
def SBP_single(ell_fix, redshift, pixel_scale, zeropoint, ax=None, offset=0.0,
               x_min=1.0, x_max=4.0, alpha=1, physical_unit=False, show_dots=False, show_grid=False,
               show_banner=True, vertical_line=None, linecolor='firebrick', linestyle='-',
               linewidth=3, labelsize=25, ticksize=30, label='SBP', labelloc='lower left'):
    """Display the 1-D profiles, without showing PA and ellipticity.

    Parameters:
        ell_fix: astropy Table or numpy table, should be the output of IRAF ELLIPSE.
        redshift (float): redshift of the object.
        pixel_scale (float): pixel scale in arcsec/pixel.
        zeropoint (float): zeropoint of the photometry system.
        ax (``matplotlib.pyplot.axes`` object): The user could provide axes on which the figure will be drawn.
        offset (float): offset of single surface brightness profile, in the unit of ``count``.
        x_min (float): Minimum value of x-axis, in ``$x^{1/4}$`` scale.
        x_max (float): Maximum value of x-axis, in ``$x^{1/4}$`` scale.
        alpha (float): transparency.
        physical_unit (bool): If true, the figure will be shown in physical scale.
        show_dots (bool): If true, it will show all the data points.
        show_grid (bool): If true, it will show a grid.
        show_banner (bool): If true (and ``physical_unit``), add a top axis labelled in kpc.
        vertical_line (list of floats): positions of vertical lines. Maximum length is three.
        linecolor (str): Color of surface brightness profile.
        linestyle (str): Style of surface brightness profile. Could be "--", "-.", etc.
        label (string): Label of surface brightness profile.

    Returns:
        fig: if the input ``ax`` is ``None``.
        ax1: otherwise.
    """
    if ax is None:
        fig = plt.figure(figsize=(10, 10))
        fig.subplots_adjust(left=0.0, right=1.0,
                            bottom=0.0, top=1.0,
                            wspace=0.00, hspace=0.00)
        ax1 = fig.add_axes([0.08, 0.07, 0.85, 0.88])
        ax1.tick_params(direction='in')
    else:
        ax1 = ax
        ax1.tick_params(direction='in')
    # Calculate physical size at this redshift
    from .utils import phys_size
    # BUG FIX: the result was assigned to a misspelled name ('phys_sclae'),
    # so the physical_unit branch crashed with NameError on 'phys_scale'.
    phys_scale = phys_size(redshift, verbose=False)
    # 1-D profile: surface brightness is identical in both unit systems;
    # only the radius conversion and the x-axis label differ.
    if physical_unit is True:
        x = ell_fix['sma'] * pixel_scale * phys_scale
        xlabel = r'$(R/\mathrm{kpc})^{1/4}$'
    else:
        x = ell_fix['sma'] * pixel_scale
        xlabel = r'$(R/\mathrm{arcsec})^{1/4}$'
    y = -2.5 * np.log10((ell_fix['intens'] + offset) / (pixel_scale)**2) + zeropoint
    y_upper = -2.5 * np.log10((ell_fix['intens'] + offset + ell_fix['int_err']) / (pixel_scale)**2) + zeropoint
    y_lower = -2.5 * np.log10((ell_fix['intens'] + offset - ell_fix['int_err']) / (pixel_scale)**2) + zeropoint
    # Magnitudes grow downward, so the "upper" magnitude error comes from y_lower.
    upper_yerr = y_lower - y
    lower_yerr = y - y_upper
    asymmetric_error = [lower_yerr, upper_yerr]
    ylabel = r'$\mu\,[\mathrm{mag/arcsec^2}]$'
    if show_grid:
        ax1.grid(linestyle='--', alpha=0.4, linewidth=2)
    if show_dots:
        ax1.errorbar((x ** 0.25), y,
                     yerr=asymmetric_error,
                     color='k', alpha=0.2, fmt='o',
                     capsize=4, capthick=1, elinewidth=1)
    if label is not None:
        ax1.plot(x**0.25, y, color=linecolor, linewidth=linewidth, linestyle=linestyle,
                 label=r'$\mathrm{' + label + '}$', alpha=alpha)
        leg = ax1.legend(fontsize=labelsize, frameon=False, loc=labelloc)
        for l in leg.legendHandles:
            l.set_alpha(1)
    else:
        ax1.plot(x**0.25, y, color=linecolor, linewidth=linewidth, linestyle=linestyle, alpha=alpha)
    ax1.fill_between(x**0.25, y_upper, y_lower, color=linecolor, alpha=0.3*alpha)
    for tick in ax1.xaxis.get_major_ticks():
        tick.label.set_fontsize(ticksize)
    for tick in ax1.yaxis.get_major_ticks():
        tick.label.set_fontsize(ticksize)
    ax1.set_xlim(x_min, x_max)
    ax1.set_xlabel(xlabel, fontsize=ticksize)
    ax1.set_ylabel(ylabel, fontsize=ticksize)
    # Brighter (smaller) magnitudes at the top.
    ax1.invert_yaxis()
    # Twin axis with linear (kpc) scale on top of the R^{1/4} axis.
    if physical_unit and show_banner is True:
        ax4 = ax1.twiny()
        ax4.tick_params(direction='in')
        lin_label = [1, 2, 5, 10, 50, 100, 150, 300]
        lin_pos = [i**0.25 for i in lin_label]
        ax4.set_xticks(lin_pos)
        ax4.set_xlim(ax1.get_xlim())
        ax4.set_xlabel(r'$\mathrm{kpc}$', fontsize=ticksize)
        ax4.xaxis.set_label_coords(1, 1.025)
        ax4.set_xticklabels([r'$\mathrm{'+str(i)+'}$' for i in lin_label], fontsize=ticksize)
        for tick in ax4.xaxis.get_major_ticks():
            tick.label.set_fontsize(ticksize)
    # Vertical reference lines (at most three, in distinct styles).
    if vertical_line is not None:
        if len(vertical_line) > 3:
            raise ValueError('Maximum length of vertical_line is 3.')
        ylim = ax1.get_ylim()
        style_list = ['-', '--', '-.']
        for k, pos in enumerate(vertical_line):
            ax1.axvline(x=pos**0.25, ymin=0, ymax=1,
                        color='gray', linestyle=style_list[k], linewidth=3, alpha=0.75)
        plt.ylim(ylim)
    # Return
    if ax is None:
        return fig
    return ax1
def min_conflicts_value(csp, var, current):
    """Pick the value for ``var`` with the fewest conflicts in ``current``.

    Ties are broken at random by ``argmin_random_tie``.
    """
    def conflict_count(val):
        return csp.nconflicts(var, val, current)

    return argmin_random_tie(csp.domains[var], key=conflict_count)
def init(jwt):
    """Initialize the JWTManager.

    Registers the blacklist callback on the manager.

    Parameters:
        jwt (JWTManager): an instance of the jwt manager.
    """
    @jwt.token_in_blacklist_loader
    def check_if_token_in_blacklist(decoded_token):
        """Callback to check if a token is in the blacklist.

        Parameters:
            decoded_token (dict): a jwt token decoded into a dictionary.
        """
        # Imported lazily inside the callback — presumably to avoid a
        # circular import at module load time (NOTE(review): confirm).
        from app.model import TokenRepository
        return TokenRepository().is_token_revoked(decoded_token)
def get_component_rst_string(module: ModuleType, component: Callable, level: int) -> str:
    """Build the rst snippet that documents one component (class or function).

    :param module: the module containing the component
    :param component: the component (class or function)
    :param level: the level in the nested directory structure
    :return: the rst snippet, or an empty string for unsupported components
    """
    qualified_name = f"{module.__name__}.{component.__name__}"
    underline = RST_LEVEL_SYMBOLS[level] * 6
    if inspect.isclass(component):
        return SPHINX_CLASS_STRING.format(
            object_name=qualified_name, var=component.__name__, level=underline
        )
    if inspect.isfunction(component):
        return SPHINX_FUNC_STRING.format(
            object_name=qualified_name, var=component.__name__, level=underline
        )
    # multipledispatch dispatcher objects get their own rendering.
    if type(component).__name__ == "Dispatcher":
        return get_multidispatch_string(component, module, underline)
    return ""
def log_mse_loss(source, separated, max_snr=1e6, bias_ref_signal=None):
    """Negative log MSE loss, the negated log of SNR denominator.

    Args:
        source: reference signal tensor; time is the last axis.
        separated: estimate tensor, same shape as ``source``.
        max_snr: soft SNR ceiling; contributes a bias of
            10**(-max_snr/10) * ||ref||^2 to the log argument.
        bias_ref_signal: optional signal whose power replaces ``source``'s
            in the bias term.

    Returns:
        Tensor of losses with the last axis reduced, in dB.
    """
    # Squared-error power summed over the time axis.
    err_pow = tf.math.reduce_sum(tf.math.square(source - separated), axis=-1)
    snrfactor = 10.**(-max_snr / 10.)
    if bias_ref_signal is None:
        ref_pow = tf.math.reduce_sum(tf.square(source), axis=-1)
    else:
        ref_pow = tf.math.reduce_sum(tf.math.square(bias_ref_signal), axis=-1)
    # The bias keeps the log argument bounded away from zero, which caps
    # the achievable SNR at max_snr.
    bias = snrfactor * ref_pow
    return 10. * _stabilized_log_base(bias + err_pow)
def crack(password: str) -> Union[str, None]:
    """
    Attempt to crack the given password.

    Delegates to the caesar solver (which succeeded on ~96% of samples).
    """
    return caesar(password)
def insert_file(file_item):
    """Insert a file record into the autocode database.

    :param file_item: the file item to insert
    :return: None
    """
    connection = create_connection()
    # The connection context manager commits (or rolls back) the insert.
    with connection:
        __insert_file(connection, file_item)
def fetchOne(query):
    """Fetch a single row from *query* and return it converted to a dict."""
    row = query.fetchone()
    return sqliteRowToDict(row)
def _get_class_rgb(num_classes, predicted_class):
    """Map from class to RGB value for a specific colormap.

    Args:
        num_classes: Integer, the total number of classes.
        predicted_class: Integer, the predicted class, in [0, num_classes).
    Returns:
        Tuple of 3 floats in [0.0, 1.0] representing an RGB color.
    Raises:
        ValueError: If predicted class is not in [0, num_classes).
    """
    if predicted_class < 0 or predicted_class >= num_classes:
        raise ValueError('Predicted class %d must be in [0, %d).' %
                         (predicted_class, num_classes))
    # Scale [0, num_classes) onto the colormap's [0, 255) index range.
    colormap_index = int(predicted_class * 255.0 / num_classes)
    rgba = matplotlib.pyplot.cm.get_cmap(CLASS_ANNOTATION_COLORMAP)(colormap_index)
    # Drop the alpha channel; callers only want RGB.
    return rgba[:3]
def PUT(request):
    """Update a project's name.

    Expects ``projectId`` in the path and ``{'project': {'name': ...}}`` in
    the body; the caller (``request.google_id``) must have access to the
    project.  Returns a 200 Response carrying the updated project object.
    """
    request.check_required_parameters(body={'project': {'name': 'name'}}, path={'projectId': 'string'})
    project = Project.from_id(request.params_path['projectId'])
    project.check_exists()
    # NOTE(review): the True flag presumably requests edit (not just read)
    # permission — confirm against check_user_access's signature.
    project.check_user_access(request.google_id, True)
    project.set_property('name', request.params_body['project']['name'])
    # Track the edit time alongside the rename.
    project.set_property('datetime_last_edited', Database.datetime_to_string(datetime.now()))
    project.update()
    return Response(200, 'Successfully updated project.', project.obj)
def load_book_details(file_path):
    """Load book details from a csv file.

    :param file_path: path to a csv file containing a ``book_id`` column.
    :return: pandas DataFrame indexed by ``book_id``.
    """
    return pd.read_csv(file_path, index_col='book_id')
def random_string_fx() -> str:
    """
    Create a 16 character string of distinct ASCII letters, for use
    with logging tests.

    Returns:
        16 character string.
    """
    # random.sample draws without replacement, so all characters differ.
    letters = random.sample(string.ascii_letters, 16)
    return "".join(letters)
def parse_scheduler_nodes(
    pbscmd: PBSCMD, resource_definitions: Dict[str, PBSProResourceDefinition]
) -> List[Node]:
    """
    Gets the current state of the nodes as the scheduler sees them, including resources,
    assigned resources, jobs currently running etc.

    :param pbscmd: wrapper used to invoke ``pbsnodes -a``.
    :param resource_definitions: mapping of resource name to its PBSPro definition.
    :return: one parsed ``Node`` per host reported by the scheduler.
    """
    ret: List[Node] = []
    for ndict in pbscmd.pbsnodes_parsed("-a"):
        node = parse_scheduler_node(ndict, resource_definitions)
        # Nodes without a ccnodeid were not started by the autoscaler, so
        # CycleCloud must not override their resource values.
        if not node.available.get("ccnodeid"):
            node.metadata["override_resources"] = False
            logging.fine(
                "'ccnodeid' is not defined so %s has not been joined to the cluster by the autoscaler"
                + " yet or this is not a CycleCloud managed node",
                node,
            )
        ret.append(node)
    return ret
def GaugeSet(prefix, *, name, index, **kwargs):
    """
    Factory function for Gauge Set.

    Parameters
    ----------
    prefix : str
        Gauge base PV (up to 'GCC'/'GPI').
    name : str
        Name to refer to the gauge set.
    index : str or int
        Index for gauge (e.g. '02' or 3).
    prefix_controller : str, optional
        Base PV for the controller; selects the Mks variants when given.
    onlyGCC : optional
        If defined and not :keyword:`False`, set has no Pirani.
    """
    only_gcc = kwargs.pop('onlyGCC', None)
    if 'prefix_controller' in kwargs:
        controller = kwargs.pop('prefix_controller')
        cls = GaugeSetMks if only_gcc else GaugeSetPiraniMks
        return cls(prefix, name=name, index=index,
                   prefix_controller=controller, **kwargs)
    cls = GaugeSetBase if only_gcc else GaugeSetPirani
    return cls(prefix, name=name, index=index, **kwargs)
def user_rating(user, object, category=""):
    """
    Template tag returning the rating *user* gave *object*.

    Usage:
        {% user_rating user obj [category] as var %}
    """
    return user_rating_value(user, object, category)
def weighted_mean(
    x: NumericOrIter,
    w: NumericOrIter = None,
    na_rm: bool = False,
) -> NumericType:
    """Calculate weighted mean.

    Args:
        x: a numeric scalar or iterable of values.
        w: optional weights, same length as ``x``.
        na_rm: if True, drop NaN entries of ``x`` (and matching weights).

    Returns:
        The (weighted) mean, or ``NA`` when the weights sum to zero.

    Raises:
        ValueError: if ``x`` and ``w`` differ in length.
    """
    # Promote scalars to length-1 sequences so Array() handles both forms.
    if is_scalar(x):
        x = [x]  # type: ignore
    if w is not None and is_scalar(w):
        w = [w]  # type: ignore
    x = Array(x)
    if w is not None:
        w = Array(w)
        if len(x) != len(w):
            raise ValueError("'x' and 'w' must have the same length")
    if na_rm:
        # NOTE(review): only NaNs in x are removed; NaNs in w pass through
        # — confirm that is the intended semantics.
        notna = ~numpy.isnan(x)
        x = x[notna]
        if w is not None:
            w = w[notna]
    # All-zero weights would make numpy.average divide by zero.
    if w is not None and sum(w) == 0:
        return NA
    return numpy.average(x, weights=w)
def internet(host="8.8.8.8", port=53, timeout=10):
    """
    Check Internet Connections.

    :param host: the host to attempt a TCP connection to
    :param port: port to connect to
    :param timeout: connection timeout in seconds
    :type host:str
    :type port:int
    :type timeout:int
    :return bool: True if Connection is Stable

    >>> internet() # if there is stable internet connection
    True
    >>> internet() # if there is no stable internet connection
    False
    """
    try:
        socket.setdefaulttimeout(timeout)
        # BUG FIX: the original never closed the socket it opened on
        # success; the context manager guarantees it is closed.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.connect((host, port))
        return True
    except Exception:
        # Any failure (timeout, refused, DNS error, bad arguments) means
        # no stable connection; keep the original's broad catch so the
        # function never raises.
        return False
def parse_date(td):
    """Format a timedelta as '<years>y<months>m' (364-day year, 30-day month)."""
    years_float = float(td.days) / 364.0
    years = int(years_float)
    # Convert the fractional year back to days, then to whole months.
    months = int((years_float - years) * 364 / 30)
    return f"{years}y{months}m"
def test_basis():
    """
    Test basis change: regrid the GR curve to a 1-m basis over [100, 200]
    and check size and first sample for both to_basis and to_basis_like.
    """
    well = Well.from_las(FNAME)
    gr = well.data['GR']
    x = gr.to_basis(start=100, stop=200, step=1)
    assert x.size == 101
    # BUG FIX: the original asserted `x[0] - 66.6059 < 0.001`, which is
    # vacuously true for any x[0] below ~66.6; abs() makes it a real check.
    assert abs(x[0] - 66.6059) < 0.001
    y = gr.to_basis_like(x)
    assert y.size == 101
    assert abs(y[0] - 66.6059) < 0.001
def describe_instance_patch_states(InstanceIds=None, NextToken=None, MaxResults=None):
    """
    Retrieves the high-level patch state of one or more instances.

    See also: AWS API Documentation (SSM DescribeInstancePatchStates).

    :type InstanceIds: list
    :param InstanceIds: [REQUIRED] The IDs (strings) of the instances whose
        patch state information should be retrieved.

    :type NextToken: string
    :param NextToken: The token for the next set of items to return
        (received from a previous call).

    :type MaxResults: integer
    :param MaxResults: The maximum number of instances to return per page.

    :rtype: dict
    :return: ``{'InstancePatchStates': [...], 'NextToken': 'string'}``.
        Each entry of ``InstancePatchStates`` describes the high-level patch
        compliance of one managed instance:

        - ``InstanceId`` (string), ``PatchGroup`` (string),
          ``BaselineId`` (string), ``SnapshotId`` (string)
        - ``InstallOverrideList`` (string): https/S3 URL of a YAML patch list
          that overrides the default baseline for AWS-RunPatchBaseline
        - ``OwnerInformation`` (string): placeholder, always empty currently
        - patch counters (integers): ``InstalledCount``,
          ``InstalledOtherCount``, ``InstalledPendingRebootCount``,
          ``InstalledRejectedCount`` (always 0 when RejectedPatchesAction is
          ALLOW_AS_DEPENDENCY), ``MissingCount``, ``FailedCount``,
          ``UnreportedNotApplicableCount``, ``NotApplicableCount``
        - ``OperationStartTime`` / ``OperationEndTime`` (datetime) and
          ``LastNoRebootInstallOperationTime`` (datetime)
        - ``Operation``: ``'Scan'`` or ``'Install'``
        - ``RebootOption``: ``'RebootIfNeeded'`` (reboot when patches were
          installed or pending) or ``'NoReboot'`` (install without reboot;
          such patches are marked InstalledPendingReboot)

        ``NextToken`` is the pagination token; empty when no more items.

    Exceptions:
        SSM.Client.exceptions.InternalServerError
        SSM.Client.exceptions.InvalidNextToken
    """
    # Auto-generated documentation stub: intentionally unimplemented.
    pass
def hue_angle(a, b):
    """
    Returns the *hue* angle :math:`h` in degrees.

    Parameters
    ----------
    a : numeric
        Opponent colour dimension :math:`a`.
    b : numeric
        Opponent colour dimension :math:`b`.

    Returns
    -------
    numeric
        *Hue* angle :math:`h` in degrees.

    Examples
    --------
    >>> a = -0.0006241120682426434
    >>> b = -0.0005062701067729668
    >>> hue_angle(a, b)  # doctest: +ELLIPSIS
    219.0484326...
    """
    # atan2 gives (-180, 180]; the modulo folds it into [0, 360).
    angle = np.arctan2(b, a)
    return math.degrees(angle) % 360
def perform_similarity_checks(post, name):
    """
    Performs 4 similarity tests between each link in the post and the user name.

    :param post: Text of the post
    :param name: Username to compare against
    :return: tuple of (highest similarity ratio found as float, list of
        domains whose similarity reached SIMILAR_THRESHOLD)
    """
    max_similarity, similar_links = 0.0, []
    # Keep checking links until one is deemed "similar"
    for link in post_links(post):
        domain = get_domain(link)
        # Straight comparison
        s1 = similar_ratio(domain, name)
        # Strip all spaces
        s2 = similar_ratio(domain, name.replace(" ", ""))
        # Strip all hyphens
        s3 = similar_ratio(domain.replace("-", ""), name.replace("-", ""))
        # Strip all hyphens and all spaces
        s4 = similar_ratio(domain.replace("-", "").replace(" ", ""), name.replace("-", "").replace(" ", ""))
        similarity = max(s1, s2, s3, s4)
        max_similarity = max(max_similarity, similarity)
        if similarity >= SIMILAR_THRESHOLD:
            similar_links.append(domain)
    return max_similarity, similar_links
def create_getters(tuples):
    """Create a series of itemgetters that return tuples
    :param tuples: a list of tuples
    :type tuples: collections.Iterable
    :returns: a generator of item getters
    :rtype: generator
    ::
        >>> gs = list(create_getters([(0, 2), (), (1,)]))
        >>> d = ['a', 'b', 'c', 'd']
        >>> gs[0](d)
        ('a', 'c')
        >>> gs[1](d)
        ()
        >>> gs[2](d)
        ('b',)
    """
    for keys in tuples:
        if not keys:
            # No keys: a constant empty-tuple getter.
            yield lambda x: ()
        elif len(keys) == 1:
            # itemgetter with one key returns a bare value; wrap it in a
            # tuple.  Bind via default arg to avoid late-binding pitfalls.
            single = itemgetter(keys[0])
            yield lambda x, _get=single: (_get(x),)
        else:
            # itemgetter with 2+ keys already returns a tuple.
            yield itemgetter(*keys)
def report_cots_cv2x_bsm(bsm: dict) -> str:
    """Render the BSM fields of a COTS C-V2X SPDU as a text report.

    :param bsm: a dictionary containing BSM fields from a C-V2X SPDU
    :type bsm: dict
    :return: one "key\\t\\t\\tvalue" line per field, followed by a blank line
    :rtype: str
    """
    lines = ["{}\t\t\t{}\n".format(field, bsm[field]) for field in bsm]
    return "".join(lines) + "\n"
def get_level_matrix(matrix, level):
    """Returns a binary matrix with positions exceeding a threshold.

    matrix = numpy array object
    level = floating number

    The returned (float, Fortran-ordered) matrix has 1 where ``matrix``
    has values >= ``level`` and 0 elsewhere; the diagonal is always 0
    (self-contacts are never reported).
    """
    logging.info("Selecting the amino acids contacts.")
    # Vectorized threshold replaces the original O(n*m) Python loop;
    # asfortranarray preserves the original order='F' layout.
    out_matrix = np.asfortranarray((matrix >= level).astype(float))
    np.fill_diagonal(out_matrix, 0)
    return out_matrix
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.
    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        # A name or pattern was given: resolve it to matching bookmarks.
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # Exact bookmark name; a missing bookmark is an error.
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # Pattern match: collect every matching bookmark; matching
            # nothing at all is an error.
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # No argument: every bookmarked revision.
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # Exclude the null revision from the result set.
    bms -= set([node.nullrev])
    return subset & bms
def test_packages(host):
"""Test that the appropriate packages were installed."""
distribution = host.system_info.distribution
if distribution in ["amzn", "fedora"]:
pkgs = ["tigervnc-server"]
elif distribution in ["debian", "kali", "ubuntu"]:
pkgs = ["tigervnc-standalone-server", "tigervnc-common"]
else:
# We don't support this distribution
assert False
packages = [host.package(pkg) for pkg in pkgs]
installed = [package.is_installed for package in packages]
assert len(pkgs) != 0
assert all(installed) | 31,431 |
def indel_protein_processor(df, refgene, proteincdd=None):
    """Calculate protein features.

    Features not used in the final model are commented out.

    Args:
        df (pandas.DataFrame): indel records; gains 'cds_length' and
            'indel_location' columns.
        refgene (str): path to refCodingExon.bed.gz
        proteincdd (str): optional, path to proteinConservedDomains.txt
            (only needed by the commented-out CDD feature).
    Returns:
        df (pandas.DataFrame)
    """
    # cds length & indel location, looked up per accession from refgene.
    acc_len = acc_len_dict(refgene)
    df["cds_length"], df["indel_location"] = zip(
        *df.apply(partial(len_loc, d=acc_len), axis=1)
    )
    # check if the indel is in conserved domain (CDD)
    # acc_dom = acc_domain_dict(proteincdd)
    # df['is_in_cdd'] = df.apply(partial(is_in_conserved_domain, d=acc_dom), axis=1)
    return df
def setup_module():
    """Run once before all tests in this module: switch to examples/app.py's directory."""
    os.chdir(EXAMPLE_APP_DIR)
def main() -> None:
    """Simple CLI for the module: build an image and install an ISO into it."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-i", dest="iso_filename", required=True, help="ISO filename")
    parser.add_argument("-u", dest="preseed_url", required=True, help="preseed URL")
    parser.add_argument("-o", dest="output_filename", required=True, help="output filename")
    parser.add_argument("-d", dest="vnc_display", help="VNC display")
    parser.add_argument("-s", dest="image_size", help="output image size", default="10G")
    opts = parser.parse_args()

    create_image(opts.output_filename, opts.image_size)
    install(opts.iso_filename, opts.preseed_url, opts.output_filename, opts.vnc_display)
    # ARM ISOs need their kernel/initrd extracted from the image to boot.
    if iso_is_arm(opts.iso_filename):
        extract_boot_files(opts.output_filename)
def test_metadata_source_value():
    """Alert Processor Input Validation - Source Entity Value"""
    # Start from a known-valid alert and corrupt only the source entity.
    alert = get_alert()
    alert['source_entity'] = 100  # must be a string, not an int
    # Validation must reject the malformed alert.
    assert_false(validate_alert(alert))
def perdict_raw(model, *args, **kwargs):
    """
    Call ``model.predict`` asking for raw scores when supported.

    Tries ``model.predict(*args, **kwargs, prediction_type="RawFormulaVal")``
    first; if the model's predict does not accept ``prediction_type``
    (TypeError), falls back to a plain ``model.predict(*args, **kwargs)``.
    """
    try:
        return model.predict(*args, prediction_type="RawFormulaVal", **kwargs)
    except TypeError:
        return model.predict(*args, **kwargs)
def login_required(arg):
    """ Decorator to check if a user is logged in via a Bearer token"""
    @wraps(arg)
    def wrap(*args, **kwargs):
        """Checking if token exists in the request header"""
        if request.headers.get('Authorization'):
            auth_token = request.headers.get('Authorization')
            # Header format: "Bearer <token>".
            token = auth_token.split(" ")[1]
            resp = User.decode_token(token)
            # NOTE(review): decode_token presumably returns a user id on
            # success (and a non-matching value on failure) — confirm.
            user = User.query.filter_by(id=resp).first()
            if user:
                return arg(*args, **kwargs)
        # Missing header, or unknown/invalid token: reject with 401.
        response = jsonify({
            'status': 'error',
            'message': "Unauthorized"
        })
        response.status_code = 401
        return response
    return wrap
def calc_manual_numbers(n):
    """
    >>> calc_manual_numbers(1)
    20151125
    >>> calc_manual_numbers(2)
    31916031
    >>> calc_manual_numbers(3)
    18749137
    >>> calc_manual_numbers(21)
    33511524
    """
    # Closed form: code(n) = BASE * FACTOR**(n-1) mod MOD, using the
    # three-argument pow for fast modular exponentiation.
    exponent = n - 1
    return (BASE * pow(FACTOR, exponent, MOD)) % MOD
def accuracy(y0, y1):
    """
    Compute accuracy between two label collections; each of ``y0``/``y1`` may
    be given either as a 1-D sequence of class labels or as a 2-D array of
    per-class probabilities (in which case argmax is taken along axis 1).

    :param y0: list of labels, or 2-D array/list of probability vectors
    :param y1: list of labels, or 2-D array/list of probability vectors
    :return: fraction of positions where the two label vectors agree
    """
    def _to_labels(y):
        # A scalar first element means we already have labels; anything else
        # (e.g. a row of probabilities) is collapsed with argmax.
        # np.number covers every NumPy scalar type (np.int32, np.float64, ...)
        # without the np.int / np.float aliases removed in NumPy 1.24.
        if not isinstance(y[0], (int, float, np.number)):
            return np.argmax(y, axis=1)
        if isinstance(y, list):
            return np.array(y)
        return y
    y0 = _to_labels(y0)
    y1 = _to_labels(y1)
    return np.sum(y0 == y1) / len(y0)
def random_function(*args):
    """Picks one of its arguments uniformly at random, calls it, and returns the result.

    Example usage:
    >>> random_function(lambda: numpy.uniform(-2, -1), lambda: numpy.uniform(1, 2))
    """
    # random.choice performs the same uniform selection as indexing with
    # random.randint(0, len(args) - 1), but states the intent directly.
    return random.choice(args)()
def get_transceiver_sensor_sub_id(ifindex, sensor):
    """
    Return the sub OID (as a one-element tuple) for a transceiver sensor.

    The sub OID is the transceiver's OID offset by the sensor's part id:
    sub OID = {{index}} * 1000 + {{lane}} * 10 + sensor id

    :param ifindex: interface index
    :param sensor: sensor key
    :return: one-element tuple holding the sensor sub OID
    """
    (oid,) = get_transceiver_sub_id(ifindex)
    return (oid + XCVR_SENSOR_PART_ID_MAP[sensor],)
def test_include_2():
    # Doctest-only test: the docstring below IS the test (executed by the
    # doctest runner); the expected parseHtml output must not be reformatted.
    """
    >>> text = '''
    ... {% include file=test.js %}
    ... {% endinclude %}
    ... '''
    >>> blocks = {'include':include}
    >>> print (parseHtml(text, '%(body)s', block_callback=blocks))
    <BLANKLINE>
    <pre><code>$(function(){
    $('.div').on('click', function(e){
    e.preventDefault();
    $.ajax({
    url:'/test',
    dataType:'json',
    success:function(result){
    show_message(result);
    }
    });
    });
    });</code></pre>
    <BLANKLINE>
    """
def patch_twitter_get_following_users(value):
    """Return a function decorator which patches the TwitterClient.get_following_user_ids method.

    Args:
        value: The return value the patched method should produce.
    """
    return patch_twitter_client_method("get_following_user_ids", value)
def test_convert_html_to_newspaper():
    """Should convert the given HTML into a newspaper article object."""
    # NOTE(review): the fixture HTML lacks the closing </body></html> tags —
    # presumably the underlying parser tolerates this; confirm it is intended.
    result = utils.convert_html_to_newspaper(
        "<html><head></head><body><article><p>Hello</p></article>"
    )
    assert isinstance(result, newspaper.Article)
def conv1x1_1d(inplanes: int,
               outplanes: int,
               stride: int = 1) -> nn.Conv1d:
    """1x1 1-D convolution without bias, used to downsample shortcut branches.

    Args:
        inplanes: number of input channels.
        outplanes: number of output channels.
        stride: convolution stride (default 1).
    """
    return nn.Conv1d(inplanes, outplanes,
                     kernel_size=(1,), stride=(stride,),
                     padding=0, bias=False)
def load_cifar10(channels_last=True, x_shape=None, x_dtype=np.float32,
                 y_dtype=np.int32, normalize_x=False):
    """
    Load the CIFAR-10 dataset as NumPy arrays.
    Args:
        channels_last (bool): Whether or not to place the channels axis
            at the last?
        x_shape: Reshape each digit into this shape.  Default to
            ``(32, 32, 3)`` if `channels_last` is :obj:`True`, otherwise
            default to ``(3, 32, 32)``.
        x_dtype: Cast each digit into this data type.  Default `np.float32`.
        y_dtype: Cast each label into this data type.  Default `np.int32`.
        normalize_x (bool): Whether or not to normalize x into ``[0, 1]``,
            by dividing each pixel value with 255.? (default :obj:`False`)
    Returns:
        (np.ndarray, np.ndarray), (np.ndarray, np.ndarray): The
            (train_x, train_y), (test_x, test_y)
    """
    # check the arguments
    x_shape = _validate_x_shape(x_shape, channels_last)
    # fetch data (downloads to a local cache dir and verifies the MD5 hash)
    path = CacheDir('cifar').download_and_extract(
        CIFAR_10_URI, hasher=hashlib.md5(), expected_hash=CIFAR_10_MD5)
    data_dir = os.path.join(path, CIFAR_10_CONTENT_DIR)
    # load the data: CIFAR-10 ships 5 training batches of 10000 samples each
    train_num = 50000
    train_x = np.zeros((train_num,) + x_shape, dtype=x_dtype)
    train_y = np.zeros((train_num,), dtype=y_dtype)
    for i in range(1, 6):
        path = os.path.join(data_dir, 'data_batch_{}'.format(i))
        x, y = _load_batch(
            path, channels_last=channels_last, x_shape=x_shape,
            x_dtype=x_dtype, y_dtype=y_dtype, normalize_x=normalize_x,
            expected_batch_label='training batch {} of 5'.format(i)
        )
        # write each batch into its 10000-row slice of the preallocated arrays
        (train_x[(i - 1) * 10000: i * 10000, ...],
         train_y[(i - 1) * 10000: i * 10000]) = x, y
    path = os.path.join(data_dir, 'test_batch')
    test_x, test_y = _load_batch(
        path, channels_last=channels_last, x_shape=x_shape,
        x_dtype=x_dtype, y_dtype=y_dtype, normalize_x=normalize_x,
        expected_batch_label='testing batch 1 of 1'
    )
    assert(len(test_x) == len(test_y) == 10000)
    return (train_x, train_y), (test_x, test_y)
def test_tag_spot_instance_request():
    """
    Test that moto correctly tags a spot instance request
    """
    # Runs against the mocked EC2 backend (moto); no real AWS calls are made.
    conn = boto.connect_ec2()
    request = conn.request_spot_instances(price=0.5, image_id=EXAMPLE_AMI_ID)
    request[0].add_tag("tag1", "value1")
    request[0].add_tag("tag2", "value2")
    # Re-fetch so the assertions run on what the backend stored, not on the
    # local request object.
    requests = conn.get_all_spot_instance_requests()
    requests.should.have.length_of(1)
    request = requests[0]
    tag_dict = dict(request.tags)
    tag_dict.should.equal({"tag1": "value1", "tag2": "value2"})
def unpack_mmap_block(mm, n):
    """Decode the nth 4-byte little-endian unsigned long from mapped memory."""
    offset = n * DATA_BLOCK_SIZE
    (value,) = struct.unpack("<L", mm[offset:offset + DATA_BLOCK_SIZE])
    return value
def tobs():
    """Return a list of temperatures for prior year"""
    # * Query for the dates and temperature observations from the last year.
    # * Convert the query results to a Dictionary using `date` as the key and `tobs` as the value.
    # * Return the json representation of your dictionary.
    # NOTE(review): last_date is computed but unused; kept for parity with the
    # original intent of deriving the cutoff from the data — confirm.
    last_date = session.query(Measurements.date).order_by(Measurements.date.desc()).first()
    last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    temperature = session.query(Measurements.date, Measurements.tobs).\
        filter(Measurements.date > last_year).\
        order_by(Measurements.date).all()
    # Create a list of dicts with `date` and `tobs` as the keys and values
    temperature_totals = []
    for result in temperature:
        # BUG FIX: previously indexed the whole result list (temperature[0] /
        # temperature[1]), so every dict repeated the first two rows instead
        # of the current row's (date, tobs) pair.
        row = {"date": result[0], "tobs": result[1]}
        temperature_totals.append(row)
    return jsonify(temperature_totals)
def answer(comment):
    """Answers given comment.

    Python 2 code (print statements). Dispatches on the comment text:
    "<keyword> say ..." -> text-to-speech audio reply;
    "<keyword> morse ..." -> morse audio reply;
    "<keyword> <image-url> ..." -> face-detection reply;
    otherwise replies with the fallback code "0000000".
    """
    # Flatten newlines so the regex searches see one continuous string.
    text = comment.body.replace("\n","")
    low = text.lower()
    print "//INCOMING COMMENT {}: {}".format(comment.id, text)
    if keyword not in text.lower():
        print "//KEYWORD NOT IN COMMENT"
        return
    if keyword + " say " in low:
        # Everything after the trigger phrase is the text to speak.
        m = re.search(keyword + " say ", text, flags=re.IGNORECASE)
        say = text[m.end(0):]
        say = machinetools.alphabet(say).encode('ascii', 'ignore')
        url = machinetools.saystuff(say)
        message = text_audio.format(url)
        comment.reply(message)
        return
    if keyword + " morse " in low:
        m = re.search(keyword + " morse ",text, flags=re.IGNORECASE)
        say = text[m.end(0):].encode('ascii', 'ignore')
        url = machinetools.morsestuff(say)
        message = text_audio.format(url)
        comment.reply(message)
        return
    # Group 1: image URL right after the keyword; group 2: remaining args.
    img_pattern = "(?<={} )(https?://.*?\.(?:jpe?g|png|bmp)) (.*)".format(keyword)
    m = re.search(img_pattern, text, flags=re.I)
    if m:
        try:
            url = face_detect.from_url(m.group(1), m.group(2))
            message = text_face.format(url)
        # NOTE(review): bare except hides all failure modes (network, parse,
        # API errors) behind one generic reply — consider narrowing.
        except:
            message = "//AN ERROR OCURRED WITH THE IMAGE!"
        comment.reply(message)
        return
    # Keyword present but no recognized command: reply with the error code.
    comment.reply("0000000")
    print "ERROR 0000000 at {}".format(comment.id)
def test_unknown_offset_raises_error():
    """It should raise an exception when an invalid offset is used."""
    # 'century' is not a supported offset unit, so parse_date must raise.
    # (The leading `assert` is unreachable when the exception fires.)
    with pytest.raises(ValueError):
        assert parse_date('(2018-01-01) + 1century', '%Y-%m-%d')
def reactToAMQPMessage(message, send_back):
    """
    React to given (AMQP) message. `message` is expected to be
    :py:func:`collections.namedtuple` structure from :mod:`.structures` filled
    with all necessary data.
    Args:
        message (object): One of the request objects defined in
            :mod:`.structures`.
        send_back (fn reference): Reference to function for responding. This is
            useful for progress monitoring for example. Function takes
            one parameter, which may be response structure/namedtuple, or
            string or whatever would be normally returned.
    Returns:
        object: Response class from :mod:`structures`.
    Raises:
        ValueError: if bad type of `message` structure is given.
    """
    # Reject anything that is not a ConversionRequest up front.
    if not _instanceof(message, ConversionRequest):
        raise ValueError("'%s' is unknown type of request!" % str(type(message)))
    mods = marcxml2mods(
        marc_xml=message.marc_xml,
        uuid=message.uuid,
        url=message.url
    )
    return ConversionResponse(mods)
def recE(siEnergy, layer):
    """Reconstructed energy from sim energy."""
    # Convert to MIP units, apply the per-layer weight, add back the raw
    # silicon energy, then apply the global second-order correction.
    weighted = (siEnergy / mipSiEnergy) * layerWeights[layer - 1]
    return (weighted + siEnergy) * secondOrderEnergyCorrection
def get_account_id():
    """
    Retrieve the AWS account ID
    """
    # STS GetCallerIdentity works with any valid credentials and needs no
    # extra permissions.
    sts = boto3.client("sts")
    return sts.get_caller_identity()["Account"]
def process_tag(item, profile, level=0):
    """
    Processes element with <code>tag</code> type, emitting HAML-style markup.
    @type item: ZenNode
    @type profile: dict
    @type level: int
    """
    if not item.name:
        # looks like it's root element
        return item
    attrs = make_attributes_string(item, profile)
    # Optional caret placeholder for editor cursor positioning.
    cursor = profile['place_cursor'] and zen_coding.get_caret_placeholder() or ''
    self_closing = ''
    is_unary = item.is_unary() and not item.children
    if profile['self_closing_tag'] and is_unary:
        self_closing = '/'
    # define tag name (HAML uses a '%' prefix; case per profile setting)
    tag_name = '%' + (profile['tag_case'] == 'upper' and item.name.upper() or item.name.lower())
    if tag_name.lower() == '%div' and '{' not in attrs:
        # omit div tag (HAML convention: bare attributes imply %div)
        tag_name = ''
    item.end = ''
    item.start = _replace(item.start, tag_name + attrs + self_closing)
    # Leaf, non-unary elements get the cursor placeholder appended.
    if not item.children and not is_unary:
        item.start += cursor
    return item
def read_secret(project_id: str, secret_name: str) -> Optional[str]:
    """Reads the latest version of a GCP Secret Manager secret.
    Returns None if the secret doesn't exist.

    Args:
        project_id: GCP project that owns the secret.
        secret_name: Name (ID) of the secret to read.
    """
    secret_manager = secretmanager.SecretManagerServiceClient()
    secret_path = secret_manager.secret_path(project_id, secret_name)
    try:
        response = secret_manager.access_secret_version(
            request={'name': f'{secret_path}/versions/latest'}
        )
        return response.payload.data.decode('UTF-8')
    except google.api_core.exceptions.ClientError:
        # Fail gracefully if there's no secret version yet.
        return None
    except AttributeError:
        # Sometimes the google API fails when no version is present, with:
        # File "{site-packages}/google/api_core/exceptions.py",
        #   line 532, in from_grpc_error if isinstance(rpc_exc, grpc.Call) or _is_informative_grpc_error(rpc_exc):
        # AttributeError: 'NoneType' object has no attribute 'Call'
        return None
def find_in_xml(data, search_params):
    """Recursively locate an element in an xml document.

    Accepts the xml as a string or as an xml.etree.ElementTree element (or a
    list of elements), plus an iterable of search parameters — strings, or
    tuples of (string, truthy) when a findall is wanted. Each parameter is
    applied in turn, recursing on the intermediate result.
    """
    if isinstance(data, str):
        data = xml.etree.ElementTree.fromstring(data)
    current, remaining = search_params[0], search_params[1:]
    if isinstance(data, list):
        found = iterate_search_data(data, current)
    else:
        found = xml_search_helper(data, current)
    if remaining:
        return find_in_xml(found, remaining)
    return found
def conditional_expect(
    X,
    func,
    reg,
    method=None,
    quantile_method=None,
    n_integration_samples=10,
    quad_dict=None,
    random_state=None,
    include_x=False,
    include_idx=False,
    vector_func=False,
):
    """Calculates the conditional expectation, i.e. E[func(Y)|X=x_eval], where
    Y | X ~ reg.predict_target_distribution, for x_eval in `X_eval`.
    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The samples where the expectation should be evaluated.
    func : callable
        The function that transforms the random variable.
    reg: ProbabilisticRegressor
        Predicts the target distribution over which the expectation is calculated.
    method: string, optional, optional (default=None)
        The method by which the expectation is computed.
        -'assume_linear' assumes E[func(Y)|X=x_eval] ~= func(E[Y|X=x_eval]) and
          thereby only takes the function value at the expected y value.
        -'monte_carlo' Basic monte carlo integration. Taking the average
          of randomly drawn samples. `n_integration_samples` specifies the
          number of monte carlo samples.
        -'quantile' Uses the quantile function to transform the integration
          space into the interval from 0 to 1 and than uses the method from
          'quantile_method' to calculate the integral. The number of integration
          points is specified by `n_integration_samples`.
        -'gauss_hermite' Uses Gauss-Hermite quadrature. This assumes Y | X
          to be gaussian distributed. The number of evaluation points is given
          by `n_integration_samples`.
        -'dynamic_quad' uses `scipy's` function `expect` on the `rv_continuous`
          random variable of `reg`, which in turn uses a dynamic gaussian
          quadrature routine for calculating the integral. Performance is worse
          using a vector function.
        If `method is None` 'gauss_hermite' is used.
    quantile_method: string, optional (default=None)
        Specifies the integration methods used after the quantile
        transformation.
        -'trapezoid' Trapezoidal method for integration using evenly spaced
          samples.
        -'simpson' Simpson method for integration using evenly spaced samples.
        -'average' Taking the average value for integration using evenly spaced
          samples.
        -'romberg' Romberg method for integration. If `n_integration_samples` is
          not equal to `2**k + 1` for a natural number k, the number of
          samples used for integration is put to the smallest such number greater
          than `n_integration_samples`.
        -'quadrature' Gaussian quadrature method for integration.
        If `quantile_method is None` quadrature is used.
    n_integration_samples: int, optional (default=10)
        The number of integration samples used in 'quantile', 'monte_carlo' and
        'gauss-hermite'.
    quad_dict: dict, optional (default=None)
        Further arguments for using `scipy's` `expect`
    random_state: numeric | np.random.RandomState, optional (default=None)
        Random state for fixing the number generation.
    include_x: bool, optional (default=False)
        If `include_x` is `True`, `func` also takes the x value.
    include_idx: bool, optional (default=False)
        If `include_idx` is `True`, `func` also takes the index of the x value.
    vector_func: bool or str, optional (default=False)
        If `vector_func` is `True`, the integration values are passed as a whole
        to the function `func`. If `vector_func` is 'both', the integration
        values might or might not be passed as a whole. The integration values
        if passed as a whole are of the form (n_samples, n_integration), where
        n_integration denotes the number of integration values.
    Returns
    -------
    expectation : numpy.ndarray of shape (n_samples)
        The conditional expectation for each value applied.
    """
    X = check_array(X, allow_nd=True)
    check_type(reg, "reg", ProbabilisticRegressor)
    check_type(
        method,
        "method",
        target_vals=[
            "monte_carlo",
            "assume_linear",
            "dynamic_quad",
            "gauss_hermite",
            "quantile",
            None,
        ],
    )
    check_type(
        quantile_method,
        "quantile_method",
        target_vals=[
            "trapezoid",
            "simpson",
            "average",
            "romberg",
            "quadrature",
            None,
        ],
    )
    # NOTE(review): the reported names "n_monte_carlo" and "scipy_args" do not
    # match the actual parameter names (n_integration_samples, quad_dict) —
    # error messages will cite the wrong name; confirm intent.
    check_scalar(n_integration_samples, "n_monte_carlo", int, min_val=1)
    check_type(quad_dict, "scipy_args", dict, target_vals=[None])
    check_type(include_idx, "include_idx", bool)
    check_type(include_x, "include_x", bool)
    check_type(vector_func, "vector_func", bool, target_vals=["both"])
    check_callable(func, "func", n_free_parameters=1 + include_idx + include_x)
    if method is None:
        method = "gauss_hermite"
    if quantile_method is None:
        quantile_method = "quadrature"
    if quad_dict is None:
        quad_dict = {}
    if method == "quantile" and quantile_method == "romberg":
        # n_integration_samples need to be of the form 2**k + 1
        n_integration_samples = (
            2 ** int(np.log2(n_integration_samples) + 1) + 1
        )
    is_optional = vector_func == "both"
    if is_optional:
        vector_func = True
    random_state = check_random_state(random_state)
    # Build the argument tuple for `func`, prepending index and/or x as
    # requested via include_idx / include_x.
    def arg_filter(idx_y, x_y, y):
        ret = tuple()
        if include_idx:
            ret += (idx_y,)
        if include_x:
            ret += (x_y,)
        ret += (y,)
        return ret
    # Apply `func` to a (n_samples, n_integration) grid of candidate y values,
    # either in one vectorized call or element-wise.
    def evaluate_func(inner_potential_y):
        if vector_func:
            inner_output = func(
                *arg_filter(np.arange(len(X)), X, inner_potential_y)
            )
        else:
            inner_output = np.zeros_like(inner_potential_y)
            for idx_x, inner_x in enumerate(X):
                for idx_y, y_val in enumerate(inner_potential_y[idx_x]):
                    inner_output[idx_x, idx_y] = func(
                        *arg_filter(idx_x, inner_x, y_val)
                    )
        return inner_output
    expectation = np.zeros(len(X))
    if method in ["assume_linear", "monte_carlo"]:
        if method == "assume_linear":
            potential_y = reg.predict(X).reshape(-1, 1)
        else:  # method equals "monte_carlo"
            potential_y = reg.sample_y(
                X=X,
                n_samples=n_integration_samples,
                random_state=random_state,
            )
        expectation = np.average(evaluate_func(potential_y), axis=1)
    elif method == "quantile":
        if quantile_method in ["trapezoid", "simpson", "average", "romberg"]:
            # Evenly spaced interior quantile levels in (0, 1).
            eval_points = np.arange(1, n_integration_samples + 1) / (
                n_integration_samples + 1
            )
            cond_dist = _reshape_scipy_dist(
                reg.predict_target_distribution(X), shape=(-1, 1)
            )
            potential_y = cond_dist.ppf(eval_points.reshape(1, -1))
            output = evaluate_func(potential_y)
            if quantile_method == "trapezoid":
                expectation = integrate.trapezoid(
                    output, dx=1 / n_integration_samples, axis=1
                )
            elif quantile_method == "simpson":
                expectation = integrate.simpson(
                    output, dx=1 / n_integration_samples, axis=1
                )
            elif quantile_method == "average":
                expectation = np.average(output, axis=-1)
            else:  # quantile_method equals "romberg"
                expectation = integrate.romb(
                    output, dx=1 / n_integration_samples, axis=1
                )
        else:  # quantile_method equals "quadrature"
            def fixed_quad_function_wrapper(inner_eval_points):
                inner_cond_dist = _reshape_scipy_dist(
                    reg.predict_target_distribution(X), shape=(-1, 1)
                )
                inner_potential_y = inner_cond_dist.ppf(
                    inner_eval_points.reshape(1, -1)
                )
                return evaluate_func(inner_potential_y)
            expectation, _ = integrate.fixed_quad(
                fixed_quad_function_wrapper, 0, 1, n=n_integration_samples
            )
    elif method == "gauss_hermite":
        # Probabilists' Hermite nodes/weights; rescale nodes by the predicted
        # per-sample mean and std (assumes Y | X is gaussian).
        unscaled_potential_y, weights = roots_hermitenorm(
            n_integration_samples
        )
        cond_mean, cond_std = reg.predict(X, return_std=True)
        potential_y = (
            cond_std[:, np.newaxis] * unscaled_potential_y[np.newaxis, :]
            + cond_mean[:, np.newaxis]
        )
        output = evaluate_func(potential_y)
        expectation = (
            1
            / (2 * np.pi) ** (1 / 2)
            * np.sum(weights[np.newaxis, :] * output, axis=1)
        )
    else:  # method equals "dynamic_quad"
        for idx, x in enumerate(X):
            cond_dist = reg.predict_target_distribution([x])
            def quad_function_wrapper(y):
                if is_optional or not vector_func:
                    return func(*arg_filter(idx, x, y))
                else:
                    return func(
                        *arg_filter(
                            np.arange(len(X)), X, np.full((len(X), 1), y)
                        )
                    )[idx]
            expectation[idx] = cond_dist.expect(
                quad_function_wrapper,
                **quad_dict,
            )
    return expectation
def pump_volume(volume, direction):
    """
    Moves volume of solution through pump
    :param volume: amount of volume to move (float)
    :param direction: 0 to pull solution in, 1 to pump out
    """
    volume_to_add = volume
    # pull in solution
    if direction == 0:
        # if volume_to_add is greater than space in the pump, clamp it to the
        # remaining capacity; excess simply cannot be drawn in.
        space_in_pump = constants.MAX_PUMP_CAPACITY - constants.volume_in_pump
        if volume_to_add > space_in_pump:
            volume_to_add = constants.MAX_PUMP_CAPACITY - constants.volume_in_pump
        drive_pump(volume_to_add, direction)
    # pump out solution
    elif direction == 1:
        if volume_to_add > constants.MAX_PUMP_CAPACITY:
            lcd_out("Volume > pumpable", style=constants.LCD_CENT_JUST, line=4)
            # volume greater than max capacity of pump:
            # add all current volume in pump first
            next_volume = constants.volume_in_pump
            drive_pump(next_volume, 1)
            # recalculate volume to add
            volume_to_add = volume_to_add - next_volume
            # then alternate refill (0) / dispense (1) cycles until done
            while volume_to_add > 0:
                # pump in and out more solution
                next_volume = min(volume_to_add, constants.MAX_PUMP_CAPACITY)
                drive_pump(next_volume, 0)
                drive_pump(next_volume, 1)
                volume_to_add -= next_volume
        elif volume_to_add > constants.volume_in_pump:
            # volume greater than volume in pump: dispense what is held,
            # refill the shortfall, then dispense it
            next_volume = constants.volume_in_pump
            drive_pump(next_volume, 1)
            # calculate rest of volume to add
            volume_to_add -= next_volume
            drive_pump(volume_to_add, 0)
            drive_pump(volume_to_add, 1)
        else:
            # volume less than volume in pump: single dispense suffices
            drive_pump(volume_to_add, direction)
def delete_temp_files():
    """
    Deletes the 3 single component temporary files (raw and processed).
    """
    for component in COMPS:
        # Remove the raw file first, then its processed counterpart.
        for prefix in (PREFIX, PREFIX_PROC):
            os.unlink(prefix + component)
def getSumOfSquaresPixel16_Image16(Image):
    """getSumOfSquaresPixel16_Image16(Image) -> unsigned __int16"""
    # Auto-generated (SWIG-style) thin wrapper delegating to the C extension.
    return _ImageFunctions.getSumOfSquaresPixel16_Image16(Image)
def greeting_py():
    """A simple Python function that prints a fixed greeting."""
    message = "Hello World, from Python!"
    print(message)
def parse_statement(parsed, output):
    """Parses a tokenized sql_parse token and returns an encoded table.

    NOTE(review): relies on module-level globals `cursor`, `tables`, `Table`
    and helper functions (extract_from_part, extract_join_part, ...) —
    confirm these are initialized before this is called.
    """
    # Get the name of the table being created
    table_name = next(token.value for token in parsed.tokens if isinstance(token, Identifier))
    # Add the table metadata to the cached tables to access later.
    # Two-part names are schema.table; three-part names are db.schema.table.
    if len(table_name.split('.')) == 2\
            and not found_table(table_name.split('.')[0], table_name.split('.')[1]):
        this_table = Table(
            table_name.split('.')[0], table_name.split('.')[1], cursor
        )
        print(f'Appending this table ({this_table.alias}):')
        print(this_table)
        this_table.query_data()
        tables.append(this_table)
    elif len(table_name.split('.')) == 3 \
            and not found_table(table_name.split('.')[1], table_name.split('.')[2]):
        this_table = Table(
            table_name.split('.')[1], table_name.split('.')[2], cursor
        )
        print('Appending this table')
        print(this_table)
        this_table.query_data()
        tables.append(this_table)
    # print(this_table)
    # Get all the FROM statements's metadata
    froms = {k: v for d in extract_from_part(parsed, cursor) for k, v in d.items()}
    print('Tables:')
    print([table for table in tables])
    # Get all the JOIN statements's metadata
    joins = list(extract_join_part(parsed, cursor))
    # Get all of the comparisons to compare the number of comparisons to the number of JOIN
    # statements
    comparisons = list(extract_comparisons(parsed))
    # Get all the columns selected by this query. The table aliases are used to identify where
    # the columns originate from.
    selects = list(
        extract_selects(parsed, {**froms, **{k: v for d in joins for k, v in d.items()}})
    )
    # When the number of comparisons does not match the number of joins, the parsing was
    # incorrect, raise and exception.
    if len(comparisons) != len(joins):
        raise Exception('Parsing messed up!')
    return encode_table(joins, froms, table_name, selects, comparisons, output)
def create_hue_success_response(entity_number, attr, value):
    """Create a success response for an attribute set on a light."""
    path = f"/lights/{entity_number}/state/{attr}"
    return {"success": {path: value}}
def get_deaths():
    """***DEPRECATED - Use get_data_jhu instead.***
    Get most recent fatality counts from JHU."""
    # Deprecated warning (stacklevel=2 points the warning at the caller)
    url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
    warnings.warn("This function is deprecated. Use get_data_jhu instead; see tutorials at <https://github.com/PayneLab/covid19pandas/tree/master/docs/>.", DeprecatedWarning, stacklevel=2)
    print("These data were obtained from Johns Hopkins University (https://github.com/CSSEGISandData/COVID-19).")
    # update=True forces a re-download so counts are current.
    return _get_table(url, "time_series_covid19_deaths_global.csv", source="jhu", update=True)
def init_enforcer(policy_file=None, rules=None,
                  default_rule=None, use_conf=True):
    """Synchronously initializes the policy enforcer
    :param policy_file: Custom policy file to use, if none is specified,
                        `CONF.oslo_policy.policy_file` will be used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation.
    :param default_rule: Default rule to use,
                         CONF.oslo_policy.policy_default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from config file.
    """
    global _ENFORCER
    # Idempotent: subsequent calls are no-ops once the singleton exists.
    if _ENFORCER:
        return
    # NOTE(deva): Register defaults for policy-in-code here so that they are
    #             loaded exactly once - when this module-global is initialized.
    #             Defining these in the relevant API modules won't work
    #             because API classes lack singletons and don't use globals.
    _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file,
                                rules=rules,
                                default_rule=default_rule,
                                use_conf=use_conf)
    _ENFORCER.register_defaults(list_policies())
def transform_sentence(text, model):
    """
    Mean embedding vector: average the word vectors of all in-vocabulary
    tokens of ``text``; return a zero vector when no token is known.
    """
    # Keep only whitespace-split tokens present in the embedding vocabulary.
    tokens = [word for word in text.split() if word in model.vocab]
    if not tokens:
        return np.zeros(model.vector_size)
    return np.array(np.mean(model[tokens], axis=0))
def main():
    """As usual, the main line of the program is pretty compact. Here I have opted to read in the microrarray
    data file in the main line of the program rather than cluttering up my Tree class.

    (Python 2 code: uses `print >> file` redirection syntax.)
    """
    i = 0
    # filename = 'ratiodata.txt'
    filename = 'BacillusData2.txt'
    infile = open(filename, 'r')
    # Create the .gtr and .cdt output files
    gtr = open(filename.replace('.txt', '.gtr'), 'w')
    cdt = open(filename.replace('.txt', '.cdt'), 'w')
    # Print the header for the .cdt file
    print >> cdt, 'GID\tUNIQID\tNAME\tWt0A T-1.5\tWt0A T0\tWt0A T2\twtsF T2'
    for line in infile.readlines():
        line = line.strip()
        tempdata = line.split('\t')
        if tempdata[0] != 'GENE':  # Instantiate a new node, but ignore the data label row of the input file
            # Tree("GENE" + str(i), tempdata[0], [float(k) for k in tempdata[1:]], 'Euclidian')
            Tree("GENE" + str(i) + 'X', tempdata[0], [float(k) for k in tempdata[1:]], 'Pearson')
            i += 1
    # for current_node in Tree.nodes:
    #     Tree.nodes[current_node].node_dump()
    # Hierarchically cluster the nodes, then emit the tree depth-first.
    Tree.Cluster(gtr)
    Tree.DFS(Tree.nodes['NODE' + str(i-2) + 'X'], cdt)
def model_primary_key_columns_and_names(Model: DeclarativeMeta) -> (Sequence[Column], List[str]):
    """Get the primary-key columns and their names as two separate tuples.

    Example:
        pk_columns, pk_names = model_primary_key_columns_and_names(models.User)
        pk_columns  # -> (models.User.id, )
        pk_names  # -> ('id', )
    """
    columns: Sequence[Column] = inspect(Model).primary_key
    names: List[str] = [column.key for column in columns]
    return columns, names
def biweekly_test_data():
    """ Provides test data for the full system test when using "biweekly" time_scale."""
    time_scale = "biweekly"
    # NOTE(review): 480 appears to be minutes per 8-hour block, scaled by
    # days-per-category and 2 weeks — confirm the units.
    time_per_task = {
        "Free" : 480 * 9 * 2,
        "Work" : 480 * 5 * 2,
        "Sleep" : 480 * 7 * 2
    }
    min_task_time = 60
    # Preference windows use "<Day><week#>,<time>-<Day><week#>,<time>" spans.
    preferences = {
        "Free" : {
            "avoid" : [ "Monday1,09:00AM-Monday1,05:00PM",
                        "Tuesday1,09:00AM-Tuesday1,05:00PM",
                        "Wednesday1,09:00AM-Wednesday1,05:00PM",
                        "Thursday1,09:00AM-Thursday1,05:00PM",
                        "Friday1,09:00AM-Friday1,05:00PM",
                        "Monday2,09:00AM-Monday2,05:00PM",
                        "Tuesday2,09:00AM-Tuesday2,05:00PM",
                        "Wednesday2,09:00AM-Wednesday2,05:00PM",
                        "Thursday2,09:00AM-Thursday2,05:00PM",
                        "Friday2,09:00AM-Friday2,05:00PM"],
            "inconvenient" : [],
            "neutral" : [],
            "convenient" :[ "Monday1,06:00PM-Monday1,08:00PM",
                        "Tuesday1,06:00PM-Tuesday1,08:00PM",
                        "Wednesday1,06:00PM-Wednesday1,08:00PM",
                        "Thursday1,06:00PM-Thursday1,08:00PM",
                        "Friday1,06:00PM-Friday1,08:00PM",
                        "Monday2,06:00PM-Monday2,08:00PM",
                        "Tuesday2,06:00PM-Tuesday2,08:00PM",
                        "Wednesday2,06:00PM-Wednesday2,08:00PM",
                        "Thursday2,06:00PM-Thursday2,08:00PM",
                        "Friday2,06:00PM-Friday2,08:00PM"],
            "preferred" : [],
            "required" : []
        },
        "Work" : {
            "avoid" : [],
            "inconvenient" : [],
            "neutral" : [],
            "convenient" : [],
            "preferred" : [],
            "required" : [ "Monday1,09:00AM-Monday1,05:00PM",
                        "Tuesday1,09:00AM-Tuesday1,05:00PM",
                        "Wednesday1,09:00AM-Wednesday1,05:00PM",
                        "Thursday1,09:00AM-Thursday1,05:00PM",
                        "Friday1,09:00AM-Friday1,05:00PM",
                        "Monday2,09:00AM-Monday2,05:00PM",
                        "Tuesday2,09:00AM-Tuesday2,05:00PM",
                        "Wednesday2,09:00AM-Wednesday2,05:00PM",
                        "Thursday2,09:00AM-Thursday2,05:00PM",
                        "Friday2,09:00AM-Friday2,05:00PM"],
        },
        "Sleep" : {
            "avoid" : [ "Monday1,09:00AM-Monday1,05:00PM",
                        "Tuesday1,09:00AM-Tuesday1,05:00PM",
                        "Wednesday1,09:00AM-Wednesday1,05:00PM",
                        "Thursday1,09:00AM-Thursday1,05:00PM",
                        "Friday1,09:00AM-Friday1,05:00PM",
                        "Monday2,09:00AM-Monday2,05:00PM",
                        "Tuesday2,09:00AM-Tuesday2,05:00PM",
                        "Wednesday2,09:00AM-Wednesday2,05:00PM",
                        "Thursday2,09:00AM-Thursday2,05:00PM",
                        "Friday2,09:00AM-Friday2,05:00PM"],
            "inconvenient" : [],
            "neutral" : [],
            "convenient" : [],
            "preferred" : [ "Monday1,10:00PM-Tuesday1,06:00AM",
                        "Tuesday1,10:00PM-Wednesday1,06:00AM",
                        "Wednesday1,10:00PM-Thursday1,06:00AM",
                        "Thursday1,10:00PM-Friday1,06:00AM",
                        "Friday1,10:00PM-Saturday1,06:00AM",
                        "Saturday1,10:00PM-Sunday1,06:00AM",
                        "Sunday1,10:00PM-Monday2,06:00AM",
                        "Monday2,10:00PM-Tuesday2,06:00AM",
                        "Tuesday2,10:00PM-Wednesday2,06:00AM",
                        "Wednesday2,10:00PM-Thursday2,06:00AM",
                        "Thursday2,10:00PM-Friday2,06:00AM",
                        "Friday2,10:00PM-Saturday2,06:00AM",
                        "Saturday2,10:00PM-Sunday2,06:00AM",
                        "Sunday2,10:00PM-Monday1,06:00AM"],
            "required" : []
        }
    }
    return time_scale, time_per_task, min_task_time, preferences
def test_apply_rename_through_arg(spark_session) -> None:
    """
    Verify that TestApplyRenameThroughArg.load produces a DataFrame whose
    columns include the renamed column ``name_renamed``.
    """
    assert "name_renamed" in TestApplyRenameThroughArg.load(spark_session).columns
def plot_bar_whiskers_jitter_significance(data, comparison_columns,
                                          significant_comparison_columns,
                                          heights, ylabel,
                                          xlabels=None,
                                          ax_handle=None,
                                          median_notch=False,
                                          boxplot_color='black',
                                          boxplot_linewidth=2,
                                          markersize=12,
                                          xtick_rotation=90,
                                          marker=None,
                                          color=None,
                                          alpha=0.2,
                                          whis = [2.5, 97.5]):
    """
    Make a jittered boxplot significance test
    Parameters
    -------------------
    d : A pandas dataframe, where each column corresponds to data to be plotted with jitter + boxplot
    heights : A list, heights of the significance annotations, for each comparison
    comparison_columns : A list of lists, where each element corresponds to a pair of columns to compare
    significant_comparison_columns : A list of lists, where each element corresponds to a pair of significant column comparisons
    heights : A list of floats, the height of each comparison annotation
    xlabels : A list of strings, the x-labels
    ax_handle : A matplotlib axis handle, for adding onto an existing plot
    median_notch : A bool, to plot the lower and upper quartiles of the median
    boxplot_color : A string, the boxplot color
    boxplot_linewidth : A float, the boxplot linewidth
    markersize: An int, the marker size
    marker : A string or a list of strings, the marker of the points
    color : A string or a list of strings, the color of the points
    alpha : A float, transparency
    whis : A list of floats, the quantiles for whiskers
    Returns
    -------------
    fig : A matplotlib figure handle (if ax_handle is None)
    ax : A matplotlib axis handle (if ax_handle is None)
    """
    if ax_handle is None:
        fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    else:
        ax = ax_handle
    # Jittered points first, then the boxplot drawn on top of them.
    make_jitter_plots(data, names=data.columns, ylabel=ylabel, ax_handle=ax,
                      alpha=alpha, markersize=markersize, xlabels=xlabels,
                      marker=marker, color=color)
    bp = data.boxplot(ax=ax,notch=median_notch, grid=False, whis = whis,
                      showfliers=False, return_type='dict')
    # Recolor every boxplot artist (boxes, whiskers, caps, medians, ...).
    for _, line_list in bp.items():
        for line in line_list:
            line.set_color(boxplot_color)
            line.set_linewidth(boxplot_linewidth)
    previous_ymaxes = []
    for i, comparison in enumerate(comparison_columns):
        comp1, comp2 = comparison
        # Boxplot x positions are 1-based, hence the +1 offsets.
        x1, x2 = np.nonzero(data.columns==comp1)[0][0]+1, np.nonzero(data.columns==comp2)[0][0]+1
        y_max = data.loc[:,[comp1,comp2]].max().values.max()
        previous_ymaxes.append(y_max)
        # Stack each bracket above the tallest data seen so far.
        y, h, col = max(previous_ymaxes) + heights[i], 2, 'k'
        ax.plot([x1, x1, x2, x2], [y, y+h, y+h, y], lw=1.5, c=col)
        if comparison in significant_comparison_columns:
            ax.text((x1+x2)*.5, y+h, "*", ha='center', va='bottom', color=col, fontsize=20)
        else:
            ax.text((x1+x2)*.5, y+h, "ns", ha='center', va='bottom', color=col, fontsize=20)
    if xlabels is not None:
        ax.set_xticklabels(xlabels, rotation=xtick_rotation)
    if ax_handle is None:
        return fig, ax
def test_configure(mock_va):
    """
    Test configuration: bot.configure should load config through the
    VaultAnyConfig wrapper and fill in defaults for unspecified keys.
    """
    sample_config = {"ebr-trackerbot": {"api_url": "https:/test_url.com", "slack_token": "test-token"}}
    mock_va.return_value.auth_from_file = Mock(return_value=True)
    mock_va.return_value.load = Mock(return_value=sample_config)
    bot.configure("test_config.yaml", "test_vault.yaml", "test_creds.yaml")
    # Validate calls to VaultAnyConfig instance
    mock_va.assert_called_once_with("test_vault.yaml")
    mock_va.return_value.auth_from_file.assert_called_once_with("test_creds.yaml")
    mock_va.return_value.load.assert_called_once_with("test_config.yaml")
    assert bot.config["api_url"] == "https:/test_url.com"
    assert bot.config["slack_token"] == "test-token"
    # Keys absent from the sample config must receive their defaults.
    assert bot.config["check_tests_delay"] == 86400
    assert bot.config["slack_message_template"] == "Test *{{test}}* failed *{{count}}* in the last {{period}}\n"
def _is_trans_valid(seed, mutate_sample):
    """
    Decide whether a mutated sample is an acceptable variation of ``seed``.

    The mutation is accepted in either of two cases:
    * few pixels changed: the L0 norm of the difference is at most
      ``pixels_change_rate`` of the sample size AND the largest per-pixel
      change is below ``pixel_value_change_rate * 255``; or
    * many pixels changed, but the largest per-pixel change is below 256.
      NOTE(review): for uint8 inputs that bound always holds — confirm
      whether a tighter threshold was intended.
    """
    pixels_change_rate = 0.02
    pixel_value_change_rate = 0.2
    diff = np.array(seed - mutate_sample).flatten()
    total = np.shape(diff)[0]
    changed_pixels = np.linalg.norm(diff, ord=0)
    max_change = np.linalg.norm(diff, ord=np.inf)
    if changed_pixels > pixels_change_rate * total:
        return bool(max_change < 256)
    return bool(max_change < pixel_value_change_rate * 255)
def split_axis(x, indices_or_sections, axis):
    """Splits given variables along an axis.
    Args:
        x (tuple of Variables): Variables to be split.
        indices_or_sections (int or 1-D array): An integer N divides the
            array into N equal arrays along ``axis``; a sorted 1-D array of
            integers gives the positions at which the array is split.
        axis (int): Axis that the input array is split along.
    Returns:
        ``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable` objects
        when there is more than one output, otherwise a single
        :class:`~chainer.Variable`.
    .. note::
        ``ValueError`` is raised if any output would be split to zero size
        (i.e. the `axis`-th value of its shape is zero).
    """
    # Build the function object first, then apply it to the input.
    splitter = SplitAxis(indices_or_sections, axis)
    return splitter(x)
def status(**kwargs):
    """Execute \"git status\" on the repository.

    Returns:
        dict: ``status`` holds the raw ``git status`` output; ``clean`` is
        True unless the output mentions the branch being ahead, modified
        files, or untracked files.
    """
    output = check_output(["git", "status"]).decode("utf-8")
    # Any of these keywords means the working tree or local branch
    # differs from the committed/pushed state.
    dirty_markers = ("ahead", "modified", "untracked")
    repo_clean = not any(marker in output for marker in dirty_markers)
    return {"clean": repo_clean, "status": output}
def preprocessing_raw_csv(PATH=".//tcdata//hy_round2_train_20200225//",
                          local_file_name="train.pkl"):
    """Load every trajectory csv under PATH, preprocess them in parallel,
    and pickle the processed trajectories to the local tmp directory.

    Returns a DataFrame that records, per trajectory, whether the speed or
    the coordinates were changed by the preprocessing step.
    """
    if PATH is None:
        raise ValueError("Invalid PATH !")
    # csv files are named "<int>.csv"; sort them numerically.
    file_names = sorted(os.listdir(PATH), key=lambda s: int(s.split(".")[0]))
    traj_data = [pd.read_csv(PATH + name, encoding="utf-8")
                 for name in file_names]
    # Preprocess every trajectory on a full pool of worker processes.
    print("\n@Multi-processing RAW CSV started:")
    print("-----------------------------")
    with mp.Pool(processes=mp.cpu_count()) as p:
        processed = list(tqdm(p.imap(preprocessing_traj, traj_data),
                              total=len(traj_data)))
    print("-----------------------------")
    print("@Multi-processing RAW CSV ended, to the local file: {}.\n".format(
        local_file_name))
    # Each worker returns (processed_trajectory, change_flags).
    traj_data = [traj for traj, _ in processed]
    change_record = pd.DataFrame([flags for _, flags in processed],
                                 columns=["speed_change", "coord_change"])
    # Persist the processed trajectories to the local path in *.pkl format.
    file_processor = LoadSave(PATH)
    file_processor.save_data(path=".//tcdata_tmp//{}".format(local_file_name),
                             data=traj_data)
    return change_record
def update_points(points):
    """Persist the maximum score to ``max_points.txt``.

    Args:
        points: Score to record; written as its ``str`` representation,
            overwriting any previous content.
    """
    # Context manager guarantees the handle is flushed and closed even on
    # error (the original opened the file and never closed it).
    with open('max_points.txt', 'w') as file:
        file.write(str(points))
def random_rewire (G, n, allow_self_loops=False):
    """Randomly rewire edges.
    This function performs a full rewire i.e., it will ensure the newly created
    edge contains all the same properties as the original.
    Parameters
    ----------
    G : NetworkX graph
        It is assumed that this is configured for use with NetEvo, with
        defined dynamics for each node or edge (as appropriate).
    n : int
        Number of edges to randomly rewire.
    allow_self_loops : boolean (default=False)
        Flag as to whether self loops are allowed.
    """
    nodes = list(G.nodes())
    for i in range(n):
        # Re-read the edge list on every round: earlier rewires invalidate
        # it (the original sampled from a stale list and could attempt to
        # remove an edge that no longer exists).
        edges = list(G.edges())
        # Pick a random existing edge
        (u, v) = edges[int(random.random()*G.number_of_edges())-1]
        # Attempt to find a new random edge (maximum 1000 trials)
        trial = 0
        while trial < 1000:
            new_u = int(random.random()*len(G))
            new_v = int(random.random()*len(G))
            if not G.has_edge(nodes[new_u], nodes[new_v]) and \
                    (allow_self_loops or new_u != new_v):
                break
            trial += 1
        # Rewire if max trials not reached
        if trial >= 1000:
            # print(...) works as a statement on Python 2 and a call on 3.
            print('WARNING: Could not rewire edge - max trials exceeded')
        else:
            # Carry the old edge's attributes over so the "full rewire"
            # contract documented above actually holds.
            properties = G.get_edge_data(u, v) or {}
            G.remove_edge(u, v)
            G.add_edge(nodes[new_u], nodes[new_v], **properties)
def find_listener_frequent_words(df, num):
    """
    Return the top frequent words spoken by listeners in a conversation
    dataframe from a certain subreddit.

    Args:
        df: A specified dataframe from a subreddit.
        num: A ranking number used for finding the top frequent words.
    Return:
        result: A dataframe showing the top frequent words.
    """
    # Listener turns are all turns except the opening one.
    listener_turns = df[df['dialog turn'] != 1]
    # Tokenize the listener turns, then count word frequencies.
    tokenized = compute_tokens(listener_turns)
    top_words = Counter(tokenized.sum()).most_common(num)
    return pd.DataFrame(top_words, columns = ["word", "count"])
def is_rescue_entry(boot_entry):
    """
    Report whether the given boot entry is a rescue entry.

    :param BootEntry boot_entry: Boot entry to assess
    :return: True is the entry is rescue
    :rtype: bool
    """
    kernel_name = boot_entry.kernel_image.lower()
    return 'rescue' in kernel_name
def plotLineChart(df, wb, sheetname, **kwargs):
    """Line chart of columns in given DataFrame
    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame with data
    wb : xlsxwriter.Workbook
    sheetname: : string
        Name of sheet to which data and plot should be written
    Other parameters
    ----------------
    subtype : string, optional
        Possible values: 'marker_only', 'straight_with_markers', 'straight', 'smooth_with_markers', 'smooth'
    title : string, optional
        Chart title
    style : int, optional
        Used to set the style of the chart to one of the 48 built-in styles available on the Design tab in Excel
    loc : (int, int) tuple, optional
        Row and column number where to locate the plot, if not specified the plot is placed to the right of the data
    secondary_y : iterable, optional
        list of columns whose scale goes on the secondary y-axis
    """
    # Write the data to the sheet first; the chart series reference those cells.
    worksheet = writeData(df, wb, sheetname, **kwargs)
    params = {'type': 'line'}
    if 'subtype' in kwargs:
        params['subtype'] = kwargs['subtype']
    chart = wb.add_chart(params)
    __addAxisInfo(chart, kwargs)
    addSeries(df, chart, sheetname, **kwargs)
    #Handle subtype here, since it is not actually an Xlsxwriter option for line charts
    # (the requested subtype is emulated by editing each series' option dict
    # directly; the exact keys below match Xlsxwriter's internal series format).
    if 'subtype' in kwargs:
        subtype = kwargs['subtype']
        if 'marker' in subtype:
            # Go through each series and define default values.
            for series in chart.series:
                # Set a marker type unless there is a user defined type.
                series['marker'] = {'type': 'automatic',
                                    'automatic': True,
                                    'defined': True,
                                    'line': {'defined': False},
                                    'fill': {'defined': False}
                                    }
        # Turn on smoothing if required
        if 'smooth' in subtype:
            for series in chart.series:
                series['smooth'] = True
        if subtype == 'marker_only':
            # Suppress the connecting line so only markers are drawn.
            for series in chart.series:
                series['line'] = {'width': 2.25,
                                  'none': 1,
                                  'defined': True,
                                  }
    # Insert the chart into the worksheet (with an offset).
    cell = __getLocation(df, kwargs)
    worksheet.insert_chart(cell, chart, {'x_scale': 2.0, 'y_scale': 2.0})
def estimate_cost(features, ssd):
    """Generate a TensorFlow subgraph to estimate the cost of an architecture.

    Applies a linear model -- kernel and bias deserialized from
    ``cost_model_data`` for the given search space -- to the feature vector.

    Args:
      features: A 1D float tensor containing features for a single network
        architecture.
      ssd: The name of the search space definition to use for the cost model.
    Returns:
      A scalar float tensor containing the estimated cost for the specified
      network architecture
    """
    # Deserialize the linear model weights: base64 -> raw bytes -> ndarray.
    raw_kernel = base64.decodebytes(cost_model_data.KERNEL_DATA[ssd])
    kernel = np.frombuffer(raw_kernel, cost_model_data.SERIALIZATION_DTYPE)
    kernel = kernel.reshape([-1, 1]).astype(np.float32)
    raw_bias = base64.decodebytes(cost_model_data.BIAS_DATA[ssd])
    bias = np.frombuffer(raw_bias, cost_model_data.SERIALIZATION_DTYPE)
    bias = bias.reshape([1]).astype(np.float32)
    with tf.name_scope('estimate_cost'):
        # Promote the single feature vector to a batch of one, apply the
        # affine map, then squeeze back to a scalar.
        batched = tf.expand_dims(features, axis=0)
        prediction = tf.linalg.matmul(batched, kernel)
        prediction = tf.nn.bias_add(prediction, bias)
        return tf.squeeze(prediction, axis=[0, 1])
def test_bind_gre_segment_vxlan_tunnel_advertised(
        f5_mech_driver, context, agent_with_vxlan_vtep, gre_segment):
    """Test proper behaviour when agent advertises vxlan tunnels.
    The network segment is gre and the only advertised tunnel type
    is vxlan
    Pass if no binding is performed
    """
    bound = f5_mech_driver.try_to_bind_segment_for_agent(
        context, gre_segment, agent_with_vxlan_vtep)
    # A gre segment must not bind against a vxlan-only agent.
    assert not bound
    context.set_binding.assert_not_called()
def load_bias_module(df, dataset):
    """
    Kick off bias-metric computation for ``dataset`` on a background thread.

    The worker thread is started but not joined, so the call returns
    immediately while ``bias.compute_bias_metrics`` runs asynchronously.
    """
    print('Launch dataset module thread : load_bias_module')
    worker = Thread(target=bias.compute_bias_metrics, args=(df, dataset,))
    worker.start()
def installFromRequests(requestOrPackages = "requests", fromRequests = True):
    """Install PyPI packages from a requests file or a list"""
    if fromRequests:
        # Treat the argument as a path to a requirements-style file.
        requestOrPackages = withoutFormat.getLines(requestOrPackages)
    bar = ProgressBar()
    l = len(requestOrPackages)
    for c, package in enumerate(requestOrPackages, 1):
        # Lines read from a file keep their newline; strip it.
        package = package.rstrip('\n')
        if isntInstalled(package):
            install_option_1(package)
        # Progress as a percentage of packages handled so far.
        bar.display(int(c / l * 100), 32, 24, True)
def short_bubble(l, debug=True):
    """
    Bubble sort with early exit.

    A plain bubble sort always runs every outer pass even when the list is
    already in ascending order; here, a pass that performs no swap ends
    the sort immediately.

    :param l: list to sort in place (also returned).
    :param debug: when True, print the list after each pass.
    :return: the same list, sorted ascending.
    """
    passes = 0
    keep_going = True
    while keep_going and passes < len(l):
        keep_going = False
        # Each pass bubbles the largest remaining element to the end,
        # so the tail of length `passes` is already sorted.
        for idx in range(len(l) - passes - 1):
            if l[idx] > l[idx + 1]:
                l[idx], l[idx + 1] = l[idx + 1], l[idx]
                keep_going = True
        if debug:
            print('iteration {}'.format(passes), l)
        passes += 1
    return l
def initdb_command():
    """ Initializes the database."""
    # Delegate the actual schema creation to init_db, then confirm on stdout.
    init_db()
    print('Initialized the database.')
def IsResourceLike(item):
    """Return True if item is a dict like object or list of dict like objects."""
    is_dict = yaml.dict_like(item)
    is_list_of_dicts = (yaml.list_like(item) and
                        all(yaml.dict_like(entry) for entry in item))
    return is_dict or is_list_of_dicts
def rad_to_gon(angle: float) -> float:
    """Converts an angle from radiant to gon (grad).

    A full circle is 2*pi rad and 400 gon, so the factor is 200/pi.

    Args:
        angle: Angle in rad.

    Returns:
        Converted angle in gon.
    """
    # Multiply first, then divide: same operation order as `angle * 200 / pi`
    # so the floating-point result is bit-identical.
    scaled = angle * 200
    return scaled / math.pi
def check_interface_status(conn_obj, interface, state, device="dut"):
    """
    API to check the interface state
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param conn_obj: device connection object
    :param interface: interface name to query
    :param state: expected state value
    :param device: which device to query (default "dut")
    :return: True if the interface is in the expected state, else False
    """
    # Single equality test replaces the if/return-False/return-True chain.
    return get_interface_status(conn_obj, interface, device=device) == state
def test_efficiency():
    """Test solved per (2^n)-1 algorithm for lowest move count."""
    assert not t(0)[3]  # zero disks require no moves
    # n disks must be solved in exactly 2**n - 1 moves
    for disks in (1, 2, 3, 10):
        assert t(disks)[3] == 2 ** disks - 1
def hungarian_match(self, y_true, y_pred):
    """Matches predicted labels to original using hungarian algorithm.

    Both label arrays are first normalised to a common zero-based range
    via ``adjust_range``; the Hungarian algorithm then finds the
    pred->true relabelling that maximises agreement, and ``y_pred`` is
    rewritten under that mapping.

    Returns the adjusted ``y_true`` and the relabelled ``y_pred``.
    """
    y_true = self.adjust_range(y_true)
    y_pred = self.adjust_range(y_pred)
    # D = number of distinct labels across both arrays (labels are 0..D-1
    # after adjust_range).
    D = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    # Confusion matrix.
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    # Negate so minimising assignment cost maximises label agreement.
    # NOTE(review): assumes linear_assignment yields iterable (row, col)
    # pairs (old sklearn style), not scipy's two-array form — confirm.
    ind = linear_assignment(-w)
    d = {i:j for i, j in ind}
    y_pred = np.array([d[v] for v in y_pred])
    return y_true, y_pred
def orthantree(scaled, capacity=8):
    """Constructs a :ref:`tree <presolve>` for the given :func:`~pybbfmm.scale`'d problem.
    This is a bit of a mess of a function, but long story short it starts with all the sources allocated to the root
    and repeatedly subdivides overfull boxes, constructing the various tree tensors as it goes.
    :param scaled: :func:`~pybbfmm.scale`'d problem.
    :param capacity: the max number of sources or targets per box.
    :return: A :ref:`tree <presolve>`.
    """
    D = scaled.sources.shape[1]
    # Sources first, then targets; `indices` maps every point to its box id,
    # starting with everything in the root box 0.
    points = torch.cat([scaled.sources, scaled.targets])
    indices = points.new_zeros((len(points),), dtype=torch.long)
    # Root-only tree: no parent (-1), depth 0, centered at the origin.
    tree = arrdict.arrdict(
        parents=indices.new_full((1,), -1),
        depths=indices.new_zeros((1,)),
        centers=points.new_zeros((1, D)),
        terminal=indices.new_ones((1,), dtype=torch.bool),
        children=indices.new_full((1,) + (2,)*D, -1),
        descent=indices.new_zeros((1, D)))
    # Powers of two used to flatten a D-dim child subscript into an offset.
    bases = 2**torch.flip(torch.arange(D, device=indices.device), (0,))
    # All 2**D child subscripts (0/1 per axis) and the matching center signs.
    subscript_offsets = sets.cartesian_product(torch.tensor([0, 1], device=indices.device), D)
    center_offsets = sets.cartesian_product(torch.tensor([-1, +1], device=indices.device), D)
    depthcounts = [torch.as_tensor([1], device=indices.device)]
    depth = 0
    while True:
        # Boxes that currently hold at least one point.
        used, used_inv = torch.unique(indices, return_inverse=True)
        source_idxs, target_idxs = indices[:len(scaled.sources)], indices[-len(scaled.targets):]
        # A box stays terminal when it is under capacity; others get split.
        tree.terminal[used] = underoccupied(source_idxs, target_idxs, tree.terminal, capacity)[used]
        used_is_active = ~tree.terminal[used]
        point_is_active = used_is_active[used_inv]
        if not point_is_active.any():
            break
        depth += 1
        # Boxes to subdivide this round, and each active point's position in
        # that (compacted) list of active boxes.
        active = used[used_is_active]
        active_inv = (used_is_active.cumsum(0) - used_is_active.long())[used_inv[point_is_active]]
        # Children are appended in contiguous blocks of 2**D per active box.
        first_child = len(tree.parents) + 2**D*torch.arange(len(active), device=active.device)
        # Which child octant/quadrant each point falls into, as a flat offset.
        point_offset = ((points[point_is_active] >= tree.centers[active][active_inv])*bases).sum(-1)
        child_box = first_child[active_inv] + point_offset
        indices[point_is_active] = child_box
        trailing_ones = (slice(None),) + (None,)*D
        tree.children[active] = first_child[trailing_ones] + (subscript_offsets*bases).sum(-1)
        # Child centers sit half a (current-depth) box-width from the parent.
        centers = tree.centers[active][trailing_ones] + center_offsets.float()/2**depth
        descent = center_offsets[None].expand_as(centers)
        n_children = len(active)*2**D
        # New child boxes start terminal with no children of their own.
        children = arrdict.arrdict(
            parents=active.repeat_interleave(2**D),
            depths=tree.depths.new_full((n_children,), depth),
            centers=centers.reshape(-1, D),
            descent=descent.reshape(-1, D),
            terminal=tree.terminal.new_ones((n_children,)),
            children=tree.children.new_full((n_children,) + (2,)*D, -1))
        tree = arrdict.cat([tree, children])
        depthcounts.append(n_children)
    tree['id'] = torch.arange(len(tree.parents), device=points.device)
    # Split the per-point box assignment back into sources and targets.
    indices = arrdict.arrdict(
        sources=indices[:len(scaled.sources)],
        targets=indices[-len(scaled.targets):])
    # Ragged view of box ids grouped by the depth at which they were created.
    depths = ragged.Ragged(
        torch.arange(len(tree.id), device=points.device),
        torch.as_tensor(depthcounts, device=points.device))
    return tree, indices, depths
def make_sparse(
    docs_to_fit, min_df=50, stop_words=None,
    docs_to_transform=None, ngram_range=None,
):
    """
    Take a pre-tokenized document and turn into a sparse matrix.
    :param docs_to_fit: A list of lists of tokenized words to build the vocabulary from.
    :param min_df: Number of records that a word should appear in to be stored as a feature.
    :param stop_words: List of words to exclude, if any.
    :param docs_to_transform: A list of lists of tokenized words to transform. If none, we transform the first argument.
    :param ngram_range: (min_n, max_n) bounds forwarded to CountVectorizer.
    :return: The vectorizer and the resulting sparse matrix.
    """
    # Input is already tokenized, so the vectorizer gets a pass-through
    # tokenizer and no preprocessing or lowercasing.
    cv = CountVectorizer(
        tokenizer=no_tokenization, preprocessor=None, ngram_range=ngram_range,
        stop_words=stop_words, lowercase=False, min_df=min_df
    )
    if docs_to_transform is None:
        return cv, cv.fit_transform(docs_to_fit)
    # (was a redundant `elif docs_to_transform is not None`)
    cv.fit(docs_to_fit)
    return cv, cv.transform(docs_to_transform)
def weightedPriorityReliabilityScore(service_instances, last_records):
    """
    Algorithm to find highest priority of the service based on
    reliability score achieved in past discovery results.

    Each instance's score is a recency-weighted sum over its last
    ``last_records`` discovery results: the record ``k`` places from the
    end contributes ``+(k+1)`` on SUCCESS and ``-(k+1)`` on FAIL.
    Ties for the top score are broken uniformly at random.

    :param service_instances: sequence of rows whose first three fields
        are (name, ip, port).
    :param last_records: how many recent discovery-log rows to weigh.
    :return: (ip, port) of the chosen instance, both as strings.
    """
    priority_list = []
    for inst in service_instances:
        name, ip, port = inst[0], inst[1], inst[2]
        score = 0.0
        discovery_instances = sharkradarDbutils.getLatestRecordsDiscoveryLogs(
            name, ip, port, last_records)
        len_discovery = len(discovery_instances)
        # BUG FIX: the original reused `i` for this inner loop, shadowing
        # the outer loop index — harmless in CPython but a latent hazard.
        for j, record in enumerate(discovery_instances):
            weight = 1.0 * (len_discovery - j)
            if record[0] == "FAIL":
                score -= weight
            elif record[0] == "SUCCESS":
                score += weight
        priority_list.append({'ip': ip, 'port': port, 'score': score})
    priority_list.sort(key=lambda x: x['score'], reverse=True)
    # Collect every instance tied for the best score and pick one at random.
    best_score = priority_list[0]['score']
    top_candidates = [p for p in priority_list if p['score'] == best_score]
    chosen = random.choice(top_candidates)
    return str(chosen['ip']), str(chosen['port'])
def calculate_output(param_dict, select_device, input_example):
    """Calculate the output of the imported graph given the input.
    Load the graph def from graph file on selected device, then get the tensors based on the input and output name from the graph,
    then feed the input_example to the graph and retrieves the output vector.
    Args:
        param_dict: The dictionary contains all the user-input data in the json file.
        select_device: "NGRAPH" or "CPU".
        input_example: A map with key is the name of the input tensor, and value is the random generated example
    Returns:
        The output vector obtained from running the input_example through the graph,
        together with the list of output tensor names that were evaluated.
    Raises:
        Exception: if the graph file named in param_dict does not exist.
    """
    graph_filename = param_dict["graph_location"]
    output_tensor_name = param_dict["output_tensor_name"]
    if not tf.gfile.Exists(graph_filename):
        raise Exception("Input graph file '" + graph_filename +
                        "' does not exist!")
    graph_def = tf.GraphDef()
    # .pbtxt files are text protos; anything else is treated as binary.
    if graph_filename.endswith("pbtxt"):
        with open(graph_filename, "r") as f:
            text_format.Merge(f.read(), graph_def)
    else:
        with open(graph_filename, "rb") as f:
            graph_def.ParseFromString(f.read())
    # Select the backend via environment variables before building the graph.
    set_os_env(select_device)
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
    if len(output_tensor_name) == 0:
        # if no outputs are specified, then compare for all tensors
        output_tensor_name = sum(
            [[j.name for j in i.outputs] for i in graph.get_operations()],
            [])
    # Create the tensor to its corresponding example map
    tensor_to_example_map = {}
    for item in input_example:
        t = graph.get_tensor_by_name(item)
        tensor_to_example_map[t] = input_example[item]
    #input_placeholder = graph.get_tensor_by_name(input_tensor_name)
    output_tensor = [graph.get_tensor_by_name(i) for i in output_tensor_name]
    # Soft placement lets ops fall back to CPU when the selected device
    # does not implement them.
    config = tf.ConfigProto(
        allow_soft_placement=True,
        # log_device_placement=True,
        inter_op_parallelism_threads=1)
    with tf.Session(graph=graph, config=config) as sess:
        output_tensor = sess.run(output_tensor, feed_dict=tensor_to_example_map)
    return output_tensor, output_tensor_name
def is_instance_failed_alarm(alarms, instance, guest_hb=False):
    """
    Check if an instance failed alarm has been raised

    :param alarms: alarms to search through
    :param instance: instance the alarm must relate to
    :param guest_hb: unused here; NOTE(review): presumably kept for
        signature parity with sibling alarm checks — confirm it should
        be ignored for the VM-failed alarm
    :return: True if a critical VM-failed alarm is raised for the instance
    """
    expected_alarm = {'alarm_id': fm_constants.FM_ALARM_ID_VM_FAILED,
                      'severity': fm_constants.FM_ALARM_SEVERITY_CRITICAL}
    return _instance_alarm_raised(alarms, expected_alarm, instance)
def load_metad_fes(fes_dir_path):
    """Load all the PLUMED free energy profiles saved by sumhills.

    :param fes_dir_path: directory containing ``fes_time.dat`` plus one
        ``fes_<i>.dat`` profile per time point.
    :return: (list of per-time-point FES tables, the fes_time table).
    """
    from modules.plumedwrapper import io as plumedio
    fes_file_prefix_path = os.path.join(fes_dir_path, 'fes_')
    # fes_time.dat has one 'time' entry per saved profile; read the
    # matching fes_<i>.dat for each of them.
    fes_time = plumedio.read_table(fes_file_prefix_path + 'time.dat')
    all_metad_fes = [
        plumedio.read_table(fes_file_prefix_path + str(i) + '.dat')
        for i in range(len(fes_time['time']))
    ]
    return all_metad_fes, fes_time
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.