| _id (string, lengths 2–7) | title (string, lengths 1–88) | partition (3 classes) | text (string, lengths 75–19.8k) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q22300
|
_basis2name
|
train
|
def _basis2name(basis):
"""
    Convert the `basis` string into the proper component name.
"""
component_name = (
'DC' if basis == 'diffmap'
else 'tSNE' if basis == 'tsne'
else 'UMAP' if basis == 'umap'
else 'PC' if basis == 'pca'
else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis
else basis)
return component_name
|
python
|
{
"resource": ""
}
|
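A minimal usage sketch for `_basis2name` above, assuming the function is in scope; the basis strings are illustrative:

for basis in ['pca', 'tsne', 'umap', 'diffmap', 'draw_graph_fa']:
    # expected output: PC, tSNE, UMAP, DC, FA
    print(basis, '->', _basis2name(basis))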
q22301
|
dendrogram
|
train
|
def dendrogram(adata: AnnData, groupby: str,
n_pcs: Optional[int]=None,
use_rep: Optional[str]=None,
var_names: Optional[List[str]]=None,
use_raw: Optional[bool]=None,
cor_method: Optional[str]='pearson',
linkage_method: Optional[str]='complete',
key_added: Optional[str]=None) -> None:
"""\
Computes a hierarchical clustering for the given `groupby` categories.
    By default, the PCA representation is used unless `.X` has fewer than 50 variables.
Alternatively, a list of `var_names` (e.g. genes) can be given.
Average values of either `var_names` or components are used to compute a correlation matrix.
The hierarchical clustering can be visualized using `sc.pl.dendrogram` or multiple other
visualizations that can include a dendrogram: `matrixplot`, `heatmap`, `dotplot` and `stacked_violin`
.. note::
The computation of the hierarchical clustering is based on predefined groups and not
        per cell. The correlation matrix is computed using Pearson correlation by default,
        but other methods are available.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix
{n_pcs}
{use_rep}
var_names : `list of str` (default: None)
List of var_names to use for computing the hierarchical clustering. If `var_names` is given,
        then `use_rep` and `n_pcs` are ignored.
use_raw : `bool`, optional (default: None)
Only when `var_names` is not None. Use `raw` attribute of `adata` if present.
cor_method : `str`, optional (default: `"pearson"`)
        Correlation method to use. Options are 'pearson', 'kendall', and 'spearman'.
linkage_method : `str`, optional (default: `"complete"`)
linkage method to use. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
for more information.
    key_added : `str`, optional (default: `None`)
By default, the dendrogram information is added to `.uns['dendrogram_' + groupby]`. Notice
that the `groupby` information is added to the dendrogram.
Returns
-------
    `adata.uns['dendrogram_' + groupby]` (or `adata.uns[key_added]` if `key_added` is given) is updated
    with the dendrogram information.
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.dendrogram(adata, groupby='bulk_labels')
>>> sc.pl.dendrogram(adata)
>>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
... groupby='bulk_labels', dendrogram=True)
"""
if groupby not in adata.obs_keys():
raise ValueError('groupby has to be a valid observation. Given value: {}, '
'valid observations: {}'.format(groupby, adata.obs_keys()))
if not is_categorical_dtype(adata.obs[groupby]):
        # the groupby column must already be categorical; unlike `_prepare_dataframe`,
        # this function does not subdivide non-categorical columns automatically
raise ValueError('groupby has to be a categorical observation. Given value: {}, '
'Column type: {}'.format(groupby, adata.obs[groupby].dtype))
if var_names is None:
rep_df = pd.DataFrame(choose_representation(adata, use_rep=use_rep, n_pcs=n_pcs))
rep_df.set_index(adata.obs[groupby], inplace=True)
categories = rep_df.index.categories
else:
if use_raw is None and adata.raw is not None: use_raw = True
gene_names = adata.raw.var_names if use_raw else adata.var_names
from ..plotting._anndata import _prepare_dataframe
categories, rep_df = _prepare_dataframe(adata, gene_names, groupby, use_raw)
if key_added is None:
key_added = 'dendrogram_' + groupby
logg.info('Storing dendrogram info using `.uns[{!r}]`'.format(key_added))
# aggregate values within categories using 'mean'
mean_df = rep_df.groupby(level=0).mean()
import scipy.cluster.hierarchy as sch
corr_matrix = mean_df.T.corr(method=cor_method)
z_var = sch.linkage(corr_matrix, method=linkage_method)
dendro_info = sch.dendrogram(z_var, labels=categories, no_plot=True)
# order of groupby categories
categories_idx_ordered = dendro_info['leaves']
adata.uns[key_added] = {'linkage': z_var,
'groupby': groupby,
'use_rep': use_rep,
'cor_method': cor_method,
'linkage_method': linkage_method,
'categories_idx_ordered': categories_idx_ordered,
'dendrogram_info': dendro_info,
'correlation_matrix': corr_matrix.values}
|
python
|
{
"resource": ""
}
|
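A hedged sketch of inspecting what `dendrogram` stores, assuming scanpy is installed and the example dataset can be downloaded; the keys shown are the ones written at the end of the function above:

import scanpy as sc

adata = sc.datasets.pbmc68k_reduced()
sc.tl.dendrogram(adata, groupby='bulk_labels')
info = adata.uns['dendrogram_bulk_labels']  # default key: 'dendrogram_' + groupby
print(sorted(info.keys()))                  # 'categories_idx_ordered', 'correlation_matrix', 'dendrogram_info', ...
print(info['categories_idx_ordered'])       # order of the groupby categories after clustering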
q22302
|
paga_compare
|
train
|
def paga_compare(
adata,
basis=None,
edges=False,
color=None,
alpha=None,
groups=None,
components=None,
projection='2d',
legend_loc='on data',
legend_fontsize=None,
legend_fontweight='bold',
color_map=None,
palette=None,
frameon=False,
size=None,
title=None,
right_margin=None,
left_margin=0.05,
show=None,
save=None,
title_graph=None,
groups_graph=None,
**paga_graph_params):
"""Scatter and PAGA graph side-by-side.
    Consists of a scatter plot and the abstracted graph. See
:func:`~scanpy.api.pl.paga` for all related parameters.
See :func:`~scanpy.api.pl.paga_path` for visualizing gene changes along paths
through the abstracted graph.
Additional parameters are as follows.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
kwds_scatter : `dict`
Keywords for :func:`~scanpy.api.pl.scatter`.
kwds_paga : `dict`
Keywords for :func:`~scanpy.api.pl.paga`.
Returns
-------
A list of `matplotlib.axes.Axes` if `show` is `False`.
"""
axs, _, _, _ = utils.setup_axes(panels=[0, 1],
right_margin=right_margin)
if color is None:
color = adata.uns['paga']['groups']
suptitle = None # common title for entire figure
if title_graph is None:
suptitle = color if title is None else title
title, title_graph = '', ''
if basis is None:
if 'X_draw_graph_fa' in adata.obsm.keys():
basis = 'draw_graph_fa'
elif 'X_umap' in adata.obsm.keys():
basis = 'umap'
elif 'X_tsne' in adata.obsm.keys():
basis = 'tsne'
elif 'X_draw_graph_fr' in adata.obsm.keys():
basis = 'draw_graph_fr'
else:
basis = 'umap'
from .scatterplots import plot_scatter
plot_scatter(
adata,
ax=axs[0],
basis=basis,
color=color,
edges=edges,
alpha=alpha,
groups=groups,
components=components,
legend_loc=legend_loc,
legend_fontsize=legend_fontsize,
legend_fontweight=legend_fontweight,
color_map=color_map,
palette=palette,
frameon=frameon,
size=size,
title=title,
show=False,
save=False)
if 'pos' not in paga_graph_params:
if color == adata.uns['paga']['groups']:
paga_graph_params['pos'] = utils._tmp_cluster_pos
else:
paga_graph_params['pos'] = adata.uns['paga']['pos']
xlim, ylim = axs[0].get_xlim(), axs[0].get_ylim()
axs[1].set_xlim(xlim)
axs[1].set_ylim(ylim)
if 'labels' in paga_graph_params:
labels = paga_graph_params.pop('labels')
else:
labels = groups_graph
paga(
adata,
ax=axs[1],
show=False,
save=False,
title=title_graph,
labels=labels,
colors=color,
frameon=frameon,
**paga_graph_params)
if suptitle is not None: pl.suptitle(suptitle)
utils.savefig_or_show('paga_compare', show=show, save=save)
if show == False: return axs
|
python
|
{
"resource": ""
}
|
q22303
|
paga_adjacency
|
train
|
def paga_adjacency(
adata,
adjacency='connectivities',
adjacency_tree='connectivities_tree',
as_heatmap=True,
color_map=None,
show=None,
save=None):
"""Connectivity of paga groups.
"""
connectivity = adata.uns[adjacency].toarray()
connectivity_select = adata.uns[adjacency_tree]
if as_heatmap:
matrix(connectivity, color_map=color_map, show=False)
for i in range(connectivity_select.shape[0]):
neighbors = connectivity_select[i].nonzero()[1]
pl.scatter([i for j in neighbors], neighbors, color='black', s=1)
# as a stripplot
else:
pl.figure()
for i, cs in enumerate(connectivity):
x = [i for j, d in enumerate(cs) if i != j]
y = [c for j, c in enumerate(cs) if i != j]
pl.scatter(x, y, color='gray', s=1)
neighbors = connectivity_select[i].nonzero()[1]
pl.scatter([i for j in neighbors],
cs[neighbors], color='black', s=1)
utils.savefig_or_show('paga_connectivity', show=show, save=save)
|
python
|
{
"resource": ""
}
|
q22304
|
clustermap
|
train
|
def clustermap(
adata, obs_keys=None, use_raw=None, show=None, save=None, **kwds):
"""\
Hierarchically-clustered heatmap.
Wraps `seaborn.clustermap
<https://seaborn.pydata.org/generated/seaborn.clustermap.html>`__ for
:class:`~anndata.AnnData`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
obs_keys : `str`
Categorical annotation to plot with a different color map.
Currently, only a single key is supported.
use_raw : `bool`, optional (default: `None`)
Use `raw` attribute of `adata` if present.
{show_save_ax}
**kwds : keyword arguments
Keyword arguments passed to `seaborn.clustermap
<https://seaborn.pydata.org/generated/seaborn.clustermap.html>`__.
Returns
-------
If `show == False`, a `seaborn.ClusterGrid` object.
Notes
-----
The returned object has a savefig() method that should be used if you want
to save the figure object without clipping the dendrograms.
To access the reordered row indices, use:
clustergrid.dendrogram_row.reordered_ind
Column indices, use: clustergrid.dendrogram_col.reordered_ind
Examples
--------
    Soon to come with figures. In the meantime, see
https://seaborn.pydata.org/generated/seaborn.clustermap.html.
>>> import scanpy.api as sc
>>> adata = sc.datasets.krumsiek11()
>>> sc.pl.clustermap(adata, obs_keys='cell_type')
"""
if not isinstance(obs_keys, (str, type(None))):
raise ValueError('Currently, only a single key is supported.')
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None: use_raw = True
X = adata.raw.X if use_raw else adata.X
if issparse(X):
X = X.toarray()
df = pd.DataFrame(X, index=adata.obs_names, columns=adata.var_names)
if obs_keys is not None:
row_colors = adata.obs[obs_keys]
utils.add_colors_for_categorical_sample_annotation(adata, obs_keys)
# do this more efficiently... just a quick solution
lut = dict(zip(
row_colors.cat.categories,
adata.uns[obs_keys + '_colors']))
row_colors = adata.obs[obs_keys].map(lut)
g = sns.clustermap(df, row_colors=row_colors, **kwds)
else:
g = sns.clustermap(df, **kwds)
show = settings.autoshow if show is None else show
if show: pl.show()
else: return g
|
python
|
{
"resource": ""
}
|
q22305
|
dendrogram
|
train
|
def dendrogram(adata, groupby, dendrogram_key=None, orientation='top', remove_labels=False,
show=None, save=None):
"""Plots a dendrogram of the categories defined in `groupby`.
See :func:`~scanpy.tl.dendrogram`.
Parameters
----------
adata : :class:`~anndata.AnnData`
groupby : `str`
Categorical data column used to create the dendrogram
dendrogram_key : `str`, optional(default: `None`)
        Key under which the dendrogram information was stored.
By default the dendrogram information is stored under .uns['dendrogram_' + groupby].
orientation : `str`, optional(default: `top`)
Options are `top`, `bottom`, `left`, and `right`. Only when `show_correlation` is False
remove_labels : `bool`, optional(default: `False`)
{show_save_ax}
Returns
-------
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.dendrogram(adata, 'bulk_labels')
>>> sc.pl.dendrogram(adata, 'bulk_labels')
"""
fig, ax = pl.subplots()
_plot_dendrogram(ax, adata, groupby, dendrogram_key=dendrogram_key,
remove_labels=remove_labels, orientation=orientation)
utils.savefig_or_show('dendrogram', show=show, save=save)
return ax
|
python
|
{
"resource": ""
}
|
q22306
|
_prepare_dataframe
|
train
|
def _prepare_dataframe(adata, var_names, groupby=None, use_raw=None, log=False,
num_categories=7, layer=None, gene_symbols=None):
"""
    Given the anndata object, prepares a data frame in which the row index corresponds to the
    categories defined by `groupby` and the columns correspond to `var_names`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
var_names : `str` or list of `str`
`var_names` should be a valid subset of `adata.var_names`.
groupby : `str` or `None`, optional (default: `None`)
The key of the observation grouping to consider. It is expected that
groupby is a categorical. If groupby is not a categorical observation,
        it will be subdivided into `num_categories` groups.
log : `bool`, optional (default: `False`)
Use the log of the values
use_raw : `bool`, optional (default: `None`)
Use `raw` attribute of `adata` if present.
num_categories : `int`, optional (default: `7`)
Only used if groupby observation is not categorical. This value
determines the number of groups into which the groupby observation
should be subdivided.
gene_symbols : string, optional (default: `None`)
Key for field in .var that stores gene symbols.
Returns
-------
    Tuple of list of categories and `pandas.DataFrame` (in that order).
"""
from scipy.sparse import issparse
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None: use_raw = True
if isinstance(var_names, str):
var_names = [var_names]
if groupby is not None:
if groupby not in adata.obs_keys():
raise ValueError('groupby has to be a valid observation. Given value: {}, '
'valid observations: {}'.format(groupby, adata.obs_keys()))
if gene_symbols is not None and gene_symbols in adata.var.columns:
# translate gene_symbols to var_names
        # slow method but gives a meaningful error in case no gene symbol is found:
translated_var_names = []
for symbol in var_names:
if symbol not in adata.var[gene_symbols].values:
logg.error("Gene symbol {!r} not found in given gene_symbols column: {!r}".format(symbol, gene_symbols))
return
translated_var_names.append(adata.var[adata.var[gene_symbols] == symbol].index[0])
symbols = var_names
var_names = translated_var_names
if layer is not None:
if layer not in adata.layers.keys():
raise KeyError('Selected layer: {} is not in the layers list. The list of '
'valid layers is: {}'.format(layer, adata.layers.keys()))
matrix = adata[:, var_names].layers[layer]
elif use_raw:
matrix = adata.raw[:, var_names].X
else:
matrix = adata[:, var_names].X
if issparse(matrix):
matrix = matrix.toarray()
if log:
matrix = np.log1p(matrix)
obs_tidy = pd.DataFrame(matrix, columns=var_names)
if groupby is None:
groupby = ''
categorical = pd.Series(np.repeat('', len(obs_tidy))).astype('category')
else:
if not is_categorical_dtype(adata.obs[groupby]):
# if the groupby column is not categorical, turn it into one
# by subdividing into `num_categories` categories
categorical = pd.cut(adata.obs[groupby], num_categories)
else:
categorical = adata.obs[groupby]
obs_tidy.set_index(categorical, groupby, inplace=True)
if gene_symbols is not None:
# translate the column names to the symbol names
obs_tidy.rename(columns=dict([(var_names[x], symbols[x]) for x in range(len(var_names))]), inplace=True)
categories = obs_tidy.index.categories
return categories, obs_tidy
|
python
|
{
"resource": ""
}
|
q22307
|
_reorder_categories_after_dendrogram
|
train
|
def _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
var_names=None,
var_group_labels=None,
var_group_positions=None):
"""
    Function used by plotting functions that need to reorder the groupby observations
based on the dendrogram results.
The function checks if a dendrogram has already been precomputed. If not, sc.tl.dendrogram
is run with default parameters.
The results found in .uns[dendrogram_key] are used to reorder var_group_labels
and var_group_positions.
Returns
-------
    dictionary with keys: 'categories_idx_ordered', 'var_names_idx_ordered',
    'var_group_labels', and 'var_group_positions'
"""
key = _get_dendrogram_key(adata, dendrogram, groupby)
dendro_info = adata.uns[key]
if groupby != dendro_info['groupby']:
raise ValueError("Incompatible observations. The precomputed dendrogram contains information "
"for the observation: '{}' while the plot is made for the "
"observation: '{}. Please run sc.tl.dendrogram "
"using the right observation.'".format(groupby, dendro_info['groupby']))
has_var_groups = True if var_group_positions is not None and len(var_group_positions) > 0 else False
categories = adata.obs[dendro_info['groupby']].cat.categories
# order of groupby categories
categories_idx_ordered = dendro_info['categories_idx_ordered']
if len(categories) != len(categories_idx_ordered):
raise ValueError("Incompatible observations. Dendrogram data has {} categories but "
"current groupby observation {!r} contains {} categories. Most likely "
"the underlying groupby observation changed after the initial "
"computation of `sc.tl.dendrogram`. Please run sc.tl.dendrogram "
"again.'".format(len(categories_idx_ordered),
groupby, len(categories)))
# reorder var_groups (if any)
if var_names is not None:
var_names_idx_ordered = list(range(len(var_names)))
if has_var_groups:
if list(var_group_labels) == list(categories):
positions_ordered = []
labels_ordered = []
position_start = 0
var_names_idx_ordered = []
for idx in categories_idx_ordered:
position = var_group_positions[idx]
_var_names = var_names[position[0]:position[1] + 1]
var_names_idx_ordered.extend(range(position[0], position[1] + 1))
positions_ordered.append((position_start, position_start + len(_var_names) - 1))
position_start += len(_var_names)
labels_ordered.append(var_group_labels[idx])
var_group_labels = labels_ordered
var_group_positions = positions_ordered
else:
logg.warn("Groups are not reordered because the `groupby` categories "
"and the `var_group_labels` are different.\n"
"categories: {}\nvar_group_labels: {}".format(
_format_first_three_categories(categories),
_format_first_three_categories(var_group_labels)))
else:
var_names_idx_ordered = None
var_group_data = {'categories_idx_ordered': categories_idx_ordered,
'var_names_idx_ordered': var_names_idx_ordered,
'var_group_labels': var_group_labels,
'var_group_positions': var_group_positions}
return var_group_data
|
python
|
{
"resource": ""
}
|
q22308
|
_plot_categories_as_colorblocks
|
train
|
def _plot_categories_as_colorblocks(groupby_ax, obs_tidy, colors=None, orientation='left', cmap_name='tab20'):
"""
Plots categories as colored blocks. If orientation is 'left', the categories are plotted vertically, otherwise
they are plotted horizontally.
Parameters
----------
groupby_ax : matplotlib ax
obs_tidy
colors : list of valid color names optional (default: `None`)
Color to use for each category.
orientation : `str`, optional (default: `left`)
cmap_name : `str`
Name of colormap to use, in case colors is None
Returns
-------
ticks position, labels, colormap
"""
groupby = obs_tidy.index.name
from matplotlib.colors import ListedColormap, BoundaryNorm
if colors is None:
groupby_cmap = pl.get_cmap(cmap_name)
else:
groupby_cmap = ListedColormap(colors, groupby + '_cmap')
norm = BoundaryNorm(np.arange(groupby_cmap.N+1)-.5, groupby_cmap.N)
# determine groupby label positions such that they appear
    # centered next to or below the color-code rectangle assigned to the category
value_sum = 0
ticks = [] # list of centered position of the labels
labels = []
    label2code = {}  # dictionary of numerical values assigned to each label
for code, (label, value) in enumerate(obs_tidy.index.value_counts(sort=False).iteritems()):
ticks.append(value_sum + (value / 2))
labels.append(label)
value_sum += value
label2code[label] = code
groupby_ax.grid(False)
if orientation == 'left':
groupby_ax.imshow(np.matrix([label2code[lab] for lab in obs_tidy.index]).T, aspect='auto', cmap=groupby_cmap, norm=norm)
if len(labels) > 1:
groupby_ax.set_yticks(ticks)
groupby_ax.set_yticklabels(labels)
# remove y ticks
groupby_ax.tick_params(axis='y', left=False, labelsize='small')
# remove x ticks and labels
groupby_ax.tick_params(axis='x', bottom=False, labelbottom=False)
# remove surrounding lines
groupby_ax.spines['right'].set_visible(False)
groupby_ax.spines['top'].set_visible(False)
groupby_ax.spines['left'].set_visible(False)
groupby_ax.spines['bottom'].set_visible(False)
groupby_ax.set_ylabel(groupby)
else:
groupby_ax.imshow(np.matrix([label2code[lab] for lab in obs_tidy.index]), aspect='auto', cmap=groupby_cmap, norm=norm)
if len(labels) > 1:
groupby_ax.set_xticks(ticks)
if max([len(x) for x in labels]) < 3:
# if the labels are small do not rotate them
rotation = 0
else:
rotation = 90
groupby_ax.set_xticklabels(labels, rotation=rotation)
# remove x ticks
groupby_ax.tick_params(axis='x', bottom=False, labelsize='small')
# remove y ticks and labels
groupby_ax.tick_params(axis='y', left=False, labelleft=False)
# remove surrounding lines
groupby_ax.spines['right'].set_visible(False)
groupby_ax.spines['top'].set_visible(False)
groupby_ax.spines['left'].set_visible(False)
groupby_ax.spines['bottom'].set_visible(False)
groupby_ax.set_xlabel(groupby)
return ticks, labels, groupby_cmap, norm
|
python
|
{
"resource": ""
}
|
q22309
|
DPT.branchings_segments
|
train
|
def branchings_segments(self):
"""Detect branchings and partition the data into corresponding segments.
Detect all branchings up to `n_branchings`.
Writes
------
segs : np.ndarray
Array of dimension (number of segments) × (number of data
points). Each row stores a mask array that defines a segment.
segs_tips : np.ndarray
Array of dimension (number of segments) × 2. Each row stores the
indices of the two tip points of each segment.
segs_names : np.ndarray
Array of dimension (number of data points). Stores an integer label
for each segment.
"""
self.detect_branchings()
self.postprocess_segments()
self.set_segs_names()
self.order_pseudotime()
|
python
|
{
"resource": ""
}
|
q22310
|
DPT.detect_branchings
|
train
|
def detect_branchings(self):
"""Detect all branchings up to `n_branchings`.
Writes Attributes
-----------------
segs : np.ndarray
List of integer index arrays.
segs_tips : np.ndarray
List of indices of the tips of segments.
"""
logg.m(' detect', self.n_branchings,
'branching' + ('' if self.n_branchings == 1 else 's'))
# a segment is a subset of points of the data set (defined by the
# indices of the points in the segment)
# initialize the search for branchings with a single segment,
# that is, get the indices of the whole data set
indices_all = np.arange(self._adata.shape[0], dtype=int)
# let's keep a list of segments, the first segment to add is the
# whole data set
segs = [indices_all]
# a segment can as well be defined by the two points that have maximal
# distance in the segment, the "tips" of the segment
#
# the rest of the points in the segment is then defined by demanding
# them to "be close to the line segment that connects the tips", that
# is, for such a point, the normalized added distance to both tips is
# smaller than one:
# (D[tips[0],i] + D[tips[1],i])/D[tips[0],tips[1] < 1
# of course, this condition is fulfilled by the full cylindrical
# subspace surrounding that line segment, where the radius of the
# cylinder can be infinite
#
        # if D denotes a Euclidean distance matrix, a line segment is a linear
# object, and the name "line" is justified. if we take the
# diffusion-based distance matrix Dchosen, which approximates geodesic
# distance, with "line", we mean the shortest path between two points,
# which can be highly non-linear in the original space
#
# let us define the tips of the whole data set
if False: # this is safe, but not compatible with on-the-fly computation
tips_all = np.array(np.unravel_index(np.argmax(self.distances_dpt), self.distances_dpt.shape))
else:
if self.iroot is not None:
tip_0 = np.argmax(self.distances_dpt[self.iroot])
else:
tip_0 = np.argmax(self.distances_dpt[0])
tips_all = np.array([tip_0, np.argmax(self.distances_dpt[tip_0])])
# we keep a list of the tips of each segment
segs_tips = [tips_all]
segs_connects = [[]]
segs_undecided = [True]
segs_adjacency = [[]]
logg.m(' do not consider groups with less than {} points for splitting'
.format(self.min_group_size))
for ibranch in range(self.n_branchings):
iseg, tips3 = self.select_segment(segs, segs_tips, segs_undecided)
if iseg == -1:
logg.m(' partitioning converged')
break
logg.m(' branching {}:'.format(ibranch + 1),
'split group', iseg) # [third start end]
# detect branching and update segs and segs_tips
self.detect_branching(segs, segs_tips,
segs_connects,
segs_undecided,
segs_adjacency, iseg, tips3)
# store as class members
self.segs = segs
self.segs_tips = segs_tips
self.segs_undecided = segs_undecided
# the following is a bit too much, but this allows easy storage
self.segs_adjacency = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=float)
self.segs_connects = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=int)
for i, seg_adjacency in enumerate(segs_adjacency):
self.segs_connects[i, seg_adjacency] = segs_connects[i]
for i in range(len(segs)):
for j in range(len(segs)):
self.segs_adjacency[i, j] = self.distances_dpt[self.segs_connects[i, j],
self.segs_connects[j, i]]
self.segs_adjacency = self.segs_adjacency.tocsr()
self.segs_connects = self.segs_connects.tocsr()
|
python
|
{
"resource": ""
}
|
q22311
|
DPT.select_segment
|
train
|
def select_segment(self, segs, segs_tips, segs_undecided) -> Tuple[int, int]:
"""Out of a list of line segments, choose segment that has the most
distant second data point.
Assume the distance matrix Ddiff is sorted according to seg_idcs.
Compute all the distances.
Returns
-------
iseg : int
Index identifying the position within the list of line segments.
        tips3 : np.ndarray
Positions of tips within chosen segment.
"""
scores_tips = np.zeros((len(segs), 4))
allindices = np.arange(self._adata.shape[0], dtype=int)
for iseg, seg in enumerate(segs):
# do not consider too small segments
if segs_tips[iseg][0] == -1: continue
# restrict distance matrix to points in segment
if not isinstance(self.distances_dpt, OnFlySymMatrix):
Dseg = self.distances_dpt[np.ix_(seg, seg)]
else:
Dseg = self.distances_dpt.restrict(seg)
third_maximizer = None
if segs_undecided[iseg]:
# check that none of our tips "connects" with a tip of the
# other segments
for jseg in range(len(segs)):
if jseg != iseg:
# take the inner tip, the "second tip" of the segment
for itip in range(2):
if (self.distances_dpt[segs_tips[jseg][1], segs_tips[iseg][itip]]
< 0.5 * self.distances_dpt[segs_tips[iseg][~itip], segs_tips[iseg][itip]]):
# logg.m(' group', iseg, 'with tip', segs_tips[iseg][itip],
# 'connects with', jseg, 'with tip', segs_tips[jseg][1], v=4)
# logg.m(' do not use the tip for "triangulation"', v=4)
third_maximizer = itip
# map the global position to the position within the segment
tips = [np.where(allindices[seg] == tip)[0][0]
for tip in segs_tips[iseg]]
# find the third point on the segment that has maximal
# added distance from the two tip points
dseg = Dseg[tips[0]] + Dseg[tips[1]]
if not np.isfinite(dseg).any():
continue
# add this point to tips, it's a third tip, we store it at the first
# position in an array called tips3
third_tip = np.argmax(dseg)
if third_maximizer is not None:
# find a fourth point that has maximal distance to all three
dseg += Dseg[third_tip]
fourth_tip = np.argmax(dseg)
if fourth_tip != tips[0] and fourth_tip != third_tip:
tips[1] = fourth_tip
dseg -= Dseg[tips[1]]
else:
dseg -= Dseg[third_tip]
tips3 = np.append(tips, third_tip)
# compute the score as ratio of the added distance to the third tip,
# to what it would be if it were on the straight line between the
# two first tips, given by Dseg[tips[:2]]
# if we did not normalize, there would be a danger of simply
# assigning the highest score to the longest segment
score = dseg[tips3[2]] / Dseg[tips3[0], tips3[1]]
score = len(seg) if self.choose_largest_segment else score # simply the number of points
logg.m(' group', iseg, 'score', score, 'n_points', len(seg),
'(too small)' if len(seg) < self.min_group_size else '', v=4)
if len(seg) <= self.min_group_size: score = 0
# write result
scores_tips[iseg, 0] = score
scores_tips[iseg, 1:] = tips3
iseg = np.argmax(scores_tips[:, 0])
if scores_tips[iseg, 0] == 0: return -1, None
tips3 = scores_tips[iseg, 1:].astype(int)
return iseg, tips3
|
python
|
{
"resource": ""
}
|
q22312
|
DPT.postprocess_segments
|
train
|
def postprocess_segments(self):
"""Convert the format of the segment class members."""
# make segs a list of mask arrays, it's easier to store
# as there is a hdf5 equivalent
for iseg, seg in enumerate(self.segs):
mask = np.zeros(self._adata.shape[0], dtype=bool)
mask[seg] = True
self.segs[iseg] = mask
# convert to arrays
self.segs = np.array(self.segs)
self.segs_tips = np.array(self.segs_tips)
|
python
|
{
"resource": ""
}
|
q22313
|
DPT.set_segs_names
|
train
|
def set_segs_names(self):
"""Return a single array that stores integer segment labels."""
segs_names = np.zeros(self._adata.shape[0], dtype=np.int8)
self.segs_names_unique = []
for iseg, seg in enumerate(self.segs):
segs_names[seg] = iseg
self.segs_names_unique.append(iseg)
self.segs_names = segs_names
|
python
|
{
"resource": ""
}
|
q22314
|
DPT.order_pseudotime
|
train
|
def order_pseudotime(self):
"""Define indices that reflect segment and pseudotime order.
Writes
------
indices : np.ndarray
Index array of shape n, which stores an ordering of the data points
with respect to increasing segment index and increasing pseudotime.
changepoints : np.ndarray
Index array of shape len(ssegs)-1, which stores the indices of
points where the segment index changes, with respect to the ordering
of indices.
"""
# within segs_tips, order tips according to pseudotime
if self.iroot is not None:
for itips, tips in enumerate(self.segs_tips):
if tips[0] != -1:
indices = np.argsort(self.pseudotime[tips])
self.segs_tips[itips] = self.segs_tips[itips][indices]
else:
logg.m(' group', itips, 'is very small', v=4)
# sort indices according to segments
indices = np.argsort(self.segs_names)
segs_names = self.segs_names[indices]
# find changepoints of segments
changepoints = np.arange(indices.size-1)[np.diff(segs_names) == 1] + 1
if self.iroot is not None:
pseudotime = self.pseudotime[indices]
for iseg, seg in enumerate(self.segs):
# only consider one segment, it's already ordered by segment
seg_sorted = seg[indices]
# consider the pseudotime on this segment and sort them
seg_indices = np.argsort(pseudotime[seg_sorted])
# within the segment, order indices according to increasing pseudotime
indices[seg_sorted] = indices[seg_sorted][seg_indices]
# define class members
self.indices = indices
self.changepoints = changepoints
|
python
|
{
"resource": ""
}
|
q22315
|
DPT.kendall_tau_split
|
train
|
def kendall_tau_split(self, a, b) -> int:
"""Return splitting index that maximizes correlation in the sequences.
        Compute the difference in Kendall tau for all split sequences.
For each splitting index i, compute the difference of the two
correlation measures kendalltau(a[:i], b[:i]) and
kendalltau(a[i:], b[i:]).
Returns the splitting index that maximizes
kendalltau(a[:i], b[:i]) - kendalltau(a[i:], b[i:])
Parameters
----------
a, b : np.ndarray
One dimensional sequences.
Returns
-------
Splitting index according to above description.
"""
if a.size != b.size:
raise ValueError('a and b need to have the same size')
        if a.ndim != 1 or b.ndim != 1:
raise ValueError('a and b need to be one-dimensional arrays')
import scipy as sp
min_length = 5
n = a.size
idx_range = np.arange(min_length, a.size-min_length-1, dtype=int)
corr_coeff = np.zeros(idx_range.size)
pos_old = sp.stats.kendalltau(a[:min_length], b[:min_length])[0]
neg_old = sp.stats.kendalltau(a[min_length:], b[min_length:])[0]
for ii, i in enumerate(idx_range):
if True:
# compute differences in concordance when adding a[i] and b[i]
# to the first subsequence, and removing these elements from
# the second subsequence
diff_pos, diff_neg = self._kendall_tau_diff(a, b, i)
pos = pos_old + self._kendall_tau_add(i, diff_pos, pos_old)
neg = neg_old + self._kendall_tau_subtract(n-i, diff_neg, neg_old)
pos_old = pos
neg_old = neg
if False:
# computation using sp.stats.kendalltau, takes much longer!
# just for debugging purposes
pos = sp.stats.kendalltau(a[:i+1], b[:i+1])[0]
neg = sp.stats.kendalltau(a[i+1:], b[i+1:])[0]
if False:
# the following is much slower than using sp.stats.kendalltau,
# it is only good for debugging because it allows to compute the
# tau-a version, which does not account for ties, whereas
# sp.stats.kendalltau computes tau-b version, which accounts for
# ties
pos = sp.stats.mstats.kendalltau(a[:i], b[:i], use_ties=False)[0]
neg = sp.stats.mstats.kendalltau(a[i:], b[i:], use_ties=False)[0]
corr_coeff[ii] = pos - neg
iimax = np.argmax(corr_coeff)
imax = min_length + iimax
corr_coeff_max = corr_coeff[iimax]
if corr_coeff_max < 0.3:
logg.m(' is root itself, never obtain significant correlation', v=4)
return imax
|
python
|
{
"resource": ""
}
|
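A brute-force illustration of the objective that `kendall_tau_split` optimizes incrementally; this sketch recomputes both Kendall tau values with scipy at every candidate split and is only meant to clarify the idea. The data is synthetic:

import numpy as np
import scipy.stats

def kendall_tau_split_naive(a, b, min_length=5):
    # maximize kendalltau(a[:i], b[:i]) - kendalltau(a[i:], b[i:])
    idx_range = np.arange(min_length, a.size - min_length - 1)
    diffs = [scipy.stats.kendalltau(a[:i], b[:i])[0]
             - scipy.stats.kendalltau(a[i:], b[i:])[0]
             for i in idx_range]
    return int(idx_range[int(np.argmax(diffs))])

rng = np.random.default_rng(0)
a = np.arange(40, dtype=float)
b = np.concatenate([np.arange(20), 20. - np.arange(20)]) + rng.normal(0, 0.1, 40)
print(kendall_tau_split_naive(a, b))  # expected near the changepoint at index 20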
q22316
|
DPT._kendall_tau_diff
|
train
|
def _kendall_tau_diff(self, a: np.ndarray, b: np.ndarray, i) -> Tuple[int, int]:
"""Compute difference in concordance of pairs in split sequences.
Consider splitting a and b at index i.
Parameters
----------
        a
            One-dimensional sequence.
        b
            One-dimensional sequence.
Returns
-------
diff_pos
Difference between concordant pairs for both subsequences.
diff_neg
Difference between non-concordant pairs for both subsequences.
"""
# compute ordering relation of the single points a[i] and b[i]
# with all previous points of the sequences a and b, respectively
a_pos = np.zeros(a[:i].size, dtype=int)
a_pos[a[:i] > a[i]] = 1
a_pos[a[:i] < a[i]] = -1
b_pos = np.zeros(b[:i].size, dtype=int)
b_pos[b[:i] > b[i]] = 1
b_pos[b[:i] < b[i]] = -1
diff_pos = np.dot(a_pos, b_pos).astype(float)
# compute ordering relation of the single points a[i] and b[i]
# with all later points of the sequences
a_neg = np.zeros(a[i:].size, dtype=int)
a_neg[a[i:] > a[i]] = 1
a_neg[a[i:] < a[i]] = -1
b_neg = np.zeros(b[i:].size, dtype=int)
b_neg[b[i:] > b[i]] = 1
b_neg[b[i:] < b[i]] = -1
diff_neg = np.dot(a_neg, b_neg)
return diff_pos, diff_neg
|
python
|
{
"resource": ""
}
|
q22317
|
deprecated_arg_names
|
train
|
def deprecated_arg_names(arg_mapping):
"""
    Decorator which marks a function's keyword arguments as deprecated. It will
result in a warning being emitted when the deprecated keyword argument is
used, and the function being called with the new argument.
Parameters
----------
arg_mapping : dict[str, str]
Mapping from deprecated argument name to current argument name.
"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
warnings.simplefilter(
'always', DeprecationWarning) # turn off filter
for old, new in arg_mapping.items():
if old in kwargs:
warnings.warn(
"Keyword argument '{0}' has been deprecated in favour "
"of '{1}'. '{0}' will be removed in a future version."
.format(old, new),
category=DeprecationWarning,
stacklevel=2,
)
val = kwargs.pop(old)
kwargs[new] = val
warnings.simplefilter(
'default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return func_wrapper
return decorator
|
python
|
{
"resource": ""
}
|
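A short usage sketch for the decorator above; the function and keyword names (`compute`, `n_jobs`, `n_threads`) are made up for illustration:

import warnings               # required by deprecated_arg_names above
from functools import wraps   # required by deprecated_arg_names above

@deprecated_arg_names({'n_jobs': 'n_threads'})
def compute(data, n_threads=1):
    return len(data) * n_threads

# The old keyword emits a DeprecationWarning and its value is forwarded
# to the new keyword, so this prints 6.
print(compute([1, 2, 3], n_jobs=2))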
q22318
|
doc_params
|
train
|
def doc_params(**kwds):
"""\
Docstrings should start with "\" in the first line for proper formatting.
"""
def dec(obj):
obj.__doc__ = dedent(obj.__doc__).format(**kwds)
return obj
return dec
|
python
|
{
"resource": ""
}
|
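A small sketch of how `doc_params` is typically used to share docstring fragments; the placeholder name `n_pcs` and the function `my_tool` are illustrative:

from textwrap import dedent   # required by doc_params above

doc_n_pcs = """\
n_pcs : `int`, optional (default: `None`)
    Number of principal components to use."""

@doc_params(n_pcs=doc_n_pcs)
def my_tool(adata, n_pcs=None):
    """\
    Run my tool.

    Parameters
    ----------
    {n_pcs}
    """

print(my_tool.__doc__)  # the {n_pcs} placeholder is filled in with the shared text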
q22319
|
get_graph_tool_from_adjacency
|
train
|
def get_graph_tool_from_adjacency(adjacency, directed=None):
"""Get graph_tool graph from adjacency matrix."""
import graph_tool as gt
adjacency_edge_list = adjacency
if not directed:
from scipy.sparse import tril
adjacency_edge_list = tril(adjacency)
g = gt.Graph(directed=directed)
    g.add_vertex(adjacency.shape[0])  # this adds adjacency.shape[0] vertices
g.add_edge_list(np.transpose(adjacency_edge_list.nonzero()))
weights = g.new_edge_property('double')
for e in g.edges():
# graph_tool uses the following convention,
# which is opposite to the rest of scanpy
weights[e] = adjacency[int(e.source()), int(e.target())]
g.edge_properties['weight'] = weights
return g
|
python
|
{
"resource": ""
}
|
q22320
|
get_igraph_from_adjacency
|
train
|
def get_igraph_from_adjacency(adjacency, directed=None):
"""Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
    g.add_vertices(adjacency.shape[0])  # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except:
pass
if g.vcount() != adjacency.shape[0]:
logg.warn('The constructed graph has only {} nodes. '
'Your adjacency matrix contained redundant nodes.'
.format(g.vcount()))
return g
|
python
|
{
"resource": ""
}
|
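A minimal sketch for the conversion above, assuming python-igraph is installed and the function is in scope; the adjacency values are illustrative:

import numpy as np
from scipy.sparse import csr_matrix

# tiny symmetric weighted adjacency matrix for 3 nodes
adjacency = csr_matrix(np.array([[0.0, 0.5, 0.0],
                                 [0.5, 0.0, 0.2],
                                 [0.0, 0.2, 0.0]]))
g = get_igraph_from_adjacency(adjacency, directed=False)
print(g.vcount(), g.ecount())  # 3 nodes; each undirected edge appears twice, so 4 edge entries
print(g.es['weight'])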
q22321
|
compute_association_matrix_of_groups
|
train
|
def compute_association_matrix_of_groups(adata, prediction, reference,
normalization='prediction',
threshold=0.01, max_n_names=2):
"""Compute overlaps between groups.
See ``identify_groups`` for identifying the groups.
Parameters
----------
adata : AnnData
prediction : str
Field name of adata.obs.
reference : str
Field name of adata.obs.
normalization : {'prediction', 'reference'}
Whether to normalize with respect to the predicted groups or the
reference groups.
threshold : float, optional (default: 0.01)
Do not consider associations whose overlap is below this fraction.
max_n_names : int or None, optional (default: 2)
Control how many reference names you want to be associated with per
predicted name. Set to `None`, if you want all.
Returns
-------
Tuple of
asso_names : list of associated reference names (`max_n_names` for each
predicted name)
asso_matrix : matrix where rows correspond to the predicted labels and
columns to the reference labels, entries are proportional to degree of
association
"""
if normalization not in {'prediction', 'reference'}:
raise ValueError('`normalization` needs to be either "prediction" or "reference".')
sanitize_anndata(adata)
cats = adata.obs[reference].cat.categories
for cat in cats:
if cat in settings.categories_to_ignore:
logg.info('Ignoring category \'{}\' '
'as it\'s in `settings.categories_to_ignore`.'
.format(cat))
asso_names = []
asso_matrix = []
for ipred_group, pred_group in enumerate(
adata.obs[prediction].cat.categories):
if '?' in pred_group: pred_group = str(ipred_group)
# starting from numpy version 1.13, subtractions of boolean arrays are deprecated
mask_pred = adata.obs[prediction].values == pred_group
mask_pred_int = mask_pred.astype(np.int8)
asso_matrix += [[]]
for ref_group in adata.obs[reference].cat.categories:
mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8)
mask_ref_or_pred = mask_ref.copy()
mask_ref_or_pred[mask_pred] = 1
# e.g. if the pred group is contained in mask_ref, mask_ref and
# mask_ref_or_pred are the same
if normalization == 'prediction':
# compute which fraction of the predicted group is contained in
# the ref group
ratio_contained = (np.sum(mask_pred_int) -
np.sum(mask_ref_or_pred - mask_ref)) / np.sum(mask_pred_int)
else:
# compute which fraction of the reference group is contained in
# the predicted group
ratio_contained = (np.sum(mask_ref) -
np.sum(mask_ref_or_pred - mask_pred_int)) / np.sum(mask_ref)
asso_matrix[-1] += [ratio_contained]
name_list_pred = [cats[i] if cats[i] not in settings.categories_to_ignore else ''
for i in np.argsort(asso_matrix[-1])[::-1]
if asso_matrix[-1][i] > threshold]
asso_names += ['\n'.join(name_list_pred[:max_n_names])]
Result = namedtuple('compute_association_matrix_of_groups',
['asso_names', 'asso_matrix'])
return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
|
python
|
{
"resource": ""
}
|
q22322
|
compute_group_overlap_score
|
train
|
def compute_group_overlap_score(ref_labels, pred_labels,
threshold_overlap_pred=0.5,
threshold_overlap_ref=0.5):
"""How well do the pred_labels explain the ref_labels?
A predicted cluster explains a reference cluster if it is contained within the reference
cluster with at least 50% (threshold_overlap_pred) of its points and these correspond
to at least 50% (threshold_overlap_ref) of the reference cluster.
"""
ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
ref_dict = dict(zip(ref_unique, ref_counts))
pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
pred_dict = dict(zip(pred_unique, pred_counts))
summary = []
for true in ref_unique:
sub_pred_unique, sub_pred_counts = np.unique(pred_labels[true == ref_labels], return_counts=True)
relative_overlaps_pred = [sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)]
relative_overlaps_ref = [sub_pred_counts[i] / ref_dict[true] for i, n in enumerate(sub_pred_unique)]
pred_best_index = np.argmax(relative_overlaps_pred)
summary.append(1 if (relative_overlaps_pred[pred_best_index] >= threshold_overlap_pred and
relative_overlaps_ref[pred_best_index] >= threshold_overlap_ref)
else 0)
# print(true, sub_pred_unique[pred_best_index], relative_overlaps_pred[pred_best_index],
# relative_overlaps_ref[pred_best_index], summary[-1])
return sum(summary)/len(summary)
|
python
|
{
"resource": ""
}
|
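A small worked example for the score above; the labels are synthetic and chosen so that one reference group is explained and the other is not:

import numpy as np

ref_labels  = np.array(['A'] * 6 + ['B'] * 4)
pred_labels = np.array(['x'] * 9 + ['y'])
# 'A' is explained by 'x' (most of 'x' lies in 'A' and it covers all of 'A');
# 'B' is not: 'y' lies entirely in 'B' but covers only 25% of it.
print(compute_group_overlap_score(ref_labels, pred_labels))  # 0.5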
q22323
|
identify_groups
|
train
|
def identify_groups(ref_labels, pred_labels, return_overlaps=False):
"""Which predicted label explains which reference label?
A predicted label explains the reference label which maximizes the minimum
of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.
Compare this with ``compute_association_matrix_of_groups``.
Returns
-------
A dictionary of length ``len(np.unique(ref_labels))`` that stores for each
reference label the predicted label that best explains it.
If ``return_overlaps`` is ``True``, this will in addition return the overlap
of the reference group with the predicted group; normalized with respect to
the reference group size and the predicted group size, respectively.
"""
ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
ref_dict = dict(zip(ref_unique, ref_counts))
pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
pred_dict = dict(zip(pred_unique, pred_counts))
associated_predictions = {}
associated_overlaps = {}
for ref_label in ref_unique:
sub_pred_unique, sub_pred_counts = np.unique(pred_labels[ref_label == ref_labels], return_counts=True)
relative_overlaps_pred = [sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)]
relative_overlaps_ref = [sub_pred_counts[i] / ref_dict[ref_label] for i, n in enumerate(sub_pred_unique)]
relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref]
relative_overlaps_min = np.min(relative_overlaps, axis=1)
pred_best_index = np.argsort(relative_overlaps_min)[::-1]
associated_predictions[ref_label] = sub_pred_unique[pred_best_index]
associated_overlaps[ref_label] = relative_overlaps[pred_best_index]
if return_overlaps: return associated_predictions, associated_overlaps
else: return associated_predictions
|
python
|
{
"resource": ""
}
|
q22324
|
unique_categories
|
train
|
def unique_categories(categories):
"""Pass array-like categories, return sorted cleaned unique categories."""
categories = np.unique(categories)
categories = np.setdiff1d(categories, np.array(settings.categories_to_ignore))
categories = np.array(natsorted(categories, key=lambda v: v.upper()))
return categories
|
python
|
{
"resource": ""
}
|
q22325
|
fill_in_datakeys
|
train
|
def fill_in_datakeys(example_parameters, dexdata):
"""Update the 'examples dictionary' _examples.example_parameters.
If a datakey (key in 'datafile dictionary') is not present in the 'examples
dictionary' it is used to initialize an entry with that key.
If not specified otherwise, any 'exkey' (key in 'examples dictionary') is
used as 'datakey'.
"""
# default initialization of 'datakey' key with entries from data dictionary
for exkey in example_parameters:
if 'datakey' not in example_parameters[exkey]:
if exkey in dexdata:
example_parameters[exkey]['datakey'] = exkey
else:
example_parameters[exkey]['datakey'] = 'unspecified in dexdata'
return example_parameters
|
python
|
{
"resource": ""
}
|
q22326
|
moving_average
|
train
|
def moving_average(a, n):
"""Moving average over one-dimensional array.
Parameters
----------
a : np.ndarray
One-dimensional array.
n : int
        Number of entries to average over. n=2 means averaging over the current
        and the previous entry.
Returns
-------
An array view storing the moving average.
"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
|
python
|
{
"resource": ""
}
|
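A quick numeric check of the helper above:

import numpy as np

a = np.array([1., 2., 3., 4., 5.])
print(moving_average(a, n=2))  # [1.5 2.5 3.5 4.5]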
q22327
|
update_params
|
train
|
def update_params(old_params, new_params, check=False):
"""Update old_params with new_params.
If check==False, this merely adds and overwrites the content of old_params.
If check==True, this only allows updating of parameters that are already
present in old_params.
Parameters
----------
old_params : dict
new_params : dict
check : bool, optional (default: False)
Returns
-------
updated_params : dict
"""
updated_params = dict(old_params)
if new_params: # allow for new_params to be None
for key, val in new_params.items():
if key not in old_params and check:
raise ValueError('\'' + key
+ '\' is not a valid parameter key, '
+ 'consider one of \n'
+ str(list(old_params.keys())))
if val is not None:
updated_params[key] = val
return updated_params
|
python
|
{
"resource": ""
}
|
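A small sketch exercising both modes of `update_params`; the parameter names are illustrative:

defaults = {'resolution': 1.0, 'n_iterations': 10}
print(update_params(defaults, {'resolution': 0.5}))    # {'resolution': 0.5, 'n_iterations': 10}
print(update_params(defaults, {'foo': 1}))             # check=False: 'foo' is simply added
try:
    update_params(defaults, {'foo': 1}, check=True)    # unknown key raises ValueError
except ValueError as e:
    print(e)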
q22328
|
read_args_tool
|
train
|
def read_args_tool(toolkey, example_parameters, tool_add_args=None):
"""Read args for single tool.
"""
import scanpy as sc
p = default_tool_argparser(help(toolkey), example_parameters)
if tool_add_args is None:
p = add_args(p)
else:
p = tool_add_args(p)
args = vars(p.parse_args())
args = settings.process_args(args)
return args
|
python
|
{
"resource": ""
}
|
q22329
|
default_tool_argparser
|
train
|
def default_tool_argparser(description, example_parameters):
"""Create default parser for single tools.
"""
import argparse
epilog = '\n'
for k, v in sorted(example_parameters.items()):
epilog += ' ' + k + '\n'
p = argparse.ArgumentParser(
description=description,
add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=('available values for examples (exkey):'+epilog))
return p
|
python
|
{
"resource": ""
}
|
q22330
|
pretty_dict_string
|
train
|
def pretty_dict_string(d, indent=0):
"""Pretty output of nested dictionaries.
"""
s = ''
for key, value in sorted(d.items()):
s += ' ' * indent + str(key)
if isinstance(value, dict):
s += '\n' + pretty_dict_string(value, indent+1)
else:
s += '=' + str(value) + '\n'
return s
|
python
|
{
"resource": ""
}
|
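Example output of the helper above; the parameter names are illustrative:

params = {'tool': {'resolution': 1.0, 'random_state': 0}, 'n_jobs': 4}
print(pretty_dict_string(params))
# n_jobs=4
# tool
#  random_state=0
#  resolution=1.0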
q22331
|
merge_dicts
|
train
|
def merge_dicts(*ds):
"""Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
Notes
-----
http://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression
"""
    result = dict(ds[0])  # shallow copy so the first input dict is not mutated
for d in ds[1:]:
result.update(d)
return result
|
python
|
{
"resource": ""
}
|
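A one-line usage check for `merge_dicts`; later dicts win on key collisions:

print(merge_dicts({'cmap': 'viridis', 'size': 10}, {'size': 20}))  # {'cmap': 'viridis', 'size': 20}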
q22332
|
masks
|
train
|
def masks(list_of_index_lists, n):
"""Make an array in which rows store 1d mask arrays from list of index lists.
Parameters
----------
n : int
Maximal index / number of samples.
"""
# make a list of mask arrays, it's easier to store
# as there is a hdf5 equivalent
for il,l in enumerate(list_of_index_lists):
mask = np.zeros(n,dtype=bool)
mask[l] = True
list_of_index_lists[il] = mask
# convert to arrays
masks = np.array(list_of_index_lists)
return masks
|
python
|
{
"resource": ""
}
|
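A small usage check for `masks` above; note that the input list is modified in place by the function:

import numpy as np

index_lists = [[0, 2], [1, 3, 4]]
print(masks(index_lists, n=5).astype(int))
# [[1 0 1 0 0]
#  [0 1 0 1 1]]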
q22333
|
warn_with_traceback
|
train
|
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
"""Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
"""
import traceback
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line))
|
python
|
{
"resource": ""
}
|
q22334
|
subsample_n
|
train
|
def subsample_n(X, n=0, seed=0):
"""Subsample n samples from rows of array.
Parameters
----------
X : np.ndarray
Data array.
seed : int
Seed for sampling.
Returns
-------
Xsampled : np.ndarray
Subsampled X.
rows : np.ndarray
Indices of rows that are stored in Xsampled.
"""
if n < 0:
        raise ValueError('n must be greater than or equal to 0')
np.random.seed(seed)
n = X.shape[0] if (n == 0 or n > X.shape[0]) else n
rows = np.random.choice(X.shape[0], size=n, replace=False)
Xsampled = X[rows]
return Xsampled, rows
|
python
|
{
"resource": ""
}
|
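A quick usage sketch for `subsample_n`:

import numpy as np

X = np.arange(20).reshape(10, 2)
Xsampled, rows = subsample_n(X, n=3, seed=0)
print(rows)            # indices of the 3 sampled rows
print(Xsampled.shape)  # (3, 2)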
q22335
|
check_presence_download
|
train
|
def check_presence_download(filename, backup_url):
"""Check if file is present otherwise download."""
import os
filename = str(filename) # Throws error for Path on 3.5
if not os.path.exists(filename):
from .readwrite import download_progress
dr = os.path.dirname(filename)
try:
os.makedirs(dr)
except FileExistsError:
pass # ignore if dir already exists
from urllib.request import urlretrieve
urlretrieve(backup_url, filename, reporthook=download_progress)
|
python
|
{
"resource": ""
}
|
q22336
|
hierarch_cluster
|
train
|
def hierarch_cluster(M):
"""Cluster matrix using hierarchical clustering.
Parameters
----------
M : np.ndarray
Matrix, for example, distance matrix.
Returns
-------
Mclus : np.ndarray
Clustered matrix.
indices : np.ndarray
Indices used to cluster the matrix.
"""
import scipy as sp
import scipy.cluster
link = sp.cluster.hierarchy.linkage(M)
indices = sp.cluster.hierarchy.leaves_list(link)
Mclus = np.array(M[:, indices])
Mclus = Mclus[indices, :]
if False:
pl.matshow(Mclus)
pl.colorbar()
return Mclus, indices
|
python
|
{
"resource": ""
}
|
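A minimal sketch for `hierarch_cluster`, assuming numpy/scipy are available; the matrix values are illustrative (scipy treats the square matrix as a set of observations here and may emit a warning about that):

import numpy as np

# small symmetric matrix with two obvious blocks: {0, 1} and {2, 3}
M = np.array([[0.0, 0.1, 0.9, 0.8],
              [0.1, 0.0, 0.85, 0.95],
              [0.9, 0.85, 0.0, 0.2],
              [0.8, 0.95, 0.2, 0.0]])
Mclus, indices = hierarch_cluster(M)
print(indices)      # leaf order after clustering, grouping the two blocks together
print(Mclus.shape)  # (4, 4)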
q22337
|
GetVersionNamespace
|
train
|
def GetVersionNamespace(version):
""" Get version namespace from version """
ns = nsMap[version]
if not ns:
ns = serviceNsMap[version]
versionId = versionIdMap[version]
if not versionId:
namespace = ns
else:
namespace = '%s/%s' % (ns, versionId)
return namespace
|
python
|
{
"resource": ""
}
|
q22338
|
GetWsdlMethod
|
train
|
def GetWsdlMethod(ns, wsdlName):
""" Get wsdl method from ns, wsdlName """
with _lazyLock:
method = _wsdlMethodMap[(ns, wsdlName)]
if isinstance(method, ManagedMethod):
# The type corresponding to the method is loaded,
# just return the method object
return method
elif method:
# The type is not loaded, the map contains the info
# to load the type. Load the actual type and
# return the method object
LoadManagedType(*method)
return _wsdlMethodMap[(ns, wsdlName)]
else:
            raise KeyError("{0} {1}".format(ns, wsdlName))
|
python
|
{
"resource": ""
}
|
q22339
|
GetVmodlType
|
train
|
def GetVmodlType(name):
""" Get type from vmodl name """
# If the input is already a type, just return
if isinstance(name, type):
return name
# Try to get type from vmodl type names table
typ = vmodlTypes.get(name)
if typ:
return typ
# Else get the type from the _wsdlTypeMap
isArray = name.endswith("[]")
if isArray:
name = name[:-2]
ns, wsdlName = _GetWsdlInfo(name)
try:
typ = GetWsdlType(ns, wsdlName)
except KeyError:
raise KeyError(name)
if typ:
return isArray and typ.Array or typ
else:
raise KeyError(name)
|
python
|
{
"resource": ""
}
|
q22340
|
VmomiJSONEncoder.explode
|
train
|
def explode(self, obj):
""" Determine if the object should be exploded. """
if obj in self._done:
return False
result = False
for item in self._explode:
if hasattr(item, '_moId'):
# If it has a _moId it is an instance
if obj._moId == item._moId:
result = True
else:
# If it does not have a _moId it is a template
if obj.__class__.__name__ == item.__name__:
result = True
if result:
self._done.add(obj)
return result
|
python
|
{
"resource": ""
}
|
q22341
|
main
|
train
|
def main():
"""
Simple command-line program for powering on virtual machines on a system.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and user %s: ' % (args.host,args.user))
try:
vmnames = args.vmname
if not len(vmnames):
print("No virtual machine specified for poweron")
sys.exit()
context = None
if hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port),
sslContext=context)
if not si:
print("Cannot connect to specified host using specified username and password")
sys.exit()
atexit.register(Disconnect, si)
        # Retrieve the list of Virtual Machines from the inventory objects
# under the rootFolder
content = si.content
objView = content.viewManager.CreateContainerView(content.rootFolder,
[vim.VirtualMachine],
True)
vmList = objView.view
objView.Destroy()
# Find the vm and power it on
tasks = [vm.PowerOn() for vm in vmList if vm.name in vmnames]
# Wait for power on to complete
WaitForTasks(tasks, si)
print("Virtual Machine(s) have been powered on successfully")
except vmodl.MethodFault as e:
print("Caught vmodl fault : " + e.msg)
except Exception as e:
print("Caught Exception : " + str(e))
|
python
|
{
"resource": ""
}
|
q22342
|
PrintVmInfo
|
train
|
def PrintVmInfo(vm, depth=1):
"""
Print information for a particular virtual machine or recurse into a folder
or vApp with depth protection
"""
maxdepth = 10
# if this is a group it will have children. if it does, recurse into them
# and then return
if hasattr(vm, 'childEntity'):
if depth > maxdepth:
return
vmList = vm.childEntity
for c in vmList:
PrintVmInfo(c, depth+1)
return
# if this is a vApp, it likely contains child VMs
# (vApps can nest vApps, but it is hardly a common usecase, so ignore that)
if isinstance(vm, vim.VirtualApp):
vmList = vm.vm
for c in vmList:
PrintVmInfo(c, depth + 1)
return
summary = vm.summary
print("Name : ", summary.config.name)
print("Path : ", summary.config.vmPathName)
print("Guest : ", summary.config.guestFullName)
annotation = summary.config.annotation
if annotation != None and annotation != "":
print("Annotation : ", annotation)
print("State : ", summary.runtime.powerState)
if summary.guest != None:
ip = summary.guest.ipAddress
if ip != None and ip != "":
print("IP : ", ip)
if summary.runtime.question != None:
print("Question : ", summary.runtime.question.text)
print("")
|
python
|
{
"resource": ""
}
|
q22343
|
main
|
train
|
def main():
"""
Simple command-line program for listing the virtual machines on a system.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host,args.user))
context = None
if hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port),
sslContext=context)
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
content = si.RetrieveContent()
for child in content.rootFolder.childEntity:
if hasattr(child, 'vmFolder'):
datacenter = child
vmFolder = datacenter.vmFolder
vmList = vmFolder.childEntity
for vm in vmList:
PrintVmInfo(vm)
return 0
|
python
|
{
"resource": ""
}
|
q22344
|
localSslFixup
|
train
|
def localSslFixup(host, sslContext):
"""
Connections to 'localhost' do not need SSL verification as a certificate
will never match. The OS provides security by only allowing root to bind
to low-numbered ports.
"""
if not sslContext and host in ['localhost', '127.0.0.1', '::1']:
import ssl
if hasattr(ssl, '_create_unverified_context'):
sslContext = ssl._create_unverified_context()
return sslContext
|
python
|
{
"resource": ""
}
|
q22345
|
Connect
|
train
|
def Connect(host='localhost', port=443, user='root', pwd='',
service="hostd", adapter="SOAP", namespace=None, path="/sdk",
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
version=None, keyFile=None, certFile=None, thumbprint=None,
sslContext=None, b64token=None, mechanism='userpass'):
"""
Connect to the specified server, login and return the service
instance object.
Throws any exception back to caller. The service instance object is
also saved in the library for easy access.
Clients should modify the service parameter only when connecting to
a VMOMI server other than hostd/vpxd. For both of the latter, the
default value is fine.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param adapter: Adapter
@type adapter: string
@param namespace: Namespace *** Deprecated: Use version instead ***
@type namespace: string
@param path: Path
@type path: string
@param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never
closing the connections
@type connectionPoolTimeout: int
@param version: Version
@type version: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
@param b64token: base64 encoded token
@type b64token: string
@param mechanism: authentication mechanism: userpass or sspi
@type mechanism: string
"""
try:
info = re.match(_rx, host)
if info is not None:
host = info.group(1)
if host[0] == '[':
host = info.group(1)[1:-1]
if info.group(2) is not None:
port = int(info.group(2)[1:])
except ValueError as ve:
pass
sslContext = localSslFixup(host, sslContext)
if namespace:
assert(version is None)
version = versionMap[namespace]
elif not version:
version = "vim.version.version6"
si, stub = None, None
if mechanism == 'userpass':
si, stub = __Login(host, port, user, pwd, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout)
elif mechanism == 'sspi':
si, stub = __LoginBySSPI(host, port, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, b64token, connectionPoolTimeout)
else:
raise Exception('''The provided connection mechanism is not available, the
supported mechanisms are userpass or sspi''')
SetSi(si)
return si
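
# Hedged usage sketch (not part of the library): host, user and password below are
# placeholders. Connect() falls back to vim.version.version6 unless a version or
# namespace is passed explicitly.
import ssl

ctx = ssl._create_unverified_context() if hasattr(ssl, '_create_unverified_context') else None
si = Connect(host='esxi01.example.com', user='root', pwd='secret', sslContext=ctx)
print(si.CurrentTime())   # cheap round trip to confirm the session works
Disconnect(si)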
|
python
|
{
"resource": ""
}
|
q22346
|
ConnectNoSSL
|
train
|
def ConnectNoSSL(host='localhost', port=443, user='root', pwd='',
service="hostd", adapter="SOAP", namespace=None, path="/sdk",
version=None, keyFile=None, certFile=None, thumbprint=None,
b64token=None, mechanism='userpass'):
"""
Provides a standard method for connecting to a specified server without SSL
verification. Useful when connecting to servers with self-signed certificates
or when you wish to ignore SSL altogether. Will attempt to create an unverified
SSL context and then connect via the Connect method.
"""
if hasattr(ssl, '_create_unverified_context'):
sslContext = ssl._create_unverified_context()
else:
sslContext = None
return Connect(host=host,
port=port,
user=user,
pwd=pwd,
service=service,
adapter=adapter,
namespace=namespace,
path=path,
version=version,
keyFile=keyFile,
certFile=certFile,
thumbprint=thumbprint,
sslContext=sslContext,
b64token=b64token,
mechanism=mechanism)
|
python
|
{
"resource": ""
}
|
q22347
|
__RetrieveContent
|
train
|
def __RetrieveContent(host, port, adapter, version, path, keyFile, certFile,
thumbprint, sslContext, connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC):
"""
Retrieve service instance for connection.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param adapter: Adapter
@type adapter: string
@param version: Version
@type version: string
@param path: Path
@type path: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never
closing the connections
@type connectionPoolTimeout: int
"""
# XXX remove the adapter and service arguments once dependent code is fixed
if adapter != "SOAP":
raise ValueError(adapter)
# Create the SOAP stub adapter
stub = SoapStubAdapter(host, port, version=version, path=path,
certKeyFile=keyFile, certFile=certFile,
thumbprint=thumbprint, sslContext=sslContext,
connectionPoolTimeout=connectionPoolTimeout)
# Get Service instance
si = vim.ServiceInstance("ServiceInstance", stub)
content = None
try:
content = si.RetrieveContent()
except vmodl.MethodFault:
raise
except Exception as e:
# NOTE (hartsock): preserve the traceback for diagnostics
# pulling and preserving the traceback makes diagnosing connection
# failures easier since the fault will also include where inside the
# library the fault occurred. Without the traceback we have no idea
# why the connection failed beyond the message string.
(type, value, traceback) = sys.exc_info()
if traceback:
fault = vim.fault.HostConnectFault(msg=str(e))
reraise(vim.fault.HostConnectFault, fault, traceback)
else:
raise vim.fault.HostConnectFault(msg=str(e))
return content, si, stub
|
python
|
{
"resource": ""
}
|
q22348
|
__GetElementTree
|
train
|
def __GetElementTree(protocol, server, port, path, sslContext):
"""
Private method that returns a root from ElementTree for a remote XML document.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
if protocol == "https":
kwargs = {"context": sslContext} if sslContext else {}
conn = http_client.HTTPSConnection(server, port=port, **kwargs)
elif protocol == "http":
conn = http_client.HTTPConnection(server, port=port)
else:
raise Exception("Protocol " + protocol + " not supported.")
conn.request("GET", path)
response = conn.getresponse()
if response.status == 200:
try:
tree = ElementTree.fromstring(response.read())
return tree
except ExpatError:
pass
return None
|
python
|
{
"resource": ""
}
|
q22349
|
__GetServiceVersionDescription
|
train
|
def __GetServiceVersionDescription(protocol, server, port, path, sslContext):
"""
Private method that returns a root from an ElementTree describing the API versions
supported by the specified server. The result will be vimServiceVersions.xml
if it exists, otherwise vimService.wsdl if it exists, otherwise None.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
tree = __GetElementTree(protocol, server, port,
path + "/vimServiceVersions.xml", sslContext)
if tree is not None:
return tree
tree = __GetElementTree(protocol, server, port,
path + "/vimService.wsdl", sslContext)
return tree
|
python
|
{
"resource": ""
}
|
q22350
|
__VersionIsSupported
|
train
|
def __VersionIsSupported(desiredVersion, serviceVersionDescription):
"""
Private method that returns true if the service version description document
indicates that the desired version is supported
@param desiredVersion: The version we want to see if the server supports
      (e.g. vim.version.version2).
@type desiredVersion: string
@param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
or vimService.wsdl.
@type serviceVersionDescription: root ElementTree
"""
root = serviceVersionDescription
if root.tag == 'namespaces':
# serviceVersionDescription appears to be a vimServiceVersions.xml document
if root.get('version') != '1.0':
raise RuntimeError('vimServiceVersions.xml has version %s,' \
' which is not understood' % (root.get('version')))
desiredVersionId = versionIdMap[desiredVersion]
supportedVersion = None
for namespace in root.findall('namespace'):
versionId = namespace.findtext('version')
if versionId == desiredVersionId:
return True
else:
for versionId in namespace.findall('priorVersions/version'):
if versionId.text == desiredVersionId:
return True
else:
# serviceVersionDescription must be a vimService.wsdl document
wsdlNS = 'http://schemas.xmlsoap.org/wsdl/'
importElement = serviceVersionDescription.find('.//{%s}import' % wsdlNS)
supportedVersion = versionMap[importElement.get('namespace')[4:]]
if IsChildVersion(supportedVersion, desiredVersion):
return True
return False
|
python
|
{
"resource": ""
}
|
q22351
|
__FindSupportedVersion
|
train
|
def __FindSupportedVersion(protocol, server, port, path, preferredApiVersions, sslContext):
"""
Private method that returns the most preferred API version supported by the
   specified server.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred.
@type preferredApiVersions: string or string list
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
serviceVersionDescription = __GetServiceVersionDescription(protocol,
server,
port,
path,
sslContext)
if serviceVersionDescription is None:
return None
if not isinstance(preferredApiVersions, list):
preferredApiVersions = [ preferredApiVersions ]
for desiredVersion in preferredApiVersions:
if __VersionIsSupported(desiredVersion, serviceVersionDescription):
return desiredVersion
return None
|
python
|
{
"resource": ""
}
|
q22352
|
SmartStubAdapter
|
train
|
def SmartStubAdapter(host='localhost', port=443, path='/sdk',
url=None, sock=None, poolSize=5,
certFile=None, certKeyFile=None,
httpProxyHost=None, httpProxyPort=80, sslProxyPath=None,
thumbprint=None, cacertsFile=None, preferredApiVersions=None,
acceptCompressedResponses=True,
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
samlToken=None, sslContext=None):
"""
Determine the most preferred API version supported by the specified server,
then create a soap stub adapter using that version
The parameters are the same as for pyVmomi.SoapStubAdapter except for
   version which is renamed to preferredApiVersions
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
      specified, the list of versions supported by pyVmomi will
be used.
@type preferredApiVersions: string or string list
"""
if preferredApiVersions is None:
preferredApiVersions = GetServiceVersions('vim25')
sslContext = localSslFixup(host, sslContext)
supportedVersion = __FindSupportedVersion('https' if port > 0 else 'http',
host,
port,
path,
preferredApiVersions,
sslContext)
if supportedVersion is None:
raise Exception("%s:%s is not a VIM server" % (host, port))
return SoapStubAdapter(host=host, port=port, path=path,
url=url, sock=sock, poolSize=poolSize,
certFile=certFile, certKeyFile=certKeyFile,
httpProxyHost=httpProxyHost, httpProxyPort=httpProxyPort,
sslProxyPath=sslProxyPath, thumbprint=thumbprint,
cacertsFile=cacertsFile, version=supportedVersion,
acceptCompressedResponses=acceptCompressedResponses,
connectionPoolTimeout=connectionPoolTimeout,
samlToken=samlToken, sslContext=sslContext)
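
# Hedged sketch: SmartStubAdapter only negotiates the API version and builds the
# stub, so login has to be done explicitly via the session manager afterwards.
# The host and credentials are placeholders.
from pyVmomi import vim

stub = SmartStubAdapter(host='vcenter.example.com', port=443,
                        sslContext=localSslFixup('vcenter.example.com', None))
si = vim.ServiceInstance('ServiceInstance', stub)
content = si.RetrieveContent()
content.sessionManager.Login('administrator@vsphere.local', 'secret')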
|
python
|
{
"resource": ""
}
|
q22353
|
SmartConnect
|
train
|
def SmartConnect(protocol='https', host='localhost', port=443, user='root', pwd='',
service="hostd", path="/sdk", connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
preferredApiVersions=None, keyFile=None, certFile=None,
thumbprint=None, sslContext=None, b64token=None, mechanism='userpass'):
"""
Determine the most preferred API version supported by the specified server,
then connect to the specified server using that API version, login and return
the service instance object.
Throws any exception back to caller. The service instance object is
also saved in the library for easy access.
Clients should modify the service parameter only when connecting to
a VMOMI server other than hostd/vpxd. For both of the latter, the
default value is fine.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param path: Path
@type path: string
@param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never
closing the connections
@type connectionPoolTimeout: int
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
      specified, the list of versions supported by pyVmomi will
be used.
@type preferredApiVersions: string or string list
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
if preferredApiVersions is None:
preferredApiVersions = GetServiceVersions('vim25')
sslContext = localSslFixup(host, sslContext)
supportedVersion = __FindSupportedVersion(protocol,
host,
port,
path,
preferredApiVersions,
sslContext)
if supportedVersion is None:
raise Exception("%s:%s is not a VIM server" % (host, port))
portNumber = protocol == "http" and -int(port) or int(port)
return Connect(host=host,
port=portNumber,
user=user,
pwd=pwd,
service=service,
adapter='SOAP',
version=supportedVersion,
path=path,
connectionPoolTimeout=connectionPoolTimeout,
keyFile=keyFile,
certFile=certFile,
thumbprint=thumbprint,
sslContext=sslContext,
b64token=b64token,
mechanism=mechanism)
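
# Minimal sketch of SmartConnect in use (hypothetical host and credentials); the
# unverified context is only appropriate for hosts with self-signed certificates.
import atexit
import ssl

ctx = ssl._create_unverified_context() if hasattr(ssl, '_create_unverified_context') else None
si = SmartConnect(host='vcenter.example.com', user='administrator@vsphere.local',
                  pwd='secret', sslContext=ctx)
atexit.register(Disconnect, si)
print(si.content.about.fullName)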
|
python
|
{
"resource": ""
}
|
q22354
|
SmartConnectNoSSL
|
train
|
def SmartConnectNoSSL(protocol='https', host='localhost', port=443, user='root', pwd='',
service="hostd", path="/sdk", connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
preferredApiVersions=None, keyFile=None, certFile=None,
thumbprint=None, b64token=None, mechanism='userpass'):
"""
Provides a standard method for connecting to a specified server without SSL
verification. Useful when connecting to servers with self-signed certificates
or when you wish to ignore SSL altogether. Will attempt to create an unverified
SSL context and then connect via the SmartConnect method.
"""
if hasattr(ssl, '_create_unverified_context'):
sslContext = ssl._create_unverified_context()
else:
sslContext = None
return SmartConnect(protocol=protocol,
host=host,
port=port,
user=user,
pwd=pwd,
service=service,
path=path,
connectionPoolTimeout=connectionPoolTimeout,
preferredApiVersions=preferredApiVersions,
keyFile=keyFile,
certFile=certFile,
thumbprint=thumbprint,
sslContext=sslContext,
b64token=b64token,
mechanism=mechanism)
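
# Same connection, letting the helper build the unverified context for you
# (hypothetical host and credentials):
si = SmartConnectNoSSL(host='vcenter.example.com', user='root', pwd='secret')
print(si.content.about.apiVersion)
Disconnect(si)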
|
python
|
{
"resource": ""
}
|
q22355
|
OpenUrlWithBasicAuth
|
train
|
def OpenUrlWithBasicAuth(url, user='root', pwd=''):
"""
Open the specified URL, using HTTP basic authentication to provide
the specified credentials to the server as part of the request.
   Returns the requests.Response object from the GET request.
"""
return requests.get(url, auth=HTTPBasicAuth(user, pwd), verify=False)
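
# Illustrative call only; the datastore file URL below is a made-up example of the
# kind of URL this helper is usually pointed at.
resp = OpenUrlWithBasicAuth(
    'https://esxi01.example.com/folder/vm1/vm1.vmx?dcPath=ha-datacenter&dsName=datastore1',
    user='root', pwd='secret')
print(resp.status_code)
print(len(resp.content), "bytes downloaded")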
|
python
|
{
"resource": ""
}
|
q22356
|
main
|
train
|
def main():
"""
Simple command-line program for dumping the contents of any managed object.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host,args.user))
context = None
if hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port),
sslContext=context)
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
obj = VmomiSupport.templateOf(args.type)(args.id, si._stub)
print(json.dumps(obj, cls=VmomiSupport.VmomiJSONEncoder,
sort_keys=True, indent=4))
|
python
|
{
"resource": ""
}
|
q22357
|
SoapSerializer._NSPrefix
|
train
|
def _NSPrefix(self, ns):
""" Get xml ns prefix. self.nsMap must be set """
if ns == self.defaultNS:
return ''
prefix = self.nsMap[ns]
return prefix and prefix + ':' or ''
|
python
|
{
"resource": ""
}
|
q22358
|
SoapDeserializer.SplitTag
|
train
|
def SplitTag(self, tag):
""" Split tag into ns, name """
idx = tag.find(NS_SEP)
if idx >= 0:
return tag[:idx], tag[idx + 1:]
else:
return "", tag
|
python
|
{
"resource": ""
}
|
q22359
|
SoapDeserializer.LookupWsdlType
|
train
|
def LookupWsdlType(self, ns, name, allowManagedObjectReference=False):
""" Lookup wsdl type. Handle special case for some vmodl version """
try:
return GetWsdlType(ns, name)
except KeyError:
if allowManagedObjectReference:
if name.endswith('ManagedObjectReference') and ns == XMLNS_VMODL_BASE:
return GetWsdlType(ns, name[:-len('Reference')])
# WARNING!!! This is a temporary hack to get around server not
        # honoring @service tag (see bug 521744). Once it is fixed, I am
# going to back out this change
if name.endswith('ManagedObjectReference') and allowManagedObjectReference:
return GetWsdlType(XMLNS_VMODL_BASE, name[:-len('Reference')])
return GuessWsdlType(name)
|
python
|
{
"resource": ""
}
|
q22360
|
IsPrimitiveType
|
train
|
def IsPrimitiveType(obj):
"""See if the passed in type is a Primitive Type"""
return (isinstance(obj, types.bool) or isinstance(obj, types.byte) or
isinstance(obj, types.short) or isinstance(obj, six.integer_types) or
isinstance(obj, types.double) or isinstance(obj, types.float) or
isinstance(obj, six.string_types) or
isinstance(obj, types.PropertyPath) or
isinstance(obj, types.ManagedMethod) or
isinstance(obj, types.datetime) or
isinstance(obj, types.URI) or isinstance(obj, type))
|
python
|
{
"resource": ""
}
|
q22361
|
DiffAnys
|
train
|
def DiffAnys(obj1, obj2, looseMatch=False, ignoreArrayOrder=True):
"""Diff any two objects. Objects can either be primitive type
or DataObjects"""
differ = Differ(looseMatch = looseMatch, ignoreArrayOrder = ignoreArrayOrder)
return differ.DiffAnyObjects(obj1, obj2)
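
# Small hedged example: diff two pyVmomi data objects. vim.option.OptionValue is
# used only because it is a simple DataObject; the key/value data is arbitrary.
from pyVmomi import vim

a = vim.option.OptionValue(key='guestinfo.demo', value='1')
b = vim.option.OptionValue(key='guestinfo.demo', value='2')
print(DiffAnys(a, a))   # True  - same object
print(DiffAnys(a, b))   # False - the 'value' property differs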
|
python
|
{
"resource": ""
}
|
q22362
|
Differ.DiffAnyObjects
|
train
|
def DiffAnyObjects(self, oldObj, newObj, isObjLink=False):
"""Diff any two Objects"""
if oldObj == newObj:
return True
if not oldObj or not newObj:
__Log__.debug('DiffAnyObjects: One of the objects is unset.')
return self._looseMatch
oldObjInstance = oldObj
newObjInstance = newObj
if isinstance(oldObj, list):
oldObjInstance = oldObj[0]
if isinstance(newObj, list):
newObjInstance = newObj[0]
# Need to see if it is a primitive type first since type information
# will not be available for them.
if (IsPrimitiveType(oldObj) and IsPrimitiveType(newObj)
and oldObj.__class__.__name__ == newObj.__class__.__name__):
if oldObj == newObj:
return True
elif oldObj == None or newObj == None:
__Log__.debug('DiffAnyObjects: One of the objects in None')
return False
oldType = Type(oldObjInstance)
newType = Type(newObjInstance)
if oldType != newType:
__Log__.debug('DiffAnyObjects: Types do not match %s != %s' %
(repr(GetWsdlName(oldObjInstance.__class__)),
repr(GetWsdlName(newObjInstance.__class__))))
return False
elif isinstance(oldObj, list):
return self.DiffArrayObjects(oldObj, newObj, isObjLink)
elif isinstance(oldObjInstance, types.ManagedObject):
return (not oldObj and not newObj) or (oldObj and newObj
and oldObj._moId == newObj._moId)
elif isinstance(oldObjInstance, types.DataObject):
if isObjLink:
bMatch = oldObj.GetKey() == newObj.GetKey()
LogIf(not bMatch, 'DiffAnyObjects: Keys do not match %s != %s'
% (oldObj.GetKey(), newObj.GetKey()))
return bMatch
return self.DiffDataObjects(oldObj, newObj)
else:
raise TypeError("Unknown type: "+repr(GetWsdlName(oldObj.__class__)))
|
python
|
{
"resource": ""
}
|
q22363
|
Differ.DiffDoArrays
|
train
|
def DiffDoArrays(self, oldObj, newObj, isElementLinks):
"""Diff two DataObject arrays"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffDoArrays: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
for i, j in zip(oldObj, newObj):
if isElementLinks:
if i.GetKey() != j.GetKey():
__Log__.debug('DiffDoArrays: Keys do not match %s != %s'
% (i.GetKey(), j.GetKey()))
return False
else:
if not self.DiffDataObjects(i, j):
__Log__.debug(
                    'DiffDoArrays: one of the elements does not match')
return False
return True
|
python
|
{
"resource": ""
}
|
q22364
|
Differ.DiffAnyArrays
|
train
|
def DiffAnyArrays(self, oldObj, newObj, isElementLinks):
"""Diff two arrays which contain Any objects"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffAnyArrays: Array lengths do not match. %d != %d'
% (len(oldObj), len(newObj)))
return False
for i, j in zip(oldObj, newObj):
if not self.DiffAnyObjects(i, j, isElementLinks):
__Log__.debug('DiffAnyArrays: One of the elements do not match.')
return False
return True
|
python
|
{
"resource": ""
}
|
q22365
|
Differ.DiffPrimitiveArrays
|
train
|
def DiffPrimitiveArrays(self, oldObj, newObj):
"""Diff two primitive arrays"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffDoArrays: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
match = True
if self._ignoreArrayOrder:
oldSet = oldObj and frozenset(oldObj) or frozenset()
newSet = newObj and frozenset(newObj) or frozenset()
match = (oldSet == newSet)
else:
for i, j in zip(oldObj, newObj):
if i != j:
match = False
break
if not match:
__Log__.debug(
            'DiffPrimitiveArrays: One of the elements does not match.')
return False
return True
|
python
|
{
"resource": ""
}
|
q22366
|
Differ.DiffArrayObjects
|
train
|
def DiffArrayObjects(self, oldObj, newObj, isElementLinks=False):
"""Method which deligates the diffing of arrays based on the type"""
if oldObj == newObj:
return True
if not oldObj or not newObj:
return False
if len(oldObj) != len(newObj):
__Log__.debug('DiffArrayObjects: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
firstObj = oldObj[0]
if IsPrimitiveType(firstObj):
return self.DiffPrimitiveArrays(oldObj, newObj)
elif isinstance(firstObj, types.ManagedObject):
return self.DiffAnyArrays(oldObj, newObj, isElementLinks)
elif isinstance(firstObj, types.DataObject):
return self.DiffDoArrays(oldObj, newObj, isElementLinks)
else:
raise TypeError("Unknown type: %s" % oldObj.__class__)
|
python
|
{
"resource": ""
}
|
q22367
|
Differ.DiffDataObjects
|
train
|
def DiffDataObjects(self, oldObj, newObj):
"""Diff Data Objects"""
if oldObj == newObj:
return True
if not oldObj or not newObj:
__Log__.debug('DiffDataObjects: One of the objects in None')
return False
oldType = Type(oldObj)
newType = Type(newObj)
if oldType != newType:
__Log__.debug(
'DiffDataObjects: Types do not match for dataobjects. %s != %s'
% (oldObj._wsdlName, newObj._wsdlName))
return False
for prop in oldObj._GetPropertyList():
oldProp = getattr(oldObj, prop.name)
newProp = getattr(newObj, prop.name)
propType = oldObj._GetPropertyInfo(prop.name).type
if not oldProp and not newProp:
continue
elif ((prop.flags & VmomiSupport.F_OPTIONAL) and
self._looseMatch and (not newProp or not oldProp)):
continue
elif not oldProp or not newProp:
__Log__.debug(
'DiffDataObjects: One of the objects has property %s unset'
% prop.name)
return False
bMatch = True
if IsPrimitiveType(oldProp):
bMatch = oldProp == newProp
elif isinstance(oldProp, types.ManagedObject):
bMatch = self.DiffAnyObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
elif isinstance(oldProp, types.DataObject):
if prop.flags & VmomiSupport.F_LINK:
bMatch = oldObj.GetKey() == newObj.GetKey()
LogIf(not bMatch, 'DiffDataObjects: Key match failed %s != %s'
% (oldObj.GetKey(), newObj.GetKey()))
else:
bMatch = self.DiffAnyObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
elif isinstance(oldProp, list):
bMatch = self.DiffArrayObjects(oldProp, newProp, prop.flags
& VmomiSupport.F_LINK)
else:
raise TypeError("Unknown type: "+repr(propType))
if not bMatch:
__Log__.debug('DiffDataObjects: Objects differ in property %s'
% prop.name)
return False
return True
|
python
|
{
"resource": ""
}
|
q22368
|
Cache
|
train
|
def Cache(fn):
""" Function cache decorator """
def fnCache(*args, **kwargs):
""" Cache function """
key = (args and tuple(args) or None,
kwargs and frozenset(kwargs.items()) or None)
if key not in fn.__cached__:
fn.__cached__[key] = cache = fn(*args, **kwargs)
else:
cache = fn.__cached__[key]
return cache
def ResetCache():
""" Reset cache """
fn.__cached__ = {}
setattr(fn, "__cached__", {})
setattr(fn, "__resetcache__", ResetCache)
fnCache.__name__ = fn.__name__
fnCache.__doc__ = fn.__doc__
fnCache.__dict__.update(fn.__dict__)
return fnCache
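
# Throwaway example of the decorator; fib is purely illustrative.
@Cache
def fib(n):
    """ Naive Fibonacci, memoized via fn.__cached__ """
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))          # recursive calls hit the cache, so this stays fast
fib.__resetcache__()    # drops every memoized result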
|
python
|
{
"resource": ""
}
|
q22369
|
DynamicTypeImporter.GetTypeManager
|
train
|
def GetTypeManager(self):
""" Get dynamic type manager """
dynTypeMgr = None
if self.hostSystem:
try:
dynTypeMgr = self.hostSystem.RetrieveDynamicTypeManager()
except vmodl.fault.MethodNotFound as err:
pass
if not dynTypeMgr:
         # Older hosts do not support RetrieveDynamicTypeManager
cmdlineTypesMoId = "ha-dynamic-type-manager"
dynTypeMgr = vmodl.reflect.DynamicTypeManager(cmdlineTypesMoId,
self.stub)
return dynTypeMgr
|
python
|
{
"resource": ""
}
|
q22370
|
DynamicTypeImporter.ImportTypes
|
train
|
def ImportTypes(self, prefix=''):
""" Build dynamic types """
# Use QueryTypeInfo to get all types
dynTypeMgr = self.GetTypeManager()
filterSpec = None
if prefix != '':
filterSpec = vmodl.reflect.DynamicTypeManager.TypeFilterSpec(
typeSubstr=prefix)
allTypes = dynTypeMgr.QueryTypeInfo(filterSpec)
## Convert dynamic types to pyVmomi types
#
DynamicTypeConstructor().CreateTypes(allTypes)
return allTypes
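
# Hedged sketch: the constructor arguments are assumed from the attributes used
# above (self.stub / self.hostSystem) and are not taken from documentation.
# 'si' would come from an earlier SmartConnect against an ESXi host.
importer = DynamicTypeImporter(si._stub)
all_types = importer.ImportTypes(prefix='vim.')
print(len(all_types.managedTypeInfo), "managed types imported")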
|
python
|
{
"resource": ""
}
|
q22371
|
DynamicTypeConstructor.CreateTypes
|
train
|
def CreateTypes(self, allTypes):
"""
Create pyVmomi types from vmodl.reflect.DynamicTypeManager.AllTypeInfo
"""
enumTypes, dataTypes, managedTypes = self._ConvertAllTypes(allTypes)
self._CreateAllTypes(enumTypes, dataTypes, managedTypes)
|
python
|
{
"resource": ""
}
|
q22372
|
DynamicTypeConstructor._ConvertAllTypes
|
train
|
def _ConvertAllTypes(self, allTypes):
""" Convert all dynamic types to pyVmomi type definitions """
# Generate lists good for VmomiSupport.CreateXYZType
enumTypes = self._Filter(self._ConvertEnumType, allTypes.enumTypeInfo)
dataTypes = self._Filter(self._ConvertDataType, allTypes.dataTypeInfo)
managedTypes = self._Filter(self._ConvertManagedType,
allTypes.managedTypeInfo)
retAllTypes = (enumTypes, dataTypes, managedTypes)
return retAllTypes
|
python
|
{
"resource": ""
}
|
q22373
|
DynamicTypeConstructor._CreateAllTypes
|
train
|
def _CreateAllTypes(self, enumTypes, dataTypes, managedTypes):
""" Create pyVmomi types from pyVmomi type definitions """
# Create versions
for typeInfo in managedTypes:
name = typeInfo[0]
version = typeInfo[3]
VmomiSupport.AddVersion(version, '', '1.0', 0, name)
VmomiSupport.AddVersionParent(version, 'vmodl.version.version0')
VmomiSupport.AddVersionParent(version, 'vmodl.version.version1')
VmomiSupport.AddVersionParent(version, version)
# Create partial types
for fn, infos in (VmomiSupport.CreateEnumType, enumTypes), \
(VmomiSupport.CreateDataType, dataTypes), \
(VmomiSupport.CreateManagedType, managedTypes):
for typeInfo in infos:
try:
fn(*typeInfo)
except Exception as err:
#Ignore errors due to duplicate importing
pass
|
python
|
{
"resource": ""
}
|
q22374
|
DynamicTypeConstructor._ConvertAnnotations
|
train
|
def _ConvertAnnotations(self, annotations):
""" Convert annotations to pyVmomi flags """
flags = 0
if annotations:
for annotation in annotations:
flags |= self._mapFlags.get(annotation.name, 0)
return flags
|
python
|
{
"resource": ""
}
|
q22375
|
DynamicTypeConstructor._ConvertParamType
|
train
|
def _ConvertParamType(self, paramType):
"""
Convert vmodl.reflect.DynamicTypeManager.ParamTypeInfo to pyVmomi param
definition
"""
if paramType:
name = paramType.name
version = paramType.version
aType = paramType.type
flags = self._ConvertAnnotations(paramType.annotation)
privId = paramType.privId
param = (name, aType, version, flags, privId)
else:
param = None
return param
|
python
|
{
"resource": ""
}
|
q22376
|
DynamicTypeConstructor._ConvertMethodType
|
train
|
def _ConvertMethodType(self, methodType):
"""
Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method
definition
"""
if methodType:
name = methodType.name
wsdlName = methodType.wsdlName
version = methodType.version
params = self._Filter(self._ConvertParamType, methodType.paramTypeInfo)
privId = methodType.privId
faults = methodType.fault
            # Figure out return info
if methodType.returnTypeInfo:
returnTypeInfo = methodType.returnTypeInfo
retFlags = self._ConvertAnnotations(returnTypeInfo.annotation)
methodRetType = returnTypeInfo.type
else:
retFlags = 0
methodRetType = "void"
if wsdlName.endswith("_Task"):
                # TODO: Need a separate task return type for task, instead of
# hardcode vim.Task as return type
retType = "vim.Task"
else:
retType = methodRetType
retInfo = (retFlags, retType, methodRetType)
method = (name, wsdlName, version, params, retInfo, privId, faults)
else:
method = None
return method
|
python
|
{
"resource": ""
}
|
q22377
|
DynamicTypeConstructor._ConvertManagedPropertyType
|
train
|
def _ConvertManagedPropertyType(self, propType):
"""
Convert vmodl.reflect.DynamicTypeManager.PropertyTypeInfo to pyVmomi
managed property definition
"""
if propType:
name = propType.name
version = propType.version
aType = propType.type
flags = self._ConvertAnnotations(propType.annotation)
privId = propType.privId
prop = (name, aType, version, flags, privId)
else:
prop = None
return prop
|
python
|
{
"resource": ""
}
|
q22378
|
DynamicTypeConstructor._ConvertManagedType
|
train
|
def _ConvertManagedType(self, managedType):
"""
Convert vmodl.reflect.DynamicTypeManager.ManagedTypeInfo to pyVmomi
managed type definition
"""
if managedType:
vmodlName = managedType.name
wsdlName = managedType.wsdlName
version = managedType.version
parent = managedType.base[0]
props = self._Filter(self._ConvertManagedPropertyType, managedType.property)
methods = self._Filter(self._ConvertMethodType, managedType.method)
moType = (vmodlName, wsdlName, parent, version, props, methods)
else:
moType = None
return moType
|
python
|
{
"resource": ""
}
|
q22379
|
DynamicTypeConstructor._ConvertDataPropertyType
|
train
|
def _ConvertDataPropertyType(self, propType):
"""
Convert vmodl.reflect.DynamicTypeManager.PropertyTypeInfo to pyVmomi
data property definition
"""
if propType:
name = propType.name
version = propType.version
aType = propType.type
flags = self._ConvertAnnotations(propType.annotation)
prop = (name, aType, version, flags)
else:
prop = None
return prop
|
python
|
{
"resource": ""
}
|
q22380
|
DynamicTypeConstructor._ConvertDataType
|
train
|
def _ConvertDataType(self, dataType):
"""
Convert vmodl.reflect.DynamicTypeManager.DataTypeInfo to pyVmomi data
type definition
"""
if dataType:
vmodlName = dataType.name
wsdlName = dataType.wsdlName
version = dataType.version
parent = dataType.base[0]
props = self._Filter(self._ConvertDataPropertyType, dataType.property)
doType = (vmodlName, wsdlName, parent, version, props)
else:
doType = None
return doType
|
python
|
{
"resource": ""
}
|
q22381
|
DynamicTypeConstructor._ConvertEnumType
|
train
|
def _ConvertEnumType(self, enumType):
"""
Convert vmodl.reflect.DynamicTypeManager.EnumTypeInfo to pyVmomi enum
type definition
"""
if enumType:
vmodlName = enumType.name
wsdlName = enumType.wsdlName
version = enumType.version
values = enumType.value
enumType = (vmodlName, wsdlName, version, values)
else:
enumType = None
return enumType
|
python
|
{
"resource": ""
}
|
q22382
|
WaitForTask
|
train
|
def WaitForTask(task,
raiseOnError=True,
si=None,
pc=None,
onProgressUpdate=None):
"""
Wait for task to complete.
@type raiseOnError : bool
@param raiseOnError : Any exception thrown is thrown up to the caller
if raiseOnError is set to true.
@type si : ManagedObjectReference to a ServiceInstance.
@param si : ServiceInstance to use. If None, use the
information from the task.
@type pc : ManagedObjectReference to a PropertyCollector.
@param pc : Property collector to use. If None, get it from
the ServiceInstance.
@type onProgressUpdate : callable
@param onProgressUpdate : Callable to call with task progress updates.
For example::
def OnTaskProgressUpdate(task, percentDone):
           print('Task %s is %d%% complete.' % (task, percentDone))
"""
if si is None:
si = vim.ServiceInstance("ServiceInstance", task._stub)
if pc is None:
pc = si.content.propertyCollector
progressUpdater = ProgressUpdater(task, onProgressUpdate)
progressUpdater.Update('created')
filter = CreateFilter(pc, task)
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while state not in (vim.TaskInfo.State.success, vim.TaskInfo.State.error):
try:
version, state = GetTaskStatus(task, version, pc)
progressUpdater.UpdateIfNeeded()
except vmodl.fault.ManagedObjectNotFound as e:
print("Task object has been deleted: %s" % e.obj)
break
filter.Destroy()
if state == "error":
progressUpdater.Update('error: %s' % str(task.info.error))
if raiseOnError:
raise task.info.error
else:
print("Task reported error: " + str(task.info.error))
else:
progressUpdater.Update('completed')
return state
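
# Hedged usage sketch: the host, credentials and inventory path are illustrative
# placeholders; the first child of the root folder is assumed to be a datacenter
# whose vmFolder contains at least one VM.
from pyVim.connect import SmartConnectNoSSL, Disconnect

si = SmartConnectNoSSL(host='esxi01.example.com', user='root', pwd='secret')
vm = si.content.rootFolder.childEntity[0].vmFolder.childEntity[0]
task = vm.PowerOnVM_Task()
state = WaitForTask(task)   # raises task.info.error by default if the task fails
print("power-on finished:", state)
Disconnect(si)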
|
python
|
{
"resource": ""
}
|
q22383
|
WaitForTasks
|
train
|
def WaitForTasks(tasks,
raiseOnError=True,
si=None,
pc=None,
onProgressUpdate=None,
results=None):
"""
    Wait for multiple tasks to complete. Much faster than calling WaitForTask
N times
"""
if not tasks:
return
if si is None:
si = vim.ServiceInstance("ServiceInstance", tasks[0]._stub)
if pc is None:
pc = si.content.propertyCollector
if results is None:
results = []
progressUpdaters = {}
for task in tasks:
progressUpdater = ProgressUpdater(task, onProgressUpdate)
progressUpdater.Update('created')
progressUpdaters[str(task)] = progressUpdater
filter = CreateTasksFilter(pc, tasks)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(progressUpdaters):
update = pc.WaitForUpdates(version)
for filterSet in update.filterSet:
for objSet in filterSet.objectSet:
task = objSet.obj
taskId = str(task)
for change in objSet.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
progressUpdater = progressUpdaters.get(taskId)
if not progressUpdater:
continue
if state == vim.TaskInfo.State.success:
progressUpdater.Update('completed')
progressUpdaters.pop(taskId)
# cache the results, as task objects could expire if one
# of the tasks take a longer time to complete
results.append(task.info.result)
elif state == vim.TaskInfo.State.error:
err = task.info.error
progressUpdater.Update('error: %s' % str(err))
if raiseOnError:
raise err
else:
print("Task %s reported error: %s" % (taskId, str(err)))
progressUpdaters.pop(taskId)
else:
if onProgressUpdate:
progressUpdater.UpdateIfNeeded()
# Move to next version
version = update.version
finally:
if filter:
filter.Destroy()
return
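
# Sketch: power off several VMs in parallel and collect the task results.
# 'vm_folder' is assumed to be a vim.Folder whose childEntity are VirtualMachines.
tasks = [vm.PowerOffVM_Task() for vm in vm_folder.childEntity]
results = []
WaitForTasks(tasks, results=results)
print("collected %d task results" % len(results))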
|
python
|
{
"resource": ""
}
|
q22384
|
CreateTasksFilter
|
train
|
def CreateTasksFilter(pc, tasks):
""" Create property collector filter for tasks """
if not tasks:
return None
# First create the object specification as the task object.
objspecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
# Next, create the property specification as the state.
propspec = vmodl.query.PropertyCollector.PropertySpec(
type=vim.Task, pathSet=[], all=True)
# Create a filter spec with the specified object and property spec.
filterspec = vmodl.query.PropertyCollector.FilterSpec()
filterspec.objectSet = objspecs
filterspec.propSet = [propspec]
# Create the filter
return pc.CreateFilter(filterspec, True)
|
python
|
{
"resource": ""
}
|
q22385
|
CheckForQuestionPending
|
train
|
def CheckForQuestionPending(task):
"""
Check to see if VM needs to ask a question, throw exception
"""
vm = task.info.entity
if vm is not None and isinstance(vm, vim.VirtualMachine):
qst = vm.runtime.question
if qst is not None:
raise TaskBlocked("Task blocked, User Intervention required")
|
python
|
{
"resource": ""
}
|
q22386
|
Adb.cmd
|
train
|
def cmd(self, *args, **kwargs):
'''adb command, add -s serial by default. return the subprocess.Popen object.'''
serial = self.device_serial()
if serial:
if " " in serial: # TODO how to include special chars on command line
serial = "'%s'" % serial
return self.raw_cmd(*["-s", serial] + list(args))
else:
return self.raw_cmd(*args)
|
python
|
{
"resource": ""
}
|
q22387
|
AutomatorServer.sdk_version
|
train
|
def sdk_version(self):
'''sdk version of connected device.'''
if self.__sdk == 0:
try:
self.__sdk = int(self.adb.cmd("shell", "getprop", "ro.build.version.sdk").communicate()[0].decode("utf-8").strip())
except:
pass
return self.__sdk
|
python
|
{
"resource": ""
}
|
q22388
|
AutomatorServer.stop
|
train
|
def stop(self):
'''Stop the rpc server.'''
if self.uiautomator_process and self.uiautomator_process.poll() is None:
res = None
try:
res = urllib2.urlopen(self.stop_uri)
self.uiautomator_process.wait()
except:
self.uiautomator_process.kill()
finally:
if res is not None:
res.close()
self.uiautomator_process = None
try:
out = self.adb.cmd("shell", "ps", "-C", "uiautomator").communicate()[0].decode("utf-8").strip().splitlines()
if out:
index = out[0].split().index("PID")
for line in out[1:]:
if len(line.split()) > index:
self.adb.cmd("shell", "kill", "-9", line.split()[index]).wait()
except:
pass
|
python
|
{
"resource": ""
}
|
q22389
|
AutomatorDevice.click
|
train
|
def click(self, x, y):
'''click at arbitrary coordinates.'''
return self.server.jsonrpc.click(x, y)
|
python
|
{
"resource": ""
}
|
q22390
|
AutomatorDevice.long_click
|
train
|
def long_click(self, x, y):
'''long click at arbitrary coordinates.'''
return self.swipe(x, y, x + 1, y + 1)
|
python
|
{
"resource": ""
}
|
q22391
|
AutomatorDevice.dump
|
train
|
def dump(self, filename=None, compressed=True, pretty=True):
'''dump device window and pull to local file.'''
content = self.server.jsonrpc.dumpWindowHierarchy(compressed, None)
if filename:
with open(filename, "wb") as f:
f.write(content.encode("utf-8"))
if pretty and "\n " not in content:
xml_text = xml.dom.minidom.parseString(content.encode("utf-8"))
content = U(xml_text.toprettyxml(indent=' '))
return content
|
python
|
{
"resource": ""
}
|
q22392
|
AutomatorDevice.screenshot
|
train
|
def screenshot(self, filename, scale=1.0, quality=100):
'''take screenshot.'''
result = self.server.screenshot(filename, scale, quality)
if result:
return result
device_file = self.server.jsonrpc.takeScreenshot("screenshot.png",
scale, quality)
if not device_file:
return None
p = self.server.adb.cmd("pull", device_file, filename)
p.wait()
self.server.adb.cmd("shell", "rm", device_file).wait()
return filename if p.returncode is 0 else None
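
# Hedged end-to-end sketch with the uiautomator Device wrapper; the serial number
# and file name are placeholders.
from uiautomator import Device

d = Device('014E05DE0F02000E')
d.screen.on()
d.press.home()
d.click(540, 960)                      # tap roughly mid-screen on a 1080x1920 panel
d.screenshot('home.png', scale=0.5)    # saved into the local working directory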
|
python
|
{
"resource": ""
}
|
q22393
|
AutomatorDevice.orientation
|
train
|
def orientation(self, value):
'''setter of orientation property.'''
for values in self.__orientation:
if value in values:
# can not set upside-down until api level 18.
self.server.jsonrpc.setOrientation(values[1])
break
else:
raise ValueError("Invalid orientation.")
|
python
|
{
"resource": ""
}
|
q22394
|
AutomatorDeviceUiObject.set_text
|
train
|
def set_text(self, text):
'''set the text field.'''
if text in [None, ""]:
return self.jsonrpc.clearTextField(self.selector) # TODO no return
else:
return self.jsonrpc.setText(self.selector, text)
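
# Selector-based sketch; the resource id and text below are hypothetical.
from uiautomator import Device

d = Device()   # the only attached device
field = d(resourceId='com.example.app:id/username')
field.clear_text()
field.set_text('alice@example.com')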
|
python
|
{
"resource": ""
}
|
q22395
|
AutomatorDeviceObject.child
|
train
|
def child(self, **kwargs):
'''set childSelector.'''
return AutomatorDeviceObject(
self.device,
self.selector.clone().child(**kwargs)
)
|
python
|
{
"resource": ""
}
|
q22396
|
AutomatorDeviceObject.sibling
|
train
|
def sibling(self, **kwargs):
'''set fromParent selector.'''
return AutomatorDeviceObject(
self.device,
self.selector.clone().sibling(**kwargs)
)
|
python
|
{
"resource": ""
}
|
q22397
|
minimize
|
train
|
def minimize(model,
data,
algo,
max_evals,
trials,
functions=None,
rseed=1337,
notebook_name=None,
verbose=True,
eval_space=False,
return_space=False,
keep_temp=False):
"""
Minimize a keras model for given data and implicit hyperparameters.
Parameters
----------
model: A function defining a keras model with hyperas templates, which returns a
valid hyperopt results dictionary, e.g.
return {'loss': -acc, 'status': STATUS_OK}
    data: A parameter-less function that defines and returns all data needed in the above
model definition.
algo: A hyperopt algorithm, like tpe.suggest or rand.suggest
max_evals: Maximum number of optimization runs
trials: A hyperopt trials object, used to store intermediate results for all
optimization runs
rseed: Integer random seed for experiments
notebook_name: If running from an ipython notebook, provide filename (not path)
verbose: Print verbose output
eval_space: Evaluate the best run in the search space such that 'choice's contain actually meaningful values instead of mere indices
return_space: Return the hyperopt search space object (e.g. for further processing) as last return value
keep_temp: Keep temp_model.py file on the filesystem
Returns
-------
If `return_space` is False: A pair consisting of the results dictionary of the best run and the corresponding
keras model.
If `return_space` is True: The pair of best result and corresponding keras model, and the hyperopt search space
"""
best_run, space = base_minimizer(model=model,
data=data,
functions=functions,
algo=algo,
max_evals=max_evals,
trials=trials,
rseed=rseed,
full_model_string=None,
notebook_name=notebook_name,
verbose=verbose,
keep_temp=keep_temp)
best_model = None
for trial in trials:
vals = trial.get('misc').get('vals')
# unpack the values from lists without overwriting the mutable dict within 'trial'
unpacked_vals = unpack_hyperopt_vals(vals)
# identify the best_run (comes with unpacked values from the hyperopt function `base.Trials.argmin`)
if unpacked_vals == best_run and 'model' in trial.get('result').keys():
best_model = trial.get('result').get('model')
if eval_space is True:
# evaluate the search space
best_run = eval_hyperopt_space(space, best_run)
if return_space is True:
# return the space as well
return best_run, best_model, space
else:
# the default case for backwards compatibility with expanded return arguments
return best_run, best_model
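
# Hedged usage sketch, assuming a data() function and a model function written
# with hyperas {{...}} templates (such as the create_model shown further below).
from hyperopt import Trials, tpe

best_run, best_model = minimize(model=create_model,
                                data=data,
                                algo=tpe.suggest,
                                max_evals=5,
                                trials=Trials())
print("best hyperparameter assignment:", best_run)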
|
python
|
{
"resource": ""
}
|
q22398
|
with_line_numbers
|
train
|
def with_line_numbers(code):
"""
Adds line numbers to each line of a source code fragment
Parameters
----------
code : string
        any multiline text, such as (fragments) of source code
Returns
-------
str : string
The input with added <n>: for each line
Example
-------
code = "def do_stuff(x):\n\tprint(x)\n"
with_line_numbers(code)
1: def do_stuff(x):
2: print(x)
3:
"""
    max_number_length = str(len(str(len(code.split("\n")))))
format_str = "{:>" + max_number_length + "d}: {:}"
return "\n".join([format_str.format(line_number + 1, line) for line_number, line in enumerate(code.split("\n"))])
|
python
|
{
"resource": ""
}
|
q22399
|
create_model
|
train
|
def create_model(x_train, y_train, x_test, y_test):
"""
Create your model...
"""
layer_1_size = {{quniform(12, 256, 4)}}
l1_dropout = {{uniform(0.001, 0.7)}}
params = {
'l1_size': layer_1_size,
'l1_dropout': l1_dropout
}
num_classes = 10
model = Sequential()
model.add(Dense(int(layer_1_size), activation='relu'))
model.add(Dropout(l1_dropout))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, verbose=0)
out = {
'loss': -acc,
'score': score,
'status': STATUS_OK,
'model_params': params,
}
# optionally store a dump of your model here so you can get it from the database later
temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
model.save(temp_name)
with open(temp_name, 'rb') as infile:
model_bytes = infile.read()
out['model_serial'] = model_bytes
return out
|
python
|
{
"resource": ""
}
|