| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q22200
|
normalize_per_cell
|
train
|
def normalize_per_cell(
data,
counts_per_cell_after=None,
counts_per_cell=None,
key_n_counts=None,
copy=False,
layers=[],
use_rep=None,
min_counts=1,
) -> Optional[AnnData]:
"""Normalize total counts per cell.
.. warning::
.. deprecated:: 1.3.7
Use :func:`~scanpy.api.pp.normalize_total` instead.
The new function is equivalent to the present
function, except that
* the new function doesn't filter cells based on `min_counts`,
use :func:`~scanpy.api.pp.filter_cells` if filtering is needed.
* some arguments were renamed
* `copy` is replaced by `inplace`
Normalize each cell by total counts over all genes, so that every cell has
the same total count after normalization.
Similar functions are used, for example, by Seurat [Satija15]_, Cell Ranger
[Zheng17]_ or SPRING [Weinreb17]_.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
counts_per_cell_after : `float` or `None`, optional (default: `None`)
If `None`, after normalization, each cell has a total count equal
to the median of the *counts_per_cell* before normalization.
counts_per_cell : `np.array`, optional (default: `None`)
Precomputed counts per cell.
key_n_counts : `str`, optional (default: `'n_counts'`)
Name of the field in `adata.obs` where the total counts per cell are
stored.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
min_counts : `int`, optional (default: 1)
Cells with counts less than `min_counts` are filtered out during
normalization.
Returns
-------
Returns or updates `adata` with normalized version of the original
`adata.X`, depending on `copy`.
Examples
--------
    >>> adata = AnnData(
    ...     data=np.array([[1, 0], [3, 0], [5, 6]]))
>>> print(adata.X.sum(axis=1))
[ 1. 3. 11.]
>>> sc.pp.normalize_per_cell(adata)
>>> print(adata.obs)
>>> print(adata.X.sum(axis=1))
n_counts
0 1.0
1 3.0
2 11.0
[ 3. 3. 3.]
    >>> sc.pp.normalize_per_cell(adata, counts_per_cell_after=1,
    ...                          key_n_counts='n_counts2')
>>> print(adata.obs)
>>> print(adata.X.sum(axis=1))
n_counts n_counts2
0 1.0 3.0
1 3.0 3.0
2 11.0 3.0
[ 1. 1. 1.]
"""
if key_n_counts is None: key_n_counts = 'n_counts'
if isinstance(data, AnnData):
logg.msg('normalizing by total count per cell', r=True)
adata = data.copy() if copy else data
if counts_per_cell is None:
cell_subset, counts_per_cell = materialize_as_ndarray(
filter_cells(adata.X, min_counts=min_counts))
adata.obs[key_n_counts] = counts_per_cell
adata._inplace_subset_obs(cell_subset)
counts_per_cell=counts_per_cell[cell_subset]
normalize_per_cell(adata.X, counts_per_cell_after, counts_per_cell)
layers = adata.layers.keys() if layers == 'all' else layers
if use_rep == 'after':
after = counts_per_cell_after
elif use_rep == 'X':
after = np.median(counts_per_cell[cell_subset])
elif use_rep is None:
after = None
else: raise ValueError('use_rep should be "after", "X" or None')
for layer in layers:
subset, counts = filter_cells(adata.layers[layer],
min_counts=min_counts)
temp = normalize_per_cell(adata.layers[layer], after, counts, copy=True)
adata.layers[layer] = temp
logg.msg(' finished', t=True, end=': ')
logg.msg('normalized adata.X and added', no_indent=True)
logg.msg(' \'{}\', counts per cell before normalization (adata.obs)'
.format(key_n_counts))
return adata if copy else None
# proceed with data matrix
X = data.copy() if copy else data
if counts_per_cell is None:
        if not copy:
raise ValueError('Can only be run with copy=True')
cell_subset, counts_per_cell = filter_cells(X, min_counts=min_counts)
X = X[cell_subset]
counts_per_cell = counts_per_cell[cell_subset]
if counts_per_cell_after is None:
counts_per_cell_after = np.median(counts_per_cell)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
counts_per_cell += counts_per_cell == 0
counts_per_cell /= counts_per_cell_after
if not issparse(X): X /= materialize_as_ndarray(counts_per_cell[:, np.newaxis])
else: sparsefuncs.inplace_row_scale(X, 1/counts_per_cell)
return X if copy else None
|
python
|
{
"resource": ""
}
|
q22201
|
scale
|
train
|
def scale(data, zero_center=True, max_value=None, copy=False) -> Optional[AnnData]:
"""Scale data to unit variance and zero mean.
.. note::
Variables (genes) that do not display any variation (are constant across
all observations) are retained and set to 0 during this operation. In
the future, they might be set to NaNs.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
    zero_center : `bool`, optional (default: `True`)
        If `False`, omit zero-centering variables, which allows handling sparse
        input efficiently.
max_value : `float` or `None`, optional (default: `None`)
Clip (truncate) to this value after scaling. If `None`, do not clip.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
Depending on `copy` returns or updates `adata` with a scaled `adata.X`.
"""
if isinstance(data, AnnData):
adata = data.copy() if copy else data
# need to add the following here to make inplace logic work
if zero_center and issparse(adata.X):
logg.msg(
'... scale_data: as `zero_center=True`, sparse input is '
'densified and may lead to large memory consumption')
adata.X = adata.X.toarray()
scale(adata.X, zero_center=zero_center, max_value=max_value, copy=False)
return adata if copy else None
X = data.copy() if copy else data # proceed with the data matrix
zero_center = zero_center if zero_center is not None else False if issparse(X) else True
if not zero_center and max_value is not None:
logg.msg(
'... scale_data: be careful when using `max_value` without `zero_center`',
v=4)
if max_value is not None:
logg.msg('... clipping at max_value', max_value)
if zero_center and issparse(X):
logg.msg('... scale_data: as `zero_center=True`, sparse input is '
'densified and may lead to large memory consumption, returning copy',
v=4)
X = X.toarray()
copy = True
_scale(X, zero_center)
if max_value is not None: X[X > max_value] = max_value
return X if copy else None
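A minimal usage sketch, assuming scanpy is installed and this function is exposed as `sc.pp.scale`:
import numpy as np
import scanpy as sc
from anndata import AnnData

adata = AnnData(np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 12.0]]))
sc.pp.scale(adata)                                    # in place: per-gene zero mean, unit variance
print(adata.X.mean(axis=0))                           # approximately [0. 0.]
clipped = sc.pp.scale(adata, max_value=2, copy=True)  # returns a clipped copy, original unchanged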
|
python
|
{
"resource": ""
}
|
q22202
|
subsample
|
train
|
def subsample(data, fraction=None, n_obs=None, random_state=0, copy=False) -> Optional[AnnData]:
"""Subsample to a fraction of the number of observations.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
fraction : `float` in [0, 1] or `None`, optional (default: `None`)
Subsample to this `fraction` of the number of observations.
n_obs : `int` or `None`, optional (default: `None`)
Subsample to this number of observations.
    random_state : `int` or `None`, optional (default: 0)
        Random seed for subsampling.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
Returns `X[obs_indices], obs_indices` if data is array-like, otherwise
subsamples the passed :class:`~anndata.AnnData` (`copy == False`) or
returns a subsampled copy of it (`copy == True`).
"""
np.random.seed(random_state)
old_n_obs = data.n_obs if isinstance(data, AnnData) else data.shape[0]
if n_obs is not None:
new_n_obs = n_obs
elif fraction is not None:
if fraction > 1 or fraction < 0:
raise ValueError('`fraction` needs to be within [0, 1], not {}'
.format(fraction))
new_n_obs = int(fraction * old_n_obs)
logg.msg('... subsampled to {} data points'.format(new_n_obs))
else:
raise ValueError('Either pass `n_obs` or `fraction`.')
obs_indices = np.random.choice(old_n_obs, size=new_n_obs, replace=False)
if isinstance(data, AnnData):
adata = data.copy() if copy else data
adata._inplace_subset_obs(obs_indices)
return adata if copy else None
else:
X = data
return X[obs_indices], obs_indices
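A short sketch of both call signatures, assuming `sc.pp.subsample`: an AnnData is subsampled in place (or copied), while an array-like input returns the subsampled matrix together with the chosen indices.
import numpy as np
import scanpy as sc
from anndata import AnnData

adata = AnnData(np.random.rand(100, 5))
sc.pp.subsample(adata, fraction=0.1)             # in place, keeps 10 observations
print(adata.n_obs)                               # 10

X = np.random.rand(100, 5)
X_sub, obs_indices = sc.pp.subsample(X, n_obs=20)
print(X_sub.shape, obs_indices.shape)            # (20, 5) (20,)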
|
python
|
{
"resource": ""
}
|
q22203
|
downsample_counts
|
train
|
def downsample_counts(
adata: AnnData,
counts_per_cell: Optional[Union[int, Collection[int]]] = None,
total_counts: Optional[int] = None,
random_state: Optional[int] = 0,
replace: bool = False,
copy: bool = False,
) -> Optional[AnnData]:
"""Downsample counts from count matrix.
    If `counts_per_cell` is specified, each cell will be downsampled. If
    `total_counts` is specified, the expression matrix will be downsampled to
    contain at most `total_counts`.
Parameters
----------
adata
Annotated data matrix.
counts_per_cell
        Target total counts per cell. If a cell has more than `counts_per_cell`
        counts, it will be downsampled to this number. Resulting counts can be
        specified on a per-cell basis by passing an array. Should be an integer
        or an integer ndarray with the same length as the number of observations.
total_counts
Target total counts. If the count matrix has more than `total_counts`
it will be downsampled to have this number.
random_state
Random seed for subsampling.
replace
Whether to sample the counts with replacement.
copy
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
Depending on `copy` returns or updates an `adata` with downsampled `.X`.
"""
# This logic is all dispatch
total_counts_call = total_counts is not None
counts_per_cell_call = counts_per_cell is not None
if total_counts_call is counts_per_cell_call:
raise ValueError("Must specify exactly one of `total_counts` or `counts_per_cell`.")
if copy:
adata = adata.copy()
adata.X = adata.X.astype(np.integer) # Numba doesn't want floats
if total_counts_call:
adata.X = _downsample_total_counts(adata.X, total_counts, random_state, replace)
elif counts_per_cell_call:
adata.X = _downsample_per_cell(adata.X, counts_per_cell, random_state, replace)
if copy:
return adata
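A minimal sketch, assuming `sc.pp.downsample_counts` and an integer count matrix (exactly one of `counts_per_cell` or `total_counts` is allowed per call):
import numpy as np
import scanpy as sc
from anndata import AnnData

adata = AnnData(np.random.randint(0, 20, size=(50, 10)))
sc.pp.downsample_counts(adata, counts_per_cell=30)         # cap every cell at 30 counts, in place
print(adata.X.sum(axis=1).max())                           # <= 30

adata2 = sc.pp.downsample_counts(adata, total_counts=500, copy=True)
print(adata2.X.sum())                                      # <= 500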
|
python
|
{
"resource": ""
}
|
q22204
|
_downsample_array
|
train
|
def _downsample_array(col: np.array, target: int, random_state: int=0,
replace: bool = True, inplace: bool=False):
"""
Evenly reduce counts in cell to target amount.
This is an internal function and has some restrictions:
    * `dtype` of col must be an integer (i.e. satisfy issubclass(col.dtype.type, np.integer))
    * `target` must not exceed the total counts in the cell
"""
np.random.seed(random_state)
cumcounts = col.cumsum()
if inplace:
col[:] = 0
else:
col = np.zeros_like(col)
total = cumcounts[-1]
sample = np.random.choice(total, target, replace=replace)
sample.sort()
geneptr = 0
for count in sample:
while count >= cumcounts[geneptr]:
geneptr += 1
col[geneptr] += 1
return col
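A tiny worked example of this internal helper (hypothetical direct call): the output always sums to `target`, and with `replace=False` no gene ends up with more counts than it started with.
import numpy as np

col = np.array([5, 0, 3, 2])                              # one cell, 4 genes, 10 counts total
down = _downsample_array(col, target=6, random_state=0, replace=False)
print(down.sum())                                          # 6
print(down)                                                # e.g. [3 0 2 1]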
|
python
|
{
"resource": ""
}
|
q22205
|
_sec_to_str
|
train
|
def _sec_to_str(t):
"""Format time in seconds.
Parameters
----------
t : int
Time in seconds.
"""
from functools import reduce
return "%d:%02d:%02d.%02d" % \
reduce(lambda ll, b: divmod(ll[0], b) + ll[1:],
[(t*100,), 100, 60, 60])
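For example, the output format is H:MM:SS.hh (hundredths of a second):
>>> _sec_to_str(3661.5)
'1:01:01.50'
>>> _sec_to_str(75)
'0:01:15.00'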
|
python
|
{
"resource": ""
}
|
q22206
|
paga_degrees
|
train
|
def paga_degrees(adata) -> List[int]:
"""Compute the degree of each node in the abstracted graph.
Parameters
----------
adata : AnnData
Annotated data matrix.
Returns
-------
List of degrees for each node.
"""
import networkx as nx
g = nx.Graph(adata.uns['paga']['connectivities'])
degrees = [d for _, d in g.degree(weight='weight')]
return degrees
|
python
|
{
"resource": ""
}
|
q22207
|
paga_expression_entropies
|
train
|
def paga_expression_entropies(adata) -> List[float]:
"""Compute the median expression entropy for each node-group.
Parameters
----------
adata : AnnData
Annotated data matrix.
Returns
-------
Entropies of median expressions for each node.
"""
from scipy.stats import entropy
groups_order, groups_masks = utils.select_groups(
adata, key=adata.uns['paga']['groups'])
entropies = []
for mask in groups_masks:
X_mask = adata.X[mask].todense()
x_median = np.nanmedian(X_mask, axis=1,overwrite_input=True)
x_probs = (x_median - np.nanmin(x_median)) / (np.nanmax(x_median) - np.nanmin(x_median))
entropies.append(entropy(x_probs))
return entropies
|
python
|
{
"resource": ""
}
|
q22208
|
_calc_density
|
train
|
def _calc_density(
x: np.ndarray,
y: np.ndarray,
):
"""
Function to calculate the density of cells in an embedding.
"""
# Calculate the point density
xy = np.vstack([x,y])
z = gaussian_kde(xy)(xy)
min_z = np.min(z)
max_z = np.max(z)
# Scale between 0 and 1
scaled_z = (z-min_z)/(max_z-min_z)
return(scaled_z)
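A small sketch of a direct call (here `gaussian_kde` is assumed to be `scipy.stats.gaussian_kde`, as imported by the plotting module):
import numpy as np
from scipy.stats import gaussian_kde

x = np.random.normal(size=200)
y = np.random.normal(size=200)
dens = _calc_density(x, y)
print(dens.min(), dens.max())   # 0.0 1.0 after min-max scaling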
|
python
|
{
"resource": ""
}
|
q22209
|
read_10x_h5
|
train
|
def read_10x_h5(filename, genome=None, gex_only=True) -> AnnData:
"""Read 10x-Genomics-formatted hdf5 file.
Parameters
----------
filename : `str` | :class:`~pathlib.Path`
Filename.
    genome : `str`, optional (default: ``None``)
        Filter expression to genes within this genome. For legacy 10x h5
        files, this must be provided if the data contains more than one genome.
gex_only : `bool`, optional (default: `True`)
Only keep 'Gene Expression' data and ignore other feature types,
e.g. 'Antibody Capture', 'CRISPR Guide Capture', or 'Custom'
Returns
-------
    Annotated data matrix, where observations/cells are named by their
    barcode and variables/genes by gene name. The data matrix is stored in
    `adata.X`, cell names in `adata.obs_names` and gene names in
    `adata.var_names`. The gene IDs are stored in `adata.var['gene_ids']`.
    The feature types are stored in `adata.var['feature_types']`.
"""
logg.info('reading', filename, r=True, end=' ')
with tables.open_file(str(filename), 'r') as f:
v3 = '/matrix' in f
if v3:
adata = _read_v3_10x_h5(filename)
if genome:
if genome not in adata.var['genome'].values:
raise ValueError(
"Could not find data corresponding to genome '{genome}' in '{filename}'. "
"Available genomes are: {avail}."
.format(
genome=genome, filename=filename,
avail=list(adata.var["genome"].unique()),
)
)
adata = adata[:, list(map(lambda x: x == str(genome), adata.var['genome']))]
if gex_only:
adata = adata[:, list(map(lambda x: x == 'Gene Expression', adata.var['feature_types']))]
return adata
else:
return _read_legacy_10x_h5(filename, genome=genome)
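A usage sketch with a hypothetical file path, assuming the function is exposed as `sc.read_10x_h5`:
import scanpy as sc

# the path is hypothetical; any Cell Ranger v2/v3 .h5 matrix works
adata = sc.read_10x_h5('filtered_feature_bc_matrix.h5', gex_only=True)
print(adata)   # n_obs × n_vars, with var['gene_ids'] (and 'feature_types' for v3 files)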
|
python
|
{
"resource": ""
}
|
q22210
|
_read_legacy_10x_h5
|
train
|
def _read_legacy_10x_h5(filename, genome=None):
"""
Read hdf5 file from Cell Ranger v2 or earlier versions.
"""
with tables.open_file(str(filename), 'r') as f:
try:
children = [x._v_name for x in f.list_nodes(f.root)]
if not genome:
if len(children) > 1:
raise ValueError(
"'{filename}' contains more than one genome. For legacy 10x h5 "
"files you must specify the genome if more than one is present. "
"Available genomes are: {avail}"
.format(filename=filename, avail=children)
)
genome = children[0]
elif genome not in children:
raise ValueError(
"Could not find genome '{genome}' in '{filename}'. "
"Available genomes are: {avail}"
.format(
genome=genome, filename=str(filename),
avail=children,
)
)
dsets = {}
for node in f.walk_nodes('/' + genome, 'Array'):
dsets[node.name] = node.read()
# AnnData works with csr matrices
# 10x stores the transposed data, so we do the transposition right away
from scipy.sparse import csr_matrix
M, N = dsets['shape']
data = dsets['data']
if dsets['data'].dtype == np.dtype('int32'):
data = dsets['data'].view('float32')
data[:] = dsets['data']
matrix = csr_matrix((data, dsets['indices'], dsets['indptr']),
shape=(N, M))
            # the csc matrix is automatically the transposed csr matrix
            # as scanpy expects it, so no further transposition is needed
adata = AnnData(matrix,
{'obs_names': dsets['barcodes'].astype(str)},
{'var_names': dsets['gene_names'].astype(str),
'gene_ids': dsets['genes'].astype(str)})
logg.info(t=True)
return adata
except KeyError:
raise Exception('File is missing one or more required datasets.')
|
python
|
{
"resource": ""
}
|
q22211
|
_read_v3_10x_h5
|
train
|
def _read_v3_10x_h5(filename):
"""
Read hdf5 file from Cell Ranger v3 or later versions.
"""
with tables.open_file(str(filename), 'r') as f:
try:
dsets = {}
for node in f.walk_nodes('/matrix', 'Array'):
dsets[node.name] = node.read()
from scipy.sparse import csr_matrix
M, N = dsets['shape']
data = dsets['data']
if dsets['data'].dtype == np.dtype('int32'):
data = dsets['data'].view('float32')
data[:] = dsets['data']
matrix = csr_matrix((data, dsets['indices'], dsets['indptr']),
shape=(N, M))
adata = AnnData(matrix,
{'obs_names': dsets['barcodes'].astype(str)},
{'var_names': dsets['name'].astype(str),
'gene_ids': dsets['id'].astype(str),
'feature_types': dsets['feature_type'].astype(str),
'genome': dsets['genome'].astype(str)})
logg.info(t=True)
return adata
except KeyError:
raise Exception('File is missing one or more required datasets.')
|
python
|
{
"resource": ""
}
|
q22212
|
read_10x_mtx
|
train
|
def read_10x_mtx(path, var_names='gene_symbols', make_unique=True, cache=False, gex_only=True) -> AnnData:
"""Read 10x-Genomics-formatted mtx directory.
Parameters
----------
path : `str`
Path to directory for `.mtx` and `.tsv` files,
e.g. './filtered_gene_bc_matrices/hg19/'.
var_names : {'gene_symbols', 'gene_ids'}, optional (default: 'gene_symbols')
The variables index.
make_unique : `bool`, optional (default: `True`)
Whether to make the variables index unique by appending '-1',
'-2' etc. or not.
cache : `bool`, optional (default: `False`)
If `False`, read from source, if `True`, read from fast 'h5ad' cache.
gex_only : `bool`, optional (default: `True`)
Only keep 'Gene Expression' data and ignore other feature types,
e.g. 'Antibody Capture', 'CRISPR Guide Capture', or 'Custom'
Returns
-------
An :class:`~anndata.AnnData` object
"""
path = Path(path)
genefile_exists = (path / 'genes.tsv').is_file()
read = _read_legacy_10x_mtx if genefile_exists else _read_v3_10x_mtx
adata = read(
str(path),
var_names=var_names,
make_unique=make_unique,
cache=cache,
)
if genefile_exists or not gex_only:
return adata
else:
gex_rows = list(map(lambda x: x == 'Gene Expression', adata.var['feature_types']))
return adata[:, gex_rows]
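A usage sketch with a hypothetical directory, assuming the function is exposed as `sc.read_10x_mtx`; the directory must contain the matrix, barcodes and genes/features files:
import scanpy as sc

adata = sc.read_10x_mtx('filtered_gene_bc_matrices/hg19/',  # hypothetical path
                        var_names='gene_symbols', cache=True)
print(adata.var['gene_ids'].head())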
|
python
|
{
"resource": ""
}
|
q22213
|
_read_legacy_10x_mtx
|
train
|
def _read_legacy_10x_mtx(path, var_names='gene_symbols', make_unique=True, cache=False):
"""
Read mex from output from Cell Ranger v2 or earlier versions
"""
path = Path(path)
adata = read(path / 'matrix.mtx', cache=cache).T # transpose the data
genes = pd.read_csv(path / 'genes.tsv', header=None, sep='\t')
if var_names == 'gene_symbols':
var_names = genes[1]
if make_unique:
var_names = anndata.utils.make_index_unique(pd.Index(var_names))
adata.var_names = var_names
adata.var['gene_ids'] = genes[0].values
elif var_names == 'gene_ids':
adata.var_names = genes[0]
adata.var['gene_symbols'] = genes[1].values
else:
raise ValueError('`var_names` needs to be \'gene_symbols\' or \'gene_ids\'')
adata.obs_names = pd.read_csv(path / 'barcodes.tsv', header=None)[0]
return adata
|
python
|
{
"resource": ""
}
|
q22214
|
read_params
|
train
|
def read_params(filename, asheader=False, verbosity=0) -> Dict[str, Union[int, float, bool, str, None]]:
"""Read parameter dictionary from text file.
Assumes that parameters are specified in the format:
par1 = value1
par2 = value2
Comments that start with '#' are allowed.
Parameters
----------
filename : str, Path
Filename of data file.
asheader : bool, optional
Read the dictionary from the header (comment section) of a file.
Returns
-------
Dictionary that stores parameters.
"""
filename = str(filename) # allow passing pathlib.Path objects
from collections import OrderedDict
params = OrderedDict([])
for line in open(filename):
if '=' in line:
if not asheader or line.startswith('#'):
line = line[1:] if line.startswith('#') else line
key, val = line.split('=')
key = key.strip()
val = val.strip()
params[key] = convert_string(val)
return params
|
python
|
{
"resource": ""
}
|
q22215
|
write_params
|
train
|
def write_params(path, *args, **dicts):
"""Write parameters to file, so that it's readable by read_params.
Uses INI file format.
"""
path = Path(path)
if not path.parent.is_dir():
path.parent.mkdir(parents=True)
if len(args) == 1:
d = args[0]
with path.open('w') as f:
for key in d:
f.write(key + ' = ' + str(d[key]) + '\n')
else:
with path.open('w') as f:
for k, d in dicts.items():
f.write('[' + k + ']\n')
for key, val in d.items():
f.write(key + ' = ' + str(val) + '\n')
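A round-trip sketch with `read_params` above (the file name is arbitrary; type conversion relies on the module's `convert_string` helper):
write_params('run_params.txt', {'n_pcs': 50, 'resolution': 1.0, 'use_raw': True})
params = read_params('run_params.txt')
print(params)   # OrderedDict([('n_pcs', 50), ('resolution', 1.0), ('use_raw', True)])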
|
python
|
{
"resource": ""
}
|
q22216
|
get_params_from_list
|
train
|
def get_params_from_list(params_list):
"""Transform params list to dictionary.
"""
params = {}
for i in range(0, len(params_list)):
if '=' not in params_list[i]:
try:
if not isinstance(params[key], list): params[key] = [params[key]]
params[key] += [params_list[i]]
except KeyError:
raise ValueError('Pass parameters like `key1=a key2=b c d key3=...`.')
else:
key_val = params_list[i].split('=')
key, val = key_val
params[key] = convert_string(val)
return params
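For example, values without a `=` are appended to the previous key as a list:
>>> get_params_from_list(['key1=a', 'key2=b', 'c', 'd', 'key3=1'])
{'key1': 'a', 'key2': ['b', 'c', 'd'], 'key3': 1}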
|
python
|
{
"resource": ""
}
|
q22217
|
_slugify
|
train
|
def _slugify(path: Union[str, PurePath]) -> str:
"""Make a path into a filename."""
if not isinstance(path, PurePath):
path = PurePath(path)
parts = list(path.parts)
if parts[0] == '/':
parts.pop(0)
elif len(parts[0]) == 3 and parts[0][1:] == ':\\':
parts[0] = parts[0][0] # C:\ → C
filename = '-'.join(parts)
assert '/' not in filename, filename
assert not filename[1:].startswith(':'), filename
return filename
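For example, on a POSIX system:
>>> _slugify('/home/user/data/pbmc.h5ad')
'home-user-data-pbmc.h5ad'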
|
python
|
{
"resource": ""
}
|
q22218
|
_read_softgz
|
train
|
def _read_softgz(filename) -> AnnData:
"""Read a SOFT format data file.
The SOFT format is documented here
http://www.ncbi.nlm.nih.gov/geo/info/soft2.html.
Notes
-----
The function is based on a script by Kerby Shedden.
http://dept.stat.lsa.umich.edu/~kshedden/Python-Workshop/gene_expression_comparison.html
"""
filename = str(filename) # allow passing pathlib.Path objects
import gzip
with gzip.open(filename, mode='rt') as file:
# The header part of the file contains information about the
# samples. Read that information first.
samples_info = {}
for line in file:
if line.startswith("!dataset_table_begin"):
break
elif line.startswith("!subset_description"):
subset_description = line.split("=")[1].strip()
elif line.startswith("!subset_sample_id"):
subset_ids = line.split("=")[1].split(",")
subset_ids = [x.strip() for x in subset_ids]
for k in subset_ids:
samples_info[k] = subset_description
# Next line is the column headers (sample id's)
sample_names = file.readline().strip().split("\t")
# The column indices that contain gene expression data
I = [i for i, x in enumerate(sample_names) if x.startswith("GSM")]
# Restrict the column headers to those that we keep
sample_names = [sample_names[i] for i in I]
# Get a list of sample labels
groups = [samples_info[k] for k in sample_names]
# Read the gene expression data as a list of lists, also get the gene
# identifiers
gene_names, X = [], []
for line in file:
# This is what signals the end of the gene expression data
# section in the file
if line.startswith("!dataset_table_end"):
break
V = line.split("\t")
# Extract the values that correspond to gene expression measures
# and convert the strings to numbers
x = [float(V[i]) for i in I]
X.append(x)
gene_names.append(V[1])
# Convert the Python list of lists to a Numpy array and transpose to match
    # the Scanpy convention of storing samples in rows and variables in columns.
X = np.array(X).T
obs = pd.DataFrame({"groups": groups}, index=sample_names)
var = pd.DataFrame(index=gene_names)
return AnnData(X=X, obs=obs, var=var)
|
python
|
{
"resource": ""
}
|
q22219
|
convert_bool
|
train
|
def convert_bool(string):
"""Check whether string is boolean.
"""
if string == 'True':
return True, True
elif string == 'False':
return True, False
else:
return False, False
|
python
|
{
"resource": ""
}
|
q22220
|
convert_string
|
train
|
def convert_string(string):
"""Convert string to int, float or bool.
"""
if is_int(string):
return int(string)
elif is_float(string):
return float(string)
elif convert_bool(string)[0]:
return convert_bool(string)[1]
elif string == 'None':
return None
else:
return string
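Assuming the `is_int` and `is_float` helpers behave as their names suggest, the conversion works like this:
>>> [convert_string(s) for s in ['2', '0.5', 'True', 'None', 'leiden']]
[2, 0.5, True, None, 'leiden']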
|
python
|
{
"resource": ""
}
|
q22221
|
get_used_files
|
train
|
def get_used_files():
"""Get files used by processes with name scanpy."""
import psutil
loop_over_scanpy_processes = (proc for proc in psutil.process_iter()
if proc.name() == 'scanpy')
filenames = []
for proc in loop_over_scanpy_processes:
try:
flist = proc.open_files()
for nt in flist:
filenames.append(nt.path)
# This catches a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess as err:
pass
return set(filenames)
|
python
|
{
"resource": ""
}
|
q22222
|
check_datafile_present_and_download
|
train
|
def check_datafile_present_and_download(path, backup_url=None):
"""Check whether the file is present, otherwise download.
"""
path = Path(path)
if path.is_file(): return True
if backup_url is None: return False
logg.info('try downloading from url\n' + backup_url + '\n' +
'... this may take a while but only happens once')
if not path.parent.is_dir():
logg.info('creating directory', str(path.parent) + '/', 'for saving data')
path.parent.mkdir(parents=True)
from urllib.request import urlretrieve
urlretrieve(backup_url, str(path), reporthook=download_progress)
logg.info('')
return True
|
python
|
{
"resource": ""
}
|
q22223
|
is_valid_filename
|
train
|
def is_valid_filename(filename, return_ext=False):
"""Check whether the argument is a filename."""
ext = Path(filename).suffixes
if len(ext) > 2:
        logg.warn('Your filename has more than two extensions: {}.\n'
                  'Only considering the last two: {}.'.format(ext, ext[-2:]))
ext = ext[-2:]
# cases for gzipped/bzipped text files
if len(ext) == 2 and ext[0][1:] in text_exts and ext[1][1:] in ('gz', 'bz2'):
return ext[0][1:] if return_ext else True
elif ext and ext[-1][1:] in avail_exts:
return ext[-1][1:] if return_ext else True
elif ''.join(ext) == '.soft.gz':
return 'soft.gz' if return_ext else True
elif ''.join(ext) == '.mtx.gz':
return 'mtx.gz' if return_ext else True
else:
if return_ext:
raise ValueError('"{}" does not end on a valid extension.\n'
'Please, provide one of the available extensions.\n{}\n'
'Text files with .gz and .bz2 extensions are also supported.'
.format(filename, avail_exts))
else:
return False
|
python
|
{
"resource": ""
}
|
q22224
|
correlation_matrix
|
train
|
def correlation_matrix(adata,groupby=None ,group=None, corr_matrix=None, annotation_key=None):
"""Plot correlation matrix.
    Plot a correlation matrix for genes stored in the sample annotation using rank_genes_groups.py
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
groupby : `str`, optional (default: None)
If specified, searches data_annotation for correlation_matrix+groupby+str(group)
group : int
Identifier of the group (necessary if and only if groupby is also specified)
corr_matrix : DataFrame, optional (default: None)
Correlation matrix as a DataFrame (annotated axis) that can be transferred manually if wanted
annotation_key: `str`, optional (default: None)
If specified, looks in data annotation for this key.
"""
    # TODO: At the moment, only works for int identifiers
    if corr_matrix is None:
        # This will produce an error if the annotation doesn't exist, which is okay
if annotation_key is None:
if groupby is None:
corr_matrix = adata.uns['Correlation_matrix']
else:
corr_matrix= adata.uns['Correlation_matrix' + groupby+ str(group)]
# Throws error if does not exist
else:
corr_matrix = adata.uns[annotation_key]
# Set up mask
    mask = np.zeros_like(corr_matrix, dtype=bool)
di = np.diag_indices(len(corr_matrix.axes[0]))
mask[di] = True
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(240, 10, as_cmap=True)
sns.heatmap(corr_matrix, mask=mask, cmap=cmap,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
if annotation_key is None:
if groupby is None:
plt.title('Correlation Matrix')
else:
            plt.title('Correlation Matrix for Group ' + str(group) + ' in ' + groupby)
else:
        plt.title('Correlation Matrix for ' + annotation_key)
plt.show()
|
python
|
{
"resource": ""
}
|
q22225
|
tqdm_hook
|
train
|
def tqdm_hook(t):
"""
Wraps tqdm instance.
Don't forget to close() or __exit__()
the tqdm instance once you're done with it (easiest using `with` syntax).
Example
-------
>>> with tqdm(...) as t:
    ...     reporthook = tqdm_hook(t)
... urllib.urlretrieve(..., reporthook=reporthook)
"""
last_b = [0]
def update_to(b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to
|
python
|
{
"resource": ""
}
|
q22226
|
matrix
|
train
|
def matrix(matrix, xlabel=None, ylabel=None, xticks=None, yticks=None,
title=None, colorbar_shrink=0.5, color_map=None, show=None,
save=None, ax=None):
"""Plot a matrix."""
if ax is None: ax = pl.gca()
img = ax.imshow(matrix, cmap=color_map)
if xlabel is not None: ax.set_xlabel(xlabel)
if ylabel is not None: ax.set_ylabel(ylabel)
if title is not None: ax.set_title(title)
if xticks is not None:
ax.set_xticks(range(len(xticks)), xticks, rotation='vertical')
if yticks is not None:
ax.set_yticks(range(len(yticks)), yticks)
pl.colorbar(img, shrink=colorbar_shrink, ax=ax) # need a figure instance for colorbar
savefig_or_show('matrix', show=show, save=save)
|
python
|
{
"resource": ""
}
|
q22227
|
timeseries
|
train
|
def timeseries(X, **kwargs):
"""Plot X. See timeseries_subplot."""
pl.figure(figsize=(2*rcParams['figure.figsize'][0], rcParams['figure.figsize'][1]),
subplotpars=sppars(left=0.12, right=0.98, bottom=0.13))
timeseries_subplot(X, **kwargs)
|
python
|
{
"resource": ""
}
|
q22228
|
timeseries_subplot
|
train
|
def timeseries_subplot(X,
time=None,
color=None,
var_names=(),
highlightsX=(),
xlabel='',
ylabel='gene expression',
yticks=None,
xlim=None,
legend=True,
palette=None,
color_map='viridis'):
"""Plot X.
Parameters
----------
X : np.ndarray
Call this with:
X with one column, color categorical.
X with one column, color continuous.
X with n columns, color is of length n.
"""
    # define this unconditionally; otherwise `use_color_map` is undefined below when color is None
    use_color_map = color is not None and (
        isinstance(color[0], float) or isinstance(color[0], np.float32))
    palette = default_palette(palette)
x_range = np.arange(X.shape[0]) if time is None else time
if X.ndim == 1: X = X[:, None]
if X.shape[1] > 1:
colors = palette[:X.shape[1]].by_key()['color']
subsets = [(x_range, X[:, i]) for i in range(X.shape[1])]
elif use_color_map:
colors = [color]
subsets = [(x_range, X[:, 0])]
else:
levels, _ = np.unique(color, return_inverse=True)
colors = np.array(palette[:len(levels)].by_key()['color'])
subsets = [(x_range[color == l], X[color == l, :]) for l in levels]
for i, (x, y) in enumerate(subsets):
pl.scatter(
x, y,
marker='.',
edgecolor='face',
s=rcParams['lines.markersize'],
c=colors[i],
label=var_names[i] if len(var_names) > 0 else '',
cmap=color_map,
rasterized=settings._vector_friendly)
ylim = pl.ylim()
for ih, h in enumerate(highlightsX):
pl.plot([h, h], [ylim[0], ylim[1]], '--', color='black')
pl.ylim(ylim)
if xlim is not None:
pl.xlim(xlim)
pl.xlabel(xlabel)
pl.ylabel(ylabel)
if yticks is not None:
pl.yticks(yticks)
if len(var_names) > 0 and legend:
pl.legend(frameon=False)
|
python
|
{
"resource": ""
}
|
q22229
|
timeseries_as_heatmap
|
train
|
def timeseries_as_heatmap(X, var_names=None, highlightsX=None, color_map=None):
"""Plot timeseries as heatmap.
Parameters
----------
X : np.ndarray
Data array.
var_names : array_like
Array of strings naming variables stored in columns of X.
"""
if highlightsX is None:
highlightsX = []
if var_names is None:
var_names = []
if len(var_names) == 0:
var_names = np.arange(X.shape[1])
if var_names.ndim == 2:
var_names = var_names[:, 0]
# transpose X
X = X.T
minX = np.min(X)
# insert space into X
if False:
# generate new array with highlightsX
space = 10 # integer
Xnew = np.zeros((X.shape[0], X.shape[1] + space*len(highlightsX)))
hold = 0
_hold = 0
space_sum = 0
for ih, h in enumerate(highlightsX):
_h = h + space_sum
Xnew[:, _hold:_h] = X[:, hold:h]
Xnew[:, _h:_h+space] = minX * np.ones((X.shape[0], space))
# update variables
space_sum += space
_hold = _h + space
hold = h
Xnew[:, _hold:] = X[:, hold:]
fig = pl.figure(figsize=(1.5*4, 2*4))
im = pl.imshow(np.array(X, dtype=np.float_), aspect='auto',
interpolation='nearest', cmap=color_map)
pl.colorbar(shrink=0.5)
pl.yticks(range(X.shape[0]), var_names)
for ih, h in enumerate(highlightsX):
pl.plot([h, h], [0, X.shape[0]], '--', color='black')
pl.xlim([0, X.shape[1]-1])
pl.ylim([0, X.shape[0]-1])
|
python
|
{
"resource": ""
}
|
q22230
|
savefig
|
train
|
def savefig(writekey, dpi=None, ext=None):
"""Save current figure to file.
The `filename` is generated as follows:
filename = settings.figdir + writekey + settings.plot_suffix + '.' + settings.file_format_figs
"""
if dpi is None:
        # we need this check since, in notebooks, inline figures are also influenced by 'savefig.dpi'
if not isinstance(rcParams['savefig.dpi'], str) and rcParams['savefig.dpi'] < 150:
if settings._low_resolution_warning:
logg.warn(
'You are using a low resolution (dpi<150) for saving figures.\n'
'Consider running `set_figure_params(dpi_save=...)`, which will '
'adjust `matplotlib.rcParams[\'savefig.dpi\']`')
settings._low_resolution_warning = False
else:
dpi = rcParams['savefig.dpi']
if not os.path.exists(settings.figdir): os.makedirs(settings.figdir)
if settings.figdir[-1] != '/': settings.figdir += '/'
if ext is None: ext = settings.file_format_figs
filename = settings.figdir + writekey + settings.plot_suffix + '.' + ext
# output the following msg at warning level; it's really important for the user
logg.msg('saving figure to file', filename, v=1)
pl.savefig(filename, dpi=dpi, bbox_inches='tight')
|
python
|
{
"resource": ""
}
|
q22231
|
scatter_group
|
train
|
def scatter_group(ax, key, imask, adata, Y, projection='2d', size=3, alpha=None):
"""Scatter of group using representation of data Y.
"""
mask = adata.obs[key].cat.categories[imask] == adata.obs[key].values
color = adata.uns[key + '_colors'][imask]
if not isinstance(color[0], str):
from matplotlib.colors import rgb2hex
color = rgb2hex(adata.uns[key + '_colors'][imask])
if not is_color_like(color):
raise ValueError('"{}" is not a valid matplotlib color.'.format(color))
data = [Y[mask, 0], Y[mask, 1]]
if projection == '3d': data.append(Y[mask, 2])
ax.scatter(*data,
marker='.',
alpha=alpha,
c=color,
edgecolors='none',
s=size,
label=adata.obs[key].cat.categories[imask],
rasterized=settings._vector_friendly)
return mask
|
python
|
{
"resource": ""
}
|
q22232
|
setup_axes
|
train
|
def setup_axes(
ax=None,
panels='blue',
colorbars=[False],
right_margin=None,
left_margin=None,
projection='2d',
show_ticks=False):
"""Grid of axes for plotting, legends and colorbars.
"""
if '3d' in projection: from mpl_toolkits.mplot3d import Axes3D
avail_projections = {'2d', '3d'}
if projection not in avail_projections:
raise ValueError('choose projection from', avail_projections)
if left_margin is not None:
raise ValueError('Currently not supporting to pass `left_margin`.')
if np.any(colorbars) and right_margin is None:
right_margin = 1 - rcParams['figure.subplot.right'] + 0.21 # 0.25
elif right_margin is None:
right_margin = 1 - rcParams['figure.subplot.right'] + 0.06 # 0.10
# make a list of right margins for each panel
if not isinstance(right_margin, list):
right_margin_list = [right_margin for i in range(len(panels))]
else:
right_margin_list = right_margin
# make a figure with len(panels) panels in a row side by side
top_offset = 1 - rcParams['figure.subplot.top']
bottom_offset = 0.15 if show_ticks else 0.08
left_offset = 1 if show_ticks else 0.3 # in units of base_height
base_height = rcParams['figure.figsize'][1]
height = base_height
base_width = rcParams['figure.figsize'][0]
if show_ticks: base_width *= 1.1
draw_region_width = base_width - left_offset - top_offset - 0.5 # this is kept constant throughout
right_margin_factor = sum([1 + right_margin for right_margin in right_margin_list])
width_without_offsets = right_margin_factor * draw_region_width # this is the total width that keeps draw_region_width
right_offset = (len(panels) - 1) * left_offset
figure_width = width_without_offsets + left_offset + right_offset
draw_region_width_frac = draw_region_width / figure_width
left_offset_frac = left_offset / figure_width
right_offset_frac = 1 - (len(panels) - 1) * left_offset_frac
if ax is None:
pl.figure(figsize=(figure_width, height),
subplotpars=sppars(left=0, right=1, bottom=bottom_offset))
left_positions = [left_offset_frac, left_offset_frac + draw_region_width_frac]
for i in range(1, len(panels)):
right_margin = right_margin_list[i-1]
left_positions.append(left_positions[-1] + right_margin * draw_region_width_frac)
left_positions.append(left_positions[-1] + draw_region_width_frac)
panel_pos = [[bottom_offset], [1-top_offset], left_positions]
axs = []
if ax is None:
for icolor, color in enumerate(panels):
left = panel_pos[2][2*icolor]
bottom = panel_pos[0][0]
width = draw_region_width / figure_width
height = panel_pos[1][0] - bottom
if projection == '2d': ax = pl.axes([left, bottom, width, height])
elif projection == '3d': ax = pl.axes([left, bottom, width, height], projection='3d')
axs.append(ax)
else:
axs = ax if isinstance(ax, list) else [ax]
return axs, panel_pos, draw_region_width, figure_width
|
python
|
{
"resource": ""
}
|
q22233
|
arrows_transitions
|
train
|
def arrows_transitions(ax, X, indices, weight=None):
"""
Plot arrows of transitions in data matrix.
Parameters
----------
ax : matplotlib.axis
Axis object from matplotlib.
X : np.array
Data array, any representation wished (X, psi, phi, etc).
indices : array_like
Indices storing the transitions.
"""
step = 1
width = axis_to_data(ax, 0.001)
if X.shape[0] > 300:
step = 5
width = axis_to_data(ax, 0.0005)
if X.shape[0] > 500:
step = 30
width = axis_to_data(ax, 0.0001)
head_width = 10*width
for ix, x in enumerate(X):
if ix % step == 0:
X_step = X[indices[ix]] - x
# don't plot arrow of length 0
for itrans in range(X_step.shape[0]):
alphai = 1
widthi = width
head_widthi = head_width
if weight is not None:
alphai *= weight[ix, itrans]
widthi *= weight[ix, itrans]
if np.any(X_step[itrans, :1]):
ax.arrow(x[0], x[1],
X_step[itrans, 0], X_step[itrans, 1],
length_includes_head=True,
width=widthi,
head_width=head_widthi,
alpha=alphai,
color='grey')
|
python
|
{
"resource": ""
}
|
q22234
|
scale_to_zero_one
|
train
|
def scale_to_zero_one(x):
"""Take some 1d data and scale it so that min matches 0 and max 1.
"""
xscaled = x - np.min(x)
xscaled /= np.max(xscaled)
return xscaled
|
python
|
{
"resource": ""
}
|
q22235
|
hierarchy_pos
|
train
|
def hierarchy_pos(G, root, levels=None, width=1., height=1.):
"""Tree layout for networkx graph.
See https://stackoverflow.com/questions/29586520/can-one-get-hierarchical-graphs-from-networkx-with-python-3
answer by burubum.
    If there is a cycle that is reachable from root, then this will result in
    infinite recursion.
Parameters
----------
G: the graph
root: the root node
levels: a dictionary
key: level number (starting from 0)
value: number of nodes in this level
width: horizontal space allocated for drawing
height: vertical space allocated for drawing
"""
TOTAL = "total"
CURRENT = "current"
def make_levels(levels, node=root, currentLevel=0, parent=None):
"""Compute the number of nodes for each level
"""
if currentLevel not in levels:
levels[currentLevel] = {TOTAL: 0, CURRENT: 0}
levels[currentLevel][TOTAL] += 1
neighbors = list(G.neighbors(node))
if parent is not None:
neighbors.remove(parent)
for neighbor in neighbors:
levels = make_levels(levels, neighbor, currentLevel + 1, node)
return levels
def make_pos(pos, node=root, currentLevel=0, parent=None, vert_loc=0):
dx = 1/levels[currentLevel][TOTAL]
left = dx/2
pos[node] = ((left + dx*levels[currentLevel][CURRENT])*width,
vert_loc)
levels[currentLevel][CURRENT] += 1
neighbors = list(G.neighbors(node))
if parent is not None:
neighbors.remove(parent)
for neighbor in neighbors:
pos = make_pos(pos, neighbor, currentLevel + 1, node, vert_loc-vert_gap)
return pos
if levels is None:
levels = make_levels({})
else:
levels = {l: {TOTAL: levels[l], CURRENT: 0} for l in levels}
vert_gap = height / (max([l for l in levels])+1)
return make_pos({})
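A small sketch on a small tree (requires networkx); every node is mapped to an (x, y) position, with the root at y = 0 and deeper levels at increasingly negative y:
import networkx as nx

G = nx.Graph([('root', 'a'), ('root', 'b'), ('a', 'c'), ('a', 'd')])
pos = hierarchy_pos(G, 'root')
print(pos['root'])   # (0.5, 0): horizontally centered, top of the layout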
|
python
|
{
"resource": ""
}
|
q22236
|
zoom
|
train
|
def zoom(ax, xy='x', factor=1):
"""Zoom into axis.
Parameters
----------
"""
limits = ax.get_xlim() if xy == 'x' else ax.get_ylim()
new_limits = (0.5*(limits[0] + limits[1])
+ 1./factor * np.array((-0.5, 0.5)) * (limits[1] - limits[0]))
if xy == 'x':
ax.set_xlim(new_limits)
else:
ax.set_ylim(new_limits)
|
python
|
{
"resource": ""
}
|
q22237
|
get_ax_size
|
train
|
def get_ax_size(ax, fig):
"""Get axis size
Parameters
----------
ax : matplotlib.axis
Axis object from matplotlib.
fig : matplotlib.Figure
Figure.
"""
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
    width *= fig.dpi
    height *= fig.dpi
    return width, height
|
python
|
{
"resource": ""
}
|
q22238
|
axis_to_data
|
train
|
def axis_to_data(ax, width):
"""For a width in axis coordinates, return the corresponding in data
coordinates.
Parameters
----------
ax : matplotlib.axis
Axis object from matplotlib.
width : float
Width in xaxis coordinates.
"""
xlim = ax.get_xlim()
widthx = width*(xlim[1] - xlim[0])
ylim = ax.get_ylim()
widthy = width*(ylim[1] - ylim[0])
return 0.5*(widthx + widthy)
|
python
|
{
"resource": ""
}
|
q22239
|
axis_to_data_points
|
train
|
def axis_to_data_points(ax, points_axis):
"""Map points in axis coordinates to data coordinates.
Uses matplotlib.transform.
Parameters
----------
ax : matplotlib.axis
Axis object from matplotlib.
points_axis : np.array
Points in axis coordinates.
"""
axis_to_data = ax.transAxes + ax.transData.inverted()
return axis_to_data.transform(points_axis)
|
python
|
{
"resource": ""
}
|
q22240
|
console_main
|
train
|
def console_main():
"""This serves as CLI entry point and will not show a Python traceback if a called command fails"""
cmd = main(check=False)
if cmd is not None:
sys.exit(cmd.returncode)
|
python
|
{
"resource": ""
}
|
q22241
|
filter_rank_genes_groups
|
train
|
def filter_rank_genes_groups(adata, key=None, groupby=None, use_raw=True, log=True,
key_added='rank_genes_groups_filtered',
min_in_group_fraction=0.25, min_fold_change=2,
max_out_group_fraction=0.5):
"""Filters out genes based on fold change and fraction of genes expressing the gene within and outside the `groupby` categories.
See :func:`~scanpy.tl.rank_genes_groups`.
Results are stored in `adata.uns[key_added]` (default: 'rank_genes_groups_filtered').
To preserve the original structure of adata.uns['rank_genes_groups'], filtered genes
are set to `NaN`.
Parameters
----------
adata: :class:`~anndata.AnnData`
key
groupby
use_raw
    log : if `True`, the values to work with are assumed to be on a log scale
key_added
min_in_group_fraction
min_fold_change
max_out_group_fraction
Returns
-------
    Same output as :func:`~scanpy.tl.rank_genes_groups` but with filtered gene names set to
    `nan`.
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon')
>>> sc.tl.filter_rank_genes_groups(adata, min_fold_change=3)
>>> # visualize results
>>> sc.pl.rank_genes_groups(adata, key='rank_genes_groups_filtered')
>>> # visualize results using dotplot
>>> sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups_filtered')
"""
if key is None:
key = 'rank_genes_groups'
if groupby is None:
groupby = str(adata.uns[key]['params']['groupby'])
# convert structured numpy array into DataFrame
gene_names = pd.DataFrame(adata.uns[key]['names'])
fraction_in_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns,
index=gene_names.index)
fold_change_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns, index=gene_names.index)
fraction_out_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns,
index=gene_names.index)
logg.info("Filtering genes using: min_in_group_fraction: {} "
"min_fold_change: {}, max_out_group_fraction: {}".format(min_in_group_fraction, min_fold_change,
max_out_group_fraction))
from ..plotting._anndata import _prepare_dataframe
for cluster in gene_names.columns:
# iterate per column
var_names = gene_names[cluster].values
# add column to adata as __is_in_cluster__. This facilitates to measure fold change
# of each gene with respect to all other clusters
adata.obs['__is_in_cluster__'] = pd.Categorical(adata.obs[groupby] == cluster)
# obs_tidy has rows=groupby, columns=var_names
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby='__is_in_cluster__', use_raw=use_raw)
        # for each category (in/out of the cluster), compute for each var_name
# 1. the mean value over the category
# 2. the fraction of cells in the category having a value > 0
# 1. compute mean value
mean_obs = obs_tidy.groupby(level=0).mean()
# 2. compute fraction of cells having value >0
# transform obs_tidy into boolean matrix
obs_bool = obs_tidy.astype(bool)
# compute the sum per group which in the boolean matrix this is the number
# of values >0, and divide the result by the total number of values in the group
# (given by `count()`)
fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()
# Because the dataframe groupby is based on the '__is_in_cluster__' column,
# in this context, [True] means __is_in_cluster__.
# Also, in this context, fraction_obs.loc[True].values is the row of values
# that is assigned *as column* to fraction_in_cluster_matrix to follow the
# structure of the gene_names dataFrame
fraction_in_cluster_matrix.loc[:, cluster] = fraction_obs.loc[True].values
fraction_out_cluster_matrix.loc[:, cluster] = fraction_obs.loc[False].values
# compute fold change.
if log:
fold_change_matrix.loc[:, cluster] = (np.exp(mean_obs.loc[True]) / np.exp(mean_obs.loc[False])).values
else:
fold_change_matrix.loc[:, cluster] = (mean_obs.loc[True] / mean_obs.loc[False]).values
    # remove temporary column; `inplace=True` is needed, otherwise the drop has no effect
    adata.obs.drop(columns='__is_in_cluster__', inplace=True)
# filter original_matrix
gene_names = gene_names[(fraction_in_cluster_matrix > min_in_group_fraction) &
(fraction_out_cluster_matrix < max_out_group_fraction) &
(fold_change_matrix > min_fold_change)]
# create new structured array using 'key_added'.
adata.uns[key_added] = adata.uns[key].copy()
adata.uns[key_added]['names'] = gene_names.to_records(index=False)
|
python
|
{
"resource": ""
}
|
q22242
|
blobs
|
train
|
def blobs(n_variables=11, n_centers=5, cluster_std=1.0, n_observations=640) -> AnnData:
"""Gaussian Blobs.
Parameters
----------
n_variables : `int`, optional (default: 11)
Dimension of feature space.
n_centers : `int`, optional (default: 5)
Number of cluster centers.
cluster_std : `float`, optional (default: 1.0)
Standard deviation of clusters.
n_observations : `int`, optional (default: 640)
Number of observations. By default, this is the same observation number as in
``sc.datasets.krumsiek11()``.
Returns
-------
    Annotated data matrix containing an observation annotation 'blobs' that
    indicates cluster identity.
"""
import sklearn.datasets
X, y = sklearn.datasets.make_blobs(n_samples=n_observations,
n_features=n_variables,
centers=n_centers,
cluster_std=cluster_std,
random_state=0)
return AnnData(X, obs={'blobs': y.astype(str)})
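A quick sketch, assuming the function is exposed as `sc.datasets.blobs` (scikit-learn is required):
import scanpy as sc

adata = sc.datasets.blobs(n_variables=11, n_centers=5)
print(adata)                                # AnnData object with 640 obs × 11 vars
print(adata.obs['blobs'].value_counts())    # cluster identity per observation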
|
python
|
{
"resource": ""
}
|
q22243
|
toggleswitch
|
train
|
def toggleswitch() -> AnnData:
"""Simulated toggleswitch.
Data obtained simulating a simple toggleswitch `Gardner *et al.*, Nature
(2000) <https://doi.org/10.1038/35002131>`__.
Simulate via :func:`~scanpy.api.sim`.
Returns
-------
Annotated data matrix.
"""
filename = os.path.dirname(__file__) + '/toggleswitch.txt'
adata = sc.read(filename, first_column_names=True)
adata.uns['iroot'] = 0
return adata
|
python
|
{
"resource": ""
}
|
q22244
|
pbmc68k_reduced
|
train
|
def pbmc68k_reduced() -> AnnData:
"""Subsampled and processed 68k PBMCs.
10x PBMC 68k dataset from
https://support.10xgenomics.com/single-cell-gene-expression/datasets
The original PBMC 68k dataset was preprocessed using scanpy and was saved
keeping only 724 cells and 221 highly variable genes.
The saved file contains the annotation of cell types (key: 'bulk_labels'), UMAP coordinates,
louvain clustering and gene rankings based on the bulk_labels.
Returns
-------
Annotated data matrix.
"""
filename = os.path.dirname(__file__) + '/10x_pbmc68k_reduced.h5ad'
return sc.read(filename)
|
python
|
{
"resource": ""
}
|
q22245
|
OnFlySymMatrix.restrict
|
train
|
def restrict(self, index_array):
"""Generate a view restricted to a subset of indices.
"""
new_shape = index_array.shape[0], index_array.shape[0]
return OnFlySymMatrix(self.get_row, new_shape, DC_start=self.DC_start,
DC_end=self.DC_end,
rows=self.rows, restrict_array=index_array)
|
python
|
{
"resource": ""
}
|
q22246
|
Neighbors.compute_neighbors
|
train
|
def compute_neighbors(
self,
n_neighbors: int = 30,
knn: bool = True,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
method: str = 'umap',
random_state: Optional[Union[RandomState, int]] = 0,
write_knn_indices: bool = False,
metric: str = 'euclidean',
metric_kwds: Mapping[str, Any] = {}
) -> None:
"""\
Compute distances and connectivities of neighbors.
Parameters
----------
n_neighbors
Use this number of nearest neighbors.
knn
Restrict result to `n_neighbors` nearest neighbors.
{n_pcs}
{use_rep}
Returns
-------
Writes sparse graph attributes `.distances` and `.connectivities`.
Also writes `.knn_indices` and `.knn_distances` if
`write_knn_indices==True`.
"""
if n_neighbors > self._adata.shape[0]: # very small datasets
n_neighbors = 1 + int(0.5*self._adata.shape[0])
logg.warn('n_obs too small: adjusting to `n_neighbors = {}`'
.format(n_neighbors))
if method == 'umap' and not knn:
            raise ValueError('`method = \'umap\'` only with `knn = True`.')
if method not in {'umap', 'gauss'}:
raise ValueError('`method` needs to be \'umap\' or \'gauss\'.')
if self._adata.shape[0] >= 10000 and not knn:
logg.warn(
'Using high n_obs without `knn=True` takes a lot of memory...')
self.n_neighbors = n_neighbors
self.knn = knn
X = choose_representation(self._adata, use_rep=use_rep, n_pcs=n_pcs)
# neighbor search
use_dense_distances = (metric == 'euclidean' and X.shape[0] < 8192) or knn == False
if use_dense_distances:
_distances = pairwise_distances(X, metric=metric, **metric_kwds)
knn_indices, knn_distances = get_indices_distances_from_dense_matrix(
_distances, n_neighbors)
if knn:
self._distances = get_sparse_matrix_from_indices_distances_numpy(
knn_indices, knn_distances, X.shape[0], n_neighbors)
else:
self._distances = _distances
else:
# non-euclidean case and approx nearest neighbors
if X.shape[0] < 4096:
X = pairwise_distances(X, metric=metric, **metric_kwds)
metric = 'precomputed'
knn_indices, knn_distances, _ = compute_neighbors_umap(
X, n_neighbors, random_state, metric=metric, metric_kwds=metric_kwds)
#self._rp_forest = _make_forest_dict(forest)
# write indices as attributes
if write_knn_indices:
self.knn_indices = knn_indices
self.knn_distances = knn_distances
logg.msg('computed neighbors', t=True, v=4)
if not use_dense_distances or method == 'umap':
# we need self._distances also for method == 'gauss' if we didn't
# use dense distances
self._distances, self._connectivities = compute_connectivities_umap(
knn_indices, knn_distances, self._adata.shape[0], self.n_neighbors)
# overwrite the umap connectivities if method is 'gauss'
# self._distances is unaffected by this
if method == 'gauss':
self._compute_connectivities_diffmap()
logg.msg('computed connectivities', t=True, v=4)
self._number_connected_components = 1
if issparse(self._connectivities):
from scipy.sparse.csgraph import connected_components
self._connected_components = connected_components(self._connectivities)
self._number_connected_components = self._connected_components[0]
|
python
|
{
"resource": ""
}
|
q22247
|
Neighbors.compute_transitions
|
train
|
def compute_transitions(self, density_normalize=True):
"""Compute transition matrix.
Parameters
----------
        density_normalize : `bool`
            The density rescaling of Coifman and Lafon (2006): if `True`, only the
            geometry of the data matters, not the sampled density.
Returns
-------
Makes attributes `.transitions_sym` and `.transitions` available.
"""
W = self._connectivities
# density normalization as of Coifman et al. (2005)
# ensures that kernel matrix is independent of sampling density
if density_normalize:
# q[i] is an estimate for the sampling density at point i
# it's also the degree of the underlying graph
q = np.asarray(W.sum(axis=0))
if not issparse(W):
Q = np.diag(1.0/q)
else:
Q = scipy.sparse.spdiags(1.0/q, 0, W.shape[0], W.shape[0])
K = Q.dot(W).dot(Q)
else:
K = W
# z[i] is the square root of the row sum of K
z = np.sqrt(np.asarray(K.sum(axis=0)))
if not issparse(K):
self.Z = np.diag(1.0/z)
else:
self.Z = scipy.sparse.spdiags(1.0/z, 0, K.shape[0], K.shape[0])
self._transitions_sym = self.Z.dot(K).dot(self.Z)
logg.msg('computed transitions', v=4, time=True)
|
python
|
{
"resource": ""
}
|
q22248
|
Neighbors.compute_eigen
|
train
|
def compute_eigen(self, n_comps=15, sym=None, sort='decrease'):
"""Compute eigen decomposition of transition matrix.
Parameters
----------
n_comps : `int`
Number of eigenvalues/vectors to be computed, set `n_comps = 0` if
you need all eigenvectors.
        sym : `bool`
            Instead of computing the eigendecomposition of the asymmetric
            transition matrix, compute the eigendecomposition of the symmetric
            Ktilde matrix.
matrix : sparse matrix, np.ndarray, optional (default: `.connectivities`)
Matrix to diagonalize. Merely for testing and comparison purposes.
Returns
-------
Writes the following attributes.
eigen_values : numpy.ndarray
Eigenvalues of transition matrix.
eigen_basis : numpy.ndarray
Matrix of eigenvectors (stored in columns). `.eigen_basis` is
            projection of the data matrix on the right eigenvectors, that is, the
            projection on the diffusion components. These are simply the
            components of the right eigenvectors and can directly be used for
            plotting.
"""
np.set_printoptions(precision=10)
if self._transitions_sym is None:
raise ValueError('Run `.compute_transitions` first.')
matrix = self._transitions_sym
# compute the spectrum
if n_comps == 0:
evals, evecs = scipy.linalg.eigh(matrix)
else:
n_comps = min(matrix.shape[0]-1, n_comps)
# ncv = max(2 * n_comps + 1, int(np.sqrt(matrix.shape[0])))
ncv = None
which = 'LM' if sort == 'decrease' else 'SM'
# it pays off to increase the stability with a bit more precision
matrix = matrix.astype(np.float64)
evals, evecs = scipy.sparse.linalg.eigsh(matrix, k=n_comps,
which=which, ncv=ncv)
evals, evecs = evals.astype(np.float32), evecs.astype(np.float32)
if sort == 'decrease':
evals = evals[::-1]
evecs = evecs[:, ::-1]
logg.info(' eigenvalues of transition matrix\n'
' {}'.format(str(evals).replace('\n', '\n ')))
if self._number_connected_components > len(evals)/2:
logg.warn('Transition matrix has many disconnected components!')
self._eigen_values = evals
self._eigen_basis = evecs
|
python
|
{
"resource": ""
}
|
q22249
|
Neighbors._set_pseudotime
|
train
|
def _set_pseudotime(self):
"""Return pseudotime with respect to root point.
"""
self.pseudotime = self.distances_dpt[self.iroot].copy()
self.pseudotime /= np.max(self.pseudotime[self.pseudotime < np.inf])
|
python
|
{
"resource": ""
}
|
q22250
|
Neighbors._set_iroot_via_xroot
|
train
|
def _set_iroot_via_xroot(self, xroot):
"""Determine the index of the root cell.
Given an expression vector, find the observation index that is closest
to this vector.
Parameters
----------
xroot : np.ndarray
Vector that marks the root cell, the vector storing the initial
condition, only relevant for computing pseudotime.
"""
if self._adata.shape[1] != xroot.size:
raise ValueError(
'The root vector you provided does not have the '
'correct dimension.')
# this is the squared distance
dsqroot = 1e10
iroot = 0
for i in range(self._adata.shape[0]):
diff = self._adata.X[i, :] - xroot
dsq = diff.dot(diff)
if dsq < dsqroot:
dsqroot = dsq
iroot = i
if np.sqrt(dsqroot) < 1e-10: break
logg.msg('setting root index to', iroot, v=4)
if self.iroot is not None and iroot != self.iroot:
logg.warn('Changing index of iroot from {} to {}.'.format(self.iroot, iroot))
self.iroot = iroot
|
python
|
{
"resource": ""
}
|
q22251
|
mitochondrial_genes
|
train
|
def mitochondrial_genes(host, org) -> pd.Index:
"""Mitochondrial gene symbols for specific organism through BioMart.
Parameters
----------
host : {{'www.ensembl.org', ...}}
A valid BioMart host URL.
org : {{'hsapiens', 'mmusculus', 'drerio'}}
Organism to query. Currently available are human ('hsapiens'), mouse
('mmusculus') and zebrafish ('drerio').
Returns
-------
A :class:`pandas.Index` containing mitochondrial gene symbols.
"""
try:
from bioservices import biomart
except ImportError:
raise ImportError(
'You need to install the `bioservices` module.')
from io import StringIO
s = biomart.BioMart(host=host)
# building query
s.new_query()
if org == 'hsapiens':
s.add_dataset_to_xml('hsapiens_gene_ensembl')
s.add_attribute_to_xml('hgnc_symbol')
elif org == 'mmusculus':
s.add_dataset_to_xml('mmusculus_gene_ensembl')
s.add_attribute_to_xml('mgi_symbol')
elif org == 'drerio':
s.add_dataset_to_xml('drerio_gene_ensembl')
s.add_attribute_to_xml('zfin_id_symbol')
else:
logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
return None
s.add_attribute_to_xml('chromosome_name')
xml = s.get_xml()
# parsing mitochondrial gene symbols
res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None)
res.columns = ['symbol', 'chromosome_name']
res = res.dropna()
res = res[res['chromosome_name'] == 'MT']
res = res.set_index('symbol')
res = res[~res.index.duplicated(keep='first')]
return res.index
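A usage sketch for the returned index: flagging mitochondrial genes in an AnnData object and computing a per-cell mitochondrial fraction. The gene names and matrix are invented; in practice the index would come from the BioMart query above, which needs network access and the `bioservices` package.
import numpy as np
import pandas as pd
from anndata import AnnData
mito_genes = pd.Index(['MT-CO1', 'MT-ND1'])  # stand-in for mitochondrial_genes(host, org)
adata = AnnData(
    X=np.array([[5., 1., 0.], [2., 2., 6.]]),
    var=pd.DataFrame(index=['MT-CO1', 'MT-ND1', 'ACTB']),
)
adata.var['mito'] = adata.var_names.isin(mito_genes)
adata.obs['percent_mito'] = (
    adata[:, adata.var['mito']].X.sum(axis=1) / adata.X.sum(axis=1)
)
print(adata.obs['percent_mito'])  # fraction of counts in mitochondrial genes per cell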
|
python
|
{
"resource": ""
}
|
q22252
|
highest_expr_genes
|
train
|
def highest_expr_genes(
adata, n_top=30, show=None, save=None,
ax=None, gene_symbols=None, **kwds
):
"""\
Fraction of counts assigned to each gene over all cells.
Computes, for each gene, the fraction of counts assigned to that gene within
a cell. The `n_top` genes with the highest mean fraction over all cells are
plotted as boxplots.
This plot is similar to the `scater` package function `plotHighestExprs(type
= "highest-expression")`, see `here
<https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/vignette-qc.html>`__. Quoting
from there:
*We expect to see the “usual suspects”, i.e., mitochondrial genes, actin,
ribosomal protein, MALAT1. A few spike-in transcripts may also be
present here, though if all of the spike-ins are in the top 50, it
suggests that too much spike-in RNA was added. A large number of
pseudo-genes or predicted genes may indicate problems with alignment.*
-- Davis McCarthy and Aaron Lun
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
    n_top : `int`, optional (default: 30)
        Number of top genes to plot.
{show_save_ax}
gene_symbols : `str`, optional (default:None)
Key for field in .var that stores gene symbols if you do not want to use .var_names.
**kwds : keyword arguments
Are passed to `seaborn.boxplot`.
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes`.
"""
from scipy.sparse import issparse
# compute the percentage of each gene per cell
dat = normalize_per_cell(adata, counts_per_cell_after=100, copy=True)
# identify the genes with the highest mean
if issparse(dat.X):
dat.var['mean_percent'] = dat.X.mean(axis=0).A1
else:
dat.var['mean_percent'] = dat.X.mean(axis=0)
top = dat.var.sort_values('mean_percent', ascending=False).index[:n_top]
dat = dat[:, top]
columns = dat.var_names if gene_symbols is None else dat.var[gene_symbols]
dat = pd.DataFrame(dat.X.toarray(), index=dat.obs_names, columns=columns)
if not ax:
# figsize is hardcoded to produce a tall image. To change the fig size,
# a matplotlib.axes.Axes object needs to be passed.
height = (n_top * 0.2) + 1.5
fig, ax = plt.subplots(figsize=(5, height))
sns.boxplot(data=dat, orient='h', ax=ax, fliersize=1, **kwds)
ax.set_xlabel('% of total counts')
utils.savefig_or_show('highest_expr_genes', show=show, save=save)
return ax if show == False else None
|
python
|
{
"resource": ""
}
|
q22253
|
filter_genes_cv_deprecated
|
train
|
def filter_genes_cv_deprecated(X, Ecutoff, cvFilter):
"""Filter genes by coefficient of variance and mean.
See `filter_genes_dispersion`.
Reference: Weinreb et al. (2017).
"""
if issparse(X):
raise ValueError('Not defined for sparse input. See `filter_genes_dispersion`.')
mean_filter = np.mean(X, axis=0) > Ecutoff
var_filter = np.std(X, axis=0) / (np.mean(X, axis=0) + .0001) > cvFilter
gene_subset = np.nonzero(np.all([mean_filter, var_filter], axis=0))[0]
return gene_subset
|
python
|
{
"resource": ""
}
|
q22254
|
filter_genes_fano_deprecated
|
train
|
def filter_genes_fano_deprecated(X, Ecutoff, Vcutoff):
"""Filter genes by fano factor and mean.
See `filter_genes_dispersion`.
Reference: Weinreb et al. (2017).
"""
if issparse(X):
raise ValueError('Not defined for sparse input. See `filter_genes_dispersion`.')
mean_filter = np.mean(X, axis=0) > Ecutoff
var_filter = np.var(X, axis=0) / (np.mean(X, axis=0) + .0001) > Vcutoff
gene_subset = np.nonzero(np.all([mean_filter, var_filter], axis=0))[0]
return gene_subset
|
python
|
{
"resource": ""
}
|
q22255
|
materialize_as_ndarray
|
train
|
def materialize_as_ndarray(a):
"""Convert distributed arrays to ndarrays."""
if type(a) in (list, tuple):
if da is not None and any(isinstance(arr, da.Array) for arr in a):
return da.compute(*a, sync=True)
return tuple(np.asarray(arr) for arr in a)
return np.asarray(a)
|
python
|
{
"resource": ""
}
|
q22256
|
mnn_concatenate
|
train
|
def mnn_concatenate(*adatas, geneset=None, k=20, sigma=1, n_jobs=None, **kwds):
"""Merge AnnData objects and correct batch effects using the MNN method.
Batch effect correction by matching mutual nearest neighbors [Haghverdi18]_
has been implemented as a function 'mnnCorrect' in the R package
`scran <https://bioconductor.org/packages/release/bioc/html/scran.html>`__
This function provides a wrapper to use the mnnCorrect function when
concatenating Anndata objects by using the Python-R interface `rpy2
<https://pypi.org/project/rpy2/>`__.
Parameters
----------
adatas : :class:`~anndata.AnnData`
AnnData matrices to concatenate with. Each dataset should generally be
log-transformed, e.g., log-counts. Datasets should have the same number
        of genes, or at least contain all the genes in `geneset`.
geneset : `list`, optional (default: `None`)
A list specifying the genes with which distances between cells are
calculated in mnnCorrect, typically the highly variable genes.
All genes are used if no geneset provided. See the `scran manual
<https://bioconductor.org/packages/release/bioc/html/scran.html>`__ for
details.
    k : `int`, optional (default: 20)
See the `scran manual <https://bioconductor.org/packages/release/bioc/html/scran.html>`__
for details.
    sigma : `float`, optional (default: 1)
See the `scran manual <https://bioconductor.org/packages/release/bioc/html/scran.html>`__
for details.
n_jobs : `int` or `None` (default: `sc.settings.n_jobs`)
Number of jobs.
kwds :
        Keyword arguments passed to `AnnData.concatenate`.
Returns
-------
An :class:`~anndata.AnnData` object with MNN corrected data matrix X.
Example
-------
>>> adata1
AnnData object with n_obs × n_vars = 223 × 33694
obs: 'n_genes', 'percent_mito', 'n_counts', 'Sample', 'Donor', 'Tissue'
var: 'gene_ids', 'n_cells'
>>> adata2
AnnData object with n_obs × n_vars = 1457 × 33694
obs: 'n_genes', 'percent_mito', 'n_counts', 'Sample', 'Donor', 'Tissue'
var: 'gene_ids', 'n_cells'
    >>> adata3 = sc.pp.mnn_concatenate(adata2, adata1, geneset=hvgs)
"""
from rpy2.robjects.packages import importr
from rpy2.robjects import numpy2ri
adata = AnnData.concatenate(*adatas, **kwds)
if geneset is None:
datamats = tuple([adata.X.T for adata in adatas])
else:
datamats = tuple([adata[:, geneset].X.T for adata in adatas])
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
numpy2ri.activate()
rbase = importr('base')
rscran = importr('scran')
bpparam = importr('BiocParallel').MulticoreParam(
workers = n_jobs) if n_jobs > 1 else importr('BiocParallel').SerialParam()
mnn_result = rscran.mnnCorrect(*datamats, k=k, sigma=sigma, BPPARAM = bpparam)
corrected = np.asarray(rbase.do_call(rbase.cbind, mnn_result[0])).T
numpy2ri.deactivate()
    if geneset is not None:
adata = adata[:, geneset]
adata.X = corrected
return adata
|
python
|
{
"resource": ""
}
|
q22257
|
_design_matrix
|
train
|
def _design_matrix(
model: pd.DataFrame,
batch_key: str,
batch_levels: Collection[str],
) -> pd.DataFrame:
"""
Computes a simple design matrix.
Parameters
--------
model
Contains the batch annotation
batch_key
Name of the batch column
batch_levels
Levels of the batch annotation
Returns
--------
The design matrix for the regression problem
"""
import patsy
design = patsy.dmatrix(
"~ 0 + C(Q('{}'), levels=batch_levels)".format(batch_key),
model,
return_type="dataframe",
)
model = model.drop([batch_key], axis=1)
numerical_covariates = model.select_dtypes('number').columns.values
logg.info("Found {} batches\n".format(design.shape[1]))
other_cols = [c for c in model.columns.values if c not in numerical_covariates]
if other_cols:
col_repr = " + ".join("Q('{}')".format(x) for x in other_cols)
factor_matrix = patsy.dmatrix("~ 0 + {}".format(col_repr),
model[other_cols],
return_type="dataframe")
design = pd.concat((design, factor_matrix), axis=1)
logg.info("Found {} categorical variables:".format(len(other_cols)))
logg.info("\t" + ", ".join(other_cols) + '\n')
    if len(numerical_covariates) > 0:
logg.info("Found {} numerical variables:".format(len(numerical_covariates)))
logg.info("\t" + ", ".join(numerical_covariates) + '\n')
for nC in numerical_covariates:
design[nC] = model[nC]
return design
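A minimal sketch of the same patsy call on a toy annotation table; the column names, levels and the numerical covariate are assumptions for illustration, and `patsy` must be installed.
import pandas as pd
import patsy
model = pd.DataFrame({
    'batch': ['a', 'a', 'b', 'b'],   # toy batch annotation
    'age':   [1.0, 2.0, 3.0, 4.0],   # toy numerical covariate
})
batch_levels = ['a', 'b']
design = patsy.dmatrix(
    "~ 0 + C(Q('batch'), levels=batch_levels)",
    model,
    return_type="dataframe",
)
print(design)  # one indicator column per batch level, no intercept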
|
python
|
{
"resource": ""
}
|
q22258
|
_standardize_data
|
train
|
def _standardize_data(
model: pd.DataFrame,
data: pd.DataFrame,
batch_key: str,
) -> Tuple[pd.DataFrame, pd.DataFrame, np.ndarray, np.ndarray]:
"""
Standardizes the data per gene.
The aim here is to make mean and variance be comparable across batches.
Parameters
--------
model
Contains the batch annotation
data
Contains the Data
batch_key
Name of the batch column in the model matrix
Returns
--------
s_data : pandas.DataFrame
Standardized Data
design : pandas.DataFrame
Batch assignment as one-hot encodings
var_pooled : numpy.ndarray
Pooled variance per gene
stand_mean : numpy.ndarray
Gene-wise mean
"""
# compute the design matrix
batch_items = model.groupby(batch_key).groups.items()
batch_levels, batch_info = zip(*batch_items)
n_batch = len(batch_info)
n_batches = np.array([len(v) for v in batch_info])
n_array = float(sum(n_batches))
design = _design_matrix(model, batch_key, batch_levels)
# compute pooled variance estimator
B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch, :])
var_pooled = (data - np.dot(design, B_hat).T)**2
var_pooled = np.dot(var_pooled, np.ones((int(n_array), 1)) / int(n_array))
# Compute the means
if np.sum(var_pooled == 0) > 0:
print(
'Found {} genes with zero variance.'
.format(np.sum(var_pooled == 0))
)
stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, int(n_array))))
tmp = np.array(design.copy())
tmp[:, :n_batch] = 0
stand_mean += np.dot(tmp, B_hat).T
# need to be a bit careful with the zero variance genes
# just set the zero variance genes to zero in the standardized data
s_data = np.where(var_pooled == 0, 0, (
(data - stand_mean) /
np.dot(np.sqrt(var_pooled), np.ones((1, int(n_array))))
))
s_data = pd.DataFrame(s_data, index=data.index, columns=data.columns)
return s_data, design, var_pooled, stand_mean
|
python
|
{
"resource": ""
}
|
q22259
|
_it_sol
|
train
|
def _it_sol(s_data, g_hat, d_hat, g_bar, t2, a, b, conv=0.0001) -> Tuple[float, float]:
"""
Iteratively compute the conditional posterior means for gamma and delta.
    gamma is an estimator for the additive batch effect, delta is an estimator
    for the multiplicative batch effect. We use an empirical Bayes (EB) framework
    to estimate these two. Analytical expressions exist for both parameters, which,
    however, depend on each other. We therefore iteratively evaluate these two
    expressions until convergence is reached.
Parameters
--------
s_data : pd.DataFrame
Contains the standardized Data
g_hat : float
Initial guess for gamma
d_hat : float
Initial guess for delta
    g_bar, t2, a, b : float
        Hyperparameters
    conv : float, optional (default: `0.0001`)
        Convergence criterion
    Returns
--------
gamma : float
estimated value for gamma
delta : float
estimated value for delta
"""
n = (1 - np.isnan(s_data)).sum(axis=1)
g_old = g_hat.copy()
d_old = d_hat.copy()
change = 1
count = 0
    # we place a normally distributed prior on gamma and an inverse gamma prior on delta
# in the loop, gamma and delta are updated together. they depend on each other. we iterate until convergence.
while change > conv:
g_new = (t2*n*g_hat + d_old*g_bar) / (t2*n + d_old)
sum2 = s_data - g_new.reshape((g_new.shape[0], 1)) @ np.ones((1, s_data.shape[1]))
sum2 = sum2 ** 2
sum2 = sum2.sum(axis=1)
d_new = (0.5*sum2 + b) / (n/2.0 + a-1.0)
change = max((abs(g_new - g_old) / g_old).max(), (abs(d_new - d_old) / d_old).max())
g_old = g_new # .copy()
d_old = d_new # .copy()
count = count + 1
return g_new, d_new
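A toy numerical check of the iteration, assuming `_it_sol` as defined above is in scope; the data and hyperparameter values below are invented and not biologically meaningful.
import numpy as np
rng = np.random.RandomState(0)
s_data = rng.normal(loc=1.0, size=(3, 20))   # toy 3 genes x 20 cells
g_hat = s_data.mean(axis=1)                  # per-gene initial additive effect
d_hat = s_data.var(axis=1, ddof=1)           # per-gene initial multiplicative effect
g_bar, t2 = g_hat.mean(), g_hat.var()        # empirical hyperparameters
a, b = 2.0, 1.0                              # toy inverse-gamma hyperparameters
gamma, delta = _it_sol(s_data, g_hat, d_hat, g_bar, t2, a, b)
print(gamma.shape, delta.shape)              # (3,) (3,)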
|
python
|
{
"resource": ""
}
|
q22260
|
top_proportions
|
train
|
def top_proportions(mtx, n):
"""
Calculates cumulative proportions of top expressed genes
Parameters
----------
mtx : `Union[np.array, sparse.spmatrix]`
Matrix, where each row is a sample, each column a feature.
n : `int`
Rank to calculate proportions up to. Value is treated as 1-indexed,
`n=50` will calculate cumulative proportions up to the 50th most
expressed gene.
"""
if issparse(mtx):
if not isspmatrix_csr(mtx):
mtx = csr_matrix(mtx)
# Allowing numba to do more
return top_proportions_sparse_csr(mtx.data, mtx.indptr, np.array(n))
else:
return top_proportions_dense(mtx, n)
|
python
|
{
"resource": ""
}
|
q22261
|
top_segment_proportions
|
train
|
def top_segment_proportions(mtx, ns):
"""
Calculates total percentage of counts in top ns genes.
Parameters
----------
mtx : `Union[np.array, sparse.spmatrix]`
Matrix, where each row is a sample, each column a feature.
ns : `Container[Int]`
Positions to calculate cumulative proportion at. Values are considered
1-indexed, e.g. `ns=[50]` will calculate cumulative proportion up to
the 50th most expressed gene.
"""
# Pretty much just does dispatch
if not (max(ns) <= mtx.shape[1] and min(ns) > 0):
raise IndexError("Positions outside range of features.")
if issparse(mtx):
if not isspmatrix_csr(mtx):
mtx = csr_matrix(mtx)
return top_segment_proportions_sparse_csr(mtx.data, mtx.indptr,
                                                  np.array(ns, dtype=int))
else:
return top_segment_proportions_dense(mtx, ns)
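A plain-numpy sketch of the quantity being computed, since the numba kernels dispatched to above are not shown here; the small dense matrix and positions are illustrative.
import numpy as np
mtx = np.array([[10.,  5.,  3.,  2.],
                [ 1.,  1.,  1.,  1.]])
ns = [1, 2]  # top-1 and top-2 genes, 1-indexed
sorted_desc = -np.sort(-mtx, axis=1)                  # per-cell counts, descending
cumsum = sorted_desc.cumsum(axis=1)
totals = mtx.sum(axis=1, keepdims=True)
proportions = cumsum[:, [n - 1 for n in ns]] / totals
print(proportions)  # row 0: [0.5, 0.75]; row 1: [0.25, 0.5]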
|
python
|
{
"resource": ""
}
|
q22262
|
add_args
|
train
|
def add_args(p):
"""
Update parser with tool specific arguments.
    This overwrites what is done in utils.uns_args.
"""
# dictionary for adding arguments
dadd_args = {
'--opfile': {
'default': '',
'metavar': 'f',
'type': str,
'help': 'Specify a parameter file '
'(default: "sim/${exkey}_params.txt")'}}
p = utils.add_args(p, dadd_args)
return p
|
python
|
{
"resource": ""
}
|
q22263
|
_check_branching
|
train
|
def _check_branching(X,Xsamples,restart,threshold=0.25):
"""\
Check whether time series branches.
Parameters
----------
X (np.array): current time series data.
Xsamples (np.array): list of previous branching samples.
restart (int): counts number of restart trials.
threshold (float, optional): sets threshold for attractor
identification.
Returns
-------
check : bool
true if branching realization
Xsamples
updated list
"""
check = True
if restart == 0:
Xsamples.append(X)
else:
for Xcompare in Xsamples:
Xtmax_diff = np.absolute(X[-1,:] - Xcompare[-1,:])
# If the second largest element is smaller than threshold
# set check to False, i.e. at least two elements
# need to change in order to have a branching.
# If we observe all parameters of the system,
# a new attractor state must involve changes in two
# variables.
if np.partition(Xtmax_diff,-2)[-2] < threshold:
check = False
if check:
Xsamples.append(X)
if not check:
logg.m('realization {}:'.format(restart), 'no new branch', v=4)
else:
logg.m('realization {}:'.format(restart), 'new branch', v=4)
return check, Xsamples
|
python
|
{
"resource": ""
}
|
q22264
|
check_nocycles
|
train
|
def check_nocycles(Adj, verbosity=2):
"""\
    Checks that there are no cycles in the graph described by the adjacency matrix.
    Parameters
    ----------
    Adj (np.array): adjacency matrix of dimension (dim, dim)
Returns
-------
True if there is no cycle, False otherwise.
"""
dim = Adj.shape[0]
for g in range(dim):
v = np.zeros(dim)
v[g] = 1
for i in range(dim):
v = Adj.dot(v)
if v[g] > 1e-10:
if verbosity > 2:
settings.m(0,Adj)
settings.m(0,'contains a cycle of length',i+1,
'starting from node',g,
'-> reject')
return False
return True
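A standalone sketch of the same power-iteration cycle test on two tiny graphs; the adjacency matrices are invented for illustration.
import numpy as np
def has_cycle(adj):
    # returns True if the directed graph given by adj contains a cycle
    dim = adj.shape[0]
    for g in range(dim):
        v = np.zeros(dim)
        v[g] = 1
        for _ in range(dim):
            v = adj.dot(v)
            if v[g] > 1e-10:  # a walk of length <= dim returned to node g
                return True
    return False
acyclic = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])  # chain 0 -> 1 -> 2
cyclic = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])   # cycle 0 -> 1 -> 2 -> 0
print(has_cycle(acyclic), has_cycle(cyclic))           # False True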
|
python
|
{
"resource": ""
}
|
q22265
|
sample_coupling_matrix
|
train
|
def sample_coupling_matrix(dim=3,connectivity=0.5):
"""\
Sample coupling matrix.
Checks that returned graphs contain no self-cycles.
Parameters
----------
dim : int
dimension of coupling matrix.
connectivity : float
fraction of connectivity, fully connected means 1.,
not-connected means 0, in the case of fully connected, one has
dim*(dim-1)/2 edges in the graph.
Returns
-------
    Tuple (Coupl, Adj, Adj_signed, n_edges) of coupling matrix, adjacency
    matrix, signed adjacency matrix and number of edges.
"""
max_trial = 10
check = False
for trial in range(max_trial):
# random topology for a given connectivity / edge density
Coupl = np.zeros((dim,dim))
n_edges = 0
for gp in range(dim):
for g in range(dim):
if gp != g:
# need to have the factor 0.5, otherwise
# connectivity=1 would lead to dim*(dim-1) edges
if np.random.rand() < 0.5*connectivity:
Coupl[gp,g] = 0.7
n_edges += 1
# obtain adjacancy matrix
Adj_signed = np.zeros((dim,dim),dtype='int_')
Adj_signed = np.sign(Coupl)
Adj = np.abs(Adj_signed)
# check for cycles and whether there is at least one edge
if check_nocycles(Adj) and n_edges > 0:
check = True
break
if not check:
        raise ValueError('did not find graph without cycles after {} trials'
                         .format(max_trial))
return Coupl, Adj, Adj_signed, n_edges
|
python
|
{
"resource": ""
}
|
q22266
|
GRNsim.sim_model
|
train
|
def sim_model(self,tmax,X0,noiseDyn=0,restart=0):
""" Simulate the model.
"""
self.noiseDyn = noiseDyn
#
X = np.zeros((tmax,self.dim))
X[0] = X0 + noiseDyn*np.random.randn(self.dim)
# run simulation
for t in range(1,tmax):
if self.modelType == 'hill':
Xdiff = self.Xdiff_hill(X[t-1])
elif self.modelType == 'var':
Xdiff = self.Xdiff_var(X[t-1])
#
X[t] = X[t-1] + Xdiff
# add dynamic noise
X[t] += noiseDyn*np.random.randn(self.dim)
return X
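The stepping above is plain forward (Euler-style) updating; a standalone sketch with a linear update rule standing in for `Xdiff_var` (coupling matrix, initial state and noise level are made up).
import numpy as np
rng = np.random.RandomState(0)
Coupl = np.array([[0.0, -0.2],
                  [0.3,  0.0]])  # toy coupling matrix
tmax, dim, noise = 50, 2, 0.01
X = np.zeros((tmax, dim))
X[0] = np.array([1.0, 0.5]) + noise * rng.randn(dim)
for t in range(1, tmax):
    Xdiff = Coupl.dot(X[t - 1])            # linear update, stands in for Xdiff_var
    X[t] = X[t - 1] + Xdiff + noise * rng.randn(dim)
print(X[-1])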
|
python
|
{
"resource": ""
}
|
q22267
|
GRNsim.Xdiff_hill
|
train
|
def Xdiff_hill(self,Xt):
""" Build Xdiff from coefficients of boolean network,
that is, using self.boolCoeff. The employed functions
are Hill type activation and deactivation functions.
See Wittmann et al., BMC Syst. Biol. 3, 98 (2009),
doi:10.1186/1752-0509-3-98 for more details.
"""
verbosity = self.verbosity>0 and self.writeOutputOnce
self.writeOutputOnce = False
Xdiff = np.zeros(self.dim)
for ichild,child in enumerate(self.pas.keys()):
# check whether list of parents is non-empty,
# otherwise continue
if self.pas[child]:
Xdiff_syn = 0 # synthesize term
if verbosity > 0:
Xdiff_syn_str = ''
else:
continue
# loop over all tuples for which the boolean update
# rule returns true, these are stored in self.boolCoeff
for ituple,tuple in enumerate(self.boolCoeff[child]):
Xdiff_syn_tuple = 1
Xdiff_syn_tuple_str = ''
for iv,v in enumerate(tuple):
iparent = self.varNames[self.pas[child][iv]]
x = Xt[iparent]
threshold = 0.1/np.abs(self.Coupl[ichild,iparent])
Xdiff_syn_tuple *= self.hill_a(x,threshold) if v else self.hill_i(x,threshold)
if verbosity > 0:
Xdiff_syn_tuple_str += (('a' if v else 'i')
+'('+self.pas[child][iv]+','+'{:.2}'.format(threshold)+')')
Xdiff_syn += Xdiff_syn_tuple
if verbosity > 0:
Xdiff_syn_str += ('+' if ituple != 0 else '') + Xdiff_syn_tuple_str
# multiply with degradation term
Xdiff[ichild] = self.invTimeStep*(Xdiff_syn - Xt[ichild])
if verbosity > 0:
Xdiff_str = (child+'_{+1}-' + child + ' = ' + str(self.invTimeStep)
+ '*('+Xdiff_syn_str+'-'+child+')' )
settings.m(0,Xdiff_str)
return Xdiff
|
python
|
{
"resource": ""
}
|
q22268
|
GRNsim.hill_a
|
train
|
def hill_a(self,x,threshold=0.1,power=2):
""" Activating hill function. """
x_pow = np.power(x,power)
threshold_pow = np.power(threshold,power)
return x_pow / (x_pow + threshold_pow)
|
python
|
{
"resource": ""
}
|
q22269
|
GRNsim.hill_i
|
train
|
def hill_i(self,x,threshold=0.1,power=2):
""" Inhibiting hill function.
    Is equivalent to 1 - hill_a(self, x, threshold, power).
"""
x_pow = np.power(x,power)
threshold_pow = np.power(threshold,power)
return threshold_pow / (x_pow + threshold_pow)
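A quick numerical check of the complementarity claimed in the docstrings, using standalone copies of the two Hill functions; the threshold and power values are arbitrary.
import numpy as np
def hill_a(x, threshold=0.1, power=2):
    xp, tp = np.power(x, power), np.power(threshold, power)
    return xp / (xp + tp)
def hill_i(x, threshold=0.1, power=2):
    xp, tp = np.power(x, power), np.power(threshold, power)
    return tp / (xp + tp)
x = np.linspace(0.01, 1.0, 5)
print(np.allclose(hill_a(x, 0.3, 2) + hill_i(x, 0.3, 2), 1.0))  # True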
|
python
|
{
"resource": ""
}
|
q22270
|
GRNsim.nhill_a
|
train
|
def nhill_a(self,x,threshold=0.1,power=2,ichild=2):
""" Normalized activating hill function. """
x_pow = np.power(x,power)
threshold_pow = np.power(threshold,power)
return x_pow / (x_pow + threshold_pow) * (1 + threshold_pow)
|
python
|
{
"resource": ""
}
|
q22271
|
GRNsim.nhill_i
|
train
|
def nhill_i(self,x,threshold=0.1,power=2):
""" Normalized inhibiting hill function.
    Is equivalent to 1 - nhill_a(self, x, threshold, power).
"""
x_pow = np.power(x,power)
threshold_pow = np.power(threshold,power)
return threshold_pow / (x_pow + threshold_pow) * (1 - x_pow)
|
python
|
{
"resource": ""
}
|
q22272
|
GRNsim.read_model
|
train
|
def read_model(self):
""" Read the model and the couplings from the model file.
"""
if self.verbosity > 0:
settings.m(0,'reading model',self.model)
# read model
boolRules = []
for line in open(self.model):
if line.startswith('#') and 'modelType =' in line:
keyval = line
if '|' in line:
keyval, type = line.split('|')[:2]
self.modelType = keyval.split('=')[1].strip()
if line.startswith('#') and 'invTimeStep =' in line:
keyval = line
if '|' in line:
keyval, type = line.split('|')[:2]
self.invTimeStep = float(keyval.split('=')[1].strip())
if not line.startswith('#'):
boolRules.append([s.strip() for s in line.split('=')])
if line.startswith('# coupling list:'):
break
self.dim = len(boolRules)
self.boolRules = collections.OrderedDict(boolRules)
self.varNames = collections.OrderedDict([(s, i)
for i, s in enumerate(self.boolRules.keys())])
names = self.varNames
# read couplings via names
self.Coupl = np.zeros((self.dim, self.dim))
boolContinue = True
for line in open(self.model): # open(self.model.replace('/model','/couplList')):
if line.startswith('# coupling list:'):
boolContinue = False
if boolContinue:
continue
if not line.startswith('#'):
gps, gs, val = line.strip().split()
self.Coupl[int(names[gps]), int(names[gs])] = float(val)
    # adjacency matrices
self.Adj_signed = np.sign(self.Coupl)
self.Adj = np.abs(np.array(self.Adj_signed))
# build bool coefficients (necessary for odefy type
# version of the discrete model)
self.build_boolCoeff()
|
python
|
{
"resource": ""
}
|
q22273
|
GRNsim.set_coupl_old
|
train
|
def set_coupl_old(self):
""" Using the adjacency matrix, sample a coupling matrix.
"""
if self.model == 'krumsiek11' or self.model == 'var':
# we already built the coupling matrix in set_coupl20()
return
self.Coupl = np.zeros((self.dim,self.dim))
for i in range(self.Adj.shape[0]):
for j,a in enumerate(self.Adj[i]):
# if there is a 1 in Adj, specify co and antiregulation
# and strength of regulation
if a != 0:
co_anti = np.random.randint(2)
# set a lower bound for the coupling parameters
# they ought not to be smaller than 0.1
# and not be larger than 0.4
self.Coupl[i,j] = 0.0*np.random.rand() + 0.1
# set sign for coupling
if co_anti == 1:
self.Coupl[i,j] *= -1
# enforce certain requirements on models
if self.model == 1:
self.coupl_model1()
elif self.model == 5:
self.coupl_model5()
elif self.model in [6,7]:
self.coupl_model6()
elif self.model in [8,9,10]:
self.coupl_model8()
# output
if self.verbosity > 1:
settings.m(0,self.Coupl)
|
python
|
{
"resource": ""
}
|
q22274
|
GRNsim.coupl_model1
|
train
|
def coupl_model1(self):
""" In model 1, we want enforce the following signs
on the couplings. Model 2 has the same couplings
but arbitrary signs.
"""
self.Coupl[0,0] = np.abs(self.Coupl[0,0])
self.Coupl[0,1] = -np.abs(self.Coupl[0,1])
self.Coupl[1,1] = np.abs(self.Coupl[1,1])
|
python
|
{
"resource": ""
}
|
q22275
|
GRNsim.coupl_model5
|
train
|
def coupl_model5(self):
""" Toggle switch.
"""
self.Coupl = -0.2*self.Adj
self.Coupl[2,0] *= -1
self.Coupl[3,0] *= -1
self.Coupl[4,1] *= -1
self.Coupl[5,1] *= -1
|
python
|
{
"resource": ""
}
|
q22276
|
GRNsim.coupl_model8
|
train
|
def coupl_model8(self):
""" Variant of toggle switch.
"""
self.Coupl = 0.5*self.Adj_signed
# reduce the value of the coupling of the repressing genes
# otherwise completely unstable solutions are obtained
for x in np.nditer(self.Coupl,op_flags=['readwrite']):
if x < -1e-6:
x[...] = -0.2
|
python
|
{
"resource": ""
}
|
q22277
|
GRNsim.sim_model_backwards
|
train
|
def sim_model_backwards(self,tmax,X0):
""" Simulate the model backwards in time.
"""
X = np.zeros((tmax,self.dim))
X[tmax-1] = X0
for t in range(tmax-2,-1,-1):
sol = sp.optimize.root(self.sim_model_back_help,
X[t+1],
args=(X[t+1]),method='hybr')
X[t] = sol.x
return X
|
python
|
{
"resource": ""
}
|
q22278
|
GRNsim.parents_from_boolRule
|
train
|
def parents_from_boolRule(self,rule):
""" Determine parents based on boolean updaterule.
Returns list of parents.
"""
rule_pa = rule.replace('(','').replace(')','').replace('or','').replace('and','').replace('not','')
rule_pa = rule_pa.split()
# if there are no parents, continue
if not rule_pa:
return []
# check whether these are meaningful parents
pa_old = []
pa_delete = []
for pa in rule_pa:
if pa not in self.varNames.keys():
settings.m(0,'list of available variables:')
settings.m(0,list(self.varNames.keys()))
message = ('processing of rule "' + rule
+ ' yields an invalid parent: ' + pa
+ ' | check whether the syntax is correct: \n'
+ 'only python expressions "(",")","or","and","not" '
+ 'are allowed, variable names and expressions have to be separated '
+ 'by white spaces')
raise ValueError(message)
        if pa in pa_old:
            pa_delete.append(pa)
        else:
            pa_old.append(pa)
for pa in pa_delete:
rule_pa.remove(pa)
return rule_pa
|
python
|
{
"resource": ""
}
|
q22279
|
GRNsim.build_boolCoeff
|
train
|
def build_boolCoeff(self):
''' Compute coefficients for tuple space.
'''
# coefficients for hill functions from boolean update rules
self.boolCoeff = collections.OrderedDict([(s,[]) for s in self.varNames.keys()])
# parents
self.pas = collections.OrderedDict([(s,[]) for s in self.varNames.keys()])
#
for key in self.boolRules.keys():
rule = self.boolRules[key]
self.pas[key] = self.parents_from_boolRule(rule)
pasIndices = [self.varNames[pa] for pa in self.pas[key]]
# check whether there are coupling matrix entries for each parent
for g in range(self.dim):
if g in pasIndices:
if np.abs(self.Coupl[self.varNames[key],g]) < 1e-10:
raise ValueError('specify coupling value for '+str(key)+' <- '+str(g))
else:
if np.abs(self.Coupl[self.varNames[key],g]) > 1e-10:
raise ValueError('there should be no coupling value for '+str(key)+' <- '+str(g))
if self.verbosity > 1:
settings.m(0,'...'+key)
settings.m(0,rule)
            settings.m(0, self.pas[key])
# now evaluate coefficients
for tuple in list(itertools.product([False,True],repeat=len(self.pas[key]))):
if self.process_rule(rule,self.pas[key],tuple):
self.boolCoeff[key].append(tuple)
#
if self.verbosity > 1:
settings.m(0,self.boolCoeff[key])
|
python
|
{
"resource": ""
}
|
q22280
|
GRNsim.process_rule
|
train
|
def process_rule(self,rule,pa,tuple):
''' Process a string that denotes a boolean rule.
'''
for i,v in enumerate(tuple):
rule = rule.replace(pa[i],str(v))
return eval(rule)
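What the substitution-and-eval step does, in isolation; the rule string and parent names are invented.
rule = "(Gata1 or Fli1) and not Pu1"
parents = ["Gata1", "Fli1", "Pu1"]
state = (True, False, False)  # one tuple from itertools.product over the parents
expr = rule
for name, value in zip(parents, state):
    expr = expr.replace(name, str(value))
print(expr)        # "(True or False) and not False"
print(eval(expr))  # True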
|
python
|
{
"resource": ""
}
|
q22281
|
StaticCauseEffect.sim_givenAdj
|
train
|
def sim_givenAdj(self, Adj: np.array, model='line'):
"""\
    Simulate data given only an adjacency matrix and a model.
    The model is a bivariate functional dependence. The adjacency matrix
    needs to be acyclic.
    Parameters
    ----------
    Adj
        adjacency matrix of shape (dim, dim).
    model
        name of the bivariate coupling function; a key of `self.funcs`.
Returns
-------
Data array of shape (n_samples,dim).
"""
# nice examples
examples = [{'func' : 'sawtooth', 'gdist' : 'uniform',
'sigma_glob' : 1.8, 'sigma_noise' : 0.1}]
# nr of samples
n_samples = 100
# noise
sigma_glob = 1.8
sigma_noise = 0.4
# coupling function / model
func = self.funcs[model]
# glob distribution
sourcedist = 'uniform'
# loop over source nodes
dim = Adj.shape[0]
X = np.zeros((n_samples,dim))
# source nodes have no parents themselves
nrpar = 0
children = list(range(dim))
parents = []
for gp in range(dim):
if Adj[gp,:].sum() == nrpar:
if sourcedist == 'gaussian':
X[:,gp] = np.random.normal(0,sigma_glob,n_samples)
if sourcedist == 'uniform':
X[:,gp] = np.random.uniform(-sigma_glob,sigma_glob,n_samples)
parents.append(gp)
children.remove(gp)
# all of the following guarantees for 3 dim, that we generate the data
# in the correct sequence
# then compute all nodes that have 1 parent, then those with 2 parents
children_sorted = []
nrchildren_par = np.zeros(dim)
nrchildren_par[0] = len(parents)
for nrpar in range(1,dim):
# loop over child nodes
for gp in children:
if Adj[gp,:].sum() == nrpar:
children_sorted.append(gp)
nrchildren_par[nrpar] += 1
# if there is more than a child with a single parent
# order these children (there are two in three dim)
# by distance to the source/parent
if nrchildren_par[1] > 1:
if Adj[children_sorted[0],parents[0]] == 0:
            children_sorted[0], children_sorted[1] = children_sorted[1], children_sorted[0]
for gp in children_sorted:
for g in range(dim):
if Adj[gp,g] > 0:
X[:,gp] += 1./Adj[gp,:].sum()*func(X[:,g])
X[:,gp] += np.random.normal(0,sigma_noise,n_samples)
# fig = pl.figure()
# fig.add_subplot(311)
# pl.plot(X[:,0],X[:,1],'.',mec='white')
# fig.add_subplot(312)
# pl.plot(X[:,1],X[:,2],'.',mec='white')
# fig.add_subplot(313)
# pl.plot(X[:,2],X[:,0],'.',mec='white')
# pl.show()
return X
|
python
|
{
"resource": ""
}
|
q22282
|
StaticCauseEffect.sim_combi
|
train
|
def sim_combi(self):
""" Simulate data to model combi regulation.
"""
n_samples = 500
sigma_glob = 1.8
X = np.zeros((n_samples,3))
X[:,0] = np.random.uniform(-sigma_glob,sigma_glob,n_samples)
X[:,1] = np.random.uniform(-sigma_glob,sigma_glob,n_samples)
func = self.funcs['tanh']
# XOR type
# X[:,2] = (func(X[:,0])*sp.stats.norm.pdf(X[:,1],0,0.2)
# + func(X[:,1])*sp.stats.norm.pdf(X[:,0],0,0.2))
# AND type / diagonal
# X[:,2] = (func(X[:,0]+X[:,1])*sp.stats.norm.pdf(X[:,1]-X[:,0],0,0.2))
# AND type / horizontal
X[:,2] = (func(X[:,0])*sp.stats.norm.cdf(X[:,1],1,0.2))
pl.scatter(X[:,0],X[:,1],c=X[:,2],edgecolor='face')
pl.show()
pl.plot(X[:,1],X[:,2],'.')
pl.show()
return X
|
python
|
{
"resource": ""
}
|
q22283
|
_calc_overlap_count
|
train
|
def _calc_overlap_count(
markers1: dict,
markers2: dict,
):
"""Calculate overlap count between the values of two dictionaries
Note: dict values must be sets
"""
overlaps=np.zeros((len(markers1), len(markers2)))
j=0
for marker_group in markers1:
tmp = [len(markers2[i].intersection(markers1[marker_group])) for i in markers2.keys()]
overlaps[j,:] = tmp
j += 1
return overlaps
|
python
|
{
"resource": ""
}
|
q22284
|
_calc_overlap_coef
|
train
|
def _calc_overlap_coef(
markers1: dict,
markers2: dict,
):
"""Calculate overlap coefficient between the values of two dictionaries
Note: dict values must be sets
"""
overlap_coef=np.zeros((len(markers1), len(markers2)))
j=0
for marker_group in markers1:
tmp = [len(markers2[i].intersection(markers1[marker_group]))/
max(min(len(markers2[i]), len(markers1[marker_group])),1) for i in markers2.keys()]
overlap_coef[j,:] = tmp
j += 1
return overlap_coef
|
python
|
{
"resource": ""
}
|
q22285
|
_calc_jaccard
|
train
|
def _calc_jaccard(
markers1: dict,
markers2: dict,
):
"""Calculate jaccard index between the values of two dictionaries
Note: dict values must be sets
"""
jacc_results=np.zeros((len(markers1), len(markers2)))
j=0
for marker_group in markers1:
tmp = [len(markers2[i].intersection(markers1[marker_group]))/
len(markers2[i].union(markers1[marker_group])) for i in markers2.keys()]
jacc_results[j,:] = tmp
j += 1
return jacc_results
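A usage sketch for the three overlap helpers above, assuming they are in scope (they are module-private, so no import path is shown); the marker dictionaries are invented and their values must be sets.
markers_ref = {'B cells': {'CD19', 'MS4A1', 'CD79A'},
               'T cells': {'CD3D', 'CD3E'}}
markers_new = {'cluster0': {'CD19', 'MS4A1'},
               'cluster1': {'CD3D', 'GZMB'}}
print(_calc_overlap_count(markers_ref, markers_new))  # raw intersection sizes
print(_calc_overlap_coef(markers_ref, markers_new))   # intersection over smaller set (guarded)
print(_calc_jaccard(markers_ref, markers_new))        # intersection over union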
|
python
|
{
"resource": ""
}
|
q22286
|
pca_overview
|
train
|
def pca_overview(adata, **params):
"""\
Plot PCA results.
The parameters are the ones of the scatter plot. Call pca_ranking separately
if you want to change the default settings.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
color : string or list of strings, optional (default: `None`)
Keys for observation/cell annotation either as list `["ann1", "ann2"]` or
string `"ann1,ann2,..."`.
use_raw : `bool`, optional (default: `True`)
Use `raw` attribute of `adata` if present.
{scatter_bulk}
show : bool, optional (default: `None`)
Show the plot, do not return axis.
save : `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the
default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}.
"""
    show = params.pop('show', None)
scatterplots.pca(adata, **params, show=False)
pca_loadings(adata, show=False)
pca_variance_ratio(adata, show=show)
|
python
|
{
"resource": ""
}
|
q22287
|
pca_loadings
|
train
|
def pca_loadings(adata, components=None, show=None, save=None):
"""Rank genes according to contributions to PCs.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
components : str or list of integers, optional
For example, ``'1,2,3'`` means ``[1, 2, 3]``, first, second, third
principal component.
show : bool, optional (default: `None`)
Show the plot, do not return axis.
save : `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the
default filename. Infer the filetype if ending on {'.pdf', '.png', '.svg'}.
"""
if components is None: components = [1, 2, 3]
    elif isinstance(components, str): components = [int(x) for x in components.split(',')]
components = np.array(components) - 1
ranking(adata, 'varm', 'PCs', indices=components)
utils.savefig_or_show('pca_loadings', show=show, save=save)
|
python
|
{
"resource": ""
}
|
q22288
|
pca_variance_ratio
|
train
|
def pca_variance_ratio(adata, n_pcs=30, log=False, show=None, save=None):
"""Plot the variance ratio.
Parameters
----------
n_pcs : `int`, optional (default: `30`)
Number of PCs to show.
log : `bool`, optional (default: `False`)
    Plot on a logarithmic scale.
show : `bool`, optional (default: `None`)
Show the plot, do not return axis.
save : `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the
default filename. Infer the filetype if ending on {'.pdf', '.png', '.svg'}.
"""
ranking(adata, 'uns', 'variance_ratio', n_points=n_pcs, dictionary='pca', labels='PC', log=log)
utils.savefig_or_show('pca_variance_ratio', show=show, save=save)
|
python
|
{
"resource": ""
}
|
q22289
|
dpt_timeseries
|
train
|
def dpt_timeseries(adata, color_map=None, show=None, save=None, as_heatmap=True):
"""Heatmap of pseudotime series.
Parameters
----------
    as_heatmap : bool (default: `True`)
Plot the timeseries as heatmap.
"""
if adata.n_vars > 100:
        logg.warn('Plotting more than 100 genes may take a while, '
                  'consider selecting only highly variable genes, for example.')
# only if number of genes is not too high
if as_heatmap:
# plot time series as heatmap, as in Haghverdi et al. (2016), Fig. 1d
timeseries_as_heatmap(adata.X[adata.obs['dpt_order_indices'].values],
var_names=adata.var_names,
highlightsX=adata.uns['dpt_changepoints'],
color_map=color_map)
else:
# plot time series as gene expression vs time
timeseries(adata.X[adata.obs['dpt_order_indices'].values],
var_names=adata.var_names,
highlightsX=adata.uns['dpt_changepoints'],
xlim=[0, 1.3*adata.X.shape[0]])
pl.xlabel('dpt order')
utils.savefig_or_show('dpt_timeseries', save=save, show=show)
|
python
|
{
"resource": ""
}
|
q22290
|
dpt_groups_pseudotime
|
train
|
def dpt_groups_pseudotime(adata, color_map=None, palette=None, show=None, save=None):
"""Plot groups and pseudotime."""
pl.figure()
pl.subplot(211)
timeseries_subplot(adata.obs['dpt_groups'].cat.codes,
time=adata.obs['dpt_order'].values,
color=np.asarray(adata.obs['dpt_groups']),
highlightsX=adata.uns['dpt_changepoints'],
ylabel='dpt groups',
yticks=(np.arange(len(adata.obs['dpt_groups'].cat.categories), dtype=int)
if len(adata.obs['dpt_groups'].cat.categories) < 5 else None),
palette=palette)
pl.subplot(212)
timeseries_subplot(adata.obs['dpt_pseudotime'].values,
time=adata.obs['dpt_order'].values,
color=adata.obs['dpt_pseudotime'].values,
xlabel='dpt order',
highlightsX=adata.uns['dpt_changepoints'],
ylabel='pseudotime',
yticks=[0, 1],
color_map=color_map)
utils.savefig_or_show('dpt_groups_pseudotime', save=save, show=show)
|
python
|
{
"resource": ""
}
|
q22291
|
_rank_genes_groups_plot
|
train
|
def _rank_genes_groups_plot(adata, plot_type='heatmap', groups=None,
n_genes=10, groupby=None, key=None,
show=None, save=None, **kwds):
"""\
Plot ranking of genes using the specified plot type
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
groups : `str` or `list` of `str`
The groups for which to show the gene ranking.
n_genes : `int`, optional (default: 10)
Number of genes to show.
groupby : `str` or `None`, optional (default: `None`)
The key of the observation grouping to consider. By default,
the groupby is chosen from the rank genes groups parameter but
other groupby options can be used.
{show_save_ax}
"""
if key is None:
key = 'rank_genes_groups'
if 'dendrogram' not in kwds:
kwds['dendrogram'] = True
if groupby is None:
groupby = str(adata.uns[key]['params']['groupby'])
group_names = (adata.uns[key]['names'].dtype.names
if groups is None else groups)
gene_names = []
start = 0
group_positions = []
group_names_valid = []
for group in group_names:
# get all genes that are 'not-nan'
genes_list = [gene for gene in adata.uns[key]['names'][group] if not pd.isnull(gene)][:n_genes]
if len(genes_list) == 0:
logg.warn("No genes found for group {}".format(group))
continue
gene_names.extend(genes_list)
end = start + len(genes_list)
group_positions.append((start, end -1))
group_names_valid.append(group)
start = end
group_names = group_names_valid
if plot_type == 'dotplot':
from .._anndata import dotplot
dotplot(adata, gene_names, groupby, var_group_labels=group_names,
var_group_positions=group_positions, show=show, save=save, **kwds)
elif plot_type == 'heatmap':
from .._anndata import heatmap
heatmap(adata, gene_names, groupby, var_group_labels=group_names,
var_group_positions=group_positions, show=show, save=save, **kwds)
elif plot_type == 'stacked_violin':
from .._anndata import stacked_violin
return stacked_violin(adata, gene_names, groupby, var_group_labels=group_names,
var_group_positions=group_positions, show=show, save=save, **kwds)
elif plot_type == 'tracksplot':
from .._anndata import tracksplot
return tracksplot(adata, gene_names, groupby, var_group_labels=group_names,
var_group_positions=group_positions, show=show, save=save, **kwds)
elif plot_type == 'matrixplot':
from .._anndata import matrixplot
matrixplot(adata, gene_names, groupby, var_group_labels=group_names,
var_group_positions=group_positions, show=show, save=save, **kwds)
|
python
|
{
"resource": ""
}
|
q22292
|
sim
|
train
|
def sim(adata, tmax_realization=None, as_heatmap=False, shuffle=False,
show=None, save=None):
"""Plot results of simulation.
Parameters
----------
as_heatmap : bool (default: False)
Plot the timeseries as heatmap.
    tmax_realization : int or None (default: `None`)
Number of observations in one realization of the time series. The data matrix
adata.X consists in concatenated realizations.
shuffle : bool, optional (default: False)
Shuffle the data.
save : `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the
default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}.
show : bool, optional (default: `None`)
Show the plot, do not return axis.
"""
from ... import utils as sc_utils
if tmax_realization is not None: tmax = tmax_realization
elif 'tmax_write' in adata.uns: tmax = adata.uns['tmax_write']
else: tmax = adata.n_obs
n_realizations = adata.n_obs/tmax
if not shuffle:
if not as_heatmap:
timeseries(adata.X,
var_names=adata.var_names,
xlim=[0, 1.25*adata.n_obs],
highlightsX=np.arange(tmax, n_realizations*tmax, tmax),
xlabel='realizations')
else:
# plot time series as heatmap, as in Haghverdi et al. (2016), Fig. 1d
timeseries_as_heatmap(adata.X,
var_names=adata.var_names,
highlightsX=np.arange(tmax, n_realizations*tmax, tmax))
pl.xticks(np.arange(0, n_realizations*tmax, tmax),
np.arange(n_realizations).astype(int) + 1)
utils.savefig_or_show('sim', save=save, show=show)
else:
# shuffled data
X = adata.X
X, rows = sc_utils.subsample(X, seed=1)
timeseries(X,
var_names=adata.var_names,
xlim=[0, 1.25*adata.n_obs],
highlightsX=np.arange(tmax, n_realizations*tmax, tmax),
xlabel='index (arbitrary order)')
utils.savefig_or_show('sim_shuffled', save=save, show=show)
|
python
|
{
"resource": ""
}
|
q22293
|
cellbrowser
|
train
|
def cellbrowser(
adata, data_dir, data_name,
embedding_keys = None,
annot_keys = ["louvain", "percent_mito", "n_genes", "n_counts"],
cluster_field = "louvain",
nb_marker = 50,
skip_matrix = False,
html_dir = None,
port = None,
do_debug = False
):
"""
Export adata to a UCSC Cell Browser project directory. If `html_dir` is
set, subsequently build the html files from the project directory into
`html_dir`. If `port` is set, start an HTTP server in the background and
serve `html_dir` on `port`.
By default, export all gene expression data from `adata.raw`, the
annotations `louvain`, `percent_mito`, `n_genes` and `n_counts` and the top
`nb_marker` cluster markers. All existing files in data_dir are
overwritten, except cellbrowser.conf.
See `UCSC Cellbrowser <https://github.com/maximilianh/cellBrowser>`__ for
details.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix
data_dir : `str`
Path to directory for exported Cell Browser files.
Usually these are the files `exprMatrix.tsv.gz`, `meta.tsv`,
coordinate files like `tsne.coords.tsv`,
and cluster marker gene lists like `markers.tsv`.
A file `cellbrowser.conf` is also created with pointers to these files.
As a result, each adata object should have its own project_dir.
data_name : `str`
Name of dataset in Cell Browser, a string without special characters.
This is written to `data_dir`/cellbrowser.conf.
Ideally this is a short unique name for the dataset,
like "pbmc3k" or "tabulamuris".
embedding_keys: `list` of `str` or `dict` of `key (str)`->`display label (str)`
2-D embeddings in `adata.obsm` to export.
The prefix "`X_`" or "`X_draw_graph_`" is not necessary.
Coordinates missing from `adata` are skipped.
By default, these keys are tried: ["tsne", "umap", "pagaFa", "pagaFr",
"pagaUmap", "phate", "fa", "fr", "kk", "drl", "rt"].
For these, default display labels are automatically used.
For other values, you can specify a dictionary instead of a list,
the values of the dictionary are then the display labels for the
coordinates, e.g. `{'tsne' : "t-SNE by Scanpy"}`
annot_keys: `list` of `str` or `dict` of `key (str)`->`display label (str)`
Annotations in `adata.obsm` to export.
Can be a dictionary with key -> display label.
skip_matrix: `boolean`
Do not export the matrix.
If you had previously exported this adata into the same `data_dir`,
then there is no need to export the whole matrix again.
This option will make the export a lot faster,
e.g. when only coordinates or meta data were changed.
html_dir: `str`
If this variable is set, the export will build html
files from `data_dir` to `html_dir`, creating html/js/json files.
Usually there is one global html output directory for all datasets.
Often, `html_dir` is located under a webserver's (like Apache)
htdocs directory or is copied to one.
A directory `html_dir`/`project_name` will be created and
an index.html will be created under `html_dir` for all subdirectories.
Existing files will be overwritten.
        If you do not want to use `html_dir`,
you can use the command line tool `cbBuild` to build the html directory.
port: `int`
If this variable and `html_dir` are set,
Python's built-in web server will be spawned as a daemon in the
background and serve the files under `html_dir`.
To kill the process, call `cellbrowser.cellbrowser.stop()`.
do_debug: `boolean`
Activate debugging output
Examples
--------
See this
`tutorial <https://github.com/theislab/scanpy_usage/tree/master/181126_Cellbrowser_exports>`__.
"""
try:
import cellbrowser.cellbrowser as cb
except ImportError:
print("The package cellbrowser is not installed. Install with 'pip "
"install cellbrowser' and retry.")
cb.setDebug(do_debug)
cb.scanpyToCellbrowser(adata, data_dir, data_name,
coordFields=embedding_keys,
metaFields=annot_keys,
clusterField=cluster_field,
nb_marker=nb_marker,
skipMatrix=skip_matrix,
doDebug = None
)
if html_dir:
cb.build(data_dir, html_dir, doDebug=None)
if port:
cb.serve(html_dir, port)
|
python
|
{
"resource": ""
}
|
q22294
|
umap
|
train
|
def umap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in UMAP basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return plot_scatter(adata, 'umap', **kwargs)
|
python
|
{
"resource": ""
}
|
q22295
|
tsne
|
train
|
def tsne(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in tSNE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return plot_scatter(adata, 'tsne', **kwargs)
|
python
|
{
"resource": ""
}
|
q22296
|
diffmap
|
train
|
def diffmap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in Diffusion Map basis.
Parameters
----------
{adata_color_etc}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return plot_scatter(adata, 'diffmap', **kwargs)
|
python
|
{
"resource": ""
}
|
q22297
|
draw_graph
|
train
|
def draw_graph(adata, layout=None, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in graph-drawing basis.
Parameters
----------
{adata_color_etc}
layout : {{'fa', 'fr', 'drl', ...}}, optional (default: last computed)
One of the `draw_graph` layouts, see
:func:`~scanpy.api.tl.draw_graph`. By default, the last computed layout
is used.
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
if layout is None:
layout = str(adata.uns['draw_graph']['params']['layout'])
basis = 'draw_graph_' + layout
if 'X_' + basis not in adata.obsm_keys():
        raise ValueError('Did not find {} in adata.obsm. Did you compute layout {}?'
.format('draw_graph_' + layout, layout))
return plot_scatter(adata, basis, **kwargs)
|
python
|
{
"resource": ""
}
|
q22298
|
pca
|
train
|
def pca(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in PCA coordinates.
Parameters
----------
{adata_color_etc}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return plot_scatter(adata, 'pca', **kwargs)
|
python
|
{
"resource": ""
}
|
q22299
|
_add_legend_or_colorbar
|
train
|
def _add_legend_or_colorbar(adata, ax, cax, categorical, value_to_plot, legend_loc,
scatter_array, legend_fontweight, legend_fontsize,
groups, multi_panel):
"""
Adds a color bar or a legend to the given ax. A legend is added when the
data is categorical and a color bar is added when a continuous value was used.
"""
# add legends or colorbars
if categorical is True:
# add legend to figure
categories = list(adata.obs[value_to_plot].cat.categories)
colors = adata.uns[value_to_plot + '_colors']
if multi_panel is True:
# Shrink current axis by 10% to fit legend and match
# size of plots that are not categorical
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.91, box.height])
if groups is not None:
# only label groups with the respective color
colors = [colors[categories.index(x)] for x in groups]
categories = groups
if legend_loc == 'right margin':
for idx, label in enumerate(categories):
color = colors[idx]
# use empty scatter to set labels
ax.scatter([], [], c=color, label=label)
ax.legend(
frameon=False, loc='center left',
bbox_to_anchor=(1, 0.5),
ncol=(1 if len(categories) <= 14
else 2 if len(categories) <= 30 else 3),
fontsize=legend_fontsize)
if legend_loc == 'on data':
# identify centroids to put labels
all_pos = np.zeros((len(categories), 2))
for ilabel, label in enumerate(categories):
_scatter = scatter_array[adata.obs[value_to_plot] == label, :]
x_pos, y_pos = np.median(_scatter, axis=0)
ax.text(x_pos, y_pos, label,
weight=legend_fontweight,
verticalalignment='center',
horizontalalignment='center',
fontsize=legend_fontsize)
all_pos[ilabel] = [x_pos, y_pos]
# this is temporary storage for access by other tools
utils._tmp_cluster_pos = all_pos
else:
# add colorbar to figure
pl.colorbar(cax, ax=ax, pad=0.01, fraction=0.08, aspect=30)
|
python
|
{
"resource": ""
}
|