repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
MCSE | MCSE-master/SentEval/senteval/tools/__init__.py | 0 | 0 | 0 | py | |
MCSE | MCSE-master/SentEval/senteval/tools/ranking.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Image Annotation/Search for COCO with Pytorch
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import copy
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import torch.optim as optim
class COCOProjNet(nn.Module):
    """Two-branch projection network for COCO image/caption ranking.

    Projects image features (``imgdim``) and sentence features (``sentdim``)
    into a shared ``projdim``-dimensional space and L2-normalizes every
    embedding, so dot products between projections are cosine similarities.

    ``config`` keys: ``'imgdim'``, ``'sentdim'``, ``'projdim'``.
    """

    def __init__(self, config):
        super(COCOProjNet, self).__init__()
        self.imgdim = config['imgdim']
        self.sentdim = config['sentdim']
        self.projdim = config['projdim']
        self.imgproj = nn.Sequential(
            nn.Linear(self.imgdim, self.projdim),
        )
        self.sentproj = nn.Sequential(
            nn.Linear(self.sentdim, self.projdim),
        )

    @staticmethod
    def _l2normalize(x):
        # Row-wise L2 normalization; factored out of forward/proj_* which
        # previously repeated this exact expression four times.
        return x / torch.sqrt(torch.pow(x, 2).sum(1, keepdim=True)).expand_as(x)

    def forward(self, img, sent, imgc, sentc):
        # imgc  : (bsize, ncontrast, imgdim)   contrastive (negative) images
        # sentc : (bsize, ncontrast, sentdim)  contrastive (negative) sentences
        # img   : (bsize, imgdim)
        # sent  : (bsize, sentdim)
        # Tile each positive example so it lines up with its ncontrast
        # negatives, then flatten to (bsize*ncontrast, dim).
        img = img.unsqueeze(1).expand_as(imgc).contiguous()
        img = img.view(-1, self.imgdim)
        imgc = imgc.view(-1, self.imgdim)
        sent = sent.unsqueeze(1).expand_as(sentc).contiguous()
        sent = sent.view(-1, self.sentdim)
        sentc = sentc.view(-1, self.sentdim)

        imgproj = self._l2normalize(self.imgproj(img))
        imgcproj = self._l2normalize(self.imgproj(imgc))
        sentproj = self._l2normalize(self.sentproj(sent))
        sentcproj = self._l2normalize(self.sentproj(sentc))

        # Cosine similarities, each of shape (bsize*ncontrast,).
        # NOTE: anchor1 and anchor2 are the same product written in both
        # orders; kept (as in the original) so the loss interface stays
        # symmetric in its two retrieval directions.
        anchor1 = torch.sum((imgproj * sentproj), 1)
        anchor2 = torch.sum((sentproj * imgproj), 1)
        img_sentc = torch.sum((imgproj * sentcproj), 1)
        sent_imgc = torch.sum((sentproj * imgcproj), 1)
        return anchor1, anchor2, img_sentc, sent_imgc

    def proj_sentence(self, sent):
        """Project sentences to unit-norm embeddings, shape (bsize, projdim)."""
        return self._l2normalize(self.sentproj(sent))

    def proj_image(self, img):
        """Project images to unit-norm embeddings, shape (bsize, projdim)."""
        return self._l2normalize(self.imgproj(img))
class PairwiseRankingLoss(nn.Module):
    """Bidirectional max-margin ranking loss.

    For every (anchor, negative) similarity pair, a negative is penalized
    whenever it comes within ``margin`` of the positive similarity; the
    hinge terms are summed over both retrieval directions.
    """

    def __init__(self, margin):
        super(PairwiseRankingLoss, self).__init__()
        self.margin = margin

    def forward(self, anchor1, anchor2, img_sentc, sent_imgc):
        # Hinge on contrastive sentences vs. the image anchor.
        sentence_hinge = (self.margin - anchor1 + img_sentc).clamp(min=0.0)
        # Hinge on contrastive images vs. the sentence anchor.
        image_hinge = (self.margin - anchor2 + sent_imgc).clamp(min=0.0)
        return sentence_hinge.sum() + image_hinge.sum()
class ImageSentenceRankingPytorch(object):
    # Image Sentence Ranking on COCO with Pytorch
    #
    # Trains a COCOProjNet with PairwiseRankingLoss and reports
    # Recall@{1,5,10} and median rank for both retrieval directions
    # (image->text, text->image), averaged over 5 evaluation folds.
    # NOTE(review): the dev/test slicing below assumes 5 folds of 5000
    # caption rows each (5 captions per image, COCO-style splits) --
    # confirm against the data-preparation code.
    # CUDA is required throughout (.cuda() calls and torch.cuda.LongTensor).

    def __init__(self, train, valid, test, config):
        # fix seed
        self.seed = config['seed']
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed(self.seed)

        # train/valid/test are dicts with 'imgfeat' and 'sentfeat' entries.
        self.train = train
        self.valid = valid
        self.test = test

        # Feature dimensionalities are inferred from the first training row.
        self.imgdim = len(train['imgfeat'][0])
        self.sentdim = len(train['sentfeat'][0])
        self.projdim = config['projdim']
        self.margin = config['margin']

        self.batch_size = 128
        self.ncontrast = 30   # negatives sampled per positive pair
        self.maxepoch = 20
        self.early_stop = True

        config_model = {'imgdim': self.imgdim, 'sentdim': self.sentdim,
                        'projdim': self.projdim}
        self.model = COCOProjNet(config_model).cuda()
        self.loss_fn = PairwiseRankingLoss(margin=self.margin).cuda()
        self.optimizer = optim.Adam(self.model.parameters())

    def prepare_data(self, trainTxt, trainImg, devTxt, devImg,
                     testTxt, testImg):
        # Training tensors stay on CPU (batches are moved to the GPU on
        # demand); dev/test tensors are kept resident on the GPU.
        trainTxt = torch.FloatTensor(trainTxt)
        trainImg = torch.FloatTensor(trainImg)
        devTxt = torch.FloatTensor(devTxt).cuda()
        devImg = torch.FloatTensor(devImg).cuda()
        testTxt = torch.FloatTensor(testTxt).cuda()
        testImg = torch.FloatTensor(testImg).cuda()
        return trainTxt, trainImg, devTxt, devImg, testTxt, testImg

    def run(self):
        """Train with early stopping on the summed dev recalls, then evaluate
        the best model on the 5 test folds.

        Returns a flat tuple:
        (bestdevscore, i2t r1, r5, r10, medr, t2i r1, r5, r10, medr).
        """
        self.nepoch = 0
        bestdevscore = -1
        early_stop_count = 0
        stop_train = False

        # Preparing data
        logging.info('prepare data')
        trainTxt, trainImg, devTxt, devImg, testTxt, testImg = \
            self.prepare_data(self.train['sentfeat'], self.train['imgfeat'],
                              self.valid['sentfeat'], self.valid['imgfeat'],
                              self.test['sentfeat'], self.test['imgfeat'])

        # Training
        while not stop_train and self.nepoch <= self.maxepoch:
            logging.info('start epoch')
            self.trainepoch(trainTxt, trainImg, devTxt, devImg, nepoches=1)
            logging.info('Epoch {0} finished'.format(self.nepoch))

            # Evaluate on the 5 dev folds of 5000 rows each; metrics are
            # averaged over folds (each fold contributes 1/5).
            results = {'i2t': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                       't2i': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                       'dev': bestdevscore}
            score = 0
            for i in range(5):
                devTxt_i = devTxt[i*5000:(i+1)*5000]
                devImg_i = devImg[i*5000:(i+1)*5000]
                # Compute dev ranks img2txt
                r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(devImg_i,
                                                             devTxt_i)
                results['i2t']['r1'] += r1_i2t / 5
                results['i2t']['r5'] += r5_i2t / 5
                results['i2t']['r10'] += r10_i2t / 5
                results['i2t']['medr'] += medr_i2t / 5
                logging.info("Image to text: {0}, {1}, {2}, {3}"
                             .format(r1_i2t, r5_i2t, r10_i2t, medr_i2t))
                # Compute dev ranks txt2img
                r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(devImg_i,
                                                             devTxt_i)
                results['t2i']['r1'] += r1_t2i / 5
                results['t2i']['r5'] += r5_t2i / 5
                results['t2i']['r10'] += r10_t2i / 5
                results['t2i']['medr'] += medr_t2i / 5
                logging.info("Text to Image: {0}, {1}, {2}, {3}"
                             .format(r1_t2i, r5_t2i, r10_t2i, medr_t2i))
                # Dev score = sum of all six recalls, fold-averaged.
                score += (r1_i2t + r5_i2t + r10_i2t +
                          r1_t2i + r5_t2i + r10_t2i) / 5

            logging.info("Dev mean Text to Image: {0}, {1}, {2}, {3}".format(
                results['t2i']['r1'], results['t2i']['r5'],
                results['t2i']['r10'], results['t2i']['medr']))
            logging.info("Dev mean Image to text: {0}, {1}, {2}, {3}".format(
                results['i2t']['r1'], results['i2t']['r5'],
                results['i2t']['r10'], results['i2t']['medr']))

            # early stop on Pearson
            # NOTE(review): comment says "Pearson" but the criterion is the
            # recall-sum score above -- likely copied from another task.
            # Training stops after 3 consecutive non-improving epochs.
            if score > bestdevscore:
                bestdevscore = score
                bestmodel = copy.deepcopy(self.model)
            elif self.early_stop:
                if early_stop_count >= 3:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel

        # Compute test for the 5 splits
        results = {'i2t': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                   't2i': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                   'dev': bestdevscore}
        for i in range(5):
            testTxt_i = testTxt[i*5000:(i+1)*5000]
            testImg_i = testImg[i*5000:(i+1)*5000]
            # Compute test ranks img2txt
            r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(testImg_i, testTxt_i)
            results['i2t']['r1'] += r1_i2t / 5
            results['i2t']['r5'] += r5_i2t / 5
            results['i2t']['r10'] += r10_i2t / 5
            results['i2t']['medr'] += medr_i2t / 5
            # Compute test ranks txt2img
            r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(testImg_i, testTxt_i)
            results['t2i']['r1'] += r1_t2i / 5
            results['t2i']['r5'] += r5_t2i / 5
            results['t2i']['r10'] += r10_t2i / 5
            results['t2i']['medr'] += medr_t2i / 5

        return bestdevscore, results['i2t']['r1'], results['i2t']['r5'], \
            results['i2t']['r10'], results['i2t']['medr'], \
            results['t2i']['r1'], results['t2i']['r5'], \
            results['t2i']['r10'], results['t2i']['medr']

    def trainepoch(self, trainTxt, trainImg, devTxt, devImg, nepoches=1):
        """Run ``nepoches`` passes over the shuffled training pairs with
        randomly sampled contrastive negatives; logs dev metrics every 500
        batches."""
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = list(np.random.permutation(len(trainTxt)))
            all_costs = []
            for i in range(0, len(trainTxt), self.batch_size):
                # forward
                if i % (self.batch_size*500) == 0 and i > 0:
                    # Periodic dev evaluation for progress monitoring.
                    logging.info('samples : {0}'.format(i))
                    r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(devImg,
                                                                 devTxt)
                    logging.info("Image to text: {0}, {1}, {2}, {3}".format(
                        r1_i2t, r5_i2t, r10_i2t, medr_i2t))
                    # Compute test ranks txt2img
                    r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(devImg,
                                                                 devTxt)
                    logging.info("Text to Image: {0}, {1}, {2}, {3}".format(
                        r1_t2i, r5_t2i, r10_t2i, medr_t2i))

                idx = torch.LongTensor(permutation[i:i + self.batch_size])
                imgbatch = Variable(trainImg.index_select(0, idx)).cuda()
                sentbatch = Variable(trainTxt.index_select(0, idx)).cuda()

                # Negatives are drawn (with replacement) from every training
                # index *outside* the current batch slice.
                idximgc = np.random.choice(permutation[:i] +
                                           permutation[i + self.batch_size:],
                                           self.ncontrast*idx.size(0))
                idxsentc = np.random.choice(permutation[:i] +
                                            permutation[i + self.batch_size:],
                                            self.ncontrast*idx.size(0))
                idximgc = torch.LongTensor(idximgc)
                idxsentc = torch.LongTensor(idxsentc)
                # Get indexes for contrastive images and sentences
                imgcbatch = Variable(trainImg.index_select(0, idximgc)).view(
                    -1, self.ncontrast, self.imgdim).cuda()
                sentcbatch = Variable(trainTxt.index_select(0, idxsentc)).view(
                    -1, self.ncontrast, self.sentdim).cuda()

                anchor1, anchor2, img_sentc, sent_imgc = self.model(
                    imgbatch, sentbatch, imgcbatch, sentcbatch)
                # loss
                loss = self.loss_fn(anchor1, anchor2, img_sentc, sent_imgc)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches

    def t2i(self, images, captions):
        """
        Images: (5N, imgdim) matrix of images
        Captions: (5N, sentdim) matrix of captions

        Text-to-image retrieval: rank all N unique images against each of
        the 5N captions; returns (r1, r5, r10, medr).
        """
        with torch.no_grad():
            # Project images and captions
            img_embed, sent_embed = [], []
            for i in range(0, len(images), self.batch_size):
                img_embed.append(self.model.proj_image(
                    Variable(images[i:i + self.batch_size])))
                sent_embed.append(self.model.proj_sentence(
                    Variable(captions[i:i + self.batch_size])))
            img_embed = torch.cat(img_embed, 0).data
            sent_embed = torch.cat(sent_embed, 0).data

            npts = int(img_embed.size(0) / 5)
            # Every 5th row is a distinct image (rows come in groups of 5
            # captions per image).  Requires CUDA.
            idxs = torch.cuda.LongTensor(range(0, len(img_embed), 5))
            ims = img_embed.index_select(0, idxs)

            ranks = np.zeros(5 * npts)
            for index in range(npts):
                # Get query captions
                queries = sent_embed[5*index: 5*index + 5]
                # Compute scores
                scores = torch.mm(queries, ims.transpose(0, 1)).cpu().numpy()
                inds = np.zeros(scores.shape)
                for i in range(len(inds)):
                    # Rank of the ground-truth image for this caption.
                    inds[i] = np.argsort(scores[i])[::-1]
                    ranks[5 * index + i] = np.where(inds[i] == index)[0][0]

            # Compute metrics
            r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
            r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
            r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
            medr = np.floor(np.median(ranks)) + 1
            return (r1, r5, r10, medr)

    def i2t(self, images, captions):
        """
        Images: (5N, imgdim) matrix of images
        Captions: (5N, sentdim) matrix of captions

        Image-to-text retrieval: for each of the N unique images, the rank
        is the best-ranked of its 5 ground-truth captions; returns
        (r1, r5, r10, medr).
        """
        with torch.no_grad():
            # Project images and captions
            img_embed, sent_embed = [], []
            for i in range(0, len(images), self.batch_size):
                img_embed.append(self.model.proj_image(
                    Variable(images[i:i + self.batch_size])))
                sent_embed.append(self.model.proj_sentence(
                    Variable(captions[i:i + self.batch_size])))
            img_embed = torch.cat(img_embed, 0).data
            sent_embed = torch.cat(sent_embed, 0).data

            npts = int(img_embed.size(0) / 5)
            index_list = []

            ranks = np.zeros(npts)
            for index in range(npts):
                # Get query image
                query_img = img_embed[5 * index]
                # Compute scores
                scores = torch.mm(query_img.view(1, -1),
                                  sent_embed.transpose(0, 1)).view(-1)
                scores = scores.cpu().numpy()
                inds = np.argsort(scores)[::-1]
                index_list.append(inds[0])

                # Score: best rank among the image's 5 captions.
                rank = 1e20
                for i in range(5*index, 5*index + 5, 1):
                    tmp = np.where(inds == i)[0][0]
                    if tmp < rank:
                        rank = tmp
                ranks[index] = rank

            # Compute metrics
            r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
            r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
            r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
            medr = np.floor(np.median(ranks)) + 1
            return (r1, r5, r10, medr)
| 15,275 | 41.433333 | 109 | py |
openqasm | openqasm-main/convert2pdf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
CONVERT_COMMAND = 'texi2pdf'
def main(relative_tex_filepath):
    """Compile a LaTeX file to PDF with texi2pdf, in the file's directory.

    Args:
        relative_tex_filepath: path (relative or absolute) to a .tex file.

    Returns:
        0 on success, -1 on any failure (missing file, missing converter,
        or a failed conversion).
    """
    if not os.path.exists(relative_tex_filepath):
        print(
            'File %s does not exist.' % relative_tex_filepath, file=sys.stderr)
        return -1
    absolute_tex_filepath = os.path.abspath(relative_tex_filepath)
    destination_directory = os.path.dirname(absolute_tex_filepath)
    # Bug fix: the original ran subprocess.run(['command', '-v', ...]), but
    # `command` is a shell builtin, not an executable, so the availability
    # check raised an uncaught FileNotFoundError instead of reporting a
    # missing converter.  shutil.which is the portable equivalent.
    import shutil
    if shutil.which(CONVERT_COMMAND) is None:
        print('Cannot find `%s`. Ensure you have LaTeX installed and '
              'the command is in the PATH.' % CONVERT_COMMAND, file=sys.stderr)
        return -1
    try:
        # check=True makes a non-zero exit status raise CalledProcessError;
        # without it the except branch below could never fire.
        subprocess.run(
            [CONVERT_COMMAND, '-c', absolute_tex_filepath],
            cwd=destination_directory, check=True)
    except subprocess.CalledProcessError:
        return -1
    return 0


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('Usage: convert2pdf.py path/to/texfile.tex')
        sys.exit(-1)
    sys.exit(main(sys.argv[1]))
| 1,138 | 24.886364 | 79 | py |
openqasm | openqasm-main/convert2svg.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
CONVERT_COMMAND = 'pdftocairo'
def main(relative_tex_filepath):
    """Convert a PDF (produced by convert2pdf.py) to SVG with pdftocairo.

    Note: despite the parameter name (copied from convert2pdf.py, kept for
    backward compatibility), this expects a path to a *PDF* file.

    Returns:
        0 on success, -1 on any failure (missing file, missing converter,
        or a failed conversion).
    """
    if not os.path.exists(relative_tex_filepath):
        print(
            'File %s does not exist.' % relative_tex_filepath, file=sys.stderr)
        return -1
    absolute_tex_filepath = os.path.abspath(relative_tex_filepath)
    destination_directory = os.path.dirname(absolute_tex_filepath)
    # Bug fix: the original ran subprocess.run(['command', '-v', ...]), but
    # `command` is a shell builtin, not an executable, so the availability
    # check raised an uncaught FileNotFoundError instead of reporting a
    # missing converter.  Also fixes the "Ensure you it installed" typo.
    import shutil
    if shutil.which(CONVERT_COMMAND) is None:
        print('Cannot find `%s`. Ensure you have it installed and the command '
              'is in the PATH.' % CONVERT_COMMAND, file=sys.stderr)
        return -1
    try:
        # check=True makes a non-zero exit status raise CalledProcessError;
        # without it the except branch below could never fire.
        subprocess.run(
            [CONVERT_COMMAND, '-svg', absolute_tex_filepath],
            cwd=destination_directory, check=True)
    except subprocess.CalledProcessError:
        return -1
    return 0


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('Usage: convert2svg.py path/to/pdffile.pdf')
        print('For converting to PDF, use convert2pdf.py first.')
        sys.exit(-1)
    sys.exit(main(sys.argv[1]))
| 1,200 | 25.688889 | 79 | py |
openqasm | openqasm-main/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('_extensions'))
# -- Project information -----------------------------------------------------
from typing import List
# The displayed version string is injected by CI through the VERSION
# environment variable; local builds fall back to the "Live" label.
version = os.getenv('VERSION','Live')
project = f'OpenQASM {version} Specification'
copyright = '2017-2023, Andrew W. Cross, Lev S. Bishop, John A. Smolin, Jay M. Gambetta'
author = 'Andrew W. Cross, Lev S. Bishop, John A. Smolin, Jay M. Gambetta'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.mathjax',
    'sphinx.ext.githubpages',
    'sphinxcontrib.bibtex',
    'reno.sphinxext',
    'multifigure'  # local extension, found via the sys.path insert above
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[str] = [
    "openqasm/docs",
]

# Sets the default code-highlighting language. `.. code-block::` directives
# that are not OQ3 should specify the language manually. The value is
# interpreted as a Pygments lexer alias; this needs the dependency
# `openqasm_pygments`.
highlight_language = "qasm3"

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# VERSION_LIST is a comma-separated list of released versions (set by CI);
# each one gets a sidebar link under /versions/<ver>/.
version_list_var = os.getenv('VERSION_LIST')
extra_nav_links = {'Live Version': '/index.html'} # default link to Live version
if version_list_var is not None:
    version_list = version_list_var.split(',')
    for ver in version_list:
        extra_nav_links[f'Version {ver}'] = f'/versions/{ver}/index.html'
print(extra_nav_links)  # NOTE(review): debug print; shows up in the build log

# Theme specific options
html_theme_options = {
    'extra_nav_links': extra_nav_links
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# The URL which points to the root of the HTML documentation. It is used to
# indicate the location of document like canonical_url.
html_baseurl = os.getenv('HTML_BASEURL', '')

# Add css styles for colored text
html_css_files = ['colors.css']

# If True, figures, tables and code-blocks are automatically numbered
# if they have a caption.
numfig = True

# Necessary setting for sphinxcontrib-bibtex >= 2.0.0
bibtex_bibfiles = ['bibliography.bib']

# This is the list of local variables to export into sphinx by using the
# rst_epilogue below. Using this mechanism we can export the local 'version'
# variable, which can be defined by an environment variable, into the sphinx
# build system for changing the text to specify which specific version of the
# specification is being built
variables_to_export = [
    "version",
]
# Snapshot locals() first so the comprehension below does not read a dict
# that is mutating while we iterate over it.
frozen_locals = dict(locals())
rst_epilog = '\n'.join(map(lambda x: f".. |{x}| replace:: {frozen_locals[x]}", variables_to_export))
del frozen_locals
# Monkey-patch docutils 0.19.0 with a fix to `Node.previous_sibling` that is the
# root cause of incorrect HTML output for bibliograhy files (see gh-455).
# docutils is pinned in `constraints.txt` to a version that is known to work
# with this patch. If docutils releases a new version, this monkeypatching and
# the constraint may be able to be dropped.
import docutils.nodes
# This method is taken from docutils revision r9126, which is to a file
# explicitly placed in the public domain; there is no licence clause.
def previous_sibling(self):
    """Return the node immediately before this one in its parent, or None.

    Backported fix for docutils `Node.previous_sibling` (docutils r9126,
    public domain); None is returned for parentless and first-child nodes.
    """
    parent = self.parent
    if not parent:
        return None
    position = parent.index(self)
    if position > 0:
        return parent[position - 1]
    return None
docutils.nodes.Node.previous_sibling = previous_sibling
| 4,580 | 35.943548 | 100 | py |
openqasm | openqasm-main/source/grammar/openqasm_reference_parser/exceptions.py | __all__ = ["Qasm3ParserError"]
class Qasm3ParserError(Exception):
    """Raised when ANTLR fails to lex or parse an OpenQASM 3 program."""

    pass
| 77 | 12 | 34 | py |
openqasm | openqasm-main/source/grammar/openqasm_reference_parser/tools.py | import contextlib
import io
import antlr4
from antlr4.tree.Trees import Trees, ParseTree
from . import Qasm3ParserError
from .qasm3Lexer import qasm3Lexer
from .qasm3Parser import qasm3Parser
__all__ = ["pretty_tree"]
def pretty_tree(*, program: str = None, file: str = None) -> str:
    """Get a pretty-printed string of the parsed AST of the QASM input.

    The input will be taken either verbatim from the string ``program``, or read
    from the file with name ``file``.  Use exactly one of the possible input
    arguments, passed by keyword.

    Args:
        program: a string containing the QASM to be parsed.
        file: a string of the filename containing the QASM to be parsed.

    Returns:
        a pretty-printed version of the parsed AST of the given program.

    Raises:
        ValueError: no input is given, or too many inputs are given.
        Qasm3ParserError: the input was not parseable as valid QASM 3.
    """
    if program is not None and file is not None:
        raise ValueError("Must supply only one of 'program' and 'file'.")
    if program is not None:
        input_stream = antlr4.InputStream(program)
    elif file is not None:
        input_stream = antlr4.FileStream(file, encoding="utf-8")
    else:
        raise ValueError("One of 'program' and 'file' must be supplied.")

    # ANTLR reports lexer/parser errors on stderr; capture them so a failed
    # parse can be converted into an exception.
    with io.StringIO() as captured, contextlib.redirect_stderr(captured):
        token_stream = antlr4.CommonTokenStream(qasm3Lexer(input_stream))
        parser = qasm3Parser(token_stream)
        pretty = _pretty_tree_inner(parser.program(), parser.ruleNames, 0)
        errors = captured.getvalue()
    if errors:
        raise Qasm3ParserError(f"Parse tree build failed. Error:\n{errors}")
    return pretty
def _pretty_tree_inner(parse_tree: ParseTree, rule_names: list, level: int) -> str:
    """Recursive worker for :func:`pretty_tree`.

    Args:
        parse_tree: a node in the parse tree produced by the ANTLR parser.
        rule_names: the ANTLR-generated list of grammar rule names.
        level: the current indentation depth (two spaces per level).

    Returns:
        the pretty-printed subtree rooted at this node.
    """
    pieces = ["  " * level + Trees.getNodeText(parse_tree, rule_names) + "\n"]
    for child_index in range(parse_tree.getChildCount()):
        child = parse_tree.getChild(child_index)
        pieces.append(_pretty_tree_inner(child, rule_names, level + 1))
    return "".join(pieces)
| 2,569 | 35.197183 | 83 | py |
openqasm | openqasm-main/source/grammar/openqasm_reference_parser/__init__.py | from .exceptions import *
from .tools import *
from .qasm3Lexer import qasm3Lexer
from .qasm3Parser import qasm3Parser
| 119 | 23 | 36 | py |
openqasm | openqasm-main/source/grammar/tests/test_grammar.py | import itertools
import os
import pathlib
from typing import List, Union, Sequence
import pytest
import yaml
import openqasm_reference_parser
TEST_DIR = pathlib.Path(__file__).parent
REPO_DIR = TEST_DIR.parents[2]
def find_files(
    directory: Union[str, os.PathLike], suffix: str = "", raw: bool = False
) -> List:
    """Recursively find all files in ``directory`` whose names end in ``suffix``.

    Args:
        directory: the directory to search (made absolute internally).
        suffix: required filename ending; files without it are ignored.
            Useful for restricting results to one extension.
        raw: if False (default), each element is a ``pytest.param`` whose id
            is the path relative to ``directory``; if True, plain path
            strings are returned instead.

    Returns:
        a list of matches, formatted according to ``raw``.
    """
    base = pathlib.Path(directory).absolute()

    if raw:
        def wrap(root, name):
            return str(pathlib.Path(root) / name)
    else:
        def wrap(root, name):
            full_path = pathlib.Path(root) / name
            return pytest.param(str(full_path), id=str(full_path.relative_to(base)))

    matches = []
    for root, _, names in os.walk(base):
        for name in names:
            if name.endswith(suffix):
                matches.append(wrap(root, name))
    return matches
def cases_from_lines(
    files: Union[str, os.PathLike, Sequence[Union[str, os.PathLike]]],
    skip_comments: bool = True,
    root: Union[str, os.PathLike] = TEST_DIR,
):
    """Build pytest parameters from the individual lines of one or more files.

    Each non-empty line (stripped of surrounding whitespace) becomes one test
    case; its id is ``<filename relative to root>:<1-based line number>``.

    Args:
        files: a single path or a sequence of paths (e.g. the output of
            :obj:`find_files` with ``raw=True``).
        skip_comments: if True, lines starting with "//" are skipped.
        root: directory the filenames are made relative to in the test id.

    Returns:
        a list of ``pytest.param`` objects, one per usable line.
    """
    if isinstance(files, (str, os.PathLike)):
        files = (files,)
    base = pathlib.Path(root)

    def as_param(text, filename, line_number):
        relative = pathlib.Path(filename).relative_to(base)
        return pytest.param(text, id=f"{relative}:{line_number + 1}")

    cases = []
    for filename in files:
        with open(filename, "r") as handle:
            for line_number, raw_line in enumerate(handle):
                stripped = raw_line.strip()
                if not stripped:
                    continue
                if skip_comments and stripped.startswith("//"):
                    continue
                cases.append(as_param(stripped, filename, line_number))
    return cases
@pytest.mark.parametrize(
    "filename",
    find_files(TEST_DIR / "reference", suffix=".yaml"),
)
def test_reference_output(filename):
    """Test that the reference files parse to the exact expected output."""
    with open(filename, "r") as file:
        obj = yaml.safe_load(file)
    # Make sure the YAML files have only the correct keys.
    assert set(obj) == {"reference", "source"}
    parsed = openqasm_reference_parser.pretty_tree(program=obj["source"])
    assert parsed == obj["reference"]


@pytest.mark.parametrize(
    "filename",
    find_files(REPO_DIR / "examples", suffix=".qasm"),
)
def test_examples_parse(filename):
    """Test that the example QASM3 files all parse without error."""
    # Smoke test: success is simply "no Qasm3ParserError raised".
    openqasm_reference_parser.pretty_tree(file=filename)


class TestInvalidProgramsFailToParse:
    # Each line of every .qasm file under invalid/statements is a single
    # stand-alone statement that the grammar is expected to reject.

    @pytest.mark.parametrize(
        "statement",
        cases_from_lines(
            find_files(TEST_DIR / "invalid" / "statements", ".qasm", raw=True),
            root=TEST_DIR / "invalid" / "statements",
        ),
    )
    def test_single_global_statement(self, statement):
        with pytest.raises(openqasm_reference_parser.Qasm3ParserError):
            openqasm_reference_parser.pretty_tree(program=statement)
| 4,529 | 32.80597 | 80 | py |
openqasm | openqasm-main/source/openqasm/tools/update_antlr_version_requirements.py | import sys
import re
def parse_versions():
    """Yield ``(major, minor)`` tuples from the versions file named in
    ``sys.argv[2]``.

    Blank lines are skipped and anything after a ``#`` on a line is treated
    as a comment.
    """
    with open(sys.argv[2], "r") as version_file:
        for raw_line in version_file:
            text = raw_line.split("#", 1)[0].strip()
            if not text:
                continue
            parts = tuple(int(piece) for piece in text.split("."))
            yield parts[0], parts[1]
def main():
    """Rewrite the antlr4 requirement placeholder in a ``setup.cfg``.

    Expects ``sys.argv`` to be ``[script, setup.cfg path, versions file]``.
    Validates that all supplied versions are ANTLR 4 with a contiguous range
    of minor versions, then replaces the
    ``antlr4-python3-runtime  # __ANTLR_VERSIONS__`` placeholder with a
    ``>=,<`` pin.  Exits with a distinct non-zero status per failure mode.
    """
    if len(sys.argv) != 3:
        print(f"Usage: {__file__} path/to/setup.cfg path/to/antlr_versions.txt", file=sys.stderr)
        sys.exit(1)
    supported = sorted(parse_versions())
    if not supported:
        print("didn't receive any versions to support", file=sys.stderr)
        sys.exit(2)
    if {supported[0][0], supported[-1][0]} != {4}:
        print("can only handle ANTLR 4", file=sys.stderr)
        sys.exit(4)
    lowest = supported[0][1]
    highest = supported[-1][1]
    # A gap in the minor versions would make a single >=,< range wrong.
    if {minor for _, minor in supported} != set(range(lowest, highest + 1)):
        print("supplied minor versions must be continuous", file=sys.stderr)
        sys.exit(8)
    constraint = f"antlr4_python3_runtime>=4.{lowest},<4.{highest + 1}"
    with open(sys.argv[1], "r") as setup_file:
        contents = setup_file.read()
    new_contents, count = re.subn(
        r"antlr4-python3-runtime\s*#\s*__ANTLR_VERSIONS__", constraint, contents
    )
    if not count:
        print("given setup.cfg file did not seem to contain an antlr4 dependency", file=sys.stderr)
        sys.exit(16)
    with open(sys.argv[1], "w") as setup_file:
        setup_file.write(new_contents)
if __name__ == "__main__":
main()
| 1,687 | 32.76 | 99 | py |
openqasm | openqasm-main/source/openqasm/tests/conftest.py | import collections
import pathlib
import pytest
import openqasm3
TEST_DIR = pathlib.Path(__file__).parent
ROOT_DIR = TEST_DIR.parents[2]
EXAMPLES_DIR = ROOT_DIR / "examples"
# Collected eagerly at import time so the list can be used as fixture params.
EXAMPLES = tuple(EXAMPLES_DIR.glob("**/*.qasm"))


# Session scoped because we want the parsed examples to be session scoped as well.
@pytest.fixture(params=EXAMPLES, ids=lambda x: str(x.relative_to(EXAMPLES_DIR)), scope="session")
def example_file(request):
    # Absolute path (as a string) of one example file; the fixture is
    # parametrized over every .qasm file found under examples/.
    return str(request.param)


_ExampleASTReturn = collections.namedtuple("_ExampleASTReturn", ("filename", "ast"))


# Session scoped to avoid paying the parsing cost of each file multiple times (ANTLR-based parsing
# isn't the speediest).
@pytest.fixture(scope="session")
def parsed_example(example_file):
    """The parsed AST of each of the example OpenQASM files, and its filename. The two attributes
    are `filename` and `ast`."""
    with open(example_file, "r") as f:
        content = f.read()
    return _ExampleASTReturn(example_file, openqasm3.parse(content))
| 1,012 | 30.65625 | 98 | py |
openqasm | openqasm-main/source/openqasm/tests/test_qasm_parser.py | import dataclasses
import pytest
from openqasm3.ast import (
AccessControl,
AliasStatement,
AngleType,
Annotation,
ArrayLiteral,
ArrayReferenceType,
ArrayType,
AssignmentOperator,
BinaryExpression,
BinaryOperator,
BitType,
BitstringLiteral,
BoolType,
BooleanLiteral,
Box,
BranchingStatement,
CalibrationDefinition,
CalibrationGrammarDeclaration,
CalibrationStatement,
Cast,
ClassicalArgument,
ClassicalAssignment,
ClassicalDeclaration,
ComplexType,
Concatenation,
ContinueStatement,
DelayInstruction,
DiscreteSet,
DurationLiteral,
DurationOf,
DurationType,
EndStatement,
ExpressionStatement,
ExternArgument,
ExternDeclaration,
FloatLiteral,
FloatType,
ForInLoop,
FunctionCall,
GateModifierName,
IODeclaration,
IOKeyword,
Identifier,
ImaginaryLiteral,
Include,
IndexExpression,
IndexedIdentifier,
IntType,
IntegerLiteral,
Pragma,
Program,
QASMNode,
QuantumArgument,
QuantumGate,
QuantumGateDefinition,
QuantumGateModifier,
QuantumMeasurement,
QuantumMeasurementStatement,
QuantumPhase,
QubitDeclaration,
RangeDefinition,
ReturnStatement,
SizeOf,
Span,
StretchType,
SubroutineDefinition,
TimeUnit,
UintType,
UnaryExpression,
UnaryOperator,
)
from openqasm3.parser import parse, QASM3ParsingError
from openqasm3.visitor import QASMVisitor
def _with_annotations(node, annotations):
"""Helper function to attach annotations to a QASMNode, since the current
dataclass-based implementation does not allow us to easily add the
annotations field (with a default) to statement constructors."""
node.annotations = annotations
return node
class SpanGuard(QASMVisitor):
    """Ensure that we did not forget to set spans when we add new AST nodes"""

    def visit(self, node: QASMNode):
        # Every node the parser produces must carry source-location info.
        assert node.span is not None
        return super().visit(node)
def _remove_spans(node):
    """Return a new ``QASMNode`` with all spans recursively set to ``None``,
    reducing noise when equality assertions in the tests fail.

    Lists are mapped element-wise; non-QASMNode values pass through
    unchanged.  Fields excluded from ``__init__`` are copied by attribute
    assignment after construction.
    """
    if isinstance(node, list):
        return [_remove_spans(entry) for entry in node]
    if not isinstance(node, QASMNode):
        return node
    init_args = {}
    post_init = {}
    for field in dataclasses.fields(node):
        if field.name == "span":
            continue
        cleaned = _remove_spans(getattr(node, field.name))
        if field.init:
            init_args[field.name] = cleaned
        else:
            post_init[field.name] = cleaned
    rebuilt = type(node)(**init_args)
    for name, value in post_init.items():
        setattr(rebuilt, name, value)
    return rebuilt
def test_qubit_declaration():
p = """
qubit q;
qubit[4] a;
""".strip()
program = parse(p)
assert _remove_spans(program) == Program(
statements=[
QubitDeclaration(qubit=Identifier(name="q"), size=None),
QubitDeclaration(
qubit=Identifier(name="a"),
size=IntegerLiteral(4),
),
]
)
SpanGuard().visit(program)
qubit_declaration = program.statements[0]
assert qubit_declaration.span == Span(1, 0, 1, 7)
assert qubit_declaration.qubit.span == Span(1, 6, 1, 6)
def test_bit_declaration():
p = """
bit c;
""".strip()
program = parse(p)
assert _remove_spans(program) == Program(
statements=[ClassicalDeclaration(BitType(None), Identifier("c"), None)]
)
SpanGuard().visit(program)
classical_declaration = program.statements[0]
assert classical_declaration.span == Span(1, 0, 1, 5)
def test_qubit_and_bit_declaration():
p = """
bit c;
qubit a;
""".strip()
program = parse(p)
assert _remove_spans(program) == Program(
statements=[
ClassicalDeclaration(BitType(None), Identifier("c"), None),
QubitDeclaration(qubit=Identifier(name="a"), size=None),
]
)
SpanGuard().visit(program)
def test_integer_declaration():
p = """
uint[16] a = 100;
uint[16] a = 0b0110_0100;
int[16] a = 0B01100100;
uint[16] a = 0o144;
uint[16] a = 0xff_64;
int[16] a = 0X19_a_b;
""".strip()
program = parse(p)
uint16 = UintType(IntegerLiteral(16))
int16 = IntType(IntegerLiteral(16))
a = Identifier("a")
assert _remove_spans(program) == Program(
statements=[
ClassicalDeclaration(uint16, a, IntegerLiteral(100)),
ClassicalDeclaration(uint16, a, IntegerLiteral(0b0110_0100)),
ClassicalDeclaration(int16, a, IntegerLiteral(0b0110_0100)),
ClassicalDeclaration(uint16, a, IntegerLiteral(0o144)),
ClassicalDeclaration(uint16, a, IntegerLiteral(0xFF64)),
ClassicalDeclaration(int16, a, IntegerLiteral(0x19AB)),
]
)
SpanGuard().visit(program)
def test_float_declaration():
    """Float literals in every supported notation parse to FloatLiteral."""
    p = """
    float[64] a = 125.;
    float[64] a = 1_25.;
    float[64] a = 1_25.e1;
    float[64] a = .1_25;
    float[64] a = .125e1;
    float[64] a = .125e+1;
    float[64] a = .125e-1;
    float[64] a = 125.125e1_25;
    """.strip()
    tree = parse(p)
    float64 = FloatType(IntegerLiteral(64))
    a = Identifier("a")
    # Expected numeric value of each source line, in order.
    values = [125.0, 125.0, 1250.0, 0.125, 1.25, 1.25, 0.0125, 125.125e125]
    expected = [ClassicalDeclaration(float64, a, FloatLiteral(v)) for v in values]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
def test_simple_type_declarations():
    """Sized, expression-sized and unsized scalar types all declare correctly."""
    p = """
    int[32] a;
    int[const_expr] a;
    int a;
    uint[32] a = 1;
    uint[const_expr] a;
    uint a = 1;
    float[32] a;
    float a;
    angle[32] a;
    angle a;
    """.strip()
    tree = parse(p)
    a = Identifier("a")
    one = IntegerLiteral(1)
    thirty_two = IntegerLiteral(32)
    const_expr = Identifier("const_expr")
    # (declared type, initialiser) per source line, in order.
    cases = [
        (IntType(size=thirty_two), None),
        (IntType(size=const_expr), None),
        (IntType(size=None), None),
        (UintType(size=thirty_two), one),
        (UintType(size=const_expr), None),
        (UintType(size=None), one),
        (FloatType(size=thirty_two), None),
        (FloatType(size=None), None),
        (AngleType(size=thirty_two), None),
        (AngleType(size=None), None),
    ]
    expected = [
        ClassicalDeclaration(type=type_, identifier=a, init_expression=init)
        for type_, init in cases
    ]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
def test_complex_declaration():
    """Complex types with/without base sizes, and imaginary literals, parse."""
    p = """
    complex[float[64]] a;
    complex[float] fq;
    complex implicit;
    complex[float[64]] imag = 1im;
    complex[float[64]] c64 = 2+9.2im;
    complex[float] a_float = 2.1+0im;
    complex c = 0-9 im ;
    complex rl = 2.1im - 0.2;
    """.strip()
    tree = parse(p)
    # The three spellings of a complex type that appear in the source.
    c_f64 = ComplexType(base_type=FloatType(size=IntegerLiteral(64)))
    c_f = ComplexType(base_type=FloatType(size=None))
    c_bare = ComplexType(base_type=None)
    plus, minus = BinaryOperator["+"], BinaryOperator["-"]
    expected = [
        ClassicalDeclaration(c_f64, Identifier("a"), None),
        ClassicalDeclaration(c_f, Identifier("fq"), None),
        ClassicalDeclaration(c_bare, Identifier("implicit"), None),
        ClassicalDeclaration(c_f64, Identifier("imag"), ImaginaryLiteral(1.0)),
        ClassicalDeclaration(
            c_f64,
            Identifier("c64"),
            BinaryExpression(plus, IntegerLiteral(2), ImaginaryLiteral(9.2)),
        ),
        ClassicalDeclaration(
            c_f,
            Identifier("a_float"),
            BinaryExpression(plus, FloatLiteral(2.1), ImaginaryLiteral(0)),
        ),
        ClassicalDeclaration(
            c_bare,
            Identifier("c"),
            BinaryExpression(minus, IntegerLiteral(0), ImaginaryLiteral(9.0)),
        ),
        ClassicalDeclaration(
            c_bare,
            Identifier("rl"),
            BinaryExpression(minus, ImaginaryLiteral(2.1), FloatLiteral(0.2)),
        ),
    ]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
    assert tree.statements[0].span == Span(1, 0, 1, 20)
def test_array_declaration():
    """Array declarations with various base types, dims and initialisers parse."""
    p = """
    array[uint[8], 2] a;
    array[uint, 2] a;
    array[int[8], 2] a = {1, 1};
    array[bit, 2] a = b;
    array[float[32], 2, 2] a;
    array[complex[float[64]], 2, 2] a = {{1, 1}, {2, 2}};
    array[uint[8], 2, 2] a = {b, b};
    """.strip()
    tree = parse(p)
    a, b = Identifier("a"), Identifier("b")
    one, two, eight = IntegerLiteral(1), IntegerLiteral(2), IntegerLiteral(8)
    SpanGuard().visit(tree)
    # (array type, initialiser) per source line, in order.
    cases = [
        (ArrayType(base_type=UintType(eight), dimensions=[two]), None),
        (ArrayType(base_type=UintType(size=None), dimensions=[two]), None),
        (ArrayType(base_type=IntType(eight), dimensions=[two]), ArrayLiteral([one, one])),
        (ArrayType(base_type=BitType(size=None), dimensions=[two]), b),
        (
            ArrayType(base_type=FloatType(IntegerLiteral(32)), dimensions=[two, two]),
            None,
        ),
        (
            ArrayType(
                base_type=ComplexType(FloatType(IntegerLiteral(64))),
                dimensions=[two, two],
            ),
            ArrayLiteral([ArrayLiteral([one, one]), ArrayLiteral([two, two])]),
        ),
        (
            ArrayType(base_type=UintType(eight), dimensions=[two, two]),
            ArrayLiteral([b, b]),
        ),
    ]
    expected = [
        ClassicalDeclaration(type=type_, identifier=a, init_expression=init)
        for type_, init in cases
    ]
    assert _remove_spans(tree) == Program(statements=expected)
def test_extern_declarations():
    """Extern declarations with arguments, access modifiers and returns parse."""
    p = """
    extern f();
    extern f() -> bool;
    extern f(bool);
    extern f(int[32], uint[32]);
    extern f(mutable array[complex[float[64]], N_ELEMENTS]) -> int[2 * INT_SIZE];
    """.strip()
    tree = parse(p)
    f = Identifier(name="f")
    expected = [
        ExternDeclaration(name=f, arguments=[]),
        ExternDeclaration(name=f, arguments=[], return_type=BoolType()),
        ExternDeclaration(name=f, arguments=[ExternArgument(type=BoolType())]),
        ExternDeclaration(
            name=f,
            arguments=[
                ExternArgument(type=IntType(IntegerLiteral(32))),
                ExternArgument(type=UintType(IntegerLiteral(32))),
            ],
        ),
        ExternDeclaration(
            name=f,
            arguments=[
                ExternArgument(
                    type=ArrayReferenceType(
                        base_type=ComplexType(FloatType(IntegerLiteral(64))),
                        dimensions=[Identifier(name="N_ELEMENTS")],
                    ),
                    access=AccessControl["mutable"],
                ),
            ],
            return_type=IntType(
                size=BinaryExpression(
                    op=BinaryOperator["*"],
                    lhs=IntegerLiteral(2),
                    rhs=Identifier(name="INT_SIZE"),
                )
            ),
        ),
    ]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
def test_single_gatecall():
    """A bare gate call parses to a QuantumGate with correct spans."""
    p = """
    h q;
    """.strip()
    tree = parse(p)
    expected = QuantumGate(
        modifiers=[], name=Identifier("h"), arguments=[], qubits=[Identifier(name="q")]
    )
    assert _remove_spans(tree) == Program(statements=[expected])
    SpanGuard().visit(tree)
    gate = tree.statements[0]
    assert gate.span == Span(1, 0, 1, 3)
    assert gate.qubits[0].span == Span(1, 2, 1, 2)
def test_gate_definition1():
    """A simple gate definition with a two-gate body parses, spans intact."""
    p = """
    gate xy q {
        x q;
        y q;
    }
    """.strip()
    tree = parse(p)
    body = [
        QuantumGate(
            modifiers=[], name=Identifier(g), arguments=[], qubits=[Identifier(name="q")]
        )
        for g in ("x", "y")
    ]
    assert _remove_spans(tree) == Program(
        statements=[QuantumGateDefinition(Identifier("xy"), [], [Identifier("q")], body)],
    )
    SpanGuard().visit(tree)
    definition = tree.statements[0]
    assert definition.span == Span(1, 0, 4, 0)
    assert definition.qubits[0].span == Span(1, 8, 1, 8)
def test_gate_definition2():
    """The textbook 'majority' gate definition parses correctly."""
    p = """
    gate majority a, b, c {
        cx c, b;
        cx c, a;
        ccx a, b, c;
    }""".strip()
    tree = parse(p)
    a, b, c = Identifier(name="a"), Identifier(name="b"), Identifier(name="c")

    def call(gate, *qubits):
        # Build an unmodified, argument-free gate call on the given qubits.
        return QuantumGate(
            modifiers=[], name=Identifier(gate), arguments=[], qubits=list(qubits)
        )

    assert _remove_spans(tree) == Program(
        statements=[
            QuantumGateDefinition(
                name=Identifier("majority"),
                arguments=[],
                qubits=[a, b, c],
                body=[call("cx", c, b), call("cx", c, a), call("ccx", a, b, c)],
            )
        ],
    )
    SpanGuard().visit(tree)
    definition = tree.statements[0]
    assert definition.span == Span(1, 0, 5, 0)
    assert definition.qubits[0].span == Span(1, 14, 1, 14)
def test_gate_definition3():
    """A gate definition with an argument and a gphase body statement parses."""
    p = """
    gate rz(λ) a { gphase(-λ/2); U(0, 0, λ) a; }
    """.strip()
    tree = parse(p)
    lam = Identifier(name="λ")
    phase = QuantumPhase(
        modifiers=[],
        argument=BinaryExpression(
            op=BinaryOperator["/"],
            lhs=UnaryExpression(op=UnaryOperator["-"], expression=lam),
            rhs=IntegerLiteral(value=2),
        ),
        qubits=[],
    )
    u_gate = QuantumGate(
        modifiers=[],
        name=Identifier("U"),
        arguments=[IntegerLiteral(value=0), IntegerLiteral(value=0), lam],
        qubits=[Identifier(name="a")],
    )
    assert _remove_spans(tree) == Program(
        statements=[
            QuantumGateDefinition(
                name=Identifier("rz"),
                arguments=[lam],
                qubits=[Identifier(name="a")],
                body=[phase, u_gate],
            )
        ]
    )
    SpanGuard().visit(tree)
    definition = tree.statements[0]
    assert definition.span == Span(1, 0, 1, 43)
    assert definition.arguments[0].span == Span(1, 8, 1, 8)
    assert definition.qubits[0].span == Span(1, 11, 1, 11)
def test_gate_calls():
    """Gate calls on declared qubits, with and without modifiers, parse."""
    p = """
    qubit q;
    qubit r;
    h q;
    cx q, r;
    inv @ h q;
    """.strip()
    # TODO Add "ctrl @ pow(power) @ phase(theta) q, r;" after we complete expressions
    tree = parse(p)
    q, r = Identifier(name="q"), Identifier(name="r")
    inv = QuantumGateModifier(modifier=GateModifierName["inv"], argument=None)
    expected = [
        QubitDeclaration(qubit=q, size=None),
        QubitDeclaration(qubit=r, size=None),
        QuantumGate(modifiers=[], name=Identifier("h"), arguments=[], qubits=[q]),
        QuantumGate(modifiers=[], name=Identifier("cx"), arguments=[], qubits=[q, r]),
        QuantumGate(modifiers=[inv], name=Identifier("h"), arguments=[], qubits=[q]),
    ]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
def test_gate_defs():
    """A gate body may contain several sequential gate calls."""
    p = """
    gate xyz q {
        x q;
        y q;
        z q;
    }
    """.strip()
    tree = parse(p)
    body = [
        QuantumGate(
            modifiers=[], name=Identifier(g), arguments=[], qubits=[Identifier(name="q")]
        )
        for g in "xyz"
    ]
    assert _remove_spans(tree) == Program(
        statements=[
            QuantumGateDefinition(
                name=Identifier("xyz"), arguments=[], qubits=[Identifier("q")], body=body
            )
        ],
    )
    SpanGuard().visit(tree)
def test_alias_statement():
    """A 'let' alias statement parses with correct target/value spans."""
    p = """
    let a = b;
    """.strip()
    tree = parse(p)
    assert _remove_spans(tree) == Program(
        statements=[AliasStatement(target=Identifier(name="a"), value=Identifier(name="b"))]
    )
    SpanGuard().visit(tree)
    alias = tree.statements[0]
    assert alias.span == Span(1, 0, 1, 9)
    assert alias.target.span == Span(1, 4, 1, 4)
    assert alias.value.span == Span(1, 8, 1, 8)
def test_primary_expression():
    """Every primary-expression form parses to the expected AST node."""
    p = """
    π;
    pi;
    5;
    2.0;
    true;
    false;
    a;
    "0110_0100";
    sin(0.0);
    foo(x);
    1.1ns;
    0.3µs;
    1E-4us;
    (x);
    q[1];
    int[1](x);
    bool(x);
    sizeof(a);
    sizeof(a, 1);
    """.strip()
    tree = parse(p)
    # Expected expression for each source line, in order.
    expressions = [
        Identifier(name="π"),
        Identifier(name="pi"),
        IntegerLiteral(5),
        FloatLiteral(2.0),
        BooleanLiteral(True),
        BooleanLiteral(False),
        Identifier("a"),
        BitstringLiteral(100, 8),
        FunctionCall(Identifier("sin"), [FloatLiteral(0.0)]),
        FunctionCall(Identifier("foo"), [Identifier("x")]),
        DurationLiteral(1.1, TimeUnit.ns),
        DurationLiteral(0.3, TimeUnit.us),
        DurationLiteral(1e-4, TimeUnit.us),
        Identifier("x"),
        IndexExpression(Identifier("q"), [IntegerLiteral(1)]),
        Cast(IntType(size=IntegerLiteral(1)), Identifier("x")),
        Cast(BoolType(), Identifier("x")),
        SizeOf(Identifier("a")),
        SizeOf(Identifier("a"), IntegerLiteral(1)),
    ]
    assert _remove_spans(tree) == Program(
        statements=[ExpressionStatement(expression=e) for e in expressions]
    )
def test_unary_expression():
    """Each unary operator parses to a UnaryExpression on its operand."""
    p = """
    ~b;
    !b;
    -i;
    """.strip()
    tree = parse(p)
    cases = [("~", "b"), ("!", "b"), ("-", "i")]
    expected = [
        ExpressionStatement(
            expression=UnaryExpression(
                op=UnaryOperator[op], expression=Identifier(name=operand)
            )
        )
        for op, operand in cases
    ]
    assert _remove_spans(tree) == Program(statements=expected)
def test_binary_expression():
    """Each binary operator parses to a BinaryExpression on its operands."""
    p = """
    b1 || b2;
    b1 && b2;
    b1 | b2;
    b1 ^ b2;
    b1 & b2;
    b1 != b2;
    i1 >= i2;
    i1 << i2;
    i1 - i2;
    i1 / i2;
    """.strip()
    tree = parse(p)
    # (operator, lhs name, rhs name) per source line, in order.
    cases = [
        ("||", "b1", "b2"),
        ("&&", "b1", "b2"),
        ("|", "b1", "b2"),
        ("^", "b1", "b2"),
        ("&", "b1", "b2"),
        ("!=", "b1", "b2"),
        (">=", "i1", "i2"),
        ("<<", "i1", "i2"),
        ("-", "i1", "i2"),
        ("/", "i1", "i2"),
    ]
    expected = [
        ExpressionStatement(
            expression=BinaryExpression(
                op=BinaryOperator[op],
                lhs=Identifier(name=lhs),
                rhs=Identifier(name=rhs),
            )
        )
        for op, lhs, rhs in cases
    ]
    assert _remove_spans(tree) == Program(statements=expected)
def test_binary_expression_precedence():
    """Operator precedence groups operands in the documented order."""
    p = """
    b1 || b2 && b3;
    b1 | b2 ^ b3;
    b1 != b2 + b3;
    i1 >= i2 + i3;
    i1 - i2 << i3;
    i1 - i2 / i3;
    i1[i2] + -i1[i2];
    -i1 ** i2;
    """.strip()
    tree = parse(p)

    def binary(op, lhs, rhs):
        # Shorthand: a BinaryExpression with the operator spelled as a string.
        return BinaryExpression(op=BinaryOperator[op], lhs=lhs, rhs=rhs)

    b1, b2, b3 = Identifier(name="b1"), Identifier(name="b2"), Identifier(name="b3")
    i1, i2, i3 = Identifier(name="i1"), Identifier(name="i2"), Identifier(name="i3")
    indexed = IndexExpression(collection=Identifier("i1"), index=[Identifier("i2")])
    # Expected expression for each source line, in order.
    expressions = [
        binary("||", b1, binary("&&", b2, b3)),
        binary("|", b1, binary("^", b2, b3)),
        binary("!=", b1, binary("+", b2, b3)),
        binary(">=", i1, binary("+", i2, i3)),
        binary("<<", binary("-", i1, i2), i3),
        binary("-", i1, binary("/", i2, i3)),
        binary("+", indexed, UnaryExpression(op=UnaryOperator["-"], expression=indexed)),
        UnaryExpression(op=UnaryOperator["-"], expression=binary("**", i1, i2)),
    ]
    assert _remove_spans(tree) == Program(
        statements=[ExpressionStatement(expression=e) for e in expressions]
    )
def test_alias_assignment():
    """Alias values may be identifiers, slices, discrete sets and concatenations."""
    p = """
    let a = b;
    let a = b[0:1];
    let a = b[{0, 1, 2}];
    let a = b ++ c;
    let a = b[{0, 1}] ++ b[2:2:4] ++ c;
    """.strip()
    tree = parse(p)
    a, b, c = Identifier(name="a"), Identifier(name="b"), Identifier(name="c")
    zero, one, two, four = (IntegerLiteral(n) for n in (0, 1, 2, 4))
    slice_0_1 = IndexExpression(
        collection=b,
        index=[RangeDefinition(start=zero, end=one, step=None)],
    )
    set_0_1_2 = IndexExpression(
        collection=b, index=DiscreteSet(values=[zero, one, two])
    )
    set_0_1 = IndexExpression(collection=b, index=DiscreteSet(values=[zero, one]))
    slice_2_2_4 = IndexExpression(
        collection=b,
        index=[RangeDefinition(start=two, end=four, step=two)],
    )
    expected = [
        AliasStatement(target=a, value=b),
        AliasStatement(target=a, value=slice_0_1),
        AliasStatement(target=a, value=set_0_1_2),
        AliasStatement(target=a, value=Concatenation(lhs=b, rhs=c)),
        # '++' is left-associative, so the three-way concatenation nests left.
        AliasStatement(
            target=a,
            value=Concatenation(lhs=Concatenation(lhs=set_0_1, rhs=slice_2_2_4), rhs=c),
        ),
    ]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
def test_measurement():
    """Measure statements with and without classical targets parse."""
    p = """
    measure q;
    measure q -> c[0];
    c[0] = measure q[0];
    """.strip()
    tree = parse(p)
    c0 = IndexedIdentifier(name=Identifier("c"), indices=[[IntegerLiteral(0)]])
    q0 = IndexedIdentifier(Identifier("q"), indices=[[IntegerLiteral(0)]])
    expected = [
        QuantumMeasurementStatement(QuantumMeasurement(qubit=Identifier("q")), target=None),
        QuantumMeasurementStatement(measure=QuantumMeasurement(Identifier("q")), target=c0),
        QuantumMeasurementStatement(measure=QuantumMeasurement(q0), target=c0),
    ]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
def test_calibration_grammar_declaration():
    """A defcalgrammar statement records the grammar name."""
    p = """
    defcalgrammar "openpulse";
    """.strip()
    tree = parse(p)
    expected = [CalibrationGrammarDeclaration("openpulse")]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
def test_calibration_statement():
    """'cal' blocks keep their raw bodies, even untokenisable ones."""
    p = """
    cal {shift_phase(drive($0), -theta);}
    cal {Outer {nested} outer again.}
    cal {Untokenisable: *$£()"*}
    cal {}
    """.strip()
    tree = parse(p)
    # Raw body text of each 'cal' block, in order.
    bodies = [
        "shift_phase(drive($0), -theta);",
        "Outer {nested} outer again.",
        'Untokenisable: *$£()"*',
        "",
    ]
    assert _remove_spans(tree) == Program(
        statements=[CalibrationStatement(body=body) for body in bodies],
    )
def test_calibration_definition():
    """'defcal' definitions parse their signatures and keep raw bodies."""
    p = """
    defcal rz(angle[20] theta) q { shift_phase drive(q), -theta; }
    defcal measure $0 -> bit {Outer {nested} outer again.}
    defcal rx(pi / 2) $1 {Untokenisable: *$£()"*}
    defcal cx $0, $1 {}
    """.strip()
    tree = parse(p)
    expected = [
        CalibrationDefinition(
            name=Identifier("rz"),
            arguments=[
                ClassicalArgument(
                    type=AngleType(size=IntegerLiteral(20)),
                    name=Identifier("theta"),
                )
            ],
            qubits=[Identifier("q")],
            return_type=None,
            body=" shift_phase drive(q), -theta; ",
        ),
        CalibrationDefinition(
            name=Identifier("measure"),
            arguments=[],
            qubits=[Identifier("$0")],
            return_type=BitType(size=None),
            body="Outer {nested} outer again.",
        ),
        CalibrationDefinition(
            name=Identifier("rx"),
            arguments=[
                BinaryExpression(
                    lhs=Identifier("pi"),
                    op=BinaryOperator["/"],
                    rhs=IntegerLiteral(2),
                )
            ],
            qubits=[Identifier("$1")],
            return_type=None,
            body='Untokenisable: *$£()"*',
        ),
        CalibrationDefinition(
            name=Identifier("cx"),
            arguments=[],
            qubits=[Identifier("$0"), Identifier("$1")],
            return_type=None,
            body="",
        ),
    ]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
def test_subroutine_definition():
    """A subroutine with a qubit argument, gates and a measured return parses."""
    p = """
    def ymeasure(qubit q) -> bit {
        s q;
        h q;
        return measure q;
    }
    """.strip()
    tree = parse(p)
    q = Identifier(name="q")
    gates = [
        QuantumGate(modifiers=[], name=Identifier(g), arguments=[], qubits=[q])
        for g in ("s", "h")
    ]
    assert _remove_spans(tree) == Program(
        statements=[
            SubroutineDefinition(
                name=Identifier("ymeasure"),
                arguments=[QuantumArgument(name=Identifier("q"), size=None)],
                return_type=BitType(None),
                body=gates + [ReturnStatement(expression=QuantumMeasurement(qubit=q))],
            )
        ]
    )
    SpanGuard().visit(tree)
def test_subroutine_signatures():
    """Subroutine parameters cover classical, quantum and array-reference kinds."""
    p = """
    def a(int[8] b) {}
    def a(complex[float[32]] b, qubit c) -> int[32] {}
    def a(bit[5] b, qubit[2] c) -> complex[float[64]] {}
    def a(qubit b, readonly array[uint[8], 2, 3] c) {}
    def a(mutable array[uint[8], #dim=5] b, readonly array[uint[8], 5] c) {}
    """.strip()
    tree = parse(p)
    a, b, c = Identifier(name="a"), Identifier(name="b"), Identifier(name="c")
    SpanGuard().visit(tree)
    uint8 = UintType(IntegerLiteral(8))

    def subroutine(arguments, return_type=None):
        # Every definition in the source is named "a" and has an empty body.
        return SubroutineDefinition(
            name=a, arguments=arguments, return_type=return_type, body=[]
        )

    expected = [
        subroutine([ClassicalArgument(IntType(IntegerLiteral(8)), b)]),
        subroutine(
            [
                ClassicalArgument(type=ComplexType(FloatType(IntegerLiteral(32))), name=b),
                QuantumArgument(name=c, size=None),
            ],
            return_type=IntType(IntegerLiteral(32)),
        ),
        subroutine(
            [
                ClassicalArgument(type=BitType(size=IntegerLiteral(5)), name=b),
                QuantumArgument(name=c, size=IntegerLiteral(2)),
            ],
            return_type=ComplexType(FloatType(IntegerLiteral(64))),
        ),
        subroutine(
            [
                QuantumArgument(name=b, size=None),
                ClassicalArgument(
                    type=ArrayReferenceType(
                        base_type=uint8,
                        dimensions=[IntegerLiteral(2), IntegerLiteral(3)],
                    ),
                    name=c,
                    access=AccessControl.readonly,
                ),
            ]
        ),
        # A bare IntegerLiteral(5) for ``dimensions`` represents "#dim=5"
        # (only the *number* of dimensions is known), while a list
        # [IntegerLiteral(5)] gives the explicit size of each dimension.
        subroutine(
            [
                ClassicalArgument(
                    type=ArrayReferenceType(base_type=uint8, dimensions=IntegerLiteral(5)),
                    name=b,
                    access=AccessControl.mutable,
                ),
                ClassicalArgument(
                    type=ArrayReferenceType(base_type=uint8, dimensions=[IntegerLiteral(5)]),
                    name=c,
                    access=AccessControl.readonly,
                ),
            ]
        ),
    ]
    assert _remove_spans(tree) == Program(statements=expected)
def test_ambiguous_gate_calls():
    """'gphase(...)' is a quantum phase while other call syntax is a function call."""
    p = """
    gphase(pi);
    fn(pi);
    """.strip()
    tree = parse(p)
    SpanGuard().visit(tree)
    pi = Identifier("pi")
    expected = [
        QuantumPhase(modifiers=[], argument=pi, qubits=[]),
        ExpressionStatement(FunctionCall(name=Identifier("fn"), arguments=[pi])),
    ]
    assert _remove_spans(tree) == Program(statements=expected)
def test_branch_statement():
    """An if/else with a gate in the if-block and 'end' in the else parses."""
    p = """
    if(temp == 1) { ry(pi / 2) q; } else end;
    """.strip()
    tree = parse(p)
    condition = BinaryExpression(
        op=BinaryOperator["=="], lhs=Identifier("temp"), rhs=IntegerLiteral(1)
    )
    half_pi = BinaryExpression(
        op=BinaryOperator["/"], lhs=Identifier(name="pi"), rhs=IntegerLiteral(2)
    )
    ry = QuantumGate(
        modifiers=[], name=Identifier("ry"), arguments=[half_pi], qubits=[Identifier("q")]
    )
    assert _remove_spans(tree) == Program(
        statements=[
            BranchingStatement(
                condition=condition, if_block=[ry], else_block=[EndStatement()]
            )
        ]
    )
    SpanGuard().visit(tree)
def test_for_in_loop():
    """A typed for-in loop over a range with indexed qubit operands parses."""
    p = """
    for uint[8] i in [0: 2] { majority a[i], b[i + 1], a[i + 1]; continue; }
    """.strip()
    tree = parse(p)
    i = Identifier("i")
    i_plus_1 = BinaryExpression(op=BinaryOperator["+"], lhs=i, rhs=IntegerLiteral(1))
    qubits = [
        IndexedIdentifier(name=Identifier(name="a"), indices=[[i]]),
        IndexedIdentifier(name=Identifier("b"), indices=[[i_plus_1]]),
        IndexedIdentifier(name=Identifier(name="a"), indices=[[i_plus_1]]),
    ]
    assert _remove_spans(tree) == Program(
        statements=[
            ForInLoop(
                type=UintType(IntegerLiteral(8)),
                identifier=i,
                set_declaration=RangeDefinition(
                    start=IntegerLiteral(0), end=IntegerLiteral(2), step=None
                ),
                block=[
                    QuantumGate(
                        modifiers=[],
                        name=Identifier("majority"),
                        arguments=[],
                        qubits=qubits,
                    ),
                    ContinueStatement(),
                ],
            )
        ]
    )
    SpanGuard().visit(tree)
def test_delay_instruction():
    """A delay instruction with a duration designator parses."""
    p = """
    delay[start_stretch] $0;
    """.strip()
    tree = parse(p)
    expected = DelayInstruction(
        duration=Identifier("start_stretch"), qubits=[Identifier("$0")]
    )
    assert _remove_spans(tree) == Program(statements=[expected])
    SpanGuard().visit(tree)
def test_no_designator_type():
    """'duration' and 'stretch' types take no size designator."""
    p = """
    duration a;
    stretch b;
    """.strip()
    tree = parse(p)
    expected = [
        ClassicalDeclaration(DurationType(), Identifier("a"), None),
        ClassicalDeclaration(StretchType(), Identifier("b"), None),
    ]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
def test_box():
    """A box with a duration designator wraps its body statements."""
    p = """
    box [maxdur] {
        delay[start_stretch] $0;
        x $0;
    }
    """.strip()
    tree = parse(p)
    dollar0 = Identifier("$0")
    body = [
        DelayInstruction(duration=Identifier("start_stretch"), qubits=[dollar0]),
        QuantumGate(modifiers=[], name=Identifier("x"), arguments=[], qubits=[dollar0]),
    ]
    assert _remove_spans(tree) == Program(
        statements=[Box(duration=Identifier("maxdur"), body=body)]
    )
    SpanGuard().visit(tree)
def test_quantumloop():
    """A for-in loop of gates nests correctly inside a box."""
    p = """
    box [maxdur] {
        delay[start_stretch] $0;
        for uint i in [1:2]{
            h $0;
            cx $0, $1;
        }
        x $0;
    }
    """.strip()
    tree = parse(p)
    q0, q1 = Identifier(name="$0"), Identifier(name="$1")
    loop = ForInLoop(
        type=UintType(size=None),
        identifier=Identifier(name="i"),
        set_declaration=RangeDefinition(
            start=IntegerLiteral(value=1), end=IntegerLiteral(value=2), step=None
        ),
        block=[
            QuantumGate(modifiers=[], name=Identifier("h"), arguments=[], qubits=[q0]),
            QuantumGate(
                modifiers=[], name=Identifier("cx"), arguments=[], qubits=[q0, q1]
            ),
        ],
    )
    body = [
        DelayInstruction(duration=Identifier("start_stretch"), qubits=[q0]),
        loop,
        QuantumGate(modifiers=[], name=Identifier("x"), arguments=[], qubits=[q0]),
    ]
    assert _remove_spans(tree) == Program(
        statements=[Box(duration=Identifier("maxdur"), body=body)]
    )
    SpanGuard().visit(tree)
def test_durationof():
    """durationof() over a block of statements parses to a DurationOf node."""
    p = """
    durationof({x $0;});
    """.strip()
    tree = parse(p)
    x_gate = QuantumGate(
        modifiers=[], name=Identifier("x"), arguments=[], qubits=[Identifier("$0")]
    )
    assert _remove_spans(tree) == Program(
        statements=[ExpressionStatement(expression=DurationOf(target=[x_gate]))]
    )
    SpanGuard().visit(tree)
def test_classical_assignment():
    """Assignment to an indexed identifier parses to ClassicalAssignment."""
    p = """
    a[0] = 1;
    """.strip()
    tree = parse(p)
    lvalue = IndexedIdentifier(
        name=Identifier("a"), indices=[[IntegerLiteral(value=0)]]
    )
    assert _remove_spans(tree) == Program(
        statements=[
            ClassicalAssignment(
                lvalue=lvalue, op=AssignmentOperator["="], rvalue=IntegerLiteral(1)
            )
        ]
    )
    SpanGuard().visit(tree)
def test_header():
    """Version string, includes and I/O declarations parse at the file head."""
    p = """
    OPENQASM 3.1;
    include "qelib1.inc";
    input angle[16] variable1;
    output angle[16] variable2;
    """.strip()
    tree = parse(p)
    angle16 = AngleType(size=IntegerLiteral(value=16))
    expected = [
        Include("qelib1.inc"),
        IODeclaration(
            io_identifier=IOKeyword["input"],
            type=angle16,
            identifier=Identifier(name="variable1"),
        ),
        IODeclaration(
            io_identifier=IOKeyword["output"],
            type=angle16,
            identifier=Identifier(name="variable2"),
        ),
    ]
    assert _remove_spans(tree) == Program(version="3.1", statements=expected)
    SpanGuard().visit(tree)
def test_end_statement():
    """A bare 'end;' parses to an EndStatement."""
    p = """
    end;
    """.strip()
    tree = parse(p)
    assert _remove_spans(tree) == Program(statements=[EndStatement()])
    SpanGuard().visit(tree)
def test_annotations():
    """Annotations ('@keyword command') attach to exactly the next statement,
    both at global scope and inside nested scopes, and nested '@' text is kept
    as part of a single annotation's command."""
    p = """
    @word1 command1
    input uint[32] x;
    @keyword command command
    x = 1;
    @word1 command1
    @word2 command2 32f%^&
    gate my_gate q {}
    @word1 @not_a_separate_annotation uint x;
    int[8] x;
    @word1
    qubit q; uint[4] y;
    @outer
    def fn() {
        @inner1
        int[8] x;
        @inner2 command
        x = 19;
    }
    """.strip()
    program = parse(p)
    assert _remove_spans(program) == Program(
        statements=[
            _with_annotations(
                IODeclaration(
                    type=UintType(IntegerLiteral(32)),
                    io_identifier=IOKeyword.input,
                    identifier=Identifier("x"),
                ),
                [Annotation(keyword="word1", command="command1")],
            ),
            # Extra spacing between the annotation and the statement is no problem.
            _with_annotations(
                ClassicalAssignment(
                    lvalue=Identifier("x"),
                    op=AssignmentOperator["="],
                    rvalue=IntegerLiteral(1),
                ),
                [Annotation(keyword="keyword", command="command command")],
            ),
            # Multiple annotations are correctly split in the list.
            _with_annotations(
                QuantumGateDefinition(
                    name=Identifier("my_gate"), arguments=[], qubits=[Identifier("q")], body=[]
                ),
                [
                    Annotation(keyword="word1", command="command1"),
                    Annotation(keyword="word2", command="command2 32f%^&"),
                ],
            ),
            # Nesting the annotation syntax doesn't cause problems.
            _with_annotations(
                ClassicalDeclaration(
                    type=IntType(IntegerLiteral(8)),
                    identifier=Identifier("x"),
                    init_expression=None,
                ),
                [Annotation(keyword="word1", command="@not_a_separate_annotation uint x;")],
            ),
            # Annotations only apply to the next statement, even if the next
            # line contains several statements.
            _with_annotations(
                QubitDeclaration(size=None, qubit=Identifier("q")),
                [Annotation(keyword="word1", command=None)],
            ),
            ClassicalDeclaration(
                type=UintType(IntegerLiteral(4)), identifier=Identifier("y"), init_expression=None
            ),
            # Annotations work both outside and inside nested scopes.
            _with_annotations(
                SubroutineDefinition(
                    name=Identifier("fn"),
                    arguments=[],
                    return_type=None,
                    body=[
                        _with_annotations(
                            ClassicalDeclaration(
                                type=IntType(IntegerLiteral(8)),
                                identifier=Identifier("x"),
                                init_expression=None,
                            ),
                            [Annotation(keyword="inner1", command=None)],
                        ),
                        _with_annotations(
                            ClassicalAssignment(
                                lvalue=Identifier("x"),
                                op=AssignmentOperator["="],
                                rvalue=IntegerLiteral(19),
                            ),
                            [Annotation(keyword="inner2", command="command")],
                        ),
                    ],
                ),
                [Annotation(keyword="outer", command=None)],
            ),
        ],
    )
    SpanGuard().visit(program)
def test_pragma():
    """Both '#pragma' and bare 'pragma' forms capture the raw command text."""
    p = """
    #pragma verbatim
    pragma verbatim
    #pragma command arg1 arg2
    pragma command arg1 arg2
    #pragma otherwise_invalid_token 1a2%&
    pragma otherwise_invalid_token 1a2%&
    """  # No strip because all line endings are important for pragmas.
    tree = parse(p)
    commands = ["verbatim", "command arg1 arg2", "otherwise_invalid_token 1a2%&"]
    # Each command appears twice: once as '#pragma', once as bare 'pragma'.
    expected = [Pragma(command=command) for command in commands for _ in range(2)]
    assert _remove_spans(tree) == Program(statements=expected)
    SpanGuard().visit(tree)
class TestFailurePaths:
    """Tests that syntactically or semantically invalid programs are rejected
    with a :class:`QASM3ParsingError` whose message matches expectations.

    Note that the ``match`` argument to ``pytest.raises`` is a regular
    expression, so messages containing regex metacharacters must account for
    that.
    """

    def test_missing_for_loop_type(self):
        p = "for a in b {};"  # No type of for-loop variable.
        with pytest.raises(QASM3ParsingError):
            parse(p)

    @pytest.mark.parametrize("keyword", ("continue", "break"))
    def test_control_flow_outside_loop(self, keyword):
        message = f"'{keyword}' statement outside loop"
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"{keyword};")
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"if (true) {keyword};")
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"def fn() {{ {keyword}; }}")
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"gate my_gate q {{ {keyword}; }}")

    def test_return_outside_subroutine(self):
        # Plain string: nothing to interpolate here.
        message = "'return' statement outside subroutine"
        with pytest.raises(QASM3ParsingError, match=message):
            parse("return;")
        with pytest.raises(QASM3ParsingError, match=message):
            parse("if (true) return;")
        with pytest.raises(QASM3ParsingError, match=message):
            parse("gate my_gate q { return; }")

    def test_classical_assignment_in_gate(self):
        message = "cannot assign to classical parameters in a gate"
        with pytest.raises(QASM3ParsingError, match=message):
            parse("int a; gate my_gate q { x q; a = 1; }")
        with pytest.raises(QASM3ParsingError, match=message):
            parse("int a; gate my_gate q { a = 1; }")

    def test_classical_declaration_in_gate(self):
        message = "cannot declare classical variables in a gate"
        with pytest.raises(QASM3ParsingError, match=message):
            parse("gate my_gate q { int a; }")
        with pytest.raises(QASM3ParsingError, match=message):
            parse("gate my_gate q { int a = 1; }")

    @pytest.mark.parametrize(
        ("statement", "message"),
        (
            ('defcalgrammar "openpulse";', "'defcalgrammar' statements must be global"),
            ("array[int, 4] arr;", "arrays can only be declared globally"),
            ("def fn() { }", "subroutine definitions must be global"),
            ("extern fn();", "extern declarations must be global"),
            ("gate my_gate q { }", "gate definitions must be global"),
            ('include "stdgates.inc";', "'include' statements must be global"),
            ("input int a;", "'input' declarations must be global"),
            ("output int a;", "'output' declarations must be global"),
            ("qubit q;", "qubit declarations must be global"),
            ("qreg q;", "qubit declarations must be global"),
            ("qreg q[5];", "qubit declarations must be global"),
            ("\npragma command\n", "pragmas must be global"),
        ),
    )
    def test_global_statement_in_nonglobal_context(self, statement, message):
        # Check every kind of non-global scope rejects the statement.
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"for uint[8] i in [0:4] {{ {statement} }}")
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"while (true) {{ {statement} }}")
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"def fn() {{ {statement} }}")
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"if (true) {{ {statement} }}")
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"if (false) x $0; else {{ {statement} }}")
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"def fn() {{ if (true) {{ {statement} }} }}")

    @pytest.mark.parametrize(
        ("statement", "operation"),
        (
            ("measure $0 -> c[0];", "measure"),
            ("measure $0;", "measure"),
            ("reset $0;", "reset"),
        ),
    )
    def test_nonunitary_instructions_in_gate(self, statement, operation):
        message = f"cannot have a non-unitary '{operation}' instruction in a gate"
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"bit[5] c; gate my_gate q {{ {statement} }}")

    def test_builtins_with_incorrect_arguments(self):
        message = "'gphase' takes exactly one argument, .*"
        with pytest.raises(QASM3ParsingError, match=message):
            parse("gphase;")
        with pytest.raises(QASM3ParsingError, match=message):
            parse("gphase();")
        with pytest.raises(QASM3ParsingError, match=message):
            parse("gphase(1, 2);")
        with pytest.raises(QASM3ParsingError, match=message):
            parse("ctrl @ gphase $0;")
        message = "'sizeof' needs either one or two arguments"
        with pytest.raises(QASM3ParsingError, match=message):
            parse("sizeof();")
        with pytest.raises(QASM3ParsingError, match=message):
            parse("sizeof(arr, 0, 1);")

    @pytest.mark.parametrize(
        "scalar",
        ("uint", "uint[32]", "bit", "bit[5]", "bool", "duration", "stretch", "complex[float[64]]"),
    )
    def test_complex_with_bad_scalar_type(self, scalar):
        message = "invalid type of complex components"
        with pytest.raises(QASM3ParsingError, match=message):
            parse(f"complex[{scalar}] f;")

    def test_array_with_bad_scalar_type(self):
        message = "invalid scalar type for array"
        with pytest.raises(QASM3ParsingError, match=message):
            parse("array[stretch, 4] arr;")
| 60,537 | 31.901087 | 100 | py |
openqasm | openqasm-main/source/openqasm/tests/test_printer.py | import dataclasses
import pytest
import openqasm3
from openqasm3 import ast
def _remove_spans(node):
    """Recursively rebuild *node* with every ``span`` dropped, so comparison
    failures in tests are not drowned in source-location noise."""
    if isinstance(node, list):
        return [_remove_spans(element) for element in node]
    if not isinstance(node, ast.QASMNode):
        return node
    # Dataclass fields split into those accepted by the constructor and those
    # that must be assigned after construction.
    init_fields = {}
    post_fields = {}
    for field in dataclasses.fields(node):
        if field.name == "span":
            continue
        cleaned = _remove_spans(getattr(node, field.name))
        if field.init:
            init_fields[field.name] = cleaned
        else:
            post_fields[field.name] = cleaned
    rebuilt = type(node)(**init_fields)
    for name, value in post_fields.items():
        setattr(rebuilt, name, value)
    return rebuilt
# A ladder of binary operators ordered from lowest to highest binding
# precedence.  ``TestExpression.test_precedence`` pairs each entry with every
# later (tighter-binding) one to check the printer's parenthesisation.
OPERATOR_PRECEDENCE = [
    ast.BinaryOperator["||"],
    ast.BinaryOperator["&&"],
    ast.BinaryOperator["|"],
    ast.BinaryOperator["^"],
    ast.BinaryOperator["&"],
    ast.BinaryOperator["<<"],
    ast.BinaryOperator["+"],
    ast.BinaryOperator["*"],
    ast.BinaryOperator["**"],
]
class TestRoundTrip:
"""All the tests in this class are testing the round-trip properties of the "parse - print -
parse" operation. The test cases all need to be written in the preferred output format of the
printer itself."""
@pytest.mark.parametrize("indent", ["", " ", "\t"], ids=repr)
@pytest.mark.parametrize("chain_else_if", [True, False])
@pytest.mark.parametrize("old_measurement", [True, False])
def test_example_files(self, parsed_example, indent, chain_else_if, old_measurement):
"""Test that the cycle 'parse - print - parse' does not affect the generated AST of the
example files. Printing should just be an exercise in formatting, so should not affect how
subsequent runs parse the file. This also functions as something of a general integration
test, testing much of the basic surface of the language."""
roundtrip_ast = openqasm3.parse(
openqasm3.dumps(
parsed_example.ast,
indent=indent,
chain_else_if=chain_else_if,
old_measurement=old_measurement,
)
)
assert _remove_spans(roundtrip_ast) == _remove_spans(parsed_example.ast)
@pytest.mark.parametrize("version_statement", ["OPENQASM 3;", "OPENQASM 3.0;"])
def test_version(self, version_statement):
output = openqasm3.dumps(openqasm3.parse(version_statement)).strip()
assert output == version_statement
def test_io_declarations(self):
input_ = """
input int a;
input float[64] a;
input complex[float[FLOAT_WIDTH]] a;
output bit b;
output bit[SIZE] b;
output bool b;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_include(self):
input_ = 'include "stdgates.inc";'
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_qubit_declarations(self):
input_ = """
qubit q;
qubit[5] q;
qubit[SIZE] q;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
old_input = """
qreg q;
qreg q[5];
qreg q[SIZE];
""".strip()
old_output = openqasm3.dumps(openqasm3.parse(old_input)).strip()
# Note we're testing that we normalise to the new form.
assert input_ == old_output
def test_gate_definition(self):
input_ = """
gate my_gate q {
}
gate my_gate(param) q {
}
gate my_gate(param1, param2) q {
}
gate my_gate q1, q2 {
}
gate my_gate q {
x q;
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_extern_declaration(self):
input_ = """
extern f();
extern f() -> bool;
extern f(bool);
extern f(int[32], uint[32]);
extern f(mutable array[complex[float[64]], N_ELEMENTS]) -> int[2 * INT_SIZE];
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_function_declaration(self):
input_ = """
def f() {
}
def f() -> angle[32] {
return pi;
}
def f(int[SIZE] a) {
}
def f(qubit q1, qubit[SIZE] q2) {
}
def f(readonly array[int[32], 2] a, mutable array[uint, #dim=2] b) {
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_unary_expression(self):
input_ = """
!a;
-a;
~(a + a);
-a ** 2;
!true;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_binary_expression(self):
input_ = """
a * b;
a / b;
1 + 2;
1 - 2;
(1 + 2) * 3;
2 ** 8;
a << 1;
a >> b;
2 < 3;
3 >= 2;
a == b;
a != b;
a & b;
a | b;
a ^ b;
a && b;
a || b;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_assignment(self):
input_ = """
a = 1;
a = 2 * b;
a = f(4);
a += 1;
a -= a * 0.5;
a *= 2.0;
a /= 1.5;
a **= 2;
a <<= 1;
a >>= 1;
a |= f(2, 3);
a &= "101001";
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_index_expression(self):
input_ = """
a[0];
a[{1, 2, 3}];
a[0][0];
a[1:2][0];
a[0][1:2];
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_literal(self):
input_ = """
1;
2.0;
1.0im;
true;
false;
"1010";
"01010";
-1;
1.0ms;
1.0ns;
2.0s;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_declaration(self):
input_ = """
bool x = true;
bit x;
bit[SIZE] x;
int x = 2;
int[32] x = -5;
uint x = 0;
uint[16] x;
angle x;
angle[SIZE] x;
float x = 2.0;
float[SIZE * 2] x = 4.0;
complex[float[64]] x;
complex z;
duration a = 1.0us;
stretch b;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_const_declaration(self):
input_ = """
const bool x = true;
const int x = 2;
const int[32] x = -5;
const uint x = 0;
const uint[16] x = 0;
const angle x = pi;
const angle[SIZE] x = pi / 8;
const float x = 2.0;
const float[SIZE * 2] x = 4.0;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_array_initializer(self):
input_ = """
array[int, 2] a = {1, 2};
array[float[64], 2, 2] a = {{1.0, 0.0}, {0.0, 1.0}};
array[angle[32], 2] a = {pi, pi / 8};
array[uint[16], 4, 4] a = {b, {1, 2, 3, 4}};
array[bool, 2, 2] a = b;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_alias(self):
input_ = """
let q = a ++ b;
let q = a[1:2];
let q = a[{0, 2, 3}] ++ a[1:1] ++ a[{4, 5}];
let q = a;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_function_call(self):
input_ = """
f(1, 2, 3);
f();
f(a, b + c, a * b / c);
f(f(a));
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_gate_call(self):
input_ = """
h q;
h q[0];
gphase(pi);
U(1, 2, 3) q;
U(1, 2, 3) q[0];
my_gate a, b[0:2], c;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_gate_modifiers(self):
input_ = """
ctrl @ U(1, 2, 3) a, b;
ctrl(1) @ x a, b[0];
negctrl @ U(1, 2, 3) a[0:2], b;
negctrl(2) @ h a, b, c;
pow(2) @ h a;
ctrl @ gphase(pi / 2) a, b;
inv @ h a;
inv @ ctrl @ x a, b;
ctrl(1) @ inv @ x a, b;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_cast(self):
input_ = """
int(a);
int[32](2.0);
int[SIZE](bitstring);
uint[16 + 16](a);
bit[SIZE](pi);
bool(i);
complex[float[64]](2.0);
complex[float](2.5);
float[32](1);
float(2.0);
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_for_loop(self):
input_ = """
for uint i in [0:2] {
a += 1;
}
for int[8] i in [a:b] {
a += 1;
}
for float[64] i in [a:2 * b:c] {
a += 1.0;
}
for uint i in {1, 2, 3} {
a += 1;
}
for int i in {2 * j, 2 + 3 / 4, j + j} {
a += 1;
}
for complex[float[64]] i in j {
a += 1;
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_while_loop(self):
input_ = """
while (i) {
x $0;
i -= 1;
}
while (i == 0) {
x $0;
i -= 1;
}
while (!true) {
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_if(self):
input_ = """
if (i) {
x $0;
}
if (true) {
x $0;
}
if (2 + 3 == 5) {
x $0;
}
if (!true) {
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_else(self):
input_ = """
if (true) {
} else {
x $0;
}
if (true) {
} else {
x $0;
a = b + 2;
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_else_if(self):
input_ = """
if (i == 0) {
} else if (i == 1) {
} else {
x $0;
}
if (i == 0) {
} else if (i == 1) {
x $0;
} else if (i == 2) {
} else {
x $1;
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_jumps(self):
input_ = """
while (true) {
break;
continue;
end;
}
def f() {
return;
}
def f() -> int[32] {
return 2 + 3;
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_measurement(self):
input_ = """
measure q;
measure $0;
measure q[0];
measure q[1:3];
c = measure q;
c = measure $0;
c = measure q[0];
c = measure q[1:3];
def f() {
return measure q;
}
def f() {
return measure $0;
}
def f() {
return measure q[0];
}
def f() {
return measure q[1:3];
}
""".strip()
output = openqasm3.dumps(
openqasm3.parse(input_), indent=" ", old_measurement=False
).strip()
assert output == input_
def test_reset(self):
input_ = """
reset q;
reset $0;
reset q[0];
reset q[1:3];
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_barrier(self):
input_ = """
barrier q;
barrier $0;
barrier q[0];
barrier q[1:3];
barrier;
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_delay(self):
input_ = """
delay[50.0ns] q;
delay[50.0ns] $0;
delay[50.0ns] q[0];
delay[50.0ns] q[1:3];
delay[2 * SIZE] q;
delay[2 * SIZE] $0;
delay[2 * SIZE] q[0];
delay[2 * SIZE] q[1:3];
delay[100.0ns];
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_)).strip()
assert output == input_
def test_box(self):
input_ = """
box {
x $0;
}
box[100.0ns] {
x $0;
}
box[a + b] {
x $0;
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_duration_of(self):
input_ = """
duration a = durationof({
x $0;
ctrl @ x $1, $2;
});
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
def test_pragma(self):
input_ = """
pragma blah blah blah
pragma command
pragma !%^* word
""".lstrip() # The ending newline is important for pragmas.
output = openqasm3.dumps(openqasm3.parse(input_))
assert output == input_
@pytest.fixture(
params=[
pytest.param("", id="none"),
pytest.param("@command\n", id="single"),
pytest.param("@command keyword\n", id="single keyword"),
pytest.param("@command !£4&8 hello world\n", id="hard to tokenise"),
pytest.param("@command1\n@command2 keyword\n", id="multiple"),
]
)
def annotations(self, request):
return request.param
@pytest.mark.parametrize(
"statement",
[
pytest.param("let alias = q[1:3];", id="alias"),
pytest.param("a = b;", id="assignment"),
pytest.param("barrier q;", id="barrier"),
pytest.param("box {\n}", id="box"),
pytest.param('defcalgrammar "openpulse";', id="defcal"),
pytest.param("int[8] a;", id="classical declaration"),
pytest.param("const uint SIZE = 5;", id="const declaration"),
pytest.param("def f() {\n}", id="subroutine definition"),
pytest.param("delay[50.0ms] $0;", id="delay"),
pytest.param("4 * 5 + 3;", id="expression"),
pytest.param("extern f();", id="extern"),
pytest.param("for uint i in [0:1] {\n}", id="for"),
pytest.param("ctrl @ h q;", id="gate call"),
pytest.param("gphase(0.5);", id="gphase call"),
pytest.param("gate f q {\n}", id="gate definition"),
pytest.param("if (true) {\n}", id="if"),
pytest.param("if (true) {\n} else {\n 1;\n}", id="if-else"),
pytest.param('include "stdgates.inc";', id="include"),
pytest.param("input uint[8] i;", id="input declaration"),
pytest.param("output uint[8] i;", id="output declaration"),
pytest.param("measure $0;", id="measure"),
pytest.param("a = measure b;", id="measure assign"),
pytest.param("qubit[5] q;", id="qubit declaration"),
pytest.param("reset $0;", id="reset"),
pytest.param("while (true) {\n}", id="while"),
],
)
def test_annotations(self, statement, annotations):
input_ = annotations + statement + "\n"
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ")
assert output == input_
class TestExpression:
"""Test more specific features and properties of the printer when outputting expressions."""
@pytest.mark.parametrize(
"operator", [op for op in ast.BinaryOperator if op != ast.BinaryOperator["**"]]
)
def test_associativity_binary(self, operator):
"""Test that the associativity of binary expressions is respected in the output."""
input_ = ast.Program(
statements=[
ast.ExpressionStatement(
ast.BinaryExpression(
lhs=ast.BinaryExpression(
lhs=ast.Identifier("a"),
op=operator,
rhs=ast.Identifier("b"),
),
op=operator,
rhs=ast.Identifier("c"),
),
),
ast.ExpressionStatement(
ast.BinaryExpression(
lhs=ast.Identifier("a"),
op=operator,
rhs=ast.BinaryExpression(
lhs=ast.Identifier("b"),
op=operator,
rhs=ast.Identifier("c"),
),
),
),
],
)
expected = f"""
a {operator.name} b {operator.name} c;
a {operator.name} (b {operator.name} c);
""".strip()
output = openqasm3.dumps(input_).strip()
assert output == expected
assert openqasm3.parse(output) == input_
@pytest.mark.xfail(reason="Parser cannot handle bracketed concatenations")
def test_associativity_concatenation(self):
"""The associativity of concatenation is not fully defined by the grammar or specification,
but the printer assumes left-associativity for now."""
input_ = ast.Program(
statements=[
ast.AliasStatement(
ast.Identifier("q"),
ast.Concatenation(
lhs=ast.Concatenation(
lhs=ast.Identifier("a"),
rhs=ast.Identifier("b"),
),
rhs=ast.Identifier("c"),
),
),
ast.AliasStatement(
ast.Identifier("q"),
ast.Concatenation(
lhs=ast.Identifier("a"),
rhs=ast.Concatenation(
lhs=ast.Identifier("b"),
rhs=ast.Identifier("c"),
),
),
),
],
)
expected = """
let q = a ++ b ++ c;
let q = a ++ (b ++ c);
""".strip()
output = openqasm3.dumps(input_).strip()
assert output == expected
assert openqasm3.parse(output) == input_
@pytest.mark.xfail(reason="Currently power is still left-associative in the ANTLR grammar")
def test_associativity_power(self):
"""Test that the right-associativity of the power expression is respected in the output."""
input_ = ast.Program(
statements=[
ast.ExpressionStatement(
ast.BinaryExpression(
lhs=ast.BinaryExpression(
lhs=ast.Identifier("a"),
op=ast.BinaryOperator["**"],
rhs=ast.Identifier("b"),
),
op=ast.BinaryOperator["**"],
rhs=ast.Identifier("c"),
),
),
ast.ExpressionStatement(
ast.BinaryExpression(
lhs=ast.Identifier("a"),
op=ast.BinaryOperator["**"],
rhs=ast.BinaryExpression(
lhs=ast.Identifier("b"),
op=ast.BinaryOperator["**"],
rhs=ast.Identifier("c"),
),
),
),
],
)
expected = f"""
(a ** b) ** c;
a ** b ** c;
""".strip()
output = openqasm3.dumps(input_).strip()
assert output == expected
assert openqasm3.parse(output) == input_
@pytest.mark.parametrize(
["lower", "higher"],
[
(lower, higher)
for i, lower in enumerate(OPERATOR_PRECEDENCE[:-1])
for higher in OPERATOR_PRECEDENCE[i + 1 :]
],
)
def test_precedence(self, lower, higher):
input_ = ast.Program(
statements=[
ast.ExpressionStatement(
ast.BinaryExpression(
lhs=ast.BinaryExpression(
lhs=ast.Identifier("a"),
op=lower,
rhs=ast.Identifier("b"),
),
op=higher,
rhs=ast.BinaryExpression(
lhs=ast.Identifier("c"),
op=lower,
rhs=ast.Identifier("d"),
),
),
),
],
)
expected = f"(a {lower.name} b) {higher.name} (c {lower.name} d);"
output = openqasm3.dumps(input_).strip()
assert output == expected
assert openqasm3.parse(output) == input_
class TestOptions:
"""Test the various keyword arguments to the exporter have the desired effects."""
@pytest.mark.parametrize("indent", ["", " ", "\t", " "], ids=repr)
def test_indent(self, indent):
input_ = f"""
def f(int[32] a) -> bool {{
{indent}return a == a;
}}
gate g(param) q {{
{indent}h q;
}}
for uint i in [0:2] {{
{indent}true;
}}
while (i) {{
{indent}i -= 1;
}}
if (i) {{
{indent}x $0;
}} else {{
{indent}x $1;
}}
box {{
{indent}x $0;
}}
durationof({{
{indent}x $0;
}});
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=indent).strip()
assert output == input_
@pytest.mark.parametrize("indent", ["", " ", "\t", " "], ids=repr)
@pytest.mark.parametrize(
["outer_start", "outer_end", "allow_classical"],
[
pytest.param("gate f q {", "}", False, id="gate"),
pytest.param("durationof({", "});", False, id="durationof"),
pytest.param("def f() {", "}", True, id="function"),
pytest.param("if (true) {", "}", True, id="if"),
pytest.param("if (true) {\n} else {", "}", True, id="else"),
pytest.param("box[1.0ms] {", "}", False, id="box"),
],
)
def test_indent_nested(self, indent, outer_start, outer_end, allow_classical):
classicals = f"""
for uint i in [0:2] {{
{indent}true;
}}
while (i) {{
{indent}i -= 1;
}}
if (i) {{
{indent}x $0;
}} else {{
{indent}x $1;
}}
durationof({{
{indent}x $0;
}});
""".strip()
quantums = f"""
box {{
{indent}x $0;
}}
""".strip()
lines = quantums.splitlines()
if allow_classical:
lines.extend(classicals.splitlines())
input_ = outer_start + "\n" + "\n".join(indent + line for line in lines) + "\n" + outer_end
output = openqasm3.dumps(openqasm3.parse(input_), indent=indent).strip()
assert output == input_
def test_old_measurement(self):
old_input = "measure q -> c;"
output = openqasm3.dumps(openqasm3.parse(old_input), old_measurement=True).strip()
assert output == old_input
input_ = "c = measure q;"
output = openqasm3.dumps(openqasm3.parse(input_), old_measurement=True).strip()
assert output == old_input
def test_chain_else_if(self):
input_ = """
if (i == 0) {
} else if (i == 1) {
}
if (i == 0) {
} else if (i == 1) {
} else {
x $0;
}
if (i == 0) {
} else if (i == 1) {
} else if (i == 2) {
}
if (i == 0) {
} else if (i == 1) {
} else if (i == 2) {
} else {
if (i == 3) {
}
x $0;
}
if (i == 0) {
} else {
if (i == 2) {
x $0;
} else {
x $0;
}
x $0;
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ", chain_else_if=True).strip()
assert output == input_
def test_no_chain_else_if(self):
input_ = """
if (i == 0) {
} else {
if (i == 1) {
}
}
if (i == 0) {
} else {
if (i == 1) {
} else {
x $0;
}
}
if (i == 0) {
} else {
if (i == 1) {
} else {
if (i == 2) {
}
}
}
if (i == 0) {
} else {
if (i == 1) {
} else {
if (i == 2) {
} else {
if (i == 3) {
}
x $0;
}
}
}
if (i == 0) {
} else {
if (i == 2) {
x $0;
} else {
x $0;
}
x $0;
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ", chain_else_if=False).strip()
assert output == input_
def test_chain_else_if_only_applies_to_else_if(self):
input_ = """
if (i) {
} else {
x $1;
}
if (i) {
} else {
for uint j in [0:1] {
}
}
if (i) {
} else {
x $0;
if (!i) {
} else {
x $1;
}
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ", chain_else_if=True).strip()
assert output == input_
def test_annotations(self):
input_ = """
@ann_1
int[32] i = 0;
if (i == 0) {
@ann_2
i += 1;
}
""".strip()
output = openqasm3.dumps(openqasm3.parse(input_), indent=" ").strip()
assert output == input_
| 23,718 | 24.232979 | 99 | py |
openqasm | openqasm-main/source/openqasm/tests/test_openqasm_tests.py | import openqasm3
def test_examples(example_file):
"""Loop through all example files, verify that the ast_parser can parse the file.
The `example_file` fixture is generated in `conftest.py`. These tests are automatically skipped
if the examples directly cannot be found.
"""
with open(example_file) as f:
source = f.read()
openqasm3.parse(source)
| 386 | 28.769231 | 100 | py |
openqasm | openqasm-main/source/openqasm/tests/__init__.py | 0 | 0 | 0 | py | |
openqasm | openqasm-main/source/openqasm/docs/conf.py | # In general, we expect that `openqasm3` is installed and available on the path
# without modification.
import openqasm3
# -- Project information -----------------------------------------------------
project = 'OpenQASM 3 Reference AST'
copyright = '2021, OpenQASM 3 Team and Contributors'
author = 'OpenQASM 3 Team and Contributors'
# Keep the documented version in lock-step with the installed package.
release = openqasm3.__version__

extensions = [
    # Allow auto-generation of class and method documentation, as long as you
    # explicitly put the directives in.
    "sphinx.ext.autodoc",
]

# Files and directories Sphinx should never pick up as sources.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

html_theme = 'alabaster'
| 534 | 25.75 | 79 | py |
openqasm | openqasm-main/source/openqasm/openqasm3/visitor.py | """
=====================================================
AST Visitors and Transformers (``openqasm3.visitor``)
=====================================================
Implementation of an example AST visitor :obj:`~QASMVisitor`, which can be
inherited from to make generic visitors of the reference AST. Deriving from
this is :obj:`~QASMTransformer`, which is an example of how the AST can be
manipulated.
"""
from typing import Optional, TypeVar, Generic
from .ast import QASMNode
__all__ = [
"QASMVisitor",
"QASMTransformer",
]
T = TypeVar("T")
class QASMVisitor(Generic[T]):
    """
    A node visitor base class that walks the abstract syntax tree and calls a
    visitor function for every node found. This function may return a value
    which is forwarded by the `visit` method.

    Modified from the implementation in ast.py in the Python standard library.
    We added the context argument to the visit method. It allows the visitor
    to hold temporary state while visiting the nodes.

    The optional context argument in visit/generic_visit methods can be used to hold temporary
    information that we do not want to hold in either the AST or the visitor themselves.
    """

    def visit(self, node: QASMNode, context: Optional[T] = None):
        """Visit a node, dispatching to ``visit_<ClassName>`` if defined."""
        method = "visit_" + node.__class__.__name__
        visitor = getattr(self, method, self.generic_visit)
        # The visitor method may not have the context argument, so only pass
        # it when one was supplied.  Compare against None explicitly: a
        # falsy-but-valid context (e.g. an empty dict used as an accumulator)
        # must still be forwarded.
        if context is not None:
            return visitor(node, context)
        else:
            return visitor(node)

    def generic_visit(self, node: QASMNode, context: Optional[T] = None):
        """Called if no explicit visitor function exists for a node; visits
        every child ``QASMNode`` (including those inside list attributes)."""
        for value in node.__dict__.values():
            if not isinstance(value, list):
                value = [value]
            for item in value:
                if isinstance(item, QASMNode):
                    if context is not None:
                        self.visit(item, context)
                    else:
                        self.visit(item)
class QASMTransformer(QASMVisitor[T]):
    """
    A :class:`QASMVisitor` subclass that walks the abstract syntax tree and
    allows modification of nodes: a visitor returning ``None`` removes the
    node, returning a node replaces it, and (inside lists) returning a
    sequence splices several replacement nodes in place of one.

    Modified from the implementation in ast.py in the Python standard library
    """

    def generic_visit(self, node: QASMNode, context: Optional[T] = None) -> QASMNode:
        for field, old_value in node.__dict__.items():
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, QASMNode):
                        # Compare against None so a falsy-but-valid context
                        # (e.g. an empty accumulator) is still forwarded.
                        value = self.visit(value, context) if context is not None else self.visit(value)
                        if value is None:
                            # ``None`` deletes the node from the list.
                            continue
                        elif not isinstance(value, QASMNode):
                            # A sequence of nodes is spliced in place.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                old_value[:] = new_values
            elif isinstance(old_value, QASMNode):
                new_node = self.visit(old_value, context) if context is not None else self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
| 3,365 | 36.4 | 95 | py |
openqasm | openqasm-main/source/openqasm/openqasm3/parser.py | """
=============================
Parser (``openqasm3.parser``)
=============================
Tools for parsing OpenQASM 3 programs into the :obj:`reference AST <openqasm3.ast>`.
The quick-start interface is simply to call ``openqasm3.parse``:
.. currentmodule:: openqasm3
.. autofunction:: openqasm3.parse
The rest of this module provides some lower-level internals of the parser.
.. currentmodule:: openqasm3.parser
.. autofunction:: span
.. autofunction:: add_span
.. autofunction:: combine_span
.. autofunction:: get_span
.. autoclass:: QASMNodeVisitor
"""
# pylint: disable=wrong-import-order
__all__ = [
"parse",
"get_span",
"add_span",
"combine_span",
"span",
"QASMNodeVisitor",
"QASM3ParsingError",
]
from contextlib import contextmanager
from typing import Union, TypeVar, List
try:
from antlr4 import CommonTokenStream, InputStream, ParserRuleContext, RecognitionException
from antlr4.error.Errors import ParseCancellationException
from antlr4.error.ErrorStrategy import BailErrorStrategy
from antlr4.tree.Tree import TerminalNode
except ImportError as exc:
raise ImportError(
"Parsing is not available unless the [parser] extra is installed,"
" such as by 'pip install openqasm3[parser]'."
) from exc
from ._antlr.qasm3Lexer import qasm3Lexer
from ._antlr.qasm3Parser import qasm3Parser
from ._antlr.qasm3ParserVisitor import qasm3ParserVisitor
from . import ast
# Maps a classical scalar-type keyword, as it appears in OpenQASM source, to
# the AST node class used to represent that type.
_TYPE_NODE_INIT = {
    "int": ast.IntType,
    "uint": ast.UintType,
    "float": ast.FloatType,
    "angle": ast.AngleType,
}
class QASM3ParsingError(Exception):
    """An error raised by the AST visitor during the AST-generation phase.

    This is raised in cases where the given program could not be correctly
    parsed.
    """
def parse(input_: str, *, permissive=False) -> ast.Program:
    """
    Parse a complete OpenQASM 3 program from a string.

    :param input_: A string containing a complete OpenQASM 3 program.
    :param permissive: A Boolean controlling whether ANTLR should attempt to
        recover from incorrect input or not.  Defaults to ``False``; if set to
        ``True``, the reference AST produced may be invalid if ANTLR emits any
        warning messages during its parsing phase.
    :return: A complete :obj:`~ast.Program` node.
    """
    token_stream = CommonTokenStream(qasm3Lexer(InputStream(input_)))
    parser = qasm3Parser(token_stream)
    if not permissive:
        # For some reason, the Python 3 runtime for ANTLR 4 is missing the
        # setter method `setErrorHandler`, so we have to set the attribute
        # directly.
        parser._errHandler = BailErrorStrategy()
    try:
        tree = parser.program()
    except (RecognitionException, ParseCancellationException) as exc:
        raise QASM3ParsingError() from exc
    return QASMNodeVisitor().visitProgram(tree)
def get_span(node: Union[ParserRuleContext, TerminalNode]) -> ast.Span:
    """Return the source span covered by an ANTLR rule context or terminal."""
    if isinstance(node, ParserRuleContext):
        first, last = node.start, node.stop
        return ast.Span(first.line, first.column, last.line, last.column)
    # Terminal nodes carry a single token in ``symbol``.
    token = node.symbol
    return ast.Span(token.line, token.start, token.line, token.stop)
_NodeT = TypeVar("_NodeT", bound=ast.QASMNode)
def add_span(node: _NodeT, span: ast.Span) -> _NodeT:
    """Set the span of a node and return the node (useful for chaining)."""
    node.span = span
    return node
def combine_span(first: ast.Span, second: ast.Span):
    """Combine two spans and return the combined one"""
    # NOTE(review): the end coordinates are taken from ``second``'s *start*
    # position rather than its end, so the combined span stops where
    # ``second`` begins.  That looks unintentional for a "combine" operation;
    # confirm whether ``second.end_line``/``second.end_column`` was meant
    # before changing it, since existing callers may rely on this behavior.
    return ast.Span(first.start_line, first.start_column, second.start_line, second.start_column)
def span(func):
    """Decorator for ``visit*`` methods that stamps the visited parser
    context's source span onto the node the method returns."""

    def wrapped(*args, **kwargs):
        # args[1] is the ANTLR parser context handed to the visit* method.
        node_span = get_span(args[1])
        node = func(*args, **kwargs)
        if node is None:
            raise ValueError(f"None encountered at {node_span}")
        return add_span(node, node_span)

    return wrapped
def _visit_identifier(identifier: TerminalNode):
    """Build an ``Identifier`` node from a terminal token, span attached."""
    node = ast.Identifier(identifier.getText())
    return add_span(node, get_span(identifier))
def _raise_from_context(ctx: ParserRuleContext, message: str):
    """Raise a parsing error prefixed with *ctx*'s line/column location."""
    location = f"L{ctx.start.line}:C{ctx.start.column}"
    raise QASM3ParsingError(f"{location}: {message}")
class QASMNodeVisitor(qasm3ParserVisitor):
def __init__(self):
# A stack of "contexts", each of which is a stack of "scopes". Contexts
# are for the main program, gates and subroutines, while scopes are
# loops, if/else and manual scoping constructs. Each "context" always
# contains at least one scope: the base ``ParserRuleContext`` that
# opened it.
self._contexts: List[List[ParserRuleContext]] = []
@contextmanager
def _push_context(self, ctx: ParserRuleContext):
self._contexts.append([ctx])
yield
self._contexts.pop()
@contextmanager
def _push_scope(self, ctx: ParserRuleContext):
self._contexts[-1].append(ctx)
yield
self._contexts[-1].pop()
def _current_context(self):
return self._contexts[-1]
def _current_scope(self):
return self._contexts[-1][-1]
def _current_base_scope(self):
return self._contexts[-1][0]
def _in_global_scope(self):
return len(self._contexts) == 1 and len(self._contexts[0]) == 1
def _in_gate(self):
return isinstance(self._current_base_scope(), qasm3Parser.GateStatementContext)
def _in_subroutine(self):
return isinstance(self._current_base_scope(), qasm3Parser.DefStatementContext)
def _in_loop(self):
return any(
isinstance(scope, (qasm3Parser.ForStatementContext, qasm3Parser.WhileStatementContext))
for scope in reversed(self._current_context())
)
@span
def visitProgram(self, ctx: qasm3Parser.ProgramContext):
with self._push_context(ctx):
version = ctx.version().VersionSpecifier().getText() if ctx.version() else None
statements = [self.visit(statement) for statement in ctx.statement()]
return ast.Program(statements=statements, version=version)
@span
def visitStatement(self, ctx: qasm3Parser.StatementContext):
if ctx.pragma():
return self.visit(ctx.pragma())
out = self.visit(ctx.getChild(-1))
out.annotations = [self.visit(annotation) for annotation in ctx.annotation()]
return out
@span
def visitAnnotation(self, ctx: qasm3Parser.AnnotationContext):
return ast.Annotation(
keyword=ctx.AnnotationKeyword().getText()[1:],
command=ctx.RemainingLineContent().getText() if ctx.RemainingLineContent() else None,
)
def visitScope(self, ctx: qasm3Parser.ScopeContext) -> List[ast.Statement]:
return [self.visit(statement) for statement in ctx.statement()]
@span
def visitPragma(self, ctx: qasm3Parser.PragmaContext):
if not self._in_global_scope():
_raise_from_context(ctx, "pragmas must be global")
return ast.Pragma(
command=ctx.RemainingLineContent().getText() if ctx.RemainingLineContent() else None
)
@span
def visitAliasDeclarationStatement(self, ctx: qasm3Parser.AliasDeclarationStatementContext):
return ast.AliasStatement(
target=_visit_identifier(ctx.Identifier()),
value=self.visit(ctx.aliasExpression()),
)
    @span
    def visitAssignmentStatement(self, ctx: qasm3Parser.AssignmentStatementContext):
        # Gate bodies are purely unitary, so no classical assignment is allowed.
        if self._in_gate():
            _raise_from_context(ctx, "cannot assign to classical parameters in a gate")
        # ``target = measure q;`` is modelled as a measurement statement rather
        # than a classical assignment.
        if ctx.measureExpression():
            return ast.QuantumMeasurementStatement(
                measure=self.visit(ctx.measureExpression()),
                target=self.visit(ctx.indexedIdentifier()),
            )
        return ast.ClassicalAssignment(
            lvalue=self.visit(ctx.indexedIdentifier()),
            op=ast.AssignmentOperator[ctx.op.text],
            rvalue=self.visit(ctx.expression()),
        )
@span
def visitBarrierStatement(self, ctx: qasm3Parser.BarrierStatementContext):
qubits = (
[self.visit(operand) for operand in ctx.gateOperandList().gateOperand()]
if ctx.gateOperandList()
else []
)
return ast.QuantumBarrier(qubits=qubits)
    @span
    def visitBoxStatement(self, ctx: qasm3Parser.BoxStatementContext):
        # ``box`` opens a new scope so that e.g. ``break`` inside it still sees
        # any enclosing loop through the scope stack.
        with self._push_scope(ctx):
            return ast.Box(
                duration=self.visit(ctx.designator()) if ctx.designator() else None,
                body=self.visit(ctx.scope()),
            )
    @span
    def visitBreakStatement(self, ctx: qasm3Parser.BreakStatementContext):
        # ``break`` is only meaningful inside a loop scope.
        if not self._in_loop():
            _raise_from_context(ctx, "'break' statement outside loop")
        return ast.BreakStatement()
@span
def visitCalStatement(self, ctx: qasm3Parser.CalStatementContext):
return ast.CalibrationStatement(
body=ctx.CalibrationBlock().getText() if ctx.CalibrationBlock() else ""
)
    @span
    def visitCalibrationGrammarStatement(self, ctx: qasm3Parser.CalibrationGrammarStatementContext):
        # ``defcalgrammar`` is a file-level directive.
        if not self._in_global_scope():
            _raise_from_context(ctx, "'defcalgrammar' statements must be global")
        # Strip the surrounding quote characters from the string literal.
        return ast.CalibrationGrammarDeclaration(name=ctx.StringLiteral().getText()[1:-1])
    @span
    def visitClassicalDeclarationStatement(
        self, ctx: qasm3Parser.ClassicalDeclarationStatementContext
    ):
        # Gate bodies may not declare classical storage.
        if self._in_gate():
            _raise_from_context(ctx, "cannot declare classical variables in a gate")
        # Array declarations are restricted to global scope by the language.
        if ctx.arrayType() and not self._in_global_scope():
            _raise_from_context(ctx, "arrays can only be declared globally")
        init = self.visit(ctx.declarationExpression()) if ctx.declarationExpression() else None
        # Exactly one of scalarType/arrayType is present in the parse tree.
        return ast.ClassicalDeclaration(
            type=self.visit(ctx.scalarType() or ctx.arrayType()),
            identifier=_visit_identifier(ctx.Identifier()),
            init_expression=init,
        )
@span
def visitConstDeclarationStatement(self, ctx: qasm3Parser.ConstDeclarationStatementContext):
return ast.ConstantDeclaration(
type=self.visit(ctx.scalarType()),
identifier=_visit_identifier(ctx.Identifier()),
init_expression=self.visit(ctx.declarationExpression()),
)
    @span
    def visitContinueStatement(self, ctx: qasm3Parser.ContinueStatementContext):
        # ``continue`` is only meaningful inside a loop scope.
        if not self._in_loop():
            _raise_from_context(ctx, "'continue' statement outside loop")
        return ast.ContinueStatement()
    @span
    def visitDefStatement(self, ctx: qasm3Parser.DefStatementContext):
        # Subroutines may only be defined at the top level.
        if not self._in_global_scope():
            _raise_from_context(ctx, "subroutine definitions must be global")
        name = _visit_identifier(ctx.Identifier())
        arguments = (
            [self.visit(argument) for argument in ctx.argumentDefinitionList().argumentDefinition()]
            if ctx.argumentDefinitionList()
            else []
        )
        return_type = (
            self.visit(ctx.returnSignature().scalarType()) if ctx.returnSignature() else None
        )
        # The body is visited inside a new context so that ``return`` and scope
        # checks resolve against this subroutine.
        with self._push_context(ctx):
            body = self.visit(ctx.scope())
        return ast.SubroutineDefinition(
            name=name, arguments=arguments, body=body, return_type=return_type
        )
@span
def visitDefcalStatement(self, ctx: qasm3Parser.DefcalStatementContext):
arguments = (
[
self.visit(argument)
for argument in ctx.defcalArgumentDefinitionList().defcalArgumentDefinition()
]
if ctx.defcalArgumentDefinitionList()
else []
)
qubits = [self.visit(operand) for operand in ctx.defcalOperandList().defcalOperand() or []]
return_type = (
self.visit(ctx.returnSignature().scalarType()) if ctx.returnSignature() else None
)
return ast.CalibrationDefinition(
name=self.visit(ctx.defcalTarget()),
arguments=arguments,
qubits=qubits,
return_type=return_type,
body=ctx.CalibrationBlock().getText() if ctx.CalibrationBlock() else "",
)
@span
def visitDelayStatement(self, ctx: qasm3Parser.DelayStatementContext):
qubits = (
[self.visit(operand) for operand in ctx.gateOperandList().gateOperand()]
if ctx.gateOperandList()
else []
)
return ast.DelayInstruction(duration=self.visit(ctx.designator()), qubits=qubits)
    @span
    def visitEndStatement(self, _: qasm3Parser.EndStatementContext):
        # ``end;`` carries no information beyond its presence.
        return ast.EndStatement()
    @span
    def visitExpressionStatement(self, ctx: qasm3Parser.ExpressionStatementContext):
        # A bare expression followed by ``;``.
        return ast.ExpressionStatement(self.visit(ctx.expression()))
@span
def visitExternStatement(self, ctx: qasm3Parser.ExternStatementContext):
if not self._in_global_scope():
_raise_from_context(ctx, "extern declarations must be global")
arguments = (
[self.visit(type) for type in ctx.externArgumentList().externArgument()]
if ctx.externArgumentList()
else []
)
return_type = (
self.visit(ctx.returnSignature().scalarType()) if ctx.returnSignature() else None
)
return ast.ExternDeclaration(
name=_visit_identifier(ctx.Identifier()),
arguments=arguments,
return_type=return_type,
)
    @span
    def visitForStatement(self, ctx: qasm3Parser.ForStatementContext):
        # The iterable is one of: a discrete set, a range, or a general
        # expression (e.g. an array identifier).
        if ctx.setExpression():
            set_declaration = self.visit(ctx.setExpression())
        elif ctx.rangeExpression():
            set_declaration = self.visit(ctx.rangeExpression())
        else:
            set_declaration = self.visit(ctx.expression())
        # The body is a loop scope, which enables ``break``/``continue``.
        with self._push_scope(ctx):
            block = self.visit(ctx.body)
        return ast.ForInLoop(
            type=self.visit(ctx.scalarType()),
            identifier=_visit_identifier(ctx.Identifier()),
            set_declaration=set_declaration,
            block=block,
        )
    @span
    def visitGateCallStatement(self, ctx: qasm3Parser.GateCallStatementContext):
        modifiers = [self.visit(modifier) for modifier in ctx.gateModifier()]
        arguments = (
            [self.visit(argument) for argument in ctx.expressionList().expression()]
            if ctx.expressionList()
            else []
        )
        qubits = (
            [self.visit(operand) for operand in ctx.gateOperandList().gateOperand()]
            if ctx.gateOperandList()
            else []
        )
        # ``gphase`` is syntactically a gate call but gets its own AST node,
        # and requires exactly one (angle) argument.
        if ctx.GPHASE():
            if len(arguments) != 1:
                _raise_from_context(
                    ctx, f"'gphase' takes exactly one argument, but received {arguments}"
                )
            return ast.QuantumPhase(modifiers=modifiers, argument=arguments[0], qubits=qubits)
        return ast.QuantumGate(
            modifiers=modifiers,
            name=_visit_identifier(ctx.Identifier()),
            arguments=arguments,
            qubits=qubits,
            duration=self.visit(ctx.designator()) if ctx.designator() else None,
        )
    @span
    def visitGateStatement(self, ctx: qasm3Parser.GateStatementContext):
        # Gates may only be defined at the top level.
        if not self._in_global_scope():
            _raise_from_context(ctx, "gate definitions must be global")
        name = _visit_identifier(ctx.Identifier())
        arguments = (
            [_visit_identifier(id_) for id_ in ctx.params.Identifier()]
            if ctx.params is not None
            else []
        )
        qubits = [_visit_identifier(id_) for id_ in ctx.qubits.Identifier()]
        # The body is visited inside a gate context so that non-unitary
        # operations and classical declarations are rejected.
        with self._push_context(ctx):
            body = self.visit(ctx.scope())
        return ast.QuantumGateDefinition(name, arguments, qubits, body)
    @span
    def visitIfStatement(self, ctx: qasm3Parser.IfStatementContext):
        # Each branch body is its own scope on the scope stack.
        with self._push_scope(ctx):
            if_body = self.visit(ctx.if_body)
        with self._push_scope(ctx):
            else_body = self.visit(ctx.else_body) if ctx.else_body else []
        return ast.BranchingStatement(
            condition=self.visit(ctx.expression()), if_block=if_body, else_block=else_body
        )
@span
def visitIncludeStatement(self, ctx: qasm3Parser.IncludeStatementContext):
if not self._in_global_scope():
_raise_from_context(ctx, "'include' statements must be global")
return ast.Include(filename=ctx.StringLiteral().getText()[1:-1])
    @span
    def visitIoDeclarationStatement(self, ctx: qasm3Parser.IoDeclarationStatementContext):
        # ``input``/``output`` declarations are file-level interface items.
        if not self._in_global_scope():
            keyword = "input" if ctx.INPUT() else "output"
            _raise_from_context(ctx, f"'{keyword}' declarations must be global")
        return ast.IODeclaration(
            io_identifier=ast.IOKeyword.input if ctx.INPUT() else ast.IOKeyword.output,
            type=self.visit(ctx.scalarType()),
            identifier=_visit_identifier(ctx.Identifier()),
        )
    @span
    def visitMeasureArrowAssignmentStatement(
        self, ctx: qasm3Parser.MeasureArrowAssignmentStatementContext
    ):
        # OpenQASM 2-style ``measure q -> c;``; the target may be omitted.
        if self._in_gate():
            _raise_from_context(ctx, "cannot have a non-unitary 'measure' instruction in a gate")
        return ast.QuantumMeasurementStatement(
            measure=self.visit(ctx.measureExpression()),
            target=self.visit(ctx.indexedIdentifier()) if ctx.indexedIdentifier() else None,
        )
@span
def visitOldStyleDeclarationStatement(
self, ctx: qasm3Parser.OldStyleDeclarationStatementContext
):
identifier = _visit_identifier(ctx.Identifier())
size = self.visit(ctx.designator()) if ctx.designator() else None
if ctx.QREG():
if not self._in_global_scope():
_raise_from_context(ctx, "qubit declarations must be global")
return ast.QubitDeclaration(qubit=identifier, size=size)
span = (
combine_span(get_span(ctx.CREG()), get_span(ctx.designator()))
if ctx.designator()
else get_span(ctx.CREG())
)
return ast.ClassicalDeclaration(
type=add_span(ast.BitType(size=size), span),
identifier=identifier,
init_expression=None,
)
@span
def visitQuantumDeclarationStatement(self, ctx: qasm3Parser.QuantumDeclarationStatementContext):
if not self._in_global_scope():
_raise_from_context(ctx, "qubit declarations must be global")
size_designator = ctx.qubitType().designator()
return ast.QubitDeclaration(
qubit=_visit_identifier(ctx.Identifier()),
size=self.visit(size_designator) if size_designator is not None else None,
)
    @span
    def visitResetStatement(self, ctx: qasm3Parser.ResetStatementContext):
        # ``reset`` is non-unitary, so it is rejected inside gate bodies.
        if self._in_gate():
            _raise_from_context(ctx, "cannot have a non-unitary 'reset' instruction in a gate")
        return ast.QuantumReset(qubits=self.visit(ctx.gateOperand()))
@span
def visitReturnStatement(self, ctx: qasm3Parser.ReturnStatementContext):
if not self._in_subroutine():
_raise_from_context(ctx, "'return' statement outside subroutine")
if ctx.expression():
expression = self.visit(ctx.expression())
elif ctx.measureExpression():
expression = self.visit(ctx.measureExpression())
else:
expression = None
return ast.ReturnStatement(expression=expression)
    @span
    def visitWhileStatement(self, ctx: qasm3Parser.WhileStatementContext):
        # The body is a loop scope, which enables ``break``/``continue``.
        with self._push_scope(ctx):
            block = self.visit(ctx.body)
        return ast.WhileLoop(while_condition=self.visit(ctx.expression()), block=block)
    @span
    def visitParenthesisExpression(self, ctx: qasm3Parser.ParenthesisExpressionContext):
        # Parentheses are not represented in the AST; just visit the inner expression.
        return self.visit(ctx.expression())
@span
def visitIndexExpression(self, ctx: qasm3Parser.IndexExpressionContext):
return ast.IndexExpression(
collection=self.visit(ctx.expression()),
index=self.visit(ctx.indexOperator()),
)
@span
def visitUnaryExpression(self, ctx: qasm3Parser.UnaryExpressionContext):
return ast.UnaryExpression(
op=ast.UnaryOperator[ctx.op.text],
expression=self.visit(ctx.expression()),
)
    @span
    def _visit_binary_expression(self, ctx: ParserRuleContext):
        # Every binary-operator parser rule has the same shape (two operand
        # expressions and an ``op`` token), so they all share this handler.
        return ast.BinaryExpression(
            lhs=self.visit(ctx.expression(0)),
            op=ast.BinaryOperator[ctx.op.text],
            rhs=self.visit(ctx.expression(1)),
        )
    # Alias the shared handler for each binary-operator rule in the grammar.
    visitPowerExpression = _visit_binary_expression
    visitMultiplicativeExpression = _visit_binary_expression
    visitAdditiveExpression = _visit_binary_expression
    visitBitshiftExpression = _visit_binary_expression
    visitComparisonExpression = _visit_binary_expression
    visitEqualityExpression = _visit_binary_expression
    visitBitwiseAndExpression = _visit_binary_expression
    visitBitwiseXorExpression = _visit_binary_expression
    visitBitwiseOrExpression = _visit_binary_expression
    visitLogicalAndExpression = _visit_binary_expression
    visitLogicalOrExpression = _visit_binary_expression
    @span
    def visitCastExpression(self, ctx: qasm3Parser.CastExpressionContext):
        # The first child is the target type (scalar or array); then the operand.
        return ast.Cast(type=self.visit(ctx.getChild(0)), argument=self.visit(ctx.expression()))
    @span
    def visitMeasureExpression(self, ctx: qasm3Parser.MeasureExpressionContext):
        # Measurement is non-unitary, so it is rejected inside gate bodies.
        if self._in_gate():
            _raise_from_context(ctx, "cannot have a non-unitary 'measure' instruction in a gate")
        return ast.QuantumMeasurement(qubit=self.visit(ctx.gateOperand()))
    @span
    def visitDurationofExpression(self, ctx: qasm3Parser.DurationofExpressionContext):
        # ``durationof({...})`` — the braced target is visited as its own scope.
        with self._push_scope(ctx):
            return ast.DurationOf(target=self.visit(ctx.scope()))
    @span
    def visitCallExpression(self, ctx: qasm3Parser.CallExpressionContext):
        name = _visit_identifier(ctx.Identifier())
        arguments = (
            [self.visit(argument) for argument in ctx.expressionList().expression()]
            if ctx.expressionList()
            else []
        )
        # ``sizeof`` parses as a regular call but has a dedicated AST node and
        # an arity restriction of one or two arguments.
        if name.name == "sizeof":
            if len(arguments) not in (1, 2):
                _raise_from_context(ctx, "'sizeof' needs either one or two arguments")
            return ast.SizeOf(
                target=arguments[0],
                index=arguments[1] if len(arguments) == 2 else None,
            )
        return ast.FunctionCall(name=name, arguments=arguments)
    @span
    def visitLiteralExpression(self, ctx: qasm3Parser.LiteralExpressionContext):
        # Exactly one terminal alternative is populated; test each in turn.
        if ctx.Identifier():
            return _visit_identifier(ctx.Identifier())
        if ctx.BinaryIntegerLiteral():
            return ast.IntegerLiteral(value=int(ctx.BinaryIntegerLiteral().getText(), 2))
        if ctx.OctalIntegerLiteral():
            return ast.IntegerLiteral(value=int(ctx.OctalIntegerLiteral().getText(), 8))
        if ctx.DecimalIntegerLiteral():
            return ast.IntegerLiteral(value=int(ctx.DecimalIntegerLiteral().getText(), 10))
        if ctx.HexIntegerLiteral():
            return ast.IntegerLiteral(value=int(ctx.HexIntegerLiteral().getText(), 16))
        if ctx.FloatLiteral():
            return ast.FloatLiteral(value=float(ctx.FloatLiteral().getText()))
        if ctx.ImaginaryLiteral():
            # Strip the trailing "im" marker before conversion.
            return ast.ImaginaryLiteral(value=float(ctx.ImaginaryLiteral().getText()[:-2]))
        if ctx.BooleanLiteral():
            return ast.BooleanLiteral(value=ctx.BooleanLiteral().getText() == "true")
        if ctx.BitstringLiteral():
            # Drop the surrounding quotes and "_" digit separators; the result
            # supplies both the value and the bit width.
            stripped = ctx.BitstringLiteral().getText()[1:-1].replace("_", "")
            return ast.BitstringLiteral(value=int(stripped, 2), width=len(stripped))
        if ctx.TimingLiteral():
            text = ctx.TimingLiteral().getText()
            # Tentatively split off a two-character suffix ("ns"/"us"/"ms"/"dt").
            value, suffix = text[:-2], text[-2:]
            if suffix[1] == "s":
                if suffix[0] in "num":
                    unit = ast.TimeUnit[suffix]
                elif suffix[0] == "µ":
                    # Non-ASCII micro sign normalises to "us".
                    unit = ast.TimeUnit["us"]
                else:
                    # Bare seconds: the suffix is only one character, so the
                    # last character of ``value`` must be reclaimed.
                    unit = ast.TimeUnit["s"]
                    value = text[:-1]
            else:
                unit = ast.TimeUnit["dt"]
            return ast.DurationLiteral(value=float(value), unit=unit)
        if ctx.HardwareQubit():
            return ast.Identifier(ctx.HardwareQubit().getText())
        # NOTE(review): _raise_from_context appears to raise itself; the
        # explicit ``raise`` makes termination obvious to static analysis.
        raise _raise_from_context(ctx, "unknown literal type")
    @span
    def visitAliasExpression(self, ctx: qasm3Parser.AliasExpressionContext):
        # This choice in the recursion and the accompanying reversal of the
        # iterator builds the tree as left-associative. The logical operation
        # is arbitrarily associative, but the AST needs us to make a choice.
        def recurse(previous, iterator):
            # ``previous`` is the right-hand operand at this level; the rest of
            # the (reversed) iterator supplies everything to its left.
            rhs = self.visit(previous)
            try:
                current = next(iterator)
            except StopIteration:
                return self.visit(previous)
            lhs = recurse(current, iterator)
            return add_span(ast.Concatenation(lhs=lhs, rhs=rhs), combine_span(lhs.span, rhs.span))
        # This iterator should always be non-empty if ANTLR did its job right.
        iterator = reversed(ctx.expression())
        return recurse(next(iterator), iterator)
    @span
    def visitDeclarationExpression(self, ctx: qasm3Parser.DeclarationExpressionContext):
        # The single child is either an expression, a measure, or an array literal.
        return self.visit(ctx.getChild(0))
    @span
    def visitRangeExpression(self, ctx: qasm3Parser.RangeExpressionContext):
        # start, end and step are all optional as in [:]
        # It could be [start:end] or [start:step:end]
        start = None
        end = None
        step = None
        colons_seen = 0
        # Walk the children once, using the colon count to decide which slot
        # the current expression fills; with two colons, what was provisionally
        # ``end`` turns out to have been ``step``.
        for child in ctx.getChildren():
            if isinstance(child, qasm3Parser.ExpressionContext):
                expression = self.visit(child)
                if colons_seen == 0:
                    start = expression
                elif colons_seen == 1:
                    end = expression
                else:
                    step = end
                    end = expression
            elif child.getText() == ":":
                colons_seen += 1
        return ast.RangeDefinition(start=start, end=end, step=step)
@span
def visitSetExpression(self, ctx: qasm3Parser.SetExpressionContext):
return ast.DiscreteSet(values=[self.visit(expression) for expression in ctx.expression()])
    @span
    def visitArrayLiteral(self, ctx: qasm3Parser.ArrayLiteralContext):
        # Elements are expressions or nested array literals; filter out the
        # punctuation children (braces and commas) with a type predicate.
        array_literal_element = (
            qasm3Parser.ExpressionContext,
            qasm3Parser.ArrayLiteralContext,
        )
        def predicate(child):
            return isinstance(child, array_literal_element)
        return ast.ArrayLiteral(
            values=[self.visit(element) for element in ctx.getChildren(predicate=predicate)],
        )
    def visitIndexOperator(self, ctx: qasm3Parser.IndexOperatorContext):
        # A set index stays a single node; otherwise the result is the list of
        # comma-separated expressions/ranges inside the brackets.
        if ctx.setExpression():
            return self.visit(ctx.setExpression())
        index_element = (
            qasm3Parser.ExpressionContext,
            qasm3Parser.RangeExpressionContext,
        )
        def predicate(child):
            return isinstance(child, index_element)
        return [self.visit(child) for child in ctx.getChildren(predicate=predicate)]
@span
def visitIndexedIdentifier(self, ctx: qasm3Parser.IndexedIdentifierContext):
if not ctx.indexOperator():
return _visit_identifier(ctx.Identifier())
return ast.IndexedIdentifier(
name=_visit_identifier(ctx.Identifier()),
indices=[self.visit(index) for index in ctx.indexOperator()],
)
    @span
    def visitDesignator(self, ctx: qasm3Parser.DesignatorContext):
        # ``[expr]`` — the brackets are not represented in the AST.
        return self.visit(ctx.expression())
    @span
    def visitGateModifier(self, ctx: qasm3Parser.GateModifierContext):
        # ``inv`` never takes an argument; ``pow`` always does; ``ctrl`` and
        # ``negctrl`` take an optional control count.
        if ctx.INV():
            return ast.QuantumGateModifier(modifier=ast.GateModifierName["inv"], argument=None)
        if ctx.POW():
            return ast.QuantumGateModifier(
                modifier=ast.GateModifierName["pow"], argument=self.visit(ctx.expression())
            )
        return ast.QuantumGateModifier(
            modifier=ast.GateModifierName["ctrl" if ctx.CTRL() else "negctrl"],
            argument=self.visit(ctx.expression()) if ctx.expression() else None,
        )
@span
def visitScalarType(self, ctx: qasm3Parser.ScalarTypeContext):
if ctx.BOOL():
return ast.BoolType()
if ctx.DURATION():
return ast.DurationType()
if ctx.STRETCH():
return ast.StretchType()
if ctx.BIT():
return ast.BitType(size=self.visit(ctx.designator()) if ctx.designator() else None)
if ctx.INT():
return ast.IntType(size=self.visit(ctx.designator()) if ctx.designator() else None)
if ctx.UINT():
return ast.UintType(size=self.visit(ctx.designator()) if ctx.designator() else None)
if ctx.FLOAT():
return ast.FloatType(size=self.visit(ctx.designator()) if ctx.designator() else None)
if ctx.ANGLE():
return ast.AngleType(size=self.visit(ctx.designator()) if ctx.designator() else None)
if ctx.COMPLEX():
base = self.visit(ctx.scalarType()) if ctx.scalarType() else None
if base is not None and not isinstance(base, ast.FloatType):
_raise_from_context(ctx.scalarType(), f"invalid type of complex components")
return ast.ComplexType(base_type=base)
_raise_from_context(ctx, "unhandled type: {ctx.getText()}")
@span
def visitArrayType(self, ctx: qasm3Parser.ArrayTypeContext):
base = self.visit(ctx.scalarType())
if not isinstance(
base,
(
ast.BitType,
ast.IntType,
ast.UintType,
ast.FloatType,
ast.AngleType,
ast.DurationType,
ast.BoolType,
ast.ComplexType,
),
):
_raise_from_context(ctx.scalarType(), f"invalid scalar type for array")
return ast.ArrayType(
base_type=base,
dimensions=[self.visit(expression) for expression in ctx.expressionList().expression()],
)
@span
def visitGateOperand(self, ctx: qasm3Parser.GateOperandContext):
if ctx.HardwareQubit():
return ast.Identifier(name=ctx.getText())
return self.visit(ctx.indexedIdentifier())
    @span
    def visitDefcalTarget(self, ctx: qasm3Parser.DefcalTargetContext):
        # The target name (e.g. ``measure``, ``reset`` or an identifier) is
        # kept verbatim as an Identifier node.
        return ast.Identifier(name=ctx.getText())
    @span
    def visitArgumentDefinition(self, ctx: qasm3Parser.ArgumentDefinitionContext):
        name = _visit_identifier(ctx.Identifier())
        # Quantum arguments: modern ``qubit[...]`` or old-style ``qreg``.
        if ctx.qubitType() or ctx.QREG():
            designator = ctx.qubitType().designator() if ctx.qubitType() else ctx.designator()
            return ast.QuantumArgument(
                name=name, size=self.visit(designator) if designator else None
            )
        access = None
        if ctx.CREG():
            # Old-style ``creg`` desugars to a BitType with a synthesised span
            # covering the keyword and optional designator.
            size = self.visit(ctx.designator()) if ctx.designator() else None
            creg_span = get_span(ctx.CREG())
            type_ = add_span(
                ast.BitType(size=size),
                combine_span(creg_span, get_span(size)) if size else creg_span,
            )
        elif ctx.arrayReferenceType():
            # Arrays are passed by reference, with explicit access control.
            array_ctx = ctx.arrayReferenceType()
            access = (
                ast.AccessControl.readonly if array_ctx.READONLY() else ast.AccessControl.mutable
            )
            base_type = self.visit(array_ctx.scalarType())
            # Either ``#dim = expr`` (single expression) or an explicit
            # dimension list.
            dimensions = (
                self.visit(array_ctx.expression())
                if array_ctx.expression()
                else [self.visit(expr) for expr in array_ctx.expressionList().expression()]
            )
            type_ = add_span(
                ast.ArrayReferenceType(base_type=base_type, dimensions=dimensions),
                get_span(array_ctx),
            )
        else:
            type_ = self.visit(ctx.scalarType())
        return ast.ClassicalArgument(type=type_, name=name, access=access)
    @span
    def visitDefcalArgumentDefinition(self, ctx: qasm3Parser.DefcalArgumentDefinitionContext):
        # A defcal argument is either a full argument definition or an expression.
        return self.visit(ctx.getChild(0))
    @span
    def visitExternArgument(self, ctx: qasm3Parser.ExternArgumentContext):
        access = None
        if ctx.CREG():
            # Old-style ``creg`` desugars to a BitType.
            type_ = ast.BitType(size=self.visit(ctx.designator()) if ctx.designator() else None)
        elif ctx.scalarType():
            type_ = self.visit(ctx.scalarType())
        else:
            # Arrays are passed by reference, with explicit access control.
            array_ctx = ctx.arrayReferenceType()
            access = (
                ast.AccessControl.readonly if array_ctx.READONLY() else ast.AccessControl.mutable
            )
            base_type = self.visit(array_ctx.scalarType())
            # Either ``#dim = expr`` (single expression) or an explicit list.
            dimensions = (
                self.visit(array_ctx.expression())
                if array_ctx.expression()
                else [self.visit(expr) for expr in array_ctx.expressionList().expression()]
            )
            type_ = add_span(
                ast.ArrayReferenceType(base_type=base_type, dimensions=dimensions),
                get_span(array_ctx),
            )
        return ast.ExternArgument(type=type_, access=access)
@span
def visitDefcalOperand(self, ctx: qasm3Parser.DefcalOperandContext):
if ctx.HardwareQubit():
return ast.Identifier(ctx.HardwareQubit().getText())
return _visit_identifier(ctx.Identifier())
def visitStatementOrScope(
self, ctx: qasm3Parser.StatementOrScopeContext
) -> List[ast.Statement]:
return self.visit(ctx.scope()) if ctx.scope() else [self.visit(ctx.statement())]
| 34,306 | 38.34289 | 109 | py |
openqasm | openqasm-main/source/openqasm/openqasm3/printer.py | """
==============================================================
Generating OpenQASM 3 from an AST Node (``openqasm3.printer``)
==============================================================
.. currentmodule:: openqasm3
It is often useful to go from the :mod:`AST representation <openqasm3.ast>` of an OpenQASM 3 program
back to the textual language. The functions and classes described here will do this conversion.
Most uses should be covered by using :func:`dump` to write to an open text stream (an open file, for
example) or :func:`dumps` to produce a string. Both of these accept :ref:`several keyword arguments
<printer-kwargs>` that control the formatting of the output.
.. autofunction:: openqasm3.dump
.. autofunction:: openqasm3.dumps
.. _printer-kwargs:
Controlling the formatting
==========================
.. currentmodule:: openqasm3.printer
The :func:`~openqasm3.dump` and :func:`~openqasm3.dumps` functions both use an internal AST-visitor
class to operate on the AST. This class actually defines all the formatting options, and can be
used for more low-level operations, such as writing a program piece-by-piece. This may need access
to the :ref:`printer state <printer-state>`, documented below.
.. autoclass:: Printer
:members:
:class-doc-from: both
For the most complete control, it is possible to subclass this printer and override only the visitor
methods that should be modified.
.. _printer-state:
Reusing the same printer
========================
.. currentmodule:: openqasm3.printer
If the :class:`Printer` is being reused to write multiple nodes to a single stream, you will also
likely need to access its internal state. This can be done by manually creating a
:class:`PrinterState` object and passing it in the original call to :meth:`Printer.visit`. The
state object is mutated by the visit, and will reflect the output state at the end.
.. autoclass:: PrinterState
:members:
"""
import contextlib
import dataclasses
import io
import functools
from typing import Sequence, Optional
from . import ast, properties
from .visitor import QASMVisitor
__all__ = ("dump", "dumps", "Printer", "PrinterState")
def dump(node: ast.QASMNode, file: io.TextIOBase, **kwargs) -> None:
    """Write textual OpenQASM 3 code representing ``node`` to the open stream ``file``.

    It is generally expected that ``node`` will be an instance of :class:`.ast.Program`, but this
    does not need to be the case.

    For more details on the available keyword arguments, see :ref:`printer-kwargs`.
    """
    printer = Printer(file, **kwargs)
    printer.visit(node)
def dumps(node: ast.QASMNode, **kwargs) -> str:
    """Get a string representation of the OpenQASM 3 code representing ``node``.

    It is generally expected that ``node`` will be an instance of :class:`.ast.Program`, but this
    does not need to be the case.

    For more details on the available keyword arguments, see :ref:`printer-kwargs`.
    """
    buffer = io.StringIO()
    dump(node, buffer, **kwargs)
    return buffer.getvalue()
@dataclasses.dataclass
class PrinterState:
    """State object for the print visitor. This is mutated during the visit."""

    current_indent: int = 0
    """The current indentation level. This is a non-negative integer; the actual indentation string
    to be used is defined by the :class:`Printer`."""

    skip_next_indent: bool = False
    """This is used to communicate between the different levels of if-else visitors when emitting
    chained ``else if`` blocks. The chaining occurs with the next ``if`` if this is set to
    ``True``."""

    @contextlib.contextmanager
    def increase_scope(self):
        """Use as a context manager to increase the scoping level of this context inside the
        resource block."""
        self.current_indent = self.current_indent + 1
        try:
            yield
        finally:
            self.current_indent = self.current_indent - 1
def _maybe_annotated(method):
    # Decorator for statement visitors: emit the node's annotations (one per
    # line) before delegating to the wrapped visitor method.
    @functools.wraps(method)
    def annotated(self: "Printer", node: ast.Statement, context: PrinterState) -> None:
        for annotation in node.annotations:
            self.visit(annotation, context)
        return method(self, node, context)
    return annotated
class Printer(QASMVisitor[PrinterState]):
"""Internal AST-visitor for writing AST nodes out to a stream as valid OpenQASM 3.
This class can be used directly to write multiple nodes to the same stream, potentially with
some manual control fo the state between them.
If subclassing, generally only the specialised ``visit_*`` methods need to be overridden. These
are derived from the base class, and use the name of the relevant :mod:`AST node <.ast>`
verbatim after ``visit_``."""
    def __init__(
        self,
        stream: io.TextIOBase,
        *,
        indent: str = "  ",
        chain_else_if: bool = True,
        old_measurement: bool = False,
    ):
        """
        Aside from ``stream``, the arguments here are keyword arguments that are common to this
        class, :func:`~openqasm3.dump` and :func:`~openqasm3.dumps`.

        :param stream: the stream that the output will be written to.
        :type stream: io.TextIOBase

        :param indent: the string to use as a single indentation level.
        :type indent: str, optional (two spaces).

        :param chain_else_if: If ``True`` (default), then constructs of the form::

                if (x == 0) {
                    // ...
                } else {
                    if (x == 1) {
                        // ...
                    } else {
                        // ...
                    }
                }

            will be collapsed into the equivalent but flatter::

                if (x == 0) {
                    // ...
                } else if (x == 1) {
                    // ...
                } else {
                    // ...
                }

        :type chain_else_if: bool, optional (``True``)

        :param old_measurement: If ``True``, then the OpenQASM 2-style "arrow" measurements will be
            used instead of the normal assignments. For example, ``old_measurement=False`` (the
            default) will emit ``a = measure b;`` where ``old_measurement=True`` would emit
            ``measure b -> a;`` instead.
        :type old_measurement: bool, optional (``False``).
        """
        # Store the output stream and formatting options for the visitors.
        self.stream = stream
        self.indent = indent
        self.chain_else_if = chain_else_if
        self.old_measurement = old_measurement
    def visit(self, node: ast.QASMNode, context: Optional[PrinterState] = None) -> None:
        """Completely visit a node and all subnodes. This is the dispatch entry point; this will
        automatically result in the correct specialised visitor getting called.

        :param node: The AST node to visit. Usually this will be an :class:`.ast.Program`.
        :type node: .ast.QASMNode
        :param context: The state object to be used during the visit. If not given, a default
            object will be constructed and used.
        :type context: PrinterState
        """
        # A fresh state is created per call unless the caller threads one
        # through explicitly (needed when printing several nodes to one stream).
        if context is None:
            context = PrinterState()
        return super().visit(node, context)
    def _start_line(self, context: PrinterState) -> None:
        # Emit current indentation, unless a chained "else if" suppressed it.
        if context.skip_next_indent:
            context.skip_next_indent = False
            return
        self.stream.write(context.current_indent * self.indent)
    def _end_statement(self, context: PrinterState) -> None:
        # Terminate a statement with its semicolon and newline.
        self.stream.write(";\n")
    def _end_line(self, context: PrinterState) -> None:
        # Terminate a non-statement line (e.g. an opening brace).
        self.stream.write("\n")
    def _write_statement(self, line: str, context: PrinterState) -> None:
        # Convenience: write ``line`` as a complete, indented, ';'-terminated
        # statement.
        self._start_line(context)
        self.stream.write(line)
        self._end_statement(context)
def _visit_sequence(
self,
nodes: Sequence[ast.QASMNode],
context: PrinterState,
*,
start: str = "",
end: str = "",
separator: str,
) -> None:
if start:
self.stream.write(start)
for node in nodes[:-1]:
self.visit(node, context)
self.stream.write(separator)
if nodes:
self.visit(nodes[-1], context)
if end:
self.stream.write(end)
def visit_Program(self, node: ast.Program, context: PrinterState) -> None:
if node.version:
self._write_statement(f"OPENQASM {node.version}", context)
for statement in node.statements:
self.visit(statement, context)
    @_maybe_annotated
    def visit_Include(self, node: ast.Include, context: PrinterState) -> None:
        # The filename is re-quoted on output.
        self._write_statement(f'include "{node.filename}"', context)
    @_maybe_annotated
    def visit_ExpressionStatement(
        self, node: ast.ExpressionStatement, context: PrinterState
    ) -> None:
        # A bare expression terminated by ';'.
        self._start_line(context)
        self.visit(node.expression, context)
        self._end_statement(context)
@_maybe_annotated
def visit_QubitDeclaration(self, node: ast.QubitDeclaration, context: PrinterState) -> None:
self._start_line(context)
self.stream.write("qubit")
if node.size is not None:
self.stream.write("[")
self.visit(node.size)
self.stream.write("]")
self.stream.write(" ")
self.visit(node.qubit, context)
self._end_statement(context)
    @_maybe_annotated
    def visit_QuantumGateDefinition(
        self, node: ast.QuantumGateDefinition, context: PrinterState
    ) -> None:
        # ``gate name(args) qubits { body }`` — the parenthesised argument list
        # is omitted entirely when there are no arguments.
        self._start_line(context)
        self.stream.write("gate ")
        self.visit(node.name, context)
        if node.arguments:
            self._visit_sequence(node.arguments, context, start="(", end=")", separator=", ")
        self.stream.write(" ")
        self._visit_sequence(node.qubits, context, separator=", ")
        self.stream.write(" {")
        self._end_line(context)
        # The body is printed one indentation level deeper.
        with context.increase_scope():
            for statement in node.body:
                self.visit(statement, context)
        self._start_line(context)
        self.stream.write("}")
        self._end_line(context)
    @_maybe_annotated
    def visit_ExternDeclaration(self, node: ast.ExternDeclaration, context: PrinterState) -> None:
        # ``extern name(args) -> type;`` — the arrow clause only appears when a
        # return type is present.
        self._start_line(context)
        self.stream.write("extern ")
        self.visit(node.name, context)
        self._visit_sequence(node.arguments, context, start="(", end=")", separator=", ")
        if node.return_type is not None:
            self.stream.write(" -> ")
            self.visit(node.return_type, context)
        self._end_statement(context)
    def visit_Identifier(self, node: ast.Identifier, context: PrinterState) -> None:
        # Identifiers are emitted verbatim.
        self.stream.write(node.name)
def visit_UnaryExpression(self, node: ast.UnaryExpression, context: PrinterState) -> None:
self.stream.write(node.op.name)
if properties.precedence(node) >= properties.precedence(node.expression):
self.stream.write("(")
self.visit(node.expression, context)
self.stream.write(")")
else:
self.visit(node.expression, context)
def visit_BinaryExpression(self, node: ast.BinaryExpression, context: PrinterState) -> None:
our_precedence = properties.precedence(node)
# All AST nodes that are built into BinaryExpression are currently left associative.
if properties.precedence(node.lhs) < our_precedence:
self.stream.write("(")
self.visit(node.lhs, context)
self.stream.write(")")
else:
self.visit(node.lhs, context)
self.stream.write(f" {node.op.name} ")
if properties.precedence(node.rhs) <= our_precedence:
self.stream.write("(")
self.visit(node.rhs, context)
self.stream.write(")")
else:
self.visit(node.rhs, context)
def visit_BitstringLiteral(self, node: ast.BitstringLiteral, context: PrinterState) -> None:
value = bin(node.value)[2:]
if len(value) < node.width:
value = "0" * (node.width - len(value)) + value
self.stream.write(f'"{value}"')
    def visit_IntegerLiteral(self, node: ast.IntegerLiteral, context: PrinterState) -> None:
        """Print an integer literal using Python's default representation."""
        self.stream.write(str(node.value))
    def visit_FloatLiteral(self, node: ast.FloatLiteral, context: PrinterState) -> None:
        """Print a real-number literal using Python's default representation."""
        self.stream.write(str(node.value))
    def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral, context: PrinterState) -> None:
        """Print an imaginary literal with the ``im`` suffix."""
        self.stream.write(str(node.value) + "im")
    def visit_BooleanLiteral(self, node: ast.BooleanLiteral, context: PrinterState) -> None:
        # OpenQASM spells booleans in lowercase, unlike Python's str(bool).
        self.stream.write("true" if node.value else "false")
    def visit_DurationLiteral(self, node: ast.DurationLiteral, context: PrinterState) -> None:
        """Print a duration literal: the value immediately followed by its unit."""
        self.stream.write(f"{node.value}{node.unit.name}")
    def visit_ArrayLiteral(self, node: ast.ArrayLiteral, context: PrinterState) -> None:
        """Print a brace-delimited, comma-separated array literal."""
        self._visit_sequence(node.values, context, start="{", end="}", separator=", ")
def visit_FunctionCall(self, node: ast.FunctionCall, context: PrinterState) -> None:
self.visit(node.name)
self._visit_sequence(node.arguments, context, start="(", end=")", separator=", ")
def visit_Cast(self, node: ast.Cast, context: PrinterState) -> None:
self.visit(node.type)
self.stream.write("(")
self.visit(node.argument)
self.stream.write(")")
    def visit_DiscreteSet(self, node: ast.DiscreteSet, context: PrinterState) -> None:
        """Print a discrete set ``{a, b, c}``."""
        self._visit_sequence(node.values, context, start="{", end="}", separator=", ")
    def visit_RangeDefinition(self, node: ast.RangeDefinition, context: PrinterState) -> None:
        """Print a range ``start : step : end``; each component is optional.

        Although the AST dataclass declares its fields in the order
        ``start, end, step``, the step is printed between the two colons as
        the concrete syntax requires.
        """
        if node.start is not None:
            self.visit(node.start, context)
        self.stream.write(":")
        if node.step is not None:
            self.visit(node.step, context)
            self.stream.write(":")
        if node.end is not None:
            self.visit(node.end, context)
    def visit_IndexExpression(self, node: ast.IndexExpression, context: PrinterState) -> None:
        """Print ``collection[index]``, bracketing the collection when it binds
        more loosely than the index operator."""
        if properties.precedence(node.collection) < properties.precedence(node):
            self.stream.write("(")
            self.visit(node.collection, context)
            self.stream.write(")")
        else:
            self.visit(node.collection, context)
        self.stream.write("[")
        # A DiscreteSet index prints its own braces; any other index is a list
        # of expressions/ranges joined with commas.
        if isinstance(node.index, ast.DiscreteSet):
            self.visit(node.index, context)
        else:
            self._visit_sequence(node.index, context, separator=", ")
        self.stream.write("]")
    def visit_IndexedIdentifier(self, node: ast.IndexedIdentifier, context: PrinterState) -> None:
        """Print an identifier followed by one ``[...]`` group per index."""
        self.visit(node.name, context)
        for index in node.indices:
            self.stream.write("[")
            if isinstance(index, ast.DiscreteSet):
                self.visit(index, context)
            else:
                self._visit_sequence(index, context, separator=", ")
            self.stream.write("]")
    def visit_Concatenation(self, node: ast.Concatenation, context: PrinterState) -> None:
        """Print a register concatenation ``lhs ++ rhs``, bracketing operands
        as needed so the output re-parses to the same AST."""
        lhs_precedence = properties.precedence(node.lhs)
        our_precedence = properties.precedence(node)
        rhs_precedence = properties.precedence(node.rhs)
        # Concatenation is fully associative, but this package parses the AST by
        # arbitrarily making it left-associative (since the design of the AST
        # forces us to make a choice).  We emit brackets to ensure that the
        # round-trip through our printer and parser does not change the AST.
        if lhs_precedence < our_precedence:
            self.stream.write("(")
            self.visit(node.lhs, context)
            self.stream.write(")")
        else:
            self.visit(node.lhs, context)
        self.stream.write(" ++ ")
        if rhs_precedence <= our_precedence:
            self.stream.write("(")
            self.visit(node.rhs, context)
            self.stream.write(")")
        else:
            self.visit(node.rhs, context)
    @_maybe_annotated
    def visit_QuantumGate(self, node: ast.QuantumGate, context: PrinterState) -> None:
        """Print a gate call: optional ``mod @`` modifiers, the gate name,
        optional parenthesised arguments, then the qubit operands.

        NOTE(review): ``node.duration`` is never emitted here, so a gate with
        a duration does not round-trip — confirm whether that is intentional.
        """
        self._start_line(context)
        if node.modifiers:
            self._visit_sequence(node.modifiers, context, end=" @ ", separator=" @ ")
        self.visit(node.name, context)
        if node.arguments:
            self._visit_sequence(node.arguments, context, start="(", end=")", separator=", ")
        self.stream.write(" ")
        self._visit_sequence(node.qubits, context, separator=", ")
        self._end_statement(context)
def visit_QuantumGateModifier(
self, node: ast.QuantumGateModifier, context: PrinterState
) -> None:
self.stream.write(node.modifier.name)
if node.argument is not None:
self.stream.write("(")
self.visit(node.argument, context)
self.stream.write(")")
    @_maybe_annotated
    def visit_QuantumPhase(self, node: ast.QuantumPhase, context: PrinterState) -> None:
        """Print a ``gphase(arg)`` statement with optional modifiers and an
        optional qubit list."""
        self._start_line(context)
        if node.modifiers:
            self._visit_sequence(node.modifiers, context, end=" @ ", separator=" @ ")
        self.stream.write("gphase(")
        self.visit(node.argument, context)
        self.stream.write(")")
        if node.qubits:
            self._visit_sequence(node.qubits, context, start=" ", separator=", ")
        self._end_statement(context)
    def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement, context: PrinterState) -> None:
        # Not a full statement: no line handling here, because ``measure`` can
        # also be embedded in declarations and assignment right-hand sides.
        self.stream.write("measure ")
        self.visit(node.qubit, context)
    @_maybe_annotated
    def visit_QuantumReset(self, node: ast.QuantumReset, context: PrinterState) -> None:
        """Print a ``reset <qubits>;`` statement."""
        self._start_line(context)
        self.stream.write("reset ")
        self.visit(node.qubits, context)
        self._end_statement(context)
    @_maybe_annotated
    def visit_QuantumBarrier(self, node: ast.QuantumBarrier, context: PrinterState) -> None:
        """Print a ``barrier`` statement; the qubit list is optional."""
        self._start_line(context)
        self.stream.write("barrier")
        if node.qubits:
            self.stream.write(" ")
            self._visit_sequence(node.qubits, context, separator=", ")
        self._end_statement(context)
    @_maybe_annotated
    def visit_QuantumMeasurementStatement(
        self, node: ast.QuantumMeasurementStatement, context: PrinterState
    ) -> None:
        """Print a stand-alone measurement statement.

        Three forms are produced: ``measure q;`` when there is no target,
        the arrow form ``measure q -> c;`` when ``self.old_measurement`` is
        set, and the assignment form ``c = measure q;`` otherwise.
        """
        self._start_line(context)
        if node.target is None:
            self.visit(node.measure, context)
        elif self.old_measurement:
            self.visit(node.measure, context)
            self.stream.write(" -> ")
            self.visit(node.target, context)
        else:
            self.visit(node.target, context)
            self.stream.write(" = ")
            self.visit(node.measure, context)
        self._end_statement(context)
    def visit_ClassicalArgument(self, node: ast.ClassicalArgument, context: PrinterState) -> None:
        """Print a classical argument: optional access modifier, type, name."""
        if node.access is not None:
            self.stream.write(
                "readonly " if node.access == ast.AccessControl.readonly else "mutable "
            )
        self.visit(node.type, context)
        self.stream.write(" ")
        self.visit(node.name, context)
    def visit_ExternArgument(self, node: ast.ExternArgument, context: PrinterState) -> None:
        """Print an ``extern`` argument: optional access modifier and the type
        only — extern arguments carry no name in the AST."""
        if node.access is not None:
            self.stream.write(
                "readonly " if node.access == ast.AccessControl.readonly else "mutable "
            )
        self.visit(node.type, context)
@_maybe_annotated
def visit_ClassicalDeclaration(
self, node: ast.ClassicalDeclaration, context: PrinterState
) -> None:
self._start_line(context)
self.visit(node.type)
self.stream.write(" ")
self.visit(node.identifier, context)
if node.init_expression is not None:
self.stream.write(" = ")
self.visit(node.init_expression)
self._end_statement(context)
@_maybe_annotated
def visit_IODeclaration(self, node: ast.IODeclaration, context: PrinterState) -> None:
self._start_line(context)
self.stream.write(f"{node.io_identifier.name} ")
self.visit(node.type)
self.stream.write(" ")
self.visit(node.identifier, context)
self._end_statement(context)
@_maybe_annotated
def visit_ConstantDeclaration(
self, node: ast.ConstantDeclaration, context: PrinterState
) -> None:
self._start_line(context)
self.stream.write("const ")
self.visit(node.type, context)
self.stream.write(" ")
self.visit(node.identifier, context)
self.stream.write(" = ")
self.visit(node.init_expression, context)
self._end_statement(context)
def visit_IntType(self, node: ast.IntType, context: PrinterState) -> None:
self.stream.write("int")
if node.size is not None:
self.stream.write("[")
self.visit(node.size, context)
self.stream.write("]")
def visit_UintType(self, node: ast.UintType, context: PrinterState) -> None:
self.stream.write("uint")
if node.size is not None:
self.stream.write("[")
self.visit(node.size, context)
self.stream.write("]")
def visit_FloatType(self, node: ast.FloatType, context: PrinterState) -> None:
self.stream.write("float")
if node.size is not None:
self.stream.write("[")
self.visit(node.size, context)
self.stream.write("]")
def visit_ComplexType(self, node: ast.ComplexType, context: PrinterState) -> None:
self.stream.write("complex")
if node.base_type is not None:
self.stream.write("[")
self.visit(node.base_type, context)
self.stream.write("]")
def visit_AngleType(self, node: ast.AngleType, context: PrinterState) -> None:
self.stream.write("angle")
if node.size is not None:
self.stream.write("[")
self.visit(node.size, context)
self.stream.write("]")
def visit_BitType(self, node: ast.BitType, context: PrinterState) -> None:
self.stream.write("bit")
if node.size is not None:
self.stream.write("[")
self.visit(node.size, context)
self.stream.write("]")
def visit_BoolType(self, node: ast.BoolType, context: PrinterState) -> None:
self.stream.write("bool")
    def visit_ArrayType(self, node: ast.ArrayType, context: PrinterState) -> None:
        """Print ``array[base_type, dim, ...]`` for storage-owning arrays."""
        self.stream.write("array[")
        self.visit(node.base_type, context)
        self._visit_sequence(node.dimensions, context, start=", ", end="]", separator=", ")
    def visit_ArrayReferenceType(self, node: ast.ArrayReferenceType, context: PrinterState) -> None:
        """Print an array reference type.  The dimensions are either a list of
        expressions (one per dimension), or a single expression printed as
        ``#dim=<expr>`` giving only the number of dimensions."""
        self.stream.write("array[")
        self.visit(node.base_type, context)
        self.stream.write(", ")
        if isinstance(node.dimensions, ast.Expression):
            self.stream.write("#dim=")
            self.visit(node.dimensions, context)
        else:
            self._visit_sequence(node.dimensions, context, separator=", ")
        self.stream.write("]")
    def visit_DurationType(self, node: ast.DurationType, context: PrinterState) -> None:
        # ``duration`` takes no designator.
        self.stream.write("duration")
    def visit_StretchType(self, node: ast.StretchType, context: PrinterState) -> None:
        # ``stretch`` takes no designator.
        self.stream.write("stretch")
    @_maybe_annotated
    def visit_CalibrationGrammarDeclaration(
        self, node: ast.CalibrationGrammarDeclaration, context: PrinterState
    ) -> None:
        """Print a ``defcalgrammar "<name>";`` statement."""
        self._write_statement(f'defcalgrammar "{node.name}"', context)
    @_maybe_annotated
    def visit_CalibrationDefinition(
        self, node: ast.CalibrationDefinition, context: PrinterState
    ) -> None:
        """Print a ``defcal`` definition.  The body is an opaque string in the
        AST, so it is written out verbatim between the braces."""
        self._start_line(context)
        self.stream.write("defcal ")
        self.visit(node.name, context)
        self._visit_sequence(node.arguments, context, start="(", end=")", separator=", ")
        self.stream.write(" ")
        self._visit_sequence(node.qubits, context, separator=", ")
        if node.return_type is not None:
            self.stream.write(" -> ")
            self.visit(node.return_type, context)
        self.stream.write(" {")
        # At this point we _should_ be deferring to something else to handle
        # formatting the calibration grammar statements, but neither we nor the
        # AST are set up to do that.
        self.stream.write(node.body)
        self.stream.write("}")
        self._end_line(context)
    @_maybe_annotated
    def visit_CalibrationStatement(
        self, node: ast.CalibrationStatement, context: PrinterState
    ) -> None:
        """Print an inline ``cal { ... }`` block; the body is an opaque string
        written out verbatim."""
        self._start_line(context)
        self.stream.write("cal {")
        # At this point we _should_ be deferring to something else to handle
        # formatting the calibration grammar statements, but neither we nor the
        # AST are set up to do that.
        self.stream.write(node.body)
        self.stream.write("}")
        self._end_line(context)
@_maybe_annotated
def visit_SubroutineDefinition(
self, node: ast.SubroutineDefinition, context: PrinterState
) -> None:
self._start_line(context)
self.stream.write("def ")
self.visit(node.name, context)
self._visit_sequence(node.arguments, context, start="(", end=")", separator=", ")
if node.return_type is not None:
self.stream.write(" -> ")
self.visit(node.return_type, context)
self.stream.write(" {")
self._end_line(context)
with context.increase_scope():
for statement in node.body:
self.visit(statement, context)
self._start_line(context)
self.stream.write("}")
self._end_line(context)
def visit_QuantumArgument(self, node: ast.QuantumArgument, context: PrinterState) -> None:
self.stream.write("qubit")
if node.size is not None:
self.stream.write("[")
self.visit(node.size, context)
self.stream.write("]")
self.stream.write(" ")
self.visit(node.name, context)
@_maybe_annotated
def visit_ReturnStatement(self, node: ast.ReturnStatement, context: PrinterState) -> None:
self._start_line(context)
self.stream.write("return")
if node.expression is not None:
self.stream.write(" ")
self.visit(node.expression)
self._end_statement(context)
    @_maybe_annotated
    def visit_BreakStatement(self, node: ast.BreakStatement, context: PrinterState) -> None:
        """Print a ``break;`` statement."""
        self._write_statement("break", context)
    @_maybe_annotated
    def visit_ContinueStatement(self, node: ast.ContinueStatement, context: PrinterState) -> None:
        """Print a ``continue;`` statement."""
        self._write_statement("continue", context)
    @_maybe_annotated
    def visit_EndStatement(self, node: ast.EndStatement, context: PrinterState) -> None:
        """Print an ``end;`` statement."""
        self._write_statement("end", context)
    @_maybe_annotated
    def visit_BranchingStatement(self, node: ast.BranchingStatement, context: PrinterState) -> None:
        """Print an ``if``/``else`` statement, optionally flattening a nested
        ``else { if ... }`` into ``else if`` (see ``self.chain_else_if``)."""
        self._start_line(context)
        self.stream.write("if (")
        self.visit(node.condition, context)
        self.stream.write(") {")
        self._end_line(context)
        with context.increase_scope():
            for statement in node.if_block:
                self.visit(statement, context)
        self._start_line(context)
        self.stream.write("}")
        if node.else_block:
            self.stream.write(" else ")
            # Special handling to flatten a perfectly nested structure of
            #   if {...} else { if {...} else {...} }
            # into the simpler
            #   if {...} else if {...} else {...}
            # but only if we're allowed to by our options.
            if (
                self.chain_else_if
                and len(node.else_block) == 1
                and isinstance(node.else_block[0], ast.BranchingStatement)
                and not node.annotations
            ):
                # The inner ``if`` continues on the current line, so suppress
                # the indent it would otherwise emit.
                context.skip_next_indent = True
                self.visit(node.else_block[0], context)
                # Don't end the line, because the outer-most `if` block will.
            else:
                self.stream.write("{")
                self._end_line(context)
                with context.increase_scope():
                    for statement in node.else_block:
                        self.visit(statement, context)
                self._start_line(context)
                self.stream.write("}")
                self._end_line(context)
        else:
            self._end_line(context)
@_maybe_annotated
def visit_WhileLoop(self, node: ast.WhileLoop, context: PrinterState) -> None:
self._start_line(context)
self.stream.write("while (")
self.visit(node.while_condition, context)
self.stream.write(") {")
self._end_line(context)
with context.increase_scope():
for statement in node.block:
self.visit(statement, context)
self._start_line(context)
self.stream.write("}")
self._end_line(context)
@_maybe_annotated
def visit_ForInLoop(self, node: ast.ForInLoop, context: PrinterState) -> None:
self._start_line(context)
self.stream.write("for ")
self.visit(node.type)
self.stream.write(" ")
self.visit(node.identifier, context)
self.stream.write(" in ")
if isinstance(node.set_declaration, ast.RangeDefinition):
self.stream.write("[")
self.visit(node.set_declaration, context)
self.stream.write("]")
else:
self.visit(node.set_declaration, context)
self.stream.write(" {")
self._end_line(context)
with context.increase_scope():
for statement in node.block:
self.visit(statement, context)
self._start_line(context)
self.stream.write("}")
self._end_line(context)
    @_maybe_annotated
    def visit_DelayInstruction(self, node: ast.DelayInstruction, context: PrinterState) -> None:
        """Print a ``delay[duration] <qubits>;`` statement; the qubit list is
        optional."""
        self._start_line(context)
        self.stream.write("delay[")
        self.visit(node.duration, context)
        self.stream.write("]")
        if node.qubits:
            self.stream.write(" ")
            self._visit_sequence(node.qubits, context, separator=", ")
        self._end_statement(context)
    @_maybe_annotated
    def visit_Box(self, node: ast.Box, context: PrinterState) -> None:
        """Print a ``box`` statement with optional ``[duration]`` and a
        brace-delimited body printed one scope deeper."""
        self._start_line(context)
        self.stream.write("box")
        if node.duration is not None:
            self.stream.write("[")
            self.visit(node.duration, context)
            self.stream.write("]")
        self.stream.write(" {")
        self._end_line(context)
        with context.increase_scope():
            for statement in node.body:
                self.visit(statement, context)
        self._start_line(context)
        self.stream.write("}")
        self._end_line(context)
    def visit_DurationOf(self, node: ast.DurationOf, context: PrinterState) -> None:
        """Print ``durationof(...)``.  The target is either a single AST node
        or a list of statements printed as a brace-delimited block."""
        self.stream.write("durationof(")
        if isinstance(node.target, ast.QASMNode):
            self.visit(node.target, context)
        else:
            self.stream.write("{")
            self._end_line(context)
            with context.increase_scope():
                for statement in node.target:
                    self.visit(statement, context)
            self._start_line(context)
            self.stream.write("}")
        self.stream.write(")")
def visit_SizeOf(self, node: ast.SizeOf, context: PrinterState) -> None:
self.stream.write("sizeof(")
self.visit(node.target, context)
if node.index is not None:
self.stream.write(", ")
self.visit(node.index)
self.stream.write(")")
    @_maybe_annotated
    def visit_AliasStatement(self, node: ast.AliasStatement, context: PrinterState) -> None:
        """Print a ``let <target> = <value>;`` alias statement."""
        self._start_line(context)
        self.stream.write("let ")
        self.visit(node.target, context)
        self.stream.write(" = ")
        self.visit(node.value, context)
        self._end_statement(context)
    @_maybe_annotated
    def visit_ClassicalAssignment(
        self, node: ast.ClassicalAssignment, context: PrinterState
    ) -> None:
        """Print ``lvalue <op> rvalue;`` where ``op`` is ``=`` or a compound
        assignment operator."""
        self._start_line(context)
        self.visit(node.lvalue, context)
        self.stream.write(f" {node.op.name} ")
        self.visit(node.rvalue, context)
        self._end_statement(context)
    def visit_Annotation(self, node: ast.Annotation, context: PrinterState) -> None:
        """Print an ``@keyword [command]`` annotation line (no semicolon)."""
        self._start_line(context)
        self.stream.write("@")
        self.stream.write(node.keyword)
        if node.command is not None:
            self.stream.write(" ")
            self.stream.write(node.command)
        self._end_line(context)
    def visit_Pragma(self, node: ast.Pragma, context: PrinterState) -> None:
        """Print a ``pragma <command>`` line; the command is emitted verbatim."""
        self._start_line(context)
        self.stream.write("pragma ")
        self.stream.write(node.command)
        self._end_line(context)
| 33,232 | 37.688009 | 100 | py |
openqasm | openqasm-main/source/openqasm/openqasm3/properties.py | from . import ast
__all__ = ["precedence"]
# Precedence levels for expression node types other than BinaryExpression.
# Larger numbers bind more tightly; the numbering interleaves with
# _BINARY_PRECEDENCE_TABLE below (binary operators occupy levels 1-10 and 12).
_PRECEDENCE_TABLE = {
    ast.Concatenation: 0,
    # ... the rest of the binary operations come very early ...
    ast.UnaryExpression: 11,
    # ... power expression ...
    # "Call"-like expressions bind very tightly.
    ast.IndexExpression: 13,
    ast.FunctionCall: 13,
    ast.Cast: 13,
    ast.DurationOf: 13,
    # Identifiers/literals are the top, since you never need to put brackets
    # around a single literal or identifier.
    ast.Identifier: 14,
    ast.BitstringLiteral: 14,
    ast.BooleanLiteral: 14,
    ast.DurationLiteral: 14,
    ast.FloatLiteral: 14,
    ast.ImaginaryLiteral: 14,
    ast.IntegerLiteral: 14,
    ast.ArrayLiteral: 14,
}
# Precedence levels for binary operators, keyed by operator rather than node
# type, because all binary operators share the single BinaryExpression node.
_BINARY_PRECEDENCE_TABLE = {
    ast.BinaryOperator["||"]: 1,
    ast.BinaryOperator["&&"]: 2,
    ast.BinaryOperator["|"]: 3,
    ast.BinaryOperator["^"]: 4,
    ast.BinaryOperator["&"]: 5,
    # equality
    ast.BinaryOperator["=="]: 6,
    ast.BinaryOperator["!="]: 6,
    # comparisons
    ast.BinaryOperator["<"]: 7,
    ast.BinaryOperator["<="]: 7,
    ast.BinaryOperator[">"]: 7,
    ast.BinaryOperator[">="]: 7,
    # bitshifts
    ast.BinaryOperator["<<"]: 8,
    ast.BinaryOperator[">>"]: 8,
    # additive
    ast.BinaryOperator["+"]: 9,
    ast.BinaryOperator["-"]: 9,
    # multiplicative
    ast.BinaryOperator["*"]: 10,
    ast.BinaryOperator["/"]: 10,
    ast.BinaryOperator["%"]: 10,
    # ... unary expression goes here ...
    ast.BinaryOperator["**"]: 12,
}
def precedence(node: ast.QASMNode) -> int:
    """Get the integer value of the precedence level of an expression node.

    The actual numeric value has no real meaning and is subject to change
    between different versions of the AST and versions of the language.  It is
    only intended to be used as a key for comparisons between different
    expressions.

    The number is such that if an AST node representing expression ``A``
    contains a subexpression ``B``, then on output, ``B`` needs brackets around
    it if its precedence is lower than ``A``.  If ``A`` is a left-associative
    (right-associative) binary operator, then its right-hand (left-hand)
    subexpression also needs brackets if the precendence of the two are equal.
    """
    # Most node types have a fixed precedence; binary expressions all share
    # one node type, so their precedence is looked up by operator instead.
    try:
        return _PRECEDENCE_TABLE[type(node)]
    except KeyError:
        pass
    if isinstance(node, ast.BinaryExpression):
        return _BINARY_PRECEDENCE_TABLE[node.op]
    raise ValueError(f"Node '{node}' has no expression precedence. Is it an Expression?")
| 2,522 | 32.64 | 89 | py |
openqasm | openqasm-main/source/openqasm/openqasm3/__init__.py | """
===================================
OpenQASM 3 Python reference package
===================================
This package contains the reference abstract syntax tree (AST) for representing
OpenQASM 3 programs, tools to parse text into this AST, and tools to manipulate
the AST.
The AST itself is in the :obj:`.ast` module. There is a reference parser in the
:obj:`.parser` module, which requires the ``[parser]`` extra to be installed.
With the ``[parser]`` extra installed, the simplest interface to the parser is
the :obj:`~parser.parse` function.
"""
__version__ = "0.5.0"
from . import ast, visitor, properties
from .printer import dump, dumps
# Try to initialise the 'parsing' extra components.
try:
import antlr4
except ModuleNotFoundError:
pass
else:
# Any import errors in section are of interest to the user, and should be propagated.
from . import parser
from .parser import parse
| 922 | 27.84375 | 89 | py |
openqasm | openqasm-main/source/openqasm/openqasm3/ast.py | """
========================================
Abstract Syntax Tree (``openqasm3.ast``)
========================================
.. currentmodule:: openqasm3.ast
The reference abstract syntax tree (AST) for OpenQASM 3 programs.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import List, Optional, Union
from enum import Enum
__all__ = [
"AccessControl",
"AliasStatement",
"AngleType",
"Annotation",
"ArrayLiteral",
"ArrayReferenceType",
"ArrayType",
"AssignmentOperator",
"BinaryExpression",
"BinaryOperator",
"BitType",
"BitstringLiteral",
"BoolType",
"BooleanLiteral",
"Box",
"BranchingStatement",
"BreakStatement",
"CalibrationDefinition",
"CalibrationGrammarDeclaration",
"CalibrationStatement",
"Cast",
"ClassicalArgument",
"ClassicalAssignment",
"ClassicalDeclaration",
"ClassicalType",
"ComplexType",
"Concatenation",
"ConstantDeclaration",
"ContinueStatement",
"DelayInstruction",
"DiscreteSet",
"DurationLiteral",
"DurationOf",
"DurationType",
"EndStatement",
"Expression",
"ExpressionStatement",
"ExternArgument",
"ExternDeclaration",
"FloatLiteral",
"FloatType",
"ForInLoop",
"FunctionCall",
"GateModifierName",
"IODeclaration",
"IOKeyword",
"Identifier",
"ImaginaryLiteral",
"Include",
"IndexExpression",
"IndexedIdentifier",
"IntType",
"IntegerLiteral",
"Pragma",
"Program",
"QASMNode",
"QuantumArgument",
"QuantumBarrier",
"QuantumGate",
"QuantumGateDefinition",
"QuantumGateModifier",
"QuantumMeasurement",
"QuantumMeasurementStatement",
"QuantumPhase",
"QuantumReset",
"QuantumStatement",
"QubitDeclaration",
"RangeDefinition",
"ReturnStatement",
"SizeOf",
"Span",
"Statement",
"StretchType",
"SubroutineDefinition",
"TimeUnit",
"UintType",
"UnaryExpression",
"UnaryOperator",
"WhileLoop",
]
AccessControl = Enum("AccessControl", "readonly mutable")
AssignmentOperator = Enum("AssignmentOperator", "= += -= *= /= &= |= ~= ^= <<= >>= %= **=")
BinaryOperator = Enum("BinaryOperator", "> < >= <= == != && || | ^ & << >> + - * / % **")
GateModifierName = Enum("GateModifier", "inv pow ctrl negctrl")
IOKeyword = Enum("IOKeyword", "input output")
TimeUnit = Enum("TimeUnit", "dt ns us ms s")
UnaryOperator = Enum("UnaryOperator", "~ ! -")
@dataclass
class Span:
"""
Start and end line/column in the source file
We use the Antlr convention. The starting line number is 1 and starting column number is 0.
"""
start_line: int
start_column: int
end_line: int
end_column: int
@dataclass
class QASMNode:
"""Base class for all OpenQASM 3 nodes"""
span: Optional[Span] = field(init=False, default=None, compare=False)
"""
The span(location) of the node in the source code.
Because not all the nodes are generated from source, the span is optional.
To make it easier to write unit test, we exclude span from the generated __eq__().
"""
@dataclass
class Program(QASMNode):
"""
An entire OpenQASM 3 program represented by a list of top level statements
"""
statements: List[Statement]
version: Optional[str] = None
@dataclass
class Annotation(QASMNode):
"""An annotation applied to a statment."""
keyword: str
command: Optional[str] = None
@dataclass
class Statement(QASMNode):
"""A statement: anything that can appear on its own line"""
annotations: List[Annotation] = field(init=False, default_factory=list)
@dataclass
class Include(Statement):
"""
An include statement
"""
filename: str
@dataclass
class ExpressionStatement(Statement):
"""A statement that contains a single expression"""
expression: Expression
# Note that QubitDeclaration is not a valid QuantumStatement, because qubits
# can only be declared in global scopes, not in gates.
@dataclass
class QubitDeclaration(Statement):
"""
Global qubit declaration
Example::
qubit q;
qubit[4] q;
q // <- qubit
4 // <- size
"""
qubit: Identifier
size: Optional[Expression] = None
@dataclass
class QuantumGateDefinition(Statement):
"""
Define a new quantum gate
Example::
gate cx c, t {
ctrl @ unitary(pi, 0, pi) c, t;
}
"""
name: Identifier
arguments: List[Identifier]
qubits: List[Identifier]
body: List[QuantumStatement]
class QuantumStatement(Statement):
"""Statements that may appear inside a gate declaration"""
@dataclass
class ExternDeclaration(Statement):
"""
A extern declaration
Example::
extern get_pauli(int[prec]) -> bit[2 * n];
get_pauli // <- name
int[prec] // <- classical type
bit[2 * n] // <- return type
"""
name: Identifier
arguments: List[ExternArgument]
return_type: Optional[ClassicalType] = None
class Expression(QASMNode):
"""An expression: anything that returns a value"""
@dataclass
class Identifier(Expression):
"""
An identifier
Example::
q1
"""
name: str
@dataclass
class UnaryExpression(Expression):
"""
A unary expression
Example::
~b
!bool
-i
"""
op: UnaryOperator
expression: Expression
@dataclass
class BinaryExpression(Expression):
"""
A binary expression
Example::
q1 || q2
"""
op: BinaryOperator
lhs: Expression
rhs: Expression
@dataclass
class IntegerLiteral(Expression):
"""
An integer literal
Example::
1
"""
value: int
@dataclass
class FloatLiteral(Expression):
    """
    A real number literal

    Example::

        1.1
    """

    value: float
@dataclass
class ImaginaryLiteral(Expression):
    """
    An imaginary number literal

    Example::

        1.1im
    """

    # Magnitude of the imaginary component; the ``im`` suffix is implied.
    value: float
@dataclass
class BooleanLiteral(Expression):
    """
    A boolean literal

    Example::

        true
        false
    """

    value: bool
@dataclass
class BitstringLiteral(Expression):
"""A literal bitstring value. The ``value`` is the numerical value of the
bitstring, and the ``width`` is the number of digits given."""
value: int
width: int
@dataclass
class DurationLiteral(Expression):
"""
A duration literal
Example::
1.0ns
"""
value: float
unit: TimeUnit
@dataclass
class ArrayLiteral(Expression):
"""Array literal, used to initialise declared arrays.
For example::
array[uint[8], 2] row = {1, 2};
array[uint[8], 2, 2] my_array = {{1, 2}, {3, 4}};
array[uint[8], 2, 2] my_array = {row, row};
"""
values: List[Expression]
@dataclass
class FunctionCall(Expression):
"""
A function call expression
Example::
foo(1)
foo // <- name
"""
name: Identifier
arguments: List[Expression]
@dataclass
class Cast(Expression):
"""
A cast call expression
Example::
counts += int[1](b);
"""
type: ClassicalType
argument: Expression
@dataclass
class DiscreteSet(QASMNode):
"""
A set of discrete values. This can be used for the values in a ``for``
loop, or to index certain values out of a register::
for i in {1, 2, 3} {}
let alias = qubits[{2, 3, 4}];
"""
values: List[Expression]
@dataclass
class RangeDefinition(QASMNode):
"""
Range definition.
Example::
1:2
1:1:10
:
"""
start: Optional[Expression]
end: Optional[Expression]
step: Optional[Expression]
IndexElement = Union[DiscreteSet, List[Union[Expression, RangeDefinition]]]
@dataclass
class IndexExpression(Expression):
"""
An index expression.
Example::
q[1]
"""
collection: Expression
index: IndexElement
@dataclass
class IndexedIdentifier(QASMNode):
"""An indentifier with index operators, such that it can be used as an
lvalue. The list of indices is subsequent index brackets, so in::
a[{1, 2, 3}][0:1, 0:1]
the list of indices will have two elements. The first will be a
:class:`.DiscreteSet`, and the second will be a list of two
:class:`.RangeDefinition`\\ s.
"""
name: Identifier
indices: List[IndexElement]
@dataclass
class Concatenation(Expression):
"""
Concatenation of two registers, for example::
a ++ b
a[2:3] ++ a[0:1]
"""
lhs: Expression
rhs: Expression
@dataclass
class QuantumGate(QuantumStatement):
"""
Invoking a quantum gate
Example::
cx[dur] 0, 1;
or
ctrl @ p(λ) a, b;
ctrl @ // <- quantumGateModifier
p // <- quantumGateName
λ // <- argument
a, b // <- qubit
"""
modifiers: List[QuantumGateModifier]
name: Identifier
arguments: List[Expression]
qubits: List[Union[IndexedIdentifier, Identifier]]
duration: Optional[Expression] = None
@dataclass
class QuantumGateModifier(QASMNode):
"""
A quantum gate modifier
Attributes:
modifier: 'inv', 'pow', or 'ctrl'
expression: only pow modifier has expression.
Example::
inv @
pow(1/2)
ctrl
"""
modifier: GateModifierName
argument: Optional[Expression] = None
@dataclass
class QuantumPhase(QuantumStatement):
"""
A quantum phase instruction
Example::
ctrl @ gphase(λ) a;
ctrl @ // <- quantumGateModifier
λ // <- argument
a // <- qubit
"""
modifiers: List[QuantumGateModifier]
argument: Expression
qubits: List[Union[IndexedIdentifier, Identifier]]
# Not a full expression because it can only be used in limited contexts.
@dataclass
class QuantumMeasurement(QASMNode):
"""
A quantum measurement instruction
Example::
measure q;
"""
qubit: Union[IndexedIdentifier, Identifier]
# Note that this is not a QuantumStatement because it involves access to
# classical bits.
@dataclass
class QuantumMeasurementStatement(Statement):
"""Stand-alone statement of a quantum measurement, potentially assigning the
result to a classical variable. This is not the only statement that
`measure` can appear in (it can also be in classical declaration statements
and returns)."""
measure: QuantumMeasurement
target: Optional[Union[IndexedIdentifier, Identifier]]
@dataclass
class QuantumBarrier(QuantumStatement):
"""
A quantum barrier instruction
Example::
barrier q;
"""
qubits: List[Expression]
@dataclass
class QuantumReset(QuantumStatement):
"""
A reset instruction.
Example::
reset q;
"""
qubits: Union[IndexedIdentifier, Identifier]
@dataclass
class ClassicalArgument(QASMNode):
"""
Classical argument for a gate or subroutine declaration
"""
type: ClassicalType
name: Identifier
access: Optional[AccessControl] = None
@dataclass
class ExternArgument(QASMNode):
"""Classical argument for an extern declaration."""
type: ClassicalType
access: Optional[AccessControl] = None
@dataclass
class ClassicalDeclaration(Statement):
"""
Classical variable declaration
Example::
bit c;
"""
type: ClassicalType
identifier: Identifier
init_expression: Optional[Union[Expression, QuantumMeasurement]] = None
@dataclass
class IODeclaration(Statement):
    """
    Input/output variable declaration

    Example::

        input angle[16] theta;
        output bit select;
    """

    io_identifier: IOKeyword
    type: ClassicalType
    identifier: Identifier
@dataclass
class ConstantDeclaration(Statement):
"""
Constant declaration
Example::
const int[16] n = 10;
"""
type: ClassicalType
identifier: Identifier
init_expression: Expression
class ClassicalType(QASMNode):
    """
    Base class for classical type
    """
@dataclass
class IntType(ClassicalType):
    """
    Node representing a classical ``int`` (signed integer) type, with an
    optional precision.
    Example::
        int[8]
        int[16]
    """
    size: Optional[Expression] = None  # bit width; ``None`` means no explicit precision
@dataclass
class UintType(ClassicalType):
    """
    Node representing a classical ``uint`` (unsigned integer) type, with an
    optional precision.
    Example::
        uint[8]
        uint[16]
    """
    size: Optional[Expression] = None  # bit width; ``None`` means no explicit precision
@dataclass
class FloatType(ClassicalType):
    """
    Node representing the classical ``float`` type, with the particular IEEE-754
    floating-point size optionally specified.
    Example::
        float[16]
        float[64]
    """
    size: Optional[Expression] = None  # floating-point width; ``None`` means unspecified
@dataclass
class ComplexType(ClassicalType):
    """
    Complex ClassicalType. Its real and imaginary parts are based on other classical types.
    Example::
        complex[float]
        complex[float[32]]
    """
    base_type: Optional[FloatType]  # component float type; ``None`` for a bare ``complex``
@dataclass
class AngleType(ClassicalType):
    """
    Node representing the classical ``angle`` type, with an optional precision.
    Example::
        angle[8]
        angle[16]
    """
    size: Optional[Expression] = None
@dataclass
class BitType(ClassicalType):
    """
    Node representing the classical ``bit`` type, with an optional size.
    Example::
        bit[8]
        creg[8]
    """
    size: Optional[Expression] = None  # register size; ``None`` for a single bit
class BoolType(ClassicalType):
    """
    Leaf node representing the Boolean classical type.
    """
@dataclass
class ArrayType(ClassicalType):
    """Type of arrays that include allocation of the storage.
    This is generally any array declared as a standard statement, but not
    arrays declared by being arguments to subroutines.
    """
    base_type: Union[
        IntType, UintType, FloatType, AngleType, DurationType, BitType, BoolType, ComplexType
    ]
    dimensions: List[Expression]  # one length expression per dimension
@dataclass
class ArrayReferenceType(ClassicalType):
    """Type of arrays that are a reference to an array with allocated storage.
    This is generally any array declared as a subroutine argument. The
    dimensions can be either a list of expressions (one for each dimension), or
    a single expression, which is the number of dimensions.
    For example::
        // `a` will have dimensions `[IntegerLiteral(2)]` (with a list), because
        // it is a 1D array, with a length of 2.
        def f(const array[uint[8], 2] a) {}
        // `b` will have dimension `IntegerLiteral(3)` (no list), because it is
        // a 3D array, but we don't know the lengths of its dimensions.
        def f(const array[uint[8], #dim=3] b) {}
    """
    base_type: Union[
        IntType, UintType, FloatType, AngleType, DurationType, BitType, BoolType, ComplexType
    ]
    # Either a list of per-dimension lengths, or a single expression giving only
    # the number of dimensions (the ``#dim=n`` form, see docstring above).
    dimensions: Union[Expression, List[Expression]]
class DurationType(ClassicalType):
    """
    Leaf node representing the ``duration`` type.
    """
class StretchType(ClassicalType):
    """
    Leaf node representing the ``stretch`` type.
    """
@dataclass
class CalibrationGrammarDeclaration(Statement):
    """
    Calibration grammar declaration
    Example::
        defcalgrammar "openpulse";
    """
    name: str  # name of the pulse grammar, e.g. ``"openpulse"``
@dataclass
class CalibrationStatement(Statement):
    """An inline ``cal`` statement for embedded pulse-grammar interactions.
    Example::
        cal {
            shift_phase(drive($0), theta);
        }
    """
    body: str  # raw, unparsed pulse-grammar source
@dataclass
class CalibrationDefinition(Statement):
    """
    Calibration definition
    Example::
        defcal rz(angle[20] theta) q {
            shift_phase drive(q), -theta;
        }
    """
    name: Identifier
    arguments: List[Union[ClassicalArgument, Expression]]
    qubits: List[Identifier]
    return_type: Optional[ClassicalType]
    body: str  # raw, unparsed pulse-grammar source
@dataclass
class SubroutineDefinition(Statement):
    """
    Subroutine definition
    Example::
        def measure(qubit q) -> bit {
            s q;
            h q;
            return measure q;
        }
    """
    name: Identifier
    arguments: List[Union[ClassicalArgument, QuantumArgument]]
    body: List[Statement]
    return_type: Optional[ClassicalType] = None  # ``None`` when the subroutine returns nothing
@dataclass
class QuantumArgument(QASMNode):
    """
    Quantum argument for a subroutine declaration
    """
    name: Identifier
    size: Optional[Expression] = None  # register size; ``None`` for a single qubit
@dataclass
class ReturnStatement(Statement):
    """
    Classical or quantum return statement
    Example::
        return measure q;
        return a + b
    """
    expression: Optional[Union[Expression, QuantumMeasurement]] = None
class BreakStatement(Statement):
    """
    Break statement
    Example::
        break;
    """
class ContinueStatement(Statement):
    """
    Continue statement
    Example::
        continue;
    """
class EndStatement(Statement):
    """
    End statement
    Example::
        end;
    """
@dataclass
class BranchingStatement(Statement):
    """
    Branch (``if``) statement
    Example::
        if (temp == 1) {
            ry(-pi / 2) scratch[0];
        } else continue;
    """
    condition: Expression
    if_block: List[Statement]
    else_block: List[Statement]  # empty list when there is no ``else`` branch
@dataclass
class WhileLoop(Statement):
    """
    While loop
    Example::
        while(~success) {
            reset magic;
            ry(pi / 4) magic;
            success = distill(magic, scratch);
        }
    """
    while_condition: Expression
    block: List[Statement]
@dataclass
class ForInLoop(Statement):
    """
    For in loop
    Example::
        for i in [0: 2] {
            majority a[i], b[i + 1], a[i + 1];
        }
    """
    type: ClassicalType  # declared type of the loop variable
    identifier: Identifier  # the loop variable itself
    set_declaration: Union[RangeDefinition, DiscreteSet, Expression]  # what is iterated over
    block: List[Statement]
@dataclass
class DelayInstruction(QuantumStatement):
    """
    Delay instruction
    Example::
        delay[start_stretch] $0;
    """
    duration: Expression
    qubits: List[Union[IndexedIdentifier, Identifier]]
@dataclass
class Box(QuantumStatement):
    """
    Timing box
    Example::
        box [maxdur] {
            delay[start_stretch] $0;
            x $0;
        }
    """
    duration: Optional[Expression]  # ``None`` when no explicit duration is given
    body: List[QuantumStatement]
@dataclass
class DurationOf(Expression):
    """
    Duration Of
    Example::
        durationof({x $0;})
    """
    target: List[Statement]
@dataclass
class SizeOf(Expression):
    """``sizeof`` an array's dimensions."""
    target: Expression
    # ``None`` queries the full size; otherwise selects one dimension to measure.
    index: Optional[Expression] = None
@dataclass
class AliasStatement(Statement):
    """
    Alias statement
    Example::
        let a = qubits[0];
    """
    target: Identifier
    value: Union[Identifier, Concatenation]
@dataclass
class ClassicalAssignment(Statement):
    """
    Classical assignment
    Example::
        a[0] = 1;
    """
    lvalue: Union[Identifier, IndexedIdentifier]
    op: AssignmentOperator  # e.g. ``=``, ``+=``, ``*=``
    rvalue: Expression
@dataclass
class Pragma(QASMNode):
    """
    Pragma
    Example::
        #pragma val1 val2 val3
    """
    command: str  # everything after ``#pragma``, kept as a raw string
| 19,394 | 16.777269 | 95 | py |
openqasm | openqasm-main/source/openqasm/openqasm3/_antlr/__init__.py | """ANTLR-generated files for parsing OpenQASM 3 files.
This package sets up its import contents to be taken from the generated files
whose ANTLR version matches the installed version of the ANTLR runtime. The
generated files should be placed in directories called ``_<major>_<minor>``,
where `major` is 4, and `minor` is the minor version of ANTLR (e.g. if ANTLR
4.10 was used, those files should be in ``_4_10``).
The ANTLR files from more than one version of ANTLR can be present at once. This package will
dynamically load the correct version based on the installed version of the runtime.
"""
from importlib.abc import MetaPathFinder as _MetaPathFinder, Loader as _Loader
import pathlib
import sys
# Use the stdlib metadata reader on 3.10+, the ``importlib_metadata`` backport
# otherwise.
if sys.version_info < (3, 10):
    from importlib_metadata import version as _version
else:
    from importlib.metadata import version as _version
# The `antlr4` package is supplied by `antlr4_python3_runtime`.
_parts = [int(x) for x in _version("antlr4_python3_runtime").split(".")]
# Generated parsers live in directories named ``_<major>_<minor>``.
_resolved_dir = f"_{_parts[0]}_{_parts[1]}"
_antlr_dir = pathlib.Path(__file__).parent
if not (_antlr_dir / _resolved_dir).is_dir():
    # Collect the versions that *are* shipped so the error message is actionable.
    _available = [path.parent.name[1:] for path in _antlr_dir.rglob("qasm3Parser.py")]
    if not _available:
        raise ImportError("No ANTLR-generated parsers found.")
    raise ImportError(
        f"Missing ANTLR-generated parser for version '{_parts[0]}.{_parts[1]}'."
        f" Available versions: {_available!r}"
    )
class ANTLRMetaPathFinder(_MetaPathFinder):
    """Redirect module/package lookups in `openqasm3.antlr` to the concrete implementations
    pre-generated by the ANTLR version that matches the installed version of the runtime."""
    def __init__(self, version_package: str):
        # ``version_package`` is the ``_<major>_<minor>`` directory name to redirect into.
        top_level = __package__.rsplit(".")[0]
        # Note the extra `.` in the domain because we don't want to handle ourselves.
        self._domain = f"{top_level}._antlr."
        self._versioned = f"{top_level}._antlr.{version_package}"
    def find_spec(self, fullname, path, target=None):
        """Return a module spec for ``fullname`` backed by the version-specific file.

        Declines (returns ``None``) for any module outside the unversioned
        ``._antlr.`` namespace, and for the versioned package itself.
        """
        # Imported lazily to keep module import time low and avoid cycles.
        from importlib.machinery import SourceFileLoader
        from importlib.util import spec_from_loader, find_spec
        if not fullname.startswith(self._domain) or fullname.startswith(self._versioned):
            return None
        newname = f"{self._versioned}.{fullname[len(self._domain):]}"
        # Get the spec and loader for the direct path to the versioned file, and rewrap them to have
        # the unversioned module name. The modules aren't loaded (or executed) by this, but the
        # loader is configured so that when they are, their scopes all carry the unversioned name.
        return spec_from_loader(fullname, SourceFileLoader(fullname, find_spec(newname).origin))
# Install the finder in front of the default finders so it wins the lookup.
sys.meta_path = [ANTLRMetaPathFinder(_resolved_dir)] + sys.meta_path
# ... and now the additional content of this module.
RUNTIME_VERSION = tuple(int(x) for x in _parts)
"""The runtime-detected version of the ANTLR runtime, as a tuple like ``sys.version_info``."""
# These imports are re-directed into concrete versioned ones. Doing them
# manually here helps stop pylint complaining.
from . import qasm3Lexer, qasm3Parser, qasm3ParserVisitor, qasm3ParserListener
| 3,203 | 45.434783 | 100 | py |
openqasm | openqasm-main/source/_extensions/multifigure.py | # -*- coding: utf-8 -*-
import itertools
from docutils.parsers.rst import Directive, directives
from docutils import nodes
DEFAULT_ROW_ITEM_COUNT = 4  # images per row when ``:rowitems:`` is not given
MULTIFIGURE_HTML_CONTENT_TAG = 'div'  # wrapper element for the whole image grid
MULTIFIGURE_HTML_ITEM_TAG = 'div'  # wrapper element for each image cell
MULTIFIGURE_HTML_CAPTION_TAG = 'span'  # element used for a per-image label
# Lower-case class names follow the docutils convention for document-tree nodes.
class multifigure_content(nodes.General, nodes.Element):
    """Node for grouping and laying out all the images in a multi-figure."""
class multifigure_item(nodes.General, nodes.Element):
    """Node representing one of the images inside a multi-figure."""
def label_list(argument):
    """Parse a space-separated option value into a list of non-empty labels."""
    labels = []
    for raw in argument.split(' '):
        cleaned = raw.strip()
        if cleaned:
            labels.append(cleaned)
    return labels
class MultiFigure(Directive):
    """
    Directive for creating multi-image figures, called multifigures.
    Options:
    --------
    ``rowitems``: maximum number of items per row. Default is 4.
    ``labels``: a space separated list of labels for the items. Default is no
    labels.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    option_spec = {
        'rowitems': directives.positive_int,
        'labels': label_list
    }
    def run(self):
        """Build the figure node tree: a figure wrapping the items, a caption, and an optional legend."""
        env = self.state.document.settings.env  # NOTE(review): unused here — possibly kept for future use
        # Parse the directive body into a throwaway element so we can sort its children.
        node = nodes.Element()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)
        # Split parsed children: images/figures/targets become grid items,
        # everything else is treated as caption (first) and legend (rest).
        images = []
        caption_and_legend = []
        for child in node:
            if isinstance(child, (nodes.target, nodes.image, nodes.figure)):
                images.append(child)
            else:
                caption_and_legend.append(child)
        items = []
        # Never use more columns than there are images.
        row_item_count = min(
            len(images), self.options.get('rowitems', DEFAULT_ROW_ITEM_COUNT))
        labels = self.options.get('labels', [])
        # zip_longest pads the shorter sequence with None (missing label or image).
        for img, label in itertools.zip_longest(images, labels):
            item_node = multifigure_item('', img)
            item_node['item-width'] = 100 // row_item_count
            if label is not None:
                item_node['label'] = label
            items.append(item_node)
        # NOTE(review): raises IndexError when the directive body has no
        # non-image paragraph — a caption appears to be mandatory; confirm.
        caption, legend = caption_and_legend[0], caption_and_legend[1:]
        resultnode = nodes.figure('', multifigure_content('', *items))
        resultnode['labels'] = labels
        resultnode.append(nodes.caption(caption.rawsource, '', *caption))
        if legend:
            resultnode.append(nodes.legend('', *legend))
        return [resultnode]
def visit_multifigure_content_html(self, node):
    """Emit the opening tag of the flexbox wrapper laying out all items."""
    # Labelled items align on their baselines so the captions line up;
    # unlabelled items are simply centred.
    if node.parent.get('labels'):
        alignment = 'baseline'
    else:
        alignment = 'center'
    styles = (
        'display: flex;',
        'gap: 2rem 0;',
        'flex-direction: row;',
        'justify-content: center;',
        'align-items: %s;' % alignment,
        'flex-wrap: wrap;',
    )
    opening = self.starttag(
        node,
        MULTIFIGURE_HTML_CONTENT_TAG,
        CLASS='figure-content',
        style=' '.join(styles),
    )
    self.body.append(opening)
def depart_multifigure_content_html(self, node):
    """Close the flexbox wrapper opened by ``visit_multifigure_content_html``."""
    closing = '</%s>\n' % MULTIFIGURE_HTML_CONTENT_TAG
    self.body.append(closing)
def visit_multifigure_item_html(self, node):
    """Emit the opening tag of one image cell, sized from its 'item-width'."""
    width_percent = node.get('item-width')
    styles = ' '.join((
        'max-height: 10rem;',
        'width: %i%%;' % width_percent,
        'display: flex;',
        'flex-direction: column;',
    ))
    opening = self.starttag(
        node,
        MULTIFIGURE_HTML_ITEM_TAG,
        CLASS='figure-item',
        style=styles,
    )
    self.body.append(opening)
def depart_multifigure_item_html(self, node):
    """Close an image cell, first emitting its label (if any) as a caption.

    Bug fix: the original closed ``</p>`` before the inner ``</span>``,
    producing mis-nested HTML (``<p><span>label</p></span>``).  The caption
    span must be closed before its enclosing paragraph.
    """
    if node.get('label'):
        self.body.append(self.starttag(
            node,
            'p',
            CLASS='caption'
        ))
        self.body.append(self.starttag(
            node,
            MULTIFIGURE_HTML_CAPTION_TAG,
            CLASS='caption-number'
        ))
        self.body.append(node.get('label'))
        # Close inner caption element first, then the paragraph around it.
        self.body.append('</%s>\n' % MULTIFIGURE_HTML_CAPTION_TAG)
        self.body.append('</p>\n')
    self.body.append('</%s>\n' % MULTIFIGURE_HTML_ITEM_TAG)
def setup(app):
    """Register the multifigure nodes and directive with Sphinx."""
    content_handlers = (visit_multifigure_content_html,
                        depart_multifigure_content_html)
    item_handlers = (visit_multifigure_item_html,
                     depart_multifigure_item_html)
    app.add_node(multifigure_content, html=content_handlers)
    app.add_node(multifigure_item, html=item_handlers)
    app.add_directive('multifigure', MultiFigure)
    return {'parallel_read_safe': True}
| 4,293 | 28.210884 | 79 | py |
NeuroKit | NeuroKit-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import re
from setuptools import find_packages, setup
# Utilities
# Long description for PyPI: README followed by the changelog.
with open("README.rst") as readme_file:
    readme = readme_file.read()
with open("NEWS.rst") as history_file:
    history = history_file.read()
# Demote the changelog's reST heading markers so its sections nest under the
# README's headings when the two files are concatenated below.
history = history.replace("\n-------------------", "\n^^^^^^^^^^^^^^^^^^^").replace(
    "\n=====", "\n-----"
)
def find_version():
    """Extract ``__version__`` from ``neurokit2/__init__.py`` without importing it."""
    pattern = r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__version__")
    source = open("neurokit2/__init__.py").read()
    match = re.search(pattern, source)
    return match.group(1)
# Dependencies
requirements = ["numpy", "pandas", "scipy", "scikit-learn>=1.0.0", "matplotlib"]
# Optional Dependencies (only needed / downloaded for testing purposes, for instance to test against some other packages)
setup_requirements = ["pytest-runner", "numpy"]
# Test extras include the runtime requirements plus third-party packages that
# some tests compare NeuroKit's output against.
test_requirements = requirements + [
    "pytest",
    "coverage",
    "bioread",
    "mne[data]",
    "pyentrp",
    "antropy",
    "EntropyHub",
    "nolds",
    "biosppy==0.6.1",
    "cvxopt",
    "PyWavelets",
    "EMD-signal",
    "astropy",
    "plotly",
    "ts2vg",
]
# Setup
setup(
    # Info
    name="neurokit2",
    keywords="NeuroKit2, physiology, bodily signals, Python, ECG, EDA, EMG, PPG",
    url="https://github.com/neuropsychology/NeuroKit",
    version=find_version(),
    description="The Python Toolbox for Neurophysiological Signal Processing.",
    long_description=readme + "\n\n" + history,
    long_description_content_type="text/x-rst",
    license="MIT license",
    # The name and contact of a maintainer
    author="Dominique Makowski",
    author_email="dom.makowski@gmail.com",
    # Dependencies
    install_requires=requirements,
    setup_requires=setup_requirements,
    extras_require={"test": test_requirements},
    test_suite="pytest",
    tests_require=test_requirements,
    # Misc
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
    ],
)
| 2,317 | 25.953488 | 121 | py |
NeuroKit | NeuroKit-master/studies/complexity_eeg/make_data.py | import os
import mne
import numpy as np
import pandas as pd
import neurokit2 as nk
# =============================================================================
# Parameters
# =============================================================================
datasets = [
"../../data/lemon/lemon/", # Path to local preprocessed LEMON dataset
# "../../data/rs_eeg_texas/data/", # Path to local TEXAS dataset
"../../data/srm_restingstate_eeg/eeg/", # Path to local SRM dataset
"../../data/testretest_restingstate_eeg/eeg/", # Path to local testrestest dataset
"../../data/rebel_eeg_restingstate_sg/eeg/",
"../../data/rebel_eeg_restingstate_fr/eeg/",
]
# =============================================================================
# Functions
# =============================================================================
# channel = "Fp1"
def optimize_delay(raw, channel="Fp1"):
    """Estimate the optimal embedding delay for one EEG channel.

    Candidate delays correspond to 1-80 ms converted into samples at the
    recording's sampling rate.  Returns a long-format DataFrame with the
    score profile and the optimum selected by the ``fraser1986``
    mutual-information method.
    """
    channel_data = raw.get_data(picks=channel)[0]
    sfreq = raw.info["sfreq"]
    # Convert millisecond lags to (unique, strictly positive) sample counts.
    candidates = np.round(np.linspace(1, 80, 80) / (1000 / sfreq), 0)
    candidates = np.unique(candidates).astype(int)
    candidates = candidates[candidates > 0]
    optimal, info = nk.complexity_delay(
        channel_data, delay_max=candidates, method="fraser1986"
    )
    frame = pd.DataFrame(
        {
            "Value": info["Values"],
            "Score": info["Scores"],
            "Method": info["Method"],
            "Metric": info["Metric"],
            "Channel": channel,
            "Optimal": optimal,
            "What": "Delay",
        }
    )
    return frame
def optimize_dimension(raw, channel="Fp1"):
    """Estimate the optimal embedding dimension (AFN method) for one channel.

    The delay is fixed at 27 ms, converted into samples at the recording's
    sampling rate.  Returns a long-format DataFrame with the E1 score per
    candidate dimension and the selected optimum.
    """
    channel_data = raw.get_data(picks=channel)[0]
    delay_samples = np.round(27 / (1000 / raw.info["sfreq"]), 0).astype(int)
    optimal, info = nk.complexity_dimension(
        channel_data,
        dimension_max=5,
        delay=delay_samples,
        method="afn",
    )
    frame = pd.DataFrame(
        {
            "Value": info["Values"],
            "Score": info["E1"],
            "Method": "AFN",
            "Channel": channel,
            "Optimal": optimal,
            "What": "Dimension",
        }
    )
    return frame
def optimize_tolerance(raw, channel="Fp1"):
    """Estimate the optimal tolerance (r) for one channel with two methods.

    Uses a fixed 27 ms delay (converted to samples) and dimension 5; scores
    are computed over 10 candidate r values for both ``maxApEn`` and
    ``recurrence`` and stacked into one long-format DataFrame.
    """
    signal = raw.get_data(picks=channel)[0]
    signal = nk.standardize(signal)
    out = pd.DataFrame()
    for method in ["maxApEn", "recurrence"]:
        r, info = nk.complexity_tolerance(
            signal,
            delay=np.round(27 / (1000 / raw.info["sfreq"]), 0).astype(int),
            dimension=5,
            method=method,
            r_range=np.linspace(0.002, 2, 10),
            show=True,  # NOTE(review): plotting inside a batch/parallel job — confirm intended
        )
        rez = pd.DataFrame({"Value": info["Values"], "Score": info["Scores"]})
        rez["Method"] = method
        rez["Channel"] = channel
        rez["Optimal"] = r
        # NOTE(review): label says "Dimension" although this optimises the
        # tolerance — likely a copy-paste leftover; downstream CSVs would
        # change if corrected, so it is only flagged here.
        rez["What"] = "Dimension"
        out = pd.concat([out, rez], axis=0)
    return out
def read_raw(path, participant):
    """Load one participant's recording, dispatching on the dataset path.

    Returns ``(raw, sub, cond, orig_filtering, dataset)`` where ``raw`` is the
    average-referenced, 1-50 Hz band-passed MNE Raw object, ``sub`` the
    participant identifier parsed from the file name, ``cond`` the recording
    condition label, ``orig_filtering`` the "highpass-lowpass" string of the
    file as stored on disk, and ``dataset`` a human-readable dataset name.
    """
    if "texas" in path:
        dataset = "Texas"
        # File-name convention: the 5th underscore-separated token is the subject id.
        sub = participant.split("_")[4].replace(".bdf", "")
        cond = "Alternating"
        raw = mne.io.read_raw_bdf(
            path + participant,
            eog=["LVEOG", "RVEOG", "LHEOG", "RHEOG"],
            misc=["NAS", "NFpz"],
            exclude=["M1", "M2"],
            preload=True,
            verbose=False,
        )
        raw = raw.set_montage("standard_1020")
    elif "srm" in path:
        dataset = "SRM"
        cond = "Eyes-Closed"
        sub = participant.split("_")[0]
        raw = mne.io.read_raw_fif(path + participant, verbose=False, preload=True)
    elif "testretest" in path:
        dataset = "Wang (2022)"
        sub = participant.split("_")[0]
        if "eyesopen" in participant:
            cond = "Eyes-Open"
        else:
            cond = "Eyes-Closed"
        raw = mne.io.read_raw_fif(path + participant, verbose=False, preload=True)
    elif "rebel_eeg_restingstate" in path:
        if "_sg" in path:
            dataset = "Resting-State (SG)"
        else:
            dataset = "Resting-State (FR)"
        cond = "Eyes-Closed"
        sub = participant
        raw = mne.io.read_raw_fif(path + participant, verbose=False, preload=True)
    else:
        # Fallback: LEMON dataset; second token encodes eyes-open (EO) vs closed.
        dataset = "Lemon"
        sub = participant.split("_")[0]
        if participant.split("_")[1] == "EO":
            cond = "Eyes-Open"
        else:
            cond = "Eyes-Closed"
        raw = mne.io.read_raw_fif(path + participant, verbose=False, preload=True)
    # Common preprocessing for all datasets.
    raw = raw.set_eeg_reference("average")
    # raw = mne.preprocessing.compute_current_source_density(raw)
    # Record the file's original filter settings before re-filtering to 1-50 Hz.
    orig_filtering = f"{raw.info['highpass']}-{raw.info['lowpass']}"
    raw = raw.filter(1, 50, fir_design="firwin", verbose=False)
    return raw, sub, cond, orig_filtering, dataset
def compute_complexity(raw, channel="Fp1"):
    """Compute a battery of complexity indices for one (standardized) channel.

    Uses a fixed 27 ms delay (converted to samples), dimension 5, and the
    NeuroKit-default tolerance.  Returns a one-row DataFrame with one column
    per index.
    """
    signal = raw.get_data(picks=channel)[0]
    signal = nk.standardize(signal)
    delay = np.round(27 / (1000 / raw.info["sfreq"]), 0).astype(int)
    m = 5  # embedding dimension
    r, _ = nk.complexity_tolerance(signal, dimension=m, method="NeuroKit")
    rez = pd.DataFrame({"Channel": [channel]})
    rez["SFD"], _ = nk.fractal_sevcik(signal)  # Change ShanEn D by SFD
    rez["MSWPEn"], _ = nk.entropy_multiscale(
        signal, scale="default", dimension=m, tolerance=r, method="MSWPEn"
    )
    rez["CWPEn"], _ = nk.entropy_permutation(
        signal,
        delay=delay,
        dimension=m,
        tolerance=r,
        weighted=True,
        conditional=True,
    )
    rez["AttEn"], _ = nk.entropy_attention(
        signal,
    )
    rez["SVDEn"], _ = nk.entropy_svd(signal, delay=delay, dimension=m)
    rez["Hjorth"], _ = nk.complexity_hjorth(signal)
    rez["FDNLD"], _ = nk.fractal_nld(signal)
    # Multifractal DFA yields several summary statistics of the singularity spectrum.
    mfdfa, _ = nk.fractal_dfa(signal, multifractal=True)
    rez["MFDFA_Width"] = mfdfa["Width"]
    rez["MFDFA_Max"] = mfdfa["Max"]
    rez["MFDFA_Mean"] = mfdfa["Mean"]
    rez["MFDFA_Increment"] = mfdfa["Increment"]
    return rez
# =============================================================================
# Delay
# =============================================================================
# rez_delay = pd.DataFrame()
# for path in datasets:
# participants = os.listdir(path)
# for i, participant in enumerate(participants):
# if i < 0 or i > 3:
# continue
# print(f"Participant n°{i} (path: {path})")
# raw, sub, cond, orig_filtering, dataset = read_raw(path, participant)
# args = [{"raw": raw, "channel": ch} for ch in raw.pick_types(eeg=True).ch_names]
# for i in args:
# optimize_delay(i["raw"], i["channel"])
# out = nk.parallel_run(optimize_delay, args, n_jobs=1, verbose=10)
# out = pd.concat(out)
# out["Participant"] = sub
# out["Condition"] = cond
# out["Sampling_Rate"] = raw.info["sfreq"]
# out["Lowpass"] = raw.info["lowpass"]
# out["Original_Frequencies"] = orig_filtering
# out["Duration"] = len(raw) / raw.info["sfreq"] / 60
# out["Dataset"] = dataset
# rez_delay = pd.concat([rez_delay, out], axis=0)
# rez_delay.to_csv("data_delay.csv", index=False)
# print("===================")
# print("FINISHED.")
# =============================================================================
# Attractors
# =============================================================================
# Static attractors: 4-D embeddings of selected midline channels, one example
# recording per dataset, cropped to 30-90 s.
attractors = pd.DataFrame()
for path in datasets:
    # NOTE(review): only the third file in each dataset directory is used —
    # presumably an arbitrary example participant; confirm.
    participants = os.listdir(path)[2]
    raw, _, _, _, dataset = read_raw(path, participants)
    raw = raw.crop(tmin=30, tmax=90)
    for channel in ["Cz", "Fz", "AFz", "Pz", "Oz"]:  # raw.ch_names
        if channel not in raw.ch_names:
            print(f"Channel: {channel}, Dataset: {dataset}")
            continue
        signal = nk.standardize(raw.get_data(picks=channel)[0])
        # if "testretest" in path:
        #     nk.signal_psd(signal, sampling_rate=raw.info["sfreq"], max_frequency=100, show=True)
        # 27 ms delay converted to samples for this recording's sampling rate.
        d = np.round(np.array([27]) / (1000 / raw.info["sfreq"]), 0).astype(int)
        for i, delay in enumerate(d):
            if delay == 0:
                continue
            data = nk.complexity_embedding(signal, delay=delay, dimension=4)
            data = pd.DataFrame(data, columns=["x", "y", "z", "c"])
            data["Dataset"] = dataset
            data["Sampling_Rate"] = raw.info["sfreq"]
            data["Delay"] = delay
            data["Delay_Type"] = i
            # AFz is relabelled Fz so channels align across datasets.
            data["Channel"] = "Fz" if channel == "AFz" else channel
            data["Time"] = raw.times[0 : len(data)]
            attractors = pd.concat([attractors, data], axis=0)
attractors.to_csv("data_attractor.csv", index=False)
# Animated attractors: 2-D embeddings of Fz over a 5 s snippet, sweeping the
# delay from 1 to 50 ms.
attractors = pd.DataFrame()
for path in datasets:
    participants = os.listdir(path)[2]
    raw, _, _, _, dataset = read_raw(path, participants)
    raw = raw.crop(tmin=85, tmax=90)
    signal = nk.standardize(raw.get_data(picks="Fz")[0])
    for freq in range(1, 51):
        delay = np.round(freq / (1000 / raw.info["sfreq"]), 0).astype(int)
        if delay == 0:
            continue
        data = nk.complexity_embedding(signal, delay=delay, dimension=2)
        data = pd.DataFrame(data, columns=["x", "y"])
        data["Dataset"] = dataset
        data["Delay"] = delay
        data["Period"] = freq
        data["Time"] = raw.times[0 : len(data)]
        attractors = pd.concat([attractors, data], axis=0)
attractors.to_csv("data_attractor_anim.csv", index=False)
# =============================================================================
# Dimension
# =============================================================================
# Optimise the embedding dimension per channel, for the first two participants
# (i in {0, 1}) of every dataset, parallelised over channels.
rez_dim = pd.DataFrame()
for path in datasets:
    participants = os.listdir(path)
    for i, participant in enumerate(participants):
        if i < 0 or i > 1:
            continue
        print(f"Participant n°{i} (path: {path})")
        raw, sub, cond, orig_filtering, dataset = read_raw(path, participant)
        args = [{"raw": raw, "channel": ch} for ch in raw.pick_types(eeg=True).ch_names]
        out = nk.parallel_run(optimize_dimension, args, n_jobs=8, verbose=10)
        out = pd.concat(out)
        out["Participant"] = sub
        out["Condition"] = cond
        out["Sampling_Rate"] = raw.info["sfreq"]
        out["Lowpass"] = raw.info["lowpass"]
        out["Original_Frequencies"] = orig_filtering
        out["Duration"] = len(raw) / raw.info["sfreq"] / 60  # recording length in minutes
        out["Dataset"] = dataset
        rez_dim = pd.concat([rez_dim, out], axis=0)
rez_dim.to_csv("data_dimension.csv", index=False)
print("===================")
print("FINISHED.")
# =============================================================================
# Tolerance
# =============================================================================
# Same scheme as above for the tolerance (r) optimisation.
rez_r = pd.DataFrame()
for path in datasets:
    participants = os.listdir(path)
    for i, participant in enumerate(participants):
        if i < 0 or i > 1:
            continue
        print(f"Participant n°{i} (path: {path})")
        raw, sub, cond, orig_filtering, dataset = read_raw(path, participant)
        args = [{"raw": raw, "channel": ch} for ch in raw.pick_types(eeg=True).ch_names]
        out = nk.parallel_run(optimize_tolerance, args, n_jobs=8, verbose=10)
        out = pd.concat(out)
        out["Participant"] = sub
        out["Condition"] = cond
        out["Sampling_Rate"] = raw.info["sfreq"]
        out["Lowpass"] = raw.info["lowpass"]
        out["Original_Frequencies"] = orig_filtering
        out["Duration"] = len(raw) / raw.info["sfreq"] / 60
        out["Dataset"] = dataset
        rez_r = pd.concat([rez_r, out], axis=0)
rez_r.to_csv("data_tolerance.csv", index=False)
print("===================")
print("FINISHED.")
# =============================================================================
# Clustering
# =============================================================================
# Full complexity battery over (up to) the first 101 participants per dataset,
# run sequentially (n_jobs=1) per channel.
rez_complexity = pd.DataFrame()
for path in datasets:
    participants = os.listdir(path)
    for i, participant in enumerate(participants):
        if i < 0 or i > 100:
            continue
        print(f"Participant n°{i} (path: {path})")
        raw, sub, cond, orig_filtering, dataset = read_raw(path, participant)
        args = [{"raw": raw, "channel": ch} for ch in raw.pick_types(eeg=True).ch_names]
        out = nk.parallel_run(compute_complexity, args, n_jobs=1, verbose=10)
        out = pd.concat(out)
        out["Participant"] = sub
        out["Condition"] = cond
        out["Dataset"] = dataset
        rez_complexity = pd.concat([rez_complexity, out], axis=0)
rez_complexity.to_csv("data_complexity.csv", index=False)
print("===================")
print("FINISHED.")
| 12,454 | 34.585714 | 98 | py |
NeuroKit | NeuroKit-master/studies/ecg_benchmark/make_data.py | import pandas as pd
import neurokit2 as nk
# Load ECGs
ecgs = ["../../data/gudb/ECGs.csv",
"../../data/mit_arrhythmia/ECGs.csv",
"../../data/mit_normal/ECGs.csv",
"../../data/ludb/ECGs.csv",
"../../data/fantasia/ECGs.csv"]
# Load True R-peaks location
rpeaks = [pd.read_csv("../../data/gudb/Rpeaks.csv"),
pd.read_csv("../../data/mit_arrhythmia/Rpeaks.csv"),
pd.read_csv("../../data/mit_normal/Rpeaks.csv"),
pd.read_csv("../../data/ludb/Rpeaks.csv"),
pd.read_csv("../../data/fantasia/Rpeaks.csv")]
# =============================================================================
# Study 1
# =============================================================================
# Each wrapper runs one R-peak detector through ``nk.ecg_peaks`` and returns
# the detected peak indices.  The function *name* doubles as the method label
# (``method.__name__`` is stored in the results below), so these must remain
# separate named functions.
def neurokit(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit")
    return info["ECG_R_Peaks"]
def pantompkins1985(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="pantompkins1985")
    return info["ECG_R_Peaks"]
def hamilton2002(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="hamilton2002")
    return info["ECG_R_Peaks"]
def martinez2003(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="martinez2003")
    return info["ECG_R_Peaks"]
def christov2004(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="christov2004")
    return info["ECG_R_Peaks"]
def gamboa2008(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="gamboa2008")
    return info["ECG_R_Peaks"]
def elgendi2010(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="elgendi2010")
    return info["ECG_R_Peaks"]
def engzeemod2012(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="engzeemod2012")
    return info["ECG_R_Peaks"]
def kalidas2017(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="kalidas2017")
    return info["ECG_R_Peaks"]
def rodrigues2020(ecg, sampling_rate):
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="rodrigues2020")
    return info["ECG_R_Peaks"]
# Benchmark every detector against every database and collect one tidy frame.
results = []
for method in [neurokit, pantompkins1985, hamilton2002, martinez2003, christov2004,
               gamboa2008, elgendi2010, engzeemod2012, kalidas2017, rodrigues2020]:
    print(method.__name__)
    for i in range(len(rpeaks)):
        print(" - " + str(i))
        data_ecg = pd.read_csv(ecgs[i])
        result = nk.benchmark_ecg_preprocessing(method, data_ecg, rpeaks[i])
        result["Method"] = method.__name__  # label rows by the detector's function name
        results.append(result)
results = pd.concat(results).reset_index(drop=True)
results.to_csv("data_detectors.csv", index=False)
# =============================================================================
# Study 2
# =============================================================================
# Study 2 fixes the detector ("neurokit") and varies the signal normalisation
# applied beforehand; again, the function name is used as the condition label.
def none(ecg, sampling_rate):
    # Baseline: no preprocessing before detection.
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit")
    return info["ECG_R_Peaks"]
def mean_removal(ecg, sampling_rate):
    # Order-0 detrending, i.e. subtracting the signal mean.
    ecg = nk.signal_detrend(ecg, order=0)
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit")
    return info["ECG_R_Peaks"]
def standardization(ecg, sampling_rate):
    # Z-scoring (mean removal plus unit-variance scaling).
    ecg = nk.standardize(ecg)
    signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit")
    return info["ECG_R_Peaks"]
results = []
for method in [none, mean_removal, standardization]:
    print(method.__name__)
    for i in range(len(rpeaks)):
        print(" - " + str(i))
        data_ecg = pd.read_csv(ecgs[i])
        result = nk.benchmark_ecg_preprocessing(method, data_ecg, rpeaks[i])
        result["Method"] = method.__name__
        results.append(result)
results = pd.concat(results).reset_index(drop=True)
results.to_csv("data_normalization.csv", index=False)
# =============================================================================
# Study 3
# =============================================================================
#def none(ecg, sampling_rate):
# signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit")
# return info["ECG_R_Peaks"]
#
## Detrending-based
#def polylength(ecg, sampling_rate):
# length = len(ecg) / sampling_rate
# ecg = nk.signal_detrend(ecg, method="polynomial", order=int(length / 2))
# signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit")
# return info["ECG_R_Peaks"]
#
#def tarvainen(ecg, sampling_rate):
# ecg = nk.signal_detrend(ecg, method="tarvainen2002")
# signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit")
# return info["ECG_R_Peaks"]
#
#def locreg(ecg, sampling_rate):
# ecg = nk.signal_detrend(ecg,
# method="locreg",
# window=1/0.5,
# stepsize=0.02)
# signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit")
# return info["ECG_R_Peaks"]
#
#def rollingz(ecg, sampling_rate):
# ecg = nk.standardize(ecg, window=sampling_rate*2)
# signal, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="neurokit")
# return info["ECG_R_Peaks"]
#
#
#results = []
#for method in [none, polylength, tarvainen, locreg, rollingz]:
# print(method.__name__)
# for i in range(len(ecgs)):
# print(" - " + str(i))
# result = nk.benchmark_ecg_preprocessing(method, ecgs[i], rpeaks[i])
# result["Method"] = method.__name__
# results.append(result)
#results = pd.concat(results).reset_index(drop=True)
#
#results.to_csv("data_lowfreq.csv", index=False) | 5,866 | 32.335227 | 91 | py |
NeuroKit | NeuroKit-master/studies/hrv_frequency/make_data.py | import pandas as pd
import numpy as np
import neurokit2 as nk
# Load True R-peaks location
datafiles = [pd.read_csv("../../data/gudb/Rpeaks.csv"),
pd.read_csv("../../data/mit_arrhythmia/Rpeaks.csv"),
pd.read_csv("../../data/mit_normal/Rpeaks.csv"),
pd.read_csv("../../data/fantasia/Rpeaks.csv")]
# Get results
all_results = pd.DataFrame()
for file in datafiles:
for database in np.unique(file["Database"]):
print(str(database))
data = file[file["Database"] == database]
for participant in np.unique(data["Participant"]):
data_participant = data[data["Participant"] == participant]
sampling_rate = np.unique(data_participant["Sampling_Rate"])[0]
rpeaks = data_participant["Rpeaks"].values
# Interpolate
rri = np.diff(rpeaks) / sampling_rate * 1000
desired_length = int(np.rint(rpeaks[-1] / sampling_rate * sampling_rate))
rri = nk.signal_interpolate(rpeaks[1:], rri, x_new=np.arange(desired_length))
# Get PSD
psd = nk.signal_psd(rri, sampling_rate=sampling_rate)
#
# results = nk.hrv_frequency(rpeaks, sampling_rate=sampling_rate)
# results["Participant"] = participant
# results["Database"] = database
# results["Recording_Length"] = rpeaks[-1] / sampling_rate / 60
#
# all_results = pd.concat([all_results, results], axis=0)
#
#all_results.to_csv("data.csv", index=False)
| 1,516 | 31.978261 | 89 | py |
NeuroKit | NeuroKit-master/studies/erp_gam/script.py | import numpy as np
import pandas as pd
import neurokit2 as nk
import matplotlib.pyplot as plt
import mne
# Download example dataset
# (MNE's audio/visual sample recording; filtered 0-40 Hz.)
raw = mne.io.read_raw_fif(mne.datasets.sample.data_path() + '/MEG/sample/sample_audvis_filt-0-40_raw.fif')
events = mne.read_events(mne.datasets.sample.data_path() + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif')

# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
            'visual/left': 3, 'visual/right': 4}

# Create epochs (100 ms baseline + 500 ms)
epochs = mne.Epochs(raw,
                    events,
                    event_id,
                    tmin=-0.1,
                    tmax=0.5,
                    picks='eeg',
                    preload=True,
                    detrend=0,
                    baseline=(None, 0))

# Generate list of evoked objects from conditions names
evoked = [epochs[name].average() for name in ('audio', 'visual')]

# Plot topo
#mne.viz.plot_compare_evokeds(evoked, picks='eeg', axes='topo')
#plt.savefig("figures/fig1.png")
#plt.clf()

# Select subset of frontal electrodes
picks = ["EEG 001", "EEG 002", "EEG 003",
         "EEG 005", "EEG 006",
         "EEG 010", "EEG 011", "EEG 012", "EEG 013", "EEG 014"]
epochs = epochs.pick_channels(picks)

# Downsample
epochs = epochs.resample(sfreq=150)

# Convert to data frame and save
#nk.mne_to_df(epochs).to_csv("data.csv", index=False)
# =============================================================================
# MNE-based ERP analysis
# =============================================================================
# Transform each condition to array
# (average over the channel axis, leaving one trace per epoch).
condition1 = np.mean(epochs["audio"].get_data(), axis=1)
condition2 = np.mean(epochs["visual"].get_data(), axis=1)

# Permutation test to find significant cluster of differences
t_vals, clusters, p_vals, h0 = mne.stats.permutation_cluster_test([condition1, condition2],
                                                                  out_type='mask',
                                                                  seed=111)

# Visualize: evoked traces (top), condition difference (middle), t-values (bottom).
fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, ncols=1, sharex=True)

# Evoked
#evoked = [epochs[name].average() for name in ('audio', 'visual')]
#mne.viz.plot_compare_evokeds(evoked, picks=picks, combine="mean"), axes=ax0)
times = epochs.times
ax0.axvline(x=0, linestyle="--", color="black")
ax0.plot(times, np.mean(condition1, axis=0), label="Audio")
ax0.plot(times, np.mean(condition2, axis=0), label="Visual")
ax0.legend(loc="upper right")
ax0.set_ylabel("uV")

# Difference
ax1.axvline(x=0, linestyle="--", color="black")
ax1.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0))
ax1.axhline(y=0, linestyle="--", color="black")
ax1.set_ylabel("Difference")

# T-values
ax2.axvline(x=0, linestyle="--", color="black")
h = None
for i, c in enumerate(clusters):
    c = c[0]
    # Shade significant clusters in red, non-significant ones in grey.
    if p_vals[i] <= 0.05:
        h = ax2.axvspan(times[c.start],
                        times[c.stop - 1],
                        color='red',
                        alpha=0.5)
    else:
        ax2.axvspan(times[c.start],
                    times[c.stop - 1],
                    color=(0.3, 0.3, 0.3),
                    alpha=0.3)
hf = ax2.plot(times, t_vals, 'g')
if h is not None:
    plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("t-values")

plt.savefig("figures/fig2.png")
plt.clf()
NeuroKit | NeuroKit-master/studies/microstates_howmany/script.py | import os
import mne
import scipy
import numpy as np
import pandas as pd
import neurokit2 as nk
import matplotlib.pyplot as plt
import autoreject
from autoreject.utils import interpolate_bads
import scipy.stats
# Resting-state EEG recordings (Texas dataset); each .bdf file is one participant.
data_path = "D:/Dropbox/RECHERCHE/N/NeuroKit/data/rs_eeg_texas/data/"
files = os.listdir(data_path)

results = []
for i, file in enumerate(files[0:2]):
    print(i)

    # Read
    raw = mne.io.read_raw_bdf(data_path + file, eog=['LVEOG', 'RVEOG', 'LHEOG', 'RHEOG'], misc=['M1', 'M2', 'NAS', 'NFpz'], preload=True)
    sampling_rate = np.rint(raw.info["sfreq"])

    # Set montage
    raw = raw.set_montage("biosemi64")

    # Find events
    events = nk.events_find(nk.mne_channel_extract(raw, "Status"),
                            threshold_keep="below",
                            event_conditions=["EyesClosed", "EyesOpen"] * 4)

    # Rereference
    raw = nk.eeg_rereference(raw, ["M1", "M2"])

    # Filter
    raw = raw.filter(1, 35)

    # Bad epochs
    bads, info = nk.eeg_badchannels(raw)
    raw.info['bads'] += bads
    # raw.plot()
    raw = raw.interpolate_bads()

    # ICA
    ica = mne.preprocessing.ICA(n_components=15, random_state=97).fit(raw)
    ica = ica.detect_artifacts(raw, eog_ch=['LVEOG', 'RVEOG'])
    # ica.plot_properties(raw, picks=ica.exclude)
    raw = ica.apply(raw)

    # Rereference
    raw = nk.eeg_rereference(raw, "average")

    # Estimate the optimal number of microstates for each clustering method.
    # BUG FIX: the original looped over ["kmdo"] (typo) while always passing the
    # hard-coded method="kmod" to microstates_findnumber, so the "Method" label
    # stored in the results did not match the method actually used. Loop over
    # the correct method name and pass the loop variable through.
    for method in ["kmod"]:
        rez = nk.microstates_findnumber(raw, n_max=6, show=False, method=method)
        rez["Method"] = method
        rez["Participant"] = file
        results.append(rez)
#
#
#
#
#
# ransac = autoreject.Ransac(verbose='progressbar', picks="eeg", n_jobs=1)
# raw = autoreject.get_rejection_threshold(raw, picks="eeg")
#
#
#raw.info["ch_names"]
#nk.signal_plot(event)
#mne.viz.plot_raw(raw)
| 1,816 | 22.294872 | 137 | py |
NeuroKit | NeuroKit-master/tests/tests_microstates.py | # -*- coding: utf-8 -*-
import mne
import numpy as np
import neurokit2 as nk
# =============================================================================
# Peaks
# =============================================================================
def test_microstates_peaks():
    """GFP peaks found by NeuroKit should be a subset of a reference local-maxima detector."""
    # Load eeg data and calculate gfp
    eeg = nk.mne_data("filt-0-40_raw")
    gfp = nk.eeg_gfp(eeg)

    # Find peaks
    peaks_nk = nk.microstates_peaks(eeg, distance_between=0.01)

    # Reference implementation taken from Frederic
    # https://github.com/Frederic-vW/eeg_microstates/blob/master/eeg_microstates.py
    def locmax(x):
        # A local maximum is where the sign of the first derivative flips from + to -.
        slope_signs = np.sign(np.diff(x))  # discrete 1st derivative, sign only
        return np.where(np.diff(slope_signs) == -2)[0] + 1  # indices of local max.

    reference_peaks = locmax(gfp)
    # only works when distance_between = 0.01
    assert all(peak in reference_peaks for peak in peaks_nk)
| 951 | 28.75 | 102 | py |
NeuroKit | NeuroKit-master/tests/tests_ecg.py | # -*- coding: utf-8 -*-
import biosppy
import matplotlib.pyplot as plt
import numpy as np
import pytest
import neurokit2 as nk
def test_ecg_simulate():
    """Peak counts of simulated ECGs should scale with heart rate and duration."""
    ecg1 = nk.ecg_simulate(
        duration=20, length=5000, method="simple", noise=0, random_state=0
    )
    assert len(ecg1) == 5000

    ecg2 = nk.ecg_simulate(duration=20, length=5000, heart_rate=500, random_state=1)
    # pd.DataFrame({"ECG1":ecg1, "ECG2": ecg2}).plot()
    # pd.DataFrame({"ECG1":ecg1, "ECG2": ecg2}).hist()
    n_peaks_1 = len(nk.signal_findpeaks(ecg1, height_min=0.6)["Peaks"])
    n_peaks_2 = len(nk.signal_findpeaks(ecg2, height_min=0.6)["Peaks"])
    # A 500 bpm signal must contain more beats than the default heart rate.
    assert n_peaks_1 < n_peaks_2

    ecg3 = nk.ecg_simulate(duration=10, length=5000, random_state=2)
    # pd.DataFrame({"ECG1":ecg1, "ECG3": ecg3}).plot()
    n_peaks_3 = len(nk.signal_findpeaks(ecg3, height_min=0.6)["Peaks"])
    # Half the duration at the same rate yields fewer beats.
    assert n_peaks_2 > n_peaks_3
def test_ecg_clean():
    """Cleaning should preserve length, attenuate sub-0.5 Hz drift, and zero-center (biosppy)."""
    sampling_rate = 1000
    noise = 0.05

    ecg = nk.ecg_simulate(sampling_rate=sampling_rate, noise=noise, random_state=3)
    ecg_cleaned_nk = nk.ecg_clean(ecg, sampling_rate=sampling_rate, method="neurokit")
    assert ecg.size == ecg_cleaned_nk.size

    # Assert that highpass filter with .5 Hz lowcut was applied.
    fft_raw = np.abs(np.fft.rfft(ecg))
    fft_nk = np.abs(np.fft.rfft(ecg_cleaned_nk))
    freqs = np.fft.rfftfreq(ecg.size, 1 / sampling_rate)
    # Spectral power below 0.5 Hz must be reduced by the cleaning.
    assert np.sum(fft_raw[freqs < 0.5]) > np.sum(fft_nk[freqs < 0.5])

    # Comparison to biosppy
    ecg_biosppy = nk.ecg_clean(ecg, sampling_rate=sampling_rate, method="biosppy")
    assert np.allclose(ecg_biosppy.mean(), 0, atol=1e-6)
def test_ecg_peaks():
    """R-peak detection on a 2-min simulated ECG should find ~139 beats, with or without artifact correction."""
    sampling_rate = 1000
    noise = 0.15

    ecg = nk.ecg_simulate(
        duration=120, sampling_rate=sampling_rate, noise=noise, random_state=42
    )
    ecg_cleaned_nk = nk.ecg_clean(ecg, sampling_rate=sampling_rate, method="neurokit")

    # Test without request to correct artifacts.
    signals, _ = nk.ecg_peaks(
        ecg_cleaned_nk, correct_artifacts=False, method="neurokit"
    )

    assert signals.shape == (120000, 1)
    # The binary R-peak column sums to the number of detected beats.
    assert np.allclose(signals["ECG_R_Peaks"].values.sum(dtype=np.int64), 139, atol=1)

    # Test with request to correct artifacts.
    signals, _ = nk.ecg_peaks(ecg_cleaned_nk, correct_artifacts=True, method="neurokit")

    assert signals.shape == (120000, 1)
    assert np.allclose(signals["ECG_R_Peaks"].values.sum(dtype=np.int64), 139, atol=1)
def test_ecg_process():
    """Smoke test: ecg_process should run end-to-end on a simulated signal."""
    fs = 1000
    signal = nk.ecg_simulate(sampling_rate=fs, noise=0.05, random_state=4)
    _ = nk.ecg_process(signal, sampling_rate=fs, method="neurokit")
def test_ecg_plot():
    """ecg_plot should create 2 axes without a sampling rate and 3 axes (incl. beats) with one."""
    ecg = nk.ecg_simulate(duration=60, heart_rate=70, noise=0.05, random_state=5)
    ecg_summary, _ = nk.ecg_process(ecg, sampling_rate=1000, method="neurokit")

    # Plot data over samples.
    nk.ecg_plot(ecg_summary)
    # This will identify the latest figure.
    fig = plt.gcf()
    assert len(fig.axes) == 2
    titles = ["Raw and Cleaned Signal", "Heart Rate"]
    for ax, title in zip(fig.get_axes(), titles):
        assert ax.get_title() == title
    assert fig.get_axes()[1].get_xlabel() == "Samples"
    # Shared x-axis: both subplots must use identical ticks.
    np.testing.assert_array_equal(fig.axes[0].get_xticks(), fig.axes[1].get_xticks())
    plt.close(fig)

    # Plot data over seconds.
    nk.ecg_plot(ecg_summary, sampling_rate=1000)
    # This will identify the latest figure.
    fig = plt.gcf()
    assert len(fig.axes) == 3
    titles = ["Raw and Cleaned Signal", "Heart Rate", "Individual Heart Beats"]
    for ax, title in zip(fig.get_axes(), titles):
        assert ax.get_title() == title
    assert fig.get_axes()[1].get_xlabel() == "Time (seconds)"
    np.testing.assert_array_equal(fig.axes[0].get_xticks(), fig.axes[1].get_xticks())
    plt.close(fig)
def test_ecg_findpeaks():
    """Each R-peak detection method should find its known beat count on a fixed simulated ECG.

    The expected counts are empirical values for random_state=42; the true number
    of beats is ~69-70, and christov2004 is known to grossly over-detect here.
    """
    sampling_rate = 1000

    ecg = nk.ecg_simulate(
        duration=60,
        sampling_rate=sampling_rate,
        noise=0,
        method="simple",
        random_state=42,
    )
    ecg_cleaned = nk.ecg_clean(ecg, sampling_rate=sampling_rate, method="neurokit")

    # Test neurokit methodwith show=True
    info_nk = nk.ecg_findpeaks(ecg_cleaned, show=True)
    assert info_nk["ECG_R_Peaks"].size == 69
    # This will identify the latest figure.
    fig = plt.gcf()
    assert len(fig.axes) == 2

    # Test pantompkins1985 method
    info_pantom = nk.ecg_findpeaks(
        nk.ecg_clean(ecg, method="pantompkins1985"), method="pantompkins1985"
    )
    assert info_pantom["ECG_R_Peaks"].size == 70

    # Test hamilton2002 method
    info_hamilton = nk.ecg_findpeaks(
        nk.ecg_clean(ecg, method="hamilton2002"), method="hamilton2002"
    )
    assert info_hamilton["ECG_R_Peaks"].size == 69

    # Test christov2004 method
    info_christov = nk.ecg_findpeaks(ecg_cleaned, method="christov2004")
    assert info_christov["ECG_R_Peaks"].size == 273

    # Test gamboa2008 method
    info_gamboa = nk.ecg_findpeaks(ecg_cleaned, method="gamboa2008")
    assert info_gamboa["ECG_R_Peaks"].size == 69

    # Test elgendi2010 method
    info_elgendi = nk.ecg_findpeaks(
        nk.ecg_clean(ecg, method="elgendi2010"), method="elgendi2010"
    )
    assert info_elgendi["ECG_R_Peaks"].size == 70

    # Test engzeemod2012 method
    info_engzeemod = nk.ecg_findpeaks(
        nk.ecg_clean(ecg, method="engzeemod2012"), method="engzeemod2012"
    )
    assert info_engzeemod["ECG_R_Peaks"].size == 69

    # Test kalidas2017 method
    info_kalidas = nk.ecg_findpeaks(
        nk.ecg_clean(ecg, method="kalidas2017"), method="kalidas2017"
    )
    assert np.allclose(info_kalidas["ECG_R_Peaks"].size, 68, atol=1)

    # Test martinez2004 method
    # (uses the default simulation method rather than "simple").
    ecg = nk.ecg_simulate(
        duration=60, sampling_rate=sampling_rate, noise=0, random_state=42
    )
    ecg_cleaned = nk.ecg_clean(ecg, sampling_rate=sampling_rate, method="neurokit")
    info_martinez = nk.ecg_findpeaks(ecg_cleaned, method="martinez2004")
    assert np.allclose(info_martinez["ECG_R_Peaks"].size, 69, atol=1)
def test_ecg_eventrelated():
    """Event-related ECG features should be ordered sanely and warn on missing columns."""
    ecg, _ = nk.ecg_process(nk.ecg_simulate(duration=20, random_state=6))
    epochs = nk.epochs_create(
        ecg, events=[5000, 10000, 15000], epochs_start=-0.1, epochs_end=1.9
    )
    ecg_eventrelated = nk.ecg_eventrelated(epochs)

    # Test rate features: Min <= Mean <= Max within each epoch.
    assert np.alltrue(
        np.array(ecg_eventrelated["ECG_Rate_Min"])
        < np.array(ecg_eventrelated["ECG_Rate_Mean"])
    )

    assert np.alltrue(
        np.array(ecg_eventrelated["ECG_Rate_Mean"])
        < np.array(ecg_eventrelated["ECG_Rate_Max"])
    )

    assert len(ecg_eventrelated["Label"]) == 3

    # Test warning on missing columns
    # NOTE(review): "Artrial" (while the deleted column is "ECG_Phase_Atrial")
    # looks like a typo — confirm it mirrors the exact wording of the warning
    # message raised by nk.ecg_eventrelated.
    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r".*does not have an `ECG_Phase_Artrial`.*"
    ):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["ECG_Phase_Atrial"]
        nk.ecg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})

    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r".*does not have an.*`ECG_Phase_Ventricular`"
    ):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["ECG_Phase_Ventricular"]
        nk.ecg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})

    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r".*does not have an `ECG_Quality`.*"
    ):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["ECG_Quality"]
        nk.ecg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})

    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r".*does not have an `ECG_Rate`.*"
    ):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["ECG_Rate"]
        nk.ecg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})

    # Test warning on long epochs (eventrelated_utils)
    # Re-indexing from 0 makes the epoch look abnormally long, triggering the warning.
    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r".*duration of your epochs seems.*"
    ):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        first_epoch_copy.index = range(len(first_epoch_copy))
        nk.ecg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
def test_ecg_delineate():
    """Both delineation methods should return one position per wave type per beat."""
    sampling_rate = 1000

    # test with simulated signals
    ecg = nk.ecg_simulate(duration=20, sampling_rate=sampling_rate, random_state=42)
    _, rpeaks = nk.ecg_peaks(ecg, sampling_rate=sampling_rate)
    number_rpeaks = len(rpeaks["ECG_R_Peaks"])

    # Method 1: derivative
    # The peak-based method returns exactly one entry per R-peak.
    _, waves_derivative = nk.ecg_delineate(
        ecg, rpeaks, sampling_rate=sampling_rate, method="peaks"
    )
    assert len(waves_derivative["ECG_P_Peaks"]) == number_rpeaks
    assert len(waves_derivative["ECG_Q_Peaks"]) == number_rpeaks
    assert len(waves_derivative["ECG_S_Peaks"]) == number_rpeaks
    assert len(waves_derivative["ECG_T_Peaks"]) == number_rpeaks
    assert len(waves_derivative["ECG_P_Onsets"]) == number_rpeaks
    assert len(waves_derivative["ECG_T_Offsets"]) == number_rpeaks

    # Method 2: CWT
    # Wavelet-based delineation may miss boundary beats, hence atol=1.
    _, waves_cwt = nk.ecg_delineate(
        ecg, rpeaks, sampling_rate=sampling_rate, method="cwt"
    )
    assert np.allclose(len(waves_cwt["ECG_P_Peaks"]), 22, atol=1)
    assert np.allclose(len(waves_cwt["ECG_T_Peaks"]), 22, atol=1)
    assert np.allclose(len(waves_cwt["ECG_R_Onsets"]), 23, atol=1)
    assert np.allclose(len(waves_cwt["ECG_R_Offsets"]), 23, atol=1)
    assert np.allclose(len(waves_cwt["ECG_P_Onsets"]), 22, atol=1)
    assert np.allclose(len(waves_cwt["ECG_P_Offsets"]), 22, atol=1)
    assert np.allclose(len(waves_cwt["ECG_T_Onsets"]), 22, atol=1)
    assert np.allclose(len(waves_cwt["ECG_T_Offsets"]), 22, atol=1)
def test_ecg_invert():
    """Inverting an upside-down ECG should recover the original signal."""
    fs = 500
    ecg = nk.ecg_simulate(sampling_rate=fs, noise=0.05, random_state=3)
    # Flip the signal around its mean to emulate reversed electrode placement.
    upside_down = ecg * -1 + 2 * np.nanmean(ecg)
    recovered, _ = nk.ecg_invert(upside_down)
    assert np.allclose((ecg - recovered).mean(), 0, atol=1e-6)
def test_ecg_intervalrelated():
    """Interval-related analysis should yield the expected HRV columns for signals and epoch dicts."""
    data = nk.data("bio_resting_5min_100hz")
    df, _ = nk.ecg_process(data["ECG"], sampling_rate=100)

    # Expected feature columns (rate + time/frequency/non-linear HRV indices).
    columns = [
        "ECG_Rate_Mean",
        "HRV_RMSSD",
        "HRV_MeanNN",
        "HRV_SDNN",
        "HRV_SDSD",
        "HRV_CVNN",
        "HRV_CVSD",
        "HRV_MedianNN",
        "HRV_MadNN",
        "HRV_MCVNN",
        "HRV_IQRNN",
        "HRV_pNN50",
        "HRV_pNN20",
        "HRV_TINN",
        "HRV_HTI",
        "HRV_ULF",
        "HRV_VLF",
        "HRV_LF",
        "HRV_HF",
        "HRV_VHF",
        "HRV_LFHF",
        "HRV_LFn",
        "HRV_HFn",
        "HRV_LnHF",
        "HRV_SD1",
        "HRV_SD2",
        "HRV_SD1SD2",
        "HRV_S",
        "HRV_CSI",
        "HRV_CVI",
        "HRV_CSI_Modified",
        "HRV_PIP",
        "HRV_IALS",
        "HRV_PSS",
        "HRV_PAS",
        "HRV_GI",
        "HRV_SI",
        "HRV_AI",
        "HRV_PI",
        "HRV_C1d",
        "HRV_C1a",
        "HRV_SD1d",
        "HRV_SD1a",
        "HRV_C2d",
        "HRV_C2a",
        "HRV_SD2d",
        "HRV_SD2a",
        "HRV_Cd",
        "HRV_Ca",
        "HRV_SDNNd",
        "HRV_SDNNa",
        "HRV_DFA_alpha1",
        "HRV_DFA_alpha2",
        "HRV_ApEn",
        "HRV_SampEn",
        "HRV_MSE",
        "HRV_CMSE",
        "HRV_RCMSE",
        "HRV_CD",
    ]

    # Test with signal dataframe
    features_df = nk.ecg_intervalrelated(df, sampling_rate=100)

    # ecg_analyze with method="interval-related" must be equivalent.
    # https://github.com/neuropsychology/NeuroKit/issues/304
    assert all(
        features_df == nk.ecg_analyze(df, sampling_rate=100, method="interval-related")
    )

    assert (elem in columns for elem in np.array(features_df.columns.values, dtype=str))
    assert features_df.shape[0] == 1  # Number of rows

    # Test with dict
    columns.append("Label")
    epochs = nk.epochs_create(df, events=[0, 15000], sampling_rate=100, epochs_end=150)
    features_dict = nk.ecg_intervalrelated(epochs, sampling_rate=100)

    assert (
        elem in columns for elem in np.array(features_dict.columns.values, dtype=str)
    )
    assert features_dict.shape[0] == 2  # Number of rows
| 12,392 | 32.136364 | 88 | py |
NeuroKit | NeuroKit-master/tests/tests_ecg_delineate.py | import pathlib
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import neurokit2 as nk
# When True, intermediate signals/positions are plotted for manual inspection.
SHOW_DEBUG_PLOTS = False
# Maximum tolerated deviation between detected and annotated wave positions.
MAX_SIGNAL_DIFF = 0.03  # seconds
@pytest.fixture(name="test_data")
def setup_load_ecg_data():
    """Load ecg signal and sampling rate."""

    def load_signal_from_disk(filename=None, sampling_rate=2000):
        # Fall back to a simulated ECG when no file name is given.
        if filename is None:
            ecg = nk.ecg_simulate(duration=10, sampling_rate=sampling_rate, method="ecgsyn")
        else:
            filename = (pathlib.Path(__file__) / "../ecg_data" / filename).resolve().as_posix()
            ecg = np.array(pd.read_csv(filename))[:, 1]
        return ecg, sampling_rate

    ecg, sampling_rate = load_signal_from_disk("good_4000.csv", sampling_rate=4000)

    # Manual wave annotations that the detected positions are compared against.
    annots_filename = (pathlib.Path(__file__) / "../ecg_data" / "good_4000_annotation.csv").resolve().as_posix()
    annots = pd.read_csv(annots_filename, index_col=0, header=None).transpose()

    if SHOW_DEBUG_PLOTS:
        plt.plot(ecg)
        plt.show()

    # R-peaks serve as anchors for the delineation under test.
    rpeaks = nk.ecg_findpeaks(ecg, sampling_rate=sampling_rate, method="martinez")["ECG_R_Peaks"]

    test_data = dict(ecg=ecg, sampling_rate=sampling_rate, rpeaks=rpeaks)
    test_data.update(annots)
    yield test_data
def helper_plot(attribute, ecg_characteristics, test_data):
    """Visual debugging aid: overlay computed vs. annotated positions for one wave type."""
    computed_and_annotated = [ecg_characteristics[attribute], test_data[attribute]]
    print("0: computed\n1: data")
    nk.events_plot(computed_and_annotated, test_data["ecg"])
    plt.title(attribute)
    plt.show()
def run_test_func(test_data):
    """Run DWT-based delineation and return each wave list as a numpy array."""
    _, waves = nk.ecg_delineate(test_data["ecg"], test_data["rpeaks"], test_data["sampling_rate"], method="dwt")
    return {name: np.array(positions) for name, positions in waves.items()}
@pytest.mark.parametrize(
    "attribute",
    [
        "ECG_P_Peaks",
        "ECG_T_Peaks",
        "ECG_T_Onsets",
        "ECG_T_Offsets",
        "ECG_P_Onsets",
        "ECG_P_Offsets",
        "ECG_R_Onsets",
        "ECG_R_Offsets",
    ],
)
@pytest.mark.skipif(sys.platform == "darwin", reason="skip testing on macos")
def test_find_ecg_characteristics(attribute, test_data):
    """Delineated wave positions should lie close to the manual annotations.

    BUG FIX: the skipif guard used sys.platform.startswith("mac"), but macOS
    reports sys.platform == "darwin", so the skip never actually triggered
    on macOS. Use the correct platform identifier.
    """
    ecg_characteristics = run_test_func(test_data)

    diff = ecg_characteristics[attribute] - test_data[attribute]
    # Discard gross detection failures (> 0.5 s away) before computing statistics.
    diff = diff[diff.abs() < 0.5 * test_data["sampling_rate"]]  # remove obvious failure
    test_data[attribute] = test_data[attribute][~np.isnan(test_data[attribute])]
    diff = diff[~np.isnan(diff)]
    report = """
    Difference statistics
    {diff_describe}
    Difference:
    {diff}
    """.format(
        diff_describe=diff.describe(), diff=diff
    )
    # helper_plot(attribute, ecg_characteristics, test_data)
    # NOTE(review): diff.mean() is signed, so a large negative bias would still
    # pass; consider diff.abs().mean() if an absolute bound is intended.
    assert diff.std() < 0.11 * test_data["sampling_rate"], report
    assert diff.mean() < 0.11 * test_data["sampling_rate"], report
| 2,795 | 30.772727 | 112 | py |
NeuroKit | NeuroKit-master/tests/tests.py | import doctest
import pytest
# Allow running the suite directly: run doctests first, then delegate to pytest.
if __name__ == "__main__":
    doctest.testmod()
    pytest.main()
| 99 | 10.111111 | 26 | py |
NeuroKit | NeuroKit-master/tests/tests_eog.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import mne
import numpy as np
import pytest
import neurokit2 as nk
def test_eog_clean():
    """EOG cleaning should preserve length and the 'mne' method should match mne's own filter."""
    # test with exported csv
    eog_signal = nk.data("eog_200hz")["vEOG"]
    eog_cleaned = nk.eog_clean(eog_signal, sampling_rate=200)
    assert eog_cleaned.size == eog_signal.size

    # test with mne.io.Raw
    raw = mne.io.read_raw_fif(
        str(mne.datasets.sample.data_path()) + "/MEG/sample/sample_audvis_raw.fif", preload=True
    )
    sampling_rate = raw.info["sfreq"]

    eog_channels = nk.mne_channel_extract(raw, what="EOG", name="EOG").values
    eog_cleaned = nk.eog_clean(eog_channels, sampling_rate, method="agarwal2019")
    assert eog_cleaned.size == eog_channels.size

    # compare with mne filter
    # (same 1-10 Hz band-pass parameters as nk's "mne" method).
    eog_cleaned_mne = nk.eog_clean(eog_channels, sampling_rate, method="mne")
    mne_clean = mne.filter.filter_data(
        eog_channels,
        sfreq=sampling_rate,
        l_freq=1,
        h_freq=10,
        filter_length="10s",
        l_trans_bandwidth=0.5,
        h_trans_bandwidth=0.5,
        phase="zero-double",
        fir_window="hann",
        fir_design="firwin2",
        verbose=False,
    )

    assert np.allclose((eog_cleaned_mne - mne_clean).mean(), 0)
def test_eog_findpeaks():
    """Each blink-detection method should find its known blink count on the sample recording."""
    eog_signal = nk.data("eog_100hz")
    eog_cleaned = nk.eog_clean(eog_signal, sampling_rate=100)

    # Test with NeuroKit
    nk_peaks = nk.eog_findpeaks(
        eog_cleaned, sampling_rate=100, method="neurokit", threshold=0.33, show=False
    )
    assert nk_peaks.size == 19

    # Test with MNE
    mne_peaks = nk.eog_findpeaks(eog_cleaned, method="mne")
    assert mne_peaks.size == 44

    # Test with brainstorm
    brainstorm_peaks = nk.eog_findpeaks(eog_cleaned, method="brainstorm")
    assert brainstorm_peaks.size == 28

    # Test with blinker (no comment in original; counts are empirical per method).
    blinker_peaks = nk.eog_findpeaks(eog_cleaned, method="blinker", sampling_rate=100)
    assert blinker_peaks.size == 14
def test_eog_process():
    """Blink indices in the signals DataFrame must match those in the info dict."""
    raw = nk.data("eog_200hz")["vEOG"]
    signals, info = nk.eog_process(raw, sampling_rate=200)

    # Extract blinks, test across dataframe and dict
    blink_samples = np.flatnonzero(signals["EOG_Blinks"] == 1)
    assert np.all(blink_samples == info["EOG_Blinks"])
def test_eog_plot():
    """eog_plot should build 3 titled subplots and raise on invalid input."""
    eog_signal = nk.data("eog_100hz")
    signals, info = nk.eog_process(eog_signal, sampling_rate=100)

    # Plot
    nk.eog_plot(signals, peaks=info, sampling_rate=100)
    fig = plt.gcf()
    assert len(fig.axes) == 3
    titles = ["Raw and Cleaned Signal", "Blink Rate", "Individual Blinks"]
    legends = [["Raw", "Cleaned", "Blinks"], ["Rate", "Mean"], ["Median"]]
    ylabels = ["Amplitude (mV)", "Blinks per minute"]
    # Check title, legend entries and y-label of each subplot.
    for (ax, title, legend, ylabel) in zip(fig.get_axes(), titles, legends, ylabels):
        assert ax.get_title() == title
        subplot = ax.get_legend_handles_labels()
        assert subplot[1] == legend
        assert ax.get_ylabel() == ylabel

    assert fig.get_axes()[1].get_xlabel() == "Time (seconds)"
    np.testing.assert_array_equal(fig.axes[0].get_xticks(), fig.axes[1].get_xticks())
    plt.close(fig)

    # Plotting nothing must fail loudly.
    with pytest.raises(ValueError, match=r"NeuroKit error: eog_plot.*"):
        nk.eog_plot(None)
def test_eog_eventrelated():
    """Event-related EOG features should be ordered sanely and warn on missing columns."""
    eog = nk.data("eog_200hz")["vEOG"]
    eog_signals, info = nk.eog_process(eog, sampling_rate=200)
    epochs = nk.epochs_create(
        eog_signals, events=[5000, 10000, 15000], epochs_start=-0.1, epochs_end=1.9
    )
    eog_eventrelated = nk.eog_eventrelated(epochs)

    # Test rate features: Min <= Mean <= Max within each epoch.
    assert np.alltrue(
        np.array(eog_eventrelated["EOG_Rate_Min"]) < np.array(eog_eventrelated["EOG_Rate_Mean"])
    )

    assert np.alltrue(
        np.array(eog_eventrelated["EOG_Rate_Mean"]) < np.array(eog_eventrelated["EOG_Rate_Max"])
    )

    # Test blink presence
    assert np.alltrue(np.array(eog_eventrelated["EOG_Blinks_Presence"]) == np.array([1, 0, 0]))

    # Test warning on missing columns
    with pytest.warns(nk.misc.NeuroKitWarning, match=r".*does not have an `EOG_Blinks`.*"):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["EOG_Blinks"]
        nk.eog_eventrelated({**epochs, first_epoch_key: first_epoch_copy})

    with pytest.warns(nk.misc.NeuroKitWarning, match=r".*does not have an `EOG_Rate`.*"):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["EOG_Rate"]
        nk.eog_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
def test_eog_intervalrelated():
    """Interval-related EOG features should be computed for whole signals and epoch dicts."""
    eog = nk.data("eog_200hz")["vEOG"]
    eog_signals, info = nk.eog_process(eog, sampling_rate=200)

    columns = ["EOG_Peaks_N", "EOG_Rate_Mean"]

    # Test with signal dataframe
    features = nk.eog_intervalrelated(eog_signals)

    assert all(elem in np.array(features.columns.values, dtype=str) for elem in columns)
    assert features.shape[0] == 1  # Number of rows

    # Test with dict
    # (epoch dicts additionally carry a "Label" column).
    columns.append("Label")
    epochs = nk.epochs_create(
        eog_signals, events=[5000, 10000, 15000], epochs_start=-0.1, epochs_end=1.9
    )
    epochs_dict = nk.eog_intervalrelated(epochs)

    assert all(elem in columns for elem in np.array(epochs_dict.columns.values, dtype=str))
    assert epochs_dict.shape[0] == len(epochs)  # Number of rows
| 5,372 | 31.96319 | 96 | py |
NeuroKit | NeuroKit-master/tests/tests_eda.py | import platform
import biosppy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import neurokit2 as nk
# =============================================================================
# EDA
# =============================================================================
def test_eda_simulate():
    """The number of detectable SCRs should match the requested scr_number."""
    eda1 = nk.eda_simulate(duration=10, length=None, scr_number=1, random_state=333)
    n_peaks_1 = len(nk.signal_findpeaks(eda1, height_min=0.6)["Peaks"])
    assert n_peaks_1 == 1

    eda2 = nk.eda_simulate(duration=10, length=None, scr_number=5, random_state=333)
    n_peaks_2 = len(nk.signal_findpeaks(eda2, height_min=0.6)["Peaks"])
    assert n_peaks_2 == 5
    # pd.DataFrame({"EDA1": eda1, "EDA2": eda2}).plot()

    # Requesting more SCRs must yield more detected peaks.
    assert n_peaks_2 > n_peaks_1
def test_eda_clean():
    """EDA cleaning should preserve length and the 'biosppy' method should match biosppy's pipeline."""
    sampling_rate = 1000
    eda = nk.eda_simulate(
        duration=30,
        sampling_rate=sampling_rate,
        scr_number=6,
        noise=0.01,
        drift=0.01,
        random_state=42,
    )

    clean = nk.eda_clean(eda, sampling_rate=sampling_rate)
    assert len(clean) == len(eda)

    # Comparison to biosppy (https://github.com/PIA-Group/BioSPPy/blob/master/biosppy/signals/eda.py)
    # Reproduce biosppy's low-pass Butterworth + boxzen smoothing reference.
    eda_biosppy = nk.eda_clean(eda, sampling_rate=sampling_rate, method="biosppy")
    original, _, _ = biosppy.tools.filter_signal(
        signal=eda,
        ftype="butter",
        band="lowpass",
        order=4,
        frequency=5,
        sampling_rate=sampling_rate,
    )

    original, _ = biosppy.tools.smoother(
        signal=original, kernel="boxzen", size=int(0.75 * sampling_rate), mirror=True
    )

    #  pd.DataFrame({"our":eda_biosppy, "biosppy":original}).plot()
    assert np.allclose((eda_biosppy - original).mean(), 0, atol=1e-5)
def test_eda_phasic():
    """Each phasic-decomposition method should preserve the signal length."""
    sr = 100
    eda = nk.eda_simulate(
        duration=30,
        sampling_rate=sr,
        scr_number=6,
        noise=0.01,
        drift=0.01,
        random_state=42,
    )

    # cvxEDA depends on solvers only exercised on Linux CI.
    if platform.system() == "Linux":
        cvxEDA = nk.eda_phasic(eda, sampling_rate=sr, method="cvxeda")
        assert len(cvxEDA) == len(eda)

    smoothMedian = nk.eda_phasic(eda, sampling_rate=sr, method="smoothmedian")
    assert len(smoothMedian) == len(eda)

    highpass = nk.eda_phasic(eda, sampling_rate=sr, method="highpass")
    assert len(highpass) == len(eda)

    # This fails unfortunately... need to fix the sparsEDA algorithm
    # sparsEDA = nk.eda_phasic(eda, sampling_rate=sr, method="sparsEDA")
    # assert len(highpass) == len(eda)
def test_eda_peaks():
    """SCR peak detection should match biosppy and keep all SCR feature columns aligned."""
    sampling_rate = 1000
    eda = nk.eda_simulate(
        duration=30 * 20,
        sampling_rate=sampling_rate,
        scr_number=6 * 20,
        noise=0,
        drift=0.01,
        random_state=42,
    )
    eda_phasic = nk.eda_phasic(nk.standardize(eda), method="highpass")["EDA_Phasic"].values

    # gamboa2008 should agree with biosppy's basic_scr reference implementation.
    signals, info = nk.eda_peaks(eda_phasic, method="gamboa2008")
    onsets, peaks, amplitudes = biosppy.eda.basic_scr(eda_phasic, sampling_rate=1000)
    assert np.allclose((info["SCR_Peaks"] - peaks).mean(), 0, atol=1e-5)

    signals, info = nk.eda_peaks(eda_phasic, method="kim2004")
    # Check that indices and values positions match
    peak_positions = np.where(info["SCR_Peaks"] != 0)[0]
    assert np.all(peak_positions == np.where(info["SCR_Amplitude"] != 0)[0])
    assert np.all(peak_positions == np.where(info["SCR_Height"] != 0)[0])
    assert np.all(peak_positions == np.where(info["SCR_RiseTime"] != 0)[0])

    recovery_positions = np.where(info["SCR_Recovery"] != 0)[0]
    assert np.all(recovery_positions == np.where(info["SCR_RecoveryTime"] != 0)[0])
def test_eda_process():
    """eda_process should return all expected columns with one marker per simulated SCR."""
    eda = nk.eda_simulate(duration=30, scr_number=5, drift=0.1, noise=0, sampling_rate=250)
    signals, info = nk.eda_process(eda, sampling_rate=250)

    assert signals.shape == (7500, 11)
    assert (
        np.array(
            [
                "EDA_Raw",
                "EDA_Clean",
                "EDA_Tonic",
                "EDA_Phasic",
                "SCR_Onsets",
                "SCR_Peaks",
                "SCR_Height",
                "SCR_Amplitude",
                "SCR_RiseTime",
                "SCR_Recovery",
                "SCR_RecoveryTime",
            ]
        )
        in signals.columns.values
    )

    # Check equal number of markers
    # (each of the 5 simulated SCRs yields one onset, one peak and one recovery).
    peaks = np.where(signals["SCR_Peaks"] == 1)[0]
    onsets = np.where(signals["SCR_Onsets"] == 1)[0]
    recovery = np.where(signals["SCR_Recovery"] == 1)[0]
    assert peaks.shape == onsets.shape == recovery.shape == (5,)
def test_eda_plot():
    """eda_plot should build 3 titled subplots, with an x-axis in samples or seconds."""
    sampling_rate = 1000
    eda = nk.eda_simulate(
        duration=30,
        sampling_rate=sampling_rate,
        scr_number=6,
        noise=0,
        drift=0.01,
        random_state=42,
    )
    eda_summary, _ = nk.eda_process(eda, sampling_rate=sampling_rate)

    # Plot data over samples.
    nk.eda_plot(eda_summary)
    # This will identify the latest figure.
    fig = plt.gcf()
    assert len(fig.axes) == 3
    titles = [
        "Raw and Cleaned Signal",
        "Skin Conductance Response (SCR)",
        "Skin Conductance Level (SCL)",
    ]
    for ax, title in zip(fig.get_axes(), titles):
        assert ax.get_title() == title
    assert fig.get_axes()[2].get_xlabel() == "Samples"
    np.testing.assert_array_equal(
        fig.axes[0].get_xticks(), fig.axes[1].get_xticks(), fig.axes[2].get_xticks()
    )
    plt.close(fig)

    # Plot data over seconds.
    nk.eda_plot(eda_summary, sampling_rate=sampling_rate)
    # This will identify the latest figure.
    fig = plt.gcf()
    assert fig.get_axes()[2].get_xlabel() == "Seconds"
def test_eda_eventrelated():
    """Epochs without an SCR should yield NaN for the SCR-dependent features."""
    eda = nk.eda_simulate(duration=15, scr_number=3)
    eda_signals, _ = nk.eda_process(eda, sampling_rate=1000)
    epochs = nk.epochs_create(
        eda_signals,
        events=[5000, 10000, 15000],
        sampling_rate=1000,
        epochs_start=-0.1,
        epochs_end=1.9,
    )
    eda_eventrelated = nk.eda_eventrelated(epochs)

    # An epoch with EDA_SCR == 0 (no SCR) has exactly 4 NaN feature values.
    no_activation = np.where(eda_eventrelated["EDA_SCR"] == 0)[0][0]
    assert int(pd.DataFrame(eda_eventrelated.values[no_activation]).isna().sum()) == 4
    assert len(eda_eventrelated["Label"]) == 3
def test_eda_intervalrelated():
    """Interval-related EDA features should be computed for whole signals and epoch dicts."""
    data = nk.data("bio_resting_8min_100hz")
    df, _ = nk.eda_process(data["EDA"], sampling_rate=100)

    expected = ["SCR_Peaks_N", "SCR_Peaks_Amplitude_Mean"]

    # Test with signal dataframe
    rez = nk.eda_intervalrelated(df)
    assert all(name in rez.columns.values for name in expected)
    assert rez.shape[0] == 1  # Number of rows

    # Test with dict
    expected.append("Label")
    epochs = nk.epochs_create(df, events=[0, 25300], sampling_rate=100, epochs_end=20)
    rez = nk.eda_intervalrelated(epochs)
    assert all(name in rez.columns.values for name in expected)
    assert rez.shape[0] == 2  # Number of rows
def test_eda_sympathetic():
    """The Posada sympathetic indexes should be scalar floats."""
    eda_signal = nk.data("bio_eventrelated_100hz")["EDA"]
    indexes = nk.eda_sympathetic(eda_signal, sampling_rate=100, method="posada")

    # Test value is float
    for key in ("EDA_Sympathetic", "EDA_SympatheticN"):
        assert isinstance(indexes[key], float)
def test_eda_findpeaks():
    """The nabian2018 and vanhalem2020 detectors should find approximately the same SCR peaks."""
    eda_signal = nk.data("bio_eventrelated_100hz")["EDA"]
    eda_cleaned = nk.eda_clean(eda_signal)
    eda = nk.eda_phasic(eda_cleaned)
    eda_phasic = eda["EDA_Phasic"].values

    # Find peaks
    nabian2018 = nk.eda_findpeaks(eda_phasic, sampling_rate=100, method="nabian2018")
    assert len(nabian2018["SCR_Peaks"]) == 9

    vanhalem2020 = nk.eda_findpeaks(eda_phasic, sampling_rate=100, method="vanhalem2020")
    # Compare only the overlapping peaks; positions should differ by less than
    # the mean signal amplitude.
    min_n_peaks = min(len(vanhalem2020), len(nabian2018))
    assert any(
        nabian2018["SCR_Peaks"][:min_n_peaks] - vanhalem2020["SCR_Peaks"][:min_n_peaks]
    ) < np.mean(eda_signal)
@pytest.mark.parametrize(
    "method_cleaning, method_phasic, method_peaks",
    [
        ("none", "cvxeda", "gamboa2008"),
        ("neurokit", "median", "nabian2018"),
    ],
)
def test_eda_report(tmp_path, method_cleaning, method_phasic, method_peaks):
    """eda_process should write an HTML report to the requested path for each method combination."""
    sampling_rate = 100

    eda = nk.eda_simulate(
        duration=30,
        sampling_rate=sampling_rate,
        scr_number=6,
        noise=0,
        drift=0.01,
        random_state=0,
    )

    # Write the report into a pytest-managed temporary directory.
    d = tmp_path / "sub"
    d.mkdir()
    p = d / "myreport.html"

    signals, _ = nk.eda_process(
        eda,
        sampling_rate=sampling_rate,
        method_cleaning=method_cleaning,
        method_phasic=method_phasic,
        method_peaks=method_peaks,
        report=str(p),
    )
    assert p.is_file()
| 8,687 | 29.808511 | 101 | py |
NeuroKit | NeuroKit-master/tests/tests_events.py | import matplotlib.pyplot as plt
import numpy as np
import pytest
import neurokit2 as nk
# =============================================================================
# Events
# =============================================================================
def test_events_find():
    """events_find should honour duration_min / inter_min and warn when nothing is found."""
    signal = np.cos(np.linspace(start=0, stop=20, num=1000))

    assert list(nk.events_find(signal)["onset"]) == [0, 236, 550, 864]
    assert list(nk.events_find(signal, duration_min=150)["onset"]) == [236, 550]
    assert list(nk.events_find(signal, inter_min=300)["onset"]) == [0, 550, 864]

    # No events found warning
    flat = np.zeros(1000)
    with pytest.warns(nk.misc.NeuroKitWarning, match=r'No events found.*'):
        nk.events_find(flat)
def test_events_to_mne():
    """Converting detected events to MNE format should yield the default event_id mapping."""
    signal = np.cos(np.linspace(start=0, stop=20, num=1000))
    detected = nk.events_find(signal)

    _, event_id = nk.events_to_mne(detected)
    assert event_id == {"event": 0}
def test_events_plot():
    """Plot events over a signal and check the legend entries for event lists and conditions."""
    signal = np.cos(np.linspace(start=0, stop=20, num=1000))
    events = nk.events_find(signal)
    nk.events_plot(events, signal)
    # Different events
    events1 = events["onset"]
    events2 = np.linspace(0, len(signal), 8)
    nk.events_plot([events1, events2], signal)
    fig = plt.gcf()
    for ax in fig.get_axes():
        handles, labels = ax.get_legend_handles_labels()
        # One legend entry per event line, plus one for the signal itself.
        assert len(handles) == len(events1) + len(events2) + 1
        assert len(labels) == len(handles)
    plt.close(fig)
    # Different conditions
    events = nk.events_find(signal, event_conditions=["A", "B", "A", "B"])
    nk.events_plot(events, signal)
    fig = plt.gcf()
    for ax in fig.get_axes():
        handles, labels = ax.get_legend_handles_labels()
        assert len(handles) == len(events) + 1
        assert len(labels) == len(handles)
    plt.close(fig)
| 1,897 | 26.911765 | 79 | py |
NeuroKit | NeuroKit-master/tests/tests_complexity.py | from collections.abc import Iterable
import antropy
import nolds
import numpy as np
import pandas as pd
from pyentrp import entropy as pyentrp
import sklearn.neighbors
from packaging import version
# import EntropyHub
import neurokit2 as nk
# For the testing of complexity, we test our implementations against existing and established ones.
# However, some of these other implementations are not really packaged in a way
# SO THAT we can easily import them. Thus, we directly copied their content in this file
# (below the tests).
# =============================================================================
# Some sanity checks
# =============================================================================
def test_complexity_sanity():
    """Internal consistency checks for entropy and fractal-dimension functions."""
    signal = np.cos(np.linspace(start=0, stop=30, num=1000))
    mdfa_q = [-5, -3, -1, 1, 3, 5]
    # Entropy
    assert np.allclose(
        nk.entropy_fuzzy(signal)[0], nk.entropy_sample(signal, fuzzy=True)[0], atol=0.000001
    )
    # Fractal
    fractal_dfa, parameters = nk.fractal_dfa(signal, scale=np.array([4, 8, 12, 20]))
    assert parameters["Fluctuations"].shape == (4, 1)
    assert np.allclose(fractal_dfa, 2.10090484, atol=0.0001)
    _, parameters = nk.fractal_dfa(signal, multifractal=True, q=mdfa_q)
    # TODO: why this gives 70 or 71 depending on the machine????
    # assert parameters["Fluctuations"].shape == (70, len(mdfa_q))
    assert np.allclose(nk.fractal_correlation(signal)[0], 0.7382138350901658, atol=0.000001)
    assert np.allclose(
        nk.fractal_correlation(signal, radius="nolds")[0], nolds.corr_dim(signal, 2), atol=0.01
    )
# =============================================================================
# Comparison against R
# =============================================================================
# R code:
#
# library(TSEntropies)
# library(pracma)
#
# signal <- read.csv("https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/bio_eventrelated_100hz.csv")$RSP
# r <- 0.2 * sd(signal)
# ApEn --------------------------------------------------------------------
# TSEntropies::ApEn(signal, dim=2, lag=1, r=r)
# 0.04383386
# TSEntropies::ApEn(signal, dim=3, lag=2, r=1)
# 0.0004269369
# pracma::approx_entropy(signal[1:200], edim=2, r=r, elag=1)
# 0.03632554
# SampEn ------------------------------------------------------------------
# TSEntropies::SampEn(signal[1:300], dim=2, lag=1, r=r)
# 0.04777648
# TSEntropies::FastSampEn(signal[1:300], dim=2, lag=1, r=r)
# 0.003490405
# pracma::sample_entropy(signal[1:300], edim=2, tau=1, r=r)
# 0.03784376
# pracma::sample_entropy(signal[1:300], edim=3, tau=2, r=r)
# 0.09185509
def test_complexity_vs_R():
    """Compare ApEn/SampEn against the reference values computed in R (see comments above)."""
    # NOTE(review): downloads the example data over the network at test time.
    signal = pd.read_csv(
        "https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/bio_eventrelated_100hz.csv"
    )["RSP"].values
    r = 0.2 * np.std(signal, ddof=1)
    # ApEn
    apen = nk.entropy_approximate(signal, dimension=2, tolerance=r)[0]
    assert np.allclose(apen, 0.04383386, atol=0.0001)
    apen = nk.entropy_approximate(signal, dimension=3, delay=2, tolerance=1)[0]
    assert np.allclose(apen, 0.0004269369, atol=0.0001)
    apen = nk.entropy_approximate(signal[0:200], dimension=2, delay=1, tolerance=r)[0]
    assert np.allclose(apen, 0.03632554, atol=0.0001)
    # SampEn
    sampen = nk.entropy_sample(signal[0:300], dimension=2, tolerance=r)[0]
    assert np.allclose(
        sampen,
        nk.entropy_sample(signal[0:300], dimension=2, tolerance=r, distance="infinity")[0],
        atol=0.001,
    )
    assert np.allclose(sampen, 0.03784376, atol=0.001)
    sampen = nk.entropy_sample(signal[0:300], dimension=3, delay=2, tolerance=r)[0]
    assert np.allclose(sampen, 0.09185509, atol=0.01)
# =============================================================================
# Comparison against Python implementations
# =============================================================================
def test_complexity_vs_Python():
    """Compare NeuroKit complexity indices against other Python implementations
    (pyentrp, antropy, nolds, pyeeg/entro-py reference copies below)."""
    signal = np.cos(np.linspace(start=0, stop=30, num=100))
    tolerance = 0.2 * np.std(signal, ddof=1)
    # Shannon
    shannon = nk.entropy_shannon(signal)[0]
    # assert scipy.stats.entropy(shannon, pd.Series(signal).value_counts())
    assert np.allclose(shannon, pyentrp.shannon_entropy(signal))
    # Approximate
    assert np.allclose(nk.entropy_approximate(signal)[0], 0.17364897858477146)
    # EntropyHub doens't work because of PyEMD
    # assert np.allclose(
    # nk.entropy_approximate(signal, dimension=2, tolerance=tolerance)[0],
    # EntropyHub.ApEn(signal, m=2, tau=1, r=tolerance)[0][2],
    # )
    assert np.allclose(
        nk.entropy_approximate(signal, dimension=2, tolerance=tolerance)[0],
        entropy_app_entropy(signal, 2),
    )
    # The PyEEG reference is known to diverge (inequality asserted on purpose).
    assert nk.entropy_approximate(signal, dimension=2, tolerance=tolerance)[0] != pyeeg_ap_entropy(
        signal, 2, tolerance
    )
    # Sample
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, tolerance=tolerance)[0],
        entropy_sample_entropy(signal, 2),
    )
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, tolerance=0.2)[0],
        nolds.sampen(signal, 2, 0.2),
    )
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, tolerance=0.2)[0],
        entro_py_sampen(signal, 2, 0.2, scale=False),
    )
    assert np.allclose(
        nk.entropy_sample(signal, dimension=2, tolerance=0.2)[0],
        pyeeg_samp_entropy(signal, 2, 0.2),
    )
    # assert np.allclose(
    # nk.entropy_sample(signal, dimension=2, tolerance=0.2)[0],
    # EntropyHub.SampEn(signal, m=2, tau=1, r=0.2)[0][2],
    # )
    # import sampen
    # sampen.sampen2(signal[0:300], mm=2, r=r)
    assert (
        nk.entropy_sample(signal, dimension=2, tolerance=0.2)[0]
        != pyentrp.sample_entropy(signal, 2, 0.2)[1]
    )
    assert (
        nk.entropy_sample(signal, dimension=2, tolerance=0.2 * np.sqrt(np.var(signal)))[0]
        != MultiscaleEntropy_sample_entropy(signal, 2, 0.2)[0.2][2]
    )
    # MSE
    # assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal)))
    # != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type="list"))
    # assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1))
    # != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10))
    # Fuzzy
    assert np.allclose(
        nk.entropy_fuzzy(signal, dimension=2, tolerance=0.2, delay=1)[0]
        - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False),
        0,
    )
    # Lempel Ziv Complexity
    threshold = np.nanmedian(signal)
    binary = np.zeros(len(signal))
    binary[signal > threshold] = 1
    assert np.allclose(
        nk.complexity_lempelziv(signal, symbolize="median", normalize=True)[0]
        - antropy.lziv_complexity(binary, normalize=True),
        0,
    )
    # Katz
    assert np.allclose(nk.fractal_katz(signal)[0] - antropy.katz_fd(signal), 0)
    # # DFA
    # assert np.allclose(nk.fractal_dfa(signal, windows=np.array([4, 8, 12, 20]))['slopes'][0], nolds.dfa(
    # signal, nvals=[4, 8, 12, 20], fit_exp="poly"), atol=0.01
    # )
# =============================================================================
# Wikipedia
# =============================================================================
def wikipedia_sampen(signal, m=2, r=1):
    """Reference sample entropy (SampEn) following the Wikipedia pseudo-code.

    Returns -log(A / B) where B counts length-m template matches (minus
    self-matches) and A counts length-(m+1) matches, using the Chebyshev
    distance with tolerance `r`.
    """
    n_samples = len(signal)
    # Length-m templates; note the asymmetric ranges of the two sets, kept as-is.
    templates_m = np.array([signal[start : start + m] for start in range(n_samples - m)])
    templates_m_full = np.array([signal[start : start + m] for start in range(n_samples - m + 1)])
    matches_b = 0.0
    for row in templates_m:
        matches_b += np.sum(np.abs(row - templates_m_full).max(axis=1) <= r) - 1
    # Length-(m+1) templates.
    m_plus = m + 1
    templates_m1 = np.array([signal[start : start + m_plus] for start in range(n_samples - m_plus + 1)])
    matches_a = 0.0
    for row in templates_m1:
        matches_a += np.sum(np.abs(row - templates_m1).max(axis=1) <= r) - 1
    return -np.log(matches_a / matches_b)
# =============================================================================
# entropy_estimators (https://github.com/paulbrodersen/entropy_estimators)
# =============================================================================
# import numpy as np
# from entropy_estimators import continuous
#
# x = np.random.randn(10000)
#
# # I don't know what this compute though
# continuous.get_h_mvn(x)
# continuous.get_h(x, k=5)
# =============================================================================
# Pyeeg
# =============================================================================
def pyeeg_embed_seq(time_series, tau, embedding_dimension):
    """Delay-embedding matrix of a 1-D series, returned as a zero-copy strided view.

    Row i is [x[i], x[i + tau], ..., x[i + tau * (embedding_dimension - 1)]].
    """
    if isinstance(time_series, np.ndarray):
        series = time_series
    else:
        series = np.asarray(time_series)
    n_vectors = series.size - tau * (embedding_dimension - 1)
    out_shape = (n_vectors, embedding_dimension)
    out_strides = (series.itemsize, tau * series.itemsize)
    return np.lib.stride_tricks.as_strided(series, shape=out_shape, strides=out_strides)
def pyeeg_bin_power(X, Band, Fs):
    """Spectral power per frequency band, plus each band's ratio to the total.

    `Band` lists the band edges in Hz; band k spans [Band[k], Band[k+1]).
    Frequencies are mapped to FFT bins via floor(freq / Fs * len(X)).
    """
    spectrum = np.abs(np.fft.fft(X))
    n_samples = len(X)
    n_bands = len(Band) - 1
    Power = np.zeros(n_bands)
    for band in range(n_bands):
        lo = int(np.floor(float(Band[band]) / Fs * n_samples))
        hi = int(np.floor(float(Band[band + 1]) / Fs * n_samples))
        Power[band] = np.sum(spectrum[lo:hi])
    Power_Ratio = Power / np.sum(Power)
    return Power, Power_Ratio
def pyeeg_ap_entropy(X, M, R):
    """Approximate entropy (PyEEG reference implementation, kept verbatim for comparison)."""
    N = len(X)
    Em = pyeeg_embed_seq(X, 1, M)
    # Pairwise Chebyshev distances between all length-M templates.
    A = np.tile(Em, (len(Em), 1, 1))
    B = np.transpose(A, [1, 0, 2])
    D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
    InRange = np.max(D, axis=2) <= R
    # Probability that random M-sequences are in range
    Cm = InRange.mean(axis=0)
    # M+1-sequences in range if M-sequences are in range & last values are close
    Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)
    Cmp = np.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)
    Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))
    Ap_En = (Phi_m - Phi_mp) / (N - M)
    return Ap_En
def pyeeg_samp_entropy(X, M, R):
    """Sample entropy (PyEEG reference implementation, kept verbatim for comparison)."""
    N = len(X)
    # Drop the last template so that M and M+1 counts align.
    Em = pyeeg_embed_seq(X, 1, M)[:-1]
    A = np.tile(Em, (len(Em), 1, 1))
    B = np.transpose(A, [1, 0, 2])
    D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
    InRange = np.max(D, axis=2) <= R
    np.fill_diagonal(InRange, 0) # Don't count self-matches
    # Probability that random M-sequences are in range
    Cm = InRange.sum(axis=0)
    Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)
    Cmp = np.logical_and(Dp <= R, InRange).sum(axis=0)
    # Avoid taking log(0)
    Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))
    return Samp_En
# =============================================================================
# Entropy
# =============================================================================
def entropy_embed(x, order=3, delay=1):
    """Time-delay embedding: each returned row is a length-`order` delay vector of `x`.

    Raises ValueError for invalid `order`/`delay` combinations (same messages
    as the original antropy-style reference).
    """
    n_samples = len(x)
    if order * delay > n_samples:
        raise ValueError("Error: order * delay should be lower than x.size")
    if delay < 1:
        raise ValueError("Delay has to be at least 1.")
    if order < 2:
        raise ValueError("Order has to be at least 2.")
    n_vectors = n_samples - (order - 1) * delay
    rows = np.zeros((order, n_vectors))
    for dim in range(order):
        start = dim * delay
        rows[dim] = x[start : start + n_vectors]
    return rows.T
def entropy_app_samp_entropy(x, order, metric="chebyshev", approximate=True):
    """Shared phi statistics for approximate/sample entropy (antropy-style reference).

    Returns the two phi values for embedding dimensions `order` and `order + 1`,
    using a KDTree radius search with tolerance r = 0.2 * SD(x). With
    `approximate=True` the phi values are the ApEn log-averages; otherwise
    they are the SampEn match ratios (self-matches excluded).
    """
    # sklearn >= 1.3 turned KDTree.valid_metrics into a method.
    sklearn_version = version.parse(sklearn.__version__)
    if sklearn_version >= version.parse("1.3.0"):
        _all_metrics = sklearn.neighbors.KDTree.valid_metrics()
    else:
        _all_metrics = sklearn.neighbors.KDTree.valid_metrics
    if metric not in _all_metrics:
        raise ValueError(
            "The given metric (%s) is not valid. The valid " # pylint: disable=consider-using-f-string
            "metric names are: %s" % (metric, _all_metrics)
        )
    phi = np.zeros(2)
    r = 0.2 * np.std(x, axis=-1, ddof=1)
    # compute phi(order, r)
    _emb_data1 = entropy_embed(x, order, 1)
    if approximate:
        emb_data1 = _emb_data1
    else:
        # Sample entropy drops the last template so both counts align.
        emb_data1 = _emb_data1[:-1]
    count1 = (
        sklearn.neighbors.KDTree(emb_data1, metric=metric)
        .query_radius(emb_data1, r, count_only=True)
        .astype(np.float64)
    )
    # compute phi(order + 1, r)
    emb_data2 = entropy_embed(x, order + 1, 1)
    count2 = (
        sklearn.neighbors.KDTree(emb_data2, metric=metric)
        .query_radius(emb_data2, r, count_only=True)
        .astype(np.float64)
    )
    if approximate:
        phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
        phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
    else:
        # Subtract 1 to discard each template's self-match.
        phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
        phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
    return phi
def entropy_app_entropy(x, order=2, metric="chebyshev"):
    """Approximate entropy: phi(order) - phi(order + 1)."""
    phi_m, phi_m1 = entropy_app_samp_entropy(x, order=order, metric=metric, approximate=True)
    return np.subtract(phi_m, phi_m1)
def entropy_sample_entropy(x, order=2, metric="chebyshev"):
    """Sample entropy: -log(phi(order + 1) / phi(order))."""
    series = np.asarray(x, dtype=np.float64)
    phi = entropy_app_samp_entropy(series, order=order, metric=metric, approximate=False)
    return -np.log(np.divide(phi[1], phi[0]))
# =============================================================================
# entro-py
# =============================================================================
def entro_py_sampen(x, dim, r, scale=True):
    """Sample entropy of `x` (entro-py reference implementation)."""
    return entro_py_entropy(x, dim, r, scale=scale)
def entro_py_cross_sampen(x1, x2, dim, r, scale=True):
    """Cross-sample entropy between `x1` and `x2` (entro-py reference implementation)."""
    return entro_py_entropy([x1, x2], dim, r, scale)
def entro_py_fuzzyen(x, dim, r, n, scale=True):
    """Fuzzy entropy of `x` with similarity exponent `n` (entro-py reference implementation)."""
    return entro_py_entropy(x, dim, r, n=n, scale=scale, remove_baseline=True)
def entro_py_cross_fuzzyen(x1, x2, dim, r, n, scale=True):
    """Cross-fuzzy entropy between `x1` and `x2` (entro-py reference implementation)."""
    return entro_py_entropy([x1, x2], dim, r, n, scale=scale, remove_baseline=True)
def entro_py_pattern_mat(x, m):
    """Matrix whose columns are the length-`m` templates of `x`.

    When m == 1 a flat copy of `x` is returned instead of a matrix,
    matching the entro-py reference behavior.
    """
    flat = np.asarray(x).ravel()
    if m == 1:
        return flat
    n_samples = len(flat)
    n_patterns = n_samples - m + 1
    patterns = np.zeros((m, n_patterns))
    for offset in range(m):
        patterns[offset, :] = flat[offset : offset + n_patterns]
    return patterns
def entro_py_entropy(x, dim, r, n=1, scale=True, remove_baseline=False):
    """Core entro-py routine: (cross-)sample entropy, or fuzzy entropy when
    `remove_baseline` is True (exponential similarity with exponent `n`).

    `x` may be a single sequence, or a two-element list for the cross variants.
    Returns log(phi(dim) / phi(dim + 1)).
    """
    fuzzy = remove_baseline
    cross = isinstance(x, list)
    N = len(x[0]) if cross else len(x)
    if scale:
        if cross:
            x = [entro_py_scale(np.copy(x[0])), entro_py_scale(np.copy(x[1]))]
        else:
            x = entro_py_scale(np.copy(x))
    phi = [0, 0] # phi(m), phi(m+1)
    for j in [0, 1]:
        m = dim + j
        npat = N - dim # https://github.com/ixjlyons/entro-py/pull/2/files
        if cross:
            # patterns = [entro_py_pattern_mat(x[0], m), entro_py_pattern_mat(x[1], m)]
            patterns = [
                entro_py_pattern_mat(x[0], m)[:, :npat],
                entro_py_pattern_mat(x[1], m)[:, :npat],
            ] # https://github.com/ixjlyons/entro-py/pull/2/files
        else:
            # patterns = entro_py_pattern_mat(x, m)
            patterns = entro_py_pattern_mat(x, m)[:, :npat]
        if remove_baseline:
            if cross:
                patterns[0] = entro_py_remove_baseline(patterns[0], axis=0)
                patterns[1] = entro_py_remove_baseline(patterns[1], axis=0)
            else:
                patterns = entro_py_remove_baseline(patterns, axis=0)
        # count = np.zeros(N-m) # https://github.com/ixjlyons/entro-py/pull/2/files
        # for i in range(N-m): # https://github.com/ixjlyons/entro-py/pull/2/files
        count = np.zeros(npat)
        for i in range(npat):
            # Chebyshev distance of every template to template i, then a
            # similarity count (crisp for SampEn, exponential for FuzzyEn).
            if cross:
                if m == 1:
                    sub = patterns[1][i]
                else:
                    sub = patterns[1][:, [i]]
                dist = np.max(np.abs(patterns[0] - sub), axis=0)
            else:
                if m == 1:
                    sub = patterns[i]
                else:
                    sub = patterns[:, [i]]
                dist = np.max(np.abs(patterns - sub), axis=0)
            if fuzzy:
                sim = np.exp(-np.power(dist, n) / r)
            else:
                sim = dist < r
            count[i] = np.sum(sim) - 1
        # phi[j] = np.mean(count) / (N-m-1)
        # https://github.com/ixjlyons/entro-py/pull/2/files
        phi[j] = np.mean(count) / (N - dim - 1)
    return np.log(phi[0] / phi[1])
def entro_py_scale(x, axis=None):
    """Z-score `x` in place (mean removed, then divided by the sample std) and return it."""
    x -= np.mean(x, axis=axis, keepdims=True)
    x /= np.std(x, ddof=1, axis=axis, keepdims=True)
    return x
def entro_py_remove_baseline(x, axis=None):
    """Subtract the mean from `x` in place and return the same array."""
    baseline = np.mean(x, axis=axis, keepdims=True)
    x -= baseline
    return x
# =============================================================================
# MultiscaleEntropy https://github.com/reatank/MultiscaleEntropy/blob/master/MultiscaleEntropy/mse.py
# =============================================================================
def MultiscaleEntropy_init_return_type(return_type):
    """Return the empty container matching `return_type`: {} for 'dict', [] otherwise."""
    return {} if return_type == "dict" else []
def MultiscaleEntropy_check_type(x, num_type, name):
    """Coerce `x` into a list of `num_type` values.

    A bare `num_type` instance becomes a one-element list; an iterable is
    copied element by element. Raises ValueError (mentioning `name`) for a
    non-iterable of the wrong type or for an iterable containing one.
    """
    message = (
        name + " should be a " + num_type.__name__ + " or an iterator of " + num_type.__name__
    )
    if isinstance(x, num_type):
        return [x]
    if not isinstance(x, Iterable):
        raise ValueError(message)
    values = []
    for item in x:
        # Append before checking, mirroring the original's element-wise scan.
        values.append(item)
        if not isinstance(item, num_type):
            raise ValueError(message)
    return values
# Coarse-graining: averages of separate, consecutive intervals of x.
def MultiscaleEntropy_coarse_grain(x, scale_factor):
    """Coarse-grain `x`: mean over consecutive windows of `scale_factor` samples.

    A trailing partial window is zero-padded, but its average is still taken
    over the full `scale_factor` (matching the original implementation).
    """
    values = np.array(x)
    n_samples = len(values)
    n_windows = int(n_samples / scale_factor)
    if n_samples % scale_factor:
        n_windows += 1
    padded = np.zeros(n_windows * scale_factor)
    padded[:n_samples] = values
    windows = np.reshape(padded, (n_windows, scale_factor))
    return np.reshape(np.sum(windows, axis=1), (-1)) / scale_factor
def MultiscaleEntropy_sample_entropy(
    x, m=[2], r=[0.15], sd=None, return_type="dict", safe_mode=False
):
    """Sample entropy with threshold r * sd (reference implementation).

    Arguments:
        x: an iterable of numbers.
        m: list of embedding dimensions.
        r: list of tolerance fractions (the threshold is r * sd).
        sd: standard deviation of x; computed from x when None.
        return_type: 'dict' (nested as [r][m]) or 'list' (nested as [i][j]
            over the lengths of r and m).
        safe_mode: skip type checking when True.

    Raises:
        ValueError: on invalid inputs or when max(m) exceeds len(x).

    NOTE(review): the mutable default arguments (m=[2], r=[0.15]) are kept
    for compatibility with the copied reference code; they are never mutated.
    """
    # type checking
    if not safe_mode:
        m = MultiscaleEntropy_check_type(m, int, "m")
        r = MultiscaleEntropy_check_type(r, float, "r")
        if not (sd is None) and not isinstance(sd, (float, int)):
            raise ValueError("sd should be a number")
    try:
        x = np.array(x)
    except Exception as exc:
        raise ValueError("x should be a sequence of numbers") from exc
    # value checking
    if len(x) < max(m):
        raise ValueError("the max m is bigger than x's length")
    # initialization
    if sd is None:
        sd = np.sqrt(np.var(x))
    ans = MultiscaleEntropy_init_return_type(return_type)
    # calculation
    for i, rr in enumerate(r):
        threshold = rr * sd
        if return_type == "dict":
            ans[rr] = MultiscaleEntropy_init_return_type(return_type)
        else:
            ans.append(MultiscaleEntropy_init_return_type(return_type))
        count = {}
        tmp_m = []
        # Matches are needed for every mm and mm + 1 (the SampEn ratio).
        for mm in m:
            tmp_m.append(mm)
            tmp_m.append(mm + 1)
        tmp_m = list(set(tmp_m))
        for mm in tmp_m:
            count[mm] = 0
        # Scan all lags j; each run of `cont` consecutive close pairs
        # contributes (cont - mm + 1) template matches of length mm.
        for j in range(1, len(x) - min(m) + 1):
            cont = 0
            for inc in range(0, len(x) - j):
                if abs(x[inc] - x[j + inc]) < threshold:
                    cont += 1
                elif cont > 0:
                    for mm in tmp_m:
                        tmp = cont - mm + 1
                        count[mm] += tmp if tmp > 0 else 0
                    cont = 0
            if cont > 0:
                for mm in tmp_m:
                    tmp = cont - mm + 1
                    count[mm] += tmp if tmp > 0 else 0
        for mm in m:
            if count[mm + 1] == 0 or count[mm] == 0:
                # No matches: fall back to the theoretical upper bound.
                t = len(x) - mm + 1
                tmp = -np.log(1 / (t * (t - 1)))
            else:
                tmp = -np.log(count[mm + 1] / count[mm])
            if return_type == "dict":
                ans[rr][mm] = tmp
            else:
                ans[i].append(tmp)
    return ans
def MultiscaleEntropy_mse(
    x, scale_factor=list(range(1, 21)), m=[2], r=[0.15], return_type="dict", safe_mode=False
):
    """Multiscale entropy: sample entropy of coarse-grained versions of `x`.

    Arguments:
        x: an iterable of numbers.
        scale_factor: list of coarse-graining scales.
        m: list of embedding dimensions.
        r: list of tolerance fractions.
        return_type: 'dict' (nested as [scale_factor][r][m]) or 'list'.
        safe_mode: skip type checking when True.

    Raises:
        ValueError: on invalid inputs or when max(scale_factor) exceeds len(x).

    NOTE(review): mutable default arguments kept for compatibility with the
    copied reference code; they are never mutated here.
    """
    # type checking
    if not safe_mode:
        m = MultiscaleEntropy_check_type(m, int, "m")
        r = MultiscaleEntropy_check_type(r, float, "r")
        scale_factor = MultiscaleEntropy_check_type(scale_factor, int, "scale_factor")
    try:
        x = np.array(x)
    except Exception as exc:
        raise ValueError("x should be a sequence of numbers") from exc
    # value checking
    if max(scale_factor) > len(x):
        raise ValueError("the max scale_factor is bigger than x's length")
    # calculation
    sd = np.sqrt(np.var(x))
    ms_en = MultiscaleEntropy_init_return_type(return_type)
    for s_f in scale_factor:
        y = MultiscaleEntropy_coarse_grain(x, s_f)
        if return_type == "dict":
            ms_en[s_f] = MultiscaleEntropy_sample_entropy(y, m, r, sd, "dict", True)
        else:
            ms_en.append(MultiscaleEntropy_sample_entropy(y, m, r, sd, "list", True))
    if return_type == "list":
        # Two unwraps: with single-element m and r this flattens the nesting
        # down to one value per scale factor.
        ms_en = [i[0] for i in ms_en]
        ms_en = [i[0] for i in ms_en]
    return ms_en
| 23,316 | 32.94032 | 127 | py |
NeuroKit | NeuroKit-master/tests/tests_stats.py | import numpy as np
import pandas as pd
import neurokit2 as nk
# =============================================================================
# Stats
# =============================================================================
def test_standardize():
    """Standardized data should sum to ~0; the robust variant matches a reference value."""
    for data in ([1, 1, 5, 2, 1], np.array([1, 1, 5, 2, 1]), pd.Series([1, 1, 5, 2, 1])):
        assert np.allclose(np.sum(nk.standardize(data)), 0, atol=0.0001)
    robust_sum = np.sum(nk.standardize([1, 1, 5, 2, 1, 5, 1, 7], robust=True))
    assert np.allclose(robust_sum, 14.8387, atol=0.001)
def test_fit_loess():
    """A LOESS fit of a cosine should reproduce the reference residual mean."""
    cosine = np.cos(np.linspace(start=0, stop=10, num=1000))
    fitted, _ = nk.fit_loess(cosine, alpha=0.75)
    residual_mean = np.mean(cosine - fitted)
    assert np.allclose(residual_mean, -0.0201905899, atol=0.0001)
def test_mad():
    """Median absolute deviation: zero for constants, scales with `constant`, sign-invariant."""
    assert nk.mad([0] * 10) == 0
    # Example from the Wikipedia MAD article: the raw MAD is 1, so the
    # scaled value equals the constant itself.
    wikipedia_example = np.array([1, 1, 2, 2, 4, 6, 9])
    scaling = 1.42
    assert nk.mad(wikipedia_example, constant=scaling) == scaling
    assert nk.mad(-wikipedia_example, constant=scaling) == scaling
def create_sample_cluster_data(random_state):
    """Generate 2-D points grouped into K=5 rotated clusters; return (data, true centres)."""
    rng = nk.misc.check_random_state(random_state)
    # generate simple sample data
    K = 5
    # Five-point template (a centre plus four corners) used for every cluster.
    points = np.array([[0., 0.], [-0.3, -0.3], [0.3, -0.3], [0.3, 0.3], [-0.3, 0.3]])
    # Distinct integer centres on a KxK grid.
    centres = np.column_stack((rng.choice(K, size=K, replace=False), rng.choice(K, size=K, replace=False)))
    angles = rng.uniform(0, 2 * np.pi, size=K)
    offset = rng.uniform(size=2)
    # place a cluster at each centre
    data = []
    for i in range(K):
        rotation = np.array([[np.cos(angles[i]), np.sin(angles[i])], [-np.sin(angles[i]), np.cos(angles[i])]])
        data.extend(centres[i] + points @ rotation)
    rng.shuffle(data)
    # shift both data and target centres
    data = np.vstack(data) + offset
    centres = centres + offset
    return data, centres
def test_kmedoids():
    """K-medoids clustering should recover the generated cluster centres."""
    # Fixed seeds keep both data generation and clustering reproducible.
    data, centres = create_sample_cluster_data(33)
    n_clusters = len(centres)
    res = nk.cluster(data, method='kmedoids', n_clusters=n_clusters, random_state=77)
    # Row order is arbitrary, so sort both sets of centres before comparing.
    found = res[1]
    assert np.allclose(found[np.lexsort(found.T)], centres[np.lexsort(centres.T)])
def test_kmeans():
    """K-means clustering should recover the generated cluster centres."""
    # Fixed seeds keep both data generation and clustering reproducible.
    data, centres = create_sample_cluster_data(54)
    n_clusters = len(centres)
    res = nk.cluster(data, method='kmeans', n_clusters=n_clusters, n_init=1, random_state=76)
    # Row order is arbitrary, so sort both sets of centres before comparing.
    found = res[1]
    assert np.allclose(found[np.lexsort(found.T)], centres[np.lexsort(centres.T)])
| 3,101 | 28.542857 | 110 | py |
NeuroKit | NeuroKit-master/tests/tests_epochs.py | import numpy as np
import neurokit2 as nk
def test_epochs_create():
    """Epoch slicing: lengths, column names, event metadata, and full-vs-subset consistency."""
    # Get data
    data = nk.data("bio_eventrelated_100hz")
    # Find events
    events = nk.events_find(data["Photosensor"], threshold_keep='below',
                            event_conditions=["Negative", "Neutral", "Neutral", "Negative"])
    # Create epochs
    epochs_1 = nk.epochs_create(data, events, sampling_rate=100,
                                epochs_start=-0.5, epochs_end=3)
    epochs_2 = nk.epochs_create(data, events, sampling_rate=100,
                                epochs_start=-0.5, epochs_end=1)
    # Test lengths and column names
    assert len(epochs_1) == 4
    columns = ['ECG', 'EDA', 'Photosensor', 'RSP', 'Index', 'Label', 'Condition']
    assert all(elem in columns for elem
               in np.array(epochs_1['1'].columns.values, dtype=str))
    # Test corresponding event features in epochs
    condition_names = []
    for i in epochs_1:
        cond = np.unique(epochs_1[i].Condition)[0]
        condition_names.append(cond)
        # Each epoch must contain its own event onset sample.
        assert events['onset'][int(i)-1] in np.array(epochs_1[i].Index)
    assert events['condition'] == condition_names
    # Test full vs subsetted epochs
    for i, j in zip(epochs_1, epochs_2):
        epoch_full = epochs_2[str(j)]
        epoch_subset = epochs_1[str(i)].loc[-0.5:1]
        assert len(epoch_full) == len(epoch_subset)
        for col in epoch_full.columns:
            assert all(np.array(epoch_subset[col]) == np.array(epoch_full[col]))
| 1,507 | 33.272727 | 92 | py |
NeuroKit | NeuroKit-master/tests/tests_signal_fixpeaks.py | # -*- coding: utf-8 -*-
import numpy as np
import numpy.random
import pytest
import neurokit2 as nk
from neurokit2.signal.signal_fixpeaks import _correct_artifacts, _find_artifacts, signal_fixpeaks
def compute_rmssd(peaks):
    """Root mean square of successive differences (RMSSD) of `peaks`.

    The first (undefined) difference is replaced by the mean of the others
    before squaring, as in the original helper.
    """
    intervals = np.ediff1d(peaks, to_begin=0)
    intervals[0] = np.mean(intervals[1:])
    return np.sqrt(np.mean(np.square(intervals)))
@pytest.fixture
def n_peaks():
    """Total number of simulated peaks."""
    return 1000
@pytest.fixture
def k_peaks():
    """Spacing between artifacts: every k-th peak is distorted."""
    return 100
@pytest.fixture
def artifact_idcs(k_peaks, n_peaks):
    """Indices at which artifacts are introduced (every k_peaks-th peak)."""
    idcs = np.arange(k_peaks, n_peaks, k_peaks)
    return idcs
@pytest.fixture
def peaks_correct(n_peaks):
    """Simulated artifact-free peak positions with sinusoidally modulated periods."""
    # Simulate sinusoidally changing heart periods.
    rr = np.sin(np.arange(n_peaks))
    # Add some noise.
    rng = numpy.random.default_rng(42)
    rr_noisy = rng.normal(rr, 0.1)
    # Scale to range of 250msec and offset by 1000msec. I.e., heart period
    # fluctuates in a range of 250msec around 1000msec.
    rr_scaled = 1000 + rr_noisy * 125
    peaks = np.cumsum(np.rint(rr_scaled)).astype(int)
    return peaks
@pytest.fixture
def peaks_misaligned(request, peaks_correct, artifact_idcs):
    """Correct peaks with each artifact peak shifted earlier by request.param * RMSSD."""
    rmssd = compute_rmssd(peaks_correct)
    displacement = request.param * rmssd
    peaks_misaligned = peaks_correct.copy()
    peaks_misaligned[artifact_idcs] = peaks_misaligned[artifact_idcs] - displacement
    return peaks_misaligned
@pytest.fixture
def peaks_missed(peaks_correct, artifact_idcs):
    """Correct peaks with the peaks at `artifact_idcs` deleted (missed beats)."""
    peaks_missed = peaks_correct.copy()
    peaks_missed = np.delete(peaks_missed, artifact_idcs)
    return peaks_missed
@pytest.fixture
def peaks_extra(peaks_correct, artifact_idcs):
    """Correct peaks with a spurious extra peak inserted at 1/15 of the interval after each artifact index."""
    extra_peaks = (peaks_correct[artifact_idcs + 1] - peaks_correct[artifact_idcs]) / 15 + peaks_correct[artifact_idcs]
    peaks_extra = peaks_correct.copy()
    peaks_extra = np.insert(peaks_extra, artifact_idcs, extra_peaks)
    return peaks_extra
@pytest.fixture
def artifacts_misaligned(artifact_idcs):
    """Expected artifact classification for the misaligned peaks fixture."""
    artifacts = {"ectopic": list(artifact_idcs + 1), "missed": [], "extra": [], "longshort": list(artifact_idcs)}
    return artifacts
@pytest.fixture
def artifacts_missed(artifact_idcs):
    """Expected artifact classification for the peaks-with-missed-beats fixture."""
    missed_idcs = [
        j - i for i, j in enumerate(artifact_idcs)
    ] # account for the fact that peak indices are shifted to the left after deletion of peaks
    artifacts = {"ectopic": [], "missed": missed_idcs, "extra": [], "longshort": []}
    return artifacts
@pytest.fixture
def artifacts_extra(artifact_idcs):
    """Expected artifact classification for the peaks-with-extra-beats fixture."""
    extra_idcs = [
        j + (i + 1) for i, j in enumerate(artifact_idcs)
    ] # account for the fact that peak indices are shifted to the right after insertion of peaks
    artifacts = {"ectopic": [], "missed": [], "extra": extra_idcs, "longshort": []}
    return artifacts
@pytest.mark.parametrize("peaks_misaligned", [2, 4, 8], indirect=["peaks_misaligned"])
def test_misaligned_detection(peaks_misaligned, artifacts_misaligned):
    """Misaligned peaks should be classified as ectopic/longshort artifacts."""
    artifacts, _ = _find_artifacts(peaks_misaligned, sampling_rate=1)
    assert artifacts == artifacts_misaligned # check for identical key-value pairs
def test_missed_detection(peaks_missed, artifacts_missed):
    """Deleted peaks should be classified as 'missed' artifacts."""
    artifacts, _ = _find_artifacts(peaks_missed, sampling_rate=1)
    assert artifacts == artifacts_missed
def test_extra_detection(peaks_extra, artifacts_extra):
    """Inserted peaks should be classified as 'extra' artifacts."""
    artifacts, _ = _find_artifacts(peaks_extra, sampling_rate=1)
    assert artifacts == artifacts_extra
@pytest.mark.parametrize("peaks_misaligned", [2, 4, 8], indirect=["peaks_misaligned"])
def test_misaligned_correction(peaks_misaligned, artifacts_misaligned):
    """Correcting misaligned peaks must not duplicate peaks or change their count."""
    peaks_corrected = _correct_artifacts(artifacts_misaligned, peaks_misaligned)
    assert (
        np.unique(peaks_corrected).size == peaks_misaligned.size
    ) # make sure that no peak duplication occurs and that number of peaks doesn't change
def test_missed_correction(peaks_missed, artifacts_missed):
    """Correction should add back exactly one peak per missed beat."""
    peaks_corrected = _correct_artifacts(artifacts_missed, peaks_missed)
    assert np.unique(peaks_corrected).size == (peaks_missed.size + len(artifacts_missed["missed"]))
def test_extra_correction(peaks_extra, artifacts_extra):
    """Correction should remove exactly one peak per extra beat."""
    peaks_corrected = _correct_artifacts(artifacts_extra, peaks_extra)
    assert np.unique(peaks_corrected).size == (peaks_extra.size - len(artifacts_extra["extra"]))
def idfn(val):
    """Pytest id helper: label boolean params as 'iterative_<val>'; None lets pytest pick a default id."""
    return f"iterative_{val}" if isinstance(val, bool) else None
@pytest.mark.parametrize(
    "peaks_misaligned, iterative, rmssd_diff",
    [(2, True, 27), (2, False, 27), (4, True, 113), (4, False, 113), (8, True, 444), (8, False, 444)],
    indirect=["peaks_misaligned"],
    ids=idfn,
)
def test_misaligned_correction_wrapper(peaks_correct, peaks_misaligned, iterative, rmssd_diff):
    """End-to-end signal_fixpeaks() on misaligned peaks: temporal bounds kept, RMSSD error reduced by `rmssd_diff`."""
    _, peaks_corrected = signal_fixpeaks(peaks_misaligned, sampling_rate=1, iterative=iterative)
    rmssd_correct = compute_rmssd(peaks_correct)
    rmssd_corrected = compute_rmssd(peaks_corrected)
    rmssd_uncorrected = compute_rmssd(peaks_misaligned)
    # Assert that correction does not produce peaks that exceed the temporal
    # bounds of the original peaks.
    assert peaks_correct[0] <= peaks_corrected[0]
    assert peaks_correct[-1] >= peaks_corrected[-1]
    # Assert that after artifact correction, the difference in RMSSD to the
    # undistorted signal decreases. This also implicitely tests if the peak
    # distortion affects the RMSSD (manipulation check).
    rmssd_diff_uncorrected = np.abs(rmssd_correct - rmssd_uncorrected)
    rmssd_diff_corrected = np.abs(rmssd_correct - rmssd_corrected)
    assert int(rmssd_diff_uncorrected - rmssd_diff_corrected) == rmssd_diff
@pytest.mark.parametrize("iterative, rmssd_diff", [(True, 3), (False, 3)], ids=idfn)
def test_extra_correction_wrapper(peaks_correct, peaks_extra, iterative, rmssd_diff):
    """End-to-end signal_fixpeaks() on extra peaks: temporal bounds kept, RMSSD error reduced by `rmssd_diff`."""
    _, peaks_corrected = signal_fixpeaks(peaks_extra, sampling_rate=1, iterative=iterative)
    rmssd_correct = compute_rmssd(peaks_correct)
    rmssd_corrected = compute_rmssd(peaks_corrected)
    rmssd_uncorrected = compute_rmssd(peaks_extra)
    # Assert that correction does not produce peaks that exceed the temporal
    # bounds of the original peaks.
    assert peaks_correct[0] <= peaks_corrected[0]
    assert peaks_correct[-1] >= peaks_corrected[-1]
    # Assert that after artifact correction, the difference in RMSSD to the
    # undistorted signal decreases. This also implicitely tests if the peak
    # distortion affects the RMSSD (manipulation check).
    rmssd_diff_uncorrected = np.abs(rmssd_correct - rmssd_uncorrected)
    rmssd_diff_corrected = np.abs(rmssd_correct - rmssd_corrected)
    assert int(rmssd_diff_uncorrected - rmssd_diff_corrected) == rmssd_diff
@pytest.mark.parametrize("iterative, rmssd_diff", [(True, 13), (False, 13)], ids=idfn)
def test_missed_correction_wrapper(peaks_correct, peaks_missed, iterative, rmssd_diff):
    """End-to-end signal_fixpeaks() on missed peaks: temporal bounds kept, RMSSD error reduced by `rmssd_diff`."""
    _, peaks_corrected = signal_fixpeaks(peaks_missed, sampling_rate=1, iterative=iterative)
    rmssd_correct = compute_rmssd(peaks_correct)
    rmssd_corrected = compute_rmssd(peaks_corrected)
    rmssd_uncorrected = compute_rmssd(peaks_missed)
    # Assert that correction does not produce peaks that exceed the temporal
    # bounds of the original peaks.
    assert peaks_correct[0] <= peaks_corrected[0]
    assert peaks_correct[-1] >= peaks_corrected[-1]
    # Assert that after artifact correction, the difference in RMSSD to the
    # undistorted signal decreases. This also implicitely tests if the peak
    # distortion affects the RMSSD (manipulation check).
    rmssd_diff_uncorrected = np.abs(rmssd_correct - rmssd_uncorrected)
    rmssd_diff_corrected = np.abs(rmssd_correct - rmssd_corrected)
    assert int(rmssd_diff_uncorrected - rmssd_diff_corrected) == rmssd_diff
@pytest.fixture
def testpeaks_for_neurokit_method():
    """Peak train with both missing peaks (gaps) and spurious extra peaks."""
    signal = nk.signal_simulate(duration=20, sampling_rate=1000, frequency=1)
    clean_peaks = nk.signal_findpeaks(signal)["Peaks"]
    # Drop a handful of peaks to create gaps
    # (more than in the example in the function docstring).
    with_gaps = np.delete(clean_peaks, [5, 6, 7, 8, 9, 10, 15, 16, 17, 19])
    # Insert artifact peaks and restore temporal ordering.
    return np.sort(np.append(with_gaps, [1350, 11350, 18350]))
@pytest.mark.parametrize("interval_max", [None, 1.5, 2.0])
def test_neurokit_method_returns_only_positive_indices(testpeaks_for_neurokit_method, interval_max):
    """Corrected peak indices must never be negative."""
    corrected = nk.signal_fixpeaks(
        peaks=testpeaks_for_neurokit_method,
        interval_min=0.5,
        interval_max=interval_max,
        method="neurokit",
    )
    assert np.all(corrected >= 0)
@pytest.mark.parametrize("interval_max", [None, 1.5, 2.0])
def test_neurokit_method_returns_no_duplicates(testpeaks_for_neurokit_method, interval_max):
    """Corrected peak indices must be unique."""
    corrected = nk.signal_fixpeaks(
        peaks=testpeaks_for_neurokit_method,
        interval_min=0.5,
        interval_max=interval_max,
        method="neurokit",
    )
    assert np.unique(corrected).size == corrected.size
@pytest.mark.parametrize("interval_max", [None, 1.5, 2.0])
def test_neurokit_method_returns_strictly_increasing_indices(testpeaks_for_neurokit_method, interval_max):
    """Corrected peak indices must be strictly increasing."""
    corrected = nk.signal_fixpeaks(
        peaks=testpeaks_for_neurokit_method,
        interval_min=0.5,
        interval_max=interval_max,
        method="neurokit",
    )
    assert np.all(np.diff(corrected) > 0)
NeuroKit | NeuroKit-master/tests/tests_ecg_findpeaks.py | # -*- coding: utf-8 -*-
import os.path
import numpy as np
import pandas as pd
# Trick to directly access internal functions for unit testing.
#
# Using neurokit2.ecg.ecg_findpeaks._ecg_findpeaks_MWA doesn't
# work because of the "from .ecg_findpeaks import ecg_findpeaks"
# statement in neurokit2/ecg/__init.__.py.
from neurokit2.ecg.ecg_findpeaks import _ecg_findpeaks_MWA, _ecg_findpeaks_peakdetect
def _read_csv_column(csv_name, column):
    """Load a single column from a CSV file in the local ``ecg_data`` folder."""
    data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "ecg_data")
    frame = pd.read_csv(os.path.join(data_dir, csv_name), header=None)
    return frame[column].to_numpy()
def test_ecg_findpeaks_MWA():
    """Moving-window average of a ramp matches the historical reference output."""
    ramp = np.arange(10, dtype=float)
    expected = [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]
    np.testing.assert_array_equal(_ecg_findpeaks_MWA(ramp, 3), expected)
# This test case is intentionally a "change aversion" test that simply
# verifies that the output of the _ecg_findpeaks_peakdetect function
# on two different test datasets remains unchanged.
#
# Most notably the assertions here don't necessarily document the
# "correct" output of the function, just what the output used to be earlier.
# Potential bug fixes could legitimately require updates to this test case.
#
# Instead the main purpose of this test case is to give extra confidence
# that optimizations or other refactorings won't accidentally introduce
# new bugs into the function.
def test_ecg_findpeaks_peakdetect():
    """Regression-lock peakdetect output on two reference recordings."""
    cases = [
        ("good_4000.csv", "expected_ecg_findpeaks_peakdetect_good_4000.csv", 4000),
        ("bad_500.csv", "expected_ecg_findpeaks_peakdetect_bad_500.csv", 500),
    ]
    for ecg_csv, expected_csv, sampling_rate in cases:
        ecg = _read_csv_column(ecg_csv, 1)
        expected_peaks = _read_csv_column(expected_csv, 0)
        np.testing.assert_array_equal(
            _ecg_findpeaks_peakdetect(ecg, sampling_rate=sampling_rate), expected_peaks
        )
| 1,984 | 42.152174 | 118 | py |
NeuroKit | NeuroKit-master/tests/tests_ppg.py | # -*- coding: utf-8 -*-
import itertools
import numpy as np
import pytest
import neurokit2 as nk
# Grid of simulation settings exercised by test_ppg_simulate: every
# combination of duration x sampling rate x heart rate x frequency modulation.
durations = (20, 200, 300)
sampling_rates = (25, 50, 500)
heart_rates = (50, 120)
freq_modulations = (0.1, 0.4)
params_combis = list(
    itertools.product(durations, sampling_rates, heart_rates, freq_modulations)
)
@pytest.mark.parametrize(
    "duration, sampling_rate, heart_rate, freq_modulation", params_combis
)
def test_ppg_simulate(duration, sampling_rate, heart_rate, freq_modulation):
    """Simulated PPG has the requested length, mean rate, and rate range."""
    # All distortion sources are zeroed so only frequency modulation remains.
    ppg = nk.ppg_simulate(
        duration=duration,
        sampling_rate=sampling_rate,
        heart_rate=heart_rate,
        frequency_modulation=freq_modulation,
        ibi_randomness=0,
        drift=0,
        motion_amplitude=0,
        powerline_amplitude=0,
        burst_amplitude=0,
        burst_number=0,
        random_state=42,
        random_state_distort=42,
        show=False,
    )
    assert ppg.size == duration * sampling_rate
    signals, _ = nk.ppg_process(ppg, sampling_rate=sampling_rate)
    # Rate checks are skipped at the lowest grid value (25 Hz) — presumably
    # too coarse for reliable rate estimation; confirm against ppg_process.
    if sampling_rate > 25:
        assert np.allclose(signals["PPG_Rate"].mean(), heart_rate, atol=1)
        # Ensure that the heart rate fluctuates in the requested range.
        groundtruth_range = freq_modulation * heart_rate
        observed_range = np.percentile(signals["PPG_Rate"], 90) - np.percentile(signals["PPG_Rate"], 10)
        assert np.allclose(groundtruth_range, observed_range, atol=groundtruth_range * 0.15)
# TODO: test influence of different noise configurations
@pytest.mark.parametrize(
    "ibi_randomness, std_heart_rate",
    [(0.1, 3), (0.2, 5), (0.3, 8), (0.4, 11), (0.5, 14), (0.6, 19)],
)
def test_ppg_simulate_ibi(ibi_randomness, std_heart_rate):
    """Heart-rate variability of the simulated PPG tracks ``ibi_randomness``."""
    # Only inter-beat-interval randomness is enabled; all other distortions
    # are zeroed out.
    ppg = nk.ppg_simulate(
        duration=20,
        sampling_rate=50,
        heart_rate=70,
        frequency_modulation=0,
        ibi_randomness=ibi_randomness,
        drift=0,
        motion_amplitude=0,
        powerline_amplitude=0,
        burst_amplitude=0,
        burst_number=0,
        random_state=42,
        show=False,
    )
    assert ppg.size == 20 * 50
    signals, _ = nk.ppg_process(ppg, sampling_rate=50)
    assert np.allclose(signals["PPG_Rate"].mean(), 70, atol=1.5)
    # Ensure that the standard deviation of the heart rate matches the
    # value expected for this level of IBI randomness.
    assert np.allclose(signals["PPG_Rate"].std(), std_heart_rate, atol=1)
# TODO: test influence of different noise configurations
def test_ppg_simulate_legacy_rng():
    """Regression test for the legacy random-number-generation path."""
    ppg = nk.ppg_simulate(
        duration=30,
        sampling_rate=250,
        heart_rate=70,
        frequency_modulation=0.2,
        ibi_randomness=0.1,
        drift=0.1,
        motion_amplitude=0.1,
        powerline_amplitude=0.01,
        random_state=654,
        random_state_distort="legacy",
        show=False,
    )
    # Run simple checks to verify that the signal is the same as that generated with version 0.2.3
    # before the introduction of the new random number generation approach
    assert np.allclose(np.mean(ppg), 0.6598246992405254)
    assert np.allclose(np.std(ppg), 0.4542274696384863)
    # 30 s * 250 Hz = 7500 samples, reshaped into five 6-second windows whose
    # means are regression-locked.
    assert np.allclose(
        np.mean(np.reshape(ppg, (-1, 1500)), axis=1),
        [0.630608661400, 0.63061887029, 0.60807993168, 0.65731025466, 0.77250577818],
    )
def test_ppg_clean():
    """Elgendi cleaning preserves length and attenuates out-of-band power."""
    sampling_rate = 500
    # Heavily distorted signal (drift, motion, powerline, bursts) so that the
    # bandpass effect of cleaning is clearly measurable in the spectrum.
    ppg = nk.ppg_simulate(
        duration=30,
        sampling_rate=sampling_rate,
        heart_rate=180,
        frequency_modulation=0.01,
        ibi_randomness=0.1,
        drift=1,
        motion_amplitude=0.5,
        powerline_amplitude=0.1,
        burst_amplitude=1,
        burst_number=5,
        random_state=42,
        show=False,
    )
    ppg_cleaned_elgendi = nk.ppg_clean(
        ppg, sampling_rate=sampling_rate, method="elgendi"
    )
    assert ppg.size == ppg_cleaned_elgendi.size
    # Assert that bandpass filter with .5 Hz lowcut and 8 Hz highcut was applied:
    # spectral power outside the passband must be lower after cleaning.
    fft_raw = np.abs(np.fft.rfft(ppg))
    fft_elgendi = np.abs(np.fft.rfft(ppg_cleaned_elgendi))
    freqs = np.fft.rfftfreq(ppg.size, 1 / sampling_rate)
    assert np.sum(fft_raw[freqs < 0.5]) > np.sum(fft_elgendi[freqs < 0.5])
    assert np.sum(fft_raw[freqs > 8]) > np.sum(fft_elgendi[freqs > 8])
def test_ppg_findpeaks():
    """Both peak detectors find the expected beats in a distorted 60 bpm PPG."""
    sampling_rate = 500
    # Test Elgendi method
    ppg = nk.ppg_simulate(
        duration=30,
        sampling_rate=sampling_rate,
        heart_rate=60,
        frequency_modulation=0.01,
        ibi_randomness=0.1,
        drift=1,
        motion_amplitude=0.5,
        powerline_amplitude=0.1,
        burst_amplitude=1,
        burst_number=5,
        random_state=42,
        show=True,
    )
    ppg_cleaned_elgendi = nk.ppg_clean(
        ppg, sampling_rate=sampling_rate, method="elgendi"
    )
    info_elgendi = nk.ppg_findpeaks(
        ppg_cleaned_elgendi, sampling_rate=sampling_rate, show=True
    )
    peaks = info_elgendi["PPG_Peaks"]
    # 30 s at 60 bpm -> 29 detectable peaks; the index sum regression-locks
    # the peak positions.
    assert peaks.size == 29
    assert np.abs(peaks.sum() - 219764) < 5  # off by no more than 5 samples in total
    # Test MSPTD method (runs on the raw, uncleaned signal)
    info_msptd = nk.ppg_findpeaks(
        ppg, sampling_rate=sampling_rate, method="bishop", show=True
    )
    peaks = info_msptd["PPG_Peaks"]
    assert peaks.size == 29
    assert np.abs(peaks.sum() - 219665) < 30  # off by no more than 30 samples in total
@pytest.mark.parametrize(
    "method_cleaning, method_peaks",
    [("elgendi", "elgendi"), ("nabian2018", "elgendi"), ("elgendi", "bishop")],
)
def test_ppg_report(tmp_path, method_cleaning, method_peaks):
    """``ppg_process`` writes an HTML report when a report path is supplied."""
    sampling_rate = 100
    ppg = nk.ppg_simulate(
        duration=30,
        sampling_rate=sampling_rate,
        heart_rate=60,
        frequency_modulation=0.01,
        ibi_randomness=0.1,
        drift=1,
        motion_amplitude=0.5,
        powerline_amplitude=0.1,
        burst_amplitude=1,
        burst_number=5,
        random_state=42,
        show=True,
    )
    # Write the report into a fresh subdirectory of pytest's tmp_path.
    report_dir = tmp_path / "sub"
    report_dir.mkdir()
    report_file = report_dir / "myreport.html"
    nk.ppg_process(
        ppg,
        sampling_rate=sampling_rate,
        report=str(report_file),
        method_cleaning=method_cleaning,
        method_peaks=method_peaks,
    )
    assert report_file.is_file()
def test_ppg_intervalrelated():
    """Interval-related features work on both epoch dicts and DataFrames."""
    sampling_rate = 100
    ppg = nk.ppg_simulate(
        duration=500,
        sampling_rate=sampling_rate,
        heart_rate=70,
        frequency_modulation=0.025,
        ibi_randomness=0.15,
        drift=0.5,
        motion_amplitude=0.25,
        powerline_amplitude=0.25,
        burst_amplitude=0.5,
        burst_number=3,
        random_state=0,
        show=True,
    )
    # Process the data
    df, info = nk.ppg_process(ppg, sampling_rate=sampling_rate)
    # Dict-of-epochs input.
    epochs = nk.epochs_create(
        df, events=[0, 15000], sampling_rate=sampling_rate, epochs_end=150
    )
    epochs_ppg_intervals = nk.ppg_intervalrelated(epochs)
    assert "PPG_Rate_Mean" in epochs_ppg_intervals.columns
    # Whole-signal DataFrame input.
    ppg_intervals = nk.ppg_intervalrelated(df)
    assert "PPG_Rate_Mean" in ppg_intervals.columns
| 6,974 | 27.125 | 104 | py |
NeuroKit | NeuroKit-master/tests/tests_signal.py | import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import scipy.signal
import neurokit2 as nk
# =============================================================================
# Signal
# =============================================================================
def test_signal_simulate():
    """Unresolvable frequencies should trigger a NeuroKitWarning."""
    cases = (
        # Frequency above the Nyquist limit of the sampling rate.
        (
            r"Skipping requested frequency.*cannot be resolved.*",
            {"sampling_rate": 100, "frequency": 11},
        ),
        # Period longer than the signal duration.
        (
            r"Skipping requested frequency.*since its period of.*",
            {"duration": 1, "frequency": 0.1},
        ),
    )
    for pattern, kwargs in cases:
        with pytest.warns(nk.misc.NeuroKitWarning, match=pattern):
            nk.signal_simulate(silent=False, **kwargs)
def test_signal_smooth():
    """Wider boxcar kernels attenuate a cosine more strongly."""
    # TODO: test kernels other than "boxcar"
    raw = np.cos(np.linspace(start=0, stop=20, num=1000))
    narrow = nk.signal_smooth(raw, kernel="boxcar", size=100)
    wide = nk.signal_smooth(raw, kernel="boxcar", size=500)
    # Residual amplitude (std) shrinks as the kernel grows.
    assert np.allclose(np.std(narrow), 0.6044, atol=0.00001)
    assert np.allclose(np.std(wide), 0.1771, atol=0.0001)
def test_signal_smooth_boxcar():
    """Boxcar smoothing averages each sample with its neighbours."""
    values = np.arange(1, 11, dtype=float)
    # Edge samples are averaged with a replicated boundary value.
    expected = [(1 + 1 + 2) / 3, 2, 3, 4, 5, 6, 7, 8, 9, (9 + 10 + 10) / 3]
    np.testing.assert_array_almost_equal(
        nk.signal_smooth(values, kernel="boxcar", size=3), expected
    )
def test_signal_binarize():
    """Binarization preserves length for both arrays and plain lists."""
    wave = np.cos(np.linspace(start=0, stop=20, num=1000))
    assert len(nk.signal_binarize(wave)) == 1000
    assert len(nk.signal_binarize(list(wave))) == 1000
def test_signal_resample():
    """Down- then up-sampling should approximately recover the original signal
    for every resampling method."""
    original = np.cos(np.linspace(start=0, stop=20, num=50))
    methods = {
        "Interpolation": "interpolation",
        "Numpy": "numpy",
        "Pandas": "pandas",
        "FFT": "FFT",
        "Poly": "poly",
    }
    errors = {}
    for label, method in methods.items():
        downsampled = nk.signal_resample(
            original, method=method, sampling_rate=1000, desired_sampling_rate=500
        )
        upsampled = nk.signal_resample(
            downsampled, method=method, sampling_rate=500, desired_sampling_rate=1000
        )
        errors[label] = upsampled - original
    # Mean reconstruction error across all methods should be near zero.
    rez = pd.DataFrame(errors)
    assert np.allclose(np.mean(rez.mean()), 0.0001, atol=0.0001)
def test_signal_detrend():
    """``signal_detrend`` matches scipy for polynomial orders 0 and 1."""
    mixed = np.cos(np.linspace(start=0, stop=10, num=1000))  # low frequency
    mixed += np.cos(np.linspace(start=0, stop=100, num=1000))  # high frequency
    mixed += 3  # constant baseline
    for order, scipy_type in ((1, "linear"), (0, "constant")):
        ours = nk.signal_detrend(mixed, order=order)
        reference = scipy.signal.detrend(mixed, type=scipy_type)
        assert np.allclose(np.mean(ours - reference), 0, atol=0.000001)
    # Tarvainen (2002) smoothness-priors detrending: regression-lock the
    # mean offset it removes.
    tarvainen = nk.signal_detrend(mixed, method="tarvainen2002", regularization=500)
    assert np.allclose(np.mean(tarvainen - mixed), -2.88438737697, atol=0.000001)
def test_signal_filter():
    """Lowpass, powerline, and bandstop filtering behave as expected."""
    signal = np.cos(np.linspace(start=0, stop=10, num=1000))  # Low freq
    signal += np.cos(np.linspace(start=0, stop=100, num=1000))  # High freq
    # Lowpass filtering reduces overall variance.
    filtered = nk.signal_filter(signal, highcut=10)
    assert np.std(signal) > np.std(filtered)
    # A highcut at the Nyquist frequency should warn and then fail.
    with pytest.warns(nk.misc.NeuroKitWarning, match=r"The sampling rate is too low.*"):
        with pytest.raises(ValueError):
            nk.signal_filter(signal, method="bessel", sampling_rate=100, highcut=50)
    # Generate 10 seconds of signal with 2 Hz oscillation and added 50Hz powerline-noise.
    sampling_rate = 250
    samples = np.arange(10 * sampling_rate)
    signal = np.sin(2 * np.pi * 2 * (samples / sampling_rate))
    powerline = np.sin(2 * np.pi * 50 * (samples / sampling_rate))
    signal_corrupted = signal + powerline
    signal_clean = nk.signal_filter(
        signal_corrupted, sampling_rate=sampling_rate, method="powerline"
    )
    # import matplotlib.pyplot as plt
    # figure, (ax0, ax1, ax2) = plt.subplots(nrows=3, ncols=1, sharex=True)
    # ax0.plot(signal_corrupted)
    # ax1.plot(signal)
    # ax2.plot(signal_clean * 100)
    # plt.suptitle("Powerline")
    # plt.show()
    assert np.allclose(sum(signal_clean - signal), -2, atol=0.2)
    # lowcut > highcut requests a band-stop (notch) filter around 40-60 Hz.
    lowcut = 60
    highcut = 40
    order = 2
    signal_bandstop = nk.signal_filter(
        signal_corrupted, sampling_rate=sampling_rate, lowcut=lowcut, highcut=highcut, method="butterworth",
        order=order
    )
    # Reference implementation built directly with scipy.
    freqs = [highcut, lowcut]
    filter_type = "bandstop"
    sos = scipy.signal.butter(order, freqs, btype=filter_type, output="sos", fs=sampling_rate)
    signal_bandstop_scipy = scipy.signal.sosfiltfilt(sos, signal_corrupted)
    # figure, (ax0, ax1, ax2, ax3) = plt.subplots(nrows=4, ncols=1, sharex=True)
    # ax0.plot(signal_corrupted)
    # ax1.plot(signal)
    # ax2.plot(signal_bandstop * 100)
    # ax3.plot(signal_bandstop_scipy * 100)
    # plt.suptitle("Bandstop")
    # plt.show()
    assert np.allclose(signal_bandstop, signal_bandstop_scipy, atol=0.2)
def test_signal_filter_with_missing():
    """The powerline filter should tolerate a NaN gap inside the signal."""
    sampling_rate = 100
    segment_a = nk.signal_simulate(
        duration=10, sampling_rate=sampling_rate, frequency=2, random_state=42
    )
    segment_b = nk.signal_simulate(
        duration=10, sampling_rate=sampling_rate, frequency=2, random_state=43
    )
    # Two clean 10-second segments separated by a 1000-sample NaN gap.
    signal = np.concatenate([segment_a, [np.nan] * 1000, segment_b])
    # Superimpose 50 Hz powerline noise.
    samples = np.arange(len(signal))
    signal_corrupted = signal + np.sin(2 * np.pi * 50 * (samples / sampling_rate))
    signal_clean = nk.signal_filter(
        signal_corrupted, sampling_rate=sampling_rate, method="powerline"
    )
    assert signal_clean.size == signal.size
    # NaNs stay NaN; the rest is recovered up to a small tolerance.
    assert np.allclose(signal_clean, signal, atol=0.2, equal_nan=True)
def test_signal_interpolate():
    """Interpolation yields the requested length and keeps the end points."""
    sparse_x = np.linspace(start=10, stop=30, num=10)
    sparse_y = np.cos(sparse_x)
    dense = nk.signal_interpolate(sparse_x, sparse_y, x_new=np.arange(1000))
    assert len(dense) == 1000
    assert dense[0] == sparse_y[0]
    assert dense[-1] == sparse_y[-1]
def test_signal_findpeaks():
    """A fast oscillation yields more peaks than a slow two-bump ramp."""
    oscillation = np.cos(np.linspace(start=0, stop=30, num=1000))
    ramps = np.concatenate(
        [np.arange(0, 20, 0.1), np.arange(17, 30, 0.1), np.arange(30, 10, -0.1)]
    )
    peaks_fast = nk.signal_findpeaks(oscillation)["Peaks"]
    peaks_slow = nk.signal_findpeaks(ramps)["Peaks"]
    assert len(peaks_fast) > len(peaks_slow)
def test_signal_merge():
    """Merging two partially overlapping signals.

    ``signal2`` spans time [-5, 5] and ``signal1`` spans [0, 10], so the
    merged output covers [-5, 10] (150 samples at this resolution).
    """
    signal1 = np.cos(np.linspace(start=0, stop=10, num=100))
    signal2 = np.cos(np.linspace(start=0, stop=20, num=100))
    signal = nk.signal_merge(signal1, signal2, time1=[0, 10], time2=[-5, 5])
    assert len(signal) == 150
    # Fixed: the original asserted `signal2[0] + signal2[0]`, which only
    # passed because signal1[0] == signal2[0] == cos(0) == 1. The intended
    # check is that the first merged sample is the sum of both signals'
    # first values.
    assert signal[0] == signal1[0] + signal2[0]
def test_signal_rate():  # since signal_rate wraps signal_period, the latter is tested as well
    """Check signal_rate output shapes for array, dict, and DataFrame inputs."""
    # Test with array.
    duration = 10
    sampling_rate = 1000
    signal = nk.signal_simulate(duration=duration, sampling_rate=sampling_rate, frequency=1)
    info = nk.signal_findpeaks(signal)
    rate = nk.signal_rate(peaks=info["Peaks"], sampling_rate=1000, desired_length=len(signal))
    assert rate.shape[0] == duration * sampling_rate
    # Test with dictionary produced from signal_findpeaks.
    assert info[list(info.keys())[0]].shape == (info["Peaks"].shape[0],)
    # Test with DataFrame.
    duration = 120
    sampling_rate = 1000
    rsp = nk.rsp_simulate(
        duration=duration,
        sampling_rate=sampling_rate,
        respiratory_rate=15,
        # Fixed typo: was "sinuosoidal", which is not a recognized method name
        # (compare tests_rsp.py, which spells it "sinusoidal").
        method="sinusoidal",
        noise=0,
    )
    rsp_cleaned = nk.rsp_clean(rsp, sampling_rate=sampling_rate)
    signals, info = nk.rsp_peaks(rsp_cleaned)
    rate = nk.signal_rate(
        signals, sampling_rate=sampling_rate, desired_length=duration * sampling_rate
    )
    assert rate.shape == (signals.shape[0],)
    # Test with dictionary produced from rsp_findpeaks.
    rate = nk.signal_rate(
        info, sampling_rate=sampling_rate, desired_length=duration * sampling_rate
    )
    assert rate.shape == (duration * sampling_rate,)
def test_signal_period():
    """The no-peaks input path should emit a warning."""
    # Test warning path of no peaks
    # NOTE(review): `np.zeros` here is the *function object*, not an array —
    # possibly intended as e.g. `np.zeros(100)`. Either way no peaks are
    # found, so the warning fires; confirm against signal_period's input
    # sanitization before changing.
    with pytest.warns(nk.NeuroKitWarning, match=r"Too few peaks detected to compute the rate."):
        nk.signal_period(np.zeros)
def test_signal_plot():
    """signal_plot produces correctly labelled axes for all input types."""
    # Test with array
    signal = nk.signal_simulate(duration=10, sampling_rate=1000)
    nk.signal_plot(signal, sampling_rate=1000)
    # This will identify the latest figure.
    fig = plt.gcf()
    axs = fig.get_axes()
    assert len(axs) == 1
    ax = axs[0]
    handles, labels = ax.get_legend_handles_labels()
    assert labels == ["Signal"]
    assert len(labels) == len(handles) == len([signal])
    # With a sampling rate, the x-axis is expressed in seconds.
    assert ax.get_xlabel() == "Time (seconds)"
    plt.close(fig)
    # Test with dataframe
    data = pd.DataFrame(
        {
            "Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)),
            "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)),
            "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000))),
        }
    )
    nk.signal_plot(data, sampling_rate=None)
    fig = plt.gcf()
    for ax in fig.get_axes():
        handles, labels = ax.get_legend_handles_labels()
        assert labels == list(data.columns.values)
        assert len(labels) == len(handles) == len(data.columns)
        # Without a sampling rate, the x-axis is expressed in samples.
        assert ax.get_xlabel() == "Samples"
    plt.close(fig)
    # Test with list
    signal = nk.signal_binarize(nk.signal_simulate(duration=10))
    phase = nk.signal_phase(signal, method="percents")
    nk.signal_plot([signal, phase])
    fig = plt.gcf()
    for ax in fig.get_axes():
        handles, labels = ax.get_legend_handles_labels()
        # Unnamed list entries are auto-labelled Signal1, Signal2, ...
        assert labels == ["Signal1", "Signal2"]
        assert len(labels) == len(handles) == len([signal, phase])
        assert ax.get_xlabel() == "Samples"
    plt.close(fig)
def test_signal_power():
    """Band power of a 1 Hz sine should not depend on the sampling rate."""
    bands = [[0.9, 1.6], [1.4, 2.0]]
    power_500hz = nk.signal_power(
        nk.signal_simulate(duration=20, frequency=1, sampling_rate=500),
        bands,
        sampling_rate=500,
    )
    power_100hz = nk.signal_power(
        nk.signal_simulate(duration=20, frequency=1, sampling_rate=100),
        bands,
        sampling_rate=100,
    )
    assert np.allclose(np.mean(power_500hz.iloc[0] - power_100hz.iloc[0]), 0, atol=0.01)
def test_signal_timefrequency():
    """All time-frequency methods localize power at the dominant frequency.

    The input mixes a 5 Hz component with a twice-as-strong 20 Hz component,
    so each decomposition must (a) return axes matching the matrix shape and
    (b) carry more power around 20 Hz than around 5 Hz.

    Refactored: the four near-identical per-method sections (stft, cwt, wvd,
    pwvd) are deduplicated into one loop with identical call arguments.
    """
    signal = nk.signal_simulate(duration=50, frequency=5) + 2 * nk.signal_simulate(
        duration=50, frequency=20
    )
    for method in ("stft", "cwt", "wvd", "pwvd"):
        # Only the short-time Fourier transform received a minimum frequency
        # in the original per-method calls; preserve that.
        extra = {"min_frequency": 1} if method == "stft" else {}
        frequency, time, tfr = nk.signal_timefrequency(
            signal, method=method, max_frequency=50, show=False, **extra
        )
        # Axes must match the matrix dimensions.
        assert len(frequency) == tfr.shape[0]
        assert len(time) == tfr.shape[1]
        # Power around 20 Hz dominates power around 5 Hz.
        indices_freq5 = np.logical_and(frequency > 3, frequency < 7)
        indices_freq20 = np.logical_and(frequency > 18, frequency < 22)
        assert np.sum(tfr[indices_freq5]) < np.sum(tfr[indices_freq20])
def test_signal_psd(recwarn):
    """``signal_psd`` returns a two-column Frequency/Power frame."""
    warnings.simplefilter("always")
    ecg = nk.data("bio_eventrelated_100hz")["ECG"]
    psd = nk.signal_psd(ecg, sampling_rate=100)
    assert list(psd.columns) == ["Frequency", "Power"]
def test_signal_distort():
    """Unresolvable noise frequencies should raise NeuroKit warnings."""
    signal = nk.signal_simulate(duration=10, frequency=0.5, sampling_rate=10)
    # Warning for nyquist criterion: noise frequency above the Nyquist limit.
    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r"Skipping requested noise frequency.*cannot be resolved.*"
    ):
        nk.signal_distort(signal, sampling_rate=10, noise_amplitude=1, silent=False)
    # Warning for period duration: noise period longer than the signal itself.
    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r"Skipping requested noise frequency.*since its period of.*"
    ):
        signal = nk.signal_simulate(duration=1, frequency=1, sampling_rate=10)
        nk.signal_distort(signal, noise_amplitude=1, noise_frequency=0.1, silent=False)
    # NOTE(review): removed a trailing unused `signal2 = nk.signal_simulate(...)`
    # statement — dead code with no assertion attached.
def test_signal_surrogate():
    """IAAFT surrogates preserve distribution and amplitude spectrum."""
    # Logistic map (chaotic regime, r = 3.95); the first 50 samples are
    # discarded as a transient.
    r = 3.95
    x = np.empty(450)
    x[0] = 0.5
    for i in range(1, len(x)):
        x[i] = r * x[i - 1] * (1 - x[i - 1])
    x = x[50:]
    # Create surrogate
    surrogate = nk.signal_surrogate(x, method="IAAFT", random_state=127)
    # Check mean and variance
    assert np.allclose(np.mean(x), np.mean(surrogate))
    assert np.allclose(np.var(x), np.var(surrogate))
    # Check distribution (10-bin histograms may differ by at most 1 count)
    assert np.allclose(
        np.histogram(x, 10, (0, 1))[0],
        np.histogram(surrogate, 10, (0, 1))[0],
        atol=1
    )
    # Check spectrum: mean absolute deviation between the amplitude spectra
    # of the (mean-removed) signals stays small.
    assert (
        np.mean(np.abs(np.abs(np.fft.rfft(surrogate - np.mean(surrogate)))
                       - np.abs(np.fft.rfft(x - np.mean(x))))) < 0.1
    )
| 15,358 | 33.748869 | 125 | py |
NeuroKit | NeuroKit-master/tests/__init__.py | 0 | 0 | 0 | py | |
NeuroKit | NeuroKit-master/tests/tests_rsp.py | # -*- coding: utf-8 -*-
import copy
import random
import biosppy
import matplotlib.pyplot as plt
import numpy as np
import pytest
import neurokit2 as nk
# Seed the stdlib RNG so tests that draw from `random` (e.g. the outlier
# positions in test_rsp_clean) are reproducible; version=2 pins the modern
# seeding algorithm.
random.seed(a=13, version=2)
def test_rsp_simulate():
    """Simulated RSP length and peak counts behave as expected."""
    rsp1 = nk.rsp_simulate(duration=20, length=3000, random_state=42)
    assert len(rsp1) == 3000
    # A higher respiratory rate must produce more breath peaks.
    rsp2 = nk.rsp_simulate(duration=20, length=3000, respiratory_rate=80, random_state=42)
    # pd.DataFrame({"RSP1":rsp1, "RSP2":rsp2}).plot()
    # pd.DataFrame({"RSP1":rsp1, "RSP2":rsp2}).hist()
    assert len(nk.signal_findpeaks(rsp1, height_min=0.2)["Peaks"]) < len(
        nk.signal_findpeaks(rsp2, height_min=0.2)["Peaks"]
    )
    # At default rate, the sinusoidal method yields more peaks above the
    # height threshold than the breathmetrics method.
    rsp3 = nk.rsp_simulate(duration=20, length=3000, method="sinusoidal", random_state=42)
    rsp4 = nk.rsp_simulate(duration=20, length=3000, method="breathmetrics", random_state=42)
    # pd.DataFrame({"RSP3":rsp3, "RSP4":rsp4}).plot()
    assert len(nk.signal_findpeaks(rsp3, height_min=0.2)["Peaks"]) > len(
        nk.signal_findpeaks(rsp4, height_min=0.2)["Peaks"]
    )
def test_rsp_simulate_legacy_rng():
    """Regression test for the legacy random-number-generation path."""
    rsp = nk.rsp_simulate(
        duration=10,
        sampling_rate=100,
        noise=0.03,
        respiratory_rate=12,
        method="breathmetrics",
        random_state=123,
        random_state_distort="legacy",
    )
    # Run simple checks to verify that the signal is the same as that generated with version 0.2.3
    # before the introduction of the new random number generation approach
    assert np.allclose(np.mean(rsp), 0.03869389548166346)
    assert np.allclose(np.std(rsp), 0.3140022628657376)
    # 10 s * 100 Hz = 1000 samples, reshaped into five 2-second windows whose
    # means are regression-locked.
    assert np.allclose(
        np.mean(np.reshape(rsp, (-1, 200)), axis=1),
        [0.2948574728, -0.2835745073, 0.2717568165, -0.2474764970, 0.1579061923],
    )
@pytest.mark.parametrize(
    "random_state, random_state_distort",
    [
        (13579, "legacy"),
        (13579, "spawn"),
        (13579, 24680),
        (13579, None),
        (np.random.RandomState(33), "spawn"),
        (np.random.SeedSequence(33), "spawn"),
        (np.random.Generator(np.random.Philox(33)), "spawn"),
        (None, "spawn"),
    ],
)
def test_rsp_simulate_all_rng_types(random_state, random_state_distort):
    """Smoke-test every supported random_state input type."""
    # Run rsp_simulate to test for errors (e.g. using methods like randint that are only
    # implemented for RandomState but not Generator, or vice versa)
    rsp = nk.rsp_simulate(
        duration=10,
        sampling_rate=100,
        noise=0.03,
        respiratory_rate=12,
        method="breathmetrics",
        random_state=random_state,
        random_state_distort=random_state_distort,
    )
    # Double check the signal is finite and of the right length
    assert np.all(np.isfinite(rsp))
    assert len(rsp) == 10 * 100
def test_rsp_clean():
    """Cleaning methods preserve length, filter the right bands, and the
    hampel method corrects injected outliers."""
    sampling_rate = 100
    duration = 120
    rsp = nk.rsp_simulate(
        duration=duration,
        sampling_rate=sampling_rate,
        respiratory_rate=15,
        noise=0.1,
        random_state=42,
    )
    # Add linear drift (to test baseline removal).
    rsp += nk.signal_distort(rsp, sampling_rate=sampling_rate, linear_drift=True, random_state=42)
    # All cleaning methods must preserve the signal length.
    for method in ["khodadad2018", "biosppy", "hampel"]:
        cleaned = nk.rsp_clean(rsp, sampling_rate=sampling_rate, method=method)
        assert len(rsp) == len(cleaned)
    khodadad2018 = nk.rsp_clean(rsp, sampling_rate=sampling_rate, method="khodadad2018")
    rsp_biosppy = nk.rsp_clean(rsp, sampling_rate=sampling_rate, method="biosppy")
    # Check if filter was applied: out-of-band spectral power must shrink
    # (khodadad2018 ~0.05-3 Hz; biosppy ~0.1-0.35 Hz).
    fft_raw = np.abs(np.fft.rfft(rsp))
    fft_khodadad2018 = np.abs(np.fft.rfft(khodadad2018))
    fft_biosppy = np.abs(np.fft.rfft(rsp_biosppy))
    freqs = np.fft.rfftfreq(len(rsp), 1 / sampling_rate)
    assert np.sum(fft_raw[freqs > 3]) > np.sum(fft_khodadad2018[freqs > 3])
    assert np.sum(fft_raw[freqs < 0.05]) > np.sum(fft_khodadad2018[freqs < 0.05])
    assert np.sum(fft_raw[freqs > 0.35]) > np.sum(fft_biosppy[freqs > 0.35])
    assert np.sum(fft_raw[freqs < 0.1]) > np.sum(fft_biosppy[freqs < 0.1])
    # Comparison to biosppy (https://github.com/PIA-Group/BioSPPy/blob/master/biosppy/signals/resp.py#L62)
    rsp_biosppy = nk.rsp_clean(rsp, sampling_rate=sampling_rate, method="biosppy")
    original, _, _ = biosppy.tools.filter_signal(
        signal=rsp,
        ftype="butter",
        band="bandpass",
        order=2,
        frequency=[0.1, 0.35],
        sampling_rate=sampling_rate,
    )
    original = nk.signal_detrend(original, order=0)
    assert np.allclose((rsp_biosppy - original).mean(), 0, atol=1e-6)
    # Check if outlier was corrected: inject large spikes at random positions
    # and verify the hampel filter restores the clean signal.
    hampel_sampling_rate = 1000
    hampel_sample = nk.rsp_simulate(
        duration=duration,
        sampling_rate=hampel_sampling_rate,
        respiratory_rate=15,
        noise=0.1,
        random_state=42,
    )
    # Random numbers (reproducible thanks to the module-level random.seed).
    distort_locations = random.sample(range(len(hampel_sample)), 20)
    distorted_sample = copy.copy(hampel_sample)
    distorted_sample[distort_locations] = 100
    assert np.allclose(
        nk.rsp_clean(
            distorted_sample, sampling_rate=hampel_sampling_rate, method="hampel", window_length=1
        ),
        hampel_sample,
        atol=1,
    )
def test_rsp_peaks():
    """All peak-detection methods find ~28-29 breaths at 15 breaths/min."""
    rsp = nk.rsp_simulate(duration=120, sampling_rate=1000, respiratory_rate=15, random_state=42)
    rsp_cleaned = nk.rsp_clean(rsp, sampling_rate=1000)
    for method in ["khodadad2018", "biosppy", "scipy"]:
        signals, info = nk.rsp_peaks(rsp_cleaned, method=method)
        assert signals.shape == (120000, 2)
        # 120 s at 15 breaths/min -> ~30 cycles, so 28-29 detectable extrema.
        assert signals["RSP_Peaks"].sum() in [28, 29]
        assert signals["RSP_Troughs"].sum() in [28, 29]
        assert info["RSP_Peaks"].shape[0] in [28, 29]
        assert info["RSP_Troughs"].shape[0] in [28, 29]
        # Median breath-to-breath interval is close to 4 s (4000 samples).
        assert 4010 < np.median(np.diff(info["RSP_Peaks"])) < 4070
        assert 3800 < np.median(np.diff(info["RSP_Troughs"])) < 4010
        # Each detected series starts and ends with a trough before a peak.
        assert info["RSP_Peaks"][0] > info["RSP_Troughs"][0]
        assert info["RSP_Peaks"][-1] > info["RSP_Troughs"][-1]
def test_rsp_amplitude():
    """Amplitude of a noise-free unit sinusoidal breathing signal is ~1."""
    rsp = nk.rsp_simulate(
        duration=120,
        sampling_rate=1000,
        respiratory_rate=15,
        method="sinusoidal",
        noise=0,
        random_state=1,
    )
    rsp_cleaned = nk.rsp_clean(rsp, sampling_rate=1000)
    signals, info = nk.rsp_peaks(rsp_cleaned)
    # Both the signals DataFrame and the info dict are accepted as peak input.
    for peak_source in (signals, info):
        amplitude = nk.rsp_amplitude(rsp, peak_source)
        assert amplitude.shape == (rsp.size,)
        assert np.abs(amplitude.mean() - 1) < 0.01
def test_rsp_process():
    """``rsp_process`` returns all expected columns at the signal's length."""
    rsp = nk.rsp_simulate(duration=120, sampling_rate=1000, respiratory_rate=15, random_state=2)
    signals, _ = nk.rsp_process(rsp, sampling_rate=1000)
    # Only dimensions and column names are checked here; the functions called
    # by rsp_process are unit-tested individually.
    assert len(signals) == 120000
    expected_columns = {
        "RSP_Raw",
        "RSP_Clean",
        "RSP_Amplitude",
        "RSP_Rate",
        "RSP_Phase",
        "RSP_Phase_Completion",
        "RSP_Peaks",
        "RSP_Troughs",
    }
    assert expected_columns.issubset(signals.columns)
def test_rsp_plot():
    """rsp_plot creates a five-axes figure with the expected titles."""
    rsp = nk.rsp_simulate(duration=120, sampling_rate=1000, respiratory_rate=15, random_state=3)
    rsp_summary, _ = nk.rsp_process(rsp, sampling_rate=1000)
    nk.rsp_plot(rsp_summary)
    # This will identify the latest figure.
    fig = plt.gcf()
    assert len(fig.axes) == 5
    # NOTE(review): only the first three of the five axes are title-checked —
    # zip() stops at the shorter list. Possibly intentional; confirm whether
    # the remaining axes' titles should also be asserted.
    titles = ["Raw and Cleaned Signal", "Breathing Rate", "Breathing Amplitude"]
    for (ax, title) in zip(fig.get_axes(), titles):
        assert ax.get_title() == title
    plt.close(fig)
def test_rsp_eventrelated():
    """Event-related RSP features: min < mean < max orderings and warnings
    when expected columns are missing from an epoch.

    Fixed: ``np.alltrue`` (deprecated alias, removed in NumPy 2.0) replaced
    by the equivalent ``np.all``.
    """
    rsp, _ = nk.rsp_process(nk.rsp_simulate(duration=30, random_state=42))
    epochs = nk.epochs_create(rsp, events=[5000, 10000, 15000], epochs_start=-0.1, epochs_end=1.9)
    rsp_eventrelated = nk.rsp_eventrelated(epochs)
    # Test rate features
    assert np.all(
        np.array(rsp_eventrelated["RSP_Rate_Min"]) < np.array(rsp_eventrelated["RSP_Rate_Mean"])
    )
    assert np.all(
        np.array(rsp_eventrelated["RSP_Rate_Mean"]) < np.array(rsp_eventrelated["RSP_Rate_Max"])
    )
    # Test amplitude features
    assert np.all(
        np.array(rsp_eventrelated["RSP_Amplitude_Min"])
        < np.array(rsp_eventrelated["RSP_Amplitude_Mean"])
    )
    assert np.all(
        np.array(rsp_eventrelated["RSP_Amplitude_Mean"])
        < np.array(rsp_eventrelated["RSP_Amplitude_Max"])
    )
    assert len(rsp_eventrelated["Label"]) == 3
    # Test warning on missing columns
    with pytest.warns(nk.misc.NeuroKitWarning, match=r".*does not have an `RSP_Amplitude`.*"):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["RSP_Amplitude"]
        nk.rsp_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
    with pytest.warns(nk.misc.NeuroKitWarning, match=r".*does not have an `RSP_Phase`.*"):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["RSP_Phase"]
        nk.rsp_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
def test_rsp_rrv():
    """Breath-rate variability indices at 90 rpm should be smaller than at 110 rpm."""
    rsp90 = nk.rsp_simulate(duration=60, sampling_rate=1000, respiratory_rate=90, random_state=42)
    rsp110 = nk.rsp_simulate(duration=60, sampling_rate=1000, respiratory_rate=110, random_state=42)
    # Clean, detect breath peaks and derive an instantaneous rate for each signal.
    cleaned90 = nk.rsp_clean(rsp90, sampling_rate=1000)
    _, peaks90 = nk.rsp_peaks(cleaned90)
    rsp_rate90 = nk.signal_rate(peaks90, desired_length=len(rsp90))
    cleaned110 = nk.rsp_clean(rsp110, sampling_rate=1000)
    _, peaks110 = nk.rsp_peaks(cleaned110)
    rsp_rate110 = nk.signal_rate(peaks110, desired_length=len(rsp110))
    rsp90_rrv = nk.rsp_rrv(rsp_rate90, peaks90)
    rsp110_rrv = nk.rsp_rrv(rsp_rate110, peaks110)
    # Time-domain indices.
    assert np.array(rsp90_rrv["RRV_SDBB"]) < np.array(rsp110_rrv["RRV_SDBB"])
    assert np.array(rsp90_rrv["RRV_RMSSD"]) < np.array(rsp110_rrv["RRV_RMSSD"])
    assert np.array(rsp90_rrv["RRV_SDSD"]) < np.array(rsp110_rrv["RRV_SDSD"])
    # assert np.array(rsp90_rrv["RRV_pNN50"]) == np.array(rsp110_rrv["RRV_pNN50"]) == 0
    # assert np.array(rsp110_rrv["RRV_pNN20"]) == np.array(rsp90_rrv["RRV_pNN20"]) == 0
    # assert np.array(rsp90_rrv["RRV_TINN"]) < np.array(rsp110_rrv["RRV_TINN"])
    # assert np.array(rsp90_rrv["RRV_HTI"]) > np.array(rsp110_rrv["RRV_HTI"])
    # Frequency-domain: VLF cannot be estimated on a 60 s recording, hence NaN.
    assert np.array(rsp90_rrv["RRV_HF"]) < np.array(rsp110_rrv["RRV_HF"])
    assert np.isnan(rsp90_rrv["RRV_VLF"][0])
    assert np.isnan(rsp110_rrv["RRV_VLF"][0])
    # assert all(elem in ['RRV_SDBB','RRV_RMSSD', 'RRV_SDSD'
    #                    'RRV_VLF', 'RRV_LF', 'RRV_HF', 'RRV_LFHF',
    #                    'RRV_LFn', 'RRV_HFn',
    #                    'RRV_SD1', 'RRV_SD2', 'RRV_SD2SD1','RRV_ApEn', 'RRV_SampEn', 'RRV_DFA']
    #            for elem in np.array(rsp110_rrv.columns.values, dtype=str))
def test_rsp_intervalrelated():
    """Interval-related RSP analysis returns one feature row per input interval."""
    data = nk.data("bio_resting_5min_100hz")
    processed, _ = nk.rsp_process(data["RSP"], sampling_rate=100)
    # A single continuous dataframe yields a single row of features.
    features_df = nk.rsp_intervalrelated(processed)
    assert features_df.shape[0] == 1
    # A dict of two epochs yields two rows.
    two_epochs = nk.epochs_create(processed, events=[0, 15000], sampling_rate=100, epochs_end=150)
    features_dict = nk.rsp_intervalrelated(two_epochs)
    assert features_dict.shape[0] == 2
def test_rsp_rvt():
    """RVT output matches signal length and is non-negative for every supported method."""
    fs = 1000
    slow = nk.rsp_simulate(duration=60, sampling_rate=fs, respiratory_rate=10, random_state=42)
    fast = nk.rsp_simulate(duration=60, sampling_rate=fs, respiratory_rate=20, random_state=42)
    for method in ["harrison", "birn", "power"]:
        for signal in (slow, fast):
            rvt = nk.rsp_rvt(signal, method=method, sampling_rate=fs)
            # Output is sample-aligned with the input signal.
            assert len(signal) == len(rvt)
            # All defined (non-NaN) RVT values are non-negative.
            assert min(rvt[~np.isnan(rvt)]) >= 0
@pytest.mark.parametrize(
    "method_cleaning, method_peaks, method_rvt",
    [("none", "scipy", "power2020"),
    ("biosppy", "biosppy", "power2020"),
    ("khodadad2018", "khodadad2018", "birn2006"),
    ("power2020", "scipy", "harrison2021"),
    ],
)
def test_rsp_report(tmp_path, method_cleaning, method_peaks, method_rvt):
    """rsp_process() should write an HTML report for every method combination."""
    sampling_rate = 100
    rsp = nk.rsp_simulate(
        duration=30,
        sampling_rate=sampling_rate,
        random_state=0,
    )
    # Write the report into a pytest-managed temporary directory.
    d = tmp_path / "sub"
    d.mkdir()
    p = d / "myreport.html"
    signals, _ = nk.rsp_process(
        rsp,
        sampling_rate=sampling_rate,
        report=str(p),
        method_cleaning=method_cleaning,
        method_peaks=method_peaks,
        method_rvt=method_rvt,
    )
    assert p.is_file()
| 13,128 | 33.732804 | 106 | py |
NeuroKit | NeuroKit-master/tests/tests_hrv.py | import numpy as np
import pandas as pd
import pytest
import neurokit2 as nk
from neurokit2 import misc
def test_hrv_time():
    """Most time-domain HRV indices should shrink as simulated heart rate increases."""
    ecg_slow = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=60, random_state=42)
    ecg_fast = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=150, random_state=42)
    _, peaks_slow = nk.ecg_process(ecg_slow, sampling_rate=1000)
    _, peaks_fast = nk.ecg_process(ecg_fast, sampling_rate=1000)
    hrv_slow = nk.hrv_time(peaks_slow, sampling_rate=1000)
    hrv_fast = nk.hrv_time(peaks_fast, sampling_rate=1000)
    # Faster heart rate -> shorter, less variable NN intervals.
    assert np.all(hrv_fast["HRV_RMSSD"] < hrv_slow["HRV_RMSSD"])
    assert np.all(hrv_fast["HRV_MeanNN"] < hrv_slow["HRV_MeanNN"])
    assert np.all(hrv_fast["HRV_SDNN"] < hrv_slow["HRV_SDNN"])
    assert np.all(hrv_fast["HRV_CVNN"] < hrv_slow["HRV_CVNN"])
    assert np.all(hrv_fast["HRV_CVSD"] < hrv_slow["HRV_CVSD"])
    assert np.all(hrv_fast["HRV_MedianNN"] < hrv_slow["HRV_MedianNN"])
    assert np.all(hrv_fast["HRV_MadNN"] < hrv_slow["HRV_MadNN"])
    assert np.all(hrv_fast["HRV_MCVNN"] < hrv_slow["HRV_MCVNN"])
    # pNN50 happens to be equal for these two particular simulated signals.
    assert np.all(hrv_fast["HRV_pNN50"] == hrv_slow["HRV_pNN50"])
    assert np.all(hrv_fast["HRV_pNN20"] < hrv_slow["HRV_pNN20"])
    assert np.all(hrv_fast["HRV_TINN"] < hrv_slow["HRV_TINN"])
    assert np.all(hrv_fast["HRV_HTI"] != hrv_slow["HRV_HTI"])
def test_hrv_frequency():
    """HF power should be robust to resampling; ULF/VLF are NaN on a 60 s recording."""
    ecg_2000 = nk.ecg_simulate(duration=60, sampling_rate=2000, heart_rate=70, random_state=42)
    _, rpeaks_2000 = nk.ecg_process(ecg_2000, sampling_rate=2000)
    hrv_2000 = nk.hrv_frequency(rpeaks_2000, sampling_rate=2000)
    # Downsample the very same signal and recompute the frequency indices.
    ecg_500 = nk.signal_resample(ecg_2000, sampling_rate=2000, desired_sampling_rate=500)
    _, rpeaks_500 = nk.ecg_process(ecg_500, sampling_rate=500)
    hrv_500 = nk.hrv_frequency(rpeaks_500, sampling_rate=500)
    assert np.allclose(hrv_2000["HRV_HF"] - hrv_500["HRV_HF"], 0, atol=1.5)
    # The recording is too short to estimate the ultra-low/very-low bands.
    for frame in (hrv_2000, hrv_500):
        assert np.isnan(frame["HRV_ULF"][0])
        assert np.isnan(frame["HRV_VLF"][0])
def test_hrv():
    """Sanity-check the full hrv() pipeline on a simulated ECG."""
    simulated = nk.ecg_simulate(duration=120, sampling_rate=1000, heart_rate=110, random_state=42)
    _, rpeaks = nk.ecg_process(simulated, sampling_rate=1000)
    summary = nk.hrv(rpeaks, sampling_rate=1000)
    # Regression value for this exact seed/configuration.
    assert np.isclose(summary["HRV_RMSSD"].values[0], 3.526, atol=0.1)
def test_rri_input_hrv():
    """hrv() accepts a dict of R-R intervals (with NaNs) instead of peak indices."""
    ecg = nk.ecg_simulate(duration=120, sampling_rate=1000, heart_rate=110, random_state=42)
    _, peaks = nk.ecg_process(ecg, sampling_rate=1000)
    peaks = peaks["ECG_R_Peaks"]
    rri = np.diff(peaks).astype(float)
    rri_time = peaks[1:] / 1000
    # Introduce missing intervals to check that NaNs are tolerated.
    rri[3:5] = [np.nan, np.nan]
    ecg_hrv = nk.hrv({"RRI": rri, "RRI_Time": rri_time})
    # Should stay close to the peak-based RMSSD despite the gaps.
    assert np.isclose(ecg_hrv["HRV_RMSSD"].values[0], 3.526, atol=0.2)
@pytest.mark.parametrize("detrend", ["polynomial", "loess"])
def test_hrv_detrended_rri(detrend):
ecg = nk.ecg_simulate(duration=120, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks = nk.ecg_process(ecg, sampling_rate=1000)
peaks = peaks["ECG_R_Peaks"]
rri = np.diff(peaks).astype(float)
rri_time = peaks[1:] / 1000
rri_processed, rri_processed_time, _ = nk.intervals_process(
rri, intervals_time=rri_time, interpolate=False, interpolation_rate=None, detrend=detrend
)
ecg_hrv = nk.hrv({"RRI": rri_processed, "RRI_Time": rri_processed_time})
assert np.isclose(
ecg_hrv["HRV_RMSSD"].values[0],
np.sqrt(np.mean(np.square(np.diff(rri_processed)))),
atol=0.1,
)
@pytest.mark.parametrize("interpolation_rate", ["from_mean_rri", 1, 4, 100])
def test_hrv_interpolated_rri(interpolation_rate):
ecg = nk.ecg_simulate(duration=120, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks = nk.ecg_process(ecg, sampling_rate=1000)
peaks = peaks["ECG_R_Peaks"]
rri = np.diff(peaks).astype(float)
rri_time = peaks[1:] / 1000
if interpolation_rate == "from_mean_rri":
interpolation_rate = 1000 / np.mean(rri)
rri_processed, rri_processed_time, _ = nk.intervals_process(
rri, intervals_time=rri_time, interpolate=True, interpolation_rate=interpolation_rate
)
ecg_hrv = nk.hrv({"RRI": rri_processed, "RRI_Time": rri_processed_time})
assert np.isclose(
ecg_hrv["HRV_RMSSD"].values[0],
np.sqrt(np.mean(np.square(np.diff(rri_processed)))),
atol=0.1,
)
def test_hrv_missing():
    """Providing interval timestamps should improve RMSSD estimation when intervals are missing."""
    random_state = 42
    rng = misc.check_random_state(random_state)
    # Download data
    data = nk.data("bio_resting_5min_100hz")
    sampling_rate = 100
    ecg = data["ECG"]
    _, peaks = nk.ecg_process(ecg, sampling_rate=sampling_rate)
    peaks = peaks["ECG_R_Peaks"]
    rri = np.diff(peaks / sampling_rate).astype(float) * 1000
    rri_time = peaks[1:] / sampling_rate
    # remove some intervals and their corresponding timestamps
    # (rng.choice may pick duplicate indices, so slightly fewer than 20% are dropped)
    missing = rng.choice(len(rri), size=int(len(rri) / 5))
    rri_missing = rri[np.array([i for i in range(len(rri)) if i not in missing])]
    rri_time_missing = rri_time[np.array([i for i in range(len(rri_time)) if i not in missing])]
    orig_hrv = nk.hrv_time(peaks, sampling_rate=sampling_rate)
    miss_only_rri_hrv = nk.hrv_time({"RRI": rri_missing})
    # by providing the timestamps corresponding to each interval
    # we should be able to better estimate the original RMSSD
    # before some intervals were removed
    # (at least for this example signal)
    miss_rri_time_hrv = nk.hrv_time({"RRI": rri_missing, "RRI_Time": rri_time_missing})
    abs_diff_only_rri = np.mean(
        np.abs(np.diff([orig_hrv["HRV_RMSSD"].values[0], miss_only_rri_hrv["HRV_RMSSD"].values[0]]))
    )
    abs_diff_rri_time = np.mean(
        np.abs(np.diff([orig_hrv["HRV_RMSSD"].values[0], miss_rri_time_hrv["HRV_RMSSD"].values[0]]))
    )
    # The timestamp-aware estimate should deviate less from the ground truth.
    assert abs_diff_only_rri > abs_diff_rri_time
def test_hrv_rsa():
    """RSA features expose the expected keys; warnings are raised when RSP info is missing."""
    data = nk.data("bio_eventrelated_100hz")
    ecg_signals, info = nk.ecg_process(data["ECG"], sampling_rate=100)
    rsp_signals, _ = nk.rsp_process(data["RSP"], sampling_rate=100)
    rsa_feature_columns = [
        "RSA_P2T_Mean",
        "RSA_P2T_Mean_log",
        "RSA_P2T_SD",
        "RSA_P2T_NoRSA",
        "RSA_PorgesBohrer",
        "RSA_Gates_Mean",
        "RSA_Gates_Mean_log",
        "RSA_Gates_SD",
    ]
    rsa_features = nk.hrv_rsa(
        ecg_signals, rsp_signals, rpeaks=info, sampling_rate=100, continuous=False
    )
    assert all(key in rsa_feature_columns for key in rsa_features.keys())
    # Test simulate RSP signal warning (no RSP data provided at all)
    with pytest.warns(misc.NeuroKitWarning, match=r"RSP signal not found. For this.*"):
        nk.hrv_rsa(ecg_signals, rpeaks=info, sampling_rate=100, continuous=False)
    # Same warning path for an empty dataframe.
    with pytest.warns(misc.NeuroKitWarning, match=r"RSP signal not found. For this time.*"):
        nk.hrv_rsa(ecg_signals, pd.DataFrame(), rpeaks=info, sampling_rate=100, continuous=False)
    # Test missing rsp onsets/centers (zeroed peaks column)
    with pytest.warns(misc.NeuroKitWarning, match=r"Couldn't find rsp cycles onsets and centers.*"):
        rsp_signals["RSP_Peaks"] = 0
        _ = nk.hrv_rsa(ecg_signals, rsp_signals, rpeaks=info, sampling_rate=100, continuous=False)
def test_hrv_nonlinear_fragmentation():
    """Regression test for https://github.com/neuropsychology/NeuroKit/issues/344."""
    from neurokit2.hrv.hrv_nonlinear import _hrv_nonlinear_fragmentation

    intervals = np.array([888.0, 1262.0, 1290.0, 1274.0, 1300.0, 1244.0, 1266.0])
    fragmentation = {}
    # The helper populates the `out` dict in place.
    _hrv_nonlinear_fragmentation(intervals, out=fragmentation)
    expected = {
        "IALS": 0.8333333333333334,
        "PAS": 1.0,
        "PIP": 0.5714285714285714,
        "PSS": 1.0,
    }
    assert fragmentation == expected
| 7,652 | 35.099057 | 100 | py |
NeuroKit | NeuroKit-master/tests/tests_eeg.py | import mne
import numpy as np
import pooch
import neurokit2 as nk
# =============================================================================
# EEG
# =============================================================================
def test_eeg_add_channel():
    """mne_channel_add() should NaN-pad the new channel so the sync indices align."""
    raw = mne.io.read_raw_fif(
        str(mne.datasets.sample.data_path()) + "/MEG/sample/sample_audvis_raw.fif", preload=True
    )
    # Case 1: len(channel) > len(raw)
    ecg1 = nk.ecg_simulate(length=170000)
    # sync_index_raw > sync_index_channel
    raw1 = nk.mne_channel_add(
        raw.copy(), ecg1, channel_type="ecg", sync_index_raw=100, sync_index_channel=0
    )
    df1 = raw1.to_data_frame()
    # test if the column of channel is added (377 original channels + 1 new)
    assert len(df1.columns) == 378
    # test if the NaN is appended properly to the added channel to account for difference in distance between two signals
    sync_index_raw = 100
    sync_index_channel = 0
    for i in df1["Added_Channel"].head(abs(sync_index_channel - sync_index_raw)):
        assert np.isnan(i)
    # First sample after the padding must carry real data.
    assert np.isfinite(df1["Added_Channel"].iloc[abs(sync_index_channel - sync_index_raw)])
    # Case 2: len(channel) < len(raw)
    ecg2 = nk.ecg_simulate(length=166790)
    # sync_index_raw < sync_index_channel
    raw2 = nk.mne_channel_add(
        raw.copy(), ecg2, channel_type="ecg", sync_index_raw=0, sync_index_channel=100
    )
    df2 = raw2.to_data_frame()
    # test if the column of channel is added
    assert len(df2.columns) == 378
    # test if the NaN is appended properly to the added channel to account for difference in distance between two signals + difference in length
    sync_index_raw = 0
    sync_index_channel = 100
    for i in df2["Added_Channel"].tail(
        abs(sync_index_channel - sync_index_raw) + (len(raw) - len(ecg2))
    ):
        assert np.isnan(i)
    # Last sample before the trailing padding must carry real data.
    assert np.isfinite(
        df2["Added_Channel"].iloc[
            -abs(sync_index_channel - sync_index_raw) - (len(raw) - len(ecg2)) - 1
        ]
    )
def test_mne_channel_extract():
    """Extract single channels, channel lists, and whole channel categories from a Raw object."""
    raw = mne.io.read_raw_fif(
        str(mne.datasets.sample.data_path()) + "/MEG/sample/sample_audvis_raw.fif", preload=True
    )
    # Extract 1 channel
    what = "EEG 053"
    raw_channel = nk.mne_channel_extract(raw, what)
    # NOTE(review): `.what` is attribute access for a literal attribute named "what",
    # not a lookup through the `what` variable; this looks like it should be
    # `raw_channel.name == what` — confirm against mne_channel_extract's return type.
    assert raw_channel.what == what
    # Extract more than 1 channel
    what2 = ["EEG 053", "EEG 054", "EEG 055"]
    raw_channel2 = nk.mne_channel_extract(raw, what2)
    assert len(raw_channel2.columns) == 3
    assert all(elem in what2 for elem in np.array(raw_channel2.columns.values, dtype=str))
    # Extract a category of channels (name prefix match)
    what3 = "EEG"
    raw_channels = nk.mne_channel_extract(raw, what3)
    assert len(raw_channels.columns) == 60
    raw_eeg_names = [x for x in raw.info["ch_names"] if what3 in x]
    assert raw_eeg_names == list(raw_channels.columns.values)
def test_mne_to_df():
    """mne_to_df() flattens Raw, Epochs, and lists of Evoked into dataframes of known length."""
    raw = mne.io.read_raw_fif(
        str(mne.datasets.sample.data_path()) + "/MEG/sample/sample_audvis_filt-0-40_raw.fif"
    )
    assert len(nk.mne_to_df(raw)) == 41700
    events = mne.read_events(
        str(mne.datasets.sample.data_path()) + "/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif"
    )
    event_id = {"audio/left": 1, "audio/right": 2, "visual/left": 3, "visual/right": 4}
    # Create epochs (100 ms baseline + 500 ms)
    epochs = mne.Epochs(
        raw,
        events,
        event_id,
        tmin=-0.1,
        tmax=0.5,
        picks="eeg",
        preload=True,
        detrend=0,
        baseline=(None, 0),
    )
    assert len(nk.mne_to_df(epochs)) == 26208
    # One averaged Evoked per condition family ("audio", "visual").
    evoked = [epochs[name].average() for name in ("audio", "visual")]
    assert len(nk.mne_to_df(evoked)) == 182
| 3,675 | 29.633333 | 144 | py |
NeuroKit | NeuroKit-master/tests/tests_bio.py | import numpy as np
import neurokit2 as nk
def test_bio_process():
    """bio_process() returns coherent peak/onset info for ECG, RSP, EDA and EMG."""
    sampling_rate = 1000
    # Create data
    ecg = nk.ecg_simulate(duration=30, sampling_rate=sampling_rate)
    rsp = nk.rsp_simulate(duration=30, sampling_rate=sampling_rate)
    eda = nk.eda_simulate(duration=30, sampling_rate=sampling_rate, scr_number=3)
    emg = nk.emg_simulate(duration=30, sampling_rate=sampling_rate, burst_number=3)
    bio_df, bio_info = nk.bio_process(ecg=ecg, rsp=rsp, eda=eda, emg=emg, sampling_rate=sampling_rate)
    # SCR components: onsets precede peaks, which precede recovery points.
    scr = [val for key, val in bio_info.items() if "SCR" in key]
    assert all(len(elem) == len(scr[0]) for elem in scr)
    assert all(bio_info["SCR_Onsets"] < bio_info["SCR_Peaks"])
    assert all(bio_info["SCR_Peaks"] < bio_info["SCR_Recovery"])
    # RSP
    assert all(bio_info["RSP_Peaks"] > bio_info["RSP_Troughs"])
    assert len(bio_info["RSP_Peaks"]) == len(bio_info["RSP_Troughs"])
    # EMG: every burst has matching onset/offset markers.
    assert all(bio_info["EMG_Offsets"] > bio_info["EMG_Onsets"])
    # Fix: the original asserted `len(offsets == len(onsets))`, i.e. the length of
    # an element-wise comparison array — truthy for any non-empty result, so the
    # lengths were never actually compared.
    assert len(bio_info["EMG_Offsets"]) == len(bio_info["EMG_Onsets"])
def test_bio_analyze():
    """bio_analyze() handles both event-related epochs and a single resting interval."""
    # Example with event-related analysis
    data = nk.data("bio_eventrelated_100hz")
    df, info = nk.bio_process(
        ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], keep=data["Photosensor"], sampling_rate=100
    )
    events = nk.events_find(
        data["Photosensor"], threshold_keep="below", event_conditions=["Negative", "Neutral", "Neutral", "Negative"]
    )
    epochs = nk.epochs_create(df, events, sampling_rate=100, epochs_start=-0.1, epochs_end=1.9)
    event_related = nk.bio_analyze(epochs)
    assert len(event_related) == len(epochs)
    # Labels are sequential epoch numbers starting at 1.
    labels = [int(i) for i in event_related["Label"]]
    assert labels == list(np.arange(1, len(epochs) + 1))
    # Example with interval-related analysis (one row for the whole recording)
    data = nk.data("bio_resting_8min_100hz")
    df, info = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=100)
    interval_related = nk.bio_analyze(df)
    assert len(interval_related) == 1
| 2,058 | 35.767857 | 116 | py |
NeuroKit | NeuroKit-master/tests/tests_data.py | import os
import numpy as np
import neurokit2 as nk
# Absolute path to the repository-level "data" directory (tests/../data).
path_data = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data")
# =============================================================================
# Data
# =============================================================================
def test_read_acqknowledge():
    """Reading an .acq file honours an explicit sampling rate and resolves "max"."""
    acq_file = os.path.join(path_data, "acqnowledge.acq")
    _, rate = nk.read_acqknowledge(acq_file, sampling_rate=2000)
    assert rate == 2000
    # "max" resolves to the highest channel sampling rate present in the file.
    _, rate = nk.read_acqknowledge(acq_file, sampling_rate="max")
    assert rate == 4000
def test_data():
    """nk.data() returns the same dataset with or without the .csv extension."""
    data = nk.data("bio_eventrelated_100hz")
    assert len(data.columns) == 4
    assert data.size == 15000 * 4
    expected_columns = ["ECG", "EDA", "Photosensor", "RSP"]
    assert all(column in expected_columns for column in np.array(data.columns.values, dtype=str))
    # Requesting the explicit .csv name must yield an identically-shaped dataset.
    data_csv = nk.data("bio_eventrelated_100hz.csv")
    assert len(data.columns) == len(data_csv.columns)
    assert data_csv.size == data.size
    assert all(
        column in np.array(data.columns.values, dtype=str)
        for column in np.array(data_csv.columns.values, dtype=str)
    )
| 1,187 | 30.263158 | 118 | py |
NeuroKit | NeuroKit-master/tests/tests_emg.py | import biosppy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import scipy.stats
import neurokit2 as nk
# =============================================================================
# EMG
# =============================================================================
def test_emg_simulate():
    """More bursts raise dispersion; a longer burst yields more high-amplitude peaks."""
    single_burst = nk.emg_simulate(duration=20, length=5000, burst_number=1)
    assert len(single_burst) == 5000
    many_bursts = nk.emg_simulate(duration=20, length=5000, burst_number=15)
    # More bursts -> larger dispersion (median absolute deviation).
    assert scipy.stats.median_abs_deviation(single_burst) < scipy.stats.median_abs_deviation(
        many_bursts
    )
    long_burst = nk.emg_simulate(duration=20, length=5000, burst_number=1, burst_duration=2.0)
    # A longer burst contains more samples exceeding the peak-height threshold.
    assert len(nk.signal_findpeaks(long_burst, height_min=1.0)["Peaks"]) > len(
        nk.signal_findpeaks(single_burst, height_min=1.0)["Peaks"]
    )
def test_emg_activation():
    """Onsets and offsets come in ordered pairs and mirror the info-dict keys."""
    amplitude = nk.emg_amplitude(nk.emg_clean(nk.emg_simulate(duration=10, burst_number=3)))
    activity, info = nk.emg_activation(amplitude)
    # Signal columns and info keys must describe the same set of markers.
    assert set(activity.columns.to_list()) == set(list(info.keys()))
    assert len(info["EMG_Onsets"]) == len(info["EMG_Offsets"])
    for onset, offset in zip(info["EMG_Onsets"], info["EMG_Offsets"]):
        assert onset < offset
def test_emg_clean():
    """emg_clean() should match biosppy's highpass-filter + detrend pipeline."""
    sampling_rate = 1000
    emg = nk.emg_simulate(duration=20, sampling_rate=sampling_rate)
    emg_cleaned = nk.emg_clean(emg, sampling_rate=sampling_rate)
    # Cleaning preserves the number of samples.
    assert emg.size == emg_cleaned.size
    # Comparison to biosppy (https://github.com/PIA-Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/emg.py)
    original, _, _ = biosppy.tools.filter_signal(
        signal=emg,
        ftype="butter",
        band="highpass",
        order=4,
        frequency=100,
        sampling_rate=sampling_rate,
    )
    emg_cleaned_biosppy = nk.signal_detrend(original, order=0)
    assert np.allclose((emg_cleaned - emg_cleaned_biosppy).mean(), 0, atol=1e-6)
def test_emg_plot():
    """emg_plot() draws two axes; x-axis switches between samples and seconds."""
    sampling_rate = 1000
    emg = nk.emg_simulate(duration=10, sampling_rate=1000, burst_number=3)
    emg_summary, _ = nk.emg_process(emg, sampling_rate=sampling_rate)
    # Plot data over samples (no sampling_rate given).
    fig = nk.emg_plot(emg_summary)
    assert len(fig.axes) == 2
    titles = ["Raw and Cleaned Signal", "Muscle Activation"]
    for (ax, title) in zip(fig.get_axes(), titles):
        assert ax.get_title() == title
    assert fig.get_axes()[1].get_xlabel() == "Samples"
    # Both subplots share the same x axis.
    np.testing.assert_array_equal(fig.axes[0].get_xticks(), fig.axes[1].get_xticks())
    plt.close(fig)
    # Plot data over time.
    fig = nk.emg_plot(emg_summary, sampling_rate=sampling_rate)
    assert fig.get_axes()[1].get_xlabel() == "Time (seconds)"
def test_emg_eventrelated():
    """Event-related EMG features; warnings when required epoch columns are missing."""
    emg = nk.emg_simulate(duration=20, sampling_rate=1000, burst_number=3)
    emg_signals, info = nk.emg_process(emg, sampling_rate=1000)
    epochs = nk.epochs_create(
        emg_signals,
        events=[3000, 6000, 9000],
        sampling_rate=1000,
        epochs_start=-0.1,
        epochs_end=1.9,
    )
    emg_eventrelated = nk.emg_eventrelated(epochs)
    # Test amplitude features: an epoch without activation has NaN amplitude stats.
    no_activation = np.where(emg_eventrelated["EMG_Activation"] == 0)[0][0]
    assert int(pd.DataFrame(emg_eventrelated.values[no_activation]).isna().sum()) == 5
    # `np.all` replaces `np.alltrue`, which was deprecated in NumPy 1.25 and
    # removed in NumPy 2.0.
    assert np.all(
        np.nansum(np.array(emg_eventrelated["EMG_Amplitude_Mean"]))
        < np.nansum(np.array(emg_eventrelated["EMG_Amplitude_Max"]))
    )
    assert len(emg_eventrelated["Label"]) == 3
    # Test warning on missing columns: drop one column from a copy of the first
    # epoch and check that emg_eventrelated() warns about it.
    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r".*does not have an `EMG_Onsets`.*"
    ):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["EMG_Onsets"]
        nk.emg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r".*does not have an `EMG_Activity`.*"
    ):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["EMG_Activity"]
        nk.emg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
    with pytest.warns(
        nk.misc.NeuroKitWarning, match=r".*does not have an.*`EMG_Amplitude`.*"
    ):
        first_epoch_key = list(epochs.keys())[0]
        first_epoch_copy = epochs[first_epoch_key].copy()
        del first_epoch_copy["EMG_Amplitude"]
        nk.emg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
def test_emg_intervalrelated():
    """Interval-related EMG analysis returns one row per interval and only expected columns."""
    emg = nk.emg_simulate(duration=40, sampling_rate=1000, burst_number=3)
    emg_signals, info = nk.emg_process(emg, sampling_rate=1000)
    columns = ["EMG_Activation_N", "EMG_Amplitude_Mean"]
    # Test with signal dataframe
    features_df = nk.emg_intervalrelated(emg_signals)
    assert all(
        elem in columns for elem in np.array(features_df.columns.values, dtype=str)
    )
    assert features_df.shape[0] == 1  # Number of rows
    # Test with dict (a "Label" column is added per epoch)
    columns.append("Label")
    epochs = nk.epochs_create(
        emg_signals, events=[0, 20000], sampling_rate=1000, epochs_end=20
    )
    features_dict = nk.emg_intervalrelated(epochs)
    assert all(
        elem in columns for elem in np.array(features_dict.columns.values, dtype=str)
    )
    assert features_dict.shape[0] == 2  # Number of rows
@pytest.mark.parametrize(
    "method_cleaning, method_activation, threshold",
    [("none", "threshold", "default"),
    ("biosppy", "pelt", 0.5),
    ("biosppy", "mixture", 0.05),
    ("biosppy", "biosppy", "default"),
    ("biosppy", "silva", "default")],
)
def test_emg_report(tmp_path, method_cleaning, method_activation, threshold):
    """emg_process() writes an HTML report and an EMG_Activity column for each method combo."""
    sampling_rate = 250
    emg = nk.emg_simulate(
        duration=30,
        sampling_rate=sampling_rate,
        random_state=0,
    )
    # Report goes into a pytest-managed temporary directory.
    d = tmp_path / "sub"
    d.mkdir()
    p = d / "myreport.html"
    signals, _ = nk.emg_process(
        emg,
        sampling_rate=sampling_rate,
        report=str(p),
        method_cleaning=method_cleaning,
        method_activation=method_activation,
        threshold=threshold
    )
    assert p.is_file()
    assert "EMG_Activity" in signals.columns
NeuroKit | NeuroKit-master/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- REQUIREMENTS -----------------------------------------------------
# pip install sphinx-material
# pip install sphinxemoji
import datetime
import os
import re
import sys
import asyncio
import platform
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath("../"))
# -- Project information -----------------------------------------------------
def find_author():
    """Return the ``__author__`` string declared in ``neurokit2/__init__.py``.

    Fix: the original called ``open(...)`` inline and never closed the file
    handle; a context manager closes it deterministically.
    """
    with open("../neurokit2/__init__.py") as fh:
        content = fh.read()
    # Same pattern the original built via str.format("__author__").
    result = re.search(r'__author__\s*=\s*[\'"]([^\'"]*)[\'"]', content)
    return str(result.group(1))
project = "NeuroKit2"
copyright = f"2020–{datetime.datetime.now().year}"
author = '<a href="https://dominiquemakowski.github.io/">Dominique Makowski</a> and the <a href="https://github.com/neuropsychology/NeuroKit/blob/master/AUTHORS.rst">Team</a>. This documentation is licensed under a <a href="https://creativecommons.org/licenses/by/4.0/">CC BY 4.0</a> license.'
# The short X.Y version.
def find_version():
    """Return the ``__version__`` string declared in ``neurokit2/__init__.py``.

    Fix: the original called ``open(...)`` inline and never closed the file
    handle; a context manager closes it deterministically.
    """
    with open("../neurokit2/__init__.py") as fh:
        content = fh.read()
    # Same pattern the original built via str.format("__version__").
    result = re.search(r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]', content)
    return result.group(1)
version = find_version()
# The full version, including alpha/beta/rc tags.
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.napoleon",
    "sphinx.ext.autosectionlabel",
    "sphinx.ext.viewcode",
    "IPython.sphinxext.ipython_console_highlighting",
    "IPython.sphinxext.ipython_directive",
    "sphinxemoji.sphinxemoji",
    "sphinx_copybutton",
    "myst_nb",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build"]
# Ignore duplicated sections warning
suppress_warnings = ["epub.duplicated_toc_entry"]
nitpicky = False  # Set to True to get all warnings about crosslinks
# Prefix document path to section labels, to use:
# `path/to/file:heading` instead of just `heading`
autosectionlabel_prefix_document = True
# -- Options for autodoc -------------------------------------------------
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_use_param = False
napoleon_use_ivar = False
napoleon_use_rtype = False
add_module_names = False  # If true, the current module name will be prepended to all description
# -- Options for ipython directive ----------------------------------------
# Doesn't work?
# ipython_promptin = ">"  # "In [%d]:"
# ipython_promptout = ">"  # "Out [%d]:"
# -- Options for myst_nb ---------------------------------------------------
# Execute notebooks on every build and fail the build when a cell errors.
nb_execution_mode = "force"
nb_execution_raise_on_error = True
# googleanalytics_id = "G-DVXSEGN5M9"
# Address asyncio warning (Windows-only event-loop policy workaround)
if platform.system() == "Windows":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# NumPyDoc configuration -----------------------------------------------------
# -- Options for HTML output -------------------------------------------------
html_favicon = "img/icon.ico"
html_logo = "img/neurokit.png"
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_book_theme"
# https://sphinx-book-theme.readthedocs.io/en/latest/customize/index.html
html_theme_options = {
    "repository_url": "https://github.com/neuropsychology/NeuroKit",
    "repository_branch": "dev",  # TODO: remove this before merging
    "use_repository_button": True,
    "use_issues_button": True,
    "path_to_docs": "docs/",
    "use_edit_page_button": True,
    "logo_only": True,
    "show_toc_level": 1,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| 4,891 | 33.20979 | 293 | py |
NeuroKit | NeuroKit-master/docs/readme/README_examples.py | import matplotlib
import matplotlib.cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
import neurokit2 as nk
# Setup matplotlib with Agg to run on server
matplotlib.use("Agg")
plt.rcParams["figure.figsize"] = (10, 6.5)
plt.rcParams["savefig.facecolor"] = "white"
# =============================================================================
# Quick Example
# =============================================================================
# This section reproduces the "quick example" snippet shown in the README;
# its outputs are not plotted or saved, only executed.
# Download example data
data = nk.data("bio_eventrelated_100hz")
# Preprocess the data (filter, find peaks, etc.)
processed_data, info = nk.bio_process(
    ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=100
)
# Compute relevant features (interval-related indices for each signal type)
results = nk.bio_analyze(processed_data, sampling_rate=100)
# =============================================================================
# Simulate physiological signals
# =============================================================================
# Generate synthetic signals
ecg = nk.ecg_simulate(duration=10, heart_rate=70)
ppg = nk.ppg_simulate(duration=10, heart_rate=70)
rsp = nk.rsp_simulate(duration=10, respiratory_rate=15)
eda = nk.eda_simulate(duration=10, scr_number=3)
emg = nk.emg_simulate(duration=10, burst_number=2)
# Visualise biosignals
data = pd.DataFrame({"ECG": ecg, "PPG": ppg, "RSP": rsp, "EDA": eda, "EMG": emg})
nk.signal_plot(data, subplots=True)
# Save it
data = pd.DataFrame(
{
"ECG": nk.ecg_simulate(duration=10, heart_rate=70, noise=0),
"PPG": nk.ppg_simulate(duration=10, heart_rate=70, powerline_amplitude=0),
"RSP": nk.rsp_simulate(duration=10, respiratory_rate=15, noise=0),
"EDA": nk.eda_simulate(duration=10, scr_number=3, noise=0),
"EMG": nk.emg_simulate(duration=10, burst_number=2, noise=0),
}
)
plot = data.plot(
subplots=True, layout=(5, 1), color=["#f44336", "#E91E63", "#2196F3", "#9C27B0", "#FF9800"]
)
fig = plt.gcf()
fig.set_size_inches(10, 6, forward=True)
[ax.legend(loc=1) for ax in plt.gcf().axes]
plt.tight_layout()
fig.savefig("README_simulation.png", dpi=300)
# =============================================================================
# Electrodermal Activity (EDA) processing
# =============================================================================
# Generate 10 seconds of EDA signal (recorded at 250 samples / second) with 2 SCR peaks
eda = nk.eda_simulate(duration=10, sampling_rate=250, scr_number=2, drift=0.1)
# Process it
signals, info = nk.eda_process(eda, sampling_rate=250)
# Visualise the processing
nk.eda_plot(signals, sampling_rate=None)
# Save it
nk.eda_plot(signals, sampling_rate=None)
plt.tight_layout()
plt.savefig("README_eda.png", dpi=300)
# =============================================================================
# Cardiac activity (ECG) processing
# =============================================================================
# Generate 15 seconds of ECG signal (recorded at 250 samples / second)
ecg = nk.ecg_simulate(duration=15, sampling_rate=250, heart_rate=70, random_state=333)
# Process it
signals, info = nk.ecg_process(ecg, sampling_rate=250)
# Visualise the processing
nk.ecg_plot(signals, sampling_rate=250)
# Save it
nk.ecg_plot(signals, sampling_rate=250)
plt.tight_layout()
plt.savefig("README_ecg.png", dpi=300)
# =============================================================================
# Respiration (RSP) processing
# =============================================================================
# Generate one minute of RSP signal (recorded at 250 samples / second)
rsp = nk.rsp_simulate(duration=60, sampling_rate=250, respiratory_rate=15)
# Process it
signals, info = nk.rsp_process(rsp, sampling_rate=250)
# Visualise the processing
nk.rsp_plot(signals, sampling_rate=250)
# Save it
nk.rsp_plot(signals, sampling_rate=250)
fig = plt.gcf()
fig.set_size_inches(10, 12, forward=True)
plt.tight_layout()
plt.savefig("README_rsp.png", dpi=300)
# =============================================================================
# Electromyography (EMG) processing
# =============================================================================
# Generate 10 seconds of EMG signal (recorded at 250 samples / second)
emg = nk.emg_simulate(duration=10, sampling_rate=250, burst_number=3)
# Process it
signals, info = nk.emg_process(emg, sampling_rate=250)
# Visualise the processing
nk.emg_plot(signals, sampling_rate=250)
# Save it
nk.emg_plot(signals, sampling_rate=250)
plt.tight_layout()
plt.savefig("README_emg.png", dpi=300)
# =============================================================================
# Photoplethysmography (PPG/BVP)
# =============================================================================
# Generate 15 seconds of PPG signal (recorded at 250 samples / second)
ppg = nk.ppg_simulate(duration=15, sampling_rate=250, heart_rate=70, random_state=333)
# Process it
signals, info = nk.ppg_process(ppg, sampling_rate=250)
# Visualize the processing
nk.ppg_plot(signals, sampling_rate=250)
# Save it
nk.ppg_plot(signals, sampling_rate=250)
plt.tight_layout()
plt.savefig("README_ppg.png", dpi=300)
# =============================================================================
# Electrooculography (EOG)
# =============================================================================
# Import EOG data
eog_signal = nk.data("eog_100hz")
# Process it
signals, info = nk.eog_process(eog_signal, sampling_rate=100)
# Plot
nk.eog_plot(signals, peaks=info, sampling_rate=100)
plt.tight_layout()
plt.savefig("README_eog.png", dpi=300)
# =============================================================================
# Signal Processing
# =============================================================================
# Generate original signal
original = nk.signal_simulate(duration=6, frequency=1)
# Distort the signal (add noise, linear trend, artifacts etc.)
distorted = nk.signal_distort(
original,
noise_amplitude=0.1,
noise_frequency=[5, 10, 20],
powerline_amplitude=0.05,
artifacts_amplitude=0.3,
artifacts_number=3,
linear_drift=0.5,
)
# Clean (filter and detrend)
cleaned = nk.signal_detrend(distorted)
cleaned = nk.signal_filter(cleaned, lowcut=0.5, highcut=1.5)
# Compare the 3 signals
plot = nk.signal_plot([original, distorted, cleaned])
# Save plot
fig = plt.gcf()
plt.tight_layout()
fig.savefig("README_signalprocessing.png", dpi=300)
# =============================================================================
# Heart Rate Variability
# =============================================================================
# Reset plot size
plt.rcParams["figure.figsize"] = plt.rcParamsDefault["figure.figsize"]
# Download data
data = nk.data("bio_resting_8min_100hz")
# Find peaks
peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)
# Compute HRV indices
hrv = nk.hrv(peaks, sampling_rate=100, show=True)
hrv
# Save plot
fig = plt.gcf()
fig.set_size_inches(10 * 1.5, 6 * 1.5, forward=True)
plt.tight_layout()
fig.savefig("README_hrv.png", dpi=300)
# =============================================================================
# ECG Delineation
# =============================================================================
# Download data
ecg_signal = nk.data(dataset="ecg_3000hz")
# Extract R-peaks locations
_, rpeaks = nk.ecg_peaks(ecg_signal, sampling_rate=3000)
# Delineate
signal, waves = nk.ecg_delineate(
ecg_signal,
rpeaks,
sampling_rate=3000,
method="dwt",
show=True,
show_type="all",
window_start=-0.15,
window_end=0.2,
)
# Save plot
fig = plt.gcf()
fig.set_size_inches(10 * 1.5, 6 * 1.5, forward=True)
plt.tight_layout()
fig.savefig("README_delineate.png", dpi=300)
# =============================================================================
# Complexity
# =============================================================================
# Generate signal
signal = nk.signal_simulate(frequency=[1, 3], noise=0.01, sampling_rate=200)
# Find optimal time delay, embedding dimension and r
parameters = nk.complexity_optimize(signal, show=True)
parameters
# Save plot
fig = plt.gcf()
fig.set_size_inches(10 * 1.5, 6 * 1.5, forward=True)
plt.tight_layout()
fig.savefig("README_complexity_optimize.png", dpi=300)
# =============================================================================
# Signal Decomposition
# =============================================================================
np.random.seed(333)
# Create complex signal
signal = nk.signal_simulate(duration=10, frequency=1) # High freq
signal += 3 * nk.signal_simulate(duration=10, frequency=3) # Higher freq
signal += 3 * np.linspace(0, 2, len(signal)) # Add baseline and linear trend
signal += 2 * nk.signal_simulate(duration=10, frequency=0.1, noise=0) # Non-linear trend
signal += np.random.normal(0, 0.02, len(signal)) # Add noise
# Decompose signal using Empirical Mode Decomposition (EMD)
components = nk.signal_decompose(signal, method="emd")
nk.signal_plot(components) # Visualize components
# Recompose merging correlated components
recomposed = nk.signal_recompose(components, threshold=0.99)
nk.signal_plot(recomposed) # Visualize components
# Save plot
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)
ax1.plot(signal, color="grey", label="Original Signal")
for i in range(len(components)):
ax2.plot(
components[i, :],
color=matplotlib.cm.magma(i / len(components)),
label="Component " + str(i),
)
for i in range(len(recomposed)):
ax3.plot(
recomposed[i, :],
color=matplotlib.cm.viridis(i / len(recomposed)),
label="Recomposed " + str(i),
)
fig.set_size_inches(10, 6, forward=True)
[ax.legend(loc=1) for ax in plt.gcf().axes]
plt.tight_layout()
fig.savefig("README_decomposition.png", dpi=300)
# =============================================================================
# Signal Power Spectrum Density
# =============================================================================
# Generate complex signal
signal = nk.signal_simulate(
duration=20, frequency=[0.5, 5, 10, 15], amplitude=[2, 1.5, 0.5, 0.3], noise=0.025
)
# Get the PSD using different methods
welch = nk.signal_psd(signal, method="welch", min_frequency=1, max_frequency=20, show=True)
multitaper = nk.signal_psd(signal, method="multitapers", max_frequency=20, show=True)
lomb = nk.signal_psd(signal, method="lomb", min_frequency=1, max_frequency=20, show=True)
burg = nk.signal_psd(signal, method="burg", min_frequency=1, max_frequency=20, order=10, show=True)
# Visualize the different methods together
fig, axes = plt.subplots(nrows=2)
axes[0].plot(np.linspace(0, 20, len(signal)), signal, color="black", linewidth=0.5)
axes[0].set_title("Original signal")
axes[0].set_xlabel("Time (s)")
axes[1].plot(
welch["Frequency"], welch["Power"], label="Welch", color="#E91E63", linewidth=2, zorder=1
)
axes[1].plot(
multitaper["Frequency"],
multitaper["Power"],
label="Multitaper",
color="#2196F3",
linewidth=2,
zorder=2,
)
axes[1].plot(burg["Frequency"], burg["Power"], label="Burg", color="#4CAF50", linewidth=2, zorder=3)
axes[1].plot(
lomb["Frequency"], lomb["Power"], label="Lomb", color="#FFC107", linewidth=0.5, zorder=4
)
axes[1].set_title("Power Spectrum Density (PSD)")
axes[1].set_yscale("log")
axes[1].set_xlabel("Frequency (Hz)")
axes[1].set_ylabel(r"PSD ($ms^2/Hz$)")
for x in [0.5, 5, 10, 15]:
axes[1].axvline(x, color="#FF5722", linewidth=1, ymax=0.95, linestyle="--")
axes[1].legend(loc="upper right")
# Save plot
fig = plt.gcf()
fig.set_size_inches(10 * 1.5, 8 * 1.5, forward=True)
plt.tight_layout()
fig.savefig("README_psd.png", dpi=300)
# =============================================================================
# Statistics
# =============================================================================
x = np.random.normal(loc=0, scale=1, size=100000)
ci_min, ci_max = nk.hdi(x, ci=0.95, show=True)
# Save plot
fig = plt.gcf()
fig.set_size_inches(10 / 1.5, 6 / 1.5)
plt.tight_layout()
fig.savefig("README_hdi.png", dpi=300)
| 12,141 | 30.70235 | 100 | py |
NeuroKit | NeuroKit-master/data/eeg_1min_200hz.py | import pickle
import mne
raw = mne.io.read_raw_fif(
    mne.datasets.sample.data_path() / "MEG/sample/sample_audvis_raw.fif",
    preload=True,
    verbose=False,
)
# Keep only EEG, EOG and stimulus channels (drops MEG sensors).
raw = raw.pick(["eeg", "eog", "stim"], verbose=False)
# Keep the first 60 seconds and downsample to 200 Hz (matches the
# "eeg_1min_200hz" name of the output file).
raw = raw.crop(0, 60)
raw = raw.resample(200)
# raw.ch_names
# raw.info["sfreq"]
# Store data (serialize)
with open("eeg_1min_200hz.pickle", "wb") as handle:
    pickle.dump(raw, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 445 | 20.238095 | 73 | py |
NeuroKit | NeuroKit-master/data/eeg_resting_8min.py | import mne
import numpy as np
import TruScanEEGpy
import neurokit2 as nk
# EDF TO FIF
# ==========
# Read original file (too big to be uploaded on github)
raw = mne.io.read_raw_edf("eeg_restingstate_3000hz.edf", preload=True)
# Find event onset and cut: the "Foto" channel appears to be a photo-sensor
# trigger; its first onset (in samples at 3000 Hz) marks the start of the
# 8-minute resting-state period.
event = nk.events_find(raw.copy().pick_channels(["Foto"]).to_data_frame()["Foto"])
tmin = event["onset"][0] / 3000
raw = raw.crop(tmin=tmin, tmax=tmin + 8 * 60)
# EOG: build a bipolar EOG channel as the difference of electrodes "124"
# and "125", add it to the recording, then drop the originals.
eog = raw.copy().pick_channels(["124", "125"]).to_data_frame()
eog = eog["124"] - eog["125"]
raw = nk.eeg_add_channel(raw, eog, channel_type="eog", channel_name="EOG")
raw = raw.drop_channels(["124", "125"])
# Montage: rename channels to the 10-5 system and attach the TruScan
# 128-electrode montage.
mne.rename_channels(
    raw.info, dict(zip(raw.info["ch_names"], TruScanEEGpy.convert_to_tenfive(raw.info["ch_names"])))
)
montage = TruScanEEGpy.montage_mne_128(TruScanEEGpy.layout_128(names="10-5"))
# Drop every channel absent from the montage, except the derived EOG.
extra_channels = np.array(raw.info["ch_names"])[
    np.array([i not in montage.ch_names for i in raw.info["ch_names"]])
]
raw = raw.drop_channels(extra_channels[np.array([i not in ["EOG"] for i in extra_channels])])
raw = raw.set_montage(montage)
# Save (downsampled from 3000 Hz to 300 Hz to keep the file small)
raw = raw.resample(300)
raw.save("eeg_restingstate_300hz.fif", overwrite=True)
## Convert to df
# df = pd.DataFrame(raw.get_data().T)
# df.columns = raw.info["ch_names"]
# df.to_csv("eeg_restingstate_300hz.csv")
| 1,318 | 27.673913 | 100 | py |
NeuroKit | NeuroKit-master/data/mit_arrhythmia/download_mit_arrhythmia.py | # -*- coding: utf-8 -*-
"""Script for formatting the MIT-Arrhythmia database
Steps:
1. Download the ZIP database from https://alpha.physionet.org/content/mitdb/1.0.0/
2. Open it with a zip-opener (WinZip, 7zip).
3. Extract the folder of the same name (named 'mit-bih-arrhythmia-database-1.0.0') to the same folder as this script.
4. Run this script.
Credits:
https://github.com/berndporr/py-ecg-detectors/blob/master/tester_MITDB.py by Bernd Porr
"""
import pandas as pd
import numpy as np
import wfdb
import os
data_files = ["mit-bih-arrhythmia-database-1.0.0/" + file for file in os.listdir("mit-bih-arrhythmia-database-1.0.0") if ".dat" in file]
def read_file(file, participant):
    """Load one MIT-BIH Arrhythmia record and return signal + annotations.

    Parameters
    ----------
    file : str
        Path to the record's ``.dat`` file; the extension is stripped
        before the path is handed to ``wfdb``.
    participant : int
        Index used to build the ``Participant`` identifier.

    Returns
    -------
    data : pd.DataFrame
        ECG samples (first channel) with Participant / Sample /
        Sampling_Rate / Database metadata columns.
    anno : pd.DataFrame
        Sample indices of annotated heartbeats (``Rpeaks``) with the
        same metadata columns.
    """
    record = file[:-4]  # drop the ".dat" extension
    # Hoist the values shared by both frames so they cannot drift apart.
    participant_id = "MIT-Arrhythmia_%.2i" % (participant)
    # Records under 'x_mitdb/' are additional takes of the same subjects.
    database = "MIT-Arrhythmia-x" if "x_mitdb" in file else "MIT-Arrhythmia"

    # Get signal (first channel only)
    data = pd.DataFrame({"ECG": wfdb.rdsamp(record)[0][:, 0]})
    data["Participant"] = participant_id
    data["Sample"] = range(len(data))
    data["Sampling_Rate"] = 360
    data["Database"] = database

    # Get annotations: keep only beat symbols (normal and arrhythmic
    # beats), discarding non-beat markers such as rhythm changes.
    # np.isin replaces the legacy np.in1d alias (deprecated in NumPy 2.0).
    anno = wfdb.rdann(record, 'atr')
    anno = np.unique(anno.sample[np.isin(anno.symbol, ['N', 'L', 'R', 'B', 'A', 'a', 'J', 'S', 'V', 'r', 'F', 'e', 'j', 'n', 'E', '/', 'f', 'Q', '?'])])
    anno = pd.DataFrame({"Rpeaks": anno})
    anno["Participant"] = participant_id
    anno["Sampling_Rate"] = 360
    anno["Database"] = database
    return data, anno
dfs_ecg = []
dfs_rpeaks = []
# Read every record, collecting the ECG signal and the R-peak annotations
# so they can be concatenated into a single CSV each at the end.
for participant, file in enumerate(data_files):
    print("Participant: " + str(participant + 1) + "/" + str(len(data_files)))
    data, anno = read_file(file, participant)
    # Store with the rest
    dfs_ecg.append(data)
    dfs_rpeaks.append(anno)
    # Store additional recording if available: some subjects have a second
    # take stored under 'x_mitdb/' with an 'x_' prefix on the file name.
    if "x_" + file.replace("mit-bih-arrhythmia-database-1.0.0/", "") in os.listdir("mit-bih-arrhythmia-database-1.0.0/x_mitdb/"):
        print(" - Additional recording detected.")
        data, anno = read_file("mit-bih-arrhythmia-database-1.0.0/x_mitdb/" + "x_" + file.replace("mit-bih-arrhythmia-database-1.0.0/", ""), participant)
        # Store with the rest
        dfs_ecg.append(data)
        dfs_rpeaks.append(anno)
# Save
df_ecg = pd.concat(dfs_ecg).to_csv("ECGs.csv", index=False)
dfs_rpeaks = pd.concat(dfs_rpeaks).to_csv("Rpeaks.csv", index=False)
# Quick test
#import neurokit2 as nk
#nk.events_plot(anno["Rpeaks"][anno["Rpeaks"] <= 1000], data["ECG"][0:1002]) | 2,522 | 33.561644 | 153 | py |
NeuroKit | NeuroKit-master/data/fantasia/download_fantasia.py | # -*- coding: utf-8 -*-
"""Script for formatting the Fantasia Database
The database consists of twenty young and twenty elderly healthy subjects. All subjects remained in a resting state in sinus rhythm while watching the movie Fantasia (Disney, 1940) to help maintain wakefulness. The continuous ECG signals were digitized at 250 Hz. Each heartbeat was annotated using an automated arrhythmia detection algorithm, and each beat annotation was verified by visual inspection.
Steps:
1. Download the ZIP database from https://physionet.org/content/fantasia/1.0.0/
2. Open it with a zip-opener (WinZip, 7zip).
3. Extract the folder of the same name (named 'fantasia-database-1.0.0') to the same folder as this script.
4. Run this script.
"""
import pandas as pd
import numpy as np
import wfdb
import os
files = os.listdir("./fantasia-database-1.0.0/")
files = [s.replace('.dat', '') for s in files if ".dat" in s]
dfs_ecg = []
dfs_rpeaks = []
# Read every record, keep the ECG column, and collect signal + R-peak
# annotations for one combined CSV each.
for i, participant in enumerate(files):
    data, info = wfdb.rdsamp("./fantasia-database-1.0.0/" + participant)
    # Get signal (keep only the ECG column; any other channels are dropped)
    data = pd.DataFrame(data, columns=info["sig_name"])
    data = data[["ECG"]]
    data["Participant"] = "Fantasia_" + participant
    data["Sample"] = range(len(data))
    data["Sampling_Rate"] = info['fs']  # from the record header (250 Hz per the database description)
    data["Database"] = "Fantasia"
    # Get annotations: keep only beats labelled "N" (normal sinus beats)
    anno = wfdb.rdann("./fantasia-database-1.0.0/" + participant, 'ecg')
    anno = anno.sample[np.where(np.array(anno.symbol) == "N")[0]]
    anno = pd.DataFrame({"Rpeaks": anno})
    anno["Participant"] = "Fantasia_" + participant
    anno["Sampling_Rate"] = info['fs']
    anno["Database"] = "Fantasia"
    # Store with the rest
    dfs_ecg.append(data)
    dfs_rpeaks.append(anno)
# Save
df_ecg = pd.concat(dfs_ecg).to_csv("ECGs.csv", index=False)
dfs_rpeaks = pd.concat(dfs_rpeaks).to_csv("Rpeaks.csv", index=False)
| 1,886 | 34.603774 | 403 | py |
NeuroKit | NeuroKit-master/data/gudb/download_gudb.py | # -*- coding: utf-8 -*-
"""Script for downloading, formatting and saving the GUDB database (https://github.com/berndporr/ECG-GUDB).
It contains ECGs from 25 subjects. Each subject was recorded performing 5 different tasks for two minutes:
- sitting
- a maths test on a tablet
- walking on a treadmill
- running on a treadmill
- using a hand bike
The sampling rate is 250Hz for all experiments.
Credits and citation:
- Howell, L., & Porr, B. (2018). High precision ECG Database with annotated R peaks,
recorded and filmed under realistic conditions.
"""
import pandas as pd
import ecg_gudb_database
dfs_ecg = []
dfs_rpeaks = []
# Loop over the 25 subjects x 5 experimental conditions, keeping the
# chest-strap ECG whenever verified R-peak annotations are available.
for participant in range(25):
    print("Participant: " + str(participant+1) + "/25")
    for i, experiment in enumerate(ecg_gudb_database.GUDb.experiments):
        print(" - Condition " + str(i+1) + "/5")
        # creating class which loads the experiment
        ecg_class = ecg_gudb_database.GUDb(participant, experiment)
        # Chest Strap Data - only donwload if R-peaks annotations are available
        if ecg_class.anno_cs_exists:
            # Use ONE label for both frames. The original code tagged the
            # signal "GUDB_<experiment>" but the annotations
            # "GUDB (<experiment>)", which breaks any join/groupby on the
            # Database column between ECGs.csv and Rpeaks.csv.
            database = "GUDB_" + experiment
            data = pd.DataFrame({"ECG": ecg_class.cs_V2_V1})
            data["Participant"] = "GUDB_%.2i" %(participant)
            data["Sample"] = range(len(data))
            data["Sampling_Rate"] = 250
            data["Database"] = database
            # getting annotations (sample indices of the R-peaks)
            anno = pd.DataFrame({"Rpeaks": ecg_class.anno_cs})
            anno["Participant"] = "GUDB_%.2i" %(participant)
            anno["Sampling_Rate"] = 250
            anno["Database"] = database
            # Store with the rest
            dfs_ecg.append(data)
            dfs_rpeaks.append(anno)
        # Einthoven leads
        # if ecg_class.anno_cables_exists:
        #     cables_anno = ecg_class.anno_cables
        #     einthoven_i = ecg_class.einthoven_I
        #     einthoven_ii = ecg_class.einthoven_II
        #     einthoven_iii = ecg_class.einthoven_III
# Save
df_ecg = pd.concat(dfs_ecg).to_csv("ECGs.csv", index=False)
dfs_rpeaks = pd.concat(dfs_rpeaks).to_csv("Rpeaks.csv", index=False)
| 2,114 | 33.112903 | 107 | py |
NeuroKit | NeuroKit-master/data/ludb/download_ludb.py | # -*- coding: utf-8 -*-
"""Script for formatting the Lobachevsky University Electrocardiography Database
The database consists of 200 10-second 12-lead ECG signal records representing different morphologies of the ECG signal. The ECGs were collected from healthy volunteers and patients, which had various cardiovascular diseases. The boundaries of P, T waves and QRS complexes were manually annotated by cardiologists for all 200 records.
Steps:
1. In the command line, run 'pip install gsutil'
2. Then, 'gsutil -m cp -r gs://ludb-1.0.0.physionet.org D:/YOURPATH/NeuroKit/data/ludb'
This will download all the files in a folder named 'ludb-1.0.0.physionet.org' at the
destination you entered.
3. Run this script.
"""
import pandas as pd
import numpy as np
import wfdb
import os
files = os.listdir("./ludb-1.0.0.physionet.org/")
dfs_ecg = []
dfs_rpeaks = []
# Records are simply named "1" .. "200"; read each one, keep lead I as the
# ECG, and collect signal + annotations for one combined CSV each.
for participant in range(200):
    filename = str(participant + 1)
    data, info = wfdb.rdsamp("./ludb-1.0.0.physionet.org/" + filename)
    # Get signal: keep only lead "i" (lead I) and rename it to "ECG"
    data = pd.DataFrame(data, columns=info["sig_name"])
    data = data[["i"]].rename(columns={"i": "ECG"})
    data["Participant"] = "LUDB_%.2i" %(participant + 1)
    data["Sample"] = range(len(data))
    data["Sampling_Rate"] = info['fs']  # taken from the record header
    data["Database"] = "LUDB"
    # Get annotations from the lead-I annotation file ('atr_i'); keep only
    # the "N" symbols (QRS annotations -- TODO confirm against the LUDB docs)
    anno = wfdb.rdann("./ludb-1.0.0.physionet.org/" + filename, 'atr_i')
    anno = anno.sample[np.where(np.array(anno.symbol) == "N")[0]]
    anno = pd.DataFrame({"Rpeaks": anno})
    anno["Participant"] = "LUDB_%.2i" %(participant + 1)
    anno["Sampling_Rate"] = info['fs']
    anno["Database"] = "LUDB"
    # Store with the rest
    dfs_ecg.append(data)
    dfs_rpeaks.append(anno)
# Save
df_ecg = pd.concat(dfs_ecg).to_csv("ECGs.csv", index=False)
dfs_rpeaks = pd.concat(dfs_rpeaks).to_csv("Rpeaks.csv", index=False)
| 1,862 | 33.5 | 334 | py |
NeuroKit | NeuroKit-master/data/ptb_xl/download_ptbxl.py | # -*- coding: utf-8 -*-
"""Script for formatting the PTB-XL Database
https://physionet.org/content/ptb-xl/1.0.1/
"""
| 118 | 18.833333 | 44 | py |
NeuroKit | NeuroKit-master/data/testretest_restingstate_eeg/download_script.py | """
https://openneuro.org/datasets/ds003685/
"""
import os
import re
import shutil
import mne
import numpy as np
import openneuro as on
import neurokit2 as nk
# Download cleaned data (takes some time)
on.download(
dataset="ds003685",
target_dir="eeg/raw",
include="sub-*/ses-session1/*eyes*",
)
# Convert to MNE
path = "eeg/raw/"
for sub in os.listdir(path):
    if "sub" not in sub or "sub-60" in sub:
        continue
    print(f"Participant: {sub}")
    newpath = path + sub + "/ses-session1/eeg/"

    # The BrainVision header (.vhdr) and marker (.vmrk) files ship with a
    # broken internal file name (it contains spaces), so MNE cannot resolve
    # the .eeg data file. Strip the spaces from those entries.
    # -------------------------------------------------------------------------
    for file in [f for f in os.listdir(newpath) if ".vmrk" in f]:
        with open(newpath + file, "r") as f:
            text = f.read()  # read everything in the file
        pattern = re.search("DataFile=.*\\n", text).group(0)
        text = text.replace(pattern, pattern.replace(" ", ""))
        # Fix: re-open with "w" so the file is truncated. The original used
        # "r+", which writes from position 0 without truncating -- since the
        # fixed text is shorter, trailing bytes of the old content remained.
        with open(newpath + file, "w") as f:
            f.write(text)
    for file in [f for f in os.listdir(newpath) if ".vhdr" in f]:
        with open(newpath + file, "r") as f:
            text = f.read()  # read everything in the file
        pattern = re.search("DataFile=.*\\n", text).group(0)
        text = text.replace(pattern, pattern.replace(" ", ""))
        pattern = re.search("MarkerFile=.*\\n", text).group(0)
        text = text.replace(pattern, pattern.replace(" ", ""))
        with open(newpath + file, "w") as f:
            f.write(text)

        # Fix: convert EVERY recording inside the .vhdr loop. The original
        # called read_raw_brainvision after the loop on the leaked loop
        # variable, so only ONE of the two recordings per subject
        # (eyes open / eyes closed) was actually converted.
        # -------------------------------------------------------------------------
        raw = mne.io.read_raw_brainvision(newpath + file, preload=True, verbose=False)
        raw = raw.set_eeg_reference("average")
        # raw = raw.set_montage("biosemi64")
        if "eyesopen" in file:
            raw.save("eeg/" + sub + "_eyesopen_raw.fif", overwrite=True)
        else:
            raw.save("eeg/" + sub + "_eyesclosed_raw.fif", overwrite=True)
print("FINISHED.")
# Clean-up
shutil.rmtree("eeg/raw/")
| 2,061 | 32.258065 | 86 | py |
NeuroKit | NeuroKit-master/data/mit_long-term/download_mit_long-term.py | # -*- coding: utf-8 -*-
"""Script for formatting the MIT-Long-Term ECG Database
Steps:
1. Download the ZIP database from https://physionet.org/content/ltdb/1.0.0/
2. Open it with a zip-opener (WinZip, 7zip).
3. Extract the folder of the same name (named 'mit-bih-long-term-ecg-database-1.0.0') to the same folder as this script.
4. Run this script.
Credits:
https://github.com/berndporr/py-ecg-detectors/blob/master/tester_MITDB.py by Bernd Porr
"""
import os
import numpy as np
import pandas as pd
import wfdb
data_files = ["mit-bih-long-term-ecg-database-1.0.0/" + file for file in os.listdir("mit-bih-long-term-ecg-database-1.0.0") if ".dat" in file]
dfs_ecg = []
dfs_rpeaks = []
# Read every record, collect signal + R-peak annotations, and trim each
# recording to a manageable 2-hour window.
for participant, file in enumerate(data_files):
    print("Participant: " + str(participant + 1) + "/" + str(len(data_files)))
    # Get signal (second channel of the record)
    data = pd.DataFrame({"ECG": wfdb.rdsamp(file[:-4])[0][:, 1]})
    data["Participant"] = "MIT-LongTerm_%.2i" %(participant)
    data["Sample"] = range(len(data))
    data["Sampling_Rate"] = 128
    data["Database"] = "MIT-LongTerm"
    # getting annotations: keep only beats labelled "N"
    anno = wfdb.rdann(file[:-4], 'atr')
    anno = anno.sample[np.where(np.array(anno.symbol) == "N")[0]]
    anno = pd.DataFrame({"Rpeaks": anno})
    anno["Participant"] = "MIT-LongTerm_%.2i" %(participant)
    anno["Sampling_Rate"] = 128
    anno["Database"] = "MIT-LongTerm"
    # Select only 2h of recording (otherwise it's too big)
    # 460800 samples = 1 hour at 128 Hz, so this keeps hours 2-3 of the
    # recording; annotations are filtered to the same window and shifted
    # so sample indices match the trimmed signal.
    data = data[460800:460800*3].reset_index(drop=True)
    anno = anno[(anno["Rpeaks"] > 460800) & (anno["Rpeaks"] <= 460800*3)].reset_index(drop=True)
    anno["Rpeaks"] = anno["Rpeaks"] - 460800
    # Store with the rest
    dfs_ecg.append(data)
    dfs_rpeaks.append(anno)
# Save
df_ecg = pd.concat(dfs_ecg).to_csv("ECGs.csv", index=False)
dfs_rpeaks = pd.concat(dfs_rpeaks).to_csv("Rpeaks.csv", index=False)
# Quick test
#import neurokit2 as nk
#nk.events_plot(anno["Rpeaks"][anno["Rpeaks"] <= 1000], data["ECG"][0:1001])
| 1,993 | 29.212121 | 142 | py |
NeuroKit | NeuroKit-master/data/srm_restingstate_eeg/download_script.py | """
https://openneuro.org/datasets/ds003775/versions/1.0.0
"""
import os
import shutil
import mne
import numpy as np
import openneuro as on
import neurokit2 as nk
# Download cleaned data (takes some time)
on.download(
dataset="ds003775",
target_dir="eeg/raw",
include="sub-*",
exclude="derivatives/cleaned_data",
)
# Convert to MNE
path = "eeg/raw/"
# Read each subject's session-1 EDF, clean it, and save as FIF.
for sub in os.listdir(path):
    if "sub" not in sub:
        continue
    print(f"Participant: {sub}")
    # Assumes exactly one .edf file per subject session -- TODO confirm
    file = [f for f in os.listdir(path + sub + "/ses-t1/eeg/") if ".edf" in f][0]
    raw = mne.io.read_raw_edf(path + sub + "/ses-t1/eeg/" + file, preload=True, verbose=False)
    raw = raw.set_montage("biosemi64")
    # Clean: notch filter at 50 Hz and its harmonics up to 500 Hz
    # (presumably European mains interference), then detect bad channels
    # and interpolate them.
    raw = raw.notch_filter(freqs=np.arange(50, 501, 50), verbose=False)
    raw.info["bads"], _ = nk.eeg_badchannels(
        raw, bad_threshold=0.33, distance_threshold=0.99, show=False
    )
    print("Bad channels: " + str(len(raw.info['bads'])))
    raw = raw.interpolate_bads()
    raw.save("eeg/" + sub + "_raw.fif", overwrite=True)
print("FINISHED.")
# Clean-up
shutil.rmtree("eeg/raw/")
| 1,101 | 23.488889 | 94 | py |
NeuroKit | NeuroKit-master/data/mit_normal/download_mit_normal.py | # -*- coding: utf-8 -*-
"""Script for formatting the MIT-Normal Sinus Rhythm Database
Steps:
1. Download the ZIP database from https://physionet.org/content/nsrdb/1.0.0/
2. Open it with a zip-opener (WinZip, 7zip).
3. Extract the folder of the same name (named 'mit-bih-normal-sinus-rhythm-database-1.0.0') to the same folder as this script.
4. Run this script.
Credits:
https://github.com/berndporr/py-ecg-detectors/blob/master/tester_MITDB.py by Bernd Porr
"""
import pandas as pd
import numpy as np
import wfdb
import os
data_files = ["mit-bih-normal-sinus-rhythm-database-1.0.0/" + file for file in os.listdir("mit-bih-normal-sinus-rhythm-database-1.0.0") if ".dat" in file]
dfs_ecg = []
dfs_rpeaks = []
# Read every record, collect signal + R-peak annotations, and trim each
# recording to a manageable 2-hour window.
for participant, file in enumerate(data_files):
    print("Participant: " + str(participant + 1) + "/" + str(len(data_files)))
    # Get signal (second channel of the record)
    data = pd.DataFrame({"ECG": wfdb.rdsamp(file[:-4])[0][:, 1]})
    data["Participant"] = "MIT-Normal_%.2i" %(participant)
    data["Sample"] = range(len(data))
    data["Sampling_Rate"] = 128
    data["Database"] = "MIT-Normal"
    # getting annotations: keep only beats labelled "N"
    anno = wfdb.rdann(file[:-4], 'atr')
    anno = anno.sample[np.where(np.array(anno.symbol) == "N")[0]]
    anno = pd.DataFrame({"Rpeaks": anno})
    anno["Participant"] = "MIT-Normal_%.2i" %(participant)
    anno["Sampling_Rate"] = 128
    anno["Database"] = "MIT-Normal"
    # Select only 2h of recording (otherwise it's too big).
    # 460800 samples = 1 hour at 128 Hz, so this keeps hours 2-3.
    # Fix: the annotation window previously stopped at 460800*2 while the
    # signal ran to 460800*3, so the second retained hour of ECG had no
    # R-peak annotations at all (the sibling MIT-LongTerm script uses *3
    # for both). Align the annotation filter with the signal slice.
    data = data[460800:460800*3].reset_index(drop=True)
    anno = anno[(anno["Rpeaks"] > 460800) & (anno["Rpeaks"] <= 460800*3)].reset_index(drop=True)
    anno["Rpeaks"] = anno["Rpeaks"] - 460800
    # Store with the rest
    dfs_ecg.append(data)
    dfs_rpeaks.append(anno)
# Save
df_ecg = pd.concat(dfs_ecg).to_csv("ECGs.csv", index=False)
dfs_rpeaks = pd.concat(dfs_rpeaks).to_csv("Rpeaks.csv", index=False)
# Quick test
#import neurokit2 as nk
#nk.events_plot(anno["Rpeaks"][anno["Rpeaks"] <= 1000], data["ECG"][0:1001])
| 2,009 | 29.923077 | 154 | py |
NeuroKit | NeuroKit-master/data/lemon/download_lemon.py | # -*- coding: utf-8 -*-
"""Script for formatting the LEMON EEG dataset
https://ftp.gwdg.de/pub/misc/MPI-Leipzig_Mind-Brain-Body-LEMON/EEG_MPILMBB_LEMON/EEG_Preprocessed_BIDS_ID/EEG_Preprocessed/
Steps:
1. Download the ZIP database from https://physionet.org/content/nstdb/1.0.0/
2. Open it with a zip-opener (WinZip, 7zip).
3. Extract the folder of the same name (named 'mit-bih-noise-stress-test-database-1.0.0') to the same folder as this script.
4. Run this script.
Credits:
pycrostates package by Mathieu Scheltienne and Victor Férat
"""
import os
import mne
import numpy as np
import pooch
# Path of the database
path = "https://ftp.gwdg.de/pub/misc/MPI-Leipzig_Mind-Brain-Body-LEMON/EEG_MPILMBB_LEMON/EEG_Preprocessed_BIDS_ID/EEG_Preprocessed/"
# Create a registry with the file names
files = {
f"sub-01{i:04d}_{j}.{k}": None
for i in range(2, 319)
for j in ["EC", "EO"]
for k in ["fdt", "set"]
}
# Create fetcher
fetcher = pooch.create(
path="lemon/",
base_url=path,
registry=files,
)
# Download the files one by one. The registry is generated for every ID in
# the range, but some IDs do not exist on the server, so a failed fetch is
# expected and skipped. Catch Exception (not a bare `except`, which would
# also swallow KeyboardInterrupt/SystemExit) and log what was skipped.
for sub in files:
    try:
        _ = fetcher.fetch(sub)
    except Exception as error:
        print(f"Skipping {sub}: {error}")
print("Finished downloading!")
# Preprocessing
# fmt: off
standard_channels = [
"Fp1", "Fp2", "F7", "F3", "Fz", "F4", "F8", "FC5",
"FC1", "FC2", "FC6", "T7", "C3", "Cz", "C4", "T8",
"CP5", "CP1", "CP2", "CP6", "AFz", "P7", "P3", "Pz",
"P4", "P8", "PO9", "O1", "Oz", "O2", "PO10", "AF7",
"AF3", "AF4", "AF8", "F5", "F1", "F2", "F6", "FT7",
"FC3", "FC4", "FT8", "C5", "C1", "C2", "C6", "TP7",
"CP3", "CPz", "CP4", "TP8", "P5", "P1", "P2", "P6",
"PO7", "PO3", "POz", "PO4", "PO8",
]
# fmt: on
for sub in os.listdir("lemon/"):
    # Process only the EEGLAB ".set" headers: skip the companion ".fdt"
    # data files, already-converted ".fif" outputs, and non-subject files.
    # (Tuple-endswith replaces the `endswith(...) is True or ...` chain.)
    if sub.endswith(("fdt", "fif")) or "sub" not in sub:
        continue
    raw = mne.io.read_raw_eeglab("lemon/" + sub, preload=True)
    # Some recordings lack channels from the standard 61-channel layout;
    # pad them with zeros and mark them as bad so they can be interpolated.
    missing_channels = list(set(standard_channels) - set(raw.info["ch_names"]))
    if missing_channels:
        # add the missing channels as bads (array of zeros)
        missing_data = np.zeros((len(missing_channels), raw.n_times))
        data = np.vstack([raw.get_data(), missing_data])
        ch_names = raw.info["ch_names"] + missing_channels
        ch_types = raw.get_channel_types() + ["eeg"] * len(missing_channels)
        info = mne.create_info(ch_names=ch_names, ch_types=ch_types, sfreq=raw.info["sfreq"])
        raw = mne.io.RawArray(data=data, info=info)
        raw.info["bads"].extend(missing_channels)
    # Restore the online reference, impose a common channel order/montage,
    # interpolate the padded bads, and re-reference to the average.
    raw = raw.add_reference_channels("FCz")
    raw = raw.reorder_channels(standard_channels)
    raw = raw.set_montage("standard_1005")
    raw = raw.interpolate_bads()
    raw = raw.set_eeg_reference("average").apply_proj()
    raw.save("lemon/" + sub.replace(".set", "") + "_raw.fif", overwrite=True)
# Clean-up
for sub in os.listdir("lemon/"):
if sub.endswith("fif"):
continue
os.remove(f"lemon/{sub}")
print("FINISHED.")
| 2,996 | 30.21875 | 132 | py |
NeuroKit | NeuroKit-master/data/mit_nst/download_mit_nst.py | # -*- coding: utf-8 -*-
"""Script for formatting the MIT-Noise Stress Test database
Steps:
1. Download the ZIP database from https://physionet.org/content/nstdb/1.0.0/
2. Open it with a zip-opener (WinZip, 7zip).
3. Extract the folder of the same name (named 'mit-bih-noise-stress-test-database-1.0.0') to the same folder as this script.
4. Run this script.
Credits:
https://github.com/berndporr/py-ecg-detectors/blob/master/tester_MITDB.py by Bernd Porr
"""
import pandas as pd
import numpy as np
import wfdb
import os
# List every raw signal file of the database
data_files = ["mit-bih-noise-stress-test-database-1.0.0/" + file for file in os.listdir("mit-bih-noise-stress-test-database-1.0.0") if ".dat" in file]

dfs_ecg = []
dfs_rpeaks = []

for participant, file in enumerate(data_files):
    # Only the noise-contaminated ECG records (118e*, 119e*) are formatted; the other
    # .dat files (presumably the 3 pure-noise records, hence the "-3" in the progress
    # message below) are skipped.
    # BUGFIX: this used `break`, which aborted the whole loop at the first
    # non-matching file. Since os.listdir() ordering is unspecified, that could skip
    # valid records (or process none at all); `continue` skips only that file.
    if ('mit-bih-noise-stress-test-database-1.0.0/119' in file or 'mit-bih-noise-stress-test-database-1.0.0/118' in file) is False:
        continue
    print("Record: " + str(participant + 1) + "/" + str(len(data_files)-3))

    # Get signal (first channel of the record)
    data = pd.DataFrame({"ECG": wfdb.rdsamp(file[:-4])[0][:, 0]})
    data["Participant"] = "MIT-NST_118" if "118e" in file else "MIT-NST_119"
    data["Sample"] = range(len(data))
    data["Sampling_Rate"] = 360
    data["Database"] = "MIT-NST"

    # Get annotations, keeping only the beat symbols (i.e., R-peak locations)
    anno = wfdb.rdann(file[:-4], 'atr')
    anno = np.unique(anno.sample[np.in1d(anno.symbol, ['N', 'L', 'R', 'B', 'A', 'a', 'J', 'S', 'V', 'r', 'F', 'e', 'j', 'n', 'E', '/', 'f', 'Q', '?'])])
    anno = pd.DataFrame({"Rpeaks": anno})
    anno["Participant"] = "MIT-NST_118" if "118e" in file else "MIT-NST_119"
    anno["Sampling_Rate"] = 360
    anno["Database"] = "MIT-NST"

    # Store with the rest
    dfs_ecg.append(data)
    dfs_rpeaks.append(anno)

# Save (note: to_csv() returns None, so the previous `df_ecg = ...` bindings were useless)
pd.concat(dfs_ecg).to_csv("ECGs.csv", index=False)
pd.concat(dfs_rpeaks).to_csv("Rpeaks.csv", index=False)

# Quick test
#import neurokit2 as nk
#nk.events_plot(anno["Rpeaks"][anno["Rpeaks"] <= 1000], data["ECG"][0:1002])
NeuroKit | NeuroKit-master/neurokit2/__init__.py | """Top-level package for NeuroKit."""
import datetime
import platform
import matplotlib
# Dependencies
import numpy as np
import pandas as pd
import scipy
import sklearn
from .benchmark import *
from .bio import *
from .complexity import *
from .data import *
from .ecg import *
from .eda import *
from .eeg import *
from .emg import *
from .eog import *
from .epochs import *
from .events import *
from .hrv import *
from .markov import *
from .microstates import *
from .misc import *
from .ppg import *
from .rsp import *
from .signal import *
from .stats import *
from .video import *
# Info
__version__ = "0.2.5"
# Maintainer info
__author__ = "The NeuroKit development team"
__email__ = "dom.makowski@gmail.com"
# Citation
__bibtex__ = r"""
@article{Makowski2021neurokit,
author = {Dominique Makowski and Tam Pham and Zen J. Lau and Jan C. Brammer and Fran{\c{c}}ois Lespinasse and Hung Pham and Christopher Schölzel and S. H. Annabel Chen},
title = {{NeuroKit}2: A Python toolbox for neurophysiological signal processing},
journal = {Behavior Research Methods},
volume = {53},
number = {4},
pages = {1689--1696},
publisher = {Springer Science and Business Media {LLC}},
doi = {10.3758/s13428-020-01516-y},
url = {https://doi.org/10.3758%2Fs13428-020-01516-y},
year = 2021,
month = {feb}
}
"""
__cite__ = (
"""
You can cite NeuroKit2 as follows:
- Makowski, D., Pham, T., Lau, Z. J., Brammer, J. C., Lespinasse, F., Pham, H.,
Schölzel, C., & Chen, S. A. (2021). NeuroKit2: A Python toolbox for neurophysiological signal processing.
Behavior Research Methods, 53(4), 1689-1696. https://doi.org/10.3758/s13428-020-01516-y
Full bibtex reference:
"""
+ __bibtex__
)
# Aliases for citation
__citation__ = __cite__
# =============================================================================
# Helper functions to retrieve info
# =============================================================================
def cite(silent=False):
    """Cite NeuroKit2.

    Print the bibtex and APA references ready to copy and cite or, when ``silent``
    is not ``False``, return the bibtex entry instead of printing.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      nk.cite()

    """
    # Guard clause: caller only wants the bibtex string back
    if silent is not False:
        return __bibtex__
    print(__cite__)
def version(silent=False):
    """NeuroKit2's version.

    Print the version of NeuroKit2 along with those of the platform and of the main
    dependencies or, when ``silent`` is not ``False``, return the version string.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      nk.version()

    """
    # Guard clause: caller only wants the version string back
    if silent is not False:
        return __version__
    print(
        "- OS: " + platform.system(),
        f"({platform.architecture()[1]} {platform.architecture()[0]})",
        "\n- Python: " + platform.python_version(),
        "\n- NeuroKit2: " + __version__,
        "\n\n- NumPy: " + np.__version__,
        "\n- Pandas: " + pd.__version__,
        "\n- SciPy: " + scipy.__version__,
        "\n- sklearn: " + sklearn.__version__,
        "\n- matplotlib: " + matplotlib.__version__,
    )
| 3,105 | 23.650794 | 173 | py |
NeuroKit | NeuroKit-master/neurokit2/video/video_blinks.py | # !!!!!!!!!!!!!!!!!!!!!!!!
# ! NEED HELP WITH THAT !
# !!!!!!!!!!!!!!!!!!!!!!!!
# import numpy as np
# from ..misc import progress_bar
# def video_blinks(video, verbose=True):
# """**Extract blinks from video**"""
# # Try loading menpo
# try:
# import cv2
# import menpo.io
# import menpo.landmark
# import menpodetect
# except ImportError:
# raise ImportError(
# "The 'menpo' and 'menpodetect' modules are required for this function to run. ",
# "Please install them first (`pip install menpo` and `pip install menpodetect`).",
# )
# frame = video[0]
# # 1. Extract faces
# faces = nk.video_face(video, verbose=False)
# face = faces[0]
# for i, face in enumerate(faces):
# img = menpo.image.Image(face, copy=True)
# img_bw = img.as_greyscale()
# # Eyes detection
# eyes = menpodetect.load_opencv_eye_detector()(img_bw)
# img_bw.view()
# eye.view(line_width=1, render_markers=False, line_colour="r")
# for eye in eyes:
# eye.view(line_width=1, render_markers=False, line_colour="r")
# def detect_pupil(img_bw):
# """
# This method should use cv2.findContours and cv2.HoughCircles() function from cv2 library to find the pupil
# and then set the coordinates for pupil circle coordinates
# """
# # as array
# img = img_bw.as_vector().reshape(img_bw.shape).copy()
# # First binarize the image so that findContours can work correctly.
# menpo.image.Image(img, copy=True).view()
# img[img >= 100] = 255
# img[img < 100] = 0
# # Now find the contours and then find the pupil in the contours.
# contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# # Make a copy image of the original and then use the drawContours function to actually apply
# # the contours found in the previous step
# img_with_contours = np.copy(img)
# cv2.drawContours(img_with_contours, contours, -1, (0, 255, 0))
# c = cv2.HoughCircles(
# img_with_contours, cv2.HOUGH_GRADIENT, 2, self._img.shape[0] / 2, maxRadius=150
# )
# # Then mask the pupil from the image and store it's coordinates.
# for l in c:
# # OpenCV returns the circles as a list of lists of circles
# for circle in l:
# center = (int(circle[0]), int(circle[1]))
# radius = int(circle[2])
# cv2.circle(self._img, center, radius, (0, 0, 0), thickness=-1)
# pupil = (center[0], center[1], radius)
| 2,574 | 35.267606 | 112 | py |
NeuroKit | NeuroKit-master/neurokit2/video/video_skin.py | import numpy as np
from ..misc import find_closest
def video_skin(face, show=False):
    """**Skin detection**

    This function detects the skin in a face.

    .. note::

        This function is experimental. If you are interested in helping us improve that aspect of
        NeuroKit (e.g., by adding more detection algorithms), please get in touch!

    Parameters
    ----------
    face : np.ndarray
        A face data numpy array of the shape (channel, height, width).
    show : bool
        Whether to show the skin detection mask.

    Returns
    -------
    tuple
        A ``(mask, masked_face)`` tuple: the binary skin mask and the face image
        (height, width, channel) with non-skin pixels zeroed out.

    See Also
    --------
    video_face, video_ppg

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # video, sampling_rate = nk.read_video("video.mp4")
      # faces = nk.video_face(video)
      # skin = nk.video_skin(faces[0], show=True)

    """
    # Try loading cv2
    try:
        import cv2
    except ImportError:
        raise ImportError(
            "The 'cv2' module is required for this function to run. ",
            "Please install it first (`pip install opencv-python`).",
        )

    # (channel, height, width) -> (height, width, channel), the layout OpenCV expects
    img = face.swapaxes(0, 1).swapaxes(1, 2)

    # Credits:
    # https://github.com/pavisj/rppg-pos/blob/master/SkinDetector/skin_detector/skin_detector.py

    # Get mask in HSV space
    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    lower_thresh = np.array([0, 50, 0], dtype="uint8")
    upper_thresh = np.array([120, 150, 255], dtype="uint8")
    mask_hsv = cv2.inRange(img_hsv, lower_thresh, upper_thresh)
    # inRange() yields 0/255; binarize to 0/1
    mask_hsv[mask_hsv < 128] = 0
    mask_hsv[mask_hsv >= 128] = 1

    # Get mask in RGB space
    lower_thresh = np.array([45, 52, 108], dtype="uint8")
    upper_thresh = np.array([255, 255, 255], dtype="uint8")
    mask_a = cv2.inRange(img, lower_thresh, upper_thresh)
    # NOTE(review): img is presumably uint8 here, so these channel differences wrap
    # modulo 256 for negative values — TODO confirm this is intended
    mask_b = 255 * ((img[:, :, 2] - img[:, :, 1]) / 20)
    mask_c = 255 * ((np.max(img, axis=2) - np.min(img, axis=2)) / 20)
    mask_d = np.bitwise_and(np.uint64(mask_a), np.uint64(mask_b))
    mask_rgb = np.bitwise_and(np.uint64(mask_c), np.uint64(mask_d))
    mask_rgb[mask_rgb < 128] = 0
    mask_rgb[mask_rgb >= 128] = 1

    # Get mask in YCbCr space
    lower_thresh = np.array([90, 100, 130], dtype="uint8")
    upper_thresh = np.array([230, 120, 180], dtype="uint8")
    img_ycrcb = cv2.cvtColor(img, cv2.COLOR_RGB2YCR_CB)
    mask_ycrcb = cv2.inRange(img_ycrcb, lower_thresh, upper_thresh)
    mask_ycrcb[mask_ycrcb < 128] = 0
    mask_ycrcb[mask_ycrcb >= 128] = 1

    # Combine the three masks: each pixel's value is the fraction of colour spaces
    # that voted "skin" (0, 1/3, 2/3 or 1)
    mask = (mask_hsv + mask_rgb + mask_ycrcb) / 3

    # Get percentages of skin as a function of different thresholds
    threshold = np.arange(0, 1.2, 0.3)
    percent = [np.sum(mask >= t) / mask.size for t in threshold]
    # Pick the threshold whose skin coverage is closest to 50% of the image
    threshold = threshold[find_closest(0.5, percent, return_index=True)]
    mask[mask < threshold] = 0
    mask[mask >= threshold] = 255

    # Process mask: morphological closing then opening to fill small holes and
    # remove small specks
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)

    # Grab and cut: blur the mask to derive "sure background" (0), "sure foreground"
    # (1) and "probable background" (2) seed regions for GrabCut
    kernel = np.ones((50, 50), np.float32) / (50 * 50)
    dst = cv2.filter2D(mask, -1, kernel)
    dst[dst != 0] = 255
    free = np.array(cv2.bitwise_not(dst), dtype="uint8")
    grab_mask = np.zeros(mask.shape, dtype="uint8")
    grab_mask[:, :] = 2
    grab_mask[mask == 255] = 1
    grab_mask[free == 255] = 0

    # Refine with GrabCut only when every pixel ended up classified as either sure
    # foreground (1) or sure background (0)
    if np.unique(grab_mask).tolist() == [0, 1]:
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)
        if img.size != 0:
            mask, bgdModel, fgdModel = cv2.grabCut(
                img, grab_mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK
            )
            # GrabCut labels 0/2 as background; collapse to a binary 0/1 mask
            mask = np.where((mask == 2) | (mask == 0), 0, 1).astype("uint8")
    mask = mask.astype("uint8")
    # Zero-out the non-skin pixels of the face image
    masked_face = cv2.bitwise_and(img, img, mask=mask)
    if show is True:
        print(f"{int((100 / 255) * np.sum(mask) / mask.size)}% of the image is skin")
        cv2.imshow("img", cv2.cvtColor(mask.astype("uint8"), cv2.COLOR_RGB2BGR))
        cv2.waitKey(0)
    return mask, masked_face
| 4,237 | 31.6 | 97 | py |
NeuroKit | NeuroKit-master/neurokit2/video/video_ppg.py | import numpy as np
from ..misc import progress_bar
from .video_face import video_face
from .video_skin import video_skin
def video_ppg(video, sampling_rate=30, verbose=True):
    """**Remote Photoplethysmography (rPPG) from Video**

    Recover a photoplethysmographic (PPG) signal from a webcam recording using the
    Plane-Orthogonal-to-Skin (POS) algorithm.

    .. note::

        This function is experimental and does NOT seem to work at all
        (https://github.com/DominiqueMakowski/RemotePhysiology). If you
        are interested in helping us improve that aspect of NeuroKit
        (e.g., by adding more detection algorithms), please get in touch!

    Parameters
    ----------
    video : np.ndarray
        A video data numpy array of the shape (frame, channel, height, width).
    sampling_rate : int
        The sampling rate of the video, by default 30 fps (a common sampling rate for
        commercial webcams).
    verbose : bool
        Whether to print the progress bar.

    Returns
    -------
    np.ndarray
        A PPG signal.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # video, sampling_rate = nk.read_video("video.mp4")
      # ppg = nk.video_ppg(video)

    References
    ----------
    * Wang, W., Den Brinker, A. C., Stuijk, S., & De Haan, G. (2016). Algorithmic principles of
      remote PPG. IEEE Transactions on Biomedical Engineering, 64(7), 1479-1491.

    """
    # Output container, initialized to NaN
    ppg = np.full((len(video)), np.nan)

    # Process the recording in 8-second chunks (5 * 1.6 s, the POS smoothing window)
    chunk_size = int(sampling_rate * 8)
    for _, onset in progress_bar(np.arange(0, len(video), chunk_size), verbose=verbose):
        offset = min(onset + chunk_size, len(video))
        ppg[onset:offset] = _video_ppg(video[onset:offset, :, :, :], sampling_rate, window=1.6)
    return ppg
# ==============================================================================
# Internals
# ==============================================================================
def _video_ppg(video, sampling_rate=30, window=1.6):
    """Apply the POS algorithm (Wang et al., 2016) to one chunk of video frames.

    Parameters
    ----------
    video : np.ndarray
        Chunk of video frames of shape (frame, channel, height, width).
    sampling_rate : int
        Frames per second of the video.
    window : float
        Length (in seconds) of the temporal normalization window.

    Returns
    -------
    np.ndarray
        The PPG signal (one float value per frame).
    """
    # 1. Extract faces
    faces = video_face(video, verbose=False)

    # Mean skin colour (R, G, B) per frame
    rgb = np.full((len(faces), 3), np.nan)
    for i, face in enumerate(faces):
        # 2. Extract skin
        mask, masked_face = video_skin(face)

        # 3. Spatial averaging: mean of each colour channel over the skin pixels
        r = np.sum(masked_face[:, :, 0]) / np.sum(mask > 0)
        g = np.sum(masked_face[:, :, 1]) / np.sum(mask > 0)
        b = np.sum(masked_face[:, :, 2]) / np.sum(mask > 0)
        rgb[i, :] = [r, g, b]

    # Plane-Orthogonal-to-Skin (POS)
    # ==============================
    # Calculating window (l) in frames
    window = int(sampling_rate * window)

    # BUGFIX: this was `np.full(len(rgb), 0)`, which allocates an *integer* array;
    # the float values overlap-added below were then silently truncated to ints.
    H = np.zeros(len(rgb))
    for t in range(0, (rgb.shape[0] - window)):
        # 4. Windowing of the colour traces
        C = rgb[t : t + window - 1, :].T

        # 5. Temporal normalization (divide each channel by its window mean)
        mean_color = np.mean(C, axis=1)
        try:
            Cn = np.matmul(np.linalg.inv(np.diag(mean_color)), C)
        except np.linalg.LinAlgError:  # Singular matrix
            continue

        # 6. Projection
        S = np.matmul(np.array([[0, 1, -1], [-2, 1, 1]]), Cn)

        # 7. Tuning (2D signal to 1D signal)
        std = np.array([1, np.std(S[0, :]) / np.std(S[1, :])])
        P = np.matmul(std, S)

        # 8. Overlap-Adding of the standardized window
        H[t : t + window - 1] = H[t : t + window - 1] + (P - np.mean(P)) / np.std(P)

    return H
| 3,488 | 30.718182 | 96 | py |
NeuroKit | NeuroKit-master/neurokit2/video/__init__.py | """Submodule for NeuroKit."""
from .video_face import video_face
from .video_plot import video_plot
from .video_ppg import video_ppg
from .video_skin import video_skin
__all__ = ["video_plot", "video_face", "video_skin", "video_ppg"]
| 236 | 25.333333 | 65 | py |
NeuroKit | NeuroKit-master/neurokit2/video/video_plot.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from ..signal import signal_resample
def video_plot(video, sampling_rate=30, frames=3, signals=None):
    """**Visualize video**

    This function plots a few frames from a video as an image, optionally with
    signals plotted in separate axes underneath. Nothing is returned: the plot is
    drawn on a new matplotlib figure.

    Parameters
    ----------
    video : np.ndarray
        An video data numpy array of the shape (frame, channel, height, width)
    sampling_rate : int
        The number of frames per second (FPS), by default 30.
    frames : int or list
        What frames to plot. If list, indicates the index of frames. If number, will select
        linearly spaced frames.
    signals : list
        A list of signals to plot under the videos.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # video, sampling_rate = nk.read_video("video.mp4")
      # nk.video_plot(video, sampling_rate=sampling_rate)

    """
    # Put into list if it's not already
    if isinstance(video, list) is False:
        video = [video]

    # How many subplots: one per video plus one per additional signal
    nrows = len(video)
    if signals is not None:
        if isinstance(signals, list) is False:
            signals = [signals]
        nrows += len(signals)

    # Get x-axis (of the first video): at least 1000 points, or one per frame when
    # the video is longer than that
    length = video[0].shape[0]
    desired_length = 1000
    if length > 1000:
        desired_length = length

    # TODO: height_ratios doesn't work as expected
    _, ax = plt.subplots(
        nrows=nrows,
        sharex=True,
        # gridspec_kw={"height_ratios": height_ratios},
        constrained_layout=True,
    )

    # Get frame locations: if an int was given, pick that many linearly spaced indices
    if isinstance(frames, int):
        frames = np.linspace(0, length - 1, frames).astype(int)

    # For each videos in the list, plot them
    if nrows == 1:
        ax = [ax]  # Otherwise it will make ax[i] non subscriptable
    for i, vid in enumerate(video):
        vid = _video_plot_format(vid, frames=frames, desired_length=desired_length)
        ax[i].axis("off")
        ax[i].imshow(vid, aspect="auto")

    if signals is not None:
        for j, signal in enumerate(signals):
            # Make sure the size matches the x-axis resolution
            if len(signal) != length:
                signal = signal_resample(signal, desired_length=desired_length)
            # Plot
            ax[i + j + 1].plot(signal)
            # Mark the location of each displayed frame on the signal axis
            for frame in frames:
                ax[i + j + 1].axvline(
                    x=int(np.round(frame / length * desired_length)),
                    color="black",
                    linestyle="--",
                    alpha=0.5,
                )

    # Ticks in seconds
    plt.xticks(
        np.linspace(0, desired_length, 5),
        np.char.mod("%.1f", np.linspace(0, length / sampling_rate, 5)),
    )
    plt.xlabel("Time (s)")
def _video_plot_format(vid, frames=[0], desired_length=1000):
    """Stitch the selected frames side by side and rescale to the target width."""
    # Try loading cv2
    try:
        import cv2
    except ImportError:
        raise ImportError(
            "The 'cv2' module is required for this function to run. ",
            "Please install it first (`pip install opencv-python`).",
        )

    # (frames, channels, height, width) -> (frames, height, width, channels) for cv2
    vid = vid.transpose(0, 2, 3, 1)

    # Concatenate the selected frames horizontally into one image
    strip = np.concatenate(vid[frames], axis=1)

    # Rescale to the desired width, keeping the original frame height
    return cv2.resize(
        strip.astype("uint8"),
        dsize=(desired_length, vid.shape[1]),
        interpolation=cv2.INTER_CUBIC,
    )
| 3,435 | 26.934959 | 91 | py |
NeuroKit | NeuroKit-master/neurokit2/video/video_face.py | import numpy as np
from ..misc import progress_bar
def video_face(video, verbose=True):
    """**Extract face from video**

    Detect and crop the face from each frame of a video. Requires the ``cv2``,
    ``menpo`` and ``menpodetect`` modules to be installed.

    .. note::

        This function is experimental. If you are interested in helping us improve that
        aspect of NeuroKit (e.g., by adding more detection algorithms), please get in
        touch!

    Parameters
    ----------
    video : np.ndarray
        An video data numpy array of the shape (frame, channel, height, width)
    verbose : bool
        Whether to print the progress bar.

    Returns
    -------
    np.ndarray
        The cropped faces, one 500x500 RGB image per frame (uint8).

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # video, sampling_rate = nk.read_video("video.mp4")
      # faces = nk.video_face(video)
      # nk.video_plot([video, faces])

    """
    # One 500x500 RGB slot per frame; frames with no detected face stay all-zero
    cropped = np.full([len(video), 3, 500, 500], 0)
    for i, frame in progress_bar(video, verbose=verbose):
        detections = _video_face_landmarks(frame)
        if len(detections) > 0:
            # Keep only the first detected face
            cropped[i, :, :, :] = _video_face_crop(frame, detections[0])
    return cropped.astype("uint8")
# ==============================================================================
# Internals
# ==============================================================================
def _video_face_crop(frame, face):
    """Crop the detected bounding box out of `frame` and resize it to 500x500."""
    # Try loading cv2
    try:
        import cv2
    except ImportError:
        raise ImportError(
            "The 'cv2' module is required for this function to run. ",
            "Please install it first (`pip install opencv-python`).",
        )

    # Corner coordinates of the detected bounding box
    box = face.as_vector().reshape(-1, 2).astype(int)

    # Crop (channel-first indexing)
    cropped = frame[:, box[0, 0] : box[1, 0], box[0, 1] : box[2, 1]]

    # Resize in (height, width, channel) layout, then restore channel-first layout
    resized = cv2.resize(cropped.transpose(1, 2, 0), (500, 500))
    return resized.transpose(2, 0, 1).astype(int)
def _video_face_landmarks(frame):
    """Detect faces in one frame with menpodetect's OpenCV frontal-face detector."""
    # Try loading menpo
    try:
        import menpo.io
        import menpo.landmark
        import menpodetect
    except ImportError:
        raise ImportError(
            "The 'menpo' and 'menpodetect' modules are required for this function to run. ",
            "Please install them first (`pip install menpo` and `pip install menpodetect`).",
        )

    # Detection runs on the greyscale version of the frame
    greyscale = menpo.image.Image(frame, copy=True).as_greyscale()

    # Face detection
    detector = menpodetect.load_opencv_frontal_face_detector()

    # Eyes detection
    # eyes = menpodetect.load_opencv_eye_detector()(img_bw)

    return detector(greyscale)
| 2,665 | 26.484536 | 97 | py |
NeuroKit | NeuroKit-master/neurokit2/events/events_find.py | # -*- coding: utf-8 -*-
import itertools
from warnings import warn
import numpy as np
from ..misc import NeuroKitWarning
from ..signal import signal_binarize
def events_find(
    event_channel,
    threshold="auto",
    threshold_keep="above",
    start_at=0,
    end_at=None,
    duration_min=1,
    duration_max=None,
    inter_min=0,
    discard_first=0,
    discard_last=0,
    event_labels=None,
    event_conditions=None,
):
    """**Find Events**

    Find and select events in a continuous signal (e.g., from a photosensor).

    Parameters
    ----------
    event_channel : array or list
        The channel containing the events.
    threshold : str or float
        The threshold value by which to select the events. If ``"auto"``, takes the value between
        the max and the min.
    threshold_keep : str
        ``"above"`` or ``"below"``, define the events as above or under the threshold. For
        photosensors, a white screen corresponds usually to higher values. Therefore, if your
        events are signaled by a black colour, events values are the lower ones (i.e., the signal
        "drops" when the events onset), and you should set the cut to ``"below"``.
    start_at : int
        Keep events which onset is after a particular time point.
    end_at : int
        Keep events which onset is before a particular time point.
    duration_min : int
        The minimum duration of an event to be considered as such (in time points).
    duration_max : int
        The maximum duration of an event to be considered as such (in time points).
    inter_min : int
        The minimum duration after an event for the subsequent event to be considered as such (in
        time points). Useful when spurious consecutive events are created due to very high sampling
        rate.
    discard_first : int
        Discard first or last n events. Useful if the experiment starts with some spurious events.
        If ``discard_first=0``, no first event is removed.
    discard_last : int
        Discard first or last n events. Useful if the experiment ends with some spurious events.
        If ``discard_last=0``, no last event is removed.
    event_labels : list
        A list containing unique event identifiers. If ``None``, will use the event index number.
    event_conditions : list
        An optional list containing, for each event, for example the trial category, group or
        experimental conditions.

    Returns
    ----------
    dict
        Dict containing 3 or 4 arrays, ``"onset"`` for event onsets, ``"duration"`` for event
        durations, ``"label"`` for the event identifiers and the optional ``"conditions"`` passed
        to ``event_conditions``.

    See Also
    --------
    events_plot, events_to_mne, events_create

    Example
    ----------
    Simulate a trigger signal (e.g., from photosensor)

    .. ipython:: python

      import neurokit2 as nk
      import numpy as np

      signal = np.zeros(200)
      signal[20:60] = 1
      signal[100:105] = 1
      signal[130:170] = 1

      events = nk.events_find(signal)
      events

      @savefig p_events_find1.png scale=100%
      nk.events_plot(events, signal)
      @suppress
      plt.close()

    The second event is an artifact (too short), we can skip it

    .. ipython:: python

      events = nk.events_find(signal, duration_min= 10)

      @savefig p_events_find2.png scale=100%
      nk.events_plot(events, signal)
      @suppress
      plt.close()

    """
    # Detect raw events (runs of the binarized channel)
    events = _events_find(
        event_channel, threshold=threshold, threshold_keep=threshold_keep
    )

    # Warning when no events detected
    if len(events["onset"]) == 0:
        warn(
            "No events found. Check your event_channel or adjust 'threshold' or 'keep' arguments.",
            category=NeuroKitWarning,
        )
        return events

    # Remove based on duration: boolean mask, False for events too short or too long
    to_keep = np.full(len(events["onset"]), True)
    to_keep[events["duration"] < duration_min] = False
    if duration_max is not None:
        to_keep[events["duration"] > duration_max] = False
    events["onset"] = events["onset"][to_keep]
    events["duration"] = events["duration"][to_keep]

    # Remove based on index (onset must fall within [start_at, end_at]);
    # durations are filtered first so the onset comparison is still valid
    if start_at > 0:
        events["duration"] = events["duration"][events["onset"] >= start_at]
        events["onset"] = events["onset"][events["onset"] >= start_at]

    if end_at is not None:
        events["duration"] = events["duration"][events["onset"] <= end_at]
        events["onset"] = events["onset"][events["onset"] <= end_at]

    # Remove based on interval min: the first event is always kept; each subsequent
    # event is kept only when its onset is at least inter_min away from the onset of
    # the immediately preceding detected event (distances computed before removal)
    if inter_min > 0:
        inter = np.diff(events["onset"])
        events["onset"] = np.concatenate(
            [events["onset"][0:1], events["onset"][1::][inter >= inter_min]]
        )
        events["duration"] = np.concatenate(
            [events["duration"][0:1], events["duration"][1::][inter >= inter_min]]
        )

    # Remove first and last n
    if discard_first > 0:
        events["onset"] = events["onset"][discard_first:]
        events["duration"] = events["duration"][discard_first:]

    if discard_last > 0:
        events["onset"] = events["onset"][0 : -1 * discard_last]
        events["duration"] = events["duration"][0 : -1 * discard_last]

    # Attach labels (and optional conditions) to the remaining events
    events = _events_find_label(
        events, event_labels=event_labels, event_conditions=event_conditions
    )

    return events
# =============================================================================
# Internals
# =============================================================================
def _events_find_label(
events, event_labels=None, event_conditions=None, function_name="events_find"
):
# Get n events
n = len(events["onset"])
# Labels
if event_labels is None:
event_labels = (np.arange(n) + 1).astype(str)
if len(list(set(event_labels))) != n:
raise ValueError(
"NeuroKit error: "
+ function_name
+ "(): oops, it seems like the `event_labels` that you provided "
+ "are not unique (all different). Please provide "
+ str(n)
+ " distinct labels."
)
if len(event_labels) != n:
raise ValueError(
"NeuroKit error: "
+ function_name
+ "(): oops, it seems like you provided "
+ str(len(event_labels))
+ " `event_labels`, but "
+ str(n)
+ " events got detected :(. Check your event names or the event signal!"
)
events["label"] = event_labels
# Condition
if event_conditions is not None:
if len(event_conditions) != n:
raise ValueError(
"NeuroKit error: "
+ function_name
+ "(): oops, it seems like you provided "
+ str(len(event_conditions))
+ " `event_conditions`, but "
+ str(n)
+ " events got detected :(. Check your event conditions or the event signal!"
)
events["condition"] = event_conditions
return events
def _events_find(event_channel, threshold="auto", threshold_keep="above"):
    """Locate onsets and durations of the runs where the binarized channel is 1."""
    binary = signal_binarize(event_channel, threshold=threshold)

    if threshold_keep not in ["above", "below"]:
        raise ValueError(
            "In events_find(), 'threshold_keep' must be one of 'above' or 'below'."
        )

    if threshold_keep != "above":
        # Events are signalled by the signal dropping below threshold: flip 0s and 1s
        binary = np.abs(binary - 1)

    # Each group is a run of consecutive identical values; runs of 1s are events
    onsets, durations = [], []
    position = 0
    for value, run in itertools.groupby(binary):
        run_length = len(list(run))
        if value == 1:
            onsets.append(position)
            durations.append(run_length)
        position += run_length

    return {"onset": np.array(onsets), "duration": np.array(durations)}
| 7,961 | 31.365854 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/events/events_create.py | import numpy as np
from .events_find import _events_find_label
def events_create(event_onsets, event_durations=None, event_labels=None, event_conditions=None):
    """**Create events dictionnary from list of onsets**

    Build an events dict (as returned by :func:`events_find`) directly from a list of
    event onsets.

    Parameters
    ----------
    event_onsets : array or list
        A list of events onset.
    event_durations : array or list
        A list of durations. If none is passed, will take the duration
        between each onset (i.e., will assume that events are consecutive).
    event_labels : list
        A list containing unique event identifiers. If ``None``, will use the event index number.
    event_conditions : list
        An optional list containing, for each event, for example the trial category, group or
        experimental conditions.

    Returns
    ----------
    dict
        Dict containing 3 or 4 arrays, ``"onset"`` for event onsets, ``"duration"`` for event
        durations, ``"label"`` for the event identifiers and the optional ``"conditions"`` passed
        to ``event_conditions``.

    See Also
    --------
    events_plot, events_to_mne, events_find

    Example
    ----------
    .. ipython:: python

      import neurokit2 as nk

      events = nk.events_create(event_onsets = [500, 1500, 2500, 5000])
      events

      events = nk.events_create(event_onsets = [500, 1500, 2500, 5000],
                                event_labels=["S1", "S2", "S3", "S4"],
                                event_conditions=["A", "A", "B", "B"])
      events

    """
    # Default durations assume back-to-back events: each event lasts until the next
    # onset (the first one lasting from sample 0 to its own onset)
    if event_durations is None:
        event_durations = np.diff(np.concatenate(([0], event_onsets)))

    return _events_find_label(
        {"onset": event_onsets, "duration": event_durations},
        event_labels=event_labels,
        event_conditions=event_conditions,
    )
NeuroKit | NeuroKit-master/neurokit2/events/events_plot.py | # -*- coding: utf-8 -*-
import matplotlib.cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def events_plot(events, signal=None, color="red", linestyle="--"):
    """**Visualize Events**

    Plot events in signal.

    Parameters
    ----------
    events : list or ndarray or dict
        Events onset location. Can also be a list of lists, in which case it will mark them with
        different colors. If a dict is passed (e.g., from :func:`events_find`), it will only plot
        the onsets.
    signal : array or DataFrame
        Signal array (can be a dataframe with many signals).
    color : str
        Argument passed to matplotlib plotting.
    linestyle : str
        Argument passed to matplotlib plotting.

    Returns
    -------
    None
        The signal and event markers are drawn on the current matplotlib figure;
        nothing is returned.

    See Also
    --------
    events_find

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      @savefig p_events_plot1.png scale=100%
      nk.events_plot([1, 3, 5])
      @suppress
      plt.close()

    * **Example 1**: With signal

    .. ipython:: python

      signal = nk.signal_simulate(duration=4)
      events = nk.events_find(signal)

      @savefig p_events_plot2.png scale=100%
      nk.events_plot(events, signal)
      @suppress
      plt.close()

    * **Example 2**: Different events

    .. ipython:: python

      events1 = events["onset"]
      events2 = np.linspace(0, len(signal), 8)

      @savefig p_events_plot3.png scale=100%
      nk.events_plot([events1, events2], signal)
      @suppress
      plt.close()

    * **Example 3**: Conditions

    .. ipython:: python

      events = nk.events_find(signal, event_conditions=["A", "B", "A", "B"])

      @savefig p_events_plot4.png scale=100%
      nk.events_plot(events, signal)
      @suppress
      plt.close()

    * **Example 4**: Different colors for all events

    .. ipython:: python

      signal = nk.signal_simulate(duration=10)
      events = nk.events_find(signal)
      events = [[i] for i in events['onset']]

      @savefig p_events_plot5.png scale=100%
      nk.events_plot(events, signal)
      @suppress
      plt.close()

    """
    # Dict from `events_find`: keep only onsets, grouped by condition if present
    if isinstance(events, dict):
        if "condition" in events.keys():
            events_list = []
            for condition in set(events["condition"]):
                events_list.append(
                    [x for x, y in zip(events["onset"], events["condition"]) if y == condition]
                )
            events = events_list
        else:
            events = events["onset"]

    # Without a signal, plot a flat zero line long enough to span all events
    if signal is None:
        signal = np.full(events[-1] + 1, 0)
    if isinstance(signal, pd.DataFrame) is False:
        signal = pd.DataFrame({"Signal": signal})

    # Plot signal(s)
    signal.plot()

    # Check if events is list of lists (i.e., grouped by condition/type)
    try:
        len(events[0])
        is_listoflists = True
    except TypeError:
        is_listoflists = False

    if is_listoflists is False:
        # Flat list of onsets: one vertical marker per event, same color/style
        for event in events:
            plt.axvline(event, color=color, linestyle=linestyle)

    else:
        # Convert color and style to list (one distinct color per event group)
        if isinstance(color, str):
            color_map = matplotlib.cm.get_cmap("rainbow")
            color = color_map(np.linspace(0, 1, num=len(events)))
        if isinstance(linestyle, str):
            linestyle = np.full(len(events), linestyle)

        # Loop through sublists
        for i, event in enumerate(events):
            for j in events[i]:
                plt.axvline(j, color=color[i], linestyle=linestyle[i], label=str(i))

        # Display only one legend per event type
        handles, labels = plt.gca().get_legend_handles_labels()
        newLabels, newHandles = [], []
        for handle, label in zip(handles, labels):
            if label not in newLabels:
                newLabels.append(label)
                newHandles.append(handle)
        plt.legend(newHandles, newLabels)
NeuroKit | NeuroKit-master/neurokit2/events/__init__.py | """Submodule for NeuroKit."""
from .events_find import events_find
from .events_create import events_create
from .events_plot import events_plot
from .events_to_mne import events_to_mne
__all__ = ["events_find", "events_create", "events_plot", "events_to_mne"]
| 263 | 28.333333 | 74 | py |
NeuroKit | NeuroKit-master/neurokit2/events/events_to_mne.py | # -*- coding: utf-8 -*-
import numpy as np
def events_to_mne(events, event_conditions=None):
    """**Create MNE-compatible events**

    Create `MNE <https://mne.tools/stable/index.html>`_ compatible events for integration with
    M/EEG.

    Parameters
    ----------
    events : list or ndarray or dict
        Events onset location. Can also be a dict obtained through :func:`.events_find`.
    event_conditions : list
        An optional list containing, for each event, for example the trial category, group or
        experimental conditions. Defaults to ``None``.

    Returns
    -------
    tuple
        MNE-formatted events (onset, 0, condition index) and the event id, that can be added
        via ``raw.add_events(events)``, and a dictionary with event's names.

    See Also
    --------
    events_find

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=4)
      events = nk.events_find(signal)
      events, event_id = nk.events_to_mne(events)
      events
      event_id

      # Conditions
      events = nk.events_find(signal, event_conditions=["A", "B", "A", "B"])
      events, event_id = nk.events_to_mne(events)
      event_id

    """
    # Accept the dict returned by events_find()
    if isinstance(events, dict):
        if "condition" in events.keys():
            event_conditions = events["condition"]
        events = events["onset"]

    # With no conditions, all events share a single "event" category
    if event_conditions is None:
        event_conditions = ["event"] * len(events)

    # Sanity check
    if len(event_conditions) != len(events):
        raise ValueError(
            "NeuroKit error: events_to_mne(): 'event_conditions' argument of different length than event onsets."
        )

    # Map each unique condition name to an integer event id
    event_names = list(set(event_conditions))
    event_id = {name: index for index, name in enumerate(event_names)}

    # BUGFIX: single-pass recoding. The previous implementation replaced condition
    # values in-place one name at a time, which corrupted the output when condition
    # values collided with already-assigned integer ids (e.g., integer conditions).
    event_conditions = [event_id[condition] for condition in event_conditions]

    # MNE format: (onset sample, 0, condition id)
    events = np.array([events, [0] * len(events), event_conditions]).T
    return events, event_id
NeuroKit | NeuroKit-master/neurokit2/signal/signal_noise.py | import numpy as np
from ..misc import check_random_state
def signal_noise(duration=10, sampling_rate=1000, beta=1, random_state=None):
    """**Simulate noise**

    This function generates pure Gaussian ``(1/f)**beta`` noise. The power-spectrum of the generated
    noise is proportional to ``S(f) = (1 / f)**beta``. The following categories of noise have been
    described:

    * violet noise: beta = -2
    * blue noise: beta = -1
    * white noise: beta = 0
    * flicker / pink noise: beta = 1
    * brown noise: beta = 2

    Parameters
    ----------
    duration : float
        Desired length of duration (s).
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    beta : float
        The noise exponent.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See ``misc.check_random_state`` for further information.

    Returns
    -------
    noise : array
        The signal of pure noise.

    References
    ----------
    * Timmer, J., & Koenig, M. (1995). On generating power law noise. Astronomy and Astrophysics,
      300, 707.
    * https://github.com/felixpatzelt/colorednoise
    * https://en.wikipedia.org/wiki/Colors_of_noise

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk
      import matplotlib.pyplot as plt

      # Generate pure noise
      violet = nk.signal_noise(beta=-2)
      blue = nk.signal_noise(beta=-1)
      white = nk.signal_noise(beta=0)
      pink = nk.signal_noise(beta=1)
      brown = nk.signal_noise(beta=2)

      # Visualize
      @savefig p_signal_noise1.png scale=100%
      nk.signal_plot([violet, blue, white, pink, brown],
                      standardize=True,
                      labels=["Violet", "Blue", "White", "Pink", "Brown"])
      @suppress
      plt.close()

    .. ipython:: python

      # Visualize spectrum
      psd_violet = nk.signal_psd(violet, sampling_rate=200, method="fft")
      psd_blue = nk.signal_psd(blue, sampling_rate=200, method="fft")
      psd_white = nk.signal_psd(white, sampling_rate=200, method="fft")
      psd_pink = nk.signal_psd(pink, sampling_rate=200, method="fft")
      psd_brown = nk.signal_psd(brown, sampling_rate=200, method="fft")

      @savefig p_signal_noise2.png scale=100%
      plt.loglog(psd_violet["Frequency"], psd_violet["Power"], c="violet")
      plt.loglog(psd_blue["Frequency"], psd_blue["Power"], c="blue")
      plt.loglog(psd_white["Frequency"], psd_white["Power"], c="grey")
      plt.loglog(psd_pink["Frequency"], psd_pink["Power"], c="pink")
      plt.loglog(psd_brown["Frequency"], psd_brown["Power"], c="brown")
      @suppress
      plt.close()

    """
    # Implementation follows the spectral-synthesis approach of Timmer & Koenig
    # (1995): draw Gaussian Fourier coefficients scaled by (1/f)**(beta/2) and
    # inverse-FFT them back into a real time series.
    # Seed the random generator for reproducible results
    rng = check_random_state(random_state)
    # The number of samples in the time series
    n = int(duration * sampling_rate)
    # Calculate Frequencies (we assume a sample rate of one)
    # Use fft functions for real output (-> hermitian spectrum)
    f = np.fft.rfftfreq(n, d=1 / sampling_rate)
    # Build scaling factors for all frequencies
    fmin = 1.0 / n  # Low frequency cutoff (avoids division by zero at the DC bin)
    f[f < fmin] = fmin
    f = f ** (-beta / 2.0)
    # Calculate theoretical output standard deviation from scaling
    # (used below to normalize the output to unit variance)
    w = f[1:].copy()
    w[-1] *= (1 + (n % 2)) / 2.0  # correct f = +-0.5
    sigma = 2 * np.sqrt(np.sum(w ** 2)) / n
    # Generate scaled random power + phase, adjusting size to
    # generate one Fourier component per frequency
    sr = rng.normal(scale=f, size=len(f))
    si = rng.normal(scale=f, size=len(f))
    # If the signal length is even, frequencies +/- 0.5 are equal
    # so the coefficient must be real.
    if not n % 2:
        si[..., -1] = 0
    # Regardless of signal length, the DC component must be real
    si[..., 0] = 0
    # Combine power + corrected phase to Fourier components
    s = sr + 1j * si
    # Transform to real time series & scale to unit variance
    y = np.fft.irfft(s, n=n) / sigma
    return y
| 4,052 | 31.95122 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_timefrequency.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
from ..signal.signal_detrend import signal_detrend
def signal_timefrequency(
    signal,
    sampling_rate=1000,
    min_frequency=0.04,
    max_frequency=None,
    method="stft",
    window=None,
    window_type="hann",
    mode="psd",
    nfreqbin=None,
    overlap=None,
    analytical_signal=True,
    show=True,
):
    """**Quantify changes of a nonstationary signal’s frequency over time**

    The objective of time-frequency analysis is to offer a more informative description of the
    signal which reveals the temporal variation of its frequency contents.

    There are many different Time-Frequency Representations (TFRs) available:

    * Linear TFRs: efficient but create tradeoff between time and frequency resolution

      * Short Time Fourier Transform (STFT): the time-domain signal is windowed into short
        segments and FT is applied to each segment, mapping the signal into the TF plane. This
        method assumes that the signal is quasi-stationary (stationary over the duration of the
        window). The width of the window is the trade-off between good time (requires short
        duration window) versus good frequency resolution (requires long duration windows)
      * Wavelet Transform (WT): similar to STFT but instead of a fixed duration window function,
        a varying window length by scaling the axis of the window is used. At low frequency, WT
        proves high spectral resolution but poor temporal resolution. On the other hand, for high
        frequencies, the WT provides high temporal resolution but poor spectral resolution.

    * Quadratic TFRs: better resolution but computationally expensive and suffers from having
      cross terms between multiple signal components

      * Wigner Ville Distribution (WVD): while providing very good resolution in time and
        frequency of the underlying signal structure, because of its bilinear nature, existence
        of negative values, the WVD has misleading TF results in the case of multi-component
        signals such as EEG due to the presence of cross terms and inference terms. Cross WVD
        terms can be reduced by using smoothing kernel functions as well as analyzing the
        analytic signal (instead of the original signal)
      * Smoothed Pseudo Wigner Ville Distribution (SPWVD): to address the problem of cross-terms
        suppression, SPWVD allows two independent analysis windows, one in time and the other in
        frequency domains.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    method : str
        Time-Frequency decomposition method. Can be ``"stft"`` (Short-Time Fourier Transform,
        default), ``"cwt"`` (Continuous Wavelet Transform), ``"wvd"`` (Wigner Ville Distribution)
        or ``"pwvd"`` (Pseudo Wigner Ville Distribution).
    min_frequency : float
        The minimum frequency.
    max_frequency : float
        The maximum frequency.
    window : int
        Length of each segment in seconds. If ``None`` (default), window will be automatically
        calculated. For ``"STFT" method``.
    window_type : str
        Type of window to create, defaults to ``"hann"``. See :func:`.scipy.signal.get_window` to
        see full options of windows. For ``"STFT" method``.
    mode : str
        Type of return values for ``"STFT" method``. Can be ``"psd"``, ``"complex"`` (default,
        equivalent to output of ``"STFT"`` with no padding or boundary extension), ``"magnitude"``,
        ``"angle"``, ``"phase"``. Defaults to ``"psd"``.
    nfreqbin : int, float
        Number of frequency bins. If ``None`` (default), nfreqbin will be set to
        ``0.5*sampling_rate``.
    overlap : int
        Number of points to overlap between segments. If ``None``, ``noverlap = nperseg // 8``.
        Defaults to ``None``.
    analytical_signal : bool
        If ``True``, analytical signal instead of actual signal is used in `Wigner Ville
        Distribution` methods.
    show : bool
        If ``True``, will return two PSD plots.

    Returns
    -------
    frequency : np.array
        Frequency.
    time : np.array
        Time array.
    stft : np.array
        Short Term Fourier Transform. Time increases across its columns and frequency increases
        down the rows.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported decomposition methods.

    Examples
    -------
    .. ipython:: python

      import neurokit2 as nk

      sampling_rate = 100
      signal = nk.signal_simulate(100, sampling_rate, frequency=[3, 10])

      # STFT Method
      @savefig p_signal_timefrequency1.png scale=100%
      f, t, stft = nk.signal_timefrequency(signal,
                                           sampling_rate,
                                           max_frequency=20,
                                           method="stft",
                                           show=True)
      @suppress
      plt.close()

    .. ipython:: python

      # CWTM Method
      @savefig p_signal_timefrequency2.png scale=100%
      f, t, cwtm = nk.signal_timefrequency(signal,
                                           sampling_rate,
                                           max_frequency=20,
                                           method="cwt",
                                           show=True)
      @suppress
      plt.close()

    .. ipython:: python

      # WVD Method
      @savefig p_signal_timefrequency3.png scale=100%
      f, t, wvd = nk.signal_timefrequency(signal,
                                          sampling_rate,
                                          max_frequency=20,
                                          method="wvd",
                                          show=True)
      @suppress
      plt.close()

    .. ipython:: python

      # PWVD Method
      @savefig p_signal_timefrequency4.png scale=100%
      f, t, pwvd = nk.signal_timefrequency(signal,
                                           sampling_rate,
                                           max_frequency=20,
                                           method="pwvd",
                                           show=True)
      @suppress
      plt.close()

    """
    # Sanitize the frequency band boundaries.
    if min_frequency == 0:
        min_frequency = 0.04  # sanitize lowest frequency to lf
    if max_frequency is None:
        max_frequency = sampling_rate // 2  # nyquist

    # Dispatch to the requested decomposition method.
    # STFT
    if method.lower() in ["stft"]:
        frequency, time, tfr = short_term_ft(
            signal,
            sampling_rate=sampling_rate,
            overlap=overlap,
            window=window,
            mode=mode,
            min_frequency=min_frequency,
            window_type=window_type,
        )
    # CWT
    elif method.lower() in ["cwt", "wavelet"]:
        frequency, time, tfr = continuous_wt(
            signal,
            sampling_rate=sampling_rate,
            min_frequency=min_frequency,
            max_frequency=max_frequency,
        )
    # WVD
    elif method in ["WignerVille", "wvd"]:
        frequency, time, tfr = wvd(
            signal,
            sampling_rate=sampling_rate,
            n_freqbins=nfreqbin,
            analytical_signal=analytical_signal,
            method="WignerVille",
        )
    # pseudoWVD
    elif method in ["pseudoWignerVille", "pwvd"]:
        frequency, time, tfr = wvd(
            signal,
            sampling_rate=sampling_rate,
            n_freqbins=nfreqbin,
            analytical_signal=analytical_signal,
            method="pseudoWignerVille",
        )
    else:
        # Fail loudly with a clear message instead of crashing further down
        # with a NameError on the unbound `frequency` variable.
        raise ValueError(
            "NeuroKit error: signal_timefrequency(): 'method' should be one of "
            "'stft', 'cwt', 'wvd' or 'pwvd'."
        )

    # Sanitize output: keep only the rows within [min_frequency, max_frequency].
    lower_bound = len(frequency) - len(frequency[frequency >= min_frequency])
    f = frequency[(frequency >= min_frequency) & (frequency <= max_frequency)]
    z = tfr[lower_bound : lower_bound + len(f)]

    if show is True:
        plot_timefrequency(
            z,
            time,
            f,
            signal=signal,
            method=method,
        )

    return f, time, z
# =============================================================================
# Short-Time Fourier Transform (STFT)
# =============================================================================
def short_term_ft(
signal,
sampling_rate=1000,
min_frequency=0.04,
overlap=None,
window=None,
window_type="hann",
mode="psd",
):
"""Short-term Fourier Transform."""
if window is not None:
nperseg = int(window * sampling_rate)
else:
# to capture at least 5 times slowest wave-length
nperseg = int((2 / min_frequency) * sampling_rate)
frequency, time, tfr = scipy.signal.spectrogram(
signal,
fs=sampling_rate,
window=window_type,
scaling="density",
nperseg=nperseg,
nfft=None,
detrend=False,
noverlap=overlap,
mode=mode,
)
return frequency, time, np.abs(tfr)
# =============================================================================
# Continuous Wavelet Transform (CWT) - Morlet
# =============================================================================
def continuous_wt(
    signal, sampling_rate=1000, min_frequency=0.04, max_frequency=None, nfreqbin=None
):
    """**Continuous Wavelet Transform**

    Continuous Wavelet Transform using a Morlet mother wavelet. Returns the frequency axis,
    the time axis and the magnitude of the wavelet coefficients.

    References
    ----------
    * Neto, O. P., Pinheiro, A. O., Pereira Jr, V. L., Pereira, R., Baltatu, O. C., & Campos, L.
      A. (2016). Morlet wavelet transforms of heart rate variability for autonomic nervous system
      activity. Applied and Computational Harmonic Analysis, 40(1), 200-206.
    * Wachowiak, M. P., Wachowiak-Smolíková, R., Johnson, M. J., Hay, D. C., Power, K. E.,
      & Williams-Bell, F. M. (2018). Quantitative feature analysis of continuous analytic wavelet
      transforms of electrocardiography and electromyography. Philosophical Transactions of the
      Royal Society A: Mathematical, Physical and Engineering Sciences, 376(2126), 20170250.

    """
    # central frequency
    w = 6.0  # recommended
    if nfreqbin is None:
        nfreqbin = sampling_rate // 2
    # frequency: linearly spaced bins over the requested band
    frequency = np.linspace(min_frequency, max_frequency, nfreqbin)
    # time: one point per sample, in seconds
    time = np.arange(len(signal)) / sampling_rate
    # Convert each frequency of interest into the Morlet "width" (scale) that
    # centers the wavelet's response on that frequency.
    widths = w * sampling_rate / (2 * frequency * np.pi)
    # Mother wavelet = Morlet
    # NOTE(review): scipy.signal.cwt and scipy.signal.morlet2 were deprecated in
    # SciPy 1.12 and removed in SciPy 1.15 -- this will break with recent SciPy;
    # consider migrating to PyWavelets (pywt.cwt).
    tfr = scipy.signal.cwt(signal, scipy.signal.morlet2, widths, w=w)
    return frequency, time, np.abs(tfr)
# =============================================================================
# Wigner-Ville Distribution
# =============================================================================
def wvd(signal, sampling_rate=1000, n_freqbins=None, analytical_signal=True, method="WignerVille"):
    """Wigner Ville Distribution and Pseudo-Wigner Ville Distribution.

    For every time index, the signal is correlated with a lagged, conjugated copy of itself
    (the instantaneous autocorrelation); a DFT over the lag axis then yields the
    time-frequency matrix. The "pseudo" variant additionally applies a Hamming window over
    the lag axis to attenuate cross-terms.

    Returns the frequency axis (Hz), the time axis (s) and the real-valued
    time-frequency matrix (frequency down the rows, time across the columns).
    """
    # Compute the analytical signal (suppresses negative-frequency interference terms)
    if analytical_signal:
        signal = scipy.signal.hilbert(signal_detrend(signal))

    # Pre-processing
    if n_freqbins is None:
        n_freqbins = 256

    # Frequency-smoothing window over the lag axis: an odd-length Hamming window
    # centered in `fwindows` for the pseudo-WVD, all-ones (no smoothing) otherwise.
    if method in ["pseudoWignerVille", "pwvd"]:
        fwindows = np.zeros(n_freqbins + 1)
        fwindows_mpts = len(fwindows) // 2
        windows_length = n_freqbins // 4
        windows_length = windows_length - windows_length % 2 + 1  # force odd length
        windows = np.hamming(windows_length)
        fwindows[fwindows_mpts + np.arange(-windows_length // 2, windows_length // 2)] = windows
    else:
        fwindows = np.ones(n_freqbins + 1)
        fwindows_mpts = len(fwindows) // 2

    time = np.arange(len(signal)) * 1.0 / sampling_rate

    # This is discrete frequency (should we return?)
    # NOTE(review): this `frequency` array is unused -- it is overwritten by the
    # continuous frequency axis computed at the end of the function.
    if n_freqbins % 2 == 0:
        frequency = np.hstack((np.arange(n_freqbins / 2), np.arange(-n_freqbins / 2, 0)))
    else:
        frequency = np.hstack(
            (np.arange((n_freqbins - 1) / 2), np.arange(-(n_freqbins - 1) / 2, 0))
        )

    tfr = np.zeros((n_freqbins, time.shape[0]), dtype=complex)  # the time-frequency matrix
    tausec = round(n_freqbins / 2.0)  # maximum representable lag
    winlength = tausec - 1
    # taulens: maximum usable lag at each time step, limited by the distance to
    # both signal boundaries and by the window half-length.
    taulens = np.min(
        np.c_[
            np.arange(signal.shape[0]),
            signal.shape[0] - np.arange(signal.shape[0]) - 1,
            winlength * np.ones(time.shape),
        ],
        axis=1,
    )
    conj_signal = np.conj(signal)

    # Iterate and compute the instantaneous autocorrelation for each time index.
    for idx in range(time.shape[0]):
        tau = np.arange(-taulens[idx], taulens[idx] + 1).astype(int)
        # Map signed lags onto [0, n_freqbins) so a plain FFT can be used below.
        indices = np.remainder(n_freqbins + tau, n_freqbins).astype(int)
        tfr[indices, idx] = (
            fwindows[fwindows_mpts + tau] * signal[idx + tau] * conj_signal[idx - tau]
        )
        # The extreme lag +/- tausec aliases onto a single bin: average the two
        # contributions to keep the autocorrelation Hermitian.
        if (idx < signal.shape[0] - tausec) and (idx >= tausec + 1):
            tfr[tausec, idx] = (
                fwindows[fwindows_mpts + tausec]
                * signal[idx + tausec]
                * np.conj(signal[idx - tausec])
                + fwindows[fwindows_mpts - tausec]
                * signal[idx - tausec]
                * conj_signal[idx + tausec]
            )
            tfr[tausec, idx] *= 0.5

    # Now tfr contains the product of the signal segments and its conjugate.
    # To find wd we need to apply fft one more time (over the lag axis).
    tfr = np.fft.fft(tfr, axis=0)
    tfr = np.real(tfr)

    # Continuous frequency axis in Hz (half the sampling rate across the bins,
    # matching the lag-domain sampling of the autocorrelation).
    frequency = 0.5 * np.arange(n_freqbins, dtype=float) / n_freqbins * sampling_rate
    return frequency, time, tfr
# =============================================================================
# Smooth Pseudo-Wigner-Ville Distribution
# =============================================================================
def smooth_pseudo_wvd(
    signal,
    sampling_rate=1000,
    freq_length=None,
    time_length=None,
    segment_step=1,
    nfreqbin=None,
    window_method="hamming",
):
    """**Smoothed Pseudo Wigner Ville Distribution**

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    freq_length : np.ndarray
        Length of frequency smoothing window.
    time_length: np.array
        Length of time smoothing window
    segment_step : int
        The step between samples in ``time_array``. Default to 1.
    nfreqbin : int
        Number of Frequency bins.
    window_method : str
        Method used to create smoothing windows. Can be "hanning"/ "hamming" or "gaussian".

    Returns
    -------
    frequency_array : np.ndarray
        Frequency array.
    time_array : np.ndarray
        Time array.
    pwvd : np.ndarray
        SPWVD. Time increases across its columns and frequency increases
        down the rows.

    References
    ----------
    * J. M. O' Toole, M. Mesbah, and B. Boashash, (2008), "A New Discrete Analytic Signal for
      Reducing Aliasing in the Discrete Wigner-Ville Distribution", IEEE Trans.

    """
    # Define parameters
    N = len(signal)
    # sample_spacing = 1 / sampling_rate
    if nfreqbin is None:
        nfreqbin = 300

    # Build an alias-reduced analytic signal (O'Toole et al., 2008): zero-pad to
    # length 2N, double the positive-frequency bins, zero the rest, then go back
    # to the time domain before applying the Hilbert transform.
    # Zero-padded signal to length 2N
    signal_padded = np.append(signal, np.zeros_like(signal))
    # DFT
    signal_fft = np.fft.fft(signal_padded)
    signal_fft[1 : N - 1] = signal_fft[1 : N - 1] * 2
    signal_fft[N:] = 0
    # Inverse FFT
    signal_ifft = np.fft.ifft(signal_fft)
    signal_ifft[N:] = 0
    # Make analytic signal
    signal = scipy.signal.hilbert(signal_detrend(signal_ifft))

    # Create smoothing windows in time and frequency
    # NOTE(review): when a user supplies `freq_length`/`time_length`, the `elif`
    # branches call `len()` on it -- this raises TypeError for scalar inputs,
    # although scalars are what the code below (e.g. `int(freq_length)`) expects.
    if freq_length is None:
        freq_length = np.floor(N / 4.0)
        # Plus one if window length is not odd
        if freq_length % 2 == 0:
            freq_length += 1
    elif len(freq_length) % 2 == 0:
        raise ValueError("The length of frequency smoothing window must be odd.")

    if time_length is None:
        time_length = np.floor(N / 10.0)
        # Plus one if window length is not odd
        if time_length % 2 == 0:
            time_length += 1
    elif len(time_length) % 2 == 0:
        raise ValueError("The length of time smoothing window must be odd.")

    # NOTE(review): scipy.signal.hamming/gaussian were removed from the
    # scipy.signal namespace in SciPy 1.15 (use scipy.signal.windows instead).
    if window_method == "hamming":
        freq_window = scipy.signal.hamming(int(freq_length))  # normalize by max
        time_window = scipy.signal.hamming(int(time_length))  # normalize by max
    elif window_method == "gaussian":
        # Standard deviation chosen so the window's FWHM equals length / 6.
        std_freq = freq_length / (6 * np.sqrt(2 * np.log(2)))
        freq_window = scipy.signal.gaussian(freq_length, std_freq)
        freq_window /= max(freq_window)
        std_time = time_length / (6 * np.sqrt(2 * np.log(2)))
        time_window = scipy.signal.gaussian(time_length, std_time)
        time_window /= max(time_window)
    # to add warning if method is not one of the supported methods

    # Mid-point index of windows
    midpt_freq = (len(freq_window) - 1) // 2
    midpt_time = (len(time_window) - 1) // 2

    # Create arrays
    time_array = np.arange(start=0, stop=N, step=segment_step, dtype=int) / sampling_rate
    # frequency_array = np.fft.fftfreq(nfreqbin, sample_spacing)[0:nfreqbin / 2]
    frequency_array = 0.5 * np.arange(nfreqbin, dtype=float) / N
    pwvd = np.zeros((nfreqbin, len(time_array)), dtype=complex)

    # Calculate pwvd
    # NOTE(review): `t` below is expressed in seconds (time_array is divided by
    # sampling_rate) yet is used to derive sample indices -- this looks correct
    # only for sampling_rate == 1; verify against the original algorithm.
    for i, t in enumerate(time_array):
        # time shift: largest lag usable at this time point given both signal
        # boundaries and the frequency-window half-length
        tau_max = np.min(
            [t + midpt_time - 1, N - t + midpt_time, np.round(N / 2.0) - 1, midpt_freq]
        )
        # time-lag list
        tau = np.arange(
            start=-np.min([midpt_time, N - t]), stop=np.min([midpt_time, t - 1]) + 1, dtype="int"
        )
        time_pts = (midpt_time + tau).astype(int)
        # Time-smoothing kernel, renormalized over the in-bounds lags.
        g2 = time_window[time_pts]
        g2 = g2 / np.sum(g2)
        signal_pts = (t - tau - 1).astype(int)
        # zero frequency
        pwvd[0, i] = np.sum(g2 * signal[signal_pts] * np.conjugate(signal[signal_pts]))
        # other frequencies
        for m in range(int(tau_max)):
            tau = np.arange(
                start=-np.min([midpt_time, N - t - m]),
                stop=np.min([midpt_time, t - m - 1]) + 1,
                dtype="int",
            )
            time_pts = (midpt_time + tau).astype(int)
            g2 = time_window[time_pts]
            g2 = g2 / np.sum(g2)
            signal_pt1 = (t + m - tau - 1).astype(int)
            signal_pt2 = (t - m - tau - 1).astype(int)
            # compute positive half
            rmm = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2]))
            pwvd[m + 1, i] = freq_window[midpt_freq + m + 1] * rmm
            # compute negative half
            rmm = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1]))
            pwvd[nfreqbin - m - 1, i] = freq_window[midpt_freq - m + 1] * rmm

        # Special handling of the extreme lag m = N/2 (aliases onto one bin).
        # NOTE(review): `m` is a float here (np.round), so `pwvd[m, i]` and the
        # freq_window indexing would raise IndexError if this branch is taken.
        m = np.round(N / 2.0)
        if t <= N - m and t >= m + 1 and m <= midpt_freq:
            tau = np.arange(
                start=-np.min([midpt_time, N - t - m]),
                stop=np.min([midpt_time, t - 1 - m]) + 1,
                dtype="int",
            )
            time_pts = (midpt_time + tau + 1).astype(int)
            g2 = time_window[time_pts]
            g2 = g2 / np.sum(g2)
            signal_pt1 = (t + m - tau).astype(int)
            signal_pt2 = (t - m - tau).astype(int)
            x = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2]))
            x *= freq_window[midpt_freq + m + 1]
            y = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1]))
            y *= freq_window[midpt_freq - m + 1]
            pwvd[m, i] = 0.5 * (x + y)

    # DFT over the lag axis turns the smoothed autocorrelation into the SPWVD.
    pwvd = np.real(np.fft.fft(pwvd, axis=0))

    # Visualization
    return frequency_array, time_array, pwvd
# =============================================================================
# Plot function
# =============================================================================
def plot_timefrequency(z, time, f, signal=None, method="stft"):
    """Visualize a time-frequency matrix as a heatmap, plus method-specific side plots."""
    if method == "stft":
        figure_title = "Short-time Fourier Transform Magnitude"
        # Companion figure: the magnitude spectrum of each STFT segment.
        _, spectrum_ax = plt.subplots()
        for index in range(len(time)):
            spectrum_ax.plot(f, z[:, index], label="Segment" + str(index + 1))
        spectrum_ax.legend()
        spectrum_ax.set_title("Signal Spectrogram")
        spectrum_ax.set_ylabel("STFT Magnitude")
        spectrum_ax.set_xlabel("Frequency (Hz)")
    elif method == "cwt":
        figure_title = "Continuous Wavelet Transform Magnitude"
    elif method == "wvd":
        figure_title = "Wigner Ville Distrubution Spectrogram"
        # Companion figure: the raw signal over time.
        plt.figure()
        plt.plot(time, signal)
        plt.xlabel("Time (sec)")
        plt.ylabel("Signal")
    elif method == "pwvd":
        figure_title = "Pseudo Wigner Ville Distribution Spectrogram"

    # Main figure: the time-frequency matrix as a pseudocolor mesh.
    fig, heatmap_ax = plt.subplots()
    mesh = heatmap_ax.pcolormesh(time, f, z, cmap=plt.get_cmap("magma"), shading="auto")
    plt.colorbar(mesh)
    heatmap_ax.set_title(figure_title)
    heatmap_ax.set_ylabel("Frequency (Hz)")
    heatmap_ax.set_xlabel("Time (sec)")
    return fig
| 21,035 | 35.20654 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_plot.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..events import events_plot
from ..stats import standardize as nk_standardize
def signal_plot(
    signal, sampling_rate=None, subplots=False, standardize=False, labels=None, **kwargs
):
    """**Plot signal with events as vertical lines**

    Parameters
    ----------
    signal : array or DataFrame
        Signal array (can be a dataframe with many signals).
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if
        the data should be plotted over time in seconds. Otherwise the data is plotted over samples.
        Defaults to ``None``.
    subplots : bool
        If ``True``, each signal is plotted in a subplot.
    standardize : bool
        If ``True``, all signals will have the same scale (useful for visualisation).
    labels : str or list
        Defaults to ``None``.
    **kwargs : optional
        Arguments passed to matplotlib plotting.

    See Also
    --------
    ecg_plot, rsp_plot, ppg_plot, emg_plot, eog_plot

    Returns
    -------
    Though the function returns nothing, the figure can be retrieved and saved as follows:

    .. code-block:: console

        # To be run after signal_plot()
        fig = plt.gcf()
        fig.savefig("myfig.png")

    Examples
    ----------
    .. ipython:: python

      import numpy as np
      import pandas as pd
      import neurokit2 as nk

      signal = nk.signal_simulate(duration=10, sampling_rate=1000)
      @savefig p_signal_plot1.png scale=100%
      nk.signal_plot(signal, sampling_rate=1000, color="red")
      @suppress
      plt.close()

    .. ipython:: python

      # Simulate data
      data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)),
                           "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)),
                           "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))})

      # Process signal
      @savefig p_signal_plot2.png scale=100%
      nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True)
      nk.signal_plot([signal, data], standardize=True)
      @suppress
      plt.close()

    """
    # Sanitize format: probe whether `signal` is a flat list of numbers (len()
    # on an element raises TypeError) and, if so, convert it to an array.
    if isinstance(signal, list):
        try:
            for i in signal:
                len(i)
        except TypeError:
            signal = np.array(signal)

    # Coerce everything that is not already a DataFrame into one.
    if isinstance(signal, pd.DataFrame) is False:
        # If list (of vectors/Series/DataFrames) is passed, concatenate them
        # column-wise, auto-naming unnamed vectors "Signal1", "Signal2", ...
        if isinstance(signal, list) or len(np.array(signal).shape) > 1:
            out = pd.DataFrame()
            for i, content in enumerate(signal):
                if isinstance(content, pd.Series):
                    out = pd.concat(
                        [out, pd.DataFrame({content.name: content.values})],
                        axis=1,
                        sort=True,
                    )
                elif isinstance(content, pd.DataFrame):
                    out = pd.concat([out, content], axis=1, sort=True)
                else:
                    out = pd.concat(
                        [out, pd.DataFrame({"Signal" + str(i + 1): content})],
                        axis=1,
                        sort=True,
                    )
            signal = out
        # If vector is passed
        else:
            signal = pd.DataFrame({"Signal": signal})

    # Copy signal so the caller's DataFrame is never mutated (e.g. by reindexing).
    signal = signal.copy()

    # Guess continuous and events columns: a binary column whose "high" marks
    # never occur on consecutive samples is treated as discrete event markers.
    continuous_columns = list(signal.columns.values)
    events_columns = []
    for col in signal.columns:
        vector = signal[col]
        if vector.nunique() == 2:
            indices = np.where(vector == np.max(vector.unique()))
            if bool(np.any(np.diff(indices) == 1)) is False:
                events_columns.append(col)
                continuous_columns.remove(col)

    # Adjust for sampling rate: reindex from samples to seconds when provided.
    if sampling_rate is not None:
        signal.index = signal.index / sampling_rate
        title_x = "Time (seconds)"
    else:
        title_x = "Time"
    # x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0])
    # x_axis = pd.DataFrame(x_axis, columns=["Time (s)"])
    # signal = pd.concat([signal, x_axis], axis=1)
    # signal = signal.set_index("Time (s)")

    # Plot accordingly: with event columns, delegate to events_plot() which
    # draws the continuous signals plus vertical event lines.
    if len(events_columns) > 0:
        events = []
        for col in events_columns:
            vector = signal[col]
            events.append(np.where(vector == np.max(vector.unique()))[0])
        events_plot(events, signal=signal[continuous_columns])
        if sampling_rate is None and pd.api.types.is_integer_dtype(signal.index):
            plt.gca().set_xlabel("Samples")
        else:
            plt.gca().set_xlabel(title_x)
    else:
        # Aesthetics: fixed palette, extended via viridis if there are more
        # signals than preset colors.
        colors = [
            "#1f77b4",
            "#ff7f0e",
            "#2ca02c",
            "#d62728",
            "#9467bd",
            "#8c564b",
            "#e377c2",
            "#7f7f7f",
            "#bcbd22",
            "#17becf",
        ]
        if len(continuous_columns) > len(colors):
            colors = plt.cm.viridis(np.linspace(0, 1, len(continuous_columns)))
        # Plot
        if standardize is True:
            signal[continuous_columns] = nk_standardize(signal[continuous_columns])
        if subplots is True:
            # NOTE(review): **kwargs is forwarded to both plt.subplots() and
            # ax.plot(); plot-only kwargs (e.g. color=) would make subplots()
            # fail -- confirm intended usage.
            _, axes = plt.subplots(nrows=len(continuous_columns), ncols=1, sharex=True, **kwargs)
            for ax, col, color in zip(axes, continuous_columns, colors):
                ax.plot(signal[col], c=color, **kwargs)
        else:
            _ = signal[continuous_columns].plot(subplots=False, sharex=True, **kwargs)
        if sampling_rate is None and pd.api.types.is_integer_dtype(signal.index):
            plt.xlabel("Samples")
        else:
            plt.xlabel(title_x)

    # Tidy legend locations and add labels
    if labels is None:
        labels = continuous_columns.copy()
    if isinstance(labels, str):
        n_labels = len([labels])
        labels = [labels]
    elif isinstance(labels, list):
        n_labels = len(labels)
    if len(signal[continuous_columns].columns) != n_labels:
        raise ValueError(
            "NeuroKit error: signal_plot(): number of labels does not equal the number of plotted signals."
        )
    # NOTE(review): with subplots=True and event columns present, `axes` is
    # never assigned, so the branch below would raise NameError -- verify.
    if subplots is False:
        plt.legend(labels, loc=1)
    else:
        for i, label in enumerate(labels):
            axes[i].legend([label], loc=1)
NeuroKit | NeuroKit-master/neurokit2/signal/signal_period.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..misc import NeuroKitWarning
from .signal_formatpeaks import _signal_formatpeaks_sanitize
from .signal_interpolate import signal_interpolate
def signal_period(
    peaks,
    sampling_rate=1000,
    desired_length=None,
    interpolation_method="monotone_cubic",
):
    """**Calculate signal period from a series of peaks**

    Calculate the period of a signal from a series of peaks. The period is defined as the time
    in seconds between two consecutive peaks.

    Parameters
    ----------
    peaks : Union[list, np.array, pd.DataFrame, pd.Series, dict]
        The samples at which the peaks occur. If an array is passed in, it is assumed that it was
        obtained with :func:`.signal_findpeaks`. If a DataFrame is passed in, it is assumed it is
        of the same length as the input signal in which occurrences of R-peaks are marked as "1",
        with such containers obtained with e.g., :func:`.ecg_findpeaks` or :func:`.rsp_findpeaks`.
    sampling_rate : int
        The sampling frequency of the signal that contains peaks (in Hz, i.e., samples/second).
        Defaults to 1000.
    desired_length : int
        If left at the default ``None``, the returned period will have the same number of elements
        as ``peaks``. If set to a value larger than the sample at which the last peak occurs in the
        signal (i.e., ``peaks[-1]``), the returned period will be interpolated between peaks over
        ``desired_length`` samples. To interpolate the period over the entire duration of the
        signal, set ``desired_length`` to the number of samples in the signal. Cannot be smaller
        than or equal to the sample at which the last peak occurs in the signal.
        Defaults to ``None``.
    interpolation_method : str
        Method used to interpolate the rate between peaks. See :func:`.signal_interpolate`.
        ``"monotone_cubic"`` is chosen as the default interpolation method since it ensures monotone
        interpolation between data points (i.e., it prevents physiologically implausible
        "overshoots" or "undershoots" in the y-direction). In contrast, the widely used cubic
        spline interpolation does not ensure monotonicity.

    Returns
    -------
    array
        A vector containing the period.

    See Also
    --------
    signal_findpeaks, signal_fixpeaks, signal_plot

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Generate 2 signals (with fixed and variable period)
      sig1 = nk.signal_simulate(duration=20, sampling_rate=200, frequency=1)
      sig2 = nk.ecg_simulate(duration=20, sampling_rate=200, heart_rate=60)

      # Find peaks
      info1 = nk.signal_findpeaks(sig1)
      info2 = nk.ecg_findpeaks(sig2, sampling_rate=200)

      # Compute period
      period1 = nk.signal_period(peaks=info1["Peaks"], desired_length=len(sig1), sampling_rate=200)
      period2 = nk.signal_period(peaks=info2["ECG_R_Peaks"], desired_length=len(sig2), sampling_rate=200)
      @savefig p_signal_period.png scale=100%
      nk.signal_plot([period1, period2], subplots=True)
      @suppress
      plt.close()

    """
    peaks = _signal_formatpeaks_sanitize(peaks)

    # Sanity checks.
    if np.size(peaks) <= 3:
        warn(
            "Too few peaks detected to compute the rate. Returning empty vector.",
            category=NeuroKitWarning,
        )
        # `desired_length` may be None here, in which case one NaN per peak is
        # returned (np.full(None, ...) would raise a TypeError).
        return np.full(desired_length if desired_length is not None else np.size(peaks), np.nan)

    if isinstance(desired_length, (int, float)):
        if desired_length <= peaks[-1]:
            raise ValueError(
                "NeuroKit error: desired_length must be None or larger than the index of the last peak."
            )

    # Calculate period in sec, based on peak to peak difference and make sure
    # that rate has the same number of elements as peaks (important for
    # interpolation later) by prepending the mean of all periods.
    period = np.ediff1d(peaks, to_begin=0) / sampling_rate
    period[0] = np.mean(period[1:])

    # Interpolate all statistics to desired length.
    if desired_length is not None:
        period = signal_interpolate(
            peaks, period, x_new=np.arange(desired_length), method=interpolation_method
        )

    return period
NeuroKit | NeuroKit-master/neurokit2/signal/signal_flatline.py | # -*- coding: utf-8 -*-
import numpy as np
def signal_flatline(signal, threshold=0.01):
    """**Return the Flatline Percentage of the Signal**

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    threshold : float, optional
        Flatline threshold relative to the biggest change in the signal:
        the fraction of the maximum absolute consecutive difference below
        which a sample-to-sample change counts as flat.

    Returns
    -------
    float
        Proportion of the signal whose absolute derivative is lower than the threshold.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=5)
      nk.signal_flatline(signal)

    """
    derivative = np.diff(signal)
    # Express the cutoff as a fraction of the largest sample-to-sample change.
    cutoff = threshold * np.max(np.abs(derivative))
    n_flat = np.count_nonzero(np.abs(derivative) < cutoff)
    return n_flat / len(signal)
NeuroKit | NeuroKit-master/neurokit2/signal/signal_distort.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..misc import NeuroKitWarning, check_random_state, listify
from .signal_resample import signal_resample
from .signal_simulate import signal_simulate
def signal_distort(
    signal,
    sampling_rate=1000,
    noise_shape="laplace",
    noise_amplitude=0,
    noise_frequency=100,
    powerline_amplitude=0,
    powerline_frequency=50,
    artifacts_amplitude=0,
    artifacts_frequency=100,
    artifacts_number=5,
    linear_drift=False,
    random_state=None,
    silent=False,
):
    """**Signal distortion**

    Add noise of a given frequency, amplitude and shape to a signal.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    noise_shape : str
        The shape of the noise. Can be one of ``"laplace"`` (default) or ``"gaussian"``.
    noise_amplitude : float
        The amplitude of the noise (the scale of the random function, relative to the standard
        deviation of the signal). Can be a list to combine several noise components.
    noise_frequency : float
        The frequency of the noise (in Hz, i.e., samples/second). Can be a list matching
        ``noise_amplitude``.
    powerline_amplitude : float
        The amplitude of the powerline noise (relative to the standard deviation of the signal).
    powerline_frequency : float
        The frequency of the powerline noise (in Hz, i.e., samples/second).
    artifacts_amplitude : float
        The amplitude of the artifacts (relative to the standard deviation of the signal).
    artifacts_frequency : int
        The frequency of the artifacts (in Hz, i.e., samples/second).
    artifacts_number : int
        The number of artifact bursts. The bursts have a random duration between 1 and 10% of the
        signal duration.
    linear_drift : bool
        Whether or not to add linear drift to the signal.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See for ``misc.check_random_state`` for further information.
    silent : bool
        Whether or not to display warning messages.

    Returns
    -------
    array
        Vector containing the distorted signal.

    """
    # Reproducible random number generation for all noise sources below.
    rng = check_random_state(random_state)

    # Treat the amplitude(s) uniformly as a list so several components can be combined.
    if isinstance(noise_amplitude, (int, float)):
        noise_amplitude = [noise_amplitude]

    # Amplitudes are expressed relative to the signal's variability; a constant
    # signal has none, which is flagged downstream by passing None.
    signal_sd = np.std(signal, ddof=1)
    if signal_sd == 0:
        signal_sd = None

    noise = 0

    # Random (laplace / gaussian) noise, possibly at several frequencies.
    if min(noise_amplitude) > 0:
        noise += _signal_distort_noise_multifrequency(
            signal,
            signal_sd=signal_sd,
            sampling_rate=sampling_rate,
            noise_amplitude=noise_amplitude,
            noise_frequency=noise_frequency,
            noise_shape=noise_shape,
            silent=silent,
            rng=rng,
        )

    # Sinusoidal powerline interference.
    if powerline_amplitude > 0:
        noise += _signal_distort_powerline(
            signal,
            signal_sd=signal_sd,
            sampling_rate=sampling_rate,
            powerline_frequency=powerline_frequency,
            powerline_amplitude=powerline_amplitude,
            silent=silent,
        )

    # Short bursts of high-amplitude artifacts.
    if artifacts_amplitude > 0:
        noise += _signal_distort_artifacts(
            signal,
            signal_sd=signal_sd,
            sampling_rate=sampling_rate,
            artifacts_frequency=artifacts_frequency,
            artifacts_amplitude=artifacts_amplitude,
            artifacts_number=artifacts_number,
            silent=silent,
            rng=rng,
        )

    # Slow monotonic ramp across the whole signal.
    if linear_drift:
        noise += _signal_linear_drift(signal)

    return signal + noise
# ===========================================================================
# Types of Noise
# ===========================================================================
def _signal_linear_drift(signal):
n_samples = len(signal)
linear_drift = np.arange(n_samples) * (1 / n_samples)
return linear_drift
def _signal_distort_artifacts(
    signal,
    signal_sd=None,
    sampling_rate=1000,
    artifacts_frequency=0,
    artifacts_amplitude=0.1,
    artifacts_number=5,
    artifacts_shape="laplace",
    silent=False,
    rng=None,
):
    """Generate `artifacts_number` short bursts of random noise and zero everything else.

    Returns a vector of the same length as `signal`.
    """
    # Generate artifact burst with random onset and random duration.
    artifacts = _signal_distort_noise(
        len(signal),
        sampling_rate=sampling_rate,
        noise_frequency=artifacts_frequency,
        noise_amplitude=artifacts_amplitude,
        noise_shape=artifacts_shape,
        silent=silent,
        rng=rng,
    )
    # An all-zero result means the requested frequency could not be resolved
    # (the noise generator already warned): nothing to add.
    if artifacts.sum() == 0:
        return artifacts
    # Burst durations are drawn between 0.1% and 1% of the signal length.
    min_duration = int(np.rint(len(artifacts) * 0.001))
    max_duration = int(np.rint(len(artifacts) * 0.01))
    # NOTE(review): for very short signals min_duration can equal max_duration,
    # making this range empty and rng.choice raise -- confirm intended.
    artifact_durations = rng.choice(range(min_duration, max_duration), size=artifacts_number)
    # Onsets are capped so that even the longest burst fits inside the signal.
    artifact_onsets = rng.choice(len(artifacts) - max_duration, size=artifacts_number)
    artifact_offsets = artifact_onsets + artifact_durations
    # Boolean mask marking the samples covered by any burst.
    artifact_idcs = np.array([False] * len(artifacts))
    for i in range(artifacts_number):
        artifact_idcs[artifact_onsets[i] : artifact_offsets[i]] = True
    # Silence everything outside the bursts.
    artifacts[~artifact_idcs] = 0
    # Scale amplitude by the signal's standard deviation.
    # NOTE(review): the amplitude was already used as the noise scale above, so
    # it is effectively applied twice here -- confirm intended.
    if signal_sd is not None:
        artifacts_amplitude *= signal_sd
    artifacts *= artifacts_amplitude
    return artifacts
def _signal_distort_powerline(
    signal,
    signal_sd=None,
    sampling_rate=1000,
    powerline_frequency=50,
    powerline_amplitude=0.1,
    silent=False,
):
    """Simulate a unit-amplitude sine at the powerline frequency and scale it."""
    n_seconds = len(signal) / sampling_rate
    line = signal_simulate(
        duration=n_seconds,
        sampling_rate=sampling_rate,
        frequency=powerline_frequency,
        amplitude=1,
        silent=silent,
    )

    # Express the requested amplitude relative to the signal's variability, when known.
    if signal_sd is not None:
        powerline_amplitude *= signal_sd

    return line * powerline_amplitude
def _signal_distort_noise_multifrequency(
    signal,
    signal_sd=None,
    sampling_rate=1000,
    noise_amplitude=0.1,
    noise_frequency=100,
    noise_shape="laplace",
    silent=False,
    rng=None,
):
    """Sum one noise component per requested (amplitude, frequency, shape) triplet."""
    total = np.zeros(len(signal))

    # listify() broadcasts scalars so all three parameter lists have equal length.
    params = listify(
        noise_amplitude=noise_amplitude, noise_frequency=noise_frequency, noise_shape=noise_shape
    )

    triplets = zip(params["noise_amplitude"], params["noise_frequency"], params["noise_shape"])
    for amplitude, frequency, shape in triplets:
        # Amplitude is relative to the signal's standard deviation, when available.
        if signal_sd is not None:
            amplitude *= signal_sd

        # Make some noise!
        total += _signal_distort_noise(
            len(signal),
            sampling_rate=sampling_rate,
            noise_frequency=frequency,
            noise_amplitude=amplitude,
            noise_shape=shape,
            silent=silent,
            rng=rng,
        )

    return total
def _signal_distort_noise(
    n_samples,
    sampling_rate=1000,
    noise_frequency=100,
    noise_amplitude=0.1,
    noise_shape="laplace",
    silent=False,
    rng=None,
):
    """Generate random noise of a given frequency, resampled to `n_samples`.

    Returns an all-zero vector (after optionally warning) when the requested
    frequency cannot be resolved at this sampling rate or signal duration.
    """
    _noise = np.zeros(n_samples)
    # Apply a very conservative Nyquist criterion in order to ensure
    # sufficiently sampled signals.
    nyquist = sampling_rate * 0.1
    if noise_frequency > nyquist:
        if not silent:
            warn(
                f"Skipping requested noise frequency "
                f" of {noise_frequency} Hz since it cannot be resolved at "
                f" the sampling rate of {sampling_rate} Hz. Please increase "
                f" sampling rate to {noise_frequency * 10} Hz or choose "
                f" frequencies smaller than or equal to {nyquist} Hz.",
                category=NeuroKitWarning,
            )
        return _noise
    # Also make sure that at least one period of the frequency can be
    # captured over the duration of the signal.
    duration = n_samples / sampling_rate
    if (1 / noise_frequency) > duration:
        if not silent:
            warn(
                f"Skipping requested noise frequency "
                f" of {noise_frequency} Hz since its period of {1 / noise_frequency} "
                f" seconds exceeds the signal duration of {duration} seconds. "
                f" Please choose noise frequencies larger than "
                f" {1 / duration} Hz or increase the duration of the "
                f" signal above {1 / noise_frequency} seconds.",
                category=NeuroKitWarning,
            )
        return _noise
    # Draw one random value per noise period; the vector is stretched to the
    # signal length by interpolation below.
    noise_duration = int(duration * noise_frequency)
    if noise_shape in ["normal", "gaussian"]:
        _noise = rng.normal(0, noise_amplitude, noise_duration)
    elif noise_shape == "laplace":
        _noise = rng.laplace(0, noise_amplitude, noise_duration)
    else:
        raise ValueError(
            "NeuroKit error: signal_distort(): 'noise_shape' should be one of 'gaussian' or 'laplace'."
        )
    # Resample the drawn noise to exactly n_samples.
    if len(_noise) != n_samples:
        _noise = signal_resample(_noise, desired_length=n_samples, method="interpolation")
    return _noise
| 10,658 | 30.35 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_surrogate.py | import numpy as np
from ..misc import check_random_state
def signal_surrogate(signal, method="IAAFT", random_state=None, **kwargs):
    """**Create Signal Surrogates**

    Generate a surrogate version of a signal. Different methods are available, such as:

    * **random**: Performs a random permutation of the signal value. This way, the signal
      distribution is unaffected and the serial correlations are cancelled, yielding a whitened
      signal with an distribution identical to that of the original.
    * **IAAFT**: Returns an Iterative Amplitude Adjusted Fourier Transform (IAAFT) surrogate.
      It is a phase randomized, amplitude adjusted surrogates that have the same power spectrum
      (to a very high accuracy) and distribution as the original data, using an iterative scheme.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    method : str
        Can be ``"random"`` or ``"IAAFT"`` (case-insensitive).
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See for ``misc.check_random_state`` for further information.
    **kwargs
        Other keywords arguments, such as ``max_iter`` (by default 1000).

    Returns
    -------
    surrogate : array
        Surrogate signal.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported methods.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=1, frequency=[3, 5], noise=0.1)
      surrogate = nk.signal_surrogate(signal, method="IAAFT")

    References
    ----------
    * Schreiber, T., & Schmitz, A. (1996). Improved surrogate data for nonlinearity tests. Physical
      review letters, 77(4), 635.

    """
    # TODO: when discrete signal is detected, run surrogate of markov chains
    # https://github.com/Frederic-vW/eeg_microstates/blob/eeg_microstates3.py#L861
    # Or markov_simulate()

    # Seed the random generator for reproducible results
    rng = check_random_state(random_state)

    method = method.lower()
    if method == "random":
        surrogate = rng.permutation(signal)
    elif method == "iaaft":
        surrogate, _, _ = _signal_surrogate_iaaft(signal, rng=rng, **kwargs)
    else:
        # Fix: previously an unknown method fell through and crashed with a
        # NameError on `surrogate`; fail early with an informative message.
        raise ValueError(
            "NeuroKit error: signal_surrogate(): 'method' should be one of 'random' or 'IAAFT'."
        )

    return surrogate
def _signal_surrogate_iaaft(signal, max_iter=1000, atol=1e-8, rtol=1e-10, rng=None):
"""IAAFT
max_iter : int
Maximum iterations to be performed while checking for convergence. Convergence can be
achieved before maximum interation.
atol : float
Absolute tolerance for checking convergence.
rtol : float
Relative tolerance for checking convergence. If both atol and rtol are set to zero, the
iterations end only when the RMSD stops changing or when maximum iteration is reached.
Returns
-------
surrogate : array
Surrogate series with (almost) the same power spectrum and distribution.
i : int
Number of iterations that have been performed.
rmsd : float
Root-mean-square deviation (RMSD) between the absolute squares of the Fourier amplitudes of
the surrogate series and that of the original series.
"""
# Calculate "true" Fourier amplitudes and sort the series
amplitudes = np.abs(np.fft.rfft(signal))
sort = np.sort(signal)
# Previous and current error
previous_error, current_error = (-1, 1)
# Start with a random permutation
t = np.fft.rfft(rng.permutation(signal))
for i in range(max_iter):
# Match power spectrum
s = np.real(np.fft.irfft(amplitudes * t / np.abs(t), n=len(signal)))
# Match distribution by rank ordering
surrogate = sort[np.argsort(np.argsort(s))]
t = np.fft.rfft(surrogate)
current_error = np.sqrt(np.mean((amplitudes ** 2 - np.abs(t) ** 2) ** 2))
# Check convergence
if abs(current_error - previous_error) <= atol + rtol * abs(previous_error):
break
previous_error = current_error
# Normalize error w.r.t. mean of the "true" power spectrum.
rmsd = current_error / np.mean(amplitudes ** 2)
return surrogate, i, rmsd
| 5,520 | 34.619355 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_autocor.py | import numpy as np
import scipy.signal
import scipy.stats
from matplotlib import pyplot as plt
def signal_autocor(signal, lag=None, demean=True, method="auto", show=False):
    """**Autocorrelation (ACF)**

    Compute the autocorrelation of a signal.

    Parameters
    -----------
    signal : Union[list, np.array, pd.Series]
        Vector of values.
    lag : int
        Time lag. If specified, one value of autocorrelation between signal with its lag self will
        be returned. Must lie in the range ``[0, len(signal) - 1]``.
    demean : bool
        If ``True``, the mean of the signal will be subtracted from the signal before ACF
        computation.
    method : str
        Using ``"auto"`` runs ``scipy.signal.correlate`` to determine the faster algorithm. Other
        methods are kept for legacy reasons, but are not recommended. Other methods include
        ``"correlation"`` (using :func:`.np.correlate`), ``"fft"`` (Fast Fourier Transform), or
        ``"unbiased"`` (FFT-based, corrected by the number of overlapping samples at each lag).
    show : bool
        If ``True``, plot the autocorrelation at all values of lag.

    Returns
    -------
    r : float
        The cross-correlation of the signal with itself at different time lags. Minimum time lag is
        0, maximum time lag is the length of the signal. Or a correlation value at a specific lag
        if lag is not ``None``.
    info : dict
        A dictionary containing additional information, such as the confidence interval.

    Raises
    ------
    ValueError
        If ``method`` is unknown, or if ``lag`` falls outside ``[0, len(signal) - 1]``.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      signal = [1, 2, 3, 4, 5]
      r, info = nk.signal_autocor(signal, show=True, method='correlation')

    """
    n = len(signal)

    # Remove the mean so the ACF reflects fluctuations rather than the DC offset.
    if demean:
        signal = np.asarray(signal) - np.nanmean(signal)

    # Compute the (non-normalized) autocovariance for lags 0 .. n-1.
    method = method.lower()
    if method in ["auto"]:
        # Let SciPy pick the faster of direct vs. FFT-based correlation.
        acov = scipy.signal.correlate(signal, signal, mode="full", method="auto")[n - 1 :]
    elif method in ["cor", "correlation", "correlate"]:
        acov = np.correlate(signal, signal, mode="full")
        acov = acov[n - 1 :]  # Min time lag is 0
    elif method == "fft":
        # Zero-pad to avoid circular wrap-around in the FFT-based computation.
        a = np.concatenate((signal, np.zeros(n - 1)))
        fft = np.fft.fft(a)
        acf = np.fft.ifft(np.conjugate(fft) * fft)[:n]
        acov = acf.real
    elif method == "unbiased":
        # Divide each lag by the number of overlapping samples to remove bias.
        dnorm = np.r_[np.arange(1, n + 1), np.arange(n - 1, 0, -1)]
        fft = np.fft.fft(signal, n=n)
        acf = np.fft.ifft(np.conjugate(fft) * fft)[:n]
        acf /= dnorm[n - 1 :]
        acov = acf.real
    else:
        # Fix: the message previously omitted the implemented 'unbiased' method.
        raise ValueError("Method must be 'auto', 'correlation', 'fft' or 'unbiased'.")

    # Normalize so that the zero-lag autocorrelation equals 1.
    r = acov / np.max(acov)

    # 95% confidence interval under the white-noise assumption (var = 1/n).
    varacf = 1.0 / n
    interval = scipy.stats.norm.ppf(1 - 0.05 / 2.0) * np.sqrt(varacf)
    ci_low, ci_high = r - interval, r + interval

    # Plot
    if show:
        plt.axhline(y=0, color="grey", linestyle="--")
        plt.plot(np.arange(1, len(r) + 1), r, lw=2)
        plt.ylabel("Autocorrelation r")
        plt.xlabel("Lag")
        plt.ylim(-1.1, 1.1)

    if lag is not None:
        # Valid indices are 0 .. n-1. Fix: previously only `lag > n` was rejected,
        # so `lag == n` crashed with an IndexError and negative lags silently
        # indexed from the end of the array.
        if lag < 0 or lag >= n:
            raise ValueError(
                "NeuroKit error: signal_autocor(): The time lag exceeds the duration of the signal. "
            )
        r = r[lag]

    return r, {"CI_low": ci_low, "CI_high": ci_high, "Method": method, "ACov": acov}
| 3,754 | 32.230088 | 101 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_merge.py | # -*- coding: utf-8 -*-
import numpy as np
from .signal_resample import signal_resample
def signal_merge(signal1, signal2, time1=(0, 10), time2=(0, 10)):
    """**Arbitrary addition of two signals with different time ranges**

    Parameters
    ----------
    signal1 : Union[list, np.array, pd.Series]
        The first signal (i.e., a time series)s in the form of a vector of values.
    signal2 : Union[list, np.array, pd.Series]
        The second signal (i.e., a time series)s in the form of a vector of values.
    time1 : list
        Lists containing two numeric values corresponding to the beginning and end of ``signal1``.
    time2 : list
        Same as above, but for ``signal2``.

    Returns
    -------
    array
        Vector containing the sum of the two signals.

    Examples
    --------
    .. ipython:: python

      import numpy as np
      import neurokit2 as nk

      signal1 = np.cos(np.linspace(start=0, stop=10, num=100))
      signal2 = np.cos(np.linspace(start=0, stop=20, num=100))

      signal = nk.signal_merge(signal1, signal2, time1=[0, 10], time2=[-5, 5])

    """
    # Fix: work on arrays so that `signal1 + signal2` below is an element-wise
    # sum. With plain list inputs (which the docstring allows), `+` used to
    # concatenate the two lists instead of adding them.
    # (Defaults are also tuples now to avoid mutable default arguments.)
    signal1 = np.asarray(signal1)
    signal2 = np.asarray(signal2)

    # Resample the lower-rate signal so both share the same sampling rate.
    sampling_rate1 = len(signal1) / np.diff(time1)[0]
    sampling_rate2 = len(signal2) / np.diff(time2)[0]
    if sampling_rate1 > sampling_rate2:
        signal2 = signal_resample(
            signal2, sampling_rate=sampling_rate2, desired_sampling_rate=sampling_rate1
        )
    elif sampling_rate2 > sampling_rate1:
        signal1 = signal_resample(
            signal1, sampling_rate=sampling_rate1, desired_sampling_rate=sampling_rate2
        )
    sampling_rate = np.max([sampling_rate1, sampling_rate2])

    # Pad the later-starting signal at the front with its own first value.
    if time1[0] < time2[0]:
        beginning = np.full(int(np.round(sampling_rate * (time2[0] - time1[0]))), signal2[0])
        signal2 = np.concatenate((beginning, signal2))
    elif time2[0] < time1[0]:
        beginning = np.full(int(np.round(sampling_rate * (time1[0] - time2[0]))), signal1[0])
        signal1 = np.concatenate((beginning, signal1))

    # Pad the earlier-ending signal at the back with its own last value.
    if time1[1] > time2[1]:
        end = np.full(int(np.round(sampling_rate * (time1[1] - time2[1]))), signal2[-1])
        signal2 = np.concatenate((signal2, end))
    elif time2[1] > time1[1]:
        end = np.full(int(np.round(sampling_rate * (time2[1] - time1[1]))), signal1[-1])
        signal1 = np.concatenate((signal1, end))

    # Trim to a common length in case rounding produced a small mismatch.
    if len(signal1) > len(signal2):
        signal1 = signal1[0 : len(signal2)]
    if len(signal2) > len(signal1):
        signal2 = signal2[0 : len(signal1)]

    merged = signal1 + signal2
    return merged
| 2,770 | 32.792683 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_findpeaks.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.misc
import scipy.signal
from ..misc import as_vector, find_closest
from ..stats import standardize
def signal_findpeaks(
    signal,
    height_min=None,
    height_max=None,
    relative_height_min=None,
    relative_height_max=None,
    relative_mean=True,
    relative_median=False,
    relative_max=False,
):
    """**Find peaks in a signal**

    Locate peaks (local maxima) in a signal together with their characteristics -- height
    (prominence), width, distance to neighbouring peaks, onsets and offsets -- and optionally
    filter them by absolute or relative height thresholds.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    height_min : float
        Minimum absolute height (prominence); peaks at or below it are discarded.
    height_max : float
        Maximum absolute height (prominence); larger peaks are discarded.
    relative_height_min : float
        Minimum height relative to the sample of detected peaks (scale selected by the
        ``relative_*`` switches below).
    relative_height_max : float
        Maximum height relative to the sample of detected peaks.
    relative_mean : bool
        Express relative thresholds as Z-scores (standardization by the mean).
    relative_median : bool
        Express relative thresholds as robust Z-scores (standardization by the median).
    relative_max : bool
        Express relative thresholds as a fraction of the maximum height.

    Returns
    ----------
    dict
        Contains the arrays ``"Peaks"`` (sample indices), ``"Distance"`` (distance to the closest
        other peak), ``"Height"`` (prominence), ``"Width"`` (at half prominence), ``"Onsets"``
        (left troughs) and ``"Offsets"`` (right troughs).

    See Also
    --------
    signal_fixpeaks

    """
    # Detect all candidate peaks and their raw characteristics.
    info = _signal_findpeaks_scipy(signal)

    # First pass: filter on absolute height (prominence).
    info = _signal_findpeaks_keep(
        info,
        what="Height",
        below=height_max,
        above=height_min,
        relative_mean=False,
        relative_median=False,
        relative_max=False,
    )

    # Second pass: filter on height relative to the sample of detected peaks.
    info = _signal_findpeaks_keep(
        info,
        what="Height",
        below=relative_height_max,
        above=relative_height_min,
        relative_mean=relative_mean,
        relative_median=relative_median,
        relative_max=relative_max,
    )

    # Recompute inter-peak distances for the surviving peaks, and locate the
    # troughs immediately before (onset) and after (offset) each peak.
    info["Distance"] = _signal_findpeaks_distances(info["Peaks"])
    info["Onsets"] = _signal_findpeaks_findbase(info["Peaks"], signal, what="onset")
    info["Offsets"] = _signal_findpeaks_findbase(info["Peaks"], signal, what="offset")

    return info
# =============================================================================
# Filtering peaks
# =============================================================================
def _signal_findpeaks_keep(
    info,
    what="Height",
    below=None,
    above=None,
    relative_mean=False,
    relative_median=False,
    relative_max=False,
):
    """Filter the detected peaks by thresholding one of their characteristics (in place)."""
    # No threshold requested: nothing to do.
    if below is None and above is None:
        return info

    # Re-express the characteristic on the requested (relative) scale.
    if relative_max is True:
        values = info[what] / np.max(info[what])
    elif relative_median is True:
        values = standardize(info[what], robust=True)
    elif relative_mean is True:
        values = standardize(info[what])
    else:
        values = info[what]

    # Start from "keep everything" and drop out-of-range entries.
    keep = np.full(len(info["Peaks"]), True)
    if below is not None:
        keep[values > below] = False
    if above is not None:
        keep[values < above] = False

    return _signal_findpeaks_filter(info, keep)
def _signal_findpeaks_filter(info, keep):
for key in info.keys():
info[key] = info[key][keep]
return info
# =============================================================================
# Helpers
# =============================================================================
def _signal_findpeaks_distances(peaks):
"""Calculate distance between adjacent peaks.
Parameters
----------
peaks : np.ndarray
detected peaks
Returns
----------
np.ndarray
Distance vector of the same length as `peaks`
Examples
---------
```
peaks = np.array([1, 10, 10**2, 10**3, 10**4], dtype=np.float32)
_signal_findpeaks_distances(peaks) # array([ 9., 9., 90., 900., 9000.])
```
"""
if len(peaks) <= 2:
distances = np.full(len(peaks), np.nan)
else:
distances_next = np.concatenate([[np.nan], np.abs(np.diff(peaks))])
distances_prev = np.concatenate([distances_next[1:], [np.nan]])
distances = np.array([np.nanmin(i) for i in list(zip(distances_next, distances_prev))])
return distances
def _signal_findpeaks_findbase(peaks, signal, what="onset"):
    """Find the trough preceding ("onset") or following ("offset") each peak."""
    # Onsets lie strictly before the peak, offsets strictly after.
    direction = "smaller" if what == "onset" else "greater"

    # Troughs are the peaks of the inverted signal.
    troughs, _ = scipy.signal.find_peaks(-1 * signal)

    bases = find_closest(peaks, troughs, direction=direction, strictly=True)
    return as_vector(bases)
def _signal_findpeaks_scipy(signal):
    """Detect all local maxima and compute their raw characteristics."""
    peaks, _ = scipy.signal.find_peaks(signal)

    # Characterize each peak: prominence and width at half prominence.
    heights = scipy.signal.peak_prominences(signal, peaks)[0]
    widths = scipy.signal.peak_widths(signal, peaks, rel_height=0.5)[0]

    # Bundle the characteristics, including the distance to the closest other peak.
    return {
        "Peaks": peaks,
        "Distance": _signal_findpeaks_distances(peaks),
        "Height": heights,
        "Width": widths,
    }
| 7,447 | 29.276423 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_filter.py | # -*- coding: utf-8 -*-
from warnings import warn
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
from ..misc import NeuroKitWarning
from .signal_interpolate import signal_interpolate
def signal_filter(
    signal,
    sampling_rate=1000,
    lowcut=None,
    highcut=None,
    method="butterworth",
    order=2,
    window_size="default",
    powerline=50,
    show=False,
):
    """**Signal filtering**

    Apply a lowpass (when only ``highcut`` is provided), highpass (when only ``lowcut`` is
    provided) or bandpass (when both are provided) filter to the signal, using one of several
    filtering methods.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    lowcut : float
        Lower cutoff frequency in Hz. The default is ``None``.
    highcut : float
        Upper cutoff frequency in Hz. The default is ``None``.
    method : str
        One of ``"butterworth"`` (SOS implementation via :func:`.scipy.signal.sosfiltfilt`,
        recommended for general purpose filtering), ``"butterworth_ba"`` (traditional and legacy
        b/a implementation, often found in other software), ``"butterworth_zi"``, ``"bessel"``,
        ``"fir"``, ``"savgol"`` (Savitzky-Golay) or ``"powerline"``.
    order : int
        Filter order; used by the Butterworth, Bessel and Savitzky-Golay methods (default is 2).
    window_size : int
        Window length used by the Savitzky-Golay and FIR methods (must be odd for the former).
        When ``"default"``, it is derived from the sampling rate (sampling rate divided by 10,
        i.e., 101 at 1000 Hz).
    powerline : int
        The powerline frequency (normally 50 Hz or 60Hz); only used if ``method`` is
        ``"powerline"``.
    show : bool
        If ``True``, plot the filtered signal as an overlay of the original.

    See Also
    --------
    signal_detrend, signal_psd

    Returns
    -------
    array
        Vector containing the filtered signal.

    """
    method = method.lower()

    # Temporarily strip missing values; their positions are restored at the end.
    signal_sanitized, missing = _signal_filter_missing(signal)

    if method in ["sg", "savgol", "savitzky-golay"]:
        filtered = _signal_filter_savgol(signal_sanitized, sampling_rate, order, window_size=window_size)
    elif method in ["powerline"]:
        filtered = _signal_filter_powerline(signal_sanitized, sampling_rate, powerline)
    else:
        # Every remaining method requires at least one cutoff frequency.
        if lowcut is None and highcut is None:
            raise ValueError("NeuroKit error: signal_filter(): you need to specify a 'lowcut' or a 'highcut'.")
        if method in ["butter", "butterworth"]:
            filtered = _signal_filter_butterworth(signal_sanitized, sampling_rate, lowcut, highcut, order)
        elif method in ["butter_ba", "butterworth_ba"]:
            filtered = _signal_filter_butterworth_ba(signal_sanitized, sampling_rate, lowcut, highcut, order)
        elif method in ["butter_zi", "butterworth_zi"]:
            filtered = _signal_filter_butterworth_zi(signal_sanitized, sampling_rate, lowcut, highcut, order)
        elif method in ["bessel"]:
            filtered = _signal_filter_bessel(signal_sanitized, sampling_rate, lowcut, highcut, order)
        elif method in ["fir"]:
            filtered = _signal_filter_fir(signal_sanitized, sampling_rate, lowcut, highcut, window_size=window_size)
        else:
            raise ValueError(
                "NeuroKit error: signal_filter(): 'method' should be",
                " one of 'butterworth', 'butterworth_ba', 'butterworth_zi', 'bessel',",
                " 'savgol' or 'fir'.",
            )

    # Reinstate the missing values at their original positions.
    filtered[missing] = np.nan

    if show is True:
        plt.plot(signal, color="lightgrey")
        plt.plot(filtered, color="red", alpha=0.9)

    return filtered
# =============================================================================
# Savitzky-Golay (savgol)
# =============================================================================
def _signal_filter_savgol(signal, sampling_rate=1000, order=2, window_size="default"):
    """Smooth a signal with a Savitzky-Golay polynomial filter.

    The default window length is chosen based on `Sadeghi, M., & Behnia, F. (2018). Optimum window
    length of Savitzky-Golay filters with arbitrary order. arXiv preprint arXiv:1808.10489.
    <https://arxiv.org/ftp/arxiv/papers/1808/1808.10489.pdf>`_.
    """
    # Resolve the "default" placeholder into a concrete number of samples.
    length = _signal_filter_windowsize(window_size=window_size, sampling_rate=sampling_rate)
    # scipy's savgol_filter requires an odd window length.
    if length % 2 == 0:
        length += 1
    return scipy.signal.savgol_filter(signal, window_length=int(length), polyorder=order)
# =============================================================================
# FIR
# =============================================================================
def _signal_filter_fir(signal, sampling_rate=1000, lowcut=None, highcut=None, window_size="default"):
    """Filter a signal with a FIR filter (delegates to ``mne.filter.filter_data``)."""
    try:
        import mne
    except ImportError:
        raise ImportError(
            "NeuroKit error: signal_filter(): the 'mne' module is required for this method to run. ",
            "Please install it first (`pip install mne`).",
        )
    # MNE expects the string "auto" rather than NeuroKit's "default" placeholder.
    if isinstance(window_size, str):
        window_size = "auto"
    return mne.filter.filter_data(
        signal,
        sfreq=sampling_rate,
        l_freq=lowcut,
        h_freq=highcut,
        method="fir",
        fir_window="hamming",
        filter_length=window_size,
        l_trans_bandwidth="auto",
        h_trans_bandwidth="auto",
        phase="zero-double",
        fir_design="firwin",
        pad="reflect_limited",
        verbose=False,
    )
# =============================================================================
# Butterworth
# =============================================================================
def _signal_filter_butterworth(signal, sampling_rate=1000, lowcut=None, highcut=None, order=5):
    """Filter a signal using IIR Butterworth SOS method."""
    critical, btype = _signal_filter_sanitize(lowcut=lowcut, highcut=highcut, sampling_rate=sampling_rate)
    # Second-order sections + forward-backward filtering (zero phase distortion).
    sections = scipy.signal.butter(order, critical, btype=btype, output="sos", fs=sampling_rate)
    return scipy.signal.sosfiltfilt(sections, signal)
def _signal_filter_butterworth_ba(signal, sampling_rate=1000, lowcut=None, highcut=None, order=5):
    """Filter a signal using IIR Butterworth B/A method."""
    critical, btype = _signal_filter_sanitize(lowcut=lowcut, highcut=highcut, sampling_rate=sampling_rate)
    b, a = scipy.signal.butter(order, critical, btype=btype, output="ba", fs=sampling_rate)
    # Gustafsson's method can fail (e.g. on short signals); fall back to padding.
    try:
        return scipy.signal.filtfilt(b, a, signal, method="gust")
    except ValueError:
        return scipy.signal.filtfilt(b, a, signal, method="pad")
def _signal_filter_butterworth_zi(signal, sampling_rate=1000, lowcut=None, highcut=None, order=5):
    """Filter a signal using IIR Butterworth SOS method, given initial state (zi)."""
    critical, btype = _signal_filter_sanitize(lowcut=lowcut, highcut=highcut, sampling_rate=sampling_rate)
    sections = scipy.signal.butter(order, critical, btype=btype, output="sos", fs=sampling_rate)
    # Scale the steady-state initial conditions by the signal mean to reduce the startup transient.
    initial_state = scipy.signal.sosfilt_zi(sections) * np.mean(signal)
    # One-pass (causal) filtering along the signal, discarding the final filter state.
    filtered, _ = scipy.signal.sosfilt(sections, signal, zi=initial_state)
    return filtered
# =============================================================================
# Bessel
# =============================================================================
def _signal_filter_bessel(signal, sampling_rate=1000, lowcut=None, highcut=None, order=5):
    """Filter a signal with an IIR Bessel filter (SOS, zero-phase)."""
    critical, btype = _signal_filter_sanitize(lowcut=lowcut, highcut=highcut, sampling_rate=sampling_rate)
    sections = scipy.signal.bessel(order, critical, btype=btype, output="sos", fs=sampling_rate)
    return scipy.signal.sosfiltfilt(sections, signal)
# =============================================================================
# Powerline
# =============================================================================
def _signal_filter_powerline(signal, sampling_rate, powerline=50):
"""Filter out 50 Hz powerline noise by smoothing the signal with a moving average kernel with the width of one
period of 50Hz."""
if sampling_rate >= 100:
b = np.ones(int(sampling_rate / powerline))
else:
b = np.ones(2)
a = [len(b)]
y = scipy.signal.filtfilt(b, a, signal, method="pad")
return y
# =============================================================================
# Utility
# =============================================================================
def _signal_filter_sanitize(lowcut=None, highcut=None, sampling_rate=1000, normalize=False):
# Sanity checks
if lowcut is not None or highcut is not None:
if sampling_rate <= 2 * np.nanmax(np.array([lowcut, highcut], dtype=np.float64)):
warn(
"The sampling rate is too low. Sampling rate"
" must exceed the Nyquist rate to avoid aliasing problem."
f" In this analysis, the sampling rate has to be higher than {2 * highcut} Hz",
category=NeuroKitWarning,
)
# Replace 0 by none
if lowcut is not None and lowcut == 0:
lowcut = None
if highcut is not None and highcut == 0:
highcut = None
# Format
if lowcut is not None and highcut is not None:
if lowcut > highcut:
filter_type = "bandstop"
else:
filter_type = "bandpass"
# pass frequencies in order of lowest to highest to the scipy filter
freqs = list(np.sort([lowcut, highcut]))
elif lowcut is not None:
freqs = [lowcut]
filter_type = "highpass"
elif highcut is not None:
freqs = [highcut]
filter_type = "lowpass"
# Normalize frequency to Nyquist Frequency (Fs/2).
# However, no need to normalize if `fs` argument is provided to the scipy filter
if normalize is True:
freqs = np.array(freqs) / (sampling_rate / 2)
return freqs, filter_type
def _signal_filter_windowsize(window_size="default", sampling_rate=1000):
if isinstance(window_size, str):
window_size = int(np.round(sampling_rate / 3))
if (window_size % 2) == 0:
window_size + 1 # pylint: disable=W0104
return window_size
def _signal_filter_missing(signal):
"""Interpolate missing data and save the indices of the missing data."""
missing = np.where(np.isnan(signal))[0]
if len(missing) > 0:
return signal_interpolate(signal, method="linear"), missing
else:
return signal, missing
| 15,403 | 40.632432 | 116 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_recompose.py | import matplotlib.pyplot as plt
import numpy as np
import scipy.cluster
from .signal_zerocrossings import signal_zerocrossings
def signal_recompose(components, method="wcorr", threshold=0.5, keep_sd=None, **kwargs):
    """**Combine signal sources after decomposition**

    Combine and reconstruct meaningful signal sources after signal decomposition.

    Parameters
    -----------
    components : array
        Array of components obtained via :func:`.signal_decompose`.
    method : str
        The decomposition method. Can be one of ``"wcorr"``.
    threshold : float
        The threshold used to group components together.
    keep_sd : float
        If a float is specified, will only keep the reconstructed components whose standard
        deviation (SD) is superior or equal to that percentage of the max SD of the components
        (e.g., ``keep_sd=0.01`` removes all components with SD lower than 1% of the max SD).
        This can be used to filter out noise.
    **kwargs
        Other arguments used to override, for instance ``metric="chebyshev"``.

    Returns
    -------
    Array
        Components of the recomposed components.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=10, frequency=1, noise=0.01)
      components = nk.signal_decompose(signal, method='emd')
      recomposed = nk.signal_recompose(components, method='wcorr', threshold=0.90)

    """
    method = method.lower()
    if method not in ["wcorr"]:
        raise ValueError("NeuroKit error: signal_decompose(): 'method' should be one of 'emd'")
    # Group correlated components, then sum each group into a single source.
    groups = _signal_recompose_wcorr(components, threshold=threshold, **kwargs)
    recomposed = _signal_recompose_sum(components, groups)
    # Optionally keep only components whose SD exceeds `keep_sd` * max SD.
    if keep_sd is not None:
        recomposed = _signal_recompose_filter_sd(components, threshold=keep_sd)
    return recomposed
# =============================================================================
# Recombination methods
# =============================================================================
def _signal_recompose_sum(components, clusters):
# Reorient components
components = components.T
# Reconstruct Time Series from correlated components
clusters = [np.where(clusters == cluster)[0] for cluster in np.unique(clusters)]
if len(clusters) == 0:
raise ValueError(
"Not enough clusters of components detected. Please decrease the " "`threshold`."
)
# Initialize components matrix
recomposed = np.zeros((len(components), len(clusters)))
for i, indices in enumerate(clusters):
recomposed[:, i] = components[:, indices].sum(axis=1)
return recomposed.T
# =============================================================================
# Clustering Methods
# =============================================================================
# Weighted Correlation
# ----------------------------------------------------------------------------
def _signal_recompose_wcorr(components, threshold=0.5, metric="chebyshev"):
    """Cluster components according to their weighted correlation."""
    # Weighted correlation matrix between components.
    wcorr = _signal_recompose_get_wcorr(components, show=False)
    # Hierarchical (complete-linkage) clustering on the pairwise distances.
    distances = scipy.cluster.hierarchy.distance.pdist(wcorr, metric=metric)
    tree = scipy.cluster.hierarchy.linkage(distances, method="complete")
    # The user threshold is a fraction of the largest pairwise distance.
    cutoff = threshold * distances.max()
    return scipy.cluster.hierarchy.fcluster(tree, cutoff, "distance")
def _signal_recompose_get_wcorr(components, show=False):
    """Calculates the weighted correlation matrix for the time series.

    Returns an (L, L) symmetric matrix of absolute weighted correlations between
    components, with ones on the diagonal. Optionally plots it as a heatmap.

    References
    ----------
    - https://www.kaggle.com/jdarcy/introducing-ssa-for-time-series-decomposition
    """
    # Reorient components
    components = components.T
    # L: number of components; K: implied number of lagged windows (SSA convention).
    L = components.shape[1]
    K = components.shape[0] - L + 1
    # Calculate the weights
    # Triangular weights: ramp 1..L, plateau at L, ramp back down L..1.
    # NOTE(review): assumes the sample count is at least 2*L + 1 (so K - L - 1 >= 0),
    # otherwise the weight vector is shorter than the signal — confirm with callers.
    w = np.array(list(np.arange(L) + 1) + [L] * (K - L - 1) + list(np.arange(L) + 1)[::-1])
    def w_inner(F_i, F_j):
        # Weighted inner product <F_i, F_j>_w.
        return w.dot(F_i * F_j)
    # Calculated weighted norms, ||F_i||_w, then invert.
    F_wnorms = np.array([w_inner(components[:, i], components[:, i]) for i in range(L)])
    F_wnorms = F_wnorms ** -0.5
    # Calculate Wcorr.
    Wcorr = np.identity(L)
    for i in range(L):
        for j in range(i + 1, L):
            # Absolute normalized weighted correlation; mirror into the lower triangle.
            Wcorr[i, j] = abs(
                w_inner(components[:, i], components[:, j]) * F_wnorms[i] * F_wnorms[j]
            )
            Wcorr[j, i] = Wcorr[i, j]
    if show is True:
        ax = plt.imshow(Wcorr)
        plt.xlabel(r"$\tilde{F}_i$")
        plt.ylabel(r"$\tilde{F}_j$")
        plt.colorbar(ax.colorbar, fraction=0.045)
        ax.colorbar.set_label("$W_{i,j}$")
        plt.clim(0, 1)
        # For plotting purposes:
        min_range = 0
        max_range = len(Wcorr) - 1
        plt.xlim(min_range - 0.5, max_range + 0.5)
        plt.ylim(max_range + 0.5, min_range - 0.5)
    return Wcorr
# =============================================================================
# Filter method
# =============================================================================
def _signal_recompose_filter_sd(components, threshold=0.01):
"""Filter by standard deviation."""
SDs = [np.std(components[i, :], ddof=1) for i in range(len(components))]
indices = np.where(SDs >= threshold * np.max(SDs))
return components[indices]
def _signal_recompose_meanfreq(components, sampling_rate=1000):
    """Get the mean frequency of components.

    Parameters
    ----------
    components : array
        Array of shape (n_components, n_samples).
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).

    Returns
    -------
    np.ndarray
        Estimated frequency (in Hz) of each component, derived from the
        zero-crossing rate of the mean-centered component.
    """
    duration = components.shape[1] / sampling_rate
    n = len(components)
    freqs = np.zeros(n)
    for i in range(n):
        # Center the component so zero-crossings reflect oscillations around its mean.
        c = components[i, :] - np.mean(components[i, :])
        freqs[i] = len(signal_zerocrossings(c)) / duration
    # Bug fix: the computed frequencies were previously discarded (missing return).
    return freqs
NeuroKit | NeuroKit-master/neurokit2/signal/signal_psd.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy.signal
from ..misc import NeuroKitWarning
def signal_psd(
    signal,
    sampling_rate=1000,
    method="welch",
    show=False,
    normalize=True,
    min_frequency="default",
    max_frequency=np.inf,
    window=None,
    window_type="hann",
    order=16,
    order_criteria="KIC",
    order_corrected=True,
    silent=True,
    t=None,
    **kwargs,
):
    """**Compute the Power Spectral Density (PSD)**
    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    method : str
        Either ``"welch"`` (default), ``"fft"``, ``"multitapers"`` (requires the 'mne' package),
        ``"lombscargle"`` (requires the 'astropy' package) or ``"burg"``.
    show : bool
        If ``True``, will return a plot. If ``False``, will return the density values that can be
        plotted externally.
    normalize : bool
        Normalization of power by maximum PSD value. Default to ``True``.
        Normalization allows comparison between different PSD methods.
    min_frequency : str, float
        The minimum frequency. If default, min_frequency is chosen based on the sampling rate and
        length of signal to optimize the frequency resolution.
    max_frequency : float
        The maximum frequency.
    window : int
        Length of each window in seconds (for "Welch" method). If ``None`` (default), window will be
        automatically calculated to capture at least 2 cycles of min_frequency. If the length of
        recording does not allow the formal, window will be default to half of the length of
        recording.
    window_type : str
        Desired window to use. Defaults to ``"hann"``. See :func:`.scipy.signal.get_window` for list
        of windows.
    order : int
        The order of autoregression (only used for autoregressive (AR) methods such as ``"burg"``).
    order_criteria : str
        The criteria to automatically select order in parametric PSD (only used for autoregressive
        (AR) methods such as ``"burg"``).
    order_corrected : bool
        Should the order criteria (AIC or KIC) be corrected? If unsure which method to use to choose
        the order, rely on the default (i.e., the corrected KIC).
    silent : bool
        If ``False``, warnings will be printed. Default to ``True``.
    t : array
        The timestamps corresponding to each sample in the signal, in seconds
        (for ``"lombscargle"`` method). Defaults to None.
    **kwargs : optional
        Keyword arguments to be passed to :func:`.scipy.signal.welch`.
    See Also
    --------
    signal_filter, mne.time_frequency.psd_array_multitaper, scipy.signal.welch
    Returns
    -------
    data : pd.DataFrame
        A DataFrame containing the Power Spectrum values and a plot if
        ``show`` is ``True``.
    Examples
    --------
    .. ipython:: python
      import neurokit2 as nk
      signal = nk.signal_simulate(duration=2, frequency=[5, 6, 50, 52, 80], noise=0.5)
      # FFT method (based on numpy)
      @savefig p_signal_psd1.png scale=100%
      psd_multitapers = nk.signal_psd(signal, method="fft", show=True)
      @suppress
      plt.close
    .. ipython:: python
      # Welch method (based on scipy)
      @savefig p_signal_psd2.png scale=100%
      psd_welch = nk.signal_psd(signal, method="welch", min_frequency=1, show=True)
      @suppress
      plt.close()
    .. ipython:: python
      # Multitapers method (requires MNE)
      @savefig p_signal_psd3.png scale=100%
      psd_multitapers = nk.signal_psd(signal, method="multitapers", show=True)
      @suppress
      plt.close()
    .. ipython:: python
      # Burg method
      @savefig p_signal_psd4.png scale=100%
      psd_burg = nk.signal_psd(signal, method="burg", min_frequency=1, show=True)
      @suppress
      plt.close()
    .. ipython:: python
      # Lomb method (requires AstroPy)
      @savefig p_signal_psd5.png scale=100%
      psd_lomb = nk.signal_psd(signal, method="lomb", min_frequency=1, show=True)
      @suppress
      plt.close()
    """
    # Constant Detrend
    signal = signal - np.mean(signal)
    # Sanitize method name
    method = method.lower()
    # Sanitize min_frequency
    N = len(signal)
    if isinstance(min_frequency, str):
        if sampling_rate is None:
            # This is to compute min_frequency if both min_frequency and sampling_rate are not provided (#800)
            # NOTE(review): if `t` is also None this raises on np.diff(None) — confirm
            # callers always supply either `sampling_rate` or `t`.
            min_frequency = (2 * np.median(np.diff(t))) / (N / 2)  # for high frequency resolution
        else:
            min_frequency = (2 * sampling_rate) / (N / 2)  # for high frequency resolution
    # MNE
    if method in ["multitaper", "multitapers", "mne"]:
        frequency, power = _signal_psd_multitaper(
            signal, sampling_rate=sampling_rate, min_frequency=min_frequency, max_frequency=max_frequency,
        )
    # FFT (Numpy)
    elif method in ["fft"]:
        frequency, power = _signal_psd_fft(signal, sampling_rate=sampling_rate, **kwargs)
    # Lombscargle (AstroPy)
    elif method.lower() in ["lombscargle", "lomb"]:
        frequency, power = _signal_psd_lomb(
            signal, sampling_rate=sampling_rate, min_frequency=min_frequency, max_frequency=max_frequency, t=t
        )
    # Method that are using a window
    else:
        # Define window length
        if min_frequency == 0:
            min_frequency = 0.001  # sanitize min_frequency
        if window is not None:
            nperseg = int(window * sampling_rate)
        else:
            # to capture at least 2 cycles of min_frequency
            nperseg = int((2 / min_frequency) * sampling_rate)
        # in case duration of recording is not sufficient
        if nperseg > N / 2:
            if silent is False:
                warn(
                    "The duration of recording is too short to support a"
                    " sufficiently long window for high frequency resolution."
                    " Consider using a longer recording or increasing the `min_frequency`",
                    category=NeuroKitWarning,
                )
            nperseg = int(N / 2)
        # Welch (Scipy)
        if method.lower() in ["welch"]:
            frequency, power = _signal_psd_welch(
                signal, sampling_rate=sampling_rate, nperseg=nperseg, window_type=window_type, **kwargs,
            )
        # BURG
        elif method.lower() in ["burg", "pburg", "spectrum"]:
            frequency, power = _signal_psd_burg(
                signal,
                sampling_rate=sampling_rate,
                order=order,
                criteria=order_criteria,
                corrected=order_corrected,
                side="one-sided",
                nperseg=nperseg,
            )
    # Normalize
    if normalize is True:
        power /= np.max(power)
    # Store results
    data = pd.DataFrame({"Frequency": frequency, "Power": power})
    # Filter out frequencies outside the requested [min_frequency, max_frequency] range.
    data = data.loc[np.logical_and(data["Frequency"] >= min_frequency, data["Frequency"] <= max_frequency)]
    # data["Power"] = 10 * np.log(data["Power"])
    if show is True:
        ax = data.plot(x="Frequency", y="Power", title="Power Spectral Density (" + str(method) + " method)")
        ax.set(xlabel="Frequency (Hz)", ylabel="Spectrum")
    return data
# =============================================================================
# FFT method
# =============================================================================
def _signal_psd_fft(signal, sampling_rate=1000, n=None):
# Power-spectrum density (PSD)
power = np.abs(np.fft.rfft(signal, n=n)) ** 2
frequency = np.linspace(0, sampling_rate / 2, len(power))
return frequency, power
# =============================================================================
# Multitaper method
# =============================================================================
def _signal_psd_multitaper(signal, sampling_rate=1000, min_frequency=0, max_frequency=np.inf):
    """Power spectral density via adaptive multitaper estimation (requires the `mne` package)."""
    try:
        import mne
    except ImportError as e:
        raise ImportError(
            "NeuroKit error: signal_psd(): the 'mne'",
            " module is required for the 'mne' method to run.",
            " Please install it first (`pip install mne`).",
        ) from e
    # MNE returns (power, frequency); this helper's contract is (frequency, power).
    psd, freqs = mne.time_frequency.psd_array_multitaper(
        signal,
        sfreq=sampling_rate,
        fmin=min_frequency,
        fmax=max_frequency,
        adaptive=True,
        normalization="full",
        verbose=False,
    )
    return freqs, psd
# =============================================================================
# Welch method
# =============================================================================
def _signal_psd_welch(signal, sampling_rate=1000, nperseg=None, window_type="hann", **kwargs):
if nperseg is not None:
nfft = int(nperseg * 2)
else:
nfft = None
frequency, power = scipy.signal.welch(
signal,
fs=sampling_rate,
scaling="density",
detrend=False,
nfft=nfft,
average="mean",
nperseg=nperseg,
window=window_type,
**kwargs,
)
return frequency, power
# =============================================================================
# Lomb method
# =============================================================================
def _signal_psd_lomb(signal, sampling_rate=1000, min_frequency=0, max_frequency=np.inf, t=None):
    """Lomb-Scargle periodogram (requires the `astropy` package).

    When `t` is None, the signal is assumed evenly sampled at `sampling_rate`;
    otherwise `t` gives the (possibly uneven) sample timestamps in seconds.
    """
    try:
        import astropy.timeseries
        if t is None:
            # Evenly sampled case: build timestamps and cap at the Nyquist frequency.
            if max_frequency == np.inf:
                max_frequency = sampling_rate / 2  # sanitize highest frequency
            t = np.arange(len(signal)) / sampling_rate
            frequency, power = astropy.timeseries.LombScargle(t, signal, normalization="psd").autopower(
                minimum_frequency=min_frequency, maximum_frequency=max_frequency
            )
        else:
            # determine maximum frequency with astropy defaults for unevenly spaced data
            # https://docs.astropy.org/en/stable/api/astropy.timeseries.LombScargle.html#astropy.timeseries.LombScargle.autopower
            frequency, power = astropy.timeseries.LombScargle(t, signal, normalization="psd").autopower(
                minimum_frequency=min_frequency
            )
    except ImportError as e:
        raise ImportError(
            "NeuroKit error: signal_psd(): the 'astropy'",
            " module is required for the 'lomb' method to run.",
            " Please install it first (`pip install astropy`).",
        ) from e
    return frequency, power
# =============================================================================
# Burg method
# =============================================================================
def _signal_psd_burg(
    signal, sampling_rate=1000, order=16, criteria="KIC", corrected=True, side="one-sided", nperseg=None,
):
    """Power spectral density via Burg autoregressive (AR) estimation."""
    nfft = int(nperseg * 2)
    # Fit the AR model, then evaluate its spectrum on an nfft-point grid.
    ar, rho, _ = _signal_arma_burg(signal, order=order, criteria=criteria, corrected=corrected)
    psd = _signal_psd_from_arma(ar=ar, rho=rho, sampling_rate=sampling_rate, nfft=nfft, side=side)
    # The signal is real: keep the non-negative frequencies and double the power.
    if nfft % 2 == 0:
        half = int(nfft / 2 + 1)
    else:
        half = int((nfft + 1) / 2)
    power = psd[0:half] * 2
    # Angular frequencies: for a one-sided PSD, w spans [0, pi].
    # (Two-sided / dc-centered variants are not implemented in this helper.)
    if side == "one-sided":
        w = np.pi * np.linspace(0, 1, len(power))
    frequency = (w * sampling_rate) / (2 * np.pi)
    return frequency, power
def _signal_arma_burg(signal, order=16, criteria="KIC", corrected=True):
    """Estimate AR model parameters with Burg's recursion.

    Returns a tuple ``(ar, rho, ref)``: the complex AR coefficients, the final
    white-noise variance (prediction error power), and the reflection
    coefficients. When `criteria` is not None, the recursion stops early once
    the information criterion starts increasing.
    """
    # Sanitize order and signal
    N = len(signal)
    if order <= 0.0:
        raise ValueError("Order must be > 0")
    if order > N:
        raise ValueError("Order must be less than length signal minus 2")
    if not isinstance(signal, np.ndarray):
        signal = np.array(signal)
    # Initialisation
    # rho is variance of driving white noise process (prediction error)
    rho = sum(abs(signal) ** 2.0) / float(N)
    denominator = rho * 2.0 * N
    ar = np.zeros(0, dtype=complex)  # AR parametric signal model estimate
    ref = np.zeros(0, dtype=complex)  # vector K of reflection coefficients (parcor coefficients)
    ef = signal.astype(complex)  # forward prediction error
    eb = signal.astype(complex)  # backward prediction error
    temp = 1.0
    # Main recursion
    for k in range(0, order):
        # calculate the next order reflection coefficient
        numerator = sum([ef[j] * eb[j - 1].conjugate() for j in range(k + 1, N)])
        denominator = temp * denominator - abs(ef[k]) ** 2 - abs(eb[N - 1]) ** 2
        kp = -2.0 * numerator / denominator
        # Update the prediction error
        temp = 1.0 - abs(kp) ** 2.0
        new_rho = temp * rho
        if criteria is not None:
            # k=k+1 because order goes from 1 to P whereas k starts at 0.
            residual_new = _criteria(criteria=criteria, N=N, k=k + 1, rho=new_rho, corrected=corrected)
            if k == 0:
                # Seed the previous-residual comparison so the first order is never rejected.
                residual_old = 2.0 * abs(residual_new)
            # Stop as criteria has reached
            if residual_new > residual_old:
                break
            # This should be after the criteria
            residual_old = residual_new
        rho = new_rho
        if rho <= 0:
            raise ValueError(f"Found a negative value (expected positive strictly) {rho}. Decrease the order.")
        ar = np.resize(ar, ar.size + 1)
        ar[k] = kp
        if k == 0:
            # First order: only the prediction errors need updating.
            for j in range(N - 1, k, -1):
                ef_previous = ef[j]  # previous value
                ef[j] = ef_previous + kp * eb[j - 1]  # Eq. (8.7)
                eb[j] = eb[j - 1] + kp.conjugate() * ef_previous
        else:
            # Update the AR coeff
            khalf = (k + 1) // 2  # khalf must be an integer
            for j in range(0, khalf):
                ar_previous = ar[j]  # previous value
                ar[j] = ar_previous + kp * ar[k - j - 1].conjugate()  # Eq. (8.2)
                if j != k - j - 1:
                    ar[k - j - 1] = ar[k - j - 1] + kp * ar_previous.conjugate()  # Eq. (8.2)
            # Update the forward and backward prediction errors
            for j in range(N - 1, k, -1):
                ef_previous = ef[j]  # previous value
                ef[j] = ef_previous + kp * eb[j - 1]  # Eq. (8.7)
                eb[j] = eb[j - 1] + kp.conjugate() * ef_previous
        # save the reflection coefficient
        ref = np.resize(ref, ref.size + 1)
        ref[k] = kp
    return ar, rho, ref
# =============================================================================
# Utilities
# =============================================================================
def _criteria(criteria=None, N=None, k=None, rho=None, corrected=True):
"""Criteria to automatically select order in parametric PSD.
AIC, AICc, KIC and AKICc are based on information theory. They attempt to balance the complexity
(or length) of the model against how well the model fits the data.
AIC and KIC are biased estimates of the asymmetric and the symmetric Kullback-Leibler divergence
respectively. AICc and AKICc attempt to correct the bias.
Parameters
----------
criteria : str
The criteria to be used. The critera can be one of the following: AIC (Akaike Information Criterion),
KIC (Kullback Iinformation Criterion), FPE (Final Prediction Error Criterion), MDL (Minimum
Description Length), CAT (Criterion Autoregressive Transfer Function), AIC order-selection using
eigen values, MDL order-selection using eigen values.
N : int
The sample size of the signal.
k : int
The AR order.
rho : int
The rho at order k.
corrected : bool
Specify for AIC and KIC methods.
Returns
-------
residual : Union[int, float]
Residuals to select the optimal order.
"""
if criteria == "AIC":
if corrected is True:
residual = np.log(rho) + 2.0 * (k + 1) / (N - k - 2)
else:
residual = N * np.log(np.array(rho)) + 2.0 * (np.array(k) + 1)
elif criteria == "KIC":
if corrected is True:
residual = np.log(rho) + k / N / (N - k) + (3.0 - (k + 2.0) / N) * (k + 1.0) / (N - k - 2.0)
else:
residual = np.log(rho) + 3.0 * (k + 1.0) / float(N)
elif criteria == "FPE":
fpe = rho * (N + k + 1.0) / (N - k - 1)
return fpe
elif criteria == "MDL":
mdl = N * np.log(rho) + k * np.log(N)
return mdl
return residual
def _signal_psd_from_arma(ar=None, ma=None, rho=1.0, sampling_rate=1000, nfft=None, side="one-sided"):
if ar is None and ma is None:
raise ValueError("Either AR or MA model must be provided")
psd = np.zeros(nfft, dtype=complex)
if ar is not None:
ip = len(ar)
den = np.zeros(nfft, dtype=complex)
den[0] = 1.0 + 0j
for k in range(0, ip):
den[k + 1] = ar[k]
denf = np.fft.fft(den, nfft)
if ma is not None:
iq = len(ma)
num = np.zeros(nfft, dtype=complex)
num[0] = 1.0 + 0j
for k in range(0, iq):
num[k + 1] = ma[k]
numf = np.fft.fft(num, nfft)
if ar is not None and ma is not None:
psd = rho / sampling_rate * abs(numf) ** 2.0 / abs(denf) ** 2.0
elif ar is not None:
psd = rho / sampling_rate / abs(denf) ** 2.0
elif ma is not None:
psd = rho / sampling_rate * abs(numf) ** 2.0
psd = np.real(psd) # The PSD is a twosided PSD.
# convert to one-sided
if side == "one-sided":
assert len(psd) % 2 == 0
one_side_psd = np.array(psd[0 : len(psd) // 2 + 1]) * 2.0
one_side_psd[0] /= 2.0
# one_side_psd[-1] = psd[-1]
psd = one_side_psd
# convert to centerdc
elif side == "centerdc":
first_half = psd[0 : len(psd) // 2]
second_half = psd[len(psd) // 2 :]
rotate_second_half = second_half[-1:] + second_half[:-1]
center_psd = np.concatenate((rotate_second_half, first_half))
center_psd[0] = psd[-1]
psd = center_psd
return psd
| 19,000 | 33.6102 | 129 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_power.py | # -*- coding: utf-8 -*-
import matplotlib.cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .signal_psd import signal_psd
def signal_power(
    signal,
    frequency_band,
    sampling_rate=1000,
    continuous=False,
    show=False,
    normalize=True,
    **kwargs,
):
    """**Compute the power of a signal in a given frequency band**

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    frequency_band : tuple or list
        Tuple or list of tuples indicating the range of frequencies to compute the power in.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    continuous : bool
        Compute instant frequency, or continuous power.
    show : bool
        If ``True``, will return a Poincaré plot. Defaults to ``False``.
    normalize : bool
        Normalization of power by maximum PSD value. Default to ``True``.
        Normalization allows comparison between different PSD methods.
    **kwargs
        Keyword arguments to be passed to :func:`.signal_psd`.

    See Also
    --------
    signal_filter, signal_psd

    Returns
    -------
    pd.DataFrame
        A DataFrame containing the Power Spectrum values and a plot if
        ``show`` is ``True``.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=60, frequency=[10, 15, 20],
                                  amplitude = [1, 2, 3], noise = 2)
      power_plot = nk.signal_power(signal, frequency_band=[(8, 12), (18, 22)],
                                   method="welch", show=True)

    """
    if continuous is False:
        result = _signal_power_instant(
            signal,
            frequency_band,
            sampling_rate=sampling_rate,
            show=show,
            normalize=normalize,
            **kwargs,
        )
    else:
        result = _signal_power_continuous(signal, frequency_band, sampling_rate=sampling_rate)
    # One row, with one column per frequency band.
    return pd.DataFrame.from_dict(result, orient="index").T
# =============================================================================
# Instant
# =============================================================================
def _signal_power_instant(
    signal,
    frequency_band,
    sampling_rate=1000,
    show=False,
    normalize=True,
    order_criteria="KIC",
    **kwargs,
):
    """Compute the power within each requested band from a single PSD estimate."""
    # Accept a single (low, high) pair as well as a list of pairs.
    if isinstance(frequency_band[0], (int, float)):
        frequency_band = [frequency_band]  # put in list to iterate on
    # Overall frequency range covering all requested bands.
    lowest = min(band[0] for band in frequency_band)
    highest = max(band[1] for band in frequency_band)
    # One PSD estimate shared by all bands.
    psd = signal_psd(
        signal,
        sampling_rate=sampling_rate,
        show=False,
        normalize=normalize,
        order_criteria=order_criteria,
        **kwargs,
    )
    psd = psd[(psd["Frequency"] >= lowest) & (psd["Frequency"] <= highest)]
    out = {}
    for band in frequency_band:
        out[f"Hz_{band[0]}_{band[1]}"] = _signal_power_instant_compute(psd, band)
    if show:
        _signal_power_instant_plot(psd, out, frequency_band)
    return out
def _signal_power_instant_compute(psd, band):
"""Also used in other instances"""
where = (psd["Frequency"] >= band[0]) & (psd["Frequency"] < band[1])
power = np.trapz(y=psd["Power"][where], x=psd["Frequency"][where])
return np.nan if power == 0.0 else power
def _signal_power_instant_plot(psd, out, frequency_band, ax=None):
    """Plot the PSD with each requested frequency band shaded and labelled.

    ``psd`` is the DataFrame produced by ``signal_psd()`` (columns "Frequency"
    and "Power"); ``out`` is the band -> power dict from
    ``_signal_power_instant()``. Returns the created figure, or ``None`` when
    drawing onto a caller-provided ``ax``.
    """
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = None
    # Sanitize input: wrap a bare (low, high) tuple of ints into a list of bands.
    if isinstance(frequency_band[0], int):
        if len(frequency_band) > 2:
            print(
                "NeuroKit error: signal_power(): The `frequency_band` argument must be a list of tuples"
                " or a tuple of 2 integers"
            )
        else:
            frequency_band = [tuple(i for i in frequency_band)]
    freq = np.array(psd["Frequency"])
    power = np.array(psd["Power"])
    # Boolean masks selecting the PSD samples belonging to each band.
    frequency_band_index = []
    for band in frequency_band:
        indexes = np.logical_and(
            psd["Frequency"] >= band[0], psd["Frequency"] < band[1]
        )  # pylint: disable=E1111
        frequency_band_index.append(np.array(indexes))
    labels = list(out.keys())
    # Reformat labels of the pattern "Hz_X_Y" (keys built by _signal_power_instant).
    if len(labels[0].split("_")) == 3:
        labels = [i.split("_") for i in labels]
        labels = [f"{i[1]}-{i[2]} Hz" for i in labels]
    # Get cmap.
    # NOTE(review): matplotlib.cm.get_cmap is deprecated since matplotlib 3.7 --
    # consider matplotlib.colormaps["Set1"] once the minimum version allows it.
    cmap = matplotlib.cm.get_cmap("Set1")
    colors = cmap.colors
    colors = (
        colors[3],
        colors[1],
        colors[2],
        colors[4],
        colors[0],
        colors[5],
        colors[6],
        colors[7],
        colors[8],
    )  # manually rearrange colors
    # Keep only as many colors as there are bands to draw.
    colors = colors[0 : len(frequency_band_index)]
    # Plot: full PSD in grey, then one coloured overlay per band.
    ax.set_title("Power Spectral Density (PSD) for Frequency Domains")
    ax.set_xlabel("Frequency (Hz)")
    ax.set_ylabel("Spectrum (ms2/Hz)")
    ax.fill_between(freq, 0, power, color="lightgrey")
    for band_index, label, i in zip(frequency_band_index, labels, colors):
        ax.fill_between(freq[band_index], 0, power[band_index], label=label, color=i)
    ax.legend(prop={"size": 10}, loc="best")
    return fig
# =============================================================================
# Continuous
# =============================================================================
def _signal_power_continuous(signal, frequency_band, sampling_rate=1000):
    """Dispatch continuous (Morlet-based) power extraction over one or several bands."""
    # A list/tuple of bands -> process each; otherwise treat as a single (low, high) band.
    if isinstance(frequency_band[0], (list, tuple)):
        bands = frequency_band
    else:
        bands = [frequency_band]

    out = {}
    for band in bands:
        out.update(_signal_power_continuous_get(signal, band, sampling_rate))
    return out
def _signal_power_continuous_get(signal, frequency_band, sampling_rate=1000, precision=20):
    """Time-resolved power in ``frequency_band`` via Morlet wavelets (requires `mne`).

    The band is sampled at ``precision`` equally spaced frequencies; the Morlet
    power is averaged across those frequencies, yielding one power value per
    sample. Returns a dict with a single "low-highHz" key.
    """
    try:
        import mne
    except ImportError as e:
        # Bug fix: the message was previously passed as three separate arguments,
        # which made ImportError render a tuple instead of a readable sentence.
        raise ImportError(
            "NeuroKit error: signal_power(): the 'mne' module is required. "
            "Please install it first (`pip install mne`)."
        ) from e  # explicitly raise error from ImportError exception

    out = mne.time_frequency.tfr_array_morlet(
        [[signal]],
        sfreq=sampling_rate,
        freqs=np.linspace(frequency_band[0], frequency_band[1], precision),
        output="power",
    )
    # Average across the sampled frequencies -> one power value per time point.
    power = np.mean(out[0][0], axis=0)

    return {f"{frequency_band[0]:.2f}-{frequency_band[1]:.2f}Hz": power}
| 7,879 | 28.961977 | 121 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_detrend.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.sparse
from ..stats import fit_loess, fit_polynomial
from .signal_decompose import signal_decompose
def signal_detrend(
    signal,
    method="polynomial",
    order=1,
    regularization=500,
    alpha=0.75,
    window=1.5,
    stepsize=0.02,
    components=None,
    sampling_rate=1000,
):
    """**Signal Detrending**

    Apply a baseline (order = 0), linear (order = 1), or polynomial (order > 1) detrending to the
    signal (i.e., removing a general trend). One can also use other methods, such as smoothness
    priors approach described by Tarvainen (2002) or LOESS regression, but these scale badly for
    long signals.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    method : str
        Can be one of ``"polynomial"`` (default; traditional detrending of a given order) or
        ``"tarvainen2002"`` to use the smoothness priors approach described by Tarvainen (2002)
        (mostly used in HRV analyses as a lowpass filter to remove complex trends), ``"loess"`` for
        LOESS smoothing trend removal or ``"locreg"`` for local linear regression (the *'runline'*
        algorithm from chronux).
    order : int
        Only used if ``method`` is ``"polynomial"``. The order of the polynomial. 0, 1 or > 1 for a
        baseline ('constant detrend', i.e., remove only the mean), linear (remove the linear trend)
        or polynomial detrending, respectively. Can also be ``"auto"``, in which case it will
        attempt to find the optimal order to minimize the RMSE.
    regularization : int
        Only used if ``method="tarvainen2002"``. The regularization parameter (default to 500).
    alpha : float
        Only used if ``method`` is "loess". The parameter which controls the degree of smoothing.
    window : float
        Only used if ``method`` is "locreg". The detrending ``window`` should correspond to the
        1 divided by the desired low-frequency band to remove
        (``window = 1 / detrend_frequency``)
        For instance, to remove frequencies below ``0.67Hz`` the window should be ``1.5``
        (``1 / 0.67 = 1.5``).
    stepsize : float
        Only used if ``method`` is ``"locreg"``.
    components : list
        Only used if ``method`` is ``"EMD"``. What Intrinsic Mode Functions (IMFs) from EMD to
        remove. By default (``None``), the last one.
    sampling_rate : int, optional
        Only used if ``method`` is "locreg". Sampling rate (Hz) of the signal.
        If not None, the ``stepsize`` and ``window`` arguments will be multiplied
        by the sampling rate. By default 1000.

    Returns
    -------
    array
        Vector containing the detrended signal.

    See Also
    --------
    signal_filter, fit_loess, signal_decompose

    Examples
    --------
    .. ipython:: python

      import numpy as np
      import pandas as pd
      import neurokit2 as nk
      import matplotlib.pyplot as plt

      # Simulate signal with low and high frequency
      signal = nk.signal_simulate(frequency=[0.1, 2], amplitude=[2, 0.5], sampling_rate=100)
      signal = signal + (3 + np.linspace(0, 6, num=len(signal)))  # Add baseline and linear trend

      # Apply detrending algorithms
      # ---------------------------
      # Method 1: Default Polynomial Detrending of a Given Order
      # Constant detrend (removes the mean)
      baseline = nk.signal_detrend(signal, order=0)

      # Linear Detrend (removes the linear trend)
      linear = nk.signal_detrend(signal, order=1)

      # Polynomial Detrend (removes the polynomial trend)
      quadratic = nk.signal_detrend(signal, order=2)  # Quadratic detrend
      cubic = nk.signal_detrend(signal, order=3)  # Cubic detrend
      poly10 = nk.signal_detrend(signal, order=10)  # Linear detrend (10th order)

      # Method 2: Tarvainen's smoothness priors approach (Tarvainen et al., 2002)
      tarvainen = nk.signal_detrend(signal, method="tarvainen2002")

      # Method 3: LOESS smoothing trend removal
      loess = nk.signal_detrend(signal, method="loess")

      # Method 4: Local linear regression (100Hz)
      locreg = nk.signal_detrend(signal, method="locreg",
                                 window=1.5, stepsize=0.02, sampling_rate=100)

      # Method 5: EMD
      emd = nk.signal_detrend(signal, method="EMD", components=[-2, -1])

      # Visualize different methods
      @savefig signal_detrend1.png scale=100%
      axes = pd.DataFrame({"Original signal": signal,
                           "Baseline": baseline,
                           "Linear": linear,
                           "Quadratic": quadratic,
                           "Cubic": cubic,
                           "Polynomial (10th)": poly10,
                           "Tarvainen": tarvainen,
                           "LOESS": loess,
                           "Local Regression": locreg,
                           "EMD": emd}).plot(subplots=True)
      # Plot horizontal lines to better visualize the detrending
      for subplot in axes:
          subplot.axhline(y=0, color="k", linestyle="--")
      @suppress
      plt.close()

    References
    ----------
    * Tarvainen, M. P., Ranta-Aho, P. O., & Karjalainen, P. A. (2002). An advanced detrending
      method with application to HRV analysis. IEEE Transactions on Biomedical Engineering, 49(2),
      172-175

    """
    signal = np.array(signal)  # Force vector
    method = method.lower()

    # Bug fix: `components=[-1]` used to be a (shared) mutable default argument.
    # Use None as a sentinel and materialize the historical default here.
    if components is None:
        components = [-1]

    if method in ["tarvainen", "tarvainen2002"]:
        detrended = _signal_detrend_tarvainen2002(signal, regularization)
    elif method in ["poly", "polynomial"]:
        detrended = signal - fit_polynomial(signal, X=None, order=order)[0]
    elif method in ["loess", "lowess"]:
        detrended = signal - fit_loess(signal, alpha=alpha)[0]
    elif method in ["locdetrend", "runline", "locreg", "locregression"]:
        detrended = _signal_detrend_locreg(
            signal, window=window, stepsize=stepsize, sampling_rate=sampling_rate
        )
    elif method in ["emd"]:
        detrended = _signal_detrend_emd(signal, components=components)
    else:
        raise ValueError(
            "NeuroKit error: signal_detrend(): 'method' should be one of 'polynomial', 'loess', "
            + "'locreg', 'EMD' or 'tarvainen2002'."
        )

    return detrended
# =============================================================================
# Internals
# =============================================================================
def _signal_detrend_tarvainen2002(signal, regularization=500):
"""Method by Tarvainen et al., 2002.
- Tarvainen, M. P., Ranta-Aho, P. O., & Karjalainen, P. A. (2002). An advanced detrending method
with application to HRV analysis. IEEE Transactions on Biomedical Engineering, 49(2), 172-175.
"""
N = len(signal)
identity = np.eye(N)
B = np.dot(np.ones((N - 2, 1)), np.array([[1, -2, 1]]))
D_2 = scipy.sparse.dia_matrix((B.T, [0, 1, 2]), shape=(N - 2, N)) # pylint: disable=E1101
inv = np.linalg.inv(identity + regularization**2 * D_2.T @ D_2)
z_stat = ((identity - inv)) @ signal
trend = np.squeeze(np.asarray(signal - z_stat))
# detrend
return signal - trend
def _signal_detrend_locreg(signal, window=1.5, stepsize=0.02, sampling_rate=1000):
    """Local linear regression ('runline' algorithm from chronux). Based on https://github.com/sappelhoff/pyprep.

    A least-squares line is fitted in overlapping windows, the fits are blended
    with tricube weights, and the resulting trend line is subtracted.

    - http://chronux.org/chronuxFiles/Documentation/chronux/spectral_analysis/continuous/locdetrend.html
    - https://github.com/sappelhoff/pyprep/blob/master/pyprep/removeTrend.py
    - https://github.com/VisLab/EEG-Clean-Tools/blob/master/PrepPipeline/utilities/localDetrend.m
    """
    length = len(signal)
    # Sanitize input
    if sampling_rate is None:
        sampling_rate = 1
    # Sanity checks: `window` and `stepsize` are given in seconds, converted to samples here.
    window = int(window * sampling_rate)
    stepsize = int(stepsize * sampling_rate)
    if window > length:
        raise ValueError(
            "NeuroKit error: signal_detrend(): 'window' should be "
            "less than the number of samples. Try using 1.5 * sampling rate."
        )
    if stepsize <= 1:
        raise ValueError(
            "NeuroKit error: signal_detrend(): 'stepsize' should be more than 1. Increase its value."
        )
    # Accumulators: weighted sum of local fits, and sum of weights, per sample.
    y_line = np.zeros((length, 1))
    norm = np.zeros((length, 1))
    nwin = int(np.ceil((length - window) / stepsize))
    yfit = np.zeros((nwin, window))
    # Tricube weights centered on the window (maximal in the middle, zero at the edges).
    xwt = (np.arange(1, window + 1) - window / 2) / (window / 2)
    wt = np.power(1 - np.power(np.absolute(xwt), 3), 3)  # pylint: disable=E1111
    for j in range(0, nwin):
        # Closed-form least-squares line (slope `a`, offset `b`) for this segment.
        tseg = signal[(stepsize * j) : (stepsize * j + window)]
        y1 = np.mean(tseg)
        y2 = np.mean(np.multiply(np.arange(1, window + 1), tseg)) * (2 / (window + 1))
        a = np.multiply(np.subtract(y2, y1), 6 / (window - 1))
        b = np.subtract(y1, a * (window + 1) / 2)  # pylint: disable=E1111
        yfit[j, :] = np.multiply(np.arange(1, window + 1), a) + b
        # Blend this window's fit into the running trend estimate.
        y_line[(j * stepsize) : (j * stepsize + window)] = y_line[
            (j * stepsize) : (j * stepsize + window)
        ] + np.reshape(np.multiply(yfit[j, :], wt), (window, 1))
        norm[(j * stepsize) : (j * stepsize + window)] = norm[
            (j * stepsize) : (j * stepsize + window)
        ] + np.reshape(wt, (window, 1))
    # Normalize where at least one window contributed weight.
    above_norm = np.where(norm[:, 0] > 0)
    y_line[above_norm] = y_line[above_norm] / norm[above_norm]
    # Extrapolate the tail with `a`/`b` from the LAST loop iteration above.
    # NOTE(review): this relies on the loop running at least once (nwin >= 1),
    # i.e. window < length; with window == length, `a`/`b` are undefined -- confirm.
    indx = (nwin - 1) * stepsize + window - 1
    npts = length - indx + 1
    y_line[indx - 1 :] = np.reshape(
        (np.multiply(np.arange(window + 1, window + npts + 1), a) + b), (npts, 1)
    )
    # The detrended signal is the original minus the blended trend line.
    detrended = signal - y_line[:, 0]
    return detrended
def _signal_detrend_emd(signal, components=-1):
    """Detrend by Empirical Mode Decomposition (EMD).

    Computes the Intrinsic Mode Functions (IMFs) of the signal, sums the IMFs
    selected by `components` (by default the last one, i.e., the slowest
    oscillation), and subtracts that sum from the signal.
    """
    imfs = signal_decompose(signal, method="emd")
    trend = np.sum(imfs[components, :], axis=0)
    return signal - trend
| 10,122 | 41.894068 | 113 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_resample.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.ndimage
import scipy.signal
def signal_resample(
    signal,
    desired_length=None,
    sampling_rate=None,
    desired_sampling_rate=None,
    method="interpolation",
):
    """**Resample a continuous signal to a different length or sampling rate**

    Up- or down-sample a signal. The user can specify either a desired length for the vector, or
    input the original sampling rate and the desired sampling rate.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    desired_length : int
        The desired length of the signal.
    sampling_rate : int
        The original sampling frequency (in Hz, i.e., samples/second).
    desired_sampling_rate : int
        The desired (output) sampling frequency (in Hz, i.e., samples/second).
    method : str
        Can be ``"interpolation"`` (see ``scipy.ndimage.zoom()``), ``"numpy"`` for numpy's
        interpolation (see ``np.interp()``),``"pandas"`` for Pandas' time series resampling,
        ``"poly"`` (see ``scipy.signal.resample_poly()``) or ``"FFT"`` (see
        ``scipy.signal.resample()``) for the Fourier method. ``"FFT"`` is the most accurate
        (if the signal is periodic), but becomes exponentially slower as the signal length
        increases. In contrast, ``"interpolation"`` is the fastest, followed by ``"numpy"``,
        ``"poly"`` and ``"pandas"``.

    Returns
    -------
    array
        Vector containing resampled signal values.

    Raises
    ------
    ValueError
        If neither ``desired_length`` nor both ``sampling_rate`` and
        ``desired_sampling_rate`` are provided.

    See Also
    --------
    signal_interpolate

    Examples
    --------
    **Example 1**: Downsampling

    .. ipython:: python

      import numpy as np
      import pandas as pd
      import neurokit2 as nk

      signal = nk.signal_simulate(duration=1, sampling_rate=500, frequency=3)

      # Downsample
      data = {}
      for m in ["interpolation", "FFT", "poly", "numpy", "pandas"]:
          data[m] = nk.signal_resample(signal, sampling_rate=500, desired_sampling_rate=30, method=m)

      @savefig p_signal_resample1.png scale=100%
      nk.signal_plot([data[m] for m in data.keys()])
      @suppress
      plt.close()

    **Example 2**: Upsampling

    .. ipython:: python
      :verbatim:

      signal = nk.signal_simulate(duration=1, sampling_rate=30, frequency=3)

      # Upsample
      data = {}
      for m in ["interpolation", "FFT", "poly", "numpy", "pandas"]:
          data[m] = nk.signal_resample(signal, sampling_rate=30, desired_sampling_rate=500, method=m)

      @savefig p_signal_resample2.png scale=100%
      nk.signal_plot([data[m] for m in data.keys()], labels=list(data.keys()))
      @suppress
      plt.close()

    **Example 3**: Benchmark

    .. ipython:: python
      :verbatim:

      signal = nk.signal_simulate(duration=1, sampling_rate=1000, frequency=3)

      # Timing benchmarks
      %timeit nk.signal_resample(signal, method="interpolation",
                                 sampling_rate=1000, desired_sampling_rate=500)
      %timeit nk.signal_resample(signal, method="FFT",
                                 sampling_rate=1000, desired_sampling_rate=500)
      %timeit nk.signal_resample(signal, method="poly",
                                 sampling_rate=1000, desired_sampling_rate=500)
      %timeit nk.signal_resample(signal, method="numpy",
                                 sampling_rate=1000, desired_sampling_rate=500)
      %timeit nk.signal_resample(signal, method="pandas",
                                 sampling_rate=1000, desired_sampling_rate=500)

    """
    # Derive the target length from the sampling rates when not given directly.
    if desired_length is None:
        # Robustness fix: previously this crashed with an opaque TypeError when
        # the sampling rates were missing too.
        if sampling_rate is None or desired_sampling_rate is None:
            raise ValueError(
                "NeuroKit error: signal_resample(): provide either `desired_length`, "
                "or both `sampling_rate` and `desired_sampling_rate`."
            )
        desired_length = int(np.round(len(signal) * desired_sampling_rate / sampling_rate))

    # Sanity checks: nothing to do if the length is already right.
    if len(signal) == desired_length:
        return signal

    # Resample (unknown methods fall back to "interpolation", the documented default).
    method = method.lower()
    if method == "fft":
        resampled = _resample_fft(signal, desired_length)
    elif method == "poly":
        resampled = _resample_poly(signal, desired_length)
    elif method == "numpy":
        resampled = _resample_numpy(signal, desired_length)
    elif method == "pandas":
        resampled = _resample_pandas(signal, desired_length)
    else:
        resampled = _resample_interpolation(signal, desired_length)

    return resampled
# =============================================================================
# Methods
# =============================================================================
def _resample_numpy(signal, desired_length):
resampled_signal = np.interp(
np.linspace(0.0, 1.0, desired_length, endpoint=False), # where to interpolate
np.linspace(0.0, 1.0, len(signal), endpoint=False), # known positions
signal, # known data points
)
return resampled_signal
def _resample_interpolation(signal, desired_length):
resampled_signal = scipy.ndimage.zoom(signal, desired_length / len(signal))
return resampled_signal
def _resample_fft(signal, desired_length):
resampled_signal = scipy.signal.resample(signal, desired_length)
return resampled_signal
def _resample_poly(signal, desired_length):
resampled_signal = scipy.signal.resample_poly(signal, desired_length, len(signal))
return resampled_signal
def _resample_pandas(signal, desired_length):
    """Resample through pandas' time-series resampling (backfill)."""
    # Attach an arbitrary millisecond-spaced DatetimeIndex so .resample() applies.
    timestamps = pd.date_range("20131212", freq="L", periods=len(signal))
    series = pd.Series(signal, index=timestamps)
    # New sampling interval, expressed in (fractional) milliseconds.
    rule = str(np.round(1 / (desired_length / len(signal)), 6)) + "L"
    resampled = series.resample(rule).bfill().values
    # Trim/pad so the output has exactly the requested number of samples.
    return _resample_sanitize(resampled, desired_length)
# =============================================================================
# Internals
# =============================================================================
def _resample_sanitize(resampled_signal, desired_length):
# Adjust extremities
diff = len(resampled_signal) - desired_length
if diff < 0:
resampled_signal = np.concatenate(
[resampled_signal, np.full(np.abs(diff), resampled_signal[-1])]
)
elif diff > 0:
resampled_signal = resampled_signal[0:desired_length]
return resampled_signal
| 6,386 | 32.615789 | 101 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_fixpeaks.py | # - * - coding: utf-8 - * -
from warnings import warn
import matplotlib.patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import NeuroKitWarning
from ..stats import standardize
from .signal_formatpeaks import _signal_formatpeaks_sanitize
from .signal_period import signal_period
def signal_fixpeaks(
    peaks,
    sampling_rate=1000,
    iterative=True,
    show=False,
    interval_min=None,
    interval_max=None,
    relative_interval_min=None,
    relative_interval_max=None,
    robust=False,
    method="Kubios",
    **kwargs,
):
    """**Correct Erroneous Peak Placements**

    Identify and correct erroneous peak placements based on outliers in peak-to-peak differences
    (period).

    Parameters
    ----------
    peaks : list or array or DataFrame or Series or dict
        The samples at which the peaks occur. If an array is passed in, it is assumed that it was
        obtained with :func:`.signal_findpeaks`. If a DataFrame is passed in, it is assumed to be
        obtained with :func:`.ecg_findpeaks` or :func:`.ppg_findpeaks` and to be of the same length
        as the input signal.
    sampling_rate : int
        The sampling frequency of the signal that contains the peaks (in Hz, i.e., samples/second).
    iterative : bool
        Whether or not to apply the artifact correction repeatedly (results in superior artifact
        correction).
    show : bool
        Whether or not to visualize artifacts and artifact thresholds.
    interval_min : float
        Only when ``method = "neurokit"``. The minimum interval between the peaks.
    interval_max : float
        Only when ``method = "neurokit"``. The maximum interval between the peaks.
    relative_interval_min : float
        Only when ``method = "neurokit"``. The minimum interval between the peaks as relative to
        the sample (expressed in standard deviation from the mean).
    relative_interval_max : float
        Only when ``method = "neurokit"``. The maximum interval between the peaks as relative to
        the sample (expressed in standard deviation from the mean).
    robust : bool
        Only when ``method = "neurokit"``. Use a robust method of standardization (see
        :func:`.standardize`) for the relative thresholds.
    method : str
        Either ``"Kubios"`` or ``"neurokit"``. ``"Kubios"`` uses the artifact detection and
        correction described in Lipponen, J. A., & Tarvainen, M. P. (2019). Note that ``"Kubios"``
        is only meant for peaks in ECG or PPG. ``"neurokit"`` can be used with peaks in ECG, PPG,
        or respiratory data.
    **kwargs
        Other keyword arguments.

    Returns
    -------
    peaks_clean : array
        The corrected peak locations.
    artifacts : dict
        Only if ``method="Kubios"``. A dictionary containing the indices of artifacts, accessible
        with the keys ``"ectopic"``, ``"missed"``, ``"extra"``, and ``"longshort"``.

    See Also
    --------
    signal_findpeaks, ecg_findpeaks, ecg_peaks, ppg_findpeaks, ppg_peaks

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate ECG data
      ecg = nk.ecg_simulate(duration=240, noise=0.25, heart_rate=70, random_state=42)

      # Identify and Correct Peaks using "Kubios" Method
      rpeaks_uncorrected = nk.ecg_findpeaks(ecg)

      @savefig p_signal_fixpeaks1.png scale=100%
      artifacts, rpeaks_corrected = nk.signal_fixpeaks(
          rpeaks_uncorrected, iterative=True, method="Kubios", show=True
      )
      @suppress
      plt.close()

    .. ipython:: python

      # Visualize Artifact Correction
      rate_corrected = nk.signal_rate(rpeaks_corrected, desired_length=len(ecg))
      rate_uncorrected = nk.signal_rate(rpeaks_uncorrected, desired_length=len(ecg))

      @savefig p_signal_fixpeaks2.png scale=100%
      nk.signal_plot(
          [rate_uncorrected, rate_corrected],
          labels=["Heart Rate Uncorrected", "Heart Rate Corrected"]
      )
      @suppress
      plt.close()

    .. ipython:: python

      import numpy as np

      # Simulate Abnormal Signals
      signal = nk.signal_simulate(duration=4, sampling_rate=1000, frequency=1)
      peaks_true = nk.signal_findpeaks(signal)["Peaks"]
      peaks = np.delete(peaks_true, [1])  # create gaps due to missing peaks

      signal = nk.signal_simulate(duration=20, sampling_rate=1000, frequency=1)
      peaks_true = nk.signal_findpeaks(signal)["Peaks"]
      peaks = np.delete(peaks_true, [5, 15])  # create gaps
      peaks = np.sort(np.append(peaks, [1350, 11350, 18350]))  # add artifacts

      # Identify and Correct Peaks using 'NeuroKit' Method
      peaks_corrected = nk.signal_fixpeaks(
          peaks=peaks, interval_min=0.5, interval_max=1.5, method="neurokit"
      )

      # Plot and shift original peaks to the right to see the difference.
      @savefig p_signal_fixpeaks3.png scale=100%
      nk.events_plot([peaks + 50, peaks_corrected], signal)
      @suppress
      plt.close()

    References
    ----------
    * Lipponen, J. A., & Tarvainen, M. P. (2019). A robust algorithm for heart rate variability time
      series artefact correction using novel beat classification. Journal of medical engineering &
      technology, 43(3), 173-181. 10.1080/03091902.2019.1640306

    """
    # Format input into a plain array of peak indices.
    peaks = _signal_formatpeaks_sanitize(peaks)

    if method.lower() != "kubios":
        # NeuroKit method: interval-based removal / interpolation of implausible peaks.
        return _signal_fixpeaks_neurokit(
            peaks,
            sampling_rate=sampling_rate,
            interval_min=interval_min,
            interval_max=interval_max,
            relative_interval_min=relative_interval_min,
            relative_interval_max=relative_interval_max,
            robust=robust,
        )

    # Kubios method (Lipponen & Tarvainen, 2019): also returns the artifact indices.
    return _signal_fixpeaks_kubios(
        peaks, sampling_rate=sampling_rate, iterative=iterative, show=show, **kwargs
    )
# =============================================================================
# Methods
# =============================================================================
def _signal_fixpeaks_neurokit(
    peaks,
    sampling_rate=1000,
    interval_min=None,
    interval_max=None,
    relative_interval_min=None,
    relative_interval_max=None,
    robust=False,
):
    """NeuroKit method: drop too-close peaks, then interpolate across too-large gaps."""
    cleaned = _remove_small(peaks, sampling_rate, interval_min, relative_interval_min, robust)
    cleaned = _interpolate_big(cleaned, sampling_rate, interval_max, relative_interval_max, robust,)

    # Interpolation may produce negative (invalid) sample indices; drop them with a warning.
    kept = cleaned[cleaned >= 0]
    n_invalid = len(cleaned) - len(kept)
    if n_invalid > 0:
        warn(
            f" Negative peak indices detected in output. " f" Removing {n_invalid} invalid peaks. ",
            category=NeuroKitWarning,
        )
        cleaned = kept

    return cleaned
def _signal_fixpeaks_kubios(peaks, sampling_rate=1000, iterative=True, show=False, **kwargs):
    """Kubios method: detect artifacts, correct them, optionally repeat until stable."""
    # First pass: corrected peaks and normal-to-normal intervals.
    artifacts, subspaces = _find_artifacts(peaks, sampling_rate=sampling_rate, **kwargs)
    peaks_clean = _correct_artifacts(artifacts, peaks)

    if iterative:
        # Re-detect on the corrected series until the artifact count stops decreasing.
        n_current = sum(len(idcs) for idcs in artifacts.values())
        while True:
            new_artifacts, new_subspaces = _find_artifacts(
                peaks_clean, sampling_rate=sampling_rate, **kwargs
            )
            n_previous = n_current
            n_current = sum(len(idcs) for idcs in new_artifacts.values())
            if n_current >= n_previous:
                break
            artifacts = new_artifacts
            subspaces = new_subspaces
            peaks_clean = _correct_artifacts(artifacts, peaks_clean)

    if show:
        _plot_artifacts_lipponen2019(artifacts, subspaces)

    return artifacts, peaks_clean
# =============================================================================
# Kubios: Lipponen & Tarvainen (2019).
# =============================================================================
def _find_artifacts(
    peaks, c1=0.13, c2=0.17, alpha=5.2, window_width=91, medfilt_order=11, sampling_rate=1000,
):
    """Classify beats as ectopic, missed, extra, or long/short (Lipponen & Tarvainen, 2019).

    Returns ``(artifacts, subspaces)``: ``artifacts`` maps each artifact class
    to a list of peak indices; ``subspaces`` carries the intermediate series
    (rr, drrs, mrrs, s12, s22, c1, c2) used for plotting. The "Figure 1"
    comments below refer to the decision diagram of the 2019 paper.
    """
    # Compute period series (make sure it has same numer of elements as peaks);
    # peaks are in samples, convert to seconds.
    rr = np.ediff1d(peaks, to_begin=0) / sampling_rate
    # For subsequent analysis it is important that the first element has
    # a value in a realistic range (e.g., for median filtering).
    rr[0] = np.mean(rr[1:])
    # Artifact identification #################################################
    ###########################################################################
    # Compute dRRs: time series of differences of consecutive periods (dRRs).
    drrs = np.ediff1d(rr, to_begin=0)
    drrs[0] = np.mean(drrs[1:])
    # Normalize by an adaptive rolling-IQR threshold.
    th1 = _compute_threshold(drrs, alpha, window_width)
    # ignore division by 0 warning
    old_setting = np.seterr(divide="ignore", invalid="ignore")
    drrs /= th1
    # return old setting
    np.seterr(**old_setting)
    # Cast dRRs to subspace s12.
    # Pad drrs with one element.
    padding = 2
    drrs_pad = np.pad(drrs, padding, "reflect")
    # s12: for positive dRR, the larger neighbor; for negative, the smaller one.
    s12 = np.zeros(drrs.size)
    for d in np.arange(padding, padding + drrs.size):
        if drrs_pad[d] > 0:
            s12[d - padding] = np.max([drrs_pad[d - 1], drrs_pad[d + 1]])
        elif drrs_pad[d] < 0:
            s12[d - padding] = np.min([drrs_pad[d - 1], drrs_pad[d + 1]])
    # Cast dRRs to subspace s22 (looks at the two FOLLOWING differences).
    s22 = np.zeros(drrs.size)
    for d in np.arange(padding, padding + drrs.size):
        if drrs_pad[d] >= 0:
            s22[d - padding] = np.min([drrs_pad[d + 1], drrs_pad[d + 2]])
        elif drrs_pad[d] < 0:
            s22[d - padding] = np.max([drrs_pad[d + 1], drrs_pad[d + 2]])
    # Compute mRRs: time series of deviation of RRs from median.
    df = pd.DataFrame({"signal": rr})
    medrr = df.rolling(medfilt_order, center=True, min_periods=1).median().signal.values
    mrrs = rr - medrr
    # Negative deviations are doubled, as in the original algorithm.
    mrrs[mrrs < 0] = mrrs[mrrs < 0] * 2
    # Normalize by threshold.
    th2 = _compute_threshold(mrrs, alpha, window_width)
    mrrs /= th2
    # Artifact classification #################################################
    ###########################################################################
    # Artifact classes.
    extra_idcs = []
    missed_idcs = []
    ectopic_idcs = []
    longshort_idcs = []
    i = 0
    while i < rr.size - 2:  # The flow control is implemented based on Figure 1
        if np.abs(drrs[i]) <= 1:  # Figure 1
            i += 1
            continue
        eq1 = np.logical_and(drrs[i] > 1, s12[i] < (-c1 * drrs[i] - c2))  # pylint: disable=E1111
        eq2 = np.logical_and(drrs[i] < -1, s12[i] > (-c1 * drrs[i] + c2))  # pylint: disable=E1111
        if np.any([eq1, eq2]):
            # If any of the two equations is true.
            ectopic_idcs.append(i)
            i += 1
            continue
        # If none of the two equations is true.
        if ~np.any([np.abs(drrs[i]) > 1, np.abs(mrrs[i]) > 3]):  # Figure 1
            i += 1
            continue
        longshort_candidates = [i]
        # Check if the following beat also needs to be evaluated.
        if np.abs(drrs[i + 1]) < np.abs(drrs[i + 2]):
            longshort_candidates.append(i + 1)
        for j in longshort_candidates:
            # Long beat.
            eq3 = np.logical_and(drrs[j] > 1, s22[j] < -1)  # pylint: disable=E1111
            # Long or short.
            eq4 = np.abs(mrrs[j]) > 3  # Figure 1
            # Short beat.
            eq5 = np.logical_and(drrs[j] < -1, s22[j] > 1)  # pylint: disable=E1111
            if ~np.any([eq3, eq4, eq5]):
                # If none of the three equations is true: normal beat.
                i += 1
                continue
            # If any of the three equations is true: check for missing or extra
            # peaks.
            # Missing: half the interval matches the local median.
            eq6 = np.abs(rr[j] / 2 - medrr[j]) < th2[j]  # Figure 1
            # Extra: merging with the next interval matches the local median.
            eq7 = np.abs(rr[j] + rr[j + 1] - medrr[j]) < th2[j]  # Figure 1
            # Check if extra.
            if np.all([eq5, eq7]):
                extra_idcs.append(j)
                i += 1
                continue
            # Check if missing.
            if np.all([eq3, eq6]):
                missed_idcs.append(j)
                i += 1
                continue
            # If neither classified as extra or missing, classify as "long or
            # short".
            longshort_idcs.append(j)
            i += 1
    # Prepare output
    artifacts = {
        "ectopic": ectopic_idcs,
        "missed": missed_idcs,
        "extra": extra_idcs,
        "longshort": longshort_idcs,
    }
    subspaces = {
        "rr": rr,
        "drrs": drrs,
        "mrrs": mrrs,
        "s12": s12,
        "s22": s22,
        "c1": c1,
        "c2": c2,
    }
    return artifacts, subspaces
def _compute_threshold(signal, alpha, window_width):
df = pd.DataFrame({"signal": np.abs(signal)})
q1 = df.rolling(window_width, center=True, min_periods=1).quantile(0.25).signal.values
q3 = df.rolling(window_width, center=True, min_periods=1).quantile(0.75).signal.values
th = alpha * ((q3 - q1) / 2)
return th
def _correct_artifacts(artifacts, peaks):
    """Apply extra/missed/ectopic/longshort corrections while keeping indices consistent.

    Index integrity must be maintained when peaks are inserted or deleted: each
    deleted beat decrements the later indices in the other lists by 1, and each
    inserted beat increments them by 1.
    """
    extra_idcs = artifacts["extra"]
    missed_idcs = artifacts["missed"]
    ectopic_idcs = artifacts["ectopic"]
    longshort_idcs = artifacts["longshort"]

    # Remove false-positive beats first, shifting the remaining index lists down.
    if extra_idcs:
        peaks = _correct_extra(extra_idcs, peaks)
        missed_idcs = _update_indices(extra_idcs, missed_idcs, -1)
        ectopic_idcs = _update_indices(extra_idcs, ectopic_idcs, -1)
        longshort_idcs = _update_indices(extra_idcs, longshort_idcs, -1)

    # Insert false-negative beats, shifting the remaining index lists up.
    if missed_idcs:
        peaks = _correct_missed(missed_idcs, peaks)
        ectopic_idcs = _update_indices(missed_idcs, ectopic_idcs, 1)
        longshort_idcs = _update_indices(missed_idcs, longshort_idcs, 1)

    # Re-align ectopic and long/short beats to their neighbors' midpoint.
    if ectopic_idcs:
        peaks = _correct_misaligned(ectopic_idcs, peaks)
    if longshort_idcs:
        peaks = _correct_misaligned(longshort_idcs, peaks)

    return peaks
def _correct_extra(extra_idcs, peaks):
corrected_peaks = peaks.copy()
corrected_peaks = np.delete(corrected_peaks, extra_idcs)
return corrected_peaks
def _correct_missed(missed_idcs, peaks):
corrected_peaks = peaks.copy()
missed_idcs = np.array(missed_idcs)
# Calculate the position(s) of new beat(s). Make sure to not generate
# negative indices. prev_peaks and next_peaks must have the same
# number of elements.
valid_idcs = np.logical_and(missed_idcs > 1, missed_idcs < len(corrected_peaks)) # pylint: disable=E1111
missed_idcs = missed_idcs[valid_idcs]
prev_peaks = corrected_peaks[[i - 1 for i in missed_idcs]]
next_peaks = corrected_peaks[missed_idcs]
added_peaks = prev_peaks + (next_peaks - prev_peaks) / 2
# Add the new peaks before the missed indices (see numpy docs).
corrected_peaks = np.insert(corrected_peaks, missed_idcs, added_peaks)
return corrected_peaks
def _correct_misaligned(misaligned_idcs, peaks):
corrected_peaks = peaks.copy()
misaligned_idcs = np.array(misaligned_idcs)
# Make sure to not generate negative indices, or indices that exceed
# the total number of peaks. prev_peaks and next_peaks must have the
# same number of elements.
valid_idcs = np.logical_and(
misaligned_idcs > 1, misaligned_idcs < len(corrected_peaks) - 1, # pylint: disable=E1111
)
misaligned_idcs = misaligned_idcs[valid_idcs]
prev_peaks = corrected_peaks[[i - 1 for i in misaligned_idcs]]
next_peaks = corrected_peaks[[i + 1 for i in misaligned_idcs]]
half_ibi = (next_peaks - prev_peaks) / 2
peaks_interp = prev_peaks + half_ibi
# Shift the R-peaks from the old to the new position.
corrected_peaks = np.delete(corrected_peaks, misaligned_idcs)
corrected_peaks = np.concatenate((corrected_peaks, peaks_interp)).astype(int)
corrected_peaks.sort(kind="mergesort")
return corrected_peaks
def _update_indices(source_idcs, update_idcs, update):
"""For every element s in source_idcs, change every element u in update_idcs according to update, if u is larger
than s."""
if not update_idcs:
return update_idcs
for s in source_idcs:
update_idcs = [u + update if u > s else u for u in update_idcs]
return list(np.unique(update_idcs))
def _plot_artifacts_lipponen2019(artifacts, info):
    """Diagnostic figure for the Lipponen & Tarvainen (2019) artifact detection.

    Draws four panels: the heart-period series with the detected artifact
    types marked on it, the two detection criteria against their thresholds,
    and the two decision subspaces with their classification regions shaded.

    Parameters
    ----------
    artifacts : dict
        Detected artifact indices per type, with keys ``"longshort"``,
        ``"ectopic"``, ``"extra"`` and ``"missed"``.
    info : dict
        Intermediate detection quantities: ``"rr"`` (heart periods),
        ``"drrs"`` and ``"mrrs"`` (the two criteria), ``"s12"``/``"s22"``
        (subspace coordinates) and the slope parameters ``"c1"``/``"c2"``.
    """
    # Extract parameters
    longshort_idcs = artifacts["longshort"]
    ectopic_idcs = artifacts["ectopic"]
    extra_idcs = artifacts["extra"]
    missed_idcs = artifacts["missed"]
    rr = info["rr"]
    drrs = info["drrs"]
    mrrs = info["mrrs"]
    s12 = info["s12"]
    s22 = info["s22"]
    c1 = info["c1"]
    c2 = info["c2"]
    # Visualize artifact type indices.
    # Set grids: three stacked panels on the left, two tall subspace panels
    # on the right.
    gs = matplotlib.gridspec.GridSpec(ncols=4, nrows=3, width_ratios=[1, 2, 2, 2])
    fig = plt.figure(constrained_layout=False, figsize=(15, 10))
    ax0 = fig.add_subplot(gs[0, :-2])
    ax1 = fig.add_subplot(gs[1, :-2])
    ax2 = fig.add_subplot(gs[2, :-2])
    ax3 = fig.add_subplot(gs[:, -1])
    ax4 = fig.add_subplot(gs[:, -2])
    # Panel 1: heart periods with each artifact type marked by colored crosses.
    ax0.set_title("Artifact types", fontweight="bold")
    ax0.plot(rr, label="heart period")
    ax0.scatter(
        longshort_idcs, rr[longshort_idcs], marker="x", c="m", s=100, zorder=3, label="long/short",
    )
    ax0.scatter(
        ectopic_idcs, rr[ectopic_idcs], marker="x", c="g", s=100, zorder=3, label="ectopic",
    )
    ax0.scatter(
        extra_idcs, rr[extra_idcs], marker="x", c="y", s=100, zorder=3, label="false positive",
    )
    ax0.scatter(
        missed_idcs, rr[missed_idcs], marker="x", c="r", s=100, zorder=3, label="false negative",
    )
    ax0.legend(loc="upper right")
    # Visualize first threshold.
    ax1.set_title("Consecutive-difference criterion", fontweight="bold")
    ax1.plot(np.abs(drrs), label="normalized difference consecutive heart periods")
    ax1.axhline(1, c="r", label="artifact threshold")
    ax1.legend(loc="upper right")
    ax1.set_ylim(0, 5)
    # Visualize second threshold.
    ax2.set_title("Difference-from-median criterion", fontweight="bold")
    ax2.plot(np.abs(mrrs), label="difference from median over 11 periods")
    ax2.axhline(3, c="r", label="artifact threshold")
    ax2.legend(loc="upper right")
    ax2.set_ylim(0, 5)
    # Visualize subspaces.
    ax4.set_title("Subspace 1", fontweight="bold")
    ax4.set_xlabel("S11")
    ax4.set_ylabel("S12")
    ax4.scatter(drrs, s12, marker="x", label="heart periods")
    ax4.set_ylim(-5, 5)
    ax4.set_xlim(-10, 10)
    # Shaded polygons mark the regions of subspace 1 classified as ectopic
    # (boundary lines parametrized by the slopes c1 and offset c2).
    verts0 = [(-10, 5), (-10, -c1 * -10 + c2), (-1, -c1 * -1 + c2), (-1, 5)]
    poly0 = matplotlib.patches.Polygon(verts0, alpha=0.3, facecolor="r", edgecolor=None, label="ectopic periods")
    ax4.add_patch(poly0)
    verts1 = [(1, -c1 * 1 - c2), (1, -5), (10, -5), (10, -c1 * 10 - c2)]
    poly1 = matplotlib.patches.Polygon(verts1, alpha=0.3, facecolor="r", edgecolor=None)
    ax4.add_patch(poly1)
    ax4.legend(loc="upper right")
    ax3.set_title("Subspace 2", fontweight="bold")
    ax3.set_xlabel("S21")
    ax3.set_ylabel("S22")
    ax3.scatter(drrs, s22, marker="x", label="heart periods")
    ax3.set_xlim(-10, 10)
    ax3.set_ylim(-10, 10)
    # Shaded polygons mark the short- and long-period regions of subspace 2.
    verts2 = [(-10, 10), (-10, 1), (-1, 1), (-1, 10)]
    poly2 = matplotlib.patches.Polygon(verts2, alpha=0.3, facecolor="r", edgecolor=None, label="short periods")
    ax3.add_patch(poly2)
    verts3 = [(1, -1), (1, -10), (10, -10), (10, -1)]
    poly3 = matplotlib.patches.Polygon(verts3, alpha=0.3, facecolor="y", edgecolor=None, label="long periods")
    ax3.add_patch(poly3)
    ax3.legend(loc="upper right")
# =============================================================================
# NeuroKit
# =============================================================================
def _remove_small(
    peaks, sampling_rate=1000, interval_min=None, relative_interval_min=None, robust=False,
):
    """Drop peaks whose inter-peak interval is too short.

    Peaks are removed when their interval falls below ``interval_min``
    (absolute, seconds) and/or below ``relative_interval_min`` (in
    standardized units). With neither threshold set, the peaks are
    returned untouched.
    """
    if interval_min is None and relative_interval_min is None:
        return peaks

    if interval_min is not None:
        periods = signal_period(peaks, sampling_rate=sampling_rate, desired_length=None)
        peaks = peaks[periods > interval_min]

    if relative_interval_min is not None:
        # Recompute the periods, since the absolute filter may have removed peaks.
        periods = signal_period(peaks, sampling_rate=sampling_rate, desired_length=None)
        peaks = peaks[standardize(periods, robust=robust) > relative_interval_min]

    return peaks
def _interpolate_big(
    peaks, sampling_rate=1000, interval_max=None, relative_interval_max=None, robust=False,
):
    """Correct abnormally long inter-peak intervals by interpolation.

    Peaks terminating an interval above ``interval_max`` (seconds), or above
    ``relative_interval_max`` in standardized units, are removed and replaced
    by interpolated peaks. The number of beats inserted for each outlier
    interval is estimated from the mean of the non-outlier intervals.
    Returns ``peaks`` unchanged when no threshold is given or no correctable
    outlier is found.
    """
    if interval_max is None and relative_interval_max is None:
        return peaks

    interval = signal_period(peaks, sampling_rate=sampling_rate, desired_length=None)
    if relative_interval_max is not None:
        outliers = standardize(interval, robust=robust) > relative_interval_max
    else:
        outliers = interval > interval_max
    outliers_loc = np.where(outliers)[0]
    # interval returned by signal_period at index 0 is the mean of the intervals
    # so it does not actually correspond to whether the first peak is an outlier
    outliers_loc = outliers_loc[outliers_loc != 0]
    # Guard on the correctable outliers rather than np.sum(outliers): an
    # outlier only at index 0 is excluded above and would otherwise trigger a
    # full interpolation pass that provably changes nothing.
    if outliers_loc.size == 0:
        return peaks

    peaks_to_correct = peaks.copy().astype(float)
    interval_without_outliers = interval[np.invert(outliers)]
    mean_interval = np.nanmean(interval_without_outliers)

    # Go through the outliers starting with the highest indices so that the
    # positions of the remaining outliers are not shifted when NaNs are
    # inserted.
    for loc in np.flip(outliers_loc):
        # Number of beats the outlier interval is expected to span, based on
        # the mean of the normal intervals.
        n_nan = round(interval[loc] / mean_interval)
        # Delete the peak corresponding to the large interval and replace it
        # (plus the missing beats) by NaNs.
        peaks_to_correct[loc] = np.nan
        peaks_to_correct = np.insert(peaks_to_correct, loc, [np.nan] * (n_nan - 1))

    # Fill the gaps by interpolating between the surrounding peaks.
    interpolated_peaks = pd.Series(peaks_to_correct).interpolate(limit_area="inside").values
    # If there are missing values remaining (at the edges), remove them.
    peaks = interpolated_peaks[np.invert(np.isnan(interpolated_peaks))].astype(peaks.dtype)
    return peaks
| 23,099 | 36.745098 | 116 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_synchrony.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.signal
def signal_synchrony(signal1, signal2, method="hilbert", window_size=50):
    """**Synchrony (coupling) between two signals**

    Signal coherence refers to the strength of the mutual relationship (i.e., the amount of shared
    information) between two signals. Synchrony is coherence "in phase" (two waveforms are "in
    phase" when the peaks and troughs occur at the same time). Synchrony will always be coherent,
    but coherence need not always be synchronous.

    This function computes a continuous index of coupling between two signals either using the
    ``"hilbert"`` method to get the instantaneous phase synchrony, or using a rolling window
    correlation.

    The instantaneous phase synchrony measures the phase similarities between signals at each
    timepoint. The phase refers to the angle of the signal, calculated through the hilbert
    transform, when it is resonating between -pi to pi degrees. When two signals line up in phase
    their angular difference becomes zero.

    For less clean signals, windowed correlations are widely used because of their simplicity, and
    can be a good and robust approximation of synchrony between two signals. The limitation is the
    need to select a window size.

    Parameters
    ----------
    signal1 : Union[list, np.array, pd.Series]
        Time series in the form of a vector of values.
    signal2 : Union[list, np.array, pd.Series]
        Time series in the form of a vector of values.
    method : str
        The method to use. Can be one of ``"hilbert"`` or ``"correlation"``.
    window_size : int
        Only used if ``method='correlation'``. The number of samples to use for rolling correlation.

    See Also
    --------
    scipy.signal.hilbert, mutual_information

    Returns
    -------
    array
        A vector containing the continuous synchrony (coupling) index between the two
        signals at each timepoint (for the ``"hilbert"`` method, 1 means the signals
        are perfectly in phase at that timepoint).

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      s1 = nk.signal_simulate(duration=10, frequency=1)
      s2 = nk.signal_simulate(duration=10, frequency=1.5)

      coupling1 = nk.signal_synchrony(s1, s2, method="hilbert")
      coupling2 = nk.signal_synchrony(s1, s2, method="correlation", window_size=1000/2)

      @savefig p_signal_synchrony1.png scale=100%
      nk.signal_plot([s1, s2, coupling1, coupling2], labels=["s1", "s2", "hilbert", "correlation"])
      @suppress
      plt.close()

    References
    ----------
    * http://jinhyuncheong.com/jekyll/update/2017/12/10/Timeseries_synchrony_tutorial_and_simulations.html

    """
    if method.lower() in ["hilbert", "phase"]:
        # Instantaneous phase synchrony via the analytic (Hilbert) signal.
        coupling = _signal_synchrony_hilbert(signal1, signal2)
    elif method.lower() in ["correlation"]:
        # Rolling-window Pearson correlation (window_size in samples).
        coupling = _signal_synchrony_correlation(signal1, signal2, window_size=int(window_size))
    else:
        raise ValueError(
            "NeuroKit error: signal_synchrony(): 'method' should be one of 'hilbert' or 'correlation'."
        )
    return coupling
# =============================================================================
# Methods
# =============================================================================
def _signal_synchrony_hilbert(signal1, signal2):
hill1 = scipy.signal.hilbert(signal1)
hill2 = scipy.signal.hilbert(signal2)
phase1 = np.angle(hill1, deg=False)
phase2 = np.angle(hill2, deg=False)
synchrony = 1 - np.sin(np.abs(phase1 - phase2) / 2)
return synchrony
def _signal_synchrony_correlation(signal1, signal2, window_size, center=False):
"""**Calculates pairwise rolling correlation at each time**
Grabs the upper triangle, at each timepoint.
* window: window size of rolling corr in samples
* center: whether to center result (Default: False, so correlation values are listed on the
right.)
"""
data = pd.DataFrame({"y1": signal1, "y2": signal2})
rolled = data.rolling(window=window_size, center=center).corr()
synchrony = rolled["y1"].loc[rolled.index.get_level_values(1) == "y2"].values
# Realign
synchrony = np.append(synchrony[int(window_size / 2) :], np.full(int(window_size / 2), np.nan))
synchrony[np.isnan(synchrony)] = np.nanmean(synchrony)
return synchrony
| 4,293 | 35.084034 | 107 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_simulate.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..misc import NeuroKitWarning, check_random_state, listify
def signal_simulate(
    duration=10,
    sampling_rate=1000,
    frequency=1,
    amplitude=0.5,
    noise=0,
    silent=False,
    random_state=None,
):
    """**Simulate a continuous signal**

    The signal is built as a sum of sinusoids, one per requested
    frequency/amplitude pair, optionally corrupted with Laplace-distributed
    noise. Frequencies that cannot be resolved at the given sampling rate,
    or whose period does not fit within the requested duration, are skipped
    (with a warning unless ``silent=True``).

    Parameters
    ----------
    duration : float
        Desired length of duration (s).
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    frequency : float or list
        Oscillatory frequency of the signal (in Hz, i.e., oscillations per second).
    amplitude : float or list
        Amplitude of the oscillations.
    noise : float
        Noise level (amplitude of the laplace noise).
    silent : bool
        If ``False`` (default), might print warnings if impossible frequencies are queried.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See ``misc.check_random_state`` for further
        information.

    Returns
    -------
    array
        The simulated signal.

    Examples
    --------
    .. ipython:: python

      import pandas as pd
      import neurokit2 as nk

      @savefig p_signal_simulate1.png scale=100%
      pd.DataFrame({
          "1Hz": nk.signal_simulate(duration=5, frequency=1),
          "2Hz": nk.signal_simulate(duration=5, frequency=2),
          "Multi": nk.signal_simulate(duration=5, frequency=[0.5, 3], amplitude=[0.5, 0.2])
      }).plot()
      @suppress
      plt.close()

    """
    n_samples = int(np.rint(duration * sampling_rate))
    period = 1 / sampling_rate
    seconds = np.arange(n_samples) * period

    signal = np.zeros(seconds.size)
    # listify pairs up scalar/list inputs into equal-length lists.
    components = listify(frequency=frequency, amplitude=amplitude)

    for freq, amp in zip(components["frequency"], components["amplitude"]):
        # Apply a very conservative Nyquist criterion in order to ensure
        # sufficiently sampled signals.
        nyquist = sampling_rate * 0.1
        if freq > nyquist:
            if not silent:
                warn(
                    f"Skipping requested frequency"
                    f" of {freq} Hz since it cannot be resolved at the"
                    f" sampling rate of {sampling_rate} Hz. Please increase"
                    f" sampling rate to {freq * 10} Hz or choose frequencies"
                    f" smaller than or equal to {nyquist} Hz.",
                    category=NeuroKitWarning,
                )
            continue
        # Also make sure that at least one full period of the frequency fits
        # within the duration of the signal.
        if (1 / freq) > duration:
            if not silent:
                warn(
                    f"Skipping requested frequency"
                    f" of {freq} Hz since its period of {1 / freq} seconds"
                    f" exceeds the signal duration of {duration} seconds."
                    f" Please choose frequencies larger than"
                    f" {1 / duration} Hz or increase the duration of the"
                    f" signal above {1 / freq} seconds.",
                    category=NeuroKitWarning,
                )
            continue
        signal += _signal_simulate_sinusoidal(x=seconds, frequency=freq, amplitude=amp)

    # Add random noise
    if noise > 0:
        rng = check_random_state(random_state)
        signal += rng.laplace(0, noise, len(signal))

    return signal
# =============================================================================
# Simple Sinusoidal Model
# =============================================================================
def _signal_simulate_sinusoidal(x, frequency=100, amplitude=0.5):
signal = amplitude * np.sin(2 * np.pi * frequency * x)
return signal
| 3,880 | 32.456897 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_binarize.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import sklearn.mixture
def signal_binarize(signal, method="threshold", threshold="auto"):
    """**Binarize a continuous signal**

    Convert a continuous signal into zeros and ones depending on a given threshold.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    method : str
        The algorithm used to discriminate between the two states. Can be one of ``"threshold"``
        (default) or ``"mixture"``. If ``"threshold"``, will consider as activated all points
        which value is superior to the threshold. If ``"mixture"``, will use a Gaussian Mixture
        Model to categorize between the two states.
    threshold : float
        If ``method`` is ``"mixture"``, then it corresponds to the minimum probability required to
        be considered as activated (if ``"auto"``, then 0.5). If ``method`` is ``"threshold"``,
        then it corresponds to the minimum amplitude to detect as onset. If ``"auto"``, takes the
        value between the max and the min.

    Returns
    -------
    list
        A list or array depending on the type passed.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk
      import numpy as np
      import pandas as pd

      signal = np.cos(np.linspace(start=0, stop=20, num=1000))
      binary = nk.signal_binarize(signal)

      @savefig p_signal_binarize.png scale=100%
      pd.DataFrame({"Raw": signal, "Binary": binary}).plot()
      @suppress
      plt.close()

    """
    # Preserve the container type of the input.
    if isinstance(signal, pd.Series):
        signal = signal.copy()  # Avoid annoying pandas warning
        signal[:] = _signal_binarize(signal.values, method=method, threshold=threshold)
        return signal
    if isinstance(signal, list):
        return list(_signal_binarize(np.array(signal), method=method, threshold=threshold))
    return _signal_binarize(signal, method=method, threshold=threshold)
def _signal_binarize(signal, method="threshold", threshold="auto"):
    """Dispatch binarization to the backend selected by `method`."""
    method = method.lower()  # remove capitalised letters
    if method == "mixture":
        return _signal_binarize_mixture(signal, threshold=threshold)
    if method == "threshold":
        return _signal_binarize_threshold(signal, threshold=threshold)
    raise ValueError(
        "NeuroKit error: signal_binarize(): 'method' should be one of 'threshold' or 'mixture'."
    )
# =============================================================================
# Methods
# =============================================================================
def _signal_binarize_threshold(signal, threshold="auto"):
if threshold == "auto":
threshold = np.mean([np.nanmax(signal), np.nanmin(signal)])
if threshold == "mean":
threshold = np.nanmean(signal)
if threshold == "median":
threshold = np.nanmedian(signal)
binary = np.zeros(len(signal))
binary[signal > threshold] = 1
return binary
def _signal_binarize_mixture(signal, threshold="auto"):
    """Binarize via a two-component Gaussian Mixture Model.

    Samples whose probability of belonging to the higher-mean component is at
    least `threshold` (0.5 when "auto") are set to 1, the rest to 0.
    """
    if threshold == "auto":
        threshold = 0.5

    # Fit a two-state Gaussian Mixture Model to the amplitude distribution
    # (fixed random_state keeps the fit deterministic).
    model = sklearn.mixture.GaussianMixture(n_components=2, random_state=333)
    model = model.fit(signal.reshape(-1, 1))

    # Probability of each sample belonging to the component with the larger mean.
    upper_state = np.argmax(model.means_[:, 0])
    probability = model.predict_proba(signal.reshape(-1, 1))[:, upper_state]

    binary = np.zeros(len(signal))
    binary[probability >= threshold] = 1
    return binary
| 3,686 | 33.138889 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_sanitize.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def signal_sanitize(signal):
    """**Signal input sanitization**

    Strip any custom index from a pandas Series and return the underlying
    values; every other input is converted to a plain numpy array.

    Parameters
    ----------
    signal : Series
        The indexed input signal (``pandas Dataframe.set_index()``)

    Returns
    -------
    Series
        The default indexed signal

    Examples
    --------
    .. ipython:: python

      import pandas as pd
      import neurokit2 as nk

      signal = nk.signal_simulate(duration=10, sampling_rate=1000, frequency=1)
      df = pd.DataFrame({'signal': signal, 'id': [x*2 for x in range(len(signal))]})
      df = df.set_index('id')
      default_index_signal = nk.signal_sanitize(df.signal)

    """
    # A Series carrying a non-default (custom) index: discard the index.
    has_custom_index = isinstance(signal, pd.Series) and not isinstance(
        signal.index, pd.RangeIndex
    )
    if has_custom_index:
        return signal.reset_index(drop=True).values
    return np.array(signal)
| 942 | 21.452381 | 85 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/__init__.py | """Submodule for NeuroKit."""
from .signal_autocor import signal_autocor
from .signal_binarize import signal_binarize
from .signal_changepoints import signal_changepoints
from .signal_decompose import signal_decompose
from .signal_detrend import signal_detrend
from .signal_distort import signal_distort
from .signal_fillmissing import signal_fillmissing
from .signal_filter import signal_filter
from .signal_findpeaks import signal_findpeaks
from .signal_fixpeaks import signal_fixpeaks
from .signal_flatline import signal_flatline
from .signal_formatpeaks import signal_formatpeaks
from .signal_interpolate import signal_interpolate
from .signal_merge import signal_merge
from .signal_noise import signal_noise
from .signal_period import signal_period
from .signal_phase import signal_phase
from .signal_plot import signal_plot
from .signal_power import signal_power
from .signal_psd import signal_psd
from .signal_rate import signal_rate
from .signal_recompose import signal_recompose
from .signal_resample import signal_resample
from .signal_sanitize import signal_sanitize
from .signal_simulate import signal_simulate
from .signal_smooth import signal_smooth
from .signal_surrogate import signal_surrogate
from .signal_synchrony import signal_synchrony
from .signal_timefrequency import signal_timefrequency
from .signal_zerocrossings import signal_zerocrossings
# Explicit public API of the `signal` submodule: controls what
# `from neurokit2.signal import *` exposes.
__all__ = [
    "signal_simulate",
    "signal_binarize",
    "signal_resample",
    "signal_zerocrossings",
    "signal_smooth",
    "signal_filter",
    "signal_psd",
    "signal_distort",
    "signal_interpolate",
    "signal_detrend",
    "signal_findpeaks",
    "signal_fixpeaks",
    "signal_formatpeaks",
    "signal_rate",
    "signal_merge",
    "signal_noise",
    "signal_period",
    "signal_plot",
    "signal_phase",
    "signal_power",
    "signal_synchrony",
    "signal_autocor",
    "signal_changepoints",
    "signal_decompose",
    "signal_recompose",
    "signal_surrogate",
    "signal_timefrequency",
    "signal_sanitize",
    "signal_flatline",
    "signal_fillmissing",
]
| 2,067 | 30.815385 | 54 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.