Initial deploy: braindecode model architecture browser
- README.md +83 -6
- app.py +272 -0
- docstring_renderer.py +196 -0
- requirements.txt +7 -0
README.md
CHANGED
@@ -1,12 +1,89 @@
 ---
-title: Model Explorer
-emoji:
-colorFrom:
-colorTo:
+title: Braindecode Model Explorer
+emoji: 🧠
+colorFrom: blue
+colorTo: indigo
 sdk: gradio
-sdk_version:
+sdk_version: 4.44.0
 app_file: app.py
 pinned: false
+license: bsd-3-clause
+short_description: Browse 57 EEG / biosignal architectures from braindecode
+tags:
+- eeg
+- meg
+- ecog
+- biosignal
+- pytorch
+- neuroscience
+- brain-computer-interface
+- deep-learning
 ---
 
-
+# Braindecode Model Explorer
+
+Interactive browser for **57 EEG / biosignal model architectures** from
+[`braindecode`](https://braindecode.org).
+
+For each model you can:
+
+- read the rendered docstring (architecture figure, parameters, references);
+- configure the input signal shape (`n_chans`, `sfreq`, `input_window_seconds`, `n_outputs`);
+- instantiate the model live and inspect parameter count, layer summary
+  (via `torchinfo`), and output shape on a dummy forward pass.
+
+> **No pretrained weights are loaded** — this Space is a pure architecture
+> explorer, runs on the free CPU tier, and never downloads checkpoints.
+> For curated foundation-model weights, see
+> [`huggingface.co/braindecode`](https://huggingface.co/braindecode).
+
+## Models included
+
+All classes that subclass `braindecode.models.base.EEGModuleMixin` are
+auto-discovered at startup. Examples by family:
+
+| Family | Examples |
+|---|---|
+| Foundation models | BIOT, BENDR, SignalJEPA, Labram, EEGPT, CodeBrain, LUNA |
+| Convolutional | EEGNet, Deep4Net, ShallowFBCSPNet, EEGITNet, EEGNeX |
+| Transformer | EEGConformer, ATCNet, MSVTNet, MEDFormer, CTNet |
+| Sleep staging | USleep, SleepStagerChambon2018, AttnSleep, DeepSleepNet |
+| Filter-bank | FBCNet, FBLightConvNet, FBMSNet, IFNet |
+| Other | DGCNN, TSception, SyncNet, REVE, SCCNet |
+
+## Local development
+
+```bash
+pip install -r requirements.txt
+python app.py
+```
+
+Open <http://localhost:7860>.
+
+## How docstrings are rendered
+
+Braindecode docstrings use NumpyDoc + Sphinx extensions (`.. figure::`,
+`:bdg-danger:`, `.. versionadded::`). The `docstring_renderer` module
+maps Sphinx-only directives to plain rST, then renders to HTML via
+`docutils`. No Sphinx build is needed at runtime — the Space stays
+dependency-light and rebuilds in seconds.
+
+## Citation
+
+```bibtex
+@article{HBM:HBM23730,
+  author = {Schirrmeister, Robin Tibor and Springenberg, Jost Tobias
+            and Fiederer, Lukas Dominique Josef and Glasstetter, Martin
+            and Eggensperger, Katharina and Tangermann, Michael and Hutter,
+            Frank and Burgard, Wolfram and Ball, Tonio},
+  title = {Deep learning with convolutional neural networks for EEG
+           decoding and visualization},
+  journal = {Human Brain Mapping},
+  year = {2017},
+  doi = {10.1002/hbm.23730},
+}
+```
+
+## License
+
+BSD-3-Clause, matching the upstream braindecode library.
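The "instantiate the model live" flow the README describes can also be reproduced in a plain Python session. A minimal sketch, assuming a recent braindecode where `EEGNetv4` accepts the `n_chans` / `n_times` / `n_outputs` keywords and `torchinfo` is installed:

```python
import torch
from braindecode.models import EEGNetv4
from torchinfo import summary

# Same signal configuration the Space uses as its default form values.
n_chans, sfreq, window_s, n_outputs = 22, 250.0, 4.0, 4
n_times = int(round(window_s * sfreq))  # 1000 samples

model = EEGNetv4(n_chans=n_chans, n_times=n_times, n_outputs=n_outputs)
print(sum(p.numel() for p in model.parameters()), "parameters")
print(summary(model, input_size=(1, n_chans, n_times), verbose=0))

with torch.no_grad():
    y = model(torch.randn(2, n_chans, n_times))  # dummy forward pass
print(tuple(y.shape))  # expected to end in n_outputs, e.g. (2, 4)
```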
app.py
ADDED
@@ -0,0 +1,272 @@

"""Braindecode Model Explorer — interactive architecture browser.

This Hugging Face Space lets users browse all 57 EEG model architectures
in braindecode, read the rendered docstring (parameters, references,
architecture figure), and instantiate any model with custom signal
shape to inspect its parameter count and layer summary.

No pretrained weights are loaded — this is a pure architecture explorer.

Run locally:
    pip install -r requirements.txt
    python app.py
"""

from __future__ import annotations

import inspect
from typing import Any

import gradio as gr
import torch
from torchinfo import summary

import braindecode.models as M
from braindecode.models.base import EEGModuleMixin

from docstring_renderer import (
    get_signature_str,
    get_source_link,
    render_docstring_html,
)

# ---------------------------------------------------------------------------
# Catalog: discover every EEGModuleMixin subclass exported by braindecode.
# ---------------------------------------------------------------------------


def _discover_models() -> dict[str, type]:
    catalog: dict[str, type] = {}
    for name in sorted(getattr(M, "__all__", []) or dir(M)):
        if name.startswith("_"):
            continue
        obj = getattr(M, name, None)
        if (
            inspect.isclass(obj)
            and issubclass(obj, EEGModuleMixin)
            and obj is not EEGModuleMixin
        ):
            catalog[name] = obj
    return catalog


MODELS: dict[str, type] = _discover_models()
MODEL_NAMES: list[str] = sorted(MODELS.keys())

# ---------------------------------------------------------------------------
# Heuristic defaults for the signal-shape form. Different model families
# expect very different inputs (sleep stagers want 30 s @ 100 Hz; motor-
# imagery models want ~4 s @ 250 Hz). Pick a reasonable default per family.
# ---------------------------------------------------------------------------

DEFAULTS = {
    "sleep": dict(n_chans=2, sfreq=100, input_window_seconds=30.0, n_outputs=5),
    "biot": dict(n_chans=16, sfreq=200, input_window_seconds=10.0, n_outputs=2),
    "bendr": dict(n_chans=20, sfreq=256, input_window_seconds=4.0, n_outputs=2),
    "labram": dict(n_chans=22, sfreq=200, input_window_seconds=4.0, n_outputs=2),
    "default": dict(n_chans=22, sfreq=250, input_window_seconds=4.0, n_outputs=4),
}


def _defaults_for(name: str) -> dict[str, Any]:
    lower = name.lower()
    if "sleep" in lower or name in {"USleep", "AttnSleep", "DeepSleepNet"}:
        return DEFAULTS["sleep"]
    if "biot" in lower:
        return DEFAULTS["biot"]
    if "bendr" in lower:
        return DEFAULTS["bendr"]
    if "labram" in lower or "cbramod" in lower or "eegpt" in lower:
        return DEFAULTS["labram"]
    return DEFAULTS["default"]


# ---------------------------------------------------------------------------
# Rendering helpers
# ---------------------------------------------------------------------------


def _info_card(name: str) -> str:
    cls = MODELS[name]
    sig = get_signature_str(cls)
    link = get_source_link(cls)
    link_html = (
        f'<a href="{link}" target="_blank" '
        f'style="color:#0072B2;text-decoration:none;">View source on GitHub →</a>'
        if link
        else ""
    )
    return (
        f"<div style='background:#f6f8fa;padding:12px 16px;border-radius:8px;"
        f"border:1px solid #d0d7de;margin-bottom:12px;'>"
        f"<div style='font-size:1.3em;font-weight:600;color:#0072B2;"
        f"margin-bottom:4px;'>{name}</div>"
        f"<div style='font-family:Menlo,Consolas,monospace;font-size:0.82em;"
        f"color:#475569;margin-bottom:6px;word-break:break-all;'>{sig}</div>"
        f"<div style='font-size:0.9em;'>{link_html}</div>"
        f"</div>"
    )


def show_model(name: str) -> tuple[str, str, dict, dict, dict, dict]:
    """Update info card, rendered docstring, and form defaults."""
    if name not in MODELS:
        return "", "", {}, {}, {}, {}
    info = _info_card(name)
    doc_html = render_docstring_html(MODELS[name].__doc__)
    d = _defaults_for(name)
    return (
        info,
        doc_html,
        gr.update(value=d["n_chans"]),
        gr.update(value=d["sfreq"]),
        gr.update(value=d["input_window_seconds"]),
        gr.update(value=d["n_outputs"]),
    )


def instantiate(
    name: str,
    n_chans: int,
    sfreq: float,
    window_s: float,
    n_outputs: int,
) -> tuple[str, str]:
    """Instantiate the selected model and run a dummy forward pass."""
    if name not in MODELS:
        return "Pick a model first.", ""

    cls = MODELS[name]
    n_times = int(round(window_s * sfreq))

    kwargs = dict(
        n_chans=int(n_chans),
        sfreq=float(sfreq),
        input_window_seconds=float(window_s),
        n_outputs=int(n_outputs),
    )

    # Drop kwargs the class does not accept (some models do not take all
    # of these in __init__; the mixin infers what it can).
    sig_params = set(inspect.signature(cls.__init__).parameters)
    kwargs = {k: v for k, v in kwargs.items() if k in sig_params}

    try:
        model = cls(**kwargs)
    except Exception as exc:  # noqa: BLE001 — surface any constructor error
        return f"❌ **Failed to instantiate `{name}`** with `{kwargs}`:\n```\n{exc}\n```", ""

    n_params = sum(p.numel() for p in model.parameters())
    n_train = sum(p.numel() for p in model.parameters() if p.requires_grad)

    try:
        info = summary(
            model,
            input_size=(1, int(n_chans), n_times),
            depth=3,
            verbose=0,
            col_names=("output_size", "num_params"),
        )
        summary_str = str(info)
    except Exception as exc:  # noqa: BLE001
        summary_str = f"(torchinfo summary unavailable: {exc})"

    try:
        x = torch.randn(2, int(n_chans), n_times)
        with torch.no_grad():
            y = model(x)
        out_shape = tuple(y.shape) if hasattr(y, "shape") else type(y).__name__
    except Exception as exc:  # noqa: BLE001
        out_shape = f"forward failed: {exc}"

    header = (
        f"### `{name}` instantiated\n\n"
        f"| metric | value |\n|---|---|\n"
        f"| total parameters | **{n_params:,}** |\n"
        f"| trainable parameters | {n_train:,} |\n"
        f"| input shape | `(batch, {n_chans}, {n_times})` |\n"
        f"| output shape | `{out_shape}` |\n"
        f"| input window | {window_s} s @ {sfreq} Hz |\n"
    )
    return header, f"```\n{summary_str}\n```"


# ---------------------------------------------------------------------------
# UI
# ---------------------------------------------------------------------------

INTRO = """
# Braindecode Model Explorer

Browse **57 EEG / biosignal model architectures** from
[braindecode](https://braindecode.org). Read the rendered docstring,
configure signal shape, and instantiate any model live to inspect its
parameter count and layer summary.

> No pretrained weights are loaded — this is a pure architecture browser.
> For weights, see [`huggingface.co/braindecode`](https://huggingface.co/braindecode).
"""


def build_app() -> gr.Blocks:
    with gr.Blocks(
        title="Braindecode Model Explorer",
        theme=gr.themes.Soft(primary_hue="blue"),
    ) as app:
        gr.Markdown(INTRO)

        with gr.Row():
            with gr.Column(scale=1):
                model_dd = gr.Dropdown(
                    choices=MODEL_NAMES,
                    value="EEGNetv4",
                    label="Architecture",
                    interactive=True,
                )
                info_html = gr.HTML(_info_card("EEGNetv4"))

                gr.Markdown("### Signal configuration")
                with gr.Group():
                    n_chans = gr.Number(value=22, label="n_chans", precision=0)
                    sfreq = gr.Number(value=250, label="sfreq (Hz)")
                    window_s = gr.Number(
                        value=4.0, label="input_window_seconds"
                    )
                    n_outputs = gr.Number(
                        value=4, label="n_outputs", precision=0
                    )
                run_btn = gr.Button("Instantiate model", variant="primary")

            with gr.Column(scale=2):
                with gr.Tabs():
                    with gr.TabItem("Documentation"):
                        doc_html = gr.HTML(
                            render_docstring_html(MODELS["EEGNetv4"].__doc__)
                        )
                    with gr.TabItem("Live instance"):
                        result_md = gr.Markdown(
                            "_Press **Instantiate model** to build the network._"
                        )
                        summary_md = gr.Markdown()

        # wiring
        model_dd.change(
            show_model,
            inputs=model_dd,
            outputs=[info_html, doc_html, n_chans, sfreq, window_s, n_outputs],
        )
        run_btn.click(
            instantiate,
            inputs=[model_dd, n_chans, sfreq, window_s, n_outputs],
            outputs=[result_md, summary_md],
        )

        gr.Markdown(
            "---\nMade with [braindecode](https://braindecode.org) · "
            "Source: [github.com/braindecode/braindecode]"
            "(https://github.com/braindecode/braindecode)"
        )

    return app


if __name__ == "__main__":
    build_app().launch()
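A quick local smoke test of the discovered catalog and the `instantiate()` path, outside Gradio, might look like the hypothetical snippet below; it assumes `app.py` is importable from the Space's working directory and that the named models exist in the installed braindecode version.

```python
# Hypothetical smoke test: build a few discovered models with their
# heuristic defaults and confirm the dummy forward pass succeeds.
from app import MODELS, _defaults_for, instantiate

for name in ["EEGNetv4", "ShallowFBCSPNet", "USleep"]:
    if name not in MODELS:
        continue  # the discovered catalog varies across braindecode versions
    d = _defaults_for(name)
    header, _summary = instantiate(
        name, d["n_chans"], d["sfreq"], d["input_window_seconds"], d["n_outputs"]
    )
    status = "OK" if "instantiated" in header else "FAILED"
    print(f"{name}: {status}")
```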
docstring_renderer.py
ADDED
@@ -0,0 +1,196 @@

"""Render braindecode rST docstrings to HTML for the model-explorer Space.

Braindecode docstrings use NumpyDoc + Sphinx extensions:
    .. figure::        (architecture image)
    :bdg-danger:       (sphinx-design colored badges)
    .. versionadded::  (Sphinx admonition)
    .. important::     (admonition)
    .. code-block::    (highlighted code)
    [ref]_             (NumpyDoc citation reference)

Pure docutils does not know about the Sphinx directives. Rather than
spinning up a full Sphinx build inside the Space, we:

1. Pre-process the docstring with regex substitutions that map
   Sphinx-only directives to plain rST equivalents docutils can parse.
2. Hand the result to docutils.publish_parts for rST -> HTML.
3. Wrap with a small CSS style block that matches braindecode.org colors.

This keeps the Space dependency-light (no sphinx at runtime) while still
rendering figures, headings, parameter tables, and references.
"""

from __future__ import annotations

import inspect
import re
from textwrap import dedent

from docutils.core import publish_parts
from docutils.utils import SystemMessage

_BADGE_COLORS = {
    "bdg-danger": "#d9534f",
    "bdg-success": "#5cb85c",
    "bdg-primary": "#0072B2",
    "bdg-info": "#56B4E9",
    "bdg-warning": "#E69F00",
    "bdg-secondary": "#6c757d",
    "bdg-light": "#f0f0f0",
    "bdg-dark": "#343a40",
}

_BADGE_RE = re.compile(r":(bdg-[a-z]+):`([^`]+)`")
_VERSIONADDED_RE = re.compile(r"^\.\. versionadded::\s*(.+)$", re.MULTILINE)
_VERSIONCHANGED_RE = re.compile(r"^\.\. versionchanged::\s*(.+)$", re.MULTILINE)
_CODE_BLOCK_RE = re.compile(r"^(\s*)\.\. code-block::\s*(\w+)?\s*$", re.MULTILINE)


def _replace_badges(text: str) -> str:
    """Convert :bdg-danger:`Foundation Model` to inline raw HTML."""

    def repl(match: re.Match) -> str:
        cls, label = match.group(1), match.group(2)
        color = _BADGE_COLORS.get(cls, "#888")
        # Use rST raw HTML inline pass-through.
        return (
            f"\n\n.. raw:: html\n\n"
            f"   <span style=\"display:inline-block;padding:2px 8px;"
            f"border-radius:4px;background:{color};color:white;"
            f"font-size:11px;font-weight:600;margin-right:4px;\">{label}</span>\n\n"
        )

    return _BADGE_RE.sub(repl, text)


def _replace_versionadded(text: str) -> str:
    """Convert Sphinx versionadded/versionchanged to plain admonitions."""
    text = _VERSIONADDED_RE.sub(
        r".. note::\n\n   *New in version \1.*", text
    )
    text = _VERSIONCHANGED_RE.sub(
        r".. note::\n\n   *Changed in version \1.*", text
    )
    return text


def _normalize_code_block(text: str) -> str:
    """Convert `.. code-block:: python` to vanilla `.. code::` (docutils-OK)."""

    def repl(match: re.Match) -> str:
        indent, lang = match.group(1), match.group(2) or ""
        return f"{indent}.. code:: {lang}".rstrip()

    return _CODE_BLOCK_RE.sub(repl, text)


def _strip_unsupported_directives(text: str) -> str:
    """Drop directives that docutils cannot parse and we do not want rendered.

    Currently: rubric (treated as a small heading) and bibliography-only items.
    """
    text = re.sub(r"^\.\. rubric::\s*(.+)$", r"\n**\1**\n", text, flags=re.MULTILINE)
    return text


def preprocess_docstring(doc: str) -> str:
    """Apply all rST → docutils-friendly transformations."""
    if not doc:
        return ""
    doc = dedent(doc)
    doc = _replace_badges(doc)
    doc = _replace_versionadded(doc)
    doc = _normalize_code_block(doc)
    doc = _strip_unsupported_directives(doc)
    return doc


_STYLE = """
<style>
.bd-doc { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto,
  sans-serif; line-height: 1.55; color: #1f2937; }
.bd-doc h1, .bd-doc h2, .bd-doc h3 { color: #0072B2; margin-top: 1.2em; }
.bd-doc h1 { font-size: 1.5em; border-bottom: 2px solid #0072B2; padding-bottom: 4px; }
.bd-doc h2 { font-size: 1.2em; }
.bd-doc h3 { font-size: 1.05em; }
.bd-doc pre { background: #f6f8fa; padding: 10px 12px; border-radius: 6px;
  font-size: 0.9em; overflow-x: auto; }
.bd-doc code { background: #f0f0f5; padding: 1px 5px; border-radius: 3px;
  font-size: 0.9em; }
.bd-doc pre code { background: transparent; padding: 0; }
.bd-doc img { max-width: 480px; display: block; margin: 12px auto;
  border-radius: 6px; }
.bd-doc table { border-collapse: collapse; margin: 8px 0; }
.bd-doc th, .bd-doc td { border: 1px solid #d0d7de; padding: 4px 10px;
  text-align: left; }
.bd-doc th { background: #f6f8fa; }
.bd-doc .admonition { border-left: 4px solid #0072B2; background: #f0f7ff;
  padding: 8px 14px; margin: 12px 0; border-radius: 4px; }
.bd-doc .admonition.important { border-color: #D55E00; background: #fdf6ec; }
.bd-doc .admonition.note { border-color: #009E73; background: #effaf3; }
.bd-doc .admonition-title { font-weight: 600; margin-bottom: 4px; }
.bd-doc dl.field-list { display: grid; grid-template-columns: max-content auto;
  gap: 4px 12px; }
.bd-doc dl.field-list dt { font-weight: 600; color: #475569; }
</style>
"""


def render_docstring_html(doc: str | None) -> str:
    """Render an rST docstring to a self-contained HTML fragment.

    Returns a string with embedded <style> + <div class="bd-doc">…</div>.
    Failures fall back to a <pre> dump so the Space never blanks out.
    """
    if not doc:
        return _STYLE + "<div class='bd-doc'><em>No docstring available.</em></div>"

    processed = preprocess_docstring(doc)
    try:
        parts = publish_parts(
            source=processed,
            writer_name="html5",
            settings_overrides={
                "report_level": 5,  # suppress all docutils warnings
                "halt_level": 5,
                "embed_stylesheet": False,
                "input_encoding": "unicode",
                "output_encoding": "unicode",
                "doctitle_xform": False,
                "initial_header_level": 2,
            },
        )
        body = parts["html_body"]
    except SystemMessage as exc:  # pragma: no cover — defensive
        body = f"<pre>{processed}</pre><p><em>(rST parse error: {exc})</em></p>"

    return _STYLE + f"<div class='bd-doc'>{body}</div>"


def get_signature_str(cls: type) -> str:
    """Return the formatted __init__ signature for display."""
    try:
        sig = inspect.signature(cls.__init__)
        return f"{cls.__name__}{sig}"
    except (ValueError, TypeError):
        return f"{cls.__name__}(...)"


def get_source_link(cls: type, branch: str = "master") -> str | None:
    """Return a github link to the class definition."""
    try:
        module = inspect.getmodule(cls)
        if module is None or not hasattr(module, "__file__"):
            return None
        rel_path = module.__file__.split("braindecode/", 1)[-1]
        rel_path = "braindecode/" + rel_path
        try:
            _, lineno = inspect.getsourcelines(cls)
        except OSError:
            lineno = 1
        return (
            f"https://github.com/braindecode/braindecode/blob/{branch}/"
            f"{rel_path}#L{lineno}"
        )
    except Exception:
        return None
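To eyeball the renderer's output without launching the Space, one can dump a single model's rendered docstring to disk. A minimal sketch, assuming `EEGConformer` is available in the installed braindecode:

```python
from braindecode.models import EEGConformer
from docstring_renderer import render_docstring_html

# Render one docstring and write it out for inspection in a browser.
html = render_docstring_html(EEGConformer.__doc__)
with open("eegconformer_doc.html", "w", encoding="utf-8") as fh:
    fh.write(html)
print(f"wrote {len(html)} characters of HTML")
```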
requirements.txt
ADDED
@@ -0,0 +1,7 @@

braindecode>=1.5
torch>=2.0
torchinfo>=1.8
gradio>=4.44
docutils>=0.20
mne>=1.11
huggingface_hub>=0.20