repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
flair | flair-master/flair/trainers/plugins/functional/__init__.py | 0 | 0 | 0 | py | |
flair | flair-master/flair/trainers/plugins/functional/anneal_on_plateau.py | import logging
import os
from flair.trainers.plugins.base import TrainerPlugin, TrainingInterrupt
from flair.trainers.plugins.metric_records import MetricRecord
from flair.training_utils import AnnealOnPlateau
log = logging.getLogger("flair")
class AnnealingPlugin(TrainerPlugin):
    """Plugin for annealing logic in Flair.

    Steps an :class:`AnnealOnPlateau` scheduler after every evaluation,
    interrupts training once the learning rate falls below a minimum, and can
    optionally reload the best model whenever the rate is annealed.
    """

    def __init__(
        self,
        base_path,
        min_learning_rate,
        anneal_factor,
        patience,
        initial_extra_patience,
        anneal_with_restarts,
    ) -> None:
        """Store the annealing configuration.

        :param base_path: directory where "best-model.pt" lives (used when restarting).
        :param min_learning_rate: training is interrupted once any group's lr drops below this.
        :param anneal_factor: multiplicative factor handed to AnnealOnPlateau.
        :param patience: epochs without improvement before annealing.
        :param initial_extra_patience: extra patience granted before the first annealing step.
        :param anneal_with_restarts: if True, reload the best model after each annealing step.
        """
        super().__init__()

        # path to store the model
        self.base_path = base_path

        # special annealing modes
        self.anneal_with_restarts = anneal_with_restarts

        # determine the min learning rate
        self.min_learning_rate = min_learning_rate
        self.anneal_factor = anneal_factor
        self.patience = patience
        self.initial_extra_patience = initial_extra_patience

    def store_learning_rate(self):
        """Snapshot the current learning rate and momentum of every optimizer param group."""
        optimizer = self.trainer.optimizer

        self.current_learning_rate = [group["lr"] for group in optimizer.param_groups]

        # Adam-style optimizers keep their momentum term in "betas";
        # SGD-style groups use "momentum" (0 if absent).
        self.current_momentum = [
            group["betas"][0] if "betas" in group else group.get("momentum", 0) for group in optimizer.param_groups
        ]

    @TrainerPlugin.hook
    def after_setup(
        self,
        train_with_dev,
        optimizer,
        **kw,
    ):
        """Instantiate the AnnealOnPlateau scheduler once training is set up.

        :param train_with_dev: if True, dev data is part of training, so the scheduler
            minimizes training loss instead of maximizing the dev score.
        :param optimizer: hook-provided optimizer (the trainer's optimizer is read directly).
        :param kw: additional hook arguments, ignored here.
        :return: None
        """
        # minimize training loss if training with dev data, else maximize dev score
        anneal_mode = "min" if train_with_dev else "max"

        # instantiate the scheduler
        self.scheduler: AnnealOnPlateau = AnnealOnPlateau(
            factor=self.anneal_factor,
            patience=self.patience,
            initial_extra_patience=self.initial_extra_patience,
            mode=anneal_mode,
            verbose=False,
            optimizer=self.trainer.optimizer,
        )

        self.store_learning_rate()

    @TrainerPlugin.hook
    def after_evaluation(self, current_model_is_best, validation_scores, **kw):
        """Step the AnnealOnPlateau scheduler with the latest validation scores.

        :param current_model_is_best: whether the current model is the best so far
            (unused here; annealing relies on the scheduler's own bookkeeping).
        :param validation_scores: score(s) forwarded to ``scheduler.step``.
        :param kw: additional hook arguments, ignored here.
        :return: None
        :raises TrainingInterrupt: if any group's learning rate falls below ``min_learning_rate``.
        """
        reduced_learning_rate: bool = self.scheduler.step(*validation_scores)

        self.store_learning_rate()

        bad_epochs = self.scheduler.num_bad_epochs
        if reduced_learning_rate:
            # report the full patience span that triggered the annealing step
            bad_epochs = self.patience + 1
            log.info(
                f" - {bad_epochs} epochs without improvement (above 'patience')"
                f"-> annealing learning_rate to {self.current_learning_rate}"
            )
        else:
            log.info(f" - {bad_epochs} epochs without improvement")

        # record bad-epoch count as a training metric
        self.trainer.dispatch(
            "metric_recorded",
            MetricRecord.scalar(name="bad_epochs", value=bad_epochs, global_step=self.scheduler.last_epoch + 1),
        )

        # stop training if learning rate becomes too small
        for lr in self.current_learning_rate:
            if lr < self.min_learning_rate:
                raise TrainingInterrupt("learning rate too small - quitting training!")

        # reload last best model if annealing with restarts is enabled
        if self.anneal_with_restarts and reduced_learning_rate and os.path.exists(self.base_path / "best-model.pt"):
            log.info("resetting to best model")
            self.model.load_state_dict(self.model.load(self.base_path / "best-model.pt").state_dict())

    def __str__(self) -> str:
        """Return a short human-readable summary of the annealing configuration."""
        return (
            f"AnnealOnPlateau | "
            f"patience: '{self.patience}', "
            f"anneal_factor: '{self.anneal_factor}', "
            f"min_learning_rate: '{self.min_learning_rate}'"
        )
| 3,975 | 31.859504 | 132 | py |
flair | flair-master/tests/test_lemmatizer.py | import torch
import flair
from flair.data import Sentence
from flair.models import Lemmatizer
def test_words_to_char_indices():
    """Check start/end-symbol and padding handling of Lemmatizer.words_to_char_indices."""
    sentence = Sentence("Hello look what a beautiful day!")
    lemmatizer = Lemmatizer()  # lemmatizer uses standard char dictionary

    # shorthand for the special indices of the character dictionary
    d = lemmatizer.dummy_index
    e = lemmatizer.end_index
    s = lemmatizer.start_index

    string_list = sentence.to_tokenized_string().split()

    # With end symbol, without start symbol, padding in front
    target = torch.tensor(
        [
            [d, d, d, d, 55, 5, 15, 15, 12, e],
            [d, d, d, d, d, 15, 12, 12, 28, e],
            [d, d, d, d, d, 23, 13, 9, 8, e],
            [d, d, d, d, d, d, d, d, 9, e],
            [24, 5, 9, 16, 8, 7, 22, 16, 15, e],
            [d, d, d, d, d, d, 14, 9, 27, e],
            [d, d, d, d, d, d, d, d, 76, e],
        ],
        dtype=torch.long,
    ).to(flair.device)
    out = lemmatizer.words_to_char_indices(string_list, end_symbol=True, start_symbol=False, padding_in_front=True)
    assert torch.equal(target, out)

    # Without end symbol, with start symbol, padding in back
    target = torch.tensor(
        [
            [s, 55, 5, 15, 15, 12, d, d, d, d],
            [s, 15, 12, 12, 28, d, d, d, d, d],
            [s, 23, 13, 9, 8, d, d, d, d, d],
            [s, 9, d, d, d, d, d, d, d, d],
            [s, 24, 5, 9, 16, 8, 7, 22, 16, 15],
            [s, 14, 9, 27, d, d, d, d, d, d],
            [s, 76, d, d, d, d, d, d, d, d],
        ],
        dtype=torch.long,
    ).to(flair.device)
    out = lemmatizer.words_to_char_indices(string_list, end_symbol=False, start_symbol=True, padding_in_front=False)
    assert torch.equal(target, out)

    # Without end symbol, without start symbol, padding in front:
    # only the output shape is checked (7 words, longest word has 9 chars)
    assert lemmatizer.words_to_char_indices(
        string_list, end_symbol=False, start_symbol=False, padding_in_front=True
    ).size() == (7, 9)
| 1,907 | 33.690909 | 116 | py |
flair | flair-master/tests/test_labels.py | from typing import List
from flair.data import Label, Relation, Sentence, Span
def test_token_tags():
    """Token-level labels: adding, querying and removing POS/sentiment tags."""
    sent = Sentence("I love Berlin")

    # tag all three tokens with POS; 'love' additionally gets a sentiment tag
    sent[1].add_label("pos", "verb")
    sent[1].add_label("sentiment", "positive")
    sent[2].add_label("pos", "proper noun")
    sent[0].add_label("pos", "pronoun")

    # three POS labels, in token order, with the expected texts and values
    pos_labels: List[Label] = sent.get_labels("pos")
    assert len(pos_labels) == 3
    for label, (text, value) in zip(
        pos_labels, [("I", "pronoun"), ("love", "verb"), ("Berlin", "proper noun")]
    ):
        assert label.data_point.text == text
        assert label.value == value

    # exactly one sentiment label, attached to 'love'
    sentiment_labels: List[Label] = sent.get_labels("sentiment")
    assert len(sentiment_labels) == 1
    assert sentiment_labels[0].data_point.text == "love"
    assert sentiment_labels[0].value == "positive"

    # per-token view of the same annotations
    assert len(sent) == 3
    assert [token.text for token in sent] == ["I", "love", "Berlin"]
    assert len(sent[0].get_labels("pos")) == 1
    assert len(sent[1].get_labels("pos")) == 1
    assert len(sent[1].labels) == 2
    assert len(sent[2].get_labels("pos")) == 1
    assert sent[1].get_label("pos").value == "verb"
    assert sent[1].get_label("sentiment").value == "positive"

    # dropping the POS tag of the last token leaves two POS labels
    sent[2].remove_labels("pos")
    assert len(sent.get_labels("pos")) == 2
    assert len(sent[0].get_labels("pos")) == 1
    assert len(sent[1].get_labels("pos")) == 1
    assert len(sent[1].labels) == 2
    assert len(sent[2].get_labels("pos")) == 0

    # removing the whole POS layer keeps only the sentiment annotation
    sent.remove_labels("pos")
    print(sent[0].get_labels("pos"))
    assert len(sent.get_labels("pos")) == 0
    assert len(sent.get_labels("sentiment")) == 1
    assert len(sent.labels) == 1
    for token in sent:
        assert len(token.get_labels("pos")) == 0
def test_span_tags():
    """Span-level labels: one span tagged twice, another once, then removal."""
    sent = Sentence("Humboldt Universität zu Berlin is located in Berlin .")
    sent[0:4].add_label("ner", "Organization")
    sent[0:4].add_label("ner", "University")
    sent[7:8].add_label("ner", "City")

    # label view: three NER labels overall
    ner_labels: List[Label] = sent.get_labels("ner")
    assert len(ner_labels) == 3
    assert (ner_labels[0].data_point.text, ner_labels[0].value) == ("Humboldt Universität zu Berlin", "Organization")
    assert (ner_labels[1].data_point.text, ner_labels[1].value) == ("Humboldt Universität zu Berlin", "University")
    assert (ner_labels[2].data_point.text, ner_labels[2].value) == ("Berlin", "City")

    # span view: two spans, the first one doubly labeled
    ner_spans: List[Span] = sent.get_spans("ner")
    assert len(ner_spans) == 2
    assert ner_spans[0].text == "Humboldt Universität zu Berlin"
    assert len(ner_spans[0].get_labels("ner")) == 2
    assert ner_spans[1].text == "Berlin"
    assert ner_spans[1].get_label("ner").value == "City"

    # deleting the university span's NER tags leaves a single label and span
    sent[0:4].remove_labels("ner")
    remaining_labels: List[Label] = sent.get_labels("ner")
    assert len(remaining_labels) == 1
    assert (remaining_labels[0].data_point.text, remaining_labels[0].value) == ("Berlin", "City")
    remaining_spans: List[Span] = sent.get_spans("ner")
    assert len(remaining_spans) == 1
    assert remaining_spans[0].text == "Berlin"
    assert remaining_spans[0].get_label("ner").value == "City"
def test_different_span_tags():
    """Two label types on the same span stay independent across add/remove."""
    sent = Sentence("Humboldt Universität zu Berlin is located in Berlin .")
    sent[0:4].add_label("ner", "Organization")
    sent[0:4].add_label("orgtype", "University")
    sent[7:8].add_label("ner", "City")

    # the NER layer holds two labels with the expected texts and values
    ner_labels: List[Label] = sent.get_labels("ner")
    assert len(ner_labels) == 2
    assert (ner_labels[0].data_point.text, ner_labels[0].value) == ("Humboldt Universität zu Berlin", "Organization")
    assert (ner_labels[1].data_point.text, ner_labels[1].value) == ("Berlin", "City")

    # two NER spans; the first additionally carries the orgtype annotation
    ner_spans: List[Span] = sent.get_spans("ner")
    assert len(ner_spans) == 2
    assert ner_spans[0].text == "Humboldt Universität zu Berlin"
    assert ner_spans[0].get_label("ner").value == "Organization"
    assert ner_spans[0].get_label("orgtype").value == "University"
    assert len(ner_spans[0].get_labels("ner")) == 1
    assert ner_spans[1].text == "Berlin"
    assert ner_spans[1].get_label("ner").value == "City"

    # drop the HU span's NER tag: one NER label and one NER span remain
    sent[0:4].remove_labels("ner")
    ner_labels = sent.get_labels("ner")
    assert len(ner_labels) == 1
    assert (ner_labels[0].data_point.text, ner_labels[0].value) == ("Berlin", "City")
    ner_spans = sent.get_spans("ner")
    assert len(ner_spans) == 1
    assert ner_spans[0].text == "Berlin"
    assert ner_spans[0].get_label("ner").value == "City"

    # the orgtype layer is unaffected by removing the NER labels
    orgtype_labels: List[Label] = sent.get_labels("orgtype")
    assert len(orgtype_labels) == 1
    assert (orgtype_labels[0].data_point.text, orgtype_labels[0].value) == (
        "Humboldt Universität zu Berlin",
        "University",
    )
    orgtype_spans: List[Span] = sent.get_spans("orgtype")
    assert len(orgtype_spans) == 1
    assert orgtype_spans[0].text == "Humboldt Universität zu Berlin"
    assert orgtype_spans[0].get_label("orgtype").value == "University"

    # re-adding the NER tag restores the original annotation state
    sent[0:4].add_label("ner", "Organization")
    ner_labels = sent.get_labels("ner")
    print(ner_labels)
    assert len(ner_labels) == 2
    assert (ner_labels[0].data_point.text, ner_labels[0].value) == ("Humboldt Universität zu Berlin", "Organization")
    assert (ner_labels[1].data_point.text, ner_labels[1].value) == ("Berlin", "City")
    ner_spans = sent.get_spans("ner")
    assert len(ner_spans) == 2
    assert ner_spans[0].text == "Humboldt Universität zu Berlin"
    assert ner_spans[0].get_label("ner").value == "Organization"
    assert ner_spans[0].get_label("orgtype").value == "University"
    assert len(ner_spans[0].get_labels("ner")) == 1
    assert ner_spans[1].text == "Berlin"
    assert ner_spans[1].get_label("ner").value == "City"

    # removing the whole NER layer leaves only the orgtype annotation
    sent.remove_labels("ner")
    assert len(sent.get_labels("ner")) == 0
    assert len(sent.get_spans("ner")) == 0
    assert len(sent.get_spans("orgtype")) == 1
    assert len(sent.get_labels("orgtype")) == 1
    assert len(sent.labels) == 1
    assert len(sent[0:4].get_labels("ner")) == 0
    assert len(sent[0:4].get_labels("orgtype")) == 1
def test_relation_tags():
    """Relation labels: two 'rel' relations, one of them doubly annotated."""
    sent = Sentence("Humboldt Universität zu Berlin is located in Berlin .")

    # two 'rel' labels plus a 'syntactic' label on the second relation
    Relation(sent[0:4], sent[7:8]).add_label("rel", "located in")
    Relation(sent[0:2], sent[3:4]).add_label("rel", "university of")
    Relation(sent[0:2], sent[3:4]).add_label("syntactic", "apposition")

    # two relation labels of type 'rel'
    rel_labels: List[Label] = sent.get_labels("rel")
    assert len(rel_labels) == 2
    assert [label.value for label in rel_labels] == ["located in", "university of"]

    # one syntactic label
    assert len(sent.get_labels("syntactic")) == 1

    # two relation objects; the second one accumulated two labels
    relations: List[Relation] = sent.get_relations("rel")
    assert len(relations) == 2
    assert len(relations[0].labels) == 1
    assert len(relations[1].labels) == 2
def test_sentence_labels():
    """Sentence-level labels: duplicates are kept; removal clears a whole layer."""
    sent = Sentence("I love Berlin")
    sent.add_label("sentiment", "positive")
    sent.add_label("topic", "travelling")

    assert len(sent.labels) == 2
    assert len(sent.get_labels("sentiment")) == 1
    assert len(sent.get_labels("topic")) == 1

    # adding the same topic value again yields a second topic label
    sent.add_label("topic", "travelling")
    assert len(sent.labels) == 3
    assert len(sent.get_labels("sentiment")) == 1
    assert len(sent.get_labels("topic")) == 2

    # removing the topic layer deletes both of its labels
    sent.remove_labels("topic")
    assert len(sent.labels) == 1
    assert len(sent.get_labels("sentiment")) == 1
    assert len(sent.get_labels("topic")) == 0
def test_mixed_labels():
    """Sentence-, token- and span-level labels all count towards sentence.labels."""
    sent = Sentence("I love New York")

    # one sentence-level sentiment label
    sent.add_label("sentiment", "positive")

    # four token-level part-of-speech labels
    for index, tag in [(1, "verb"), (2, "proper noun"), (3, "proper noun"), (0, "pronoun")]:
        sent[index].add_label("pos", tag)

    # one span-level NER label
    sent[2:4].add_label("ner", "City")

    # six labels in total across all layers
    assert len(sent.labels) == 6
    assert len(sent.get_labels("pos")) == 4
    assert len(sent.get_labels("sentiment")) == 1
    assert len(sent.get_labels("ner")) == 1
def test_data_point_equality():
    """Labels on the same token/span share a data point; across kinds they differ."""
    sent = Sentence("George Washington went to Washington .")

    # two labels on the same span and two labels on the same token
    sent[0:2].add_label("span_ner", "PER")
    sent[0:2].add_label("span_other", "Politician")
    sent[4].add_label("ner", "LOC")
    sent[4].add_label("other", "Village")

    ner = sent.get_label("ner")
    other = sent.get_label("other")
    span_ner = sent.get_label("span_ner")
    span_other = sent.get_label("span_other")

    # same token resp. same span -> equal data points
    assert ner.data_point == other.data_point
    assert span_ner.data_point == span_other.data_point

    # token vs. span -> different data points
    assert ner.data_point != span_other.data_point
    assert other.data_point != span_ner.data_point
| 10,789 | 37.673835 | 80 | py |
flair | flair-master/tests/test_datasets.py | import copy
import shutil
import pytest
import flair
import flair.datasets
from flair.data import MultiCorpus, Sentence
from flair.datasets import ColumnCorpus
from flair.datasets.sequence_labeling import (
ONTONOTES,
JsonlCorpus,
JsonlDataset,
MultiFileJsonlCorpus,
)
def test_load_imdb_data(tasks_base_path):
    """IMDB classification corpus (in-memory mode) has 5 sentences per split."""
    corpus = flair.datasets.ClassificationCorpus(
        tasks_base_path / "imdb",
        memory_mode="full",
    )
    for split in (corpus.train, corpus.dev, corpus.test):
        assert len(split) == 5
def test_load_imdb_data_streaming(tasks_base_path):
    """IMDB classification corpus in streaming (disk) mode has 5 sentences per split."""
    corpus = flair.datasets.ClassificationCorpus(
        tasks_base_path / "imdb",
        memory_mode="disk",
    )
    for split in (corpus.train, corpus.dev, corpus.test):
        assert len(split) == 5
def test_load_imdb_data_max_tokens(tasks_base_path):
    """Truncation keeps at most 3 tokens per sentence (in-memory mode)."""
    corpus = flair.datasets.ClassificationCorpus(tasks_base_path / "imdb", memory_mode="full", truncate_to_max_tokens=3)
    for split in (corpus.train, corpus.dev, corpus.test):
        assert len(split[0]) <= 3
def test_load_imdb_data_streaming_max_tokens(tasks_base_path):
    """Truncation keeps at most 3 tokens per sentence in streaming (disk) mode.

    Fix: this test previously passed ``memory_mode="full"`` and therefore
    duplicated test_load_imdb_data_max_tokens exactly; the streaming code
    path was never exercised. Use ``memory_mode="disk"``, matching the
    streaming variant used by test_load_imdb_data_streaming.
    """
    corpus = flair.datasets.ClassificationCorpus(tasks_base_path / "imdb", memory_mode="disk", truncate_to_max_tokens=3)
    assert len(corpus.train[0]) <= 3
    assert len(corpus.dev[0]) <= 3
    assert len(corpus.test[0]) <= 3
def test_load_ag_news_data(tasks_base_path):
    """AG News classification corpus has 10 sentences in each split."""
    corpus = flair.datasets.ClassificationCorpus(tasks_base_path / "ag_news")
    for split in (corpus.train, corpus.dev, corpus.test):
        assert len(split) == 10
def test_load_sequence_labeling_data(tasks_base_path):
    """Fashion column corpus splits into 6 train / 1 dev / 1 test sentences."""
    corpus = flair.datasets.ColumnCorpus(tasks_base_path / "fashion", column_format={0: "text", 2: "ner"})
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (6, 1, 1)
def test_load_sequence_labeling_whitespace_after(tasks_base_path):
    """The 'space-after' column drives detokenization and character offsets."""
    corpus = flair.datasets.ColumnCorpus(
        tasks_base_path / "column_with_whitespaces",
        column_format={0: "text", 1: "ner", 2: "space-after"},
    )
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (1, 1, 1)

    first = corpus.train[0]
    assert first.to_tokenized_string() == "It is a German - owned firm ."
    assert first.to_plain_string() == "It is a German-owned firm."

    # every token must carry character offsets
    for token in first:
        assert token.start_position is not None
        assert token.end_position is not None
def test_load_column_corpus_options(tasks_base_path):
    """Tab delimiter and header-line skipping are honored by ColumnCorpus."""
    corpus = flair.datasets.ColumnCorpus(
        tasks_base_path / "column_corpus_options",
        column_format={0: "text", 1: "ner"},
        column_delimiter="\t",
        skip_first_line=True,
    )
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (1, 1, 1)
    assert corpus.train[0].to_tokenized_string() == "This is New Berlin"
def test_load_span_data(tasks_base_path):
    """Span-labelled column files parse the 'RAB' span at any sentence position."""
    # each file adds one more sentence; the labelled sentence is always the last
    cases = [("span_first.txt", 1), ("span_second.txt", 2), ("span_third.txt", 3)]
    for file_name, expected_length in cases:
        dataset = flair.datasets.ColumnDataset(
            tasks_base_path / "span_labels" / file_name,
            column_name_map={0: "text", 1: "ner"},
        )
        assert len(dataset) == expected_length
        labelled = dataset[expected_length - 1]
        assert labelled[2].text == "RAB"
        assert labelled[2].get_label("ner").value == "PARTA"
def test_load_germeval_data(tasks_base_path):
    """GermEval NER sample corpus: 2 train / 1 dev / 1 test sentences."""
    corpus = flair.datasets.ColumnCorpus(tasks_base_path / "ner_german_germeval", column_format={0: "text", 2: "ner"})
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (2, 1, 1)
def test_load_ud_english_data(tasks_base_path):
    """UD English sample corpus: split sizes plus token-level UPOS tags."""
    corpus = flair.datasets.UD_ENGLISH(tasks_base_path)
    assert (len(corpus.train), len(corpus.test), len(corpus.dev)) == (6, 4, 2)

    # first training sentence starts with "From/ADP the/DET"
    first = corpus.train[0]
    for token, (text, upos) in zip(first, [("From", "ADP"), ("the", "DET")]):
        assert token.text == text
        assert token.get_label("upos").value == upos
def test_load_up_english_data(tasks_base_path):
    """Universal Propositions English corpus exposes frame labels on tokens."""
    corpus = flair.datasets.UP_ENGLISH(tasks_base_path)
    assert (len(corpus.train), len(corpus.test), len(corpus.dev)) == (4, 2, 2)

    sent = corpus.dev[0]
    # an unlabelled token falls back to the zero tag value
    assert sent[2].text == "AP"
    assert sent[2].get_label("frame", zero_tag_value="no_label").value == "no_label"
    # a labelled predicate carries its frame sense
    assert sent[3].text == "comes"
    assert sent[3].get_label("frame").value == "come.03"
def test_load_no_dev_data(tasks_base_path):
    """Without a dev file, a dev split is still produced from the corpus."""
    corpus = flair.datasets.ColumnCorpus(tasks_base_path / "fashion_nodev", column_format={0: "text", 2: "ner"})
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (5, 1, 1)
def test_load_no_dev_data_explicit(tasks_base_path):
    """Explicit train/test file names still yield a dev split."""
    corpus = flair.datasets.ColumnCorpus(
        tasks_base_path / "fashion_nodev",
        column_format={0: "text", 2: "ner"},
        train_file="train.tsv",
        test_file="test.tsv",
    )
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (5, 1, 1)
def test_multi_corpus(tasks_base_path):
    """A MultiCorpus concatenates the splits of its member corpora."""
    germeval = flair.datasets.ColumnCorpus(
        tasks_base_path / "ner_german_germeval", column_format={0: "text", 2: "ner"}
    )
    fashion = flair.datasets.ColumnCorpus(tasks_base_path / "fashion", column_format={0: "text", 2: "ner"})

    combined = MultiCorpus([germeval, fashion])
    assert (len(combined.train), len(combined.dev), len(combined.test)) == (8, 2, 2)
def test_download_load_data(tasks_base_path):
    """Download the full UD English corpus from the web and check split sizes."""
    corpus = flair.datasets.UD_ENGLISH()
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (12544, 2001, 2077)

    # remove the downloaded data again
    shutil.rmtree(flair.cache_root / "datasets" / "ud_english")
def _assert_conllu_dataset(dataset):
    """Shared assertions for the CoNLL-U fixture: NER spans, UPOS tags,
    whitespace flags and relation annotations on sentences 1 and 3."""
    sent1 = dataset[0]

    # sentence 1: three NER spans with their surface texts and types
    assert [label.data_point.text for label in sent1.get_labels("ner")] == ["Larry Page", "Sergey Brin", "Google"]
    assert [label.value for label in sent1.get_labels("ner")] == ["PER", "PER", "ORG"]

    # one UPOS tag per token
    assert [token.get_label("upos").value for token in sent1.tokens] == [
        "PROPN",
        "PROPN",
        "CCONJ",
        "PROPN",
        "PROPN",
        "VERB",
        "PROPN",
        "PUNCT",
    ]

    # whitespace-after flags: no space after the final two tokens
    assert [token.whitespace_after for token in sent1.tokens] == [
        1,
        1,
        1,
        1,
        1,
        1,
        0,
        0,
    ]

    # label-layer counts for sentence 1
    ner_spans1 = sent1.get_labels("ner")
    assert len(ner_spans1) == 3
    upos_spans1 = sent1.get_labels("upos")
    assert len(upos_spans1) == 8

    # two relations; the second connects token 7 to the span of tokens 4-5
    rels1 = sent1.get_labels("relation")
    assert len(rels1) == 2
    assert [token.idx for token in rels1[1].data_point.first] == [7]
    assert [token.idx for token in rels1[1].data_point.second] == [4, 5]

    # sentence 3: three NER labels, eleven UPOS labels, one relation
    sent3 = dataset[2]
    ner_labels3 = sent3.get_labels("ner")
    assert len(ner_labels3) == 3
    upos_labels3 = sent3.get_labels("upos")
    assert len(upos_labels3) == 11
    rels3 = sent3.get_labels("relation")
    assert len(rels3) == 1
    assert [token.idx for token in rels3[0].data_point.first] == [6]
    assert [token.idx for token in rels3[0].data_point.second] == [1, 2]
def test_load_conllu_corpus(tasks_base_path):
    """Load .conllu files lazily (in_memory=False) and verify the annotations."""
    corpus = ColumnCorpus(
        tasks_base_path / "conllu",
        train_file="train.conllu",
        dev_file="train.conllu",
        test_file="train.conllu",
        in_memory=False,
        column_format={1: "text", 2: "upos", 3: "ner", 4: "feats"},
    )
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (4, 4, 4)
    _assert_conllu_dataset(corpus.train)
def test_load_conllu_corpus_in_memory(tasks_base_path):
    """Load .conllu files eagerly (in_memory=True) and verify the annotations."""
    corpus = ColumnCorpus(
        tasks_base_path / "conllu",
        train_file="train.conllu",
        dev_file="train.conllu",
        test_file="train.conllu",
        column_format={1: "text", 2: "upos", 3: "ner", 4: "feats"},
        in_memory=True,
    )
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (4, 4, 4)
    _assert_conllu_dataset(corpus.train)
def test_load_conllu_plus_corpus(tasks_base_path):
    """Load CoNLL-U Plus (.conllup) files lazily and verify the annotations."""
    corpus = ColumnCorpus(
        tasks_base_path / "conllu",
        train_file="train.conllup",
        dev_file="train.conllup",
        test_file="train.conllup",
        column_format={1: "text", 2: "upos", 3: "ner", 4: "feats"},
        in_memory=False,
    )
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (4, 4, 4)
    _assert_conllu_dataset(corpus.train)
def test_load_conllu_corpus_plus_in_memory(tasks_base_path):
    """Load CoNLL-U Plus (.conllup) files eagerly and verify the annotations."""
    corpus = ColumnCorpus(
        tasks_base_path / "conllu",
        train_file="train.conllup",
        dev_file="train.conllup",
        test_file="train.conllup",
        column_format={1: "text", 2: "upos", 3: "ner", 4: "feats"},
        in_memory=True,
    )
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (4, 4, 4)
    _assert_conllu_dataset(corpus.train)
def _assert_universal_dependencies_conllu_dataset(dataset):
    """Shared assertions for the basic Universal Dependencies fixture."""
    sent1: Sentence = dataset[0]

    # whitespace-after flags: no space after the final two tokens
    assert [token.whitespace_after for token in sent1.tokens] == [
        1,
        1,
        1,
        1,
        0,
        0,
    ]

    # morphological features from the FEATS column become token-level labels
    assert len(sent1.get_labels("Number")) == 4
    assert sent1[1].get_labels("Number")[0].value == "Plur"
    assert sent1[1].get_labels("Person")[0].value == "3"
    assert sent1[1].get_labels("Tense")[0].value == "Pres"

    # head indices are currently not checked:
    # assert [token.get_tag("head").value for token in sent1.tokens] == [
    #     "2",
    #     "0",
    #     "4",
    #     "2",
    #     "2",
    #     "2",
    # ]

    # one dependency relation label per token
    assert [token.get_label("deprel").value for token in sent1.tokens] == [
        "nsubj",
        "root",
        "cc",
        "conj",
        "obj",
        "punct",
    ]
def test_load_universal_dependencies_conllu_corpus(tasks_base_path):
    """Load a basic Universal Dependencies .conllu file.

    Only basic UD datasets are covered; multi-word tokens and the "deps"
    column sentence annotations are not supported yet. The default token
    annotation fields are used.
    """
    ud_column_format = {
        1: "text",
        2: "lemma",
        3: "upos",
        4: "pos",
        5: "feats",
        6: "head",
        7: "deprel",
        8: "deps",
        9: "misc",
    }
    corpus = ColumnCorpus(
        tasks_base_path / "conllu",
        train_file="universal_dependencies.conllu",
        dev_file="universal_dependencies.conllu",
        test_file="universal_dependencies.conllu",
        column_format=ud_column_format,
    )
    assert (len(corpus.train), len(corpus.dev), len(corpus.test)) == (1, 1, 1)
    _assert_universal_dependencies_conllu_dataset(corpus.train)
def test_hipe_2022_corpus(tasks_base_path):
# This test covers the complete HIPE 2022 dataset.
# https://github.com/hipe-eval/HIPE-2022-data
# Includes variant with document separator, and all versions of the dataset.
# We have manually checked, that these numbers are correct:
hipe_stats = {
"v1.0": {
"ajmc": {
"de": {"sample": {"sents": 119, "docs": 8, "labels": ["date", "loc", "pers", "scope", "work"]}},
"en": {"sample": {"sents": 83, "docs": 5, "labels": ["date", "loc", "pers", "scope", "work"]}},
},
"hipe2020": {
"de": {
"train": {
"sents": 3470 + 2, # 2 sentences with missing EOS marker
"docs": 103,
"labels": ["loc", "org", "pers", "prod", "time"],
},
"dev": {"sents": 1202, "docs": 33, "labels": ["loc", "org", "pers", "prod", "time"]},
},
"en": {"dev": {"sents": 1045, "docs": 80, "labels": ["loc", "org", "pers", "prod", "time"]}},
"fr": {
"train": {"sents": 5743, "docs": 158, "labels": ["loc", "org", "pers", "prod", "time", "comp"]},
"dev": {"sents": 1244, "docs": 43, "labels": ["loc", "org", "pers", "prod", "time"]},
},
},
"letemps": {
"fr": {
"train": {"sents": 14051, "docs": 414, "labels": ["loc", "org", "pers"]},
"dev": {"sents": 1341, "docs": 51, "labels": ["loc", "org", "pers"]},
}
},
"newseye": {
# +1 offset, because of missing EOS marker at EOD
"de": {
"train": {"sents": 23646 + 1, "docs": 11, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
"dev": {"sents": 1110 + 1, "docs": 12, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
"dev2": {"sents": 1541 + 1, "docs": 12, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
},
"fi": {
"train": {"sents": 1141 + 1, "docs": 24, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
"dev": {"sents": 140 + 1, "docs": 24, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
"dev2": {"sents": 104 + 1, "docs": 21, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
},
"fr": {
"train": {"sents": 7106 + 1, "docs": 35, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
"dev": {"sents": 662 + 1, "docs": 35, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
"dev2": {"sents": 1016 + 1, "docs": 35, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
},
"sv": {
"train": {"sents": 1063 + 1, "docs": 21, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
"dev": {"sents": 126 + 1, "docs": 21, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
"dev2": {"sents": 136 + 1, "docs": 21, "labels": ["HumanProd", "LOC", "ORG", "PER"]},
},
},
"sonar": {
"de": {
"dev": {
"sents": 1603 + 10, # 10 sentences with missing EOS marker
"docs": 10,
"labels": ["LOC", "ORG", "PER"],
}
}
},
"topres19th": {
"en": {
"train": {"sents": 5874, "docs": 309, "labels": ["BUILDING", "LOC", "STREET"]},
"dev": {"sents": 646, "docs": 34, "labels": ["BUILDING", "LOC", "STREET"]},
}
},
}
}
hipe_stats["v2.0"] = copy.deepcopy(hipe_stats["v1.0"])
hipe_stats["v2.0"]["ajmc"] = {
"de": {
"train": {
"sents": 1022 + 2, # 2 sentences with missing EOS marker
"docs": 76,
"labels": ["date", "loc", "object", "pers", "scope", "work"],
},
"dev": {"sents": 192, "docs": 14, "labels": ["loc", "object", "pers", "scope", "work"]},
},
"en": {
"train": {
"sents": 1153 + 1, # 1 sentence with missing EOS marker
"docs": 60,
"labels": ["date", "loc", "object", "pers", "scope", "work"],
},
"dev": {
"sents": 251 + 1, # 1 sentence with missing EOS marker
"docs": 14,
"labels": ["date", "loc", "pers", "scope", "work"],
},
},
"fr": {
"train": {
"sents": 893 + 1, # 1 sentence with missing EOS marker
"docs": 72,
"labels": ["date", "loc", "object", "pers", "scope", "work"],
},
"dev": {
"sents": 201 + 1, # 1 sentence with missing EOS marker
"docs": 17,
"labels": ["pers", "scope", "work"],
},
},
}
hipe_stats["v2.0"]["newseye"]["de"] = {
"train": {"sents": 20839 + 1, "docs": 7, "labels": ["HumanProd", "LOC", "ORG", "PER"]} # missing EOD marker
}
hipe_stats["v2.0"]["sonar"] = {
"de": {
"dev": {
"sents": 816 + 10, # 9 sentences with missing EOS marker + missing EOD
"docs": 10,
"labels": ["LOC", "ORG", "PER"],
}
}
}
hipe_stats["v2.1"] = copy.deepcopy(hipe_stats["v2.0"])
hipe_stats["v2.1"]["hipe2020"]["fr"]["train"] = {
"sents": 5743,
"docs": 158,
"labels": ["loc", "org", "pers", "prod", "time"],
}
# Test data for v2.1 release
hipe_stats["v2.1"]["ajmc"]["de"]["test"] = {
"sents": 224,
"docs": 16,
"labels": ["loc", "object", "pers", "scope", "work"],
}
hipe_stats["v2.1"]["ajmc"]["en"]["test"] = {
"sents": 238,
"docs": 13,
"labels": ["date", "loc", "pers", "scope", "work"],
}
hipe_stats["v2.1"]["ajmc"]["fr"]["test"] = {
"sents": 188 + 1, # 1 sentence with missing EOS marker
"docs": 15,
"labels": ["date", "loc", "pers", "scope", "work"],
}
hipe_stats["v2.1"]["hipe2020"]["de"]["test"] = {
"sents": 1215 + 2, # 2 sentences with missing EOS marker
"docs": 49,
"labels": ["loc", "org", "pers", "prod", "time"],
}
hipe_stats["v2.1"]["hipe2020"]["en"]["test"] = {
"sents": 553,
"docs": 46,
"labels": ["loc", "org", "pers", "prod", "time"],
}
hipe_stats["v2.1"]["hipe2020"]["fr"]["test"] = {
"sents": 1462,
"docs": 43,
"labels": ["loc", "org", "pers", "prod", "time"],
}
hipe_stats["v2.1"]["letemps"]["fr"]["test"] = {"sents": 2381, "docs": 51, "labels": ["loc", "org", "pers"]}
hipe_stats["v2.1"]["newseye"]["de"]["test"] = {
"sents": 3336 + 1, # 1 missing EOD marker
"docs": 13,
"labels": ["HumanProd", "LOC", "ORG", "PER"],
}
hipe_stats["v2.1"]["newseye"]["fi"]["test"] = {
"sents": 390 + 1, # 1 missing EOD marker
"docs": 24,
"labels": ["HumanProd", "LOC", "ORG", "PER"],
}
hipe_stats["v2.1"]["newseye"]["fr"]["test"] = {
"sents": 2534 + 1, # 1 missing EOD marker
"docs": 35,
"labels": ["HumanProd", "LOC", "ORG", "PER"],
}
hipe_stats["v2.1"]["newseye"]["sv"]["test"] = {
"sents": 342 + 1, # 1 missing EOD marker
"docs": 21,
"labels": ["HumanProd", "LOC", "ORG", "PER"],
}
hipe_stats["v2.1"]["sonar"]["de"]["test"] = {
"sents": 807 + 8 + 1, # 8 missing EOS marker + missing EOD
"docs": 10,
"labels": ["LOC", "ORG", "PER"],
}
hipe_stats["v2.1"]["topres19th"]["en"]["test"] = {
"sents": 2001,
"docs": 112,
"labels": ["BUILDING", "LOC", "STREET"],
}
def test_hipe_2022(dataset_version="v2.1", add_document_separator=True):
    """Verify sentence counts and label sets for every configured HIPE-2022 split."""
    for dataset_name, languages in hipe_stats[dataset_version].items():
        for language, splits in languages.items():
            corpus = flair.datasets.NER_HIPE_2022(
                version=dataset_version,
                dataset_name=dataset_name,
                language=language,
                dev_split_name="dev",
                add_document_separator=add_document_separator,
            )
            for split_name, stats in splits.items():
                split_description = f"{dataset_name}@{dataset_version}/{language}#{split_name}"
                current_sents = stats["sents"]
                current_docs = stats["docs"]
                current_labels = set(stats["labels"])
                # With document separators enabled, each document contributes
                # one extra "sentence" on top of the raw sentence count.
                total_sentences = current_sents + current_docs if add_document_separator else stats["sents"]
                if split_name == "train":
                    assert (
                        len(corpus.train) == total_sentences
                    ), f"Sentence count mismatch for {split_description}: {len(corpus.train)} vs. {total_sentences}"
                    gold_labels = set(corpus.make_label_dictionary(label_type="ner").get_items())
                    assert (
                        current_labels == gold_labels
                    ), f"Label mismatch for {split_description}: {current_labels} vs. {gold_labels}"
                elif split_name in ["dev", "sample"]:
                    assert (
                        len(corpus.dev) == total_sentences
                    ), f"Sentence count mismatch for {split_description}: {len(corpus.dev)} vs. {total_sentences}"
                    # Point the train split at dev so the label dictionary
                    # (presumably built from train — see make_label_dictionary)
                    # reflects the dev data.
                    corpus._train = corpus._dev
                    gold_labels = set(corpus.make_label_dictionary(label_type="ner").get_items())
                    assert (
                        current_labels == gold_labels
                    ), f"Label mismatch for {split_description}: {current_labels} vs. {gold_labels}"
                elif split_name == "dev2":
                    # dev2 requires a fresh corpus instance configured for it.
                    corpus = flair.datasets.NER_HIPE_2022(
                        version=dataset_version,
                        dataset_name=dataset_name,
                        language=language,
                        dev_split_name="dev2",
                        add_document_separator=add_document_separator,
                    )
                    corpus._train = corpus._dev
                    gold_labels = set(corpus.make_label_dictionary(label_type="ner").get_items())
                    assert (
                        len(corpus.dev) == total_sentences
                    ), f"Sentence count mismatch for {split_description}: {len(corpus.dev)} vs. {total_sentences}"
                    assert (
                        current_labels == gold_labels
                    ), f"Label mismatch for {split_description}: {current_labels} vs. {gold_labels}"


# Exercise every release with and without document separators.
test_hipe_2022(dataset_version="v1.0", add_document_separator=True)
test_hipe_2022(dataset_version="v1.0", add_document_separator=False)
test_hipe_2022(dataset_version="v2.0", add_document_separator=True)
test_hipe_2022(dataset_version="v2.0", add_document_separator=False)
test_hipe_2022(dataset_version="v2.1", add_document_separator=True)
test_hipe_2022(dataset_version="v2.1", add_document_separator=False)
def test_icdar_europeana_corpus(tasks_base_path):
    """Check sentence counts for all splits of the ICDAR Europeana corpus.

    This test covers the complete ICDAR Europeana corpus:
    https://github.com/stefan-it/historic-domain-adaptation-icdar
    """
    gold_stats = {"fr": {"train": 7936, "dev": 992, "test": 992}, "nl": {"train": 5777, "dev": 722, "test": 723}}

    def check_number_sentences(reference: int, actual: int, split_name: str):
        assert actual == reference, f"Mismatch in number of sentences for {split_name} split"

    for language in ["fr", "nl"]:
        corpus = flair.datasets.NER_ICDAR_EUROPEANA(language=language)
        # BUG FIX: the call sites previously passed (measured, gold) into
        # parameters named (reference, actual) — equality is symmetric, so
        # behavior is unchanged, but the intent is now stated correctly.
        check_number_sentences(gold_stats[language]["train"], len(corpus.train), "train")
        check_number_sentences(gold_stats[language]["dev"], len(corpus.dev), "dev")
        check_number_sentences(gold_stats[language]["test"], len(corpus.test), "test")
def test_masakhane_corpus(tasks_base_path):
    """Check sentence counts for every language in MasakhaNER v1 and v2.

    This test covers the complete MasakhaNER dataset, including support for v1 and v2.
    """
    supported_versions = ["v1", "v2"]
    supported_languages = {
        "v1": ["amh", "hau", "ibo", "kin", "lug", "luo", "pcm", "swa", "wol", "yor"],
        "v2": [
            "bam", "bbj", "ewe", "fon", "hau", "ibo", "kin", "lug", "mos", "pcm",
            "nya", "sna", "swa", "tsn", "twi", "wol", "xho", "yor", "zul",
        ],
    }
    # Gold per-split sentence counts, keyed by version and language code.
    masakhane_stats = {
        "v1": {
            "amh": {"train": 1750, "dev": 250, "test": 500},
            "hau": {"train": 1912, "dev": 276, "test": 552},
            "ibo": {"train": 2235, "dev": 320, "test": 638},
            "kin": {"train": 2116, "dev": 302, "test": 605},
            "lug": {"train": 1428, "dev": 200, "test": 407},
            "luo": {"train": 644, "dev": 92, "test": 186},
            "pcm": {"train": 2124, "dev": 306, "test": 600},
            "swa": {"train": 2109, "dev": 300, "test": 604},
            "wol": {"train": 1871, "dev": 267, "test": 539},
            "yor": {"train": 2171, "dev": 305, "test": 645},
        },
        "v2": {
            "bam": {"train": 4462, "dev": 638, "test": 1274},
            "bbj": {"train": 3384, "dev": 483, "test": 966},
            "ewe": {"train": 3505, "dev": 501, "test": 1001},
            "fon": {"train": 4343, "dev": 621, "test": 1240},
            "hau": {"train": 5716, "dev": 816, "test": 1633},
            "ibo": {"train": 7634, "dev": 1090, "test": 2181},
            "kin": {"train": 7825, "dev": 1118, "test": 2235},
            "lug": {"train": 4942, "dev": 706, "test": 1412},
            "mos": {"train": 4532, "dev": 648, "test": 1294},
            "pcm": {"train": 5646, "dev": 806, "test": 1613},
            "nya": {"train": 6250, "dev": 893, "test": 1785},
            "sna": {"train": 6207, "dev": 887, "test": 1773},
            "swa": {"train": 6593, "dev": 942, "test": 1883},
            "tsn": {"train": 3489, "dev": 499, "test": 996},
            "twi": {"train": 4240, "dev": 605, "test": 1211},
            "wol": {"train": 4593, "dev": 656, "test": 1312},
            "xho": {"train": 5718, "dev": 817, "test": 1633},
            "yor": {"train": 6876, "dev": 983, "test": 1964},
            "zul": {"train": 5848, "dev": 836, "test": 1670},
        },
    }

    def check_number_sentences(reference: int, actual: int, split_name: str, language: str, version: str):
        assert actual == reference, f"Mismatch in number of sentences for {language}@{version}/{split_name}"

    for version in supported_versions:
        for language in supported_languages[version]:
            corpus = flair.datasets.NER_MASAKHANE(languages=language, version=version)
            gold_stats = masakhane_stats[version][language]
            # BUG FIX: arguments were swapped w.r.t. the helper's parameter names
            # (measured count went into `reference`, gold count into `actual`);
            # equality is symmetric, so behavior is unchanged, but the intent is
            # now stated correctly.
            check_number_sentences(gold_stats["train"], len(corpus.train), "train", language, version)
            check_number_sentences(gold_stats["dev"], len(corpus.dev), "dev", language, version)
            check_number_sentences(gold_stats["test"], len(corpus.test), "test", language, version)
def test_nermud_corpus(tasks_base_path):
    """Check sentence counts for all NERMuD domains.

    This test covers the NERMuD dataset. Official stats can be found here:
    https://github.com/dhfbk/KIND/tree/main/evalita-2023
    """
    gold_stats = {
        "WN": {"train": 10912, "dev": 2594},
        "FIC": {"train": 11423, "dev": 1051},
        "ADG": {"train": 5147, "dev": 1122},
    }

    def check_number_sentences(reference: int, actual: int, split_name: str):
        assert actual == reference, f"Mismatch in number of sentences for {split_name} split"

    for domain, stats in gold_stats.items():
        corpus = flair.datasets.NER_NERMUD(domains=domain)
        # BUG FIX: gold counts now go into `reference`, measured counts into
        # `actual`, matching the helper's parameter names (behavior unchanged).
        check_number_sentences(stats["train"], len(corpus.train), "train")
        check_number_sentences(stats["dev"], len(corpus.dev), "dev")
def test_multi_file_jsonl_corpus_should_use_label_type(tasks_base_path):
    """An explicit label_type must be applied to sentences from all split files."""
    corpus = MultiFileJsonlCorpus(
        train_files=[tasks_base_path / "jsonl/train.jsonl"],
        dev_files=[tasks_base_path / "jsonl/testa.jsonl"],
        test_files=[tasks_base_path / "jsonl/testb.jsonl"],
        label_type="pos",
    )
    for sent in corpus.get_all_sentences():
        assert sent.has_label("pos")
        assert not sent.has_label("ner")


def test_jsonl_corpus_should_use_label_type(tasks_base_path):
    """Folder-based corpus: the configured label_type replaces the default one."""
    corpus = JsonlCorpus(tasks_base_path / "jsonl", label_type="pos")
    for sent in corpus.get_all_sentences():
        assert sent.has_label("pos")
        assert not sent.has_label("ner")


def test_jsonl_dataset_should_use_label_type(tasks_base_path):
    """Tests whether the dataset respects the label_type parameter."""
    dataset = JsonlDataset(tasks_base_path / "jsonl" / "train.jsonl", label_type="pos")  # use other type
    for sent in dataset.sentences:
        assert sent.has_label("pos")
        assert not sent.has_label("ner")


def test_reading_jsonl_dataset_should_be_successful(tasks_base_path):
    """Tests reading a JsonlDataset containing multiple tagged entries."""
    dataset = JsonlDataset(tasks_base_path / "jsonl" / "train.jsonl")
    assert len(dataset.sentences) == 5
    assert len(dataset.sentences[0].get_labels("ner")) == 1
    assert dataset.sentences[0][2:4].get_label("ner").value == "LOC"


def test_simple_folder_jsonl_corpus_should_load(tasks_base_path):
    """A plain folder of jsonl files loads into a corpus of 11 sentences."""
    corpus = JsonlCorpus(tasks_base_path / "jsonl")
    assert len(corpus.get_all_sentences()) == 11


def test_jsonl_corpus_loads_spans(tasks_base_path):
    """NER annotations must come back as spans on the loaded sentences."""
    corpus = JsonlCorpus(tasks_base_path / "jsonl")
    assert corpus.train is not None
    first_sentence = corpus.train[0]
    assert len(first_sentence.get_spans("ner")) > 0
def test_ontonotes_download():
    """The configured archive URL must at least parse as a proper URL."""
    from urllib.parse import urlparse

    parsed = urlparse(ONTONOTES.archive_url)
    # a usable archive URL needs both a scheme and a host
    assert all([parsed.scheme, parsed.netloc])


def test_ontonotes_extraction(tasks_base_path):
    """Unpack the tiny test archive and load it as an OntoNotes corpus."""
    import os
    import tempfile

    from flair.file_utils import unpack_file

    ontonotes_path = tasks_base_path / "ontonotes"
    with tempfile.TemporaryDirectory() as tmp_dir:
        unpack_file(ontonotes_path / "tiny-conll-2012.zip", tmp_dir, "zip", True)
        assert "conll-2012" in os.listdir(tmp_dir)
        corpus = ONTONOTES(base_path=tmp_dir)
        label_dictionary = corpus.make_label_dictionary("ner")
        assert len(label_dictionary) == 14
        assert label_dictionary.span_labels
        # restricting to one domain must yield a strict subset of the training data
        domain_specific_corpus = ONTONOTES(base_path=tmp_dir, domain=["bc"])
        assert len(corpus.train) > len(domain_specific_corpus.train)
# Fixture files for the MultiFileJsonlCorpus parametrized test below.
TRAIN_FILE = "tests/resources/tasks/jsonl/train.jsonl"
TESTA_FILE = "tests/resources/tasks/jsonl/testa.jsonl"
# BUG FIX: TESTB_FILE previously pointed at testa.jsonl (copy-paste slip).
# The expected sizes in the parametrized test remain valid: the folder corpus
# holds 11 sentences with train=5 and testa=3, so testb.jsonl contributes 3.
TESTB_FILE = "tests/resources/tasks/jsonl/testb.jsonl"
@pytest.mark.parametrize(
    ("train_files", "dev_files", "test_files", "expected_size"),
    [
        # full corpus: train + dev + test
        ([TRAIN_FILE], [TESTA_FILE], [TESTB_FILE], 11),
        # empty dev list
        ([TRAIN_FILE], [], [TESTB_FILE], 8),
        # no test split at all
        ([TRAIN_FILE], [], None, 5),
        # dev-only corpus
        (None, [TESTA_FILE], None, 3),
        # the same file may appear in several splits
        ([TRAIN_FILE, TESTA_FILE], [TESTA_FILE], [TESTB_FILE], 14),
    ],
)
def test_corpus_with_single_files_should_load(train_files, dev_files, test_files, expected_size):
    """Any combination of per-split files yields a corpus of the expected total size."""
    corpus = MultiFileJsonlCorpus(train_files, dev_files, test_files)
    assert len(corpus.get_all_sentences()) == expected_size
def test_empty_corpus_should_raise_error():
    """Constructing a corpus without any data must fail with a clear message."""
    with pytest.raises(RuntimeError) as excinfo:
        MultiFileJsonlCorpus(None, None, None)
    assert str(excinfo.value) == "No data provided when initializing corpus object."
| 32,776 | 34.168455 | 120 | py |
flair | flair-master/tests/test_tokenize_sentence.py | from typing import List
import pytest
import flair
from flair.data import Sentence, Token
from flair.splitter import (
NewlineSentenceSplitter,
NoSentenceSplitter,
SciSpacySentenceSplitter,
SegtokSentenceSplitter,
SpacySentenceSplitter,
TagSentenceSplitter,
)
from flair.tokenization import (
JapaneseTokenizer,
SciSpacyTokenizer,
SegtokTokenizer,
SpaceTokenizer,
SpacyTokenizer,
TokenizerWrapper,
)
def test_create_sentence_on_empty_string():
    """An empty input string yields a sentence with no tokens."""
    empty = Sentence("")
    assert len(empty.tokens) == 0


def test_create_sentence_with_newline():
    """Whitespace tokens such as "\\n" survive both construction paths."""
    # pre-tokenized input keeps explicit whitespace tokens
    from_list = Sentence(["I", "\t", "ich", "\n", "you", "\t", "du", "\n"])
    assert len(from_list.tokens) == 8
    assert from_list.tokens[3].text == "\n"
    # untokenized input with use_tokenizer=False splits on whitespace
    from_text = Sentence("I \t ich \n you \t du \n", use_tokenizer=False)
    assert len(from_text.tokens) == 8
    assert from_text.tokens[0].start_position == 0
    assert from_text.tokens[3].text == "\n"


def test_create_sentence_with_extra_whitespace():
    """Extra whitespace between words does not produce extra tokens."""
    sentence = Sentence("I love Berlin .")
    assert len(sentence.tokens) == 4
    # get_token uses 1-based indexing
    for index, expected_text in enumerate(["I", "love", "Berlin", "."], start=1):
        assert sentence.get_token(index).text == expected_text


def test_create_sentence_difficult_encoding():
    """Emoji, variation selectors and zero-width characters tokenize cleanly."""
    cases = [
        ("so out of the norm ❤ ️ enjoyed every moment️", 9),
        (
            "equivalently , accumulating the logs as :( 6 ) sl = 1N ∑ t = 1Nlogp "
            "( Ll | xt \u200b , θ ) where "
            "p ( Ll | xt \u200b , θ ) represents the class probability output",
            37,
        ),
        ("This guy needs his own show on Discivery Channel ! ", 10),
    ]
    for text, expected_length in cases:
        assert len(Sentence(text)) == expected_length
    # explicit tokenizer flag on a contraction-like prefix
    assert len(Sentence("n't have new vintages.", use_tokenizer=True)) == 5


def test_create_sentence_word_by_word():
    """A sentence can be assembled from individual Token objects."""
    words = ["Munich", "and", "Berlin", "are", "nice", "cities", "."]
    sentence = Sentence([Token(word) for word in words])
    assert sentence.to_tokenized_string() == "Munich and Berlin are nice cities ."


def test_create_sentence_pretokenized():
    """Pre-tokenized input is taken over verbatim, token for token."""
    pretokenized = ["The", "grass", "is", "green", "."]
    sentence = Sentence(pretokenized)
    assert [token.text for token in sentence] == pretokenized
def test_create_sentence_without_tokenizer():
    """With use_tokenizer=False the text is only split on spaces."""
    sentence = Sentence("I love Berlin.", use_tokenizer=False)
    expected = [(0, "I"), (2, "love"), (7, "Berlin.")]
    assert len(sentence.tokens) == len(expected)
    for token, (start, text) in zip(sentence.tokens, expected):
        assert token.start_position == start
        assert token.text == text


def test_create_sentence_with_default_tokenizer():
    """The default tokenizer separates the trailing period into its own token."""
    sentence = Sentence("I love Berlin.", use_tokenizer=True)
    expected = [(0, "I"), (2, "love"), (7, "Berlin"), (13, ".")]
    assert len(sentence.tokens) == len(expected)
    for token, (start, text) in zip(sentence.tokens, expected):
        assert token.start_position == start
        assert token.text == text


def test_create_sentence_with_segtok():
    """SegtokTokenizer splits punctuation off the last word."""
    sentence = Sentence("I love Berlin.", use_tokenizer=SegtokTokenizer())
    assert [token.text for token in sentence.tokens] == ["I", "love", "Berlin", "."]


def test_create_sentence_with_custom_tokenizer():
    """A custom wrapper tokenizer controls tokenization completely."""
    sentence = Sentence("I love Berlin.", use_tokenizer=TokenizerWrapper(no_op_tokenizer))
    assert len(sentence.tokens) == 1
    assert sentence.tokens[0].start_position == 0
    assert sentence.tokens[0].text == "I love Berlin."
@pytest.mark.skip(reason="SpacyTokenizer needs optional requirements, so we skip the test by default")
def test_create_sentence_with_spacy_tokenizer():
    """SpacyTokenizer produces the same segmentation as the default tokenizer here."""
    sentence = Sentence("I love Berlin.", use_tokenizer=SpacyTokenizer("en_core_sci_sm"))
    expected = [(0, "I"), (2, "love"), (7, "Berlin"), (13, ".")]
    assert len(sentence.tokens) == len(expected)
    for token, (start, text) in zip(sentence.tokens, expected):
        assert token.start_position == start
        assert token.text == text


def test_create_sentence_using_japanese_tokenizer():
    """The janome-based tokenizer segments Japanese text without spaces."""
    sentence = Sentence("私はベルリンが好き", use_tokenizer=JapaneseTokenizer("janome"))
    assert [token.text for token in sentence.tokens] == ["私", "は", "ベルリン", "が", "好き"]


@pytest.mark.skip(reason="SciSpacyTokenizer need optional requirements, so we skip the test by default")
def test_create_sentence_using_scispacy_tokenizer():
    """SciSpacy tokenization of a biomedical sentence: texts, offsets, whitespace."""
    sentence = Sentence(
        "Spinal and bulbar muscular atrophy (SBMA) is an inherited motor neuron",
        use_tokenizer=SciSpacyTokenizer(),
    )
    expected = [
        ("Spinal", 0),
        ("and", 7),
        ("bulbar", 11),
        ("muscular", 18),
        ("atrophy", 27),
        ("(", 35),
        ("SBMA", 36),
        (")", 40),
        ("is", 42),
        ("an", 45),
        ("inherited", 48),
        ("motor", 58),
        ("neuron", 64),
    ]
    assert len(sentence.tokens) == len(expected)
    for token, (text, start) in zip(sentence.tokens, expected):
        assert token.text == text
        assert token.start_position == start
    # whitespace bookkeeping around the parenthesized abbreviation
    assert sentence.tokens[4].whitespace_after == 1
    assert sentence.tokens[5].whitespace_after != 1
    assert sentence.tokens[6].whitespace_after != 1
    assert sentence.tokens[7].whitespace_after == 1
def test_split_text_segtok():
    """Segtok splitting yields two sentences with correct offsets and tokens."""
    splitter = SegtokSentenceSplitter()
    sentences = splitter.split("I love Berlin. Berlin is a great city.")
    assert len(sentences) == 2
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 4
    assert sentences[1].start_position == 15
    assert len(sentences[1].tokens) == 6
    # with a pass-through tokenizer each sentence becomes a single token
    splitter = SegtokSentenceSplitter(tokenizer=TokenizerWrapper(no_op_tokenizer))
    sentences = splitter.split("I love Berlin. Berlin is a great city.")
    assert len(sentences) == 2
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 1
    assert sentences[1].start_position == 15
    assert len(sentences[1].tokens) == 1


def test_split_text_nosplit():
    """NoSentenceSplitter always returns exactly one sentence."""
    splitter = NoSentenceSplitter()
    sentences = splitter.split("I love Berlin")
    assert len(sentences) == 1
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 3
    splitter = NoSentenceSplitter(TokenizerWrapper(no_op_tokenizer))
    sentences = splitter.split("I love Berlin")
    assert len(sentences) == 1
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 1
def test_split_text_on_tag():
    """TagSentenceSplitter splits on the configured marker string."""
    splitter = TagSentenceSplitter(tag="#!")
    sentences = splitter.split("I love Berlin#!Me too")
    assert len(sentences) == 2
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 3
    assert sentences[1].start_position == 15
    assert len(sentences[1].tokens) == 2
    splitter = TagSentenceSplitter(tag="#!", tokenizer=TokenizerWrapper(no_op_tokenizer))
    sentences = splitter.split("I love Berlin#!Me too")
    assert len(sentences) == 2
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 1
    assert sentences[1].start_position == 15
    assert len(sentences[1].tokens) == 1
    # no tag, repeated tag, and tag fragments inside words
    assert len(splitter.split("I love Berlin Me too")) == 1
    assert len(splitter.split("I love Berlin#!#!Me too")) == 2
    assert len(splitter.split("I love Berl#! #!inMe too")) == 2
def test_split_text_on_newline():
    """NewlineSentenceSplitter splits on "\\n" and keeps original offsets."""
    newline_splitter = NewlineSentenceSplitter()
    sentences = newline_splitter.split("I love Berlin\nMe too")
    assert len(sentences) == 2
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 3
    # BUG FIX: this line previously re-asserted sentences[0].start_position == 0,
    # so the second sentence's offset was never checked; 14 matches the parallel
    # assertion in the TokenizerWrapper variant below.
    assert sentences[1].start_position == 14
    assert len(sentences[1].tokens) == 2
    newline_splitter = NewlineSentenceSplitter(tokenizer=TokenizerWrapper(no_op_tokenizer))
    sentences = newline_splitter.split("I love Berlin\nMe too")
    assert len(sentences) == 2
    assert len(sentences[0].tokens) == 1
    assert sentences[1].start_position == 14
    assert len(sentences[1].tokens) == 1
    # no newline, consecutive newlines, and a whitespace-only line
    sentences = newline_splitter.split("I love Berlin Me too")
    assert len(sentences) == 1
    sentences = newline_splitter.split("I love Berlin\n\nMe too")
    assert len(sentences) == 2
    sentences = newline_splitter.split("I love Berlin\n \nMe too")
    assert len(sentences) == 2
@pytest.mark.skip(reason="SpacySentenceSplitter need optional requirements, so we skip the test by default")
def test_split_text_spacy():
    """SpacySentenceSplitter: offsets and token counts on two inputs."""
    splitter = SpacySentenceSplitter("en_core_sci_sm")
    sentences = splitter.split("This a sentence. And here is another one.")
    assert len(sentences) == 2
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 4
    assert sentences[1].start_position == 17
    assert len(sentences[1].tokens) == 6
    sentences = splitter.split("VF inhibits something. ACE-dependent (GH+) issuses too.")
    assert len(sentences) == 2
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 4
    assert sentences[1].start_position == 23
    assert len(sentences[1].tokens) == 7
    # a pass-through tokenizer collapses each sentence into one token
    splitter = SpacySentenceSplitter("en_core_sci_sm", tokenizer=TokenizerWrapper(no_op_tokenizer))
    sentences = splitter.split("This a sentence. And here is another one.")
    assert len(sentences) == 2
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 1
    assert sentences[1].start_position == 17
    assert len(sentences[1].tokens) == 1


@pytest.mark.skip(reason="SciSpacySentenceSplitter need optional requirements, so we skip the test by default")
def test_split_text_scispacy():
    """SciSpacySentenceSplitter tokenizes the biomedical variant more finely."""
    splitter = SciSpacySentenceSplitter()
    sentences = splitter.split("VF inhibits something. ACE-dependent (GH+) issuses too.")
    assert len(sentences) == 2
    assert sentences[0].start_position == 0
    assert len(sentences[0].tokens) == 4
    assert sentences[1].start_position == 23
    assert len(sentences[1].tokens) == 9
def test_print_sentence_tokenized():
    """to_tokenized_string separates every token with a single space."""
    sentence = Sentence("I love Berlin.", use_tokenizer=SegtokTokenizer())
    assert sentence.to_tokenized_string() == "I love Berlin ."


def test_print_original_text():
    """to_original_text reproduces the exact input, whatever the tokenizer."""
    difficult_text = (
        'Schartau sagte dem " Tagesspiegel " vom Freitag , Fischer sei " '
        "in einer Weise aufgetreten , die alles andere als überzeugend "
        'war " .'
    )
    # default tokenization round-trips to the original text
    for text in (": nation on", "I love Berlin.", difficult_text):
        assert text == Sentence(text).to_original_text()
    # and so does segtok tokenization
    for text in (": nation on", difficult_text):
        assert text == Sentence(text, use_tokenizer=SegtokTokenizer()).to_original_text()
def test_print_sentence_plain(tasks_base_path):
    """to_plain_string re-attaches punctuation after infer_space_after()."""
    sentence = Sentence("I love Berlin.", use_tokenizer=SegtokTokenizer())
    assert sentence.to_plain_string() == "I love Berlin."
    corpus = flair.datasets.NER_GERMAN_GERMEVAL(base_path=tasks_base_path)
    first = corpus.train[0]
    first.infer_space_after()
    assert first.to_tokenized_string() == (
        'Schartau sagte dem " Tagesspiegel " vom Freitag , Fischer sei " in '
        "einer Weise aufgetreten , "
        'die alles andere als überzeugend war " .'
    )
    assert first.to_plain_string() == (
        'Schartau sagte dem "Tagesspiegel" vom Freitag, Fischer sei "in einer '
        "Weise aufgetreten, die "
        'alles andere als überzeugend war".'
    )
    second = corpus.train[1]
    second.infer_space_after()
    assert second.to_tokenized_string() == (
        "Firmengründer Wolf Peter Bree arbeitete Anfang der siebziger Jahre als "
        "Möbelvertreter , als er einen fliegenden Händler aus dem Libanon traf ."
    )
    assert second.to_plain_string() == (
        "Firmengründer Wolf Peter Bree arbeitete Anfang der siebziger Jahre als "
        "Möbelvertreter, als er einen fliegenden Händler aus dem Libanon traf."
    )
def test_infer_space_after():
    """Inferred spacing glues quotes to their content in the plain string."""
    for sentence in (
        Sentence([Token("xyz"), Token('"'), Token("abc"), Token('"')]),
        Sentence('xyz " abc "'),
    ):
        sentence.infer_space_after()
        assert sentence.to_tokenized_string() == 'xyz " abc "'
        assert sentence.to_plain_string() == 'xyz "abc"'
def test_sentence_get_item():
    """get_token is 1-based while __getitem__ is 0-based; both bound-check."""
    sentence = Sentence("I love Berlin.", use_tokenizer=SegtokTokenizer())
    assert sentence.get_token(1) == sentence[0]
    assert sentence.get_token(3) == sentence[2]
    # indexing past the last token must raise
    with pytest.raises(IndexError):
        _ = sentence[4]
def test_token_positions_when_creating_with_tokenizer():
    """Tokenizers record character start/end offsets for each token."""
    sentence = Sentence("I love Berlin .", use_tokenizer=SpaceTokenizer())
    for token, (start, end) in zip(sentence.tokens, [(0, 1), (2, 6), (7, 13)]):
        assert token.start_position == start
        assert token.end_position == end
    # leading whitespace shifts all offsets accordingly
    sentence = Sentence(" I love Berlin.", use_tokenizer=SegtokTokenizer())
    for token, (start, end) in zip(sentence.tokens, [(1, 2), (3, 7), (9, 15)]):
        assert token.start_position == start
        assert token.end_position == end


def test_token_positions_when_creating_word_by_word():
    """Offsets are synthesized with single spaces for token-built sentences."""
    sentence = Sentence(
        [
            Token("I"),
            Token("love"),
            Token("Berlin"),
            Token("."),
        ]
    )
    for token, (start, end) in zip(sentence.tokens, [(0, 1), (2, 6), (7, 13)]):
        assert token.start_position == start
        assert token.end_position == end
def no_op_tokenizer(text: str) -> List[str]:
    """Return the entire input as a single "token" (used to test custom tokenizers)."""
    return [text]
| 16,472 | 34.967249 | 115 | py |
flair | flair-master/tests/embedding_test_utils.py | from typing import Any, Dict, List, Optional, Type
import pytest
import torch
from flair.data import Sentence
from flair.embeddings import Embeddings
from flair.embeddings.base import load_embeddings
class BaseEmbeddingsTest:
    """Reusable pytest mixin that exercises one embedding class end to end.

    Subclasses set ``embedding_cls``, ``default_args`` and the two
    ``is_*_embedding`` flags; the optional list attributes feed the
    parametrized tests below.
    """

    embedding_cls: Type[Embeddings[Sentence]]
    is_token_embedding: bool
    is_document_embedding: bool
    default_args: Dict[str, Any]
    valid_args: List[Dict[str, Any]] = []
    invalid_args: List[Dict[str, Any]] = []
    invalid_names: List[str] = []
    name_field: Optional[str] = None

    # NOTE(review): keeps its historical spelling ("weired") because the
    # parametrize call below and potential subclass overrides refer to it.
    weired_texts: List[str] = [
        "Hybrid mesons , qq ̄ states with an admixture",
        "typical proportionalities of \u223C 1nmV \u2212 1 [ 3,4 ] .",
        "🤟 🤟 🤟 hüllo",
        "🤟hallo 🤟 🤟 🤟 🤟",
        "🤟",
        "\uF8F9",
    ]

    def create_embedding_from_name(self, name: str):
        """Overwrite this method if it is more complex to load an embedding by name."""
        assert self.name_field is not None
        init_args = dict(self.default_args)
        init_args.pop(self.name_field)
        return self.embedding_cls(name, **init_args)  # type: ignore[call-arg]

    def create_embedding_with_args(self, args: Dict[str, Any]):
        """Instantiate the embedding class with defaults overridden by ``args``."""
        return self.embedding_cls(**{**self.default_args, **args})

    @pytest.mark.parametrize("text", weired_texts)
    def test_embedding_works_with_weird_text(self, text):
        """Unusual unicode input must still yield full-length embeddings."""
        embeddings = self.create_embedding_with_args(self.default_args)
        names = embeddings.get_names()
        sentence = Sentence(text)
        embeddings.embed(sentence)
        if self.is_token_embedding:
            for token in sentence:
                assert len(token.get_embedding(names)) == embeddings.embedding_length
        if self.is_document_embedding:
            assert len(sentence.get_embedding(names)) == embeddings.embedding_length

    @pytest.mark.parametrize("args", valid_args)
    def test_embedding_also_sets_trailing_whitespaces(self, args):
        """Whitespace-only tokens must get embeddings too (token embeddings only)."""
        if not self.is_token_embedding:
            pytest.skip("The test is only valid for token embeddings")
        embeddings = self.create_embedding_with_args(args)
        sentence = Sentence(["hello", " ", "hm", " "])
        embeddings.embed(sentence)
        names = embeddings.get_names()
        for token in sentence:
            assert len(token.get_embedding(names)) == embeddings.embedding_length

    @pytest.mark.parametrize("args", valid_args)
    def test_generic_sentence(self, args):
        """A plain sentence gets embeddings of the advertised length."""
        embeddings = self.create_embedding_with_args(args)
        sentence = Sentence("I love Berlin")
        embeddings.embed(sentence)
        names = embeddings.get_names()
        if self.is_token_embedding:
            for token in sentence:
                assert len(token.get_embedding(names)) == embeddings.embedding_length
        if self.is_document_embedding:
            assert len(sentence.get_embedding(names)) == embeddings.embedding_length

    @pytest.mark.parametrize("name", invalid_names)
    def test_load_non_existing_embedding(self, name):
        """Unknown embedding names must raise a ValueError."""
        with pytest.raises(ValueError):
            self.create_embedding_from_name(name)

    def test_keep_batch_order(self):
        """Embeddings must not depend on a sentence's position in the batch."""
        embeddings = self.create_embedding_with_args(self.default_args)
        names = embeddings.get_names()
        batch_a = [Sentence("First sentence"), Sentence("This is second sentence")]
        batch_b = [Sentence("This is second sentence"), Sentence("First sentence")]
        embeddings.embed(batch_a)
        embeddings.embed(batch_b)
        assert batch_a[0].to_original_text() == "First sentence"
        assert batch_a[1].to_original_text() == "This is second sentence"

        def diff_norm(left, right):
            # zero norm <=> identical embedding vectors
            return torch.norm(left.get_embedding(names) - right.get_embedding(names))

        if self.is_document_embedding:
            assert diff_norm(batch_a[0], batch_b[1]) == 0.0
            assert diff_norm(batch_a[1], batch_b[0]) == 0.0
        if self.is_token_embedding:
            for a_index, b_index in ((0, 1), (1, 0)):
                for token_a, token_b in zip(batch_a[a_index], batch_b[b_index]):
                    assert diff_norm(token_a, token_b) == 0.0
        del embeddings

    @pytest.mark.parametrize("args", valid_args)
    def test_embeddings_stay_the_same_after_saving_and_loading(self, args):
        """A save/load round trip must reproduce names, lengths and vectors."""
        embeddings = self.create_embedding_with_args(args)
        sentence_old = Sentence("I love Berlin")
        embeddings.embed(sentence_old)
        names_old = embeddings.get_names()
        length_old = embeddings.embedding_length
        save_data = embeddings.save_embeddings(use_state_dict=True)
        # drop the live object so the reload cannot share state with it
        del embeddings
        new_embeddings = load_embeddings(save_data)
        sentence_new = Sentence("I love Berlin")
        new_embeddings.embed(sentence_new)
        names_new = new_embeddings.get_names()
        length_new = new_embeddings.embedding_length
        assert names_old == names_new
        assert length_old == length_new
        if self.is_token_embedding:
            for token_old, token_new in zip(sentence_old, sentence_new):
                assert (token_old.get_embedding(names_old) == token_new.get_embedding(names_new)).all()
        if self.is_document_embedding:
            assert (sentence_old.get_embedding(names_old) == sentence_new.get_embedding(names_new)).all()

    def test_default_embeddings_stay_the_same_after_saving_and_loading(self):
        """Same round-trip check with default args; the reload must be in eval mode."""
        embeddings = self.create_embedding_with_args(self.default_args)
        sentence_old = Sentence("I love Berlin")
        embeddings.embed(sentence_old)
        names_old = embeddings.get_names()
        length_old = embeddings.embedding_length
        save_data = embeddings.save_embeddings(use_state_dict=True)
        new_embeddings = load_embeddings(save_data)
        sentence_new = Sentence("I love Berlin")
        new_embeddings.embed(sentence_new)
        names_new = new_embeddings.get_names()
        length_new = new_embeddings.embedding_length
        assert not new_embeddings.training
        assert names_old == names_new
        assert length_old == length_new
        if self.is_token_embedding:
            for token_old, token_new in zip(sentence_old, sentence_new):
                assert (token_old.get_embedding(names_old) == token_new.get_embedding(names_new)).all()
        if self.is_document_embedding:
            assert (sentence_old.get_embedding(names_old) == sentence_new.get_embedding(names_new)).all()

    def test_embeddings_load_in_eval_mode(self):
        """Freshly constructed embeddings must not be in training mode."""
        embeddings = self.create_embedding_with_args(self.default_args)
        assert not embeddings.training
| 7,511 | 39.387097 | 113 | py |
flair | flair-master/tests/test_trainer.py | import pytest
from torch.optim import Adam
import flair
from flair.data import Sentence
from flair.datasets import ClassificationCorpus
from flair.embeddings import DocumentPoolEmbeddings, FlairEmbeddings, WordEmbeddings
from flair.models import SequenceTagger, TextClassifier
from flair.trainers import ModelTrainer
# Word embeddings shared by the tagger tests below.
# NOTE(review): constructed at module import time, so merely collecting this
# test module loads the "turian" embeddings — confirm this is intended.
turian_embeddings = WordEmbeddings("turian")
@pytest.mark.integration()
def test_text_classifier_multi(results_base_path, tasks_base_path):
    """Train a tiny text classifier for one epoch and inspect the training log."""
    flair.set_seed(123)
    flair_embeddings = FlairEmbeddings("news-forward-fast")
    corpus = ClassificationCorpus(
        tasks_base_path / "trivial" / "trivial_text_classification_single",
        label_type="city",
    )
    label_dict = corpus.make_label_dictionary(label_type="city")
    model = TextClassifier(
        embeddings=DocumentPoolEmbeddings([flair_embeddings], fine_tune_mode="linear"),
        label_dictionary=label_dict,
        label_type="city",
    )
    trainer = ModelTrainer(model, corpus)
    trainer.train(results_base_path, mini_batch_size=2, max_epochs=1, shuffle=True)
    del model
    # every major training phase should leave a trace in the log file
    log_path = results_base_path / "training.log"
    assert log_path.exists()
    log_lines = log_path.read_text(encoding="utf-8").split("\n")
    expected_substrings = [
        "compute on device: ",
        "Corpus: ",
        "- learning_rate: ",
        "patience",
        "embedding storage:",
        "epoch 1 - iter",
        "EPOCH 1 done: loss",
        "Results:",
    ]
    for substring in expected_substrings:
        assert any(substring in line for line in log_lines), substring
@pytest.mark.integration()
def test_train_load_use_tagger_large(results_base_path, tasks_base_path):
    """Train a small POS tagger on a downsampled UD_ENGLISH corpus, then reload it and predict."""
    pos_corpus = flair.datasets.UD_ENGLISH().downsample(0.01)
    pos_dictionary = pos_corpus.make_label_dictionary("pos")

    pos_tagger: SequenceTagger = SequenceTagger(
        hidden_size=64,
        embeddings=turian_embeddings,
        tag_dictionary=pos_dictionary,
        tag_type="pos",
        use_crf=False,
    )

    # short, unshuffled training run
    trainer: ModelTrainer = ModelTrainer(pos_tagger, pos_corpus)
    trainer.train(
        results_base_path,
        learning_rate=0.1,
        mini_batch_size=32,
        max_epochs=2,
        shuffle=False,
    )
    del trainer, pos_tagger, pos_dictionary, pos_corpus

    # the persisted model must load and handle normal, mixed and whitespace-only batches
    loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
    sentence = Sentence("I love Berlin")
    sentence_empty = Sentence("       ")
    loaded_model.predict(sentence)
    loaded_model.predict([sentence, sentence_empty])
    loaded_model.predict([sentence_empty])
    del loaded_model
@pytest.mark.integration()
def test_train_load_use_tagger_adam(results_base_path, tasks_base_path):
    """Train an NER tagger with the Adam optimizer, then reload it and predict."""
    corpus = flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
    tag_dictionary = corpus.make_label_dictionary("ner", add_unk=False)
    tagger: SequenceTagger = SequenceTagger(
        hidden_size=64,
        embeddings=turian_embeddings,
        tag_dictionary=tag_dictionary,
        tag_type="ner",
        use_crf=False,
    )
    # initialize trainer
    trainer: ModelTrainer = ModelTrainer(tagger, corpus)
    trainer.train(
        results_base_path,
        learning_rate=0.1,
        mini_batch_size=2,
        max_epochs=2,
        shuffle=False,
        optimizer=Adam,  # non-default optimizer is the point of this test
    )
    del trainer, tagger, tag_dictionary, corpus
    # reload the final model and predict on normal and whitespace-only input
    loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
    sentence = Sentence("I love Berlin")
    sentence_empty = Sentence("       ")
    loaded_model.predict(sentence)
    loaded_model.predict([sentence, sentence_empty])
    loaded_model.predict([sentence_empty])
    del loaded_model
def test_missing_validation_split(results_base_path, tasks_base_path):
    """Training must work on a corpus that has no dev split (sample_missing_splits=False)."""
    corpus = flair.datasets.ColumnCorpus(
        data_folder=tasks_base_path / "fewshot_conll",
        train_file="1shot.txt",
        sample_missing_splits=False,
        column_format={0: "text", 1: "ner"},
    )
    tag_dictionary = corpus.make_label_dictionary("ner", add_unk=True)
    tagger: SequenceTagger = SequenceTagger(
        hidden_size=64,
        embeddings=turian_embeddings,
        tag_dictionary=tag_dictionary,
        tag_type="ner",
        use_crf=False,
    )
    # initialize trainer
    trainer: ModelTrainer = ModelTrainer(tagger, corpus)
    trainer.train(
        results_base_path,
        learning_rate=0.1,
        mini_batch_size=2,
        max_epochs=2,
        shuffle=False,
        optimizer=Adam,
    )
    del trainer, tagger, tag_dictionary, corpus
    # reload the final model and predict on normal and whitespace-only input
    loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
    sentence = Sentence("I love Berlin")
    sentence_empty = Sentence("       ")
    loaded_model.predict(sentence)
    loaded_model.predict([sentence, sentence_empty])
    loaded_model.predict([sentence_empty])
    del loaded_model
| 4,999 | 28.585799 | 118 | py |
flair | flair-master/tests/test_language_model.py | import pytest
from flair.data import Dictionary, Sentence
from flair.embeddings import FlairEmbeddings, TokenEmbeddings
from flair.models import LanguageModel
from flair.trainers.language_model_trainer import LanguageModelTrainer, TextCorpus
@pytest.mark.integration()
def test_train_language_model(results_base_path, resources_path):
    """Train a small char-level LM, embed a sentence with it, and generate text."""
    # get default dictionary
    dictionary: Dictionary = Dictionary.load("chars")
    # init forward LM with 128 hidden states and 1 layer
    language_model: LanguageModel = LanguageModel(dictionary, is_forward_lm=True, hidden_size=128, nlayers=1)
    # get the example corpus and process at character level in forward direction
    corpus: TextCorpus = TextCorpus(
        resources_path / "corpora/lorem_ipsum",
        dictionary,
        language_model.is_forward_lm,
        character_level=True,
    )
    # train the language model
    trainer: LanguageModelTrainer = LanguageModelTrainer(language_model, corpus, test_mode=True)
    trainer.train(results_base_path, sequence_length=10, mini_batch_size=10, max_epochs=2)
    # use the character LM as embeddings to embed the example sentence 'I love Berlin'
    char_lm_embeddings: TokenEmbeddings = FlairEmbeddings(str(results_base_path / "best-lm.pt"))
    sentence = Sentence("I love Berlin")
    char_lm_embeddings.embed(sentence)
    # the trained LM must be able to generate at least the requested number of characters
    text, likelihood = language_model.generate_text(number_of_characters=100)
    assert text is not None
    assert len(text) >= 100
    # clean up results directory
    del trainer, language_model, corpus, char_lm_embeddings
@pytest.mark.integration()
def test_train_resume_language_model(resources_path, results_base_path, tasks_base_path):
    """Train with checkpointing, then resume training from the saved checkpoint."""
    # get default dictionary
    dictionary: Dictionary = Dictionary.load("chars")
    # init forward LM with 128 hidden states and 1 layer
    language_model: LanguageModel = LanguageModel(dictionary, is_forward_lm=True, hidden_size=128, nlayers=1)
    # get the example corpus and process at character level in forward direction
    corpus: TextCorpus = TextCorpus(
        resources_path / "corpora/lorem_ipsum",
        dictionary,
        language_model.is_forward_lm,
        character_level=True,
    )
    # train the language model with checkpoint=True so checkpoint.pt gets written
    trainer: LanguageModelTrainer = LanguageModelTrainer(language_model, corpus, test_mode=True)
    trainer.train(
        results_base_path,
        sequence_length=10,
        mini_batch_size=10,
        max_epochs=2,
        checkpoint=True,
    )
    del trainer, language_model
    # resuming from the checkpoint must continue training without error
    trainer = LanguageModelTrainer.load_checkpoint(results_base_path / "checkpoint.pt", corpus)
    trainer.train(results_base_path, sequence_length=10, mini_batch_size=10, max_epochs=2)
    del trainer
def test_generate_text_with_small_temperatures():
    """Near-greedy sampling (temperature ~0) must still yield the requested amount of text."""
    from flair.embeddings import FlairEmbeddings

    lm = FlairEmbeddings("news-forward-fast", has_decoder=True).lm
    generated, _likelihood = lm.generate_text(temperature=0.01, number_of_characters=100)
    assert generated is not None
    assert len(generated) >= 100
    del lm
def test_compute_perplexity():
    """Grammatical text must have lower perplexity than gibberish.

    Checked for both the forward and the backward character LM; the two
    previously duplicated code paths are consolidated into one loop.
    """
    from flair.embeddings import FlairEmbeddings

    grammatical = "The company made a profit"
    ungrammatical = "Nook negh qapla!"
    for model_name in ("news-forward-fast", "news-backward-fast"):
        language_model = FlairEmbeddings(model_name, has_decoder=True).lm
        perplexity_grammatical = language_model.calculate_perplexity(grammatical)
        perplexity_ungrammatical = language_model.calculate_perplexity(ungrammatical)
        print(f'"{grammatical}" - perplexity is {perplexity_grammatical}')
        print(f'"{ungrammatical}" - perplexity is {perplexity_ungrammatical}')
        assert perplexity_grammatical < perplexity_ungrammatical
        del language_model
| 4,360 | 36.594828 | 109 | py |
flair | flair-master/tests/test_tars.py | from flair.data import Sentence
from flair.datasets import ClassificationCorpus
from flair.models import TARSClassifier
from flair.trainers import ModelTrainer
def test_init_tars_and_switch(tasks_base_path):
    """TARS must accept label sets given as Dictionary, str, list, or set when switching tasks."""
    # test corpus
    corpus = ClassificationCorpus(tasks_base_path / "imdb")
    # create a TARS classifier
    tars = TARSClassifier(
        task_name="2_CLASS",
        label_dictionary=corpus.make_label_dictionary(label_type="class"),
        label_type="class",
    )
    # check if right number of classes
    assert len(tars.get_current_label_dictionary()) == 2
    # switch to task with only one label
    tars.add_and_switch_to_new_task("1_CLASS", "one class", "testlabel")
    # check if right number of classes
    assert len(tars.get_current_label_dictionary()) == 1
    # switch to task with three labels provided as list
    tars.add_and_switch_to_new_task("3_CLASS", ["list 1", "list 2", "list 3"], "testlabel")
    # check if right number of classes
    assert len(tars.get_current_label_dictionary()) == 3
    # switch to task with four labels provided as set
    tars.add_and_switch_to_new_task("4_CLASS", {"set 1", "set 2", "set 3", "set 4"}, "testlabel")
    # check if right number of classes
    assert len(tars.get_current_label_dictionary()) == 4
    # switch to task with two labels provided as Dictionary
    tars.add_and_switch_to_new_task("2_CLASS_AGAIN", corpus.make_label_dictionary(label_type="class"), "testlabel")
    # check if right number of classes
    assert len(tars.get_current_label_dictionary()) == 2
def test_train_tars(tasks_base_path, results_base_path):
    """Train a tiny TARS classifier for one epoch and run a prediction."""
    # test corpus
    corpus = ClassificationCorpus(tasks_base_path / "imdb_underscore")
    # create a TARS classifier
    tars = TARSClassifier(embeddings="sshleifer/tiny-distilroberta-base")
    # switch to a new task (TARS can do multiple tasks so you must define one)
    tars.add_and_switch_to_new_task(
        task_name="question 2_CLASS",
        label_dictionary=corpus.make_label_dictionary(label_type="class"),
        label_type="class",
    )
    # initialize the text classifier trainer
    trainer = ModelTrainer(tars, corpus)
    # start the training
    trainer.train(
        base_path=results_base_path,
        learning_rate=0.02,
        mini_batch_size=1,
        max_epochs=1,
    )
    # prediction on a fresh sentence must not raise
    sentence = Sentence("This is great!")
    tars.predict(sentence)
| 2,404 | 31.945205 | 115 | py |
flair | flair-master/tests/conftest.py | from pathlib import Path
import pytest
import torch
import flair
@pytest.fixture(scope="module")
def resources_path():
    """Path to the test resources directory next to this conftest."""
    return Path(__file__).parent / "resources"
@pytest.fixture(scope="module")
def tasks_base_path(resources_path):
    """Path to the bundled task corpora under resources/tasks."""
    return resources_path / "tasks"
@pytest.fixture()
def results_base_path(resources_path):
    """Yield a scratch results directory and remove it recursively after the test.

    Cleanup previously walked ``path.rglob("*")`` bottom-up and unlinked each
    entry by hand; ``shutil.rmtree`` does the same job, also copes with files
    created after the directory listing was taken, and (via ``ignore_errors``)
    with the directory never having been created at all.
    """
    import shutil

    path = resources_path / "results"
    try:
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=True)
@pytest.fixture(autouse=True)
def set_cpu(force_cpu):
    """Force flair onto the CPU when the --force-cpu flag is set (applies to every test)."""
    if force_cpu:
        flair.device = torch.device("cpu")
def pytest_addoption(parser):
    """Register the custom command-line flags used by this test suite."""
    for flag, description in (
        ("--runintegration", "run integration tests"),
        ("--force-cpu", "use cpu for tests even when gpu is available"),
    ):
        parser.addoption(
            flag,
            action="store_true",
            default=False,
            help=description,
        )
def pytest_collection_modifyitems(config, items):
    """Skip tests marked `integration` unless --runintegration was given."""
    if not config.getoption("--runintegration"):
        skip_integration = pytest.mark.skip(reason="need --runintegration option to run")
        for item in items:
            if "integration" in item.keywords:
                item.add_marker(skip_integration)
def pytest_generate_tests(metafunc):
option_value = metafunc.config.getoption("--force-cpu")
if "force_cpu" in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("force_cpu", [option_value])
| 1,620 | 23.19403 | 89 | py |
flair | flair-master/tests/test_corpus_dictionary.py | import os
import pytest
import flair
from flair.data import Corpus, Dictionary, Label, Sentence
from flair.datasets import ColumnCorpus, FlairDatapointDataset, SentenceDataset
def test_dictionary_get_items_with_unk():
    """A dictionary created with add_unk=True prepends the <unk> item."""
    dictionary: Dictionary = Dictionary(add_unk=True)
    for class_name in ("class_1", "class_2", "class_3"):
        dictionary.add_item(class_name)
    items = dictionary.get_items()
    assert len(items) == 4
    assert items[0] == "<unk>"
    assert items[1:] == ["class_1", "class_2", "class_3"]
def test_dictionary_get_items_without_unk():
    """With add_unk=False the dictionary holds exactly the added items, in order."""
    dictionary: Dictionary = Dictionary(add_unk=False)
    for class_name in ("class_1", "class_2", "class_3"):
        dictionary.add_item(class_name)
    items = dictionary.get_items()
    assert len(items) == 3
    assert items == ["class_1", "class_2", "class_3"]
def test_dictionary_get_idx_for_item():
    """Items are indexed in insertion order (no unk offset here)."""
    dictionary: Dictionary = Dictionary(add_unk=False)
    for class_name in ("class_1", "class_2", "class_3"):
        dictionary.add_item(class_name)
    assert dictionary.get_idx_for_item("class_2") == 1
def test_dictionary_get_item_for_index():
    """Index 0 maps back to the first added item when no unk is present."""
    dictionary: Dictionary = Dictionary(add_unk=False)
    for class_name in ("class_1", "class_2", "class_3"):
        dictionary.add_item(class_name)
    assert dictionary.get_item_for_index(0) == "class_1"
def test_dictionary_save_and_load():
    """Saving and reloading a dictionary must preserve its size and items.

    The temp file is now removed in a ``finally`` block: previously
    ``os.remove`` only ran on the success path, so a failing assertion left
    ``dictionary.txt`` behind in the working directory.
    """
    dictionary: Dictionary = Dictionary(add_unk=False)
    for class_name in ("class_1", "class_2", "class_3"):
        dictionary.add_item(class_name)
    file_path = "dictionary.txt"
    try:
        dictionary.save(file_path)
        loaded_dictionary = dictionary.load_from_file(file_path)
        assert len(dictionary) == len(loaded_dictionary)
        assert len(dictionary.get_items()) == len(loaded_dictionary.get_items())
    finally:
        # clean up file even when an assertion above fails
        if os.path.exists(file_path):
            os.remove(file_path)
def test_deprecated_sentence_dataset():
    """SentenceDataset must emit a DeprecationWarning but remain functional."""
    with pytest.warns(DeprecationWarning):  # test to make sure the warning comes, but class works
        SentenceDataset([Sentence("Short sentences are short")])
def test_tagged_corpus_get_all_sentences():
    """get_all_sentences concatenates the train, dev and test splits."""
    split_sentences = [
        Sentence("I'm used in training."),
        Sentence("I'm a dev sentence."),
        Sentence("I will be only used for testing."),
    ]
    corpus: Corpus = Corpus(*(FlairDatapointDataset([sentence]) for sentence in split_sentences))
    assert len(corpus.get_all_sentences()) == 3
def test_tagged_corpus_make_vocab_dictionary():
    """Check that max_tokens and min_freq prune the vocabulary as documented."""
    train_sentence = Sentence("used in training. training is cool.")
    corpus: Corpus = Corpus(FlairDatapointDataset([train_sentence]), sample_missing_splits=False)
    # cap at 2 tokens: the two most frequent ("training", ".") plus <unk>
    vocab = corpus.make_vocab_dictionary(max_tokens=2, min_freq=-1)
    assert len(vocab) == 3
    assert "<unk>" in vocab.get_items()
    assert "training" in vocab.get_items()
    assert "." in vocab.get_items()
    # no limits: all 6 distinct tokens plus <unk>
    vocab = corpus.make_vocab_dictionary(max_tokens=-1, min_freq=-1)
    assert len(vocab) == 7
    # min_freq=2 keeps only tokens appearing at least twice, plus <unk>
    vocab = corpus.make_vocab_dictionary(max_tokens=-1, min_freq=2)
    assert len(vocab) == 3
    assert "<unk>" in vocab.get_items()
    assert "training" in vocab.get_items()
    assert "." in vocab.get_items()
def test_label_set_confidence():
    """Label exposes its score, and the backing _score field can be rewritten."""
    label = Label(data_point=None, value="class_1", score=3.2)
    assert label.score == 3.2
    assert label.value == "class_1"
    # NOTE(review): writes the private backing field directly, bypassing any
    # setter logic on `score` — presumably intentional for this test
    label._score = 0.2
    assert label.score == 0.2
def test_tagged_corpus_make_label_dictionary():
    """make_label_dictionary collects distinct label values (plus <unk> when requested)."""
    sentence_1 = Sentence("sentence 1").add_label("label", "class_1")
    sentence_2 = Sentence("sentence 2").add_label("label", "class_2")
    sentence_3 = Sentence("sentence 3").add_label("label", "class_1")
    corpus: Corpus = Corpus(
        FlairDatapointDataset([sentence_1, sentence_2, sentence_3]),
        FlairDatapointDataset([]),
        FlairDatapointDataset([]),
    )
    label_dict = corpus.make_label_dictionary("label", add_unk=True)
    assert len(label_dict) == 3
    assert "<unk>" in label_dict.get_items()
    assert "class_1" in label_dict.get_items()
    assert "class_2" in label_dict.get_items()
    # the deprecated alias must warn but still work
    with pytest.warns(DeprecationWarning):  # test to make sure the warning comes, but function works
        corpus.make_tag_dictionary("label")
def test_obtain_statistics():
    """obtain_statistics must report per-split document, class and token counts."""
    sentence_1 = Sentence("The snake hissed to the mountain goat")
    sentence_1_labels = " O B-Ani O O O B-Ani E-Ani".split()
    sentence_2 = Sentence("Saber tooth tigers are extinct")
    sentence_2_labels = " B-Ani I-Ani E-Ani O O".split()
    # attach one BIOES ner tag per token
    for sentence, labels in [(sentence_1, sentence_1_labels), (sentence_2, sentence_2_labels)]:
        assert len(sentence) == len(labels)
        for token, label in zip(sentence, labels):
            token.add_label("ner", label)
    # train: both sentences; dev: empty; test: sentence_2 only
    corpus = Corpus(
        FlairDatapointDataset([sentence_1, sentence_2]),
        FlairDatapointDataset([]),
        FlairDatapointDataset([sentence_2]),
    )
    statistics = corpus.obtain_statistics("ner", pretty_print=False)
    assert statistics == {
        "TRAIN": {
            "dataset": "TRAIN",
            "total_number_of_documents": 2,
            "number_of_documents_per_class": {"O": 6, "B-Ani": 3, "E-Ani": 2, "I-Ani": 1},
            "number_of_tokens_per_tag": {"O": 6, "B-Ani": 3, "E-Ani": 2, "I-Ani": 1},
            "number_of_tokens": {"total": 12, "min": 5, "max": 7, "avg": 6.0},
        },
        "TEST": {
            "dataset": "TEST",
            "total_number_of_documents": 1,
            "number_of_documents_per_class": {"B-Ani": 1, "I-Ani": 1, "E-Ani": 1, "O": 2},
            "number_of_tokens_per_tag": {"B-Ani": 1, "I-Ani": 1, "E-Ani": 1, "O": 2},
            "number_of_tokens": {"total": 5, "min": 5, "max": 5, "avg": 5.0},
        },
        "DEV": {},
    }
def test_tagged_corpus_statistics():
    """Internal label-count and token-count helpers tally single-label sentences."""
    train_sentence = Sentence("I love Berlin.", use_tokenizer=True).add_label("label", "class_1")
    dev_sentence = Sentence("The sun is shining.", use_tokenizer=True).add_label("label", "class_2")
    test_sentence = Sentence("Berlin is sunny.", use_tokenizer=True).add_label("label", "class_1")
    class_to_count_dict = Corpus._count_sentence_labels([train_sentence, dev_sentence, test_sentence])
    assert "class_1" in class_to_count_dict
    assert "class_2" in class_to_count_dict
    assert class_to_count_dict["class_1"] == 2
    assert class_to_count_dict["class_2"] == 1
    # token counts include the sentence-final punctuation token
    tokens_in_sentences = Corpus._get_tokens_per_sentence([train_sentence, dev_sentence, test_sentence])
    assert len(tokens_in_sentences) == 3
    assert tokens_in_sentences[0] == 4
    assert tokens_in_sentences[1] == 5
    assert tokens_in_sentences[2] == 4
def test_tagged_corpus_statistics_multi_label():
    """A sentence carrying two labels is counted once for each label value."""
    train_sentence = Sentence("I love Berlin.", use_tokenizer=True).add_label("label", "class_1")
    dev_sentence = Sentence("The sun is shining.", use_tokenizer=True).add_label("label", "class_2")
    test_sentence = Sentence("Berlin is sunny.", use_tokenizer=True)
    # multi-label: same sentence contributes to both classes
    test_sentence.add_label("label", "class_1")
    test_sentence.add_label("label", "class_2")
    class_to_count_dict = Corpus._count_sentence_labels([train_sentence, dev_sentence, test_sentence])
    assert "class_1" in class_to_count_dict
    assert "class_2" in class_to_count_dict
    assert class_to_count_dict["class_1"] == 2
    assert class_to_count_dict["class_2"] == 2
    tokens_in_sentences = Corpus._get_tokens_per_sentence([train_sentence, dev_sentence, test_sentence])
    assert len(tokens_in_sentences) == 3
    assert tokens_in_sentences[0] == 4
    assert tokens_in_sentences[1] == 5
    assert tokens_in_sentences[2] == 4
def test_tagged_corpus_downsample():
    """downsample keeps the requested fraction of the train split.

    The ten identical entries are built with list repetition instead of
    writing the same name out ten times.
    """
    sentence = Sentence("I love Berlin.", use_tokenizer=True).add_label("label", "class_1")
    # ten references to the same labeled sentence
    corpus: Corpus = Corpus(
        FlairDatapointDataset([sentence] * 10),
        sample_missing_splits=False,
    )
    assert len(corpus.train) == 10
    corpus.downsample(percentage=0.3, downsample_dev=False, downsample_test=False)
    assert len(corpus.train) == 3
def test_classification_corpus_multi_labels_without_negative_examples(tasks_base_path):
    """Unlabeled examples are dropped when allow_examples_without_labels=False."""
    corpus = flair.datasets.ClassificationCorpus(
        tasks_base_path / "multi_class_negative_examples",
        allow_examples_without_labels=False,
    )
    expected_split_sizes = {"train": 7, "dev": 4, "test": 5}
    for split_name, split_size in expected_split_sizes.items():
        assert len(getattr(corpus, split_name)) == split_size
def test_classification_corpus_multi_labels_with_negative_examples(tasks_base_path):
    """Unlabeled examples are kept when allow_examples_without_labels=True."""
    corpus = flair.datasets.ClassificationCorpus(
        tasks_base_path / "multi_class_negative_examples",
        allow_examples_without_labels=True,
    )
    expected_split_sizes = {"train": 8, "dev": 5, "test": 6}
    for split_name, split_size in expected_split_sizes.items():
        assert len(getattr(corpus, split_name)) == split_size
def test_misalignment_spans(tasks_base_path):
    """Spans must stay aligned even when a data row contains a whitespace token (\\t)."""
    example_txt = """George B-NAME
Washington I-NAME
went O
\t O
Washington B-CITY
and O
enjoyed O
some O
coffee B-BEVERAGE
"""
    train_path = tasks_base_path / "tmp" / "train.txt"
    try:
        train_path.parent.mkdir(exist_ok=True, parents=True)
        train_path.write_text(example_txt, encoding="utf-8")
        corpus = ColumnCorpus(
            data_folder=train_path.parent, column_format={0: "text", 1: "ner"}, train_file=train_path.name
        )
        sentence = corpus.train[0]
        # the tab token must not shift the following span boundaries
        span_texts = [span.text for span in sentence.get_spans("ner")]
        assert span_texts == ["George Washington", "Washington", "coffee"]
    finally:
        # remove the temporary corpus file and its directory
        train_path.unlink()
        train_path.parent.rmdir()
| 10,036 | 30.662461 | 106 | py |
flair | flair-master/tests/model_test_utils.py | from typing import Any, Dict, List, Optional, Type
import pytest
import flair
from flair.data import Dictionary, Sentence
from flair.embeddings import TransformerEmbeddings
from flair.models import FewshotClassifier
from flair.nn import Model
from flair.trainers import ModelTrainer
class BaseModelTest:
    """Shared pytest scaffold for flair model classes.

    Subclasses set the class attributes below and override the fixtures
    (`embeddings`, `corpus`, ...) their tests need; fixtures that are not
    overridden make the corresponding tests skip.
    """

    # class of the model under test
    model_cls: Type[Model]
    # optional identifier of a downloadable pretrained model for the load/use tests
    pretrained_model: Optional[str] = None
    # whitespace-only sentence used to exercise empty-input prediction
    empty_sentence = Sentence("       ")
    # label type the corpus fixtures are annotated with
    train_label_type: str
    # labels expected for the "apple tv" probe in the multi-label test
    multiclass_prediction_labels: List[str]
    # extra keyword arguments forwarded to the model constructor
    model_args: Dict[str, Any] = {}
    # extra keyword arguments forwarded to trainer.train / trainer.fine_tune
    training_args: Dict[str, Any] = {}
    # when True, tests call trainer.fine_tune instead of trainer.train
    finetune_instead_of_train: bool = False

    @pytest.fixture()
    def embeddings(self):
        pytest.skip("This test requires the `embeddings` fixture to be defined")

    @pytest.fixture()
    def corpus(self, tasks_base_path):
        pytest.skip("This test requires the `corpus` fixture to be defined")

    @pytest.fixture()
    def multi_class_corpus(self, tasks_base_path):
        pytest.skip("This test requires the `multi_class_corpus` fixture to be defined")

    @pytest.fixture()
    def multi_corpus(self, tasks_base_path):
        pytest.skip("This test requires the `multi_corpus` fixture to be defined")

    @pytest.fixture()
    def example_sentence(self):
        return Sentence("I love Berlin")

    @pytest.fixture()
    def train_test_sentence(self):
        return Sentence("Berlin is a really nice city.")

    @pytest.fixture()
    def labeled_sentence(self):
        pytest.skip("This test requires the `labeled_sentence` fixture to be defined")

    @pytest.fixture()
    def multiclass_train_test_sentence(self):
        pytest.skip("This test requires the `multiclass_train_test_sentence` fixture to be defined")

    def transform_corpus(self, model, corpus):
        """Hook for subclasses to adapt the corpus to the model; identity by default."""
        return corpus

    def assert_training_example(self, predicted_training_example):
        """Hook for subclasses to add assertions on the predicted training sentence."""
        pass

    def build_model(self, embeddings, label_dict, **kwargs):
        """Instantiate model_cls; explicit kwargs override entries from model_args."""
        model_args = dict(self.model_args)
        for k in kwargs:
            if k in model_args:
                del model_args[k]
        return self.model_cls(
            embeddings=embeddings,
            label_dictionary=label_dict,
            label_type=self.train_label_type,
            **model_args,
            **kwargs,
        )

    def has_embedding(self, sentence):
        """True if the sentence carries a non-empty stored embedding."""
        return sentence.get_embedding().cpu().numpy().size > 0

    @pytest.fixture()
    def loaded_pretrained_model(self):
        if self.pretrained_model is None:
            pytest.skip("For this test `pretrained_model` needs to be set.")
        return self.model_cls.load(self.pretrained_model)

    @pytest.mark.integration()
    def test_load_use_model(self, example_sentence, loaded_pretrained_model):
        """A pretrained model predicts on normal, mixed and whitespace-only batches."""
        loaded_pretrained_model.predict(example_sentence)
        loaded_pretrained_model.predict([example_sentence, self.empty_sentence])
        loaded_pretrained_model.predict([self.empty_sentence])
        del loaded_pretrained_model
        # empty_sentence is shared class state, so clear stored embeddings
        example_sentence.clear_embeddings()
        self.empty_sentence.clear_embeddings()

    @pytest.mark.integration()
    def test_train_load_use_model(self, results_base_path, corpus, embeddings, example_sentence, train_test_sentence):
        """Train on the corpus fixture, check label sanity, then reload and predict."""
        flair.set_seed(123)
        label_dict = corpus.make_label_dictionary(label_type=self.train_label_type)
        model = self.build_model(embeddings, label_dict)
        corpus = self.transform_corpus(model, corpus)
        trainer = ModelTrainer(model, corpus)
        if self.finetune_instead_of_train:
            trainer.fine_tune(results_base_path, shuffle=False, **self.training_args)
        else:
            trainer.train(results_base_path, shuffle=False, **self.training_args)
        model.predict(train_test_sentence)
        self.assert_training_example(train_test_sentence)
        # every predicted label must have a value and a probability-like score
        for label in train_test_sentence.get_labels(self.train_label_type):
            assert label.value is not None
            assert 0.0 <= label.score <= 1.0
            assert isinstance(label.score, float)
        del trainer, model, corpus
        loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
        loaded_model.predict(example_sentence)
        loaded_model.predict([example_sentence, self.empty_sentence])
        loaded_model.predict([self.empty_sentence])
        del loaded_model

    @pytest.mark.integration()
    def test_train_load_use_model_multi_corpus(
        self, results_base_path, multi_corpus, embeddings, example_sentence, train_test_sentence
    ):
        """Same as test_train_load_use_model but on the multi_corpus fixture."""
        flair.set_seed(123)
        label_dict = multi_corpus.make_label_dictionary(label_type=self.train_label_type)
        model = self.build_model(embeddings, label_dict)
        corpus = self.transform_corpus(model, multi_corpus)
        trainer = ModelTrainer(model, corpus)
        if self.finetune_instead_of_train:
            trainer.fine_tune(results_base_path, shuffle=False, **self.training_args)
        else:
            trainer.train(results_base_path, shuffle=False, **self.training_args)
        model.predict(train_test_sentence)
        self.assert_training_example(train_test_sentence)
        for label in train_test_sentence.get_labels(self.train_label_type):
            assert label.value is not None
            assert 0.0 <= label.score <= 1.0
            assert isinstance(label.score, float)
        del trainer, model, corpus
        loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
        loaded_model.predict(example_sentence)
        loaded_model.predict([example_sentence, self.empty_sentence])
        loaded_model.predict([self.empty_sentence])
        del loaded_model

    def test_forward_loss(self, labeled_sentence, embeddings):
        """forward_loss returns a scalar loss and the number of scored labels."""
        label_dict = Dictionary()
        for label in labeled_sentence.get_labels(self.train_label_type):
            label_dict.add_item(label.value)
        model = self.build_model(embeddings, label_dict)
        loss, count = model.forward_loss([labeled_sentence])
        assert loss.size() == ()
        assert count == len(labeled_sentence.get_labels(self.train_label_type))

    def test_load_use_model_keep_embedding(self, example_sentence, loaded_pretrained_model):
        """embedding_storage_mode='cpu' must leave the embedding stored on the sentence."""
        assert not self.has_embedding(example_sentence)
        loaded_pretrained_model.predict(example_sentence, embedding_storage_mode="cpu")
        assert self.has_embedding(example_sentence)
        del loaded_pretrained_model

    def test_train_load_use_model_multi_label(
        self, results_base_path, multi_class_corpus, embeddings, example_sentence, multiclass_train_test_sentence
    ):
        """Train with multi_label=True and verify multiple labels are predicted."""
        flair.set_seed(123)
        label_dict = multi_class_corpus.make_label_dictionary(label_type=self.train_label_type)
        model = self.build_model(embeddings, label_dict, multi_label=True)
        corpus = self.transform_corpus(model, multi_class_corpus)
        trainer = ModelTrainer(model, corpus)
        trainer.train(
            results_base_path,
            mini_batch_size=1,
            max_epochs=5,
            shuffle=False,
            train_with_test=True,
            train_with_dev=True,
        )
        model.predict(multiclass_train_test_sentence)
        sentence = Sentence("apple tv")
        model.predict(sentence)
        # all expected labels must be among the predictions for "apple tv"
        for label in self.multiclass_prediction_labels:
            assert label in [label.value for label in sentence.get_labels(self.train_label_type)], label
        for label in sentence.labels:
            print(label)
            assert label.value is not None
            assert 0.0 <= label.score <= 1.0
            assert type(label.score) is float
        del trainer, model, multi_class_corpus
        loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
        loaded_model.predict(example_sentence)
        loaded_model.predict([example_sentence, self.empty_sentence])
        loaded_model.predict([self.empty_sentence])

    def test_context_is_set_correctly(self):
        """Predicting with a context-using transformer must link sentences into a chain."""
        sentences = [
            Sentence("this is a very very very long sentence"),
            Sentence("this is a shorter sentence"),
            Sentence(""),
            Sentence("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
            Sentence("b"),
        ]
        embedding = TransformerEmbeddings("distilbert-base-cased", use_context=True)
        label_dictionary = Dictionary()
        model = self.build_model(embedding, label_dictionary)
        if isinstance(model, FewshotClassifier):
            # few-shot models need a task before they can predict
            model.add_and_switch_to_new_task("test", ["a", "b"], "label")
        model.predict(sentences)
        # each sentence must point to its neighbours in both directions
        for first, second in zip(sentences[:-1], sentences[1:]):
            assert first.next_sentence() == second
            assert first == second.previous_sentence()
        assert sentences[0].previous_sentence() is None
        assert sentences[-1].next_sentence() is None
| 8,882 | 36.167364 | 118 | py |
flair | flair-master/tests/test_visual.py | from flair.data import Sentence, Span, Token
from flair.embeddings import FlairEmbeddings
from flair.visual import Highlighter
from flair.visual.ner_html import HTML_PAGE, PARAGRAPH, TAGGED_ENTITY, render_ner_html
from flair.visual.training_curves import Plotter
def test_highlighter(resources_path):
    """Render an LM-feature highlight HTML file for a snippet sentence without errors."""
    with (resources_path / "visual/snippet.txt").open() as f:
        sentences = [x for x in f.read().split("\n") if x]
    embeddings = FlairEmbeddings("news-forward")
    features = embeddings.lm.get_representation(sentences[0], "", "").squeeze()
    Highlighter().highlight_selection(
        features,
        sentences[0],
        n=1000,
        file_=str(resources_path / "visual/highligh.html"),
    )
    # clean up directory
    (resources_path / "visual/highligh.html").unlink()
def test_plotting_training_curves_and_weights(resources_path):
    """Plotter must write curve and weight PNGs from the bundled tsv/txt fixtures."""
    plotter = Plotter()
    plotter.plot_training_curves(resources_path / "visual/loss.tsv")
    plotter.plot_weights(resources_path / "visual/weights.txt")
    # clean up directory
    (resources_path / "visual/weights.png").unlink()
    (resources_path / "visual/training.png").unlink()
def mock_ner_span(text, tag, start, end):
    """Build a minimal labeled Span over text[start:end] for rendering tests."""
    span = Span([]).set_label("class", tag)
    span.start_pos = start
    span.end_pos = end
    span.tokens = [Token(text[start:end])]
    return span
def test_html_rendering():
    """render_ner_html must emit the expected tagged-entity markup (with & escaped)."""
    text = (
        "Boris Johnson has been elected new Conservative leader in "
        "a ballot of party members and will become the "
        "next UK prime minister. &"
    )
    sentence = Sentence(text)
    print(sentence[0:2].add_label("ner", "PER"))
    print(sentence[6:7].add_label("ner", "MISC"))
    print(sentence[19:20].add_label("ner", "LOC"))
    colors = {
        "PER": "#F7FF53",
        "ORG": "#E8902E",
        "LOC": "yellow",
        "MISC": "#4647EB",
        "O": "#ddd",
    }
    actual = render_ner_html([sentence], colors=colors)
    # expected page built from the same templates the renderer uses
    expected_res = HTML_PAGE.format(
        text=PARAGRAPH.format(
            sentence=TAGGED_ENTITY.format(color="#F7FF53", entity="Boris Johnson", label="PER")
            + " has been elected new "
            + TAGGED_ENTITY.format(color="#4647EB", entity="Conservative", label="MISC")
            + " leader in a ballot of party members and will become the next "
            + TAGGED_ENTITY.format(color="yellow", entity="UK", label="LOC")
            + " prime minister. &amp;"
        ),
        title="Flair",
    )
    assert expected_res == actual
| 2,506 | 31.141026 | 95 | py |
flair | flair-master/tests/test_datasets_biomedical.py | import inspect
import logging
import os
import tempfile
from operator import itemgetter
from pathlib import Path
from typing import Callable, List, Optional, Type
import pytest
from tqdm import tqdm
import flair
from flair.data import Sentence, Token, _iter_dataset
from flair.datasets import ColumnCorpus, biomedical
from flair.datasets.biomedical import (
CoNLLWriter,
Entity,
HunerDataset,
InternalBioNerDataset,
filter_nested_entities,
)
from flair.splitter import NoSentenceSplitter, SentenceSplitter
from flair.tokenization import SpaceTokenizer
logger = logging.getLogger("flair")
logger.propagate = True
def has_balanced_parantheses(text: str) -> bool:
    """Return True if all (), [] and {} in *text* are properly matched and nested."""
    closer_to_opener = {")": "(", "]": "[", "}": "{"}
    pending: List[str] = []
    for ch in text:
        if ch in "([{":
            pending.append(ch)
        elif ch in closer_to_opener:
            # a closer needs a matching opener on top of the stack
            if not pending or pending.pop() != closer_to_opener[ch]:
                return False
    return not pending
def _huner_class_predicate(member, marker: str) -> bool:
    """True if *member* is a class whose string representation contains *marker*.

    Shared implementation for the five entity-type predicates below, which
    previously repeated the same isclass/substring check verbatim.
    """
    return inspect.isclass(member) and marker in str(member)


def gene_predicate(member):
    return _huner_class_predicate(member, "HUNER_GENE_")


def chemical_predicate(member):
    return _huner_class_predicate(member, "HUNER_CHEMICAL_")


def disease_predicate(member):
    return _huner_class_predicate(member, "HUNER_DISEASE_")


def species_predicate(member):
    return _huner_class_predicate(member, "HUNER_SPECIES_")


def cellline_predicate(member):
    return _huner_class_predicate(member, "HUNER_CELL_LINE_")
# Collect every HUNER dataset class from flair.datasets.biomedical, grouped by
# entity type via the predicates above and sorted by class name so that test
# parametrization order is deterministic.
CELLLINE_DATASETS = [
    i[1] for i in sorted(inspect.getmembers(biomedical, predicate=cellline_predicate), key=itemgetter(0))
]
CHEMICAL_DATASETS = [
    i[1] for i in sorted(inspect.getmembers(biomedical, predicate=chemical_predicate), key=itemgetter(0))
]
DISEASE_DATASETS = [
    i[1] for i in sorted(inspect.getmembers(biomedical, predicate=disease_predicate), key=itemgetter(0))
]
GENE_DATASETS = [i[1] for i in sorted(inspect.getmembers(biomedical, predicate=gene_predicate), key=itemgetter(0))]
SPECIES_DATASETS = [
    i[1] for i in sorted(inspect.getmembers(biomedical, predicate=species_predicate), key=itemgetter(0))
]
ALL_DATASETS = CELLLINE_DATASETS + CHEMICAL_DATASETS + DISEASE_DATASETS + GENE_DATASETS + SPECIES_DATASETS
def simple_tokenizer(text: str) -> List[str]:
    """Split *text* into tokens on single spaces and hyphens.

    The separator characters themselves are discarded, and consecutive
    separators never produce empty tokens.

    :param text: input string to tokenize
    :return: list of non-empty tokens
    """
    # The original implementation tracked an `index` variable via enumerate()
    # and incremented it after the loop, but the value was never read -- it was
    # dead code and has been removed.
    tokens: List[str] = []
    word = ""
    for char in text:
        if char == " " or char == "-":
            if word:
                tokens.append(word)
                word = ""
        else:
            word += char
    # flush the trailing token if the text did not end with a separator
    if word:
        tokens.append(word)
    return tokens
def test_write_to_conll():
    """The CoNLL writer must emit BIO tags and a '-' marker on the last token."""
    text = "This is entity1 entity2 and a long entity3"

    def char_span(mention: str):
        # locate *mention* in the document and return its (start, end) offsets
        begin = text.find(mention)
        return begin, begin + len(mention)

    dataset = InternalBioNerDataset(
        documents={"1": text},
        entities_per_document={
            "1": [
                Entity(char_span("entity1"), "E"),
                Entity(char_span("entity2"), "E"),
                Entity(char_span("a long entity3"), "E"),
            ]
        },
    )
    expected_labeling = [
        "This O +",
        "is O +",
        "entity1 B-E +",
        "entity2 B-E +",
        "and O +",
        "a B-E +",
        "long I-E +",
        "entity3 I-E -",
    ]
    assert_conll_writer_output(dataset, expected_labeling)
def test_conll_writer_one_token_multiple_entities1():
    """Overlapping mentions inside one token must collapse to a single B- tag."""
    text = "This is entity1 entity2"
    begin_full = text.find("entity1")
    begin_partial = text.find("tity1")
    begin_second = text.find("entity2")
    dataset = InternalBioNerDataset(
        documents={"1": text},
        entities_per_document={
            "1": [
                Entity((begin_full, begin_full + 2), "E"),
                Entity((begin_partial, begin_partial + 5), "E"),
                Entity((begin_second, begin_second + len("entity2")), "E"),
            ]
        },
    )
    assert_conll_writer_output(dataset, ["This O +", "is O +", "entity1 B-E +", "entity2 B-E -"])
def test_conll_writer_one_token_multiple_entities2():
    """Partial overlapping mentions in one token still yield exactly one B- tag."""
    text = "This is entity1 entity2"
    begin_full = text.find("entity1")
    begin_partial = text.find("tity1")
    dataset = InternalBioNerDataset(
        documents={"1": text},
        entities_per_document={
            "1": [
                Entity((begin_full, begin_full + 2), "E"),
                Entity((begin_partial, begin_partial + 5), "E"),
            ]
        },
    )
    assert_conll_writer_output(dataset, ["This O +", "is O +", "entity1 B-E +", "entity2 O -"])
def assert_conll_writer_output(
    dataset: InternalBioNerDataset,
    expected_output: List[str],
    sentence_splitter: Optional[SentenceSplitter] = None,
):
    """Write *dataset* to a temporary CoNLL file and assert that its non-empty
    (stripped) lines equal *expected_output*.

    Falls back to a whitespace-based NoSentenceSplitter when no splitter is given.
    The temporary file is always removed, even when writing fails.
    """
    fd, outfile_path = tempfile.mkstemp()
    try:
        if not sentence_splitter:
            sentence_splitter = NoSentenceSplitter(tokenizer=SpaceTokenizer())
        writer = CoNLLWriter(sentence_splitter=sentence_splitter)
        writer.write_to_conll(dataset, Path(outfile_path))
        with open(outfile_path) as conll_file:
            contents = [line.strip() for line in conll_file if line.strip()]
    finally:
        os.close(fd)
        os.remove(outfile_path)
    assert contents == expected_output
def test_filter_nested_entities(caplog):
    """filter_nested_entities must drop nested/overlapping mentions in place and
    emit a warning when it modifies the corpus."""
    entities_per_document = {
        "d0": [Entity((0, 1), "t0"), Entity((2, 3), "t1")],
        "d1": [Entity((0, 6), "t0"), Entity((2, 3), "t1"), Entity((4, 5), "t2")],
        "d2": [Entity((0, 3), "t0"), Entity((3, 5), "t1")],
        "d3": [Entity((0, 3), "t0"), Entity((2, 5), "t1"), Entity((4, 7), "t2")],
        "d4": [Entity((0, 4), "t0"), Entity((3, 5), "t1")],
        "d5": [Entity((0, 4), "t0"), Entity((3, 9), "t1")],
        "d6": [Entity((0, 4), "t0"), Entity((2, 6), "t1")],
    }
    target = {
        "d0": [Entity((0, 1), "t0"), Entity((2, 3), "t1")],
        "d1": [Entity((2, 3), "t1"), Entity((4, 5), "t2")],
        "d2": [Entity((0, 3), "t0"), Entity((3, 5), "t1")],
        "d3": [Entity((0, 3), "t0"), Entity((4, 7), "t2")],
        "d4": [Entity((0, 4), "t0")],
        "d5": [Entity((3, 9), "t1")],
        "d6": [Entity((0, 4), "t0")],
    }
    dataset = InternalBioNerDataset(documents={}, entities_per_document=entities_per_document)
    caplog.set_level(logging.WARNING)
    filter_nested_entities(dataset)
    assert "WARNING: Corpus modified by filtering nested entities." in caplog.text
    for key, entities in dataset.entities_per_document.items():
        assert key in target
        # compare by string representation, order-independently
        remaining = sorted(str(entity) for entity in entities)
        expected = sorted(str(entity) for entity in target[key])
        assert remaining == expected
def sanity_check_all_corpora(check: Callable[[ColumnCorpus], None]):
    """Instantiate every HUNER corpus and run *check* on it.

    Note: ALL_DATASETS holds the dataset classes themselves (the module-level
    list comprehensions keep only ``i[1]`` from ``inspect.getmembers``), so each
    element is a single class -- the previous ``for _, CorpusType in ...`` tuple
    unpacking raised a TypeError on the first iteration.
    """
    for CorpusType in tqdm(ALL_DATASETS):
        corpus = CorpusType()
        check(corpus)
@pytest.mark.skip(reason="We skip this test because it's only relevant for development purposes")
@pytest.mark.parametrize("CorpusType", ALL_DATASETS)
def test_sanity_not_starting_with_minus(CorpusType: Type[ColumnCorpus]):
    """No gold entity should begin with a hyphen token."""
    corpus = CorpusType()  # type: ignore[call-arg]
    entities_starting_with_minus = [
        " ".join(token.text for token in entity.tokens)
        for sentence in _iter_dataset(corpus.get_all_sentences())
        for entity in sentence.get_spans("ner")
        if str(entity.tokens[0].text).startswith("-")
    ]
    assert len(entities_starting_with_minus) == 0, "|".join(entities_starting_with_minus)
@pytest.mark.parametrize("CorpusType", ALL_DATASETS)
@pytest.mark.skip(reason="We skip this test because it's only relevant for development purposes")
def test_sanity_no_repeating_Bs(CorpusType: Type[ColumnCorpus]):
    """At most 3 consecutive tokens may carry a B-*/S-* tag.

    A longer run of B/S tags usually indicates broken BIO encoding in the
    dataset reader.
    """
    corpus = CorpusType()  # type: ignore[call-arg]
    longest_repeat_tokens: List[Token] = []
    repeat_tokens: List[Token] = []
    for sentence in _iter_dataset(corpus.get_all_sentences()):
        for token in sentence.tokens:
            if token.get_labels()[0].value.startswith("B") or token.get_labels()[0].value.startswith("S"):
                repeat_tokens.append(token)
            else:
                if len(repeat_tokens) > len(longest_repeat_tokens):
                    longest_repeat_tokens = repeat_tokens
                repeat_tokens = []
        # flush at sentence end: previously a trailing B/S run was neither
        # counted nor reset, so it was missed and leaked into the next sentence
        if len(repeat_tokens) > len(longest_repeat_tokens):
            longest_repeat_tokens = repeat_tokens
        repeat_tokens = []
    assert len(longest_repeat_tokens) < 4
@pytest.mark.parametrize("CorpusType", ALL_DATASETS)
@pytest.mark.skip(reason="We skip this test because it's only relevant for development purposes")
def test_sanity_no_long_entities(CorpusType: Type[ColumnCorpus]):
    """Gold entities should stay below 10 tokens in length."""
    corpus = CorpusType()  # type: ignore[call-arg]
    longest_entity: List[str] = []
    for sentence in _iter_dataset(corpus.get_all_sentences()):
        for entity in sentence.get_spans("ner"):
            entity_tokens = [token.text for token in entity.tokens]
            if len(entity_tokens) > len(longest_entity):
                longest_entity = entity_tokens
    assert len(longest_entity) < 10, " ".join(longest_entity)
@pytest.mark.parametrize("CorpusType", ALL_DATASETS)
@pytest.mark.skip(reason="We skip this test because it's only relevant for development purposes")
def test_sanity_no_unmatched_parentheses(CorpusType: Type[ColumnCorpus]):
    """Entity surface forms should have balanced (), [] and {} brackets."""
    corpus = CorpusType()  # type: ignore[call-arg]
    unbalanced_entities = []
    for sentence in _iter_dataset(corpus.get_all_sentences()):
        for entity in sentence.get_spans("ner"):
            surface_form = "".join(token.text for token in entity.tokens)
            if not has_balanced_parantheses(surface_form):
                unbalanced_entities.append(surface_form)
    assert unbalanced_entities == []
@pytest.mark.parametrize("CorpusType", ALL_DATASETS)
@pytest.mark.skip(reason="We skip this test because it's only relevant for development purposes")
def test_sanity_not_too_many_entities(CorpusType: Type[ColumnCorpus]):
    """On average a sentence should carry at most 5 entity annotations."""
    corpus = CorpusType()  # type: ignore[call-arg]
    entity_counts = [
        len(sentence.get_spans("ner")) for sentence in _iter_dataset(corpus.get_all_sentences())
    ]
    avg_entities_per_sentence = sum(entity_counts) / len(entity_counts)
    assert avg_entities_per_sentence <= 5
@pytest.mark.parametrize("CorpusType", ALL_DATASETS)
@pytest.mark.skip(reason="We skip this test because it's only relevant for development purposes")
def test_sanity_no_misaligned_entities(CorpusType: Type[HunerDataset]):
    """At most 10% of a document's entities may have misaligned offsets.

    Fixes: ``CorpusType`` is already a class, so its name is ``__name__``;
    ``CorpusType.__class__.__name__`` returned the *metaclass* name and thus a
    wrong cache folder. Also, misaligned end offsets were appended to
    ``misaligned_starts``, leaving ``misaligned_ends`` permanently empty.
    """
    dataset_name = CorpusType.__name__.lower()
    base_path = flair.cache_root / "datasets"
    data_folder = base_path / dataset_name
    corpus = CorpusType()
    internal = corpus.to_internal(data_folder)
    for doc_id, _doc_text in internal.documents.items():
        misaligned_starts = []
        misaligned_ends: List[int] = []
        entities = internal.entities_per_document[doc_id]
        entity_starts = [i.char_span.start for i in entities]
        entity_ends = [i.char_span.stop for i in entities]
        # NOTE(review): the membership tests below compare the entity offsets
        # against themselves and can therefore never record a misalignment;
        # they presumably should compare against token boundary offsets of the
        # tokenized document -- TODO confirm the intended reference set.
        for start in entity_starts:
            if start not in entity_starts:
                misaligned_starts.append(start)
        for end in entity_ends:
            if end not in entity_ends:
                misaligned_ends.append(end)
        assert len(misaligned_starts) <= len(entities) // 10
        assert len(misaligned_ends) <= len(entities) // 10
@pytest.mark.skip(reason="We skip this test because it's only relevant for development purposes")
def test_scispacy_tokenization():
    """SciSpacy must split biomedical punctuation patterns as expected."""
    from flair.tokenization import SciSpacyTokenizer

    spacy_tokenizer = SciSpacyTokenizer()

    def assert_tokens(sentence, expected):
        # expected: list of (text, start_position) pairs
        assert len(sentence) == len(expected)
        for token, (text, start) in zip(sentence, expected):
            assert token.text == text
            assert token.start_position == start

    sentence = Sentence("HBeAg(+) patients", use_tokenizer=spacy_tokenizer)
    assert_tokens(
        sentence,
        [("HBeAg", 0), ("(", 5), ("+", 6), (")", 7), ("patients", 9)],
    )

    sentence = Sentence("HBeAg(+)/HBsAg(+)", use_tokenizer=spacy_tokenizer)
    assert_tokens(
        sentence,
        [
            ("HBeAg", 0),
            ("(", 5),
            ("+", 6),
            (")", 7),
            ("/", 8),
            ("HBsAg", 9),
            ("(", 14),
            ("+", 15),
            (")", 16),
        ],
    )

    sentence = Sentence("doxorubicin (DOX)-induced", use_tokenizer=spacy_tokenizer)
    expected_texts = ["doxorubicin", "(", "DOX", ")", "-induced"]
    assert len(sentence) == len(expected_texts)
    for token, text in zip(sentence, expected_texts):
        assert token.text == text
| 13,573 | 34.815303 | 118 | py |
flair | flair-master/tests/test_sentence.py | from flair.data import Sentence
def test_sentence_context():
    """right_context pulls tokens from the linked next sentence, capped at its length."""
    sentence = Sentence("George Washington ging nach Washington.")
    next_sentence = Sentence("Das ist eine schöne Stadt.")
    sentence._next_sentence = next_sentence
    assert sentence.right_context(1) == [next_sentence[0]]
    assert sentence.right_context(10) == next_sentence.tokens[:10]
def test_equality():
    """Distinct Sentence objects never compare equal, even for identical text."""
    german = Sentence("Guten Tag!")
    english = Sentence("Good day!")
    assert german != english

    tokenized = Sentence("Guten Tag!", use_tokenizer=True)
    untokenized = Sentence("Guten Tag!", use_tokenizer=False)
    assert tokenized != untokenized

    # TODO: is this desirable? Or should two sentences with same text be considered same objects?
    assert Sentence("Guten Tag!") != Sentence("Guten Tag!")
def test_token_labeling():
    """Token-level labels aggregate on the sentence and can be replaced or removed."""
    sentence = Sentence("This sentence will be labled")

    def sentence_ner_values():
        return [label.value for label in sentence.get_labels("ner")]

    def per_token_ner():
        return [token.get_label("ner").value for token in sentence]

    assert sentence.get_labels("ner") == []
    assert sentence.get_labels() == []

    sentence[2].add_label("ner", "B-promise")
    sentence[3].add_label("ner", "I-promise")
    sentence[4].add_label("ner", "I-promise")
    assert [label.value for label in sentence.get_labels()] == ["B-promise", "I-promise", "I-promise"]
    assert per_token_ner() == ["O", "O", "B-promise", "I-promise", "I-promise"]

    # set_label replaces the previous value instead of stacking a second one
    sentence[1].set_label("ner", "B-object")
    sentence[1].set_label("ner", "B-subject")
    assert sentence_ner_values() == ["B-subject", "B-promise", "I-promise", "I-promise"]
    assert per_token_ner() == [
        "O",
        "B-subject",
        "B-promise",
        "I-promise",
        "I-promise",
    ]

    sentence.set_label("class", "positive")
    sentence.remove_labels("ner")
    assert sentence.get_labels("ner") == []
    assert [label.value for label in sentence.get_labels()] == ["positive"]

    # multiple add_label calls on one token stack; get_label returns the first
    sentence[0].add_label("pos", "first")
    sentence[0].add_label("pos", "primero")
    sentence[0].add_label("pos", "erstes")
    assert [label.value for label in sentence.get_labels("pos")] == ["first", "primero", "erstes"]
    assert sentence[0].get_label("pos").value == "first"
def test_start_end_position_untokenized() -> None:
    """Token offsets are relative to the sentence text, not to start_position."""
    sentence: Sentence = Sentence("This is a sentence.", start_position=10)
    assert sentence.start_position == 10
    assert sentence.end_position == 29
    expected_offsets = [(0, 4), (5, 7), (8, 9), (10, 18), (18, 19)]
    actual_offsets = [(token.start_position, token.end_position) for token in sentence]
    assert actual_offsets == expected_offsets
def test_start_end_position_pretokenized() -> None:
    """Pre-tokenized construction assumes a single space after every token."""
    # Initializing a Sentence this way assumes that there is a space after each token
    sentence: Sentence = Sentence(["This", "is", "a", "sentence", "."], start_position=10)
    assert sentence.start_position == 10
    assert sentence.end_position == 30
    expected_offsets = [(0, 4), (5, 7), (8, 9), (10, 18), (19, 20)]
    actual_offsets = [(token.start_position, token.end_position) for token in sentence]
    assert actual_offsets == expected_offsets
| 2,992 | 38.381579 | 120 | py |
flair | flair-master/tests/__init__.py | 0 | 0 | 0 | py | |
flair | flair-master/tests/test_multitask.py | import pytest
import flair
from flair.data import Sentence
from flair.datasets import SENTEVAL_CR, SENTEVAL_SST_GRANULAR
from flair.embeddings import TransformerDocumentEmbeddings
from flair.models import MultitaskModel, TextClassifier
from flair.nn.multitask import make_multitask_model_and_corpus
from flair.trainers import ModelTrainer
@pytest.mark.integration()
def test_train_load_use_classifier(results_base_path, tasks_base_path):
    """End-to-end multitask round trip: fine-tune two TextClassifiers that share
    one transformer embedding, save, reload the MultitaskModel, and predict."""
    # --- Embeddings that are shared by both models --- #
    shared_embedding = TransformerDocumentEmbeddings("distilbert-base-uncased", fine_tune=True)
    # --- Task 1: Sentiment Analysis (5-class) --- #
    flair.set_seed(123)
    # Define corpus and model
    corpus_1 = SENTEVAL_SST_GRANULAR().downsample(0.01)
    model_1 = TextClassifier(
        shared_embedding, label_dictionary=corpus_1.make_label_dictionary("class", add_unk=False), label_type="class"
    )
    # -- Task 2: Binary Sentiment Analysis on Customer Reviews -- #
    flair.set_seed(123)
    # Define corpus and model
    corpus_2 = SENTEVAL_CR().downsample(0.01)
    model_2 = TextClassifier(
        shared_embedding,
        label_dictionary=corpus_2.make_label_dictionary("sentiment", add_unk=False),
        label_type="sentiment",
        inverse_model=True,
    )
    # -- Define mapping (which tagger should train on which model) -- #
    multitask_model, multicorpus = make_multitask_model_and_corpus(
        [
            (model_1, corpus_1),
            (model_2, corpus_2),
        ]
    )
    # -- Create model trainer and train -- #
    trainer = ModelTrainer(multitask_model, multicorpus)
    trainer.fine_tune(results_base_path, max_epochs=1)
    # free the training objects before reloading from disk
    del trainer, multitask_model, corpus_1, corpus_2
    loaded_model = MultitaskModel.load(results_base_path / "final-model.pt")
    sentence = Sentence("I love Berlin")
    sentence_empty = Sentence(" ")
    # prediction must handle single sentences, batches, and empty sentences
    loaded_model.predict(sentence)
    loaded_model.predict([sentence, sentence_empty])
    loaded_model.predict([sentence_empty])
    for label in sentence.labels:
        # every predicted label must carry a value and a float score in [0, 1]
        assert label.value is not None
        assert 0.0 <= label.score <= 1.0
        assert type(label.score) is float
    del loaded_model
| 2,209 | 31.5 | 117 | py |
flair | flair-master/tests/test_utils.py | from flair.data import Dictionary
from flair.training_utils import convert_labels_to_one_hot
def test_convert_labels_to_one_hot():
    """A single label maps to a one-hot row in dictionary insertion order."""
    label_dict = Dictionary(add_unk=False)
    for class_name in ("class-1", "class-2", "class-3"):
        label_dict.add_item(class_name)
    one_hot = convert_labels_to_one_hot([["class-2"]], label_dict)
    assert one_hot[0][0] == 0
    assert one_hot[0][1] == 1
    assert one_hot[0][2] == 0
| 440 | 26.5625 | 66 | py |
flair | flair-master/tests/models/test_relation_classifier.py | from operator import itemgetter
from typing import Dict, List, Optional, Set, Tuple
import pytest
from torch.utils.data import Dataset
from flair.data import Relation, Sentence
from flair.datasets import ColumnCorpus, DataLoader
from flair.embeddings import TransformerDocumentEmbeddings
from flair.models import RelationClassifier
from flair.models.relation_classifier_model import (
EncodedSentence,
EncodingStrategy,
EntityMarker,
EntityMarkerPunct,
EntityMask,
TypedEntityMarker,
TypedEntityMarkerPunct,
TypedEntityMask,
)
from tests.model_test_utils import BaseModelTest
# Expected (head, tail) surface forms produced by each EncodingStrategy for the
# seven gold entity-pair candidates of the conllu test corpus; the list order
# matches the order in which the candidates appear in the corpus.
encoding_strategies: Dict[EncodingStrategy, List[Tuple[str, str]]] = {
    EntityMask(): [("[HEAD]", "[TAIL]") for _ in range(7)],
    TypedEntityMask(): [
        ("[HEAD-ORG]", "[TAIL-PER]"),
        ("[HEAD-ORG]", "[TAIL-PER]"),
        ("[HEAD-ORG]", "[TAIL-PER]"),
        ("[HEAD-LOC]", "[TAIL-PER]"),
        ("[HEAD-LOC]", "[TAIL-PER]"),
        ("[HEAD-LOC]", "[TAIL-PER]"),
        ("[HEAD-ORG]", "[TAIL-PER]"),
    ],
    EntityMarker(): [
        ("[HEAD] Google [/HEAD]", "[TAIL] Larry Page [/TAIL]"),
        ("[HEAD] Google [/HEAD]", "[TAIL] Sergey Brin [/TAIL]"),
        ("[HEAD] Microsoft [/HEAD]", "[TAIL] Bill Gates [/TAIL]"),
        ("[HEAD] Berlin [/HEAD]", "[TAIL] Konrad Zuse [/TAIL]"),
        ("[HEAD] Berlin [/HEAD]", "[TAIL] Joseph Weizenbaum [/TAIL]"),
        ("[HEAD] Germany [/HEAD]", "[TAIL] Joseph Weizenbaum [/TAIL]"),
        ("[HEAD] MIT [/HEAD]", "[TAIL] Joseph Weizenbaum [/TAIL]"),
    ],
    TypedEntityMarker(): [
        ("[HEAD-ORG] Google [/HEAD-ORG]", "[TAIL-PER] Larry Page [/TAIL-PER]"),
        ("[HEAD-ORG] Google [/HEAD-ORG]", "[TAIL-PER] Sergey Brin [/TAIL-PER]"),
        ("[HEAD-ORG] Microsoft [/HEAD-ORG]", "[TAIL-PER] Bill Gates [/TAIL-PER]"),
        ("[HEAD-LOC] Berlin [/HEAD-LOC]", "[TAIL-PER] Konrad Zuse [/TAIL-PER]"),
        ("[HEAD-LOC] Berlin [/HEAD-LOC]", "[TAIL-PER] Joseph Weizenbaum [/TAIL-PER]"),
        ("[HEAD-LOC] Germany [/HEAD-LOC]", "[TAIL-PER] Joseph Weizenbaum [/TAIL-PER]"),
        ("[HEAD-ORG] MIT [/HEAD-ORG]", "[TAIL-PER] Joseph Weizenbaum [/TAIL-PER]"),
    ],
    EntityMarkerPunct(): [
        ("@ Google @", "# Larry Page #"),
        ("@ Google @", "# Sergey Brin #"),
        ("@ Microsoft @", "# Bill Gates #"),
        ("@ Berlin @", "# Konrad Zuse #"),
        ("@ Berlin @", "# Joseph Weizenbaum #"),
        ("@ Germany @", "# Joseph Weizenbaum #"),
        ("@ MIT @", "# Joseph Weizenbaum #"),
    ],
    TypedEntityMarkerPunct(): [
        ("@ * ORG * Google @", "# ^ PER ^ Larry Page #"),
        ("@ * ORG * Google @", "# ^ PER ^ Sergey Brin #"),
        ("@ * ORG * Microsoft @", "# ^ PER ^ Bill Gates #"),
        ("@ * LOC * Berlin @", "# ^ PER ^ Konrad Zuse #"),
        ("@ * LOC * Berlin @", "# ^ PER ^ Joseph Weizenbaum #"),
        ("@ * LOC * Germany @", "# ^ PER ^ Joseph Weizenbaum #"),
        ("@ * ORG * MIT @", "# ^ PER ^ Joseph Weizenbaum #"),
    ],
}
class TestRelationClassifier(BaseModelTest):
    """Model tests for RelationClassifier on the small conllu test corpus.

    The corpus annotates ORG/PER pairs as "founded_by" and LOC/PER pairs as
    "place_of_birth"; the pair filter below restricts relation candidates to
    exactly those combinations.
    """
    model_cls = RelationClassifier
    train_label_type = "relation"
    multiclass_prediction_labels = ["apple", "tv"]
    model_args = {
        "entity_label_types": "ner",
        "entity_pair_labels": { # Define valid entity pair combinations, used as relation candidates
            ("ORG", "PER"), # founded_by
            ("LOC", "PER"), # place_of_birth
        },
        "allow_unk_tag": False,
    }
    training_args = {"max_epochs": 2, "learning_rate": 4e-4, "mini_batch_size": 4}
    finetune_instead_of_train = True
    @pytest.fixture()
    def corpus(self, tasks_base_path):
        """Small CoNLL-U corpus; train/dev/test all point at the same file."""
        return ColumnCorpus(
            data_folder=tasks_base_path / "conllu",
            train_file="train.conllup",
            dev_file="train.conllup",
            test_file="train.conllup",
            column_format={1: "text", 2: "pos", 3: "ner"},
        )
    @pytest.fixture()
    def embeddings(self):
        return TransformerDocumentEmbeddings(model="distilbert-base-uncased", layers="-1", fine_tune=True)
    def transform_corpus(self, model, corpus):
        """Relation classification trains on encoded sentences, so the corpus is transformed."""
        return model.transform_corpus(corpus)
    @pytest.fixture()
    def example_sentence(self):
        """Sentence with one ORG and one PER span, i.e. one relation candidate."""
        sentence = Sentence(["Microsoft", "was", "found", "by", "Bill", "Gates"])
        sentence[:1].add_label(typename="ner", value="ORG", score=1.0)
        sentence[4:].add_label(typename="ner", value="PER", score=1.0)
        return sentence
    @pytest.fixture()
    def train_test_sentence(self):
        """Sentence with one ORG and two PER spans -> two founded_by candidates."""
        sentence: Sentence = Sentence(
            [
                "Intel",
                "was",
                "founded",
                "on",
                "July",
                "18",
                ",",
                "1968",
                ",",
                "by",
                "semiconductor",
                "pioneers",
                "Gordon",
                "Moore",
                "and",
                "Robert",
                "Noyce",
                ".",
            ]
        )
        sentence[:1].add_label(typename="ner", value="ORG", score=1.0) # Intel -> ORG
        sentence[12:14].add_label(typename="ner", value="PER", score=1.0) # Gordon Moore -> PER
        sentence[15:17].add_label(typename="ner", value="PER", score=1.0) # Robert Noyce -> PER
        return sentence
    def assert_training_example(self, predicted_training_example):
        """The trained model must predict founded_by for both ORG/PER pairs."""
        relations: List[Relation] = predicted_training_example.get_relations("relation")
        assert len(relations) == 2
        # Intel ----founded_by---> Gordon Moore
        assert [label.value for label in relations[0].labels] == ["founded_by"]
        assert (
            relations[0].unlabeled_identifier
            == Relation(
                first=predicted_training_example[:1], second=predicted_training_example[12:14]
            ).unlabeled_identifier
        )
        # Intel ----founded_by---> Robert Noyce
        assert [label.value for label in relations[1].labels] == ["founded_by"]
        assert (
            relations[1].unlabeled_identifier
            == Relation(
                first=predicted_training_example[:1], second=predicted_training_example[15:17]
            ).unlabeled_identifier
        )
    @staticmethod
    def check_transformation_correctness(
        split: Optional[Dataset],
        ground_truth: Set[Tuple[str, Tuple[str, ...]]],
    ) -> None:
        """Assert that *split* contains exactly the encoded sentences in *ground_truth*."""
        # Ground truth is a set of tuples of (<Sentence Text>, <Relation Label Values>)
        assert split is not None
        data_loader = DataLoader(split, batch_size=1)
        assert all(isinstance(sentence, EncodedSentence) for sentence in map(itemgetter(0), data_loader))
        assert {
            (sentence.to_tokenized_string(), tuple(label.value for label in sentence.get_labels("relation")))
            for sentence in map(itemgetter(0), data_loader)
        } == ground_truth
    @pytest.mark.parametrize(
        "cross_augmentation", [True, False], ids=["with_cross_augmentation", "without_cross_augmentation"]
    )
    @pytest.mark.parametrize(
        ("encoding_strategy", "encoded_entity_pairs"),
        encoding_strategies.items(),
        ids=[type(encoding_strategy).__name__ for encoding_strategy in encoding_strategies],
    )
    def test_transform_corpus(
        self,
        corpus: ColumnCorpus,
        embeddings: TransformerDocumentEmbeddings,
        cross_augmentation: bool,
        encoding_strategy: EncodingStrategy,
        encoded_entity_pairs: List[Tuple[str, str]],
    ) -> None:
        """Corpus transformation must produce the expected encoded sentences for
        every encoding strategy, with and without cross augmentation."""
        label_dictionary = corpus.make_label_dictionary("relation")
        model: RelationClassifier = self.build_model(
            embeddings, label_dictionary, cross_augmentation=cross_augmentation, encoding_strategy=encoding_strategy
        )
        transformed_corpus = model.transform_corpus(corpus)
        # Check sentence masking and relation label annotation on
        # training, validation and test dataset (in this test the splits are the same)
        ground_truth: Set[Tuple[str, Tuple[str, ...]]] = {
            # Entity pair permutations of: "Larry Page and Sergey Brin founded Google ."
            (f"{encoded_entity_pairs[0][1]} and Sergey Brin founded {encoded_entity_pairs[0][0]} .", ("founded_by",)),
            (f"Larry Page and {encoded_entity_pairs[1][1]} founded {encoded_entity_pairs[1][0]} .", ("founded_by",)),
            # Entity pair permutations of: "Microsoft was founded by Bill Gates ."
            (f"{encoded_entity_pairs[2][0]} was founded by {encoded_entity_pairs[2][1]} .", ("founded_by",)),
            # Entity pair permutations of: "Konrad Zuse was born in Berlin on 22 June 1910 ."
            (
                f"{encoded_entity_pairs[3][1]} was born in {encoded_entity_pairs[3][0]} on 22 June 1910 .",
                ("place_of_birth",),
            ),
            # Entity pair permutations of: "Joseph Weizenbaum , a professor at MIT , was born in Berlin , Germany."
            (
                f"{encoded_entity_pairs[4][1]} , a professor at MIT , "
                f"was born in {encoded_entity_pairs[4][0]} , Germany .",
                ("place_of_birth",),
            ),
            (
                f"{encoded_entity_pairs[5][1]} , a professor at MIT , "
                f"was born in Berlin , {encoded_entity_pairs[5][0]} .",
                ("place_of_birth",),
            ),
        }
        if cross_augmentation:
            # This sentence is only included if we transform the corpus with cross augmentation
            ground_truth.add(
                (
                    f"{encoded_entity_pairs[6][1]} , a professor at {encoded_entity_pairs[6][0]} , "
                    f"was born in Berlin , Germany .",
                    ("O",),
                )
            )
        for split in (transformed_corpus.train, transformed_corpus.dev, transformed_corpus.test):
            self.check_transformation_correctness(split, ground_truth)
| 9,948 | 40.627615 | 118 | py |
flair | flair-master/tests/models/test_sequence_tagger.py | import pytest
import flair
from flair.embeddings import FlairEmbeddings, WordEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
from tests.model_test_utils import BaseModelTest
class TestSequenceTagger(BaseModelTest):
    """Model tests for SequenceTagger on the small fashion NER corpus."""
    model_cls = SequenceTagger
    pretrained_model = "ner-fast"
    train_label_type = "ner"
    training_args = {
        "max_epochs": 2,
        "learning_rate": 0.1,
        "mini_batch_size": 2,
    }
    model_args = {
        "hidden_size": 64,
        "use_crf": False,
    }
    def has_embedding(self, sentence):
        """True iff every token of *sentence* carries a non-empty embedding."""
        return all(token.get_embedding().cpu().numpy().size != 0 for token in sentence)
    def build_model(self, embeddings, label_dict, **kwargs):
        """Build a SequenceTagger; explicit kwargs override the class defaults."""
        model_args = dict(self.model_args)
        # drop defaults that the caller overrides to avoid duplicate kwargs
        for k in kwargs:
            if k in model_args:
                del model_args[k]
        return self.model_cls(
            embeddings=embeddings,
            tag_dictionary=label_dict,
            tag_type=self.train_label_type,
            **model_args,
            **kwargs,
        )
    @pytest.fixture()
    def embeddings(self):
        return WordEmbeddings("turian")
    @pytest.fixture()
    def corpus(self, tasks_base_path):
        return flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
    @pytest.mark.integration()
    def test_all_tag_proba_embedding(self, example_sentence, loaded_pretrained_model):
        """Per-token tag distributions must cover the full label dictionary and sum to 1."""
        loaded_pretrained_model.predict(example_sentence, return_probabilities_for_all_classes=True)
        for token in example_sentence:
            assert len(token.get_tags_proba_dist(loaded_pretrained_model.label_type)) == len(
                loaded_pretrained_model.label_dictionary
            )
            score_sum = 0.0
            for label in token.get_tags_proba_dist(loaded_pretrained_model.label_type):
                assert label.data_point == token
                score_sum += label.score
            assert abs(score_sum - 1.0) < 1.0e-5
    @pytest.mark.integration()
    def test_force_token_predictions(self, example_sentence, loaded_pretrained_model):
        """force_token_predictions must attach tags to individual tokens."""
        loaded_pretrained_model.predict(example_sentence, force_token_predictions=True)
        assert example_sentence.get_token(3).text == "Berlin"
        assert example_sentence.get_token(3).tag == "S-LOC"
    @pytest.mark.integration()
    def test_train_load_use_tagger_flair_embeddings(self, results_base_path, corpus, example_sentence):
        """Train with FlairEmbeddings, save, reload, and predict."""
        tag_dictionary = corpus.make_label_dictionary("ner", add_unk=False)
        model = self.build_model(FlairEmbeddings("news-forward-fast"), tag_dictionary)
        trainer = ModelTrainer(model, corpus)
        trainer.train(results_base_path, shuffle=False, **self.training_args)
        del trainer, model, tag_dictionary, corpus
        loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
        loaded_model.predict(example_sentence)
        loaded_model.predict([example_sentence, self.empty_sentence])
        loaded_model.predict([self.empty_sentence])
        del loaded_model
    @pytest.mark.integration()
    def test_train_load_use_tagger_with_trainable_hidden_state(
        self, embeddings, results_base_path, corpus, example_sentence
    ):
        """Train with a trainable initial RNN hidden state, then reload and predict."""
        tag_dictionary = corpus.make_label_dictionary("ner", add_unk=False)
        model = self.build_model(embeddings, tag_dictionary, train_initial_hidden_state=True)
        trainer = ModelTrainer(model, corpus)
        trainer.train(results_base_path, shuffle=False, **self.training_args)
        del trainer, model, tag_dictionary, corpus
        loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
        loaded_model.predict(example_sentence)
        loaded_model.predict([example_sentence, self.empty_sentence])
        loaded_model.predict([self.empty_sentence])
        del loaded_model
    @pytest.mark.integration()
    def test_train_load_use_tagger_disjunct_tags(
        self, results_base_path, tasks_base_path, embeddings, example_sentence
    ):
        """Train on a corpus whose dev/test tags are disjunct from train, allowing <unk>."""
        corpus = flair.datasets.ColumnCorpus(
            data_folder=tasks_base_path / "fashion_disjunct",
            column_format={0: "text", 3: "ner"},
        )
        tag_dictionary = corpus.make_label_dictionary("ner", add_unk=True)
        model = self.build_model(embeddings, tag_dictionary, allow_unk_predictions=True)
        trainer = ModelTrainer(model, corpus)
        trainer.train(results_base_path, shuffle=False, **self.training_args)
        del trainer, model, tag_dictionary, corpus
        loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
        loaded_model.predict(example_sentence)
        loaded_model.predict([example_sentence, self.empty_sentence])
        loaded_model.predict([self.empty_sentence])
        del loaded_model
| 4,859 | 38.193548 | 120 | py |
flair | flair-master/tests/models/test_relation_extractor.py | import pytest
from flair.data import Sentence
from flair.datasets import ColumnCorpus
from flair.embeddings import TransformerWordEmbeddings
from flair.models import RelationExtractor
from tests.model_test_utils import BaseModelTest
class TestRelationExtractor(BaseModelTest):
    """Model tests for RelationExtractor on the small conllu test corpus."""
    model_cls = RelationExtractor
    train_label_type = "relation"
    pretrained_model = "relations"
    model_args = {
        "entity_label_type": "ner",
        "train_on_gold_pairs_only": True,
        "entity_pair_filters": { # Define valid entity pair combinations, used as relation candidates
            ("ORG", "PER"), # founded_by
            ("LOC", "PER"), # place_of_birth
        },
    }
    training_args = {
        "max_epochs": 4,
        "mini_batch_size": 2,
        "learning_rate": 0.1,
    }
    @pytest.fixture()
    def corpus(self, tasks_base_path):
        """Small CoNLL-U corpus; train/dev/test all point at the same file."""
        return ColumnCorpus(
            data_folder=tasks_base_path / "conllu",
            train_file="train.conllup",
            dev_file="train.conllup",
            test_file="train.conllup",
            column_format={1: "text", 2: "pos", 3: "ner"},
        )
    @pytest.fixture()
    def example_sentence(self):
        """Sentence with one ORG and one PER span, i.e. one relation candidate."""
        sentence = Sentence(["Microsoft", "was", "found", "by", "Bill", "Gates"])
        sentence[:1].add_label(typename="ner", value="ORG", score=1.0)
        sentence[4:].add_label(typename="ner", value="PER", score=1.0)
        return sentence
    @pytest.fixture()
    def train_test_sentence(self):
        """Sentence used to verify predictions after training."""
        sentence = Sentence(["Apple", "was", "founded", "by", "Steve", "Jobs", "."])
        sentence[0:1].add_label("ner", "ORG")
        sentence[4:6].add_label("ner", "PER")
        return sentence
    @pytest.fixture()
    def embeddings(self):
        return TransformerWordEmbeddings(model="distilbert-base-uncased", fine_tune=True)
    def assert_training_example(self, predicted_training_example):
        """The trained model must predict exactly one founded_by relation."""
        relations = predicted_training_example.get_relations("relation")
        assert len(relations) == 1
        assert relations[0].tag == "founded_by"
    def has_embedding(self, sentence):
        """True iff every token of *sentence* carries a non-empty embedding."""
        return all(token.get_embedding().cpu().numpy().size != 0 for token in sentence)
| 2,183 | 33.666667 | 102 | py |
flair | flair-master/tests/models/test_tars_classifier.py | import pytest
from flair.data import Sentence
from flair.datasets import ClassificationCorpus
from flair.embeddings import TransformerDocumentEmbeddings
from flair.models import TARSClassifier
from tests.model_test_utils import BaseModelTest
class TestTarsClassifier(BaseModelTest):
    """Model tests for TARSClassifier, including zero-shot prediction and task switching."""
    model_cls = TARSClassifier
    train_label_type = "class"
    model_args = {"task_name": "2_CLASS"}
    training_args = {"mini_batch_size": 1, "max_epochs": 2}
    pretrained_model = "tars-base"
    @pytest.fixture()
    def corpus(self, tasks_base_path):
        return ClassificationCorpus(tasks_base_path / "imdb_underscore")
    @pytest.fixture()
    def embeddings(self):
        return TransformerDocumentEmbeddings("distilbert-base-uncased")
    @pytest.fixture()
    def example_sentence(self):
        return Sentence("This is great!")
    def build_model(self, embeddings, label_dict, **kwargs):
        """Build a TARSClassifier; explicit kwargs override the class defaults.

        Note: TARS defines its labels per task, so label_dict is not passed here
        (see transform_corpus below).
        """
        model_args = dict(self.model_args)
        # drop defaults that the caller overrides to avoid duplicate kwargs
        for k in kwargs:
            if k in model_args:
                del model_args[k]
        return self.model_cls(
            embeddings=embeddings,
            label_type=self.train_label_type,
            **model_args,
            **kwargs,
        )
    def transform_corpus(self, model, corpus):
        """Register the corpus labels as a TARS task before training on it."""
        model.add_and_switch_to_new_task(
            task_name="2_CLASS",
            label_dictionary=corpus.make_label_dictionary(self.train_label_type),
            label_type=self.train_label_type,
        )
        return corpus
    @pytest.mark.integration()
    def test_predict_zero_shot(self, loaded_pretrained_model):
        """Zero-shot prediction must pick the matching candidate label."""
        sentence = Sentence("I am so glad you liked it!")
        loaded_pretrained_model.predict_zero_shot(sentence, ["happy", "sad"])
        assert len(sentence.get_labels(loaded_pretrained_model.label_type)) == 1
        assert sentence.get_labels(loaded_pretrained_model.label_type)[0].value == "happy"
    @pytest.mark.integration()
    def test_predict_zero_shot_single_label_always_predicts(self, loaded_pretrained_model):
        """With multi_label=False, zero-shot must always emit exactly one label."""
        sentence = Sentence("I hate it")
        loaded_pretrained_model.predict_zero_shot(sentence, ["happy", "sad"])
        # Ensure this is an example that predicts no classes in multilabel
        assert len(sentence.get_labels(loaded_pretrained_model.label_type)) == 0
        loaded_pretrained_model.predict_zero_shot(sentence, ["happy", "sad"], multi_label=False)
        assert len(sentence.get_labels(loaded_pretrained_model.label_type)) == 1
        assert sentence.get_labels(loaded_pretrained_model.label_type)[0].value == "sad"
    @pytest.mark.integration()
    def test_init_tars_and_switch(self, tasks_base_path, corpus):
        """Task switching must accept str, list, set and Dictionary label sources."""
        tars = TARSClassifier(
            task_name="2_CLASS",
            label_dictionary=corpus.make_label_dictionary(label_type="class"),
            label_type="class",
        )
        # check if right number of classes
        assert len(tars.get_current_label_dictionary()) == 2
        # switch to task with only one label
        tars.add_and_switch_to_new_task("1_CLASS", "one class", "testlabel")
        # check if right number of classes
        assert len(tars.get_current_label_dictionary()) == 1
        # switch to task with three labels provided as list
        tars.add_and_switch_to_new_task("3_CLASS", ["list 1", "list 2", "list 3"], "testlabel")
        # check if right number of classes
        assert len(tars.get_current_label_dictionary()) == 3
        # switch to task with four labels provided as set
        tars.add_and_switch_to_new_task("4_CLASS", {"set 1", "set 2", "set 3", "set 4"}, "testlabel")
        # check if right number of classes
        assert len(tars.get_current_label_dictionary()) == 4
        # switch to task with two labels provided as Dictionary
        tars.add_and_switch_to_new_task("2_CLASS_AGAIN", corpus.make_label_dictionary(label_type="class"), "testlabel")
        # check if right number of classes
        assert len(tars.get_current_label_dictionary()) == 2
    @pytest.mark.skip("embeddings are not supported in tars")
    def test_load_use_model_keep_embedding(self):
        pass
    @pytest.mark.skip("tars needs additional setup after loading")
    def test_load_use_model(self):
        pass
| 4,232 | 38.194444 | 119 | py |
flair | flair-master/tests/models/test_text_regressor.py | import pytest
import flair
from flair.embeddings import DocumentRNNEmbeddings, WordEmbeddings
from flair.models.text_regression_model import TextRegressor
from tests.model_test_utils import BaseModelTest
class TestTextRegressor(BaseModelTest):
    """Runs the shared BaseModelTest suite against the TextRegressor model."""

    model_cls = TextRegressor
    train_label_type = "regression"
    training_args = {
        "max_epochs": 3,
        "mini_batch_size": 2,
        "learning_rate": 0.1,
        "main_evaluation_metric": ("correlation", "pearson"),
    }

    def build_model(self, embeddings, label_dict, **kwargs):
        """Create a regressor; the label dictionary is irrelevant for regression."""
        return self.model_cls(embeddings, self.train_label_type)

    @pytest.fixture()
    def embeddings(self):
        """Document embeddings: a small RNN stacked on turian word vectors."""
        return DocumentRNNEmbeddings([WordEmbeddings("turian")], 128, 1, False, 64, False, False)

    @pytest.fixture()
    def corpus(self, tasks_base_path):
        corpus_path = tasks_base_path / "regression"
        return flair.datasets.ClassificationCorpus(corpus_path, label_type=self.train_label_type)
| 1,016 | 31.806452 | 116 | py |
flair | flair-master/tests/models/test_entity_linker.py | import pytest
from flair.data import Sentence
from flair.datasets import NEL_ENGLISH_AIDA
from flair.embeddings import TransformerWordEmbeddings
from flair.models import EntityLinker
from tests.model_test_utils import BaseModelTest
class TestEntityLinker(BaseModelTest):
    """Runs the shared BaseModelTest suite against the EntityLinker model."""

    model_cls = EntityLinker
    train_label_type = "nel"
    training_args = {"max_epochs": 2}

    @staticmethod
    def _nel_sentence():
        """Build the sentence shared by both sentence fixtures, with gold NEL spans."""
        sentence = Sentence("I love NYC and hate OYC")
        sentence[2:3].add_label("nel", "New York City")
        sentence[5:6].add_label("nel", "Old York City")
        return sentence

    @pytest.fixture()
    def embeddings(self):
        return TransformerWordEmbeddings(model="distilbert-base-uncased", layers="-1", fine_tune=True)

    @pytest.fixture()
    def corpus(self, tasks_base_path):
        # heavily downsampled AIDA corpus keeps the test fast
        return NEL_ENGLISH_AIDA().downsample(0.01)

    @pytest.fixture()
    def train_test_sentence(self):
        return self._nel_sentence()

    @pytest.fixture()
    def labeled_sentence(self):
        return self._nel_sentence()
| 1,132 | 28.815789 | 102 | py |
flair | flair-master/tests/models/__init__.py | 0 | 0 | 0 | py | |
flair | flair-master/tests/models/test_tars_ner.py | import pytest
import flair
from flair.data import Sentence
from flair.embeddings import TransformerWordEmbeddings
from flair.models import TARSTagger
from tests.model_test_utils import BaseModelTest
class TestTarsTagger(BaseModelTest):
    """Model-level tests for TARSTagger, driven by the shared BaseModelTest suite."""

    model_cls = TARSTagger
    train_label_type = "ner"
    model_args = {"task_name": "2_NER"}
    training_args = {"mini_batch_size": 1, "max_epochs": 2}
    pretrained_model = "tars-ner"

    @pytest.fixture()
    def corpus(self, tasks_base_path):
        """Small column-format NER corpus used by the training tests."""
        return flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})

    @pytest.fixture()
    def embeddings(self):
        return TransformerWordEmbeddings("distilbert-base-uncased")

    @pytest.fixture()
    def example_sentence(self):
        return Sentence("George Washington was born in Washington")

    def build_model(self, embeddings, label_dict, **kwargs):
        """Instantiate the TARS tagger; explicit kwargs win over the class defaults."""
        # keep only those default args that the caller did not override
        defaults = {key: value for key, value in self.model_args.items() if key not in kwargs}
        return self.model_cls(
            embeddings=embeddings,
            label_type=self.train_label_type,
            **defaults,
            **kwargs,
        )

    def transform_corpus(self, model, corpus):
        """Register the corpus labels as a new TARS task before training."""
        label_dictionary = corpus.make_label_dictionary(self.train_label_type)
        model.add_and_switch_to_new_task(
            task_name="2_NER",
            label_dictionary=label_dictionary,
            label_type=self.train_label_type,
        )
        return corpus

    @pytest.mark.integration()
    def test_predict_zero_shot(self, loaded_pretrained_model):
        sentence = Sentence("George Washington was born in Washington")
        loaded_pretrained_model.predict_zero_shot(sentence, ["location", "person"])
        predicted = sentence.get_labels("location-person")
        assert len(predicted) == 2
        assert sorted(label.value for label in predicted) == [
            "location",
            "person",
        ]

    @pytest.mark.integration()
    def test_init_tars_and_switch(self, tasks_base_path, corpus):
        tars = TARSTagger(
            task_name="2_NER",
            label_dictionary=corpus.make_label_dictionary(label_type="ner"),
            label_type="ner",
        )
        # initial NER task carries ten labels
        assert len(tars.get_current_label_dictionary()) == 10
        # switch through tasks whose labels come in every supported form
        for task_name, labels, expected_size in (
            ("1_CLASS", "one class", 1),  # single label as plain string
            ("3_CLASS", ["list 1", "list 2", "list 3"], 3),  # labels as list
            ("4_CLASS", {"set 1", "set 2", "set 3", "set 4"}, 4),  # labels as set
            ("2_CLASS_AGAIN", corpus.make_label_dictionary(label_type="ner"), 10),  # labels as Dictionary
        ):
            tars.add_and_switch_to_new_task(task_name, labels, "testlabel")
            assert len(tars.get_current_label_dictionary()) == expected_size

    @pytest.mark.skip("embeddings are not supported in tars")
    def test_load_use_model_keep_embedding(self):
        pass

    @pytest.mark.skip("tars needs additional setup after loading")
    def test_load_use_model(self):
        pass
| 3,630 | 34.950495 | 120 | py |
flair | flair-master/tests/models/test_text_classifier.py | import pytest
import flair.datasets
from flair.data import Sentence
from flair.embeddings import DocumentRNNEmbeddings, FlairEmbeddings, WordEmbeddings
from flair.models import TextClassifier
from flair.samplers import ImbalancedClassificationDatasetSampler
from flair.trainers import ModelTrainer
from tests.model_test_utils import BaseModelTest
class TestTextClassifier(BaseModelTest):
    """Model-level tests for TextClassifier, driven by the shared BaseModelTest suite."""

    model_cls = TextClassifier
    pretrained_model = "sentiment"
    train_label_type = "topic"
    multiclass_prediction_labels = ["apple", "tv"]
    training_args = {
        "max_epochs": 4,
    }

    @pytest.fixture()
    def embeddings(self):
        turian_embeddings = WordEmbeddings("turian")
        document_embeddings = DocumentRNNEmbeddings([turian_embeddings], 128, 1, False, 64, False, False)
        return document_embeddings

    @pytest.fixture()
    def corpus(self, tasks_base_path):
        return flair.datasets.ClassificationCorpus(tasks_base_path / "imdb", label_type="topic")

    @pytest.fixture()
    def multiclass_train_test_sentence(self):
        return Sentence("apple tv")

    @pytest.fixture()
    def multi_class_corpus(self, tasks_base_path):
        return flair.datasets.ClassificationCorpus(tasks_base_path / "multi_class", label_type="topic")

    def _train_predict_and_reload(
        self, results_base_path, corpus, embeddings, example_sentence, train_test_sentence, **train_kwargs
    ):
        """Shared procedure for the two training tests below.

        Trains a fresh classifier for two epochs, checks that predicted labels are
        well-formed probabilities, then reloads the saved model and predicts on
        regular and empty sentences. Extra ``train_kwargs`` are forwarded to
        ``ModelTrainer.train`` (e.g. a sampler). Callers are expected to have
        seeded flair already so results stay reproducible.
        """
        label_dict = corpus.make_label_dictionary(label_type=self.train_label_type)
        model = self.model_cls(embeddings=embeddings, label_dictionary=label_dict, label_type=self.train_label_type)
        trainer = ModelTrainer(model, corpus)
        trainer.train(results_base_path, max_epochs=2, shuffle=False, **train_kwargs)
        model.predict(train_test_sentence)
        for label in train_test_sentence.get_labels(self.train_label_type):
            assert label.value is not None
            assert 0.0 <= label.score <= 1.0
            assert isinstance(label.score, float)
        del trainer, model, corpus
        # the saved model must load and predict, including on empty sentences
        loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
        loaded_model.predict(example_sentence)
        loaded_model.predict([example_sentence, self.empty_sentence])
        loaded_model.predict([self.empty_sentence])

    @pytest.mark.integration()
    def test_train_load_use_classifier_with_sampler(
        self, results_base_path, corpus, embeddings, example_sentence, train_test_sentence
    ):
        flair.set_seed(123)
        self._train_predict_and_reload(
            results_base_path,
            corpus,
            embeddings,
            example_sentence,
            train_test_sentence,
            sampler=ImbalancedClassificationDatasetSampler,
        )

    @pytest.mark.integration()
    def test_predict_with_prob(self, example_sentence, loaded_pretrained_model):
        loaded_pretrained_model.predict(example_sentence, return_probabilities_for_all_classes=True)
        assert len(example_sentence.get_labels(loaded_pretrained_model.label_type)) == len(
            loaded_pretrained_model.label_dictionary
        )
        # scores over all classes form a distribution and must sum to ~1
        # (fix: pass a generator to sum() instead of building a throwaway list)
        assert (
            sum(label.score for label in example_sentence.get_labels(loaded_pretrained_model.label_type)) > 1 - 1e-5
        )

    @pytest.mark.integration()
    def test_train_load_use_classifier_flair(self, results_base_path, corpus, example_sentence, train_test_sentence):
        flair.set_seed(123)
        # seed before constructing the embeddings so RNN initialization is reproducible
        embeddings = DocumentRNNEmbeddings([FlairEmbeddings("news-forward-fast")], 128, 1, False, 64, False, False)
        self._train_predict_and_reload(results_base_path, corpus, embeddings, example_sentence, train_test_sentence)
| 4,076 | 39.77 | 118 | py |
flair | flair-master/tests/models/test_word_tagger.py | import pytest
import flair
from flair.embeddings import TransformerWordEmbeddings
from flair.models import TokenClassifier
from tests.model_test_utils import BaseModelTest
class TestWordTagger(BaseModelTest):
    """Model-level tests for the token-level TokenClassifier (POS tagging setup)."""

    model_cls = TokenClassifier
    train_label_type = "pos"
    training_args = {
        "max_epochs": 2,
        "learning_rate": 0.1,
        "mini_batch_size": 2,
    }

    def has_embedding(self, sentence):
        """Return True iff every token in the sentence carries a non-empty embedding.

        Fix: the previous implementation returned ``None`` (falsy) when all tokens
        were embedded, so the method could only ever yield falsy values and the
        success case was indistinguishable from the failure case.
        """
        for token in sentence:
            if token.get_embedding().cpu().numpy().size == 0:
                return False
        return True

    def build_model(self, embeddings, label_dict, **kwargs):
        """Instantiate the token classifier; explicit kwargs override the class defaults."""
        # keep only those default args that the caller did not override
        model_args = {k: v for k, v in self.model_args.items() if k not in kwargs}
        return self.model_cls(
            embeddings=embeddings,
            label_dictionary=label_dict,
            label_type=self.train_label_type,
            **model_args,
            **kwargs,
        )

    @pytest.fixture()
    def corpus(self, tasks_base_path):
        return flair.datasets.UD_ENGLISH(tasks_base_path)

    @pytest.fixture()
    def embeddings(self):
        return TransformerWordEmbeddings("distilbert-base-uncased")
| 1,212 | 26.568182 | 67 | py |
flair | flair-master/tests/embeddings/test_word_embeddings.py | from typing import Any, Dict
from flair.embeddings import MuseCrosslingualEmbeddings, NILCEmbeddings, WordEmbeddings
from tests.embedding_test_utils import BaseEmbeddingsTest
class TestWordEmbeddings(BaseEmbeddingsTest):
    """Generic embedding-suite tests for classic (static) word embeddings."""

    embedding_cls = WordEmbeddings
    # token-level only: one vector per token, no document-level vector
    is_token_embedding = True
    is_document_embedding = False
    default_args = {"embeddings": "turian"}
    # constructor argument that carries the embedding name, plus names that must fail to load
    name_field = "embeddings"
    invalid_names = ["other", "not/existing/path/to/embeddings"]
class TestMuseCrosslingualEmbeddings(BaseEmbeddingsTest):
    """Generic embedding-suite tests for MUSE cross-lingual word embeddings."""

    embedding_cls = MuseCrosslingualEmbeddings
    is_token_embedding = True
    is_document_embedding = False
    # no constructor arguments required
    default_args: Dict[str, Any] = {}
class TestNILCEmbeddings(BaseEmbeddingsTest):
    """Generic embedding-suite tests for NILC (Portuguese) word embeddings."""

    embedding_cls = NILCEmbeddings
    is_token_embedding = True
    is_document_embedding = False
    default_args = {"embeddings": "fasttext", "model": "cbow", "size": 50}
    # additionally exercise the glove variant of the NILC collection
    valid_args = [{"embeddings": "glove"}]
    name_field = "embeddings"
    invalid_names = ["other", "not/existing/path/to/embeddings"]
| 1,033 | 30.333333 | 87 | py |
flair | flair-master/tests/embeddings/test_byte_pair_embeddings.py | from flair.embeddings import BytePairEmbeddings
from tests.embedding_test_utils import BaseEmbeddingsTest
class TestBytePairEmbeddings(BaseEmbeddingsTest):
    """Generic embedding-suite tests for byte-pair-encoding token embeddings."""

    embedding_cls = BytePairEmbeddings
    is_token_embedding = True
    is_document_embedding = False
    default_args = {"language": "en"}
| 299 | 29 | 57 | py |
flair | flair-master/tests/embeddings/test_transformer_word_embeddings.py | import importlib.util
import warnings
import pytest
import torch
from PIL import Image
from transformers.utils import is_detectron2_available
from flair.data import BoundingBox, Dictionary, Sentence
from flair.embeddings import TransformerJitWordEmbeddings, TransformerWordEmbeddings
from flair.models import SequenceTagger
from tests.embedding_test_utils import BaseEmbeddingsTest
class TestTransformerWordEmbeddings(BaseEmbeddingsTest):
    """Embedding-level tests for token-level transformer embeddings.

    Runs the generic BaseEmbeddingsTest suite with the configurations below, plus
    targeted tests for JIT tracing, FLERT context expansion, the LayoutLM model
    family, tokenizer persistence, long-input handling and ONNX export.
    """

    embedding_cls = TransformerWordEmbeddings
    is_token_embedding = True
    is_document_embedding = False
    default_args = {"model": "distilbert-base-uncased", "allow_long_sentences": False}
    # layer selections and pooling variants exercised by the generic suite
    valid_args = [
        {"layers": "-1,-2,-3,-4", "layer_mean": False},
        {"layers": "all", "layer_mean": True},
        {"layers": "all", "layer_mean": False},
        {"layers": "all", "layer_mean": True, "subtoken_pooling": "mean"},
    ]
    name_field = "embeddings"
    invalid_names = ["other", "not/existing/path/to/embeddings"]

    @pytest.mark.integration()
    def test_transformer_jit_embeddings(self, results_base_path):
        """A traced (TorchScript) embedding must match the eager one and survive save/load."""
        base_embeddings = TransformerWordEmbeddings(
            "distilbert-base-uncased", layers="-1,-2,-3,-4", layer_mean=False, allow_long_sentences=True
        )
        sentence: Sentence = Sentence("I love Berlin, but Vienna is where my hearth is.")

        class JitWrapper(torch.nn.Module):
            # thin wrapper so torch.jit.trace sees a plain tensors-in/tensor-out forward
            def __init__(self, embedding: TransformerWordEmbeddings) -> None:
                super().__init__()
                self.embedding = embedding

            def forward(
                self,
                input_ids: torch.Tensor,
                token_lengths: torch.LongTensor,
                attention_mask: torch.Tensor,
                overflow_to_sample_mapping: torch.Tensor,
                word_ids: torch.Tensor,
            ):
                return self.embedding.forward(
                    input_ids=input_ids,
                    token_lengths=token_lengths,
                    attention_mask=attention_mask,
                    overflow_to_sample_mapping=overflow_to_sample_mapping,
                    word_ids=word_ids,
                )["token_embeddings"]

        # reference: embed eagerly and remember one token's vector
        base_embeddings.embed(sentence)
        base_token_embedding = sentence[5].get_embedding().clone()
        sentence.clear_embeddings()
        tensors = base_embeddings.prepare_tensors([sentence])
        # ensure that the prepared tensors is what we expect
        assert sorted(tensors.keys()) == [
            "attention_mask",
            "input_ids",
            "overflow_to_sample_mapping",
            "token_lengths",
            "word_ids",
        ]
        wrapper = JitWrapper(base_embeddings)
        parameter_names, parameter_list = TransformerJitWordEmbeddings.parameter_to_list(
            base_embeddings, wrapper, [sentence]
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            script_module = torch.jit.trace(wrapper, parameter_list)
        jit_embeddings = TransformerJitWordEmbeddings.create_from_embedding(
            script_module, base_embeddings, parameter_names
        )
        # traced embedding must reproduce the eager vector
        jit_embeddings.embed(sentence)
        jit_token_embedding = sentence[5].get_embedding().clone()
        assert torch.isclose(base_token_embedding, jit_token_embedding).all()
        sentence.clear_embeddings()
        # use a SequenceTagger to save and reload the embedding in the manner it is supposed to work
        example_tagger = SequenceTagger(embeddings=jit_embeddings, tag_dictionary=Dictionary(), tag_type="none")
        results_base_path.mkdir(exist_ok=True, parents=True)
        example_tagger.save(results_base_path / "tagger.pt")
        del example_tagger
        new_example_tagger = SequenceTagger.load(results_base_path / "tagger.pt")
        loaded_jit_embedding = new_example_tagger.embeddings
        loaded_jit_embedding.embed(sentence)
        loaded_jit_token_embedding = sentence[5].get_embedding().clone()
        sentence.clear_embeddings()
        assert torch.isclose(jit_token_embedding, loaded_jit_token_embedding).all()

    def test_transformers_context_expansion(self, results_base_path):
        """FLERT context expansion surrounds the sentence with neighbours and [FLERT] separators."""
        emb = TransformerWordEmbeddings(
            "distilbert-base-uncased", use_context=True, use_context_separator=True, respect_document_boundaries=True
        )
        # previous and next sentence as context
        sentence_previous = Sentence("How is it?")
        sentence_next = Sentence("Then again, maybe not...")
        # test expansion for sentence without context
        sentence = Sentence("This is great!")
        expanded, _ = emb._expand_sentence_with_context(sentence=sentence)
        assert " ".join([token.text for token in expanded]) == "[FLERT] This is great ! [FLERT]"
        # test expansion for with previous and next as context
        sentence = Sentence("This is great.")
        sentence._previous_sentence = sentence_previous
        sentence._next_sentence = sentence_next
        expanded, _ = emb._expand_sentence_with_context(sentence=sentence)
        assert (
            " ".join([token.text for token in expanded])
            == "How is it ? [FLERT] This is great . [FLERT] Then again , maybe not ..."
        )
        # test expansion if first sentence is document boundary
        sentence = Sentence("This is great?")
        sentence_previous.is_document_boundary = True
        sentence._previous_sentence = sentence_previous
        sentence._next_sentence = sentence_next
        expanded, _ = emb._expand_sentence_with_context(sentence=sentence)
        assert (
            " ".join([token.text for token in expanded]) == "[FLERT] This is great ? [FLERT] Then again , maybe not ..."
        )
        # test expansion if we don't use context
        emb.context_length = 0
        sentence = Sentence("I am here.")
        sentence._previous_sentence = sentence_previous
        sentence._next_sentence = sentence_next
        expanded, _ = emb._expand_sentence_with_context(sentence=sentence)
        assert " ".join([token.text for token in expanded]) == "I am here ."

    @pytest.mark.integration()
    def test_layoutlm_embeddings(self):
        """LayoutLM embeds tokens given per-token bounding boxes (no page image needed)."""
        sentence = Sentence(["I", "love", "Berlin"])
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        emb = TransformerWordEmbeddings("microsoft/layoutlm-base-uncased", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        emb.embed(sentence)

    @pytest.mark.integration()
    @pytest.mark.skipif(
        condition=not is_detectron2_available(), reason="layoutlmV2 requires detectron2 to be installed manually."
    )
    def test_layoutlmv2_embeddings(self, tasks_base_path):
        """LayoutLMv2 embeds tokens given bounding boxes plus the page image."""
        with Image.open(tasks_base_path / "example_images" / "i_love_berlin.png") as img:
            img.load()
        img = img.convert("RGB")
        sentence = Sentence(["I", "love", "Berlin"])
        sentence.add_metadata("image", img)
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        emb = TransformerWordEmbeddings("microsoft/layoutlmv2-base-uncased", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        emb.embed(sentence)

    @pytest.mark.integration()
    def test_layoutlmv3_embeddings(self, tasks_base_path):
        """LayoutLMv3 embeds tokens given bounding boxes plus the page image."""
        with Image.open(tasks_base_path / "example_images" / "i_love_berlin.png") as img:
            img.load()
        img = img.convert("RGB")
        sentence = Sentence(["I", "love", "Berlin"])
        sentence.add_metadata("image", img)
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        emb = TransformerWordEmbeddings("microsoft/layoutlmv3-base", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        emb.embed(sentence)

    @pytest.mark.integration()
    def test_layoutlmv3_embeddings_with_long_context(self, tasks_base_path):
        """LayoutLMv3 must also handle inputs far beyond the model's window (512x3 tokens)."""
        with Image.open(tasks_base_path / "example_images" / "i_love_berlin.png") as img:
            img.load()
        img = img.convert("RGB")
        sentence = Sentence(["I", "love", "Berlin"] * 512)
        sentence.add_metadata("image", img)
        for i in range(512):
            sentence[i * 3].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
            sentence[i * 3 + 1].add_metadata("bbox", (12, 0, 22, 10))
            sentence[i * 3 + 2].add_metadata("bbox", (0, 12, 0, 10))
        emb = TransformerWordEmbeddings("microsoft/layoutlmv3-base", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        emb.embed(sentence)

    @pytest.mark.integration()
    def test_ocr_embeddings_fails_when_no_bbox(self):
        """Embedding with a LayoutLM model must fail when tokens carry no bbox metadata."""
        sentence = Sentence(["I", "love", "Berlin"])
        emb = TransformerWordEmbeddings("microsoft/layoutlm-base-uncased", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        with pytest.raises(ValueError):
            emb.embed(sentence)

    @pytest.mark.integration()
    def test_layoutlm_embeddings_with_context_warns_user(self):
        """Combining LayoutLM with FLERT context triggers exactly one user warning."""
        sentence = Sentence(["I", "love", "Berlin"])
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        with pytest.warns(UserWarning) as record:
            TransformerWordEmbeddings("microsoft/layoutlm-base-uncased", layers="-1,-2,-3,-4", use_context=True)
        assert len(record) == 1
        assert "microsoft/layoutlm" in record[0].message.args[0]

    @pytest.mark.integration()
    def test_layoutlmv3_without_image_embeddings_fails(self):
        """LayoutLMv3 requires an image; embedding without one must raise."""
        sentence = Sentence(["I", "love", "Berlin"])
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        emb = TransformerWordEmbeddings("microsoft/layoutlmv3-base", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        with pytest.raises(ValueError):
            emb.embed(sentence)

    @pytest.mark.skipif(importlib.util.find_spec("sacremoses") is None, reason="XLM-Embeddings require 'sacremoses'")
    def test_transformer_word_embeddings_forward_language_ids(self):
        """XLM embeddings with language codes reproduce known cross-lingual similarities."""
        cos = torch.nn.CosineSimilarity(dim=0, eps=1e-10)
        sent_en = Sentence(["This", "is", "a", "sentence"], language_code="en")
        sent_de = Sentence(["Das", "ist", "ein", "Satz"], language_code="de")
        embeddings = TransformerWordEmbeddings("xlm-mlm-ende-1024", layers="all", allow_long_sentences=False)
        embeddings.embed([sent_de, sent_en])
        # regression values: per-position cosine similarity between the two sentences
        expected_similarities = [
            0.7102344036102295,
            0.7598986625671387,
            0.7437312602996826,
            0.5584433674812317,
        ]
        for token_de, token_en, exp_sim in zip(sent_de, sent_en, expected_similarities):
            sim = cos(token_de.embedding, token_en.embedding).item()
            assert abs(exp_sim - sim) < 1e-5

    def test_transformer_force_max_length(self):
        """force_max_length pads to the full window without changing the embeddings."""
        sentence: Sentence = Sentence("I love Berlin, but Vienna is where my hearth is.")
        short_embeddings = TransformerWordEmbeddings("distilbert-base-uncased", layers="-1,-2,-3,-4", layer_mean=False)
        long_embeddings = TransformerWordEmbeddings(
            "distilbert-base-uncased", layers="-1,-2,-3,-4", layer_mean=False, force_max_length=True
        )
        short_tensors = short_embeddings.prepare_tensors([sentence])
        long_tensors = long_embeddings.prepare_tensors([sentence])
        for tensor in short_tensors.values():
            if tensor.dim() > 1:  # all tensors that have a sequence length need to be shorter
                assert tensor.shape[1] < 512
        for tensor in long_tensors.values():
            if tensor.dim() > 1:  # all tensors that have a sequence length need to be exactly max length
                assert tensor.shape[1] == 512
        short_embeddings.embed(sentence)
        short_embedding_0 = sentence[0].get_embedding()
        sentence.clear_embeddings()
        long_embeddings.embed(sentence)
        long_embedding_0 = sentence[0].get_embedding()
        # apparently the precision is not that high on cuda, hence the absolute tolerance needs to be higher.
        assert torch.isclose(short_embedding_0, long_embedding_0, atol=1e-4).all()

    def test_transformers_keep_tokenizer_when_saving(self, results_base_path):
        """A saved tagger keeps a working tokenizer, even after a save/load/save cycle."""
        embeddings = TransformerWordEmbeddings("distilbert-base-uncased")
        results_base_path.mkdir(exist_ok=True, parents=True)
        initial_tagger_path = results_base_path / "initial_tokenizer.pk"
        reloaded_tagger_path = results_base_path / "reloaded_tokenizer.pk"
        initial_tagger = SequenceTagger(embeddings, Dictionary(), "ner")
        initial_tagger.save(initial_tagger_path)
        reloaded_tagger = SequenceTagger.load(initial_tagger_path)
        reloaded_tagger.save(reloaded_tagger_path)

    def test_transformers_keep_tokenizer_bloom_when_saving(self, results_base_path):
        """Same tokenizer round-trip check for a bloom-style tokenizer."""
        embeddings = TransformerWordEmbeddings("Muennighoff/bloom-tiny-random")
        results_base_path.mkdir(exist_ok=True, parents=True)
        initial_tagger_path = results_base_path / "initial_tokenizer.pk"
        reloaded_tagger_path = results_base_path / "reloaded_tokenizer.pk"
        initial_tagger = SequenceTagger(embeddings, Dictionary(), "ner")
        initial_tagger.save(initial_tagger_path)
        reloaded_tagger = SequenceTagger.load(initial_tagger_path)
        reloaded_tagger.save(reloaded_tagger_path)

    def test_transformer_subword_token_mapping(self):
        """Embedding must handle tokenizers whose subwords do not align 1:1 with tokens."""
        sentence = Sentence("El pasto es verde.")
        embeddings = TransformerWordEmbeddings("PlanTL-GOB-ES/roberta-base-biomedical-es", layers="-1")
        embeddings.embed(sentence)

    @pytest.mark.skipif(importlib.util.find_spec("onnxruntime") is None, reason="Onnx export require 'onnxruntime'")
    def test_onnx_export_works(self, results_base_path):
        """ONNX-exported embeddings must match the original PyTorch embeddings per token."""
        texts = [
            "I live in Berlin",
            "I live in Vienna",
            "Berlin to Germany is like Vienna to Austria",
        ]
        normal_sentences = [Sentence(text) for text in texts]
        onnx_sentences = [Sentence(text) for text in texts]
        embeddings = TransformerWordEmbeddings("distilbert-base-uncased")
        results_base_path.mkdir(exist_ok=True, parents=True)
        onnx_embeddings = embeddings.export_onnx(results_base_path / "onnx-export.onnx", normal_sentences)
        embeddings.embed(normal_sentences)
        onnx_embeddings.embed(onnx_sentences)
        for sent_a, sent_b in zip(normal_sentences, onnx_sentences):
            for token_a, token_b in zip(sent_a, sent_b):
                assert torch.isclose(token_a.get_embedding(), token_b.get_embedding(), atol=1e-6).all()
| 15,110 | 45.352761 | 120 | py |
flair | flair-master/tests/embeddings/test_flair_embeddings.py | from flair.data import Dictionary, Sentence
from flair.embeddings import (
DocumentLMEmbeddings,
DocumentRNNEmbeddings,
FlairEmbeddings,
)
from flair.models import LanguageModel
from tests.embedding_test_utils import BaseEmbeddingsTest
class TestFlairEmbeddings(BaseEmbeddingsTest):
    """Embedding-level tests for FlairEmbeddings, driven by the shared BaseEmbeddingsTest suite."""

    embedding_cls = FlairEmbeddings
    is_token_embedding = True
    is_document_embedding = False
    default_args = {"model": "news-forward-fast"}
    name_field = "model"
    invalid_names = ["other", "not/existing/path/to/embeddings"]

    def test_fine_tunable_flair_embedding(self):
        """Fine-tunable char-LM embeddings work via a document RNN and directly as DocumentLMEmbeddings.

        Fix: the original reused the names ``embeddings`` and ``sentence`` with
        conflicting type annotations (a redefinition error under static type
        checkers); the two scenarios now use distinct local names.
        """
        language_model_forward = LanguageModel(Dictionary.load("chars"), is_forward_lm=True, hidden_size=32, nlayers=1)

        # scenario 1: wrapped in a document RNN — output size is the RNN hidden size
        rnn_embeddings: DocumentRNNEmbeddings = DocumentRNNEmbeddings(
            [FlairEmbeddings(language_model_forward, fine_tune=True)],
            hidden_size=128,
            bidirectional=False,
        )
        rnn_sentence: Sentence = Sentence("I love Berlin.")
        rnn_embeddings.embed(rnn_sentence)
        assert len(rnn_sentence.get_embedding()) == 128
        assert len(rnn_sentence.get_embedding()) == rnn_embeddings.embedding_length
        rnn_sentence.clear_embeddings()
        assert len(rnn_sentence.get_embedding()) == 0

        # scenario 2: LM hidden state used directly — output size is the LM hidden size
        lm_embeddings: DocumentLMEmbeddings = DocumentLMEmbeddings(
            [FlairEmbeddings(language_model_forward, fine_tune=True)]
        )
        lm_sentence: Sentence = Sentence("I love Berlin.")
        lm_embeddings.embed(lm_sentence)
        assert len(lm_sentence.get_embedding()) == 32
        assert len(lm_sentence.get_embedding()) == lm_embeddings.embedding_length
        lm_sentence.clear_embeddings()
        assert len(lm_sentence.get_embedding()) == 0
        del rnn_embeddings, lm_embeddings
| 1,707 | 30.054545 | 119 | py |
flair | flair-master/tests/embeddings/test_simple_token_embeddings.py | from flair.data import Dictionary
from flair.embeddings import CharacterEmbeddings, HashEmbeddings, OneHotEmbeddings
from tests.embedding_test_utils import BaseEmbeddingsTest
# Minimal shared vocabulary used by the OneHotEmbeddings test class below.
vocab_dictionary = Dictionary(add_unk=True)
for _vocab_word in ("I", "love", "berlin"):
    vocab_dictionary.add_item(_vocab_word)
class TestCharacterEmbeddings(BaseEmbeddingsTest):
    """Generic embedding-suite tests for character-level token embeddings."""

    embedding_cls = CharacterEmbeddings
    is_token_embedding = True
    is_document_embedding = False
    # None presumably falls back to the default character dictionary — confirm in CharacterEmbeddings
    default_args = {"path_to_char_dict": None}
class TestOneHotEmbeddings(BaseEmbeddingsTest):
    """Generic embedding-suite tests for one-hot token embeddings over a tiny vocabulary."""

    embedding_cls = OneHotEmbeddings
    is_token_embedding = True
    is_document_embedding = False
    # small three-word vocabulary built at module import time above
    default_args = {"vocab_dictionary": vocab_dictionary}
class TestHashEmbeddings(BaseEmbeddingsTest):
    """Generic embedding-suite tests for hashing-based token embeddings."""

    embedding_cls = HashEmbeddings
    is_token_embedding = True
    is_document_embedding = False
    default_args = {"num_embeddings": 10}
| 923 | 29.8 | 82 | py |
flair | flair-master/tests/embeddings/test_stacked_embeddings.py | from flair.data import Sentence
from flair.embeddings import (
FlairEmbeddings,
StackedEmbeddings,
TokenEmbeddings,
WordEmbeddings,
)
from flair.embeddings.base import load_embeddings
def test_stacked_embeddings():
    """Stacked word + char-LM embeddings yield the combined embedding length per token."""
    stacked = StackedEmbeddings(
        [
            WordEmbeddings("turian"),
            FlairEmbeddings("news-forward-fast"),
        ]
    )
    sentence = Sentence("I love Berlin. Berlin is a great place to live.")
    stacked.embed(sentence)
    for token in sentence.tokens:
        # combined dimensionality of both stacked embeddings
        assert len(token.get_embedding()) == 1074
        token.clear_embeddings()
        assert len(token.get_embedding()) == 0
    del stacked
def test_stacked_embeddings_stay_the_same_after_saving_and_loading():
    """Saving/reloading a StackedEmbeddings must not change names, length, or vectors."""

    def embed_and_describe(embedding_stack):
        # embed a fixed sentence and report (sentence, names, embedding length)
        embedded = Sentence("I love Berlin")
        embedding_stack.embed(embedded)
        return embedded, embedding_stack.get_names(), embedding_stack.embedding_length

    original = StackedEmbeddings([WordEmbeddings("turian"), FlairEmbeddings("news-forward-fast")])
    assert not original.training
    sentence_before, names_before, length_before = embed_and_describe(original)

    reloaded = load_embeddings(original.save_embeddings(use_state_dict=True))
    sentence_after, names_after, length_after = embed_and_describe(reloaded)

    assert not reloaded.training
    assert names_before == names_after
    assert length_before == length_after
    # per-token vectors must be bit-identical before and after the round trip
    for token_before, token_after in zip(sentence_before, sentence_after):
        assert (token_before.get_embedding(names_before) == token_after.get_embedding(names_after)).all()
| 1,851 | 33.296296 | 95 | py |
flair | flair-master/tests/embeddings/test_transformer_document_embeddings.py | from flair.data import Dictionary
from flair.embeddings import TransformerDocumentEmbeddings
from flair.models import TextClassifier
from flair.nn import Classifier
from tests.embedding_test_utils import BaseEmbeddingsTest
class TestTransformerDocumentEmbeddings(BaseEmbeddingsTest):
    """Generic embedding-suite tests for document-level transformer embeddings."""

    embedding_cls = TransformerDocumentEmbeddings
    is_document_embedding = True
    is_token_embedding = False
    default_args = {"model": "distilbert-base-uncased", "allow_long_sentences": False}
    # layer selections exercised by the generic suite
    valid_args = [
        {"layers": "-1,-2,-3,-4", "layer_mean": False},
        {"layers": "all", "layer_mean": True},
        {"layers": "all", "layer_mean": False},
    ]
    # NOTE(review): name_field is "embeddings" while the default_args key is "model" —
    # confirm against BaseEmbeddingsTest which field the name-based tests expect.
    name_field = "embeddings"
    invalid_names = ["other", "not/existing/path/to/embeddings"]
def test_if_loaded_embeddings_have_all_attributes(tasks_base_path):
    """Context-related embedding attributes must survive a model save/load round trip."""
    # dummy classifier around context-enabled document embeddings
    original = TextClassifier(
        label_type="ner",
        label_dictionary=Dictionary(),
        embeddings=TransformerDocumentEmbeddings(
            "distilbert-base-uncased",
            use_context=True,
            use_context_separator=False,
        ),
    )
    # round-trip the model through disk
    model_path = tasks_base_path / "single.pt"
    original.save(model_path)
    reloaded = Classifier.load(model_path)
    # both context attributes must be identical before and after loading
    assert original.embeddings.context_length == reloaded.embeddings.context_length
    assert original.embeddings.use_context_separator == reloaded.embeddings.use_context_separator
| 1,549 | 37.75 | 104 | py |
flair | flair-master/tests/embeddings/__init__.py | 0 | 0 | 0 | py | |
flair | flair-master/tests/embeddings/test_tfidf_embeddings.py | from flair.data import Sentence
from flair.embeddings import DocumentTFIDFEmbeddings
from tests.embedding_test_utils import BaseEmbeddingsTest
class TFIDFEmbeddingsTest(BaseEmbeddingsTest):
    """Runs the shared BaseEmbeddingsTest suite against DocumentTFIDFEmbeddings."""

    embedding_cls = DocumentTFIDFEmbeddings
    # TF-IDF yields a single vector per sentence, not per token.
    is_document_embedding = True
    is_token_embedding = False
    # The embedding is fitted on this small in-memory corpus at construction time.
    default_args = {
        "train_dataset": [
            Sentence("This is a sentence"),
            Sentence("This is another sentence"),
            Sentence("another a This I Berlin"),
        ]
    }
| 508 | 27.277778 | 57 | py |
flair | flair-master/tests/embeddings/test_document_transform_word_embeddings.py | from typing import Any, Dict, List
from flair.embeddings import (
DocumentCNNEmbeddings,
DocumentLMEmbeddings,
DocumentPoolEmbeddings,
DocumentRNNEmbeddings,
FlairEmbeddings,
TokenEmbeddings,
WordEmbeddings,
)
from tests.embedding_test_utils import BaseEmbeddingsTest
word: TokenEmbeddings = WordEmbeddings("turian")
flair_embedding: TokenEmbeddings = FlairEmbeddings("news-forward-fast")
flair_embedding_back: TokenEmbeddings = FlairEmbeddings("news-backward-fast")
class BaseDocumentsViaWordEmbeddingsTest(BaseEmbeddingsTest):
    """Shared fixture for document embeddings that are built on top of a
    list of token-level base embeddings."""

    is_document_embedding = True
    is_token_embedding = False
    base_embeddings: List[TokenEmbeddings] = [word, flair_embedding]

    def create_embedding_from_name(self, name: str):
        """Overwrite this method if it is more complex to load an embedding by name."""
        assert self.name_field is not None
        remaining = dict(self.default_args)
        del remaining[self.name_field]
        return self.embedding_cls(name, **remaining)  # type: ignore[call-arg]

    def create_embedding_with_args(self, args: Dict[str, Any]):
        """Instantiate the embedding class with the defaults overridden by ``args``."""
        merged = {**self.default_args, **args}
        return self.embedding_cls(self.base_embeddings, **merged)  # type: ignore[call-arg]
class TestDocumentLstmEmbeddings(BaseDocumentsViaWordEmbeddingsTest):
    """Runs the shared document-embedding tests against DocumentRNNEmbeddings."""

    embedding_cls = DocumentRNNEmbeddings
    # Constructor arguments used for every test instantiation.
    default_args = {
        "hidden_size": 128,
        "bidirectional": False,
    }
    # Variants that must also construct and embed successfully.
    valid_args = [{"bidirectional": False}, {"bidirectional": True}]
class TestDocumentPoolEmbeddings(BaseDocumentsViaWordEmbeddingsTest):
    """Runs the shared document-embedding tests against DocumentPoolEmbeddings."""

    embedding_cls = DocumentPoolEmbeddings
    default_args = {
        "fine_tune_mode": "nonlinear",
    }
    # Every listed pooling strategy must construct and embed.
    valid_args = [{"pooling": "mean"}, {"pooling": "max"}, {"pooling": "min"}]
class TestDocumentCNNEmbeddings(BaseDocumentsViaWordEmbeddingsTest):
    """Runs the shared document-embedding tests against DocumentCNNEmbeddings."""

    embedding_cls = DocumentCNNEmbeddings
    # Presumably (channels, kernel width) pairs — confirm against
    # DocumentCNNEmbeddings' constructor.
    default_args = {
        "kernels": ((50, 2), (50, 3)),
    }
    valid_args = [{"reproject_words_dimension": None}, {"reproject_words_dimension": 100}]
class TestDocumentLMEmbeddings(BaseDocumentsViaWordEmbeddingsTest):
    """Runs the shared document-embedding tests against DocumentLMEmbeddings."""

    embedding_cls = DocumentLMEmbeddings
    # Built from the forward + backward character LM embeddings
    # instantiated at module level.
    base_embeddings = [flair_embedding, flair_embedding_back]
    default_args: Dict[str, Any] = {}
| 2,301 | 33.358209 | 91 | py |
LSTM_Covariance | LSTM_Covariance-main/shallow_water/DA_preparation.py | # -*- coding: utf-8 -*-
# assimilation shallow water
import numpy as np
def VAR_3D(xb, Y, H, B, R):
    """One 3D-Var / BLUE analysis step.

    Computes xa = xb + K (Y - H xb) with gain
    K = B H^T (H B H^T + R)^+ and the analysis error covariance in
    Joseph form A = (I - K H) B (I - K H)^T + K R K^T.

    :param xb: background state, any shape with ``xb.size == n``
    :param Y: observation vector, any shape with ``Y.size == m``
    :param H: (m, n) observation operator
    :param B: (n, n) background error covariance
    :param R: (m, m) observation error covariance
    :return: tuple ``(xa, A)`` with xa of shape (n, 1) and A of shape (n, n)
    """
    dim_x = xb.size
    # Reshape local copies/views instead of assigning to ``.shape`` so
    # the caller's arrays are no longer mutated (the original reshaped
    # the caller's Y in place as a side effect).
    xb1 = np.copy(xb).reshape(dim_x, 1)
    y1 = np.asarray(Y).reshape(Y.size, 1)
    # Kalman gain; pinv keeps this usable when H B H^T + R is singular.
    K = np.dot(B, np.dot(np.transpose(H), np.linalg.pinv(np.dot(H, np.dot(B, np.transpose(H))) + R)))
    I_KH = np.eye(dim_x) - np.dot(K, H)
    # Joseph form of the analysis covariance: symmetric for any gain K.
    A = np.dot(np.dot(I_KH, B), np.transpose(I_KH)) + np.dot(np.dot(K, R), np.transpose(K))
    xa = xb1 + np.dot(K, y1 - np.dot(H, xb1))
    return xa, A
def x_to_y(X):
    """Observation operator: sum X over non-overlapping 2x2 windows.

    The grid size is hard-coded to 20x20, yielding a 10x10 observation
    field.  A zero-covariance Gaussian draw is still performed so the
    global NumPy RNG advances exactly as before (the added noise is
    identically zero).
    """
    half = 20 // 2
    obs = np.zeros((half, half))
    for row in range(half):
        for col in range(half):
            window = X[2 * row:2 * row + 2, 2 * col:2 * col + 2]
            obs[row, col] = window[0, 0] + window[1, 0] + window[0, 1] + window[1, 1]
    noise = np.random.multivariate_normal(np.zeros(100), 0.0 * np.eye(100))
    return obs + noise.reshape(half, half)
| 1,041 | 25.05 | 129 | py |
LSTM_Covariance | LSTM_Covariance-main/shallow_water/prediction_plotting.py | # %%
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
import matplotlib.pyplot as plt
#
# IDs of the per-run .npy shards that make up the training set, in the
# exact order the original hand-unrolled code concatenated them.
_SHARD_IDS = [1114, 948, 947, 940, 941, 1111, 1112, 946, 945, 944,
              943, 942, 1116, 1115, 1113, 939, 938, 937, 1117, 1118]

# Load the first shard, then append the rest one at a time so that at
# most one extra shard is resident in memory besides the growing array
# (the original achieved the same with 20 copy-pasted load/concat/del
# blocks).
train_data = np.load(
    f'data2/trainset_withx_repeat_shwater3_uniform0011_test6_{_SHARD_IDS[0]}.npy'
).astype(np.float32)
for _shard_id in _SHARD_IDS[1:]:
    _shard = np.load(
        f'data2/trainset_withx_repeat_shwater3_uniform0011_test6_{_shard_id}.npy'
    ).astype(np.float32)
    train_data = np.concatenate((train_data, _shard), axis=0)
    del _shard
# train_data=np.load('data2/trainset_withx_repeat_shwater3_uniform0011_test6_1179.npy').astype(np.float32) no
# train_data=np.load('data2/trainset_withx_repeat_shwater3_uniform0011_test6_944.npy').astype(np.float32)
# %%
print("train_data shape: ",train_data.shape)
# ###############################################################################
obs = train_data.reshape((train_data.shape[0],200*1000+101))
del train_data
# %%
X=None
y=None
# Targets: the last 101 columns of each row are the 100 variances
# (rescaled by 1000) followed by the correlation radius r (rescaled by
# 1/8), presumably to bring all outputs into a comparable range —
# confirm against the data-generation script.
y=obs[:,-101:]
y[:,-101:-1]=y[:,-101:-1]*1000
y[:,-1]=y[:,-1]/8
y=y.reshape((obs.shape[0],101))
# Inputs: the first 200*1000 columns, viewed as (samples, 200, 1000).
X=obs[:,:200*1000].reshape((obs.shape[0],200,1000))
# X=std_scaler.transform(X)
# Transpose every sample to (1000, 200) before stacking.
X=np.array([X[i].transpose() for i in range(X.shape[0])])
# input_data=X[:,:1000,:]
input_data=X[:,:200,:]  # keep only the first 200 rows of each transposed sample
output_data=y
##########################################################################
train_part = 0.97
threshold = int(train_part*obs.shape[0])  # train/test split point (unused below)
##########################################################################
# train_input = input_data[:threshold,:]
# train_output = output_data[:threshold,:]
# test_input = input_data [threshold:,:]
# true_test_output = output_data[threshold:,:]
# X1 = train_input
# Y1 = train_output
# NOTE(review): the whole set is evaluated here, not just the held-out part.
X2 = input_data
true_test_output=output_data
# train_input = input_data[:threshold,:]
# train_output = output_data[:threshold,:]
# test_input=input_data
# true_test_output = output_data
# X1 = train_input
# Y1 = train_output
# X2 = test_input
############################################################################
#def my_loss_fn(y_true, y_pred):
#
# return K.mean(K.abs(y_true - y_pred) * weight)
# %%
from tensorflow.keras.models import load_model
# model1=load_model('data2/sequentiallstm200_b128_h200_norm_out_gen22.h5')
model1=load_model('data2/sequentiallstm2222200_b128_h200_norm_out_gen22.h5')
# Calculate predictions
# PredValSet2 = model1.predict(X2.reshape(X2.shape[0],1000,200))
PredValSet2 = model1.predict(X2.reshape(X2.shape[0],200,200))
print("PredValSet2 shape: ",PredValSet2.shape)
print("true_test_output shape: ",true_test_output.shape)
# fig, ((ax1,ax2),(ax3,ax4),(ax5,ax6))=plt.subplots(nrows=3, ncols=2)
# %%
# sample1=np.random.randint(0,100,1)
sample1=np.array([0])
print(sample1[0])
plt.plot(PredValSet2[:,sample1[0]],true_test_output[:,sample1[0]],'o', color='b',markersize=5)
plt.plot(true_test_output[:,sample1[0]],true_test_output[:,sample1[0]],'-', color='r',linewidth=5)
plt.xlabel('prediction',fontsize=22)
plt.ylabel('true value',fontsize=22)
# plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
# plt.legend()
plt.show()
# %%
sample1=np.array([100])
print(sample1[0])
plt.plot(PredValSet2[:,sample1[0]],true_test_output[:,sample1[0]],'o', color='b',markersize=5)
plt.plot(true_test_output[:,sample1[0]],true_test_output[:,sample1[0]],'-', color='r',linewidth=5)
plt.xlabel('prediction',fontsize=22)
plt.ylabel('true value',fontsize=22)
# plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.legend()
plt.show()
# %%
# plt.plot(PredValSet2[:,sample1[0]],true_test_output[:,sample1[0]],'o', color='b',markersize=5)
# plt.legend()
# plt.show()
# ax1.plot(PredValSet2[:,sample1[0]],true_test_output[:,sample1[0]],'o', color='b',markersize=5)
# ax1.plot(true_test_output[:,sample1[0]],true_test_output[:,sample1[0]],'-', color='r',linewidth=5)
# ax1.legend()
# # sample2=np.random.randint(0,100,1)
# sample2=np.array([82])
# while sample2[0]==sample1[0]:
# sample2=np.random.randint(0,100,1)
# print(sample2)
# # plt.plot(PredValSet2[:,sample2[0]],true_test_output[:,sample2[0]],'o', color='b',markersize=5)
# # plt.legend()
# # plt.show()
# # plt.xlim(-1, 1)
# # plt.ylim(-1, 1)
# # plt.plot(PredValSet1[:,1],true_test_output[:,1],'o', color='blue',markersize=5,label='lstm')
# ax2.plot(PredValSet2[:,sample2[0]],true_test_output[:,sample2[0]],'o', color='b',markersize=5)
# ax2.plot(true_test_output[:,sample2[0]],true_test_output[:,sample2[0]],'-', color='r',linewidth=5)
# # plt.plot(PredValSet3[:,1],true_test_output[:,1],'o', color='green',markersize=5)
# # plt.plot(PredValSet4[:,1],true_test_output[:,1],'o', color='c',markersize=5)
# # plt.plot(PredValSet5[:,1],true_test_output[:,1],'o', color='m',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# ax2.legend()
# # sample3=np.random.randint(0,100,1)
# sample3=np.array([84])
# while sample3[0]==sample2[0] or sample3[0]==sample1[0]:
# sample3=np.random.randint(0,100,1)
# print(sample3)
# # # plt.xlim(-1, 1)
# # # plt.ylim(-1, 1)
# # # plt.plot(PredValSet1[:,2],true_test_output[:,2],'o', color='blue',markersize=5,label='lstm')
# # plt.plot(PredValSet2[:,sample3[0]],true_test_output[:,sample3[0]],'o', color='b',markersize=5)
# # # plt.plot(true_test_output[:,2],true_test_output[:,2],'o', color='r',markersize=5)
# # # plt.plot(PredValSet3[:,2],true_test_output[:,2],'o', color='green',markersize=5)
# # # plt.plot(PredValSet4[:,2],true_test_output[:,2],'o', color='c',markersize=5)
# # # plt.plot(PredValSet5[:,2],true_test_output[:,2],'o', color='m',markersize=5)
# # #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# # plt.legend()
# # plt.show()
# # plt.plot(PredValSet1[:,2],true_test_output[:,2],'o', color='blue',markersize=5,label='lstm')
# ax3.plot(PredValSet2[:,sample3[0]],true_test_output[:,sample3[0]],'o', color='b',markersize=5)
# ax3.plot(true_test_output[:,sample3[0]],true_test_output[:,sample3[0]],'-', color='r',linewidth=5)
# # plt.plot(PredValSet3[:,2],true_test_output[:,2],'o', color='green',markersize=5)
# # plt.plot(PredValSet4[:,2],true_test_output[:,2],'o', color='c',markersize=5)
# # plt.plot(PredValSet5[:,2],true_test_output[:,2],'o', color='m',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# ax3.legend()
# # sample4=np.random.randint(0,100,1)
# sample4=np.array([86])
# while sample4[0]==sample2[0] or sample4[0]==sample1[0] or sample4[0]==sample3[0]:
# sample4=np.random.randint(0,100,1)
# print(sample4)
# # # plt.xlim(-10, 10)
# # # plt.ylim(-10, 10)
# # # plt.plot(PredValSet1[:,3],true_test_output[:,3],'o',color='blue',markersize=5,label='lstm')
# # plt.plot(PredValSet2[:,sample4[0]],true_test_output[:,sample4[0]],'o', color='b',markersize=5)
# # # plt.plot(true_test_output[:,3],true_test_output[:,3],'o', color='r',markersize=5)
# # # plt.plot(PredValSet3[:,3],true_test_output[:,3],'o', color='green',markersize=5)
# # # plt.plot(PredValSet4[:,3],true_test_output[:,3],'o', color='c',markersize=5)
# # # plt.plot(PredValSet5[:,3],true_test_output[:,3],'o', color='m',markersize=5)
# # #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# # plt.legend()
# # plt.show()
# # plt.xlim(-10, 10)
# # plt.ylim(-10, 10)
# # plt.plot(PredValSet1[:,3],true_test_output[:,3],'o',color='blue',markersize=5,label='lstm')
# ax4.plot(PredValSet2[:,sample4[0]],true_test_output[:,sample4[0]],'o', color='b',markersize=5)
# ax4.plot(true_test_output[:,sample4[0]],true_test_output[:,sample4[0]],'-', color='r',linewidth=5)
# # plt.plot(PredValSet3[:,3],true_test_output[:,3],'o', color='green',markersize=5)
# # plt.plot(PredValSet4[:,3],true_test_output[:,3],'o', color='c',markersize=5)
# # plt.plot(PredValSet5[:,3],true_test_output[:,3],'o', color='m',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# ax4.legend()
# # plt.show()
# # sample5=np.random.randint(0,100,1)
# sample5=np.array([88])
# while sample5[0]==sample2[0] or sample5[0]==sample1[0] or sample5[0]==sample3[0] or sample5[0]==sample4[0]:
# sample5=np.random.randint(0,100,1)
# print(sample5)
# ax5.plot(PredValSet2[:,sample5[0]],true_test_output[:,sample5[0]],'o', color='b',markersize=5)
# ax5.plot(true_test_output[:,sample5[0]],true_test_output[:,sample5[0]],'-', color='r',linewidth=5)
# ax5.legend()
# sample6=np.array([99])
# print(sample6)
# ax6.plot(PredValSet2[:,sample6[0]],true_test_output[:,sample6[0]],'o', color='b',markersize=5)
# ax6.plot(true_test_output[:,sample6[0]],true_test_output[:,sample6[0]],'-', color='r',linewidth=5)
# # plt.plot(PredValSet3[:,3],true_test_output[:,3],'o', color='green',markersize=5)
# # plt.plot(PredValSet4[:,3],true_test_output[:,3],'o', color='c',markersize=5)
# # plt.plot(PredValSet5[:,3],true_test_output[:,3],'o', color='m',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# ax6.legend()
# fig.supxlabel('Prediction')
# fig.supylabel('true value')
# plt.tight_layout()
# plt.show()
# # plt.plot(true_test_output[:,0],color='b',label='r0')
# # plt.plot(true_test_output[:,1],color='r',label='r1')
# # plt.plot(true_test_output[:,2],color='y',label='r2')
# # plt.legend()
# # plt.show()
# ##########################################################################################"
# # predint = model.predict(train_input[:3000])
# # trueint = train_output[:3000]
# # plt.plot(predint[:,3],trueint[:,3],'o', color='blue',markersize=5)
# # #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# # plt.show()
| 12,529 | 31.973684 | 109 | py |
LSTM_Covariance | LSTM_Covariance-main/shallow_water/data_generation_no_v_20.py | #shallow water propagation
"""
Solution of Shallow-water equations using a Python class.
Adapted for Python training course at CNRS from https://github.com/mrocklin/ShallowWater/
Dmitry Khvorostyanov, 2015
CNRS/LMD/IPSL, dmitry.khvorostyanov @ lmd.polytechnique.fr
"""
import time
from pylab import *
import matplotlib.gridspec as gridspec
from shallowwater import *
# import imageio
from constructB import *
from DA_preparation import *
import os
if not os.path.isdir('uniform01_2'):
os.makedirs('uniform01_2')
print("30/05/2021")
class shallow_dynamique(object):
    """Shallow-water dynamics without the initial pressure bump.

    Integrates the non-conservative shallow-water equations on a
    periodic N x N grid with a simple explicit Euler scheme.
    """

    time = 0
    plt = []
    fig = []

    def __init__(self, x=[], y=[], u=None, v=None, h=None,
                 dx=0.01, dt=0.0001, N=100, L=1., g=1., b=2.0):
        """Set up grid and initial state.

        ``u``, ``v`` and ``h`` default to *fresh* zero / unit fields of
        shape (100, 100).  The original code used mutable ndarray
        default arguments (created once at class definition) that
        ``evolve`` then mutated in place, so a second default-constructed
        instance silently inherited the first one's evolved state; the
        ``None`` sentinels fix that.  ``x`` and ``y`` are accepted for
        interface compatibility but ignored (the grid is rebuilt below).

        :param dx: spatial step
        :param dt: time step
        :param N: grid size (the grid is N x N); L is stored but unused here
        :param g: gravity constant
        :param b: linear drag coefficient
        """
        self.g = g
        self.b = b
        self.L = L
        self.N = N
        self.dx = dx
        self.dt = dt
        self.x, self.y = mgrid[:self.N, :self.N]
        self.u = zeros((100, 100)) if u is None else u
        self.v = zeros((100, 100)) if v is None else v
        self.h = ones((100, 100)) if h is None else h

    def dxy(self, A, axis=0):
        """Centred finite difference of A along ``axis``:
        dA[i]/dx = (A[i+1] - A[i-1]) / (2 dx), periodic via roll."""
        return (roll(A, -1, axis) - roll(A, 1, axis)) / (self.dx * 2.)

    def d_dx(self, A):
        # derivative along the second (column) axis
        return self.dxy(A, 1)

    def d_dy(self, A):
        # derivative along the first (row) axis
        return self.dxy(A, 0)

    def d_dt(self, h, u, v):
        """Right-hand side of the shallow-water equations.

        http://en.wikipedia.org/wiki/Shallow_water_equations#Non-conservative_form
        """
        for x in [h, u, v]:  # type check: plain ndarrays only
            assert isinstance(x, ndarray) and not isinstance(x, matrix)
        g, b = self.g, self.b
        du_dt = -g * self.d_dx(h) - b * u
        dv_dt = -g * self.d_dy(h) - b * v
        # h already contains the mean depth here, hence no extra H term.
        dh_dt = -self.d_dx(u * (h)) - self.d_dy(v * (h))
        return dh_dt, du_dt, dv_dt

    def evolve(self):
        """Advance (h, u, v) by one explicit Euler step and return them:
        x_{N+1} = x_N + dx/dt * dt."""
        dh_dt, du_dt, dv_dt = self.d_dt(self.h, self.u, self.v)
        dt = self.dt
        self.h += dh_dt * dt
        self.u += du_dt * dt
        self.v += dv_dt * dt
        self.time += dt
        return self.h, self.u, self.v
############################################################################
#define H (from x to y in the one dimensional space)
# Observation operator: each of the 100 observations sums one 2x2 patch
# of the flattened 20x20 u-field; the same pattern is repeated
# block-diagonally for the v-field.
H_1D_uv = np.zeros((100, 400))
for obs_row in range(100):
    i, j = divmod(obs_row, 10)
    top_left = 2 * i * 20 + 2 * j
    for offset in (0, 1, 20, 21):  # the four pixels of the 2x2 window
        H_1D_uv[obs_row, top_left + offset] = 1
H_1strow = np.concatenate((H_1D_uv, np.zeros((100, 400))), axis=1)
H_2ndrow = np.concatenate((np.zeros((100, 400)), H_1D_uv), axis=1)
H = np.concatenate((H_1strow, H_2ndrow), axis=0)
############################################################################
##############################################################################
# Data generation: for each sample draw a random observation-error
# covariance R (diagonal variances D, Balgovind correlation radius r),
# run the shallow-water model for `iteration_times` steps, observe
# (u, v) through H with noise ~ N(0, R), and store the flattened
# observation series together with the generating parameters (D, r).
iteration_times = 1000
parameter_size = 100 + 1                     # 100 variances + 1 radius
trainning_set = np.zeros((1, iteration_times * 200 + parameter_size))
index = 210 + 60 * 26                        # id of the next output shard
try:
    for ii in range(0, 30000):
        if ii % 100 == 0:
            print(ii)
        # Random observation-error covariance: R = D^{1/2} C(r) D^{1/2}
        D = 0.001 * np.random.uniform(0.01, 0.1, 100)
        r = np.random.uniform(1, 8)
        R = np.dot(np.dot(np.sqrt(np.diag(D)), Balgovind(10, r)[:100, :100]), np.sqrt(np.diag(D)))
        # Ground-truth parameters stored next to the observations.
        parameters = np.zeros((1, parameter_size))
        parameters[0, :100] = D
        parameters[0, 100:] = r
        # Fresh model state for every sample.
        SW = shallow(u=0.001 * np.identity(20), v=0.001 * np.identity(20), px=10, py=10, N=20, R=10)
        y_obs = np.zeros((H.shape[0], iteration_times))
        for i in range(iteration_times):
            print("sample time: ", ii)
            print("iteration time: ", i)
            print(f"index {index}")
            SW.evolve()
            # Observe (u, v) and perturb the u- and v-parts independently.
            Y = np.dot(H, np.concatenate((SW.u.ravel(), SW.v.ravel())).reshape(2 * SW.u.size, 1))
            Y[:100] += np.random.multivariate_normal(np.zeros(100), R).reshape(100, 1)
            Y[100:] += np.random.multivariate_normal(np.zeros(100), R).reshape(100, 1)
            # Column assignment with [i] keeps the (200, 1) shape.
            y_obs[:, [i]] = Y
        train_row = y_obs.ravel().reshape(1, -1)
        train_row = np.concatenate((train_row, parameters), axis=1)
        trainning_set = np.concatenate((trainning_set, train_row), axis=0)
        if (ii + 1) % 500 == 0:
            # Drop the all-zero seed row and flush this shard to disk.
            index = index + 1
            trainning_set = trainning_set[1:, :]
            np.save(f"uniform01_2/trainset_withx_repeat_shwater3_uniform0011_{index}.npy", trainning_set)
            trainning_set = np.zeros((1, iteration_times * 200 + parameter_size))
except (Exception, KeyboardInterrupt):
    # Best-effort dump of whatever was generated so far.  The original
    # code had a third `except UserAbort:` clause referencing an
    # undefined name, which would itself have raised NameError if ever
    # reached; the merged clause covers the same interruptions.
    np.savetxt("uniform01_2/trainset_withx_repeat_shwater3_uniform0011_total_test6_1.csv", trainning_set, delimiter=",")
LSTM_Covariance | LSTM_Covariance-main/shallow_water/shallowwater.py | """
Solution of Shallow-water equations using a Python class.
Adapted for Python training course at CNRS from https://github.com/mrocklin/ShallowWater/
Dmitry Khvorostyanov, 2015
CNRS/LMD/IPSL, dmitry.khvorostyanov @ lmd.polytechnique.fr
"""
import time
from pylab import *
import matplotlib.gridspec as gridspec
import numpy as np
#construct background states, observations with error
def x_to_y(X):
    """Observation operator: sum X over non-overlapping 2x2 windows.

    The state grid is hard-coded to 20x20, yielding a 10x10 observation
    field, to which a (currently zero-variance) Gaussian observation
    noise is added.

    Fixed for Python 3: ``dim/2`` is float division, so the original
    ``np.zeros((dim/2, dim/2))`` and ``range(dim/2)`` raised TypeError;
    use integer division instead.
    """
    dim = 20  # hard-coded grid size (X.shape[0] was overwritten upstream too)
    half = dim // 2
    Y = np.zeros((half, half))
    for i in range(half):
        for j in range(half):
            Y[i, j] = X[2*i, 2*j] + X[2*i+1, 2*j] + X[2*i, 2*j+1] + X[2*i+1, 2*j+1]
    # Zero-covariance draw kept so the global RNG state advances exactly
    # as in the data-generation scripts that share this operator.
    Y_noise = np.random.multivariate_normal(np.zeros(100), 0.0000 * np.eye(100))
    return Y + Y_noise.reshape(half, half)
class shallow(object):
    """Shallow-water model on a periodic N x N grid.

    The height field starts at ``h_ini`` everywhere and is raised by
    ``Hp`` inside the disc (x-px)^2 + (y-py)^2 < R (note: R is compared
    against the *squared* distance).  ``evolve`` advances the state one
    explicit Euler step at a time.
    """
    # domain
    #N = 100
    #L = 1.
    #dx = L / N
    #dt = dx / 100.
    # Initial Conditions
    #u = zeros((N,N)) # velocity in x direction
    #v = zeros((N,N)) # velocity in y direction
    #h_ini = 1.
    #h = h_ini * ones((N,N)) # pressure deviation (like height)
    #x,y = mgrid[:N,:N]
    time = 0  # simulated time, advanced by evolve()
    plt = []
    fig = []
    def __init__(self, x=[],y=[],h_ini = 1.,u=[],v = [],dx=0.01,dt=0.0001, N=100,L=1., px=50, py=50, R=100, Hp=0.1, g=1., b=2.): # NOTE(review): x, y, u, v parameters are ignored and rebuilt below
        """Build the grid and the initial perturbed height field.

        :param h_ini: base water height
        :param dx: spatial step of the Euler scheme
        :param dt: time step of the Euler scheme
        :param N: grid size (the grid is N x N); L is stored but unused here
        :param px: x-coordinate of the perturbation centre
        :param py: y-coordinate of the perturbation centre
        :param R: *squared* radius of the perturbed disc
        :param Hp: height of the perturbation
        :param g: gravity constant
        :param b: linear drag coefficient
        """
        # add a perturbation in pressure surface
        self.px, self.py = px, py
        self.R = R
        self.Hp = Hp
        # Physical parameters
        self.g = g
        self.b = b
        self.L=L
        self.N=N
        # limits for h,u,v
        #self.dx = self.L / self.N # to be changed
        #self.dt = self.dx / 100.
        self.dx=dx
        self.dt=dt
        self.x,self.y = mgrid[:self.N,:self.N]
        self.u=zeros((self.N,self.N))
        self.v=zeros((self.N,self.N))
        self.h_ini=h_ini
        self.h=self.h_ini * ones((self.N,self.N))
        rr = (self.x-px)**2 + (self.y-py)**2
        self.h[rr<R] = self.h_ini + Hp #set initial conditions
        # plotting limits for (h, u, v)
        self.lims = [(self.h_ini-self.Hp,self.h_ini+self.Hp),(-0.02,0.02),(-0.02,0.02)]
    def dxy(self, A, axis=0):
        """
        Compute derivative of array A using balanced finite differences
        Axis specifies direction of spatial derivative (d/dx or d/dy)
        dA[i]/dx = (A[i+1] - A[i-1] ) / 2dx
        """
        return (roll(A, -1, axis) - roll(A, 1, axis)) / (self.dx*2.) # roll: shift the array axis=0 shift the horizontal axis
    def d_dx(self, A):
        # derivative along the second (column) axis
        return self.dxy(A,1)
    def d_dy(self, A):
        # derivative along the first (row) axis
        return self.dxy(A,0)
    def d_dt(self, h, u, v):
        """
        Right-hand side of the shallow-water equations.
        http://en.wikipedia.org/wiki/Shallow_water_equations#Non-conservative_form
        """
        for x in [h, u, v]: # type check
            assert isinstance(x, ndarray) and not isinstance(x, matrix)
        g,b,dx = self.g, self.b, self.dx
        du_dt = -g*self.d_dx(h) - b*u
        dv_dt = -g*self.d_dy(h) - b*v
        H = 0 #h.mean() - our definition of h includes this term
        dh_dt = -self.d_dx(u * (H+h)) - self.d_dy(v * (H+h))
        return dh_dt, du_dt, dv_dt
    def evolve(self):
        """
        Evolve state (h, u, v) forward in time using simple Euler method
        x_{N+1} = x_{N} + dx/dt * d_t
        """
        dh_dt, du_dt, dv_dt = self.d_dt(self.h, self.u, self.v)
        dt = self.dt
        self.h += dh_dt * dt
        self.u += du_dt * dt
        self.v += dv_dt * dt
        self.time += dt
        return self.h, self.u, self.v
## def plot(self,tit=None,autolims=False):
## """Plot u,v,h at current state."""
##
## self.fig.append(figure())
## for i,v in enumerate([self.h,self.u,self.v]):
## if autolims:
## vmin,vmax = None, None
## else:
## vmin, vmax = self.lims[i][0], self.lims[i][1]
##
## self.fig[-1].add_subplot(3,1,i+1)
##
## self.plt.append(pcolormesh(v, vmin=vmin, vmax=vmax))
## colorbar(shrink=0.9)
##
## if i==0:
## if tit is None:
## self.tit = title('At time %f'% self.time)
## else:
## self.tit = title(tit)
##
##
## def animate(self):
## """Plot u,v,h at current state."""
##
## for i,v in enumerate([self.h,self.u,self.v]):
## self.plt[i].set_array(v.ravel())
##
## if i==0:
## self.tit.set_text('At time %f'%self.time)
if __name__ == '__main__': #run the current script
iteration_times= 500
SW = shallow(N=20,px=10,py=10,R=10.)
# chose a point (x,y) to check the evolution
x=10
y=10
#SW.plot()
# u_vect=np.zeros(iteration_times)
# v_vect=np.zeros(iteration_times)
# h_vect=np.zeros(iteration_times)
# for i in range(iteration_times):
# SW.evolve()
# u_vect[i]=SW.u[x][y]
# v_vect[i]=SW.v[x][y]
# h_vect[i]=SW.h[x][y]
# #SW.animate()
#
# if i % 100 == 0:
# print ('time %f'%SW.time)
## SW.fig[-1].savefig('sw_%.3d.png'% i)
##
## show()
# print SW.time
## plt.subplot(311)
## plt.imshow( SW.h)
## set_title("Title for first plot")
##
##
## plt.subplot(312)
## plt.imshow( SW.u)
##
##
##
## plt.subplot(313)
## plt.imshow( SW.v)
##
## plt.show()
gs = gridspec.GridSpec(2, 2,
width_ratios=[1, 1],
height_ratios=[1, 1]
)
fig = plt.figure()
#fig = plt.figure()
t=SW.time
fig = plt.figure()
t=SW.time
fig.suptitle('At time: T=%1.3f'%t, fontsize=16)
ax1 = plt.subplot(gs[0])
ax1.set_title("h")
im=ax1.imshow( SW.h)
plt.colorbar(im)
ax2 = plt.subplot(gs[1])
ax2.set_title("u")
im=ax2.imshow( SW.u)
plt.colorbar(im)
ax3 = plt.subplot(gs[2])
ax3.set_title("v")
im=ax3.imshow( SW.v)
plt.colorbar(im)
plt.show()
gs = gridspec.GridSpec(1, 2,
width_ratios=[1, 1],
)
fig = plt.figure()
#fig = plt.figure()
t=SW.time
################################################################################
#u_t=SW.u[[45,46,47,48,49,51,52,53,54,55], :][:, [45,46,47,48,49,51,52,53,54,55]]
#v_t=SW.v[[45,46,47,48,49,51,52,53,54,55], :][:, [45,46,47,48,49,51,52,53,54,55]]
iteration_times = 5000
for i in range(iteration_times):
SW.evolve()
if i%100 ==0:
u_t = SW.u
v_t = SW.v
# fig = plt.figure()
# t=SW.time
# fig.suptitle('At time: T=%1.3f'%t, fontsize=16)
# ax1 = plt.subplot(gs[0])
# ax1.set_title("u")
# im=ax1.imshow( u_t,interpolation='none')
# plt.colorbar(im)
#
# ax2 = plt.subplot(gs[1])
# ax2.set_title("v")
# im=ax2.imshow( v_t,interpolation='none')
# plt.colorbar(im)
#
# plt.show()
im = plt.imshow(SW.h, interpolation = "None")
plt.colorbar(im)
plt.title("$h (t = 0.05)$")
#plt.savefig("Figures/h_t005.eps", format = "eps")
plt.show()
plt.close()
im = plt.imshow(SW.u, interpolation = "None")
plt.colorbar(im)
plt.title("$u (t = 0.05)$")
#plt.savefig("Figures/u_t005.eps", format = "eps")
plt.show()
plt.close()
im = plt.imshow(SW.v, interpolation = "None")
plt.colorbar(im)
plt.title("$v (t = 0.05)$")
#plt.savefig("Figures/v_t005.eps", format = "eps")
plt.show()
plt.close()
# i = 300
# xbu = np.loadtxt("data/SW_b_u_"+str(i)+".txt")
# xbv = np.loadtxt("data/SW_b_v_"+str(i)+".txt")
# xbh = np.loadtxt("data/SW_b_h_"+str(i)+".txt")
#
# im = plt.imshow(xbu, interpolation = "None")
# plt.colorbar(im)
# plt.title("$u_b (t = 0.05)$")
# #plt.savefig("Figures/u_b005.eps", format = "eps")
# plt.show()
# plt.close()
#
# im = plt.imshow(xbv, interpolation = "None")
# plt.colorbar(im)
# plt.title("$v_b (t = 0.05)$")
# #plt.savefig("Figures/v_b005.eps", format = "eps")
# plt.show()
# plt.close()
#
# im = plt.imshow(xbh, interpolation = "None")
# plt.colorbar(im)
# plt.title("$h_b (t = 0.05)$")
# #plt.savefig("Figures/h_b005.eps", format = "eps")
# plt.show()
# plt.close()
#
# yu = x_to_y(SW.u)
# yv = x_to_y(SW.v)
#
# im = plt.imshow(yu, interpolation = "None")
# plt.colorbar(im)
# plt.title("$y_{t,u} (t = 0.05)$")
# #plt.savefig("Figures/yt_u005.eps", format = "eps")
# plt.show()
# plt.close()
#
# im = plt.imshow(yv, interpolation = "None")
# plt.colorbar(im)
# plt.title("$y_{t,v} (t = 0.05)$")
# #plt.savefig("Figures/yt_v005.eps", format = "eps")
# plt.show()
# plt.close()
#
# ybu = np.loadtxt("data/SW_yu_"+str(i)+".txt")
# ybv = np.loadtxt("data/SW_yv_"+str(i)+".txt")
#
# im = plt.imshow(ybu, interpolation = "None")
# plt.colorbar(im)
# plt.title("$y_{b,u} (t = 0.05)$")
# #plt.savefig("Figures/yb_u005.eps", format = "eps")
# plt.show()
# plt.close()
#
# im = plt.imshow(ybv, interpolation = "None")
# plt.colorbar(im)
# plt.title("$y_{b,v} (t = 0.05)$")
# #plt.savefig("Figures/yb_v005.eps", format = "eps")
# plt.show()
# plt.close() | 9,745 | 24.989333 | 169 | py |
LSTM_Covariance | LSTM_Covariance-main/shallow_water/simulated_data_generation.py | #shallow water propagation
"""
Solution of Shallow-water equations using a Python class.
Adapted for Python training course at CNRS from https://github.com/mrocklin/ShallowWater/
Dmitry Khvorostyanov, 2015
CNRS/LMD/IPSL, dmitry.khvorostyanov @ lmd.polytechnique.fr
"""
import time
from pylab import *
import matplotlib.gridspec as gridspec
from shallowwater import *
# import imageio
from constructB import *
from DA_preparation import *
import os
if not os.path.isdir('uniform01_2'):
os.makedirs('uniform01_2')
print("30/05/2021")
class shallow_dynamique(object):
    """Shallow-water dynamics without the initial pressure bump.

    Integrates the non-conservative shallow-water equations on a
    periodic N x N grid with a simple explicit Euler scheme.
    """

    time = 0
    plt = []
    fig = []

    def __init__(self, x=[], y=[], u=None, v=None, h=None,
                 dx=0.01, dt=0.0001, N=100, L=1., g=1., b=2.0):
        """Set up grid and initial state.

        ``u``, ``v`` and ``h`` default to *fresh* zero / unit fields of
        shape (100, 100).  The original code used mutable ndarray
        default arguments (created once at class definition) that
        ``evolve`` then mutated in place, so a second default-constructed
        instance silently inherited the first one's evolved state; the
        ``None`` sentinels fix that.  ``x`` and ``y`` are accepted for
        interface compatibility but ignored (the grid is rebuilt below).

        :param dx: spatial step
        :param dt: time step
        :param N: grid size (the grid is N x N); L is stored but unused here
        :param g: gravity constant
        :param b: linear drag coefficient
        """
        self.g = g
        self.b = b
        self.L = L
        self.N = N
        self.dx = dx
        self.dt = dt
        self.x, self.y = mgrid[:self.N, :self.N]
        self.u = zeros((100, 100)) if u is None else u
        self.v = zeros((100, 100)) if v is None else v
        self.h = ones((100, 100)) if h is None else h

    def dxy(self, A, axis=0):
        """Centred finite difference of A along ``axis``:
        dA[i]/dx = (A[i+1] - A[i-1]) / (2 dx), periodic via roll."""
        return (roll(A, -1, axis) - roll(A, 1, axis)) / (self.dx * 2.)

    def d_dx(self, A):
        # derivative along the second (column) axis
        return self.dxy(A, 1)

    def d_dy(self, A):
        # derivative along the first (row) axis
        return self.dxy(A, 0)

    def d_dt(self, h, u, v):
        """Right-hand side of the shallow-water equations.

        http://en.wikipedia.org/wiki/Shallow_water_equations#Non-conservative_form
        """
        for x in [h, u, v]:  # type check: plain ndarrays only
            assert isinstance(x, ndarray) and not isinstance(x, matrix)
        g, b = self.g, self.b
        du_dt = -g * self.d_dx(h) - b * u
        dv_dt = -g * self.d_dy(h) - b * v
        # h already contains the mean depth here, hence no extra H term.
        dh_dt = -self.d_dx(u * (h)) - self.d_dy(v * (h))
        return dh_dt, du_dt, dv_dt

    def evolve(self):
        """Advance (h, u, v) by one explicit Euler step and return them:
        x_{N+1} = x_N + dx/dt * dt."""
        dh_dt, du_dt, dv_dt = self.d_dt(self.h, self.u, self.v)
        dt = self.dt
        self.h += dh_dt * dt
        self.u += du_dt * dt
        self.v += dv_dt * dt
        self.time += dt
        return self.h, self.u, self.v
############################################################################
# Observation operator H: each of the 100 coarse observation cells sums a
# 2x2 patch of the flattened 20x20 model grid; the same operator is applied
# block-diagonally to the u part and the v part of the state.
H_1D_uv = np.zeros((100, 400))
for cell in range(100):
    row, col = divmod(cell, 10)
    corner = 40 * row + 2 * col           # flat index of the patch's top-left point
    H_1D_uv[cell, [corner, corner + 1, corner + 20, corner + 21]] = 1
H_1strow = np.concatenate((H_1D_uv, np.zeros((100, 400))), axis=1)
H_2ndrow = np.concatenate((np.zeros((100, 400)), H_1D_uv), axis=1)
H = np.concatenate((H_1strow, H_2ndrow), axis=0)
############################################################################
# Data-set generation: for each sample draw a random observation-error
# covariance R (Balgovind correlation with random length scale r and random
# diagonal variances D), run the shallow-water model for `iteration_times`
# steps, observe (u, v) through H with noise drawn from R, and store the
# flattened observation series together with the parameters (D, r).
iteration_times = 1000
parameter_size = 100 + 1          # 100 variances + 1 correlation length scale
trainning_set = np.zeros((1, iteration_times*200 + parameter_size))
index = 0
try:
    for ii in range(30000):
        if ii % 100 == 0:
            print(ii)
        # random diagonal variances and Balgovind length scale
        D = 0.001*np.random.uniform(0.01, 0.1, 100)
        r = np.random.uniform(1, 8)
        # same observation error covariance for u and v:
        # R = sqrt(diag(D)) * C(r) * sqrt(diag(D)) is symmetric positive definite
        R = np.dot(np.dot(np.sqrt(np.diag(D)), Balgovind(10, r)[:100, :100]), np.sqrt(np.diag(D)))
        parameters = np.zeros((1, parameter_size))
        parameters[0, :100] = D
        parameters[0, 100:] = r
        # fresh shallow-water initial state (droplet of radius 10 at (10, 10))
        SW = shallow(u=np.zeros((20, 20)), v=np.zeros((20, 20)), px=10, py=10, N=20, R=10)
        y_obs = np.zeros((H.shape[0], iteration_times))
        for i in range(iteration_times):
            print("sample time: ", ii)
            print("iteration time: ", i)
            print(f"index {index}")
            SW.evolve()
            # observe the full (u, v) state, then perturb the u and v parts
            # with correlated noise ~ N(0, R)
            Y = np.dot(H, np.concatenate((SW.u.ravel(), SW.v.ravel())).reshape(2*SW.u.size, 1))
            Y[:100] += np.random.multivariate_normal(np.zeros(100), R).reshape(100, 1)
            Y[100:] += np.random.multivariate_normal(np.zeros(100), R).reshape(100, 1)
            # column assignment via [i] keeps the (200, 1) column shape
            # (https://stackoverflow.com/a/39825046/10349608)
            y_obs[:, [i]] = Y
        train_row = y_obs.ravel()
        train_row.shape = (1, train_row.size)
        train_row = np.concatenate((train_row, parameters), axis=1)
        trainning_set = np.concatenate((trainning_set, train_row), axis=0)
        if (ii + 1) % 500 == 0:
            # flush every 500 samples to its own file (drop the zero seed row)
            index = index + 1
            trainning_set = trainning_set[1:, :]
            np.save(f"uniform01_2/trainset_withx_repeat_shwater3_uniform0011_{index}.npy", trainning_set)
            trainning_set = np.zeros((1, iteration_times*200 + parameter_size))
except KeyboardInterrupt:
    # user interrupt: save whatever has been generated so far
    np.savetxt("uniform01_2/trainset_withx_repeat_shwater3_uniform0011_total_test6_1.csv", trainning_set, delimiter=",")
except Exception as e:
    # fix: the old version also had an `except UserAbort` clause referencing
    # an undefined name (NameError if ever reached) and swallowed the error
    # silently; save the partial data and report the failure instead
    np.savetxt("uniform01_2/trainset_withx_repeat_shwater3_uniform0011_total_test6_1.csv", trainning_set, delimiter=",")
    print(f"data generation aborted: {e!r}")
LSTM_Covariance | LSTM_Covariance-main/shallow_water/constructB.py | # coding: utf8
#construction of matrix B and special H with measure on the boarder
import numpy as np
import math
from scipy.linalg import sqrtm
from shallowwater import *
##def B_Balgovind(n,Sigma,L):
## Gamma = np.identity(n)
## for i in xrange(n):
## for j in xrange(n):
## Gamma[i,j] = ( 1. + abs(i-j)/L)*np.exp(-abs(i-j)/L)
## B = np.dot(Sigma,np.dot(Gamma,Sigma))
## return B
def get_index_2d(dim, n):
    """Map a flat index ``n`` to cartesian (row, col) on a dim x dim grid.

    Both coordinates are returned as floats, matching the historical contract.
    """
    col = float(n % dim)
    row = (n - col) / dim
    return (row, col)
#identite
def identiity(n):
    """Identity covariance matrix of size n x n.

    The historical misspelling of the name is kept for existing callers.
    """
    return np.identity(n)
#Blgovind
def Balgovind(dim, L):
    """Block-diagonal Balgovind (SOAR) correlation matrix on a dim x dim grid.

    For two grid points at euclidean distance r the correlation is
    (1 + r/L) * exp(-r/L).  The dim^2 x dim^2 block is duplicated on the
    diagonal so the result (2*dim^2 x 2*dim^2) covers both u and v components.

    Fix: the previous version used four nested Python loops with a per-element
    helper call (O(dim^4) interpreter work); the distances are now computed
    with a single broadcast, which is equivalent and far faster.
    """
    n = dim ** 2
    idx = np.arange(n)
    rows = idx // dim          # cartesian i-coordinate of each flat index
    cols = idx % dim           # cartesian j-coordinate
    # pairwise euclidean distances between all grid points
    r = np.sqrt((rows[:, None] - rows[None, :]) ** 2 + (cols[:, None] - cols[None, :]) ** 2)
    sub_B = (1.0 + r / L) * np.exp(-r / L)
    zero = np.zeros((n, n))
    B1 = np.concatenate((sub_B, zero), axis=1)
    B2 = np.concatenate((zero, sub_B), axis=1)
    return np.concatenate((B1, B2), axis=0)
def Gaussian(dim, L):
    """Block-diagonal Gaussian correlation matrix on a dim x dim grid.

    Correlation between two points at distance r is exp(-r^2 / (2 L^2));
    the dim^2 x dim^2 block is duplicated for the u and v components.
    """
    n = dim ** 2
    sub_B = np.zeros((n, n))
    for p in range(n):
        i1, j1 = divmod(p, dim)
        for q in range(n):
            i2, j2 = divmod(q, dim)
            dist = math.sqrt((i1 - i2) ** 2 + (j1 - j2) ** 2)
            sub_B[p, q] = math.exp(-dist ** 2 / (2 * L ** 2))
    zero = np.zeros((n, n))
    top = np.concatenate((sub_B, zero), axis=1)
    bottom = np.concatenate((zero, sub_B), axis=1)
    return np.concatenate((top, bottom), axis=0)
def expontielle(dim, L):
    """Block-diagonal exponential (first-order AR) correlation matrix.

    Correlation between two grid points at distance r is exp(-r/L); the
    dim^2 x dim^2 block is duplicated for the u and v components.
    """
    n = dim ** 2
    sub_B = np.zeros((n, n))
    for p in range(n):
        i1, j1 = divmod(p, dim)
        for q in range(n):
            i2, j2 = divmod(q, dim)
            dist = math.sqrt((i1 - i2) ** 2 + (j1 - j2) ** 2)
            sub_B[p, q] = math.exp(-dist / L)
    zero = np.zeros((n, n))
    top = np.concatenate((sub_B, zero), axis=1)
    bottom = np.concatenate((zero, sub_B), axis=1)
    return np.concatenate((top, bottom), axis=0)
def bord_M_aleatoire(dimension, proba):
    """Random 0/1 observation mask that only allows border grid columns.

    A column j of the dimension^2 x dimension^2 matrix corresponds to the
    grid point (j // dimension, j % dimension); entries in border columns are
    Bernoulli(proba) draws, interior columns stay zero.

    Fix: the border test was hard-coded for dimension == 10
    (``j % 10``, ``j <= 9 or j >= 90``) although ``dimension`` is a
    parameter; it is now expressed in terms of ``dimension`` (identical
    behaviour for dimension == 10).
    """
    n = dimension ** 2
    M = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            col = j % dimension
            on_border = (col == 0 or col == dimension - 1       # left/right edge
                         or j < dimension or j >= n - dimension)  # top/bottom edge
            if on_border:
                M[i, j] = np.random.binomial(1, proba)
    return M
def cov_to_cor(B):
    """Convert a covariance matrix into the corresponding correlation matrix:
    C = D^{-1/2} B D^{-1/2} with D = diag(B)."""
    half_inv = np.linalg.inv(sqrtm(np.diag(np.diag(B))))
    half_inv = half_inv.real.copy()  # sqrtm may return a complex-typed result
    return np.dot(half_inv, np.dot(B, half_inv))
########################################################################
#covariance 1d
def Balgovind_1D(dim, L):
    """1-D Balgovind (SOAR) correlation matrix:
    B[i, j] = (1 + d/L) * exp(-d/L) with d = |i - j|."""
    out = np.zeros((dim, dim))
    # fill the upper triangle and mirror: the matrix is symmetric in |i - j|
    for row in range(dim):
        for col in range(row, dim):
            dist = float(col - row)
            val = (1 + dist / L) * math.exp(-dist / L)
            out[row, col] = val
            out[col, row] = val
    return out
def expontielle_1D(dim, L):
    """1-D exponential (first-order AR) correlation matrix:
    B[i, j] = exp(-|i - j| / L)."""
    out = np.zeros((dim, dim))
    # symmetric in |i - j|: fill the upper triangle and mirror
    for row in range(dim):
        for col in range(row, dim):
            val = math.exp(-float(col - row) / L)
            out[row, col] = val
            out[col, row] = val
    return out
def Gaussian_1D(dim, L):
    """1-D Gaussian correlation matrix:
    B[i, j] = exp(-d^2 / (2 L^2)) with d = |i - j|."""
    out = np.zeros((dim, dim))
    # symmetric in |i - j|: fill the upper triangle and mirror
    for row in range(dim):
        for col in range(row, dim):
            dist = float(col - row)
            val = math.exp(-dist ** 2 / (2 * L ** 2))
            out[row, col] = val
            out[col, row] = val
    return out
#Blgovind
def Balgovind_noniso(dim, L, rayon):
    """Non-isotropic Balgovind covariance on a dim x dim grid.

    The base SOAR correlation (1 + r/L) e^{-r/L} is amplified (x4) for each
    endpoint of a pair lying inside the circle of radius ``rayon`` centred at
    (5, 5) and damped (x0.25) for each endpoint outside; the resulting block
    is duplicated block-diagonally for the u and v components.
    """
    n = dim ** 2
    sub_B = np.zeros((n, n))
    for p in range(n):
        i1, j1 = divmod(p, dim)
        p_inside = math.sqrt((i1 - 5) ** 2 + (j1 - 5) ** 2) <= rayon
        for q in range(n):
            i2, j2 = divmod(q, dim)
            dist = math.sqrt((i1 - i2) ** 2 + (j1 - j2) ** 2)
            val = (1 + dist / L) * (math.exp(-dist / L))
            val = val * 4 if p_inside else val * 0.25
            q_inside = math.sqrt((i2 - 5) ** 2 + (j2 - 5) ** 2) <= rayon
            val = val * 4 if q_inside else val * 0.25
            sub_B[p, q] = val
    zero = np.zeros((n, n))
    top = np.concatenate((sub_B, zero), axis=1)
    bottom = np.concatenate((zero, sub_B), axis=1)
    return np.concatenate((top, bottom), axis=0)
if __name__ == "__main__":
im = plt.imshow(0.01*Balgovind(10,3)[:100,:100])
#im = plt.imshow(0.01*Balgovind(10,3))
plt.colorbar(im)
#plt.savefig("Figures/R_bal_3.eps", format ='eps') | 4,880 | 31.324503 | 84 | py |
LSTM_Covariance | LSTM_Covariance-main/shallow_water/shallowwater_lstm1000_model.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.metrics import r2_score
import tensorflow as tf
import tensorflow.keras.backend as K
# check scikit-learn version
# check scikit-learn version
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
# import keras
from tensorflow.keras.models import Sequential,load_model
from tensorflow.keras.layers import LSTM,Dropout
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam
import os
import json
import pickle
# gpus = tf.config.experimental.list_physical_devices('GPU')
# if gpus:
# try:
# # Currently, memory growth needs to be the same across GPUs
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
# logical_gpus = tf.config.experimental.list_logical_devices('GPU')
# print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
# except RuntimeError as e:
# # Memory growth must be set before GPUs have been initialized
# print(e)
# Let TensorFlow grow GPU memory on demand instead of grabbing it all upfront.
# Fix: the previous code passed the list slice physical_devices[0:3] directly
# to set_memory_growth (which expects a single device), so it always raised
# and the bare `except: pass` silently left memory growth disabled.
physical_devices = tf.config.list_physical_devices('GPU')
for _gpu in physical_devices[:3]:
    try:
        # must run before the GPUs are initialized
        tf.config.experimental.set_memory_growth(_gpu, True)
    except RuntimeError:
        # device already initialized (or virtual): keep the default behaviour
        pass
#=======================================================================
# Generator
class DataGenerator(tf.keras.utils.Sequence):
    """Keras Sequence that streams pre-generated observation files from disk.

    Each ``uniform020/...test6_<ID>.npy`` file holds a matrix whose rows are
    one training sample: 200*1000 flattened observation values followed by
    101 target parameters (100 variances then 1 correlation length scale).
    """

    def __init__(self, list_IDs, batch_size=1, dim=(1000, 200), n_channels=1, shuffle=True):
        self.dim = dim
        self.batch_size = batch_size      # number of files per batch
        self.list_IDs = list_IDs          # all available file IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        """Number of batches per epoch."""
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        """Assemble batch ``index`` from the (possibly shuffled) file order."""
        print(index)
        batch = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        ids = [self.list_IDs[k] for k in batch]
        return self.__data_generation(ids)

    def on_epoch_end(self):
        """Reshuffle the file order after each epoch."""
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, ids):
        """Load and format the samples of every requested file.

        Fix: the previous version returned only the data of the last ID,
        silently dropping files whenever batch_size > 1; all requested files
        are now concatenated along the sample axis.
        """
        xs, ys = [], []
        for ID in ids:
            obs = np.load('uniform020/trainset_withx_repeat_shwater3_uniform0011_test6_' + str(ID) + '.npy')
            obs_size = obs.shape[0]
            print("ID: ", ID)
            print("obs_size: ", obs_size)
            y = obs[:, -101:]
            # normalize targets: variances scaled up by 1e3, length scale into [0, 1]
            y[:, :-1] = y[:, :-1] * 1000
            y[:, -1] = y[:, -1] / 8
            X = obs[:, :200 * 1000].reshape((obs_size, 200, 1000))
            xs.append(np.transpose(X, (0, 2, 1)))  # -> (obs_size, 1000, 200)
            ys.append(y)
        return np.concatenate(xs, axis=0), np.concatenate(ys, axis=0)
print("31")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
# #====================== Read file list_IDs =================================================
partition=np.array([i for i in range(211)])
# #====================== Parameters =================================================
Params={'dim':(1000,200),
'batch_size':1,
'n_channels':1,
'shuffle':True}
# train_data1000 = np.array(pd.read_csv('0001D07/trainset_withx_repeat_shwater3_0001D07_total_3.csv',delimiter=",",
# header=None,
# index_col=False))
# obs = train_data1000.reshape((train_data1000.shape[0],200*1000+101))
# X=np.empty((train_data1000.shape[0],200*1000))
# y=np.empty((train_data1000.shape[0],101))
# y=obs[:,-101:]
# X=obs[:,:200*1000].reshape((train_data1000.shape[0],200,1000))
# X=np.array([X[i].transpose() for i in range(X.shape[0])])
# input_data=X[:,:,:200]
# output_data=y
# #====================== Generators =================================================
train_part = 0.97
threshold = int(train_part*len(partition))
# input_generator=DataGenerator(partition,**Params)
training_generator=DataGenerator(partition[:threshold],**Params)
validation_generator=DataGenerator(partition[threshold:],**Params)
#============================= Model Design ==========================================
if not os.path.isdir('data2'):
os.makedirs('data2')
# try:
hidden_size=200
model = Sequential()
model.add(LSTM(hidden_size,input_shape=(1000,200)))
model.add(Dense(101))
# model=load_model('data2/sequentiallstm1000_b128_h200_norm_out_new_model.h5',compile = False)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mae'])
print(model.summary())
es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=100)
# modelcheckpoint
mc=ModelCheckpoint('data2/sequentiallstm1000_b128_h200_norm_out_new_model.h5',monitor='val_loss',mode='min',save_best_only=True,verbose=1)
# history=model.fit(input_data, output_data, validation_split=train_part, epochs=100, batch_size=128, verbose=1,callbacks=[es,mc])
history=model.fit(x=training_generator, validation_data=validation_generator, epochs=1000, validation_batch_size=5,verbose=1, callbacks=[es,mc])
# history=model.fit(x=training_generator, validation_data=validation_generator, epochs=1000, use_multiprocessing=True, workers=6,verbose=1,callbacks=[es,mc])
# model.save('save_data/sequentiallstm2')
model.save('data2/sequentiallstm1000_b128_h200_norm_out_new_model_f.h5')
# https://stackoverflow.com/a/44674337/10349608
with open('data2/sequentiallstm1000_b128_h200_norm_out_new_model_history.pickle', 'wb') as file_his:
pickle.dump(history.history, file_his)
| 6,880 | 30.135747 | 157 | py |
LSTM_Covariance | LSTM_Covariance-main/shallow_water/shallowwater_lstm200_model.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.metrics import r2_score
import tensorflow as tf
import tensorflow.keras.backend as K
# check scikit-learn version
# check scikit-learn version
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
# import keras
from tensorflow.keras.models import Sequential,load_model
from tensorflow.keras.layers import LSTM,Dropout
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam
import os
import json
import pickle
# gpus = tf.config.experimental.list_physical_devices('GPU')
# if gpus:
# try:
# # Currently, memory growth needs to be the same across GPUs
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
# logical_gpus = tf.config.experimental.list_logical_devices('GPU')
# print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
# except RuntimeError as e:
# # Memory growth must be set before GPUs have been initialized
# print(e)
# Let TensorFlow grow GPU memory on demand instead of grabbing it all upfront.
# Fix: the previous code passed the list slice physical_devices[0:3] directly
# to set_memory_growth (which expects a single device), so it always raised
# and the bare `except: pass` silently left memory growth disabled.
physical_devices = tf.config.list_physical_devices('GPU')
for _gpu in physical_devices[:3]:
    try:
        # must run before the GPUs are initialized
        tf.config.experimental.set_memory_growth(_gpu, True)
    except RuntimeError:
        # device already initialized (or virtual): keep the default behaviour
        pass
#=======================================================================
# Generator
class DataGenerator(tf.keras.utils.Sequence):
    """Keras Sequence that streams pre-generated observation files from disk.

    Each ``uniform020/...test6_<ID>.npy`` file holds a matrix whose rows are
    one training sample: 200*1000 flattened observation values followed by
    101 target parameters (100 variances then 1 correlation length scale).
    This 200-step variant keeps only the first 200 time steps per sample.
    """

    def __init__(self, list_IDs, batch_size=1, dim=(1000, 200), n_channels=1, shuffle=True):
        self.dim = dim
        self.batch_size = batch_size      # number of files per batch
        self.list_IDs = list_IDs          # all available file IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        """Number of batches per epoch."""
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        """Assemble batch ``index`` from the (possibly shuffled) file order."""
        print(index)
        batch = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        ids = [self.list_IDs[k] for k in batch]
        return self.__data_generation(ids)

    def on_epoch_end(self):
        """Reshuffle the file order after each epoch."""
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, ids):
        """Load and format the samples of every requested file.

        Fix: the previous version returned only the data of the last ID,
        silently dropping files whenever batch_size > 1; all requested files
        are now concatenated along the sample axis.
        """
        xs, ys = [], []
        for ID in ids:
            obs = np.load('uniform020/trainset_withx_repeat_shwater3_uniform0011_test6_' + str(ID) + '.npy')
            obs_size = obs.shape[0]
            print("ID: ", ID)
            print("obs_size: ", obs_size)
            y = obs[:, -101:]
            # normalize targets: variances scaled up by 1e3, length scale into [0, 1]
            y[:, :-1] = y[:, :-1] * 1000
            y[:, -1] = y[:, -1] / 8
            # keep only the first 200 of the 1000 columns before transposing
            X = obs[:, :200 * 1000].reshape((obs_size, 200, 1000))[:, :, :200]
            xs.append(np.transpose(X, (0, 2, 1)))  # -> (obs_size, 200, 200)
            ys.append(y)
        return np.concatenate(xs, axis=0), np.concatenate(ys, axis=0)
print("31")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
# #====================== Read file list_IDs =================================================
partition=np.array([i for i in range(211)])
# #====================== Parameters =================================================
Params={'dim':(1000,200),
'batch_size':1,
'n_channels':1,
'shuffle':True}
# train_data1000 = np.array(pd.read_csv('0001D07/trainset_withx_repeat_shwater3_0001D07_total_3.csv',delimiter=",",
# header=None,
# index_col=False))
# obs = train_data1000.reshape((train_data1000.shape[0],200*1000+101))
# X=np.empty((train_data1000.shape[0],200*1000))
# y=np.empty((train_data1000.shape[0],101))
# y=obs[:,-101:]
# X=obs[:,:200*1000].reshape((train_data1000.shape[0],200,1000))
# X=np.array([X[i].transpose() for i in range(X.shape[0])])
# input_data=X[:,:,:200]
# output_data=y
# #====================== Generators =================================================
train_part = 0.97
threshold = int(train_part*len(partition))
# input_generator=DataGenerator(partition,**Params)
training_generator=DataGenerator(partition[:threshold],**Params)
validation_generator=DataGenerator(partition[threshold:],**Params)
#============================= Model Design ==========================================
if not os.path.isdir('data2'):
os.makedirs('data2')
# try:
hidden_size=200
model = Sequential()
model.add(LSTM(hidden_size,input_shape=(200,200)))
model.add(Dense(101))
# model=load_model('data2/sequentiallstm2222200_b128_h200_norm_out_gen22.h5',compile = False)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mae'])
print(model.summary())
es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=100)
# modelcheckpoint
mc=ModelCheckpoint('data2/sequentiallstm2222200_b128_h200_norm_out_gen22.h5',monitor='val_loss',mode='min',save_best_only=True,verbose=1)
# history=model.fit(input_data, output_data, validation_split=train_part, epochs=100, batch_size=128, verbose=1,callbacks=[es,mc])
history=model.fit(x=training_generator, validation_data=validation_generator, epochs=1000, validation_batch_size=5,verbose=1, callbacks=[es,mc])
# history=model.fit(x=training_generator, validation_data=validation_generator, epochs=1000, use_multiprocessing=True, workers=6,verbose=1,callbacks=[es,mc])
# model.save('save_data/sequentiallstm2')
model.save('data2/sequentiallstm2222200_b128_h200_norm_out_gen22_f.h5')
# https://stackoverflow.com/a/44674337/10349608
with open('data2/sequentiallstm2222200_b128_h200_norm_out_gen22_history.pickle', 'wb') as file_his:
pickle.dump(history.history, file_his)
| 6,889 | 29.622222 | 157 | py |
LSTM_Covariance | LSTM_Covariance-main/lorenz/lstmR_d05R_plotting.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import matplotlib.pyplot as plt
# check scikit-learn version
# check scikit-learn version
import pandas as pd
# def data_set_order(file):
# train_data = np.array(pd.read_csv(file))
# r0=train_data[:,:1001]
# r1=train_data[:,1001:2002]
# r2=train_data[:,2002:3003]
# r3=train_data[:,3003:]/10
# train_data=np.insert(r0,[i+1 for i in range(r0.shape[1])],r1,axis=1)
# train_data=np.insert(train_data,[(i+1)*2 for i in range(int(train_data.shape[1]/2))],r2,axis=1)
# train_data=np.concatenate((train_data,r3),axis=1)
# return train_data
def data_set_order(file):
    """Load a Lorenz training CSV and interleave the three state series.

    The raw row layout is [x(1001) | y(1001) | z(1001) | targets]; the output
    interleaves them time-step-wise as (x_t, y_t, z_t) triples, drops the last
    two rows, and rescales the final target column by 1/100.
    """
    raw = np.array(pd.read_csv(file))[:-2, :]
    xs = raw[:, :1001]
    ys = raw[:, 1001:2002]
    zs = raw[:, 2002:3003]
    targets = raw[:, 3003:]
    targets[:, -1] = targets[:, -1] / 100
    interleaved = np.empty((raw.shape[0], 3003), dtype=raw.dtype)
    interleaved[:, 0::3] = xs
    interleaved[:, 1::3] = ys
    interleaved[:, 2::3] = zs
    return np.concatenate((interleaved, targets), axis=1)
# def data_set_order(file):
# train_data = np.array(pd.read_csv(file))[:-2,:]
# r0=train_data[:,:1001][:,:201]
# r1=train_data[:,1001:2002][:,:201]
# r2=train_data[:,2002:3003][:,:201]
# r3=train_data[:,3003:]
# r3[:,-1]=r3[:,-1]/100
# train_data=np.insert(r0,[i+1 for i in range(r0.shape[1])],r1,axis=1)
# train_data=np.insert(train_data,[(i+1)*2 for i in range(int(train_data.shape[1]/2))],r2,axis=1)
# train_data=np.concatenate((train_data,r3),axis=1)
# return train_data
# Load the Lorenz observation training set (first 10000 samples only).
train_data = data_set_order('lorenz_cov_train_v2/trainset_withx_steps1000_11.csv')[:10000,:]
print("train_data shape: ",train_data.shape)
# ###############################################################################
# LSTM1000
# Inputs: the full interleaved (x, y, z) series; outputs: the R parameters.
input_data = train_data[:,0:1001*3]
output_data = train_data[:,1001*3:]
# LSTM200
# input_data = train_data[:,0:603]
# output_data = train_data[:,603:]
########################################################################
train_part = 0.97
# threshold = int(train_part*train_data.shape[0])
# NOTE(review): threshold is hard-coded to 10000, so the "test" split below
# covers the whole loaded set, not the 3% tail -- confirm this is intended.
threshold=10000
##########################################################################
test_input = input_data[:threshold,:]
true_test_output = output_data[:threshold,:]
# test_input = input_data [threshold:,:]
# true_test_output = output_data[threshold:,:]
# X1 = train_input
# Y1 = train_output
X2 = test_input
print("X2 shape: ",X2.shape[0])
#Y2 = ValidationSet_Y
############################################################################
# Reference R matrices produced by the DA experiments: one 3x3 covariance
# per sample.  For each sample extract the three off-diagonal entries and
# the mean variance (trace / 3).
R = np.load('label_data/di05_original_version_R_all_10000.npy')
# Fix: the previous loop np.concatenate-d one row per iteration (O(n^2)
# total copying) and its initial zeros array was dead; build a list and
# stack once instead -- the resulting array is identical.
_rows = []
for i in range(R.shape[0]):
    print(i)
    _rows.append([R[i, 0, 1], R[i, 0, 2], R[i, 1, 2], np.trace(R[i, :, :]) / 3])
PredValSet2 = np.array(_rows)
#def my_loss_fn(y_true, y_pred):
#
# return K.mean(K.abs(y_true - y_pred) * weight)
# from tensorflow.keras.models import load_model
# model1=load_model('data2/sequentiallstm1000_ing_f.h5')
# model1=load_model('data2/sequentiallstm200_ing_f.h5')
# Calculate predictions
# PredValSet2 = model1.predict(X2.reshape(X2.shape[0],1001,3))
# PredValSet2 = model1.predict(X2.reshape(X2.shape[0],201,3))
# PredTestSet = model.predict(X1)
# PredValSet = model.predict(X2)
# Save predictions
#np.savetxt("numerique/trainresults_raindebit.csv", PredTestSet, delimiter=",")
#np.savetxt("numerique/valresults_raindebit.csv", PredValSet, delimiter=",")
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('Model loss')
# plt.ylabel('Loss')
# plt.xlabel('Epoch')
# plt.legend(['Train', 'Test'], loc='upper left')
# #plt.savefig('figure_dp/loss_trace.eps', format='eps',bbox_inches='tight')
# plt.show()
#plt.plot(true_test_output[:,1],'r',label = "true")
#plt.plot(PredValSet[:,1],label = "model")
#plt.title("1st coeff linear")
#plt.legend()
#plt.show()
#deep_error = []
#
#for i in range(150):
#
# deep_error.append(np.linalg.norm(PredValSet[:,i]-true_test_output[:,i]))
#
#print('deep_error',deep_error)
# %%
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,0],true_test_output[:,0],'o', color='blue',markersize=5,label='lstm')
# %%
# Scatter of predicted vs. true first off-diagonal correlation (r0); the
# prediction is normalized by the predicted mean variance, red line = ideal.
plt.plot(PredValSet2[:,0]/PredValSet2[:,3],true_test_output[:,0],'o', color='b',markersize=5)
plt.plot(true_test_output[:,0],true_test_output[:,0],'-', color='r',linewidth=5)
plt.xlabel('prediction',fontsize=22)
plt.ylabel('true value',fontsize=22)
plt.xticks([-2.00,-1.00,0.00,1.00,2.00])
plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.legend()
plt.show()
# %%
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,0],true_test_output[:,0],'o', color='blue',markersize=5,label='lstm')
# plt.plot(PredValSet2[:,0],true_test_output[:,0],'o', color='b',markersize=5)
# plt.plot(true_test_output[:,0],true_test_output[:,0],'o', color='r',markersize=5)
# plt.plot(PredValSet3[:,0],true_test_output[:,0],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,0],true_test_output[:,0],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,0],true_test_output[:,0],'o', color='m',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.legend()
# plt.show()
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,1],true_test_output[:,1],'o', color='blue',markersize=5,label='lstm')
# %%
# Scatter of predicted vs. true second off-diagonal correlation (r1),
# normalized by the predicted mean variance; red line = ideal fit.
plt.plot(PredValSet2[:,1]/PredValSet2[:,3],true_test_output[:,1],'o', color='b',markersize=5)
plt.plot(true_test_output[:,1],true_test_output[:,1],'-', color='r',linewidth=5)
# plt.xlabel('prediction',fontsize=22)
# plt.ylabel('true value',fontsize=22)
# plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.xticks(fontsize=22)
# plt.yticks(fontsize=22)
# plt.plot(PredValSet3[:,1],true_test_output[:,1],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,1],true_test_output[:,1],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,1],true_test_output[:,1],'o', color='m',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
plt.legend()
plt.show()
# %%
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,1],true_test_output[:,1],'o', color='blue',markersize=5,label='lstm')
# plt.plot(PredValSet2[:,1],true_test_output[:,1],'o', color='b',markersize=5)
# plt.plot(true_test_output[:,1],true_test_output[:,1],'o', color='r',markersize=5)
# plt.plot(PredValSet3[:,1],true_test_output[:,1],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,1],true_test_output[:,1],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,1],true_test_output[:,1],'o', color='m',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.legend()
# plt.show()
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,2],true_test_output[:,2],'o', color='blue',markersize=5,label='lstm')
# plt.plot(PredValSet2[:,2],true_test_output[:,2],'o', color='b',markersize=5)
# plt.plot(true_test_output[:,2],true_test_output[:,2],'o', color='r',markersize=5)
# plt.plot(PredValSet3[:,2],true_test_output[:,2],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,2],true_test_output[:,2],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,2],true_test_output[:,2],'o', color='m',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.legend()
# plt.show()
# plt.plot(PredValSet1[:,2],true_test_output[:,2],'o', color='blue',markersize=5,label='lstm')
# %%
# Scatter of predicted vs. true third off-diagonal correlation (r2),
# normalized by the predicted mean variance; red line = ideal fit.
plt.plot(PredValSet2[:,2]/PredValSet2[:,3],true_test_output[:,2],'o', color='b',markersize=5)
plt.plot(true_test_output[:,2],true_test_output[:,2],'-', color='r',linewidth=5)
plt.xlabel('prediction',fontsize=22)
plt.ylabel('true value',fontsize=22)
plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.legend()
plt.show()
# %%
# plt.xlim(-10, 10)
# plt.ylim(-10, 10)
# plt.plot(PredValSet1[:,3],true_test_output[:,3],'o',color='blue',markersize=5,label='lstm')
# plt.plot(PredValSet2[:,3],true_test_output[:,3],'o', color='b',markersize=5)
# plt.plot(true_test_output[:,3],true_test_output[:,3],'o', color='r',markersize=5)
# plt.plot(PredValSet3[:,3],true_test_output[:,3],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,3],true_test_output[:,3],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,3],true_test_output[:,3],'o', color='m',markersize=5)
# plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.legend()
# plt.show()
# plt.xlim(-10, 10)
# plt.ylim(-10, 10)
# plt.plot(PredValSet1[:,3],true_test_output[:,3],'o',color='blue',markersize=5,label='lstm')
# %%
# Scatter of predicted vs. true mean variance; the true targets were stored
# divided by 100 (see data_set_order), hence the *100 rescaling here.
plt.plot(PredValSet2[:,3],true_test_output[:,3]*100,'o', color='b',markersize=5)
plt.plot(true_test_output[:,3]*100,true_test_output[:,3]*100,'-', color='r',linewidth=5)
# plt.xlabel('prediction',fontsize=22)
# plt.ylabel('true value',fontsize=22)
# # plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.xticks(fontsize=22)
# plt.yticks(fontsize=22)
plt.legend()
plt.show()
# %%
# plt.plot(true_test_output[:,0],color='b',label='r0')
# plt.plot(true_test_output[:,1],color='r',label='r1')
# plt.plot(true_test_output[:,2],color='y',label='r2')
# plt.legend()
# plt.show()
##########################################################################################"
# predint = model.predict(train_input[:3000])
# trueint = train_output[:3000]
# plt.plot(predint[:,3],trueint[:,3],'o', color='blue',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.show()
| 9,710 | 29.731013 | 101 | py |
LSTM_Covariance | LSTM_Covariance-main/lorenz/lorenz_lstm1000.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import r2_score
import tensorflow as tf
import keras.backend as K
import sys
# check scikit-learn version
# check scikit-learn version
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
def data_set_order(file):
    """Load a Lorenz training CSV and interleave the three state series.

    The raw row layout is [x(1001) | y(1001) | z(1001) | targets]; the output
    interleaves them time-step-wise as (x_t, y_t, z_t) triples, drops the last
    two rows, and rescales the final target column by 1/100.
    """
    raw = np.array(pd.read_csv(file))[:-2, :]
    xs = raw[:, :1001]
    ys = raw[:, 1001:2002]
    zs = raw[:, 2002:3003]
    targets = raw[:, 3003:]
    targets[:, -1] = targets[:, -1] / 100
    interleaved = np.empty((raw.shape[0], 3003), dtype=raw.dtype)
    interleaved[:, 0::3] = xs
    interleaved[:, 1::3] = ys
    interleaved[:, 2::3] = zs
    return np.concatenate((interleaved, targets), axis=1)
#input
# Load the full Lorenz training set and report the intended train/val sizes.
train_data = data_set_order('lorenz_cov_train_v2/trainset_withx_steps1000_11.csv')
print("train_data shape: ",train_data.shape)
print(f"training dataset size: {train_data.shape[0]*0.9}")
print(f"validation dataset size: {train_data.shape[0]*0.1}")
# NOTE(review): this sys.exit() aborts the script right here, so everything
# below (shuffling, splitting, model training) is dead code -- it looks like
# a leftover debugging probe; remove it to actually train.
sys.exit()
# train_data1 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis3.csv')
# print("train_data1 shape: ",train_data1.shape)
# train_data2 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis4.csv')
# print("train_data2 shape: ",train_data2.shape)
# train_data3 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis5.csv')
# print("train_data3 shape: ",train_data3.shape)
# train_data4 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis6.csv')
# print("train_data4 shape: ",train_data4.shape)
# train_data5 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis7.csv')
# print("train_data5 shape: ",train_data5.shape)
# train_data6 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis8.csv')
# print("train_data6 shape: ",train_data6.shape)
#size: num_steps*3,r1,r2,r3,v
#########################################################################################
#train_data = np.array(pd.read_csv('data_1000steps/trainset_withx_1000steps.csv'))
#
#
#train_data1 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis1.csv'))
#
#train_data2 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis2.csv'))
#
#train_data3 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis3.csv'))
# train_data = np.concatenate((train_data6,train_data5),axis = 0)
# train_data = np.concatenate((train_data,train_data4),axis = 0)
# train_data = np.concatenate((train_data,train_data3),axis = 0)
# train_data = np.concatenate((train_data,train_data2),axis = 0)
# train_data = np.concatenate((train_data,train_data1),axis = 0)
# train_data = np.concatenate((train_data,train_data0),axis = 0)
# train_data=train_data[:120000,:]
#weightstrain_data[:,604:]
# shuffle samples, then split columns into LSTM inputs (1001*3 = 3003
# interleaved observations) and the 4 regression targets
np.random.shuffle(train_data )
input_data = train_data[:,0:3003]
output_data = train_data[:,3003:]
########################################################################
train_part = 0.97
threshold = int(train_part*train_data.shape[0])
##########################################################################
# NOTE(review): this 97/3 split only sets aside the scatter-plot test rows;
# model.fit below trains on input_data (all rows), so X2 is also seen during
# training - possible leakage, confirm whether that is intended.
train_input = input_data[:threshold]
print("input_data shape: ",input_data.shape)
train_output = output_data[:threshold]
print("output_data shape: ",output_data.shape)
test_input = input_data [threshold:]
true_test_output = output_data[threshold:]
X1 = train_input
Y1 = train_output
X2 = test_input
#Y2 = ValidationSet_Y
############################################################################
#def my_loss_fn(y_true, y_pred):
#
# return K.mean(K.abs(y_true - y_pred) * weight)
# ========================================================================================
from keras.layers import LSTM,Dropout
from keras.layers import TimeDistributed
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
# save data
import os
import json
import pickle
# output directory for checkpoints, the final model and the training history
if not os.path.isdir('save_data_v2'):
    os.makedirs('save_data_v2')
hidden_size=200
input_sample=input_data.shape[0] #for one sample
output_sample=output_data.shape[0]
# reshape flat rows into (samples, time steps, channels) for the LSTM
# NOTE(review): the trailing comment says 201 steps but this script uses 1001
input_data=input_data.reshape(input_sample,1001,3) #201 is the time steps in data_generation
output_data=output_data.reshape(output_sample,4)
use_dropout=True  # NOTE(review): unused - no Dropout layer is ever added below
# single-layer LSTM regressor: 200 hidden units -> 4 targets (r1, r2, r3, v)
model = Sequential()
model.add(LSTM(hidden_size,input_shape=(1001,3)))
model.add(Dense(4))
# opt = Adam(lr=0.0001)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mae'])
print(model.summary())
# stop after 50 epochs without val_loss improvement; keep the best checkpoint
es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=50)
# modelcheckpoint
mc=ModelCheckpoint('save_data_v2/sequentiallstm1000_ing.h5',monitor='val_loss',mode='min',save_best_only=True,verbose=1)
history=model.fit(input_data, output_data, validation_split=0.1, epochs=100, batch_size=128, verbose=1,callbacks=[es,mc])
# model.save('save_data/sequentiallstm2')
model.save('save_data_v2/sequentiallstm1000_ing_f.h5')
# https://stackoverflow.com/a/44674337/10349608
with open('save_data_v2/sequentiallstm1000_ing_history.pickle', 'wb') as file_his:
    pickle.dump(history.history, file_his)
# Calculate predictions
PredTestSet = model.predict(X1.reshape(X1.shape[0],1001,3))
PredValSet = model.predict(X2.reshape(X2.shape[0],1001,3))
# loss curves, then predicted-vs-true scatter plots for target columns 2 and 3
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
#plt.savefig('figure_dp/loss_trace.eps', format='eps',bbox_inches='tight')
plt.show()
plt.plot(PredValSet[:,2],true_test_output[:,2],'o', color='blue',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
plt.show()
plt.plot(PredValSet[:,3],true_test_output[:,3],'o',color='blue',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
plt.show()
# predint = model.predict(train_input[:3000])
# trueint = train_output[:3000]
# plt.plot(predint[:,3],trueint[:,3],'o', color='blue',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.show()
| 6,328 | 25.817797 | 121 | py |
LSTM_Covariance | LSTM_Covariance-main/lorenz/simulated_data_generation.py | # -*- coding: utf-8 -*-
# generate the trainning set for keras regression
import numpy as np
from scipy.optimize import fmin
from scipy.optimize import fmin_l_bfgs_b
#from scipy.optimize import fmin_ncg
from scipy.linalg import sqrtm
import math
from constructB import *
from lorentz_attractor import *
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import time
import random
import lorentz_attractor
import sklearn
from sklearn import datasets
import os
# make sure the output directory for the generated training CSVs exists
if not os.path.isdir('lorenz_cov_train_v2'):
    os.makedirs('lorenz_cov_train_v2')
#######################################################################
def correlation_from_covariance(covariance):
    """Convert a covariance matrix to its correlation matrix.

    Each entry is divided by the product of the two standard deviations;
    entries that are exactly zero in the covariance stay exactly zero.
    """
    std = np.sqrt(np.diag(covariance))
    corr = covariance / np.outer(std, std)
    corr[covariance == 0] = 0  # avoid 0/0 artifacts for exact zeros
    return corr
######################################################################
#define matrix R by extra-diagonal elements
def R_covariance_dim3(r1, r2, r3):
    """Build a symmetric 3x3 correlation matrix with unit diagonal.

    r1, r2, r3 are the (0,1), (0,2) and (1,2) off-diagonal entries.
    """
    return np.array([[1., r1, r2],
                     [r1, 1., r3],
                     [r2, r3, 1.]])
################################################################################
#######################################################################
def cov_to_cor(M):
    """Map a covariance matrix to the associated correlation matrix.

    Computes D^{-1/2} M D^{-1/2}, where D is the diagonal (variances),
    via the matrix square root of the diagonal part.
    """
    d_inv_sqrt = np.linalg.inv(sqrtm(np.diag(np.diag(M))))
    return np.dot(d_inv_sqrt, np.dot(M, d_inv_sqrt))
def lorenz_1step(x, y, z, s=10, r=28, b=2.667, dt=0.001):
    """Advance the Lorenz-63 state one explicit-Euler step of size dt.

    Bug fix: s, r and b were previously accepted but silently dropped
    (lorenz() was always called with its own defaults); they are now
    forwarded.  Backward compatible for callers using the defaults.
    """
    x_dot, y_dot, z_dot = lorenz(x, y, z, s, r, b)
    x_next = x + (x_dot * dt)
    y_next = y + (y_dot * dt)
    z_next = z + (z_dot * dt)
    return x_next, y_next, z_next
def VAR_3D(xb, Y, H, B, R):
    """One BLUE / 3D-Var analysis step.

    Returns the analysis state xa (column vector) and its error covariance
    A (Joseph form).  NOTE(review): Y is reshaped in place to a column
    vector, so the caller's array shape is modified - kept for
    backward compatibility.
    """
    xb_col = np.copy(xb)
    xb_col.shape = (xb_col.size, 1)
    Y.shape = (Y.size, 1)  # in-place reshape of the caller's observations
    n = xb_col.size
    Ht = np.transpose(H)
    # Kalman gain K = B H^T (H B H^T + R)^-1
    innov_cov = np.dot(H, np.dot(B, Ht)) + R
    K = np.dot(B, np.dot(Ht, np.linalg.inv(innov_cov)))
    # Joseph-form posterior covariance
    I_KH = np.eye(n) - np.dot(K, H)
    A = np.dot(np.dot(I_KH, B), np.transpose(I_KH)) + np.dot(np.dot(K, R), np.transpose(K))
    # analysis update xa = xb + K (Y - H xb)
    xa = np.copy(xb_col + np.dot(K, Y - np.dot(H, xb_col)))
    return xa, A
###################################################################################
###################################################################################
#parameters
num_steps = 1000
# fixed linear observation operator mapping the 3 Lorenz states to 3 observations
H = np.array([[1,1,0],[2,0,1],[0,0,3]])
# default observation / background error covariances (R is re-drawn per sample below)
R = 0.001*np.array([[1,0.4,0.1],[0.4,1,0.4],[0.1,0.4,1]])
B =0.01*np.array([[1,0.2,0.],[0.2,1,0.2],[0.,0.2,1]])
#Q = 0.0001*np.eye(3)
###################################################################################
#save the trainning set for different R
# placeholder first row; sample rows are appended below and the placeholder
# is stripped with trainning_set[1:,:] after the loop
trainning_set = np.zeros((1,num_steps*3+3+4))
###################################################################################
#############################################################################
# true states vector 3 * number_steps
xs,ys,zs = lorenz_attractor(s=10, r=28, b=2.667, dt = 0.001, num_steps=1000)
x_true = np.zeros((3,num_steps+1))
x_true[0,:] = np.copy(xs)
x_true[1,:] = np.copy(ys)
x_true[2,:] = np.copy(zs)
###############################################################################
# Build 2000*10 noisy-observation samples: perturb the initial condition,
# draw a random SPD observation covariance R = v * Corr, observe the
# trajectory through H with correlated noise, and append one training row
# [y_obs flattened, r1, r2, r3, v] per sample.
for ii in range(2000):
    if ii%100 ==0:
        print(ii)
    # construct observations
    #=========================================================================
    #generate x with noise
    for repetation in range(10):
        # trajectory from a randomly perturbed initial condition
        xs,ys,zs = lorenz_attractor(s=10, r=28, b=2.667, dt = 0.001, num_steps = 1000,x0 = 0.+np.random.normal(0, 0.05),
                                    y0=1.+np.random.normal(0, 0.05),z0=1.05+np.random.normal(0, 0.05))
        x_true = np.zeros((3,num_steps+1))
        x_true[0,:] = np.copy(xs)
        x_true[1,:] = np.copy(ys)
        x_true[2,:] = np.copy(zs)
        #=========================================================================
        y_true = np.zeros((3,num_steps+1))
        y_obs = np.zeros((3,num_steps+1))
        # noise level v and a random SPD correlation matrix define R
        v = np.random.uniform(0,100.)
        R = correlation_from_covariance(sklearn.datasets.make_spd_matrix(3)) #SPD covariance
        r1 = R[0,1]
        r2 = R[0,2]
        r3 = R[1,2]
        R = v*R
        for i in range(num_steps+1):
            # NOTE(review): these two prints run once per time step (about
            # 20 million lines overall) and dominate the runtime
            print("sample time: ",ii)
            print("iteration time: ",i)
            x = x_true[:,i]
            x.shape = (x.size,1)
            y = np.dot(H,x) #why this is this expression to calculate y?
            y.shape = (y.size,)
            y_true[:,i] = y
            # correlated observation noise drawn from N(0, R)
            y_noise = np.random.multivariate_normal(np.zeros(3),R)
            y_noise.shape = (y_noise.size,)
            y_noise += y
            y_obs[:,i] = y_noise
        parameters = np.array([r1,r2,r3,v]) #output for deep learning regression
        #train_row = np.concatenate((y_obs.ravel(),x_true.ravel())) #input for deep learning #what are the functionalities of these r ->covaraicen! why v is not necessary???
        train_row = y_obs.ravel()
        train_row = np.concatenate((train_row.ravel(),parameters))
        train_row.shape = (1,train_row.size)
        trainning_set = np.concatenate((trainning_set,train_row), axis=0)
        # if repetation+ii*10==5000:
        #     np.savetxt(f"lorenz_cov_train_v2/trainset_withx_steps1000_test_{10000+repetation+ii*10}.csv", trainning_set, delimiter=",")
# strip the all-zero placeholder row created before the loop
trainning_set = trainning_set[1:,:]
#####################################################################################""
np.savetxt("lorenz_cov_train_v2/trainset_withx_steps1000_test8.csv", trainning_set, delimiter=",") | 5,902 | 34.993902 | 192 | py |
LSTM_Covariance | LSTM_Covariance-main/lorenz/constructB.py | # coding: utf8
#construction of matrix B and special H with measure on the boarder
import numpy as np
import math
from scipy.linalg import sqrtm
##def B_Balgovind(n,Sigma,L):
## Gamma = np.identity(n)
## for i in xrange(n):
## for j in xrange(n):
## Gamma[i,j] = ( 1. + abs(i-j)/L)*np.exp(-abs(i-j)/L)
## B = np.dot(Sigma,np.dot(Gamma,Sigma))
## return B
def get_index_2d(dim, n):
    """Convert a flat index n into 2-D grid coordinates (row, col).

    Both coordinates are returned as floats, matching the original
    behaviour expected by the distance computations downstream.
    """
    col = float(n % dim)
    row = (n - col) / dim
    return (row, col)
#identite
def identiity(n):
    """Return the n x n identity matrix (n = size of the state vector xb).

    The misspelled name is kept on purpose for compatibility with callers.
    """
    return np.eye(n)
#Blgovind (Balgovind correlation functions: file:///C:/paper_ml/sibo/Polyphemus-1.2-Guide-2.pdf)
def Balgovind(dim, L):
    """Balgovind (second-order auto-regressive) correlation matrix.

    Built for a dim x dim grid with length-scale L, then duplicated into a
    two-field block-diagonal matrix of size (2*dim^2) x (2*dim^2).
    """
    n = dim ** 2
    sub = np.zeros((n, n))
    for p in range(n):
        r1, c1 = get_index_2d(dim, p)
        for q in range(n):
            r2, c2 = get_index_2d(dim, q)
            dist = math.sqrt((r1 - r2) ** 2 + (c1 - c2) ** 2)
            sub[p, q] = (1 + dist / L) * math.exp(-dist / L)
    zeros = np.zeros((n, n))
    top = np.concatenate((sub, zeros), axis=1)
    bottom = np.concatenate((zeros, sub), axis=1)
    return np.concatenate((top, bottom), axis=0)
def Gaussian(dim, L):
    """Gaussian correlation matrix exp(-r^2 / (2 L^2)) on a dim x dim grid.

    Duplicated into a two-field block-diagonal matrix of size
    (2*dim^2) x (2*dim^2).
    """
    n = dim ** 2
    sub = np.zeros((n, n))
    for p in range(n):
        r1, c1 = get_index_2d(dim, p)
        for q in range(n):
            r2, c2 = get_index_2d(dim, q)
            dist = math.sqrt((r1 - r2) ** 2 + (c1 - c2) ** 2)
            sub[p, q] = math.exp(-dist ** 2 / (2 * L ** 2))
    zeros = np.zeros((n, n))
    top = np.concatenate((sub, zeros), axis=1)
    bottom = np.concatenate((zeros, sub), axis=1)
    return np.concatenate((top, bottom), axis=0)
def expontielle(dim, L):
    """Exponential correlation matrix exp(-r / L) on a dim x dim grid.

    Duplicated into a two-field block-diagonal matrix of size
    (2*dim^2) x (2*dim^2).  (Name keeps the original spelling.)
    """
    n = dim ** 2
    sub = np.zeros((n, n))
    for p in range(n):
        r1, c1 = get_index_2d(dim, p)
        for q in range(n):
            r2, c2 = get_index_2d(dim, q)
            dist = math.sqrt((r1 - r2) ** 2 + (c1 - c2) ** 2)
            sub[p, q] = math.exp(-dist / L)
    zeros = np.zeros((n, n))
    top = np.concatenate((sub, zeros), axis=1)
    bottom = np.concatenate((zeros, sub), axis=1)
    return np.concatenate((top, bottom), axis=0)
def bord_M_aleatoire(dimension, proba):
M=np.zeros((dimension**2,dimension**2))
for i in range(dimension**2):
for j in range(dimension**2):
if j % 10==0 or j % 10==9:
M[i,j]=np.random.binomial(1, proba)
elif j<=9 or j>=90:
M[i,j]=np.random.binomial(1, proba) #if there is some problems? because both of them are np.random.binomial????
return M # what are other parameters which are not satisfied with this condition ???
def cov_to_cor(B):
    """Return the correlation matrix associated with covariance B.

    Computes D^{-1/2} B D^{-1/2}; any imaginary residue from the matrix
    square root is discarded before the products.
    """
    d_inv_sqrt = np.linalg.inv(sqrtm(np.diag(np.diag(B))))
    d_inv_sqrt = np.copy(d_inv_sqrt.real)
    return np.dot(d_inv_sqrt, np.dot(B, d_inv_sqrt))
########################################################################
#covariance 1d
def Balgovind_1D(dim, L):
    """1-D Balgovind (SOAR) correlation matrix.

    B[i, j] = (1 + d/L) * exp(-d/L) with d = |i - j|.
    """
    def corr(d):
        return (1 + d / L) * math.exp(-d / L)
    return np.array([[corr(abs(i - j) * 1.) for j in range(dim)]
                     for i in range(dim)])
def expontielle_1D(dim, L):
    """1-D exponential correlation matrix: B[i, j] = exp(-|i-j| / L)."""
    def corr(d):
        return math.exp(-d / L)
    return np.array([[corr(abs(i - j) * 1.) for j in range(dim)]
                     for i in range(dim)])
def Gaussian_1D(dim, L):
    """1-D Gaussian correlation matrix: B[i, j] = exp(-|i-j|^2 / (2 L^2))."""
    def corr(d):
        return math.exp(-d ** 2 / (2 * L ** 2))
    return np.array([[corr(abs(i - j) * 1.) for j in range(dim)]
                     for i in range(dim)])
LSTM_Covariance | LSTM_Covariance-main/lorenz/lorentz_attractor.py | # -*- coding: utf-8 -*-
# lorentz system
import numpy as np
import time
import random
import matplotlib.pyplot as plt
import itertools
import math
from constructB import *
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
def lorenz(x, y, z, s=10, r=28, b=2.667):
    """Lorenz-63 time derivatives at the point (x, y, z).

    Given:
        x, y, z: a point of interest in three dimensional space
        s, r, b: attractor parameters (sigma, rho, beta)
    Returns:
        (dx/dt, dy/dt, dz/dt) at that point.
    """
    return (s * (y - x),
            r * x - y - x * z,
            x * y - b * z)
def lorenz_1step(x, y, z, s=10, r=28, b=2.667, dt=0.001):
    """Advance the Lorenz-63 state one explicit-Euler step of size dt.

    Bug fix: s, r and b were previously accepted but silently dropped
    (lorenz() was always called with its own defaults); they are now
    forwarded.  Backward compatible for callers using the defaults.
    """
    x_dot, y_dot, z_dot = lorenz(x, y, z, s, r, b)
    x_next = x + (x_dot * dt)
    y_next = y + (y_dot * dt)
    z_next = z + (z_dot * dt)
    return x_next, y_next, z_next
def lorenz_attractor(s=10, r=28, b=2.667, dt=0.001, num_steps=1000, x0=0., y0=1., z0=1.05):
    """Integrate the Lorenz-63 system with explicit Euler.

    Returns three arrays (xs, ys, zs) of length num_steps + 1 holding the
    trajectory starting from (x0, y0, z0).

    Bug fix: s, r and b were accepted but never forwarded to lorenz(), so
    non-default attractor parameters were silently ignored.  Backward
    compatible for all callers that used the defaults.
    """
    # Need one more slot than steps for the initial values
    xs = np.empty(num_steps + 1)
    ys = np.empty(num_steps + 1)
    zs = np.empty(num_steps + 1)
    xs[0], ys[0], zs[0] = x0, y0, z0
    # Step through "time": derivative at the current point, scaled by dt
    for i in range(num_steps):
        x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i], s, r, b)
        xs[i + 1] = xs[i] + (x_dot * dt)
        ys[i + 1] = ys[i] + (y_dot * dt)
        zs[i + 1] = zs[i] + (z_dot * dt)
    return xs, ys, zs
# fixed initial-condition perturbation, drawn once at import time so every
# call to lorenz_attractor_noisy() starts from the same perturbed state
B = 0.01*np.eye(3)
x_noise_initital = np.random.multivariate_normal(np.zeros(3),B)
def lorenz_attractor_noisy(s=10, r=28, b=2.667, dt=0.001, num_steps=100000, B=0.01*np.eye(3), Q=1e-5*np.eye(3)):
    """Euler integration of Lorenz-63 with stochastic perturbations.

    The initial state (0, 1, 1.05) is shifted by the module-level draw
    x_noise_initital (fixed at import time), and each Euler step adds
    model noise drawn from N(0, Q).

    Bug fix: s, r and b were accepted but never forwarded to lorenz(), so
    non-default attractor parameters were silently ignored.  Backward
    compatible for callers using the defaults.

    Note: parameter B is not used inside this function (the initial
    perturbation comes from the module-level draw); kept for interface
    compatibility.
    """
    # Need one more slot than steps for the initial values
    xs = np.empty(num_steps + 1)
    ys = np.empty(num_steps + 1)
    zs = np.empty(num_steps + 1)
    # Set initial values, shifted by the fixed import-time perturbation
    xs[0], ys[0], zs[0] = (0., 1., 1.05)
    xs[0] += x_noise_initital[0]
    ys[0] += x_noise_initital[1]
    zs[0] += x_noise_initital[2]
    # Euler step plus additive model noise at every step
    for i in range(num_steps):
        x_noise_step = np.random.multivariate_normal(np.zeros(3), Q)
        x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i], s, r, b)
        xs[i + 1] = xs[i] + (x_dot * dt) + x_noise_step[0]
        ys[i + 1] = ys[i] + (y_dot * dt) + x_noise_step[1]
        zs[i + 1] = zs[i] + (z_dot * dt) + x_noise_step[2]
    return xs, ys, zs
# Plot
if __name__ == '__main__' :
    # plot for amusing
    # for i in range(1200,1400):
    #     xs,ys,zs = lorenz_attractor(num_steps = 10*i)
    #     xb,yb,zb = lorenz_attractor(num_steps = 10*i-300)
    #     fig = plt.figure()
    #     ax = fig.gca(projection='3d')
    #
    #     #ax.plot(xs, ys, zs, lw=0.5)
    #     ax.scatter(xs, ys, zs, lw=0.5)
    #     ax.scatter(xb, yb, zb, 'r',lw=0.35)
    #     ax.set_xlabel("X Axis")
    #     ax.set_ylabel("Y Axis")
    #     ax.set_zlabel("Z Axis")
    #     ax.set_title("Lorenz Attractor")
    #     plt.savefig('tmp_figure/lorenz_catch_'+str(i)+'.png', format='png')
    #     #plt.show()
    #     #plt.pause(3)
    #     plt.close()
    #
    # import imageio
    # images = []
    # for i in range(1200,1400):
    #     images.append(imageio.imread('tmp_figure/lorenz_catch_'+str(i)+'.png'))
    # imageio.mimsave('tmp_figure/lorenz_catch.gif', images)
    ##
    # import glob
    # from PIL import *
    # # Create the frames
    # frames = []
    # imgs = glob.glob("*.png")
    # for i in range(2000):
    #     new_frame = Image.open('tmp_figure/lorenz_compose_'+str(i)+'.png')
    #     frames.append(new_frame)
    #
    # # Save into a GIF file that loops forever
    # frames[0].save('tmp_figure/png_to_gif.gif', format='GIF',
    #                append_images=frames[1:],
    #                save_all=True,
    #                duration=300, loop=0)
    #############################################################################################"
    # print noisy system
    # demo: integrate the noisy attractor once, then render frames 1200-1399
    # of the growing trajectory and stitch them into a GIF
    xs,ys,zs = lorenz_attractor_noisy(num_steps = 1400)
    # plot for amusing
    for i in range(1200,1400):
        #xs,ys,zs = lorenz_attractor_noisy(num_steps = 10*i)
        #xb,yb,zb = lorenz_attractor(num_steps = 10*i-300)
        fig = plt.figure()
        # NOTE(review): fig.gca(projection='3d') was removed in Matplotlib 3.6;
        # modern code should use fig.add_subplot(projection='3d')
        ax = fig.gca(projection='3d')
        #ax.plot(xs, ys, zs, lw=0.5)
        ax.scatter(xs[:i], ys[:i], zs[:i], lw=0.5)
        #ax.scatter(xb, yb, zb, 'r',lw=0.35)
        ax.set_xlabel("X Axis")
        ax.set_ylabel("Y Axis")
        ax.set_zlabel("Z Axis")
        ax.set_title("Lorenz Attractor")
        # NOTE(review): assumes the tmp_figure/ directory already exists - confirm
        plt.savefig('tmp_figure/lorenz_noisy_'+str(i)+'.png', format='png')
        #plt.show()
        #plt.pause(3)
        plt.close()
    import imageio
    images = []
    for i in range(1200,1400):
        images.append(imageio.imread('tmp_figure/lorenz_noisy_'+str(i)+'.png'))
imageio.mimsave('tmp_figure/lorenz_noisy.gif', images) | 5,328 | 30.720238 | 119 | py |
LSTM_Covariance | LSTM_Covariance-main/lorenz/lorenz_lstm200.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import r2_score
import tensorflow as tf
import keras.backend as K
# check scikit-learn version
# check scikit-learn version
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
def data_set_order(file):
    """Load a raw training CSV and regroup columns for the 201-step model.

    The raw file stores three observation channels as contiguous
    1001-column stretches followed by the 4 regression targets
    (r1, r2, r3, v); only the first 201 time steps of each channel are
    kept.  The result interleaves the channels per time step as
    (ch0_t, ch1_t, ch2_t), appends the targets, and divides the last
    target (v) by 100.  The last two CSV rows are dropped.
    """
    raw = np.array(pd.read_csv(file))[:-2, :]
    ch0 = raw[:, :1001][:, :201]
    ch1 = raw[:, 1001:2002][:, :201]
    ch2 = raw[:, 2002:3003][:, :201]
    targets = raw[:, 3003:]
    targets[:, -1] = targets[:, -1] / 100  # rescale v
    # interleave ch1 after every ch0 column, then ch2 after every pair
    mixed = np.insert(ch0, [k + 1 for k in range(ch0.shape[1])], ch1, axis=1)
    mixed = np.insert(mixed, [(k + 1) * 2 for k in range(int(mixed.shape[1] / 2))], ch2, axis=1)
    return np.concatenate((mixed, targets), axis=1)
#input
# load and reorder the raw training CSV (see data_set_order above)
train_data = data_set_order('lorenz_cov_train_v2/trainset_withx_steps1000_1.csv')
print("train_data shape: ",train_data.shape)
# train_data1 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis3.csv')
# print("train_data1 shape: ",train_data1.shape)
# train_data2 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis4.csv')
# print("train_data2 shape: ",train_data2.shape)
# train_data3 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis5.csv')
# print("train_data3 shape: ",train_data3.shape)
# train_data4 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis6.csv')
# print("train_data4 shape: ",train_data4.shape)
# train_data5 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis7.csv')
# print("train_data5 shape: ",train_data5.shape)
# train_data6 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis8.csv')
# print("train_data6 shape: ",train_data6.shape)
#size: num_steps*3,r1,r2,r3,v
#########################################################################################
#train_data = np.array(pd.read_csv('data_1000steps/trainset_withx_1000steps.csv'))
#
#
#train_data1 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis1.csv'))
#
#train_data2 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis2.csv'))
#
#train_data3 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis3.csv'))
# train_data = np.concatenate((train_data6,train_data5),axis = 0)
# train_data = np.concatenate((train_data,train_data4),axis = 0)
# train_data = np.concatenate((train_data,train_data3),axis = 0)
# train_data = np.concatenate((train_data,train_data2),axis = 0)
# train_data = np.concatenate((train_data,train_data1),axis = 0)
# train_data = np.concatenate((train_data,train_data0),axis = 0)
# train_data=train_data[:120000,:]
#weightstrain_data[:,604:]
# shuffle samples, then split columns into LSTM inputs (201*3 = 603
# interleaved observations) and the 4 regression targets
np.random.shuffle(train_data )
input_data = train_data[:,0:603]
output_data = train_data[:,603:]
########################################################################
train_part = 0.97
threshold = int(train_part*train_data.shape[0])
##########################################################################
# NOTE(review): this 97/3 split only sets aside the scatter-plot test rows;
# model.fit below trains on input_data (all rows), so X2 is also seen during
# training - possible leakage, confirm whether that is intended.
train_input = input_data[:threshold]
print("input_data shape: ",input_data.shape)
train_output = output_data[:threshold]
print("output_data shape: ",output_data.shape)
test_input = input_data [threshold:]
true_test_output = output_data[threshold:]
X1 = train_input
Y1 = train_output
X2 = test_input
#Y2 = ValidationSet_Y
#def my_loss_fn(y_true, y_pred):
#
# return K.mean(K.abs(y_true - y_pred) * weight)
# ========================================================================================
from keras.layers import LSTM,Dropout
from keras.layers import TimeDistributed
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
# save data
import os
import json
import pickle
# output directory for checkpoints, the final model and the training history
if not os.path.isdir('save_data_v2'):
    os.makedirs('save_data_v2')
hidden_size=200
input_sample=input_data.shape[0] #for one sample
output_sample=output_data.shape[0]
# reshape flat rows into (samples, time steps, channels) for the LSTM
input_data=input_data.reshape(input_sample,201,3) #201 is the time steps in data_generation
output_data=output_data.reshape(output_sample,4)
use_dropout=True  # NOTE(review): unused - no Dropout layer is ever added below
# single-layer LSTM regressor: 200 hidden units -> 4 targets (r1, r2, r3, v)
model = Sequential()
model.add(LSTM(hidden_size,input_shape=(201,3)))
model.add(Dense(4))
# opt = Adam(lr=0.0001)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mae'])
print(model.summary())
# stop after 50 epochs without val_loss improvement; keep the best checkpoint
es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=50)
# modelcheckpoint
mc=ModelCheckpoint('save_data_v2/sequentiallstm200_ing.h5',monitor='val_loss',mode='min',save_best_only=True,verbose=1)
history=model.fit(input_data, output_data, validation_split=0.1, epochs=100, batch_size=128, verbose=1,callbacks=[es,mc])
# model.save('save_data/sequentiallstm2')
model.save('save_data_v2/sequentiallstm200_ing_f.h5')
# https://stackoverflow.com/a/44674337/10349608
with open('save_data_v2/sequentiallstm200_ing_history.pickle', 'wb') as file_his:
    pickle.dump(history.history, file_his)
# Calculate predictions
PredTestSet = model.predict(X1.reshape(X1.shape[0],201,3))
PredValSet = model.predict(X2.reshape(X2.shape[0],201,3))
# loss curves, then predicted-vs-true scatter plots for target columns 2 and 3
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
#plt.savefig('figure_dp/loss_trace.eps', format='eps',bbox_inches='tight')
plt.show()
plt.plot(PredValSet[:,2],true_test_output[:,2],'o', color='blue',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
plt.show()
plt.plot(PredValSet[:,3],true_test_output[:,3],'o',color='blue',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
plt.show()
# predint = model.predict(train_input[:3000])
# trueint = train_output[:3000]
# plt.plot(predint[:,3],trueint[:,3],'o', color='blue',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.show()
| 6,201 | 25.618026 | 121 | py |
PMEmo | PMEmo-master/features.py | #! usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This features.py is used to extract audio features based on openSIMLE.
Require: openSMILE-2.2rc1
OpenSMILE only support audios in WAV format,
so before using this script you could
transform MP3s into WAVs by transformat.sh.
'''
__author__ = 'huizhang'
import csv
import os
import shutil
import subprocess
from math import floor
import numpy as np
def extract_all_wav_feature(wavdir, distfile, opensmiledir):
    '''Extract 6373-dimension static features into one dist file.

    Runs openSMILE's IS13_ComParE configuration once per WAV found in
    wavdir, appending each result to a single dist file (removed first
    if it already exists).

    Args:
        wavdir: Path to audios in WAV format.
        distfile: Path of distfile.
        opensmiledir: Path to opensimle project root.
    Returns:
        Distfile containing 6373-dimension static features of all the WAVs.
    '''
    smile_bin = os.path.join(opensmiledir, "SMILExtract")
    config = os.path.join(opensmiledir, "config", "IS13_ComParE.conf")
    # start from a clean output file; openSMILE appends per invocation
    if os.path.exists(distfile):
        os.remove(distfile)
    for name in (f for f in os.listdir(wavdir) if f[-4:] == ".wav"):
        subprocess.check_call([smile_bin, "-C", config,
                               "-I", os.path.join(wavdir, name),
                               "-O", distfile, "-instname", name])
def extract_frame_feature(wavdir, distdir, opensmiledir):
    '''Extract lld features in frame size: 60ms, step size: 10ms.

    Recreates distdir from scratch, then runs openSMILE's
    IS13_ComParE_lld configuration on every WAV in wavdir, writing one
    CSV per input file.

    Args:
        wavdir: Path to audios in WAV format.
        distdir: Output directory (deleted and recreated if present).
        opensmiledir: Path to opensimle project root.
    Returns:
        Distfiles containing lld features for each WAV.
    '''
    smile_bin = os.path.join(opensmiledir, "SMILExtract")
    config = os.path.join(opensmiledir, "config", "IS13_ComParE_lld.conf")
    # wipe any previous run so stale CSVs never survive
    if os.path.exists(distdir):
        shutil.rmtree(distdir)
    os.mkdir(distdir)
    for name in (f for f in os.listdir(wavdir) if f[-4:] == ".wav"):
        out_csv = os.path.join(distdir, name[:-4] + ".csv")
        subprocess.check_call([smile_bin, "-C", config,
                               "-I", os.path.join(wavdir, name),
                               "-O", out_csv])
def process_dynamic_feature(llddir, distdir, all_songs_distfile, delimiter=";"):
    '''Obtain dynamic features in window size: 1s, shift size: 0.5s.
    Writes one CSV per song into distdir, plus a combined CSV with every
    song's rows, each row tagged with its musicId and a "?" class column.
    Args:
        llddir: Path to lld feature files.
        distdir: Path of distdir.
        all_songs_distfile: Path of distfile.
        delimiter: csv delimiter in lld feature files, default=';'.
    Returns:
        Distfiles containing 260-dimension dynamic features of all WAVs.
    '''
    # recreate the per-song output directory from scratch
    if os.path.exists(distdir):
        shutil.rmtree(distdir)
    os.mkdir(distdir)
    # names of features: musicId + frametime + mean/std of every LLD and delta
    headers = ['musicId', 'frameTime', 'F0final_sma_mean', 'voicingFinalUnclipped_sma_mean', 'jitterLocal_sma_mean', 'jitterDDP_sma_mean', 'shimmerLocal_sma_mean', 'logHNR_sma_mean', 'audspec_lengthL1norm_sma_mean', 'audspecRasta_lengthL1norm_sma_mean', 'pcm_RMSenergy_sma_mean', 'pcm_zcr_sma_mean', 'audSpec_Rfilt_sma[0]_mean', 'audSpec_Rfilt_sma[1]_mean', 'audSpec_Rfilt_sma[2]_mean', 'audSpec_Rfilt_sma[3]_mean', 'audSpec_Rfilt_sma[4]_mean', 'audSpec_Rfilt_sma[5]_mean', 'audSpec_Rfilt_sma[6]_mean', 'audSpec_Rfilt_sma[7]_mean', 'audSpec_Rfilt_sma[8]_mean', 'audSpec_Rfilt_sma[9]_mean', 'audSpec_Rfilt_sma[10]_mean', 'audSpec_Rfilt_sma[11]_mean', 'audSpec_Rfilt_sma[12]_mean', 'audSpec_Rfilt_sma[13]_mean', 'audSpec_Rfilt_sma[14]_mean', 'audSpec_Rfilt_sma[15]_mean', 'audSpec_Rfilt_sma[16]_mean', 'audSpec_Rfilt_sma[17]_mean', 'audSpec_Rfilt_sma[18]_mean', 'audSpec_Rfilt_sma[19]_mean', 'audSpec_Rfilt_sma[20]_mean', 'audSpec_Rfilt_sma[21]_mean', 'audSpec_Rfilt_sma[22]_mean', 'audSpec_Rfilt_sma[23]_mean', 'audSpec_Rfilt_sma[24]_mean', 'audSpec_Rfilt_sma[25]_mean', 'pcm_fftMag_fband250-650_sma_mean', 'pcm_fftMag_fband1000-4000_sma_mean', 'pcm_fftMag_spectralRollOff25.0_sma_mean', 'pcm_fftMag_spectralRollOff50.0_sma_mean', 'pcm_fftMag_spectralRollOff75.0_sma_mean', 'pcm_fftMag_spectralRollOff90.0_sma_mean', 'pcm_fftMag_spectralFlux_sma_mean', 'pcm_fftMag_spectralCentroid_sma_mean', 'pcm_fftMag_spectralEntropy_sma_mean', 'pcm_fftMag_spectralVariance_sma_mean', 'pcm_fftMag_spectralSkewness_sma_mean', 'pcm_fftMag_spectralKurtosis_sma_mean', 'pcm_fftMag_spectralSlope_sma_mean', 'pcm_fftMag_psySharpness_sma_mean', 'pcm_fftMag_spectralHarmonicity_sma_mean', 'pcm_fftMag_mfcc_sma[1]_mean', 'pcm_fftMag_mfcc_sma[2]_mean', 'pcm_fftMag_mfcc_sma[3]_mean', 'pcm_fftMag_mfcc_sma[4]_mean', 'pcm_fftMag_mfcc_sma[5]_mean', 'pcm_fftMag_mfcc_sma[6]_mean', 'pcm_fftMag_mfcc_sma[7]_mean', 'pcm_fftMag_mfcc_sma[8]_mean', 'pcm_fftMag_mfcc_sma[9]_mean', 'pcm_fftMag_mfcc_sma[10]_mean', 
    'pcm_fftMag_mfcc_sma[11]_mean', 'pcm_fftMag_mfcc_sma[12]_mean', 'pcm_fftMag_mfcc_sma[13]_mean', 'pcm_fftMag_mfcc_sma[14]_mean', 'F0final_sma_de_mean', 'voicingFinalUnclipped_sma_de_mean', 'jitterLocal_sma_de_mean', 'jitterDDP_sma_de_mean', 'shimmerLocal_sma_de_mean', 'logHNR_sma_de_mean', 'audspec_lengthL1norm_sma_de_mean', 'audspecRasta_lengthL1norm_sma_de_mean', 'pcm_RMSenergy_sma_de_mean', 'pcm_zcr_sma_de_mean', 'audSpec_Rfilt_sma_de[0]_mean', 'audSpec_Rfilt_sma_de[1]_mean', 'audSpec_Rfilt_sma_de[2]_mean', 'audSpec_Rfilt_sma_de[3]_mean', 'audSpec_Rfilt_sma_de[4]_mean', 'audSpec_Rfilt_sma_de[5]_mean', 'audSpec_Rfilt_sma_de[6]_mean', 'audSpec_Rfilt_sma_de[7]_mean', 'audSpec_Rfilt_sma_de[8]_mean', 'audSpec_Rfilt_sma_de[9]_mean', 'audSpec_Rfilt_sma_de[10]_mean', 'audSpec_Rfilt_sma_de[11]_mean', 'audSpec_Rfilt_sma_de[12]_mean', 'audSpec_Rfilt_sma_de[13]_mean', 'audSpec_Rfilt_sma_de[14]_mean', 'audSpec_Rfilt_sma_de[15]_mean', 'audSpec_Rfilt_sma_de[16]_mean', 'audSpec_Rfilt_sma_de[17]_mean', 'audSpec_Rfilt_sma_de[18]_mean', 'audSpec_Rfilt_sma_de[19]_mean', 'audSpec_Rfilt_sma_de[20]_mean', 'audSpec_Rfilt_sma_de[21]_mean', 'audSpec_Rfilt_sma_de[22]_mean', 'audSpec_Rfilt_sma_de[23]_mean', 'audSpec_Rfilt_sma_de[24]_mean', 'audSpec_Rfilt_sma_de[25]_mean', 'pcm_fftMag_fband250-650_sma_de_mean', 'pcm_fftMag_fband1000-4000_sma_de_mean', 'pcm_fftMag_spectralRollOff25.0_sma_de_mean', 'pcm_fftMag_spectralRollOff50.0_sma_de_mean', 'pcm_fftMag_spectralRollOff75.0_sma_de_mean', 'pcm_fftMag_spectralRollOff90.0_sma_de_mean', 'pcm_fftMag_spectralFlux_sma_de_mean', 'pcm_fftMag_spectralCentroid_sma_de_mean', 'pcm_fftMag_spectralEntropy_sma_de_mean', 'pcm_fftMag_spectralVariance_sma_de_mean', 'pcm_fftMag_spectralSkewness_sma_de_mean', 'pcm_fftMag_spectralKurtosis_sma_de_mean', 'pcm_fftMag_spectralSlope_sma_de_mean', 'pcm_fftMag_psySharpness_sma_de_mean', 'pcm_fftMag_spectralHarmonicity_sma_de_mean', 'pcm_fftMag_mfcc_sma_de[1]_mean', 'pcm_fftMag_mfcc_sma_de[2]_mean', 
    'pcm_fftMag_mfcc_sma_de[3]_mean', 'pcm_fftMag_mfcc_sma_de[4]_mean', 'pcm_fftMag_mfcc_sma_de[5]_mean', 'pcm_fftMag_mfcc_sma_de[6]_mean', 'pcm_fftMag_mfcc_sma_de[7]_mean', 'pcm_fftMag_mfcc_sma_de[8]_mean', 'pcm_fftMag_mfcc_sma_de[9]_mean', 'pcm_fftMag_mfcc_sma_de[10]_mean', 'pcm_fftMag_mfcc_sma_de[11]_mean', 'pcm_fftMag_mfcc_sma_de[12]_mean', 'pcm_fftMag_mfcc_sma_de[13]_mean', 'pcm_fftMag_mfcc_sma_de[14]_mean', 'F0final_sma_std', 'voicingFinalUnclipped_sma_std', 'jitterLocal_sma_std', 'jitterDDP_sma_std', 'shimmerLocal_sma_std', 'logHNR_sma_std', 'audspec_lengthL1norm_sma_std', 'audspecRasta_lengthL1norm_sma_std', 'pcm_RMSenergy_sma_std', 'pcm_zcr_sma_std', 'audSpec_Rfilt_sma[0]_std', 'audSpec_Rfilt_sma[1]_std', 'audSpec_Rfilt_sma[2]_std', 'audSpec_Rfilt_sma[3]_std', 'audSpec_Rfilt_sma[4]_std', 'audSpec_Rfilt_sma[5]_std', 'audSpec_Rfilt_sma[6]_std', 'audSpec_Rfilt_sma[7]_std', 'audSpec_Rfilt_sma[8]_std', 'audSpec_Rfilt_sma[9]_std', 'audSpec_Rfilt_sma[10]_std', 'audSpec_Rfilt_sma[11]_std', 'audSpec_Rfilt_sma[12]_std', 'audSpec_Rfilt_sma[13]_std', 'audSpec_Rfilt_sma[14]_std', 'audSpec_Rfilt_sma[15]_std', 'audSpec_Rfilt_sma[16]_std', 'audSpec_Rfilt_sma[17]_std', 'audSpec_Rfilt_sma[18]_std', 'audSpec_Rfilt_sma[19]_std', 'audSpec_Rfilt_sma[20]_std', 'audSpec_Rfilt_sma[21]_std', 'audSpec_Rfilt_sma[22]_std', 'audSpec_Rfilt_sma[23]_std', 'audSpec_Rfilt_sma[24]_std', 'audSpec_Rfilt_sma[25]_std', 'pcm_fftMag_fband250-650_sma_std', 'pcm_fftMag_fband1000-4000_sma_std', 'pcm_fftMag_spectralRollOff25.0_sma_std', 'pcm_fftMag_spectralRollOff50.0_sma_std', 'pcm_fftMag_spectralRollOff75.0_sma_std', 'pcm_fftMag_spectralRollOff90.0_sma_std', 'pcm_fftMag_spectralFlux_sma_std', 'pcm_fftMag_spectralCentroid_sma_std', 'pcm_fftMag_spectralEntropy_sma_std', 'pcm_fftMag_spectralVariance_sma_std', 'pcm_fftMag_spectralSkewness_sma_std', 'pcm_fftMag_spectralKurtosis_sma_std', 'pcm_fftMag_spectralSlope_sma_std', 'pcm_fftMag_psySharpness_sma_std', 'pcm_fftMag_spectralHarmonicity_sma_std', 
    'pcm_fftMag_mfcc_sma[1]_std', 'pcm_fftMag_mfcc_sma[2]_std', 'pcm_fftMag_mfcc_sma[3]_std', 'pcm_fftMag_mfcc_sma[4]_std', 'pcm_fftMag_mfcc_sma[5]_std', 'pcm_fftMag_mfcc_sma[6]_std', 'pcm_fftMag_mfcc_sma[7]_std', 'pcm_fftMag_mfcc_sma[8]_std', 'pcm_fftMag_mfcc_sma[9]_std', 'pcm_fftMag_mfcc_sma[10]_std', 'pcm_fftMag_mfcc_sma[11]_std', 'pcm_fftMag_mfcc_sma[12]_std', 'pcm_fftMag_mfcc_sma[13]_std', 'pcm_fftMag_mfcc_sma[14]_std', 'F0final_sma_de_std', 'voicingFinalUnclipped_sma_de_std', 'jitterLocal_sma_de_std', 'jitterDDP_sma_de_std', 'shimmerLocal_sma_de_std', 'logHNR_sma_de_std', 'audspec_lengthL1norm_sma_de_std', 'audspecRasta_lengthL1norm_sma_de_std', 'pcm_RMSenergy_sma_de_std', 'pcm_zcr_sma_de_std', 'audSpec_Rfilt_sma_de[0]_std', 'audSpec_Rfilt_sma_de[1]_std', 'audSpec_Rfilt_sma_de[2]_std', 'audSpec_Rfilt_sma_de[3]_std', 'audSpec_Rfilt_sma_de[4]_std', 'audSpec_Rfilt_sma_de[5]_std', 'audSpec_Rfilt_sma_de[6]_std', 'audSpec_Rfilt_sma_de[7]_std', 'audSpec_Rfilt_sma_de[8]_std', 'audSpec_Rfilt_sma_de[9]_std', 'audSpec_Rfilt_sma_de[10]_std', 'audSpec_Rfilt_sma_de[11]_std', 'audSpec_Rfilt_sma_de[12]_std', 'audSpec_Rfilt_sma_de[13]_std', 'audSpec_Rfilt_sma_de[14]_std', 'audSpec_Rfilt_sma_de[15]_std', 'audSpec_Rfilt_sma_de[16]_std', 'audSpec_Rfilt_sma_de[17]_std', 'audSpec_Rfilt_sma_de[18]_std', 'audSpec_Rfilt_sma_de[19]_std', 'audSpec_Rfilt_sma_de[20]_std', 'audSpec_Rfilt_sma_de[21]_std', 'audSpec_Rfilt_sma_de[22]_std', 'audSpec_Rfilt_sma_de[23]_std', 'audSpec_Rfilt_sma_de[24]_std', 'audSpec_Rfilt_sma_de[25]_std', 'pcm_fftMag_fband250-650_sma_de_std', 'pcm_fftMag_fband1000-4000_sma_de_std', 'pcm_fftMag_spectralRollOff25.0_sma_de_std', 'pcm_fftMag_spectralRollOff50.0_sma_de_std', 'pcm_fftMag_spectralRollOff75.0_sma_de_std', 'pcm_fftMag_spectralRollOff90.0_sma_de_std', 'pcm_fftMag_spectralFlux_sma_de_std', 'pcm_fftMag_spectralCentroid_sma_de_std', 'pcm_fftMag_spectralEntropy_sma_de_std', 'pcm_fftMag_spectralVariance_sma_de_std', 'pcm_fftMag_spectralSkewness_sma_de_std', 
    'pcm_fftMag_spectralKurtosis_sma_de_std', 'pcm_fftMag_spectralSlope_sma_de_std', 'pcm_fftMag_psySharpness_sma_de_std', 'pcm_fftMag_spectralHarmonicity_sma_de_std', 'pcm_fftMag_mfcc_sma_de[1]_std', 'pcm_fftMag_mfcc_sma_de[2]_std', 'pcm_fftMag_mfcc_sma_de[3]_std', 'pcm_fftMag_mfcc_sma_de[4]_std', 'pcm_fftMag_mfcc_sma_de[5]_std', 'pcm_fftMag_mfcc_sma_de[6]_std', 'pcm_fftMag_mfcc_sma_de[7]_std', 'pcm_fftMag_mfcc_sma_de[8]_std', 'pcm_fftMag_mfcc_sma_de[9]_std', 'pcm_fftMag_mfcc_sma_de[10]_std', 'pcm_fftMag_mfcc_sma_de[11]_std', 'pcm_fftMag_mfcc_sma_de[12]_std', 'pcm_fftMag_mfcc_sma_de[13]_std', 'pcm_fftMag_mfcc_sma_de[14]_std']
    window = 1
    overlap = 0.5
    llds = [f for f in os.listdir(llddir) if f[-4:] == ".csv"]
    all_dynamic_features = []
    all_musicId = []
    for lld in llds:
        musicId = []
        lldpath = os.path.join(llddir,lld)
        single_song_distfile = os.path.join(distdir,lld)
        # aggregate the 10 ms LLD frames into 1 s windows every 0.5 s
        dynamic_features = _compute_feature_with_window_and_overlap(lldpath, window, overlap, delimiter)
        # repeat the song id (filename stem) once per output frame
        for i in range(len(dynamic_features)):
            musicId.append(lld[:-4])
        _write_features_to_csv(headers, musicId, dynamic_features, single_song_distfile)
        all_musicId += musicId
        all_dynamic_features += dynamic_features
    # combined CSV with every song's windows
    _write_features_to_csv(headers, all_musicId, all_dynamic_features, all_songs_distfile)
def _compute_feature_with_window_and_overlap(lldpath, window, overlap, delimiter):
'''Compute the mean and std for frame-wise features in window size: 1s, shift size: 0.5s.'''
fs = 0.01
num_in_new_frame = floor(overlap/fs)
num_in_window = floor(window/fs)
# load the features from disk
all_frame = []
with open(lldpath) as f:
reader = csv.reader(f,delimiter=delimiter)
next(reader)
for row in reader:
frame_feature = []
for i in range(len(row)-1): #旧的frametime不用记录
frame_feature.append(float(row[i+1]))
all_frame.append(frame_feature)
# compute new number of frames
new_num_of_frame = floor(len(all_frame)/num_in_new_frame)
all_new_frame = []
# compute mean and std in each window as the feature corresponding to the frame.
for i in range(new_num_of_frame):
start_index = num_in_new_frame * i
new_frame_array = np.array(all_frame[start_index:start_index+num_in_window])
mean_llds = np.mean(new_frame_array,axis=0)
std_llds = np.std(new_frame_array,axis=0)
new_frametime = i * overlap
new_frame = [new_frametime] + mean_llds.tolist() + std_llds.tolist()
all_new_frame.append(new_frame)
return all_new_frame
def _write_features_to_csv(headers, musicIds, contents, distfile):
'''Write all the features into one file, and add the last column as the annotation value'''
with open(distfile,"w") as newfile:
writer = csv.writer(newfile)
writer.writerow(headers + ["class"])
for i in range(len(contents)):
writer.writerow([musicIds[i]] + contents[i] + ["?"])
if __name__ == "__main__":
    # Pipeline driver.  Adjust the two placeholder paths to the local setup.
    wavdir ="/Path/to/WAVs"                      # directory with input .wav files
    opensmiledir = "/Path/to/openSMILE-2.1.0"    # openSMILE installation root
    static_distfile = "static_features.arff"     # output: one static feature set per song
    lld_distdir = "IS13features_lld"             # output: per-song frame-wise (LLD) csvs
    dynamic_distdir = "dynamic_features"         # output: per-song windowed features
    all_dynamic_distfile = "dynamic_features.csv"  # output: all songs merged
    delimiter = ";"                              # csv delimiter used by openSMILE
    extract_all_wav_feature(wavdir,static_distfile,opensmiledir)
    extract_frame_feature(wavdir,lld_distdir,opensmiledir)
    process_dynamic_feature(lld_distdir,dynamic_distdir,all_dynamic_distfile,delimiter)
| 14,123 | 81.116279 | 8,566 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/setup.py | import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
    name="explainable_ai_image_measures",
    version="1.0.1",
    description="Compute IAUC, DAUC, IROF scores to measure quality of image attributions",
    long_description=README,  # README.md, rendered on PyPI
    long_description_content_type="text/markdown",
    url="https://github.com/Meier-Johannes/ExplainableAIImageMeasures",
    author="Johannes Meier",
    author_email="johannes-michael.meier@student.uni-tuebingen.de",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
    ],
    packages=["explainable_ai_image_measures"],
    include_package_data=True,
    # scikit-image provides the slic segmentation used by IROF,
    # scikit-learn the AUC computation used by all three scores
    install_requires=["numpy", "torch", "scikit-image", "scikit-learn"],
)
| 989 | 32 | 91 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/explainable_ai_image_measures/scoring_metric.py | import numpy as np
import torch
from sklearn.metrics import auc
import torch.nn.functional as F
from explainable_ai_image_measures.irof import IrofDataset
from explainable_ai_image_measures.pixel_relevancy import PixelRelevancyDataset
class Measures:
    """Compute IAUC, DAUC and IROF attribution-quality scores for a model."""

    def __init__(self,
                 model,
                 batch_size=64,
                 irof_segments=40,
                 irof_sigma=5,
                 pixel_package_size=1,
                 normalize=True,
                 clip01=False,
                 baseline_color=None):
        """Parametrize the future measurements.

        model: PyTorch model under evaluation.
        batch_size: number of manipulated images sent through the network
            per forward pass.
        irof_segments: maximum number of slic superpixels; only relevant for
            IROF.
        irof_sigma: parameter used in the slic algorithm.
        pixel_package_size: number of pixels removed/added per step for
            IAUC/DAUC.  For large images (e.g. 224*224 = 50,176 pixels)
            per-pixel steps cause too much overhead; blocks of pixels speed
            up the computation at the cost of resolution.
        normalize: divide each probability by the probability of the
            unmodified image, making scores comparable independent of how
            confident the network is on the original image.  Highly
            encouraged when comparing attributions across several images.
        clip01: clamp every individual (normalized) probability into [0, 1];
            only relevant with normalize=True.  Removing image parts can in
            some cases raise the probability above the original, which e.g.
            for IROF could yield negative scores; clipping prohibits that
            and indirectly keeps the final score within [0, 1].
        baseline_color: per-channel color used to replace removed pixels
            (DAUC/IROF) and as the start image (IAUC).  Defaults to the
            image's mean color.
        """
        self.model = model
        self.batch_size = batch_size
        self.irof_segments = irof_segments
        self.irof_sigma = irof_sigma
        self.pixel_package_size = pixel_package_size
        self.normalize = normalize
        self.clip01 = clip01
        self.baseline_color = baseline_color

    def _calc_probs(self, image_batch, label):
        # Softmax over the class dimension, then keep only the probability
        # of the attribution's target label for every image in the batch.
        probs = F.softmax(self.model(image_batch), dim=1)
        return probs[:, label]

    def _calc_single_score(self, scoring_dataset, label):
        # Collect the label probability for every manipulated image the
        # dataset yields; the dataset appends the unmodified original image
        # as the very last entry (used below for normalization).
        probs = []
        with torch.no_grad():
            for j, img_batch in enumerate(scoring_dataset):
                probs += [self._calc_probs(img_batch, label)]
        probs = torch.cat(probs).flatten()
        if self.normalize:
            # Divide by the probability of the unmodified image (last entry).
            probs = probs[:-1] / probs[-1]
        else:
            probs = probs[:-1]
        if self.clip01:
            probs = torch.clamp(probs, 0, 1)
        # Dataset-specific transform (e.g. IROF flips the curve to 1 - p).
        probs = scoring_dataset.postprocess_scores(probs)
        # Area under the probability curve, normalized by the number of steps.
        x = np.arange(0, len(probs))
        y = probs.detach().cpu().numpy()
        score = auc(x, y) / len(probs)
        return score, probs.detach()

    def _assert_check(self, image, attribution):
        # image: (color_channels, width, height); attribution: (width, height);
        # baseline_color (if given): one value per color channel.
        assert(len(image.shape) == 3)
        assert(image.shape[1:] == attribution.shape)
        if self.baseline_color is not None:
            assert(len(self.baseline_color.shape) == 1)
            assert(len(self.baseline_color) == image.shape[0])

    def compute_IAUC(self, image, attribution, label):
        """
        Computes IAUC for a single image and attribution
        image: Torch.FloatTensor(color_channel, width, height)
        attribution: Torch.FloatTensor(width, height)
        label: Label of the attribution
        Returns (score, per-step probabilities).
        """
        self._assert_check(image, attribution)
        with torch.no_grad():
            # insert=True: start from the baseline image and add pixels.
            dataset = PixelRelevancyDataset(
                image,
                attribution,
                True,
                self.batch_size,
                self.pixel_package_size,
                image.device,
                self.baseline_color
            )
            return self._calc_single_score(dataset, label)

    def compute_DAUC(self, image, attribution, label):
        """
        Computes DAUC for a single image and attribution
        image: Torch.FloatTensor(color_channel, width, height)
        attribution: Torch.FloatTensor(width, height)
        label: Label of the attribution
        Returns (score, per-step probabilities).
        """
        self._assert_check(image, attribution)
        with torch.no_grad():
            # insert=False: start from the original image and remove pixels.
            dataset = PixelRelevancyDataset(
                image,
                attribution,
                False,
                self.batch_size,
                self.pixel_package_size,
                image.device,
                self.baseline_color
            )
            return self._calc_single_score(dataset, label)

    def compute_IROF(self, image, attribution, label):
        """
        Computes IROF for a single image and attribution
        image: Torch.FloatTensor(color_channel, width, height)
        attribution: Torch.FloatTensor(width, height)
        label: Label of the attribution
        Returns (score, per-step probabilities).
        """
        self._assert_check(image, attribution)
        with torch.no_grad():
            dataset = IrofDataset(
                image,
                attribution,
                self.batch_size,
                self.irof_segments,
                self.irof_sigma,
                image.device,
                self.baseline_color
            )
            return self._calc_single_score(dataset, label)

    def compute_batch(self, images, attributions, labels, IROF=True, IAUC=True, DAUC=True):
        """
        Computes the batch for many images and allows multiple attributions per image.
        image: Torch.FloatTensor(nr_images, color_channel, width, height)
        attribution: (nr_images, nr_attributions_per_image, width, height)
        labels: Tuple / Array / Tensor of Int
        IROF: Defines, whether IROF is computed
        IAUC: Defines, whether IAUC is computed
        DAUC: Defines, whether DAUC is computed
        Returns dict mapping method name to (scores, probs), or None when no
        method was selected.
        """
        assert(len(images) == len(attributions))
        assert(len(images) == len(labels))
        # Map method name -> implementation for every requested score.
        functions = dict()
        if IROF:
            functions["IROF"] = self.compute_IROF
        if IAUC:
            functions["IAUC"] = self.compute_IAUC
        if DAUC:
            functions["DAUC"] = self.compute_DAUC
        if len(functions) == 0:
            return None
        result = dict()
        for method in functions:
            # scores: (nr_images, nr_attributions_per_image)
            scores = torch.zeros(attributions.shape[0:2])
            probs = []
            for img_id in range(len(images)):
                probs.append([])
                for attr_id in range(len(attributions[img_id])):
                    score, prob = functions[method](
                        images[img_id],
                        attributions[img_id, attr_id],
                        labels[img_id]
                    )
                    scores[img_id, attr_id] = score
                    probs[-1].append(prob)
                probs[-1] = torch.stack(probs[-1])
            result[method] = (scores, probs)
        return result
| 7,637 | 37.771574 | 120 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/explainable_ai_image_measures/pixel_manipulation.py | import torch
from torch.utils.data import Dataset
import abc
class PixelManipulationBase(Dataset):
    """Batches of progressively manipulated copies of one image.

    Subclasses define the manipulation order by filling
    ``self._pixel_batches`` (via ``generate_pixel_batches``); this base class
    turns that order into batches of images where pixels are either replaced
    by the baseline color (removal) or copied back from the original image
    (``insert=True``).  ``__getitem__`` is stateful: it continues from the
    image produced by the previous index, so indices must be consumed
    consecutively.
    """

    def __init__(self, image, attribution, insert, batch_size, device, baseline_color):
        self._image = image
        self._batch_size = batch_size
        self._insert = insert
        self._attribution = attribution
        self._device = device
        self._baseline_color = baseline_color
        self._pixel_batches = None  # Expected to be set by child class
        self._temp_image = None  # Expected to be set by child class
        if self._baseline_color is None:
            # Default baseline: per-channel mean color of the image.
            self._baseline_color = torch.mean(image.reshape(self.color_channels, self.width, self.height), dim=(1, 2))
        self._temp_baseline = None
        # NOTE(review): duplicate of the assignment a few lines above —
        # harmless but redundant.
        self._temp_image = None

    @abc.abstractmethod
    def generate_pixel_batches(self):
        # Subclass hook: must fill self._pixel_batches.
        return

    def generate_initial_image(self):
        # Flattened starting image: baseline-colored for insertion,
        # the original image for removal.
        if self._insert:
            # Create a new image of original width & height with every pixel set to the baseline color
            self._temp_image = (
                self._baseline_color.view(self.color_channels, 1)
                .repeat(1, self.width * self.height)
            )
        else:
            self._temp_image = self._image
        self._temp_image = self._temp_image.flatten()

    def generate_temp_baseline(self):
        # Keep the baseline image pre-expanded to avoid regenerating it
        # every iteration.
        self._temp_baseline = (
            self._baseline_color.view(self.color_channels, 1).repeat(1, self.nr_pixels).flatten()
        )

    def __len__(self):
        # Number of batches (as prepared by the subclass).
        return len(self._pixel_batches)

    @property
    def color_channels(self):
        return self._image.shape[0]

    @property
    def width(self):
        return self._image.shape[1]

    @property
    def height(self):
        return self._image.shape[2]

    def _get_batch_size(self, index):
        # The last batch may be smaller than self._batch_size.
        return len(self._pixel_batches[index])

    def _index_shift(self, matrix, add_per_row):
        # Adds (i-1) * add_per_row to every cell in row i. Nothing for row 1
        factor = add_per_row * torch.diag(torch.arange(0, len(matrix), device=self._device)).float()
        new_matrix = factor @ torch.ones_like(matrix, device=self._device).float() + matrix
        return new_matrix.long()

    @abc.abstractmethod
    def _gen_indices(self, index):
        # Subclass hook: return (template_indices, batch_indices) for a batch.
        return

    @property
    def nr_pixels(self):
        return self.width * self.height

    def _color_channel_shift(self, indices):
        # Replicate pixel indices once per color channel, shifting each copy
        # by nr_pixels so they address the flattened (C, W*H) image.
        return torch.stack(
            [indices + i*self.nr_pixels for i in range(self.color_channels)]
        ).to(self._device).T.flatten()

    def _batch_shift(self, indices, pixel_per_image):
        # Depending on pixel_per_image create an array of the following form:
        # [0 0 1 1 1 2 2 2 2 2 2]
        # How often a number is repeated depends on pixel_per_image
        nr_pixels_cum = torch.cumsum(pixel_per_image, 0)
        image_indices = [torch.Tensor(nr_pixels_cum[i]*[i]) for i in range(len(nr_pixels_cum))]
        image_indices = torch.cat(image_indices).to(self._device).long()
        # Multiply it with the total number of data points per image
        image_indices_shift = image_indices * self.color_channels * self.nr_pixels
        # Expand the shift for each color channel
        nr_man_pixels = int(len(indices) / self.color_channels)
        image_indices_shift = image_indices_shift.reshape(-1, 1).expand(nr_man_pixels, self.color_channels)
        image_indices_shift = image_indices_shift.flatten()
        # Shift the original indices
        batch_indices = indices + image_indices_shift
        return batch_indices

    @abc.abstractmethod
    def _get_fake_image_size(self):
        # Subclass hook: number of placeholder pixels appended per image
        # for the original-image slot in the last batch.
        return

    @abc.abstractmethod
    def postprocess_scores(self, y):
        # Subclass hook: optional transform of the probability curve.
        return y

    def __getitem__(self, index):
        """
        Returns batch_size of images, where the most important pixels have been removed / added
        Important: Call the method with consecutive index values!
        """
        batch_size = self._get_batch_size(index)
        # Start with the image of the last run
        # Create batch of image [batch_size, color_channels x width x height]
        image_batch = self._temp_image.view(1, -1).repeat(batch_size, 1).flatten()
        # Get the indices that need to be modified from the image of the last run
        # template_indices = not unique, batch_indices = unique
        template_indices, batch_indices = self._gen_indices(index)
        if index == self.__len__() - 1:
            # Only in the last run: Ensure that there is no problem for the original image
            # Therefore remove the fake indices as added in the constructor
            template_indices = template_indices[
                : -self._get_fake_image_size() * self.color_channels * batch_size
            ]
            batch_indices = batch_indices[
                : -self._get_fake_image_size() * self.color_channels * batch_size
            ]
        # Modify the pixels
        if self._insert:
            image_batch[batch_indices] = self._image.flatten()[template_indices]
        else:
            image_batch[batch_indices] = self._temp_baseline[template_indices]
        # Reshape the image to proper sizes as required by the network
        image_batch = image_batch.reshape(
            -1, self.color_channels, self.width, self.height
        )
        if index == self.__len__() - 1:
            # Only in the last run: Add the original image
            image_batch[batch_size - 1] = self._image
        else:
            # Save last image for the next run
            self._temp_image = image_batch[-1]  # TODO
        return image_batch
| 5,848 | 35.55625 | 118 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/explainable_ai_image_measures/irof.py | import torch
import numpy as np
from skimage.segmentation import slic
from explainable_ai_image_measures.pixel_manipulation import PixelManipulationBase
class IrofDataset(PixelManipulationBase):
    """Image manipulation order for IROF: remove whole slic superpixels,
    most relevant (by mean attribution) first."""

    def __init__(
        self, image, attribution, batch_size, irof_segments, irof_sigma, device, baseline_color
    ):
        # insert=False: IROF always removes segments from the original image.
        PixelManipulationBase.__init__(
            self, image, attribution, False, batch_size, device, baseline_color
        )
        self._irof_segments = irof_segments
        self._irof_sigma = irof_sigma
        self.generate_pixel_batches()
        self.generate_initial_image()
        self.generate_temp_baseline()

    def generate_pixel_batches(self):
        # Apply Slic algorithm to get superpixel areas
        img_np = self._image.detach().cpu().numpy().transpose(1, 2, 0)
        segments = slic(img_np, self._irof_segments, self._irof_segments).reshape(-1)
        nr_segments = np.max(segments) + 1
        segments = torch.LongTensor(segments).to(device=self._device)
        # Attribution score of each segment = Mean attribution
        attr = self._attribution.reshape(-1)
        seg_mean = [
            torch.mean(attr[segments == seg]).item() for seg in range(nr_segments)
        ]
        seg_mean = torch.FloatTensor(seg_mean).to(device=self._device)
        # Sort segments descending by mean attribution
        seg_rank = torch.argsort(-seg_mean)
        # Create lists of "shape" [batch_size, segment_size]
        # containing the indices of the segments
        self._pixel_batches = [[]]
        for seg in seg_rank:
            indices = torch.nonzero(segments == seg).flatten()
            IrofDataset._add_to_hierarhical_list(
                self._pixel_batches, self._batch_size, indices
            )
        # Add placeholder for original image
        IrofDataset._add_to_hierarhical_list(
            self._pixel_batches, self._batch_size, torch.Tensor([0]).to(device=self._device)
        )

    @staticmethod
    def _add_to_hierarhical_list(list_element, target_size, item):
        # Append item to the last sub-list, opening a new sub-list whenever
        # the current one reached target_size.  (Name keeps the original
        # spelling; renaming would be a code change.)
        if len(list_element[-1]) == target_size:
            list_element.append([])
        list_element[-1].append(item)

    def _gen_indices(self, index):
        batch_size = self._get_batch_size(index)
        # Get all pixels
        all_pixels = torch.cat(self._pixel_batches[index]).to(self._device).long()
        # Create a matrix of indices of size [batch_size, all_pixels]
        template_indices = all_pixels.view(1, -1).repeat(batch_size, 1)
        # For each package only keep the previous pixels and package size additional pixels
        pixel_per_image = torch.LongTensor(
            [len(package) for package in self._pixel_batches[index]]
        ).to(self._device)
        cumsum = torch.cumsum(pixel_per_image, dim=0)
        keep_index_template = torch.cat(
            [torch.arange(0, s.item()) for s in cumsum]
        ).to(self._device)
        template_indices = template_indices.reshape(-1)[keep_index_template]
        template_indices = self._color_channel_shift(template_indices)
        batch_indices = self._batch_shift(template_indices, pixel_per_image)
        return template_indices, batch_indices

    def _get_fake_image_size(self):
        # One placeholder "pixel" was appended for the original-image slot.
        return 1

    def postprocess_scores(self, y):
        # IROF integrates 1 - p(curve): higher removal impact -> higher score.
        return 1-y
| 3,308 | 35.766667 | 95 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/explainable_ai_image_measures/pixel_relevancy.py | import torch
from explainable_ai_image_measures.pixel_manipulation import PixelManipulationBase
class PixelRelevancyDataset(PixelManipulationBase):
    """Image manipulation order for IAUC/DAUC: add or remove fixed-size
    packages of pixels in descending attribution order."""

    def __init__(self, image, attribution, insert, batch_size, package_size, device, baseline_color):
        PixelManipulationBase.__init__(
            self, image, attribution, insert, batch_size, device, baseline_color
        )
        self._package_size = package_size
        self._device = device
        self.generate_pixel_batches()
        self.generate_initial_image()
        self.generate_temp_baseline()

    def generate_pixel_batches(self):
        # For simplicity: Ensure that all packages have the same size.
        # Any remainder pixels (width*height % package_size) are dropped.
        max_nr_pixels = (
            self.width * self.height - self.width * self.height % self._package_size
        )
        # Sort pixels in descending order by attribution score
        pixel_relevancy_desc = torch.argsort(-self._attribution.flatten())[
            :max_nr_pixels
        ]
        # Add placeholder for original image
        placeholder = torch.LongTensor(self._package_size * [0]).to(device=self._device)
        pixel_relevancy_desc = torch.cat((pixel_relevancy_desc, placeholder))
        # Form groups of size package_size
        pixel_relevancy_groups = pixel_relevancy_desc.reshape(-1, self._package_size)
        # Forms batches of groups: (batch_size x package_size)
        self._pixel_batches = torch.split(
            pixel_relevancy_groups, self._batch_size, dim=0
        )

    def _gen_indices(self, index):
        batch_size = self._get_batch_size(index)
        # Create a matrix of indices of size [batch_size, package_size * batch_size]
        template_indices = self._pixel_batches[index].view(1, -1).repeat(batch_size, 1)
        # For each package only keep the previous pixels and package_size additional pixels
        keep_index_template = torch.cat(
            [torch.arange(0, self._package_size * i) for i in range(1, batch_size + 1)]
        )
        template_indices = template_indices.reshape(-1)[keep_index_template]
        template_indices = self._color_channel_shift(template_indices)
        # Shift each batch item by all the pixels of previous images
        pixel_per_image = torch.Tensor(batch_size * [self._package_size]).to(self._device).long()
        batch_indices = self._batch_shift(template_indices, pixel_per_image)
        return template_indices, batch_indices

    def _get_fake_image_size(self):
        # A full package of placeholder pixels was appended for the
        # original-image slot.
        return self._package_size

    def postprocess_scores(self, y):
        # IAUC/DAUC use the probability curve unchanged.
        return y
| 2,552 | 41.55 | 101 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/explainable_ai_image_measures/__init__.py | from explainable_ai_image_measures.scoring_metric import Measures
# Package version and explicit public API surface (only Measures is exported).
__version__ = "1.0.1"
__all__ = ["Measures"]
| 112 | 21.6 | 65 | py |
ellip-corr | ellip-corr-master/PyEllipCorr.py | import os
from numpy import f2py
import numpy as np
from collections import defaultdict
from pyellip import select_phase, coeffs, ellip
class PyEllipCorr:
    """Ellipticity travel-time corrections via the compiled pyellip routines,
    with per-(phase, phase-index) caching of the interpolation coefficients."""

    def __init__(self):
        # Coefficient table shipped alongside this module.
        self._tbl_fn = os.path.join(os.path.dirname(__file__), 'ellip/elcordir.tbl')
        # Cache: (phase, ip) -> [t0, t1, t2, d1, d2, delta].
        # NOTE(review): a plain dict would suffice; the defaultdict(list)
        # behaviour is never relied on.
        self._coeffs = defaultdict(list)
    # end func

    def get_correction(self, phase, edist, edepth, ecolat, azim):
        """
        :param phase: a string specifying the PHASE, -e.g P, ScP etc.
        :param edist: epicentral distance to station (degrees)
        :param edepth: depth of event (km)
        :param ecolat: epicentral co-latitude of source (degrees)
        :param azim: azimuth from source to station (degrees)
        :return: ellipticity-correction (s); 0 on any lookup failure
        """
        ip, abrt = select_phase(phase, edist)
        if(abrt):
            print('Warning: Phase {} not found. Returning a null correction..'.format(phase))
            return 0
        # end if

        if ((phase, ip) not in self._coeffs.keys()):
            # Cache miss: load the coefficients from the table, then recurse
            # once so the computation below runs on the cached entry.
            t0, t1, t2, d1,d2,delta, abrt = coeffs(self._tbl_fn, ip)
            if(not abrt):
                self._coeffs[(phase, ip)] = [t0, t1, t2, d1, d2, delta]
                return self.get_correction(phase, edist, edepth, ecolat, azim)
            else:
                return 0
            # end if
        else:
            # Cache hit: evaluate the correction (angles in radians).
            ecolat_rad = np.radians(ecolat)
            azim_rad = np.radians(azim)
            t0, t1, t2, d1, d2, delta = self._coeffs[(phase, ip)]
            tcor, abrt = ellip(t0, t1, t2, d1, d2, delta, edist, edepth, ecolat_rad, azim_rad);
            if(not abrt): return tcor
            else: return 0
        # end if
    # end func
# end class
| 1,696 | 32.94 | 95 | py |
ellip-corr | ellip-corr-master/__init__.py | 0 | 0 | 0 | py | |
ellip-corr | ellip-corr-master/tests/test_ellipticity_corr.py | from __future__ import print_function
import pytest
from PyEllipCorr import PyEllipCorr
"""
The values below were extracted from ttimel
"""
"""
Source latitude: -30
Source depth (km): 124
Azimuth from source: 39
delta: 65
# code time time(el) dT/dD dT/dh d2T/dD2
"""
data= """ 1 P 626.6652 626.8218 6.4671 -1.09E-01 -4.26E-03
2 pP 656.8627 657.0470 6.5478 1.09E-01 -4.21E-03
3 PcP 658.6354 658.7878 4.1323 -1.18E-01 1.38E-02
4 sP 670.3326 670.4863 6.5283 2.14E-01 -4.23E-03
5 PP 771.0760 771.6174 8.7483 -9.46E-02 -1.25E-02
6 PKiKP 1023.1398 1023.5211 1.3232 -1.23E-01 1.85E-02
7 pPKiKP 1056.6406 1057.0413 1.3198 1.23E-01 1.85E-02
8 sPKiKP 1069.3484 1069.7469 1.3205 2.22E-01 1.85E-02
9 S 1138.7920 1139.0929 12.2313 -1.92E-01 -2.71E-03
10 SPn 1156.3033 1156.9156 13.6603 -1.83E-01 -2.31E-02
11 pS 1173.5881 1173.9027 12.5499 4.63E-02 -2.35E-03
12 PnS 1176.5562 1177.1404 13.3906 -1.75E-02 -5.56E-03
13 sS 1190.7726 1191.1244 12.3694 1.91E-01 -2.67E-03
14 SKSac 1209.6849 1209.9666 7.5880 -2.11E-01 -1.13E-01
15 SKKSac 1209.6868 1209.9685 7.5903 -2.11E-01 -4.81E-01
16 ScS 1209.8174 1210.1000 7.6990 -2.11E-01 7.04E-03
17 SKiKP 1224.7056 1225.1292 1.3785 -2.22E-01 1.83E-02
18 pSKSac 1252.3256 1252.6616 7.5901 1.03E-01 -2.07E-01
19 sSKSac 1266.1160 1266.4423 7.5896 2.11E-01 -1.63E-01
20 SS 1392.8646 1393.8425 15.5487 -1.70E-01 -9.15E-03
21 PKKPdf 1850.6364 1853.4150 -1.3739 -1.23E-01 -1.69E-02
22 SKKPdf 2052.1799 2055.6609 -1.3144 -2.22E-01 -1.71E-02
23 PKKSdf 2064.8875 2068.9170 -1.3136 -1.23E-01 -1.71E-02
24 SKKSdf 2266.2419 2271.1333 -1.2580 -2.22E-01 -1.75E-02
25 P'P'df 2350.1360 2348.5884 -1.6599 -1.23E-01 -2.16E-02
26 P'P'bc 2355.4346 2353.8870 -2.7297 -1.21E-01 -3.67E-03
27 P'P'ab 2359.7952 2358.2476 -3.9679 -1.19E-01 7.92E-03
28 S'S'df 3197.0603 3197.8882 -1.3915 -2.22E-01 -1.70E-02"""
# Each ttimel table row has 7 whitespace-separated fields:
# index, phase code, time, time(el), dT/dD, dT/dh, d2T/dD2.
# After split(), i % 7 selects the field within the current row.
data = data.split()
ellipcorr_dict = {}
pyellip = PyEllipCorr()
for i, d in enumerate(data):
    if i % 7 == 1:
        k = d           # phase code
    if i % 7 == 2:
        tt = float(d)   # uncorrected travel time
    if i % 7 == 3:
        # reference correction = ellipticity-corrected time - plain time
        tcor = float(d) - tt
        ellipcorr_dict[k] = tcor
@pytest.fixture(params=list(ellipcorr_dict.keys()))
def tcor_pair(request):
    # Parametrized over every phase in the ttimel reference table;
    # yields (phase code, reference correction in seconds).
    return request.param, ellipcorr_dict[request.param]
def test_ellipticity_corr(tcor_pair):
    # Event at latitude -30 (colatitude 90 - -30 = 120), depth 124 km,
    # distance 65 deg, azimuth 39 deg — matching the ttimel run above.
    phase, corr = tcor_pair
    result = pyellip.get_correction(phase, 65, 124, (90 - -30), 39)
    print(result, corr)
    assert abs(result - corr) < 1.0e-3
| 3,076 | 42.957143 | 80 | py |
ellip-corr | ellip-corr-master/ellip/__init__.py | 0 | 0 | 0 | py | |
ellip-corr | ellip-corr-master/tau/__init__.py | 0 | 0 | 0 | py | |
zpdgen | zpdgen-master/itg_ai_loc.py | import numpy as np
import matplotlib.pyplot as plt
import gpdf as gp
from scipy.optimize import root
# Local ITG dispersion scan over k_y using the generalized plasma
# dispersion integrals I_nm from gpdf.
etai=2.5     # temperature/density gradient ratio parameter
LnbyR=0.2    # Ln / R
rbyR=0.18    # r / R (not used below)
kpar=0.1     # parallel wavenumber
tau=1.0      # temperature ratio
def epsfun(v):
    # Dielectric function of the local ITG dispersion relation.
    # v = [Re(omega), Im(omega)]; returns [Re(eps), Im(eps)] so that
    # scipy.optimize.root can drive both components to zero.
    # Uses the module globals ky, etai, LnbyR, kpar, tau.
    om=v[0]+1j*v[1]
    omsi=-ky
    omdi=2*omsi*LnbyR
    za=-om/omdi
    zb=-np.sqrt(2)*kpar/omdi
    b=ky**2
    i10=gp.Inm(za,zb,b,1,0)
    i12=gp.Inm(za,zb,b,1,2)
    i30=gp.Inm(za,zb,b,3,0)
    eps=1+1/tau+(i10*(om-omsi*(1-1.5*etai))-omsi*etai*(i12+i30))/omdi
    res=[float(np.real(eps)),float(np.imag(eps))]
    return res
# Scan: seed the root search at ky0, then sweep upward to the end of the
# grid and downward to the start, reusing the previous root as the guess.
kys=np.arange(0.01,3.0,0.01)
ky0=0.7
om0=[-0.2,0.2]
omky=np.zeros(np.shape(kys))*(1+1j)
ind0=np.argmin((kys-ky0)**2)
numk=len(kys)
inds=np.concatenate((np.arange(ind0,numk), np.arange(ind0-1,-1,-1)))
for l in inds:
    ky=kys[l]
    res=root(epsfun,om0,tol=1e-8,method='hybr')
    omky[l]=res.x[0]+1j*res.x[1]
    if(l==numk-1):
        # End of the upward sweep: restart the downward sweep from the
        # root found at the seed index.
        om0=[float(np.real(omky[ind0])),float(np.imag(omky[ind0]))]
    else:
        om0=res.x
# Plot the growth rate Im(omega) versus ky.
plt.plot(kys,np.imag(omky),'x-')
#plt.axis([0.0,2.0,-0.1,0.2])
#plt.plot(kys,-np.real(omky),'r')
plt.show()
| 1,029 | 21.888889 | 69 | py |
zpdgen | zpdgen-master/itg_ai_loc_mat.py | import numpy as np
import matplotlib.pyplot as plt
import gpdf as gp
from scipy.optimize import root
# Same ITG k_y scan as itg_ai_loc.py, but the dispersion function is
# assembled from a coefficient matrix anm and evaluated via gp.sigmazpd.
etai=2.5
LnbyR=0.2
rbyR=0.18   # not used below
kpar=0.1
tau=1.0
def epsfun(v):
    # Dielectric function; v = [Re(omega), Im(omega)].
    # anm[n, m] holds the coefficient of I_nm in the sigma sum.
    om=v[0]+1j*v[1]
    omsi=-ky
    omdi=2*omsi*LnbyR
    za=-om/omdi
    zb=-np.sqrt(2)*kpar/omdi
    b=ky**2
    anm=np.zeros((4,3),dtype=np.complex128)
    anm[1,0]=(om-omsi*(1-1.5*etai))/omdi
    anm[1,2]=-omsi*etai/omdi
    anm[3,0]=-omsi*etai/omdi
    eps=1+1/tau+gp.sigmazpd(za,zb,b,anm)
    res=[float(np.real(eps)),float(np.imag(eps))]
    return res
# Two-directional sweep over ky, seeded at ky0 (see itg_ai_loc.py).
kys=np.arange(0.01,3.0,0.01)
ky0=0.7
om0=[-0.2,0.2]
omky=np.zeros(np.shape(kys))*(1+1j)
ind0=np.argmin((kys-ky0)**2)
numk=len(kys)
inds=np.concatenate((np.arange(ind0,numk), np.arange(ind0-1,-1,-1)))
for l in inds:
    ky=kys[l]
#    print('ky:',ky)
    res=root(epsfun,om0,tol=1e-8,method='hybr')
    omky[l]=res.x[0]+1j*res.x[1]
    if(l==numk-1):
        om0=[float(np.real(omky[ind0])),float(np.imag(omky[ind0]))]
    else:
        om0=res.x
plt.plot(kys,np.imag(omky),'x-')
#plt.axis([0.0,2.0,-0.1,0.2])
#plt.plot(kys,-np.real(omky),'r')
plt.show()
| 1,084 | 22.085106 | 68 | py |
zpdgen | zpdgen-master/plot_eps.py | import numpy as np
import matplotlib.pyplot as plt
import gpdf as gp
# Visualize the ITG dielectric function eps(omega) over the complex
# omega plane for a single fixed ky; the intersection of the black
# (Re eps = 0) and red (Im eps = 0) contours marks a dispersion root.
etai=2.5
LnbyR=0.2
rbyR=0.18   # not used below
kpar=0.1
tau=1.0
ky=0.06
def epsfun(v):
    # Dielectric function evaluated element-wise on a (possibly complex
    # array-valued) frequency v.
    om=v
    omsi=-ky
    omdi=2*omsi*LnbyR
    za=-om/omdi
    zb=-kpar/omdi*np.sqrt(2)
    b=ky**2
    i10=gp.Inm(za,zb,b,1,0)
    i12=gp.Inm(za,zb,b,1,2)
    i30=gp.Inm(za,zb,b,3,0)
    eps=1+1/tau+(i10*(om-omsi*(1-1.5*etai))-omsi*etai*(i12+i30))/omdi
    return eps
# Complex omega grid covering [-1, 1] x [-1, 1].
xx,yy=np.meshgrid(np.arange(-1,1,0.01),np.arange(-1,1,0.01))
om=xx+1j*yy;
cnts=np.arange(-2,2.2,0.2)
wdts=np.ones(cnts.shape);
wdts[0]=2.0
wdts[5]=2.0
wdts[10]=4.0
wdts[15]=2.0
wdts[20]=2.0
eps=epsfun(om)
#gp.Inm(om,0.0,0.09,1,0)
plt.pcolormesh(np.real(om),np.imag(om),np.imag(eps),shading='gouraud',rasterized=True)
plt.clim(-0.01,0.01)
plt.colorbar()
plt.contour(np.real(om),np.imag(om),np.real(eps),[0.0],colors='k')
plt.contour(np.real(om),np.imag(om),np.imag(eps),[0.0],colors='r')
plt.show()
| 908 | 21.170732 | 86 | py |
zpdgen | zpdgen-master/py_time.py | import numpy as np
import matplotlib.pyplot as plt
import gpdf as gp
import time
# Benchmark: time the evaluation of a few I_nm dispersion functions on a
# 120x120 complex grid.
nlist = [[1, 0], [1, 2], [3, 0]]  # (n, m) orders to time
ii = 0
xx, yy = np.meshgrid(np.arange(-6, 6, 0.1), np.arange(-6, 6, 0.1))
za = xx + 1j * yy
zb = 0.0
b = 0.09
for ns in nlist:
    ii = ii + 1
    [n, m] = ns
    print('computing I' + str(n) + str(m) + ' ...')
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended high-resolution replacement for interval timing.
    t0 = time.perf_counter()
    inm = gp.Inm(za, zb, b, n, m)
    print(time.perf_counter() - t0, "seconds")
| 376 | 19.944444 | 58 | py |
zpdgen | zpdgen-master/gpdf.py | from numpy import reshape,shape,rank,transpose
from inmzpd import inmweid,gmweid
from epszpd import epsweid,sigweid
def Inm(za, zb, b, n, m):
    """Generalized plasma dispersion function I_nm(za, zb, b).

    Delegates to the compiled ``inmweid`` kernel; a scalar ``za`` yields a
    scalar, an array ``za`` yields an array of the same shape.
    """
    raw = inmweid(za, zb, b, n, m)
    if shape(za) == ():
        return raw[0]
    return transpose(reshape(raw, shape(transpose(za))))
def epsitg(om, pars):
    """ITG dielectric function eps(om) for parameter tuple ``pars``.

    Delegates to the compiled ``epsweid`` kernel; a scalar ``om`` yields a
    scalar, an array ``om`` yields an array of the same shape.
    """
    raw = epsweid(om, pars)
    if shape(om) == ():
        return raw[0]
    return transpose(reshape(raw, shape(transpose(om))))
def sigmazpd(za, zb, b, anm):
    """Weighted sum sigma = sum_{n,m} a_nm I_nm(za, zb, b).

    ``anm`` may be a scalar, a vector, or a matrix of coefficients; higher
    dimensionality is rejected with a printed error and a 0 return (kept for
    backward compatibility).  Scalar ``za`` yields a scalar result,
    array ``za`` an array of the same shape.
    """
    # len(shape(...)) replaces numpy.rank, which was removed from NumPy
    # (use numpy.ndim semantics instead).  It also fixes the NameError the
    # original code hit for 1-d input: numpy.size was never imported in
    # this module.
    # NOTE(review): the module-level `from numpy import ... rank ...` import
    # also fails on modern NumPy and should be dropped there.
    ndim = len(shape(anm))
    if ndim == 2:
        numn, numm = shape(anm)
    elif ndim == 1:
        numn = shape(anm)[0]
        numm = 0
    elif ndim == 0:
        numn = 0
        numm = 0
    else:
        print("error the rank of anm is not 0,1 or 2")
        return 0
    res = sigweid(za, zb, b, anm, numn, numm)
    if shape(za) == ():
        return res[0]
    return transpose(reshape(res, shape(transpose(za))))
def Gm(z1, z2, m):
    """Generalized dispersion function G_m(z1, z2).

    ``z1`` and ``z2`` must share one shape; on mismatch an error is printed
    and an empty tuple is returned.  Scalar inputs yield a scalar result,
    array inputs an array shaped like ``z1``.
    """
    if shape(z1) != shape(z2):
        print("error: shape(z1)!=shape(z2)!\n")
        return ()
    raw = gmweid(z1, z2, m)
    if shape(z1) == () and shape(z2) == ():
        return raw[0]
    return transpose(reshape(raw, shape(transpose(z1))))
| 1,280 | 22.290909 | 54 | py |
zpdgen | zpdgen-master/plot_inm_re.py | import numpy as np
import matplotlib.pyplot as plt
import gpdf as gp
# 3x3 panel plot of Re[I_nm] over the complex zeta_alpha plane for the nine
# (n, m) combinations below; saved as figures_re.pdf.
nlist = [[1, 0], [2, 0], [3, 0], [1, 2], [2, 2], [3, 2], [1, 4], [2, 4], [3, 4]]
cnts = np.arange(-2, 2.2, 0.2)      # contour levels
wdts = np.ones(cnts.shape)          # contour line widths (integer levels thicker)
wdts[0] = 2.0
wdts[5] = 2.0
wdts[10] = 4.0                      # zero contour thickest
wdts[15] = 2.0
wdts[20] = 2.0
vcb = np.arange(-12, 14, 2)         # colorbar tick positions
ii = 0
xx, yy = np.meshgrid(np.arange(-6, 6, 0.1), np.arange(-6, 6, 0.1))
za = xx + 1j * yy
zb = 0.0
b = 0.09
for ns in nlist:
    ii = ii + 1
    [n, m] = ns
    print('computing I' + str(n) + str(m) + ' ...\n')
    inm = gp.Inm(za, zb, b, n, m)
    plt.subplot(3, 3, ii)
    plt.pcolormesh(np.real(za), np.imag(za), np.real(inm), shading='gouraud', rasterized=True)
    plt.clim(-10, 10)
    cs = plt.contour(np.real(za), np.imag(za), np.real(inm), cnts, colors='k', linewidths=wdts * 0.5)
    aa = r'$Re[I_{' + str(n) + str(m) + r'}(\zeta_\alpha,\zeta_\beta,b)]$'
    plt.text(-1, 3, aa, fontsize=14)
    # Only the left column (n == 1) keeps y tick labels and only the bottom
    # row (m == 4) keeps x tick labels.  Boolean values replace the string
    # 'on'/'off' forms, which matplotlib >= 3.x no longer accepts.
    if (m != 4 and n != 1):
        plt.tick_params(labelbottom=False, labelleft=False)
    elif (n == 1 and m == 4):
        plt.text(2, -7.5, r'$Re[\zeta_\alpha]$', fontsize=14)
        plt.text(-7.5, 3, r'$Im[\zeta_\alpha]$', fontsize=14, rotation=90)
        plt.tick_params(axis='both', which='major', labelsize=9, labelbottom=True, labelleft=True)
    elif (m == 4):
        plt.text(2, -7.5, r'$Re[\zeta_\alpha]$', fontsize=14)
        plt.tick_params(axis='both', which='major', labelsize=9, labelbottom=True, labelleft=False)
    elif (n == 1):
        plt.text(-7.5, 3, r'$Im[\zeta_\alpha]$', fontsize=14, rotation=90)
        plt.tick_params(axis='both', which='major', labelsize=9, labelleft=True, labelbottom=False)
plt.subplots_adjust(wspace=0.02, hspace=0.02, left=0.05, right=0.92, top=0.95, bottom=0.05)
fig = plt.gcf()
cax = fig.add_axes([0.93, 0.1, 0.02, 0.80])
# Build a throwaway filled-contour plot in a second figure only to obtain a
# mappable with the clipped value range for the shared colorbar.
plt.figure(2)
s = plt.contourf(np.real(za), np.imag(za), np.real(inm) * (np.real(inm) < 14) * (np.real(inm) > -14), 100)
plt.clim(-10, 10)
plt.close()
plt.figure(1)
cbar = plt.colorbar(s, cax=cax, ticks=vcb)
cbar.ax.tick_params(labelsize=8)
cbar.solids.set_rasterized(True)
fig.set_size_inches(8, 8)
plt.savefig("figures_re.pdf")
| 1,970 | 35.5 | 97 | py |
zpdgen | zpdgen-master/plot_itg_jykim94.py | import numpy as np
import matplotlib.pyplot as plt
import gpdf as gp
from scipy.optimize import root
# Reproduce the ITG benchmark figure: growth rate and real frequency
# versus ky, compared against the digitized reference curves in
# jykim94a.dat / jykim94a_re.dat; output is fig_kim94a.pdf.
etai=2.5
LnbyR=0.2
rbyR=0.18   # not used below
kpar=0.1
tau=1.0
def epsfun(v):
    # Dielectric function of the local ITG dispersion relation;
    # v = [Re(omega), Im(omega)], returns [Re(eps), Im(eps)] for scipy.root.
    om=v[0]+1j*v[1]
    omsi=-ky
    omdi=2*omsi*LnbyR
    za=-om/omdi
    zb=-np.sqrt(2)*kpar/omdi
    b=ky**2
    i10=gp.Inm(za,zb,b,1,0)
    i12=gp.Inm(za,zb,b,1,2)
    i30=gp.Inm(za,zb,b,3,0)
    eps=1+1/tau+(i10*(om-omsi*(1-1.5*etai))-omsi*etai*(i12+i30))/omdi
    res=[float(np.real(eps)),float(np.imag(eps))]
    return res
# Two-directional ky sweep seeded at ky0 (previous root -> next guess).
kys=np.arange(0.01,2.4,0.01)
ky0=0.7
om0=[-0.2,0.2]
omky=np.zeros(np.shape(kys))*(1+1j)
ind0=np.argmin((kys-ky0)**2)
numk=len(kys)
inds=np.concatenate((np.arange(ind0,numk), np.arange(ind0-1,-1,-1)))
for l in inds:
    ky=kys[l]
    res=root(epsfun,om0,tol=1e-8,method='hybr')
    omky[l]=res.x[0]+1j*res.x[1]
    if(l==numk-1):
        # End of the upward sweep: restart downward from the seed root.
        om0=[float(np.real(omky[ind0])),float(np.imag(omky[ind0]))]
    else:
        om0=res.x
# Left panel: growth rate Im(omega) vs the digitized reference curve.
phi=np.loadtxt('jykim94a.dat')
ax1=plt.subplot(1,2,1)
plt.plot(kys,np.imag(omky),color='k',linewidth=2)
plt.plot(phi[:,0],phi[:,1],'--',color='r')
plt.xlabel('$k_y$',fontsize=18)
plt.ylabel('$\gamma$',fontsize=18)
plt.tight_layout()
#plt.axis([0.0,2.0,-0.1,0.2])
#plt.plot(kys,-np.real(omky),'r')
#plt.show()
x0,x1 = ax1.get_xlim()
y0,y1 = ax1.get_ylim()
ax1.set_aspect((x1-x0)/(y1-y0))   # square panel
# Right panel: real frequency vs the (sign-flipped) reference curve.
ax1=plt.subplot(1,2,2)
phi=np.loadtxt('jykim94a_re.dat')
plt.plot(kys,np.real(omky),color='k',linewidth=2)
plt.plot(phi[:,0],-phi[:,1],'--',color='r')
plt.xlabel('$k_y$',fontsize=18)
plt.ylabel('$\omega_r$',fontsize=18)
plt.tight_layout()
x0,x1 = ax1.get_xlim()
y0,y1 = ax1.get_ylim()
ax1.set_aspect((x1-x0)/(y1-y0))
plt.savefig("fig_kim94a.pdf")
# NOTE(review): despite the name, gam holds the REAL part of omega here.
gam=np.real(omky)
np.savetxt('test.txt',gam, delimiter=" ")
| 1,715 | 24.61194 | 69 | py |
zpdgen | zpdgen-master/itg_ai_loc_comb.py | import numpy as np
import matplotlib.pyplot as plt
import gpdf as gp
from scipy.optimize import root
# Same ITG ky scan as itg_ai_loc.py, but the dielectric function is
# evaluated through the combined gp.epsitg wrapper instead of summing
# individual I_nm terms here.
etai=2.5
LnbyR=0.2
rbyR=0.18   # not used below
kpar=0.1
tau=1.0
def epsfun(v):
    # v = [Re(omega), Im(omega)]; returns [Re(eps), Im(eps)] for scipy.root.
    om=v[0]+1j*v[1]
    omsi=-ky
    omdi=2*omsi*LnbyR
    # NOTE(review): za, zb, b are computed but unused — presumably epsitg
    # derives them internally from pars; confirm and remove.
    za=-om/omdi
    zb=-np.sqrt(2)*kpar/omdi
    b=ky**2
    pars=(omdi,etai,tau,ky,kpar)
    eps=gp.epsitg(om,pars)
    res=[float(np.real(eps)),float(np.imag(eps))]
    return res
# Two-directional sweep over ky seeded at ky0 (see itg_ai_loc.py).
kys=np.arange(0.01,3.0,0.01)
ky0=0.7
om0=[-0.2,0.2]
omky=np.zeros(np.shape(kys))*(1+1j)
ind0=np.argmin((kys-ky0)**2)
numk=len(kys)
inds=np.concatenate((np.arange(ind0,numk), np.arange(ind0-1,-1,-1)))
for l in inds:
    ky=kys[l]
    res=root(epsfun,om0,tol=1e-8,method='hybr')
    omky[l]=res.x[0]+1j*res.x[1]
    if(l==numk-1):
        om0=[float(np.real(omky[ind0])),float(np.imag(omky[ind0]))]
    else:
        om0=res.x
plt.plot(kys,np.imag(omky),'x-')
#plt.axis([0.0,2.0,-0.1,0.2])
#plt.plot(kys,-np.real(omky),'r')
plt.show()
| 935 | 20.767442 | 68 | py |
zpdgen | zpdgen-master/plot_inm_im.py | import numpy as np
import matplotlib.pyplot as plt
import gpdf as gp
# Build a 3x3 grid of panels showing Im[I_nm(za, zb, b)] over the complex
# za plane for the (n, m) pairs in nlist, add a shared colourbar and save
# the figure as a PDF.
nlist=[[1,0],[2,0],[3,0],[1,2],[2,2],[3,2],[1,4],[2,4],[3,4]]
cnts=np.arange(-2,2.2,0.2)           # contour levels
wdts=np.ones(cnts.shape);            # per-contour line widths
wdts[0]=2.0
wdts[5]=2.0
wdts[10]=4.0                         # emphasise the zero contour
wdts[15]=2.0
wdts[20]=2.0
vcb = np.arange(-12,14,2)            # colourbar tick locations
ii=0;
xx,yy=np.meshgrid(np.arange(-6,6,0.1),np.arange(-6,6,0.1))
za=xx+1j*yy;                         # complex evaluation grid
zb=0.0
b=0.09
for ns in nlist:
    ii=ii+1;
    [n,m]=ns;
    print('computing I'+str(n)+str(m)+' ...\n')
    inm=gp.Inm(za,zb,b,n,m)
    plt.subplot(3,3,ii)
    plt.pcolormesh(np.real(za),np.imag(za),np.imag(inm),shading='gouraud',rasterized=True)
    plt.clim(-10,10)
    cs=plt.contour(np.real(za),np.imag(za),np.imag(inm),cnts,colors='k',linewidths=wdts*0.5)
    aa=r'$Im[I_{'+str(n)+str(m)+r'}(\zeta_\alpha,\zeta_\beta,b)]$'
    plt.text(-1,3,aa,fontsize=14)
    # Only label axes on the left column (n==1) and bottom row (m==4).
    # NOTE(review): the string values 'on'/'off' for labelbottom/labelleft
    # were removed in modern matplotlib (use True/False) — confirm the
    # targeted matplotlib version.
    if (m!=4 and n!=1):
        plt.tick_params(labelbottom='off',labelleft='off')
    elif(n==1 and m==4) :
        plt.text(2,-7.5,r'$Re[\zeta_\alpha]$',fontsize=14)
        plt.text(-7.5,3,r'$Im[\zeta_\alpha]$',fontsize=14,rotation=90)
        plt.tick_params(axis='both', which='major', labelsize=9,labelbottom='on',labelleft='on')
    elif(m==4) :
        plt.text(2,-7.5,r'$Re[\zeta_\alpha]$',fontsize=14)
        plt.tick_params(axis='both', which='major', labelsize=9,labelbottom='on',labelleft='off')
    elif(n==1):
        plt.text(-7.5,3,r'$Im[\zeta_\alpha]$',fontsize=14,rotation=90)
        plt.tick_params(axis='both', which='major', labelsize=9,labelleft='on',labelbottom='off')
plt.subplots_adjust(wspace=0.02,hspace=0.02,left=0.05,right=0.92,top=0.95,bottom=0.05)
fig=plt.gcf()
cax = fig.add_axes([0.93, 0.1, 0.02, 0.80])
# Build a throw-away filled-contour plot in a second figure purely to get a
# mappable with the clipped data range for the shared colourbar.
plt.figure(2)
s=plt.contourf(np.real(za),np.imag(za),np.imag(inm)*(np.imag(inm)<14)*(np.imag(inm)>-14),100)
plt.clim(-10,10)
plt.close()
plt.figure(1)
cbar=plt.colorbar(s, cax=cax,ticks=vcb)
cbar.ax.tick_params(labelsize=8)
cbar.solids.set_rasterized(True)
fig.set_size_inches(8,8)
plt.savefig("figures_im.pdf")
| 1,970 | 35.5 | 97 | py |
GAStimator | GAStimator-master/setup.py | from setuptools import setup
# Installation metadata for the GAStimator package.
with open("README.md", "r") as fh:
    long_description = fh.read()   # reuse the README as the PyPI long description
setup(name='gastimator',
      version='0.4.7',
      description='Implementation of a Python MCMC gibbs-sampler with adaptive stepping',
      url='https://github.com/TimothyADavis/GAStimator',
      author='Timothy A. Davis',
      author_email='DavisT@cardiff.ac.uk',
      long_description=long_description,
      long_description_content_type="text/markdown",
      license='GNU GPLv3',
      packages=['gastimator'],
      install_requires=[
          'numpy',
          'matplotlib',
          'tqdm',
          'plotbin',
          'joblib>=0.16.0',
      ],
      classifiers=[
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
          'Programming Language :: Python :: 3',
          'Operating System :: OS Independent',
      ],
      zip_safe=False)
| 962 | 29.09375 | 90 | py |
GAStimator | GAStimator-master/gastimator/priors.py | #!/usr/bin/env python3
# coding: utf-8
import numpy as np
class priors:
    """Container/namespace class grouping the available prior implementations."""

    def __init__(self):
        # Fixed: the original signature was `def __init__():` (no `self`),
        # so instantiating priors() always raised a TypeError.
        pass
class gaussian:
    """Gaussian (normal) prior on a single parameter.

    eval() returns the natural log of the normalised Gaussian density,
    ln N(x; mu, sigma).
    """

    def __init__(self, mu, sigma):
        self.mu = mu        # mean of the prior
        self.sigma = sigma  # standard deviation of the prior

    def eval(self, x, **kwargs):
        """Return ln(prior) at x; extra keyword args are accepted and ignored."""
        z = (x - self.mu) / self.sigma
        # 2.5066282746310002 == sqrt(2*pi), so this is the log of the
        # normalising constant 1/(sqrt(2*pi)*sigma).
        log_norm = np.log(2.5066282746310002 * self.sigma)
        return -(z * z) / 2.0 - log_norm
| 422 | 22.5 | 92 | py |
GAStimator | GAStimator-master/gastimator/__init__.py | #!/usr/bin/env python3
# coding: utf-8
from gastimator.gastimator import gastimator
from gastimator.priors import priors | 120 | 29.25 | 44 | py |
GAStimator | GAStimator-master/gastimator/corner_plot.py | ##############################################################################
#
# This is the history of Michele Cappellari modifications
# to Daniel Foreman-Mackey corner_plot routine.
#
# V1.0.0: Included "like" and "xstry" optional inputs
# to show individual points coloured by their likelihood
# and to show all points tried by MCMC.
# Michele Cappellari, Oxford, 12 January 2014
# V1.1.0: Added "init" keyword to simply initialize an empty window.
# MC, Oxford, 10 October 2014
# V1.1.1: Check that input sizes of "extents" and "labels" match.
# MC, Portsmouth, 15 October 2014
# V1.1.2: Only show unique dots in "xs" and "like", but properly include
# duplicates in histograms. MC, Oxford, 4 November 2014
# V1.1.3: Allow for scaling of axes labels.
# Fix program stop when values are the same within numerical accuracy
# but np.std(x) is not exactly zero. MC, Oxford, 10 December 2014
# V1.1.4: Fix potential program stop with few input values.
# MC, Oxford, 20 November 2015
# V1.1.5: Included histo_bin_width(). MC, Oxford, 8 February 2017
# V1.1.6: Added `rasterized` keyword and default value based on points number.
# Use plt.plot for try values. MC, Oxford, 25 January 2018
# V1.1.7: Changes definition of `extents` for consistency with Scipy's `bounds`.
# MC, Oxford, 27 April 2018
#
##############################################################################
from __future__ import print_function, absolute_import, unicode_literals
__all__ = ["corner_plot", "hist2d", "error_ellipse"]
__version__ = "0.0.6"
__author__ = "Dan Foreman-Mackey (danfm@nyu.edu)"
__copyright__ = "Copyright 2013 Daniel Foreman-Mackey"
__contributors__ = [
# Alphabetical by first name.
"Adrian Price-Whelan @adrn",
"Brendon Brewer @eggplantbren",
"Ekta Patel @ekta1224",
"Emily Rice @emilurice",
"Geoff Ryan @geoffryan",
"Kyle Barbary @kbarbary",
"Phil Marshall @drphilmarshall",
"Pierre Gratier @pirg",
]
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import Ellipse
import matplotlib.cm as cm
from plotbin.sauron_colormap import register_sauron_colormap
######################################################################
def corner_plot(xs, like=None, xstry=None, weights=None, labels=None,
                extents=None, truths=None, truth_color="steelblue", fignum=None,
                scale_hist=False, quantiles=[], verbose=True, init=False,
                plot_contours=True, plot_datapoints=True, fig=None,
                labels_scaling=1, rasterized=None, **kwargs):
    """
    Make a corner plot showing the projections of a data set in a
    multi-dimensional space. kwargs are passed to hist2d() or used for
    `matplotlib` styling.

    Parameters
    ----------
    xs : array_like (nsamples, ndim)
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in a simple histogram. For a 2-D array, the zeroth
        axis is the list of samples and the next axis are the dimensions of
        the space.
    xstry : array_like (nsamples, ndim) (optional)
        Contains all tried parameters, not only the accepted moves.
    like : array_like (nsamples) (optional)
        Likelihood of each xs[j, :] set of parameters to be shown on the plot.
    weights : array_like (nsamples,)
        The weight of each sample. If `None` (default), samples are given
        equal weight.
    labels : iterable (ndim,) (optional)
        A list of names for the dimensions.
    extents : 2-tuple of array_like (2, ndim)
        Lower and upper bounds on independent variables.
        Each array must match the size ndim, e.g.,
        [(0., 10., 30., 25.), (1., 15, 40., 55.)].
    truths : iterable (ndim,) (optional)
        A list of reference values to indicate on the plots.
    truth_color : str (optional)
        A ``matplotlib`` style color for the ``truths`` makers.
    scale_hist : bool (optional)
        Should the 1-D histograms be scaled in such a way that the zero line
        is visible?
    quantiles : iterable (optional)
        A list of fractional quantiles to show on the 1-D histograms as
        vertical dashed lines.
    verbose : bool (optional)
        If true, print the values of the computed quantiles.
    plot_contours : bool (optional)
        Draw contours for dense regions of the plot.
    plot_datapoints : bool (optional)
        Draw the individual data points.
    fig : matplotlib.Figure (optional)
        Overplot onto the provided figure object.
    """
    # NOTE(review): quantiles=[] is a mutable default; harmless here since
    # it is only read, never mutated, but a None sentinel would be safer.
    # Deal with 1D sample lists.
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    if xstry is not None:
        xstry = xstry.T
    if labels is not None:  # note xs was already transposed
        assert len(labels) == xs.shape[0], 'lengths of labels must match number of dimensions'
    if extents is not None:
        extents = np.array(extents).T
        assert len(extents) == xs.shape[0], 'lengths of extents must match number of dimensions'
    if weights is not None:
        weights = np.asarray(weights)
        assert weights.ndim == 1, 'weights must be 1-D'
        assert xs.shape[1] == weights.shape[0], 'lengths of weights must match number of samples'
    # NOTE(review): cm._colormaps is a private matplotlib attribute — this
    # registration check may break across matplotlib versions.
    if "sauron" not in cm._colormaps.keys():
        register_sauron_colormap()
    # backwards-compatibility
    plot_contours = kwargs.get("smooth", plot_contours)
    K = len(xs)  # number of dimensions -> K x K grid of panels
    if fig is None:
        factor = 2.0          # size of one side of one panel
        lbdim = 0.5 * factor  # size of left/bottom margin
        trdim = 0.05 * factor # size of top/right margin
        whspace = 0.05        # w/hspace size
        plotdim = factor * K + factor * (K - 1.) * whspace
        dim = lbdim + plotdim + trdim
        fig, axes = plt.subplots(K, K, figsize=(dim, dim), num=fignum)
        lb = lbdim / dim
        tr = (lbdim + plotdim) / dim
        fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
                            wspace=whspace, hspace=whspace)
        if init:
            # Caller only wanted an empty, correctly laid out window.
            return fig
    else:
        try:
            axes = np.array(fig.axes).reshape((K, K))
        except:
            raise ValueError("Provided figure has {0} axes, but data has "
                             "dimensions K={1}".format(len(fig.axes), K))
    if extents is None:
        extents = [[x.min(), x.max()] for x in xs]
        # Check for parameters that never change.
        m = np.array([e[0] == e[1] for e in extents], dtype=bool)
        if np.any(m):
            raise ValueError(("It looks like the parameter(s) in column(s) "
                              "{0} have no dynamic range. Please provide an "
                              "`extent` argument.")
                             .format(", ".join(map("{0}".format,
                                                   np.arange(len(m))[m]))))
    else:
        # If any of the extents are percentiles, convert them to ranges.
        for i in range(len(extents)):
            try:
                emin, emax = extents[i]
            except TypeError:
                q = [0.5 - 0.5*extents[i], 0.5 + 0.5*extents[i]]
                extents[i] = quantile(xs[i], q, weights=weights)
    # Extract unique values to reduce the memory taken by the figure (MC)
    # The unique version is used for the scatter plots but not for the histograms
    if like is not None:
        likeUnique, w = np.unique(like, return_index=True)
        xsUnique = xs[:, w]
        w = np.argsort(likeUnique)  # Sort to plot most likely values last (MC)
        mx = likeUnique[w[-1]]      # Maximum likelihood value so far
        likeUnique = likeUnique[w]
        xsUnique = xsUnique[:, w]
        if rasterized is None:
            rasterized = True if xsUnique.size > 1e4 else False
    for i, x in enumerate(xs):
        # Diagonal panel: 1-D histogram of parameter i.
        ax = axes[i, i]
        # Plot the histograms. Scott's rule for bin size
        # For a Normal distribution sigma=1.4826*MAD, so the robust version of
        # Scott (1979) rule: binsize = 3.49*sigma/n^{1/3} is binsize = 5.17*MAD/n^{1/3}
        # binsize = 5.17*np.median(np.abs(x - np.median(x))) / x.size**(1./3.)
        binsize = 3.49*np.std(x) / x.size**(1./3.)
        # binsize = histo_bin_width(x)
        if binsize > 0:
            nbins = int((extents[i][1] - extents[i][0])/binsize)
            # if nbins > 20:
            #     nbins //= 2 # divide by two for aesthetic reasons and keep integer
            if nbins > 1e4:  # x values are the same within numerical accuracy
                nbins = 5
        else:
            nbins = 5
        ax.cla()  # clean current subplot before plotting
        n, b, p = ax.hist(x, weights=weights, bins=kwargs.get("bins", nbins),
                          range=extents[i], color=kwargs.get("color", "b"),
                          histtype='stepfilled')
        # Axes labels larger than tick labels
        ax.yaxis.label.set_size(plt.rcParams['font.size']*labels_scaling)
        ax.xaxis.label.set_size(plt.rcParams['font.size']*labels_scaling)
        if truths is not None:
            ax.axvline(truths[i], color=truth_color)
        # Plot quantiles if wanted.
        if len(quantiles) > 0:
            qvalues = quantile(x, quantiles, weights=weights)
            for q in qvalues:
                ax.axvline(q, ls="dashed", color=kwargs.get("color", "k"))
            if verbose:
                print("Quantiles:")
                print([item for item in zip(quantiles, qvalues)])
        # Set up the axes.
        ax.set_xlim(extents[i])
        if scale_hist:
            maxn = np.max(n)
            ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
        else:
            ax.set_ylim(0, 1.1 * np.max(n))
        ax.set_yticklabels([])
        ax.xaxis.set_major_locator(MaxNLocator(5))
        # Not so DRY.
        if i < K - 1:
            ax.set_xticklabels([])
        else:
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            if labels is not None:
                ax.set_xlabel(labels[i])
                ax.xaxis.set_label_coords(0.5, -0.3)
        for j, y in enumerate(xs):
            # Off-diagonal panel (i, j): 2-D projection of parameters j vs i.
            ax = axes[i, j]
            if j > i:
                # Upper triangle is unused — hide it.
                ax.set_visible(False)
                ax.set_frame_on(False)
                continue
            elif j == i:
                continue
            ax.cla()  # clean current subplot before plotting
            ax.yaxis.label.set_size(plt.rcParams['font.size']*labels_scaling)
            ax.xaxis.label.set_size(plt.rcParams['font.size']*labels_scaling)
            if xstry is not None:
                ax.plot(xstry[j,:], xstry[i,:], ',m', ms=1, rasterized=rasterized, zorder=0)
            # Plot black when DeltaLogLike=9/2-->DeltaChi2=9 (3sigma)
            if like is not None:
                ax.scatter(xsUnique[j,:], xsUnique[i,:], c=likeUnique,
                           vmin=mx-4.5, vmax=mx, edgecolors='None',
                           cmap='sauron', rasterized=rasterized, **kwargs)
                ax.set_xlim(extents[j])
                ax.set_ylim(extents[i])
            else:
                hist2d(y, x, ax=ax, extent=[extents[j], extents[i]],
                       plot_contours=plot_contours,
                       plot_datapoints=plot_datapoints,
                       weights=weights, **kwargs)
            # ax.tick_params(width=2, which='major')
            if truths is not None:
                ax.plot(truths[j], truths[i], "s", color=truth_color)
                ax.axvline(truths[j], color=truth_color)
                ax.axhline(truths[i], color=truth_color)
            ax.xaxis.set_major_locator(MaxNLocator(5))
            ax.yaxis.set_major_locator(MaxNLocator(5))
            if i < K - 1:
                ax.set_xticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[j])
                    ax.xaxis.set_label_coords(0.5, -0.3)
            if j > 0:
                ax.set_yticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                if labels is not None:
                    ax.set_ylabel(labels[i])
                    ax.yaxis.set_label_coords(-0.3, 0.5)
    return fig
######################################################################
def quantile(x, q, weights=None):
    """Compute quantiles of *x*.

    Like numpy.percentile, except that *q* holds fractional quantiles in
    [0, 1] rather than percentiles, *q* must be iterable (no scalar), and
    optional per-sample *weights* are supported.  The unweighted branch
    returns an ndarray; the weighted branch returns a list.
    """
    if weights is None:
        percentiles = [100. * qi for qi in q]
        return np.percentile(x, percentiles)
    # Weighted quantiles: interpolate the empirical weighted CDF.
    order = np.argsort(x)
    x_sorted = x[order]
    cdf = np.cumsum(weights[order])
    cdf /= cdf[-1]
    return np.interp(q, cdf, x_sorted).tolist()
######################################################################
def error_ellipse(mu, cov, ax=None, factor=1.0, **kwargs):
    """Draw the error ellipse of covariance matrix *cov*, centred on *mu*,
    scaled by *factor*, on axes *ax* (current axes if None). Returns the
    Ellipse patch."""
    # Sensible defaults unless the caller overrides them.
    facecolor = kwargs.pop('facecolor', 'none')
    edgecolor = kwargs.pop('edgecolor', 'k')
    x, y = mu
    # Principal axes and orientation from the SVD of the covariance.
    U, S, V = np.linalg.svd(cov)
    theta = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
    width = 2 * np.sqrt(S[0]) * factor
    height = 2 * np.sqrt(S[1]) * factor
    ellipsePlot = Ellipse(xy=[x, y], width=width, height=height, angle=theta,
                          facecolor=facecolor, edgecolor=edgecolor, **kwargs)
    if ax is None:
        ax = plt.gca()
    ax.add_patch(ellipsePlot)
    return ellipsePlot
######################################################################
def hist2d(x, y, *args, **kwargs):
    """
    Plot a 2-D histogram of samples: optional scatter of the raw points,
    plus density contours whose levels enclose 0.5/1/1.5/2-sigma fractions
    of the samples.
    """
    ax = kwargs.pop("ax", plt.gca())
    extent = kwargs.pop("extent", [[x.min(), x.max()], [y.min(), y.max()]])
    bins = kwargs.pop("bins", 50)
    color = kwargs.pop("color", "k")
    linewidths = kwargs.pop("linewidths", None)
    plot_datapoints = kwargs.get("plot_datapoints", True)
    plot_contours = kwargs.get("plot_contours", True)
    # Build a gray colormap that fades to transparent.
    # NOTE(review): cm.get_cmap and the _init/_lut attributes are
    # deprecated/private in recent matplotlib — may break on upgrade.
    cmap = cm.get_cmap("gray")
    cmap._init()
    cmap._lut[:-3, :-1] = 0.
    cmap._lut[:-3, -1] = np.linspace(1, 0, cmap.N)
    X = np.linspace(extent[0][0], extent[0][1], bins + 1)
    Y = np.linspace(extent[1][0], extent[1][1], bins + 1)
    try:
        H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=(X, Y),
                                 weights=kwargs.get('weights', None))
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "have no dynamic range. You could try using the "
                         "`extent` argument.")
    # Target enclosed fractions for 0.5, 1.0, 1.5, 2.0 sigma.
    V = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
    # Convert the fractions into histogram-count contour levels.
    Hflat = H.flatten()
    inds = np.argsort(Hflat)[::-1]
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    sm /= sm[-1]
    for i, v0 in enumerate(V):
        try:
            V[i] = Hflat[sm <= v0][-1]
        except:
            V[i] = Hflat[0]
    # Bin centres (X1, Y1) and lower bin edges (X, Y).
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
    X, Y = X[:-1], Y[:-1]
    if plot_datapoints:
        ax.plot(x, y, "o", color=color, ms=1.5, zorder=-1, alpha=0.1,
                rasterized=True)
        if plot_contours:
            # White fill masks the points inside the outermost contour.
            ax.contourf(X1, Y1, H.T, [V[-1], H.max()],
                        cmap=LinearSegmentedColormap.from_list("cmap",
                                                               ([1] * 3,
                                                                [1] * 3),
                                                               N=2), antialiased=False)
    if plot_contours:
        ax.pcolor(X, Y, H.max() - H.T, cmap=cmap)
        ax.contour(X1, Y1, H.T, V, colors=color, linewidths=linewidths)
    data = np.vstack([x, y])
    mu = np.mean(data, axis=1)
    cov = np.cov(data)
    if kwargs.pop("plot_ellipse", False):
        error_ellipse(mu, cov, ax=ax, edgecolor="r", ls="dashed")
    ax.set_xlim(extent[0])
    ax.set_ylim(extent[1])
######################################################################
def _hist_bin_sturges(x):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (np.log2(x.size) + 1.0)
######################################################################
def _hist_bin_fd(x):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
######################################################################
def histo_bin_width(x):
    """
    Copied from Numpy 1.11: histogram bin estimator taking the minimum
    width of the Freedman-Diaconis and Sturges estimators.

    FD is usually the most robust method, but its width estimate tends to
    be too large for small `x`; Sturges is quite good for small (<1000)
    datasets and is the default in R. Taking the minimum of the two gives
    good off-the-shelf behaviour.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.

    See Also
    --------
    _hist_bin_fd, _hist_bin_sturges
    """
    # No need to check for zero: if the range is zero so is the IQR and
    # vice versa — either both estimates are zero or neither is.
    fd_width = _hist_bin_fd(x)
    sturges_width = _hist_bin_sturges(x)
    return min(fd_width, sturges_width)
######################################################################
| 19,149 | 35.406844 | 97 | py |
GAStimator | GAStimator-master/gastimator/gastimator.py | #!/usr/bin/env python3
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from joblib import Parallel, delayed,cpu_count
from joblib.externals.loky import get_reusable_executor
def lnlike(data, model, error):
    """Default Gaussian log-likelihood: -0.5 * chi^2, with NaN terms
    excluded from the sum (np.nansum)."""
    squared_residuals = (data - model) ** 2
    chi2 = np.nansum(squared_residuals / error ** 2)
    return -0.5 * chi2
def unwrap_self(args, **kwarg):
    # Module-level trampoline for joblib workers: unpacks the
    # (instance, *call_args) tuple and forwards it to
    # gastimator.run_a_chain — presumably because bound methods do not
    # pickle cleanly across processes (TODO confirm).
    return gastimator.run_a_chain(*args, **kwarg)
class gastimator:
def __init__(self, model,*args, **kwargs):
    """Store the model callable, tuning constants and fitting state.

    `model` is later called as model(param_values, *args, **kwargs) and its
    return value is compared to the data passed to run() via lnlike_func.
    Bounds, guesses, precision, labels etc. must be assigned by the user
    before calling run() (they are validated by input_checks()).
    """
    self.targetrate= 0.25   # acceptance rate required for convergence
    self.dec=0.95           # proposal-width shrink factor on rejection
    self.inc=1.05           # proposal-width growth factor on acceptance
    self.model=model
    self.silent=False       # suppress progress printing when True
    self.rng=None           # numpy RandomState, created in run()
    self.args=args          # extra positional args forwarded to model
    self.kwargs=kwargs      # extra keyword args forwarded to model
    self.min=None           # per-parameter lower bounds (user sets)
    self.max=None           # per-parameter upper bounds (user sets)
    self.guesses=None       # initial parameter guesses (user sets)
    self.precision=None     # per-parameter convergence tolerance (user sets)
    self.prior_func=None    # optional per-parameter ln-prior callables
    self.fixed=None         # boolean mask of parameters held fixed
    self.fdata=None         # data to fit, set in run()
    self.error=None         # data uncertainties, set in run()
    self.labels=None        # parameter names for printing/plotting
    self.change=None        # indices of the non-fixed parameters
    self.npars=None         # number of parameters, set in input_checks()
    self.lastchain=None         # samples of the most recent burn-in chain
    self.lastchainll=None       # their log-likelihoods
    self.lastchainaccept=None   # their accept/reject flags
    self.nprocesses=int(cpu_count())-1  # worker count for the final chain
    self.lnlike_func=lnlike     # log-likelihood; user-replaceable
# def likelihood(self,values):
#
# priorval=1
# for ival,prior in enumerate(self.prior_func):
# if callable(prior):
# priorval*=prior(values[ival],allvalues=values,ival=ival)
#
# model=self.model(values,*self.args,**self.kwargs)
# return self.lnlike_func(self.fdata,model,self.error) + np.log(priorval, where=(priorval!=0))
def likelihood(self,values):
priorval=0
for ival,prior in enumerate(self.prior_func):
if callable(prior):
priorval+=prior(values[ival],allvalues=values,ival=ival)
model=self.model(values,*self.args,**self.kwargs)
return self.lnlike_func(self.fdata,model,self.error) + priorval
def take_step(self,values,changethistime, knob):
newvals=values.copy()
acceptable=False
cnt=0.0
while not acceptable:
newvals[changethistime]=values[changethistime]+(self.rng.randn(1)*knob[changethistime])
if (newvals[changethistime] >= self.min[changethistime]) * (newvals[changethistime] <= self.max[changethistime]): acceptable=True
if cnt > 5000: raise Exception('Cannot find a good step. Check input parameters')
cnt+=1
return newvals
def evaluate_step(self, oldvalues, oldll, changethistime, knob, holdknob=False):
accept=True
tryvals=self.take_step(oldvalues,changethistime, knob)
newll=self.likelihood(tryvals)
if np.isnan(newll):
print("Your function returned a Nan for the following parameters",tryvals)
print("Attempting to continue...")
ll=-1e20
ratio= newll-oldll
randacc=np.log(self.rng.uniform(0,1,1))
if randacc > ratio:
accept=False
newval = oldvalues
newll = oldll
if not holdknob: knob[changethistime]*=self.dec
else:
newval=tryvals
if (not holdknob) and (knob[changethistime] < (self.max[changethistime]-self.min[changethistime])*5.):
knob[changethistime]*=self.inc
return newval, newll, accept, tryvals, knob
def chain(self, values, niter, knob, plot=True, holdknob=False, fig=None, ax=None,progress=False,progid=0):
    """Run `niter` Gibbs-sampled Metropolis steps starting from `values`.

    Each step perturbs one randomly chosen non-fixed parameter. When
    `holdknob` is True (the final production chain) the proposal widths
    are frozen, the first self.burn samples are discarded and only the
    accepted samples are returned.

    Returns (samples, log-likelihoods, accept flags, acceptance rate,
    final proposal widths).
    """
    if plot:
        fig, ax=self.make_plots(niter,progid=progid)
        #plt.pause(0.00001)
    # Pre-draw which (free) parameter to vary at each iteration.
    gibbs_change=self.rng.randint(0,self.change.size,niter)
    outputvals=np.empty((self.npars,niter))
    outputll=np.zeros(niter)
    accepted=np.zeros(niter)
    bestll=self.likelihood(values)
    outputvals[:,0]=values
    outputll[0]=bestll
    n_accept=0.
    if progress and (progid==0):
        # Only worker 0 shows a bar; scale updates by the worker count.
        pbar = tqdm(total=(niter)*self.nprocesses)
    for i in range(1,niter):
        newval, newll, accept, tryvals, knob =self.evaluate_step(outputvals[:,i-1],outputll[i-1],self.change[gibbs_change[i]],knob,holdknob=holdknob)
        if progress and (progid==0):
            pbar.update(self.nprocesses)
        if plot:
            self.update_plot(fig, ax,self.change[gibbs_change[i]],i,tryvals, accept)
        outputvals[:,i]=newval
        outputll[i]=newll
        accepted[i]=accept
        n_accept+=accept
    acceptrate=float(n_accept)/niter
    if (holdknob):
        # Production chain: drop burn-in, then keep accepted samples only.
        if (self.burn):
            outputvals= outputvals[:,int(self.burn):-1]
            outputll= outputll[int(self.burn):-1]
            accepted=accepted[int(self.burn):-1]
        outputvals=outputvals[:,accepted.astype(bool)]
        outputll=outputll[accepted.astype(bool)]
    if plot: plt.close()
    if progress and (progid==0):
        pbar.close()
    return outputvals, outputll, accepted, acceptrate, knob
def factors(self,n):
facs=np.array(list(x for tup in ([i, n//i] for i in range(1, int(n**0.5)+1) if n % i == 0) for x in tup))
facs=np.sort(facs)
return facs[int(facs.size/2 - 1):int(facs.size/2 +1)]
def update_plot(self, fig, ax, index, i, new_data, accept):
    """Append the latest proposal for parameter `index` to its live trace
    panel: blue dot if accepted, red if rejected."""
    marker = 'bo' if accept else 'ro'
    panel = ax.flat[index]
    panel.plot(i, new_data[index], marker)
    plt.pause(0.000001)  # let the GUI event loop redraw
def make_plots(self, niter,progid=0):
    """Create the grid of live trace plots, one panel per parameter,
    sized via factors() so the grid is as close to square as possible.
    Returns (fig, ax array)."""
    nplots=self.npars
    plotfac=self.factors(nplots)  # [rows-ish, cols-ish] grid shape
    fig, ax = plt.subplots(plotfac[1], plotfac[0], sharex='col')
    for i in range(0,nplots):
        # Tag each panel with the parameter label plus the chain id.
        (ax.flat[i]).set_ylabel(self.labels[i]+str(progid))
        (ax.flat[i]).set_xlim([0,niter])
        (ax.flat[i]).set_ylim([self.min[i],self.max[i]])
    plt.show(block=False)
    return fig, ax
def run_a_chain(self,startguess,niters,numatonce,knob,plot=True,final=False,progid=0):
    """Run one MCMC chain.

    With final=False: run adaptive blocks of `numatonce` steps until
    either `niters` total steps are reached or the chain converges
    (running parameter means stable to within self.precision AND
    acceptance rate >= self.targetrate). With final=True: run a single
    fixed-width chain of `niters` steps (burn-in removed and only
    accepted samples returned by self.chain).

    Returns (samples, log-likelihoods, final proposal widths).
    """
    count=0
    converged=False
    oldmean=self.guesses*0.0
    newmean=0.0
    startpoint=startguess.copy()
    self.rng=np.random.RandomState()  # fresh seed so parallel chains differ
    if not final:
        while (count < niters) and (not converged):
            outputvals, outputll, accepted, acceptrate, knob = self.chain(startpoint, numatonce, knob, plot=plot, holdknob=False,progress=False)
            if acceptrate*numatonce > 1:
                # At least one accepted step: restart the next block from
                # the last accepted sample and update the running mean.
                w,=np.where(accepted)
                startpoint=outputvals[:,w[-1]]
                newmean=np.mean(outputvals[:,w],axis=1)
            else: newmean=oldmean
            if count == 0:
                self.lastchain=outputvals
                self.lastchainll=outputll
                self.lastchainaccept=accepted
                if (not self.silent): print(" Chain "+str(progid)+" has not converged - Accept rate: "+str(acceptrate))
            else:
                self.lastchain=np.append(self.lastchain,outputvals,axis=1)
                self.lastchainll=np.append(self.lastchainll,outputll)
                self.lastchainaccept=np.append(self.lastchainaccept,accepted)
            # Convergence test: per-parameter mean shift below precision.
            test=(np.abs(newmean-oldmean) < self.precision)
            if test[self.change].sum() == test[self.change].size and (acceptrate >= self.targetrate):
                converged=1
                if (not self.silent): print("Chain "+str(progid)+" converged: LL: "+str(np.max(outputll))+" - Accept rate:"+str(acceptrate))
            else:
                if (not self.silent): print(" Chain "+str(progid)+" has not converged - Accept rate: "+str(acceptrate))
                if test[self.change].sum() == test[self.change].size:
                    if (not self.silent): print(" --> Target rate not reached")
                else:
                    if (not self.silent): print(" --> Still varying: "+str(self.labels[self.change][~test[self.change]]))
            oldmean=newmean
            count += numatonce
        if not converged: print('WARNING: Chain '+str(progid)+' did not converge in '+str(niters)+' steps')
    else:
        #if (self.silent == False)&(self.nprocesses==1):
        #    outputvals, outputll, accepted, acceptrate, knob = self.chain(startpoint, niters, knob, plot=False, holdknob=True,progress=True,progid=progid)
        #else:
        #self.rng=np.random.RandomState() #refresh the RNG
        outputvals, outputll, accepted, acceptrate, knob = self.chain(startpoint, niters, knob, plot=False, holdknob=True,progress=True,progid=progid)
    best_knob=knob
    return outputvals, outputll, best_knob
def input_checks(self):
    """Validate the user-supplied configuration before a run.

    Coerces guesses/min/max/precision/labels to numpy arrays, checks they
    are all set and size-consistent with the number of parameters, fills
    missing prior_func/fixed with defaults, and verifies that the bounds
    are ordered and that each guess lies inside its bounds. Raises
    Exception with a descriptive message on any failure.
    """
    self.guesses=np.array(self.guesses)
    self.min=np.array(self.min)
    self.max=np.array(self.max)
    self.precision=np.array(self.precision)
    self.labels=np.array(self.labels)
    if np.any(self.guesses == None):
        raise Exception('Please set initial guesses')
    self.npars=self.guesses.size
    if np.all(np.array(self.prior_func) == None):
        # No priors supplied: use a list of False placeholders (treated
        # as "no prior" by likelihood(), which tests callable()).
        self.prior_func=(np.zeros(self.npars, dtype=bool)).tolist()
    else:
        try:
            if len(self.prior_func) != self.npars:
                raise Exception('Number of priors given does not match number of parameters')
        except:
            # NOTE(review): the raise above is intercepted by this bare
            # except, so the length error only propagates when npars != 1;
            # this also handles a single un-sized callable — confirm
            # intended.
            if self.npars != 1:
                raise Exception('Number of priors given does not match number of parameters')
    names=["minimum","maximum","precision","labels"]
    check=[self.min,self.max,self.precision,self.labels]
    for x, nam in zip(check,names):
        if np.any(x == None):
            raise Exception('Please set parameter '+str(nam))
        else:
            if x.size != self.npars:
                raise Exception('Number of constraints in '+str(nam)+' does not match number of parameters')
    if np.any(self.fixed == None):
        self.fixed=np.zeros(self.npars, dtype=bool)
        print("You did not specify if any variables are fixed - I will continue assuming that none are")
    if np.any((self.max-self.min) < 0):
        raise Exception('Parameter(s) '+str(self.labels[(self.max-self.min) < 0])+' have incorrect minumum/maximum bounds')
    if np.any((self.guesses<self.min)):
        raise Exception('Parameter(s) '+str(self.labels[(self.guesses<self.min)])+' have an initial guess ('+str(self.guesses[(self.guesses<self.min)])+') lower than the minimum allowed ('+str(self.min[(self.guesses<self.min)])+').')
    if np.any((self.guesses>self.max)):
        raise Exception('Parameter(s) '+str(self.labels[(self.guesses>self.max)])+' have an initial guess higher ('+str(self.guesses[(self.guesses>self.max)])+') than the maximum allowed ('+str(self.max[(self.guesses>self.max)])+').')
def run(self, fdata, error, niters, numatonce=None, burn=None, nchains=1, plot=True, output=None, seed=None):
    """Fit `fdata` (with uncertainties `error`) with the stored model.

    Runs `nchains` adaptive burn-in chains (in parallel if possible),
    picks the best one, then runs a final fixed-width production chain of
    `niters` steps split across self.nprocesses workers.

    Returns (samples, log-likelihoods) of the accepted production-chain
    steps; samples has shape (npars, nsamples).
    """
    # check all required inputs set
    self.input_checks()
    if not self.silent:
        stri=''
        for name,pr in zip(self.labels,self.prior_func):
            if callable(pr):
                stri=stri+' '+name
        if stri!='':
            print("Non uniform priors used: "+stri)
    # set up variables needed
    self.fdata=fdata
    self.error=error
    self.rng=np.random.RandomState(seed)
    self.change, = np.where(self.fixed == False)  # indices of free params
    verybestvalues=self.guesses
    verybestknob=None
    verybestll=-1e31
    if ((0.8*(niters/self.nprocesses)) < 1000) & (self.nprocesses > 1):
        print("WARNING: The chain assigned to each processor will be very short (<1250 steps) - consider reducing 'nprocesses'.")
    if not numatonce: numatonce=50*self.npars
    if not burn:
        # Default: discard 20% of each worker's production chain.
        self.burn=0.2*(niters/self.nprocesses)
    else:
        self.burn=burn
    if (self.silent==False)&(self.nprocesses>1):
        verboselev=10
    else:
        verboselev=0
    self.progline=[]
    for chainno in range(0,nchains):
        self.progline.append('Doing chain '+str(chainno+1))
    knob=(0.5*(self.max-self.min))  # initial proposal widths
    #breakpoint()
    if (self.nprocesses>1)&(nchains>1):
        ## do initial chains in parallel
        #breakpoint()
        #try:
        par= Parallel(n_jobs= self.nprocesses, verbose=verboselev)
        results=par(delayed(unwrap_self)(i) for i in zip([self]*nchains, [self.guesses]*nchains,[int(float(niters))]*nchains,[numatonce]*nchains,[knob]*nchains, [plot]*nchains, [False]*nchains,np.arange(nchains)))
        #except:
        #    par= Parallel(n_jobs= self.nprocesses, verbose=verboselev, prefer="threads")
        #    results=par(delayed(unwrap_self)(i) for i in zip([self]*nchains, [self.guesses]*nchains,[int(float(niters))]*nchains,[numatonce]*nchains,[knob]*nchains, [plot]*nchains, [False]*nchains,np.arange(nchains)))
        results=np.array(results,dtype=object)
        par._terminate_backend()
        get_reusable_executor().shutdown(wait=True)
        ##
        #debug line #results=unwrap_self((self, self.guesses,int(float(niters)),numatonce,knob, False, False,0))
        # Pick the chain (and the sample within it) with the highest LL.
        bestchain=np.argmax(np.max(np.stack(results[:,1]),axis=1))
        bestvalinbestchain=np.argmax(results[bestchain,1])
        verybestvalues=np.stack(results[bestchain,0])[:,bestvalinbestchain]
        verybestknob=results[bestchain,2]
        verybestll=np.stack(results[bestchain,1])[bestvalinbestchain]
    else:
        # Serial fallback: run the burn-in chains one after another.
        for chainno in range(0,nchains):
            if not self.silent: print('Doing chain '+str(chainno+1))
            #knob=(0.5*(self.max-self.min))
            outputvals, outputll, best_knob = self.run_a_chain(self.guesses,niters,numatonce,knob,plot=plot)
            #breakpoint()
            if (np.max(outputll) > verybestll) or (chainno == 0):
                if not self.silent: print("Best chain so far!")
                w,=np.where(outputll == np.max(outputll))
                verybestvalues=outputvals[:,w[0]].reshape(self.npars)
                verybestknob=best_knob
                verybestll=np.max(outputll)
    if not self.silent:
        print("Best fit:")
        for i in range(0,self.labels.size):
            print(" "+self.labels[i]+":",verybestvalues[i])
        print("Starting final chain")
    # Production run: split niters across the workers, starting each from
    # the best burn-in point with the tuned proposal widths held fixed.
    results = []
    #try:
    par= Parallel(n_jobs= self.nprocesses, verbose=verboselev)
    results=par(delayed(unwrap_self)(i) for i in zip([self]*self.nprocesses, [verybestvalues]*self.nprocesses,[int(float(niters)/float(self.nprocesses))]*self.nprocesses,[numatonce]*self.nprocesses,[verybestknob]*self.nprocesses, [False]*self.nprocesses, [True]*self.nprocesses,np.arange(self.nprocesses)))
    # except:
    #     par= Parallel(n_jobs= self.nprocesses, verbose=verboselev,prefer="threads")
    #     results=par(delayed(unwrap_self)(i) for i in zip([self]*self.nprocesses, [verybestvalues]*self.nprocesses,[int(float(niters)/float(self.nprocesses))]*self.nprocesses,[numatonce]*self.nprocesses,[verybestknob]*self.nprocesses, [False]*self.nprocesses, [True]*self.nprocesses,np.arange(self.nprocesses)))
    results=np.array(results,dtype=object)
    par._terminate_backend()
    get_reusable_executor().shutdown(wait=True)
    # Stitch the per-worker accepted samples back together.
    outputvalue= np.concatenate(results[:,0],axis=1)
    outputll= np.concatenate(results[:,1])
    if outputll.size < 1:
        print('WARNING: No accepted models. Perhaps you need to increase the number of steps?')
    else:
        if not self.silent:
            # Report median and asymmetric 1-sigma (15.86/84.14 percentile)
            # uncertainties for each parameter.
            w,=np.where(outputll == np.max(outputll))
            perc = np.percentile(outputvalue, [15.86, 50, 84.14], axis=1)
            sig_bestfit_up = (perc[2][:] - perc[1][:])
            sig_bestfit_down = (perc[1][:] - perc[0][:])
            print("Final best fit values and 1sigma errors:")
            for i in range(0,self.labels.size):
                if self.fixed[i]:
                    print(" "+self.labels[i]+":",perc[1][i],"(Fixed)")
                else:
                    # Symmetrise the error bar when up/down agree to ~10%.
                    if np.abs(((sig_bestfit_up[i]/sig_bestfit_down[i])-1)) < 0.1:
                        print(" "+self.labels[i]+":",perc[1][i],"±",np.mean([sig_bestfit_up[i],sig_bestfit_down[i]]))
                    else:
                        print(" "+self.labels[i]+":",perc[1][i],"+",sig_bestfit_up[i],"-",sig_bestfit_down[i])
    return outputvalue, outputll
| 16,793 | 38.146853 | 312 | py |
python-pesq | python-pesq-master/setup.py | from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext as _build_ext
import os
# Header search paths for compiling the C extension.
includes = ['pypesq']

# Best effort: if numpy is already importable, add its C headers now.
# If it is not installed yet, the custom build_ext command defined below
# appends them after the setup_requires machinery has installed numpy.
try:
    import numpy as np
    includes += [os.path.join(np.get_include(), 'numpy')]
except ImportError:
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only a missing numpy should be tolerated here.
    pass

extension = Extension("pesq_core",
                      sources=["pypesq/pesq.c", "pypesq/dsp.c", "pypesq/pesqdsp.c",
                               "pypesq/pesqio.c", "pypesq/pesqmain.c", "pypesq/pesqmod.c"],
                      include_dirs=includes,
                      language='c')
class build_ext(_build_ext):
    """A build_ext command that resolves numpy's include dir at build time.

    This lets ``setup_requires=['numpy']`` install numpy first; the header
    directory is appended once the command options are finalized.
    """

    def finalize_options(self):
        # Run the standard finalization first.
        super(build_ext, self).finalize_options()
        # Clear numpy's "setup is running" flag so it imports normally.
        # Fails with AttributeError when __builtins__ is a dict (harmless
        # if numpy is already installed).
        try:
            __builtins__.__NUMPY_SETUP__ = False
        except AttributeError:
            print("Cannot set '__builtins__.__NUMPY_SETUP__ = False' This is not needed if numpy is already installed.")
        import numpy as _numpy
        self.include_dirs.append(_numpy.get_include())
# Package metadata / build configuration.
# Fix: removed ``py_modules=['numpy']`` — py_modules declares pure-Python
# modules distributed *by this package*, and numpy is not one; it is only a
# dependency, already covered by setup_requires / install_requires.
setup(name='pypesq',
      version='1.2.4',
      description="A package to compute pesq score.",
      url='https://github.com/vBaiCai/python-pesq',
      author_email='zhuroubaicai@gmail.com',
      keywords=['pesq', 'speech', 'speech quality'],
      license='MIT',
      packages=find_packages(),
      ext_modules=[extension],
      cmdclass={'build_ext': build_ext},
      setup_requires=['numpy'],
      zip_safe=False,
      install_requires=['numpy'],
      python_requires='!=3.0.*, !=3.1.*, !=3.2.*, <4',
      )
| 1,498 | 30.893617 | 120 | py |
python-pesq | python-pesq-master/pypesq/__init__.py | import warnings
import numpy as np
from pesq_core import _pesq
from math import fabs
# Amplitudes at or below this peak level are treated as silence when normalizing.
EPSILON = 1e-6


def pesq(ref, deg, fs=16000, normalize=False):
    """Compute the PESQ score of a degraded signal against a reference.

    Parameters
    ----------
    ref : array_like
        1-D reference signal.
    deg : array_like
        1-D degraded signal; its length may differ from ``ref`` by at most
        ``fs / 4`` samples.
    fs : int
        Sample rate; must be 8000 or 16000.
    normalize : bool
        If True, peak-normalize each signal before scoring.

    Returns
    -------
    float
        PESQ score, or NaN if the underlying C routine fails.

    Raises
    ------
    ValueError
        If the inputs are not 1-D, the sample rate is unsupported, the
        lengths differ by more than fs/4 samples, or a signal is all zeros.
    """
    ref = np.array(ref, copy=True)
    deg = np.array(deg, copy=True)

    if normalize:
        # Fix: compare the *peak* against EPSILON. The original compared the
        # whole array (`np.abs(ref) > EPSILON`), which raises ValueError for
        # any signal longer than one sample.
        ref_peak = np.max(np.abs(ref))
        deg_peak = np.max(np.abs(deg))
        if ref_peak > EPSILON:
            ref = ref / ref_peak
        if deg_peak > EPSILON:
            deg = deg / deg_peak

    # Rescale both signals jointly if either exceeds full scale.
    # (Computed per-array so signals of unequal length are supported; the
    # original np.array([ref, deg]) required equal lengths.)
    max_sample = max(np.max(np.abs(ref)), np.max(np.abs(deg)))
    if max_sample > 1:
        c = 1 / max_sample
        ref = ref * c
        deg = deg * c

    if ref.ndim != 1 or deg.ndim != 1:
        raise ValueError("signals must be 1-D array ")

    if fs not in [16000, 8000]:
        raise ValueError("sample rate must be 16000 or 8000")

    if abs(ref.shape[0] - deg.shape[0]) > fs / 4:
        raise ValueError("ref and deg signals should be in same length.")

    if np.count_nonzero(ref == 0) == ref.size:
        raise ValueError("ref is all zeros, processing error! ")

    if np.count_nonzero(deg == 0) == deg.size:
        raise ValueError("deg is all zeros, pesq score is nan! ")

    # The C routine expects 16-bit PCM; scale float signals to int16 range.
    if ref.dtype != np.int16:
        ref *= 32767
        ref = ref.astype(np.int16)

    if deg.dtype != np.int16:
        deg *= 32767
        deg = deg.astype(np.int16)

    try:
        score = _pesq(ref, deg, fs)
    except Exception:
        # Fix: was a bare `except:`; keep the best-effort NaN fallback but
        # let KeyboardInterrupt/SystemExit propagate.
        warnings.warn('Processing Error! return NaN')
        score = np.nan  # np.NAN was removed in NumPy 2.0
    return score
| 1,501 | 25.350877 | 73 | py |
scaper | scaper-master/setup.py | from setuptools import setup
import imp
# NOTE(review): the `imp` module was removed in Python 3.12; this works for
# the py2/py3(<3.12) range this setup.py targets, but will need porting to
# importlib.util if newer interpreters must run it — confirm target range.

# Use the README as the long description shown on PyPI.
with open('README.md') as file:
    long_description = file.read()

# Load scaper/version.py as a standalone module to read the version string
# without importing the (possibly not-yet-installed) scaper package itself.
version = imp.load_source('scaper.version', 'scaper/version.py')

setup(
    name='scaper',
    version=version.version,
    description='A library for soundscape synthesis and augmentation',
    author='Justin Salamon & Duncan MacConnell',
    author_email='justin.salamon@gmail.com',
    url='https://github.com/justinsalamon/scaper',
    download_url='http://github.com/justinsalamon/scaper/releases',
    packages=['scaper'],
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='audio sound soundscape environmental dsp mixing',
    license='BSD-3-Clause',
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Multimedia :: Sound/Audio :: Analysis",
        "Topic :: Multimedia :: Sound/Audio :: Sound Synthesis",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
    ],
    install_requires=[
        'sox==1.4.0',
        'jams>=0.3.2',
        'numpy>=1.13.3',
        "soxbindings>=1.2.2;platform_system!='Windows'",
        'pyloudnorm',
        'soundfile',
    ],
    extras_require={
        'docs': [
            'sphinx',  # autodoc was broken in 1.3.1
            'sphinx_rtd_theme',
            'sphinx_issues',
        ],
        'tests': ['backports.tempfile', 'pytest', 'pytest-cov', 'tqdm']
    }
)
| 1,903 | 33.618182 | 71 | py |
scaper | scaper-master/tests/test_core.py |
import scaper
from scaper.scaper_exceptions import ScaperError
from scaper.scaper_warnings import ScaperWarning
from scaper.util import _close_temp_files
import pytest
from scaper.core import EventSpec
import tempfile
import backports.tempfile
import os
import numpy as np
import soundfile
import jams
import numbers
from collections import namedtuple
from copy import deepcopy
import shutil
from contextlib import contextmanager
import csv
# FIXTURES
# Paths to files for testing
FG_PATH = 'tests/data/audio/foreground'
BG_PATH = 'tests/data/audio/background'
SHORT_BG_PATH = 'tests/data/audio/short_background'
ALT_FG_PATH = 'tests/data/audio_alt_path/foreground'
ALT_BG_PATH = 'tests/data/audio_alt_path/background'
# fg and bg labels for testing
FB_LABELS = ['car_horn', 'human_voice', 'siren']
BG_LABELS = ['park', 'restaurant', 'street']
_EXTS = ('wav', 'jams', 'txt')
_TestFiles = namedtuple('TestFiles', _EXTS)
SAMPLE_RATES = [44100, 22050]
def _get_test_paths(name):
return _TestFiles(*[
os.path.join('tests/data/regression/', name + '.' + ext)
for ext in _EXTS
])
TEST_PATHS = {
22050: {
'REG': _get_test_paths('soundscape_20200501_22050'),
'REG_BGONLY': _get_test_paths('bgonly_soundscape_20200501_22050'),
'REG_REVERB': _get_test_paths('reverb_soundscape_20200501_22050'),
},
44100: {
'REG': _get_test_paths('soundscape_20200501_44100'),
'REG_BGONLY': _get_test_paths('bgonly_soundscape_20200501_44100'),
'REG_REVERB': _get_test_paths('reverb_soundscape_20200501_44100'),
},
}
def _compare_scaper_jams(jam, regjam, exclude_additional_scaper_sandbox_keys=None):
    """
    Check whether two scaper jams objects are equal up to floating point
    precision, ignoring jams_version and scaper_version.
    Parameters
    ----------
    jam : JAMS
        In memory jams object
    regjam : JAMS
        Regression jams (loaded from disk)
    exclude_additional_scaper_sandbox_keys : list or None
        Extra scaper-sandbox keys to skip when comparing. Defaults to None
        (no extra keys skipped).
    Raises
    ------
    AssertionError
        If the comparison fails.
    """
    # Fix: use a None sentinel instead of a mutable default argument ([]).
    if exclude_additional_scaper_sandbox_keys is None:
        exclude_additional_scaper_sandbox_keys = []
    # Note: can't compare directly, since:
    # 1. scaper/and jams library versions may change
    # 2. raw annotation sandbox stores specs as OrderedDict and tuples, whereas
    # loaded ann (regann) simplifies those to dicts and lists
    # 3. floats might be marginally different (need to use np.allclose())
    # Must compare each part "manually"
    # 1. compare file metadata
    for k in set(jam.file_metadata.keys()) | set(regjam.file_metadata.keys()):
        if k != 'jams_version':
            assert jam.file_metadata[k] == regjam.file_metadata[k]
    # 2. compare jams sandboxes
    assert jam.sandbox == regjam.sandbox
    # 3. compare annotations
    assert len(jam.annotations) == len(regjam.annotations) == 1
    ann = jam.annotations[0]
    regann = regjam.annotations[0]
    # 3.1 compare annotation metadata
    assert ann.annotation_metadata == regann.annotation_metadata
    # 3.2 compare sandboxes
    # Note: can't compare sandboxes directly, since in raw jam scaper sandbox
    # stores event specs in EventSpec object (named tuple), whereas in loaded
    # jam these will get converted to list of lists.
    # assert ann.sandbox == regann.sandbox
    assert len(ann.sandbox.keys()) == len(regann.sandbox.keys()) == 1
    assert 'scaper' in ann.sandbox.keys()
    assert 'scaper' in regann.sandbox.keys()
    excluded_scaper_sandbox_keys = [
        'bg_spec', 'fg_spec', 'scaper_version', 'soundscape_audio_path',
        'isolated_events_audio_path',
    ]
    excluded_scaper_sandbox_keys.extend(exclude_additional_scaper_sandbox_keys)
    # everything but the specs and version can be compared directly:
    for k in set(ann.sandbox.scaper.keys()) | set(regann.sandbox.scaper.keys()):
        if k not in excluded_scaper_sandbox_keys:
            assert ann.sandbox.scaper[k] == regann.sandbox.scaper[k], (
                'Unequal values for "{}"'.format(k))
    # to compare specs need to covert raw specs to list of lists
    bg_spec_list = [[list(x) if isinstance(x, tuple) else x for x in e] for e in
                    ann.sandbox.scaper['bg_spec']]
    fg_spec_list = [[list(x) if isinstance(x, tuple) else x for x in e] for e in
                    ann.sandbox.scaper['fg_spec']]
    assert (fg_spec_list == regann.sandbox.scaper['fg_spec'])
    assert (bg_spec_list == regann.sandbox.scaper['bg_spec'])
    # 3.3. compare namespace, time and duration
    assert ann.namespace == regann.namespace
    assert ann.time == regann.time
    assert ann.duration == regann.duration
    # 3.4 compare data
    for obs, regobs in zip(ann.data, regann.data):
        # compare time, duration and confidence
        assert np.allclose(obs.time, regobs.time)
        assert np.allclose(obs.duration, regobs.duration)
        assert np.allclose(obs.confidence, regobs.confidence)
        # compare value dictionaries
        v, regv = obs.value, regobs.value
        for k in set(v.keys()) | set(regv.keys()):
            if isinstance(v[k], numbers.Number):
                assert np.allclose(v[k], regv[k])
            else:
                assert v[k] == regv[k]
def _compare_txt_annotation(orig_txt_file, gen_txt_file):
# if string - read in both files.
# if list, assume data already loaded
if type(orig_txt_file) is str:
txt_data = _load_txt_annotation(orig_txt_file)
else:
txt_data = orig_txt_file
if type(gen_txt_file) is str:
gen_txt_data = _load_txt_annotation(gen_txt_file)
else:
gen_txt_data = gen_txt_file
# compare start and end times
for txt_row, gen_txt_row in zip(txt_data, gen_txt_data):
assert np.allclose(txt_row[:2], gen_txt_row[:2]) # compare times
assert txt_row[2] == gen_txt_row[2] # compare labels
def _load_txt_annotation(txt_file, delimiter='\t'):
"""
Load a tab-delimited simplified annotation as a list of lists. Assumes
first two elements are floats (start/end times) and the remaining item is a
string (the label).
Parameters
----------
txt_file : str
Path to simplified annotation txt file.
Returns
-------
data : list
List of annotation data, each row is one sound event represented as:
[start_time, end_time, label].
"""
data = []
with open(txt_file, 'r') as f:
reader = csv.reader(f, delimiter=delimiter)
for row in reader:
row = [float(x) if n <= 1 else x for n, x in enumerate(row)]
data.append(row)
return data
def test_generate_from_jams(atol=1e-5, rtol=1e-8):
    """End-to-end tests for scaper.generate_from_jams.

    Covers: invalid JAMS input, legacy JAMS files missing newer sandbox
    fields, full round-trips (generate -> generate_from_jams), JAMS-only
    generation (with/without clipping and quick_pitch_time), single/double/
    triple trimming, and regeneration with alternate fg/bg paths.

    Fix applied throughout: ``txt_sep`` is compared with ``==`` rather than
    ``is`` (identity comparison against a str literal is fragile and emits
    a SyntaxWarning on CPython >= 3.8).
    """
    # Test for invalid jams: no annotations
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        jam = jams.JAMS()
        jam.file_metadata.duration = 10
        jam_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        gen_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        jam.save(jam_file.name)
        pytest.raises(ScaperError, scaper.generate_from_jams, jam_file.name,
                      gen_file.name)
    # # Make sure we can load an old JAM file that doesn't have fix_cilpping or peak_normalization
    old_jam_file = 'tests/data/regression/soundscape_20200501_44100_no_clipping_normalization_fields.jams'
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        gen_audio_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        gen_jam_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        gen_txt_file = tempfile.NamedTemporaryFile(suffix='.txt', delete=True)
        tmpfiles.extend([gen_audio_file, gen_jam_file, gen_txt_file])
        (fj_soundscape_audio, fj_soundscape_jam, fj_annotation_list, fj_event_audio_list) = \
            scaper.generate_from_jams(old_jam_file,
                                      audio_outfile=gen_audio_file.name,
                                      jams_outfile=gen_jam_file.name,
                                      txt_path=gen_txt_file.name)
        # validate return API
        orig_wav, sr = soundfile.read(TEST_PATHS[44100]['REG'].wav, always_2d=True)
        assert np.allclose(orig_wav, fj_soundscape_audio)
        regjam = jams.load(TEST_PATHS[44100]['REG'].jams)
        sandbox_exclude = ['fix_clipping', 'peak_normalization', 'quick_pitch_time']
        _compare_scaper_jams(
            regjam, fj_soundscape_jam,
            exclude_additional_scaper_sandbox_keys=sandbox_exclude)
        # _compare_txt_annotation(annotation_list, fj_annotation_list)  # TODO
        # TODO:
        # for event, fj_event in zip(event_audio_list, fj_event_audio_list):
        #     assert np.allclose(event, fj_event, atol=1e-8, rtol=rtol)
        # validate soundscape audio written to disk
        gen_wav, sr = soundfile.read(gen_audio_file.name, always_2d=True)
        assert np.allclose(gen_wav, orig_wav, atol=atol, rtol=rtol)
        # validate jams
        sandbox_exclude = ['fix_clipping', 'peak_normalization', 'quick_pitch_time']
        gen_jam = jams.load(gen_jam_file.name)
        _compare_scaper_jams(
            regjam, gen_jam,
            exclude_additional_scaper_sandbox_keys=sandbox_exclude)
        # validate annotation txt
        _compare_txt_annotation(TEST_PATHS[44100]['REG'].txt, gen_txt_file.name)
    # Test for valid jams file
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        # Create all necessary temp files
        orig_wav_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        orig_jam_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        orig_txt_file = tempfile.NamedTemporaryFile(suffix='.txt', delete=True)
        gen_wav_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        gen_jam_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        gen_txt_file = tempfile.NamedTemporaryFile(suffix='.txt', delete=True)
        tmpfiles.append(orig_wav_file)
        tmpfiles.append(orig_jam_file)
        tmpfiles.append(orig_txt_file)
        tmpfiles.append(gen_wav_file)
        tmpfiles.append(gen_jam_file)
        tmpfiles.append(gen_txt_file)
        # --- Define scaper --- *
        sc = scaper.Scaper(10, FG_PATH, BG_PATH)
        sc.protected_labels = []
        sc.ref_db = -50
        sc.add_background(label=('choose', []),
                          source_file=('choose', []),
                          source_time=('const', 0))
        # Add 5 events
        for _ in range(5):
            sc.add_event(label=('choose', []),
                         source_file=('choose', []),
                         source_time=('const', 0),
                         event_time=('uniform', 0, 9),
                         event_duration=('choose', [1, 2, 3]),
                         snr=('uniform', 10, 20),
                         pitch_shift=('uniform', -1, 1),
                         time_stretch=('uniform', 0.8, 1.2))
        # --- Define CLIPPING scaper --- *
        sc_clipping = scaper.Scaper(10, FG_PATH, BG_PATH)
        sc_clipping.protected_labels = []
        sc_clipping.ref_db = 0
        sc_clipping.add_background(label=('choose', []),
                                   source_file=('choose', []),
                                   source_time=('const', 0))
        # Add 5 events
        for _ in range(5):
            sc_clipping.add_event(label=('choose', []),
                                  source_file=('choose', []),
                                  source_time=('const', 0),
                                  event_time=('uniform', 0, 9),
                                  event_duration=('choose', [1, 2, 3]),
                                  snr=('uniform', 20, 30),
                                  pitch_shift=('uniform', -1, 1),
                                  time_stretch=('uniform', 0.8, 1.2))
        def _validate_soundscape_and_event_audio(orig_wav_file,
                                                 gen_wav_file,
                                                 gen_events_path,
                                                 orig_events_path):
            # validate audio
            orig_wav, sr = soundfile.read(orig_wav_file.name)
            gen_wav, sr = soundfile.read(gen_wav_file.name)
            assert np.allclose(gen_wav, orig_wav, atol=atol, rtol=rtol)
            # validate that the sum of event audio sums to trimmed soundscape
            gen_event_files = [
                os.path.join(gen_events_path, x)
                for x in sorted(os.listdir(gen_events_path))
            ]
            gen_audio = [soundfile.read(x)[0] for x in gen_event_files]
            # Trim does not currently support trimming isolated events, but if/when
            # we add that functionality, this test should be updated to test that
            # as well, using the files in orig_events_path (currently unused).
            assert np.allclose(gen_wav, sum(gen_audio), atol=1e-8, rtol=rtol)
        # generate, then generate from the jams and compare audio files
        # repeat 5 times
        for _ in range(5):
            (soundscape_audio, soundscape_jam, annotation_list, event_audio_list) = \
                sc.generate(audio_path=orig_wav_file.name,
                            jams_path=orig_jam_file.name,
                            txt_path=orig_txt_file.name,
                            disable_instantiation_warnings=True)
            (fj_soundscape_audio, fj_soundscape_jam, fj_annotation_list, fj_event_audio_list) = \
                scaper.generate_from_jams(orig_jam_file.name,
                                          audio_outfile=gen_wav_file.name,
                                          jams_outfile=gen_jam_file.name,
                                          txt_path=gen_txt_file.name)
            # validate return API
            assert np.allclose(soundscape_audio, fj_soundscape_audio)
            _compare_scaper_jams(soundscape_jam, fj_soundscape_jam)
            _compare_txt_annotation(annotation_list, fj_annotation_list)
            for event, fj_event in zip(event_audio_list, fj_event_audio_list):
                assert np.allclose(event, fj_event, atol=1e-8, rtol=rtol)
            # validate soundscape audio
            orig_wav, sr = soundfile.read(orig_wav_file.name)
            gen_wav, sr = soundfile.read(gen_wav_file.name)
            assert np.allclose(gen_wav, orig_wav, atol=atol, rtol=rtol)
            # validate jams
            orig_jam = jams.load(orig_jam_file.name)
            gen_jam = jams.load(gen_jam_file.name)
            _compare_scaper_jams(orig_jam, gen_jam)
            # validate annotation txt
            _compare_txt_annotation(orig_txt_file.name, gen_txt_file.name)
        # Test when we generate ONLY a JAMS file, and then generate audio from the JAMS
        # Case 1: without clipping
        for _ in range(5):
            (soundscape_audio, soundscape_jam, annotation_list, event_audio_list) = \
                sc.generate(audio_path=orig_wav_file.name,
                            jams_path=orig_jam_file.name,
                            txt_path=orig_txt_file.name,
                            no_audio=True,
                            disable_instantiation_warnings=True)
            assert soundscape_audio is None
            assert event_audio_list is None
            assert soundscape_jam is not None
            assert annotation_list is not None
            ann = soundscape_jam.annotations.search(namespace='scaper')[0]
            assert ann.sandbox.scaper.audio_path == orig_wav_file.name
            assert ann.sandbox.scaper.jams_path == orig_jam_file.name
            assert ann.sandbox.scaper.fix_clipping is False
            assert ann.sandbox.scaper.peak_normalization is False
            assert ann.sandbox.scaper.quick_pitch_time is False
            assert ann.sandbox.scaper.save_isolated_events is False
            assert ann.sandbox.scaper.isolated_events_path is None
            assert ann.sandbox.scaper.disable_sox_warnings is True
            assert ann.sandbox.scaper.no_audio is True
            assert ann.sandbox.scaper.txt_path == orig_txt_file.name
            assert ann.sandbox.scaper.txt_sep == '\t'  # fixed: was `is '\t'`
            assert ann.sandbox.scaper.disable_instantiation_warnings is True
            assert ann.sandbox.scaper.peak_normalization_scale_factor == 1.0
            assert ann.sandbox.scaper.ref_db_change == 0
            assert ann.sandbox.scaper.ref_db_generated == \
                   ann.sandbox.scaper.ref_db
            (fj_soundscape_audio, fj_soundscape_jam, fj_annotation_list, fj_event_audio_list) = \
                scaper.generate_from_jams(orig_jam_file.name,
                                          audio_outfile=gen_wav_file.name,
                                          jams_outfile=gen_jam_file.name,
                                          txt_path=gen_txt_file.name)
            # validate return API
            _compare_scaper_jams(soundscape_jam, fj_soundscape_jam)
            _compare_txt_annotation(annotation_list, fj_annotation_list)
        # Test when we generate ONLY a JAMS file, and then generate audio from the JAMS
        # Case 2: WITH CLIPPING
        for _ in range(5):
            (soundscape_audio, soundscape_jam, annotation_list, event_audio_list) = \
                sc_clipping.generate(audio_path=orig_wav_file.name,
                                     jams_path=orig_jam_file.name,
                                     txt_path=orig_txt_file.name,
                                     no_audio=True,
                                     fix_clipping=True,
                                     disable_instantiation_warnings=True)
            assert soundscape_audio is None
            assert event_audio_list is None
            assert soundscape_jam is not None
            assert annotation_list is not None
            ann = soundscape_jam.annotations.search(namespace='scaper')[0]
            assert ann.sandbox.scaper.audio_path == orig_wav_file.name
            assert ann.sandbox.scaper.jams_path == orig_jam_file.name
            assert ann.sandbox.scaper.fix_clipping is True
            assert ann.sandbox.scaper.peak_normalization is False
            assert ann.sandbox.scaper.quick_pitch_time is False
            assert ann.sandbox.scaper.save_isolated_events is False
            assert ann.sandbox.scaper.isolated_events_path is None
            assert ann.sandbox.scaper.disable_sox_warnings is True
            assert ann.sandbox.scaper.no_audio is True
            assert ann.sandbox.scaper.txt_path == orig_txt_file.name
            assert ann.sandbox.scaper.txt_sep == '\t'  # fixed: was `is '\t'`
            assert ann.sandbox.scaper.disable_instantiation_warnings is True
            assert ann.sandbox.scaper.peak_normalization_scale_factor == 1.0
            assert ann.sandbox.scaper.ref_db_change == 0
            assert ann.sandbox.scaper.ref_db_generated == \
                   ann.sandbox.scaper.ref_db
            (fj_soundscape_audio, fj_soundscape_jam, fj_annotation_list, fj_event_audio_list) = \
                scaper.generate_from_jams(orig_jam_file.name,
                                          audio_outfile=gen_wav_file.name,
                                          jams_outfile=gen_jam_file.name,
                                          txt_path=gen_txt_file.name)
            assert fj_soundscape_audio is not None
            assert fj_event_audio_list is not None
            assert fj_soundscape_jam is not None
            assert fj_annotation_list is not None
            ann = fj_soundscape_jam.annotations.search(namespace='scaper')[0]
            # assert ann.sandbox.scaper.audio_path == gen_wav_file.name
            # assert ann.sandbox.scaper.jams_path == gen_jam_file.name
            assert ann.sandbox.scaper.fix_clipping is True
            assert ann.sandbox.scaper.peak_normalization is False
            assert ann.sandbox.scaper.quick_pitch_time is False
            assert ann.sandbox.scaper.save_isolated_events is False
            assert ann.sandbox.scaper.isolated_events_path is None
            assert ann.sandbox.scaper.disable_sox_warnings is True
            assert ann.sandbox.scaper.no_audio is True  # TODO
            # assert ann.sandbox.scaper.txt_path == gen_txt_file.name
            assert ann.sandbox.scaper.txt_sep == '\t'  # fixed: was `is '\t'`
            assert ann.sandbox.scaper.disable_instantiation_warnings is True
            assert ann.sandbox.scaper.peak_normalization_scale_factor != 1.0
            assert ann.sandbox.scaper.ref_db_change != 0
            assert ann.sandbox.scaper.ref_db_generated != \
                   ann.sandbox.scaper.ref_db
        # Test when we generate ONLY a JAMS file, and then generate audio from the JAMS
        # Case 3: WITH quick_pitch_time=True, no clipping
        for _ in range(5):
            (soundscape_audio, soundscape_jam, annotation_list, event_audio_list) = \
                sc.generate(audio_path=orig_wav_file.name,
                            jams_path=orig_jam_file.name,
                            txt_path=orig_txt_file.name,
                            no_audio=True,
                            fix_clipping=True,
                            quick_pitch_time=True,
                            disable_instantiation_warnings=True)
            assert soundscape_audio is None
            assert event_audio_list is None
            assert soundscape_jam is not None
            assert annotation_list is not None
            ann = soundscape_jam.annotations.search(namespace='scaper')[0]
            assert ann.sandbox.scaper.audio_path == orig_wav_file.name
            assert ann.sandbox.scaper.jams_path == orig_jam_file.name
            assert ann.sandbox.scaper.fix_clipping is True
            assert ann.sandbox.scaper.peak_normalization is False
            assert ann.sandbox.scaper.quick_pitch_time is True
            assert ann.sandbox.scaper.save_isolated_events is False
            assert ann.sandbox.scaper.isolated_events_path is None
            assert ann.sandbox.scaper.disable_sox_warnings is True
            assert ann.sandbox.scaper.no_audio is True
            assert ann.sandbox.scaper.txt_path == orig_txt_file.name
            assert ann.sandbox.scaper.txt_sep == '\t'  # fixed: was `is '\t'`
            assert ann.sandbox.scaper.disable_instantiation_warnings is True
            assert ann.sandbox.scaper.peak_normalization_scale_factor == 1.0
            assert ann.sandbox.scaper.ref_db_change == 0
            assert ann.sandbox.scaper.ref_db_generated == \
                   ann.sandbox.scaper.ref_db
            (fj_soundscape_audio, fj_soundscape_jam, fj_annotation_list, fj_event_audio_list) = \
                scaper.generate_from_jams(orig_jam_file.name,
                                          audio_outfile=gen_wav_file.name,
                                          jams_outfile=gen_jam_file.name,
                                          txt_path=gen_txt_file.name)
            assert fj_soundscape_audio is not None
            assert fj_event_audio_list is not None
            assert fj_soundscape_jam is not None
            assert fj_annotation_list is not None
            ann = fj_soundscape_jam.annotations.search(namespace='scaper')[0]
            # assert ann.sandbox.scaper.audio_path == gen_wav_file.name
            # assert ann.sandbox.scaper.jams_path == gen_jam_file.name
            assert ann.sandbox.scaper.fix_clipping is True
            assert ann.sandbox.scaper.peak_normalization is False
            assert ann.sandbox.scaper.quick_pitch_time is True
            assert ann.sandbox.scaper.save_isolated_events is False
            assert ann.sandbox.scaper.isolated_events_path is None
            assert ann.sandbox.scaper.disable_sox_warnings is True
            assert ann.sandbox.scaper.no_audio is True  # TODO
            # assert ann.sandbox.scaper.txt_path == gen_txt_file.name
            assert ann.sandbox.scaper.txt_sep == '\t'  # fixed: was `is '\t'`
            assert ann.sandbox.scaper.disable_instantiation_warnings is True
            assert ann.sandbox.scaper.peak_normalization_scale_factor == 1.0
            assert ann.sandbox.scaper.ref_db_change == 0
            assert ann.sandbox.scaper.ref_db_generated == \
                   ann.sandbox.scaper.ref_db
            # validate return API
            # _compare_scaper_jams(soundscape_jam, fj_soundscape_jam)
            # _compare_txt_annotation(annotation_list, fj_annotation_list)
        # Now add in trimming!
        for _ in range(5):
            with backports.tempfile.TemporaryDirectory() as isolated_events_path:
                orig_events_path = os.path.join(isolated_events_path, 'original')
                gen_events_path = os.path.join(isolated_events_path, 'generated')
                os.makedirs(orig_events_path)
                os.makedirs(gen_events_path)
                sc.generate(audio_path=orig_wav_file.name,
                            jams_path=orig_jam_file.name,
                            txt_path=orig_txt_file.name,
                            disable_instantiation_warnings=True,
                            save_isolated_events=True,
                            isolated_events_path=orig_events_path)
                scaper.trim(orig_wav_file.name, orig_jam_file.name,
                            orig_wav_file.name, orig_jam_file.name,
                            np.random.uniform(0, 5), np.random.uniform(5, 10))
                scaper.generate_from_jams(orig_jam_file.name,
                                          audio_outfile=gen_wav_file.name,
                                          txt_path=gen_txt_file.name,
                                          save_isolated_events=True,
                                          isolated_events_path=gen_events_path)
                _validate_soundscape_and_event_audio(orig_wav_file, gen_wav_file,
                                                     gen_events_path, orig_events_path)
                # TODO
                # _compare_txt_annotation(orig_txt_file.name, gen_txt_file.name)
        # Double trimming
        for _ in range(2):
            with backports.tempfile.TemporaryDirectory() as isolated_events_path:
                orig_events_path = os.path.join(isolated_events_path, 'original')
                gen_events_path = os.path.join(isolated_events_path, 'generated')
                os.makedirs(orig_events_path)
                os.makedirs(gen_events_path)
                sc.generate(audio_path=orig_wav_file.name,
                            jams_path=orig_jam_file.name,
                            txt_path=orig_txt_file.name,
                            disable_instantiation_warnings=True,
                            save_isolated_events=True,
                            isolated_events_path=orig_events_path)
                scaper.trim(orig_wav_file.name, orig_jam_file.name,
                            orig_wav_file.name, orig_jam_file.name,
                            np.random.uniform(0, 2), np.random.uniform(8, 10))
                scaper.trim(orig_wav_file.name, orig_jam_file.name,
                            orig_wav_file.name, orig_jam_file.name,
                            np.random.uniform(0, 2), np.random.uniform(4, 6))
                scaper.generate_from_jams(orig_jam_file.name, gen_wav_file.name,
                                          save_isolated_events=True,
                                          isolated_events_path=gen_events_path)
                _validate_soundscape_and_event_audio(orig_wav_file, gen_wav_file,
                                                     gen_events_path, orig_events_path)
                # TODO
                # _compare_txt_annotation(orig_txt_file.name, gen_txt_file.name)
        # Triple trimming
        for _ in range(2):
            with backports.tempfile.TemporaryDirectory() as isolated_events_path:
                orig_events_path = os.path.join(isolated_events_path, 'original')
                gen_events_path = os.path.join(isolated_events_path, 'generated')
                os.makedirs(orig_events_path)
                os.makedirs(gen_events_path)
                sc.generate(audio_path=orig_wav_file.name,
                            jams_path=orig_jam_file.name,
                            txt_path=orig_txt_file.name,
                            disable_instantiation_warnings=True,
                            save_isolated_events=True,
                            isolated_events_path=orig_events_path)
                scaper.trim(orig_wav_file.name, orig_jam_file.name,
                            orig_wav_file.name, orig_jam_file.name,
                            np.random.uniform(0, 2), np.random.uniform(8, 10))
                scaper.trim(orig_wav_file.name, orig_jam_file.name,
                            orig_wav_file.name, orig_jam_file.name,
                            np.random.uniform(0, 1), np.random.uniform(5, 6))
                scaper.trim(orig_wav_file.name, orig_jam_file.name,
                            orig_wav_file.name, orig_jam_file.name,
                            np.random.uniform(0, 1), np.random.uniform(3, 4))
                scaper.generate_from_jams(orig_jam_file.name, gen_wav_file.name,
                                          save_isolated_events=True,
                                          isolated_events_path=gen_events_path)
                _validate_soundscape_and_event_audio(orig_wav_file, gen_wav_file,
                                                     gen_events_path, orig_events_path)
                # TODO
                # _compare_txt_annotation(orig_txt_file.name, gen_txt_file.name)
        # Test with new FG and BG paths
        for _ in range(5):
            (soundscape_audio, soundscape_jam, annotation_list, event_audio_list) = \
                sc.generate(audio_path=orig_wav_file.name,
                            jams_path=orig_jam_file.name,
                            txt_path=orig_txt_file.name,
                            disable_instantiation_warnings=True)
            (fj_soundscape_audio, fj_soundscape_jam, fj_annotation_list, fj_event_audio_list) = \
                scaper.generate_from_jams(orig_jam_file.name,
                                          audio_outfile=gen_wav_file.name,
                                          txt_path=gen_txt_file.name,
                                          fg_path=ALT_FG_PATH,
                                          bg_path=ALT_BG_PATH)
            # validate return API
            assert np.allclose(soundscape_audio, fj_soundscape_audio)
            # TODO: can't compare jams due to change in FG/BG
            # Need to be able to ignore specific keys in the event value dict
            # exclude_sandbox = ['fg_path', 'bg_path']
            # _compare_scaper_jams(
            #     soundscape_jam, fj_soundscape_jam,
            #     exclude_additional_scaper_sandbox_keys=exclude_sandbox)
            _compare_txt_annotation(annotation_list, fj_annotation_list)
            for event, fj_event in zip(event_audio_list, fj_event_audio_list):
                assert np.allclose(event, fj_event, atol=1e-8, rtol=rtol)
            # validate soundscape audio
            orig_wav, sr = soundfile.read(orig_wav_file.name)
            gen_wav, sr = soundfile.read(gen_wav_file.name)
            assert np.allclose(gen_wav, orig_wav, atol=atol, rtol=rtol)
            # TODO: can't compare jams due to change in FG/BG
            # In the future update jam comparison such that any item can be
            # excluded from the comparison on demand, which would allow for this
            # test here.
            # orig_jam = jams.load(orig_jam_file.name)
            # gen_jam = jams.load(gen_jam_file.name)
            # _compare_scaper_jams(orig_jam, gen_jam)
            # validate annotation txt
            _compare_txt_annotation(orig_txt_file.name, gen_txt_file.name)
        # Ensure jam file saved correctly
        scaper.generate_from_jams(orig_jam_file.name,
                                  audio_outfile=gen_wav_file.name,
                                  jams_outfile=gen_jam_file.name)
        orig_jam = jams.load(orig_jam_file.name)
        gen_jam = jams.load(gen_jam_file.name)
        _compare_scaper_jams(orig_jam, gen_jam)
def test_trim(atol=1e-5, rtol=1e-8):
    """Test scaper.trim on a generated soundscape (see itemized list below).

    Builds a 10 s soundscape with 5 fixed sirens, trims it to [3, 7], and
    checks the sliced JAMS annotation and the trimmed audio samples.
    """
    # Things we want to test:
    # 1. Jam trimmed correctly (mainly handled by jams.slice)
    # 2. value dict updated correctly (event_time, event_duration, source_time)
    # 3. scaper sandbox updated correctly (n_events, poly, gini, duration)
    # 4. audio trimmed correctly
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        # Create all necessary temp files
        orig_wav_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        orig_jam_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        trim_wav_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        trim_jam_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        trimstrict_wav_file = tempfile.NamedTemporaryFile(
            suffix='.wav', delete=True)
        trimstrict_jam_file = tempfile.NamedTemporaryFile(
            suffix='.jams', delete=True)
        tmpfiles.append(orig_wav_file)
        tmpfiles.append(orig_jam_file)
        tmpfiles.append(trim_wav_file)
        tmpfiles.append(trim_jam_file)
        tmpfiles.append(trimstrict_wav_file)
        tmpfiles.append(trimstrict_jam_file)
        # --- Create soundscape and save to tempfiles --- #
        sc = scaper.Scaper(10, FG_PATH, BG_PATH)
        sc.protected_labels = []
        sc.ref_db = -50
        sc.add_background(label=('const', 'park'),
                          source_file=('choose', []),
                          source_time=('const', 0))
        # Add 5 events
        # Events at 0.5/8.5 fall fully outside the [3, 7] trim window; the
        # ones at 2.5 and 6.5 are clipped at the window edges.
        start_times = [0.5, 2.5, 4.5, 6.5, 8.5]
        for event_time in start_times:
            sc.add_event(label=('const', 'siren'),
                         source_file=('choose', []),
                         source_time=('const', 5),
                         event_time=('const', event_time),
                         event_duration=('const', 1),
                         snr=('const', 10),
                         pitch_shift=None,
                         time_stretch=None)
        sc.generate(orig_wav_file.name, orig_jam_file.name,
                    disable_instantiation_warnings=True)
        # --- Trim soundscape using scaper.trim with strict=False --- #
        scaper.trim(orig_wav_file.name, orig_jam_file.name,
                    trim_wav_file.name, trim_jam_file.name,
                    3, 7, no_audio=False)
        # --- Validate output --- #
        # validate JAMS
        trimjam = jams.load(trim_jam_file.name)
        trimann = trimjam.annotations.search(namespace='scaper')[0]
        # Time and duration of annotation observation must be changed, but
        # values in the value dict must remained unchanged!
        for event in trimann.data:
            if event.value['role'] == 'background':
                assert (event.time == 0 and
                        event.duration == 4 and
                        event.value['event_time'] == 0 and
                        event.value['event_duration'] == 10 and
                        event.value['source_time'] == 0)
            else:
                # Foreground events are matched by their post-trim onset.
                if event.time == 0:
                    assert (event.duration == 0.5 and
                            event.value['event_time'] == 2.5 and
                            event.value['event_duration'] == 1 and
                            event.value['source_time'] == 5)
                elif event.time == 1.5:
                    assert (event.duration == 1 and
                            event.value['event_time'] == 4.5 and
                            event.value['event_duration'] == 1 and
                            event.value['source_time'] == 5)
                elif event.time == 3.5:
                    assert (event.duration == 0.5 and
                            event.value['event_time'] == 6.5 and
                            event.value['event_duration'] == 1 and
                            event.value['source_time'] == 5)
                else:
                    # Any other onset means an event survived the trim that
                    # should not have.
                    assert False
        # validate audio
        orig_wav, sr = soundfile.read(orig_wav_file.name)
        trim_wav, sr = soundfile.read(trim_wav_file.name)
        assert np.allclose(trim_wav, orig_wav[3*sr:7*sr], atol=atol, rtol=rtol)
def test_get_value_from_dist():
    """Test scaper.core._get_value_from_dist for every supported
    distribution tuple ('const', 'choose', 'uniform', 'normal', 'truncnorm')
    and for the rejection of malformed tuples.

    Fix: the 'uniform' section previously duplicated the 'choose' test and
    never exercised a ('uniform', ...) tuple; it now does.
    """
    rng = scaper.util._check_random_state(0)
    # const
    x = scaper.core._get_value_from_dist(('const', 1), rng)
    assert x == 1
    # choose
    for _ in range(10):
        x = scaper.core._get_value_from_dist(('choose', [1, 2, 3]), rng)
        assert x in [1, 2, 3]
    # uniform
    for _ in range(10):
        x = scaper.core._get_value_from_dist(('uniform', 1, 3), rng)
        assert scaper.util.is_real_number(x)
        assert 1 <= x <= 3
    # normal
    for _ in range(10):
        x = scaper.core._get_value_from_dist(('normal', 5, 1), rng)
        assert scaper.util.is_real_number(x)
    # truncnorm
    for _ in range(10):
        x = scaper.core._get_value_from_dist(('truncnorm', 5, 10, 0, 10), rng)
        assert scaper.util.is_real_number(x)
        assert 0 <= x <= 10
    # COPY TESTS FROM test_validate_distribution (to ensure validation applied)
    def __test_bad_tuple_list(tuple_list):
        rng = scaper.util._check_random_state(0)
        for t in tuple_list:
            if isinstance(t, tuple):
                print(t, len(t))
            else:
                print(t)
            pytest.raises(ScaperError, scaper.core._get_value_from_dist, t, random_state=rng)
    # not tuple = error
    nontuples = [[], 5, 'yes']
    __test_bad_tuple_list(nontuples)
    # tuple must be at least length 2
    shortuples = [tuple(), tuple(['const'])]
    __test_bad_tuple_list(shortuples)
    # unsupported distribution tuple name
    badnames = [('invalid', 1), ('constant', 1, 2, 3)]
    __test_bad_tuple_list(badnames)
    # supported dist tuples, but bad arugments
    badargs = [('const', 1, 2),
               ('choose', 1), ('choose', [], 1),
               ('uniform', 1), ('uniform', 1, 2, 3), ('uniform', 2, 1),
               ('uniform', 'one', 2), ('uniform', 1, 'two'),
               ('uniform', 0, 1j), ('uniform', 1j, 2),
               ('normal', 1),
               ('normal', 1, 2, 3), ('normal', 1, -1),
               ('normal', 0, 1j), ('normal', 1j, 1), ('normal', 'one', 2),
               ('normal', 1, 'two'),
               ('truncnorm', 1), ('truncnorm', 1, 2, 3),
               ('truncnorm', 1, -1, 0, 1),
               ('truncnorm', 0, 1j, 0, 1), ('truncnorm', 1j, 1, 0, 1),
               ('truncnorm', 'one', 2, 0, 1), ('truncnorm', 1, 'two', 0, 1),
               ('truncnorm', 1, 2, 'three', 5),
               ('truncnorm', 1, 2, 3, 'four'),
               ('truncnorm', 0, 2, 2, 0)]
    __test_bad_tuple_list(badargs)
def test_ensure_satisfiable_source_time_tuple():
    """Document the expected adjustments _ensure_satisfiable_source_time_tuple
    makes when a source-time distribution can exceed
    source_duration - event_duration (here 10 - 5 = 5)."""
    source_duration = 10
    event_duration = 5

    def _assert_adjusted(dist, expected, tail=True):
        # Run the adjustment, require the warn flag, then compare either
        # the full parameter tail (adjusted[1:]) or only adjusted[1].
        adjusted, warned = scaper.core._ensure_satisfiable_source_time_tuple(
            dist, source_duration, event_duration)
        assert warned
        target = adjusted[1:] if tail else adjusted[1]
        assert np.allclose(target, expected)
        return adjusted

    # uniform/truncnorm bounds get clipped to the max feasible start (5)
    _assert_adjusted(('uniform', 4, 10), (4, 5))
    _assert_adjusted(('truncnorm', 8, 1, 4, 10), (5, 1, 4, 5))
    # a 'const' past the limit is pulled back to 5
    _assert_adjusted(('const', 6), (5))
    _assert_adjusted(('uniform', 1, 10), (1, 5))
    _assert_adjusted(('truncnorm', 4, 1, 1, 10), (4, 1, 1, 5))
    # fully-infeasible ranges collapse to the single feasible value
    _assert_adjusted(('uniform', 6, 10), (5), tail=False)
    _assert_adjusted(('truncnorm', 8, 1, 6, 10), (5), tail=False)
    # 'choose' options beyond the limit are capped
    _assert_adjusted(('choose', [0, 1, 2, 10, 12, 15, 20]),
                     [0, 1, 2, 5], tail=False)
    # 'choose_weighted' caps values but must keep the weights untouched
    weighted = _assert_adjusted(
        ('choose_weighted', [0, 1, 2, 10, 12, 15, 20],
         [0.1, 0.2, 0.05, 0.05, 0.3, 0.1, 0.2]),
        [0, 1, 2, 5, 5, 5, 5], tail=False)
    assert np.allclose(weighted[2], [0.1, 0.2, 0.05, 0.05, 0.3, 0.1, 0.2])
def test_validate_distribution():
    """Check that _validate_distribution rejects malformed distribution
    tuples of every supported kind."""
    def __test_bad_tuple_list(tuple_list):
        for t in tuple_list:
            if isinstance(t, tuple):
                print(t, len(t))
            else:
                print(t)
            pytest.raises(ScaperError, scaper.core._validate_distribution, t)
    # not tuple = error
    nontuples = [[], 5, 'yes']
    __test_bad_tuple_list(nontuples)
    # tuple must be at least length 2
    shortuples = [tuple(), tuple(['const'])]
    __test_bad_tuple_list(shortuples)
    # unsupported distribution tuple name
    badnames = [('invalid', 1), ('constant', 1, 2, 3)]
    __test_bad_tuple_list(badnames)
    # supported dist tuples, but bad arguments
    badargs = [('const', 1, 2),
               ('choose', 1),
               ('choose', [], 1),
               # BUGFIX: was ('choose_weighted') — a plain string because of
               # the missing comma — so the argument-free choose_weighted
               # tuple was never actually exercised.
               ('choose_weighted',),
               ('choose_weighted', []),
               ('choose_weighted', [1, 2, 3]),
               ('choose_weighted', [1, 2, 3], 5),
               ('choose_weighted', 5, [0.2, 0.3, 0.5]),
               ('choose_weighted', [1, 2, 3], []),
               ('choose_weighted', [1, 2, 3], [0.4, 0.6]),
               ('choose_weighted', [1, 2, 3], [0.4, 0.6, 0.1]),
               ('choose_weighted', [1, 2, 3], [0.4, 0.3, 0.2]),
               ('choose_weighted', [1, 2, 3], [0.7, -0.5, 0.8]),
               ('choose_weighted', [1, 2, 3], [1.2, -0.3, 0.1]),
               ('choose_weighted', [1, 2, 3], [1.2, 0.3, 0.1]),
               ('uniform', 1), ('uniform', 1, 2, 3), ('uniform', 2, 1),
               ('uniform', 'one', 2), ('uniform', 1, 'two'),
               ('uniform', 0, 1j), ('uniform', 1j, 2),
               ('normal', 1),
               ('normal', 1, 2, 3), ('normal', 1, -1),
               ('normal', 0, 1j), ('normal', 1j, 1), ('normal', 'one', 2),
               ('normal', 1, 'two'),
               ('truncnorm', 1), ('truncnorm', 1, 2, 3),
               ('truncnorm', 1, -1, 0, 1),
               ('truncnorm', 0, 1j, 0, 1), ('truncnorm', 1j, 1, 0, 1),
               ('truncnorm', 'one', 2, 0, 1), ('truncnorm', 1, 'two', 0, 1),
               ('truncnorm', 1, 2, 'three', 5), ('truncnorm', 1, 2, 3, 'four'),
               ('truncnorm', 0, 2, 2, 0)]
    __test_bad_tuple_list(badargs)
def test_validate_label():
    """_validate_label must reject labels outside the allowed set and any
    distribution tuple other than 'const'/'choose'."""
    # a 'const' label that is not in the allowed list
    pytest.raises(ScaperError, scaper.core._validate_label, ('const', 'no'),
                  ['yes'])
    # a 'choose' list that is not a subset of the allowed labels
    pytest.raises(ScaperError, scaper.core._validate_label, ('choose', ['no']),
                  ['yes', 'hello'])
    # only 'const' and 'choose' tuples are legal for labels
    for dist in [('uniform', 0, 1), ('normal', 0, 1),
                 ('truncnorm', 0, 1, 0, 1)]:
        pytest.raises(ScaperError, scaper.core._validate_label, dist,
                      ['yes', 'hello'])
def test_validate_source_file():
    """Exercise the failure modes of _validate_source_file."""
    sourcefile = 'tests/data/audio/foreground/siren/69-Siren-1.wav'
    # a 'const' source file that does not exist must raise; use a fresh
    # temp dir so we have a path that is guaranteed not to exist
    with backports.tempfile.TemporaryDirectory() as tmpdir:
        missing = os.path.join(tmpdir, 'notafile')
        pytest.raises(ScaperError, scaper.core._validate_source_file,
                      ('const', missing), ('const', 'siren'))
    # with a 'const' source file the label must also be 'const' and must
    # match the file's parent folder name
    pytest.raises(ScaperError, scaper.core._validate_source_file,
                  ('const', sourcefile), ('choose', []))
    pytest.raises(ScaperError, scaper.core._validate_source_file,
                  ('const', sourcefile), ('const', 'car_horn'))
    # with 'choose', every file in the list must exist
    with backports.tempfile.TemporaryDirectory() as tmpdir:
        missing = os.path.join(tmpdir, 'notafile')
        pytest.raises(ScaperError, scaper.core._validate_source_file,
                      ('choose', [sourcefile, missing]), ('const', 'siren'))
    # distribution tuples other than 'const'/'choose' are not allowed
    for dist in [('uniform', 0, 1), ('normal', 0, 1),
                 ('truncnorm', 0, 1, 0, 1)]:
        pytest.raises(ScaperError, scaper.core._validate_source_file, dist,
                      ('const', 'siren'))
def test_validate_time():
    """_validate_time rejects non-real or negative times; 'normal' warns."""
    def _expect_error(time_tuple):
        pytest.raises(ScaperError, scaper.core._validate_time, time_tuple)

    invalid_values = [None, -1, 1j, 'yes', [], [5]]
    # invalid 'const' values
    for value in invalid_values:
        _expect_error(('const', value))
    # 'choose' with an empty list
    _expect_error(('choose', []))
    # 'choose' containing invalid values
    for value in invalid_values:
        _expect_error(('choose', [value]))
    # 'uniform' may not have a negative minimum
    _expect_error(('uniform', -1, 1))
    # 'normal' only warns since it can draw negative values
    pytest.warns(ScaperWarning, scaper.core._validate_time, ('normal', 5, 2))
    # 'truncnorm' may not have a negative lower bound
    _expect_error(('truncnorm', 0, 1, -1, 1))
def test_validate_duration():
    """_validate_duration rejects non-real, negative or zero durations;
    'normal' only warns."""
    def _expect_error(duration_tuple):
        pytest.raises(ScaperError, scaper.core._validate_duration,
                      duration_tuple)

    invalid_values = [None, -1, 0, 1j, 'yes', [], [5]]
    # invalid 'const' values
    for value in invalid_values:
        _expect_error(('const', value))
    # 'choose' with an empty list
    _expect_error(('choose', []))
    # 'choose' containing invalid values
    for value in invalid_values:
        _expect_error(('choose', [value]))
    # 'uniform' minimum must be strictly positive
    _expect_error(('uniform', -1, 1))
    _expect_error(('uniform', 0, 1))
    # 'normal' only warns since it can draw non-positive values
    pytest.warns(ScaperWarning, scaper.core._validate_duration,
                 ('normal', 5, 2))
    # 'truncnorm' lower bound must be strictly positive
    _expect_error(('truncnorm', 0, 1, -1, 1))
    _expect_error(('truncnorm', 0, 1, 0, 1))
def test_validate_snr():
    """_validate_snr rejects non-real values in 'const' and 'choose'."""
    invalid_values = [None, 1j, 'yes', [], [5]]
    # invalid 'const' values
    for value in invalid_values:
        pytest.raises(ScaperError, scaper.core._validate_snr,
                      ('const', value))
    # 'choose' with an empty list
    pytest.raises(ScaperError, scaper.core._validate_snr, ('choose', []))
    # 'choose' containing invalid values
    for value in invalid_values:
        pytest.raises(ScaperError, scaper.core._validate_snr,
                      ('choose', [value]))
def test_validate_pitch_shift():
    """_validate_pitch_shift rejects non-real values in 'const'/'choose'."""
    invalid_values = [None, 1j, 'yes', [], [5]]
    # invalid 'const' values
    for value in invalid_values:
        pytest.raises(ScaperError, scaper.core._validate_pitch_shift,
                      ('const', value))
    # 'choose' with an empty list
    pytest.raises(ScaperError, scaper.core._validate_pitch_shift,
                  ('choose', []))
    # 'choose' containing invalid values
    for value in invalid_values:
        pytest.raises(ScaperError, scaper.core._validate_pitch_shift,
                      ('choose', [value]))
def test_validate_time_stretch():
    """_validate_time_stretch requires strictly positive stretch factors;
    'normal' only warns."""
    def _expect_error(ts_tuple):
        pytest.raises(ScaperError, scaper.core._validate_time_stretch,
                      ts_tuple)

    invalid_values = [None, 1j, 'yes', [], [5], -5, 0]
    # invalid 'const' values
    for value in invalid_values:
        _expect_error(('const', value))
    # 'choose' with an empty list
    _expect_error(('choose', []))
    # 'choose' containing invalid values
    for value in invalid_values:
        _expect_error(('choose', [value]))
    # distributions whose support includes values <= 0 must raise
    for dist in [('uniform', 0, 1), ('uniform', -5, 1),
                 ('truncnorm', 5, 1, 0, 10), ('truncnorm', 5, 1, -5, 10)]:
        _expect_error(dist)
    # 'normal' can draw values <= 0, so it must raise a warning instead
    pytest.warns(
        ScaperWarning, scaper.core._validate_time_stretch, ('normal', 5, 1))
def test_validate_event():
    """allowed_labels must be a plain list; any other type raises."""
    # everything except allowed_labels is valid and held fixed
    valid_kwargs = dict(label=('choose', []),
                        source_file=('choose', []),
                        source_time=('const', 0),
                        event_time=('const', 0),
                        event_duration=('const', 1),
                        snr=('const', 0),
                        pitch_shift=None,
                        time_stretch=None)
    for bad_labels in [0, 'yes', 1j, np.array([1, 2, 3])]:
        pytest.raises(ScaperError, scaper.core._validate_event,
                      allowed_labels=bad_labels, **valid_kwargs)
def test_scaper_init():
    '''
    Test creation of Scaper object: invalid arguments must raise, and a
    valid object must expose the expected paths, labels and defaults.
    '''
    # bad duration
    # (FIX: pytest.raises returns an ExceptionInfo, which the original code
    # misleadingly bound to ``sc``; the assignments served no purpose.)
    pytest.raises(ScaperError, scaper.Scaper, -5, FG_PATH, BG_PATH)
    # all args valid
    sc = scaper.Scaper(10.0, FG_PATH, BG_PATH)
    assert sc.fg_path == FG_PATH
    assert sc.bg_path == BG_PATH
    # bad fg path
    pytest.raises(ScaperError, scaper.Scaper, 10.0,
                  'tests/data/audio/wrong',
                  BG_PATH)
    # bad bg path
    pytest.raises(ScaperError, scaper.Scaper, 10.0,
                  FG_PATH,
                  'tests/data/audio/wrong')
    # ensure fg_labels and bg_labels populated properly
    sc = scaper.Scaper(10.0, FG_PATH, BG_PATH)
    assert sc.fg_labels == FB_LABELS
    assert sc.bg_labels == BG_LABELS
    # ensure default values have been set
    assert sc.sr == 44100
    assert sc.ref_db == -12
    assert sc.n_channels == 1
    assert sc.fade_in_len == 0.01   # 10 ms
    assert sc.fade_out_len == 0.01  # 10 ms
def test_reset_fg_bg_event_spec():
    """reset_fg_event_spec / reset_bg_event_spec must empty the specs, and
    re-adding the same events must reproduce the specs exactly."""
    def _push_foreground(sc):
        sc.add_event(label=('const', 'siren'),
                     source_file=('choose', []),
                     source_time=('const', 0),
                     event_time=('uniform', 0, 9),
                     event_duration=('truncnorm', 2, 1, 1, 3),
                     snr=('uniform', 10, 20),
                     pitch_shift=('normal', 0, 1),
                     time_stretch=('uniform', 0.8, 1.2))

    def _push_background(sc):
        sc.add_background(("const", "park"), ("choose", []), ("const", 0))

    sc = scaper.Scaper(
        10.0, fg_path=FG_PATH, bg_path=BG_PATH, random_state=0)
    # both specs start out empty
    assert len(sc.fg_spec) == 0
    assert len(sc.bg_spec) == 0
    # add one foreground event and remember the resulting spec
    _push_foreground(sc)
    assert len(sc.fg_spec) == 1
    saved_fg = deepcopy(sc.fg_spec)
    # resetting must empty the foreground spec...
    sc.reset_fg_event_spec()
    assert len(sc.fg_spec) == 0
    # ...and re-adding the event must reproduce the original spec exactly
    _push_foreground(sc)
    assert saved_fg == sc.fg_spec
    # start over, this time exercising the background reset as well
    sc.reset_fg_event_spec()
    _push_foreground(sc)
    _push_background(sc)
    assert len(sc.fg_spec) == 1
    assert len(sc.bg_spec) == 1
    saved_fg = deepcopy(sc.fg_spec)
    saved_bg = deepcopy(sc.bg_spec)
    # after resetting both there must be no events of either kind
    sc.reset_fg_event_spec()
    sc.reset_bg_event_spec()
    assert len(sc.fg_spec) == 0
    assert len(sc.bg_spec) == 0
    # re-adding both events must reproduce both original specs
    _push_foreground(sc)
    _push_background(sc)
    assert saved_fg == sc.fg_spec
    assert saved_bg == sc.bg_spec
def test_scaper_add_background():
    """add_background must store an EventSpec whose event_time, duration,
    snr and role are filled in automatically."""
    sc = scaper.Scaper(10.0, FG_PATH, BG_PATH)
    # positional args: label, source_file, source_time
    sc.add_background(("const", "park"), ("choose", []), ("const", 0))
    # The background always starts at 0, spans the full soundscape
    # duration, plays at snr 0, and carries the 'background' role.
    expected = EventSpec(label=("const", "park"),
                         source_file=("choose", []),
                         source_time=("const", 0),
                         event_time=("const", 0),
                         event_duration=("const", sc.duration),
                         snr=("const", 0),
                         role='background',
                         pitch_shift=None,
                         time_stretch=None)
    assert sc.bg_spec == [expected]
def test_scaper_add_event():
    """add_event must append an EventSpec with role='foreground' and the
    given distribution tuples stored verbatim."""
    event_kwargs = dict(label=('const', 'siren'),
                        source_file=('choose', []),
                        source_time=('const', 0),
                        event_time=('uniform', 0, 9),
                        event_duration=('truncnorm', 2, 1, 1, 3),
                        snr=('uniform', 10, 20),
                        pitch_shift=('normal', 0, 1),
                        time_stretch=('uniform', 0.8, 1.2))
    sc = scaper.Scaper(10.0, FG_PATH, BG_PATH)
    # the foreground spec starts out empty
    assert sc.fg_spec == []
    # after adding one event the spec holds exactly that event, with the
    # 'foreground' role added automatically
    sc.add_event(**event_kwargs)
    assert len(sc.fg_spec) == 1
    assert sc.fg_spec[0] == EventSpec(role='foreground', **event_kwargs)
def test_scaper_instantiate_event():
    """Exercise Scaper._instantiate_event across its branches: the happy
    path, replacement of already-used labels/sources, protected labels,
    repeated-label/source errors, and the warning cases where durations or
    source times exceed what the source file or soundscape can hold.

    NOTE(review): instantiation is stochastic, so several sub-tests redraw
    in loops to hit specific random branches; statement order matters for
    the RNG state and the code is deliberately left unchanged.
    """
    # GF EVENT TO WORK WITH
    fg_event = EventSpec(label=('const', 'siren'),
                         source_file=('choose', []),
                         source_time=('const', 0),
                         event_time=('uniform', 0, 9),
                         event_duration=('truncnorm', 2, 1, 1, 3),
                         snr=('uniform', 10, 20),
                         role='foreground',
                         pitch_shift=('normal', 0, 1),
                         time_stretch=('uniform', 0.8, 1.2))
    # test valid case
    sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
    instantiated_event = sc._instantiate_event(
        fg_event, isbackground=False, allow_repeated_label=True,
        allow_repeated_source=True, used_labels=[], used_source_files=[],
        disable_instantiation_warnings=True)
    # every sampled field must fall inside its distribution's support
    assert instantiated_event.label == 'siren'
    assert instantiated_event.source_file == (
        'tests/data/audio/foreground/siren/69-Siren-1.wav')
    assert instantiated_event.source_time == 0
    assert 0 <= instantiated_event.event_time <= 9
    assert 1 <= instantiated_event.event_duration <= 3
    assert 10 <= instantiated_event.snr <= 20
    assert instantiated_event.role == 'foreground'
    assert scaper.util.is_real_number(instantiated_event.pitch_shift)
    assert 0.8 <= instantiated_event.time_stretch <= 1.2
    # when a label needs to be replaced because it's used already
    fg_event8 = fg_event._replace(label=('choose', []))
    # repeat several times to increase chance of hitting the line we need to
    # test
    for _ in range(20):
        instantiated_event = sc._instantiate_event(
            fg_event8, isbackground=False, allow_repeated_label=False,
            allow_repeated_source=True, used_labels=['siren', 'human_voice'],
            disable_instantiation_warnings=True)
        # 'car_horn' is the only label left once siren/human_voice are used
        assert instantiated_event.label == 'car_horn'
    # when a source file needs to be replaced because it's used already
    fg_event9 = fg_event._replace(label=('const', 'human_voice'))
    # repeat several times to increase chance of hitting the line we need to
    # test
    for _ in range(20):
        instantiated_event = sc._instantiate_event(
            fg_event9, isbackground=False, allow_repeated_label=True,
            allow_repeated_source=False,
            used_labels=[],
            used_source_files=(
                ['tests/data/audio/foreground/human_voice/'
                 '42-Human-Vocal-Voice-all-aboard_edit.wav',
                 'tests/data/audio/foreground/human_voice/'
                 '42-Human-Vocal-Voice-taxi-1_edit.wav']),
            disable_instantiation_warnings=True)
        # taxi-2 is the only human_voice file not in used_source_files
        assert instantiated_event.source_file == (
            'tests/data/audio/foreground/human_voice/'
            '42-Human-Vocal-Voice-taxi-2_edit.wav')
    # Protected labels must have original source duration and source time 0
    sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH,
                       protected_labels='human_voice')
    fg_event10 = fg_event._replace(
        label=('const', 'human_voice'),
        source_file=('const', 'tests/data/audio/foreground/human_voice/'
                              '42-Human-Vocal-Voice-taxi-2_edit.wav'),
        source_time=('const', 0.3),
        event_duration=('const', 0.4))
    instantiated_event = sc._instantiate_event(
        fg_event10, disable_instantiation_warnings=True)
    assert instantiated_event.source_time == 0
    # 0.806236 is presumably the full duration of the taxi-2 source file
    # — TODO confirm against the audio asset
    assert np.allclose(instantiated_event.event_duration, 0.806236, atol=1e-5)
    # repeated label when not allowed throws error
    sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
    pytest.raises(ScaperError, sc._instantiate_event, fg_event,
                  isbackground=False,
                  allow_repeated_label=False,
                  allow_repeated_source=True,
                  used_labels=['siren'])
    # repeated source when not allowed throws error
    pytest.raises(ScaperError, sc._instantiate_event, fg_event,
                  isbackground=False,
                  allow_repeated_label=True,
                  allow_repeated_source=False,
                  used_labels=['siren'],
                  used_source_files=(
                      ['tests/data/audio/foreground/siren/69-Siren-1.wav']))
    # event duration longer than source duration: warning
    fg_event2 = fg_event._replace(label=('const', 'car_horn'),
                                  event_duration=('const', 5))
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event2)
    # event duration longer than soundscape duration: warning
    fg_event3 = fg_event._replace(event_time=('const', 0),
                                  event_duration=('const', 15),
                                  time_stretch=None)
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event3)
    # stretched event duration longer than soundscape duration: warning
    fg_event4 = fg_event._replace(event_time=('const', 0),
                                  event_duration=('const', 6),
                                  time_stretch=('const', 2))
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event4)
    # 'const' source_time + event_duration > source_duration: warning
    fg_event5a = fg_event._replace(event_time=('const', 0),
                                   event_duration=('const', 8),
                                   source_time=('const', 20))
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event5a)
    # 'choose' source_time + event_duration > source_duration: warning
    fg_event5b = fg_event._replace(event_time=('const', 0),
                                   event_duration=('const', 8),
                                   source_time=('choose', [20, 20]))
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event5b)
    # 'uniform' source_time + event_duration > source_duration: warning
    fg_event5c = fg_event._replace(event_time=('const', 0),
                                   event_duration=('const', 8),
                                   source_time=('uniform', 20, 25))
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event5c)
    # 'normal' source_time + event_duration > source_duration: warning
    fg_event5d = fg_event._replace(event_time=('const', 0),
                                   event_duration=('const', 8),
                                   source_time=('normal', 20, 2))
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event5d)
    # 'truncnorm' source_time + event_duration > source_duration: warning
    fg_event5e = fg_event._replace(event_time=('const', 0),
                                   event_duration=('const', 8),
                                   source_time=('truncnorm', 20, 2, 20, 20))
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event5e)
    # 'normal' random draw above mean with mean = source_duration - event_duration
    # source_time + event_duration > source_duration: warning
    fg_event5f = fg_event._replace(event_time=('const', 0),
                                   event_duration=('const', 8),
                                   source_time=('normal', 18.25, 2))
    def _repeat_instantiation(event):
        # keep going till we hit a draw that covers when the draw exceeds
        # source_duration - event_duration (18.25). Use max_draws
        # just in case so that testing is guaranteed to terminate.
        source_time = 0
        num_draws = 0
        max_draws = 1000
        while source_time < 18.25 and num_draws < max_draws:
            instantiated_event = sc._instantiate_event(event)
            source_time = instantiated_event.source_time
            num_draws += 1
    pytest.warns(ScaperWarning, _repeat_instantiation, fg_event5f)
    # event_time + event_duration > soundscape duration: warning
    fg_event6 = fg_event._replace(event_time=('const', 8),
                                  event_duration=('const', 5),
                                  time_stretch=None)
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event6)
    # event_time + stretched event_duration > soundscape duration: warning
    fg_event7 = fg_event._replace(event_time=('const', 5),
                                  event_duration=('const', 4),
                                  time_stretch=('const', 2))
    pytest.warns(ScaperWarning, sc._instantiate_event, fg_event7)
    # stretched duration should always be adjusted to be <= self.duration
    for stretch in [2, 3, 1.5]:
        fg_event11 = fg_event._replace(event_time=('const', 2),
                                       event_duration=('const', 7),
                                       time_stretch=('const', stretch))
        fg_event11_inst = sc._instantiate_event(fg_event11)
        assert fg_event11_inst.event_time == 0
        assert fg_event11_inst.event_duration == sc.duration / stretch
    # Make sure event time is respected when possible
    for e_stretch, e_duration in zip([1, 1.25, 0.5], [7, 7, 18]):
        fg_event12 = fg_event._replace(event_time=('const', 1),
                                       event_duration=('const', e_duration),
                                       time_stretch=('const', e_stretch))
        fg_event12_inst = sc._instantiate_event(fg_event12)
        assert fg_event12_inst.event_time == 1
        assert fg_event12_inst.event_duration == e_duration
def test_scaper_instantiate():
    """Instantiate a fixed spec at every sample rate and compare the result
    against the stored regression jams."""
    # Sandbox fields that generate() sets but _instantiate() does not;
    # ignore them when comparing against the regression jam.
    sandbox_exclude = [
        'txt_path',
        'jams_path',
        'audio_path',
        'no_audio',
        'save_isolated_events',
        'fix_clipping',
        'peak_normalization',
        'peak_normalization_scale_factor',
        'quick_pitch_time',
        'ref_db_change',
        'ref_db_generated',
        'txt_sep',
        'disable_sox_warnings',
        'disable_instantiation_warnings'
    ]
    # the three fixed foreground events, in the order they must be added
    fg_specs = [
        dict(label=('const', 'siren'),
             source_file=('const',
                          'tests/data/audio/foreground/'
                          'siren/69-Siren-1.wav'),
             source_time=('const', 5),
             event_time=('const', 2),
             event_duration=('const', 5),
             snr=('const', 5),
             pitch_shift=None,
             time_stretch=None),
        dict(label=('const', 'car_horn'),
             source_file=('const',
                          'tests/data/audio/foreground/'
                          'car_horn/17-CAR-Rolls-Royce-Horn.wav'),
             source_time=('const', 0),
             event_time=('const', 5),
             event_duration=('const', 2),
             snr=('const', 20),
             pitch_shift=('const', 1),
             time_stretch=None),
        dict(label=('const', 'human_voice'),
             source_file=('const',
                          'tests/data/audio/foreground/'
                          'human_voice/42-Human-Vocal-Voice-taxi-2_edit.wav'),
             source_time=('const', 0),
             event_time=('const', 7),
             event_duration=('const', 2),
             snr=('const', 10),
             pitch_shift=None,
             time_stretch=('const', 1.2)),
    ]
    for sr in SAMPLE_RATES:
        sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
        sc.ref_db = -50
        sc.sr = sr
        # background
        sc.add_background(
            label=('const', 'park'),
            source_file=(
                'const',
                'tests/data/audio/background/park/'
                '268903__yonts__city-park-tel-aviv-israel.wav'),
            source_time=('const', 0))
        # foreground events
        for spec in fg_specs:
            sc.add_event(**spec)
        # Instantiate and compare against the regression jam for this sr
        jam = sc._instantiate(disable_instantiation_warnings=True)
        regjam = jams.load(TEST_PATHS[sr]['REG'].jams)
        _compare_scaper_jams(
            jam, regjam,
            exclude_additional_scaper_sandbox_keys=sandbox_exclude)
def test_generate_with_seeding(atol=1e-4, rtol=1e-8):
    """Generators constructed with the same seed — given either as an int
    or as a RandomState — must produce identical output."""
    seeds = [
        0, 10, 20,
        scaper.util._check_random_state(0),
        scaper.util._check_random_state(10),
        scaper.util._check_random_state(20)
    ]
    for seed in seeds:
        # build two independent generators from (copies of) the same seed
        pair = [_create_scaper_with_random_seed(deepcopy(seed))
                for _ in range(2)]
        _compare_generators(pair)
def test_set_random_state(atol=1e-4, rtol=1e-8):
    """Same as test_generate_with_seeding, but the seed is applied after
    construction via set_random_state() instead of the constructor."""
    seeds = [
        0, 10, 20,
        scaper.util._check_random_state(0),
        scaper.util._check_random_state(10),
        scaper.util._check_random_state(20)
    ]
    for seed in seeds:
        pair = []
        for _ in range(2):
            # build unseeded, then seed explicitly
            gen = _create_scaper_with_random_seed(None)
            gen.set_random_state(deepcopy(seed))
            pair.append(gen)
        _compare_generators(pair)
def _compare_generators(generators, atol=1e-4, rtol=1e-8):
    """Generate a soundscape from each Scaper in ``generators`` and assert
    that the audio, JAMS and txt outputs are all identical to the first's.

    NOTE(review): the NamedTemporaryFile handles (delete=True) must stay
    alive until every comparison is done — the _close_temp_files context
    guarantees that — so the code is left unchanged here.
    """
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        # one (wav, jams, txt) output triple per generator
        wav_files = [
            tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
            for i in range(len(generators))
        ]
        jam_files = [
            tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
            for i in range(len(generators))
        ]
        txt_files = [
            tempfile.NamedTemporaryFile(suffix='.txt', delete=True)
            for i in range(len(generators))
        ]
        tmpfiles += wav_files + jam_files + txt_files
        for i, sc in enumerate(generators):
            generators[i].generate(
                wav_files[i].name, jam_files[i].name, txt_path=txt_files[i].name,
                disable_instantiation_warnings=True
            )
        # all generated audio must be numerically identical to the first
        audio = [soundfile.read(wav_file.name)[0] for wav_file in wav_files]
        for i, a in enumerate(audio):
            assert np.allclose(audio[0], a, atol=atol, rtol=rtol)
        # load all the jams data
        # make sure they are all the same as the first one
        # (file paths legitimately differ per temp file, so exclude them)
        exclude_sandbox = ['audio_path', 'jams_path', 'txt_path']
        jams_data = [jams.load(jam_file.name) for jam_file in jam_files]
        for x in jams_data:
            _compare_scaper_jams(
                x, jams_data[0],
                exclude_additional_scaper_sandbox_keys=exclude_sandbox)
        # load the txt files and compare them
        def _load_txt(txt_file):
            # each row: onset \t offset \t label
            txt_data = []
            with open(txt_file.name) as file:
                reader = csv.reader(file, delimiter='\t')
                for row in reader:
                    txt_data.append(row)
            txt_data = np.asarray(txt_data)
            return txt_data
        txt_data = [_load_txt(txt_file) for txt_file in txt_files]
        regtxt_data = txt_data[0]
        for t in txt_data:
            # compare onsets and offsets numerically
            assert np.allclose([float(x) for x in t[:, 0]],
                               [float(x) for x in regtxt_data[:, 0]])
            assert np.allclose([float(x) for x in t[:, 1]],
                               [float(x) for x in regtxt_data[:, 1]])
            # compare labels
            assert (t[:, 2] == regtxt_data[:, 2]).all()
def _create_scaper_with_random_seed(seed):
    """Build a fixed soundscape spec (one background, three foreground
    events) seeded with ``seed`` (int, RandomState, or None)."""
    sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH,
                       random_state=deepcopy(seed))
    sc.ref_db = -50
    sc.sr = 44100
    # background: label and file chosen at random
    sc.add_background(
        label=('choose', []),
        source_file=('choose', []),
        source_time=('const', 0))
    # two randomly-placed foreground events that differ only in SNR
    for snr in (5, 20):
        sc.add_event(
            label=('choose', []),
            source_file=('choose', []),
            source_time=('uniform', 0, 8),
            event_time=('truncnorm', 4, 1, 0, 8),
            event_duration=('normal', 4, 1),
            snr=('const', snr),
            pitch_shift=None,
            time_stretch=None)
    # a third event at a fixed time and duration (label/file still random)
    sc.add_event(
        label=('choose', []),
        source_file=('choose', []),
        source_time=('const', 0),
        event_time=('const', 7),
        event_duration=('const', 2),
        snr=('const', 10),
        pitch_shift=None,
        time_stretch=None)
    return sc
def test_generate_audio():
    """Run the audio regression test for every sample rate, in both mono
    and stereo."""
    for sr in SAMPLE_RATES:
        paths = TEST_PATHS[sr]
        for n_ch in (1, 2):
            _test_generate_audio(sr,
                                 paths['REG'].wav,
                                 paths['REG_BGONLY'].wav,
                                 paths['REG_REVERB'].wav,
                                 n_ch)
def _test_generate_audio(SR, REG_WAV_PATH, REG_BGONLY_WAV_PATH, REG_REVERB_WAV_PATH, N_CHANNELS, atol=1e-4, rtol=1e-8):
    """Audio regression test for Scaper._generate_audio at one sample rate
    and channel count: plain render, reverb render, sox-warning path, the
    namespace/role error branches, the empty-soundscape warning, and the
    background-only render.

    NOTE(review): the same temp wav file is reused and ``jam`` is mutated
    between sub-tests, so the sequence is order-sensitive and the code is
    deliberately left unchanged.
    """
    # Regression test: same spec, same audio (not this will fail if we update
    # any of the audio processing techniques used (e.g. change time stretching
    # algorithm.
    sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
    sc.ref_db = -50
    sc.sr = SR
    sc.n_channels = N_CHANNELS
    print("TEST SR: {}, # OF CHANNELS: {}".format(SR, N_CHANNELS))
    # background
    sc.add_background(
        label=('const', 'park'),
        source_file=(
            'const',
            'tests/data/audio/background/park/'
            '268903__yonts__city-park-tel-aviv-israel.wav'),
        source_time=('const', 0))
    # foreground events
    sc.add_event(
        label=('const', 'siren'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'siren/69-Siren-1.wav'),
        source_time=('const', 5),
        event_time=('const', 2),
        event_duration=('const', 5),
        snr=('const', 5),
        pitch_shift=None,
        time_stretch=None)
    sc.add_event(
        label=('const', 'car_horn'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'car_horn/17-CAR-Rolls-Royce-Horn.wav'),
        source_time=('const', 0),
        event_time=('const', 5),
        event_duration=('const', 2),
        snr=('const', 20),
        pitch_shift=('const', 1),
        time_stretch=None)
    sc.add_event(
        label=('const', 'human_voice'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'human_voice/42-Human-Vocal-Voice-taxi-2_edit.wav'),
        source_time=('const', 0),
        event_time=('const', 7),
        event_duration=('const', 2),
        snr=('const', 10),
        pitch_shift=None,
        time_stretch=('const', 1.2))
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        wav_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        tmpfiles.append(wav_file)
        jam = sc._instantiate(disable_instantiation_warnings=True)
        sc._generate_audio(wav_file.name, jam.annotations[0])
        # validate audio
        wav, sr = soundfile.read(wav_file.name, always_2d=True)
        regwav, sr = soundfile.read(REG_WAV_PATH, always_2d=True)
        # TODO: Add multi-channel regression data.
        if N_CHANNELS == 1:
            assert np.allclose(wav, regwav, atol=atol, rtol=rtol)
        # with reverb
        sc._generate_audio(wav_file.name, jam.annotations[0], reverb=0.2)
        # validate audio
        wav, sr = soundfile.read(wav_file.name, always_2d=True)
        regwav, sr = soundfile.read(REG_REVERB_WAV_PATH, always_2d=True)
        # TODO: Add multi-channel regression data.
        if N_CHANNELS == 1:
            assert np.allclose(wav, regwav, atol=atol, rtol=rtol)
        # Don't disable sox warnings (just to cover line)
        sc._generate_audio(wav_file.name, jam.annotations[0],
                           disable_sox_warnings=False)
        # validate audio
        wav, sr = soundfile.read(wav_file.name, always_2d=True)
        regwav, sr = soundfile.read(REG_WAV_PATH, always_2d=True)
        # TODO: Add multi-channel regression data.
        if N_CHANNELS == 1:
            assert np.allclose(wav, regwav, atol=atol, rtol=rtol)
        # namespace must be scaper
        jam.annotations[0].namespace = 'tag_open'
        pytest.raises(ScaperError, sc._generate_audio, wav_file.name,
                      jam.annotations[0])
        # unsupported event role must raise error
        # (restore the namespace first, then corrupt the role)
        jam.annotations[0].namespace = 'scaper'
        jam.annotations[0].data[3].value['role'] = 'ewok'
        pytest.raises(ScaperError, sc._generate_audio, wav_file.name,
                      jam.annotations[0])
        # soundscape with no events will raise warning and won't generate audio
        sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
        sc.ref_db = -50
        jam = sc._instantiate(disable_instantiation_warnings=True)
        pytest.warns(ScaperWarning, sc._generate_audio, wav_file.name,
                     jam.annotations[0])
        # soundscape with only one event will use transformer (regression test)
        sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
        sc.ref_db = -20
        sc.sr = SR
        # background
        sc.add_background(
            label=('const', 'park'),
            source_file=('const',
                         'tests/data/audio/background/park/'
                         '268903__yonts__city-park-tel-aviv-israel.wav'),
            source_time=('const', 0))
        reverb = 0.2
        jam = sc._instantiate(disable_instantiation_warnings=True, reverb=reverb)
        sc._generate_audio(wav_file.name, jam.annotations[0], reverb=reverb)
        # validate audio
        wav, sr = soundfile.read(wav_file.name, always_2d=True)
        regwav, sr = soundfile.read(REG_BGONLY_WAV_PATH, always_2d=True)
        # TODO: Add multi-channel regression data.
        if N_CHANNELS == 1:
            assert np.allclose(wav, regwav, atol=atol, rtol=rtol)
def create_scaper_scene_without_random_seed():
    """Build a Scaper spec with one background and three foreground events.

    The event parameters deliberately mix constant and random
    distributions, and no random seed is set, so repeated
    instantiations of the returned Scaper differ from run to run.
    """
    sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
    sc.ref_db = -50
    sc.sr = 44100
    # background
    sc.add_background(
        label=('const', 'park'),
        source_file=(
            'const',
            'tests/data/audio/background/park/'
            '268903__yonts__city-park-tel-aviv-israel.wav'),
        source_time=('const', 0))
    # foreground events
    sc.add_event(
        label=('const', 'siren'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'siren/69-Siren-1.wav'),
        source_time=('uniform', 0, 5),
        event_time=('normal', 5, 1),
        event_duration=('truncnorm', 5, 1, 4, 6),
        snr=('const', 5),
        pitch_shift=None,
        time_stretch=None)
    sc.add_event(
        label=('const', 'car_horn'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'car_horn/17-CAR-Rolls-Royce-Horn.wav'),
        source_time=('const', 0),
        event_time=('const', 5),
        event_duration=('truncnorm', 3, 1, 1, 10),
        snr=('uniform', 10, 20),
        pitch_shift=('uniform', -1, 1),
        # was `(None)`: redundant parentheses, same value; now consistent
        # with the other events
        time_stretch=None)
    sc.add_event(
        label=('const', 'human_voice'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'human_voice/42-Human-Vocal-Voice-taxi-2_edit.wav'),
        source_time=('const', 0),
        event_time=('const', 7),
        event_duration=('const', 2),
        snr=('const', 10),
        pitch_shift=('uniform', -1, 1),
        time_stretch=('uniform', .8, 1.2))
    return sc
def _test_generate_isolated_events(SR, isolated_events_path=None, atol=1e-4, rtol=1e-8):
    """Generate a soundscape with per-event stems and validate them.

    Checks that each isolated stem exists at the expected path, matches
    what the annotation sandbox records, and that the stems sum back to
    the full mix. Finally verifies that combining isolated-event saving
    with reverb raises a ScaperWarning.
    """
    sc = create_scaper_scene_without_random_seed()

    @contextmanager
    def _delete_files(mix_file, directory):
        # Best-effort cleanup of the mix file and the stems directory.
        yield
        try:
            shutil.rmtree(directory)
            os.remove(mix_file)
        except OSError:
            # narrowed from a bare `except:` — only ignore filesystem errors
            pass

    wav_file = 'tests/mix.wav'
    if isolated_events_path is None:
        isolated_events_path = 'tests/mix_events'

    with _delete_files(wav_file, isolated_events_path):
        jam = sc._instantiate(disable_instantiation_warnings=True)
        sc._generate_audio(wav_file, jam.annotations[0], save_isolated_events=True,
                           isolated_events_path=isolated_events_path)

        ann = jam.annotations.search(namespace='scaper')[0]
        soundscape_audio, _ = soundfile.read(ann.sandbox.scaper.soundscape_audio_path)
        isolated_event_audio_paths = ann.sandbox.scaper.isolated_events_audio_path
        isolated_audio = []
        role_counter = {
            'background': 0,
            'foreground': 0
        }

        for event_spec, event_audio_path in zip(ann, isolated_event_audio_paths):
            # event_spec contains the event description, label, etc;
            # event_audio_path points at the rendered stem on disk.
            # Stem filenames encode role, per-role index and label.
            look_for = '{:s}{:d}_{:s}.wav'.format(
                event_spec.value['role'],
                role_counter[event_spec.value['role']],
                event_spec.value['label']
            )
            expected_path = os.path.join(isolated_events_path, look_for)

            # the expected file must exist, and so must the sandbox path
            assert os.path.exists(expected_path)
            assert os.path.exists(event_audio_path)

            # both must contain the same audio
            _isolated_expected_audio, sr = soundfile.read(expected_path)
            _isolated_sandbox_audio, sr = soundfile.read(event_audio_path)
            assert np.allclose(_isolated_sandbox_audio, _isolated_expected_audio)

            # the sandbox filename must match the expected naming scheme
            assert look_for == os.path.basename(event_audio_path)

            # increment for the next role
            role_counter[event_spec.value['role']] += 1
            isolated_audio.append(_isolated_sandbox_audio)

        # the sum of the isolated stems should reconstruct the soundscape
        assert np.allclose(sum(isolated_audio), soundscape_audio, atol=1e-8, rtol=1e-8)

        jam = sc._instantiate(disable_instantiation_warnings=True)
        # save_isolated_events=True combined with reverb raises a warning
        pytest.warns(ScaperWarning, sc._generate_audio, wav_file,
                     jam.annotations[0], save_isolated_events=True, reverb=.5)
def test_generate_isolated_events():
    """Run the isolated-events round-trip at several sample rates."""
    cases = [(16000, None), (22050, 'tests/mix_events'), (44100, None)]
    for sample_rate, events_dir in cases:
        # repeat to exercise different random instantiations
        for _ in range(10):
            _test_generate_isolated_events(sample_rate, events_dir)
def test_generate():
    """Regression-test sc.generate() at every supported sample rate."""
    for sr in SAMPLE_RATES:
        wav_path, jam_path, txt_path = TEST_PATHS[sr]['REG']
        _test_generate(sr, wav_path, jam_path, txt_path)
def _make_test_generate_scaper(SR):
    # Build the fully deterministic (all-'const') soundscape spec used by
    # the sc.generate() regression tests: a park background plus a siren,
    # a pitch-shifted car horn, and a time-stretched human voice. The
    # values here must match the stored regression fixtures exactly.
    sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
    sc.ref_db = -50
    sc.sr = SR
    # background
    sc.add_background(
        label=('const', 'park'),
        source_file=(
            'const',
            'tests/data/audio/background/park/'
            '268903__yonts__city-park-tel-aviv-israel.wav'),
        source_time=('const', 0))
    # foreground events
    sc.add_event(
        label=('const', 'siren'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'siren/69-Siren-1.wav'),
        source_time=('const', 5),
        event_time=('const', 2),
        event_duration=('const', 5),
        snr=('const', 5),
        pitch_shift=None,
        time_stretch=None)
    sc.add_event(
        label=('const', 'car_horn'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'car_horn/17-CAR-Rolls-Royce-Horn.wav'),
        source_time=('const', 0),
        event_time=('const', 5),
        event_duration=('const', 2),
        snr=('const', 20),
        pitch_shift=('const', 1),
        time_stretch=None)
    sc.add_event(
        label=('const', 'human_voice'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'human_voice/42-Human-Vocal-Voice-taxi-2_edit.wav'),
        source_time=('const', 0),
        event_time=('const', 7),
        event_duration=('const', 2),
        snr=('const', 10),
        pitch_shift=None,
        time_stretch=('const', 1.2))
    return sc
def _test_generate(SR, REG_WAV_PATH, REG_JAM_PATH, REG_TXT_PATH, atol=1e-4, rtol=1e-8):
    # Render the deterministic regression soundscape at sample rate SR and
    # compare the generated wav/jams/txt output against the stored fixtures.
    sc = _make_test_generate_scaper(SR)

    tmpfiles = []
    with _close_temp_files(tmpfiles):
        wav_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        jam_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        txt_file = tempfile.NamedTemporaryFile(suffix='.txt', delete=True)
        tmpfiles.append(wav_file)
        tmpfiles.append(jam_file)
        tmpfiles.append(txt_file)

        sc.generate(wav_file.name, jam_file.name, txt_path=txt_file.name,
                    disable_instantiation_warnings=True)

        # validate audio against the stored regression wav
        wav, sr = soundfile.read(wav_file.name)
        regwav, sr = soundfile.read(REG_WAV_PATH)
        assert np.allclose(wav, regwav, atol=atol, rtol=rtol)

        # validate jams; output paths differ per run, so exclude them
        jam = jams.load(jam_file.name)
        regjam = jams.load(REG_JAM_PATH)
        sandbox_exclude = ['audio_path', 'jams_path', 'txt_path']
        _compare_scaper_jams(
            jam, regjam,
            exclude_additional_scaper_sandbox_keys=sandbox_exclude)

        # validate txt
        _compare_txt_annotation(txt_file.name, REG_TXT_PATH)

        # reverb value must be in (0, 1) range
        for reverb in [-1, 2]:
            pytest.raises(ScaperError, sc.generate, wav_file.name,
                          jam_file.name, reverb=reverb,
                          disable_instantiation_warnings=True)
def test_generate_return_api():
    """Regression-test the in-memory return values of sc.generate()."""
    for sr in SAMPLE_RATES:
        wav_path, jam_path, txt_path = TEST_PATHS[sr]['REG']
        _test_generate_return_api(sr, wav_path, jam_path, txt_path)
def _test_generate_return_api(SR, REG_WAV_PATH, REG_JAM_PATH, REG_TXT_PATH,
                              atol=1e-4, rtol=1e-8):
    """Validate everything sc.generate() returns in memory.

    The returned audio, jams, annotation list and per-event stems must
    match both what was written to disk and the stored regression
    fixtures. (Commented-out debug prints from earlier debugging were
    removed.)
    """
    sc = _make_test_generate_scaper(SR)

    tmpfiles = []
    with _close_temp_files(tmpfiles):
        wav_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        jam_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        txt_file = tempfile.NamedTemporaryFile(suffix='.txt', delete=True)
        tmpfiles.append(wav_file)
        tmpfiles.append(jam_file)
        tmpfiles.append(txt_file)

        with backports.tempfile.TemporaryDirectory() as isolated_events_path:
            saved_events_path = os.path.join(isolated_events_path, 'original')
            (soundscape_audio, soundscape_jam, annotation_list, event_audio_list) = \
                sc.generate(audio_path=wav_file.name,
                            jams_path=jam_file.name,
                            txt_path=txt_file.name,
                            save_isolated_events=True,
                            isolated_events_path=saved_events_path,
                            disable_instantiation_warnings=True)

            # the returned audio must match both the written file and the
            # stored regression wav
            wav, sr = soundfile.read(wav_file.name, always_2d=True)
            regwav, sr = soundfile.read(REG_WAV_PATH, always_2d=True)
            assert np.allclose(soundscape_audio, wav, atol=atol, rtol=rtol)
            assert np.allclose(soundscape_audio, regwav, atol=atol, rtol=rtol)

            # the returned jams must match the written one exactly, and the
            # regression jams up to run-specific sandbox paths
            jam = jams.load(jam_file.name)
            regjam = jams.load(REG_JAM_PATH)
            sandbox_exclude = [
                'isolated_events_path', 'save_isolated_events', 'jams_path',
                'txt_path', 'audio_path']
            _compare_scaper_jams(soundscape_jam, jam)
            _compare_scaper_jams(
                soundscape_jam,
                regjam,
                exclude_additional_scaper_sandbox_keys=sandbox_exclude)

            # the returned annotation list must match the written/stored txt
            txt_data = _load_txt_annotation(txt_file.name)
            reg_txt_data = _load_txt_annotation(REG_TXT_PATH)
            _compare_txt_annotation(annotation_list, txt_data)
            _compare_txt_annotation(annotation_list, reg_txt_data)

            # the returned event audio must match the saved stems on disk
            saved_event_files = [
                os.path.join(saved_events_path, x)
                for x in sorted(os.listdir(saved_events_path))
            ]
            orig_event_audio = [soundfile.read(x, always_2d=True)[0] for x in saved_event_files]
            for return_event, orig_event in zip(event_audio_list, orig_event_audio):
                assert np.allclose(return_event, orig_event, atol=1e-8, rtol=rtol)
def test_scaper_off_by_one_with_jams():
    # this file broke in Scaper 1.3.3 and before as the duration
    # of the generated audio was incorrect. it was addressed by PR #88.
    # using it to test if it will ever break again
    jam_file = 'tests/data/regression/scaper_133_off_by_one_regression_test.jams'
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        gen_wav_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        # register the temp file so _close_temp_files actually closes it;
        # previously it was created but never appended, leaving the
        # context manager with nothing to clean up
        tmpfiles.append(gen_wav_file)
        scaper.generate_from_jams(jam_file, gen_wav_file.name)
        gen_wav, sr = soundfile.read(gen_wav_file.name)
        # 10 s at 44.1 kHz, sample-exact
        assert gen_wav.shape[0] == 10 * 44100
def test_backwards_compat_for_duration():
    """JAMS files missing 'original_duration' must still regenerate (and warn)."""
    for sr in SAMPLE_RATES:
        REG_JAM_PATH = TEST_PATHS[sr]['REG'].jams

        tmpfiles = []
        with _close_temp_files(tmpfiles):
            orig_wav = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
            gen_wav = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
            jam_without_orig_duration = tempfile.NamedTemporaryFile(
                suffix='.jams', delete=True)
            # register the temp files so _close_temp_files cleans them up;
            # previously they were never appended to the list
            tmpfiles.extend([orig_wav, gen_wav, jam_without_orig_duration])

            # strip original_duration from a copy of the regression jams
            jam = jams.load(REG_JAM_PATH)
            scaper.generate_from_jams(REG_JAM_PATH, orig_wav.name)
            ann = jam.annotations[0]
            ann.sandbox.scaper.pop('original_duration')
            jam.save(jam_without_orig_duration.name)

            # regeneration must still produce identical audio...
            scaper.generate_from_jams(
                jam_without_orig_duration.name, gen_wav.name)
            orig_audio, sr = soundfile.read(orig_wav.name)
            gen_audio, sr = soundfile.read(gen_wav.name)
            assert np.allclose(orig_audio, gen_audio)

            # ...and warn about the missing key
            pytest.warns(ScaperWarning, scaper.generate_from_jams,
                         jam_without_orig_duration.name, gen_wav.name)
def _generate_soundscape_with_short_background(background_file, audio_path, jams_path, ref_db):
    """Render a 10 s soundscape whose only background is `background_file`.

    The file is copied into a scratch copy of SHORT_BG_PATH as
    noise/noise.wav so Scaper picks it up under the 'noise' label, then a
    deterministic (random_state=0) soundscape with no fades is generated.
    """
    with backports.tempfile.TemporaryDirectory() as scratch:
        bg_root = os.path.join(scratch, 'audio')
        shutil.copytree(SHORT_BG_PATH, bg_root)
        noise_dest = os.path.join(bg_root, 'noise', 'noise.wav')
        shutil.copyfile(background_file, noise_dest)

        sc = scaper.Scaper(10, FG_PATH, bg_root, random_state=0)
        sc.sr = 16000
        sc.ref_db = ref_db
        sc.fade_in_len = 0
        sc.fade_out_len = 0
        sc.add_background(
            label=('const', 'noise'),
            source_file=('const', noise_dest),
            source_time=('const', 0)
        )
        sc.generate(audio_path, jams_path)
def test_scaper_generate_with_fade():
    # Test scaper generate with different fade lengths
    # Works by using a fade of 0 at first then comparing
    # samples of the event using different fades.
    fade_lens = [0, 0.01, 0.05, 0.1]
    outputs = {}
    for fade_in in fade_lens:
        for fade_out in fade_lens:
            # random_state=0 makes every run draw identical events, so the
            # only difference between outputs is the fade envelope
            sc = scaper.Scaper(0.2, FG_PATH, BG_PATH, random_state=0)
            sc.sr = 16000
            sc.ref_db = -20
            sc.fade_in_len = fade_in
            sc.fade_out_len = fade_out
            sc.add_event(
                label=('const', 'siren'),
                source_file=('choose', []),
                source_time=('uniform', 0, 10),
                event_time=('const', 0),
                event_duration=('const', 0.2),
                snr=('uniform', -5, 5),
                pitch_shift=('uniform', -1, 1),
                time_stretch=('uniform', 0.8, 1.2))
            _, _, _, event_audio_list = sc.generate()
            outputs[(fade_in, fade_out)] = event_audio_list[0]

    no_fade = outputs[(0, 0)]
    for key, val in outputs.items():
        fade_in, fade_out = key
        fade_in_samples, fade_out_samples = (
            int(fade_in * sc.sr), int(fade_out * sc.sr)
        )
        # Compare first fade_in_samples with no_fade: dividing the faded
        # signal by the unfaded one recovers the fade window itself
        if fade_in_samples > 0:
            ratio = val[:fade_in_samples] / no_fade[:fade_in_samples]
            fade_in_window = np.sin(
                np.linspace(0, np.pi / 2, fade_in_samples))[..., None]
            # Ignore points where the signal has no energy (0/0 -> NaN)
            mask = np.invert(np.isnan(ratio))
            assert np.allclose(ratio[mask], fade_in_window[mask])
        if fade_out_samples > 0:
            ratio = val[-fade_out_samples:] / no_fade[-fade_out_samples:]
            fade_out_window = np.sin(
                np.linspace(np.pi / 2, 0, fade_out_samples))[..., None]
            # Ignore points where the signal has no energy
            mask = np.invert(np.isnan(ratio))
            assert np.allclose(ratio[mask], fade_out_window[mask])
def test_scaper_with_short_background():
    # A background shorter than the soundscape should be looped (tiled) to
    # fill the full duration; verify by comparing against a manually tiled
    # version of the same file.
    SHORT_BG_FILE = os.path.join(
        SHORT_BG_PATH, 'noise', 'noise-free-sound-0145.wav')
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        jam_file = tempfile.NamedTemporaryFile(suffix='.jams', delete=True)
        tiled_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        wav1_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        wav2_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        tmpfiles.append(wav1_file)
        tmpfiles.append(wav2_file)
        tmpfiles.append(tiled_file)
        tmpfiles.append(jam_file)

        _generate_soundscape_with_short_background(
            SHORT_BG_FILE, wav1_file.name, jam_file.name, ref_db=-40)

        # what it should be is the file tiled with itself and then cut to 10s
        # write it to disk and then use it in a new scaper object
        source_audio, sr = soundfile.read(SHORT_BG_FILE)
        duration_samples = int(10 * sr)
        # tile the audio to what we expect (enough repeats to cover 10 s)
        tiled_audio = np.tile(
            source_audio, 1 + int(duration_samples / source_audio.shape[0]))
        # cut it to what we want
        tiled_audio = tiled_audio[:duration_samples]
        # save it somewhere to be used in a new Scaper object
        soundfile.write(tiled_file.name, tiled_audio, sr)
        _generate_soundscape_with_short_background(
            tiled_file.name, wav2_file.name, jam_file.name, ref_db=-40)

        # compare what is generated with a short bg compared to a long bg
        # should be the same
        audio1, sr = soundfile.read(wav1_file.name)
        audio2, sr = soundfile.read(wav2_file.name)
        assert np.allclose(audio1, audio2)
def test_clipping_and_normalization():
    # Exercise sc.generate()'s clipping detection, automatic peak
    # normalization, and the exact sequence of warnings each mode emits.
    for sr in [16000, 44100]:
        # a +20 dB siren over a -20 dB ref is loud enough to clip
        sc = scaper.Scaper(10, FG_PATH, BG_PATH, random_state=0)
        sc.sr = sr
        sc.ref_db = -20
        sc.add_event(
            label=('const', 'siren'),
            source_file=('choose', []),
            source_time=('uniform', 0, 10),
            event_time=('const', 0),
            event_duration=('const', 10),
            snr=('const', 20),
            pitch_shift=None,
            time_stretch=None)

        # extreme clipping: +40 dB SNR pushes the normalization scale
        # factor below 0.05, which triggers an additional warning
        sc_extreme = scaper.Scaper(10, FG_PATH, BG_PATH, random_state=0)
        sc_extreme.sr = 16000
        sc_extreme.ref_db = -20
        sc_extreme.add_event(
            label=('const', 'siren'),
            source_file=('choose', []),
            source_time=('uniform', 0, 10),
            event_time=('const', 0),
            event_duration=('const', 10),
            snr=('const', 40),
            pitch_shift=None,
            time_stretch=None)

        tmpfiles = []
        with _close_temp_files(tmpfiles):
            # Make sure a warning is raised when there's clipping
            # NOTE(review): the NamedTemporaryFile object (not .name) is
            # passed to generate() throughout this test — presumably
            # generate() accepts file objects; confirm against its signature.
            audio_path = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
            tmpfiles.append(audio_path)
            pytest.warns(ScaperWarning, sc.generate, audio_path, fix_clipping=False)

            # Make sure a second warning is raised if we're fixing the clipping
            audio_path2 = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
            tmpfiles.append(audio_path2)
            with pytest.warns(None) as record:
                sc.generate(audio_path2, fix_clipping=True)
            assert len(record) == 2
            assert str(record[0].message) == 'Soundscape audio is clipping!'
            assert 'Peak normalization applied to fix clipping' in str(record[1].message)

            # Make sure we get a third warning when the scaling factor is < 0.05
            audio_path3 = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
            tmpfiles.append(audio_path3)
            with pytest.warns(None) as record:
                sc_extreme.generate(audio_path3, fix_clipping=True)
            assert len(record) == 3
            assert str(record[0].message) == 'Soundscape audio is clipping!'
            assert 'Peak normalization applied to fix clipping' in str(record[1].message)
            assert 'Scale factor for peak normalization is extreme' in str(record[2].message)

            # PEAK NORMALIZATION TESTS
            # Make sure a warning is raised when there's clipping
            audio_path4 = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
            tmpfiles.append(audio_path4)
            pytest.warns(ScaperWarning, sc.generate, audio_path4,
                         fix_clipping=False, peak_normalization=True)

            # Make sure a second warning is NOT raised if we're peak normalizing by default
            audio_path5 = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
            tmpfiles.append(audio_path5)
            with pytest.warns(None) as record:
                sc.generate(audio_path5, fix_clipping=False, peak_normalization=True)
            assert len(record) == 1
            assert str(record[0].message) == 'Soundscape audio is clipping!'

            # Make sure we get two warnings when we're normalizing but not fixing
            # clipping explicitly and the scaling factor is < 0.05
            audio_path6 = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
            tmpfiles.append(audio_path6)
            with pytest.warns(None) as record:
                sc_extreme.generate(audio_path6, fix_clipping=False, peak_normalization=True)
            assert len(record) == 2
            assert str(record[0].message) == 'Soundscape audio is clipping!'
            assert 'Scale factor for peak normalization is extreme' in str(record[1].message)
| 99,295 | 39.661753 | 119 | py |
scaper | scaper-master/tests/profile_speed.py | """
This is a profiling script to check the performance of
Scaper. It generates 100 soundscapes in sequence
(no parallelization). Running it on 2019 Macbook Pro
currently takes 158.68 seconds (02:38).
"""
import scaper
import numpy as np
import tempfile
import os
import tqdm
import zipfile
import subprocess
import time
import csv
import platform
import psutil
import datetime
import math
import multiprocessing
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--quick', action='store_true')
args = parser.parse_args()
cmd_line = ' '.join(sys.argv)
cmd_line = 'python ' + cmd_line
# Download the audio automatically
FIX_DIR = 'tests/data/'
QUICK_PITCH_TIME = args.quick
def get_git_commit_hash():
    """Return the current git HEAD commit hash as a string.

    Runs ``git rev-parse HEAD`` in the current working directory, so this
    must be called from inside the repository checkout.
    """
    # subprocess.run is the modern replacement for Popen + communicate()
    result = subprocess.run(
        ['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, check=False)
    return result.stdout.strip().decode('utf-8')
def convert_size(size_bytes):
    """Format a byte count as a human-readable string, e.g. '1.5 MB'.

    Zero is special-cased as '0B'; otherwise the value is scaled by the
    largest power of 1024 it contains and rounded to two decimals.
    """
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "{} {}".format(scaled, units[exponent])
# Profile run: generate n_soundscapes soundscapes sequentially and append
# timing plus machine metadata as one row of tests/profile_results.csv.
with tempfile.TemporaryDirectory() as tmpdir:
    path_to_audio = os.path.join(FIX_DIR, 'audio/')

    # OUTPUT FOLDER (scratch; deleted when the with-block exits)
    outfolder = tmpdir

    # SCAPER SETTINGS
    fg_folder = os.path.join(path_to_audio, 'foreground')
    bg_folder = os.path.join(path_to_audio, 'background')

    # If we parallelize this script, change this accordingly
    n_workers = 1
    n_soundscapes = 100
    ref_db = -50
    duration = 10.0

    min_events = 1
    max_events = 9

    event_time_dist = 'truncnorm'
    event_time_mean = 5.0
    event_time_std = 2.0
    event_time_min = 0.0
    event_time_max = 10.0

    source_time_dist = 'const'
    source_time = 0.0

    event_duration_dist = 'uniform'
    event_duration_min = 0.5
    event_duration_max = 4.0

    snr_dist = 'uniform'
    snr_min = 6
    snr_max = 30

    pitch_dist = 'uniform'
    pitch_min = -3.0
    pitch_max = 3.0

    time_stretch_dist = 'uniform'
    time_stretch_min = 0.8
    time_stretch_max = 1.2

    # generate a random seed for this Scaper object
    seed = 123

    # create a scaper that will be used below
    sc = scaper.Scaper(duration, fg_folder, bg_folder, random_state=seed)
    sc.protected_labels = []
    sc.ref_db = ref_db

    # Generate 100 soundscapes using a truncated normal distribution of start times
    start_time = time.time()
    for n in tqdm.trange(n_soundscapes):
        print('Generating soundscape: {:d}/{:d}'.format(n+1, n_soundscapes))

        # reset the event specifications for foreground and background at the
        # beginning of each loop to clear all previously added events
        sc.reset_bg_event_spec()
        sc.reset_fg_event_spec()

        # add background
        sc.add_background(label=('choose', []),
                          source_file=('choose', []),
                          source_time=('const', 0))
        sc.fade_in_len = 0.01
        sc.fade_out_len = 0.01

        # add random number of foreground events
        n_events = np.random.randint(min_events, max_events+1)
        for _ in range(n_events):
            sc.add_event(label=('choose', []),
                         source_file=('choose', []),
                         source_time=(source_time_dist, source_time),
                         event_time=(event_time_dist, event_time_mean, event_time_std, event_time_min, event_time_max),
                         event_duration=(event_duration_dist, event_duration_min, event_duration_max),
                         snr=(snr_dist, snr_min, snr_max),
                         pitch_shift=(pitch_dist, pitch_min, pitch_max),
                         time_stretch=(time_stretch_dist, time_stretch_min, time_stretch_max)
                         )

        # generate
        audiofile = os.path.join(outfolder, "soundscape_unimodal{:d}.wav".format(n))
        jamsfile = os.path.join(outfolder, "soundscape_unimodal{:d}.jams".format(n))
        txtfile = os.path.join(outfolder, "soundscape_unimodal{:d}.txt".format(n))

        sc.generate(audiofile, jamsfile,
                    allow_repeated_label=True,
                    allow_repeated_source=True,
                    reverb=0.1,
                    disable_sox_warnings=True,
                    quick_pitch_time=QUICK_PITCH_TIME,
                    no_audio=False,
                    txt_path=txtfile)

    time_taken = time.time() - start_time

    # Record this run (command line, environment, timing) as one CSV row.
    uname = platform.uname()
    row = {
        'command': cmd_line,
        'time_of_run': str(datetime.datetime.now()),
        'scaper_version': scaper.__version__,
        'python_version': platform.python_version(),
        'system': uname.system,
        'machine': uname.machine,
        'processor': uname.processor,
        'n_cpu': multiprocessing.cpu_count(),
        'n_workers': n_workers,
        'memory': convert_size(psutil.virtual_memory().total),
        'n_soundscapes': n_soundscapes,
        'execution_time': np.round(time_taken, 4),
        'git_commit_hash': get_git_commit_hash(),
    }
    fieldnames = list(row.keys())
    results_path = 'tests/profile_results.csv'
    # only write the CSV header on first creation of the results file
    write_header = not os.path.exists(results_path)

    with open(results_path, 'a') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if write_header:
            writer.writeheader()
        writer.writerow(row)

    # Pretty-print the accumulated results table.
    # NOTE(review): the format string has 12 placeholders but each row has
    # 13 fields, so the last column (git_commit_hash) is silently dropped
    # from the printout — confirm whether that is intentional.
    with open(results_path, 'r') as f:
        csv_f = csv.reader(f)
        for row in csv_f:
            print('{:<30} {:<15} {:<15} {:<10} {:<10} {:<10} {:<5} {:<10} {:<10} {:<15} {:<10} {:}'.format(*row))
| 5,749 | 30.944444 | 122 | py |
scaper | scaper-master/tests/test_util.py | # CREATED: 10/15/16 7:52 PM by Justin Salamon <justin.salamon@nyu.edu>
'''
Tests for functions in util.py
'''
from scaper.util import _close_temp_files
from scaper.util import _set_temp_logging_level
from scaper.util import _validate_folder_path
from scaper.util import _get_sorted_files
from scaper.util import _populate_label_list
from scaper.util import _sample_trunc_norm, _sample_choose, _sample_choose_weighted
from scaper.util import max_polyphony
from scaper.util import polyphony_gini
from scaper.util import is_real_number, is_real_array
from scaper.util import _check_random_state
from scaper.scaper_exceptions import ScaperError
from scaper.scaper_warnings import ScaperWarning
import tempfile
import os
import logging
import pytest
import shutil
import numpy as np
from scipy.stats import truncnorm
import jams
from scaper.core import EventSpec
from scaper import Scaper
# FIXTURES
BG_PATH = 'tests/data/audio/background/'
FG_PATH = 'tests/data/audio/foreground/'
FG_PATH_HUMANVOICE = 'tests/data/audio/foreground/human_voice'
FG_LABEL_LIST = ['car_horn', 'human_voice', 'siren']
HUMANVOICE_FILES = (
[os.path.join(FG_PATH_HUMANVOICE,
'42-Human-Vocal-Voice-all-aboard_edit.wav'),
os.path.join(FG_PATH_HUMANVOICE, '42-Human-Vocal-Voice-taxi-1_edit.wav'),
os.path.join(FG_PATH_HUMANVOICE, '42-Human-Vocal-Voice-taxi-2_edit.wav')])
SIREN_FILE = os.path.join(FG_PATH, 'siren', '69-Siren-1.wav')
def test_close_temp_files():
    '''
    Create a bunch of temp files and then make sure they've been closed and
    deleted.
    '''
    # With delete=True: files must be closed and gone after the context exits
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        for _ in range(5):
            tmpfiles.append(
                tempfile.NamedTemporaryFile(suffix='.wav', delete=True))
    for tf in tmpfiles:
        assert tf.file.closed
        assert not os.path.isfile(tf.name)

    # With delete=False: the context manager itself must remove the files
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        for _ in range(5):
            tmpfiles.append(
                tempfile.NamedTemporaryFile(suffix='.wav', delete=False))
    for tf in tmpfiles:
        assert tf.file.closed
        assert not os.path.isfile(tf.name)

    # with an exception before exiting: cleanup must still happen and the
    # exception must propagate out of the context manager
    try:
        tmpfiles = []
        with _close_temp_files(tmpfiles):
            tmpfiles.append(
                tempfile.NamedTemporaryFile(suffix='.wav', delete=True))
            raise ScaperError
    except ScaperError:
        for tf in tmpfiles:
            assert tf.file.closed
            assert not os.path.isfile(tf.name)
    else:
        assert False, 'Exception was not reraised.'
def test_set_temp_logging_level():
    """The root logger level must be overridden inside the context and restored after."""
    root = logging.getLogger()
    root.setLevel('DEBUG')
    # inside the context the temporary level is in effect
    with _set_temp_logging_level('CRITICAL'):
        assert logging.getLevelName(root.level) == 'CRITICAL'
    # and the previous level comes back afterwards
    assert logging.getLevelName(root.level) == 'DEBUG'
def test_get_sorted_files():
    """Files should come back in the expected (sorted) order."""
    observed = _get_sorted_files(FG_PATH_HUMANVOICE)
    assert observed == HUMANVOICE_FILES
def test_validate_folder_path():
    """Invalid folder paths must raise; valid ones must pass silently."""
    # a nonexistent folder raises ScaperError
    pytest.raises(ScaperError, _validate_folder_path,
                  '/path/to/invalid/folder/')
    # a real (temporary) folder validates without raising
    scratch = tempfile.mkdtemp()
    _validate_folder_path(scratch)
    shutil.rmtree(scratch)
def test_populate_label_list():
    """Folder names under the given path should be appended to the list."""
    labels = []
    _populate_label_list(FG_PATH, labels)
    # order is not guaranteed, so compare sorted
    assert sorted(labels) == sorted(FG_LABEL_LIST)
def test_check_random_state():
    """_check_random_state must accept None, ints and RandomState objects."""
    expected_type = type(np.random.RandomState(10))
    # None and ints are converted to a RandomState
    assert type(_check_random_state(None)) == expected_type
    assert type(_check_random_state(10)) == expected_type
    # an existing RandomState is passed through unchanged in type
    assert type(_check_random_state(np.random.RandomState(10))) == expected_type
    # anything else raises
    pytest.raises(ValueError, _check_random_state, 'random')
def test_sample_choose():
    """'choose' over a list with duplicate entries must emit a ScaperWarning."""
    state = _check_random_state(0)
    pytest.warns(ScaperWarning, _sample_choose, [0, 1, 2, 2, 2], state)
def test_sample_choose_weighted():
    """Weighted choice must honor the supplied probabilities."""
    rng = _check_random_state(0)
    # degenerate distributions always return the certain item
    assert _sample_choose_weighted([0, 1, 2], [1, 0, 0], rng) == 0
    assert _sample_choose_weighted([0, 1, 2], [0, 1, 0], rng) == 1
    assert _sample_choose_weighted([0, 1, 2], [0, 0, 1], rng) == 2
    # empirical frequencies should approximate a 0.3 / 0.7 split
    draws = np.asarray(
        [_sample_choose_weighted([0, 1], [0.3, 0.7], rng)
         for _ in range(100000)])
    assert np.allclose((draws == 0).sum() / len(draws), 0.3, atol=1e-2)
    assert np.allclose((draws == 1).sum() / len(draws), 0.7, atol=1e-2)
def test_sample_trunc_norm():
    '''
    Should return values from a truncated normal distribution.
    '''
    rng = _check_random_state(0)
    # sample values from a distribution
    mu, sigma, trunc_min, trunc_max = 2, 1, 0, 5
    x = [_sample_trunc_norm(mu, sigma, trunc_min, trunc_max, random_state=rng) for _ in range(100000)]
    x = np.asarray(x)

    # simple check: values must be within truncated bounds
    assert (x >= trunc_min).all() and (x <= trunc_max).all()

    # trickier check: values must approximate distribution's PDF
    hist, bins = np.histogram(x, bins=np.arange(0, 10.1, 0.2), density=True)
    xticks = bins[:-1] + 0.1  # bin centers (bin width is 0.2)
    # scipy's truncnorm expresses the bounds in standard-normal units
    a, b = (trunc_min - mu) / float(sigma), (trunc_max - mu) / float(sigma)
    trunc_closed = truncnorm.pdf(xticks, a, b, mu, sigma)
    assert np.allclose(hist, trunc_closed, atol=0.015)
def test_max_polyphony():
    '''
    Test the computation of polyphony of a scaper soundscape instantiation.
    '''
    def __create_annotation_with_overlapping_events(n_events):
        # Events start every 0.5 s and each lasts 10 s, so all n_events
        # overlap simultaneously at some point -> max polyphony == n_events.
        ann = jams.Annotation(namespace='scaper')
        ann.duration = n_events / 2. + 10
        for ind in range(n_events):
            instantiated_event = EventSpec(label='siren',
                                           source_file='/the/source/file.wav',
                                           source_time=0,
                                           event_time=ind / 2.,
                                           event_duration=10,
                                           snr=0,
                                           role='foreground',
                                           pitch_shift=None,
                                           time_stretch=None)
            ann.append(time=ind / 2.,
                       duration=10,
                       value=instantiated_event._asdict(),
                       confidence=1.0)
        return ann

    def __create_annotation_without_overlapping_events(n_events):
        # Events start every 10 s and each lasts 5 s, so they never
        # overlap -> max polyphony == 1 regardless of n_events.
        ann = jams.Annotation(namespace='scaper')
        ann.duration = n_events * 10
        for ind in range(n_events):
            instantiated_event = EventSpec(label='siren',
                                           source_file='/the/source/file.wav',
                                           source_time=0,
                                           event_time=ind * 10,
                                           event_duration=5,
                                           snr=0,
                                           role='foreground',
                                           pitch_shift=None,
                                           time_stretch=None)
            ann.append(time=ind * 10,
                       duration=5,
                       value=instantiated_event._asdict(),
                       confidence=1.0)
        return ann

    # 0 through 10 overlapping events
    for poly in range(11):
        ann = __create_annotation_with_overlapping_events(poly)
        est_poly = max_polyphony(ann)
        assert est_poly == poly

    # 1 through 10 NON-overlapping events
    for n_events in range(1, 11):
        ann = __create_annotation_without_overlapping_events(n_events)
        est_poly = max_polyphony(ann)
        assert est_poly == 1
def test_polyphony_gini():
    '''
    Test computation of polyphony gini
    '''
    # Annotation must have namespace scaper, otherwise raise error.
    # (The useless `gini = pytest.raises(...)` bindings were removed: the
    # ExceptionInfo return value was never inspected.)
    ann = jams.Annotation('tag_open', duration=10)
    pytest.raises(ScaperError, polyphony_gini, ann)

    # Annotation without duration set should raise error
    ann = jams.Annotation('scaper', duration=None)
    pytest.raises(ScaperError, polyphony_gini, ann)

    # Annotation with no foreground events returns a gini of 0
    sc = Scaper(10.0, FG_PATH, BG_PATH)
    # add background
    sc.add_background(label=('choose', []),
                      source_file=('choose', []),
                      source_time=('const', 0))
    jam = sc._instantiate()
    ann = jam.annotations[0]
    gini = polyphony_gini(ann)
    assert gini == 0

    def __test_gini_from_event_times(event_time_list, expected_gini,
                                     hop_size=0.01):
        # Build a soundscape whose events span the given (onset, offset)
        # pairs and check the resulting gini value.
        print(event_time_list)
        # create scaper
        sc = Scaper(10.0, FG_PATH, BG_PATH)
        # add background
        sc.add_background(label=('choose', []),
                          source_file=('choose', []),
                          source_time=('const', 0))
        # add foreground events based on the event time list
        # always use siren file since it is 26 s long, so we can choose the
        # event duration flexibly
        for onset, offset in event_time_list:
            sc.add_event(label=('const', 'siren'),
                         source_file=('const', SIREN_FILE),
                         source_time=('const', 0),
                         event_time=('const', onset),
                         event_duration=('const', offset - onset),
                         snr=('uniform', 6, 30),
                         pitch_shift=('uniform', -3, 3),
                         time_stretch=None)
        jam = sc._instantiate()
        ann = jam.annotations[0]
        gini = polyphony_gini(ann, hop_size=hop_size)
        print(gini, expected_gini)
        assert np.allclose([gini], [expected_gini], atol=1e-5)

    event_time_lists = ([
        [],
        [(0, 1)],
        [(0, 5), (5, 10)],
        [(0, 10), (3, 7), (4, 6)]
    ])
    expected_ginis = [0, 0.1, 1, 0.75]

    # the gini value must be robust to the analysis hop size
    for etl, g in zip(event_time_lists, expected_ginis):
        __test_gini_from_event_times(etl, g, hop_size=0.01)
    for etl, g in zip(event_time_lists, expected_ginis):
        __test_gini_from_event_times(etl, g, hop_size=1.0)
def test_is_real_number():
    """is_real_number should accept ints/floats and reject everything else."""
    not_real = [None, 1j, 'yes']
    real = [-1e12, -1, -1.0, 0, 1, 1.0, 1e12]
    # none of the non-real values qualifies
    assert not any(is_real_number(v) for v in not_real)
    # every real scalar qualifies
    assert all(is_real_number(v) for v in real)
def test_is_real_array():
    """is_real_array should accept only lists/arrays of real numbers."""
    not_real = [None, 1j, 'yes']
    real = [-1e12, -1, -1.0, 0, 1, 1.0, 1e12]
    # scalar (non-list, non-array) inputs are never real arrays
    assert not any(is_real_array(v) for v in not_real + real)
    # singleton lists follow the realness of their element
    assert not any(is_real_array([v]) for v in not_real)
    assert all(is_real_array([v]) for v in real)
| 11,535 | 30.605479 | 102 | py |
scaper | scaper-master/tests/create_regression_data.py | import os
import scaper
import jams
os.chdir('..')
# FIXTURES
# Paths to files for testing
FG_PATH = 'tests/data/audio/foreground'
BG_PATH = 'tests/data/audio/background'
ALT_FG_PATH = 'tests/data/audio_alt_path/foreground'
ALT_BG_PATH = 'tests/data/audio_alt_path/background'
REG_NAME = 'soundscape_20200501'
# REG_NAME = 'soundscape_20190326_22050'
# REG_WAV_PATH = 'tests/data/regression/soundscape_20170928.wav'
# REG_JAM_PATH = 'tests/data/regression/soundscape_20170928.jams'
# REG_TXT_PATH = 'tests/data/regression/soundscape_20170928.txt'
REG_BGONLY_NAME = 'bgonly_soundscape_20200501'
# REG_BGONLY_NAME = 'bgonly_soundscape_20190326_22050'
# REG_BGONLY_WAV_PATH = 'tests/data/regression/bgonly_soundscape_20170928.wav'
# REG_BGONLY_JAM_PATH = 'tests/data/regression/bgonly_soundscape_20170928.jams'
# REG_BGONLY_TXT_PATH = 'tests/data/regression/bgonly_soundscape_20170928.txt'
REG_REVERB_NAME = 'reverb_soundscape_20200501'
# REG_REVERB_NAME = 'reverb_soundscape_20190326_22050'
# REG_REVERB_WAV_PATH = 'tests/data/regression/reverb_soundscape_20170928.wav'
# REG_REVERB_JAM_PATH = 'tests/data/regression/reverb_soundscape_20170928.jams'
# REG_REVERB_TXT_PATH = 'tests/data/regression/reverb_soundscape_20170928.txt'
# fg and bg labels for testing
FB_LABELS = ['car_horn', 'human_voice', 'siren']
BG_LABELS = ['park', 'restaurant', 'street']
SAMPLE_RATES = [22050, 44100]
def test_names(name, rate, exts=('wav', 'jams', 'txt')):
return [os.path.join('tests/data/regression', '{}_{}.{}'.format(name, rate, ext)) for ext in exts]
# Regenerate regression fixtures once per supported sample rate. Each
# iteration prints the path constants to paste into the test suite, then
# synthesizes three soundscapes (full, reverb, background-only).
for rate in SAMPLE_RATES:
    test_names(REG_NAME, rate)
    print("==========USING BELOW FOR TESTS==============")
    VAR_NAMES_PARTIAL = ('REG', 'REG_BGONLY', 'REG_REVERB')
    FILE_BASENAMES = (REG_NAME, REG_BGONLY_NAME, REG_REVERB_NAME)
    FILE_TYPES = ('WAV', 'JAM', 'TXT')
    for var, name in zip(VAR_NAMES_PARTIAL, FILE_BASENAMES):
        for type, path in zip(FILE_TYPES, test_names(name, rate)):
            print("{}_{}_PATH = '{}'".format(var, type, path))
        print()
    print("==========USING ABOVE FOR TESTS==============")
    # Soundscape with one background and three foreground events; all
    # parameters use 'const' so the rendered output is deterministic.
    sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
    sc.ref_db = -50
    sc.sr = rate
    # background
    sc.add_background(
        label=('const', 'park'),
        source_file=(
            'const',
            'tests/data/audio/background/park/'
            '268903__yonts__city-park-tel-aviv-israel.wav'),
        source_time=('const', 0))
    # foreground events
    sc.add_event(
        label=('const', 'siren'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'siren/69-Siren-1.wav'),
        source_time=('const', 5),
        event_time=('const', 2),
        event_duration=('const', 5),
        snr=('const', 5),
        pitch_shift=None,
        time_stretch=None)
    sc.add_event(
        label=('const', 'car_horn'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'car_horn/17-CAR-Rolls-Royce-Horn.wav'),
        source_time=('const', 0),
        event_time=('const', 5),
        event_duration=('const', 2),
        snr=('const', 20),
        pitch_shift=('const', 1),
        time_stretch=None)
    sc.add_event(
        label=('const', 'human_voice'),
        source_file=('const',
                     'tests/data/audio/foreground/'
                     'human_voice/42-Human-Vocal-Voice-taxi-2_edit.wav'),
        source_time=('const', 0),
        event_time=('const', 7),
        event_duration=('const', 2),
        snr=('const', 10),
        pitch_shift=None,
        time_stretch=('const', 1.2))
    # Render the plain regression soundscape.
    wav_file, jam_file, txt_file = test_names(REG_NAME, rate)
    sc.generate(wav_file, jam_file, txt_path=txt_file, disable_instantiation_warnings=True)
    print('Wrote:', wav_file, jam_file, txt_file)
    # Same spec rendered again, this time with reverb applied.
    wav_file, jam_file, txt_file = test_names(REG_REVERB_NAME, rate)
    sc.generate(wav_file, jam_file, txt_path=txt_file, reverb=0.2, disable_instantiation_warnings=True)
    print('Wrote:', wav_file, jam_file, txt_file)
    jams.load(jam_file)
    # soundscape with only one event will use transformer (regression test)
    sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
    sc.ref_db = -20
    sc.sr = rate
    # background
    sc.add_background(
        label=('const', 'park'),
        source_file=('const',
                     'tests/data/audio/background/park/'
                     '268903__yonts__city-park-tel-aviv-israel.wav'),
        source_time=('const', 0))
    wav_file, jam_file, txt_file = test_names(REG_BGONLY_NAME, rate)
    sc.generate(wav_file, jam_file, txt_path=txt_file, reverb=0.2, disable_instantiation_warnings=True)
    print('Wrote:', wav_file, jam_file, txt_file)
| 4,773 | 34.626866 | 103 | py |
scaper | scaper-master/tests/__init__.py | 0 | 0 | 0 | py | |
scaper | scaper-master/tests/test_audio.py | # CREATED: 5/5/17 14:36 by Justin Salamon <justin.salamon@nyu.edu>
from scaper.audio import get_integrated_lufs, match_sample_length
from scaper.audio import peak_normalize
from scaper.util import _close_temp_files
import numpy as np
import scipy.signal as sg
import os
import pytest
from scaper.scaper_exceptions import ScaperError
from pkg_resources import resource_filename
import shutil
import soundfile as sf
import tempfile
import random
# fixtures
SIREN_FILE = 'tests/data/audio/foreground/siren/69-Siren-1.wav'
CARHORN_FILE = (
'tests/data/audio/foreground/car_horn/17-CAR-Rolls-Royce-Horn.wav')
HUMANVOICE_FILE = (
'tests/data/audio/foreground/human_voice/'
'42-Human-Vocal-Voice-all-aboard_edit.wav')
DOGBARK_FILE = 'tests/data/lufs/dogbark.wav'
SIREN_LUFS_I = -23.071089944980127
CARHORN_LUFS_I = -13.66146520099299
HUMANVOICE_LUFS_I = -20.061513106500225
DOGBARK_LUFS_I = -11.1952428800271 # for x4 concatenated file
def test_get_integrated_lufs():
    """Check integrated LUFS measurements against precomputed reference values."""
    expected = {
        SIREN_FILE: SIREN_LUFS_I,
        CARHORN_FILE: CARHORN_LUFS_I,
        HUMANVOICE_FILE: HUMANVOICE_LUFS_I,
        DOGBARK_FILE: DOGBARK_LUFS_I,
    }
    # each fixture file must measure (numerically) equal to its reference
    for path, reference in expected.items():
        samples, samplerate = sf.read(path)
        measured = get_integrated_lufs(samples, samplerate)
        assert np.allclose(measured, reference)
def change_format_and_subtype(audio_path):
    """Rewrite the file at *audio_path* in place, switching it to a randomly
    chosen container format (WAV or FLAC) and subtype different from its
    current ones."""
    samples, sr = sf.read(audio_path)
    info = sf.info(audio_path)
    # pick a container format other than the file's current one
    candidate_formats = [fmt for fmt in ['WAV', 'FLAC'] if fmt != info.format]
    new_format = random.choice(candidate_formats)
    # restrict to PCM/float subtypes supported by the chosen format,
    # excluding the file's current subtype
    allowed = ['PCM_16', 'PCM_32', 'PCM_24', 'FLOAT', 'DOUBLE']
    candidate_subtypes = [
        s for s in sf.available_subtypes(new_format).keys() if s in allowed]
    if info.subtype in candidate_subtypes:
        candidate_subtypes.remove(info.subtype)
    new_subtype = random.choice(candidate_subtypes)
    sf.write(audio_path, samples, sr, subtype=new_subtype, format=new_format)
def test_match_sample_length():
    """Pad/trim audio files to exact sample counts and check invariants.

    For each valid target duration (in samples) both fixture files are
    copied to temp files, re-encoded to a random format/subtype, and then
    length-matched; the format/subtype must survive the operation and the
    two results must be summable. Invalid (non-positive or non-integer)
    durations must raise ScaperError.
    """
    durations_to_match = [1, 2, 5, 7, 22500, 44100, 88200, 100001]
    invalid_durations_to_match = [0, -1, .5, 1.0]
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        carhorn = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        tmpfiles.append(carhorn)
        siren = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        tmpfiles.append(siren)
        for _duration in durations_to_match:
            # fresh copies every iteration, re-encoded so several
            # format/subtype combinations get exercised across durations
            shutil.copyfile(SIREN_FILE, siren.name)
            shutil.copyfile(CARHORN_FILE, carhorn.name)
            change_format_and_subtype(siren.name)
            change_format_and_subtype(carhorn.name)
            prev_audio_info = sf.info(carhorn.name)
            match_sample_length(carhorn.name, _duration)
            carhorn_audio, _ = sf.read(carhorn.name)
            next_audio_info = sf.info(carhorn.name)
            # exact sample count, with format/subtype preserved
            assert carhorn_audio.shape[0] == _duration
            assert prev_audio_info.format == next_audio_info.format
            assert prev_audio_info.subtype == next_audio_info.subtype
            prev_audio_info = sf.info(siren.name)
            match_sample_length(siren.name, _duration)
            siren_audio, _ = sf.read(siren.name)
            next_audio_info = sf.info(siren.name)
            assert siren_audio.shape[0] == _duration
            assert prev_audio_info.format == next_audio_info.format
            assert prev_audio_info.subtype == next_audio_info.subtype
            # should be summable
            summed_events = sum([carhorn_audio, siren_audio])
            assert summed_events.shape[0] == _duration
        for _duration in invalid_durations_to_match:
            pytest.raises(ScaperError, match_sample_length, carhorn.name, _duration)
            pytest.raises(ScaperError, match_sample_length, siren.name, _duration)
def test_peak_normalize():
    """Exercise peak_normalize on synthetic sine/square/saw tones.

    For every waveform/samplerate/frequency/amplitude combination, the
    normalized mixture must peak just below 1.0, the returned scale factor
    must be ~1/A, and every normalized event must be scaled by the same
    factor as the mixture.
    """
    def sine(x, f, sr, A):
        return A * np.sin(2 * np.pi * f * x / sr)
    def square(x,f, sr, A):
        return A * sg.square(2 * np.pi * f * x / sr)
    def saw(x, f, sr, A):
        return A * sg.sawtooth(2 * np.pi * f * x / sr)
    samplerates = [16000, 44100]
    frequencies = [50, 100, 500, 1000, 5000, 10000, 15000, 20000]
    amplitudes = [0.1, 0.5, 1.0, 1.5, 2.0]
    event_factors = [0.5, 0.8]
    # eps keeps the 1/A estimate finite (guards against A == 0)
    eps = 1e-10
    # test with toy data
    for waveform in [sine, square, saw]:
        for sr in samplerates:
            for f in frequencies:
                for A in amplitudes:
                    # one second of signal at peak amplitude A
                    n_samples = sr
                    x = np.arange(n_samples)
                    audio = waveform(x, f, sr, A)
                    max_sample = np.max(np.abs(audio))
                    estimated_scale_factor = 1.0 / (A + eps)
                    print("\nsr: {}, f: {}, A: {}".format(sr, f, A))
                    print("max sample audio: {}".format(max_sample))
                    print("estimated scale_factor: {}".format(estimated_scale_factor))
                    # events are scaled-down copies of the same waveform
                    event_audio_list = []
                    for factor in event_factors:
                        event_audio_list.append(waveform(x, f, sr, A * factor))
                    audio_norm, event_audio_list_norm, scale_factor = \
                        peak_normalize(audio, event_audio_list)
                    print("allclose on factors: {}".format(np.allclose(
                        scale_factor, estimated_scale_factor)))
                    print("actual scale factor: {}".format(scale_factor))
                    # test scale factor
                    max_sample_audio = np.max(np.abs(audio_norm))
                    print("max sample audio norm: {}".format(max_sample_audio))
                    assert np.allclose(scale_factor, estimated_scale_factor,
                                       atol=1e-03)
                    # test soundscape audio
                    assert max_sample_audio < 1.0
                    assert np.allclose(max_sample_audio, 1.0)
                    # test event audio
                    for event_audio, factor in zip(event_audio_list_norm,
                                                   event_factors):
                        max_sample_event = np.max(np.abs(event_audio))
                        if not np.allclose(
                                max_sample_event, A * factor * scale_factor):
                            print(max_sample_audio, max_sample_event,
                                  A * factor * scale_factor)
                        assert np.allclose(max_sample_event,
                                           A * factor * scale_factor,
                                           atol=1e-3)
| 6,611 | 37.219653 | 86 | py |
scaper | scaper-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# scaper documentation build configuration file, created by
# sphinx-quickstart on Thu May 4 17:32:22 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import six
import sphinx_rtd_theme
from sphinx.ext.autodoc import between
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# extensions = []
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_issues'
]
# Link Github repo for referencing issues/PRs
issues_github_path = 'justinsalamon/scaper'
# Disable warnings about nonexisting document
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'scaper'
copyright = '2017, Justin Salamon & Duncan MacConnell'
author = 'Justin Salamon & Duncan MacConnell'
# Mock the dependencies
if six.PY3:
from unittest.mock import MagicMock
else:
from mock import Mock as MagicMock
class Mock(MagicMock):
    # Stand-in for scaper's heavy dependencies: any attribute access on a
    # mocked module returns a fresh MagicMock, so Sphinx autodoc can import
    # scaper without those packages installed.
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()
MOCK_MODULES = [
'sox', 'jams', 'scipy', 'numpy', 'pandas', 'soundfile',
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import imp
scaper_version = imp.load_source('scaper.version', '../scaper/version.py')
version = scaper_version.short_version
# The full version, including alpha/beta/rc tags.
release = scaper_version.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# html_theme = 'classic'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'scaper v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'scaperdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'scaper.tex', 'scaper Documentation',
'Justin Salamon \\& Duncan MacConnell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scaper', 'scaper Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'scaper', 'scaper Documentation',
author, 'scaper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
def maybe_skip_member(app, what, name, obj, skip, options):
    """Sphinx ``autodoc-skip-member`` callback.

    Skip the internal ``EventSpec`` namedtuple and any private member
    (name starting with an underscore); document everything else.

    The parameters follow the Sphinx ``autodoc-skip-member`` event
    signature; only ``name`` is consulted here.

    Returns
    -------
    bool
        True if the member should be skipped, False otherwise.
    """
    # startswith('_') also handles an empty name safely, unlike name[0];
    # removed commented-out debug prints.
    return name == 'EventSpec' or name.startswith('_')
def setup(app):
    # Sphinx extension entry point: register the skip filter so private
    # members and EventSpec are excluded from the generated API docs.
    app.connect('autodoc-skip-member', maybe_skip_member)
| 11,254 | 27.20802 | 80 | py |
scaper | scaper-master/scaper/scaper_warnings.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED: 10/13/16 7:08 PM by Justin Salamon <justin.salamon@nyu.edu>
'''Warning classes for Scaper'''
class ScaperWarning(Warning):
    '''The root Scaper warning class.

    All warnings emitted by Scaper (via ``warnings.warn``) use this class,
    so callers can filter Scaper-specific warnings independently.
    '''
    pass
| 235 | 18.666667 | 70 | py |
scaper | scaper-master/scaper/core.py | try:
import soxbindings as sox
except: # pragma: no cover
import sox # pragma: no cover
import soundfile
import os
import warnings
import jams
from collections import namedtuple
import logging
import tempfile
import numpy as np
import shutil
import csv
from copy import deepcopy
from .scaper_exceptions import ScaperError
from .scaper_warnings import ScaperWarning
from .util import _close_temp_files
from .util import _set_temp_logging_level
from .util import _get_sorted_files
from .util import _validate_folder_path
from .util import _populate_label_list
from .util import _check_random_state
from .util import _sample_trunc_norm
from .util import _sample_uniform
from .util import _sample_choose
from .util import _sample_choose_weighted
from .util import _sample_normal
from .util import _sample_const
from .util import max_polyphony
from .util import polyphony_gini
from .util import is_real_number, is_real_array
from .audio import get_integrated_lufs
from .audio import peak_normalize
from .version import version as scaper_version
# HEADS UP! Adding a new distribution tuple?
# Make sure it's properly handled in all of the following:
# SUPPORTED_DIST
# _validate_distribution
# _validate_time
# _validate_duration
# _validate_time_stretch
# _validate_pitch_shift
# _validate_label
# _validate_source_file
# _validate_snr
# _ensure_satisfiable_source_time_tuple
# _instantiate_event
# add_background (docstring)
# add_event (docstring)
# Dispatch table mapping a distribution name (the first element of a
# distribution tuple) to the sampling function that implements it; the
# remaining tuple elements are passed to the sampler as positional args.
SUPPORTED_DIST = {"const": _sample_const,
                  "choose": _sample_choose,
                  "choose_weighted": _sample_choose_weighted,
                  "uniform": _sample_uniform,
                  "normal": _sample_normal,
                  "truncnorm": _sample_trunc_norm}
# Define single event spec as namedtuple
EventSpec = namedtuple(
    'EventSpec',
    ['label', 'source_file', 'source_time', 'event_time', 'event_duration',
     'snr', 'role', 'pitch_shift', 'time_stretch'])
'''
Container for storing event specifications, either probabilistic (i.e. using
distribution tuples to specify possible values) or instantiated (i.e. storing
constants directly).
'''
def generate_from_jams(jams_infile,
                       audio_outfile=None,
                       fg_path=None,
                       bg_path=None,
                       jams_outfile=None,
                       save_isolated_events=False,
                       isolated_events_path=None,
                       disable_sox_warnings=True,
                       txt_path=None,
                       txt_sep='\t'):
    '''
    Generate a soundscape based on an existing scaper JAMS file and return as
    an audio file, a JAMS annotation, a simplified annotation list, and a
    list containing the audio samples of each individual background and
    foreground event. If output paths are provided, these objects will also
    be saved to disk.
    Parameters
    ----------
    jams_infile : str
        Path to JAMS file (must be a file previously generated by Scaper).
    audio_outfile : str
        Path for saving the generated soundscape audio.
    fg_path : str or None
        Specifies a different path for foreground audio than the one stored in
        the input jams file. For the reconstruction to be successful the folder
        and file structure inside this path must be identical the one that was
        used to create the input jams file. If None (default), the fg_path from
        the input jams file will be used.
    bg_path : str or None
        Specifies a different path for background audio than the one stored in
        the input jams file. For the reconstruction to be successful the folder
        and file structure inside this path must be identical the one that was
        used to create the input jams file. If None (default), the bg_path from
        the input jams file will be used.
    jams_outfile : str or None
        Path for saving new JAMS file, if None (default) a new JAMS is not
        saved. Useful when either fg_path or bg_path is not None, as it saves
        a new JAMS files where the source file paths match the new fg_path
        and/or bg_path.
    save_isolated_events : bool
        If True, this will save the isolated event audio in a directory adjacent to the generated soundscape
        mixture, or to the path defined by `isolated_events_path`. The audio of the isolated events sum
        up to the mixture if reverb is not applied. Isolated events can be found
        (by default) at `<audio_outfile parent folder>/<audio_outfile name>_events`.
        Isolated event file names follow the pattern: `<role><idx>_<label>`, where idx
        is the index of the isolated event in
        self.fg_spec or self.bg_spec (this allows events of the same label to be added more than
        once to the soundscape without breaking things). Role is "background" or "foreground".
        For example: `foreground0_siren.wav` or `background0_park.wav`.
    isolated_events_path : str
        Path to folder for saving isolated events. If None, defaults to
        `<audio_outfile parent folder>/<audio_outfile name>_events`.
    disable_sox_warnings : bool
        When True (default), warnings from the pysox module are suppressed
        unless their level is ``'CRITICAL'``. If you're experiencing issues related
        to audio I/O setting this parameter to False may help with debugging.
    txt_path: str or None
        Path for saving a simplified annotation in a space separated format
        [onset offset label] where onset and offset are in seconds. Good
        for loading labels in e.g. Audacity. If None, does not save txt
        annotation to disk.
    txt_sep: str
        The separator to use when saving a simplified annotation as a text
        file (default is tab for compatibility with Audacity label files).
        Only relevant if txt_path is not None.
    Returns
    -------
    soundscape_audio : np.ndarray
        The audio samples of the generated soundscape. Returns None if
        no_audio=True.
    soundscape_jam: jams.JAMS
        The JAMS object containing the full soundscape annotation.
    annotation_list : list
        A simplified annotation in a space-separated format
        [onset offset label] where onset and offset are in seconds.
    event_audio_list: list
        A list of np.ndarrays containing the audio samples of every
        individual background and foreground sound event. Events are listed
        in the same order in which they appear in the jams annotations data
        list, and can be matched with:
        `for obs, event_audio in zip(ann.data, event_audio_list): ...`.
    Raises
    ------
    ScaperError
        If jams_infile does not point to a valid JAMS file that was previously
        generated by Scaper and contains an annotation of the scaper
        namespace.
    '''
    # Load the JAMS and locate the scaper annotation; only scaper-generated
    # JAMS files carry enough information for reconstruction.
    soundscape_jam = jams.load(jams_infile)
    anns = soundscape_jam.search(namespace='scaper')
    if len(anns) == 0:
        raise ScaperError(
            'JAMS file does not contain any annotation with namespace '
            'scaper.')
    ann = soundscape_jam.annotations.search(namespace='scaper')[0]
    # Update paths
    if fg_path is None:
        new_fg_path = ann.sandbox.scaper['fg_path']
    else:
        # Caller supplied a replacement foreground root: rewrite every
        # foreground observation's source_file to point under it
        # (assumes identical <label-folder>/<file> structure).
        new_fg_path = os.path.expanduser(fg_path)
        # Update source files
        for obs in ann.data:
            if obs.value['role'] == 'foreground':
                sourcefile = obs.value['source_file']
                sourcefilename = os.path.basename(sourcefile)
                parent = os.path.dirname(sourcefile)
                parentname = os.path.basename(parent)
                newsourcefile = os.path.join(
                    new_fg_path, parentname, sourcefilename)
                obs.value['source_file'] = newsourcefile # hacky
        # Update sandbox
        ann.sandbox.scaper['fg_path'] = new_fg_path
    if bg_path is None:
        new_bg_path = ann.sandbox.scaper['bg_path']
    else:
        # Same rewrite for background observations.
        new_bg_path = os.path.expanduser(bg_path)
        # Update source files
        for obs in ann.data:
            if obs.value['role'] == 'background':
                sourcefile = obs.value['source_file']
                sourcefilename = os.path.basename(sourcefile)
                parent = os.path.dirname(sourcefile)
                parentname = os.path.basename(parent)
                newsourcefile = os.path.join(
                    new_bg_path, parentname, sourcefilename)
                obs.value['source_file'] = newsourcefile # hacky
        # Update sandbox
        ann.sandbox.scaper['bg_path'] = new_bg_path
    # Create scaper object
    if 'original_duration' in ann.sandbox.scaper:
        duration = ann.sandbox.scaper['original_duration']
    else:
        duration = ann.sandbox.scaper['duration']
        warnings.warn(
            "Couldn't find original_duration field in the scaper sandbox, "
            "using duration field instead. This can lead to incorrect behavior "
            "if generating from a jams file that has been trimmed previously.",
            ScaperWarning)
    protected_labels = ann.sandbox.scaper['protected_labels']
    sc = Scaper(duration, new_fg_path, new_bg_path, protected_labels)
    # Set synthesis parameters
    if 'sr' in ann.sandbox.scaper: # backwards compatibility
        sc.sr = ann.sandbox.scaper['sr']
    sc.ref_db = ann.sandbox.scaper['ref_db']
    sc.n_channels = ann.sandbox.scaper['n_channels']
    sc.fade_in_len = ann.sandbox.scaper['fade_in_len']
    sc.fade_out_len = ann.sandbox.scaper['fade_out_len']
    # Pull generation parameters from annotation
    reverb = ann.sandbox.scaper['reverb']
    # The next three fields were added after the original format, so older
    # JAMS files may not have them; fall back to False in that case.
    if 'fix_clipping' in ann.sandbox.scaper.keys():
        fix_clipping = ann.sandbox.scaper['fix_clipping']
    else:
        fix_clipping = False
    if 'peak_normalization' in ann.sandbox.scaper.keys():
        peak_normalization = ann.sandbox.scaper['peak_normalization']
    else:
        peak_normalization = False
    if 'quick_pitch_time' in ann.sandbox.scaper.keys():
        quick_pitch_time = ann.sandbox.scaper['quick_pitch_time']
    else:
        quick_pitch_time = False
    # Cast ann.sandbox.scaper to a Sandbox object
    ann.sandbox.scaper = jams.Sandbox(**ann.sandbox.scaper)
    # Generate audio
    soundscape_audio, event_audio_list, scale_factor, ref_db_change = \
        sc._generate_audio(audio_outfile,
                           ann,
                           reverb=reverb,
                           fix_clipping=fix_clipping,
                           peak_normalization=peak_normalization,
                           quick_pitch_time=quick_pitch_time,
                           save_isolated_events=save_isolated_events,
                           isolated_events_path=isolated_events_path,
                           disable_sox_warnings=disable_sox_warnings)
    # TODO: Stick to heavy handed overwriting for now, in the future we
    # should consolidate this with what happens inside _instantiate().
    ann.sandbox.scaper.reverb = reverb
    ann.sandbox.scaper.fix_clipping = fix_clipping
    ann.sandbox.scaper.peak_normalization = peak_normalization
    ann.sandbox.scaper.quick_pitch_time = quick_pitch_time
    ann.sandbox.scaper.save_isolated_events = save_isolated_events
    ann.sandbox.scaper.isolated_events_path = isolated_events_path
    ann.sandbox.scaper.disable_sox_warnings = disable_sox_warnings
    ann.sandbox.scaper.peak_normalization_scale_factor = scale_factor
    ann.sandbox.scaper.ref_db_change = ref_db_change
    ann.sandbox.scaper.ref_db_generated = sc.ref_db + ref_db_change
    # If there are slice (trim) operations, need to perform them!
    # Need to add this logic for the isolated events too.
    if 'slice' in ann.sandbox.keys():
        for sliceop in ann.sandbox['slice']:
            # must use temp file in order to save to same file
            tmpfiles = []
            audio_files = [audio_outfile] + ann.sandbox.scaper.isolated_events_audio_path
            with _close_temp_files(tmpfiles):
                for audio_file in audio_files:
                    # Create tmp file
                    tmpfiles.append(
                        tempfile.NamedTemporaryFile(suffix='.wav', delete=False))
                    # Save trimmed result to temp file
                    tfm = sox.Transformer()
                    tfm.trim(sliceop['slice_start'], sliceop['slice_end'])
                    tfm.build(audio_file, tmpfiles[-1].name)
                    # Copy result back to original file
                    shutil.copyfile(tmpfiles[-1].name, audio_file)
    # Optionally save new jams file
    if jams_outfile is not None:
        soundscape_jam.save(jams_outfile)
    # Create annotation list
    annotation_list = []
    for obs in ann.data:
        if obs.value['role'] == 'foreground':
            annotation_list.append(
                [obs.time, obs.time + obs.duration, obs.value['label']])
    if txt_path is not None:
        with open(txt_path, 'w') as csv_file:
            writer = csv.writer(csv_file, delimiter=txt_sep)
            writer.writerows(annotation_list)
    return soundscape_audio, soundscape_jam, annotation_list, event_audio_list
def trim(audio_infile, jams_infile, audio_outfile, jams_outfile, start_time,
         end_time, no_audio=False):
    '''
    Trim an audio file and corresponding Scaper JAMS file and save to disk.
    Given an input audio file and corresponding jams file, trim both the audio
    and all annotations in the jams file to the time range ``[start_time,
    end_time]`` and save the result to ``audio_outfile`` and ``jams_outfile``
    respectively. This function uses ``jams.slice()`` for trimming the jams
    file while ensuring the start times of the jam's annotations and
    observations they contain match the trimmed audio file.
    Parameters
    ----------
    audio_infile : str
        Path to input audio file
    jams_infile : str
        Path to input jams file
    audio_outfile : str
        Path to output trimmed audio file
    jams_outfile : str
        Path to output trimmed jams file
    start_time : float
        Start time for trimmed audio/jams
    end_time : float
        End time for trimmed audio/jams
    no_audio : bool
        If true, operates on the jams only. Audio input and output paths
        don't have to point to valid files.
    '''
    # First trim jams (might raise an error)
    jam = jams.load(jams_infile)
    jam_sliced = jam.slice(start_time, end_time, strict=False)
    # Special work for annotations of the scaper 'scaper' namespace
    for ann in jam_sliced.annotations:
        if ann.namespace == 'scaper':
            # DON'T MODIFY event's value dict! Keeps original instantiated
            # values for reconstruction / reproducibility.
            # Count number of FG events
            n_events = 0
            for obs in ann.data:
                if obs.value['role'] == 'foreground':
                    n_events += 1
            # Re-compute max polyphony
            poly = max_polyphony(ann)
            # Re-compute polyphony gini
            gini = polyphony_gini(ann)
            # Update specs in sandbox
            ann.sandbox.scaper['n_events'] = n_events
            ann.sandbox.scaper['polyphony_max'] = poly
            ann.sandbox.scaper['polyphony_gini'] = gini
            ann.sandbox.scaper['duration'] = ann.duration
    # Save result to output jams file
    jam_sliced.save(jams_outfile)
    # Next, trim audio
    if not no_audio:
        tfm = sox.Transformer()
        tfm.trim(start_time, end_time)
        if audio_outfile != audio_infile:
            tfm.build(audio_infile, audio_outfile)
        else:
            # In-place trim: sox can't read and write the same path, so
            # route the result through a temporary file.
            # must use temp file in order to save to same file
            tmpfiles = []
            with _close_temp_files(tmpfiles):
                # Create tmp file
                tmpfiles.append(
                    tempfile.NamedTemporaryFile(
                        suffix='.wav', delete=False))
                # Save trimmed result to temp file
                tfm.build(audio_infile, tmpfiles[-1].name)
                # Copy result back to original file
                shutil.copyfile(tmpfiles[-1].name, audio_outfile)
def _get_value_from_dist(dist_tuple, random_state):
    '''
    Sample a single value from the distribution described by ``dist_tuple``.

    The tuple is validated first and then dispatched to the matching sampler
    in ``SUPPORTED_DIST``, forwarding the distribution-specific arguments.

    Parameters
    ----------
    dist_tuple : tuple
        Distribution tuple to be validated. See ``Scaper.add_event`` for
        details about the expected format for the distribution tuple.
    random_state : numpy.random.RandomState
        Random number generator used by the sampler.

    Returns
    -------
    value
        A value sampled from the specified distribution.

    See Also
    --------
    Scaper.add_event : Add a foreground sound event to the foreground
    specification.
    _validate_distribution : Check whether a tuple specifying a parameter
    distribution has a valid format, if not raise an error.
    '''
    # Raises ScaperError if the tuple is malformed.
    _validate_distribution(dist_tuple)
    # First item names the distribution, the rest are its parameters.
    dist_name, *dist_args = dist_tuple
    return SUPPORTED_DIST[dist_name](*dist_args, random_state=random_state)
def _validate_distribution(dist_tuple):
    '''
    Check whether a tuple specifying a parameter distribution has a valid
    format, if not raise an error.

    Parameters
    ----------
    dist_tuple : tuple
        Tuple specifying a distribution to sample from. See Scaper.add_event
        for details about the expected format of the tuple and allowed values.

    Raises
    ------
    ScaperError
        If the tuple does not have a valid format.

    See Also
    --------
    Scaper.add_event : Add a foreground sound event to the foreground
    specification.
    '''
    # Make sure it's a tuple
    if not isinstance(dist_tuple, tuple):
        raise ScaperError('Distribution tuple must be of type tuple.')
    # Make sure the tuple contains at least 2 items
    if len(dist_tuple) < 2:
        raise ScaperError('Distribution tuple must be at least of length 2.')
    # Make sure the first item is one of the supported distribution names
    if dist_tuple[0] not in SUPPORTED_DIST.keys():
        raise ScaperError(
            "Unsupported distribution name: {:s}".format(dist_tuple[0]))
    # If it's a constant distribution, tuple must be of length 2
    if dist_tuple[0] == 'const':
        if len(dist_tuple) != 2:
            raise ScaperError('"const" distribution tuple must be of length 2')
    # If it's a choose, tuple must be of length 2 and second item of type list
    elif dist_tuple[0] == 'choose':
        if len(dist_tuple) != 2 or not isinstance(dist_tuple[1], list):
            raise ScaperError(
                'The "choose" distribution tuple must be of length 2 where '
                'the second item is a list.')
    # If it's a choose_weighted, tuple must be of length 3, items 2 and 3 must
    # be lists of the same length, and the list in item 3 must contain floats
    # in the range [0, 1] that sum to 1 (i.e. valid probabilities).
    elif dist_tuple[0] == 'choose_weighted':
        if len(dist_tuple) != 3:
            raise ScaperError('"choose_weighted" distribution tuple must have length 3')

        if not isinstance(dist_tuple[1], list) or \
           not isinstance(dist_tuple[2], list) or \
           len(dist_tuple[1]) != len(dist_tuple[2]):
            msg = ('The 2nd and 3rd items of the "choose_weighted" distribution tuple '
                   'must be lists of the same length.')
            raise ScaperError(msg)

        probabilities = np.asarray(dist_tuple[2])
        if probabilities.min() < 0 or probabilities.max() > 1:
            msg = ('Values in the probabilities list of the "choose_weighted" '
                   'distribution tuple must be in the range [0, 1].')
            raise ScaperError(msg)
        # np.allclose tolerates tiny floating-point error in the sum.
        if not np.allclose(probabilities.sum(), 1):
            msg = ('Values in the probabilities list of the "choose_weighted" '
                   'distribution tuple must sum to 1.')
            raise ScaperError(msg)
    # If it's a uniform distribution, tuple must be of length 3, 2nd item must
    # be a real number and 3rd item must be real and greater/equal to the 2nd.
    elif dist_tuple[0] == 'uniform':
        if (len(dist_tuple) != 3 or
                not is_real_number(dist_tuple[1]) or
                not is_real_number(dist_tuple[2]) or
                dist_tuple[1] > dist_tuple[2]):
            # BUGFIX: message previously said "tuple be of length 2" even
            # though a uniform tuple must have 3 items.
            raise ScaperError(
                'The "uniform" distribution tuple must be of length 3, where '
                'the 2nd item is a real number and the 3rd item is a real '
                'number and greater/equal to the 2nd item.')
    # If it's a normal distribution, tuple must be of length 3, 2nd item must
    # be a real number and 3rd item must be a non-negative real
    elif dist_tuple[0] == 'normal':
        if (len(dist_tuple) != 3 or
                not is_real_number(dist_tuple[1]) or
                not is_real_number(dist_tuple[2]) or
                dist_tuple[2] < 0):
            raise ScaperError(
                'The "normal" distribution tuple must be of length 3, where '
                'the 2nd item (mean) is a real number and the 3rd item (std '
                'dev) is real and non-negative.')
    # If it's a truncated normal, tuple must be of length 5: mean (real),
    # std dev (non-negative real), trunc_min (real), trunc_max (real >= min).
    elif dist_tuple[0] == 'truncnorm':
        if (len(dist_tuple) != 5 or
                not is_real_number(dist_tuple[1]) or
                not is_real_number(dist_tuple[2]) or
                not is_real_number(dist_tuple[3]) or
                not is_real_number(dist_tuple[4]) or
                dist_tuple[2] < 0 or
                dist_tuple[4] < dist_tuple[3]):
            # BUGFIX: "trun_max" typo corrected to "trunc_max".
            raise ScaperError(
                'The "truncnorm" distribution tuple must be of length 5, '
                'where the 2nd item (mean) is a real number, the 3rd item '
                '(std dev) is real and non-negative, the 4th item (trunc_min) '
                'is a real number and the 5th item (trunc_max) is a real '
                'number that is equal to or greater than trunc_min.')
def _ensure_satisfiable_source_time_tuple(source_time, source_duration, event_duration):
    '''
    Modify a source_time distribution tuple according to the duration of the
    source and the duration of the event. This allows you to sample from
    anywhere in a source file without knowing the exact duration of every
    source file.

    Any candidate start time ``t`` that would make the event run past the end
    of the source (``t + event_duration > source_duration``) is clamped to
    ``max(0, source_duration - event_duration)``.

    Parameters
    ----------
    source_time : tuple
        Tuple specifying a distribution to sample from. See Scaper.add_event
        for details about the expected format of the tuple and allowed values.
    source_duration : float
        Duration of the source audio file.
    event_duration : float
        Duration of the event to be extracted from the source file.

    Returns
    -------
    source_time : tuple
        The (possibly modified) distribution tuple, guaranteed satisfiable.
    warn : bool
        True if the tuple was modified (caller is expected to emit a warning).

    See Also
    --------
    Scaper.add_event : Add a foreground sound event to the foreground
    specification.
    '''
    # Validate format first; then keep an untouched copy for change detection.
    _validate_distribution(source_time)
    old_source_time = deepcopy(source_time)

    # Work on a mutable copy (tuples are immutable).
    source_time = list(source_time)

    # If it's a constant distribution, just make sure it's within bounds.
    if source_time[0] == 'const':
        if source_time[1] + event_duration > source_duration:
            source_time[1] = max(0, source_duration - event_duration)
    # If it's a choose, iterate through the list to make sure it's all in bounds.
    # Some logic here so we don't add stuff out of bounds more than once.
    elif source_time[0] == 'choose':
        for i, t in enumerate(source_time[1]):
            if t + event_duration > source_duration:
                source_time[1][i] = max(0, source_duration - event_duration)
        # De-duplicate clamped values. NOTE: set() does not preserve the
        # original list order.
        source_time[1] = list(set(source_time[1]))
    # For weighted_choose we do the same as choose but without removing duplicates
    # (each entry must stay aligned with its probability in item 3).
    elif source_time[0] == 'choose_weighted':
        for i, t in enumerate(source_time[1]):
            if t + event_duration > source_duration:
                source_time[1][i] = max(0, source_duration - event_duration)
    # If it's a uniform distribution, tuple must be of length 3, We change the 3rd
    # item to source_duration - event_duration so that we stay in bounds. If the min
    # out of bounds, we change it to be source_duration - event_duration.
    elif source_time[0] == 'uniform':
        if source_time[1] + event_duration > source_duration:
            source_time[1] = max(0, source_duration - event_duration)
        if source_time[2] + event_duration > source_duration:
            source_time[2] = max(0, source_duration - event_duration)
        if (source_time[1] == source_time[2]):
            # switch to const (a degenerate uniform with min == max would be
            # rejected by samplers that require min < max)
            source_time = ['const', source_time[1]]
    # If it's a normal distribution, we change the mean of the distribution to
    # source_duration - event_duration if source_duration - mean < event_duration.
    elif source_time[0] == 'normal':
        if source_time[1] + event_duration > source_duration:
            source_time[1] = max(0, source_duration - event_duration)
    # If it's a truncated normal distribution, we change the mean as we did above for a
    # normal distribution, and change the max (5th item) to
    # source_duration - event_duration if it's bigger. If the min is out of bounds, we
    # change it like in the uniform case.
    elif source_time[0] == 'truncnorm':
        if source_time[1] + event_duration > source_duration:
            source_time[1] = max(0, source_duration - event_duration)
        if source_time[3] + event_duration > source_duration:
            source_time[3] = max(0, source_duration - event_duration)
        if source_time[4] + event_duration > source_duration:
            source_time[4] = max(0, source_duration - event_duration)
        if (source_time[3] == source_time[4]):
            # switch to const
            # NOTE(review): this uses source_time[1] (the mean), not the
            # collapsed bound source_time[3]; the mean was clamped above so
            # they usually coincide, but confirm this is intended.
            source_time = ['const', source_time[1]]
    source_time = tuple(source_time)

    # check if the source_time changed from the old_source_time to throw a warning.
    # it gets set here but the warning happens after the return from this call
    warn = (source_time != old_source_time)
    return tuple(source_time), warn
def _validate_label(label, allowed_labels):
    '''
    Validate that a label tuple is in the right format and that its values
    are valid.

    Parameters
    ----------
    label : tuple
        Label tuple (see ```Scaper.add_event``` for required format).
    allowed_labels : list
        List of allowed labels.

    Raises
    ------
    ScaperError
        If the validation fails.
    '''
    # The tuple itself must be a well-formed distribution tuple.
    _validate_distribution(label)

    dist_name = label[0]
    if dist_name == "const":
        # A constant label must be one of the allowed labels.
        if label[1] not in allowed_labels:
            raise ScaperError(
                'Label value must match one of the available labels: '
                '{:s}'.format(str(allowed_labels)))
    elif dist_name in ("choose", "choose_weighted"):
        # An empty candidate list means "choose from all allowed labels" and
        # is always valid; a non-empty list must be a subset of the allowed.
        if label[1] and not set(label[1]).issubset(set(allowed_labels)):
            raise ScaperError(
                'Label list provided must be a subset of the available '
                'labels: {:s}'.format(str(allowed_labels)))
    else:
        raise ScaperError(
            'Label must be specified using a "const" or "choose" tuple.')
def _validate_source_file(source_file_tuple, label_tuple):
    '''
    Validate that a source_file tuple is in the right format and that its
    values are valid.

    Parameters
    ----------
    source_file_tuple : tuple
        Source file tuple (see ```Scaper.add_event``` for required format).
    label_tuple : tuple
        Label tuple (see ```Scaper.add_event``` for required format).

    Raises
    ------
    ScaperError
        If the validation fails.
    '''
    # Both tuples must be well-formed distribution tuples.
    _validate_distribution(source_file_tuple)
    _validate_distribution(label_tuple)

    dist_name = source_file_tuple[0]
    if dist_name == "const":
        filepath = source_file_tuple[1]
        # 1. an explicitly given file must exist on disk
        if not os.path.isfile(filepath):
            raise ScaperError(
                "Source file not found: {:s}".format(filepath))
        # 2. its parent folder name must equal the (constant) label
        parent_name = os.path.basename(os.path.dirname(filepath))
        if label_tuple[0] != "const" or label_tuple[1] != parent_name:
            raise ScaperError(
                "Source file's parent folder name does not match label.")
    elif dist_name in ("choose", "choose_weighted"):
        # An empty candidate list is valid (choose from all available files);
        # otherwise every candidate path must point to an existing file.
        candidates = source_file_tuple[1]
        if candidates and not all(os.path.isfile(x) for x in candidates):
            raise ScaperError(
                'Source file list must either be empty or all paths in '
                'the list must point to valid files.')
    else:
        raise ScaperError(
            'Source file must be specified using a "const" or "choose" tuple.')
def _validate_time(time_tuple):
    '''
    Validate that a time tuple has the right format and that the
    specified distribution cannot result in a negative time.

    Parameters
    ----------
    time_tuple : tuple
        Time tuple (see ```Scaper.add_event``` for required format).

    Raises
    ------
    ScaperError
        If the validation fails.
    '''
    # Make sure it's a valid distribution tuple
    _validate_distribution(time_tuple)

    # Ensure the values are valid for time
    if time_tuple[0] == "const":
        # Reject None explicitly: None is not a real number and would also
        # break the < comparison below.
        if (time_tuple[1] is None or
                not is_real_number(time_tuple[1]) or
                time_tuple[1] < 0):
            raise ScaperError(
                'Time must be a real non-negative number.')
    elif time_tuple[0] == "choose" or time_tuple[0] == "choose_weighted":
        if (not time_tuple[1] or
                not is_real_array(time_tuple[1]) or
                not all(x is not None for x in time_tuple[1]) or
                not all(x >= 0 for x in time_tuple[1])):
            raise ScaperError(
                'Time list must be a non-empty list of non-negative real '
                'numbers.')
    elif time_tuple[0] == "uniform":
        # A non-negative min guarantees non-negative samples.
        if time_tuple[1] < 0:
            raise ScaperError(
                'A "uniform" distribution tuple for time must have '
                'min_value >= 0')
    elif time_tuple[0] == "normal":
        # A normal distribution can always produce negatives; warn so the
        # user knows re-sampling may loop indefinitely.
        warnings.warn(
            'A "normal" distribution tuple for time can result in '
            'negative values, in which case the distribution will be '
            're-sampled until a positive value is returned: this can result '
            'in an infinite loop!',
            ScaperWarning)
    elif time_tuple[0] == "truncnorm":
        # A non-negative trunc_min guarantees non-negative samples.
        # BUGFIX: "distirbution" typo in the error message corrected.
        if time_tuple[3] < 0:
            raise ScaperError(
                'A "truncnorm" distribution tuple for time must specify a non-'
                'negative trunc_min value.')
def _validate_duration(duration_tuple):
    '''
    Validate that a duration tuple has the right format and that the
    specified distribution cannot result in a negative or zero value.

    Parameters
    ----------
    duration_tuple : tuple
        Duration tuple (see ```Scaper.add_event``` for required format).

    Raises
    ------
    ScaperError
        If the validation fails.
    '''
    # Make sure it's a valid distribution tuple
    _validate_distribution(duration_tuple)

    # Ensure the values are valid for duration (strictly positive).
    if duration_tuple[0] == "const":
        if (not is_real_number(duration_tuple[1]) or
                duration_tuple[1] <= 0):
            raise ScaperError(
                'Duration must be a real number greater than zero.')
    elif duration_tuple[0] == "choose" or duration_tuple[0] == "choose_weighted":
        if (not duration_tuple[1] or
                not is_real_array(duration_tuple[1]) or
                not all(x > 0 for x in duration_tuple[1])):
            raise ScaperError(
                'Duration list must be a non-empty list of positive real '
                'numbers.')
    elif duration_tuple[0] == "uniform":
        # A positive min guarantees strictly positive samples.
        if duration_tuple[1] <= 0:
            raise ScaperError(
                'A "uniform" distribution tuple for duration must have '
                'min_value > 0')
    elif duration_tuple[0] == "normal":
        # A normal distribution can always produce non-positive values; warn
        # so the user knows re-sampling may loop indefinitely.
        # BUGFIX: "non-positives values" grammar corrected in the warning.
        warnings.warn(
            'A "normal" distribution tuple for duration can result in '
            'non-positive values, in which case the distribution will be '
            're-sampled until a positive value is returned: this can result '
            'in an infinite loop!',
            ScaperWarning)
    elif duration_tuple[0] == "truncnorm":
        # A positive trunc_min guarantees strictly positive samples.
        # BUGFIX: message previously said "for time" in the duration
        # validator and misspelled "distribution".
        if duration_tuple[3] <= 0:
            raise ScaperError(
                'A "truncnorm" distribution tuple for duration must specify a '
                'positive trunc_min value.')
def _validate_snr(snr_tuple):
    '''
    Validate that an snr distribution tuple has the right format.

    Parameters
    ----------
    snr_tuple : tuple
        SNR tuple (see ```Scaper.add_event``` for required format).

    Raises
    ------
    ScaperError
        If the validation fails.
    '''
    # The tuple itself must be a well-formed distribution tuple.
    _validate_distribution(snr_tuple)

    dist_name = snr_tuple[0]
    if dist_name == "const":
        # A constant SNR just has to be a real number.
        if not is_real_number(snr_tuple[1]):
            raise ScaperError(
                'SNR must be a real number.')
    elif dist_name in ("choose", "choose_weighted"):
        # The candidate list must be non-empty and contain only reals.
        if not snr_tuple[1] or not is_real_array(snr_tuple[1]):
            raise ScaperError(
                'SNR list must be a non-empty list of real numbers.')
    # No need to check for "uniform" and "normal" since they must produce a
    # real number and technically speaking any real number is a valid SNR.
    # TODO: do we want to impose limits on the possible SNR values?
def _validate_pitch_shift(pitch_shift_tuple):
    '''
    Validate that a pitch_shift distribution tuple has the right format.

    Parameters
    ----------
    pitch_shift_tuple : tuple
        Pitch shift tuple (see ```Scaper.add_event``` for required format).

    Raises
    ------
    ScaperError
        If the validation fails.
    '''
    # None means "no pitch shift requested", which is always valid.
    if pitch_shift_tuple is None:
        return

    # Otherwise it must be a well-formed distribution tuple.
    _validate_distribution(pitch_shift_tuple)

    dist_name = pitch_shift_tuple[0]
    if dist_name == "const":
        # A constant shift just has to be a real number of semitones.
        if not is_real_number(pitch_shift_tuple[1]):
            raise ScaperError(
                'Pitch shift must be a real number.')
    elif dist_name in ("choose", "choose_weighted"):
        # The candidate list must be non-empty and contain only reals.
        if not pitch_shift_tuple[1] or not is_real_array(pitch_shift_tuple[1]):
            raise ScaperError(
                'Pitch shift list must be a non-empty list of real '
                'numbers.')
    # No need to check for "uniform" and "normal" since they must produce a
    # real number and technically speaking any real number is a valid pitch
    # shift
    # TODO: do we want to impose limits on the possible pitch shift values?
def _validate_time_stretch(time_stretch_tuple):
    '''
    Validate that a time_stretch distribution tuple has the right format.

    Parameters
    ----------
    time_stretch_tuple : tuple
        Time stretch tuple (see ```Scaper.add_event``` for required format).

    Raises
    ------
    ScaperError
        If the validation fails.
    '''
    # if the tuple is none then its valid (no time stretch requested)
    if time_stretch_tuple is not None:
        # Make sure it's a valid distribution tuple
        _validate_distribution(time_stretch_tuple)

        # Ensure the values are valid for time stretch (strictly positive).
        if time_stretch_tuple[0] == "const":
            if (not is_real_number(time_stretch_tuple[1]) or
                    time_stretch_tuple[1] <= 0):
                raise ScaperError(
                    'Time stretch must be a real number greater than zero.')
        elif time_stretch_tuple[0] == "choose" or time_stretch_tuple[0] == "choose_weighted":
            if (not time_stretch_tuple[1] or
                    not is_real_array(time_stretch_tuple[1]) or
                    not all(x > 0 for x in time_stretch_tuple[1])):
                raise ScaperError(
                    'Time stretch list must be a non-empty list of positive '
                    'real numbers.')
        elif time_stretch_tuple[0] == "uniform":
            # A positive min guarantees strictly positive samples.
            if time_stretch_tuple[1] <= 0:
                raise ScaperError(
                    'A "uniform" distribution tuple for time stretch must have '
                    'min_value > 0')
        elif time_stretch_tuple[0] == "normal":
            # BUGFIX: "non-positives values" grammar corrected in the warning.
            warnings.warn(
                'A "normal" distribution tuple for time stretch can result in '
                'non-positive values, in which case the distribution will be '
                're-sampled until a positive value is returned: this can '
                'result in an infinite loop!',
                ScaperWarning)
        elif time_stretch_tuple[0] == "truncnorm":
            # A positive trunc_min guarantees strictly positive samples.
            # BUGFIX: "distirbution" typo in the error message corrected.
            if time_stretch_tuple[3] <= 0:
                raise ScaperError(
                    'A "truncnorm" distribution tuple for time stretch must '
                    'specify a positive trunc_min value.')
        # TODO: do we want to impose limits on the possible time stretch
        # values?
def _validate_event(label, source_file, source_time, event_time,
                    event_duration, snr, allowed_labels, pitch_shift,
                    time_stretch):
    '''
    Check that event parameter values are valid.

    Each field is delegated to its dedicated validator, which raises a
    ScaperError on failure. The order of the checks (and hence which error
    surfaces first when several fields are invalid) is part of the contract
    and is preserved.

    Parameters
    ----------
    label : tuple
    source_file : tuple
    source_time : tuple
    event_time : tuple
    event_duration : tuple
    snr : tuple
    allowed_labels : list
        List of allowed labels for the event.
    pitch_shift : tuple or None
    time_stretch : tuple or None

    Raises
    ------
    ScaperError :
        If any of the input parameters has an invalid format or value.

    See Also
    --------
    Scaper.add_event : Add a foreground sound event to the foreground
    specification.
    '''
    # allowed_labels is the only parameter checked here directly; the rest
    # are validated by their own helpers below.
    if not isinstance(allowed_labels, list):
        raise ScaperError('allowed_labels must be of type list.')

    _validate_source_file(source_file, label)
    _validate_label(label, allowed_labels)
    # source_time and event_time share the same time-tuple rules.
    for time_tuple in (source_time, event_time):
        _validate_time(time_tuple)
    _validate_duration(event_duration)
    _validate_snr(snr)
    _validate_pitch_shift(pitch_shift)
    _validate_time_stretch(time_stretch)
class Scaper(object):
'''
Create a Scaper object.
Parameters
----------
duration : float
Duration of the soundscape, in seconds.
fg_path : str
Path to foreground folder.
bg_path : str
Path to background folder.
protected_labels : list
Provide a list of protected foreground labels. When a foreground
label is in the protected list it means that when a sound event
matching the label gets added to a soundscape instantiation the
duration of the source audio file cannot be altered, and the
duration value that was provided in the specification will be
ignored. Adding labels to the protected list is useful for sound events
whose semantic validity would be lost if the sound were trimmed
before the sound event ends, for example an animal vocalization
such as a dog bark.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by np.random. Note that if the random state is passed as a
RandomState instance, it is passed by reference, not value. This will lead to
the Scaper object advancing the state of the random state object if you use
it elsewhere.
'''
def __init__(self, duration, fg_path, bg_path, protected_labels=[], random_state=None):
'''
Create a Scaper object.
Parameters
----------
duration : float
Duration of the soundscape, in seconds.
fg_path : str
Path to foreground folder.
bg_path : str
Path to background folder.
protected_labels : list
Provide a list of protected foreground labels. When a foreground
label is in the protected list it means that when a sound event
matching the label gets added to a soundscape instantiation the
duration of the source audio file cannot be altered, and the
duration value that was provided in the specification will be
ignored. Adding labels to the protected list is useful for sound events
whose semantic validity would be lost if the sound were trimmed
before the sound event ends, for example an animal vocalization
such as a dog bark.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by np.random. Note that if the random state is passed as a
RandomState instance, it is passed by reference, not value. This will lead to
the Scaper object advancing the state of the random state object if you use
it elsewhere.
'''
# Duration must be a positive real number
if np.isrealobj(duration) and duration > 0:
self.duration = duration
else:
raise ScaperError('Duration must be a positive real value')
# Initialize parameters
self.sr = 44100
self.ref_db = -12
self.n_channels = 1
self.fade_in_len = 0.01 # 10 ms
self.fade_out_len = 0.01 # 10 ms
# Start with empty specifications
self.fg_spec = []
self.bg_spec = []
# Validate paths and set
expanded_fg_path = os.path.expanduser(fg_path)
expanded_bg_path = os.path.expanduser(bg_path)
_validate_folder_path(expanded_fg_path)
_validate_folder_path(expanded_bg_path)
self.fg_path = expanded_fg_path
self.bg_path = expanded_bg_path
# Populate label lists from folder paths
self.fg_labels = []
self.bg_labels = []
_populate_label_list(self.fg_path, self.fg_labels)
_populate_label_list(self.bg_path, self.bg_labels)
# Copy list of protected labels
self.protected_labels = protected_labels[:]
# Get random number generator
self.random_state = _check_random_state(random_state)
def reset_fg_event_spec(self):
'''
Resets the foreground event specification to be an empty list as it is when
the Scaper object is initialized in the first place. This allows the same
Scaper object to be used over and over again to generate new soundscapes
with the same underlying settings (e.g. `ref_db`, `num_channels`, and so on.)
See Also
--------
Scaper.reset_bg_event_spec : Same functionality but resets the background
event specification instead of the foreground specification.
'''
self.fg_spec = []
def reset_bg_event_spec(self):
'''
Resets the background event specification to be an empty list as it is when
the Scaper object is initialized in the first place. This allows the same
Scaper object to be used over and over again to generate new soundscapes
with the same underlying settings (e.g. `ref_db`, `num_channels`, and so on.)
See Also
--------
Scaper.reset_fg_event_spec : Same functionality but resets the foreground
event specification instead of the foreground specification.
'''
self.bg_spec = []
def set_random_state(self, random_state):
'''
Allows the user to set the random state after creating the Scaper object.
Parameters
----------
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by np.random.
'''
self.random_state = _check_random_state(random_state)
def add_background(self, label, source_file, source_time):
'''
Add a background recording to the background specification.
The background duration will be equal to the duration of the
soundscape ``Scaper.duration`` specified when initializing the Scaper
object. If the source file is shorter than this duration then it will
be concatenated to itself as many times as necessary to produce the
specified duration when calling ``Scaper.generate``.
Parameters
----------
label : tuple
Specifies the label of the background. See Notes below for the
expected format of this tuple and the allowed values.
NOTE: The label specified by this tuple must match one
of the labels in the Scaper's background label list
``Scaper.bg_labels``. Furthermore, if ``source_file`` is
specified using "const" (see Notes), then ``label`` must also be
specified using "const" and its value (see Notes) must
match the source file's parent folder's name.
source_file : tuple
Specifies the audio file to use as the source. See Notes below for
the expected format of this tuple and the allowed values.
NOTE: If ``source_file`` is specified using "const" (see Notes),
then ``label`` must also be specified using "const" and its
value (see Notes) must match the source file's parent folder's
name.
source_time : tuple
Specifies the desired start time in the source file. See Notes
below for the expected format of this tuple and the allowed values.
NOTE: the source time specified by this tuple should be equal to or
smaller than ``<source file duration> - <soundscape duration>``.
Larger values will be automatically changed to fulfill this
requirement when calling ``Scaper.generate``.
Notes
-----
Each parameter of this function is set by passing a distribution
tuple, whose first item is always the distribution name and subsequent
items are distribution specific. The supported distribution tuples are:
* ``("const", value)`` : a constant, given by ``value``.
* ``("choose", valuelist)`` : choose a value from
``valuelist`` at random (uniformly). The ``label`` and
``source_file`` parameters also support providing an empty
``valuelist`` i.e. ``("choose", [])``, in which case the
value will be chosen at random from all available labels or files
as determined automatically by Scaper by examining the file
structure of ``bg_path`` provided during initialization.
* ``("choose_weighted", valuelist, probabilities)``: choose a value
from ``valuelist`` via weighted sampling, where the probability of
sampling ``valuelist[i]`` is given by ``probabilities[i]``. The
``probabilities`` list must contain a valid probability distribution,
i.e., all values must be in the range [0, 1] and sum to one.
* ``("uniform", min_value, max_value)`` : sample a random
value from a uniform distribution between ``min_value``
and ``max_value``.
* ``("normal", mean, stddev)`` : sample a random value from a
normal distribution defined by its mean ``mean`` and
standard deviation ``stddev``.
* ``("truncnorm", mean, stddev, min, max)``: sapmle a random value from
a truncated normal distribution defined by its mean ``mean``, standard
deviation ``stddev``, minimum value ``min`` and maximum value ``max``.
IMPORTANT: not all parameters support all distribution tuples. In
particular, ``label`` and ``source_file`` only support ``"const"``,
``"choose"`` and ``choose_weighted``, whereas ``source_time`` supports
all distribution tuples. As noted above, only ``label`` and ``source_file``
support providing an empty ``valuelist`` with ``"choose"``.
'''
# These values are fixed for the background sound
event_time = ("const", 0)
event_duration = ("const", self.duration)
snr = ("const", 0)
role = 'background'
pitch_shift = None
time_stretch = None
# Validate parameter format and values
_validate_event(label, source_file, source_time, event_time,
event_duration, snr, self.bg_labels, None, None)
# Create background sound event
bg_event = EventSpec(label=label,
source_file=source_file,
source_time=source_time,
event_time=event_time,
event_duration=event_duration,
snr=snr,
role=role,
pitch_shift=pitch_shift,
time_stretch=time_stretch)
# Add event to background spec
self.bg_spec.append(bg_event)
def add_event(self, label, source_file, source_time, event_time,
event_duration, snr, pitch_shift, time_stretch):
'''
Add a foreground sound event to the foreground specification.
Parameters
----------
label : tuple
Specifies the label of the sound event. See Notes below for the
expected format of this tuple and the allowed values.
NOTE: The label specified by this tuple must match one
of the labels in the Scaper's foreground label list
``Scaper.fg_labels``. Furthermore, if ``source_file`` is
specified using "const" (see Notes), then ``label`` must also be
specified using "const" and its ``value `` (see Notes) must
match the source file's parent folder's name.
source_file : tuple
Specifies the audio file to use as the source. See Notes below for
the expected format of this tuple and the allowed values.
NOTE: If ``source_file`` is specified using "const" (see Notes),
then ``label`` must also be specified using "const" and its
``value`` (see Notes) must match the source file's parent
folder's name.
source_time : tuple
Specifies the desired start time in the source file. See Notes
below for the expected format of this tuple and the allowed values.
NOTE: the source time specified by this tuple should be equal to or
smaller than ``<source file duration> - event_duration``. Larger
values will be automatically changed to fulfill this requirement
when calling ``Scaper.generate``.
event_time : tuple
Specifies the desired start time of the event in the soundscape.
See Notes below for the expected format of this tuple and the
allowed values.
NOTE: The value specified by this tuple should be equal to or
smaller than ``<soundscapes duration> - event_duration``, and
larger values will be automatically changed to fulfill this
requirement when calling ``Scaper.generate``.
event_duration : tuple
Specifies the desired duration of the event. See Notes below for
the expected format of this tuple and the allowed values.
NOTE: The value specified by this tuple should be equal to or
smaller than the source file's duration, and larger values will be
automatically changed to fulfill this requirement when calling
``Scaper.generate``.
snr : tuple
Specifies the desired signal to noise ratio (SNR) between the event
and the background. See Notes below for the expected format of
this tuple and the allowed values.
pitch_shift : tuple
Specifies the number of semitones to shift the event by. None means
no pitch shift.
time_stretch: tuple
Specifies the time stretch factor (value>1 will make it slower and
longer, value<1 will makes it faster and shorter).
Notes
-----
Each parameter of this function is set by passing a distribution
tuple, whose first item is always the distribution name and subsequent
items are distribution specific. The supported distribution tuples are:
* ``("const", value)`` : a constant, given by ``value``.
* ``("choose", valuelist)`` : choose a value from
``valuelist`` at random (uniformly). The ``label`` and
``source_file`` parameters also support providing an empty
``valuelist`` i.e. ``("choose", [])``, in which case the
value will be chosen at random from all available labels or
source files as determined automatically by Scaper by examining
the file structure of ``fg_path`` provided during
initialization.
* ``("choose_weighted", valuelist, probabilities)``: choose a value
from ``valuelist`` via weighted sampling, where the probability of
sampling ``valuelist[i]`` is given by ``probabilities[i]``. The
``probabilities`` list must contain a valid probability distribution,
i.e., all values must be in the range [0, 1] and sum to one.
* ``("uniform", min_value, max_value)`` : sample a random
value from a uniform distribution between ``min_value``
and ``max_value`` (including ``max_value``).
* ``("normal", mean, stddev)`` : sample a random value from a
normal distribution defined by its mean ``mean`` and
standard deviation ``stddev``.
    * ``("truncnorm", mean, stddev, min, max)``: sample a random value from
a truncated normal distribution defined by its mean ``mean``, standard
deviation ``stddev``, minimum value ``min`` and maximum value ``max``.
IMPORTANT: not all parameters support all distribution tuples. In
particular, ``label`` and ``source_file`` only support ``"const"``,
``"choose"`` and ``"choose_weighted"``, whereas the remaining parameters
support all distribution tuples. As noted above, only ``label`` and
``source_file`` support providing an empty ``valuelist`` with ``"choose"``.
See Also
--------
_validate_event : Check that event parameter values are valid.
Scaper.generate : Generate a soundscape based on the current
specification and save to disk as both an audio file and a JAMS file
describing the soundscape.
'''
# SAFETY CHECKS
_validate_event(label, source_file, source_time, event_time,
event_duration, snr, self.fg_labels, pitch_shift,
time_stretch)
# Create event
event = EventSpec(label=label,
source_file=source_file,
source_time=source_time,
event_time=event_time,
event_duration=event_duration,
snr=snr,
role='foreground',
pitch_shift=pitch_shift,
time_stretch=time_stretch)
# Add event to foreground specification
self.fg_spec.append(event)
def _instantiate_event(self, event, isbackground=False,
allow_repeated_label=True,
allow_repeated_source=True,
used_labels=[],
used_source_files=[],
disable_instantiation_warnings=False):
'''
Instantiate an event specification.
Given an event specification containing distribution tuples,
instantiate the event, i.e. samples values for the label, source_file,
source_time, event_time, event_duration and snr from their respective
distribution tuples, and return the sampled values in as a new event
specification.
Parameters
----------
event : EventSpec
Event specification containing distribution tuples.
isbackground : bool
Flag indicating whether the event to instantiate is a background
event or not (False implies it is a foreground event).
allow_repeated_label : bool
When True (default) any label can be used, including a label that
has already been used for another event. When False, only a label
that is not already in ``used_labels`` can be selected.
allow_repeated_source : bool
When True (default) any source file matching the selected label can
be used, including a source file that has already been used for
another event. When False, only a source file that is not already
in ``used_source_files`` can be selected.
used_labels : list
List labels that have already been used in the current soundscape
instantiation. The label selected for instantiating the event will
be appended to this list unless its already in it.
used_source_files : list
List of full paths to source files that have already been used in
the current soundscape instantiation. The source file selected for
instantiating the event will be appended to this list unless its
already in it.
disable_instantiation_warnings : bool
When True (default is False), warnings stemming from event
instantiation (primarily about automatic duration adjustments) are
disabled. Not recommended other than for testing purposes.
Returns
-------
instantiated_event : EventSpec
Event specification containing values sampled from the distribution
tuples of the input event specification.
Raises
------
ScaperError
If allow_repeated_source is False and there is no valid source file
to select.
'''
# set paths and labels depending on whether its a foreground/background
# event
if isbackground:
file_path = self.bg_path
allowed_labels = self.bg_labels
else:
file_path = self.fg_path
allowed_labels = self.fg_labels
# determine label
# special case: choose tuple with empty list
if event.label[0] == "choose" and not event.label[1]:
label_tuple = list(event.label)
label_tuple[1] = allowed_labels
label_tuple = tuple(label_tuple)
else:
label_tuple = event.label
label = _get_value_from_dist(label_tuple, self.random_state)
# Make sure we can use this label
if (not allow_repeated_label) and (label in used_labels):
if (len(allowed_labels) == len(used_labels) or
label_tuple[0] == "const"):
raise ScaperError(
"Cannot instantiate event {:s}: all available labels "
"have already been used and "
"allow_repeated_label=False.".format(label))
else:
while label in used_labels:
label = _get_value_from_dist(label_tuple, self.random_state)
# Update the used labels list
if label not in used_labels:
used_labels.append(label)
# determine source file
# special case: choose tuple with empty list
if event.source_file[0] == "choose" and not event.source_file[1]:
source_files = _get_sorted_files(os.path.join(file_path, label))
source_file_tuple = list(event.source_file)
source_file_tuple[1] = source_files
source_file_tuple = tuple(source_file_tuple)
else:
source_file_tuple = event.source_file
source_file = _get_value_from_dist(source_file_tuple, self.random_state)
# Make sure we can use this source file
if (not allow_repeated_source) and (source_file in used_source_files):
source_files = _get_sorted_files(os.path.join(file_path, label))
if (len(source_files) == len(used_source_files) or
source_file_tuple[0] == "const"):
raise ScaperError(
"Cannot instantiate event {:s}: all available source "
"files have already been used and "
"allow_repeated_source=False.".format(label))
else:
while source_file in used_source_files:
source_file = _get_value_from_dist(source_file_tuple, self.random_state)
# Update the used source files list
if source_file not in used_source_files:
used_source_files.append(source_file)
# Get the duration of the source audio file
source_duration = soundfile.info(source_file).duration
# If this is a background event, the event duration is the
# duration of the soundscape.
if isbackground:
event_duration = self.duration
# If the foreground event's label is in the protected list, use the
# source file's duration without modification.
elif label in self.protected_labels:
event_duration = source_duration
else:
# determine event duration
# For background events the duration is fixed to self.duration
# (which must be > 0), but for foreground events it could
# potentially be non-positive, hence the loop.
event_duration = -np.Inf
while event_duration <= 0:
event_duration = _get_value_from_dist(
event.event_duration, self.random_state
)
# Check if chosen event duration is longer than the duration of the
# selected source file, if so adjust the event duration.
if (event_duration > source_duration):
old_duration = event_duration # for warning
event_duration = source_duration
if not disable_instantiation_warnings:
warnings.warn(
"{:s} event duration ({:.2f}) is greater that source "
"duration ({:.2f}), changing to {:.2f}".format(
label, old_duration, source_duration, event_duration),
ScaperWarning)
# Get time stretch value
if event.time_stretch is None:
time_stretch = None
event_duration_stretched = event_duration
else:
time_stretch = -np.Inf
while time_stretch <= 0:
time_stretch = _get_value_from_dist(
event.time_stretch, self.random_state
)
# compute duration after stretching
event_duration_stretched = event_duration * time_stretch
# If the event duration is longer than the soundscape we can trim it
# without losing validity (since the event will end when the soundscape
# ends).
if time_stretch is None:
if event_duration > self.duration:
old_duration = event_duration # for warning
event_duration = self.duration
if not disable_instantiation_warnings:
warnings.warn(
"{:s} event duration ({:.2f}) is greater than the "
"soundscape duration ({:.2f}), changing to "
"{:.2f}".format(
label, old_duration, self.duration, self.duration),
ScaperWarning)
else:
if event_duration_stretched > self.duration:
old_duration = event_duration # for warning
event_duration = self.duration / float(time_stretch)
event_duration_stretched = self.duration
if not disable_instantiation_warnings:
warnings.warn(
"{:s} event duration ({:.2f}) with stretch factor "
"{:.2f} gives {:.2f} which is greater than the "
"soundscape duration ({:.2f}), changing to "
"{:.2f} ({:.2f} after time stretching)".format(
label, old_duration, time_stretch,
old_duration * time_stretch, self.duration,
event_duration, event_duration_stretched),
ScaperWarning)
# Modify event.source_time so that sampling from the source time distribution
# stays within the bounds of the audio file - event_duration. This allows users
# to sample from anywhere in a source file without knowing the exact duration
# of every source file. Only modify if label is not in protected labels.
if label not in self.protected_labels:
tuple_still_invalid = False
modified_source_time, warn = _ensure_satisfiable_source_time_tuple(
event.source_time, source_duration, event_duration
)
# determine source time and also check again just in case (for normal dist).
# if it happens again, just use the old method.
source_time = -np.Inf
while source_time < 0:
source_time = _get_value_from_dist(
modified_source_time, self.random_state)
if source_time + event_duration > source_duration:
source_time = max(0, source_duration - event_duration)
warn = True
tuple_still_invalid = True
if warn and not disable_instantiation_warnings:
old_source_time = ', '.join(map(str, event.source_time))
new_source_time = ', '.join(map(str, modified_source_time))
if not tuple_still_invalid:
warnings.warn(
"{:s} source time tuple ({:s}) could not be satisfied given "
"source duration ({:.2f}) and event duration ({:.2f}), "
"source time tuple changed to ({:s})".format(
label, old_source_time, source_duration,
event_duration, new_source_time),
ScaperWarning)
else:
warnings.warn(
"{:s} source time tuple ({:s}) could not be satisfied given "
"source duration ({:.2f}) and event duration ({:.2f}), "
"source time tuple changed to ({:s}) but was still not "
"satisfiable, likely due to using 'normal' distribution with "
"bounds too close to the start or end of the audio file".format(
label, old_source_time, source_duration,
event_duration, new_source_time),
ScaperWarning)
else:
source_time = 0.0
# determine event time
# for background events the event time is fixed to 0, but for
# foreground events it's not.
event_time = -np.Inf
while event_time < 0:
event_time = _get_value_from_dist(
event.event_time, self.random_state
)
# Make sure the selected event time + event duration are is not greater
# than the total duration of the soundscape, if it is adjust the event
# time. This means event duration takes precedence over the event
# start time.
if time_stretch is None:
if event_time + event_duration > self.duration:
old_event_time = event_time
event_time = self.duration - event_duration
if not disable_instantiation_warnings:
warnings.warn(
'{:s} event time ({:.2f}) is too great given event '
'duration ({:.2f}) and soundscape duration ({:.2f}), '
'changed to {:.2f}.'.format(
label, old_event_time, event_duration,
self.duration, event_time),
ScaperWarning)
else:
if event_time + event_duration_stretched > self.duration:
old_event_time = event_time
event_time = self.duration - event_duration_stretched
if not disable_instantiation_warnings:
warnings.warn(
'{:s} event time ({:.2f}) is too great given '
'stretched event duration ({:.2f}) and soundscape '
'duration ({:.2f}), changed to {:.2f}.'.format(
label, old_event_time, event_duration_stretched,
self.duration, event_time),
ScaperWarning)
# determine snr
snr = _get_value_from_dist(event.snr, self.random_state)
# get role (which can only take "foreground" or "background" and
# is set internally, not by the user).
role = event.role
# determine pitch_shift
if event.pitch_shift is not None:
pitch_shift = _get_value_from_dist(event.pitch_shift, self.random_state)
else:
pitch_shift = None
# pack up instantiated values in an EventSpec
instantiated_event = EventSpec(label=label,
source_file=source_file,
source_time=source_time,
event_time=event_time,
event_duration=event_duration,
snr=snr,
role=role,
pitch_shift=pitch_shift,
time_stretch=time_stretch)
# Return
return instantiated_event
def _instantiate(self, allow_repeated_label=True,
allow_repeated_source=True, reverb=None,
disable_instantiation_warnings=False):
'''
Instantiate a specific soundscape in JAMS format based on the current
specification.
Any non-deterministic event values (i.e. distribution tuples) will be
sampled randomly from based on the distribution parameters.
Parameters
----------
allow_repeated_label : bool
When True (default) the same label can be used more than once
in a soundscape instantiation. When False every label can
only be used once.
allow_repeated_source : bool
When True (default) the same source file can be used more than once
in a soundscape instantiation. When False every source file can
only be used once.
reverb : float or None
Has no effect on this function other than being documented in the
instantiated annotation's sandbox. Passed by ``Scaper.generate``.
disable_instantiation_warnings : bool
When True (default is False), warnings stemming from event
instantiation (primarily about automatic duration adjustments) are
disabled. Not recommended other than for testing purposes.
Returns
-------
jam : JAMS object
A JAMS object containing a scaper annotation representing the
instantiated soundscape.
See Also
--------
Scaper.generate
'''
jam = jams.JAMS()
ann = jams.Annotation(namespace='scaper')
# Set annotation duration (might be changed later due to cropping)
ann.duration = self.duration
# INSTANTIATE BACKGROUND AND FOREGROUND EVENTS AND ADD TO ANNOTATION
# NOTE: logic for instantiating bg and fg events is NOT the same.
# Add background sounds
bg_labels = []
bg_source_files = []
for event in self.bg_spec:
value = self._instantiate_event(
event,
isbackground=True,
allow_repeated_label=allow_repeated_label,
allow_repeated_source=allow_repeated_source,
used_labels=bg_labels,
used_source_files=bg_source_files,
disable_instantiation_warnings=disable_instantiation_warnings)
# Note: add_background doesn't allow to set a time_stretch, i.e.
# it's hardcoded to time_stretch=None, so we don't need to check
# if value.time_stretch is not None, since it always will be.
ann.append(time=value.event_time,
duration=value.event_duration,
value=value._asdict(),
confidence=1.0)
# Add foreground events
fg_labels = []
fg_source_files = []
for event in self.fg_spec:
value = self._instantiate_event(
event,
isbackground=False,
allow_repeated_label=allow_repeated_label,
allow_repeated_source=allow_repeated_source,
used_labels=fg_labels,
used_source_files=fg_source_files,
disable_instantiation_warnings=disable_instantiation_warnings)
if value.time_stretch is not None:
event_duration_stretched = (
value.event_duration * value.time_stretch)
else:
event_duration_stretched = value.event_duration
ann.append(time=value.event_time,
duration=event_duration_stretched,
value=value._asdict(),
confidence=1.0)
# Compute max polyphony
poly = max_polyphony(ann)
# Compute the number of foreground events
n_events = len(self.fg_spec)
# Compute gini
gini = polyphony_gini(ann)
# Add specs and other info to sandbox
ann.sandbox.scaper = jams.Sandbox(
duration=self.duration,
original_duration=self.duration,
fg_path=self.fg_path,
bg_path=self.bg_path,
fg_spec=self.fg_spec,
bg_spec=self.bg_spec,
fg_labels=self.fg_labels,
bg_labels=self.bg_labels,
protected_labels=self.protected_labels,
sr=self.sr,
ref_db=self.ref_db,
n_channels=self.n_channels,
fade_in_len=self.fade_in_len,
fade_out_len=self.fade_out_len,
n_events=n_events,
polyphony_max=poly,
polyphony_gini=gini,
allow_repeated_label=allow_repeated_label,
allow_repeated_source=allow_repeated_source,
reverb=reverb,
scaper_version=scaper_version,
soundscape_audio_path=None,
isolated_events_audio_path=[],
# Initialize missing generate parameters
audio_path=None,
jams_path=None,
fix_clipping=None,
peak_normalization=None,
save_isolated_events=None,
isolated_events_path=None,
disable_sox_warnings=None,
no_audio=None,
txt_path=None,
txt_sep=None,
disable_instantiation_warnings=None,
peak_normalization_scale_factor=None,
ref_db_change=None,
ref_db_generated=None)
# Add annotation to jams
jam.annotations.append(ann)
# Set jam metadata
jam.file_metadata.duration = ann.duration
# Return
return jam
    def _generate_audio(self,
                        audio_path,
                        ann,
                        reverb=None,
                        fix_clipping=False,
                        peak_normalization=False,
                        quick_pitch_time=False,
                        save_isolated_events=False,
                        isolated_events_path=None,
                        disable_sox_warnings=True):
        '''
        Generate audio based on a scaper annotation and save to disk.
        Parameters
        ----------
        audio_path : str
            Path for saving soundscape audio file.
        ann : jams.Annotation
            Annotation of the scaper namespace.
        reverb : float or None
            Amount of reverb to apply to the generated soundscape between 0
            (no reverberation) and 1 (maximum reverberation). Use None
            (default) to prevent the soundscape from going through the reverb
            module at all.
        fix_clipping: bool
            When True (default=False), checks the soundscape audio for clipping
            (abs(sample) > 1). If so, the soundscape waveform is peak normalized,
            i.e., scaled such that max(abs(soundscape_audio)) = 1. The audio for
            each isolated event is also scaled accordingly. Note: this will change
            the actual value of `ref_db` in the generated audio. The scaling
            factor that was used is returned.
        peak_normalization : bool
            When True (default=False), normalize the generated soundscape audio
            such that max(abs(soundscape_audio)) = 1. The audio for
            each isolated event is also scaled accordingly. Note: this will change
            the actual value of `ref_db` in the generated audio. The scaling
            factor that was used is returned.
        quick_pitch_time : bool
            When True (default=False), time stretching and pitch shifting will be
            applied with `quick=True`. This is much faster but the resultant
            audio is generally of lower audio quality.
        save_isolated_events : bool
            If True, this will save the isolated foreground events and
            backgrounds in a directory adjacent to the generated soundscape
            mixture, or to the path defined by `isolated_events_path`. The
            audio of the isolated events sum up to the mixture if reverb is not
            applied. Isolated events can be found (by default) at
            `<audio_outfile parent folder>/<audio_outfile name>_events`.
            Isolated event file names follow the pattern: `<role><idx>_<label>`,
            where idx is the index of the isolated event in self.fg_spec or
            self.bg_spec (this allows events of the same label to be added more
            than once to the soundscape without breaking things). Role is
            "background" or "foreground". For example: `foreground0_siren.wav`
            or `background0_park.wav`.
        isolated_events_path : str
            Path to folder for saving isolated events. If None, defaults to
            `<audio_path parent folder>/<audio_path name>_events`.
        disable_sox_warnings : bool
            When True (default), warnings from the pysox module are suppressed
            unless their level is ``'CRITICAL'``.
        Returns
        -------
        soundscape_audio : np.ndarray
            The audio samples of the generated soundscape
        event_audio_list: list
            A list of np.ndarrays containing the audio samples of every
            individual background and foreground sound event. Events are listed
            in the same order in which they appear in the jams annotations data
            list, and can be matched with:
            `for obs, event_audio in zip(ann.data, event_audio_list): ...`.
        scale_factor : float
            If peak_normalization is True, or fix_clipping is True and the
            soundscape audio needs to be scaled to avoid clipping, scale_factor
            is the value used to scale the soundscape audio and the audio of the
            isolated events. Otherwise will return 1.0.
        ref_db_change : float
            The change (in dB) to the soundscape audio's ref_db if peak
            normalization is applied to fix clipping or because the user
            specified it. Otherwise will return 0.
        Raises
        ------
        ScaperError
            If annotation is not of the scaper namespace.
        See Also
        --------
        Scaper.generate
        '''
        if ann.namespace != 'scaper':
            raise ScaperError(
                'Annotation namespace must be scaper, found: {:s}'.format(
                    ann.namespace))
        # disable sox warnings
        if disable_sox_warnings:
            temp_logging_level = 'CRITICAL'  # only critical messages please
        else:
            temp_logging_level = logging.getLogger().level
        # List for storing all generated audio (one array for every event)
        soundscape_audio = None
        event_audio_list = []
        scale_factor = 1.0
        ref_db_change = 0
        with _set_temp_logging_level(temp_logging_level):
            isolated_events_audio_path = []
            duration_in_samples = int(self.duration * self.sr)
            for i, e in enumerate(ann.data):
                if e.value['role'] == 'background':
                    # Concatenate background if necessary.
                    source_duration = soundfile.info(e.value['source_file']).duration
                    # Number of copies needed so the tiled background covers
                    # the full soundscape duration (at least 1).
                    ntiles = int(
                        max(self.duration // source_duration + 1, 1))
                    # Create transformer
                    tfm = sox.Transformer()
                    # Ensure consistent sampling rate and channels
                    # Need both a convert operation (to do the conversion),
                    # and set_output_format (to have sox interpret the output
                    # correctly).
                    tfm.convert(
                        samplerate=self.sr,
                        n_channels=self.n_channels,
                        bitdepth=None
                    )
                    tfm.set_output_format(
                        rate=self.sr,
                        channels=self.n_channels
                    )
                    # PROCESS BEFORE COMPUTING LUFS
                    tmpfiles_internal = []
                    with _close_temp_files(tmpfiles_internal):
                        # create internal tmpfile
                        tmpfiles_internal.append(
                            tempfile.NamedTemporaryFile(
                                suffix='.wav', delete=False))
                        # read in background off disk, using start and stop
                        # to only read the necessary audio
                        event_sr = soundfile.info(e.value['source_file']).samplerate
                        # start/stop are frame offsets in the SOURCE file's
                        # native sample rate (conversion to self.sr happens
                        # later in tfm.build_array).
                        start = int(e.value['source_time'] * event_sr)
                        stop = int((e.value['source_time'] + e.value['event_duration']) * event_sr)
                        event_audio, event_sr = soundfile.read(
                            e.value['source_file'], always_2d=True,
                            start=start, stop=stop)
                        # tile the background along the appropriate dimensions
                        event_audio = np.tile(event_audio, (ntiles, 1))
                        # NOTE(review): truncation to `stop` frames here is in
                        # source-rate samples; final trimming to the soundscape
                        # length happens below via duration_in_samples.
                        event_audio = event_audio[:stop]
                        event_audio = tfm.build_array(
                            input_array=event_audio,
                            sample_rate_in=event_sr
                        )
                        event_audio = event_audio.reshape(-1, self.n_channels)
                        # NOW compute LUFS
                        bg_lufs = get_integrated_lufs(event_audio, self.sr)
                        # Normalize background to reference DB.
                        gain = self.ref_db - bg_lufs
                        # exp(gain*ln(10)/20) == 10**(gain/20), i.e. dB -> linear
                        event_audio = np.exp(gain * np.log(10) / 20) * event_audio
                        event_audio_list.append(event_audio[:duration_in_samples])
                elif e.value['role'] == 'foreground':
                    # Create transformer
                    tfm = sox.Transformer()
                    # Ensure consistent sampling rate and channels
                    # Need both a convert operation (to do the conversion),
                    # and set_output_format (to have sox interpret the output
                    # correctly).
                    tfm.convert(
                        samplerate=self.sr,
                        n_channels=self.n_channels,
                        bitdepth=None
                    )
                    tfm.set_output_format(
                        rate=self.sr,
                        channels=self.n_channels
                    )
                    # Pitch shift
                    if e.value['pitch_shift'] is not None:
                        tfm.pitch(e.value['pitch_shift'], quick=quick_pitch_time)
                    # Time stretch
                    if e.value['time_stretch'] is not None:
                        # sox tempo factor is playback speed, i.e. the inverse
                        # of the scaper time_stretch factor.
                        factor = 1.0 / float(e.value['time_stretch'])
                        tfm.tempo(factor, audio_type='s', quick=quick_pitch_time)
                    # PROCESS BEFORE COMPUTING LUFS
                    tmpfiles_internal = []
                    with _close_temp_files(tmpfiles_internal):
                        # create internal tmpfile
                        tmpfiles_internal.append(
                            tempfile.NamedTemporaryFile(
                                suffix='.wav', delete=False))
                        # synthesize edited foreground sound event,
                        # doing the trim via soundfile
                        event_sr = soundfile.info(e.value['source_file']).samplerate
                        start = int(e.value['source_time'] * event_sr)
                        stop = int((e.value['source_time'] + e.value['event_duration']) * event_sr)
                        event_audio, event_sr = soundfile.read(
                            e.value['source_file'], always_2d=True,
                            start=start, stop=stop)
                        event_audio = tfm.build_array(
                            input_array=event_audio,
                            sample_rate_in=event_sr
                        )
                        event_audio = event_audio.reshape(-1, self.n_channels)
                        # NOW compute LUFS
                        fg_lufs = get_integrated_lufs(event_audio, self.sr)
                        # Normalize to specified SNR with respect to
                        # background
                        gain = self.ref_db + e.value['snr'] - fg_lufs
                        event_audio = np.exp(gain * np.log(10) / 20) * event_audio
                        # Apply short fade in and out
                        # (avoid unnatural sound onsets/offsets)
                        if self.fade_in_len > 0:
                            fade_in_samples = int(self.fade_in_len * self.sr)
                            # quarter-sine ramp, broadcast over channels
                            fade_in_window = np.sin(np.linspace(0, np.pi / 2, fade_in_samples))[..., None]
                            event_audio[:fade_in_samples] *= fade_in_window
                        if self.fade_out_len > 0:
                            fade_out_samples = int(self.fade_out_len * self.sr)
                            fade_out_window = np.sin(np.linspace(np.pi / 2, 0, fade_out_samples))[..., None]
                            event_audio[-fade_out_samples:] *= fade_out_window
                        # Pad with silence before/after event to match the
                        # soundscape duration
                        prepad = int(self.sr * e.value['event_time'])
                        postpad = max(0, duration_in_samples - (event_audio.shape[0] + prepad))
                        event_audio = np.pad(event_audio, ((prepad, postpad), (0, 0)),
                                             mode='constant', constant_values=(0, 0))
                        event_audio = event_audio[:duration_in_samples]
                        event_audio_list.append(event_audio[:duration_in_samples])
                else:
                    raise ScaperError(
                        'Unsupported event role: {:s}'.format(
                            e.value['role']))
            # Finally combine all the files and optionally apply reverb.
            # If there are no events, throw a warning.
            if len(event_audio_list) == 0:
                warnings.warn(
                    "No events to synthesize (silent soundscape), no audio "
                    "generated.", ScaperWarning)
            else:
                # Sum all events to get soundscape audio
                soundscape_audio = sum(event_audio_list)
                # Check for clipping and fix [optional]
                max_sample = np.max(np.abs(soundscape_audio))
                clipping = max_sample > 1
                if clipping:
                    warnings.warn('Soundscape audio is clipping!',
                                  ScaperWarning)
                if peak_normalization or (clipping and fix_clipping):
                    # normalize soundscape audio and scale event audio
                    soundscape_audio, event_audio_list, scale_factor = \
                        peak_normalize(soundscape_audio, event_audio_list)
                    ref_db_change = 20 * np.log10(scale_factor)
                    if clipping and fix_clipping:
                        warnings.warn(
                            'Peak normalization applied to fix clipping with '
                            'scale factor = {}. The actual ref_db of the '
                            'generated soundscape audio will change by '
                            'approximately {:.2f}dB with respect to the target '
                            'ref_db of {})'.format(
                                scale_factor, ref_db_change, self.ref_db),
                            ScaperWarning)
                    if scale_factor < 0.05:
                        warnings.warn(
                            'Scale factor for peak normalization is extreme '
                            '(<0.05), event SNR values in the generated soundscape '
                            'audio may not perfectly match their specified values.',
                            ScaperWarning
                        )
                # Optionally apply reverb
                # NOTE: must apply AFTER peak normalization: applying reverb
                # to a clipping signal with sox and then normalizing doesn't
                # work as one would hope.
                if reverb is not None:
                    tfm = sox.Transformer()
                    # sox reverberance is expressed in percent (0-100)
                    tfm.reverb(reverberance=reverb * 100)
                    soundscape_audio = tfm.build_array(
                        input_array=soundscape_audio,
                        sample_rate_in=self.sr,
                    )
                # Reshape to ensure data are 2d
                soundscape_audio = soundscape_audio.reshape(-1, self.n_channels)
                # Optionally save soundscape audio to disk
                if audio_path is not None:
                    soundfile.write(audio_path, soundscape_audio, self.sr,
                                    subtype='PCM_32')
                # Optionally save isolated events to disk
                if save_isolated_events:
                    base, ext = os.path.splitext(audio_path)
                    if isolated_events_path is None:
                        event_folder = '{:s}_events'.format(base)
                    else:
                        event_folder = isolated_events_path
                    os.makedirs(event_folder, exist_ok=True)
                    iso_idx = 0
                    # per-role counters drive the <role><idx>_<label> naming
                    role_counter = {'background': 0, 'foreground': 0}
                    for i, e in enumerate(ann.data):
                        _role_count = role_counter[e.value['role']]
                        event_audio_path = os.path.join(
                            event_folder,
                            '{:s}{:d}_{:s}{:s}'.format(
                                e.value['role'], _role_count, e.value['label'], ext))
                        role_counter[e.value['role']] += 1
                        soundfile.write(event_audio_path, event_audio_list[iso_idx], self.sr, subtype='PCM_32')
                        isolated_events_audio_path.append(event_audio_path)
                        iso_idx += 1
                    # TODO what do we do in this case? for now throw a warning
                    if reverb is not None:
                        warnings.warn(
                            "Reverb is on and save_isolated_events is True. Reverberation "
                            "is applied to the mixture but not output "
                            "source files. In this case the sum of the "
                            "audio of the isolated events will not add up to the "
                            "mixture", ScaperWarning)
        # Document output paths
        # TODO: this is redundant with audio_path and isolated_events_path that
        # are also stored in ann.sandbox.scaper. For now we're keeping these
        # here for now for backwards compatibility e.g. with FUSS. Eventually
        # we should remove these two lines and consolidate how/where JAMS
        # metadata is stored (cf. generate() and generate_from_jams()).
        ann.sandbox.scaper.soundscape_audio_path = audio_path
        ann.sandbox.scaper.isolated_events_audio_path = isolated_events_audio_path
        # Return audio for in-memory processing
        return soundscape_audio, event_audio_list, scale_factor, ref_db_change
    def generate(self,
                 audio_path=None,
                 jams_path=None,
                 allow_repeated_label=True,
                 allow_repeated_source=True,
                 reverb=None,
                 fix_clipping=False,
                 peak_normalization=False,
                 quick_pitch_time=False,
                 save_isolated_events=False,
                 isolated_events_path=None,
                 disable_sox_warnings=True,
                 no_audio=False,
                 txt_path=None,
                 txt_sep='\t',
                 disable_instantiation_warnings=False):
        """
        Generate a soundscape based on the current specification and return as
        an audio file, a JAMS annotation, a simplified annotation list, and a
        list containing the audio samples of each individual background and
        foreground event. If output paths are provided, these objects will also
        be saved to disk.
        Parameters
        ----------
        audio_path : str
            Path for saving soundscape audio to disk. If None, does not save
            audio to disk.
        jams_path : str
            Path for saving soundscape jams annotation to disk. If None, does
            not save JAMS to disk.
        allow_repeated_label : bool
            When True (default) the same label can be used more than once
            in a soundscape instantiation. When False every label can
            only be used once.
        allow_repeated_source : bool
            When True (default) the same source file can be used more than once
            in a soundscape instantiation. When False every source file can
            only be used once.
        reverb : float or None
            Amount of reverb to apply to the generated soundscape between 0
            (no reverberation) and 1 (maximum reverberation). Use None
            (default) to prevent the soundscape from going through the reverb
            module at all.
        fix_clipping: bool
            When True (default=False), checks the soundscape audio for clipping
            (abs(sample) > 1). If so, the soundscape waveform is peak normalized,
            i.e., scaled such that max(abs(soundscape_audio)) = 1. The audio for
            each isolated event is also scaled accordingly. Note: this will change
            the actual value of `ref_db` in the generated audio. The updated
            `ref_db` value will be stored in the JAMS annotation. The SNR of
            foreground events with respect to the background is unaffected except
            when extreme scaling is required to prevent clipping.
        peak_normalization : bool
            When True (default=False), normalize the generated soundscape audio
            such that max(abs(soundscape_audio)) = 1. The audio for
            each isolated event is also scaled accordingly. Note: this will change
            the actual value of `ref_db` in the generated audio. The updated
            `ref_db` value will be stored in the JAMS annotation. The SNR of
            foreground events with respect to the background is unaffected except
            when extreme scaling is required to achieve peak normalization.
        quick_pitch_time : bool
            When True (default=False), time stretching and pitch shifting will be
            applied with `quick=True`. This is much faster but the resultant
            audio is generally of lower audio quality.
        save_isolated_events : bool
            If True, this will save the isolated foreground events and
            backgrounds in a directory adjacent to the generated soundscape
            mixture, or to the path defined by `isolated_events_path`. The
            audio of the isolated events sum up to the mixture if reverb is not
            applied. Isolated events can be found (by default) at
            `<audio_outfile parent folder>/<audio_outfile name>_events`.
            Isolated event file names follow the pattern: `<role><idx>_<label>`,
            where count is the index of the isolated event in self.fg_spec or
            self.bg_spec (this allows events of the same label to be added more
            than once to the soundscape without breaking things). Role is
            "background" or "foreground". For example: `foreground0_siren.wav`
            or `background0_park.wav`.
        isolated_events_path : str
            Path to folder for saving isolated events. If None, defaults to
            `<audio_path parent folder>/<audio_path name>_events`. Only relevant
            if save_isolated_events=True.
        disable_sox_warnings : bool
            When True (default), warnings from the pysox module are suppressed
            unless their level is ``'CRITICAL'``. If you're experiencing issues related
            to audio I/O setting this parameter to False may help with debugging.
        no_audio : bool
            If True this function will only generates a JAMS file but will not
            generate any audio (neither in memory nor saved to disk). Useful for
            efficiently generating a large number of soundscape JAMS for
            later synthesis via `generate_from_jams()`.
        txt_path: str or None
            Path for saving a simplified annotation in a space separated format
            [onset offset label] where onset and offset are in seconds. Good
            for loading labels in e.g. Audacity. If None, does not save txt
            annotation to disk.
        txt_sep: str
            The separator to use when saving a simplified annotation as a text
            file (default is tab for compatibility with Audacity label files).
            Only relevant if txt_path is not None.
        disable_instantiation_warnings : bool
            When True (default is False), warnings stemming from event
            instantiation (primarily about automatic duration adjustments) are
            disabled. Not recommended other than for testing purposes.
        Returns
        -------
        soundscape_audio : np.ndarray
            The audio samples of the generated soundscape. Returns None if
            no_audio=True.
        soundscape_jam: jams.JAMS
            The JAMS object containing the full soundscape annotation.
        annotation_list : list
            A simplified annotation in a space-separated format
            [onset offset label] where onset and offset are in seconds.
        event_audio_list: list
            A list of np.ndarrays containing the audio samples of every
            individual background and foreground sound event. Events are listed
            in the same order in which they appear in the jams annotations data
            list, and can be matched with:
            `for obs, event_audio in zip(ann.data, event_audio_list): ...`.
        Raises
        ------
        ScaperError
            If the reverb parameter is passed an invalid value.
        See Also
        --------
        Scaper.generate_from_jams
        Scaper._instantiate
        Scaper._generate_audio
        """
        # Check parameter validity
        if reverb is not None:
            if not (0 <= reverb <= 1):
                raise ScaperError(
                    'Invalid value for reverb: must be in range [0, 1] or '
                    'None.')
        # Create specific instance of a soundscape based on the spec
        soundscape_jam = self._instantiate(
            allow_repeated_label=allow_repeated_label,
            allow_repeated_source=allow_repeated_source,
            reverb=reverb,
            disable_instantiation_warnings=disable_instantiation_warnings)
        # The single 'scaper' annotation holds all instantiated sound events.
        ann = soundscape_jam.annotations.search(namespace='scaper')[0]
        soundscape_audio, event_audio_list = None, None
        # Generate the audio and save to disk
        # Defaults below are used when no_audio=True (JAMS-only generation).
        scale_factor = 1.0
        ref_db_change = 0
        if not no_audio:
            soundscape_audio, event_audio_list, scale_factor, ref_db_change = \
                self._generate_audio(audio_path, ann,
                                     reverb=reverb,
                                     save_isolated_events=save_isolated_events,
                                     isolated_events_path=isolated_events_path,
                                     disable_sox_warnings=disable_sox_warnings,
                                     fix_clipping=fix_clipping,
                                     peak_normalization=peak_normalization,
                                     quick_pitch_time=quick_pitch_time)
        # TODO: Stick to heavy handed overwriting for now, in the future we
        # should consolidate this with what happens inside _instantiate().
        # Record every generate() argument in the annotation sandbox so the
        # soundscape can be reproduced later via generate_from_jams().
        ann.sandbox.scaper.audio_path = audio_path
        ann.sandbox.scaper.jams_path = jams_path
        ann.sandbox.scaper.allow_repeated_label = allow_repeated_label
        ann.sandbox.scaper.allow_repeated_source = allow_repeated_source
        ann.sandbox.scaper.reverb = reverb
        ann.sandbox.scaper.fix_clipping = fix_clipping
        ann.sandbox.scaper.peak_normalization = peak_normalization
        ann.sandbox.scaper.quick_pitch_time = quick_pitch_time
        ann.sandbox.scaper.save_isolated_events = save_isolated_events
        ann.sandbox.scaper.isolated_events_path = isolated_events_path
        ann.sandbox.scaper.disable_sox_warnings = disable_sox_warnings
        ann.sandbox.scaper.no_audio = no_audio
        ann.sandbox.scaper.txt_path = txt_path
        ann.sandbox.scaper.txt_sep = txt_sep
        ann.sandbox.scaper.disable_instantiation_warnings = disable_instantiation_warnings
        ann.sandbox.scaper.peak_normalization_scale_factor = scale_factor
        ann.sandbox.scaper.ref_db_change = ref_db_change
        ann.sandbox.scaper.ref_db_generated = self.ref_db + ref_db_change
        # Save JAMS to disk too
        if jams_path is not None:
            soundscape_jam.save(jams_path)
        # Create annotation list
        # Only foreground events are exported to the simplified list.
        annotation_list = []
        for obs in ann.data:
            if obs.value['role'] == 'foreground':
                annotation_list.append(
                    [obs.time, obs.time + obs.duration, obs.value['label']])
        if txt_path is not None:
            # txt_sep is used as the csv delimiter (tab by default, for
            # compatibility with Audacity label files).
            with open(txt_path, 'w') as csv_file:
                writer = csv.writer(csv_file, delimiter=txt_sep)
                writer.writerows(annotation_list)
        # Return
        return soundscape_audio, soundscape_jam, annotation_list, event_audio_list
| 106,280 | 43.712242 | 111 | py |
scaper | scaper-master/scaper/scaper_exceptions.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED: 10/11/16 6:07 PM by Justin Salamon <justin.salamon@nyu.edu>
'''Exception classes for Scaper'''
class ScaperError(Exception):
    '''Base exception type raised for all scaper-specific errors.'''
| 239 | 19 | 70 | py |
scaper | scaper-master/scaper/audio.py | # CREATED: 4/23/17 15:37 by Justin Salamon <justin.salamon@nyu.edu>
import numpy as np
import pyloudnorm
import soundfile
from .scaper_exceptions import ScaperError
def get_integrated_lufs(audio_array, samplerate, min_duration=0.5,
                        filter_class='K-weighting', block_size=0.400):
    """
    Returns the integrated LUFS for a numpy array containing
    audio samples.
    For files shorter than 400 ms pyloudnorm throws an error. To avoid this,
    files shorter than min_duration (by default 500 ms) are self-concatenated
    until min_duration is reached and the LUFS value is computed for the
    concatenated file.
    Parameters
    ----------
    audio_array : np.ndarray
        numpy array containing samples or path to audio file for computing LUFS
    samplerate : int
        Sample rate of audio, for computing duration
    min_duration : float
        Minimum required duration for computing LUFS value. Files shorter than
        this are self-concatenated until their duration reaches this value
        for the purpose of computing the integrated LUFS. Caution: if you set
        min_duration < 0.4, a constant LUFS value of -70.0 will be returned for
        all files shorter than 400 ms.
    filter_class : str
        Class of weighting filter used.
        - 'K-weighting' (default)
        - 'Fenton/Lee 1'
        - 'Fenton/Lee 2'
        - 'Dash et al.'
    block_size : float
        Gating block size in seconds. Defaults to 0.400.
    Returns
    -------
    loudness
        Loudness in terms of LUFS
    """
    # Duration is derived from the first axis, i.e. axis 0 is the sample axis.
    duration = audio_array.shape[0] / float(samplerate)
    if duration < min_duration:
        ntiles = int(np.ceil(min_duration / duration))
        # NOTE(review): np.tile with reps (ntiles, 1) repeats along axis 0 only
        # when audio_array is 2-D (samples, channels); for a 1-D mono array it
        # yields a (ntiles, n) 2-D array rather than a longer 1-D signal --
        # presumably callers always pass 2-D audio here. TODO confirm.
        audio_array = np.tile(audio_array, (ntiles, 1))
    meter = pyloudnorm.Meter(
        samplerate, filter_class=filter_class, block_size=block_size
    )
    loudness = meter.integrated_loudness(audio_array)
    # silent audio gives -inf, so need to put a lower bound.
    loudness = max(loudness, -70)
    return loudness
def match_sample_length(audio_path, duration_in_samples):
    '''
    Force the audio file at ``audio_path`` to be exactly
    ``duration_in_samples`` samples long, rewriting it in place.

    The file is loaded, then either trimmed (samples dropped from the end) or
    zero-padded (zeros appended to the end) so its length matches exactly.
    The rewritten file keeps the original sample rate, subtype and format.

    Parameters
    ----------
    audio_path : str
        Path to the audio file that will be modified in place.
    duration_in_samples : int
        Target duration of the audio, in samples. Must be a positive integer.

    Raises
    ------
    ScaperError
        If ``duration_in_samples`` is not a positive integer.
    '''
    if duration_in_samples <= 0:
        raise ScaperError('Duration in samples must be > 0.')
    if not isinstance(duration_in_samples, int):
        raise ScaperError('Duration in samples must be an integer.')
    samples, sample_rate = soundfile.read(audio_path)
    # Keep the original encoding so the rewrite is format-preserving.
    info = soundfile.info(audio_path)
    n_current = samples.shape[0]
    if duration_in_samples < n_current:
        # Too long: drop samples from the tail.
        samples = samples[:duration_in_samples]
    elif duration_in_samples > n_current:
        # Too short: zero-pad the tail along the first (time) axis only.
        padding = [(0, 0)] * samples.ndim
        padding[0] = (0, duration_in_samples - n_current)
        samples = np.pad(samples, padding, 'constant')
    soundfile.write(audio_path, samples, sample_rate,
                    subtype=info.subtype, format=info.format)
def peak_normalize(soundscape_audio, event_audio_list):
    """
    Peak normalize a soundscape and its isolated events.

    Computes the scale factor that makes max(abs(soundscape_audio)) equal to
    1 and applies it to both the soundscape and every isolated event, so the
    scaled events still sum to the scaled mixture.

    Parameters
    ----------
    soundscape_audio : np.ndarray
        The soundscape audio.
    event_audio_list : list
        List of np.ndarrays containing the audio samples of each isolated
        foreground event.

    Returns
    -------
    scaled_soundscape_audio : np.ndarray
        The peak normalized soundscape audio.
    scaled_event_audio_list : list
        List of np.ndarrays with each isolated event scaled by the same
        factor as the soundscape.
    scale_factor : float
        The scale factor used to peak normalize the soundscape audio.
    """
    # Small epsilon guards against division by zero for silent audio.
    eps = 1e-10
    peak = np.abs(soundscape_audio).max()
    scale_factor = 1.0 / (peak + eps)
    scaled_soundscape_audio = scale_factor * soundscape_audio
    scaled_event_audio_list = [scale_factor * event for event in event_audio_list]
    return scaled_soundscape_audio, scaled_event_audio_list, scale_factor
| 4,982 | 35.108696 | 85 | py |
scaper | scaper-master/scaper/version.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Version info"""
short_version = '1.6'
version = '1.6.5'
| 106 | 14.285714 | 23 | py |
scaper | scaper-master/scaper/util.py | # CREATED: 10/14/16 12:35 PM by Justin Salamon <justin.salamon@nyu.edu>
'''
Utility functions
=================
'''
from contextlib import contextmanager
import logging
import os
import glob
from .scaper_exceptions import ScaperError
import warnings
from .scaper_warnings import ScaperWarning
import scipy
import numpy as np
import numbers
from copy import deepcopy
@contextmanager
def _close_temp_files(tmpfiles):
'''
Utility function for creating a context and closing all temporary files
once the context is exited. For correct functionality, all temporary file
handles created inside the context must be appended to the ```tmpfiles```
list.
Parameters
----------
tmpfiles : list
List of temporary file handles
'''
def _close():
for t in tmpfiles:
try:
t.close()
os.unlink(t.name)
except:
pass
try:
yield
except:
_close()
raise
_close()
@contextmanager
def _set_temp_logging_level(level):
'''
Utility function for temporarily changing the logging level using contexts.
Parameters
----------
level : str or int
The desired temporary logging level. For allowed values see:
https://docs.python.org/2/library/logging.html#logging-levels
'''
logger = logging.getLogger()
current_level = logger.level
logger.setLevel(level)
yield
logger.setLevel(current_level)
def _get_sorted_files(folder_path):
    '''
    Return the absolute paths of all regular files directly contained in
    ``folder_path``.

    The result is sorted alphabetically so that ordering is reproducible
    across operating systems; subfolders are excluded.

    Parameters
    ----------
    folder_path : str
        Path to the folder to scan for files.

    Returns
    -------
    files : list
        Sorted list of absolute paths to all valid files contained within
        ``folder_path``.
    '''
    # Raises if folder_path does not point to a valid folder.
    _validate_folder_path(folder_path)
    all_entries = sorted(glob.glob(os.path.join(folder_path, '*')))
    return [entry for entry in all_entries if os.path.isfile(entry)]
def _validate_folder_path(folder_path):
'''
Validate that a provided path points to a valid folder.
Parameters
----------
folder_path : str
Path to a folder.
Raises
------
ScaperError
If ```folder_path``` does not point to a valid folder.
'''
if not os.path.isdir(folder_path):
raise ScaperError(
'Folder path "{:s}" does not point to a valid folder'.format(
str(folder_path)))
def _populate_label_list(folder_path, label_list):
    '''
    Append the name of every non-hidden subfolder of ``folder_path`` to
    ``label_list`` (in place), then sort the list.

    Scaper uses this to build the lists of valid foreground and background
    labels from the subfolder names of ``fg_path`` and ``bg_path`` provided
    during initialization. Entries whose name starts with '.' are skipped.

    Parameters
    ----------
    folder_path : str
        Path to a folder
    label_list : list
        List to which label (subfolder) names will be added.

    See Also
    --------
    _validate_folder_path : Validate that a provided path points to a valid
    folder.
    '''
    # Raises if folder_path is not a directory.
    _validate_folder_path(folder_path)
    for entry in os.listdir(folder_path):
        is_subfolder = os.path.isdir(os.path.join(folder_path, entry))
        if is_subfolder and not entry.startswith('.'):
            label_list.append(entry)
    # ensure consistent ordering of labels
    label_list.sort()
def _check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters
----------
seed : None | int | instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
elif isinstance(seed, (numbers.Integral, np.integer, int)):
return np.random.RandomState(seed)
elif isinstance(seed, np.random.RandomState):
return seed
else:
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def _sample_const(item, random_state):
'''
Return a value sampled from a constant distribution (just the item).
Parameters
----------
item : any
What to return
random_state : mtrand.RandomState
RandomState object used to sample from this distribution (ignored).
This is here to match the other function specifications.
Returns
-------
value : any
item, returned.
'''
return item
def _sample_uniform(minimum, maximum, random_state):
'''
Return a random value sampled from a uniform distribution
between ```minimum``` and ```maximum```.
Parameters
----------
minimum : float
Minimum of uniform distribution
maximum : float
Maximum of uniform distribution
random_state : mtrand.RandomState
RandomState object used to sample from this distribution.
Returns
-------
value : float
A random value sampled from the uniform distribution defined
by ```minimum```, ```maximum```.
'''
return random_state.uniform(minimum, maximum)
def _sample_normal(mu, sigma, random_state):
'''
Return a random value sampled from a normal distribution with
mean ```mu``` and standard deviation ```sigma```.
Parameters
----------
mu : float
The mean of the truncated normal distribution
sig : float
The standard deviation of the truncated normal distribution
random_state : mtrand.RandomState
RandomState object used to sample from this distribution.
Returns
-------
value : float
A random value sampled from the normal distribution defined
by ```mu```, ```sigma```.
'''
return random_state.normal(mu, sigma)
def _sample_choose(list_of_options, random_state):
'''
Return a random item from ```list_of_options```, using random_state.
If there are duplicates in ```list_of_options```, we remove them from the
list before sampling an item from the list.
Parameters
----------
list_of_options : list
List of items to choose from.
random_state : mtrand.RandomState
RandomState object used to sample from this distribution.
Returns
-------
value : any
A random item chosen from ```list_of_options```.
'''
new_list_of_options = sorted(list(set(list_of_options)))
if len(new_list_of_options) < len(list_of_options):
warnings.warn(
'Removed duplicates from choose list. List length changed '
'from {:d} to {:d}'.format(len(list_of_options), len(new_list_of_options)),
ScaperWarning)
index = random_state.randint(len(new_list_of_options))
return new_list_of_options[index]
def _sample_choose_weighted(list_of_options, probabilities, random_state):
'''
Return a random item from ```list_of_options``` using weighted sampling defined
by ```probabilities```, using random_state. The number of items in ```list_of_options```
and ```probabilities``` must match, and the values in ```probabilities``` must be in the
range [0, 1] and sum to 1. Unlike ```_sample_choose```, duplicates in
```list_of_options``` are not removed prior to sampling.
Parameters
----------
list_of_options : list
List of items to choose from.
probabilities : list of floats
List of probabilities corresponding to the elements in ```list_of_options```, such
that the item in ```list_of_options[i]``` is chosen with probability ```probabilities[i]```.
random_state : mtrand.RandomState
RandomState object used to sample from this distribution.
Returns
-------
value : any
A random item chosen from ```list_of_options```.
'''
return random_state.choice(list_of_options, p=probabilities)
def _sample_trunc_norm(mu, sigma, trunc_min, trunc_max, random_state):
'''
Return a random value sampled from a truncated normal distribution with
mean ```mu``` and standard deviation ```sigma``` whose values are limited
between ```trunc_min``` and ```trunc_max```.
Parameters
----------
mu : float
The mean of the truncated normal distribution
sig : float
The standard deviation of the truncated normal distribution
trunc_min : float
The minimum value allowed for the distribution (lower boundary)
trunc_max : float
The maximum value allowed for the distribution (upper boundary)
random_state : mtrand.RandomState
RandomState object used to sample from this distribution.
Returns
-------
value : float
A random value sampled from the truncated normal distribution defined
by ```mu```, ```sigma```, ```trunc_min``` and ```trunc_max```.
'''
# By default truncnorm expects a (lower boundary) and b (upper boundary)
# values for a standard normal distribution (mu=0, sigma=1), so we need
# to recompute a and b given the user specified parameters.
a, b = (trunc_min - mu) / float(sigma), (trunc_max - mu) / float(sigma)
sample = scipy.stats.truncnorm.rvs(a, b, mu, sigma, random_state=random_state)
# scipy 1.5.1 returns an array while scipy 1.4.0 returns a scalar.
# To maintain backwards compat we have to cast the sample to an array
# then back to a scalar.
return np.array(sample).item()
def max_polyphony(ann):
    '''
    Given an annotation of sound events, compute the maximum polyphony, i.e.
    the maximum number of simultaneous events at any given point in time. Only
    foreground events are taken into consideration for computing the polyphony.

    Implementation: a sweep-line over event boundaries. Each onset contributes
    +1 and each offset -1; the running sum at any boundary is the number of
    events active there, and its maximum is the polyphony.

    Parameters
    ----------
    ann : JAMS.Annotation

    Returns
    -------
    polyphony : int
        Maximum number of simultaneous events at any point in the annotation.
    '''
    # If there are no foreground events the polyphony is 0
    roles = [obs.value['role'] for obs in ann.data]
    if 'foreground' not in roles:
        return 0
    else:
        # Keep only foreground events
        int_time, int_val = ann.to_interval_values()
        int_time_clean = []
        for t, v in zip(int_time, int_val):
            if v['role'] == 'foreground':
                int_time_clean.append(t)
        int_time_clean = np.asarray(int_time_clean)
        # Sort and reshape: column vectors of onset times and offset times.
        arrivals = np.sort(int_time_clean[:, 0]).reshape(-1, 1)
        departures = np.sort(int_time_clean[:, 1]).reshape(-1, 1)
        # Onsets are +1, offsets are -1 (appended as a second column).
        arrivals = np.concatenate(
            (arrivals, np.ones(arrivals.shape)), axis=1)
        departures = np.concatenate(
            (departures, -np.ones(departures.shape)), axis=1)
        # Merge arrivals and departures and sort by time (column 0).
        entry_log = np.concatenate((arrivals, departures), axis=0)
        entry_log_sorted = entry_log[entry_log[:, 0].argsort()]
        # Get maximum number of simultaneously occurring events:
        # cumulative sum of the +1/-1 column is the instantaneous polyphony.
        polyphony = np.max(np.cumsum(entry_log_sorted[:, 1]))
        return int(polyphony)
def polyphony_gini(ann, hop_size=0.01):
    '''
    Compute the gini coefficient of the annotation's polyphony time series.
    Useful as an estimate of the polyphony "flatness" or entropy. The
    coefficient is in the range [0,1] and roughly inverse to entropy: a
    distribution that's close to uniform will have a low gini coefficient
    (high entropy), vice versa.
    https://en.wikipedia.org/wiki/Gini_coefficient

    Parameters
    ----------
    ann : jams.Annotation
        Annotation for which to compute the normalized polyphony entropy. Must
        be of the scaper namespace.
    hop_size : float
        The hop size for sampling the polyphony time series.

    Returns
    -------
    polyphony_gini: float
        Gini coefficient computed from the annotation's polyphony time series.
        Note that the returned value is 1 minus the raw gini statistic.

    Raises
    ------
    ScaperError
        If the annotation does not have a duration value or if its namespace is
        not scaper.
    '''
    if not ann.duration:
        raise ScaperError('Annotation does not have a duration value set.')
    if ann.namespace != 'scaper':
        raise ScaperError(
            'Annotation namespace must be scaper, found {:s}.'.format(
                ann.namespace))
    # If there are no foreground events the gini coefficient is 0
    roles = [obs.value['role'] for obs in ann.data]
    if 'foreground' not in roles:
        return 0
    # Sample the polyphony using the specified hop size: values[j] counts the
    # number of foreground events active at time times[j].
    n_samples = int(np.floor(ann.duration / float(hop_size)) + 1)
    times = np.linspace(0, (n_samples-1) * hop_size, n_samples)
    values = np.zeros_like(times)
    # for idx in ann.data.index:
    for obs in ann.data:
        # if ann.data.loc[idx, 'value']['role'] == 'foreground':
        if obs.value['role'] == 'foreground':
            start_time = obs.time
            end_time = start_time + obs.duration
            # Snap the event boundaries to the nearest sample grid indices.
            start_idx = np.argmin(np.abs(times - start_time))
            end_idx = np.argmin(np.abs(times - end_time)) - 1
            values[start_idx:end_idx + 1] += 1
    # Drop the final sample so the series covers [0, duration).
    values = values[:-1]
    # DEBUG
    # vstring = ('{:d} ' * len(values)).format(*tuple([int(v) for v in values]))
    # print(vstring)
    # print(' ')
    # Compute gini as per:
    # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
    values += 1e-6 # all values must be positive for the gini formula
    values = np.sort(values) # formula assumes ascending order
    n = len(values)
    i = np.arange(n) + 1
    gini = np.sum((2*i - n - 1) * values) / (n * np.sum(values))
    return (1 - gini)
def is_real_number(num):
    '''
    Check whether ``num`` is a real scalar by aggregating several numpy
    checks (``np.isreal``, ``np.isrealobj`` and ``np.isscalar``).

    Parameters
    ----------
    num : any type
        The parameter to check.

    Returns
    ------
    check : bool
        True if ``num`` is a real scalar, False otherwise.
    '''
    return bool(np.isreal(num) and np.isrealobj(num) and np.isscalar(num))
def is_real_array(array):
    '''
    Check whether ``array`` is a list or np.ndarray whose elements are all
    real scalars, by aggregating several numpy checks.

    Note: only exactly ``list`` or ``np.ndarray`` containers qualify (tuples
    and other sequence types return False).

    Parameters
    ----------
    array: any type
        The parameter to check.

    Returns
    ------
    check : bool
        True if ``array`` is a list or array of real scalars, False
        otherwise.
    '''
    if type(array) not in (list, np.ndarray):
        return False
    all_real = np.all([np.isreal(x) for x in array])
    all_scalar = np.asarray(list(map(np.isscalar, array))).all()
    return bool(all_real and np.isrealobj(array) and all_scalar)
| 15,371 | 28.964912 | 100 | py |
scaper | scaper-master/scaper/__init__.py | #!/usr/bin/env python
"""Top-level module for scaper"""
from .core import Scaper
from .core import generate_from_jams
from .core import trim
from .version import version as __version__
| 186 | 22.375 | 43 | py |
rpg_svo | rpg_svo-master/svo_analysis/setup.py | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['svo_analysis'],
package_dir={'': 'src'},
install_requires=['rospkg', 'yaml'],
)
setup(**d) | 266 | 21.25 | 60 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/filter_groundtruth_smooth.py | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import transformations
from scipy import signal
# Low-pass filter the orientation of a Vicon groundtruth trajectory and
# optionally save the filtered result back to disk (Python 2 script).
save = True
data_filename = '/home/cforster/Datasets/SlamBenchmark/asl_vicon_d2/groundtruth.txt'
filtered_data_filename = '/home/cforster/Datasets/SlamBenchmark/asl_vicon_d2/groundtruth_filtered.txt'
# Parse the groundtruth file: commas/tabs become spaces, '#' lines are
# skipped, and each remaining row becomes a float row of D.
file = open(data_filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
D = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
n = np.shape(D)[0]
# Convert the quaternion columns D[:,4:8] to roll/pitch/yaw.
# NOTE(review): the component order expected by euler_from_quaternion is
# assumed to match the file's column order -- confirm against the dataset.
rpy = np.empty([n,3])
for i in range(n):
  quat = D[i,4:8]
  rpy[i,:] = transformations.euler_from_quaternion(quat, axes='sxyz')
# filter rpy with a 5th-order Butterworth low-pass; filtfilt applies it
# forward and backward for zero phase distortion.
f_sensor = 200.0; # sampling frequency in hz
f_cut = 15.0; # cutoff frequency in hz
b,a = signal.butter(5,f_cut/(f_sensor/2));
print a
print b
rpy_filt = np.empty([n,3])
rpy_filt[:,0] = signal.filtfilt(b, a, rpy[:,0])
rpy_filt[:,1] = signal.filtfilt(b, a, rpy[:,1])
rpy_filt[:,2] = signal.filtfilt(b, a, rpy[:,2])
# Plot raw (colored) vs filtered (black) orientation channels.
fig = plt.figure()
ax = fig.add_subplot(111, title='orientation filtered')
ax.plot(rpy[:,0], 'r-')
ax.plot(rpy[:,1], 'g-')
ax.plot(rpy[:,2], 'b-')
ax.plot(rpy_filt[:,0], 'k-', linewidth=2)
ax.plot(rpy_filt[:,1], 'k-', linewidth=2)
ax.plot(rpy_filt[:,2], 'k-', linewidth=2)
# Position channels D[:,1:4] over time.
fig = plt.figure()
ax = fig.add_subplot(111, title='position')
ax.plot(D[:,1], 'r')
ax.plot(D[:,2], 'g')
ax.plot(D[:,3], 'b')
# Top-down (x vs y) view of the trajectory.
fig = plt.figure()
ax = fig.add_subplot(111, title='trajectory from top')
ax.plot(D[:,1], D[:,2])
if save:
  # Write timestamp + position (unchanged) + re-quaternionized filtered
  # orientation, one row per input sample.
  f = open(filtered_data_filename,'w')
  for i in range(np.shape(D)[0]):
    quat = transformations.quaternion_from_euler(rpy_filt[i,0], rpy_filt[i,1], rpy_filt[i,2], axes='sxyz')
    f.write('%.7f %.5f %.5f %.5f %.5f %.5f %.5f %.5f\n' % (D[i,0], D[i,1], D[i,2], D[i,3], quat[0], quat[1], quat[2], quat[3]))
  f.close()
| 1,875 | 29.754098 | 137 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/analyse_logs.py | #!/usr/bin/python
import os
import yaml
import numpy as np
import matplotlib.pyplot as plt
def analyse_logs(D, trace_dir):
    """Plot per-frame SVO tracking statistics and write them to trace_dir.

    Saves four PDF figures (reprojected point counts, reprojection error,
    candidate points, optimization threshold) plus a dataset_stats.yaml
    summary into trace_dir.

    Parameters
    ----------
    D : structured array / record-like object
        Trace log indexable by field name ('timestamp', 'dropout',
        'repr_n_mps', ...). NOTE(review): exact dtype/schema is assumed from
        the fields accessed below -- confirm against the log writer.
    trace_dir : str
        Output directory for the generated PDFs and YAML file.
    """
    # identify measurements which result from normal frames and which from keyframes
    is_kf = np.argwhere( (D['dropout'] == 1) & (D['repr_n_mps'] >= 0))
    is_frame = np.argwhere(D['repr_n_mps'] >= 0)
    is_nokf = np.argwhere( (D['dropout'] == 0) & (D['repr_n_mps'] >= 0))
    # set initial time to zero
    D['timestamp'] = D['timestamp'] - D['timestamp'][0]
    # ----------------------------------------------------------------------------
    # plot number of reprojected points
    mean_n_reproj_points = np.mean(D['repr_n_mps'][is_frame]);
    mean_n_reproj_matches = np.mean(D['repr_n_new_references'][is_frame]);
    mean_n_edges_final = np.mean(D['sfba_n_edges_final'][is_frame]);
    fig = plt.figure(figsize=(8,3))
    ax = fig.add_subplot(111, xlabel='time [s]')
    ax.plot(D['timestamp'][is_frame], D['repr_n_mps'][is_frame], 'r-',
            label='Reprojected Points, avg = %.2f'%mean_n_reproj_points)
    ax.plot(D['timestamp'][is_frame], D['repr_n_new_references'][is_frame], 'b-',
            label='Feature Matches, avg = %.2f'%mean_n_reproj_matches)
    ax.plot(D['timestamp'][is_frame], D['sfba_n_edges_final'][is_frame], 'g-',
            label='Points after Optimization, avg = %.2f'%mean_n_edges_final)
    ax.set_ylim(bottom=0)
    ax.legend(loc='lower right')
    fig.tight_layout()
    fig.savefig(os.path.join(trace_dir,'num_reprojected.pdf'), bbox_inches="tight")
    # ----------------------------------------------------------------------------
    # plot median error before and after pose-optimzation and bundle adjustment
    init_error_avg = np.mean(D['sfba_error_init'][is_frame])
    opt1_avg = np.mean(D['sfba_error_final'][is_frame])
    fig = plt.figure(figsize=(8,2))
    ax = fig.add_subplot(111, xlabel='time [s]', ylabel='error [px]')
    ax.plot(D['timestamp'][is_frame], D['sfba_error_init'][is_frame], 'r-', label='Initial error')
    ax.plot(D['timestamp'][is_frame], D['sfba_error_final'][is_frame], 'b-', label='Final error')
    ax.legend(ncol=2)
    fig.tight_layout()
    fig.savefig(os.path.join(trace_dir,'reprojection_error.pdf'), bbox_inches="tight")
    print 'average reprojection error improvement: ' + str(init_error_avg - opt1_avg)
    # ----------------------------------------------------------------------------
    # plot number of candidate points
    fig = plt.figure(figsize=(8,3))
    ax = fig.add_subplot(111, xlabel='time [s]')
    ax.plot(D['timestamp'][is_frame], D['n_candidates'][is_frame], 'r-', label='Candidate Points')
    fig.tight_layout()
    fig.savefig(os.path.join(trace_dir,'candidate_points.pdf'), bbox_inches="tight")
    # ----------------------------------------------------------------------------
    # plot the pose-optimization threshold over time
    fig = plt.figure(figsize=(8,2))
    ax = fig.add_subplot(111, xlabel='time [s]', ylabel='px')
    ax.plot(D['timestamp'][is_frame], D['sfba_thresh'][is_frame], 'r-', label='Threshold')
    fig.tight_layout()
    fig.savefig(os.path.join(trace_dir,'optimization_thresh.pdf'), bbox_inches="tight")
    # ----------------------------------------------------------------------------
    # write other statistics to file
    stat = {'num_frames': len(is_frame),
            'num_kfs': len(is_kf),
            'reproj_error_avg_improvement': float(init_error_avg - opt1_avg)}
    with open(os.path.join(trace_dir,'dataset_stats.yaml'),'w') as outfile:
        outfile.write(yaml.dump(stat, default_flow_style=False))
| 3,497 | 45.64 | 96 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/analyse_timing.py | #!/usr/bin/python
import os
import numpy as np
import matplotlib.pyplot as plt
def analyse_timing(D, trace_dir):
    """Generate timing plots for an SVO trace and save them as PDFs.

    Parameters
    ----------
    D : mapping of str -> numpy array
        Trace columns; this function reads the keys 'timestamp',
        'repr_n_mps', 'tot_time', 'pose_optimizer', 'point_optimizer',
        'reproject', 'sparse_img_align', 'pyramid_creation',
        'feature_align', 'reproject_candidates' and 'reproject_kfs'.
        Durations are assumed to be in seconds (plotted in ms) —
        TODO confirm against the tracer that writes these columns.
    trace_dir : str
        Directory into which 'timing.pdf', 'timing_boxplot.pdf' and
        'timing_reprojection.pdf' are written.

    Side effect: D['timestamp'] is shifted in place so it starts at zero.
    """
    # Rows measured on ordinary frames have a valid (>= 0) reprojected
    # map-point count; this mask selects them.
    is_frame = np.argwhere(D['repr_n_mps'] >= 0)
    n_frames = len(is_frame)

    # Set the initial time to zero so all x-axes start at t = 0.
    D['timestamp'] = D['timestamp'] - D['timestamp'][0]

    _plot_total_time(D, is_frame, n_frames, trace_dir)
    _plot_timing_boxplot(D, is_frame, trace_dir)
    _plot_reprojection_boxplot(D, is_frame, trace_dir)


def _plot_total_time(D, is_frame, n_frames, trace_dir):
    # Time series of the total per-frame processing time, mean overlaid.
    avg_time = np.mean(D['tot_time'][is_frame]) * 1000
    fig = plt.figure(figsize=(8, 3))
    ax = fig.add_subplot(111, ylabel='processing time [ms]', xlabel='time [s]')
    ax.plot(D['timestamp'][is_frame], D['tot_time'][is_frame] * 1000,
            'g-', label='total time [ms]')
    ax.plot(D['timestamp'][is_frame], np.ones(n_frames) * avg_time,
            'b--', label='%(time).1fms mean time' % {'time': avg_time})
    ax.legend()
    fig.tight_layout()
    fig.savefig(os.path.join(trace_dir, 'timing.pdf'), bbox_inches="tight")
    plt.close(fig)  # release the figure; avoids leaking memory across calls


def _plot_timing_boxplot(D, is_frame, trace_dir):
    # Horizontal boxplot breaking motion-estimation time into its stages.
    fig = plt.figure(figsize=(6, 2))
    ax = fig.add_subplot(111, xlabel='Processing time [ms]')
    ax.boxplot([
        D['tot_time'][is_frame] * 1000,
        D['pose_optimizer'][is_frame] * 1000 + D['point_optimizer'][is_frame] * 1000,
        D['reproject'][is_frame] * 1000,
        D['sparse_img_align'][is_frame] * 1000,
        D['pyramid_creation'][is_frame] * 1000,
    ], notch=False, sym='', vert=False)  # sym='' suppresses outlier markers
    boxplot_labels = [
        r'\textbf{Total Motion Estimation: %2.2fms}' % np.median(D['tot_time'][is_frame] * 1000),
        'Refinement: %2.2fms' % np.median(D['pose_optimizer'][is_frame] * 1000 + D['point_optimizer'][is_frame] * 1000),
        'Feature Alignment: %2.2fms' % np.median(D['reproject'][is_frame] * 1000),
        'Sparse Image Alignment: %2.2fms' % np.median(D['sparse_img_align'][is_frame] * 1000),
        'Pyramid Creation: %2.2fms' % np.median(D['pyramid_creation'][is_frame] * 1000)]
    # Boxplot rows are numbered from 1, so tick positions are 1..N.
    ax.set_yticks(np.arange(len(boxplot_labels)) + 1)
    ax.set_yticklabels(boxplot_labels)
    fig.tight_layout()
    fig.savefig(os.path.join(trace_dir, 'timing_boxplot.pdf'), bbox_inches="tight")
    plt.close(fig)


def _plot_reprojection_boxplot(D, is_frame, trace_dir):
    # Horizontal boxplot breaking the reprojection step into sub-steps.
    fig = plt.figure(figsize=(6, 2))
    ax = fig.add_subplot(111, xlabel='Processing time [ms]')
    ax.boxplot([
        D['reproject'][is_frame] * 1000,
        D['feature_align'][is_frame] * 1000,
        D['reproject_candidates'][is_frame] * 1000,
        D['reproject_kfs'][is_frame] * 1000,
    ], notch=False, sym='', vert=False)
    boxplot_labels = [
        r'\textbf{Total Reprojection: %2.2fms}' % np.median(D['reproject'][is_frame] * 1000),
        'Feature Alignment: %2.2fms' % np.median(D['feature_align'][is_frame] * 1000),
        'Reproject Candidates: %2.2fms' % np.median(D['reproject_candidates'][is_frame] * 1000),
        'Reproject Keyframes: %2.2fms' % np.median(D['reproject_kfs'][is_frame] * 1000)]
    ax.set_yticks(np.arange(len(boxplot_labels)) + 1)
    ax.set_yticklabels(boxplot_labels)
    fig.tight_layout()
    fig.savefig(os.path.join(trace_dir, 'timing_reprojection.pdf'), bbox_inches="tight")
    plt.close(fig)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.