repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
UniVL | UniVL-main/modules/until_config.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import logging
import tarfile
import tempfile
import shutil
import torch
from .file_utils import cached_path
logger = logging.getLogger(__name__)
class PretrainedConfig(object):
pretrained_model_archive_map = {}
config_name = ""
weights_name = ""
@classmethod
def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
    """Resolve `pretrained_model_name` to a (config, state_dict) pair.

    Lookup order: a file/dir sitting next to this module, then the class's
    `pretrained_model_archive_map`, then the name itself treated as a path/URL
    (resolved through `cached_path`). A `.tar.gz` archive is extracted to a
    temp dir, read, and the temp dir removed.

    Args:
        pretrained_model_name: model shortcut name, local path, or URL.
        cache_dir: download cache directory forwarded to `cached_path`.
        type_vocab_size: value written onto the loaded config.
        state_dict: pre-loaded weights; if None, weights are loaded from the
            archive's `cls.weights_name` file (when present).
        task_config: optional object with `local_rank`; logging happens only
            on rank 0 to avoid duplicate messages in distributed runs.

    Returns:
        (config, state_dict) on success, or None when the archive cannot be
        found. NOTE(review): callers must handle the non-tuple None result.
    """
    archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
    if os.path.exists(archive_file) is False:
        if pretrained_model_name in cls.pretrained_model_archive_map:
            archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
        else:
            archive_file = pretrained_model_name
    # redirect to the cache, if necessary
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
    except FileNotFoundError:
        if task_config is None or task_config.local_rank == 0:
            logger.error(
                "Model name '{}' was not found in model name list. "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name,
                    archive_file))
        return None
    if resolved_archive_file == archive_file:
        if task_config is None or task_config.local_rank == 0:
            logger.info("loading archive file {}".format(archive_file))
    else:
        if task_config is None or task_config.local_rank == 0:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
    tempdir = None
    if os.path.isdir(resolved_archive_file):
        serialization_dir = resolved_archive_file
    else:
        # Extract archive to temp dir
        tempdir = tempfile.mkdtemp()
        if task_config is None or task_config.local_rank == 0:
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
        # NOTE(review): tempdir is not removed if extraction or loading below
        # raises (cleanup is not in a finally); also `extractall` on an
        # untrusted archive allows path traversal pre-Python 3.12 filters.
        with tarfile.open(resolved_archive_file, 'r:gz') as archive:
            archive.extractall(tempdir)
        serialization_dir = tempdir
    # Load config
    config_file = os.path.join(serialization_dir, cls.config_name)
    config = cls.from_json_file(config_file)
    config.type_vocab_size = type_vocab_size
    if task_config is None or task_config.local_rank == 0:
        logger.info("Model config {}".format(config))
    if state_dict is None:
        # Load weights onto CPU; the caller moves them to the right device.
        weights_path = os.path.join(serialization_dir, cls.weights_name)
        if os.path.exists(weights_path):
            state_dict = torch.load(weights_path, map_location='cpu')
        else:
            if task_config is None or task_config.local_rank == 0:
                logger.info("Weight doesn't exsits. {}".format(weights_path))
    if tempdir:
        # Clean up temp dir
        shutil.rmtree(tempdir)
    return config, state_dict
@classmethod
def from_dict(cls, json_object):
    """Constructs a `BertConfig` from a Python dictionary of parameters."""
    # Sentinel -1 tells the subclass constructor not to read a vocab/json arg.
    config = cls(vocab_size_or_config_json_file=-1)
    # Write straight into __dict__, mirroring the original attribute-by-attribute copy.
    config.__dict__.update(json_object)
    return config
@classmethod
def from_json_file(cls, json_file):
    """Constructs a `BertConfig` from a json file of parameters."""
    with open(json_file, "r", encoding='utf-8') as reader:
        payload = json.loads(reader.read())
    return cls.from_dict(payload)
def __repr__(self):
    """Debug representation: the config serialized as pretty-printed JSON."""
    serialized = self.to_json_string()
    return str(serialized)
def to_dict(self):
    """Serializes this instance to a Python dictionary (deep copy, so the
    caller may mutate the result without touching the config)."""
    return copy.deepcopy(self.__dict__)
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" | 5,036 | 38.97619 | 105 | py |
UniVL | UniVL-main/dataloaders/dataloader_msrvtt_caption.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from torch.utils.data import Dataset
import numpy as np
import pickle
import pandas as pd
from collections import defaultdict
import json
import random
class MSRVTT_Caption_DataLoader(Dataset):
"""MSRVTT train dataset loader."""
def __init__(
        self,
        csv_path,
        json_path,
        features_path,
        tokenizer,
        max_words=30,
        feature_framerate=1.0,
        max_frames=100,
        split_type=""
):
    """MSRVTT caption dataset.

    Args:
        csv_path: csv with a `video_id` column (defines feature dimensionality).
        json_path: MSRVTT annotation json with `videos` and `sentences` lists.
        features_path: pickled dict mapping video_id -> feature ndarray.
        tokenizer: BERT-style tokenizer (`tokenize`, `convert_tokens_to_ids`, `vocab`).
        max_words: maximum token length of every text field.
        feature_framerate: features per second (kept for API parity).
        max_frames: maximum number of video frames kept.
        split_type: one of "train", "val", "test".
    """
    self.csv = pd.read_csv(csv_path)
    # Context managers close the files deterministically; the original
    # json.load(open(...)) / pickle.load(open(...)) leaked the handles.
    with open(json_path, 'r') as f:
        self.data = json.load(f)
    with open(features_path, 'rb') as f:
        self.feature_dict = pickle.load(f)
    self.feature_framerate = feature_framerate
    self.max_words = max_words
    self.max_frames = max_frames
    self.tokenizer = tokenizer
    self.feature_size = self.feature_dict[self.csv['video_id'].values[0]].shape[-1]

    assert split_type in ["train", "val", "test"]
    # Fixed MSRVTT split:
    # Train: video0 : video6512 (6513) / Val: video6513 : video7009 (497)
    # Test: video7010 : video9999 (2990)
    video_ids = [self.data['videos'][idx]['video_id'] for idx in range(len(self.data['videos']))]
    split_dict = {"train": video_ids[:6513], "val": video_ids[6513:6513 + 497], "test": video_ids[6513 + 497:]}
    choiced_video_ids = split_dict[split_type]
    # O(1) membership tests instead of scanning the id list per sentence.
    choiced_video_id_set = set(choiced_video_ids)

    self.sample_len = 0
    self.sentences_dict = {}
    self.video_sentences_dict = defaultdict(list)
    if split_type == "train":  # expand all sentences: one sample per caption
        for itm in self.data['sentences']:
            if itm['video_id'] in choiced_video_id_set:
                self.sentences_dict[len(self.sentences_dict)] = (itm['video_id'], itm['caption'])
                self.video_sentences_dict[itm['video_id']].append(itm['caption'])
    elif split_type == "val" or split_type == "test":
        # Keep all captions per video but evaluate on the first one only.
        for itm in self.data['sentences']:
            if itm['video_id'] in choiced_video_id_set:
                self.video_sentences_dict[itm['video_id']].append(itm['caption'])
        for vid in choiced_video_ids:
            self.sentences_dict[len(self.sentences_dict)] = (vid, self.video_sentences_dict[vid][0])
    else:
        raise NotImplementedError
    self.sample_len = len(self.sentences_dict)
def __len__(self):
    """Number of samples: one per expanded (video, caption) pair."""
    return self.sample_len
def _get_text(self, video_id, caption=None):
    """Build BERT text inputs plus MLM and caption-decoder targets for one video.

    Args:
        video_id: id used to sample a ground-truth caption when `caption` is None.
        caption: caption text to tokenize for the decoder targets.

    Returns:
        Tuple of (1, max_words) int64 arrays — input ids, attention mask,
        segment ids, masked ids, MLM labels (-1 = ignored by the loss),
        decoder input ids, decoder mask, decoder output ids — plus the list
        of chosen video ids.
    """
    k = 1
    choice_video_ids = [video_id]
    # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
    pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)

    pairs_input_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_output_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_decoder_mask = np.zeros((k, self.max_words), dtype=np.int64)

    for i, video_id in enumerate(choice_video_ids):
        # Encoder-side text is intentionally empty here: only [CLS]/[SEP].
        words = ["[CLS]"]
        total_length_with_CLS = self.max_words - 1
        if len(words) > total_length_with_CLS:
            words = words[:total_length_with_CLS]
        words = words + ["[SEP]"]

        # Mask Language Model <-----
        token_labels = []
        masked_tokens = words.copy()
        for token_id, token in enumerate(masked_tokens):
            if token_id == 0 or token_id == len(masked_tokens) - 1:
                token_labels.append(-1)  # never mask [CLS] / [SEP]
                continue
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                prob /= 0.15
                if prob < 0.8:
                    # 80%: replace with [MASK]
                    masked_tokens[token_id] = "[MASK]"
                elif prob < 0.9:
                    # 10%: replace with a random vocab token; rest 10%: keep as-is
                    masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                try:
                    token_labels.append(self.tokenizer.vocab[token])
                except KeyError:
                    # Unknown words (should not occur with a BPE vocab)
                    token_labels.append(self.tokenizer.vocab["[UNK]"])
            else:
                # unmasked token: ignored by the MLM loss
                token_labels.append(-1)
        # -----> Mask Language Model

        input_ids = self.tokenizer.convert_tokens_to_ids(words)
        input_mask = [1] * len(input_ids)
        segment_ids = [0] * len(input_ids)
        masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
        # Zero-pad everything to max_words.
        while len(input_ids) < self.max_words:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            masked_token_ids.append(0)
            token_labels.append(-1)
        assert len(input_ids) == self.max_words
        assert len(input_mask) == self.max_words
        assert len(segment_ids) == self.max_words
        assert len(masked_token_ids) == self.max_words
        assert len(token_labels) == self.max_words

        pairs_text[i] = np.array(input_ids)
        pairs_mask[i] = np.array(input_mask)
        pairs_segment[i] = np.array(segment_ids)
        pairs_masked_text[i] = np.array(masked_token_ids)
        pairs_token_labels[i] = np.array(token_labels)

        # Caption-generation targets (teacher forcing: input is shifted wrt output).
        if caption is not None:
            caption_words = self.tokenizer.tokenize(caption)
        else:
            caption_words = self._get_single_text(video_id)
        if len(caption_words) > total_length_with_CLS:
            caption_words = caption_words[:total_length_with_CLS]
        input_caption_words = ["[CLS]"] + caption_words
        output_caption_words = caption_words + ["[SEP]"]

        input_caption_ids = self.tokenizer.convert_tokens_to_ids(input_caption_words)
        output_caption_ids = self.tokenizer.convert_tokens_to_ids(output_caption_words)
        decoder_mask = [1] * len(input_caption_ids)
        while len(input_caption_ids) < self.max_words:
            input_caption_ids.append(0)
            output_caption_ids.append(0)
            decoder_mask.append(0)
        assert len(input_caption_ids) == self.max_words
        assert len(output_caption_ids) == self.max_words
        assert len(decoder_mask) == self.max_words

        pairs_input_caption_ids[i] = np.array(input_caption_ids)
        pairs_output_caption_ids[i] = np.array(output_caption_ids)
        pairs_decoder_mask[i] = np.array(decoder_mask)

    return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, \
           pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids, choice_video_ids
def _get_single_text(self, video_id):
    """Sample one ground-truth caption of `video_id` uniformly and tokenize it.

    Bug fix: the original indexed `self.sentences`, which is never defined on
    this class (AttributeError at runtime); the captions live in
    `self.video_sentences_dict` built by `__init__`.
    """
    captions = self.video_sentences_dict[video_id]
    rind = random.randint(0, len(captions) - 1)
    return self.tokenizer.tokenize(captions[rind])
def _get_video(self, choice_video_ids):
video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long)
max_video_length = [0] * len(choice_video_ids)
video = np.zeros((len(choice_video_ids), self.max_frames, self.feature_size), dtype=np.float)
for i, video_id in enumerate(choice_video_ids):
video_slice = self.feature_dict[video_id]
if self.max_frames < video_slice.shape[0]:
video_slice = video_slice[:self.max_frames]
slice_shape = video_slice.shape
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
if len(video_slice) < 1:
print("video_id: {}".format(video_id))
else:
video[i][:slice_shape[0]] = video_slice
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
# Mask Frame Model <-----
video_labels_index = [[] for _ in range(len(choice_video_ids))]
masked_video = video.copy()
for i, video_pair_ in enumerate(masked_video):
for j, _ in enumerate(video_pair_):
if j < max_video_length[i]:
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
masked_video[i][j] = [0.] * video.shape[-1]
video_labels_index[i].append(j)
else:
video_labels_index[i].append(-1)
else:
video_labels_index[i].append(-1)
video_labels_index = np.array(video_labels_index, dtype=np.long)
# -----> Mask Frame Model
return video, video_mask, masked_video, video_labels_index
def __getitem__(self, idx):
    """Return one training sample: text/MLM tensors plus video/MFM tensors."""
    video_id, caption = self.sentences_dict[idx]
    text_fields = self._get_text(video_id, caption)
    (pairs_text, pairs_mask, pairs_segment,
     pairs_masked_text, pairs_token_labels,
     pairs_input_caption_ids, pairs_decoder_mask,
     pairs_output_caption_ids, choice_video_ids) = text_fields
    video, video_mask, masked_video, video_labels_index = self._get_video(choice_video_ids)
    return (pairs_text, pairs_mask, pairs_segment, video, video_mask,
            pairs_masked_text, pairs_token_labels, masked_video, video_labels_index,
            pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids)
| 10,371 | 44.095652 | 115 | py |
UniVL | UniVL-main/dataloaders/dataloader_youcook_retrieval.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import pickle
import random
class Youcook_DataLoader(Dataset):
"""Youcook dataset loader."""
def __init__(
        self,
        csv,
        data_path,
        features_path,
        tokenizer,
        feature_framerate=1.0,
        max_words=30,
        max_frames=100,
):
    """Youcook retrieval dataset.

    Args:
        csv: path to a csv with `video_id` and `feature_file` columns.
        data_path: pickled dict video_id -> {'start', 'end', 'text'} arrays.
        features_path: pickled dict feature_file -> feature ndarray.
        tokenizer: BERT-style tokenizer.
        feature_framerate: features per second, used to index feature rows.
        max_words: maximum token length of every text field.
        max_frames: maximum number of video frames kept.
    """
    self.csv = pd.read_csv(csv)
    # Context managers close the pickle files deterministically; the original
    # pickle.load(open(...)) leaked the handles.
    with open(data_path, 'rb') as f:
        self.data_dict = pickle.load(f)
    with open(features_path, 'rb') as f:
        self.feature_dict = pickle.load(f)
    self.feature_framerate = feature_framerate
    self.max_words = max_words
    self.max_frames = max_frames
    self.tokenizer = tokenizer

    # Map video_id -> row index in the csv (row order defines the index).
    video_id_list = [itm for itm in self.csv['video_id'].values]
    self.video_id2idx_dict = {video_id: id for id, video_id in enumerate(video_id_list)}
    # Flatten all (video, caption) pairs into one flat sample index.
    self.iter2video_pairs_dict = {}
    iter_idx_ = 0
    for video_id in video_id_list:
        data_dict = self.data_dict[video_id]
        n_caption = len(data_dict['start'])
        for sub_id in range(n_caption):
            self.iter2video_pairs_dict[iter_idx_] = (video_id, sub_id)
            iter_idx_ += 1
def __len__(self):
    """Number of flattened (video, caption) clip pairs."""
    return len(self.iter2video_pairs_dict)
def _get_text(self, video_id, sub_id):
    """Build BERT inputs + MLM targets for caption `sub_id` of `video_id`.

    Returns:
        (text, mask, segment, masked_text, token_labels, starts, ends) —
        the text arrays are (1, max_words) int64; starts/ends hold the clip
        boundaries (seconds) of the selected caption.
    """
    data_dict = self.data_dict[video_id]
    k, r_ind = 1, [sub_id]

    starts = np.zeros(k)
    ends = np.zeros(k)
    # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
    pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)

    for i in range(k):
        ind = r_ind[i]
        words = self.tokenizer.tokenize(data_dict['text'][ind])
        start_, end_ = data_dict['start'][ind], data_dict['end'][ind]
        starts[i], ends[i] = start_, end_

        words = ["[CLS]"] + words
        total_length_with_CLS = self.max_words - 1
        if len(words) > total_length_with_CLS:
            words = words[:total_length_with_CLS]
        words = words + ["[SEP]"]

        # Mask Language Model <-----
        token_labels = []
        masked_tokens = words.copy()
        for token_id, token in enumerate(masked_tokens):
            if token_id == 0 or token_id == len(masked_tokens) - 1:
                token_labels.append(-1)  # never mask [CLS] / [SEP]
                continue
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                prob /= 0.15
                if prob < 0.8:
                    # 80%: replace with [MASK]
                    masked_tokens[token_id] = "[MASK]"
                elif prob < 0.9:
                    # 10%: replace with a random vocab token; rest 10%: keep as-is
                    masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                try:
                    token_labels.append(self.tokenizer.vocab[token])
                except KeyError:
                    # Unknown words (should not occur with a BPE vocab)
                    token_labels.append(self.tokenizer.vocab["[UNK]"])
            else:
                # unmasked token: ignored by the MLM loss
                token_labels.append(-1)
        # -----> Mask Language Model

        input_ids = self.tokenizer.convert_tokens_to_ids(words)
        input_mask = [1] * len(input_ids)
        segment_ids = [0] * len(input_ids)
        masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
        # Zero-pad everything to max_words.
        while len(input_ids) < self.max_words:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            masked_token_ids.append(0)
            token_labels.append(-1)
        assert len(input_ids) == self.max_words
        assert len(input_mask) == self.max_words
        assert len(segment_ids) == self.max_words
        assert len(masked_token_ids) == self.max_words
        assert len(token_labels) == self.max_words

        pairs_text[i] = np.array(input_ids)
        pairs_mask[i] = np.array(input_mask)
        pairs_segment[i] = np.array(segment_ids)
        pairs_masked_text[i] = np.array(masked_token_ids)
        pairs_token_labels[i] = np.array(token_labels)

    return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, starts, ends
def _get_video(self, idx, s, e):
video_mask = np.zeros((len(s), self.max_frames), dtype=np.long)
max_video_length = [0] * len(s)
video_features = self.feature_dict[self.csv["feature_file"].values[idx]]
video = np.zeros((len(s), self.max_frames, video_features.shape[-1]), dtype=np.float)
for i in range(len(s)):
start = int(s[i] * self.feature_framerate)
end = int(e[i] * self.feature_framerate) + 1
video_slice = video_features[start:end]
if self.max_frames < video_slice.shape[0]:
video_slice = video_slice[:self.max_frames]
slice_shape = video_slice.shape
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
if len(video_slice) < 1:
print("video_id: {}, start: {}, end: {}".format(self.csv["video_id"].values[idx], start, end))
else:
video[i][:slice_shape[0]] = video_slice
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
# Mask Frame Model <-----
video_labels_index = [[] for _ in range(len(s))]
masked_video = video.copy()
for i, video_pair_ in enumerate(masked_video):
for j, _ in enumerate(video_pair_):
if j < max_video_length[i]:
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
masked_video[i][j] = [0.] * video.shape[-1]
video_labels_index[i].append(j)
else:
video_labels_index[i].append(-1)
else:
video_labels_index[i].append(-1)
video_labels_index = np.array(video_labels_index, dtype=np.long)
# -----> Mask Frame Model
return video, video_mask, masked_video, video_labels_index
def __getitem__(self, feature_idx):
    """Assemble one sample: BERT text tensors plus clip-aligned video tensors."""
    video_id, sub_id = self.iter2video_pairs_dict[feature_idx]
    idx = self.video_id2idx_dict[video_id]
    text_fields = self._get_text(video_id, sub_id)
    (pairs_text, pairs_mask, pairs_segment,
     pairs_masked_text, pairs_token_labels, starts, ends) = text_fields
    video, video_mask, masked_video, video_labels_index = self._get_video(idx, starts, ends)
    return (pairs_text, pairs_mask, pairs_segment, video, video_mask,
            pairs_masked_text, pairs_token_labels, masked_video, video_labels_index)
| 7,820 | 40.163158 | 113 | py |
UniVL | UniVL-main/dataloaders/dataloader_youcook_caption.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import pickle
import re
import random
import io
class Youcook_Caption_DataLoader(Dataset):
"""Youcook dataset loader."""
def __init__(
        self,
        csv,
        data_path,
        features_path,
        tokenizer,
        feature_framerate=1.0,
        max_words=30,
        max_frames=100,
):
    """Youcook caption dataset.

    Args:
        csv: path to a csv with `video_id` and `feature_file` columns.
        data_path: pickled dict video_id -> {'start', 'end', 'transcript', 'text'}.
        features_path: pickled dict feature_file -> feature ndarray.
        tokenizer: BERT-style tokenizer.
        feature_framerate: features per second, used to index feature rows.
        max_words: maximum token length of every text field.
        max_frames: maximum number of video frames kept.
    """
    self.csv = pd.read_csv(csv)
    # Context managers close the pickle files deterministically; the original
    # pickle.load(open(...)) leaked the handles.
    with open(data_path, 'rb') as f:
        self.data_dict = pickle.load(f)
    with open(features_path, 'rb') as f:
        self.feature_dict = pickle.load(f)
    self.feature_framerate = feature_framerate
    self.max_words = max_words
    self.max_frames = max_frames
    self.tokenizer = tokenizer
    self.feature_size = self.feature_dict[self.csv["feature_file"].values[0]].shape[-1]

    # Map video_id -> row index in the csv (row order defines the index).
    video_id_list = [itm for itm in self.csv['video_id'].values]
    self.video_id2idx_dict = {video_id: id for id, video_id in enumerate(video_id_list)}
    # Flatten all (video, caption) pairs into one flat sample index.
    self.iter2video_pairs_dict = {}
    iter_idx_ = 0
    for video_id in video_id_list:
        data_dict = self.data_dict[video_id]
        n_caption = len(data_dict['start'])
        for sub_id in range(n_caption):
            self.iter2video_pairs_dict[iter_idx_] = (video_id, sub_id)
            iter_idx_ += 1
def __len__(self):
    """Number of flattened (video, caption) clip pairs."""
    return len(self.iter2video_pairs_dict)
def _get_text(self, video_id, sub_id):
    """Build encoder inputs (from the ASR transcript) + MLM targets, and
    decoder caption targets (from the ground-truth text) for one clip.

    Returns:
        (text, mask, segment, masked_text, token_labels,
         input_caption_ids, decoder_mask, output_caption_ids, starts, ends) —
        text arrays are (1, max_words) int64; starts/ends are clip bounds in seconds.
    """
    data_dict = self.data_dict[video_id]
    k = 1
    r_ind = [sub_id]

    starts = np.zeros(k)
    ends = np.zeros(k)
    # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
    pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)

    pairs_input_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_output_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_decoder_mask = np.zeros((k, self.max_words), dtype=np.int64)

    for i in range(k):
        ind = r_ind[i]
        start_, end_ = data_dict['start'][ind], data_dict['end'][ind]
        starts[i], ends[i] = start_, end_
        total_length_with_CLS = self.max_words - 1

        # Encoder side reads the ASR transcript.
        words = self.tokenizer.tokenize(data_dict['transcript'][ind])
        words = ["[CLS]"] + words
        if len(words) > total_length_with_CLS:
            words = words[:total_length_with_CLS]
        words = words + ["[SEP]"]

        # Mask Language Model <-----
        token_labels = []
        masked_tokens = words.copy()
        for token_id, token in enumerate(masked_tokens):
            if token_id == 0 or token_id == len(masked_tokens) - 1:
                token_labels.append(-1)  # never mask [CLS] / [SEP]
                continue
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                prob /= 0.15
                if prob < 0.8:
                    # 80%: replace with [MASK]
                    masked_tokens[token_id] = "[MASK]"
                elif prob < 0.9:
                    # 10%: replace with a random vocab token; rest 10%: keep as-is
                    masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                try:
                    token_labels.append(self.tokenizer.vocab[token])
                except KeyError:
                    # Unknown words (should not occur with a BPE vocab)
                    token_labels.append(self.tokenizer.vocab["[UNK]"])
            else:
                # unmasked token: ignored by the MLM loss
                token_labels.append(-1)
        # -----> Mask Language Model

        input_ids = self.tokenizer.convert_tokens_to_ids(words)
        masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
        input_mask = [1] * len(input_ids)
        segment_ids = [0] * len(input_ids)
        # Zero-pad everything to max_words.
        while len(input_ids) < self.max_words:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            masked_token_ids.append(0)
            token_labels.append(-1)
        assert len(input_ids) == self.max_words
        assert len(input_mask) == self.max_words
        assert len(segment_ids) == self.max_words
        assert len(masked_token_ids) == self.max_words
        assert len(token_labels) == self.max_words

        pairs_text[i] = np.array(input_ids)
        pairs_mask[i] = np.array(input_mask)
        pairs_segment[i] = np.array(segment_ids)
        pairs_masked_text[i] = np.array(masked_token_ids)
        pairs_token_labels[i] = np.array(token_labels)

        # Decoder targets read the ground-truth caption text (teacher forcing).
        caption_words = self.tokenizer.tokenize(data_dict['text'][ind])
        if len(caption_words) > total_length_with_CLS:
            caption_words = caption_words[:total_length_with_CLS]
        input_caption_words = ["[CLS]"] + caption_words
        output_caption_words = caption_words + ["[SEP]"]

        input_caption_ids = self.tokenizer.convert_tokens_to_ids(input_caption_words)
        output_caption_ids = self.tokenizer.convert_tokens_to_ids(output_caption_words)
        decoder_mask = [1] * len(input_caption_ids)
        while len(input_caption_ids) < self.max_words:
            input_caption_ids.append(0)
            output_caption_ids.append(0)
            decoder_mask.append(0)
        assert len(input_caption_ids) == self.max_words
        assert len(output_caption_ids) == self.max_words
        assert len(decoder_mask) == self.max_words

        pairs_input_caption_ids[i] = np.array(input_caption_ids)
        pairs_output_caption_ids[i] = np.array(output_caption_ids)
        pairs_decoder_mask[i] = np.array(decoder_mask)

    return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels,\
           pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids, starts, ends
def _get_video(self, idx, s, e):
video_mask = np.zeros((len(s), self.max_frames), dtype=np.long)
max_video_length = [0] * len(s)
video_features = self.feature_dict[self.csv["feature_file"].values[idx]]
video = np.zeros((len(s), self.max_frames, self.feature_size), dtype=np.float)
for i in range(len(s)):
start = int(s[i] * self.feature_framerate)
end = int(e[i] * self.feature_framerate) + 1
video_slice = video_features[start:end]
if self.max_frames < video_slice.shape[0]:
video_slice = video_slice[:self.max_frames]
slice_shape = video_slice.shape
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
if len(video_slice) < 1:
print("video_id: {}, start: {}, end: {}".format(self.csv["video_id"].values[idx], start, end))
# pass
else:
video[i][:slice_shape[0]] = video_slice
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
# Mask Frame Model <-----
video_labels_index = [[] for _ in range(len(s))]
masked_video = video.copy()
for i, video_pair_ in enumerate(masked_video):
for j, _ in enumerate(video_pair_):
if j < max_video_length[i]:
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
masked_video[i][j] = [0.] * video.shape[-1]
video_labels_index[i].append(j)
else:
video_labels_index[i].append(-1)
else:
video_labels_index[i].append(-1)
video_labels_index = np.array(video_labels_index, dtype=np.long)
# -----> Mask Frame Model
return video, video_mask, masked_video, video_labels_index
def __getitem__(self, feature_idx):
    """Assemble one caption sample: text + MLM tensors, video + MFM tensors,
    and the decoder caption targets."""
    video_id, sub_id = self.iter2video_pairs_dict[feature_idx]
    idx = self.video_id2idx_dict[video_id]
    text_fields = self._get_text(video_id, sub_id)
    (pairs_text, pairs_mask, pairs_segment,
     pairs_masked_text, pairs_token_labels, pairs_input_caption_ids,
     pairs_decoder_mask, pairs_output_caption_ids, starts, ends) = text_fields
    video, video_mask, masked_video, video_labels_index = self._get_video(idx, starts, ends)
    return (pairs_text, pairs_mask, pairs_segment, video, video_mask,
            pairs_masked_text, pairs_token_labels, masked_video, video_labels_index,
            pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids)
| 9,662 | 41.756637 | 113 | py |
UniVL | UniVL-main/dataloaders/dataloader_msrvtt_retrieval.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from torch.utils.data import Dataset
import numpy as np
import pickle
import pandas as pd
from collections import defaultdict
import json
import random
class MSRVTT_DataLoader(Dataset):
"""MSRVTT dataset loader."""
def __init__(
        self,
        csv_path,
        features_path,
        tokenizer,
        max_words=30,
        feature_framerate=1.0,
        max_frames=100,
):
    """MSRVTT retrieval (evaluation) dataset.

    Args:
        csv_path: csv with `video_id` and `sentence` columns (one pair per row).
        features_path: pickled dict mapping video_id -> feature ndarray.
        tokenizer: BERT-style tokenizer.
        max_words: maximum token length of every text field.
        feature_framerate: features per second (kept for API parity).
        max_frames: maximum number of video frames kept.
    """
    self.data = pd.read_csv(csv_path)
    # Context manager closes the pickle file deterministically; the original
    # pickle.load(open(...)) leaked the handle.
    with open(features_path, 'rb') as f:
        self.feature_dict = pickle.load(f)
    self.feature_framerate = feature_framerate
    self.max_words = max_words
    self.max_frames = max_frames
    self.tokenizer = tokenizer
    self.feature_size = self.feature_dict[self.data['video_id'].values[0]].shape[-1]
def __len__(self):
    """Number of (video, sentence) rows in the csv."""
    return len(self.data)
def _get_text(self, video_id, sentence):
    """Tokenize `sentence` and build BERT inputs + MLM targets.

    Returns:
        (text, mask, segment, masked_text, token_labels, choice_video_ids) —
        arrays shaped (1, max_words) with MLM labels of -1 ignored by the loss.
    """
    choice_video_ids = [video_id]
    k = len(choice_video_ids)
    # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
    pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
    pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)

    for i, video_id in enumerate(choice_video_ids):
        words = self.tokenizer.tokenize(sentence)

        words = ["[CLS]"] + words
        total_length_with_CLS = self.max_words - 1
        if len(words) > total_length_with_CLS:
            words = words[:total_length_with_CLS]
        words = words + ["[SEP]"]

        # Mask Language Model <-----
        token_labels = []
        masked_tokens = words.copy()
        for token_id, token in enumerate(masked_tokens):
            if token_id == 0 or token_id == len(masked_tokens) - 1:
                token_labels.append(-1)  # never mask [CLS] / [SEP]
                continue
            prob = random.random()
            # mask token with 15% probability
            if prob < 0.15:
                prob /= 0.15
                if prob < 0.8:
                    # 80%: replace with [MASK]
                    masked_tokens[token_id] = "[MASK]"
                elif prob < 0.9:
                    # 10%: replace with a random vocab token; rest 10%: keep as-is
                    masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                try:
                    token_labels.append(self.tokenizer.vocab[token])
                except KeyError:
                    # Unknown words (should not occur with a BPE vocab)
                    token_labels.append(self.tokenizer.vocab["[UNK]"])
            else:
                # unmasked token: ignored by the MLM loss
                token_labels.append(-1)
        # -----> Mask Language Model

        input_ids = self.tokenizer.convert_tokens_to_ids(words)
        input_mask = [1] * len(input_ids)
        segment_ids = [0] * len(input_ids)
        masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
        # Zero-pad everything to max_words.
        while len(input_ids) < self.max_words:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            masked_token_ids.append(0)
            token_labels.append(-1)
        assert len(input_ids) == self.max_words
        assert len(input_mask) == self.max_words
        assert len(segment_ids) == self.max_words
        assert len(masked_token_ids) == self.max_words
        assert len(token_labels) == self.max_words

        pairs_text[i] = np.array(input_ids)
        pairs_mask[i] = np.array(input_mask)
        pairs_segment[i] = np.array(segment_ids)
        pairs_masked_text[i] = np.array(masked_token_ids)
        pairs_token_labels[i] = np.array(token_labels)

    return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, choice_video_ids
def _get_video(self, choice_video_ids):
video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long)
max_video_length = [0] * len(choice_video_ids)
video = np.zeros((len(choice_video_ids), self.max_frames, self.feature_size), dtype=np.float)
for i, video_id in enumerate(choice_video_ids):
video_slice = self.feature_dict[video_id]
if self.max_frames < video_slice.shape[0]:
video_slice = video_slice[:self.max_frames]
slice_shape = video_slice.shape
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
if len(video_slice) < 1:
print("video_id: {}".format(video_id))
else:
video[i][:slice_shape[0]] = video_slice
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
# Mask Frame Model <-----
video_labels_index = [[] for _ in range(len(choice_video_ids))]
masked_video = video.copy()
for i, video_pair_ in enumerate(masked_video):
for j, _ in enumerate(video_pair_):
if j < max_video_length[i]:
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
masked_video[i][j] = [0.] * video.shape[-1]
video_labels_index[i].append(j)
else:
video_labels_index[i].append(-1)
else:
video_labels_index[i].append(-1)
video_labels_index = np.array(video_labels_index, dtype=np.long)
# -----> Mask Frame Model
return video, video_mask, masked_video, video_labels_index
def __getitem__(self, idx):
    """Return one retrieval sample: text/MLM tensors plus video/MFM tensors."""
    row_video_id = self.data['video_id'].values[idx]
    row_sentence = self.data['sentence'].values[idx]
    text_fields = self._get_text(row_video_id, row_sentence)
    (pairs_text, pairs_mask, pairs_segment,
     pairs_masked_text, pairs_token_labels, choice_video_ids) = text_fields
    video, video_mask, masked_video, video_labels_index = self._get_video(choice_video_ids)
    return (pairs_text, pairs_mask, pairs_segment, video, video_mask,
            pairs_masked_text, pairs_token_labels, masked_video, video_labels_index)
class MSRVTT_TrainDataLoader(Dataset):
    """MSRVTT train dataset loader.

    Yields (text, video) pairs together with the masked variants required by
    the masked-language-model (MLM) and masked-frame-model (MFM) objectives.
    """

    def __init__(
            self,
            csv_path,
            json_path,
            features_path,
            tokenizer,
            max_words=30,
            feature_framerate=1.0,
            max_frames=100,
            unfold_sentences=False,
    ):
        """
        Args:
            csv_path: csv file with a 'video_id' column of training videos.
            json_path: MSRVTT json with 'sentences' (and 'videos') entries.
            features_path: pickle file mapping video_id -> feature array.
            tokenizer: BERT-style tokenizer (tokenize / vocab /
                convert_tokens_to_ids).
            max_words: fixed token length of every text sample (incl. CLS/SEP).
            feature_framerate: stored for reference; not used below.
            max_frames: fixed frame length of every video sample.
            unfold_sentences: if True, one sample per (video, caption) pair;
                otherwise one sample per video with a random caption.
        """
        self.csv = pd.read_csv(csv_path)
        # Use context managers so the file handles are closed deterministically
        # (the original `json.load(open(...))` leaked them).
        with open(json_path, 'r') as json_fp:
            self.data = json.load(json_fp)
        with open(features_path, 'rb') as feature_fp:
            self.feature_dict = pickle.load(feature_fp)
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.tokenizer = tokenizer

        # Infer the feature dimensionality from the first video's features.
        self.feature_size = self.feature_dict[self.csv['video_id'].values[0]].shape[-1]

        self.unfold_sentences = unfold_sentences
        self.sample_len = 0
        if self.unfold_sentences:
            # Set for O(1) membership tests (was a list: O(n) per caption).
            train_video_ids = set(self.csv['video_id'].values)
            self.sentences_dict = {}
            for itm in self.data['sentences']:
                if itm['video_id'] in train_video_ids:
                    self.sentences_dict[len(self.sentences_dict)] = (itm['video_id'], itm['caption'])
            self.sample_len = len(self.sentences_dict)
        else:
            num_sentences = 0
            self.sentences = defaultdict(list)
            s_video_id_set = set()
            for itm in self.data['sentences']:
                self.sentences[itm['video_id']].append(itm['caption'])
                num_sentences += 1
                s_video_id_set.add(itm['video_id'])

            # Use to find the clips in the same video
            self.parent_ids = {}
            self.children_video_ids = defaultdict(list)
            for itm in self.data['videos']:
                vid = itm["video_id"]
                url_posfix = itm["url"].split("?v=")[-1]
                self.parent_ids[vid] = url_posfix
                self.children_video_ids[url_posfix].append(vid)
            self.sample_len = len(self.csv)

    def __len__(self):
        return self.sample_len

    def _get_text(self, video_id, caption=None):
        """Tokenize, pad and MLM-mask one caption (or a random one).

        Returns:
            (pairs_text, pairs_mask, pairs_segment, pairs_masked_text,
             pairs_token_labels, choice_video_ids) — all arrays are
            (1, max_words); labels hold vocab ids at masked positions
            and -1 elsewhere.
        """
        k = 1
        choice_video_ids = [video_id]
        # np.long was removed in NumPy >= 1.24; np.int64 is the equivalent.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)

        for i, video_id in enumerate(choice_video_ids):
            if caption is not None:
                words = self.tokenizer.tokenize(caption)
            else:
                words = self._get_single_text(video_id)

            words = ["[CLS]"] + words
            total_length_with_CLS = self.max_words - 1
            if len(words) > total_length_with_CLS:
                words = words[:total_length_with_CLS]
            words = words + ["[SEP]"]

            # Mask Language Model <-----
            token_labels = []
            masked_tokens = words.copy()
            for token_id, token in enumerate(masked_tokens):
                # Never mask [CLS] / [SEP].
                if token_id == 0 or token_id == len(masked_tokens) - 1:
                    token_labels.append(-1)
                    continue
                prob = random.random()
                # mask token with 15% probability
                if prob < 0.15:
                    prob /= 0.15

                    # 80% randomly change token to mask token
                    if prob < 0.8:
                        masked_tokens[token_id] = "[MASK]"
                    # 10% randomly change token to random token
                    elif prob < 0.9:
                        masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                    # -> rest 10% randomly keep current token

                    # append current token to output (we will predict these later)
                    try:
                        token_labels.append(self.tokenizer.vocab[token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        token_labels.append(self.tokenizer.vocab["[UNK]"])
                else:
                    # no masking token (will be ignored by loss function later)
                    token_labels.append(-1)
            # -----> Mask Language Model

            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)
            masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
            # Pad every sequence to exactly max_words.
            while len(input_ids) < self.max_words:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
                masked_token_ids.append(0)
                token_labels.append(-1)
            assert len(input_ids) == self.max_words
            assert len(input_mask) == self.max_words
            assert len(segment_ids) == self.max_words
            assert len(masked_token_ids) == self.max_words
            assert len(token_labels) == self.max_words

            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)
            pairs_masked_text[i] = np.array(masked_token_ids)
            pairs_token_labels[i] = np.array(token_labels)

        return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, choice_video_ids

    def _get_single_text(self, video_id):
        """Pick one of the video's captions at random and tokenize it."""
        rind = random.randint(0, len(self.sentences[video_id]) - 1)
        caption = self.sentences[video_id][rind]
        words = self.tokenizer.tokenize(caption)
        return words

    def _get_video(self, choice_video_ids):
        """Load video features and build a masked copy for the MFM task.

        Returns:
            (video, video_mask, masked_video, video_labels_index); labels hold
            the frame index at masked positions and -1 elsewhere.
        """
        # np.long / np.float were removed in NumPy >= 1.24.
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
        max_video_length = [0] * len(choice_video_ids)

        video = np.zeros((len(choice_video_ids), self.max_frames, self.feature_size), dtype=np.float64)
        for i, video_id in enumerate(choice_video_ids):
            video_slice = self.feature_dict[video_id]
            if self.max_frames < video_slice.shape[0]:
                video_slice = video_slice[:self.max_frames]

            slice_shape = video_slice.shape
            max_video_length[i] = max(max_video_length[i], slice_shape[0])
            if len(video_slice) < 1:
                # Empty feature entry: leave zeros, but report which video.
                print("video_id: {}".format(video_id))
            else:
                video[i][:slice_shape[0]] = video_slice

        for i, v_length in enumerate(max_video_length):
            video_mask[i][:v_length] = [1] * v_length

        # Mask Frame Model <-----
        video_labels_index = [[] for _ in range(len(choice_video_ids))]
        masked_video = video.copy()
        for i, video_pair_ in enumerate(masked_video):
            for j, _ in enumerate(video_pair_):
                if j < max_video_length[i]:
                    # mask frame with 15% probability
                    if random.random() < 0.15:
                        masked_video[i][j] = [0.] * video.shape[-1]
                        video_labels_index[i].append(j)
                    else:
                        video_labels_index[i].append(-1)
                else:
                    video_labels_index[i].append(-1)
        video_labels_index = np.array(video_labels_index, dtype=np.int64)
        # -----> Mask Frame Model
        return video, video_mask, masked_video, video_labels_index

    def __getitem__(self, idx):
        """Return one training sample; caption choice depends on the mode."""
        if self.unfold_sentences:
            video_id, caption = self.sentences_dict[idx]
        else:
            video_id, caption = self.csv['video_id'].values[idx], None
        pairs_text, pairs_mask, pairs_segment, \
        pairs_masked_text, pairs_token_labels, choice_video_ids = self._get_text(video_id, caption)

        video, video_mask, masked_video, video_labels_index = self._get_video(choice_video_ids)

        return pairs_text, pairs_mask, pairs_segment, video, video_mask, \
               pairs_masked_text, pairs_token_labels, masked_video, video_labels_index
| 15,263 | 42.240793 | 113 | py |
UniVL | UniVL-main/dataloaders/dataloader_howto100m.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import random
class Youtube_DataLoader(Dataset):
    """
    Youtube dataset loader.
    Note: Use transcript as caption, for mask decoder pretrain task.

    Yields text/video pairs plus the masked variants used by the
    masked-language-model, masked-frame-model and caption-decoder objectives.
    """

    def __init__(
            self,
            csv,
            features_path,
            data_dict,
            tokenizer,
            min_time=10.0,
            feature_framerate=1.0,
            max_words=30,
            min_words=0,
            n_pair=-1,
            max_frames=100,
            with_long_context=True,
            use_mil=False,
            only_sim=False,  # set automatically from model choice
            sampled_use_mil=False,
            pretrain_enhance_vmodal=False,
            video_dim=1024,
    ):
        """
        Args:
            csv: csv file with 'video_id' and 'feature_file' columns.
            features_path: folder containing the .npy video feature files.
            data_dict: mapping video_id -> dict with 'start', 'end', 'text'.
            tokenizer: BERT-style tokenizer (tokenize / vocab /
                convert_tokens_to_ids).
        """
        self.csv = pd.read_csv(csv)
        self.features_path = features_path
        self.data_dict = data_dict
        self.min_time = min_time
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.min_words = min_words
        self.tokenizer = tokenizer
        self.n_pair = n_pair
        self.with_long_context = with_long_context
        self.feature_size = video_dim
        self.only_sim = only_sim
        self.pretrain_enhance_vmodal = pretrain_enhance_vmodal

        self.iter_num = len(self.csv)

        self.use_mil = use_mil
        self.sampled_use_mil = sampled_use_mil
        if self.sampled_use_mil:  # sample from each video, has a higher priority than use_mil.
            self.use_mil = True

        if self.use_mil:
            positive_n_pair = self.n_pair
            # Get iterator video ids
            video_id_list = [itm for itm in self.csv['video_id'].values]
            self.video_id2idx_dict = {video_id: id for id, video_id in enumerate(video_id_list)}
            # Group every video's caption indices into chunks of n_pair clips.
            self.iter2video_pairs_dict = {}
            self.iter2video_pairslist_dict = {}
            iter_idx_mil_ = 0
            for video_id in video_id_list:
                data_dict = self.data_dict[video_id]
                n_caption = len(data_dict['start'])

                sub_list = []
                if self.n_pair < 0 or self.n_pair == 1:
                    for sub_id in range(n_caption):
                        sub_list.append([sub_id])
                else:
                    sb_ls_ = list(range(n_caption))
                    if self.n_pair > n_caption:
                        # Repeat caption ids until one chunk can be filled.
                        sb_ls_ = sb_ls_ * (self.n_pair // n_caption + 1)
                        sb_ls_ = sb_ls_[:self.n_pair]
                        for sub_id in np.arange(0, len(sb_ls_), self.n_pair):
                            sub_list.append(sb_ls_[sub_id: sub_id + self.n_pair])
                    else:
                        # Pad so the length is a multiple of positive_n_pair.
                        sb_ls_ = sb_ls_ + sb_ls_[:(((n_caption+positive_n_pair-1)//positive_n_pair)*positive_n_pair-n_caption)]
                        for sub_id in np.arange(0, len(sb_ls_), positive_n_pair):
                            pos_ls = sb_ls_[sub_id: sub_id + positive_n_pair]
                            sub_list.append(pos_ls)

                for sub_e in sub_list:
                    self.iter2video_pairs_dict[iter_idx_mil_] = (video_id, sub_e)
                    iter_idx_mil_ += 1
                self.iter2video_pairslist_dict[video_id] = sub_list

        if self.use_mil and self.sampled_use_mil is False:
            self.iter_num = len(self.iter2video_pairs_dict)

    def __len__(self):
        return self.iter_num

    def _mask_tokens(self, words):
        """BERT-style MLM masking; first/last token ([CLS]/[SEP]) are kept.

        Returns:
            (masked_tokens, token_labels): labels hold the original vocab id
            at masked positions and -1 elsewhere.
        """
        token_labels = []
        masked_tokens = words.copy()

        for token_id, token in enumerate(masked_tokens):
            if token_id == 0 or token_id == len(masked_tokens) - 1:
                token_labels.append(-1)
                continue
            prob = random.random()
            # Mask with 15% probability: 80% -> [MASK], 10% -> random token,
            # 10% -> keep; the label is always the original token id.
            if prob < 0.15:
                prob /= 0.15
                if prob < 0.8:
                    masked_tokens[token_id] = "[MASK]"
                elif prob < 0.9:
                    masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                try:
                    token_labels.append(self.tokenizer.vocab[token])
                except KeyError:
                    token_labels.append(self.tokenizer.vocab["[UNK]"])
            else:
                token_labels.append(-1)

        return masked_tokens, token_labels

    def _get_text(self, video_id, n_pair_max, sub_ids=None, only_sim=False, enhance_vmodel=False):
        """Build k padded text samples (plus MLM/decoder variants).

        Args:
            video_id: id to look up in ``self.data_dict``.
            n_pair_max: number of clips to sample (-1 = all), ignored in MIL mode.
            sub_ids: caption indices to use when ``self.use_mil`` is set.
            only_sim: if True, skip the MLM/decoder arrays (left as zeros).
            enhance_vmodel: if True, feed empty text (video-only sample).

        Returns:
            Tuple of eight (k, max_words) arrays plus the clip start/end times.
        """
        data_dict = self.data_dict[video_id]

        if self.use_mil:
            k = len(sub_ids)
            r_ind = sub_ids
        else:
            n_caption = len(data_dict['start'])
            if n_pair_max == -1:
                k = n_caption
                r_ind = range(n_caption)
            else:
                k = n_pair_max
                if k <= n_caption:
                    r_ind = np.random.choice(range(n_caption), k, replace=False)
                else:
                    # Take every caption once, then fill up with replacement.
                    r_ind_must = np.array(range(n_caption))
                    r_ind_rand = np.random.choice(range(n_caption), k-n_caption, replace=True)
                    r_ind = np.concatenate((r_ind_must, r_ind_rand), axis=0)
                np.random.shuffle(r_ind)

        starts = np.zeros(k)
        ends = np.zeros(k)
        # np.long was removed in NumPy >= 1.24; np.int64 is the equivalent.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)

        pairs_input_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_output_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_decoder_mask = np.zeros((k, self.max_words), dtype=np.int64)

        for i in range(k):
            ind = r_ind[i]
            words, start_, end_ = self._get_single_transcript(data_dict, ind, with_long_context=self.with_long_context)
            caption_words = words.copy()
            starts[i], ends[i] = start_, end_

            if enhance_vmodel:
                words = []  # mask all input text

            words = ["[CLS]"] + words
            total_length_with_CLS = self.max_words - 1
            if len(words) > total_length_with_CLS:
                words = words[:total_length_with_CLS]
            words = words + ["[SEP]"]

            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)
            while len(input_ids) < self.max_words:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == self.max_words
            assert len(input_mask) == self.max_words
            assert len(segment_ids) == self.max_words

            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)

            if only_sim is False:
                # For generate captions
                if len(caption_words) > total_length_with_CLS:
                    caption_words = caption_words[:total_length_with_CLS]
                input_caption_words = ["[CLS]"] + caption_words
                output_caption_words = caption_words + ["[SEP]"]

                masked_tokens, token_labels = self._mask_tokens(words)
                masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
                masked_input_caption_words, input_token_labels = self._mask_tokens(input_caption_words)
                input_caption_words = masked_input_caption_words.copy()

                while len(masked_token_ids) < self.max_words:
                    masked_token_ids.append(0)
                    token_labels.append(-1)
                assert len(masked_token_ids) == self.max_words
                assert len(token_labels) == self.max_words

                # For generate captions
                input_caption_ids = self.tokenizer.convert_tokens_to_ids(input_caption_words)
                output_caption_ids = self.tokenizer.convert_tokens_to_ids(output_caption_words)
                decoder_mask = [1] * len(input_caption_ids)
                while len(input_caption_ids) < self.max_words:
                    input_caption_ids.append(0)
                    output_caption_ids.append(0)
                    decoder_mask.append(0)
                assert len(input_caption_ids) == self.max_words
                assert len(output_caption_ids) == self.max_words
                assert len(decoder_mask) == self.max_words

                pairs_masked_text[i] = np.array(masked_token_ids)
                pairs_token_labels[i] = np.array(token_labels)
                pairs_input_caption_ids[i] = np.array(input_caption_ids)
                pairs_output_caption_ids[i] = np.array(output_caption_ids)
                pairs_decoder_mask[i] = np.array(decoder_mask)

        return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, \
               pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids, starts, ends

    def _get_single_transcript(self, data_dict, ind, with_long_context=True):
        """Tokenize transcript clip ``ind``; optionally grow it by merging
        neighboring clips until it reaches min_words / min_time.

        Returns:
            (words, start_time, end_time) of the (possibly extended) clip.
        """
        start, end = ind, ind
        words = self.tokenizer.tokenize(str(data_dict['text'][ind]))
        diff = data_dict['end'][end] - data_dict['start'][start]
        while with_long_context and (len(words) < self.min_words or diff < self.min_time):
            if start > 0 and end < len(data_dict['end']) - 1:
                next_words = self.tokenizer.tokenize(str(data_dict['text'][end + 1]))
                prev_words = self.tokenizer.tokenize(str(data_dict['text'][start - 1]))
                d1 = data_dict['end'][end + 1] - data_dict['start'][start]
                d2 = data_dict['end'][end] - data_dict['start'][start - 1]
                # Extend to whichever side keeps the clip shorter (by time if
                # min_time is set, otherwise by word count).
                if (self.min_time > 0 and d2 <= d1) or \
                    (self.min_time == 0 and len(next_words) <= len(prev_words)):
                    start -= 1
                    words = prev_words + words
                else:
                    end += 1
                    words.extend(next_words)
            elif start > 0:
                words = self.tokenizer.tokenize(str(data_dict['text'][start - 1])) + words
                start -= 1
            elif end < len(data_dict['end']) - 1:
                words.extend(self.tokenizer.tokenize(str(data_dict['text'][end + 1])))
                end += 1
            else:
                break
            diff = data_dict['end'][end] - data_dict['start'][start]
        return words, data_dict['start'][start], data_dict['end'][end]

    def _expand_video_slice(self, s, e, si, ei, fps, video_features):
        """Slice video_features for clip [s[si], e[ei]] (in seconds), widening
        the window alternately left/right until the slice is non-empty.

        Returns:
            (video_slice, start_frame, end_frame); the slice is capped at
            max_frames.
        """
        start = int(s[si] * fps)
        end = int(e[ei] * fps) + 1

        if start > end:
            start, end = end, start
        video_slice = video_features[start:end]

        expand_left = True
        while len(video_slice) < 1:
            if si==0 and ei==len(s)-1:
                break
            if expand_left:
                expand_left = False
                si = si-1 if si>0 else si
            else:
                expand_left = True
                ei = ei+1 if ei<len(e)-1 else ei
            start = int(s[si] * fps)
            end = int(e[ei] * fps) + 1
            if start > end:
                start, end = end, start
            video_slice = video_features[start:end]

        if self.max_frames < video_slice.shape[0]:
            video_slice = video_slice[:self.max_frames]

        return video_slice, start, end

    def _get_video(self, idx, s, e, only_sim=False):
        """Load per-clip video features and build the MFM-masked copy.

        Args:
            idx: row index into the csv (selects the feature file).
            s, e: arrays of clip start/end times in seconds.
            only_sim: if True, skip frame masking (labels stay empty lists
                until converted, matching the original behavior).
        """
        # np.long / np.float were removed in NumPy >= 1.24.
        video_mask = np.zeros((len(s), self.max_frames), dtype=np.int64)
        max_video_length = [0] * len(s)

        video = np.zeros((len(s), self.max_frames, self.feature_size), dtype=np.float64)
        feature_file = os.path.join(self.features_path, self.csv["feature_file"].values[idx])
        try:
            video_features = np.load(feature_file)

            for i in range(len(s)):
                if len(video_features) < 1:
                    raise ValueError("{} is empty.".format(feature_file))
                video_slice, start, end = self._expand_video_slice(s, e, i, i, self.feature_framerate, video_features)
                slice_shape = video_slice.shape
                max_video_length[i] = max(max_video_length[i], slice_shape[0])
                if len(video_slice) < 1:
                    pass
                else:
                    video[i][:slice_shape[0]] = video_slice
        except Exception:
            # Best-effort: a missing/corrupt feature file yields all-zero
            # features. Do NOT bind the exception as `e` — that would shadow
            # the `e` (end times) parameter.
            print("video_id: {} error.".format(feature_file))

        for i, v_length in enumerate(max_video_length):
            video_mask[i][:v_length] = [1] * v_length

        # Mask Frame Model <-----
        video_labels_index = [[] for _ in range(len(s))]
        masked_video = video.copy()
        if only_sim is False:
            for i, video_pair_ in enumerate(masked_video):
                for j, _ in enumerate(video_pair_):
                    if j < max_video_length[i]:
                        # mask frame with 15% probability
                        if random.random() < 0.15:
                            masked_video[i][j] = [0.] * video.shape[-1]
                            video_labels_index[i].append(j)
                        else:
                            video_labels_index[i].append(-1)
                    else:
                        video_labels_index[i].append(-1)
        video_labels_index = np.array(video_labels_index, dtype=np.int64)
        # -----> Mask Frame Model

        return video, video_mask, masked_video, video_labels_index

    def second_to_stamp(self, in_seconds):
        """Convert a number of seconds into an 'HH:MM:SS' timestamp string."""
        m, s = divmod(in_seconds, 60)
        h, m2 = divmod(m, 60)
        return "%02d:%02d:%02d" % (h, m2, s)

    def __getitem__(self, feature_idx):
        """Return one pretraining sample; clip selection depends on the
        MIL / sampled-MIL configuration chosen at construction."""
        if self.sampled_use_mil:  # sample from each video, has a higher priority than use_mil.
            idx = feature_idx
            video_id = self.csv['video_id'].values[idx]
            sub_list = self.iter2video_pairslist_dict[video_id]
            ranint = np.random.randint(0, len(sub_list))
            sub_ids = sub_list[ranint]
        elif self.use_mil:
            video_id, sub_ids = self.iter2video_pairs_dict[feature_idx]
            idx = self.video_id2idx_dict[video_id]
        else:
            idx = feature_idx
            video_id = self.csv['video_id'].values[idx]
            sub_ids = None

        enhance_vmodel = False
        if self.only_sim is False and self.pretrain_enhance_vmodal:
            prob = random.random()
            if prob < 0.15:  # mask all text by rate 0.15
                enhance_vmodel = True

        pairs_text, pairs_mask, pairs_segment, \
        pairs_masked_text, pairs_token_labels, pairs_input_caption_ids, \
        pairs_decoder_mask, pairs_output_caption_ids, \
        starts, ends = self._get_text(video_id, self.n_pair, sub_ids, only_sim=self.only_sim, enhance_vmodel=enhance_vmodel)

        video, video_mask, masked_video, video_labels_index = self._get_video(idx, starts, ends, only_sim=self.only_sim)

        return pairs_text, pairs_mask, pairs_segment, video, video_mask, \
               pairs_masked_text, pairs_token_labels, masked_video, video_labels_index, \
               pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids
| 15,835 | 41.8 | 127 | py |
nuts-ml | nuts-ml-master/setup.py | import os
import sys
import glob
import shutil
import nutsml
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
class CleanCommand(Command):
    """Custom ``setup.py clean`` command that removes build artifacts."""

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        """Delete build/dist folders and any *egg-info directories."""
        doomed = [folder for folder in ('build', 'dist')
                  if os.path.exists(folder)]
        doomed.extend(glob.glob('*egg-info'))
        for path in doomed:
            shutil.rmtree(path)
class PyTest(TestCommand):
    """Custom ``setup.py test`` command that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        """Run the pytest suite and exit with its return code."""
        import pytest
        sys.exit(pytest.main(self.test_args))
def load_readme():
    """Return the contents of README.rst (used as the long description).

    The file is read as UTF-8 explicitly so the build result does not depend
    on the platform's default encoding (e.g. cp1252 on Windows).
    """
    with open('README.rst', encoding='utf-8') as f:
        return f.read()
# Package metadata; name and version are taken from the nutsml package itself.
setup(
    name=nutsml.__name__,
    version=nutsml.__version__,
    url='https://maet3608.github.io/nuts-ml',
    download_url='https://github.com/maet3608/nuts-ml',
    license='Apache Software License ('
            'http://www.apache.org/licenses/LICENSE-2.0)',
    author='Stefan Maetschke',
    author_email='stefan.maetschke@gmail.com',
    description='Flow-based data pre-processing for Machine Learning',
    long_description=load_readme(),  # README.rst rendered on PyPI
    long_description_content_type='text/x-rst',
    # Runtime dependencies with minimum supported versions.
    install_requires=[
        'nutsflow >= 1.2.3',
        'openpyxl >= 3.0.5',
        'pandas > 0.21.0',
        'six >= 1.10.0',
        'scipy >= 0.17.0',
        'pillow >= 3.0.0',
        'scikit-image >= 0.12.3',
    ],
    tests_require=['pytest >= 3.0.3'],
    platforms='any',
    packages=find_packages(exclude=['setup']),
    include_package_data=True,
    # Wire up the custom commands defined above: 'setup.py test' / 'clean'.
    cmdclass={
        'test': PyTest,
        'clean': CleanCommand,
    },
    keywords=['machine learning', 'deep learning', 'image processing'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Natural Language :: English',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
)
| 2,577 | 27.644444 | 71 | py |
nuts-ml | nuts-ml-master/sphinx/source/conf.py | # -*- coding: utf-8 -*-
#
# nutsml documentation build configuration file, created by
# sphinx-quickstart on Mon May 23 17:01:48 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import nutsml
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Here the nutsml package source is made importable for autodoc.
sys.path.insert(0, os.path.abspath('../nutsml'))
# Add documentation for special methods method.
def skip(app, what, name, obj, skip, options):
    """autodoc-skip-member handler: always document selected dunders.

    Returns False (i.e. do not skip) for __init__/__call__/__rrshift__;
    for every other member it defers to autodoc's default decision, which
    is passed in via the ``skip`` parameter.
    """
    documented_dunders = ("__init__", "__call__", "__rrshift__")
    return False if name in documented_dunders else skip
def setup(app):
    """Sphinx extension hook: register the autodoc member-skip filter."""
    app.connect("autodoc-skip-member", skip)
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.imgmath',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.autosectionlabel',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'nutsml'
copyright = u'2017, IBM Research Australia'
author = u'Stefan Maetschke'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = str(nutsml.__version__.split('.')[:2])
version = nutsml.__version__
# The full version, including alpha/beta/rc tags.
release = nutsml.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases expect a string here (e.g. 'en') and
# warn on None — confirm against the Sphinx version used for builds.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['modules.rst']

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# html_theme = 'bizstyle'
# html_theme = 'classic'
# html_theme = 'nature'

# https://pythonhosted.org/cloud_sptheme/cloud_theme.html
#import cloud_sptheme as csp
#html_theme = "cloud" # There is also "redcloud"
#html_theme_path = [csp.get_theme_dir()]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# html_title = u'nutsml v0.0.01'

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (relative to this directory) to use as a favicon of
# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
html_last_updated_fmt = ''

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'nutsmldoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',

    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Nutsml.tex', u'Nutsml Documentation',
     u'IBM', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'nutsml', u'Nutsml Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'nutsml', u'nutsml Documentation',
     author, 'nutsml', 'Flow-based data preprocessing for machine learning',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 10,445 | 31.240741 | 80 | py |
nuts-ml | nuts-ml-master/tests/__init__.py | 0 | 0 | 0 | py | |
nuts-ml | nuts-ml-master/tests/nutsml/test_checkpoint.py | """
.. module:: test_checkpoint
:synopsis: Unit tests for checkpoint module
"""
import pytest
import os
import shutil
import time
import nutsml.checkpoint as nc
from os.path import join
from nutsflow.config import Config
from nutsml.network import Network
# Root folder under which the test checkpoint directories are created/removed.
BASEPATH = 'tests/data/checkpoints'
@pytest.fixture(scope="function")
def checkpointdirs(request):
    """Create two checkpoint folders (checkpoint2 newer) and clean up after.

    Returns:
        Tuple (checkpoint1, checkpoint2) of the created directory paths.
    """
    # The unused inner helper `rmdir` was removed (dead code); cleanup is
    # handled by the finalizer below.
    def mkdir(path):
        if not os.path.exists(path):
            os.makedirs(path)

    checkpoint1 = join(BASEPATH, 'checkpoint1')
    checkpoint2 = join(BASEPATH, 'checkpoint2')
    mkdir(checkpoint1)
    time.sleep(0.1)  # ensure diff in creation time of checkpoints
    mkdir(checkpoint2)

    def fin():
        # Remove the whole checkpoint tree once the test is finished.
        if os.path.exists(BASEPATH):
            shutil.rmtree(BASEPATH)

    request.addfinalizer(fin)

    return checkpoint1, checkpoint2
class FakeNetwork(Network):
    """Minimal Network stub whose 'weights' are a string stored in a file."""

    def __init__(self):
        self.weights = 'weights'

    def save_weights(self, weightspath=None):
        """Write the current weights string to ``weightspath``."""
        with open(weightspath, 'w') as wfile:
            wfile.write(self.weights)

    def load_weights(self, weightspath=None):
        """Read the weights string back from ``weightspath``."""
        with open(weightspath, 'r') as rfile:
            self.weights = rfile.read()
def create_net(lr=0.1):
    """Build a (FakeNetwork, optimizer-config) pair for checkpoint tests."""
    return FakeNetwork(), Config(lr=lr)
def parameters(network, optimizer):
    """Return the hyperparameters to store alongside a checkpoint."""
    return {'lr': optimizer.lr}
def create_net0():
    """Build a bare FakeNetwork (no optimizer)."""
    return FakeNetwork()
def parameters0(network):
    """Return an empty hyperparameter dict (network-only checkpoint)."""
    return {}
@pytest.fixture(scope="function")
def create_checkpoint(request):
    """Yield a Checkpoint over BASEPATH and wipe BASEPATH afterwards."""
    def fin():
        # Remove all checkpoint folders created during the test.
        if os.path.exists(BASEPATH):
            shutil.rmtree(BASEPATH)

    request.addfinalizer(fin)
    return nc.Checkpoint(create_net, parameters, BASEPATH)
def test_constructor_single():
    """Checkpoint built from a network-only factory saves/restores weights."""
    checkpoint = nc.Checkpoint(create_net0, parameters0, BASEPATH)
    network = checkpoint.load()
    assert network.weights == 'weights'
    # mutate, save under a named checkpoint, reload and verify persistence
    network.weights = 'new_weights'
    checkpoint.save('checkpoint0')
    network = checkpoint.load()
    assert network.weights == 'new_weights'
    shutil.rmtree(BASEPATH)
def test_dirs_empty(create_checkpoint):
    """A checkpoint without saved states reports no checkpoint dirs."""
    assert create_checkpoint.dirs() == []
def test_dirs(checkpointdirs, create_checkpoint):
    """dirs() lists all existing checkpoint directories."""
    dir1, dir2 = checkpointdirs
    assert sorted(create_checkpoint.dirs()) == [dir1, dir2]
def test_latest_empty(create_checkpoint):
    """latest() is None when no checkpoint has been written yet."""
    assert create_checkpoint.latest() is None
def test_latest(checkpointdirs, create_checkpoint):
    """latest() returns the most recently created checkpoint dir."""
    _, newest = checkpointdirs
    assert create_checkpoint.latest() == newest
def test_datapaths_empty(create_checkpoint):
    """datapaths() returns a triple of Nones when no checkpoint exists."""
    assert create_checkpoint.datapaths() == (None, None, None)
def test_datapaths(checkpointdirs, create_checkpoint):
    """datapaths() returns the weights/params/config paths of a checkpoint."""
    checkpoint1, checkpoint2 = checkpointdirs
    checkpoint = create_checkpoint

    def paths(cdir):
        # expected file layout inside a checkpoint directory
        return (join(cdir, 'weights'), join(cdir, 'params.json'),
                join(cdir, 'config.json'))

    # default: paths of the latest checkpoint (checkpoint2)
    assert checkpoint.datapaths() == paths(checkpoint2)
    # explicit name selects a specific checkpoint
    assert checkpoint.datapaths('checkpoint1') == paths(checkpoint1)
def test_load(create_checkpoint):
    """load() restores network and optimizer with their default settings."""
    network, optimizer = create_checkpoint.load()
    assert isinstance(network, FakeNetwork)
    assert network.weights == 'weights'
    assert optimizer.lr == 0.1
def test_save(create_checkpoint):
    """save() persists modified weights and parameters; load() restores them."""
    checkpoint = create_checkpoint
    network, optimizer = checkpoint.load()
    # mutate network/optimizer state, save under a named checkpoint ...
    optimizer.lr = 0.2
    network.weights = 'new_weights'
    checkpoint.save('checkpoint0')
    # ... then reload and verify both were persisted
    network, optimizer = checkpoint.load()
    assert optimizer.lr == 0.2
    assert network.weights == 'new_weights'
def test_savebest(create_checkpoint):
    """save_best() only keeps weights when the score improves (lower loss)."""
    checkpoint = create_checkpoint
    network, optimizer = checkpoint.load()
    # best loss in the sequence is 1 -> that state must be the one stored
    for loss in [5, 3, 1, 2, 7]:
        network.weights = 'weights:loss=' + str(loss)
        checkpoint.save_best(loss, isloss=True)
    network, optimizer = checkpoint.load()
    assert network.weights == 'weights:loss=1'
    # a worse loss (3) must not overwrite the stored best (1)
    network.weights = 'weights:loss=3'
    checkpoint.save_best(3, isloss=True)
    network, optimizer = checkpoint.load()
    assert network.weights == 'weights:loss=1'
    # a better loss (0) must replace the stored best
    network.weights = 'weights:loss=0'
    checkpoint.save_best(0, isloss=True)
    network, optimizer = checkpoint.load()
    assert network.weights == 'weights:loss=0'
| 4,703 | 24.846154 | 66 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_common.py | """
.. module:: test_common
:synopsis: Unit tests for common module
"""
import pytest
import numpy as np
from six.moves import zip, range
from nutsflow import Consume, Collect, Map
from nutsflow.common import StableRandom, shapestr
from nutsml import (SplitRandom, SplitLeaveOneOut, CheckNaN, PartitionByCol,
ConvertLabel)
def test_CheckNaN():
    """CheckNaN passes clean data through and raises RuntimeError on NaN."""
    assert [1, 2] >> CheckNaN() >> Collect() == [1, 2]

    # np.nan instead of np.NaN: the capitalized alias was removed in NumPy 2.0
    with pytest.raises(RuntimeError) as ex:
        [1, np.nan, 3] >> CheckNaN() >> Consume()
    assert str(ex.value).startswith('NaN encountered')

    # NaNs nested inside sample tuples are detected as well
    with pytest.raises(RuntimeError) as ex:
        [(1, np.nan), (2, 4)] >> CheckNaN() >> Consume()
    assert str(ex.value).startswith('NaN encountered')
def test_PartitionByCol():
    """PartitionByCol splits samples into per-value partitions of a column."""
    samples = [(1, 1), (2, 0), (2, 4), (1, 3), (3, 0)]

    part1, part2 = samples >> PartitionByCol(0, [1, 2])
    assert part1 == [(1, 1), (1, 3)]
    assert part2 == [(2, 0), (2, 4)]

    # the order of the requested values determines the partition order
    part2, part1 = samples >> PartitionByCol(0, [2, 1])
    assert part1 == [(1, 1), (1, 3)]
    assert part2 == [(2, 0), (2, 4)]

    # values that never occur yield an empty partition
    part1, part4 = samples >> PartitionByCol(0, [1, 4])
    assert part1 == [(1, 1), (1, 3)]
    assert part4 == []

    # empty input yields empty partitions
    part1, part2 = [] >> PartitionByCol(0, [1, 2])
    assert part1 == []
    assert part2 == []
def test_SplitRandom_split():
    """The default 0.7 ratio produces a disjoint 700/300 split."""
    train, val = range(1000) >> SplitRandom(ratio=0.7)
    assert (len(train), len(val)) == (700, 300)
    assert not set(train) & set(val)
def test_SplitRandom_ratios():
    """Three-way ratios split accordingly; invalid ratios raise ValueError."""
    train, val, test = range(1000) >> SplitRandom(ratio=(0.6, 0.3, 0.1))
    assert len(train) == 600
    assert len(val) == 300
    assert len(test) == 100
    # ratios that do not sum to one are rejected
    with pytest.raises(ValueError) as ex:
        range(100) >> SplitRandom(ratio=(0.6, 0.7))
    assert str(ex.value).startswith('Ratios must sum up to one')
    # zero ratios are rejected
    with pytest.raises(ValueError) as ex:
        range(10) >> SplitRandom(ratio=(1, 0))
    assert str(ex.value).startswith('Ratios cannot be zero')
def test_SplitRandom_stable_default():
    """Without an explicit rand, SplitRandom is deterministic across calls."""
    first = range(10) >> SplitRandom()
    second = range(10) >> SplitRandom()
    assert first == second
def test_SplitRandom_seed():
    """Equal seeds give equal splits; different seeds give different ones."""
    split_a = range(10) >> SplitRandom(rand=StableRandom(0))
    split_b = range(10) >> SplitRandom(rand=StableRandom(0))
    split_c = range(10) >> SplitRandom(rand=StableRandom(1))
    assert split_a == split_b
    assert split_a != split_c
def test_SplitRandom_constraint():
    """Samples with the same constraint key never get split across folds."""
    same_letter = lambda t: t[0]  # constraint: group samples by their letter
    data = zip('aabbccddee', range(10))
    train, val = data >> SplitRandom(rand=None, ratio=0.6,
                                     constraint=same_letter) >> Collect()
    train.sort()
    val.sort()
    # each letter appears entirely in train or entirely in val
    assert train == [('a', 0), ('a', 1), ('b', 2), ('b', 3), ('d', 6), ('d', 7)]
    assert val == [('c', 4), ('c', 5), ('e', 8), ('e', 9)]
def test_SplitLeaveOneOut():
    """Each sample becomes the held-out set exactly once."""
    splits = [1, 2, 3] >> SplitLeaveOneOut() >> Collect()
    expected = [([2, 3], [1]), ([1, 3], [2]), ([1, 2], [3])]
    assert splits == expected
def test_ConvertLabel():
    """ConvertLabel maps between class indices/scores and label names."""
    labels = ['class0', 'class1', 'class2']

    # column=None: the sample itself is converted, in both directions
    convert = ConvertLabel(None, labels)
    assert [1, 0] >> convert >> Collect() == ['class1', 'class0']
    assert ['class1', 'class0'] >> convert >> Collect() == [1, 0]
    # floats map to the nearest class index (0.9 -> 1, 1.6 -> 2)
    assert [0.9, 1.6] >> convert >> Collect() == ['class1', 'class2']
    # score vectors map to the class with the highest score
    assert [[0.1, 0.7, 0.2]] >> convert >> Collect() == ['class1']

    # column=0: only the first element of each sample tuple is converted
    convert = ConvertLabel(0, labels)
    assert [('class2',)] >> convert >> Collect() == [(2,)]
    assert [(1,)] >> convert >> Collect() == [('class1',)]
    assert [(0.1,)] >> convert >> Collect() == [('class0',)]
    assert [(0.9,)] >> convert >> Collect() == [('class1',)]
    assert [(1.7,)] >> convert >> Collect() == [('class2',)]
    assert [([0.1, 0.7, 0.2],)] >> convert >> Collect() == [('class1',)]
    assert [(2,), (0,)] >> convert >> Collect() == [('class2',), ('class0',)]
    # other columns pass through untouched
    assert [(1, 'data')] >> convert >> Collect() == [('class1', 'data')]

    # third argument True: label names are converted to one-hot vectors
    convert = ConvertLabel(None, labels, True)
    expected = [[0, 1, 0], [1, 0, 0]]
    assert ['class1', 'class0'] >> convert >> Collect() == expected
| 4,121 | 31.976 | 80 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_reader.py | """
.. module:: test_reader
:synopsis: Unit tests for reader module
"""
import pytest
import pandas as pd
import numpy as np
import numpy.testing as nt
from collections import namedtuple
from nutsflow import Collect, Sort
from nutsml import ReadNumpy, ReadImage, ReadLabelDirs, ReadPandas
def test_ReadLabelDirs():
    """ReadLabelDirs yields (filepath, dirname-as-label) pairs."""
    samples = ReadLabelDirs('tests/data/labeldirs', '*.txt') >> Sort()
    assert samples == [('tests/data/labeldirs/0/test0.txt', '0'),
                       ('tests/data/labeldirs/1/test1.txt', '1'),
                       ('tests/data/labeldirs/1/test11.txt', '1')]

    # with an empty exclude pattern the '_2' label dir is included as well
    samples = ReadLabelDirs('tests/data/labeldirs', '*.txt', '') >> Sort()
    assert samples == [('tests/data/labeldirs/0/test0.txt', '0'),
                       ('tests/data/labeldirs/1/test1.txt', '1'),
                       ('tests/data/labeldirs/1/test11.txt', '1'),
                       ('tests/data/labeldirs/_2/test2.txt', '_2')]
def test_ReadNumpy():
    """ReadNumpy loads .npy arrays for a sample column via path pattern/func."""
    arr0 = np.load('tests/data/img_arrays/nut_color.gif.npy')
    arr1 = np.load('tests/data/img_arrays/nut_grayscale.gif.npy')
    samples = [('nut_color', 1), ('nut_grayscale', 2)]
    # '*' in the path pattern is substituted by the column value
    filepath = 'tests/data/img_arrays/*.gif.npy'
    np_samples = samples >> ReadNumpy(0, filepath) >> Collect()
    nt.assert_equal(np_samples[0][0], arr0)
    nt.assert_equal(np_samples[1][0], arr1)
    # the other columns are passed through unchanged
    assert np_samples[0][1] == 1
    assert np_samples[1][1] == 2
    # the path can also be computed by a function of the sample
    pathfunc = lambda s: 'tests/data/img_arrays/{0}.gif.npy'.format(*s)
    np_samples = samples >> ReadNumpy(0, pathfunc) >> Collect()
    nt.assert_equal(np_samples[0][0], arr0)
    nt.assert_equal(np_samples[1][0], arr1)
    # NOTE(review): this deliberately feeds a .npy path to ReadImage and gets
    # the stored array back — presumably ReadImage dispatches on the file
    # extension; confirm this is intended and not a copy-paste slip.
    samples = [('label', 'tests/data/img_arrays/nut_color.gif.npy')]
    np_samples = samples >> ReadImage(1) >> Collect()
    nt.assert_equal(np_samples[0][1], arr0)
def test_ReadImage():
    """ReadImage loads image files into numpy arrays for a sample column."""
    arr0 = np.load('tests/data/img_arrays/nut_color.gif.npy')
    arr1 = np.load('tests/data/img_arrays/nut_grayscale.gif.npy')
    samples = [('nut_color', 1), ('nut_grayscale', 2)]
    # '*' in the path pattern is substituted by the column value
    imagepath = 'tests/data/img_formats/*.gif'
    img_samples = samples >> ReadImage(0, imagepath) >> Collect()
    nt.assert_equal(img_samples[0][0], arr0)
    nt.assert_equal(img_samples[1][0], arr1)
    # other columns are passed through unchanged
    assert img_samples[0][1] == 1
    assert img_samples[1][1] == 2
    # the path can also be computed by a function of the sample
    pathfunc = lambda sample: 'tests/data/img_formats/{0}.gif'.format(*sample)
    img_samples = samples >> ReadImage(0, pathfunc) >> Collect()
    nt.assert_equal(img_samples[0][0], arr0)
    nt.assert_equal(img_samples[1][0], arr1)
    # as_grey toggles between HxWx3 color and HxW grayscale arrays
    samples = [('label', 'tests/data/img_formats/nut_color.gif')]
    img_samples = samples >> ReadImage(1, as_grey=False) >> Collect()
    assert img_samples[0][1].shape == (213, 320, 3)
    img_samples = samples >> ReadImage(1, as_grey=True) >> Collect()
    assert img_samples[0][1].shape == (213, 320)
    # column=None treats the sample itself as the file path
    samples = ['tests/data/img_formats/nut_color.gif']
    img_samples = samples >> ReadImage(None, as_grey=False) >> Collect()
    assert img_samples[0][0].shape == (213, 320, 3)
    # dtype controls the dtype of the returned array
    samples = ['tests/data/img_formats/nut_color.gif']
    img_samples = samples >> ReadImage(None, dtype=float) >> Collect()
    assert img_samples[0][0].dtype == float
def test_ReadPandas_isnull():
    """ReadPandas.isnull flags None/NaN but accepts real numbers (incl. 0)."""
    assert not ReadPandas.isnull(1.0)
    assert not ReadPandas.isnull(0)
    assert ReadPandas.isnull(None)
    # np.nan instead of np.NaN: the capitalized alias was removed in NumPy 2.0
    assert ReadPandas.isnull(np.nan)
def test_ReadPandas_pkl():
    """Create the pickled table fixture and verify the pickle round-trips.

    The original ended with a vacuous ``assert True``; now the pickled
    DataFrame is read back and compared against the source CSV data.
    """
    df = pd.read_csv('tests/data/pandas_table.csv')
    df.to_pickle('tests/data/pandas_table.pkl')
    assert pd.read_pickle('tests/data/pandas_table.pkl').equals(df)
def test_ReadPandas():
    """ReadPandas reads tables of several formats into named-tuple rows.

    Covers NaN dropping/replacement, column selection/reordering, custom
    row names and row filtering. Uses np.nan instead of the np.NaN alias
    that was removed in NumPy 2.0.
    """
    for ext in ['.pkl', '.csv', '.tsv', '.xlsx']:
        filepath = 'tests/data/pandas_table' + ext

        # dropnan=True removes rows that contain NaN values
        samples = ReadPandas(filepath, dropnan=True) >> Collect()
        Row = namedtuple('Row', 'col1,col2')
        expected = [Row(col1=1.0, col2=4.0), Row(col1=3.0, col2=6.0)]
        nt.assert_equal(samples, expected)

        # rowname sets the name of the generated named tuple
        samples = ReadPandas(filepath, dropnan=True, rowname='R') >> Collect()
        R = namedtuple('R', 'col1,col2')
        expected = [R(col1=1.0, col2=4.0), R(col1=3.0, col2=6.0)]
        nt.assert_equal(samples, expected)

        # dropnan=False keeps rows containing NaN
        samples = ReadPandas(filepath, dropnan=False) >> Collect()
        Row = namedtuple('Row', 'col1,col2')
        expected = [Row(col1=1.0, col2=4.0), Row(col1=2.0, col2=np.nan),
                    Row(col1=3.0, col2=6.0)]
        nt.assert_equal(samples, expected)

        # replacenan substitutes NaN with the given value (here None)
        samples = ReadPandas(filepath, replacenan=None) >> Collect()
        Row = namedtuple('Row', 'col1,col2')
        expected = [Row(col1=1.0, col2=4.0), Row(col1=2.0, col2=None),
                    Row(col1=3.0, col2=6.0)]
        nt.assert_equal(samples, expected)

        # colnames selects and reorders columns
        samples = ReadPandas(filepath, colnames=['col2', 'col1']) >> Collect()
        Row = namedtuple('Row', 'col2,col1')
        expected = [Row(col2=4.0, col1=1.0), Row(col2=6.0, col1=3.0)]
        nt.assert_equal(samples, expected)

        samples = ReadPandas(filepath, colnames=['col1']) >> Collect()
        Row = namedtuple('Row', 'col1')
        expected = [Row(col1=1.0), Row(col1=2.0), Row(col1=3.0)]
        nt.assert_equal(samples, expected)

        samples = ReadPandas(filepath, colnames=['col2']) >> Collect()
        Row = namedtuple('Row', 'col2')
        expected = [Row(col2=4.0), Row(col2=6.0)]
        nt.assert_equal(samples, expected)

        samples = ReadPandas(filepath,
                             colnames=['col2'], replacenan='NA') >> Collect()
        Row = namedtuple('Row', 'col2')
        expected = [Row(col2=4.0), Row(col2='NA'), Row(col2=6.0)]
        nt.assert_equal(samples, expected)

        # rows filters with a boolean expression string over the columns
        samples = ReadPandas(filepath,
                             rows='col1 > 1', replacenan=0) >> Collect()
        Row = namedtuple('Row', 'col1,col2')
        expected = [Row(col1=2.0, col2=0), Row(col1=3.0, col2=6.0)]
        nt.assert_equal(samples, expected)

        samples = ReadPandas(filepath,
                             rows='col1 < 3', colnames=['col1']) >> Collect()
        Row = namedtuple('Row', 'col1')
        expected = [Row(col1=1), Row(col1=2)]
        nt.assert_equal(samples, expected)
| 6,107 | 37.904459 | 78 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_batcher.py | """
.. module:: test_batcher
:synopsis: Unit tests for batcher module
"""
import pytest
import numpy as np
import nutsml.batcher as nb
from pytest import approx
from nutsflow import Collect, Consume
def test_build_number_batch():
    """build_number_batch turns a list of numbers into a typed numpy array."""
    values = [1, 2, 3, 1]
    batch = nb.build_number_batch(values, 'uint8')
    assert batch.dtype == np.uint8
    assert np.array_equal(batch, values)
def test_build_one_hot_batch():
    """build_one_hot_batch one-hot encodes class ids into a 2D array."""
    class_ids = [0, 1, 2, 1]
    batch = nb.build_one_hot_batch(class_ids, 'uint8', 3)
    expected = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0]])
    assert batch.dtype == np.uint8
    assert np.array_equal(batch, expected)
def test_build_vector_batch():
    """build_vector_batch stacks vectors; empty input raises ValueError."""
    vectors = [np.array([1, 2, 3]), np.array([2, 3, 4])]
    batch = nb.build_vector_batch(vectors, 'uint8')
    expected = np.array([[1, 2, 3], [2, 3, 4]], dtype='uint8')
    assert batch.dtype == np.uint8
    assert np.array_equal(batch, expected)

    with pytest.raises(ValueError) as ex:
        nb.build_vector_batch([], 'uint8')
    assert str(ex.value).startswith('No vectors ')
def test_build_tensor_batch():
    """build_tensor_batch stacks tensors with optional expand/transpose."""
    tensors = [np.zeros((2, 3, 2)), np.ones((2, 3, 2))]
    batch = nb.build_tensor_batch(tensors, 'uint8')
    expected = np.stack(tensors)
    assert batch.dtype == np.uint8
    assert np.array_equal(batch, expected)
    # expand inserts a new axis at the given position in each tensor
    batch = nb.build_tensor_batch(tensors, 'uint8', expand=0)
    expected = np.stack([np.expand_dims(t, 0) for t in tensors])
    assert np.array_equal(batch, expected)
    # axes transposes each tensor before stacking
    batch = nb.build_tensor_batch(tensors, float, axes=(1, 0, 2))
    expected = np.stack([np.transpose(t, (1, 0, 2)) for t in tensors])
    assert batch.dtype == float
    assert np.array_equal(batch, expected)
    # empty input is rejected
    with pytest.raises(ValueError) as ex:
        nb.build_tensor_batch([], 'uint8')
    assert str(ex.value).startswith('No tensors ')
def test_build_image_batch():
    """build_image_batch stacks images, adding/moving the channel axis."""
    # grayscale (no channel axis), channel axis prepended
    images = [np.zeros((10, 5)), np.ones((10, 5))]
    batch = nb.build_image_batch(images, 'uint8', channelfirst=True)
    assert batch.dtype == np.uint8
    assert batch.shape == (2, 1, 10, 5)
    assert np.array_equal(batch[0], np.zeros((1, 10, 5)))
    assert np.array_equal(batch[1], np.ones((1, 10, 5)))
    # grayscale, channel axis appended
    images = [np.zeros((10, 5)), np.ones((10, 5))]
    batch = nb.build_image_batch(images, 'uint8', channelfirst=False)
    assert batch.shape == (2, 10, 5, 1)
    assert np.array_equal(batch[0], np.zeros((10, 5, 1)))
    assert np.array_equal(batch[1], np.ones((10, 5, 1)))
    # explicit single-channel axis gets moved to the front
    images = [np.zeros((10, 5, 1)), np.ones((10, 5, 1))]
    batch = nb.build_image_batch(images, 'uint8', channelfirst=True)
    assert batch.shape == (2, 1, 10, 5)
    assert np.array_equal(batch[0], np.zeros((1, 10, 5)))
    assert np.array_equal(batch[1], np.ones((1, 10, 5)))
    # RGB images keep the channel axis last with channelfirst=False
    images = [np.zeros((10, 5, 3)), np.ones((10, 5, 3))]
    batch = nb.build_image_batch(images, 'uint8', channelfirst=False)
    assert batch.shape == (2, 10, 5, 3)
    assert np.array_equal(batch[0], np.zeros((10, 5, 3)))
    assert np.array_equal(batch[1], np.ones((10, 5, 3)))
def test_build_image_batch_exceptions():
    """build_image_batch rejects empty, channel-first and mixed-shape input."""
    with pytest.raises(ValueError) as ex:
        nb.build_image_batch([], 'uint8')
    assert str(ex.value).startswith('No images ')
    # the channel axis must be last (or absent), not first
    with pytest.raises(ValueError) as ex:
        images = [np.zeros((3, 10, 5)), np.ones((3, 10, 5))]
        nb.build_image_batch(images, 'uint8')
    assert str(ex.value).startswith('Channel not at last axis')
    # all images must share the same shape
    with pytest.raises(ValueError) as ex:
        images = [np.zeros((10, 5)), np.ones((15, 5))]
        nb.build_image_batch(images, 'uint8')
    assert str(ex.value).startswith('Images vary in shape')
def test_BuildBatch():
    """BuildBatch assembles samples into (inputs, outputs) column batches."""
    numbers = [4.1, 3.2, 1.1]
    vectors = [np.array([1, 2, 3]), np.array([2, 3, 4]), np.array([3, 4, 5])]
    images = [np.zeros((5, 3)), np.ones((5, 3)), np.ones((5, 3))]
    class_ids = [1, 2, 1]
    samples = zip(numbers, vectors, images, class_ids)
    # three input columns + one one-hot output column, batch size 2
    build_batch = (nb.BuildBatch(2, prefetch=0)
                   .input(0, 'number', float)
                   .input(1, 'vector', np.uint8)
                   .input(2, 'image', np.uint8, False)
                   .output(3, 'one_hot', 'uint8', 3))
    batches = samples >> build_batch >> Collect()
    assert len(batches) == 2
    batch = batches[0]
    assert len(batch) == 2, 'Expect inputs and outputs'
    ins, outs = batch
    assert len(ins) == 3, 'Expect three input columns in batch'
    assert len(outs) == 1, 'Expect one output column in batch'
    # every column matches what the corresponding build_* helper produces
    assert np.array_equal(ins[0], nb.build_number_batch(numbers[:2], float))
    assert np.array_equal(ins[1], nb.build_vector_batch(vectors[:2], 'uint8'))
    assert np.array_equal(ins[2], nb.build_image_batch(images[:2], 'uint8'))
    assert np.array_equal(outs[0],
                          nb.build_one_hot_batch(class_ids[:2], 'uint8', 3))
def test_BuildBatch_exceptions():
    """Unknown output builder names raise a ValueError."""
    samples = zip([4.1, 3.2], [1, 2])
    with pytest.raises(ValueError) as ex:
        builder = (nb.BuildBatch(2, prefetch=0)
                   .input(0, 'number', float)
                   .output(1, 'invalid', 'uint8', 3))
        samples >> builder >> Collect()
    assert str(ex.value).startswith('Invalid builder')
def test_BuildBatch_prefetch():
    """Batches built with prefetch=1 contain the same data as without."""
    samples = [[1], [2]]
    builder = nb.BuildBatch(2, prefetch=1).input(0, 'number', 'uint8')
    batches = samples >> builder >> Collect()
    assert np.array_equal(batches[0][0], np.array([1, 2], dtype='uint8'))
def test_Mixup():
    """Mixup blends inputs and outputs of a batch; alpha=0 is a no-op."""
    numbers1 = [1, 2, 3]
    numbers2 = [4, 5, 6]
    samples = list(zip(numbers1, numbers2))
    build_batch = (nb.BuildBatch(3, prefetch=0)
                   .input(0, 'number', float)
                   .output(1, 'number', float))

    # no mixup, return original batch
    mixup = nb.Mixup(0.0)
    batches = samples >> build_batch >> mixup >> Collect()
    inputs, outputs = batches[0]
    assert list(inputs[0]) == numbers1
    assert list(outputs[0]) == numbers2

    # mixup with alpha=1.0: mixed values stay within the batch value range
    mixup = nb.Mixup(1.0)
    batches = samples >> build_batch >> mixup >> Collect()
    for input, output in batches:
        input, output = input[0], output[0]
        assert min(input) >= 1 and max(input) <= 3
        assert min(output) >= 4 and max(output) <= 6
        # inputs and outputs must be mixed with the same ratio
        ri, ro = input[0] - samples[0][0], output[0] - samples[0][1]
        assert approx(ri, 1e-3) == ro
| 6,437 | 34.766667 | 78 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_datautil.py | """
.. module:: test_datautil
:synopsis: Unit tests for datautil module
"""
import pytest
import numpy as np
import collections as cl
import nutsml.datautil as util
from collections import namedtuple
from nutsflow.common import StableRandom
@pytest.fixture(scope="function")
def sampleset():
    """Return list with 50 positive and 10 negative samples"""
    positives = [(0, i) for i in range(50)]
    negatives = [(1, i) for i in range(10)]
    return positives + negatives
def test_random_upsample(sampleset):
    """upsample duplicates minority samples until label counts are equal."""
    samples = [('pos', 1), ('pos', 1), ('neg', 0)]
    stratified = sorted(util.upsample(samples, 1, rand=StableRandom(0)))
    assert stratified == [('neg', 0), ('neg', 0), ('pos', 1), ('pos', 1)]
    # larger set: both labels are raised to the majority count (50)
    stratified1 = util.upsample(sampleset, 0, rand=StableRandom(0))
    _, labelcnts = util.group_samples(stratified1, 0)
    assert labelcnts == {0: 50, 1: 50}
    # a different seed produces a differently ordered result
    stratified2 = util.upsample(sampleset, 0, rand=StableRandom(1))
    assert stratified1 != stratified2, 'Order should be random'
def test_random_downsample(sampleset):
    """random_downsample removes majority samples until counts are equal."""
    samples = [('pos', 1), ('pos', 1), ('neg', 0)]
    stratified = sorted(
        util.random_downsample(samples, 1, rand=StableRandom(0)))
    assert stratified == [('neg', 0), ('pos', 1)]
    # larger set: both labels are reduced to the minority count (10)
    stratified1 = util.random_downsample(sampleset, 0, rand=StableRandom(0))
    _, labelcnts = util.group_samples(stratified1, 0)
    assert labelcnts == {0: 10, 1: 10}
    # a different seed produces a different result
    stratified2 = util.random_downsample(sampleset, 0, rand=StableRandom(1))
    assert stratified1 != stratified2, 'Order should be random'
def test_group_samples():
    """group_samples groups by the label column and counts labels."""
    samples = [('pos', 1), ('pos', 1), ('neg', 0)]
    groups, counts = util.group_samples(samples, 1)
    assert groups == {0: [('neg', 0)], 1: [('pos', 1), ('pos', 1)]}
    assert counts == cl.Counter({1: 2, 0: 1})
def test_group_by():
    """group_by partitions elements by the value of a key function."""
    is_odd = lambda n: bool(n % 2)
    assert util.group_by([0, 1, 2, 3, 4], is_odd) == {False: [0, 2, 4],
                                                      True: [1, 3]}
    assert util.group_by([1, 3], is_odd) == {True: [1, 3]}
    assert util.group_by([], is_odd) == dict()
def test_col_map():
    """col_map applies a function to one or several sample columns."""
    add_n = lambda value, n: value + n
    sample = (1, 2, 3)
    assert util.col_map(sample, 1, add_n, 10) == (1, 12, 3)
    assert util.col_map(sample, (0, 2), add_n, 10) == (11, 2, 13)
def test_shuffle_sublists():
    """shuffle_sublists shuffles each sublist in place, seeded => stable."""
    sublists = [[1, 2, 3], [4, 5, 6, 7]]
    util.shuffle_sublists(sublists, StableRandom(0))
    assert sublists == [[1, 3, 2], [4, 5, 7, 6]]
| 2,437 | 30.662338 | 77 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_plotter.py | """
.. module:: test_plotter
:synopsis: Unit tests for plotter module
"""
from __future__ import print_function
import os
import numpy as np
import nutsml.imageutil as ni
import nutsml.plotter as pl
import numpy.testing as nt
from nutsflow import Collect
# Set to True to create test data
CREATE_DATA = False
def assert_equal_image(imagepath, image, rtol=0.01, atol=0.01):
    """Assert that `image` is numerically close to the image at `imagepath`.

    With the module flag CREATE_DATA enabled the reference image is
    (re)written first, turning the check into a regeneration step.
    """
    if CREATE_DATA:
        ni.save_image(imagepath, image)
    expected = ni.load_image(imagepath)
    nt.assert_allclose(expected, image, rtol=rtol, atol=atol)
# TODO: This test is successful when run individually
# pytest tests\nutsml\test_plotter.py
# but fails when running as part of the test suite.
def DISABLED_test_plotlines():
    """Plot sin/cos lines to a PNG and compare against the stored image."""
    filepath = 'tests/data/temp_plotlines.png'
    xs = np.arange(0, 6.3, 1.2)
    ysin, ycos = np.sin(xs), np.cos(xs)
    # Materialize the samples: zip() is a one-shot iterator in Python 3, so
    # comparing `out == data` after the pipeline consumed `data` could never
    # succeed with the original bare zip object.
    data = list(zip(xs, ysin, ycos))
    out = data >> pl.PlotLines(1, 0, filepath=filepath) >> Collect()
    assert out == data
    expected = 'tests/data/img/plotlines.png'
    image = ni.load_image(filepath)
    os.remove(filepath)
    assert_equal_image(expected, image)
| 1,111 | 24.860465 | 68 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_network.py | """
.. module:: test_network
:synopsis: Unit tests for network module
"""
import pytest
import numpy as np
from nutsflow import Collect, GetCols, Print
from nutsml.network import Network, TrainValNut, PredictNut, EvalNut
class FakeModel(object):
    """In-memory model stand-in; records the weight paths it was given."""

    def __init__(self):
        self.saved_weights = None
        self.loaded_weights = None

    def train(self, X, y):
        """Fake training step: input sums shifted by one."""
        return (sum(X) + 1, sum(y) + 1)

    def validate(self, X, y):
        """Fake validation step: input sums shifted by two."""
        return (sum(X) + 2, sum(y) + 2)

    def predict(self, X):
        """Identity prediction."""
        return X

    def save_weights(self, weightspath):
        self.saved_weights = weightspath

    def load_weights(self, weightspath):
        self.loaded_weights = weightspath

    def summary(self):
        return "network summary"
class FakeNetwork(Network):
    """Network implementation backed by FakeModel, used by the tests below."""

    def __init__(self, model, weightspath):
        Network.__init__(self, weightspath)
        self.model = model

    def train(self):
        # wrap the model's train function in a train/validate nut
        return TrainValNut(self.model.train)

    def validate(self):
        return TrainValNut(self.model.validate)

    def predict(self, flatten=True):
        return PredictNut(self.model.predict, flatten)

    def evaluate(self, metrics, predcol=None):
        # compute simply applies each metric to (targets, predictions)
        def compute(metric, targets, preds):
            return metric(targets, preds)

        return EvalNut(self, metrics, compute, predcol)

    def save_weights(self, weightspath=None):
        # fall back to the network's default weightspath when none is given
        weightspath = super(FakeNetwork, self)._weightspath(weightspath)
        self.model.save_weights(weightspath)

    def load_weights(self, weightspath=None):
        weightspath = super(FakeNetwork, self)._weightspath(weightspath)
        self.model.load_weights(weightspath)

    def print_layers(self):
        self.model.summary()
def test_TrainValNut():
    """TrainValNut applies its function to each (X, y) batch."""
    nut = TrainValNut(lambda X, y: X + y)
    assert [(1, 2), (3, 4)] >> nut >> Collect() == [3, 7]
def test_PredictNut():
    """PredictNut with flatten=True yields individual predictions."""
    nut = PredictNut(lambda X: X, flatten=True)
    assert [(1, 2), (3, 4)] >> nut >> Collect() == [1, 2, 3, 4]
def test_EvalNut():
    """EvalNut aggregates a metric over (input, target) batches."""
    model = FakeModel()
    network = FakeNetwork(model, 'dummy_filepath')
    acc = lambda X, y: np.sum(X == y)  # number of matching elements
    compute = lambda m, t, p: m(t, p)
    nut = EvalNut(network, [acc], compute)
    batches = [((1, 2), (1, 2)), ((5, 6), (5, 6))]
    assert batches >> nut == 4
    batches = [((1, 2), (1, 2)), ((5, 0), (5, 6))]
    assert batches >> nut == 3
    # predcol selects which prediction column the metric is computed on
    batches = [(((0, 1), (0, 2)), (0, 2)), (((5, 5), (6, 6)), (6, 6))]
    nut = EvalNut(network, [acc], compute, predcol=1)
    assert batches >> nut == 4
    nut = EvalNut(network, [acc], compute, predcol=0)
    assert batches >> nut == 1
def test_Network_constructor():
    """Network stores the weightspath given at construction."""
    network = Network('dummy_filepath')
    assert network.weightspath == 'dummy_filepath'
def test_Network_weightspath():
    """_weightspath falls back to the stored path when None is given."""
    network = Network('dummy_filepath')
    assert network._weightspath(None) == 'dummy_filepath'
    assert network._weightspath('new_path') == 'new_path'
def test_Network_save_load_weights():
    """save/load_weights delegate to the model using the default path."""
    model = FakeModel()
    network = FakeNetwork(model, 'dummy_filepath')
    assert model.saved_weights is None
    assert model.loaded_weights is None
    network.save_weights()
    assert model.saved_weights == 'dummy_filepath'
    network.load_weights()
    assert model.loaded_weights == 'dummy_filepath'
def test_Network_save_best():
    """save_best stores weights and updates best_score only on improvement."""
    model = FakeModel()
    weightspath = 'dummy_filepath'
    network = FakeNetwork(model, weightspath)
    network.save_best(2.0)
    assert network.best_score == 2.0
    assert model.saved_weights == weightspath
    # lower score is better when isloss=True
    network.save_best(1.0, isloss=True)
    assert network.best_score == 1.0
    # higher score is better when isloss=False
    network.save_best(3.0, isloss=False)
    assert network.best_score == 3.0
def test_Network_exceptions():
    """Every abstract Network method raises NotImplementedError."""
    network = Network('dummy_filepath')
    # (callable, expected message) pairs for each abstract method
    cases = [
        (lambda: network.train(), 'Implement train()!'),
        (lambda: network.validate(), 'Implement validate()!'),
        (lambda: network.predict(), 'Implement predict()!'),
        (lambda: network.evaluate([]), 'Implement evaluate()!'),
        (lambda: network.save_weights(), 'Implement save_weights()!'),
        (lambda: network.load_weights(), 'Implement load_weights()!'),
        (lambda: network.print_layers(), 'Implement print_layers()!'),
    ]
    for call, message in cases:
        with pytest.raises(NotImplementedError) as ex:
            call()
        assert str(ex.value) == message
def test_Network():
    """End-to-end check of train/validate/predict/evaluate on FakeNetwork."""
    model = FakeModel()
    weightspath = 'dummy_filepath'
    network = FakeNetwork(model, weightspath)
    assert network.weightspath == weightspath
    batches = [((1, 2), (3, 4)), ((5, 6), (7, 8))]
    # FakeModel.train returns (sum(X)+1, sum(y)+1); validate adds 2 instead
    train_err = batches >> network.train() >> Collect()
    assert train_err == [(4, 8), (12, 16)]
    val_err = batches >> network.validate() >> Collect()
    assert val_err == [(5, 9), (13, 17)]
    # prediction is the identity; flatten controls per-sample unpacking
    prediction = batches >> GetCols(0) >> network.predict() >> Collect()
    assert prediction == [(1, 2), (5, 6)]
    prediction = batches >> GetCols(0) >> network.predict(False) >> Collect()
    assert prediction == [((1, 2),), ((5, 6),)]
    batches = [((1, 2), (1, 2)), ((5, 6), (5, 6))]
    acc = lambda X, y: np.sum(X == y)  # count of matching elements
    assert batches >> network.evaluate([acc]) == 4
    # predcol selects which prediction column the metric is computed on
    batches = [(((0, 1), (0, 2)), (0, 2)), (((5, 5), (6, 6)), (6, 6))]
    assert batches >> network.evaluate([acc], predcol=1) == 4
    assert batches >> network.evaluate([acc], predcol=0) == 1
| 5,786 | 27.367647 | 77 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_logger.py | """
.. module:: test_logger
:synopsis: Unit tests for logger module
"""
import pytest
import os
import numpy as np
from nutsflow import Collect
from nutsml import LogToFile, LogCols
@pytest.fixture(scope='function')
def filepath():
    """Provide a temp CSV path and delete the file after the test.

    Bug fix: the original defined a ``fin()`` cleanup function but never
    registered it (the fixture took no ``request`` and never called
    ``addfinalizer``), so the temp file leaked between tests. A yield
    fixture runs the cleanup reliably after each test.
    """
    filepath = 'tests/data/temp_logger.csv'
    yield filepath
    if os.path.exists(filepath):
        os.remove(filepath)
@pytest.mark.filterwarnings('ignore:LogCols is deprecated. Use LogToFile!')
def test_LogCols(filepath):
    """Deprecated LogCols still logs sample columns to a CSV file."""
    data = [[1, 2], [3, 4]]
    with LogCols(filepath) as logcols:
        assert data >> logcols >> Collect() == data
    with open(filepath) as f:
        assert f.read() == '1,2\n3,4\n'
def test_LogToFile(filepath):
    """LogToFile logs samples to a file; delimiter/cols/colnames options."""
    data = [[1, 2], [3, 4]]
    with LogToFile(filepath) as logtofile:
        assert data >> logtofile >> Collect() == data
    with open(filepath) as f:
        assert f.read() == '1,2\n3,4\n'
    # custom delimiter between columns
    with LogToFile(filepath, delimiter='; ') as logtofile:
        assert data >> logtofile >> Collect() == data
    with open(filepath) as f:
        assert f.read() == '1; 2\n3; 4\n'
    # cols selects columns; reusing the same logger appends to the file
    with LogToFile(filepath, cols=0, reset=True) as logtofile:
        assert data >> logtofile >> Collect() == data
        assert data >> logtofile >> Collect() == data
    with open(filepath) as f:
        assert f.read() == '1\n3\n1\n3\n'
    # colnames writes a header line; cols can also reorder columns
    with LogToFile(filepath, cols=(1, 0), colnames=('a', 'b')) as logtofile:
        assert data >> logtofile >> Collect() == data
    with open(filepath) as f:
        assert f.read() == 'a,b\n2,1\n4,3\n'
def test_LogToFile_reset(filepath):
    """reset=True truncates the log file, reset=False appends to it."""
    data = [[1, 2], [3, 4]]
    with LogToFile(filepath, cols=0, reset=True) as logtofile:
        assert data >> logtofile >> Collect() == data
    with open(filepath) as f:
        assert f.read() == '1\n3\n'
    # reset=False keeps the existing content and appends column 1
    with LogToFile(filepath, cols=1, reset=False) as logtofile:
        assert data >> logtofile >> Collect() == data
    with open(filepath) as f:
        assert f.read() == '1\n3\n2\n4\n'
def test_LogToFile_numpy(filepath):
    """LogToFile also handles numpy arrays and zero-dimensional arrays."""
    data = [np.array([1, 2]), np.array([3, 4])]
    with LogToFile(filepath) as logtofile:
        assert data >> logtofile >> Collect() == data
    with open(filepath) as f:
        assert f.read() == '1,2\n3,4\n'
    # zero-dimensional arrays are logged as single values
    data = [np.array(1), np.array(2)]
    with LogToFile(filepath) as logtofile:
        assert data >> logtofile >> Collect() == data
    with open(filepath) as f:
        assert f.read() == '1\n2\n'
def test_LogToFile_delete(filepath):
    """delete() removes the log file from disk."""
    data = [[1, 2], [3, 4]]
    logtofile = LogToFile(filepath)
    assert data >> logtofile >> Collect() == data
    assert os.path.exists(filepath)

    logtofile.delete()
    assert not os.path.exists(filepath)
| 2,699 | 27.125 | 76 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_booster.py | """
.. module:: test_booster
:synopsis: Unit tests for booster module
"""
import pytest
import numpy as np
from nutsflow import Collect
from nutsflow.common import shapestr
from nutsml.network import Network, PredictNut
from nutsml import Boost, BuildBatch
def predict_all_positive(batch):
    """Predict one-hot 'positive' ([0, 1]) for every sample in the batch."""
    positive = np.array([0.0, 1.0])
    return [positive.copy() for _ in batch]
def predict_all_negative(batch):
    """Predict one-hot 'negative' ([1, 0]) for every sample in the batch."""
    negative = np.array([1.0, 0.0])
    return [negative.copy() for _ in batch]
def predict_all_perfect(batch):
    """Return the correct one-hot for each number (values >= 2 are positive)."""
    pos = np.array([0.0, 1.0])
    neg = np.array([1.0, 0.0])
    return [pos if number >= 2 else neg for number in batch]
def predict_all_wrong(batch):
    """Return the inverted one-hot for each number (always misclassifies)."""
    pos = np.array([0.0, 1.0])
    neg = np.array([1.0, 0.0])
    return [neg if number >= 2 else pos for number in batch]
class FakeNetwork(Network):
    """Network stub whose predict nut applies a fixed prediction function."""

    def __init__(self, func):
        # NOTE(review): deliberately skips Network.__init__ (no weightspath)
        self.func = func

    def predict(self, flatten=True):
        return PredictNut(self.func, flatten)
def test_Boost():
    """Boost keeps only the samples the network misclassifies."""
    negatives = [(0, 0), (1, 0)]
    positives = [(2, 1), (3, 1), (4, 1)]
    samples = negatives + positives
    build_batch = (BuildBatch(3, prefetch=0)
                   .input(0, 'number', 'uint8')
                   .output(1, 'one_hot', 'uint8', 2))
    # an all-positive predictor gets exactly the negatives wrong
    network = FakeNetwork(predict_all_positive)
    boost = Boost(build_batch, network)
    boosted = samples >> boost >> Collect()
    assert boosted == negatives, 'Expect negatives boosted'
    # an all-negative predictor gets exactly the positives wrong
    network = FakeNetwork(predict_all_negative)
    boost = Boost(build_batch, network)
    boosted = samples >> boost >> Collect()
    assert boosted == positives, 'Expect positives boosted'
    # a perfect predictor leaves nothing to boost
    network = FakeNetwork(predict_all_perfect)
    boost = Boost(build_batch, network)
    boosted = samples >> boost >> Collect()
    assert boosted == [], 'Expect no samples left for boosting'
    # an always-wrong predictor boosts every sample
    network = FakeNetwork(predict_all_wrong)
    boost = Boost(build_batch, network)
    boosted = samples >> boost >> Collect()
    assert boosted == samples, 'Expect all samples boosted'
| 1,940 | 25.958333 | 63 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_stratify.py | """
.. module:: test_stratify
:synopsis: Unit tests for stratify module
"""
import pytest
from nutsflow import Collect, Sort, Get, CountValues
from nutsflow.common import StableRandom
from nutsml import Stratify, CollectStratified
def test_CollectStratified():
    """CollectStratified balances classes by up- or down-sampling."""
    rand = StableRandom(0)
    # mode='up': minority samples are duplicated up to the majority count
    samples = [('pos', 1), ('pos', 1), ('neg', 0)]
    stratify = CollectStratified(1, mode='up', rand=rand)
    stratified = samples >> stratify >> Sort()
    assert stratified == [('neg', 0), ('neg', 0), ('pos', 1), ('pos', 1)]
    # mode='downrnd': majority samples are randomly dropped to the minority count
    samples = [('pos', 1), ('pos', 1), ('pos', 1), ('neg1', 0), ('neg2', 0)]
    stratify = CollectStratified(1, mode='downrnd', rand=rand)
    stratified = samples >> stratify >> Sort()
    assert stratified == [('neg1', 0), ('neg2', 0), ('pos', 1), ('pos', 1)]
    # unknown modes are rejected
    with pytest.raises(ValueError) as ex:
        samples >> CollectStratified(1, mode='invalid')
    assert str(ex.value).startswith('Unknown mode')
def test_Stratify():
    """Stratify roughly balances the label distribution on every pass."""
    samples = [('pos', 1)] * 1000 + [('neg', 0)] * 100
    dist = samples >> CountValues(1)
    stratify = Stratify(1, dist, rand=StableRandom(0))

    stratified1 = samples >> stratify >> Collect()
    stratified2 = samples >> stratify >> Collect()
    # Stratification is randomized, so two passes should differ.
    assert stratified1 != stratified2

    dist1 = stratified1 >> Get(1) >> CountValues()
    print(dist1)
    assert dist1[0] == 100
    assert 90 < dist1[1] < 110

    dist2 = stratified2 >> Get(1) >> CountValues()
    print(dist2)
    # BUG FIX: these assertions previously re-checked dist1 (copy-paste),
    # leaving the second pass unverified; check dist2 instead.
    assert dist2[0] == 100
    assert 90 < dist2[1] < 110
| 1,507 | 29.16 | 76 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_viewer.py | """
.. module:: test_viewer
:synopsis: Unit tests for viewer module
"""
| 75 | 14.2 | 42 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_imageutil.py | """
.. module:: test_imageutil
:synopsis: Unit tests for imageutil module
"""
from __future__ import print_function
import pytest
import sys
import os
import os.path as op
import PIL as pil
import numpy as np
import skimage as ski
import nutsml.imageutil as ni
import numpy.testing as nt
import matplotlib.patches as plp
from warnings import warn
from glob import glob
from PIL import ImageEnhance as ie
# Set to True to create test data
CREATE_TEST_DATA = False
@pytest.fixture(scope="function")
def datadirs():
    """Return (imagedir, formatsdir, arraysdir, processeddir) test paths."""
    base = 'tests/data/'
    return (base + 'img/', base + 'img_formats/', base + 'img_arrays/',
            base + 'img_processed/')
def assert_equal_image(imagepath, image, rtol=0.01, atol=0.01):
    """Assert that image is close to the reference stored at imagepath."""
    if CREATE_TEST_DATA:  # regenerate reference data instead of comparing
        ni.save_image(imagepath, image)
    expected = ni.load_image(imagepath)
    absdiff = abs(expected - image).sum()
    print('absolute difference', absdiff)
    print('relative difference', 100.0 * absdiff / expected.sum())
    nt.assert_allclose(expected, image, rtol=rtol, atol=atol)
def test_load_image(datadirs):
    """Images load as uint8 arrays with the expected shape and value range."""
    h, w = 213, 320
    _, formatsdir, arraydir, _ = datadirs
    pathpattern = formatsdir + '*'
    for filepath in glob(pathpattern):
        img = ni.load_image(filepath)
        is_color = 'color' in filepath
        # BUG FIX: previously written as `assert shape == (h,w,3) if is_color
        # else (h,w)`, which parses as a conditional expression whose
        # else-branch is the truthy tuple (h, w) -- the grayscale shape was
        # never actually checked. Parenthesize the expected value instead.
        assert img.shape == ((h, w, 3) if is_color else (h, w))
        assert isinstance(img, np.ndarray)
        assert img.dtype == np.uint8
        assert np.max(img) <= 255
        assert np.min(img) >= 0
    for filepath in glob(pathpattern):
        img = ni.load_image(filepath, as_grey=True)
        assert img.shape == (h, w)
        assert np.max(img) <= 255
        assert np.min(img) >= 0
    for filepath in glob(pathpattern):
        img = ni.load_image(filepath)
        fdir, fname = op.split(filepath)
        arr = np.load(arraydir + fname + '.npy')
        if not filepath.endswith('.tif'):
            nt.assert_allclose(img, arr, rtol=0.1, atol=255)
def test_save_image(datadirs):
    """Round-trip save/load for every supported image format."""
    _, formatsdir, _, _ = datadirs
    for fmt in ['gif', 'png', 'jpg', 'bmp', 'tif', 'npy']:
        inpath = formatsdir + 'nut_color.' + fmt
        outpath = formatsdir + 'temp_nut_color.' + fmt
        image = ni.load_image(inpath)
        ni.save_image(outpath, image)
        loaded = ni.load_image(outpath)
        os.remove(outpath)
        # Saved and loaded JPG images can vary greatly (lossy format) when using
        # skimage under different OS and a direct comparison often fails.
        # Therefore, only shape and mean value are verified for JPG images.
        if fmt == 'jpg':
            assert abs(np.mean(image) - np.mean(loaded)) < 0.1
            assert loaded.shape == (213, 320, 3)
        else:
            nt.assert_allclose(image, loaded, rtol=0.1, atol=255)
def test_arr_to_pil():
    """uint8 numpy arrays convert to PIL images; other inputs raise."""
    rgb_img = ni.arr_to_pil(np.ones((5, 4, 3), dtype='uint8'))
    assert rgb_img.size == (4, 5)
    assert rgb_img.mode == 'RGB'

    gray_img = ni.arr_to_pil(np.ones((5, 4), dtype='uint8'))
    assert gray_img.size == (4, 5)
    assert gray_img.mode == 'L'

    with pytest.raises(ValueError) as ex:
        ni.arr_to_pil(np.ones((10,), dtype='float'))
    assert str(ex.value).startswith('Expect uint8 dtype but got')

    with pytest.raises(ValueError) as ex:
        ni.arr_to_pil(np.ones((10,), dtype='uint8'))
    assert str(ex.value).startswith('Expect gray scale or RGB image')
def test_pil_to_arr():
    """PIL images convert back to uint8 arrays; unsupported modes raise."""
    for shape in [(5, 4, 3), (5, 4)]:  # RGB and grayscale round trips
        original = np.ones(shape, dtype='uint8')
        roundtripped = ni.pil_to_arr(ni.arr_to_pil(original))
        nt.assert_allclose(original, roundtripped)
        assert roundtripped.dtype == np.uint8

    with pytest.raises(ValueError) as ex:
        hsv_img = pil.Image.fromarray(np.ones((5, 4, 3), dtype='uint8'),
                                      'HSV')
        ni.pil_to_arr(hsv_img)
    assert str(ex.value).startswith('Expect RBG or grayscale but got')
def test_set_default_order():
    """Missing 'order' key defaults to 0; an explicit value is kept."""
    without_order = {}
    ni.set_default_order(without_order)
    assert without_order['order'] == 0

    with_order = {'order': 1}
    ni.set_default_order(with_order)
    assert with_order['order'] == 1
def test_add_channel():
    """A channel axis is prepended or appended; wrong ranks raise."""
    gray = np.ones((10, 20))
    assert ni.add_channel(gray, True).shape == (1, 10, 20)
    assert ni.add_channel(gray, False).shape == (10, 20, 1)

    rgb = np.ones((10, 20, 3))
    assert ni.add_channel(rgb, True).shape == (3, 10, 20)
    assert ni.add_channel(rgb, False).shape == (10, 20, 3)

    for bad_shape in [(10,), (10, 20, 3, 1)]:  # 1D and 4D are rejected
        with pytest.raises(ValueError) as ex:
            ni.add_channel(np.ones(bad_shape), True)
        assert str(ex.value).startswith('Image must be 2 or 3 channel!')
def test_floatimg2uint8():
    """Float images are rescaled to the full uint8 range."""
    src = np.eye(10, 20, dtype=float)
    converted = ni.floatimg2uint8(src)
    assert converted.shape == src.shape
    assert np.min(converted) == 0
    assert np.max(converted) == 255
def test_rerange():
    """Values are mapped from [0, 1] to [-10, +20] with the given dtype."""
    src = np.eye(3, 4, dtype=float)
    mapped = ni.rerange(src, 0.0, 1.0, -10, +20, int)
    assert mapped.shape == src.shape
    assert mapped.dtype == int
    assert np.min(mapped) == -10
    assert np.max(mapped) == +20
def test_identical():
    """identical() returns the input image unchanged."""
    img = np.ones((5, 4, 3), dtype='uint8')
    nt.assert_allclose(img, ni.identical(img))
def test_crop():
    """Cropping works for color and grayscale and selects correct pixels."""
    color = np.ones((6, 7, 3), dtype='uint8')
    patch = ni.crop(color, 1, 2, 5, 5)
    assert patch.shape == (3, 4, 3)
    assert patch.dtype == np.uint8

    gray = np.ones((6, 7), dtype='uint8')
    assert ni.crop(gray, 1, 2, 5, 5).shape == (3, 4)

    ramp = np.reshape(np.arange(16, dtype='uint8'), (4, 4))
    nt.assert_allclose(np.array([[9, 10, 11], [13, 14, 15]]),
                       ni.crop(ramp, 1, 2, 5, 5))
def test_crop_square():
    """Square crop trims the longer side and keeps the dtype."""
    color = np.ones((6, 7, 3), dtype='uint8')
    squared = ni.crop_square(color)
    assert squared.shape == (6, 6, 3)
    assert squared.dtype == np.uint8

    gray = np.ones((6, 7), dtype='uint8')
    assert ni.crop_square(gray).shape == (6, 6)

    ramp = np.reshape(np.arange(12, dtype='uint8'), (4, 3))
    nt.assert_allclose(np.array([[3, 4, 5], [6, 7, 8], [9, 10, 11]]),
                       ni.crop_square(ramp))
def test_crop_center():
    """Center crop extracts the middle region; too-large crops raise."""
    ramp = np.reshape(np.arange(16, dtype='uint8'), (4, 4))
    nt.assert_allclose(np.array([[4, 5, 6], [8, 9, 10]]),
                       ni.crop_center(ramp, 3, 2))
    with pytest.raises(ValueError) as ex:
        ni.crop_center(ramp, 5, 6)
    assert str(ex.value).startswith('Image too small for crop')
def test_occlude():
    """Occlusion fills the requested region with a constant color."""
    base = np.ones((4, 5)).astype('uint8')
    occluded = ni.occlude(base, 3, 2, 2, 3, color=3)
    assert occluded.dtype == np.uint8
    nt.assert_allclose(np.array([[1, 1, 1, 1, 1], [1, 1, 3, 3, 1],
                                 [1, 1, 3, 3, 1], [1, 1, 3, 3, 1]],
                                dtype='uint8'), occluded)

    base = np.ones((4, 5)).astype('uint8')
    occluded = ni.occlude(base, 0.5, 0.5, 3, 2)  # fractional position
    assert occluded.dtype == np.uint8
    nt.assert_allclose(np.array([[1, 1, 1, 1, 1], [1, 0, 0, 0, 1],
                                 [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]],
                                dtype='uint8'), occluded)

    rgb = np.ones((2, 2, 3)).astype('uint8')
    color = (2, 3, 4)
    occluded = ni.occlude(rgb, 1, 1, 1, 1, color=color)
    assert tuple(occluded[1, 1, :]) == color
def test_enhance():
    """Brightness factor 0.0 turns the image completely black."""
    bright = np.ones((5, 4), dtype='uint8')
    darkened = ni.enhance(bright, ie.Brightness, 0.0)
    assert darkened.dtype == np.uint8
    nt.assert_allclose(np.zeros((5, 4), dtype='uint8'), darkened)
def test_contrast(datadirs):
    """change_contrast output matches the stored reference image."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_color.bmp')
    assert_equal_image(processeddir + 'nut_color_contrast.bmp',
                       ni.change_contrast(img, 0.5))
def test_brightness(datadirs):
    """change_brightness output matches the stored reference image."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_color.bmp')
    assert_equal_image(processeddir + 'nut_color_brightness.bmp',
                       ni.change_brightness(img, 0.5))
def test_sharpness(datadirs):
    """change_sharpness output matches the reference within tolerance."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_color.bmp')
    assert_equal_image(processeddir + 'nut_color_sharpness.bmp',
                       ni.change_sharpness(img, 0.5), rtol=0, atol=10)
def test_change_color(datadirs):
    """change_color output matches the reference within tolerance."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_color.bmp')
    assert_equal_image(processeddir + 'nut_color_color.bmp',
                       ni.change_color(img, 2.0), rtol=0.1, atol=255)
def test_extract_edges(datadirs):
    """extract_edges output matches the reference within tolerance."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_color.bmp')
    assert_equal_image(processeddir + 'nut_color_edges.bmp',
                       ni.extract_edges(img, 2.0), rtol=0.1, atol=255)
def test_gray2rgb(datadirs):
    """gray2rgb output matches the stored reference image."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_grayscale.bmp')
    assert_equal_image(processeddir + 'nut_grayscale_2rgb.bmp',
                       ni.gray2rgb(img))
def test_rgb2gray(datadirs):
    """rgb2gray output matches the stored reference image."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_color.bmp')
    assert_equal_image(processeddir + 'nut_color_2gray.bmp',
                       ni.rgb2gray(img))
def test_translate(datadirs):
    """translate() output matches the stored reference image.

    BUG FIX: this test was accidentally defined twice with identical
    bodies; the second definition silently shadowed the first, so the
    redundant duplicate is removed.
    """
    imagedir, _, _, processeddir = datadirs
    img_arr = ni.load_image(imagedir + 'nut_color.bmp')
    new_img = ni.translate(img_arr, 50, 100)
    imagepath = processeddir + 'nut_color_translated.bmp'
    assert_equal_image(imagepath, new_img)
def test_rotate(datadirs):
    """Rotation matches the reference image and rotates a matrix by 90 deg."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_color.bmp')
    rotated = ni.rotate(img, 45, order=1)
    assert_equal_image(processeddir + 'nut_color_rotated.bmp', rotated,
                       rtol=0.1, atol=0.1)

    eye = np.eye(4, dtype=np.uint8)
    anti_eye = np.array([[0, 0, 0, 1], [0, 0, 1, 0],
                         [0, 1, 0, 0], [1, 0, 0, 0]])
    nt.assert_allclose(ni.rotate(eye, 90), anti_eye)
def test_shear(datadirs):
    """shear() output matches the stored reference image."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_color.bmp')
    assert_equal_image(processeddir + 'nut_color_sheared.bmp',
                       ni.shear(img, 0.5))
def test_distort_elastic(datadirs):
    """Elastic distortion matches references for color and grayscale."""
    imagedir, _, _, processeddir = datadirs
    for name in ['nut_color', 'nut_grayscale']:
        img = ni.load_image(imagedir + name + '.bmp')
        distorted = ni.distort_elastic(img, 10, 5000)
        assert_equal_image(processeddir + name + '_elastic.bmp', distorted)
def test_mask_where():
    """mask_where returns the row/col coordinates of matching mask values."""
    mask = np.eye(3, dtype='uint8')
    coords = ni.mask_where(mask, 1)
    nt.assert_allclose(np.array([[0, 0], [1, 1], [2, 2]], dtype='int64'),
                       coords)
def test_mask_choice():
    """mask_choice samples coordinates reproducibly under a fixed seed."""
    np.random.seed(1)  # deterministic sampling
    coords = ni.mask_choice(np.eye(3, dtype='uint8'), 1, 2)
    nt.assert_allclose(np.array([[0, 0], [2, 2]], dtype='int64'), coords)
def test_extract_patch():
    """extract_patch cuts a patch of the given size at the given center."""
    img = np.reshape(np.arange(16, dtype='uint8'), (4, 4))
    patch = ni.extract_patch(img, (2, 3), 2, 2)
    nt.assert_allclose(np.array([[5, 6, 7], [9, 10, 11]], dtype='uint8'),
                       patch)
def test_patch_iter():
    """patch_iter yields patches of the given shape and stride."""
    img = np.reshape(np.arange(12), (3, 4))
    cases = [
        ((2, 2), 2, [np.array([[0, 1], [4, 5]]),
                     np.array([[2, 3], [6, 7]])]),
        ((2, 2), 3, [np.array([[0, 1], [4, 5]])]),
        ((1, 3), 1, [np.array([[0, 1, 2]]), np.array([[1, 2, 3]]),
                     np.array([[4, 5, 6]]), np.array([[5, 6, 7]]),
                     np.array([[8, 9, 10]]), np.array([[9, 10, 11]])]),
    ]
    for pshape, stride, expected in cases:
        patches = list(ni.patch_iter(img, pshape, stride))
        for patch, exp in zip(patches, expected):
            nt.assert_allclose(patch, exp)
@pytest.mark.filterwarnings(
    'ignore:Image is not contiguous and will be copied!')
def test_patch_iter_notcontiguous():
    """Non-contiguous (Fortran-order) images still iterate correctly."""
    img = np.asfortranarray(np.reshape(np.arange(12), (3, 4)))
    expected = [np.array([[0, 1], [4, 5]]), np.array([[2, 3], [6, 7]])]
    for patch, exp in zip(ni.patch_iter(img, (2, 2), 2), expected):
        nt.assert_allclose(patch, exp)
def test_patch_iter_warning():
    """A UserWarning is expected for non-contiguous images."""
    with pytest.warns(UserWarning):
        img = np.asfortranarray(np.reshape(np.arange(12), (3, 4)))
        list(ni.patch_iter(img, (2, 2), 2))
        # NOTE(review): this explicit warn() guarantees pytest.warns always
        # matches, so the test cannot fail even if patch_iter stops warning.
        # Presumably a workaround -- confirm whether it can be removed.
        warn("Image is not contiguous and will be copied!", UserWarning)
def test_patch_iter_3channel():
    """Patches of RGB images keep the channel axis intact."""
    img = np.reshape(np.arange(12 * 3), (3, 4, 3))
    expected = [
        np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17]]]),
        np.array([[[6, 7, 8], [9, 10, 11]], [[18, 19, 20], [21, 22, 23]]])]
    for patch, exp in zip(ni.patch_iter(img, (2, 2), 2), expected):
        nt.assert_allclose(patch, exp)
def test_centers_inside():
    """Only centers whose patch fits inside the image are kept."""
    centers = np.array([[1, 2], [0, 1]])
    image = np.zeros((3, 4))
    kept = ni.centers_inside(centers, image, (3, 3))
    nt.assert_allclose(np.array([[1, 2]]), kept)
    # a patch larger than the image keeps no centers at all
    assert not ni.centers_inside(centers, image, (4, 3)).size
def test_sample_mask():
    """Sampling respects label value, patch shape and sample count."""
    mask = np.zeros((3, 4))
    mask[1, 2] = 1
    no_centers = np.empty((0, 2))
    nt.assert_allclose(ni.sample_mask(mask, 1, (1, 1), 1), [[1, 2]])
    nt.assert_allclose(ni.sample_mask(mask, 1, (1, 1), 0), no_centers)
    nt.assert_allclose(ni.sample_mask(mask, 2, (1, 1), 1), no_centers)
    nt.assert_allclose(ni.sample_mask(mask, 1, (4, 3), 1), no_centers)
def test_sample_labeled_patch_centers():
    """Sampled centers carry the requested label as a third column."""
    mask = np.zeros((3, 4))
    mask[1, 2] = 1
    centers = ni.sample_labeled_patch_centers(mask, 1, (1, 1), 1, 0)
    nt.assert_allclose(centers, [[1, 2, 0]])
def test_sample_pn_patches():
    """One negative and one positive patch are sampled per image."""
    np.random.seed(0)  # fixed seed => deterministic patch positions
    mask = np.zeros((3, 4), dtype='uint8')
    img = np.reshape(np.arange(12, dtype='uint8'), (3, 4))
    mask[1, 2] = 255

    results = list(ni.sample_pn_patches(img, mask, (2, 2), 1, 1))
    assert len(results) == 2

    neg_img, neg_mask, neg_label = results[0]
    assert neg_label == 0
    nt.assert_allclose(neg_img, [[0, 1], [4, 5]])
    nt.assert_allclose(neg_mask, [[0, 0], [0, 0]])

    pos_img, pos_mask, pos_label = results[1]
    assert pos_label == 1
    nt.assert_allclose(pos_img, [[1, 2], [5, 6]])
    nt.assert_allclose(pos_mask, [[0, 0], [0, 255]])
def test_polyline2coords():
    """Polyline vertices expand to pixel coordinates along the line."""
    rows, cols = ni.polyline2coords([(0, 0), (2.1, 2.0), (2, 4)])
    nt.assert_allclose(rows, [0, 1, 2, 2, 3, 4])
    nt.assert_allclose(cols, [0, 1, 2, 2, 2, 2])
def test_annotation2coords():
    """Annotations of every kind expand to pixel coordinate tuples."""
    img = np.zeros((5, 5), dtype='uint8')
    # FIX: use np.nan -- the np.NaN alias was removed in NumPy 2.0.
    assert list(ni.annotation2coords(img, np.nan)) == []
    assert list(ni.annotation2coords(img, ())) == []
    anno = ('point', ((1, 1), (1, 2)))
    rr, cc = list(ni.annotation2coords(img, anno))[0]
    nt.assert_allclose(rr, [1])
    nt.assert_allclose(cc, [1])
    rr, cc = list(ni.annotation2coords(img, anno))[1]
    nt.assert_allclose(rr, [2])
    nt.assert_allclose(cc, [1])
    anno = ('circle', ((2, 2, 2),))
    rr, cc = list(ni.annotation2coords(img, anno))[0]
    nt.assert_allclose(rr, [1, 1, 1, 2, 2, 2, 3, 3, 3])
    nt.assert_allclose(cc, [1, 2, 3, 1, 2, 3, 1, 2, 3])
    anno = ('ellipse', ((2, 2, 2, 2, 0),))
    rr, cc = list(ni.annotation2coords(img, anno))[0]
    nt.assert_allclose(rr, [1, 1, 1, 2, 2, 2, 3, 3, 3])
    nt.assert_allclose(cc, [1, 2, 3, 1, 2, 3, 1, 2, 3])
    anno = ('ellipse', ((2, 2, 1, 3, 1.5),))
    rr, cc = list(ni.annotation2coords(img, anno))[0]
    nt.assert_allclose(rr, [1, 1, 2, 2, 2, 2, 2, 3, 3])
    nt.assert_allclose(cc, [1, 2, 0, 1, 2, 3, 4, 2, 3])
    anno = ('rect', ((1, 2, 2, 3),))
    rr, cc = list(ni.annotation2coords(img, anno))[0]
    nt.assert_allclose(rr, [2, 2, 3, 3, 4, 4])
    nt.assert_allclose(cc, [1, 2, 1, 2, 1, 2])
    anno = ('polyline', (((1, 2), (3, 2), (3, 4), (1, 4), (1, 2)),))
    rr, cc = list(ni.annotation2coords(img, anno))[0]
    print('sys.version_info', sys.version_info)
    print('scikit-image.__version', ski.__version__)
    if ski.__version__ == '0.17.2':
        # skd.polyline behaves incorrectly for version 0.17.2 :(
        nt.assert_allclose(rr, [2, 2, 3, 3])
        nt.assert_allclose(cc, [1, 2, 1, 2])
    else:
        nt.assert_allclose(rr, [2, 2, 2, 3, 3, 3, 4, 4, 4])
        nt.assert_allclose(cc, [1, 2, 3, 1, 2, 3, 1, 2, 3])
    anno = ('polyline', (((0, 0), (2, 2), (2, 4)),))
    rr, cc = list(ni.annotation2coords(img, anno))[0]
    nt.assert_allclose(rr, [0, 1, 2, 2, 3, 4])
    nt.assert_allclose(cc, [0, 1, 2, 2, 2, 2])
    with pytest.raises(ValueError) as ex:
        anno = ('point', ((10, 10),))
        rr, cc = list(ni.annotation2coords(img, anno))[0]
    assert str(ex.value).startswith('Annotation point:(10, 10) outside image')
    with pytest.raises(ValueError) as ex:
        anno = ('invalid', ((1,),))
        coords = list(ni.annotation2coords(img, anno))
    assert str(ex.value).startswith('Invalid kind of annotation')
def test_annotation2pltpatch():
    """Annotations map to the corresponding matplotlib patch types."""
    # FIX: use np.nan -- the np.NaN alias was removed in NumPy 2.0.
    assert list(ni.annotation2pltpatch(np.nan)) == []
    assert list(ni.annotation2pltpatch(())) == []
    anno = ('point', ((1, 1), (1, 2)))
    pltpatches = list(ni.annotation2pltpatch(anno))
    assert len(pltpatches) == 2
    assert isinstance(pltpatches[0], plp.CirclePolygon)
    assert isinstance(pltpatches[1], plp.CirclePolygon)
    anno = ('circle', ((2, 2, 2),))
    pltpatches = list(ni.annotation2pltpatch(anno))
    assert isinstance(pltpatches[0], plp.Circle)
    anno = ('rect', ((1, 2, 2, 3),))
    pltpatches = list(ni.annotation2pltpatch(anno))
    assert isinstance(pltpatches[0], plp.Rectangle)
    anno = ('polyline', (((1, 2), (3, 2), (3, 4), (1, 4), (1, 2)),))
    pltpatches = list(ni.annotation2pltpatch(anno))
    assert isinstance(pltpatches[0], plp.Polygon)
    anno = ('polyline', (((0, 0), (2, 2), (2, 4)),))
    pltpatches = list(ni.annotation2pltpatch(anno))
    assert isinstance(pltpatches[0], plp.Polygon)
    with pytest.raises(ValueError) as ex:
        anno = ('invalid', ((1,),))
        list(ni.annotation2pltpatch(anno))
    assert str(ex.value).startswith('Invalid kind of annotation')
def test_annotation2mask():
    """Point annotations become 255-valued pixels in the mask."""
    img = np.zeros((3, 3), dtype='uint8')
    mask = ni.annotation2mask(img, ('point', ((0, 1), (2, 0))))
    nt.assert_allclose(mask,
                       np.array([[0, 0, 255], [255, 0, 0], [0, 0, 0]],
                                dtype='uint8'))
# Note: For some reason this test causes the unrelated test_annotation2coords
# to fail when executed before. This is related to ske.equalize_adapthist but
# how is unknown.
@pytest.mark.skip(reason="Temporarily disabled :(")
@pytest.mark.filterwarnings('ignore:Possible precision loss')
def test_normalize_histo(datadirs):
    """normalize_histo output matches the stored reference (disabled)."""
    imagedir, _, _, processeddir = datadirs
    img = ni.load_image(imagedir + 'nut_color.bmp')
    assert_equal_image(processeddir + 'nut_color_normalize_histo.bmp',
                       ni.normalize_histo(img))
| 20,658 | 32.59187 | 80 | py |
nuts-ml | nuts-ml-master/tests/nutsml/__init__.py | 0 | 0 | 0 | py | |
nuts-ml | nuts-ml-master/tests/nutsml/test_metrics.py | """
.. module:: test_metrics
:synopsis: Unit tests for metrics module
Note: In contrast to losses, which operate on batches, metricies operate on
entire collections of samples.
"""
import pytest
from pytest import approx
import nutsml.metrics as nm
def test_box_intersection():
    """Intersection of (x, y, w, h) boxes, including edge cases."""
    isect = nm.box_intersect
    # no overlap
    assert isect((0, 0, 1, 1), (2, 2, 0, 0)) == (0, 0, 0, 0)
    # perfect overlap
    assert isect((1, 2, 3, 4), (1, 2, 3, 4)) == (1, 2, 3, 4)
    # completely inside
    assert isect((1, 2, 3, 4), (0, 1, 4, 5)) == (1, 2, 3, 4)
    # symmetric
    assert isect((0, 1, 4, 5), (1, 2, 3, 4)) == (1, 2, 3, 4)
    # four edge cases
    assert isect((1 + 1, 2, 3, 4), (1, 2, 3, 4)) == (2, 2, 2, 4)
    assert isect((1, 2 + 1, 3, 4), (1, 2, 3, 4)) == (1, 3, 3, 3)
    assert isect((1, 2, 3 - 1, 4), (1, 2, 3, 4)) == (1, 2, 2, 4)
    assert isect((1, 2, 3, 4 - 1), (1, 2, 3, 4)) == (1, 2, 3, 3)
def test_box_iou():
    """Intersection-over-union of (x, y, w, h) boxes, incl. edge cases."""
    iou = nm.box_iou
    # no overlap
    assert iou((0, 0, 1, 1), (2, 2, 0, 0)) == 0.0
    # perfect overlap
    assert iou((1, 2, 3, 4), (1, 2, 3, 4)) == 1.0
    # completely inside
    assert iou((1, 2, 3, 4), (0, 1, 4, 5)) == 0.6
    # symmetric
    assert iou((0, 1, 4, 5), (1, 2, 3, 4)) == 0.6
    # four edge cases
    assert iou((1 + 1, 2, 3, 4), (1, 2, 3, 4)) == 0.5
    assert iou((1, 2 + 1, 3, 4), (1, 2, 3, 4)) == 0.6
    assert iou((1, 2, 3 - 1, 4), (1, 2, 3, 4)) == approx(0.66, 0.1)
    assert iou((1, 2, 3, 4 - 1), (1, 2, 3, 4)) == 0.75
def test_box_matches():
    """Every candidate box is paired with its IoU against the query box."""
    candidates = [(1, 1, 3, 3), (4, 2, 2, 3), (5, 5, 2, 1)]
    query = (2, 2, 3, 1)
    assert nm.box_matches(query, candidates) == [
        ((1, 1, 3, 3), 0.2), ((4, 2, 2, 3), 0.125), ((5, 5, 2, 1), 0.0)]
def test_box_best_match():
    """Best match is the highest-IoU box; an empty list gives (None, 0.0)."""
    candidates = [(1, 1, 3, 3), (4, 2, 2, 3), (5, 5, 2, 1)]
    query = (2, 2, 3, 1)
    assert nm.box_best_match(query, candidates) == ((1, 1, 3, 3), 0.2)
    assert nm.box_best_match(query, []) == (None, 0.0)
def test_box_pr_curve():
    """Precision/recall curve over score thresholds for box detection."""

    def rounded(prc):
        # Round precision/recall to two decimals for comparison.
        # FIX: renamed from `approx`, which shadowed pytest's approx
        # imported at module level.
        return [(round(p, 2), round(r, 2), s) for p, r, s in prc]

    boxes1 = [(1, 1, 3, 3), (4, 2, 2, 3), (5, 5, 2, 1)]
    boxes2 = [(2, 1, 2, 3), (4, 3, 2, 3)]
    scores1 = [0.5, 0.2, 0.1]
    scores2 = [0.5, 0.2]

    pr_curve = list(nm.box_pr_curve(boxes2, boxes2, scores2))
    assert pr_curve == [(1.0, 0.5, 0.5), (1.0, 1.0, 0.2)]

    pr_curve = list(nm.box_pr_curve(boxes1, boxes2, scores2))
    assert rounded(pr_curve) == [(1.0, 0.33, 0.5), (1.0, 0.67, 0.2)]

    pr_curve = list(nm.box_pr_curve(boxes2, boxes1, scores1))
    assert rounded(pr_curve) == [(1.0, 0.5, 0.5), (1.0, 1.0, 0.2),
                                 (0.67, 1.0, 0.1)]

    # no predictions or no ground truth yields an empty curve
    assert list(nm.box_pr_curve(boxes1, [], [])) == []
    assert list(nm.box_pr_curve([], boxes1, scores1)) == []
def test_box_avg_precision():
    """Average precision for predicted vs. true boxes, incl. empty inputs."""
    boxes1 = [(1, 1, 3, 3), (4, 2, 2, 3), (5, 5, 2, 1)]
    scores1 = [0.5, 0.2, 0.1]
    boxes2 = [(2, 1, 2, 3), (4, 3, 2, 3)]
    scores2 = [0.5, 0.2]
    assert nm.box_avg_precision(boxes2, boxes2, scores2) == 1.0
    assert nm.box_avg_precision(boxes1, boxes2, scores2) == approx(0.63,
                                                                   abs=1e-2)
    assert nm.box_avg_precision(boxes2, boxes1, scores1) == 1.0
    # empty predictions or empty ground truth give zero AP
    assert nm.box_avg_precision(boxes1, [], []) == 0.0
    assert nm.box_avg_precision([], boxes1, scores1) == 0.0
def test_box_mean_avg_precision():
    """Mean AP across class labels; empty inputs give zero."""
    boxes1 = [(1, 1, 3, 3), (4, 2, 2, 3), (5, 5, 2, 1)]
    labels1 = ['class1', 'class2', 'class1']
    scores1 = [0.5, 0.2, 0.1]
    boxes2 = [(2, 1, 2, 3), (4, 3, 2, 3)]
    labels2 = ['class1', 'class2']
    scores2 = [0.5, 0.2]
    mean_ap = nm.box_mean_avg_precision
    assert mean_ap(boxes1, labels1, boxes1, labels1, scores1) == 1.0
    assert mean_ap(boxes2, labels2, boxes2, labels2, scores2) == 1.0
    assert mean_ap(boxes1, labels1, [], [], []) == 0.0
    assert mean_ap([], [], boxes1, labels1, scores1) == 0.0
    assert mean_ap(boxes1, labels1, boxes2, labels2, scores2) == approx(
        0.77, abs=1e-2)
| 4,293 | 31.530303 | 80 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_writer.py | """
.. module:: test_writer
:synopsis: Unit tests for writer module
"""
import pytest
import os
import numpy as np
from nutsml.imageutil import load_image
from nutsflow import Collect, Get, Consume
from nutsml import ReadImage, WriteImage
def test_ImageWriter():
    """WriteImage stores images via path patterns, path funcs and names."""
    samples = [('nut_color', 1), ('nut_grayscale', 2)]
    inpath = 'tests/data/img_formats/*.bmp'
    img_samples = samples >> ReadImage(0, inpath) >> Collect()

    # write with an explicit name list and verify a pixel-exact round trip
    imagepath = 'tests/data/test_*.bmp'
    names = samples >> Get(0) >> Collect()
    img_samples >> WriteImage(0, imagepath, names) >> Consume()
    for sample, name in zip(img_samples, names):
        filepath = 'tests/data/test_{}.bmp'.format(name)
        arr = load_image(filepath)
        assert np.array_equal(arr, sample[0])
        os.remove(filepath)

    # write via a path function
    pathfunc = lambda sample, name: 'tests/data/test_{}.jpg'.format(name)
    img_samples >> WriteImage(0, pathfunc) >> Consume()
    for i, sample in enumerate(img_samples):
        filepath = 'tests/data/test_{}.jpg'.format(i)
        # BUG FIX: os.path.exists() results were previously discarded;
        # these checks never actually asserted anything.
        assert os.path.exists(filepath)
        os.remove(filepath)

    # write plain images (column=None) via a path function
    img_samples >> Get(0) >> WriteImage(None, pathfunc) >> Consume()
    for i, sample in enumerate(img_samples):
        filepath = 'tests/data/test_{}.jpg'.format(i)
        assert os.path.exists(filepath)
        os.remove(filepath)

    # write with a name function
    namefunc = lambda sample: 'img' + str(sample[1])
    img_samples >> WriteImage(0, imagepath, namefunc) >> Consume()
    for sample, name in zip(img_samples, ['test_img1', 'test_img2']):
        filepath = 'tests/data/{}.bmp'.format(name)
        assert os.path.exists(filepath)
        os.remove(filepath)

    with pytest.raises(ValueError) as ex:
        img_samples >> WriteImage(0, ()) >> Consume()
    assert str(ex.value).startswith('Expect path or function')
| 1,854 | 31.54386 | 73 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_transformer.py | """
.. module:: test_transformer
:synopsis: Unit tests for transformer module
"""
import pytest
import os
import numpy as np
import numpy.testing as nt
from nutsflow import Collect, Count
from nutsml.transformer import (map_transform, TransformImage, AugmentImage,
RegularImagePatches, RandomImagePatches,
ImagePatchesByMask, ImageAnnotationToMask,
ImagePatchesByAnnotation, ImageMean,
ImageChannelMean)
def test_map_transform():
    """map_transform applies a registered transformation to one column."""
    def fake_transformation(e, *a, **kw):
        return e, a, kw

    TransformImage.register('fake_trans', fake_transformation)
    sample = (1, 2, 3)

    spec = 'fake_trans'  # bare name => no extra args
    assert map_transform(sample, 0, spec) == ((1, (), {}), 2, 3)

    spec = ('fake_trans', [2], {'o': 2})  # name plus args and kwargs
    assert map_transform(sample, 1, spec) == (1, (2, (2,), {'o': 2}), 3)
def test_TransformImage():
    """Chained transformations are applied to the specified columns."""
    TransformImage.register('fake_trans1', lambda e: e + 1)
    TransformImage.register('fake_trans2', lambda e, x: e + x)
    samples = [(1, 2), (3, 4)]

    add_one = TransformImage(0).by('fake_trans1')
    assert samples >> add_one >> Collect() == [(2, 2), (4, 4)]

    chained = TransformImage((0, 1)).by('fake_trans1').by('fake_trans2', 3)
    assert samples >> chained >> Collect() == [(5, 6), (7, 8)]
def test_AugmentImage():
    """Augmentations are applied with the given probabilities or counts."""
    TransformImage.register('fake_trans1', lambda e: e + 1)
    TransformImage.register('fake_trans2', lambda e: e + 2)
    samples = [(1, 2), (3, 4)]

    both = AugmentImage(0).by('fake_trans1', 1.0).by('fake_trans2', 1.0)
    assert samples >> both >> Collect() == [(2, 2), (3, 2), (4, 4), (5, 4)]

    first_only = AugmentImage(0).by('fake_trans1', 1.0).by('fake_trans2', 0.0)
    assert samples >> first_only >> Collect() == [(2, 2), (4, 4)]

    # probability 0.5 keeps roughly half of 1000 samples
    many = [(x,) for x in range(1000)]
    kept = many >> AugmentImage(0).by('identical', 0.5) >> Count()
    assert 400 < kept < 600

    # a factor > 1 duplicates each sample that many times
    assert [(1,)] >> AugmentImage(0).by('identical', 10) >> Count() == 10
def test_RegularImagePatches():
    """Regular grid patches are extracted from the given columns."""
    img = np.reshape(np.arange(12), (3, 4))

    patched = [(img, 0)] >> RegularImagePatches(0, (2, 2), 2) >> Collect()
    expected = [(np.array([[0, 1], [4, 5]]), 0),
                (np.array([[2, 3], [6, 7]]), 0)]
    for (patch, label), (exp, exp_label) in zip(patched, expected):
        nt.assert_allclose(patch, exp)
        assert label == exp_label

    get_patches = RegularImagePatches((0, 1), (1, 1), 3)
    patched = [(img, img + 1)] >> get_patches >> Collect()
    expected = [(np.array([[0]]), np.array([[1]])),
                (np.array([[3]]), np.array([[4]]))]
    for patch_pair, exp_pair in zip(patched, expected):
        nt.assert_allclose(patch_pair, exp_pair)
def test_RandomImagePatches():
    """Random patches are reproducible under a fixed numpy seed."""
    img = np.reshape(np.arange(30), (5, 6))
    samples = [(img, 0)]
    np.random.seed(0)  # deterministic patch locations
    patches = samples >> RandomImagePatches(0, (3, 2), 2) >> Collect()
    assert len(patches) == 2
    expected = [np.array([[7, 8], [13, 14], [19, 20]]),
                np.array([[8, 9], [14, 15], [20, 21]])]
    for (patch, _), exp in zip(patches, expected):
        nt.assert_allclose(patch, exp)
def test_ImagePatchesByMask():
    """Image/mask patch pairs are sampled at mask positions."""
    img = np.reshape(np.arange(25), (5, 5))
    mask = np.eye(5, dtype='uint8') * 255
    samples = [(img, mask)]
    np.random.seed(0)  # fixed seed => deterministic sampling below
    get_patches = ImagePatchesByMask(0, 1, (3, 3), 1, 1, retlabel=False)
    patches = samples >> get_patches >> Collect()
    assert len(patches) == 2
    p, m = patches[0]
    img_patch0 = np.array([[12, 13, 14], [17, 18, 19], [22, 23, 24]])
    mask_patch0 = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])
    nt.assert_allclose(p, img_patch0)
    nt.assert_allclose(m, mask_patch0)
    p, m = patches[1]
    img_patch1 = np.array([[10, 11, 12], [15, 16, 17], [20, 21, 22]])
    mask_patch1 = np.array([[0, 0, 255], [0, 0, 0], [0, 0, 0]])
    nt.assert_allclose(p, img_patch1)
    nt.assert_allclose(m, mask_patch1)
    with pytest.raises(ValueError) as ex:
        # mask deliberately smaller than the image to trigger the error
        mask = np.eye(3, dtype='uint8') * 255
        samples = [(img, mask)]
        get_patches = ImagePatchesByMask(0, 1, (3, 3), 1, 1)
        samples >> get_patches >> Collect()
    # NOTE(review): the doubled quote in don''t is Python adjacent-string
    # concatenation ('don' + 't' -> "dont"), not an escaped apostrophe.
    # Confirm this actually matches the library's error message.
    assert str(ex.value).startswith('Image and mask size don''t match!')
def test_ImagePatchesByMask_3channel():
    """Mask-guided patches keep the RGB channel axis of the image."""
    img = np.reshape(np.arange(25 * 3), (5, 5, 3))
    mask = np.eye(5, dtype='uint8') * 255
    np.random.seed(0)  # deterministic sampling
    get_patches = ImagePatchesByMask(0, 1, (3, 3), 1, 1, retlabel=False)
    patches = [(img, mask)] >> get_patches >> Collect()
    assert len(patches) == 2

    patch, mask_patch = patches[0]
    nt.assert_allclose(patch,
                       np.array([[[36, 37, 38], [39, 40, 41], [42, 43, 44]],
                                 [[51, 52, 53], [54, 55, 56], [57, 58, 59]],
                                 [[66, 67, 68], [69, 70, 71], [72, 73, 74]]]))
    nt.assert_allclose(mask_patch,
                       np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]]))
def test_ImagePatchesByAnnotation():
    """Patches are sampled at annotated points as labels or masks."""
    img = np.reshape(np.arange(25), (5, 5))
    anno = ('point', ((3, 2), (2, 3),))
    samples = [(img, anno)]

    np.random.seed(0)  # deterministic negative sampling
    get_patches = ImagePatchesByAnnotation(0, 1, (3, 3), 1, 1)
    patches = samples >> get_patches >> Collect()
    assert len(patches) == 3

    expected = [(np.array([[12, 13, 14], [17, 18, 19], [22, 23, 24]]), 0),
                (np.array([[11, 12, 13], [16, 17, 18], [21, 22, 23]]), 1),
                (np.array([[7, 8, 9], [12, 13, 14], [17, 18, 19]]), 1)]
    for (patch, label), (exp, exp_label) in zip(patches, expected):
        assert label == exp_label
        nt.assert_allclose(patch, exp)

    np.random.seed(0)
    get_patches = ImagePatchesByAnnotation(0, 1, (3, 3), 1, 1,
                                           retlabel=False)
    patches = samples >> get_patches >> Collect()
    patch, mask = patches[0]
    nt.assert_allclose(mask, np.array([[0, 255, 0], [255, 0, 0], [0, 0, 0]]))
    nt.assert_allclose(patch,
                       np.array([[12, 13, 14], [17, 18, 19], [22, 23, 24]]))
def test_ImageAnnotationToMask():
    """Annotations are rendered into a mask image column."""
    img = np.zeros((3, 3), dtype='uint8')
    anno = ('point', ((0, 1), (2, 0)))
    masks = [(img, anno)] >> ImageAnnotationToMask(0, 1) >> Collect()
    expected = np.array([[0, 0, 255], [255, 0, 0], [0, 0, 0]], dtype='uint8')
    # compare string representations because nt.assert_allclose fails here
    assert str(masks[0][1]) == str(expected)
def test_ImageMean():
    """Per-pixel mean is computed, persisted, re-loaded and subtracted."""
    meansfile = 'tests/data/temp_image_mean.npy'
    img1 = np.eye(3, dtype='uint8') * 2
    img2 = np.ones((3, 3), dtype='uint8') * 4
    samples = [(img1,), (img2,)]

    # training pass computes and stores the mean image
    img_mean = ImageMean(0, meansfile)
    passed_through = samples >> img_mean.train() >> Collect()
    expected_mean = np.array([[3., 2., 2.], [2., 3., 2.], [2., 2., 3.]])
    nt.assert_allclose(img_mean.means, expected_mean)
    assert passed_through == samples
    assert os.path.exists(meansfile)

    # a fresh instance re-loads the stored mean from file
    img_mean = ImageMean(0, meansfile)
    nt.assert_allclose(img_mean.means, expected_mean)

    # applying the nut subtracts the mean
    subtracted = samples >> img_mean >> Collect()
    nt.assert_allclose(subtracted[0][0],
                       np.array([[-1., -2., -2.], [-2., -1., -2.],
                                 [-2., -2., -1.]]))
    nt.assert_allclose(subtracted[1][0],
                       np.array([[1., 2., 2.], [2., 1., 2.], [2., 2., 1.]]))

    with pytest.raises(ValueError) as ex:
        other_samples = [(np.eye(5),)]
        other_samples >> ImageMean(0, meansfile) >> Collect()
    assert str(ex.value).startswith('Mean loaded was computed on different')

    with pytest.raises(ValueError) as ex:
        samples >> ImageMean(0, 'file does not exist') >> Collect()
    assert str(ex.value).startswith('Mean has not yet been computed!')

    os.remove(meansfile)
def test_ImageChannelMean():
    """ImageChannelMean computes, persists, reloads and subtracts channel means."""
    meansfile = 'tests/data/temp_image_channel_mean.npy'
    img1 = np.dstack([np.ones((3, 3)), np.ones((3, 3))])
    img2 = np.dstack([np.ones((3, 3)), np.ones((3, 3)) * 3])
    samples = [(img1,), (img2,)]
    expected = np.array([[[1., 2.]]])

    # Means can be provided directly without training.
    img_mean = ImageChannelMean(0, means=[1, 2])
    nt.assert_allclose(img_mean.means, expected)

    # Training pass: means are computed and written to meansfile,
    # while the samples pass through unchanged.
    img_mean = ImageChannelMean(0, filepath=meansfile)
    results = samples >> img_mean.train() >> Collect()
    nt.assert_allclose(img_mean.means, expected)
    assert results == samples
    assert os.path.exists(meansfile)

    # A fresh instance reloads the stored means from file.
    img_mean = ImageChannelMean(0, filepath=meansfile)
    nt.assert_allclose(img_mean.means, expected)

    # Outside of training the channel means are subtracted per channel.
    results = samples >> img_mean >> Collect()
    zeros, ones = np.zeros((3, 3)), np.ones((3, 3))
    nt.assert_allclose(results[1][0][:, :, 0], zeros)
    nt.assert_allclose(results[1][0][:, :, 1], ones)

    # Images with a shape different from the stored means must be rejected.
    with pytest.raises(ValueError) as ex:
        other_samples = [(np.eye(5),)]
        img_mean = ImageChannelMean(0, filepath=meansfile)
        other_samples >> img_mean >> Collect()
    assert str(ex.value).startswith('Mean loaded was computed on different')

    # Subtracting before any means exist must be reported.
    with pytest.raises(ValueError) as ex:
        img_mean = ImageChannelMean(0, 'file does not exist')
        samples >> img_mean >> Collect()
    assert str(ex.value).startswith('Mean has not yet been computed!')

    os.remove(meansfile)
| 9,729 | 34.126354 | 78 | py |
nuts-ml | nuts-ml-master/tests/nutsml/test_fileutil.py | """
.. module:: fileutil
:synopsis: Unit tests for fileutil module
"""
import os
import os.path as op
import shutil
import nutsml.fileutil as fu
import pytest
@pytest.fixture()
def init_test_folders(request):
    """Remove folder 'foo' and sub-folders at setup and teardown."""
    path = op.join("data", "foo")

    def remove_tree():
        # Delete the whole test folder tree if a previous run left it behind.
        if os.path.exists(path):
            shutil.rmtree(path)

    remove_tree()  # clean state before the test
    request.addfinalizer(remove_tree)  # ...and after the test
    return path
def test_create_filename():
    """Generated file names are non-empty and honor prefix and extension."""
    assert len(fu.create_filename()) > 0
    assert fu.create_filename('prefix', '').startswith('prefix')
    assert fu.create_filename('', 'ext').endswith('.ext')
def test_create_filename_is_unique():
    """One hundred generated file names must all be distinct."""
    names = set()
    for _ in range(100):
        names.add(fu.create_filename())
    assert len(names) == 100
def test_create_temp_filepath():
    """Temp file paths live below TEMP_FOLDER and honor flags/extension."""
    assert fu.create_temp_filepath().startswith(fu.TEMP_FOLDER)
    assert fu.create_temp_filepath(relative=False).startswith(os.getcwd())
    assert fu.create_temp_filepath('', 'ext').endswith('.ext')
    assert os.path.exists(fu.TEMP_FOLDER), "temp folder should exist"
    fu.delete_folders(fu.TEMP_FOLDER)  # cleaning up.
def test_delete_file():
    """delete_file removes existing files and silently ignores missing ones."""
    path = 'data/' + fu.create_filename(ext='txt')
    fu.create_folders('data')
    fu.delete_file(path)  # file does not exist. Should be fine.
    with open(path, 'w') as fp:
        fp.write('foo')
    assert os.path.exists(path)
    fu.delete_file(path)
    assert not os.path.exists(path), "files should be deleted"
def test_create_folders(init_test_folders):
    """create_folders is idempotent and creates nested folders."""
    path = init_test_folders
    fu.create_folders(path)  # make new folder.
    assert os.path.exists(path), "foo should exist"
    fu.create_folders(path)  # creating an existing folder is fine.
    assert os.path.exists(path), "foo should still exist"
    fu.create_folders(op.join(path, "bar"))  # nested folder
    assert os.path.exists(path), "foo/bar should exist"
def test_delete_folders(init_test_folders):
    """delete_folders removes folder trees and tolerates missing folders."""
    path = init_test_folders
    fu.delete_folders(path)  # delete non-existing folder is fine.
    os.makedirs(path)
    fu.delete_folders(path)  # delete existing folder.
    assert not os.path.exists(path), "foo should not exist"
    os.makedirs(op.join(path, "bar"))  # nested folders
    fu.delete_folders(path)
    assert not os.path.exists(path), "foo should not exist"
def test_delete_temp_data():
    """delete_temp_data removes the temp folder entirely."""
    fu.create_folders(fu.TEMP_FOLDER)
    fu.delete_temp_data()
    assert not os.path.exists(fu.TEMP_FOLDER), "temp folder should not exist"
def test_clear_folder(init_test_folders):
    """clear_folder empties a folder but keeps the folder itself."""
    path = init_test_folders
    bardir = op.join(path, "bar")
    bazfile = op.join(path, "baz.txt")
    os.makedirs(bardir)
    open(bazfile, "w").close()  # create an empty file
    fu.clear_folder(path)
    assert os.path.exists(path), "foo folder should exist"
    assert not os.path.exists(bardir), "bar folder should not exist"
    assert not os.path.isfile(bazfile), "baz file should not exist"
def test_reader_filepath():
    """reader_filepath resolves paths from patterns, functions or plain names."""
    # Wildcard pattern: '*' is replaced by the file name.
    expected = "data/nut_color.jpg"
    assert fu.reader_filepath(None, "nut_color", "data/*.jpg") == expected
    # Path function: the path is computed from the sample itself.
    sample = (1, "nut_gray")
    pathfunc = lambda s: 'data/{1}.png'.format(*s)
    assert fu.reader_filepath(sample, None, pathfunc) == "data/nut_gray.png"
    # No path function/pattern: the file name already is the path.
    assert fu.reader_filepath(None, "nut.gif", None) == "nut.gif"
| 3,439 | 29.990991 | 77 | py |
nuts-ml | nuts-ml-master/nutsml/writer.py | """
.. module:: writer
:synopsis: Writing of sample and image data
"""
from __future__ import absolute_import
import os
import skimage.io as sio
from inspect import isfunction
from nutsml.fileutil import create_folders
from nutsflow.base import NutFunction
from nutsflow.source import Enumerate
class WriteImage(NutFunction):
    """
    Write images within samples.
    """

    def __init__(self, column, pathfunc, namefunc=None):
        """
        Write images within samples to file.

        Writes jpg, gif, png, tif and bmp format depending on file extension.
        Images in samples are expected to be numpy arrays.
        See nutsml.util.load_image for details.
        Folders on output file path are created if missing.

        >>> from nutsml import ReadImage
        >>> from nutsflow import Collect, Get, GetCols, Consume, Unzip

        >>> samples = [('nut_color', 1), ('nut_grayscale', 2)]
        >>> inpath = 'tests/data/img_formats/*.bmp'
        >>> img_samples = samples >> ReadImage(0, inpath) >> Collect()

        >>> imagepath = 'tests/data/test_*.bmp'
        >>> names = samples >> Get(0) >> Collect()
        >>> img_samples >> WriteImage(0, imagepath, names) >> Consume()

        >>> imagepath = 'tests/data/test_*.bmp'
        >>> names = samples >> Get(0) >> Collect()
        >>> images = img_samples >> Get(0)
        >>> images >> WriteImage(None, imagepath, names) >> Consume()

        >>> imagepath = 'tests/data/test_*.bmp'
        >>> namefunc = lambda sample: sample[1]
        >>> (samples >> GetCols(0,0,1) >> ReadImage(0, inpath) >>
        ... WriteImage(0, imagepath, namefunc) >> Consume())

        :param int|None column: Column in sample that contains image or
            take sample itself if column is None.
        :param str|function pathfunc: Filepath with wildcard '*',
            which is replaced by the name provided names e.g.
            'tests/data/img_formats/*.jpg' for names = ['nut_grayscale']
            will become 'tests/data/img_formats/nut_grayscale.jpg'
            or
            Function to compute path to image file from sample and name, e.g.
            pathfunc=lambda sample, name: 'tests/data/test_{}.jpg'.format(name)
        :param iterable|function|None namefunc: Iterable over names to generate
            image paths from (length need to be the same as samples),
            or
            Function to compute filenames from sample, e.g.
            namefunc=lambda samples: sample[0]
            if None, Enumerate() is used.
        """
        # Default name source is an infinite counter (0, 1, 2, ...).
        namefunc = Enumerate() if namefunc is None else namefunc
        # Functions are called per sample; anything else is treated as an
        # iterable of names that is consumed one name per written image.
        self.namefunc = namefunc if isfunction(namefunc) else iter(namefunc)
        self.column = column
        self.pathfunc = pathfunc

    def __call__(self, sample):
        """Return sample and write image within sample"""
        pathfunc, namefunc = self.pathfunc, self.namefunc
        # Derive the image name either from the sample or from the name
        # iterator (stateful: advances once per call).
        name = namefunc(sample) if isfunction(namefunc) else next(namefunc)
        if isinstance(pathfunc, str):
            # Wildcard pattern: replace '*' by the image name.
            filepath = pathfunc.replace('*', str(name))
        elif isfunction(pathfunc):
            filepath = pathfunc(sample, name)
        else:
            raise ValueError('Expect path or function: ' + str(pathfunc))
        # Make sure the target folder exists before writing.
        create_folders(os.path.split(filepath)[0])
        img = sample if self.column is None else sample[self.column]
        sio.imsave(filepath, img)
        return sample
| 3,414 | 37.806818 | 79 | py |
nuts-ml | nuts-ml-master/nutsml/reader.py | """
.. module:: reader
:synopsis: Reading of sample data and images
"""
from __future__ import absolute_import
import os
import pandas as pd
import numpy as np
from glob import glob
from collections import namedtuple
from fnmatch import fnmatch
from nutsml.imageutil import load_image
from nutsml.fileutil import reader_filepath
from nutsflow import NutSource, nut_function, nut_source
from nutsflow.common import as_set
@nut_source
def ReadLabelDirs(basedir, filepattern='*', exclude='_*'):
    """
    Read file paths from label directories.

    Typically used when classification data is organized in folders,
    where the folder name represents the class label and the files in
    the folder the data samples (images, documents, ...) for that class.

    >>> from __future__ import print_function
    >>> from nutsflow import Sort

    >>> read = ReadLabelDirs('tests/data/labeldirs', '*.txt')
    >>> samples = read >> Sort()
    >>> for sample in samples:
    ...     print(sample)
    ...
    ('tests/data/labeldirs/0/test0.txt', '0')
    ('tests/data/labeldirs/1/test1.txt', '1')
    ('tests/data/labeldirs/1/test11.txt', '1')

    :param string basedir: Path to folder that contains label directories.
    :param string filepattern: Pattern for filepaths to read from
       label directories, e.g. '*.jpg', '*.txt'
    :param string exclude: Pattern for label directories to exclude.
       Default is '_*' which excludes all label folders prefixed with '_'.
    :return: iterator over labeled file paths
    :rtype: iterator
    """
    for label in os.listdir(basedir):
        labeldir = os.path.join(basedir, label)
        # Skip plain files and excluded label folders (e.g. '_ignore').
        if not os.path.isdir(labeldir) or fnmatch(label, exclude):
            continue
        for filepath in glob(os.path.join(labeldir, filepattern)):
            # Normalize Windows separators for platform-independent paths.
            yield filepath.replace('\\', '/'), label
@nut_function
def ReadNumpy(sample, columns, pathfunc=None, allow_pickle=False):
    """
    Load numpy arrays from filesystem.

    Note that the loaded numpy array replace the file name|path in the
    sample.

    >>> from nutsflow import Consume, Collect, PrintType

    >>> samples = ['tests/data/img_arrays/nut_color.jpg.npy']
    >>> samples >> ReadNumpy(None) >> PrintType() >> Consume()
    (<ndarray> 213x320x3:uint8)

    >>> samples = [('tests/data/img_arrays/nut_color.jpg.npy', 'class0')]
    >>> samples >> ReadNumpy(0) >> PrintType() >> Consume()
    (<ndarray> 213x320x3:uint8, <str> class0)

    >>> filepath = 'tests/data/img_arrays/*.jpg.npy'
    >>> samples = [(1, 'nut_color'), (2, 'nut_grayscale')]
    >>> samples >> ReadNumpy(1, filepath) >> PrintType() >> Consume()
    (<int> 1, <ndarray> 213x320x3:uint8)
    (<int> 2, <ndarray> 213x320:uint8)

    >>> pathfunc = lambda s: 'tests/data/img_arrays/{1}.jpg.npy'.format(*s)
    >>> samples >> ReadNumpy(1, pathfunc) >> PrintType() >> Consume()
    (<int> 1, <ndarray> 213x320x3:uint8)
    (<int> 2, <ndarray> 213x320:uint8)

    :param tuple|list sample: ('nut_data', 1)
    :param None|int|tuple columns: Indices of columns in sample to be replaced
                              by numpy array (based on fileid in that column)
                              If None then a flat samples is assumed and
                              a tuple with the numpy array is returned.
    :param string|function|None pathfunc: Filepath with wildcard '*',
      which is replaced by the file id/name provided in the sample, e.g.
      'tests/data/img_arrays/*.jpg.npy' for sample ('nut_grayscale', 2)
      will become 'tests/data/img_arrays/nut_grayscale.jpg.npy'
      or
      Function to compute path to numnpy file from sample, e.g.
      lambda sample: 'tests/data/img_arrays/{1}.jpg.npy'.format(*sample)
      or
      None, in this case the file id/name is taken as the filepath.
    :param bool allow_pickle : Allow loading pickled object arrays in npy files.
    :return: Sample with file ids/names replaced by numpy arrays.
    :rtype: tuple
    """

    def _load(fileid):
        """Resolve the filepath for fileid and load the numpy array."""
        path = reader_filepath(sample, fileid, pathfunc)
        return np.load(path, allow_pickle=allow_pickle)

    if columns is None:
        # Flat sample: the sample itself is the file id.
        return (_load(sample),)
    colset = as_set(columns)
    return tuple(_load(e) if i in colset else e
                 for i, e in enumerate(sample))
@nut_function
def ReadImage(sample, columns, pathfunc=None, as_grey=False, dtype='uint8'):
    """
    Load images from filesystem for samples.

    Loads images in jpg, gif, png, tif and bmp format.
    Images are returned as numpy arrays of shape (h, w, c) or (h, w) for
    color images or gray scale images respectively.
    See nutsml.imageutil.load_image for details.

    Note that the loaded images replace the image file name|path in the
    sample. If the images file paths are directly proved (not as a tuple
    sample) still tuples with the loaded image are returned.

    >>> from nutsflow import Consume, Collect
    >>> from nutsml import PrintColType

    >>> images = ['tests/data/img_formats/nut_color.gif']
    >>> images >> ReadImage(None) >> PrintColType() >> Consume()
    item 0: <tuple>
      0: <ndarray> shape:213x320x3 dtype:uint8 range:0..255

    >>> samples = [('tests/data/img_formats/nut_color.gif', 'class0')]
    >>> img_samples = samples >> ReadImage(0) >> Collect()

    >>> imagepath = 'tests/data/img_formats/*.gif'
    >>> samples = [(1, 'nut_color'), (2, 'nut_grayscale')]
    >>> samples >> ReadImage(1, imagepath) >> PrintColType() >> Consume()
    item 0: <tuple>
      0: <int> 1
      1: <ndarray> shape:213x320x3 dtype:uint8 range:0..255
    item 1: <tuple>
      0: <int> 2
      1: <ndarray> shape:213x320 dtype:uint8 range:20..235

    >>> pathfunc = lambda s: 'tests/data/img_formats/{1}.jpg'.format(*s)
    >>> img_samples = samples >> ReadImage(1, pathfunc) >> Collect()

    :param tuple|list sample: ('nut_color', 1)
    :param None|int|tuple columns: Indices of columns in sample to be replaced
                              by image (based on image id in that column)
                              If None then a flat samples is assumed and
                              a tuple with the image is returned.
    :param string|function|None pathfunc: Filepath with wildcard '*',
      which is replaced by the imageid provided in the sample, e.g.
      'tests/data/img_formats/*.jpg' for sample ('nut_grayscale', 2)
      will become 'tests/data/img_formats/nut_grayscale.jpg'
      or
      Function to compute path to image file from sample, e.g.
      lambda sample: 'tests/data/img_formats/{1}.jpg'.format(*sample)
      or
      None, in this case the image id is taken as the filepath.
    :param bool as_grey: If true, load as grayscale image.
    :param dtype dtype: Numpy data type of the image.
    :return: Sample with image ids replaced by image (=ndarray)
            of shape (h, w, c) or (h, w)
    :rtype: tuple
    """

    def _load(imageid):
        """Resolve the filepath for imageid and load the image."""
        path = reader_filepath(sample, imageid, pathfunc)
        return load_image(path, as_grey=as_grey, dtype=dtype)

    if columns is None:
        # Flat sample: the sample itself is the image id.
        return (_load(sample),)
    colset = as_set(columns)
    return tuple(_load(e) if i in colset else e
                 for i, e in enumerate(sample))
class ReadPandas(NutSource):
    """
    Read data as Pandas table from file system.
    """

    def __init__(self, filepath, rows=None, colnames=None, dropnan=True,
                 replacenan=False, rowname='Row', **kwargs):
        """
        Create reader for Pandas tables.

        The reader returns the table contents as an interator over named tuples,
        where the column names are derived from the table columns. The order
        and selection of columns can be changed.

        >>> from nutsflow import Collect, Consume, Print
        >>> filepath = 'tests/data/pandas_table.csv'

        >>> ReadPandas(filepath) >> Print() >> Consume()
        Row(col1=1.0, col2=4.0)
        Row(col1=3.0, col2=6.0)

        >>> (ReadPandas(filepath, dropnan=False, rowname='Sample') >>
        ... Print() >> Consume())
        Sample(col1=1.0, col2=4.0)
        Sample(col1=2.0, col2=nan)
        Sample(col1=3.0, col2=6.0)

        >>> ReadPandas(filepath, replacenan=None) >> Print() >> Consume()
        Row(col1=1.0, col2=None)
        Row(col1=2.0, col2=None)
        Row(col1=3.0, col2=6.0)

        >>> colnames=['col2', 'col1']  # swap order
        >>> ReadPandas(filepath, colnames=colnames) >> Print() >> Consume()
        Row(col2=4.0, col1=1.0)
        Row(col2=6.0, col1=3.0)

        >>> ReadPandas(filepath, rows='col1 > 1', replacenan=0) >> Collect()
        [Row(col1=2.0, col2=0), Row(col1=3.0, col2=6.0)]

        :param str filepath: Path to a table in CSV, TSV, XLSX or
           Pandas pickle format. Depending on file extension (e.g. .csv)
           the table format is picked.
           Note tables must have a header with the column names.
        :param str rows: Rows to filter. Any Pandas filter expression. If
           rows = None all rows of the table are returned.
        :param list colnames: List of names for the table columns to return.
           For colnames = None all columns are returned.
        :param bool dropnan: If True all rows that contain NaN are dropped.
        :param object replacenan: If not False all NaNs are replaced by
           the value of replacenan
        :param str rowname: Name of named tuple return as rows.
        :param kwargs kwargs: Key word arguments passed on the the Pandas
           methods for data reading, e.g, header=None.
           See pandas/pandas/io/parsers.py for detais
        """
        self.filepath = filepath
        self.rows = rows
        self.colnames = colnames
        self.dropnan = dropnan
        self.replacenan = replacenan
        self.rowname = rowname
        self.kwargs = kwargs
        # Table is loaded eagerly at construction time, not per iteration.
        self.dataframe = self._load_table(filepath)

    @staticmethod
    def isnull(value):
        """
        Return true if values is NaN or None.

        >>> import numpy as np
        >>> ReadPandas.isnull(np.NaN)
        True

        >>> ReadPandas.isnull(None)
        True

        >>> ReadPandas.isnull(0)
        False

        :param value: Value to test
        :return: Return true for NaN or None values.
        :rtype: bool
        """
        return pd.isnull(value)

    def _replacenan(self, row):
        """
        Replace NaN values in row by None

        :param iterable row: Any iterable.
        :return: Row with None instead of NaN
        :rtype: tuple
        """
        # NOTE: despite the docstring, NaNs are replaced by self.replacenan
        # (which may be None or any other value).
        value = self.replacenan
        return tuple(value if pd.isnull(v) else v for v in row)

    def _load_table(self, filepath):
        """
        Load table from file system.

        :param str filepath: Path to table in CSV, TSV, XLSX or
               Pandas pickle format.
        :return: Pandas table
        :rtype: pandas.core.frame.DataFrame
        """
        # Format is derived from the (case-insensitive) file extension;
        # anything unrecognized is assumed to be a Pandas pickle.
        _, ext = os.path.splitext(filepath.lower())
        if ext == '.tsv':
            return pd.read_csv(filepath, sep='\t', **self.kwargs)
        if ext == '.csv':
            return pd.read_csv(filepath, **self.kwargs)
        if ext == '.xlsx':
            return pd.read_excel(filepath, engine='openpyxl', **self.kwargs)
        return pd.read_pickle(filepath, **self.kwargs)

    def __iter__(self):
        """
        Return iterator over rows in table.

        :return: Iterator over rows.
        :rtype: iterator
        """
        df = self.dataframe
        rows = df.query(self.rows) if self.rows else df
        series = rows[self.colnames] if self.colnames else rows
        Row = namedtuple(self.rowname, series.columns.to_list())
        # Precedence: replacenan (if not False) wins over dropnan.
        if not self.replacenan is False:
            values = (self._replacenan(row) for row in series.values)
        elif self.dropnan:
            values = series.dropna().values
        else:
            values = series.values
        return (Row(*v) for v in values)
| 12,144 | 36.140673 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/checkpoint.py | """
.. module:: checkpoint
:synopsis: Conveniency class to create checkpoints for network training.
"""
import os
import time
from os.path import join, exists, isdir, getmtime
from nutsflow.config import Config
"""
.. module:: checkpoint
:synopsis: Creation of checkpoints with network weights and parameters.
"""
class Checkpoint(object):
    """
    A factory for checkpoints to periodically save network weights and other
    hyper/configuration parameters.

    | Example usage:
    |
    | def create_network(lr=0.01, momentum=0.9):
    |     model = Sequential()
    |     ...
    |     optimizer = opt.SGD(lr=lr, momentum=momentum)
    |     model.compile(optimizer=optimizer, metrics=['accuracy'])
    |     return KerasNetwork(model), optimizer
    |
    | def parameters(network, optimizer):
    |     return dict(lr = optimizer.lr, momentum = optimizer.momentum)
    |
    | def train_network():
    |     checkpoint = Checkpoint(create_network, parameters)
    |     network, optimizer = checkpoint.load()
    |
    |     for epoch in range(EPOCHS):
    |         train_err = train_network()
    |         val_err = validate_network()
    |
    |         if epoch % 10 == 0:  # Reduce learning rate every 10 epochs
    |             optimizer.lr /= 2
    |
    |         checkpoint.save_best(val_err)
    |

    Checkpoints can also be saved under different names, e.g.

    | checkpoint.save_best(val_err, 'checkpoint'+str(epoch))

    And specific checkpoints can be loaded:

    | network, config = checkpoint.load('checkpoint103')

    If no checkpoint is specified the most recent one is loaded.
    """

    def __init__(self, create_net, parameters, checkpointspath='checkpoints'):
        """
        Create checkpoint factory.

        >>> def create_network(lr=0.1):
        ...     return 'MyNetwork', lr

        >>> def parameters(network, lr):
        ...     return dict(lr = lr)

        >>> checkpoint = Checkpoint(create_network, parameters)
        >>> network, lr = checkpoint.load()
        >>> network, lr
        ('MyNetwork', 0.1)

        :param function create_net: Function that takes keyword parameters
           and returns a nuts-ml Network and and any other values or objects
           needed to describe the state to be checkpointed.
           Note: parameters(*create_net()) must work!
        :param function parameters: Function that takes output of create_net()
           and returns dictionary with parameters (same as the one that are
           used in create_net(...))
        :param string checkpointspath: Path to folder that will contain
           checkpoint folders.
        """
        if not exists(checkpointspath):
            os.makedirs(checkpointspath)
        self.basepath = checkpointspath
        self.create_net = create_net
        self.parameters = parameters
        self.state = None  # network and other objets
        self.network = None  # only the network
        self.config = None  # bestscore and other checkpoint params

    def dirs(self):
        """
        Return full paths to all checkpoint folders.

        :return: Paths to all folders under the basedir.
        :rtype: list
        """
        paths = (join(self.basepath, d) for d in os.listdir(self.basepath))
        return [p for p in paths if isdir(p)]

    def latest(self):
        """
        Find most recently modified/created checkpoint folder.

        :return: Full path to checkpoint folder if it exists otherwise None.
        :rtype: str | None
        """
        # Most recently modified folder first (by filesystem mtime).
        dirs = sorted(self.dirs(), key=getmtime, reverse=True)
        return dirs[0] if dirs else None

    def datapaths(self, checkpointname=None):
        """
        Return paths to network weights, parameters and config files.

        If no checkpoints exist under basedir (None, None, None) is returned.
        Note: for an explicitly named checkpoint the folder is created
        if it does not exist yet.

        :param str|None checkpointname: Name of checkpoint. If name is None
           the most recent checkpoint is used.
        :return: (weightspath, paramspath, configpath) or (None, None, None)
        :rtype: tuple
        """
        name = checkpointname
        if name is None:
            path = self.latest()
            if path is None:
                return None, None, None
        else:
            path = join(self.basepath, name)
            if not exists(path):
                os.makedirs(path)
        return (join(path, 'weights'), join(path, 'params.json'),
                join(path, 'config.json'))

    def save(self, checkpointname='checkpoint'):
        """
        Save network weights and parameters under the given name.

        :param str checkpointname: Name of checkpoint folder. Path will be
           self.basepath/checkpointname
        :return: path to checkpoint folder
        :rtype: str
        """
        weightspath, paramspath, configpath = self.datapaths(checkpointname)
        # Stamp the config so latest() reflects this save.
        self.config.timestamp = time.time()
        self.network.save_weights(weightspath)
        # create_net may return a single object or a tuple of objects;
        # normalize to a list so parameters(*state) works either way.
        state = self.state if hasattr(self.state, '__iter__') else [self.state]
        Config(self.parameters(*state)).save(paramspath)
        Config(self.config).save(configpath)
        return join(self.basepath, checkpointname)

    def save_best(self, score, checkpointname='checkpoint', isloss=False):
        """
        Save best network weights and parameters under the given name.

        Saves only if score improves on the best score seen so far
        (or if no best score exists yet).

        NOTE(review): the checkpoint path is returned even when the score
        did not improve and nothing was saved — confirm this is intended.

        :param float|int score: Some score indicating quality of network.
        :param str checkpointname: Name of checkpoint folder.
        :param bool isloss: True, score is a loss and lower is better otherwise
          higher is better.
        :return: path to checkpoint folder
        :rtype: str
        """
        bestscore = self.config.bestscore
        if (bestscore is None
                or (isloss and score < bestscore)
                or (not isloss and score > bestscore)):
            self.config.bestscore = score
            self.config.isloss = isloss
            self.save(checkpointname)
        return join(self.basepath, checkpointname)

    def load(self, checkpointname=None):
        """
        Create network, load weights and parameters.

        :param str|none checkpointname: Name of checkpoint to load. If None
           the most recent checkpoint is used. If no checkpoint exists yet
           the network will be created but no weights loaded and the
           default configuration will be returned.
        :return: whatever self.create_net returns
        :rtype: object
        """
        weightspath, paramspath, configpath = self.datapaths(checkpointname)
        # Recreate the network with the saved parameters (if any).
        params = Config().load(paramspath) if paramspath else None
        state = self.create_net(**params) if params else self.create_net()
        self.network = state[0] if hasattr(state, '__iter__') else state
        self.state = state
        if weightspath:
            self.network.load_weights(weightspath)
        defaultconfig = Config(bestscore=None, timestamp=None)
        self.config = Config().load(configpath) if configpath else defaultconfig
        return state
| 6,996 | 34.882051 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/stratify.py | """
.. module:: stratify
:synopsis: Stratification of sample sets
"""
from __future__ import absolute_import
import random as rnd
from nutsflow import nut_processor, nut_sink, Sort
from nutsml.datautil import upsample, random_downsample
@nut_processor
def Stratify(iterable, labelcol, labeldist, rand=None):
    """
    iterable >> Stratify(labelcol, labeldist, rand=None)

    Stratifies samples by randomly down-sampling according to the given
    label distribution. Samples of the rarest class are kept with
    probability one; samples of all other classes are kept with a
    probability that matches the expected count of the rarest class.

    Note that in contrast to SplitRandom, which generates the same random
    split per default, Stratify generates different stratifications.
    Furthermore, while the downsampling is random the order of samples
    remains the same! The label distribution must be provided or computed
    upfront, but the stratification itself is performed online and only
    one sample at a time is held in memory.

    >>> from nutsflow import Collect, CountValues
    >>> from nutsflow.common import StableRandom
    >>> fix = StableRandom(1)  # Stable random numbers for doctest

    >>> samples = [('pos', 1), ('pos', 1), ('neg', 0)]
    >>> labeldist = samples >> CountValues(1)
    >>> samples >> Stratify(1, labeldist, rand=fix) >> Sort()
    [('neg', 0), ('pos', 1)]

    :param iterable over tuples iterable: Iterable of tuples where column
        labelcol contains a sample label that is used for stratification
    :param int labelcol: Column of tuple/samples that contains the label,
    :param dict labeldist: Dictionary with numbers of different labels,
        e.g. {'good':12, 'bad':27, 'ugly':3}
    :param Random|None rand: Random number generator used for down-sampling.
        If None, random.Random() is used.
    :return: Stratified samples
    :rtype: Generator over tuples
    """
    rng = rnd.Random() if rand is None else rand
    smallest = float(min(labeldist.values()))
    # Keep-probability per label: 1.0 for the rarest class, < 1.0 otherwise.
    keep_prob = {label: smallest / n for label, n in labeldist.items()}
    for sample in iterable:
        if rng.random() < keep_prob[sample[labelcol]]:
            yield sample
@nut_sink
def CollectStratified(iterable, labelcol, mode='downrnd', container=list,
                      rand=None):
    """
    iterable >> CollectStratified(labelcol, mode='downrnd', container=list,
                                  rand=rnd.Random())

    Collects samples in a container and stratifies them by either randomly
    down-sampling classes or up-sampling classes by duplicating samples.

    >>> from nutsflow import Collect
    >>> samples = [('pos', 1), ('pos', 1), ('neg', 0)]
    >>> samples >> CollectStratified(1) >> Sort()
    [('neg', 0), ('pos', 1)]

    :param iterable over tuples iterable: Iterable of tuples where column
        labelcol contains a sample label that is used for stratification
    :param int labelcol: Column of tuple/samples that contains the label
    :param string mode:
        'downrnd' : randomly down-sample
        'up' : up-sample
    :param container container: Some container, e.g. list, set, dict
        that can be filled from an iterable
    :param Random|None rand: Random number generator used for sampling.
        If None, random.Random() is used.
    :return: Stratified samples
    :rtype: List of tuples
    """
    rng = rnd.Random() if rand is None else rand
    samples = list(iterable)  # materialize: stratification needs all samples
    if mode == 'up':
        stratified = upsample(samples, labelcol, rng)
    elif mode == 'downrnd':
        stratified = random_downsample(samples, labelcol, rng)
    else:
        raise ValueError('Unknown mode: ' + mode)
    return container(stratified)
| 3,795 | 37.734694 | 76 | py |
nuts-ml | nuts-ml-master/nutsml/batcher.py | """
.. module:: batcher
:synopsis: Collecting samples in mini-batches for GPU-based training.
"""
import warnings
import numpy as np
import nutsml.imageutil as ni
from nutsflow import nut_function
from nutsflow.base import Nut
from nutsflow.iterfunction import take, PrefetchIterator
def build_number_batch(numbers, dtype):
    """
    Return numpy array with given dtype for given numbers.

    >>> numbers = (1, 2, 3, 1)
    >>> build_number_batch(numbers, 'uint8')
    array([1, 2, 3, 1], dtype=uint8)

    :param iterable number numbers: Numbers to create batch from
    :param numpy data type dtype: Data type of batch, e.g. 'uint8'
    :return: Numpy array for numbers
    :rtype: numpy.array
    """
    batch = np.array(numbers, dtype=np.dtype(dtype))
    return batch
def build_one_hot_batch(class_ids, dtype, num_classes):
    """
    Return one hot vectors for class ids.

    >>> class_ids = [0, 1, 2, 1]
    >>> build_one_hot_batch(class_ids, 'uint8', 3)
    array([[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1],
           [0, 1, 0]], dtype=uint8)

    :param iterable class_ids: Class indices in {0, ..., num_classes-1}
    :param numpy data type dtype: Data type of batch, e.g. 'uint8'
    :param num_classes: Number of classes
    :return: One hot vectors for class ids.
    :rtype: numpy.array
    """
    # Row i of the identity matrix is the one-hot vector for class i;
    # fancy indexing with the id array picks one row per sample.
    ids = np.array(class_ids, dtype=np.uint16)
    identity = np.eye(num_classes, dtype=dtype)
    return identity[ids]
def build_vector_batch(vectors, dtype):
    """
    Return batch of vectors.

    >>> from nutsflow.common import shapestr
    >>> vectors = [np.array([1,2,3]), np.array([2, 3, 4])]
    >>> batch = build_vector_batch(vectors, 'uint8')
    >>> shapestr(batch)
    '2x3'

    >>> batch
    array([[1, 2, 3],
           [2, 3, 4]], dtype=uint8)

    :param iterable vectors: Numpy row vectors
    :param numpy data type dtype: Data type of batch, e.g. 'uint8'
    :return: vstack of vectors
    :rtype: numpy.array
    """
    if not len(vectors):
        raise ValueError('No vectors to build batch!')
    stacked = np.vstack(vectors)
    return stacked.astype(dtype)
def build_tensor_batch(tensors, dtype, axes=None, expand=None):
    """
    Return batch of tensors.

    >>> from nutsflow.common import shapestr
    >>> tensors = [np.zeros((2, 3)), np.ones((2, 3))]
    >>> batch = build_tensor_batch(tensors, 'uint8')
    >>> shapestr(batch)
    '2x2x3'

    >>> print(batch)
    [[[0 0 0]
      [0 0 0]]
    <BLANKLINE>
     [[1 1 1]
      [1 1 1]]]

    >>> batch = build_tensor_batch(tensors, 'uint8', expand=0)
    >>> shapestr(batch)
    '2x1x2x3'

    >>> batch = build_tensor_batch(tensors, 'uint8', axes=(1, 0))
    >>> shapestr(batch)
    '2x3x2'

    :param iterable tensors: Numpy tensors
    :param numpy data type dtype: Data type of batch, e.g. 'uint8'
    :param tuple|None axes: axes order, e.g. to move a channel axis to the
        last position. (see numpy transpose for details)
    :param int|None expand: Add empty dimension at expand dimension.
        (see numpy expand_dims for details).
    :return: stack of tensors, with batch axis first.
    :rtype: numpy.array
    """
    if not len(tensors):
        raise ValueError('No tensors to build batch!')
    prepared = tensors
    if axes is not None:
        # Reorder axes of every tensor before stacking.
        prepared = [np.transpose(t, axes) for t in prepared]
    if expand is not None:
        # Insert a singleton dimension (e.g. a channel axis) per tensor.
        prepared = [np.expand_dims(t, expand) for t in prepared]
    # Stacking adds the batch axis at position 0.
    return np.stack(prepared).astype(dtype)
def build_image_batch(images, dtype, channelfirst=False):
    """
    Return batch of images.

    If images have no channel a channel axis is added. For channelfirst=True
    it will be added/moved to front otherwise the channel comes last.
    All images in batch will have a channel axis. Batch is of shape
    (n, c, h, w) or (n, h, w, c) depending on channelfirst, where n is
    the number of images in the batch.

    >>> from nutsflow.common import shapestr
    >>> images = [np.zeros((2, 3)), np.ones((2, 3))]
    >>> batch = build_image_batch(images, 'uint8', True)
    >>> shapestr(batch)
    '2x1x2x3'

    >>> batch
    array([[[[0, 0, 0],
             [0, 0, 0]]],
    <BLANKLINE>
    <BLANKLINE>
           [[[1, 1, 1],
             [1, 1, 1]]]], dtype=uint8)

    :param numpy array images: Images to batch. Must be of shape (h,w,c)
       or (h,w). Gray-scale with channel is fine (h,w,1) and also
       alpha channel is fine (h,w,4).
    :param numpy data type dtype: Data type of batch, e.g. 'uint8'
    :param bool channelfirst: If True, channel is added/moved to front.
    :return: Image batch with shape (n, c, h, w) or (n, h, w, c).
    :rtype: np.array
    """

    def _targetshape(image):
        # Treat a 2D (gray-scale) image as having a single channel.
        shape = image.shape
        return (shape[0], shape[1], 1) if image.ndim == 2 else shape

    n = len(images)
    if not n:
        raise ValueError('No images to build batch!')
    # All images must share the shape of the first one.
    h, w, c = _targetshape(images[0])  # shape of first(=all) images
    # Sanity check: a 'channel' larger than width and height most likely
    # means the channel axis is not last, as required.
    if c > w or c > h:
        raise ValueError('Channel not at last axis: ' + str((h, w, c)))
    batch = np.empty((n, c, h, w) if channelfirst else (n, h, w, c))
    for i, image in enumerate(images):
        # add_channel ensures a channel axis at the requested position.
        image = ni.add_channel(image, channelfirst)
        if image.shape != batch.shape[1:]:
            raise ValueError('Images vary in shape: ' + str(image.shape))
        batch[i, :, :, :] = image
    return batch.astype(dtype)
class BuildBatch(Nut):
    """
    Build batches for GPU-based neural network training.
    """

    def __init__(self, batchsize, prefetch=1):
        """
        iterable >> BuildBatch(batchsize, prefetch=1)

        Take samples in iterable, extract specified columns, convert
        column data to numpy arrays of various types, aggregate converted
        samples into a batch.

        The format of a batch is a list of lists: [[inputs], [outputs]]
        where inputs and outputs are Numpy arrays.

        The following example uses PrintType() to print the shape of
        the batches constructed. This is useful for development and debugging
        but should be removed in production.

        >>> from nutsflow import Collect, PrintType
        >>> numbers = [4.1, 3.2, 1.1]
        >>> images = [np.zeros((5, 3)), np.ones((5, 3)) , np.ones((5, 3))]
        >>> class_ids = [1, 2, 1]
        >>> samples = list(zip(numbers, images, class_ids))

        >>> build_batch = (BuildBatch(batchsize=2)
        ...                .input(0, 'number', 'float32')
        ...                .input(1, 'image', np.uint8, True)
        ...                .output(2, 'one_hot', np.uint8, 3))
        >>> batches = samples >> build_batch >> PrintType() >> Collect()
        [[<ndarray> 2:float32, <ndarray> 2x1x5x3:uint8], [<ndarray> 2x3:uint8]]
        [[<ndarray> 1:float32, <ndarray> 1x1x5x3:uint8], [<ndarray> 1x3:uint8]]

        In the example above, we have multiple inputs and a single output,
        and the batch is of format [[number, image], [one_hot]], where each
        data element a Numpy array with the shown shape and dtype.

        Sample columns can be ignored or reused. Assuming an autoencoder, one
        might whish to reuse the sample image as input and output:

        >>> build_batch = (BuildBatch(2)
        ...                .input(1, 'image', np.uint8, True)
        ...                .output(1, 'image', np.uint8, True))
        >>> batches = samples >> build_batch >> PrintType() >> Collect()
        [[<ndarray> 2x1x5x3:uint8], [<ndarray> 2x1x5x3:uint8]]
        [[<ndarray> 1x1x5x3:uint8], [<ndarray> 1x1x5x3:uint8]]

        In the prediction phase no target outputs are needed. If the batch
        contains only inputs, the batch format is just [inputs].

        >>> build_pred_batch = (BuildBatch(2)
        ...                     .input(1, 'image', 'uint8', True))
        >>> batches = samples >> build_pred_batch >> PrintType() >> Collect()
        [<ndarray> 2x1x5x3:uint8]
        [<ndarray> 1x1x5x3:uint8]

        :param int batchsize: Size of batch = number of rows in batch.
        :param int prefetch: Number of batches to prefetch. This speeds up
           GPU based training, since one batch is built on CPU while the
           another is processed on the GPU.
        """
        self.batchsize = batchsize
        self.prefetch = prefetch
        # List of (col, name, isinput, args, kwargs) specs added via
        # input()/output(); processed in order by _batch_generator().
        self.colspecs = []
        # Maps column-builder name to the function that converts a column
        # of sample data into a numpy array.
        self.builder = {'image': build_image_batch,
                        'number': build_number_batch,
                        'vector': build_vector_batch,
                        'tensor': build_tensor_batch,
                        'one_hot': build_one_hot_batch}

    def input(self, col, name, *args, **kwargs):
        """
        Specify and add input columns for batch to create

        :param int col: column of the sample to extract and to create a
            batch input column from.
        :param string name: Name of the column function to apply to create
            a batch column, e.g. 'image'
            See the following functions for more details:
            'image': nutsflow.batcher.build_image_batch
            'number': nutsflow.batcher.build_number_batch
            'vector': nutsflow.batcher.build_vector_batch
            'tensor': nutsflow.batcher.build_tensor_batch
            'one_hot': nutsflow.batcher.build_one_hot_batch
        :param args args: Arguments for column function, e.g. dtype
        :param kwargs kwargs: Keyword arguments for column function
        :return: instance of BuildBatch
        :rtype: BuildBatch
        """
        self.colspecs.append((col, name, True, args, kwargs))
        return self

    def output(self, col, name, *args, **kwargs):
        """
        Specify and add output columns for batch to create

        :param int col: column of the sample to extract and to create a
            batch output column from.
        :param string name: Name of the column function to apply to create
            a batch column, e.g. 'image'
            See the following functions for more details:
            'image': nutsflow.batcher.build_image_batch
            'number': nutsflow.batcher.build_number_batch
            'vector': nutsflow.batcher.build_vector_batch
            'tensor': nutsflow.batcher.build_tensor_batch
            'one_hot': nutsflow.batcher.build_one_hot_batch
        :param args args: Arguments for column function, e.g. dtype
        :param kwargs kwargs: Keyword arguments for column function
        :return: instance of BuildBatch
        :rtype: BuildBatch
        """
        self.colspecs.append((col, name, False, args, kwargs))
        return self

    def _batch_generator(self, iterable):
        """Return generator over batches for given iterable of samples"""
        while True:
            batchsamples = list(take(iterable, self.batchsize))
            if not batchsamples:
                break
            cols = list(zip(*batchsamples))  # flip rows to cols
            batch = [[], []]  # in, out columns of batch
            for colspec in self.colspecs:
                col, func, isinput, args, kwargs = colspec
                if func not in self.builder:
                    raise ValueError('Invalid builder: ' + func)
                coldata = self.builder[func](cols[col], *args, **kwargs)
                batch[0 if isinput else 1].append(coldata)
            if not batch[1]:  # no output (prediction phase)
                batch = batch[0]  # flatten and take only inputs
            yield batch

    def __rrshift__(self, iterable):
        """
        Convert samples in iterable into mini-batches.

        Structure of output depends on fmt function used. If None
        output is a list of np.arrays

        :param iterable iterable: Iterable over samples.
        :return: Mini-batches
        :rtype: list of np.array if fmt=None
        """
        batch_gen = self._batch_generator(iter(iterable))
        if self.prefetch:
            batch_gen = PrefetchIterator(batch_gen, self.prefetch)
        return batch_gen
@nut_function
def Mixup(batch, alpha):
    """
    Mixup produces random interpolations between data and labels.

    Usage:
    ... >> BuildBatch() >> Mixup(0.1) >> network.train() >> ...

    Implementation based on the following paper:
    mixup: Beyond Empirical Risk Minimization
    https://arxiv.org/abs/1710.09412

    :param list batch: Batch consisting of list of input data and list of
        output data, where data must be numeric, e.g. images and
        one-hot-encoded class labels that can be interpolated between.
    :param float alpha: Control parameter for beta distribution the
        interpolation factors are sampled from. Range: [0,...,1]
        For alpha <= 0 no mixup is performed.
    :return:
    """
    if alpha <= 0:  # mixup disabled: pass batch through unchanged
        return batch

    # Random pairing of rows within the batch via a shuffled index vector.
    perm = np.arange(len(batch[0][0]))
    np.random.shuffle(perm)
    # One interpolation factor per batch, drawn from Beta(alpha, alpha).
    lam = np.random.beta(alpha, alpha)

    def interpolate(data):
        return lam * data + (1 - lam) * data[perm]

    mixed_inputs = [interpolate(x) for x in batch[0]]
    mixed_outputs = [interpolate(y) for y in batch[1]]
    return [mixed_inputs, mixed_outputs]
| 13,350 | 34.889785 | 79 | py |
nuts-ml | nuts-ml-master/nutsml/network.py | """
.. module:: network
:synopsis: Wrapper around other network APIs such as Lasagne, Keras and
Pytorch to enable usage within nuts-flow/ml.
For instance, with a wrapped network one can write:
samples >> build_batch >> network.train() >> log_loss >> Consume()
"""
from __future__ import print_function
import numpy as np
from nutsflow.common import itemize
from nutsflow import (nut_processor, nut_sink, Collect, Map,
Flatten, Get)
@nut_processor
def TrainValNut(batches, func, **kwargs):
    """
    batches >> TrainValNut(func, **kwargs)

    Nut that feeds every batch to a network training or validation function
    and yields the function's result.

    :param iterable over batches batches: Batches to train/validate.
    :param function func: Training or validation function of network.
    :param kwargs kwargs: Keyword arguments passed on to function.
    :return: Result(s) of training/validation function, e.g. loss, accuracy, ...
    :rtype: float or array/tuple of floats
    """
    for one_batch in batches:
        yield func(*one_batch, **kwargs)
@nut_processor
def PredictNut(batches, func, flatten=True):
    """
    batches >> PredictNut(func)

    Nut that runs a prediction function over batches.

    :param iterable over batches batches: Batches to create predictions for.
    :param function func: Prediction function
    :param bool flatten: True: yield individual predictions instead of
        whole prediction batches.
    :return: Result(s) of prediction
    :rtype: typically array with class probabilities (softmax vector)
    """
    for one_batch in batches:
        predictions = func(one_batch)
        if not flatten:
            yield predictions
        else:
            for single_pred in predictions:
                yield single_pred
@nut_sink
def EvalNut(batches, network, metrics, compute, predcol=None):
    """
    batches >> EvalNut(network, metrics)

    Create nut to evaluate network performance for given metrics.
    Returned when network.evaluate() is called.

    :param iterable over batches batches: Batches to evaluate
    :param nutsml.Network network: Network to compute predictions with.
    :param list of functions metrics: List of functions that compute
       some metric, e.g. accuracy, F1, kappa-score.
       Each metric function must take vectors with true and
       predicted  classes/probabilities and must compute the
       metric over the entire input (not per sample/mini-batch).
    :param function compute: Function of the form f(metric, targets, preds)
       that computes the given metric (e.g. mean accuracy) for the given
       targets and predictions.
    :param int|None predcol: Index of column in prediction to extract
       for evaluation. If None a single prediction output is expected.
    :return: Result(s) of evaluation, e.g. accuracy, precision, ...
    :rtype: float or tuple of floats if there is more than one metric
    """
    targets = []

    def accumulate(batch):
        # Collect targets as a side effect while the inputs flow on to
        # the network's prediction nut.
        inputs, outputs = batch
        target = outputs[0] if isinstance(outputs, list) else outputs
        targets.extend(target)
        return inputs

    preds = (batches >> Map(accumulate) >> network.predict(flatten=False) >>
             Get(predcol) >> Flatten() >> Collect())

    targets, preds = np.vstack(targets), np.vstack(preds)
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (same float64 dtype).
    targets = targets.astype(float)
    results = tuple(compute(m, targets, preds) for m in metrics)
    return results if len(results) > 1 else results[0]
class Network(object):
    """
    Abstract base class for networks. Allows to wrap existing network APIs
    such as Lasagne, Keras or Pytorch into an API that enables direct usage
    of the network as a Nut in a nuts flow.
    """

    def __init__(self, weightspath):
        """
        Constructs base wrapper for networks.

        :param string weightspath: Filepath where network weights are saved to
          and loaded from.
        """
        self.weightspath = weightspath
        # Score of the best network saved so far; None until save_best()
        # is called for the first time.
        self.best_score = None

    def _weightspath(self, weightspath):
        """
        Return given weightspath if not None else return self.weightspath.

        :param string|None weightspath: Path to network weights or None.
        :return: Return weightspath
        """
        return self.weightspath if weightspath is None else weightspath

    def train(self):
        """
        Train network

        >>> train_losses = samples >> batcher >> network.train() >> Collect()  # doctest: +SKIP

        :return: Typically returns training loss per batch.
        """
        raise NotImplementedError('Implement train()!')

    def validate(self):
        """
        Validate network

        >>> val_losses = samples >> batcher >> network.validate() >> Collect()  # doctest: +SKIP

        :return: Typically returns validation loss per batch.
        """
        raise NotImplementedError('Implement validate()!')

    def predict(self, flatten=True):
        """
        Get network predictions

        >>> predictions = samples >> batcher >> network.predict() >> Collect()  # doctest: +SKIP

        :param bool flatten: True: return individual predictions instead
          of batched prediction
        :return: Typically returns softmax class probabilities.
        :rtype: ndarray
        """
        raise NotImplementedError('Implement predict()!')

    def evaluate(self, metrics, predcol=None, targetcol=-1):
        """
        Evaluate performance of network for given metrices

        >>> acc, f1 = samples >> batcher >> network.evaluate([accuracy, f1_score])  # doctest: +SKIP

        :param list metric: List of metrics. See EvalNut for details.
        :param int|None predcol: Index of column in prediction to extract
          for evaluation. If None a single prediction output is expected.
        :param int targetcol: Index of batch column that contain targets.
        :return: Result for each metric as a tuple or a single float if
          there is only one metric.
        """
        raise NotImplementedError('Implement evaluate()!')

    def save_best(self, score, isloss=True):
        """
        Save weights of best network

        :param float score: Score of the network, e.g. loss, accuracy
        :param bool isloss: True means lower score is better, e.g. loss
          and the network with the lower score is saved.
        """
        # BUGFIX: compare against None explicitly. The old truthiness test
        # ('not self.best_score') treated a legitimate best score of 0.0 as
        # 'no score yet' and overwrote the best weights with a worse model.
        if (self.best_score is None or
                (isloss is True and score <= self.best_score) or
                (isloss is False and score >= self.best_score)):
            self.best_score = score
            self.save_weights()

    def save_weights(self, weightspath=None):
        """
        Save network weights.

        | network.save_weights()

        :param string weightspath: Path to network weights.
          self.weightspath is used if weightspath is None.
        """
        raise NotImplementedError('Implement save_weights()!')

    def load_weights(self, weightspath=None):
        """
        Load network weights.

        | network.load_weights()

        :param string weightspath: Path to network weights.
          self.weightspath is used if weightspath is None.
        """
        raise NotImplementedError('Implement load_weights()!')

    def print_layers(self):
        """Print description of the network layers"""
        raise NotImplementedError('Implement print_layers()!')
class LasagneNetwork(Network): # pragma no cover
    """
    Wrapper for Lasagne models: https://lasagne.readthedocs.io/en/latest/
    """
    def __init__(self, out_layer, train_fn, val_fn, pred_fn,
                 weightspath='weights_lasagne_net.npz'):
        """
        Construct wrapper around Lasagne network.

        :param Lasagne layer out_layer: Output layer of Lasagne network.
        :param Theano function train_fn: Training function
        :param Theano function val_fn: Validation function
        :param Theano function pred_fn: Prediction function
        :param string weightspath: Filepath to save/load model weights.
        """
        Network.__init__(self, weightspath)
        self.out_layer = out_layer
        self.train_fn = train_fn
        self.val_fn = val_fn
        self.pred_fn = pred_fn
    @staticmethod
    def _layers(layer, ret_input=False):
        """Return network layers. InputLayer is returned if ret_input==True."""
        # Walk the network back-to-front by following input_layer links.
        while hasattr(layer, 'input_layer'):
            yield layer
            layer = layer.input_layer
        if ret_input:
            yield layer
    @staticmethod
    def _get_named_params(network):
        """Return layer parameters and names"""
        # Names are synthetic '<layer>_<param>' indices so that weights can
        # be matched up again on load (see save_weights/load_weights).
        for l_num, layer in enumerate(LasagneNetwork._layers(network)):
            for p_num, param in enumerate(layer.get_params()):
                name = '{}_{}'.format(l_num, p_num)
                yield name, param
    def train(self, **kwargs):
        """Return nut that trains the network, yielding per-batch loss."""
        return TrainValNut(self.train_fn, **kwargs)
    def validate(self, **kwargs):
        """Return nut that validates the network, yielding per-batch loss."""
        return TrainValNut(self.val_fn, **kwargs)
    def predict(self, flatten=True):
        """Return nut that yields network predictions."""
        return PredictNut(self.pred_fn, flatten)
    def evaluate(self, metrics, predcol=None):
        """Return nut that computes the given metrics over all batches."""
        def compute(metric, targets, preds):
            # Theano metrics return symbolic results; eval() materializes them.
            result = metric(targets, preds)
            return result.eval() if hasattr(result, 'eval') else result
        return EvalNut(self, metrics, compute, predcol)
    def save_weights(self, weightspath=None):
        """Save network parameters as a compressed .npz archive."""
        weightspath = super(LasagneNetwork, self)._weightspath(weightspath)
        weights = {name: p.get_value() for name, p in
                   LasagneNetwork._get_named_params(self.out_layer)}
        np.savez_compressed(weightspath, **weights)
    def load_weights(self, weightspath=None):
        """Load network parameters saved by save_weights()."""
        weightspath = super(LasagneNetwork, self)._weightspath(weightspath)
        weights = np.load(weightspath)
        for name, param in LasagneNetwork._get_named_params(self.out_layer):
            param.set_value(weights[name])
    def print_layers(self):
        """Print name, output shape and key attributes of each layer."""
        import lasagne as la
        layers = list(LasagneNetwork._layers(self.out_layer, ret_input=True))
        for i, layer in enumerate(reversed(layers)):
            name = layer.__class__.__name__
            shape = la.layers.get_output_shape(layer)
            print('{:3d} {:30s} {}'.format(i, name, shape), end=' ')
            # Filter/pool sizes are printed with a '//' separator before the
            # stride value (printed further below, if present).
            if hasattr(layer, 'filter_size'):
                print('{}'.format(layer.filter_size[0]), end='//')
            elif hasattr(layer, 'pool_size'):
                is_int = isinstance(layer.pool_size, int)
                size = layer.pool_size if is_int else layer.pool_size[0]
                print('{}'.format(size), end='//')
            # 'p' is presumably the dropout probability -- verify with Lasagne.
            if hasattr(layer, 'p'):
                print(' [{:.2f}]'.format(layer.p), end='')
            if hasattr(layer, 'stride'):
                print('{}'.format(layer.stride[0]), end='')
            if hasattr(layer, 'learning_rate_scale'):
                if layer.learning_rate_scale != 1.0:
                    lr_scale = layer.learning_rate_scale
                    print(' [lr_scale={:.2f}]'.format(lr_scale), end='')
            if hasattr(layer, 'params'):
                # '[NT]' marks parameters that are not trainable.
                for param in layer.params:
                    if 'trainable' not in layer.params[param]:
                        print(' [NT]', end='')
            print()
class KerasNetwork(Network): # pragma no cover
    """
    Wrapper for Keras models: https://keras.io/
    """

    def __init__(self, model, weightspath='weights_keras_net.hd5'):
        """
        Construct wrapper around Keras model.

        :param Keras model model: Keras model to wrap. See
           https://keras.io/models/sequential/
           https://keras.io/models/model/
        :param string weightspath: Filepath to save/load model weights.
        """
        Network.__init__(self, weightspath)
        self.model = model

    # Since Keras with tensorflow 2.x the function train_on_batch()
    # does not accept a batch format of [[inputs],[outputs]] anymore,
    # while other similar function such as test_on_batch, predict_on_batch
    # are still fine with it. Therefore only fixing for train_on_batch
    # where sublist are removed if inputs and/or outputs are single items.
    def _train_on_batch(self, x_batches, y_batches, **kwargs):
        """Unwrap single-item in/out lists and delegate to train_on_batch."""
        x_batches, y_batches = itemize(x_batches), itemize(y_batches)
        # BUGFIX: forward keyword arguments as keywords. Passing the kwargs
        # dict positionally bound it to train_on_batch's next positional
        # parameter (sample_weight), corrupting the call.
        return self.model.train_on_batch(x_batches, y_batches, **kwargs)

    def train(self, **kwargs):
        """Return nut that trains the model, yielding per-batch results."""
        return TrainValNut(self._train_on_batch, **kwargs)

    def validate(self, **kwargs):
        """Return nut that validates the model, yielding per-batch results."""
        return TrainValNut(self.model.test_on_batch, **kwargs)

    def predict(self, flatten=True):
        """Return nut that yields model predictions."""
        return PredictNut(self.model.predict_on_batch, flatten)

    def evaluate(self, metrics, predcol=None):
        """Return nut that computes the given metrics over all batches."""
        def compute(metric, targets, preds):
            # Keras metrics return tensors; .numpy() converts, and vector
            # results (per-sample metrics) are averaged to a scalar.
            result = metric(targets, preds).numpy()
            is_vector = hasattr(result, '__iter__')
            return float(np.mean(result) if is_vector else result)
        return EvalNut(self, metrics, compute, predcol)

    def save_weights(self, weightspath=None):
        """Save model weights; uses self.weightspath if path is None."""
        weightspath = super(KerasNetwork, self)._weightspath(weightspath)
        self.model.save_weights(weightspath)

    def load_weights(self, weightspath=None):
        """Load model weights; uses self.weightspath if path is None."""
        weightspath = super(KerasNetwork, self)._weightspath(weightspath)
        self.model.load_weights(weightspath)

    def print_layers(self):
        """Print Keras model summary."""
        self.model.summary()
class PytorchNetwork(Network): # pragma no cover
    """
    Wrapper for Pytorch models:
    https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html
    """
    def __init__(self, model, weightspath='weights_pytorch_net.pt'):
        """
        Construct wrapper around Pytorch model.

        :param Pytorch model model: Pytorch model to wrap.
          model needs to have three attributes:
          | model.device:, e.g 'cuda:0' or 'cpu'
          | model.optimizer: e.g. torch.optim.SGD
          | model.losses: (list of) loss functions, e.g. F.cross_entropy
        :param string weightspath: Filepath to save/load model weights.
        """
        Network.__init__(self, weightspath)
        assert hasattr(model, 'device')
        assert hasattr(model, 'optimizer')
        assert hasattr(model, 'losses')
        self.model = model
        model.to(model.device)  # move model parameters to the target device
    def _to_tensor(self, batches, flatten):
        """
        Convert batches into Pytorch tensors.

        :param list|ndarray batches: Numpy array or list of arrays.
        :param bool flatten: If true and batch contains only one column
           return single tensor instead of list of tensors.
        :return: List of batches as PyTorch tensors or a single tensor
        :rtype: [tensors] or tensor
        """
        import torch
        T = lambda b: torch.as_tensor(b, device=self.model.device)
        batches = self._to_list(batches)
        # String columns cannot be converted to tensors and are dropped;
        # presumably these carry sample ids or filenames -- verify in caller.
        tensors = [T(b) for b in batches if not isinstance(b, str)]
        if flatten and len(tensors) == 1:
            return tensors[0]
        return tensors
    def _to_list(self, x):
        """
        Wraps x in a list if it is not already a list.

        :param object x: Any object.
        :return: x wrapped in list
        :rtype: list
        """
        return x if isinstance(x, list) else [x]
    def _train_batch(self, x_batches, y_batches, *args):
        """
        Performs a single gradient step on a batch.

        :param ndarray|[ndarray] x_batches: Input batch or list of batches
        :param ndarray|[ndarray] y_batches: Output batch or list of batches
        :return: losses. If there is multiple outputs then a list with
           the losses for each output and the mean over these losses
           is returned. Otherwise a single float with the loss is returned.
        :rtype: float|[float]
        """
        x_tensors = self._to_tensor(x_batches, True)
        y_tensors = self._to_tensor(y_batches, False)
        model = self.model
        model.optimizer.zero_grad()
        y_preds = self._to_list(model(x_tensors, *args))
        loss_fns = self._to_list(model.losses)
        losses = []
        # One backward() per loss; gradients accumulate across losses and
        # a single optimizer step is taken afterwards.
        for loss_fn, y_pred, y_true in zip(loss_fns, y_preds, y_tensors):
            loss = loss_fn(y_pred, y_true)
            loss.backward()
            losses.append(loss.item())
        model.optimizer.step()
        # For multiple losses: [mean, loss1, loss2, ...]; else a single float.
        return [np.mean(losses)] + losses if len(losses) > 1 else losses[0]
    def _validate_batch(self, x_batches, y_batches, *args):
        """
        Performs a forward step to compute losses.

        :param [ndarray] x_batches: List of input batches
        :param [ndarray] y_batches: List of output/target batches
        :return: losses. If there is multiple outputs then a list with
           the losses for each output and the mean over these losses
           is returned. Otherwise a single float with the loss is returned.
        :rtype: float|[float]
        """
        import torch
        losses = []
        with torch.no_grad():  # no gradients needed for validation
            x_tensors = self._to_tensor(x_batches, True)
            y_tensors = self._to_tensor(y_batches, False)
            model = self.model
            y_preds = self._to_list(model(x_tensors, *args))
            loss_fns = self._to_list(model.losses)
            for loss_fn, y_pred, y_true in zip(loss_fns, y_preds, y_tensors):
                loss = loss_fn(y_pred, y_true)
                losses.append(loss.item())
        return [np.mean(losses)] + losses if len(losses) > 1 else losses[0]
    def _predict_batch(self, x_batches, *args):
        """
        Performs a forward step to compute output.

        :param [ndarray] x_batches: List of input batches
        :return: network outputs
        :rtype: list
        """
        import torch
        with torch.no_grad():
            x_tensors = self._to_tensor(x_batches, True)
            y_preds = self.model(x_tensors, *args)
            # Move predictions to CPU and convert to numpy for downstream nuts.
            return [p.cpu().numpy() for p in y_preds]
    def train(self, **kwargs):
        """Return nut that trains the model (switches model to train mode)."""
        self.model.train()
        return TrainValNut(self._train_batch, **kwargs)
    def validate(self, **kwargs):
        """Return nut that validates the model (switches model to eval mode)."""
        self.model.eval()
        return TrainValNut(self._validate_batch, **kwargs)
    def predict(self, flatten=True):
        """Return nut that yields model predictions (eval mode)."""
        self.model.eval()
        return PredictNut(self._predict_batch, flatten)
    def evaluate(self, metrics, predcol=None):
        """Return nut that computes the given metrics over all batches."""
        def compute(metric, targets, preds):
            result = metric(targets, preds)
            # Torch scalars expose .item(); plain floats pass through.
            return result.item() if hasattr(result, 'item') else result
        self.model.eval()
        return EvalNut(self, metrics, compute, predcol)
    def save_weights(self, weightspath=None):
        """Save model state_dict; uses self.weightspath if path is None."""
        import torch
        weightspath = super(PytorchNetwork, self)._weightspath(weightspath)
        torch.save(self.model.state_dict(), weightspath)
    def load_weights(self, weightspath=None):
        """Load model state_dict; uses self.weightspath if path is None."""
        import torch
        weightspath = super(PytorchNetwork, self)._weightspath(weightspath)
        self.model.load_state_dict(torch.load(weightspath))
    def print_layers(self, input_shape=None):
        """
        Print network architecture (and layer dimensions).

        :param tuple|None input_shape: (C, H, W) or None
          If None, layer dimensions and param numbers are not printed.
        """
        if input_shape:
            from torchsummary import summary
            device = self.model.device[:4] # remove GPU id, e.g. cuda:0
            summary(self.model, input_shape, device=device)
        else:
            print(str(self.model))
| 19,583 | 36.302857 | 100 | py |
nuts-ml | nuts-ml-master/nutsml/viewer.py | """
.. module:: viewer
:synopsis: Viewing of sample data
"""
from __future__ import print_function
from __future__ import absolute_import
import time
import numpy as np
import nutsml.imageutil as iu
from six.moves import range
from nutsflow import NutFunction, nut_function
from nutsflow.common import as_tuple, as_set, stype
from matplotlib import pyplot as plt
class ViewImage(NutFunction): # pragma no coverage
    """
    Display images in window.
    """

    def __init__(self, imgcols, layout=(1, None), figsize=None,
                 pause=0.0001, axis_off=False, labels_off=False, titles=None,
                 every_sec=0, every_n=0, **imargs):
        """
        iterable >> ViewImage(imgcols, layout=(1, None), figsize=None,
                              **plotargs)

        | Images should be numpy arrays in one of the following formats:
        | MxN - luminance (grayscale, float array only)
        | MxNx3 - RGB (float or uint8 array)
        | MxNx4 - RGBA (float or uint8 array)

        Shapes with single-dimension axis are supported but not encouraged,
        e.g. MxNx1 will be converted to MxN.
        See
        http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imshow

        >>> from nutsflow import Consume
        >>> from nutsml import ReadImage
        >>> imagepath = 'tests/data/img_formats/*.jpg'
        >>> samples = [(1, 'nut_color'), (2, 'nut_grayscale')]
        >>> read_image = ReadImage(1, imagepath)
        >>> samples >> read_image >> ViewImage(1) >> Consume() # doctest: +SKIP

        >>> view_gray = ViewImage(1, cmap='gray')
        >>> samples >> read_image >> view_gray >> Consume() # doctest: +SKIP

        :param int|tuple|None imgcols: Index or tuple of indices of data columns
           containing images (ndarray). Use None if images are provided
           directly, e.g. [img1, img2, ...] >> ViewImage(None) >> Consume()
        :param tuple layout: Rows and columns of the viewer layout., e.g.
           a layout of (2,3) means that 6 images in the data are
           arranged in 2 rows and 3 columns.
           Number of cols can be None is then derived from imgcols
        :param tuple figsize: Figure size in inch.
        :param float pause: Waiting time in seconds after each plot.
           Pressing a key skips the waiting time.
        :param bool axis_off: Enable or disable display of figure axes.
        :param bool lables_off: Enable or disable display of axes labels.
        :param float every_sec: View every given second, e.g. to print
            every 2.5 sec every_sec = 2.5
        :param int every_n: View every n-th call.
        :param kwargs imargs: Keyword arguments passed on to matplotlib's
           imshow() function, e.g. cmap='gray'. See
           http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imshow
        """
        imgcols = (None,) if imgcols is None else as_tuple(imgcols)
        r, c, n = layout[0], layout[1], len(imgcols)
        if c is None:
            c = n
        if n != r * c:
            raise ValueError("Number of images and layout don't match!")
        fig = plt.figure(figsize=figsize)
        # BUGFIX: FigureCanvas.set_window_title() was deprecated in
        # Matplotlib 3.4 and removed in 3.6; use the figure manager instead
        # and fall back for older Matplotlib versions.
        try:
            fig.canvas.manager.set_window_title('ViewImage')
        except AttributeError: # Matplotlib < 3.4
            fig.canvas.set_window_title('ViewImage')
        self.axes = [fig.add_subplot(r, c, i + 1) for i in range(n)]
        self.axis_off = axis_off
        self.labels_off = labels_off
        self.titles = titles
        self.imgcols = imgcols
        self.pause = pause
        self.cnt = 0  # number of calls since last view
        self.time = time.time()  # time of last view
        self.every_sec = every_sec
        self.every_n = every_n
        self.imargs = imargs

    def __delta_sec(self):
        """Return time in seconds (float) consumed between prints so far"""
        return time.time() - self.time

    def __should_view(self):
        """Return true if data should be viewed"""
        self.cnt += 1
        return (self.cnt >= self.every_n and
                self.__delta_sec() >= self.every_sec)

    def __call__(self, data):
        """
        View the images in data

        :param tuple data: Data with images at imgcols.
        :return: unchanged input data
        :rtype: tuple
        """
        if not self.__should_view():
            return data

        self.cnt = 0
        self.time = time.time()

        for i, (imgcol, ax) in enumerate(zip(self.imgcols, self.axes)):
            ax.clear()
            if self.axis_off:
                ax.set_axis_off()
            if self.labels_off:
                ax.get_xaxis().set_visible(False)
                ax.get_yaxis().set_visible(False)
            if self.titles:
                ax.set_title(self.titles[i])
            img = data if imgcol is None else data[imgcol]
            img = np.squeeze(img)  # drop single-dim axes, e.g. MxNx1 -> MxN
            ax.imshow(img, **self.imargs)
            ax.figure.canvas.draw()
        plt.waitforbuttonpress(timeout=self.pause)  # or plt.pause(self.pause)

        return data
class ViewImageAnnotation(NutFunction): # pragma no coverage
    """
    Display images and annotation in window.
    """

    # Default visual properties for text and shape annotation; can be
    # overridden per-key via **annoargs.
    TEXTPROP = {'edgecolor': 'k', 'backgroundcolor': (1, 1, 1, 0.5)}
    SHAPEPROP = {'edgecolor': 'y', 'facecolor': 'none', 'linewidth': 1}

    def __init__(self, imgcol, annocols, figsize=None,
                 pause=0.0001, interpolation=None, **annoargs):
        """
        iterable >> ViewImageAnnotation(imgcol, annocols, figsize=None,
                                        pause, interpolation, **annoargs)

        | Images must be numpy arrays in one of the following formats:
        | MxN - luminance (grayscale, float array only)
        | MxNx3 - RGB (float or uint8 array)
        | MxNx4 - RGBA (float or uint8 array)
        | See
        | http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imshow

        Shapes with single-dimension axis are supported but not encouraged,
        e.g. MxNx1 will be converted to MxN.

        :param int imgcol: Index of data column that contains the image
        :param int|tuple annocols: Index or tuple of indices specifying the data
           column(s) that contain annotation (labels, or geometry)
        :param tuple figsize: Figure size in inch.
        :param float pause: Waiting time in seconds after each plot.
           Pressing a key skips the waiting time.
        :param string interpolation: Interpolation for imshow, e.g.
            'nearest', 'bilinear', 'bicubic'. for details see
            http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imshow
        :param kwargs annoargs: Keyword arguments for visual properties of
            annotation, e.g. edgecolor='y', linewidth=1
        """
        fig = plt.figure(figsize=figsize)
        # BUGFIX: FigureCanvas.set_window_title() was deprecated in
        # Matplotlib 3.4 and removed in 3.6; use the figure manager instead
        # and fall back for older Matplotlib versions.
        try:
            fig.canvas.manager.set_window_title('ViewImageAnnotation')
        except AttributeError: # Matplotlib < 3.4
            fig.canvas.set_window_title('ViewImageAnnotation')
        self.axes = fig.add_subplot(111)
        self.imgcol = imgcol
        self.annocols = as_set(annocols)
        self.pause = pause
        self.interpolation = interpolation
        self.annoargs = annoargs

    def _shapeprops(self):
        """Return shape properties from kwargs or default value."""
        aa = ViewImageAnnotation.SHAPEPROP.copy()
        aa.update(self.annoargs)
        return aa

    def _textprop(self, key):
        """Return text property from kwargs or default value."""
        return self.annoargs.get(key, ViewImageAnnotation.TEXTPROP[key])

    def __call__(self, data):
        """
        View the image and its annotation

        :param tuple data: Data with image at imgcol and annotation at annocol.
        :return: unchanged input data
        :rtype: tuple
        """
        img = np.squeeze(data[self.imgcol])
        ax = self.axes
        ax.clear()
        ax.imshow(img, interpolation=self.interpolation)
        labelcol = 0.7  # vertical position factor for stacked text labels
        for acol in self.annocols:
            annos = data[acol]
            if isinstance(annos, (list, tuple)):
                # Geometric annotation: convert to matplotlib patches.
                props = self._shapeprops()
                for anno in iu.annotation2pltpatch(annos, **props):
                    ax.add_patch(anno)
            else:
                # Text annotation: font size scaled to the figure height.
                fs = ax.get_window_extent().height / 22
                p = img.shape[0] / 6
                x, y = p / 2, p * labelcol
                labelcol += 1
                ax.text(x, y, str(annos),
                        color=self._textprop('edgecolor'),
                        backgroundcolor=self._textprop('backgroundcolor'),
                        size=fs, family='monospace')
        ax.figure.canvas.draw()
        plt.waitforbuttonpress(timeout=self.pause)
        return data
| 8,520 | 37.382883 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/fileutil.py | """
.. module:: fileutil
:synopsis: File system utilities.
"""
import glob
import os
import os.path as op
import shutil
import uuid
TEMP_FOLDER = 'temp'
def create_filename(prefix='', ext=''):
    """
    Create a unique filename.

    :param str prefix: Prefix to add to filename.
    :param str ext: Extension to append to filename, e.g. 'jpg'
    :return: Unique filename.
    :rtype: str
    """
    name = prefix + str(uuid.uuid4())
    if ext:
        name += '.' + ext
    return name
def create_temp_filepath(prefix='', ext='', relative=True):
    """
    Create a temporary folder under :py:data:`TEMP_FOLDER`.

    If the folder already exists do nothing. Return relative (default) or
    absolute path to a temp file with a unique name.
    See related function :func:`.create_filename`.

    :param str prefix: Prefix to add to filename.
    :param str ext: Extension to append to filename, e.g. 'jpg'
    :param bool relative: True: return relative path, otherwise absolute path.
    :return: Path to file with unique name in temp folder.
    :rtype: str
    """
    create_folders(TEMP_FOLDER)  # ensure the temp folder exists
    path = op.join(TEMP_FOLDER, create_filename(prefix, ext))
    if relative:
        return path
    return op.abspath(path)
def create_folders(path, mode=0o777):
    """
    Create folder(s). Don't fail if already existing.

    See related functions :func:`.delete_folders` and :func:`.clear_folder`.

    :param str path: Path of folders to create, e.g. 'foo/bar'
    :param int mode: File creation mode, e.g. 0777
    """
    # exist_ok avoids the check-then-create race of the old
    # os.path.exists() + makedirs() pattern. Note: unlike the old code,
    # this raises FileExistsError if a non-directory occupies the path,
    # which surfaces misuse instead of silently ignoring it.
    os.makedirs(path, mode, exist_ok=True)
def delete_file(path):
    """
    Remove file at given path. Don't fail if non-existing.

    :param str path: Path to file to delete, e.g. 'foo/bar/file.txt'
    """
    file_is_there = os.path.exists(path)
    if file_is_there:
        os.remove(path)
def delete_folders(path):
    """
    Remove folder and sub-folders. Don't fail if non-existing or not empty.

    :param str path: Path of folders to delete, e.g. 'foo/bar'
    """
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
def delete_temp_data():
    """
    Delete the temp folder (:py:data:`TEMP_FOLDER`) with all its contents.
    """
    delete_folders(TEMP_FOLDER)
def clear_folder(path):
    """
    Remove all content (files and folders) within the specified folder.

    The folder itself is kept. Unlike the old glob('*') based version,
    hidden entries (names starting with '.') are removed as well, so the
    folder really is empty afterwards.

    :param str path: Path of folder to clear.
    """
    # Materialize the listing before deleting so we don't mutate the
    # directory while scanning it.
    for entry in list(os.scandir(path)):
        if entry.is_dir(follow_symlinks=False):
            shutil.rmtree(entry.path)
        else:
            # Regular files and symlinks (including symlinks to dirs).
            os.remove(entry.path)
def reader_filepath(sample, filename, pathfunc):
    """
    Construct a filepath from sample, filename and/or path function.

    Helper function used in ReadImage and ReadNumpy.

    :param tuple|list sample: E.g. ('nut_color', 1)
    :param filename: File id/name, e.g. 'nut_grayscale'
    :param string|function|None pathfunc: One of
        - a path string containing the wildcard '*', which is replaced
          by the given filename, e.g. 'tests/data/img_formats/*.jpg'
          for filename 'nut_grayscale' becomes
          'tests/data/img_formats/nut_grayscale.jpg',
        - a function mapping the sample to a filepath, e.g.
          lambda sample: 'tests/data/img_formats/{1}.jpg'.format(*sample),
        - None, in which case the filename itself is used as the filepath.
    :return: filepath
    """
    if isinstance(pathfunc, str):
        filepath = pathfunc.replace('*', filename)
    elif hasattr(pathfunc, '__call__'):
        filepath = pathfunc(sample)
    else:
        filepath = filename
    return filepath
nuts-ml | nuts-ml-master/nutsml/logger.py | """
.. module:: logger
:synopsis: Data logging
"""
import os
import numpy as np
from warnings import warn
from nutsflow import NutFunction
from nutsflow.common import as_tuple
class LogToFile(NutFunction):
    """
    Write data columns to a log file.
    """

    def __init__(self, filepath, cols=None, colnames=None, reset=True,
                 delimiter=','):
        """
        Construct logger.

        >>> from __future__ import print_function
        >>> from nutsflow import Consume
        >>> filepath = 'tests/data/temp_logfile.csv'
        >>> data = [[1, 2], [3, 4]]
        >>> with LogToFile(filepath) as logtofile:
        ...     data >> logtofile >> Consume()
        >>> print(open(filepath).read())
        1,2
        3,4
        <BLANKLINE>

        >>> logtofile = LogToFile(filepath, cols=(1, 0), colnames=['a', 'b'])
        >>> data >> logtofile >> Consume()
        >>> print(open(filepath).read())
        a,b
        2,1
        4,3
        <BLANKLINE>

        >>> logtofile.close()
        >>> logtofile.delete()

        :param string filepath: Path to file to write log to.
        :param int|tuple|None cols: Indices of columns of input data to write.
            None: write all columns
            int: only write the single given column
            tuple: list of column indices
        :param tuple|None colnames: Column names written as the first line.
            If None no column names are written.
        :param bool reset: True: truncate the log file when the logger is
            (re)created. False: append to an existing log file.
        :param str delimiter: Delimiter between columns in the log file.
        """
        self.filepath = filepath
        self.cols = cols
        self.reset = reset
        self.delim = delimiter
        mode = 'w' if reset else 'a'
        self.f = open(filepath, mode)
        if colnames:
            self._writerow(colnames)

    def _writerow(self, row):
        """Write one delimited row followed by a newline and flush."""
        line = self.delim.join(str(item) for item in row)
        self.f.write(line)
        self.f.write('\n')
        self.f.flush()

    def __call__(self, x):
        """
        Log x and return it unchanged.

        :param any x: Any type of data. Numpy arrays are converted to
            (possibly single-element) lists before writing.
        :return: Input unchanged.
        :rtype: Same as input
        """
        if isinstance(x, np.ndarray):
            row = x.tolist() if x.ndim else [x.item()]
        else:
            row = x
        if self.cols is not None:
            row = [row[i] for i in as_tuple(self.cols)]
        self._writerow(row)
        return x

    def delete(self):
        """Close and delete the log file."""
        self.close()
        os.remove(self.filepath)

    def close(self):
        """Close the underlying file handle."""
        self.f.close()

    def __enter__(self):
        """Implementation of context manager API"""
        return self

    def __exit__(self, *args):
        """Implementation of context manager API"""
        self.close()
class LogCols(LogToFile):
    # Deprecated alias kept for backward compatibility; behaves exactly
    # like LogToFile but emits a DeprecationWarning on construction.
    def __init__(self, filepath, cols=None, colnames=None, reset=True,
                 delimiter=','):
        """Deprecated: use :class:`.LogToFile` instead."""
        LogToFile.__init__(self, filepath, cols, colnames, reset, delimiter)
        warn('LogCols is deprecated. Use LogToFile!', DeprecationWarning)
| 3,320 | 28.389381 | 78 | py |
nuts-ml | nuts-ml-master/nutsml/datautil.py | """
.. module:: datautil
:synopsis: Utility functions for non-image data
"""
import random as rnd
import collections as cl
from six import iteritems
from nutsflow.common import as_set
def upsample(samples, labelcol, rand=None):
    """
    Up-sample a sample set to be class-balanced.

    Smaller classes are repeated until every class has as many samples as
    the largest class; the resulting collection is shuffled.

    Note: The example below uses rnd.Random(i) only to create a
    deterministic sequence of stratified samples. Usually the default
    (rand=None) is sufficient.

    >>> from __future__ import print_function
    >>> import random as rnd
    >>> samples = [('pos1', 1), ('pos2', 1), ('neg1', 0)]
    >>> for i in range(3):  # doctest: +SKIP
    ...     print(upsample(samples, 1, rand=rnd.Random(i)))
    [('neg1', 0), ('neg1', 0), ('pos1', 1), ('pos2', 1)]
    [('pos2', 1), ('neg1', 0), ('pos1', 1), ('neg1', 0)]
    [('neg1', 0), ('neg1', 0), ('pos1', 1), ('pos2', 1)]

    :param iterable samples: Iterable of samples where each sample has a
        label at a fixed position (labelcol). Labels can be any hashable
        type, e.g. int, str, bool
    :param int labelcol: Index of label in sample
    :param Random|None rand: Random number generator. If None,
        random.Random(None) is used.
    :return: Stratified sample set.
    :rtype: list of samples
    """
    rand = rnd.Random() if rand is None else rand
    groups, labelcnts = group_samples(samples, labelcol)
    max_cnt = max(labelcnts.values())
    stratified = []
    for group in groups.values():
        # repeat the group enough times, then cut to the target size
        repeated = group * int(max_cnt / len(group) + 1)
        stratified.extend(repeated[:max_cnt])
    rand.shuffle(stratified)
    return stratified
def random_downsample(samples, labelcol, rand=None, ordered=False):
    """
    Randomly down-sample a sample set to be class-balanced.

    Larger classes are randomly down-sampled to the size of the smallest
    class.

    Note: The example below uses StableRandom(i) only to create a
    deterministic sequence of stratified samples. Usually the default
    (rand=None) is sufficient. Do NOT use rnd.Random(0), which would
    produce the same subsample every time.

    >>> from __future__ import print_function
    >>> from nutsflow.common import StableRandom
    >>> samples = [('pos1', 1), ('pos2', 1), ('pos3', 1),
    ...            ('neg1', 0), ('neg2', 0)]
    >>> for i in range(3):
    ...     print(random_downsample(samples, 1, StableRandom(i), True))
    [('pos2', 1), ('pos3', 1), ('neg2', 0), ('neg1', 0)]
    [('pos2', 1), ('pos3', 1), ('neg2', 0), ('neg1', 0)]
    [('pos2', 1), ('pos1', 1), ('neg1', 0), ('neg2', 0)]

    :param iterable samples: Iterable of samples where each sample has a
        label at a fixed position (labelcol). Labels can be any hashable
        type, e.g. int, str, bool
    :param int labelcol: Index of label in sample
    :param Random|None rand: Random number generator. If None,
        random.Random(None) is used.
    :param bool ordered: True: samples are kept in order when downsampling.
    :return: Stratified sample set.
    :rtype: list of samples
    """
    rand = rnd.Random() if rand is None else rand
    groups, labelcnts = group_samples(samples, labelcol, ordered=ordered)
    min_cnt = min(labelcnts.values())
    downsampled = []
    for group in groups.values():
        downsampled.extend(rand.sample(group, min_cnt))
    return downsampled
def group_samples(samples, labelcol, ordered=False):
    """
    Return samples grouped by label, and the label counts.

    >>> samples = [('pos', 1), ('pos', 1), ('neg', 0)]
    >>> groups, labelcnts = group_samples(samples, 1, True)
    >>> groups
    OrderedDict([(1, [('pos', 1), ('pos', 1)]), (0, [('neg', 0)])])
    >>> labelcnts
    Counter({1: 2, 0: 1})

    :param iterable samples: Iterable of samples where each sample has a
        label at a fixed position (labelcol). May be a one-shot iterable
        (e.g. a generator); it is materialized internally because it must
        be traversed twice.
    :param int labelcol: Index of label in sample
    :param bool ordered: True: samples are kept in order when grouping.
    :return: (groups, labelcnts) where groups is a dict containing
        samples grouped by label, and labelcnts is a Counter dict
        containing label frequencies.
    :rtype: tuple(dict, Counter)
    """
    # Fix: the previous implementation iterated `samples` twice, which
    # silently produced empty groups for generator inputs.
    samples = list(samples)
    labelcnts = cl.Counter(s[labelcol] for s in samples)
    groups = group_by(samples, lambda s: s[labelcol], ordered=ordered)
    return groups, labelcnts
def group_by(elements, keyfunc, ordered=False):
    """
    Group elements using the given key function.

    >>> is_odd = lambda x: bool(x % 2)
    >>> numbers = [0, 1, 2, 3, 4]
    >>> group_by(numbers, is_odd, True)
    OrderedDict([(False, [0, 2, 4]), (True, [1, 3])])

    :param iterable elements: Any iterable
    :param function keyfunc: Function that returns the key to group by
    :param bool ordered: True: return OrderedDict else return dict
    :return: dictionary mapping each key returned by keyfunc to the list
        of elements with that key
    :rtype: dict|OrderedDict
    """
    groups = cl.OrderedDict() if ordered else {}
    for element in elements:
        groups.setdefault(keyfunc(element), []).append(element)
    return groups
def col_map(sample, columns, func, *args, **kwargs):
    """
    Map function to the given columns of a sample; keep other columns.

    >>> sample = (1, 2, 3)
    >>> add_n = lambda x, n: x + n
    >>> col_map(sample, 1, add_n, 10)
    (1, 12, 3)

    >>> col_map(sample, (0, 2), add_n, 10)
    (11, 2, 13)

    :param tuple|list sample: Sample
    :param int|tuple columns: Single or multiple column indices.
    :param function func: Function to map
    :param args args: Arguments passed on to function
    :param kwargs kwargs: Keyword arguments passed on to function
    :return: Sample with the function applied to the elements in the
        given columns; other elements are unchanged.
    """
    colset = as_set(columns)
    mapped = []
    for idx, elem in enumerate(sample):
        mapped.append(func(elem, *args, **kwargs) if idx in colset else elem)
    return tuple(mapped)
def shuffle_sublists(sublists, rand):
    """
    Shuffle each list within a list in place; the outer list keeps its order.

    >>> from nutsflow.common import StableRandom
    >>> rand = StableRandom(0)
    >>> sublists = [[1, 2, 3], [4, 5, 6, 7]]
    >>> shuffle_sublists(sublists, rand)
    >>> sublists
    [[1, 3, 2], [4, 5, 7, 6]]

    :param sublists: A list containing lists
    :param Random rand: A random number generator.
    """
    for inner in sublists:
        rand.shuffle(inner)
| 6,600 | 34.111702 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/transformer.py | """
.. module:: transformer
:synopsis: Data and image transformations
"""
from __future__ import print_function
import numpy as np
import os.path as path
import random as rnd
import nutsml.datautil as ut
import nutsml.imageutil as ni
from nutsflow import Nut, NutFunction, nut_processor
from nutsflow.common import as_tuple, as_set
# map transformation to specified sample columns
def map_transform(sample, imagecols, spec):
    """
    Apply one transformation to the given columns of a sample.

    :param tuple sample: Sample with images
    :param int|tuple imagecols: Indices of sample columns the
        transformation is applied to. Single index or tuple of indices.
    :param tuple spec: Transformation specification: either just the name
        of a registered transformation, or a tuple of
        (name, args, kwargs) for it.
    :return: Sample with the transformation applied; columns not listed
        in imagecols remain unchanged.
    :rtype: tuple
    """
    cols = as_tuple(imagecols)
    if isinstance(spec, tuple):
        name, args, kwargs = spec
    else:
        name, args, kwargs = spec, [], {}
    transform = TransformImage.transformations[name]
    return tuple(transform(elem, *args, **kwargs) if idx in cols else elem
                 for idx, elem in enumerate(sample))
class TransformImage(NutFunction):
    """
    Apply a chain of transformations to images within samples.
    """

    # Registry mapping transformation names to imageutil functions.
    transformations = {
        'identical': ni.identical,
        'rerange': ni.rerange,
        'crop': ni.crop,
        'crop_square': ni.crop_square,
        'crop_center': ni.crop_center,
        'normalize_histo': ni.normalize_histo,
        'gray2rgb': ni.gray2rgb,
        'rgb2gray': ni.rgb2gray,
        'resize': ni.resize,
        'translate': ni.translate,
        'rotate': ni.rotate,
        'contrast': ni.change_contrast,
        'sharpness': ni.change_sharpness,
        'brightness': ni.change_brightness,
        'color': ni.change_color,
        'edges': ni.extract_edges,
        'fliplr': ni.fliplr,
        'flipud': ni.flipud,
        'shear': ni.shear,
        'elastic': ni.distort_elastic,
        'occlude': ni.occlude,
    }

    def __init__(self, imagecols):
        """
        samples >> TransformImage(imagecols)

        Images are expected to be numpy arrays of shape (h, w, c) or
        (h, w), range [0,255] and dtype uint8. Transformations should
        produce images with the same properties.

        >>> transform = TransformImage(0).by('resize', 10, 20)

        :param int|tuple imagecols: Indices of sample columns the
            transformations are applied to. Single index or tuple of
            indices.
        """
        self.imagecols = as_tuple(imagecols)
        self.transspec = []

    def by(self, name, *args, **kwargs):
        """
        Add a transformation to the chain; returns self for chaining.

        >>> transform = TransformImage(0).by('resize', 10, 20).by('fliplr')

        | Available transformations:
        | :func:`identical <nutsml.imageutil.identical>`
        | :func:`rerange <nutsml.imageutil.rerange>`
          (old_min, old_max, new_min, new_max, dtype)
        | :func:`crop <nutsml.imageutil.crop>` (x1, y1, x2, y2)
        | :func:`crop_center <nutsml.imageutil.crop_center>` (w, h)
        | :func:`crop_square <nutsml.imageutil.crop_square>`
        | :func:`normalize_histo <nutsml.imageutil.normalize_histo>` (gamma)
        | :func:`rgb2gray <nutsml.imageutil.rgb2gray>`
        | :func:`gray2rgb <nutsml.imageutil.gray2rgb>`
        | :func:`resize <nutsml.imageutil.resize>` (w, h)
        | :func:`translate <nutsml.imageutil.translate>` (dx, dy)
        | :func:`rotate <nutsml.imageutil.rotate>` (angle)
        | :func:`contrast <nutsml.imageutil.contrast>` (contrast)
        | :func:`sharpness <nutsml.imageutil.sharpness>` (sharpness)
        | :func:`brightness <nutsml.imageutil.brightness>` (brightness)
        | :func:`color <nutsml.imageutil.color>` (color)
        | :func:`edges <nutsml.imageutil.extract_edges>` (sigma)
        | :func:`fliplr <nutsml.imageutil.fliplr>`
        | :func:`flipud <nutsml.imageutil.flipud>`
        | :func:`shear <nutsml.imageutil.shear>` (shear_factor)
        | :func:`elastic <nutsml.imageutil.distort_elastic>`
          (smooth, scale, seed)
        | :func:`occlude <nutsml.imageutil.occlude>` (x, y, w, h)

        :param string name: Name of the transformation, e.g. 'resize'
        :param args args: Positional arguments for the transformation,
            e.g. width and height for resize.
        :param kwargs kwargs: Keyword arguments for the transformation.
        :return: self, with the transformation appended
        :rtype: TransformImage
        """
        self.transspec.append((name, args, kwargs))
        return self

    @classmethod
    def register(cls, name, transformation):
        """
        Register a new transformation function under the given name.

        >>> brighter = lambda image, c: image * c
        >>> TransformImage.register('brighter', brighter)
        >>> transform = TransformImage(0).by('brighter', 1.5)

        :param string name: Name of transformation
        :param function transformation: Transformation function.
        """
        cls.transformations[name] = transformation

    def __call__(self, sample):
        """
        Apply all registered transformations, in order, to the sample.

        :param tuple sample: Sample
        :return: Transformed sample
        :rtype: tuple
        """
        for spec in self.transspec:
            sample = map_transform(sample, self.imagecols, spec)
        return sample
class AugmentImage(Nut):
    """
    Randomly augment images within samples.
    """

    def __init__(self, imagecols, rand=None):
        """
        samples >> AugmentImage(imagecols, rand=None)

        Randomly augment images, e.g. by changing contrast. Every
        transformation of TransformImage can serve as an augmentation.
        The same (random) augmentation is applied to all images listed in
        imagecols — this keeps, for instance, an image and its mask
        rotated by the same angle.

        >>> augment_img = (AugmentImage(0)
        ...     .by('identical', 1.0)
        ...     .by('brightness', 0.5, [0.7, 1.3])
        ...     .by('contrast', 0.5, [0.7, 1.3])
        ...     .by('fliplr', 0.5)
        ...     .by('flipud', 0.5)
        ...     .by('occlude', 0.5, [0, 1], [0, 1], [0.1, 0.5], [0.1, 0.5])
        ...     .by('rotate', 0.5, [0, 360]))

        See :func:`nutsml.transformer.TransformImage.by` for the full
        list of available augmentations.

        Each augmentation is applied independently (unlike
        transformations, which are applied in sequence and yield one
        image), so one input sample can yield several augmented samples.
        Augmenters can however be chained to combine augmentations,
        e.g. contrast/brightness with rotation/shearing:

        >>> augment1 = (AugmentImage(0)
        ...     .by('brightness', 0.5, [0.7, 1.3])
        ...     .by('contrast', 0.5, [0.7, 1.3]))
        >>> augment2 = (AugmentImage(0)
        ...     .by('shear', 0.5, [0, 0.2])
        ...     .by('rotate', 0.5, [0, 360]))
        >>> samples >> augment1 >> augment2 >> Consume()  # doctest: +SKIP

        :param int|tuple imagecols: Indices of sample columns that
            contain images.
        :param Random|None rand: Random number generator. If None,
            random.Random() is used.
        """
        self.imagecols = imagecols
        self.rand = rand if rand is not None else rnd.Random()
        self.augmentations = []

    def by(self, name, prob, *ranges, **kwargs):
        """
        Add an augmentation; returns self for chaining.

        >>> augment_img = AugmentImage(0).by('rotate', 0.5, [0, 360])

        :param string name: Name of the augmentation/transformation,
            e.g. 'rotate'
        :param float|int prob:
            If prob <= 1: probability [0,1] that the augmentation is applied
            If prob > 1: number of times the augmentation is applied.
        :param list of lists ranges: One [min, max] range per argument of
            the augmentation, e.g. [0, 360] degrees; parameter values are
            sampled uniformly from these ranges.
        :param kwargs kwargs: Keyword arguments passed on to the
            augmentation.
        :return: self, with the augmentation appended
        :rtype: AugmentImage
        """
        self.augmentations.append((name, prob, ranges, kwargs))
        return self

    def __rrshift__(self, iterable):
        """
        Apply the registered augmentations to the samples in iterable.

        :param iterable iterable: Samples
        :return: iterable over augmented samples
        :rtype: generator
        """
        cols = as_tuple(self.imagecols)
        rand = self.rand
        for sample in iterable:
            for name, prob, ranges, kwargs in self.augmentations:
                repeats = int(prob) if prob > 1.0 else 1
                for _ in range(repeats):
                    if rand.uniform(0, 1) >= prob:
                        continue
                    args = [rand.uniform(r[0], r[1]) for r in ranges]
                    yield map_transform(sample, cols, (name, args, kwargs))
@nut_processor
def RegularImagePatches(iterable, imagecols, pshape, stride):
    """
    samples >> RegularImagePatches(imagecols, pshape, stride)

    Extract patches on a regular grid from images.

    >>> import numpy as np
    >>> img = np.reshape(np.arange(12), (3, 4))
    >>> samples = [(img, 0)]
    >>> getpatches = RegularImagePatches(0, (2, 2), 2)
    >>> for p in samples >> getpatches:
    ...     print(p)
    (array([[0, 1],
           [4, 5]]), 0)
    (array([[2, 3],
           [6, 7]]), 0)

    :param iterable iterable: Samples with images
    :param int|tuple imagecols: Indices of sample columns that contain
        images to extract patches from. Images must be numpy arrays of
        shape (h, w, c) or (h, w).
    :param tuple pshape: Shape of patch (h, w)
    :param int stride: Step size of the grid patches are extracted from
    :return: Iterator over samples where images are replaced by patches.
    :rtype: generator
    """
    colset = as_set(imagecols)
    for sample in iterable:
        iters = ut.col_map(sample, imagecols, ni.patch_iter, pshape, stride)
        while True:
            row, depleted = [], False
            for i, elem in enumerate(iters):
                if i not in colset:  # non-image column: pass through
                    row.append(elem)
                    continue
                try:
                    row.append(next(elem))
                except StopIteration:
                    depleted = True
            if depleted:  # pragma: no cover
                break  # one or all patch iterators are exhausted
            yield tuple(row)
@nut_processor
def RandomImagePatches(iterable, imagecols, pshape, npatches):
    """
    samples >> RandomImagePatches(imagecols, pshape, npatches)

    Extract patches at random locations from images.

    >>> import numpy as np
    >>> np.random.seed(0)   # just to ensure stable doctest
    >>> img = np.reshape(np.arange(30), (5, 6))
    >>> samples = [(img, 0)]
    >>> getpatches = RandomImagePatches(0, (2, 3), 3)
    >>> for (p, l) in samples >> getpatches:
    ...     print(p.tolist(), l)
    [[7, 8, 9], [13, 14, 15]] 0
    [[8, 9, 10], [14, 15, 16]] 0
    [[8, 9, 10], [14, 15, 16]] 0

    :param iterable iterable: Samples with images
    :param int|tuple imagecols: Indices of sample columns that contain
        images to extract patches from. Images must be numpy arrays of
        shape (h, w, c) or (h, w).
    :param tuple pshape: Shape of patch (h, w)
    :param int npatches: Number of patches to extract (per image)
    :return: Iterator over samples where images are replaced by patches.
    :rtype: generator
    """
    imagecols = as_tuple(imagecols)
    for sample in iterable:
        # patch centers are sampled from the first image column; the same
        # locations are used for all image columns of the sample
        h, w = sample[imagecols[0]].shape[:2]
        dr, dc = pshape[0] // 2 + 1, pshape[1] // 2 + 1
        rows = np.random.randint(dr, h - dr, npatches)
        cols = np.random.randint(dc, w - dc, npatches)
        for r, c in zip(rows, cols):
            yield ut.col_map(sample, imagecols, ni.extract_patch, pshape, r, c)
@nut_processor
def ImagePatchesByMask(iterable, imagecol, maskcol, pshape, npos,
                       nneg=lambda npos: npos, pos=255, neg=0, retlabel=True):
    """
    samples >> ImagePatchesByMask(imagecol, maskcol, pshape, npos,
                                  nneg=lambda npos: npos,
                                  pos=255, neg=0, retlabel=True)

    Randomly sample positive/negative patches from image based on mask.

    A patch is positive if its center point has the value 'pos' in the
    mask (corresponding to the input image) and is negative for value
    'neg'. The mask must be of the same size as the image.

    >>> import numpy as np
    >>> np.random.seed(0)   # just to ensure stable doctest
    >>> img = np.reshape(np.arange(25), (5, 5))
    >>> mask = np.eye(5, dtype='uint8') * 255
    >>> samples = [(img, mask)]
    >>> getpatches = ImagePatchesByMask(0, 1, (3, 3), 2, 1)
    >>> for (p, l) in samples >> getpatches:
    ...     print(p.tolist(), l)
    [[10, 11, 12], [15, 16, 17], [20, 21, 22]] 0
    [[12, 13, 14], [17, 18, 19], [22, 23, 24]] 1
    [[6, 7, 8], [11, 12, 13], [16, 17, 18]] 1

    With retlabel=False the mask patch is returned instead of the label:

    >>> np.random.seed(0)   # just to ensure stable doctest
    >>> getpatches = ImagePatchesByMask(0, 1, (3, 3), 1, 1, retlabel=False)
    >>> for (p, m) in samples >> getpatches:  # doctest: +SKIP
    ...     print(p.tolist(), m.tolist())

    :param iterable iterable: Samples with images
    :param int imagecol: Index of sample column that contains the image
    :param int maskcol: Index of sample column that contains the mask
    :param tuple pshape: Shape of patch
    :param int npos: Number of positive patches to sample
    :param int|function nneg: Number of negative patches to sample or
        a function that returns the number of negatives
        based on the number of positives.
    :param int pos: Mask value indicating positives
    :param int neg: Mask value indicating negatives
    :param bool retlabel: True: return label, False: return mask patch
    :return: Iterator over samples where images are replaced by image
        patches and masks are replaced by labels [0,1] or mask patches
    :rtype: generator
    :raise ValueError: if image and mask sizes differ.
    """
    for sample in iterable:
        image, mask = sample[imagecol], sample[maskcol]
        if image.shape[:2] != mask.shape:
            # fixed message: adjacent literals 'don''t' rendered as "dont"
            raise ValueError("Image and mask size don't match!")
        it = ni.sample_pn_patches(image, mask, pshape, npos, nneg, pos, neg)
        for img_patch, mask_patch, label in it:
            outsample = list(sample)
            outsample[imagecol] = img_patch
            outsample[maskcol] = label if retlabel else mask_patch
            yield tuple(outsample)
@nut_processor
def ImagePatchesByAnnotation(iterable, imagecol, annocol, pshape, npos,
                             nneg=lambda npos: npos, pos=255, neg=0,
                             retlabel=True):
    """
    samples >> ImagePatchesByAnnotation(imagecol, annocol, pshape, npos,
                                        nneg=lambda npos: npos,
                                        pos=255, neg=0, retlabel=True)

    Randomly sample positive/negative patches from image based on
    annotation. See imageutil.annotation2coords for the annotation
    format. A patch is positive if its center point lies within the
    annotated region and negative otherwise.

    >>> import numpy as np
    >>> np.random.seed(0)   # just to ensure stable doctest
    >>> img = np.reshape(np.arange(25), (5, 5))
    >>> anno = ('point', ((3, 2), (2, 3),))
    >>> samples = [(img, anno)]
    >>> getpatches = ImagePatchesByAnnotation(0, 1, (3, 3), 1, 1)
    >>> for (p, l) in samples >> getpatches:
    ...     print(p.tolist(), l)
    [[12, 13, 14], [17, 18, 19], [22, 23, 24]] 0
    [[11, 12, 13], [16, 17, 18], [21, 22, 23]] 1
    [[7, 8, 9], [12, 13, 14], [17, 18, 19]] 1

    :param iterable iterable: Samples with images
    :param int imagecol: Index of sample column that contains the image
    :param int annocol: Index of sample column that contains the annotation
    :param tuple pshape: Shape of patch
    :param int npos: Number of positive patches to sample (per annotation)
    :param int|function nneg: Number of negative patches to sample or
        a function that returns the number of negatives
        based on the number of positives.
    :param int pos: Mask value indicating positives
    :param int neg: Mask value indicating negatives
    :param bool retlabel: True: return label, False: return mask patch
    :return: Iterator over samples where images are replaced by image
        patches and annotations are replaced by labels [0,1] or mask
        patches
    :rtype: generator
    """
    for sample in iterable:
        image, annotations = sample[imagecol], sample[annocol]
        mask = ni.annotation2mask(image, annotations)
        n_anno = len(annotations[1]) if annotations else 0
        patches = ni.sample_pn_patches(image, mask, pshape, npos * n_anno,
                                       nneg, pos, neg)
        for img_patch, mask_patch, label in patches:
            outsample = list(sample)
            outsample[imagecol] = img_patch
            outsample[annocol] = label if retlabel else mask_patch
            yield tuple(outsample)
@nut_processor
def ImageAnnotationToMask(iterable, imagecol, annocol):
    """
    samples >> ImageAnnotationToMask(imagecol, annocol)

    Replace image annotations by the corresponding masks. Annotations
    have the following formats (see imageutil.annotation2coords for
    details):

    ('point', ((x, y), ... ))
    ('circle', ((x, y, r), ...))
    ('rect', ((x, y, w, h), ...))
    ('polyline', (((x, y), (x, y), ...), ...))

    >>> import numpy as np
    >>> from nutsflow import Collect
    >>> img = np.zeros((3, 3), dtype='uint8')
    >>> anno = ('point', ((0, 1), (2, 0)))
    >>> samples = [(img, anno)]
    >>> masks = samples >> ImageAnnotationToMask(0, 1) >> Collect()
    >>> print(masks[0][1])
    [[  0   0 255]
     [255   0   0]
     [  0   0   0]]

    :param iterable iterable: Samples with images and annotations
    :param int imagecol: Index of sample column that contains the image
    :param int annocol: Index of sample column that contains the annotation
    :return: Iterator over samples where annotations are replaced by masks
    :rtype: generator
    """
    for sample in iterable:
        outsample = list(sample)
        outsample[annocol] = ni.annotation2mask(outsample[imagecol],
                                                outsample[annocol])
        yield tuple(outsample)
class ImageMean(NutFunction):
    """
    Compute and store a pixel-wise mean image; subtract it from images.
    """

    def __init__(self, imagecol, filepath='image_means.npy'):
        """
        samples >> ImageMean(imagecol, filepath='image_means.npy')

        Construct ImageMean nut. If a mean file already exists at
        filepath it is loaded; otherwise the mean starts out empty and
        must be computed via :meth:`train`.

        :param int imagecol: Index of sample column that contains the image
        :param string filepath: File the mean image is saved to and
            loaded from.
        """
        self.imagecol = imagecol
        self.filepath = filepath
        if path.exists(filepath):
            self.means = np.load(filepath, False)
        else:
            self.means = np.array([])

    def __call__(self, sample):
        """
        Subtract the stored mean image from the sample's image.

        sub_mean = ImageMean(imagecol, filepath)
        samples >> sub_mean >> Consume()

        :param tuple sample: Sample that contains an image (at imagecol).
        :return: Sample with mean-subtracted image. Note that the image
            will no longer be of dtype uint8 nor in range [0,255]!
        :rtype: tuple
        :raise ValueError: if no mean is available or its shape differs
            from the image.
        """
        outsample = list(sample)
        image = outsample[self.imagecol].astype('float32')
        if not self.means.size:
            raise ValueError('Mean has not yet been computed!')
        if self.means.shape != image.shape:
            raise ValueError('Mean loaded was computed on different images?')
        outsample[self.imagecol] = image - self.means
        return tuple(outsample)

    def train(self):
        """
        Compute the mean over the images of the samples passing through.

        sub_mean = ImageMean(imagecol, filepath)
        samples >> sub_mean.train() >> Consume()

        :return: Input samples are returned unchanged
        :rtype: tuple
        """
        @nut_processor
        def Trainer(iterable, outer):
            # accumulate the pixel-wise sum while passing samples through;
            # the mean is finalized once the stream is exhausted
            total = np.array([])
            for idx, sample in enumerate(iterable):
                image = sample[outer.imagecol]
                if not total.size:
                    total = np.zeros(image.shape)
                total += image
                yield sample
            outer.means = total / (idx + 1)
            if outer.filepath:
                np.save(outer.filepath, outer.means, False)

        return Trainer(self)
class ImageChannelMean(NutFunction):
    """
    Compute and store per-channel means over images; subtract them from
    images.
    """

    def __init__(self, imagecol, filepath='image_channel_means.npy',
                 means=None):
        """
        samples >> ImageChannelMean(imagecol,
                                    filepath='image_channel_means.npy',
                                    means=None)

        Construct ImageChannelMean nut.

        :param int imagecol: Index of sample column that contains the image
        :param string filepath: File the channel means are saved to and
            loaded from.
        :param list|tuple means: Mean values given directly. In this case
            filepath is ignored and no training is necessary.
        """
        self.imagecol = imagecol
        self.filepath = filepath
        if means:
            # wrapped to shape (1, 1, c) so it broadcasts over (h, w, c)
            self.means = np.array([[means]])
        elif path.exists(filepath):
            self.means = np.load(filepath, False)
        else:
            self.means = np.array([])

    def __call__(self, sample):
        """
        Subtract the per-channel means from the sample's image.

        sub_mean = ImageChannelMean(imagecol, filepath='means.npy')
        samples >> sub_mean >> Consume()

        sub_mean = ImageChannelMean(imagecol, means=[197, 87, 101])
        samples >> sub_mean >> Consume()

        :param tuple sample: Sample that contains an image (at imagecol).
        :return: Sample with mean-subtracted image. Note that the image
            will no longer be of dtype uint8 nor in range [0,255]!
        :rtype: tuple
        :raise ValueError: if no means are available or their
            dimensionality differs from the image.
        """
        outsample = list(sample)
        image = outsample[self.imagecol].astype('float32')
        if not self.means.size:
            raise ValueError('Mean has not yet been computed!')
        if self.means.ndim != image.ndim:
            raise ValueError('Mean loaded was computed on different images?')
        outsample[self.imagecol] = image - self.means
        return tuple(outsample)

    def train(self):
        """
        Compute the per-channel means over the images passing through.

        sub_mean = ImageChannelMean(imagecol, filepath)
        samples >> sub_mean.train() >> Consume()

        :return: Input samples are returned unchanged
        :rtype: tuple
        """
        @nut_processor
        def Trainer(iterable, outer):
            # accumulate per-image channel means while passing samples
            # through; the overall mean is finalized at stream end
            acc = np.array([])
            for idx, sample in enumerate(iterable):
                image = sample[outer.imagecol]
                chn_means = np.mean(image, axis=(0, 1), keepdims=True)
                if not acc.size:
                    acc = np.zeros(chn_means.shape)
                acc += chn_means
                yield sample
            outer.means = acc / (idx + 1)
            if outer.filepath:
                np.save(outer.filepath, outer.means, False)

        return Trainer(self)
| 24,592 | 37.366615 | 82 | py |
nuts-ml | nuts-ml-master/nutsml/booster.py | """
.. module:: booster
:synopsis: Boosting of wrongly predicted samples
"""
import random as rnd
import numpy as np
from nutsflow.common import StableRandom
from nutsflow import nut_processor, Tee, Collect, Flatten, Print
@nut_processor
def Boost(iterable, batcher, network, rand=None):
    """
    iterable >> Boost(batcher, network, rand=None)
    Boost samples with high softmax probability for incorrect class.
    Expects one-hot encoded targets and softmax predictions for output.
    NOTE: prefetching of batches must be disabled when using boosting!
    | network = Network()
    | build_batch = BuildBatch(BATCHSIZE, prefetch=0).input(...).output(...)
    | boost = Boost(build_batch, network)
    | samples >> ... ?>> boost >> build_batch >> network.train() >> Consume()
    :param iterable iterable: Iterable with samples.
    :param nutsml.BuildBatch batcher: Batcher used for network training.
    :param nutsml.Network network: Network used for prediction
    :param Random|None rand: Random number generator used for down-sampling.
        If None, random.Random() is used.
    :return: Generator over samples to boost
    :rtype: generator
    """
    def do_boost(probs, target):
        # Keep the sample with probability (1 - p_true): the lower the
        # softmax confidence for the true class, the more likely the
        # sample is boosted. `rand` is resolved at call time from the
        # enclosing scope (it is assigned below, before any call happens).
        assert len(target) > 1, 'Expect one-hot encoded target: ' + str(target)
        assert len(target) == len(probs), 'Expect softmax probs: ' + str(probs)
        return rand.random() > probs[np.argmax(target)]
    # Prefetching would run batching in a background thread and break the
    # lockstep alignment between batched predictions and raw samples below.
    assert batcher.prefetch == 0, 'Disable prefetch when boosting'
    rand = rnd.Random() if rand is None else rand
    # Tee duplicates the stream: samples1 is batched for prediction while
    # samples2 yields the corresponding raw samples in the same order.
    samples1, samples2 = iterable >> Tee(2)
    for batch in samples1 >> batcher:
        inputs, targets = batch
        tars = targets[0]  # first output column; one-hot per do_boost asserts
        preds = iter(inputs) >> network.predict() >> Collect()
        # zip consumes samples2 in lockstep with this batch's predictions;
        # assumes batcher preserves sample order within a batch (review note).
        for p,t,s in zip(preds, tars, samples2):
            if do_boost(p, t):
                yield s
| 1,850 | 34.596154 | 79 | py |
nuts-ml | nuts-ml-master/nutsml/common.py | """
.. module:: common
:synopsis: Common nuts
"""
import numpy as np
import random as rnd
from nutsflow import (nut_function, nut_sink, NutFunction,
Flatten, Collect)
from nutsflow.common import StableRandom
from nutsml.datautil import group_by, shuffle_sublists
@nut_function
def CheckNaN(data):
    """
    Pass data through unchanged but raise if it contains NaN.

    Useful to stop training if a network does not converge and its loss
    function returns NaN. Example:

    samples >> network.train() >> CheckNaN() >> log >> Consume()

    >>> from nutsflow import Collect
    >>> [1, 2, 3] >> CheckNaN() >> Collect()
    [1, 2, 3]

    >>> import numpy as np
    >>> [1, np.nan, 3] >> CheckNaN() >> Collect()
    Traceback (most recent call last):
    ...
    RuntimeError: NaN encountered: nan

    :param data: Items or iterables.
    :return: The input data, unchanged, if it contains no NaN.
    :rtype: any
    :raise: RuntimeError if data contains NaN.
    """
    if np.any(np.isnan(data)):
        msg = 'NaN encountered: ' + str(data)
        raise RuntimeError(msg)
    return data
@nut_sink
def PartitionByCol(iterable, column, values):
    """
    Split samples into partitions based on the value of a given column.

    >>> samples = [(1,1), (2,0), (2,4), (1,3), (3,0)]
    >>> ones, twos = samples >> PartitionByCol(0, [1, 2])
    >>> ones
    [(1, 1), (1, 3)]
    >>> twos
    [(2, 0), (2, 4)]

    Only the partitions for the requested values are returned; samples
    with other column values are dropped. A value without matching
    samples yields an empty partition.

    :param iterable iterable: Iterable over samples
    :param int column: Index of column to extract
    :param list values: List of column values to create partitions for.
    :return: tuple of partitions
    :rtype: tuple
    """
    colvalue = lambda sample: sample[column]
    grouped = group_by(iterable, colvalue)
    return tuple(grouped.get(value, []) for value in values)
class ConvertLabel(NutFunction):
    """
    Convert string labels to integer class ids (or one-hot) and vice versa.
    """
    def __init__(self, column, labels, onehot=False):
        """
        Convert string labels to integer class ids (or one-hot) and vice versa.
        Also converts confidence vectors, e.g. softmax output or float values
        to class labels.
        >>> from nutsflow import Collect
        >>> labels = ['class0', 'class1', 'class2']
        >>> convert = ConvertLabel(None, labels)
        >>> [1, 0] >> convert >> Collect()
        ['class1', 'class0']
        >>> ['class1', 'class0'] >> convert >> Collect()
        [1, 0]
        >>> [0.9, 0.4, 1.6] >> convert >> Collect()
        ['class1', 'class0', 'class2']
        >>> [[0.1, 0.7, 0.2], [0.8, 0.1, 0.1]] >> convert >> Collect()
        ['class1', 'class0']
        >>> convert = ConvertLabel(None, labels, onehot=True)
        >>> ['class1', 'class0'] >> convert >> Collect()
        [[0, 1, 0], [1, 0, 0]]
        >>> convert = ConvertLabel(1, labels)
        >>> [('data', 'class1'), ('data', 'class0')] >> convert >> Collect()
        [('data', 1), ('data', 0)]
        >>> [('data', 1), ('data', 2)] >> convert >> Collect()
        [('data', 'class1'), ('data', 'class2')]
        >>> [('data', 0.9)] >> convert >> Collect()
        [('data', 'class1')]
        >>> [('data', [0.1, 0.7, 0.2])] >> convert >> Collect()
        [('data', 'class1')]
        :param int column: Index of column in sample that contains label.
           If None process labels directly.
        :param list|tuple labels: List of class labels (strings).
        :param bool onehot: True: convert class labels to one-hot encoded
           vectors. False, convert to class index.
        """
        self.column = column    # sample column holding the label (None = bare value)
        self.labels = labels    # ordered list of string class labels
        self.onehot = onehot    # True: emit one-hot vectors instead of class ids
        self.n_labels = len(labels)
        # Bidirectional lookup tables; label order defines the class ids.
        self.id2label = {i: l for i, l in enumerate(labels)}
        self.label2id = {l: i for i, l in enumerate(labels)}
    def __call__(self, sample):
        """Convert the label (or the label column of a sample) and return it."""
        hascol = self.column is not None
        x = sample[self.column] if hascol else sample
        # Dispatch on the type of the label value:
        #   str   -> class id
        #   int   -> class label
        #   float -> class label of nearest id (rounding)
        #   else  -> assume confidence/softmax vector, take argmax label
        if isinstance(x, str):
            y = self.label2id[x]
        elif isinstance(x, int):
            y = self.id2label[x]
        elif isinstance(x, float):
            y = self.id2label[round(x)]
        else:  # assume vector with confidence values
            assert len(x) == len(self.labels)
            # max over (value, index) pairs yields the argmax index
            _, argmax = max((v, i) for i, v in enumerate(x))
            y = self.id2label[argmax]
        # One-hot applies only when the result is a class id (int), i.e.
        # when converting from string labels to ids.
        if self.onehot and isinstance(y, int):
            y = [1 if i == y else 0 for i in range(self.n_labels)]
        if hascol:  # input has columns => return sample
            outsample = list(sample)
            outsample[self.column] = y
            return tuple(outsample)
        else:
            return y
@nut_sink
def SplitRandom(iterable, ratio=0.7, constraint=None, rand=None):
    """
    Randomly split iterable into partitions.
    For the same input data the same split is created every time and is stable
    across different Python version 2.x or 3.x. A random number generator
    can be provided to create varying splits.
    >>> train, val = range(10) >> SplitRandom(ratio=0.7)
    >>> train, val
    ([6, 3, 1, 7, 0, 2, 4], [5, 9, 8])
    >>> range(10) >> SplitRandom(ratio=0.7)  # Same split again
    [[6, 3, 1, 7, 0, 2, 4], [5, 9, 8]]
    >>> train, val, test = range(10) >> SplitRandom(ratio=(0.6, 0.3, 0.1))
    >>> train, val, test
    ([6, 1, 4, 0, 3, 2], [8, 7, 9], [5])
    >>> data = zip('aabbccddee', range(10))
    >>> same_letter = lambda t: t[0]
    >>> train, val = data >> SplitRandom(ratio=0.6, constraint=same_letter)
    >>> sorted(train)
    [('a', 0), ('a', 1), ('b', 2), ('b', 3), ('d', 6), ('d', 7)]
    >>> sorted(val)
    [('c', 4), ('c', 5), ('e', 8), ('e', 9)]
    :param iterable iterable: Iterable over anything. Will be consumed!
    :param float|tuple ratio: Ratio of two partition e.g. a ratio of 0.7
        means 70%, 30% split.
        Alternatively a list or ratios can be provided, e.g.
        ratio=(0.6, 0.3, 0.1). Note that ratios must sum up to one
        and cannot be zero.
    :param function|None constraint: Function that returns key the elements of
        the iterable are grouped by before partitioning. Useful to ensure
        that a partition contains related elements, e.g. left and right eye
        images are not scattered across partitions.
        Note that constrains have precedence over ratios.
    :param Random|None rand: Random number generator. The default None
        ensures that the same split is created every time SplitRandom
        is called. This is important when continuing an interrupted
        training session or running the same training on machines with
        different Python versions. Note that Python's random.Random(0)
        generates different number for Python 2.x and 3.x!
    :return: partitions of iterable with sizes according to provided ratios.
    :rtype: (list, list, ...)
    """
    # StableRandom(0) produces the same sequence on Python 2.x and 3.x,
    # unlike random.Random(0), which keeps splits reproducible everywhere.
    rand = StableRandom(0) if rand is None else rand
    samples = list(iterable)
    if hasattr(ratio, '__iter__'):
        ratios = tuple(ratio)
        if abs(sum(ratios) - 1.0) > 1e-6:
            raise ValueError('Ratios must sum up to one: ' + str(ratios))
        if min(ratios) <= 0:
            raise ValueError('Ratios cannot be zero: ' + str(ratios))
    else:
        ratios = (ratio, 1.0 - ratio)
    ns = [int(len(samples) * r) for r in ratios]  # target size of each split
    # Without a constraint every sample is its own group; with a constraint
    # samples sharing the same key stay together in one split.
    if constraint is None:
        groups = [[s] for s in samples]
    else:
        groups = list(group_by(samples, constraint, True).values())
    rand.shuffle(groups)
    # Shared iterator: each split below consumes a prefix of the groups.
    groups = iter(groups)
    splits = []
    for n in ns[:-1]:
        split = []
        for group in groups:
            # Whole groups are appended, so a split can slightly exceed its
            # target size n (constraints take precedence over exact ratios).
            split.extend(group)
            if len(split) >= n:
                splits.append(split)
                break
    splits.append([e for g in groups for e in g])  # append remaining groups
    shuffle_sublists(splits, rand)
    return splits
@nut_sink
def SplitLeaveOneOut(iterable, keyfunc=None):
    """
    Generate leave-one-out train/test splits of the input.

    The entire input stream is consumed; a generator over the splits is
    returned. Splits are deterministic and stable across Python 2.x/3.x.

    >>> from nutsflow.common import console  # just for printing
    >>> samples = [1, 2, 3]
    >>> for train, test in samples >> SplitLeaveOneOut():
    ...     console(train, ' ', test)
    [2, 3]   [1]
    [1, 3]   [2]
    [1, 2]   [3]

    Samples can be grouped by a key function; each group is then left
    out as a whole:

    >>> samples = [(1, 1), (2, 0), (2, 4), (1, 3), (3, 0)]
    >>> splits = samples >> SplitLeaveOneOut(lambda x: x[0])
    >>> for train, test in splits:
    ...     console(train, ' ', test)
    [(2, 0), (2, 4), (3, 0)]   [(1, 1), (1, 3)]
    [(1, 1), (1, 3), (3, 0)]   [(2, 0), (2, 4)]
    [(1, 1), (1, 3), (2, 0), (2, 4)]   [(3, 0)]

    :param iterable iterable: Iterable over anything. Will be consumed!
    :param function/None keyfunc: Function that returns the value the split
        is based on. If None, the sample itself serves as key.
    :return: generator over leave-one-out train and test splits (train, test)
    :rtype: Generator[(list, list)]
    """
    if keyfunc is None:
        keyfunc = lambda x: x
    data = list(iterable)
    partitions = list(group_by(data, keyfunc, True).values())
    n_parts = len(partitions)
    for leave_out in range(n_parts):
        test = partitions[leave_out]
        rest = (partitions[j] for j in range(n_parts) if j != leave_out)
        train = rest >> Flatten() >> Collect()
        yield train, test
| 9,645 | 34.20438 | 79 | py |
nuts-ml | nuts-ml-master/nutsml/imageutil.py | """
.. module:: imageutil
:synopsis: Basic image processing utilities
"""
from __future__ import absolute_import, print_function
import numpy as np
import PIL as pil
import skimage.exposure as ske
import skimage.transform as skt
import skimage.color as skc
import skimage.util.shape as sks
import skimage.io as ski
import skimage.draw as skd
import matplotlib.patches as plp
from six.moves import range, map
from nutsflow.common import shapestr, isnan
from PIL import ImageEnhance as ie
from skimage.color import rgb2gray
from skimage import feature
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
from warnings import warn
def load_image(filepath, as_grey=False, dtype='uint8', no_alpha=True):
    """
    Load image as numpy array from given filepath.
    Supported formats: gif, png, jpg, bmp, tif, npy
    >>> img = load_image('tests/data/img_formats/nut_color.jpg')
    >>> shapestr(img)
    '213x320x3'
    :param string filepath: Filepath to image file or numpy array.
    :param bool as_grey: True: load/convert image as gray-scale.
    :param str dtype: Numpy dtype of the returned array, e.g. 'uint8'.
    :param bool no_alpha: True: drop the alpha channel of RGBA images.
    :return: numpy array with shapes
        (h, w) for grayscale or monochrome,
        (h, w, 3) for RGB (3 color channels in last axis)
        (h, w, 4) for RGBA (for no_alpha = False)
        (h, w, 3) for RGBA (for no_alpha = True)
        pixel values are in range [0,255] for dtype = uint8
    :rtype: numpy ndarray
    """
    if filepath.endswith('.npy'):  # image as numpy array
        arr = np.load(filepath).astype(dtype)
        arr = rgb2gray(arr) if as_grey else arr
    else:
        # NOTE: skimage spells the parameter 'as_gray', nutsml 'as_grey'.
        img = ski.imread(filepath, as_gray=as_grey)
        arr = np.array(img, dtype=dtype)
        # Workaround for odd (img, palette) tuples returned for some gifs:
        # https://github.com/scikit-image/scikit-image/issues/2406
        if arr.ndim == 1 and arr.shape[0] == 2:
            arr = arr[0]  # pragma: no cover
    if arr.ndim == 3 and arr.shape[2] == 4 and no_alpha:
        arr = arr[..., :3]  # cut off alpha channel
    return arr
def save_image(filepath, image):
    """
    Save numpy array as image (or numpy array) to given filepath.
    Supported formats: gif, png, jpg, bmp, tif, npy
    :param string filepath: File path for image file. Extension determines
        image file format, e.g. .gif
    :param numpy array image: Numpy array to save as image.
        Must be of shape (h,w) or (h,w,3) or (h,w,4)
    """
    if filepath.endswith('.npy'):  # image as numpy array
        # allow_pickle=False: store plain array data only (no object arrays)
        np.save(filepath, image, allow_pickle=False)
    else:
        ski.imsave(filepath, image)
def arr_to_pil(image):
    """
    Convert a numpy array into a PIL image.

    >>> import numpy as np
    >>> rgb_arr = np.ones((5, 4, 3), dtype='uint8')
    >>> pil_img = arr_to_pil(rgb_arr)
    >>> pil_img.size
    (4, 5)

    :param ndarray image: Numpy array with dtype 'uint8' and dimensions
        (h,w,c) for RGB or (h,w) for gray-scale images.
    :return: PIL image
    :rtype: PIL.Image
    :raise: ValueError for wrong dtype or dimensionality.
    """
    if image.dtype != np.uint8:
        raise ValueError('Expect uint8 dtype but got: ' + str(image.dtype))
    if image.ndim not in (2, 3):
        raise ValueError('Expect gray scale or RGB image: ' + str(image.ndim))
    mode = 'RGB' if image.ndim == 3 else 'L'
    return pil.Image.fromarray(image, mode)
def pil_to_arr(image):
    """
    Convert a PIL image into a numpy array.

    >>> import numpy as np
    >>> rgb_arr = np.ones((5, 4, 3), dtype='uint8')
    >>> pil_img = arr_to_pil(rgb_arr)
    >>> arr = pil_to_arr(pil_img)
    >>> shapestr(arr)
    '5x4x3'

    :param PIL.Image image: PIL image (RGB or grayscale)
    :return: Numpy array
    :rtype: numpy.array with dtype 'uint8'
    :raise: ValueError for image modes other than 'L' or 'RGB'.
    """
    supported_modes = {'L', 'RGB'}
    if image.mode not in supported_modes:
        raise ValueError('Expect RBG or grayscale but got:' + image.mode)
    return np.asarray(image)
def set_default_order(kwargs):
    """
    Set default 'order' parameter in kwargs for scikit-image functions.

    The default interpolation order is 1 (linear interpolation of pixel
    values) when images are rotated, resized or sheared. That is fine for
    images but introduces unwanted intermediate values in masks, so this
    function defaults the order to 0 (nearest neighbor, no interpolation)
    unless the caller specified an order explicitly.

    >>> kwargs = {}
    >>> set_default_order(kwargs)
    >>> kwargs
    {'order': 0}

    :param dict kwargs: Dictionary with keyword arguments. Modified in place.
    """
    # setdefault is the idiomatic (and atomic) form of
    # "if 'order' not in kwargs: kwargs['order'] = 0"
    kwargs.setdefault('order', 0)
def add_channel(image, channelfirst):
    """
    Ensure the image has a channel axis, optionally moved to the front.

    >>> import numpy as np
    >>> image = np.ones((10, 20))
    >>> image = add_channel(image, True)
    >>> shapestr(image)
    '1x10x20'

    :param ndarray image: RBG (h,w,3) or gray-scale image (h,w).
    :param bool channelfirst: If True, make channel first axis
    :return: Numpy array with channel (as first axis if channelfirst=True)
    :rtype: numpy.array
    :raise: ValueError if the image is not 2- or 3-dimensional.
    """
    ndim = image.ndim
    if ndim < 2 or ndim > 3:
        raise ValueError('Image must be 2 or 3 channel!')
    if ndim == 2:
        # gray-scale image: append a trailing channel axis
        image = image[..., np.newaxis]
    if channelfirst:
        return np.rollaxis(image, 2)
    return image
def floatimg2uint8(image):
    """
    Rescale a float image in [0,1] to a 'uint8' image in [0,255].

    Images that already have dtype 'uint8' are returned unchanged.

    >>> import numpy as np
    >>> image = np.eye(10, 20, dtype=float)
    >>> arr = floatimg2uint8(image)
    >>> np.max(arr)
    255

    :param numpy.array image: Numpy array with range [0,1]
    :return: Numpy array with range [0,255] and dtype 'uint8'
    :rtype: numpy array
    """
    if image.dtype == 'uint8':
        return image
    return (image * 255).astype('uint8')
def rerange(image, old_min, old_max, new_min, new_max, dtype):
    """
    Linearly map image values from [old_min, old_max] to [new_min, new_max].

    Note: The default range of images is [0, 255] and most image
    processing functions expect this range and will fail otherwise.
    However, neural networks often want re-ranged inputs, e.g. [-1, +1].

    >>> import numpy as np
    >>> image = np.array([[0, 255], [255, 0]])
    >>> rerange(image, 0, 255, -1, +1, 'float32')
    array([[-1.,  1.],
           [ 1., -1.]], dtype=float32)

    :param numpy.array image: Should be a numpy array of an image.
    :param int|float old_min: Current minimum value of image, e.g. 0
    :param int|float old_max: Current maximum value of image, e.g. 255
    :param int|float new_min: New minimum, e.g. -1.0
    :param int|float new_max: New maximum, e.g. +1.0
    :param numpy datatype dtype: Data type of output image,
        e.g. 'float32' or np.uint8
    :return: Image with values in new range.
    """
    # compute in float32 to avoid integer truncation, cast at the end
    scaled = image.astype('float32')
    old_span = old_max - old_min
    new_span = new_max - new_min
    scaled = (scaled - old_min) / old_span * new_span + new_min
    return scaled.astype(dtype)
def identical(image):
    """
    Identity transformation: return the input image as-is.

    Useful as a no-op entry in lists of augmentation functions.

    :param numpy.array image: Should be a numpy array of an image.
    :return: The very same object that was passed in.
    :rtype: Same as input
    """
    return image
def crop(image, x1, y1, x2, y2):
    """
    Return the rectangular image region [x1:x2) x [y1:y2).

    >>> import numpy as np
    >>> image = np.reshape(np.arange(16, dtype='uint8'), (4, 4))
    >>> crop(image, 1, 2, 5, 5)
    array([[ 9, 10, 11],
           [13, 14, 15]], dtype=uint8)

    :param numpy array image: Numpy array.
    :param int x1: x-coordinate of left upper corner of crop (inclusive)
    :param int y1: y-coordinate of left upper corner of crop (inclusive)
    :param int x2: x-coordinate of right lower corner of crop (exclusive)
    :param int y2: y-coordinate of right lower corner of crop (exclusive)
    :return: Cropped image
    :rtype: numpy array
    """
    rows = slice(y1, y2)
    cols = slice(x1, x2)
    return image[rows, cols]
def crop_center(image, w, h):
    """
    Crop a w x h region from the center of the image.

    Note that the crop is specified via w, h and not via shape (h,w).
    If the image or the crop region have even dimensions, coordinates
    are rounded down.

    >>> import numpy as np
    >>> image = np.reshape(np.arange(16, dtype='uint8'), (4, 4))
    >>> crop_center(image, 3, 2)
    array([[ 4,  5,  6],
           [ 8,  9, 10]], dtype=uint8)

    :param numpy array image: Numpy array.
    :param int w: Width of crop
    :param int h: Height of crop
    :return: Cropped image
    :rtype: numpy array
    :raise: ValueError if image is smaller than crop region
    """
    ih, iw = image.shape[0], image.shape[1]
    dh, dw = ih - h, iw - w
    if dh < 0 or dw < 0:
        raise ValueError('Image too small for crop {}x{}'.format(iw, ih))
    top, left = dh // 2, dw // 2
    return image[top:top + h, left:left + w]
def crop_square(image):
    """
    Crop image symmetrically to a square aspect ratio.

    Crops left/right or top/bottom to achieve an aspect ratio of one
    while preserving the smaller dimension; any odd remainder is taken
    from the left or top side.

    :param numpy array image: Numpy array.
    :return: Cropped image
    :rtype: numpy array
    """
    ih, iw = image.shape[0], image.shape[1]
    if iw > ih:  # landscape: trim columns
        d, m = (iw - ih) // 2, (iw - ih) % 2
        return image[0:ih, d + m:iw - d]
    # portrait or square: trim rows (a no-op for square input)
    d, m = (ih - iw) // 2, (ih - iw) % 2
    return image[d + m:ih - d, 0:iw]
def occlude(image, x, y, w, h, color=0):
    """
    Occlude image with a rectangular region.

    Occludes an image region with dimensions w,h centered on x,y with the
    given color. Coordinates/sizes given as floats are interpreted as
    fractions of the image width or height. Invalid x,y coordinates will
    be clipped to ensure the complete occlusion rectangle is within the
    image.

    >>> import numpy as np
    >>> image = np.ones((4, 5)).astype('uint8')
    >>> occlude(image, 2, 2, 2, 3)
    array([[1, 1, 1, 1, 1],
           [1, 0, 0, 1, 1],
           [1, 0, 0, 1, 1],
           [1, 0, 0, 1, 1]], dtype=uint8)

    >>> image = np.ones((4, 4)).astype('uint8')
    >>> occlude(image, 0.5, 0.5, 0.5, 0.5)
    array([[1, 1, 1, 1],
           [1, 0, 0, 1],
           [1, 0, 0, 1],
           [1, 1, 1, 1]], dtype=uint8)

    :param numpy array image: Numpy array.
    :param int|float x: x coordinate for center of occlusion region.
        Can be provided as fraction (float) of image width
    :param int|float y: y coordinate for center of occlusion region.
        Can be provided as fraction (float) of image height
    :param int|float w: width of occlusion region.
        Can be provided as fraction (float) of image width
    :param int|float h: height of occlusion region.
        Can be provided as fraction (float) of image height
    :param int|tuple color: gray-scale or RGB color of occlusion.
    :return: Copy of input image with occluded region.
    :rtype: numpy array
    """
    # fractions (floats) are resolved against the relevant image dimension
    frac = lambda c, m: int(m * c) if isinstance(c, float) else c
    # BUGFIX: numpy image shape is (height, width); the previous code did
    # "iw, ih = image.shape[:2]", swapping width and height. That mis-scaled
    # fractional coordinates and mis-clipped the rectangle for non-square
    # images (the doctests above use square/symmetric images and hid this).
    ih, iw = image.shape[:2]
    x, y = frac(x, iw), frac(y, ih)
    w, h = frac(w, iw), frac(h, ih)
    r, c = int(y - h // 2), int(x - w // 2)
    # clip so the full w x h rectangle stays inside the image
    r, c = max(min(r, ih - h), 0), max(min(c, iw - w), 0)
    image2 = image.copy()  # never modify the input image
    image2[r:r + h, c:c + w] = color
    return image2
def normalize_histo(image, gamma=1.0):  # pragma no coverage
    """
    Perform histogram normalization on image.
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float gamma: Factor for gamma adjustment.
    :return: Normalized image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # adaptive histogram equalization produces a float image, hence the
    # conversion back to uint8 at the end
    image = ske.equalize_adapthist(image)
    image = ske.adjust_gamma(image, gamma=gamma)
    return floatimg2uint8(image)
def enhance(image, func, *args, **kwargs):
    """
    Enhance image using a PIL enhance function
    See the following link for details on PIL enhance functions:
    http://pillow.readthedocs.io/en/3.1.x/reference/ImageEnhance.html
    >>> from PIL.ImageEnhance import Brightness
    >>> image = np.ones((3,2), dtype='uint8')
    >>> enhance(image, Brightness, 0.0)
    array([[0, 0],
           [0, 0],
           [0, 0]], dtype=uint8)
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param function func: PIL ImageEnhance function
    :param args args: Argument list passed on to enhance function.
    :param kwargs kwargs: Key-word arguments passed on to enhance function
    :return: Enhanced image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # round-trip: numpy -> PIL, apply the PIL enhancer, PIL -> numpy
    image = arr_to_pil(image)
    image = func(image).enhance(*args, **kwargs)
    return pil_to_arr(image)
def change_contrast(image, contrast=1.0):
    """
    Change contrast of image.
    >>> image = np.eye(3, dtype='uint8') * 255
    >>> change_contrast(image, 0.5)
    array([[170,  42,  42],
           [ 42, 170,  42],
           [ 42,  42, 170]], dtype=uint8)
    See
    http://pillow.readthedocs.io/en/3.1.x/reference/ImageEnhance.html#PIL.ImageEnhance.Contrast
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float contrast: Contrast [0, 1]
    :return: Image with changed contrast
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # delegate to PIL's ImageEnhance.Contrast via the enhance() helper
    return enhance(image, ie.Contrast, contrast)
def change_brightness(image, brightness=1.0):
    """
    Change brightness of image.
    >>> image = np.eye(3, dtype='uint8') * 255
    >>> change_brightness(image, 0.5)
    array([[127,   0,   0],
           [  0, 127,   0],
           [  0,   0, 127]], dtype=uint8)
    See
    http://pillow.readthedocs.io/en/3.1.x/reference/ImageEnhance.html#PIL.ImageEnhance.Brightness
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float brightness: Brightness [0, 1]
    :return: Image with changed brightness
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # delegate to PIL's ImageEnhance.Brightness via the enhance() helper
    return enhance(image, ie.Brightness, brightness)
def change_sharpness(image, sharpness=1.0):
    """
    Change sharpness of image.
    >>> image = np.eye(3, dtype='uint8') * 255
    >>> change_sharpness(image, 0.5)
    array([[255,   0,   0],
           [  0, 196,   0],
           [  0,   0, 255]], dtype=uint8)
    See
    http://pillow.readthedocs.io/en/3.1.x/reference/ImageEnhance.html#PIL.ImageEnhance.Sharpness
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float sharpness: Sharpness [0, ...]
    :return: Image with changed sharpness
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # delegate to PIL's ImageEnhance.Sharpness via the enhance() helper
    return enhance(image, ie.Sharpness, sharpness)
def change_color(image, color=1.0):
    """
    Change color of image.
    >>> image = np.eye(3, dtype='uint8') * 255
    >>> change_color(image, 0.5)
    array([[255,   0,   0],
           [  0, 255,   0],
           [  0,   0, 255]], dtype=uint8)
    See
    http://pillow.readthedocs.io/en/3.1.x/reference/ImageEnhance.html#PIL.ImageEnhance.Color
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float color: Color [0, 1]
    :return: Image with changed color
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # delegate to PIL's ImageEnhance.Color via the enhance() helper
    return enhance(image, ie.Color, color)
def extract_edges(image, sigma):  # pragma: no cover
    """
    Extract edges using the Canny algorithm.
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float sigma: Standard deviation of the Gaussian filter.
    :return: Binary image with extracted edges
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # module-level rgb2gray (defined below) also converts back to uint8
    image = rgb2gray(image)
    params = dict(sigma=sigma, low_threshold=0.1, high_threshold=50.0,
                  mask=None, use_quantiles=False)
    image = feature.canny(image, **params)
    # scale the binary canny output to a {0, 255} uint8 image
    return image.astype('uint8') * 255
def gray2rgb(image):
    """
    Grayscale scale image to RGB image
    >>> image = np.eye(3, dtype='uint8') * 255
    >>> gray2rgb(image)
    array([[[255, 255, 255],
            [  0,   0,   0],
            [  0,   0,   0]],
    <BLANKLINE>
           [[  0,   0,   0],
            [255, 255, 255],
            [  0,   0,   0]],
    <BLANKLINE>
           [[  0,   0,   0],
            [  0,   0,   0],
            [255, 255, 255]]], dtype=uint8)
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :return: RGB image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # thin wrapper around skimage.color.gray2rgb
    return skc.gray2rgb(image)
def rgb2gray(image):
    """
    RGB scale image to grayscale image
    >>> image = np.eye(3, dtype='uint8') * 255
    >>> rgb2gray(image)
    array([[255,   0,   0],
           [  0, 255,   0],
           [  0,   0, 255]], dtype=uint8)
    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :return: grayscale image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # NOTE: deliberately shadows the rgb2gray imported from skimage.color at
    # the top of this module, adding the conversion back to uint8 range.
    return floatimg2uint8(skc.rgb2gray(image))
def translate(image, dx, dy, **kwargs):
    """
    Shift image horizontally by dx and vertically by dy pixels.

    >>> image = np.eye(3, dtype='uint8') * 255
    >>> translate(image, 2, 1)
    array([[  0,   0,   0],
           [  0,   0, 255],
           [  0,   0,   0]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param dx: horizontal translation in pixels
    :param dy: vertical translation in pixels
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
        warp function, e.g. order=1 for linear interpolation.
    :return: translated image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    # warp applies the inverse transform, hence the negated offsets
    shift = skt.AffineTransform(translation=(-dx, -dy))
    shifted = skt.warp(image, shift, preserve_range=True, **kwargs)
    return shifted.astype('uint8')
def rotate(image, angle=0, **kwargs):
    """
    Rotate image counter-clockwise by the given angle in degrees.

    For details see:
    http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.rotate
    Use order=1 for smooth interpolation of images; keep the default
    order=0 when rotating masks.

    >>> image = np.eye(3, dtype='uint8')
    >>> rotate(image, 90)
    array([[0, 0, 1],
           [0, 1, 0],
           [1, 0, 0]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float angle: Angle in degrees in counter-clockwise direction
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
        rotate function, e.g. order=1 for linear interpolation.
    :return: Rotated image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    rotated = skt.rotate(image, angle, preserve_range=True, **kwargs)
    return rotated.astype('uint8')
def resize(image, w, h, anti_aliasing=False, **kwargs):
    """
    Resize image to width w and height h.

    Image can be up- or down-sized (using interpolation). For details see:
    http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.resize

    >>> image = np.ones((10,5), dtype='uint8')
    >>> resize(image, 4, 3)
    array([[1, 1, 1, 1],
           [1, 1, 1, 1],
           [1, 1, 1, 1]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param int w: Width in pixels.
    :param int h: Height in pixels.
    :param bool anti_aliasing: Toggle anti aliasing.
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
        resize function, e.g. order=1 for linear interpolation.
    :return: Resized image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    # skimage expects the output shape as (h, w)
    resized = skt.resize(image, (h, w), mode='constant', preserve_range=True,
                         anti_aliasing=anti_aliasing, **kwargs)
    return resized.astype('uint8')
def shear(image, shear_factor, **kwargs):
    """
    Shear image by the given factor.

    For details see:
    http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.AffineTransform

    >>> image = np.eye(3, dtype='uint8')
    >>> rotated = rotate(image, 45)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float shear_factor: Shear factor [0, 1]
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
        warp function, e.g. order=1 for linear interpolation.
    :return: Sheared image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    affine = skt.AffineTransform(shear=shear_factor)
    sheared = skt.warp(image, affine, preserve_range=True, **kwargs)
    return sheared.astype('uint8')
def fliplr(image):
    """
    Mirror image horizontally (reverse column order).

    >>> image = np.reshape(np.arange(4, dtype='uint8'), (2,2))
    >>> fliplr(image)
    array([[1, 0],
           [3, 2]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :return: Flipped image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # equivalent to np.fliplr for 2-d+ arrays (returns a view)
    return image[:, ::-1]
def flipud(image):
    """
    Mirror image vertically (reverse row order).

    >>> image = np.reshape(np.arange(4, dtype='uint8'), (2,2))
    >>> flipud(image)
    array([[2, 3],
           [0, 1]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :return: Flipped image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    # equivalent to np.flipud (returns a view)
    return image[::-1]
def distort_elastic(image, smooth=10.0, scale=100.0, seed=0):
    """
    Elastic distortion of images.
    Channel axis in RGB images will not be distorted but grayscale or
    RGB images are both valid inputs. RGB and grayscale images will be
    distorted identically for the same seed.
    Simard, et. al, "Best Practices for Convolutional Neural Networks
    applied to Visual Document Analysis",
    in Proc. of the International Conference on Document Analysis and
    Recognition, 2003.
    :param ndarray image: Image of shape [h,w] or [h,w,c]
    :param float smooth: Smoothes the distortion.
    :param float scale: Scales the distortion.
    :param int seed: Seed for random number generator. Ensures that for the
        same seed images are distorted identically.
    :return: Distorted image with same shape as input image.
    :rtype: ndarray
    """
    # create random, smoothed displacement field
    rnd = np.random.RandomState(int(seed))
    h, w = image.shape[:2]
    # displacement in [-1, 1] of shape (2, h, w, 3): two components (x, y),
    # each with 3 channels so color images can be displaced per channel
    dxy = rnd.rand(2, h, w, 3) * 2 - 1
    dxy = gaussian_filter(dxy, smooth, mode="constant")
    dxy = dxy / np.linalg.norm(dxy) * scale  # normalize, then scale strength
    # no displacement along the channel axis itself
    dxyz = dxy[0], dxy[1], np.zeros_like(dxy[0])
    # create transformation coordinates and deform image
    is_color = len(image.shape) == 3
    ranges = [np.arange(d) for d in image.shape]
    grid = np.meshgrid(*ranges, indexing='ij')
    # grayscale images use only channel 0 of the displacement field so that
    # RGB and grayscale versions distort identically for the same seed
    add = lambda v, dv: v + dv if is_color else v + dv[:, :, 0]
    idx = [np.reshape(add(v, dv), (-1, 1)) for v, dv in zip(grid, dxyz)]
    distorted = map_coordinates(image, idx, order=1, mode='reflect')
    return distorted.reshape(image.shape)
def polyline2coords(points):
    """
    Return row and column coordinates for all pixels on a polyline.

    >>> rr, cc = polyline2coords([(0, 0), (2, 2), (2, 4)])
    >>> list(rr)
    [0, 1, 2, 2, 3, 4]
    >>> list(cc)
    [0, 1, 2, 2, 2, 2]

    :param list of tuple points: Polyline in format [(x1,y1), (x2,y2), ...]
    :return: tuple with row and column coordinates in numpy arrays
    :rtype: tuple of numpy array
    """
    segments = []
    for start, end in zip(points[:-1], points[1:]):
        x0, y0, x1, y1 = [int(v) for v in start + end]
        # skimage.draw.line takes (r0, c0, r1, c1), i.e. y before x
        segments.append(skd.line(y0, x0, y1, x1))
    return [np.hstack(coords) for coords in zip(*segments)]
def mask_where(mask, value):
    """
    Return coordinates of all positions where mask has the given value.

    >>> mask = np.eye(3, dtype='uint8')
    >>> mask_where(mask, 1).tolist()
    [[0, 0], [1, 1], [2, 2]]

    :param numpy array mask: Numpy array with range [0,255] and dtype 'uint8'.
    :return: Array with x,y coordinates
    :rtype: numpy array with shape Nx2 where each row contains x, y
    """
    positions = np.where(mask == value)
    return np.transpose(positions).astype('int32')
def mask_choice(mask, value, n):
    """
    Randomly select n points where mask has the given value.

    >>> np.random.seed(1)  # ensure same random selection for doctest
    >>> mask = np.eye(3, dtype='uint8')
    >>> mask_choice(mask, 1, 2).tolist()
    [[0, 0], [2, 2]]

    :param numpy array mask: Numpy array with range [0,255] and dtype 'uint8'.
    :param int n: Number of points to select. If n is larger than the
        points available only the available points will be returned.
    :return: Array with x,y coordinates
    :rtype: numpy array with shape nx2 where each row contains x, y
    """
    # inlined mask_where: coordinates of all matching positions
    candidates = np.transpose(np.where(mask == value)).astype('int32')
    total = candidates.shape[0]
    k = min(n, total)
    picked = np.random.choice(total, k, replace=False)
    return candidates[picked, :]
def extract_patch(image, pshape, r, c):
    """
    Extract a patch of shape pshape centered at row r, column c.

    Note that there is no checking if the patch region is inside the image.

    >>> image = np.reshape(np.arange(16, dtype='uint8'), (4, 4))
    >>> extract_patch(image, (2, 3), 2, 2)
    array([[ 5,  6,  7],
           [ 9, 10, 11]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
        Can be of shapes MxN, MxNxC.
    :param tuple pshape: Shape of patch. #Dimensions must match image.
    :param int r: Row for center of patch
    :param int c: Column for center of patch
    :return: numpy array with shape pshape
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    ph, pw = pshape[0], pshape[1]
    top = int(r - ph // 2)
    left = int(c - pw // 2)
    return image[top:top + ph, left:left + pw]
def patch_iter(image, shape=(3, 3), stride=1):
    """
    Iterate over patches of the given shape extracted from an image.

    Patches are taken from a regular grid with the given stride, starting
    in the left upper corner and proceeding row-wise. Image can be
    gray-scale (no third channel dim) or color.

    >>> import numpy as np
    >>> img = np.reshape(np.arange(12), (3, 4))
    >>> for p in patch_iter(img, (2, 2), 2):
    ...     print(p)
    [[0 1]
     [4 5]]
    [[2 3]
     [6 7]]

    :param ndarray image: Numpy array of shape h,w,c or h,w.
    :param tuple shape: Shape of patch (h,w)
    :param int stride: Step size of grid patches are extracted from
    :return: Iterator over patches
    :rtype: Iterator
    """
    # view_as_windows requires a contiguous array, which we ensure here
    if not image.flags['C_CONTIGUOUS']:
        warn('Image is not contiguous and will be copied!')
        image = np.ascontiguousarray(image)
    grayscale = image.ndim == 2
    if grayscale:
        wshape = shape
    else:
        # color images: window spans all channels
        wshape = (shape[0], shape[1], image.shape[2])
    windows = sks.view_as_windows(image, wshape, stride)
    nrows, ncols = windows.shape[:2]
    def generate():
        for row in range(nrows):
            for col in range(ncols):
                yield windows[row, col] if grayscale else windows[row, col, 0]
    return generate()
# NOTE: numpy masked arrays cannot be used here: np.where(pred) ignores the
# array's mask and returns all coordinates when evaluating where(mask == 0).
def centers_inside(centers, image, pshape):
    """
    Filter patch center points, keeping only those whose patch fits
    completely inside the image.
    >>> centers = np.array([[1, 2], [0,1]])
    >>> image = np.zeros((3, 4))
    >>> centers_inside(centers, image, (3, 3)).astype('uint8')
    array([[1, 2]], dtype=uint8)
    :param ndarray(n,2) centers: Center points of patches.
    :param ndarray(h,w) image: Image the patches should be inside.
    :param tuple pshape: Patch shape of form (h,w)
    :return: Patch centers where the patch is completely inside the image.
    :rtype: ndarray of shape (n, 2)
    """
    if centers.shape[0] == 0:  # no centers given, nothing to filter
        return centers
    imgh, imgw = image.shape[:2]
    half_h, half_w = pshape[0] // 2, pshape[1] // 2
    rows, cols = centers[:, 0], centers[:, 1]
    # a patch fits iff its half-extent stays clear of every image border
    fits = ((rows > half_h - 1) & (rows < imgh - half_h) &
            (cols > half_w - 1) & (cols < imgw - half_w))
    return centers[fits]
def sample_mask(mask, value, pshape, n):
    """
    Randomly pick n points in mask where mask has given value.
    Ensure that only points picked that can be center of a patch with
    shape pshape that is inside the mask.
    >>> mask = np.zeros((3, 4))
    >>> mask[1, 2] = 1
    >>> sample_mask(mask, 1, (1, 1), 1)
    array([[1, 2]], dtype=uint16)
    :param ndarray mask: Mask
    :param int value: Sample points in mask that have this value.
    :param tuple pshape: Patch shape of form (h,w)
    :param int n: Number of points to sample. If there is not enough points
        to sample from a smaller number will be returned. If there are not
        points at all np.empty((0, 2)) will be returned.
    :return: Center points of patches within the mask where the center point
        has the given mask value.
    :rtype: ndarray of shape (n, 2)
    """
    # (r, c) coordinates of all mask pixels with the requested value
    centers = np.transpose(np.where(mask == value))
    # drop centers whose patch would reach outside the mask
    centers = centers_inside(centers, mask, pshape).astype('uint16')
    n = min(n, centers.shape[0])  # cap n at the number of valid centers
    if not n:
        # NOTE(review): this empty array has default float dtype rather
        # than uint16 like the non-empty result — confirm callers cope
        return np.empty((0, 2))
    # sample n distinct rows (uses np.random; not seeded here)
    return centers[np.random.choice(centers.shape[0], n, replace=False), :]
def sample_labeled_patch_centers(mask, value, pshape, n, label):
    """
    Randomly pick n points in mask where mask has given value and add label.
    Identical to imageutil.sample_mask but appends the given label column
    to each sampled center point.
    >>> mask = np.zeros((3, 4))
    >>> mask[1, 2] = 1
    >>> sample_labeled_patch_centers(mask, 1, (1, 1), 1, 0)
    array([[1, 2, 0]], dtype=uint16)
    :param ndarray mask: Mask
    :param int value: Sample points in mask that have this value.
    :param tuple pshape: Patch shape of form (h,w)
    :param int n: Number of points to sample. If there is not enough points
        to sample from a smaller number will be returned. If there are not
        points at all np.empty((0, 2)) will be returned.
    :param int label: Numeric label to append to each center point
    :return: Center points of patches within the mask where the center point
        has the given mask value and the label
    :rtype: ndarray of shape (n, 3)
    """
    sampled = sample_mask(mask, value, pshape, n)
    # one label per sampled center, appended as an extra column
    label_col = np.full((sampled.shape[0], 1), label, dtype=np.uint8)
    return np.hstack((sampled, label_col))
def sample_patch_centers(mask, pshape, npos, nneg, pos=255, neg=0):
    """
    Sample positive and negative patch centers where mask value is pos or neg.
    The sampling routine ensures that the patch is completely inside the mask.
    >>> np.random.seed(0)  # just to ensure consistent doctest
    >>> mask = np.zeros((3, 4))
    >>> mask[1, 2] = 255
    >>> sample_patch_centers(mask, (2, 2), 1, 1)
    array([[1, 1, 0],
           [1, 2, 1]], dtype=uint16)
    :param ndarray mask: Mask
    :param tuple pshape: Patch shape of form (h,w)
    :param int npos: Number of positives to sample.
    :param int nneg: Number of negatives to sample. Can also be a callable
        that derives the number of negatives from npos.
    :param int pos: Value for positive points in mask
    :param int neg: Value for negative points in mask
    :return: Center points of patches within the mask where the center point
        has the given mask value (pos, neg) and the label (1, 0)
    :rtype: ndarray of shape (n, 3)
    """
    positives = sample_labeled_patch_centers(mask, pos, pshape, npos, 1)
    # nneg may be a function of the number of positives sampled
    n_negatives = nneg(npos) if callable(nneg) else nneg
    negatives = sample_labeled_patch_centers(mask, neg, pshape, n_negatives, 0)
    # return all labeled patch center points in random order
    centers = np.vstack((positives, negatives))
    np.random.shuffle(centers)
    return centers
def sample_pn_patches(image, mask, pshape, npos, nneg, pos=255, neg=0):
    """
    Sample positive and negative patches where mask value is pos or neg.
    The sampling routine ensures that the patch is completely inside the
    image and mask and that a patch at the same position is extracted from
    the image and the mask.
    >>> np.random.seed(0)  # just to ensure consistent doctest
    >>> mask = np.zeros((3, 4), dtype='uint8')
    >>> img = np.reshape(np.arange(12, dtype='uint8'), (3, 4))
    >>> mask[1, 2] = 255
    >>> for ip, mp, l in sample_pn_patches(img, mask, (2, 2), 1, 1):
    ...     print(ip)
    ...     print(mp)
    ...     print(l)
    [[0 1]
     [4 5]]
    [[0 0]
     [0 0]]
    0
    [[1 2]
     [5 6]]
    [[  0   0]
     [  0 255]]
    1
    :param ndarray mask: Mask
    :param tuple pshape: Patch shape of form (h,w)
    :param int npos: Number of positives to sample.
    :param int nneg: Number of negatives to sample.
    :param int pos: Value for positive points in mask
    :param int neg: Value for negative points in mask
    :return: Image and mask patches where the patch center point
        has the given mask value (pos, neg) and the label (1, 0)
    :rtype: tuple(image_patch, mask_patch, label)
    """
    # sample labeled centers once, then cut patches at the SAME position
    # from both image and mask so they stay aligned
    for r, c, label in sample_patch_centers(mask, pshape, npos, nneg, pos, neg):
        img_patch = extract_patch(image, pshape, r, c)
        mask_patch = extract_patch(mask, pshape, r, c)
        yield img_patch, mask_patch, label
def annotation2coords(image, annotation):
    """
    Convert geometric annotation in image to pixel coordinates.
    For example, given a rectangular region annotated in an image as
    ('rect', ((x, y, w, h))) the function returns the coordinates of all pixels
    within this region as (row, col) position tuples.
    The following annotation formats are supported:
    ('point', ((x, y), ... ))
    ('circle', ((x, y, r), ...))
    ('ellipse', ((x, y, rx, ry, rot), ...))
    ('rect', ((x, y, w, h), ...))
    ('polyline', (((x, y), (x, y), ...), ...))
    Annotation regions can exceed the image dimensions and will be clipped.
    Note that annotation is in x,y order while output is r,c (row, col).
    >>> import numpy as np
    >>> img = np.zeros((5, 5), dtype='uint8')
    >>> anno = ('point', ((1, 1), (1, 2)))
    >>> for rr, cc in annotation2coords(img, anno):
    ...     print(list(rr), list(cc))
    [1] [1]
    [2] [1]
    :param ndarray image: Image
    :param annotation annotation: Annotation of an image region such as
       point, circle, rect or polyline
    :return: Coordinates of pixels within the (clipped) region.
    :rtype: generator over tuples (row, col)
    """
    # missing annotations (None, empty, or NaN placeholder) yield nothing
    if not annotation or isnan(annotation):
        return
    shape = image.shape[:2]
    kind, geometries = annotation
    for geo in geometries:
        if kind == 'point':
            # single pixel; clip manually since skd has no point primitive
            if geo[1] < shape[0] and geo[0] < shape[1]:
                rr, cc = np.array([geo[1]]), np.array([geo[0]])
            else:
                rr, cc = np.array([]), np.array([])
        elif kind == 'circle':
            # NOTE(review): skd.circle was removed in newer scikit-image
            # (replaced by skd.disk, see commented line) — verify against
            # the installed scikit-image version
            rr, cc = skd.circle(geo[1], geo[0], geo[2], shape=shape)
            #rr, cc = skd.disc((geo[1], geo[0]), geo[2], shape=shape)
        elif kind == 'ellipse':
            # annotation is (x, y, rx, ry, rot); skd.ellipse takes (r, c)
            # center and (r_radius, c_radius), hence the swapped order
            rr, cc = skd.ellipse(geo[1], geo[0], geo[3], geo[2],
                                 rotation=geo[4], shape=shape)
        elif kind == 'rect':
            x, y, w, h = geo
            rr, cc = skd.rectangle((y,x), extent=(h,w), shape=shape)
            # flatten column-major so coordinates run row-wise per column
            rr, cc = rr.flatten('F'), cc.flatten('F')
        elif kind == 'polyline':
            if geo[0] == geo[-1]:  # closed polyline => draw fill polygon
                xs, ys = zip(*geo)
                rr, cc = skd.polygon(ys, xs, shape=shape)
            else:
                rr, cc = polyline2coords(geo)
        else:
            raise ValueError('Invalid kind of annotation: ' + kind)
        # an annotation clipped down to nothing usually means the image was
        # resized/transformed without adjusting the annotation — fail loudly
        if not rr.size or not cc.size:
            err_msg = 'Annotation {}:{} '.format(kind, geo)
            err_msg += 'outside image {}! Image transformed?'.format(shape)
            raise ValueError(err_msg)
        yield rr, cc
def annotation2pltpatch(annotation, **kwargs):
    """
    Convert geometric annotation to matplotlib geometric objects (=patches)
    For details regarding matplotlib patches see:
    http://matplotlib.org/api/patches_api.html
    For annotation formats see:
    imageutil.annotation2coords
    :param annotation annotation: Annotation of an image region such as
       point, circle, ellipse, rect or polyline
    :return: matplotlib.patches
    :rtype: generator over matplotlib patches
    """
    # missing annotations (None, empty, or NaN placeholder) yield nothing
    if not annotation or isnan(annotation):
        return
    kind, geometries = annotation
    for geo in geometries:
        if kind == 'point':
            pltpatch = plp.CirclePolygon((geo[0], geo[1]), 1, **kwargs)
        elif kind == 'circle':
            pltpatch = plp.Circle((geo[0], geo[1]), geo[2], **kwargs)
        elif kind == 'ellipse':
            # previously unsupported here although annotation2coords accepts
            # it; annotation stores radii and rotation in radians while
            # matplotlib's Ellipse expects full width/height and degrees
            x, y, rx, ry, rot = geo
            pltpatch = plp.Ellipse((x, y), 2 * rx, 2 * ry,
                                   angle=np.degrees(rot), **kwargs)
        elif kind == 'rect':
            x, y, w, h = geo
            pltpatch = plp.Rectangle((x, y), w, h, **kwargs)
        elif kind == 'polyline':
            # open polyline; closed outlines are handled by annotation2coords
            pltpatch = plp.Polygon(geo, closed=False, **kwargs)
        else:
            raise ValueError('Invalid kind of annotation: ' + kind)
        yield pltpatch
def annotation2mask(image, annotations, pos=255):
    """
    Convert geometric annotation to mask.
    For annotation formats see:
    imageutil.annotation2coords
    >>> import numpy as np
    >>> img = np.zeros((3, 3), dtype='uint8')
    >>> anno = ('point', ((0, 1), (2, 0)))
    >>> annotation2mask(img, anno)
    array([[  0,   0, 255],
           [255,   0,   0],
           [  0,   0,   0]], dtype=uint8)
    :param annotation annotation: Annotation of an image region such as
       point, circle, rect or polyline
    :param int pos: Value to write in mask for regions defined by annotation
    :param numpy array image: Image annotation refers to.
      Returned mask will be of same size.
    :return: Mask with annotation
    :rtype: numpy array
    """
    h, w = image.shape[:2]
    mask = np.zeros((h, w), dtype=np.uint8)
    # paint every annotated region into the (initially all-zero) mask
    for rows, cols in annotation2coords(image, annotations):
        mask[rows, cols] = pos
    return mask
| 37,685 | 33.353692 | 97 | py |
nuts-ml | nuts-ml-master/nutsml/metrics.py | """
.. module:: metrics
:synopsis: Evaluation metrics for model performance
"""
from __future__ import absolute_import
def box_intersect(box1, box2):
    """
    Return intersection box of two (bounding) boxes.
    If the boxes don't intersect (0,0,0,0) is returned.
    :param (x,y,w,h) box1: First box with x,y specifying the left upper corner.
    :param (x,y,w,h) box2: Second bounding box.
    :return: Intersection box
    :rtype: (x,y,w,h)
    """
    ax, ay, aw, ah = box1
    bx, by, bw, bh = box2
    left, top = max(ax, bx), max(ay, by)
    right = min(ax + aw, bx + bw)
    bottom = min(ay + ah, by + bh)
    # positive extent in both dimensions means the boxes overlap
    if right > left and bottom > top:
        return (left, top, right - left, bottom - top)
    return (0, 0, 0, 0)
def box_iou(box1, box2):
    """
    Return Intersection over Union of two (bounding) boxes.
    :param (x,y,w,h) box1: First bounding box with x,y specifying the
      left upper corner.
    :param (x,y,w,h) box2: Second bounding box.
    :return: Intersection over Union [0...1]
    :rtype: float
    """
    _, _, w, h = box_intersect(box1, box2)
    area_intersect = w * h
    area_box1 = box1[2] * box1[3]
    area_box2 = box2[2] * box2[3]
    area_union = area_box1 + area_box2 - area_intersect
    # two degenerate (zero-area) boxes would otherwise raise
    # ZeroDivisionError; by convention their IoU is zero
    if not area_union:
        return 0.0
    return area_intersect / float(area_union)
def box_matches(box, boxes):
    """
    Return list of boxes and their IOU's sorted by maximum overlap first.
    :param [x,y,w,h] box: Box to match against all boxes.
    :param list boxes: Boxes that are overlapped with box. Boxes must be
       in format [[x,y,w,h], ...]
    :return: List of boxes with their IOUs: [ (box, IOU), ... ]
    :rtype: list of tuples
    """
    scored = [(candidate, box_iou(box, candidate)) for candidate in boxes]
    # highest overlap first; stable for equal IoUs
    return sorted(scored, key=lambda match: match[1], reverse=True)
def box_best_match(box, boxes):
    """
    Return box in boxes that has the largest IOU with the given box.
    :param [x,y,w,h] box: Box to match against all boxes.
    :param list boxes: Boxes that are overlapped with box. Boxes must be
       in format [[x,y,w,h], ...]
    :return: Best matching box in format ([x,y,w,h], iou) or (None, 0.0)
       if boxes is empty.
    """
    if not boxes:
        return None, 0.0
    best, best_iou = None, -1.0
    for candidate in boxes:
        iou = box_iou(box, candidate)
        if iou > best_iou:  # strict '>' keeps the first of equal matches
            best, best_iou = candidate, iou
    return best, best_iou
def box_pr_curve(true_boxes, pred_boxes, pred_scores, iou_thresh=0.5):
    """
    Return Precision Recall curve for bounding box predictions.
    If either true_boxes or pred_boxes is empty the generator yields nothing.
    References:
    https://sanchom.wordpress.com/tag/average-precision
    :param list true_boxes: Collection of true boxes of format (x,y,w,h).
    :param list pred_boxes: Collection of predicted boxes of format (x,y,w,h).
    :param list pred_scores: Scores/confidence values for each predicted box.
      Higher score means more confident.
    :param float iou_thresh: Threshold for minimum IoU overlap for a box to
      be counted as a true positive.
    :return: Precision-recall curve
    :rtype: generator over tuples (precision, recall, score)
    :raises: AssertionError if pred_boxes and pred_scores don't match in length.
    """
    assert len(pred_scores) == len(pred_boxes)
    if true_boxes and pred_boxes:
        tp, fp = 0, 0
        # greedy matching: each true box may be matched at most once
        true_unmatched = {tuple(b) for b in true_boxes}
        # process predictions most confident first
        preds = sorted(zip(pred_boxes, pred_scores), key=lambda s: -s[1])
        for pred_box, pred_score in preds:
            box, iou = box_best_match(pred_box, true_unmatched)
            if iou >= iou_thresh:
                true_unmatched.discard(box)  # consume the matched true box
                tp += 1
            else:
                fp += 1
            # remaining unmatched true boxes count as false negatives at
            # this confidence level
            fn = len(true_unmatched)
            precision = tp / float(tp + fp)
            recall = tp / float(tp + fn)
            yield precision, recall, pred_score
def box_avg_precision(true_boxes, pred_boxes, pred_scores, iou_thresh=0.5,
                      mode='voc2007'):
    """
    Return Average Precision (AP).
    Note that there are various, different definitions of Average Precision.
    See: https://arxiv.org/abs/1607.03476
    and: https://sanchom.wordpress.com/tag/average-precision/
    This code implements the version used in VOC challenge 2007
    (11-point interpolated average precision), see
    | http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html
    | http://host.robots.ox.ac.uk/pascal/VOC/pubs/everingham10.pdf
    :param list true_boxes: Collection of true boxes of format (x,y,w,h).
    :param list pred_boxes: Collection of predicted boxes of format (x,y,w,h).
    :param list pred_scores: Scores/confidence values for each predicted box.
      Higher score means more confident.
    :param float iou_thresh: Threshold for minimum IoU overlap for a box to
      be counted as a true positive.
    :param string mode: Currently not used and set to 'voc2007'
    :return: Average Precision [0...1]
    :rtype: float
    :raises: AssertionError if pred_boxes and pred_scores don't match in length.
    """
    pr_curve = box_pr_curve(true_boxes, pred_boxes, pred_scores, iou_thresh)
    pr_curve = list(pr_curve)  # generator to list
    if not pr_curve:
        return 0.0
    rts = [r / 10.0 for r in range(0, 11)]  # recall thresholds 0.0 ... 1.0
    def p_inter(rt):
        # interpolated precision: max precision at recall >= threshold
        ps = [p for p, r, _ in pr_curve if r >= rt]
        return max(ps) if ps else 0
    # average of interpolated precision over the 11 recall thresholds
    return sum(p_inter(rt) for rt in rts) / 11
def box_mean_avg_precision(true_boxes, true_labels, pred_boxes, pred_labels,
                           pred_scores, iou_thresh=0.5, mode='voc2007'):
    """
    Return mean Average Precision (mAP), i.e. the AP averaged over all
    labels that have both true and predicted boxes.
    See box_avg_precision() for details.
    :param list true_boxes: Collection of true boxes of format (x,y,w,h).
    :param list true_labels: Label for each true box.
    :param list pred_boxes: Collection of predicted boxes of format (x,y,w,h).
    :param list pred_labels: Label for each predicted box.
    :param list pred_scores: Scores/confidence values for each predicted box.
      Higher score means more confident.
    :param float iou_thresh: Threshold for minimum IoU overlap for a box to
      be counted as a true positive.
    :param string mode: Currently not used and set to 'voc2007'
    :return: mean Average Precision [0...1]
    :rtype: float
    :raises: AssertionError if pred_boxes and pred_scores don't match in length.
    """
    assert len(true_boxes) == len(true_labels)
    assert len(pred_boxes) == len(pred_labels)
    assert len(pred_boxes) == len(pred_scores)
    labels = set(true_labels + pred_labels)
    sap, n = 0.0, 0  # sum of per-label APs and number of labels counted
    for label in labels:
        # predictions and true boxes restricted to the current label
        preds = zip(pred_boxes, pred_labels, pred_scores)
        pbss = [(b, s) for b, l, s in preds if l == label]
        tbs = [b for b, l in zip(true_boxes, true_labels) if l == label]
        # labels without both true and predicted boxes are skipped entirely
        if tbs and pbss:
            pbs, pss = zip(*pbss)
            sap += box_avg_precision(tbs, pbs, pss, iou_thresh, mode=mode)
            n += 1
    return sap / n if n else 0
| 6,763 | 36.787709 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/__init__.py | __version__ = '1.2.3'
# exporting all nuts-flow functions under nutsml namespace
from nutsflow import *
# exporting common nuts-ml functions under nutsml namespace
from nutsml.batcher import BuildBatch
from nutsml.booster import Boost
from nutsml.checkpoint import Checkpoint
from nutsml.stratify import Stratify, CollectStratified
from nutsml.logger import LogToFile, LogCols
from nutsml.network import Network, KerasNetwork, LasagneNetwork
from nutsml.plotter import PlotLines
from nutsml.reader import (ReadNumpy, ReadImage, ReadLabelDirs, ReadPandas)
from nutsml.transformer import (TransformImage, AugmentImage, ImageMean,
ImageChannelMean, RegularImagePatches,
RandomImagePatches, ImagePatchesByMask,
ImagePatchesByAnnotation,
ImageAnnotationToMask)
from nutsml.common import (CheckNaN, SplitRandom, SplitLeaveOneOut,
PartitionByCol, ConvertLabel)
from nutsml.viewer import (ViewImage, ViewImageAnnotation)
from nutsml.writer import WriteImage
| 1,107 | 45.166667 | 75 | py |
nuts-ml | nuts-ml-master/nutsml/plotter.py | """
.. module:: plotter
:synopsis: Plotting of data, e.g. loss over epochs
"""
import time
import itertools as itt
import matplotlib.pyplot as plt
from six.moves import range
from nutsflow import NutFunction
from nutsflow.common import as_tuple, as_list
class PlotLines(NutFunction):  # pragma: no cover
    """
    Plot line graph for selected data columns.

    Data flowing through the nut is buffered and (re)drawn on every call
    that passes the every_sec / every_n / filterfunc gates; the data itself
    is passed on unaltered.
    """
    def __init__(self, ycols,
                 xcols=None,
                 layout=(1, None),
                 titles=None,
                 every_sec=0,
                 every_n=0,
                 filterfunc=lambda data: True,
                 figsize=None,
                 filepath=None):
        """
        iterable >> PlotLines(ycols) >> Consume()
        >>> import os
        >>> import numpy as np
        >>> from nutsflow import Consume
        >>> fp = 'tests/data/temp_plotter.png'
        >>> xs = np.arange(0, 6.3, 1.2)
        >>> ysin, ycos = np.sin(xs), np.cos(xs)
        >>> data = zip(xs, ysin, ycos)
        >>> data >> PlotLines(1, 0, filepath=fp) >> Consume()
        >>> list(ycos) >> PlotLines(0, filepath=fp) >> Consume()
        >>> data >> PlotLines(ycols=(1,2), filepath=fp) >> Consume()
        >>> ysin.tolist() >> PlotLines(ycols=None, filepath=fp) >> Consume()
        >>> if os.path.exists(fp): os.remove(fp)
        :param int|tuple|None ycols: Index or tuple of indices of the
           data columns that contain the y-data for the plot.
           If None data is used directly.
        :param int|tuple|function|iterable|None xcols: Index or tuple of indices
           of the data columns that contain the x-data for the plot.
           Alternatively an iterator or a function can be provided that
           generates the x-data for the plot, e.g. xcols = itertools.count()
           or xcols = lambda: epoch
           For xcols==None, itertools.count() will be used.
        :param tuple layout: Rows and columns of the plotter layout., e.g.
           a layout of (2,3) means that 6 plots in the data are
           arranged in 2 rows and 3 columns.
           Number of cols can be None is then derived from ycols
        :param tuple|None titles: Optional titles, one per subplot.
        :param float every_sec: Plot every given second, e.g. to plot
           every 2.5 sec every_sec = 2.5
        :param int every_n: Plot every n-th call.
        :param function filterfunc: Boolean function to filter plot data.
        :param tuple figsize: Figure size in inch.
        :param filepath: Path to a file to draw plot to. If provided the
           plot will not appear on the screen.
        :return: Returns input unaltered
        :rtype: any
        """
        # ycols == None means "plot the data values themselves"; the
        # sentinel -1 marks that case in _add_data
        self.ycols = [-1] if ycols is None else as_list(ycols)
        self.xcols = itt.count() if xcols is None else xcols
        self.filepath = filepath
        self.figsize = figsize
        self.titles = titles
        self.cnt = 0  # calls since last plot (for every_n)
        self.time = time.time()  # time of last plot (for every_sec)
        self.filterfunc = filterfunc
        self.every_sec = every_sec
        self.every_n = every_n
        r, c, n = layout[0], layout[1], len(self.ycols)
        if c is None:
            c = n  # derive number of columns from number of y-columns
        self.figure = plt.figure(figsize=figsize)
        # one subplot per y-column, laid out in an r x c grid
        self.axes = [self.figure.add_subplot(r, c, i + 1) for i in range(n)]
        self.reset()
    def __delta_sec(self):
        """Return time in seconds (float) consumed between plots so far"""
        return time.time() - self.time
    def __should_plot(self, data):
        """Return true if data should be plotted"""
        self.cnt += 1
        return (self.filterfunc(data) and
                self.cnt >= self.every_n and
                self.__delta_sec() >= self.every_sec)
    def reset(self):
        """Reset plot data buffers (one x and one y buffer per subplot)"""
        self.xdata, self.ydata = [], []
        for _ in self.ycols:
            self.xdata.append([])
            self.ydata.append([])
    def _add_data(self, data):
        """Add data point to data buffer"""
        if hasattr(data, 'ndim'):  # is it a numpy array?
            # 0-dim arrays must be unwrapped via item(); others via tolist()
            data = data.tolist() if data.ndim else [data.item()]
        else:
            data = as_list(data)
        if hasattr(self.xcols, '__iter__'):
            # xcols is an iterator (e.g. itertools.count): one shared x value
            x = next(self.xcols)
            for i, _ in enumerate(self.ycols):
                self.xdata[i].append(x)
        elif hasattr(self.xcols, '__call__'):
            # xcols is a function producing the current x value
            x = self.xcols()
            for i, _ in enumerate(self.ycols):
                self.xdata[i].append(x)
        else:
            # xcols is a column index or tuple of indices into data
            for i, xcol in enumerate(as_tuple(self.xcols)):
                self.xdata[i].append(data[xcol])
        for i, ycol in enumerate(self.ycols):
            # ycol == -1 is the sentinel for "use data value directly"
            self.ydata[i].append(data if ycol < 0 else data[ycol])
    def __call__(self, data):
        """Buffer data, redraw the plot if due, and pass data through"""
        if not self.__should_plot(data):
            return data
        self.cnt = 0  # reset counter
        self.time = time.time()  # reset timer
        self._add_data(data)
        for i, ax in enumerate(self.axes):
            ax.clear()  # full redraw of each subplot from the buffers
            if self.titles:
                ax.set_title(self.titles[i])
            ax.plot(self.xdata[i], self.ydata[i], '-')
            ax.figure.canvas.draw()
        if self.filepath:
            self.figure.savefig(self.filepath, bbox_inches='tight')
        else:
            plt.pause(0.0001)  # Needed to draw
        return data
| 5,309 | 33.934211 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/examples/__init__.py | """
Example nuts-ml pipelines
""" | 33 | 10.333333 | 25 | py |
nuts-ml | nuts-ml-master/nutsml/examples/pytorch_/__init__.py | 0 | 0 | 0 | py | |
nuts-ml | nuts-ml-master/nutsml/examples/pytorch_/mnist/cnn_train.py | """
.. module:: cnn_train
:synopsis: Example nuts-ml pipeline for training a CNN on MNIST
"""
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import nutsflow as nf
import nutsml as nm
import numpy as np
from nutsml.network import PytorchNetwork
from utils import download_mnist, load_mnist
BATCHSIZE = 64
EPOCHS = 3
class Model(nn.Module):
    """Small CNN classifier for 28x28 single-channel MNIST digits."""
    def __init__(self, device='cpu'):
        """Construct model on given device, e.g. 'cpu' or 'cuda'"""
        super(Model, self).__init__()
        stack = [
            nn.Conv2d(1, 10, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(True),
            nn.BatchNorm2d(10),
            nn.Conv2d(10, 20, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(True),
            nn.BatchNorm2d(20),
            nn.Flatten(),
            nn.Linear(320, 50),
            nn.ReLU(True),
            nn.Linear(50, 10),
        ]
        self.layers = nn.Sequential(*stack)
        self.to(device)  # set device before constructing optimizer
        # required properties of a model to be wrapped as PytorchNetwork!
        self.device = device  # 'cuda', 'cuda:0' or 'gpu'
        self.losses = F.cross_entropy  # can be list of loss functions
        self.optimizer = optim.Adam(self.parameters())
    def forward(self, x):
        """Return class logits for input batch x of shape (N, 1, 28, 28)"""
        return self.layers(x)
# batcher for training/validation: image input plus integer class output
build_batch = (nm.BuildBatch(BATCHSIZE)
               .input(0, 'image', 'float32', True)
               .output(1, 'number', 'int64'))
# batcher for prediction: image input only, no target column
build_pred_batch = (nm.BuildBatch(BATCHSIZE)
                    .input(0, 'image', 'float32', True))
# image augmentation applied to column 0; 'identical' keeps every original
# sample, the other transforms each fire with probability 0.2
augment = (nm.AugmentImage(0)
           .by('identical', 1)
           .by('translate', 0.2, [-3, +3], [-3, +3])
           .by('rotate', 0.2, [-30, +30])
           .by('shear', 0.2, [0, 0.2])
           .by('elastic', 0.2, [5, 5], [100, 100], [0, 100])
           )
# convert a flat [0,1] float vector of length 784 into a 28x28 uint8 image
vec2img = nf.MapCol(0, lambda x: (x.reshape([28, 28]) * 255).astype('uint8'))
def accuracy(y_true, y_pred):
    """
    Return classification accuracy in percent.

    :param iterable y_true: True class labels (ints).
    :param iterable y_pred: Network outputs, one score vector per sample;
        the argmax of each vector is the predicted class.
    :return: Accuracy in percent [0..100].
    :rtype: float
    """
    # plain-Python equivalent of sklearn.metrics.accuracy_score; avoids
    # pulling in sklearn for a one-line metric
    labels = [int(yp.argmax()) for yp in y_pred]
    correct = sum(1 for yt, yp in zip(y_true, labels) if int(yt) == yp)
    return 100 * correct / float(len(labels))
def train(network, x, y, epochs):
    """
    Train network for given number of epochs.

    :param Network network: Wrapped pytorch network.
    :param iterable x: Flat image vectors.
    :param iterable y: Integer class labels.
    :param int epochs: Number of epochs to train.
    """
    for epoch in range(epochs):
        print('epoch', epoch + 1)
        # convert vectors to images, augment, shuffle, batch and train;
        # network.train() emits one loss per batch
        losses = (zip(x, y) >> nf.PrintProgress(x) >> vec2img >>
                  augment >> nf.Shuffle(1000) >> build_batch >>
                  network.train() >> nf.Collect())
        print('train loss: %.4f' % np.mean(losses))
def validate(network, x, y):
    """Compute validation/test loss (= mean over batch losses)"""
    # same pipeline as train() but without augmentation/shuffling and
    # without weight updates (network.validate())
    losses = (zip(x, y) >> nf.PrintProgress(x) >> vec2img >>
              build_batch >> network.validate() >> nf.Collect())
    print('val loss: %.4f' % np.mean(losses))
def predict(network, x, y):
    """Compute network outputs and print accuracy"""
    # prediction batches carry no target column; preds are score vectors
    preds = (zip(x, y) >> nf.PrintProgress(x) >> vec2img >>
             build_pred_batch >> network.predict() >> nf.Collect())
    acc = accuracy(y, preds)
    print('test acc %.1f %%' % acc)
def evaluate(network, x, y):
    """Evaluate network performance (here accuracy) and return the metric"""
    metrics = [accuracy]
    result = (zip(x, y) >> nf.PrintProgress(x) >> vec2img >>
              build_batch >> network.evaluate(metrics))
    return result
def view_misclassified_images(network, x, y):
    """Show misclassified images with true and predicted labels"""
    # build a "true:t pred:p" annotation string from (image, true, pred)
    make_label = nf.Map(lambda s: (s[0], 'true:%d pred:%d' % (s[1], s[2])))
    # keep only samples where true label != predicted label
    filter_error = nf.Filter(lambda s: s[1] != s[2])
    view_image = nm.ViewImageAnnotation(0, 1, pause=1)
    # first pass: compute predicted class per sample (argmax of scores)
    preds = (zip(x, y) >> vec2img >> build_pred_batch >>
             network.predict() >> nf.Map(np.argmax) >> nf.Collect())
    # second pass: display only the misclassified samples
    (zip(x, y, preds) >> vec2img >> filter_error >> make_label >>
     view_image >> nf.Consume())
def view_augmented_images(x, y, n=10):
    """Show n augmented images with their labels"""
    view_image = nm.ViewImageAnnotation(0, 1, pause=1)
    zip(x, y) >> vec2img >> augment >> nf.Take(n) >> view_image >> nf.Consume()
# script entry: load MNIST, build network, train, then evaluate/predict
if __name__ == '__main__':
    print('loading data...')
    filepath = download_mnist()
    x_train, y_train, x_test, y_test = load_mnist(filepath)
    print('creating model...')
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    model = Model(device)
    network = PytorchNetwork(model)
    # network.load_weights()
    network.print_layers((1, 28, 28))
    print('training ...')
    train(network, x_train, y_train, EPOCHS)
    network.save_weights()
    print('evaluating ...')
    print('train acc:', evaluate(network, x_train, y_train))
    print('test acc:', evaluate(network, x_test, y_test))
    print('validating ...')
    validate(network, x_test, y_test)
    print('predicting ...')
    predict(network, x_test, y_test)
    # print('viewing images...')
    # view_augmented_images(x_test, y_test)
    # print('showing errors ...')
    # view_misclassified_images(network, x_test, y_test)
nuts-ml | nuts-ml-master/nutsml/examples/pytorch_/mnist/utils.py | """
Utility functions for downloading the MNIST data set.
"""
import requests
import pickle
import gzip
from pathlib import Path
def download_mnist():
    """
    Download the MNIST pickle to data/mnist (if not already present).

    :return: Path to the (possibly just downloaded) mnist.pkl.gz file.
    :rtype: pathlib.Path
    """
    folder = Path("data/mnist")
    filename = "mnist.pkl.gz"
    fullpath = folder / filename
    # NOTE(review): deeplearning.net has been unreliable/offline at times —
    # confirm this mirror is still reachable
    url = "http://deeplearning.net/data/mnist/" + filename
    folder.mkdir(parents=True, exist_ok=True)
    if not fullpath.exists():
        response = requests.get(url)
        # fail loudly instead of silently writing an HTTP error page to disk
        response.raise_for_status()
        # write_bytes closes the file handle (original left it to the GC)
        fullpath.write_bytes(response.content)
    return fullpath
def load_mnist(filepath):
    """
    Load pickled, gzipped MNIST data from filepath.

    :param pathlib.Path filepath: Path to mnist.pkl.gz
    :return: x_train, y_train, x_valid, y_valid
    :rtype: tuple
    """
    with gzip.open(filepath.as_posix(), "rb") as stream:
        # file holds ((x_train, y_train), (x_valid, y_valid), test_set);
        # the test split is not used here
        train_set, valid_set, _ = pickle.load(stream, encoding="latin-1")
    x_train, y_train = train_set
    x_valid, y_valid = valid_set
    return x_train, y_train, x_valid, y_valid
| 810 | 26.033333 | 58 | py |
nuts-ml | nuts-ml-master/nutsml/examples/pytorch_/mnist/__init__.py | 0 | 0 | 0 | py | |
nuts-ml | nuts-ml-master/nutsml/examples/pytorch_/mnist/mlp_train.py | """
.. module:: cnn_train
:synopsis: Example nuts-ml pipeline for training a MLP on MNIST
"""
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import nutsflow as nf
import nutsml as nm
import numpy as np
from nutsml.network import PytorchNetwork
from utils import download_mnist, load_mnist
class Model(nn.Module):
    """Three-layer MLP classifier for flattened 28x28 MNIST digits."""
    def __init__(self, device):
        """Construct model on given device, e.g. 'cpu' or 'cuda'"""
        super(Model, self).__init__()
        # attribute names fc1/fc2/fc3 are part of the saved state dict
        self.fc1 = nn.Linear(28 * 28, 500)
        self.fc2 = nn.Linear(500, 256)
        self.fc3 = nn.Linear(256, 10)
        self.to(device)  # set device before constructing optimizer
        # required properties of a model to be wrapped as PytorchNetwork!
        self.device = device  # 'cuda', 'cuda:0' or 'gpu'
        self.losses = nn.CrossEntropyLoss()  # can be list of loss functions
        self.optimizer = optim.Adam(self.parameters())
    def forward(self, x):
        """Return class logits; input x is flattened to shape (N, 784)"""
        flat = x.view(-1, 28 * 28)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def accuracy(y_true, y_pred):
    """
    Return classification accuracy in percent.

    :param iterable y_true: True class labels (ints).
    :param iterable y_pred: Network outputs, one score vector per sample;
        the argmax of each vector is the predicted class.
    :return: Accuracy in percent [0..100].
    :rtype: float
    """
    # plain-Python equivalent of sklearn.metrics.accuracy_score; avoids
    # pulling in sklearn for a one-line metric
    labels = [int(yp.argmax()) for yp in y_pred]
    correct = sum(1 for yt, yp in zip(y_true, labels) if int(yt) == yp)
    return 100 * correct / float(len(labels))
def evaluate(network, x, y):
    """Evaluate network performance (here accuracy) and return the metric"""
    metrics = [accuracy]
    # flat 784-vector input, integer class target
    build_batch = (nm.BuildBatch(64)
                   .input(0, 'vector', 'float32')
                   .output(1, 'number', 'int64'))
    acc = zip(x, y) >> build_batch >> network.evaluate(metrics)
    return acc
def train(network, epochs=3):
    """
    Train network for given number of epochs and report metrics per epoch.

    :param Network network: Wrapped pytorch network.
    :param int epochs: Number of epochs to train.
    """
    print('loading data...')
    filepath = download_mnist()
    x_train, y_train, x_test, y_test = load_mnist(filepath)
    # live loss plot, redrawn at most every 0.2 seconds
    plot = nm.PlotLines(None, every_sec=0.2)
    # flat 784-vector input, integer class target
    build_batch = (nm.BuildBatch(128)
                   .input(0, 'vector', 'float32')
                   .output(1, 'number', 'int64'))
    for epoch in range(epochs):
        print('epoch', epoch + 1)
        # shuffle, batch and train; one loss per batch flows through plot
        losses = (zip(x_train, y_train) >> nf.PrintProgress(x_train) >>
                  nf.Shuffle(1000) >> build_batch >>
                  network.train() >> plot >> nf.Collect())
        acc_test = evaluate(network, x_test, y_test)
        acc_train = evaluate(network, x_train, y_train)
        print('train loss : {:.6f}'.format(np.mean(losses)))
        print('train acc  : {:.1f}'.format(acc_train))
        print('test acc   : {:.1f}'.format(acc_test))
# script entry: build MLP, print its layers and train on MNIST
if __name__ == '__main__':
    print('creating model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = Model(device)
    network = PytorchNetwork(model)
    # network.load_weights()
    network.print_layers((28 * 28,))
    print('training network...')
    train(network, epochs=3)
| 2,959 | 30.157895 | 76 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/__init__.py | 0 | 0 | 0 | py | |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/autoencoder/conv_autoencoder.py | """
A simple convolutional autoencoder adapted from
https://blog.keras.io/building-autoencoders-in-keras.html
"""
from nutsml import KerasNetwork
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.models import Model
from runner import INPUT_SHAPE
def create_network():
    """
    Build the convolutional autoencoder and wrap it as a KerasNetwork.

    :return: Wrapped, compiled Keras model with weight checkpoint file.
    :rtype: nutsml.KerasNetwork
    """
    input_img = Input(shape=INPUT_SHAPE)
    # encoder: three conv+pool stages shrinking 28x28x1 down
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)
    # at this point the representation is (4, 4, 8) i.e. 128-dimensional
    # decoder: mirror of the encoder, upsampling back to 28x28x1;
    # note the third conv has no 'same' padding to recover 28 exactly
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
    model = Model(input_img, decoded)
    model.compile(optimizer='adadelta', loss='binary_crossentropy')
    return KerasNetwork(model, 'weights_conv_autoencoder.hd5')
| 1,312 | 36.514286 | 77 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/autoencoder/runner.py | """
Runs training and prediction.
Trains an autoencoder on MNIST and in the prediction phase shows
the original image, the decoded images and the difference.
"""
from __future__ import print_function
import numpy as np
from six.moves import zip, range
from nutsflow import *
from nutsml import *
NUM_EPOCHS = 10 # need more epochs for good results!
BATCH_SIZE = 128
INPUT_SHAPE = (28, 28, 1)
def create_network():
    """Create the convolutional autoencoder network (lazy keras import)"""
    # import inside the function so the module loads without tensorflow
    import conv_autoencoder as cae
    return cae.create_network()
def load_samples():
    """
    Load MNIST and return (train, test) samples for autoencoding.

    Each sample is (image, image) since the autoencoder target is its input.
    """
    from tensorflow.keras.datasets import mnist
    (x_train, _), (x_test, _) = mnist.load_data()  # labels not needed
    h, w, c = INPUT_SHAPE
    x_train = np.reshape(x_train, (len(x_train), h, w, c))
    x_test = np.reshape(x_test, (len(x_test), h, w, c))
    return list(zip(x_train, x_train)), list(zip(x_test, x_test))
@nut_function
def Diff(sample):
    """Append the absolute difference image to an (input, output) sample"""
    x, y = sample
    return x, y, abs(x - y)  # Add difference image to sample
def train():
    """Train the autoencoder on MNIST, saving the best weights per epoch"""
    print('\n\nTRAIN...')
    # map uint8 pixels [0,255] to float32 [0,1] for both image columns
    rerange = TransformImage((0, 1)).by('rerange', 0, 255, 0, 1, 'float32')
    # autoencoder: image is both input and output
    build_batch = (BuildBatch(BATCH_SIZE)
                   .input(0, 'image', 'float32')
                   .output(1, 'image', 'float32'))
    print('creating network and loading data...')
    network = create_network()
    train_samples, test_samples = load_samples()
    print('training...', len(train_samples), len(test_samples))
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)
        t_loss = (train_samples >> PrintProgress(train_samples) >> rerange >>
                  Shuffle(1000) >> build_batch >> network.train() >> Mean())
        print("train loss : {:.6f}".format(t_loss))
        # checkpoint weights whenever the training loss improves
        network.save_best(t_loss, isloss=True)
def predict():
    """Load the best weights and show input, reconstruction and difference images."""
    print('\n\nPREDICT...')
    rerange = TransformImage((0, 1)).by('rerange', 0, 255, 0, 1, 'float32')
    build_batch = (BuildBatch(BATCH_SIZE).input(0, 'image', 'float32'))
    view_images = ViewImage((0, 1, 2), pause=0.5,
                            titles=['Input', 'Output', 'Difference'])
    print('creating network ...')
    network = create_network()
    network.load_weights()
    print('loading data...')
    _, test_samples = load_samples()
    print('predicting...')
    preds = test_samples >> rerange >> build_batch >> network.predict()
    # Pair originals with reconstructions; Diff appends |input - output|.
    (test_samples >> Take(100) >> rerange >> Get(0) >> Zip(preds) >> Diff() >>
     view_images >> Consume())
def view():
    """Display a few raw training samples as a quick sanity check."""
    print('\n\nVIEW...')
    train_samples, test_samples = load_samples()
    (train_samples >> Take(10) >> PrintColType() >> ViewImage(0, pause=1) >>
     Consume())
if __name__ == "__main__":
    # Inspect the data, train the autoencoder, then visualize reconstructions.
    view()
    train()
    predict()
| 2,640 | 26.8 | 78 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/autoencoder/__init__.py | """
Example autoencoders using nuts.
""" | 40 | 12.666667 | 32 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/view_train_images.py | """
.. module:: view_train_images
:synopsis: Example for showing images with transformation
"""
from nutsflow import Take, Consume, GetCols
from nutsml import ViewImage, TransformImage
if __name__ == "__main__":
    # Show elastically deformed MNIST digits; GetCols(0,0,1) duplicates the
    # image column so col 0 gets deformed while col 1 keeps the original.
    from mlp_train import load_samples
    samples, _ = load_samples()
    transform = (TransformImage(0).by('elastic', 5, 100))
    (samples >> GetCols(0,0,1) >> Take(1000) >> transform >>
     ViewImage((0,1), pause=1) >> Consume())
| 453 | 27.375 | 60 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/mlp_view_misclassified.py | """
.. module:: mlp_precit
:synopsis: Example nuts-ml pipeline for prediction
"""
from __future__ import print_function
from nutsflow import Consume, Zip, Unzip, Map, ArgMax, nut_filter
from nutsml import TransformImage, BuildBatch, ViewImageAnnotation
BATCH_SIZE = 128
if __name__ == "__main__":
    # Show every train/test digit that the trained MLP misclassifies,
    # annotated with its true (col 1) and predicted (col 2) label.
    from mlp_train import create_network, load_samples

    # Flatten 28x28 images into 784-vectors after scaling to [0, 1].
    TransformImage.register('flatten', lambda img: img.flatten())
    transform = (TransformImage(0)
                 .by('rerange', 0, 255, 0, 1, 'float32')
                 .by('flatten'))
    show_image = ViewImageAnnotation(0, (1, 2), pause=3, figsize=(3, 3))
    # BUGFIX: batch columns are declared via input()/output() as in the
    # sibling examples (mlp_classify.py), not via the removed by() method.
    pred_batch = BuildBatch(BATCH_SIZE).input(0, 'vector', 'float32')
    # Keep only samples whose true label differs from the prediction.
    IsMisclassified = nut_filter(lambda t: t[1] != t[2])
    print('loading samples ...')
    train_samples, test_samples = load_samples()
    print('loading network...')
    network = create_network()
    network.load_weights()
    print('predicting...')
    samples = train_samples + test_samples
    images, trues = samples >> Unzip()
    preds = (samples >> transform >> pred_batch >>
             network.predict() >> Map(ArgMax()))
    images >> Zip(trues, preds) >> IsMisclassified() >> show_image >> Consume()
| 1,212 | 31.783784 | 79 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/mlp_classify.py | """
.. module:: mlp_predict
:synopsis: Example nuts-ml pipeline for classification
"""
from __future__ import print_function
from nutsflow import Collect, Consume, Get, Zip, Map, Format, ArgMax
from nutsml import (TransformImage, BuildBatch, ReadImage, ReadLabelDirs,
ViewImageAnnotation)
BATCH_SIZE = 128
if __name__ == "__main__":
    # Classify PNG digits from the 'images' directory with the trained MLP
    # and display each image annotated with predicted and true label.
    from mlp_train import create_network
    TransformImage.register('flatten', lambda img: img.flatten())
    transform = (TransformImage(0)
                 .by('rerange', 0, 255, 0, 1, 'float32')
                 .by('flatten'))
    show_image = ViewImageAnnotation(0, (1, 2), pause=1, figsize=(4, 4))
    pred_batch = BuildBatch(BATCH_SIZE).input(0, 'vector', 'float32')
    print('loading network...')
    network = create_network()
    network.load_weights()
    print('predicting...')
    # ReadLabelDirs yields (filepath, label); ReadImage loads column 0.
    samples = ReadLabelDirs('images', '*.png') >> ReadImage(0) >> Collect()
    truelabels = samples >> Get(1) >> Format('true: {}')
    predictions = (samples >> transform >> pred_batch >>
                   network.predict() >> Map(ArgMax()) >> Format('pred: {}'))
    samples >> Get(0) >> Zip(predictions, truelabels) >> show_image >> Consume()
| 1,203 | 34.411765 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/cnn_train.py | """
.. module:: cnn_train
:synopsis: Example nuts-ml pipeline for training a CNN on MNIST
This is code is based on a Keras example (see here)
https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
to train a Multi-layer perceptron on the MNIST data and modified to
use nuts for the data-preprocessing.
"""
from __future__ import print_function
from six.moves import zip, range
from nutsflow import PrintProgress, Collect, Unzip, Shuffle, Pick, Mean, NOP
from nutsml import (KerasNetwork, TransformImage, AugmentImage, BuildBatch,
Boost, PrintColType, PlotLines)
PICK = 0.1 # Pick 10% of the data for a quick trial
NUM_EPOCHS = 10
INPUT_SHAPE = (28, 28, 1)
BATCH_SIZE = 128
NUM_CLASSES = 10
def load_samples():
    """Return (train, test) lists of (image, label) MNIST samples as 28x28 arrays."""
    from tensorflow.python.keras.datasets import mnist
    rows, cols, _ = INPUT_SHAPE
    (tr_x, tr_y), (te_x, te_y) = mnist.load_data()
    tr_x = tr_x.reshape(len(tr_x), rows, cols)
    te_x = te_x.reshape(len(te_x), rows, cols)
    return list(zip(tr_x, tr_y)), list(zip(te_x, te_y))
def create_network():
    """Build and compile the MNIST CNN wrapped as a nuts-ml KerasNetwork."""
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Flatten
    from tensorflow.keras.layers import Conv2D, MaxPooling2D
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=INPUT_SHAPE))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # Weights are checkpointed to / restored from this file by nuts-ml.
    return KerasNetwork(model, 'cnn_weights.hd5')
def train():
    """Train the CNN on a PICK-fraction of MNIST with augmentation and
    checkpoint the weights with the best test accuracy."""
    from tensorflow.keras.metrics import categorical_accuracy
    print('creating network ...')
    network = create_network()
    print('loading data...')
    train_samples, test_samples = load_samples()
    # Keep every original image ('identical') and add randomly transformed copies.
    augment = (AugmentImage(0)
               .by('identical', 1)
               .by('translate', 0.5, [-3, +3], [-3, +3])
               .by('rotate', 0.5, [-5, +5])
               .by('shear', 0.5, [0, 0.2])
               .by('elastic', 0.5, [5, 5], [100, 100], [0, 100]))
    transform = (TransformImage(0)
                 .by('rerange', 0, 255, 0, 1, 'float32'))
    build_batch = (BuildBatch(BATCH_SIZE, prefetch=0)
                   .input(0, 'image', 'float32')
                   .output(1, 'one_hot', 'uint8', NUM_CLASSES))
    plot = PlotLines((0, 1), layout=(2, 1), every_sec=1)
    print('training...', NUM_EPOCHS)
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)
        t_loss, t_acc = (train_samples >> PrintProgress(train_samples) >>
                         Pick(PICK) >> augment >> transform >> Shuffle(100) >>
                         build_batch >> network.train() >> plot >> Unzip())
        print('train loss : {:.6f}'.format(t_loss >> Mean()))
        print('train acc : {:.1f}'.format(100 * (t_acc >> Mean())))
        e_acc = (test_samples >> transform >> build_batch >>
                 network.evaluate([categorical_accuracy]))
        print('test acc : {:.1f}'.format(100 * e_acc))
        # isloss=False: larger accuracy is better when checkpointing.
        network.save_best(e_acc, isloss=False)
if __name__ == "__main__":
    # Train the CNN when run as a script.
    train()
| 3,378 | 34.568421 | 78 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/write_images.py | """
.. module:: write_images
:synopsis: Example for writing of image data
"""
from six.moves import zip
from nutsflow import Take, Consume, Enumerate, Zip, Format, Get, Print
from nutsml import WriteImage
def load_samples():
    """Return (train, test) lists of (image, label) MNIST samples.

    Uses the tensorflow.keras dataset loader for consistency with the other
    examples in this package (which all import MNIST via tensorflow.keras).
    """
    from tensorflow.keras.datasets import mnist
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    return list(zip(X_train, y_train)), list(zip(X_test, y_test))
if __name__ == '__main__':
    # Write the first 30 training digits as PNGs under images/<label>/img<i>.png.
    train_samples, _ = load_samples()
    imagepath = 'images/*.png'
    # Build file names '<label>/img<index>' from a counter and the labels.
    names = Enumerate() >> Zip(train_samples >> Get(1)) >> Format('{1}/img{0}')
    names = names >> Print()
    train_samples >> Take(30) >> WriteImage(0, imagepath, names) >> Consume()
| 681 | 28.652174 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/read_images.py | """
.. module:: read_images
:synopsis: Example for reading and viewing of image data
"""
from nutsflow import Consume, Print
from nutsml import ReadLabelDirs, ReadImage, ViewImageAnnotation, PrintColType
if __name__ == "__main__":
    # Read labeled digits from images/<label>/ directories and display them.
    show_image = ViewImageAnnotation(0, 1, pause=1, figsize=(3, 3))
    (ReadLabelDirs('images', '*.png') >> Print() >> ReadImage(0) >>
     PrintColType() >> show_image >> Consume())
| 421 | 29.142857 | 78 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/cnn_classify.py | """
.. module:: cnn_predict
:synopsis: Example nuts-ml pipeline for network predictions
"""
from nutsflow import Collect, Consume, Get, Zip, Map, ArgMax, Format
from nutsml import (TransformImage, BuildBatch, ReadLabelDirs, ReadImage,
ViewImageAnnotation)
BATCH_SIZE = 128
if __name__ == "__main__":
    # Classify digits from the 'images' directory with the trained CNN and
    # display each image with its predicted label.
    from cnn_train import create_network
    transform = TransformImage(0).by('rerange', 0, 255, 0, 1, 'float32')
    show_image = ViewImageAnnotation(0, 1, pause=1, figsize=(3, 3))
    pred_batch = BuildBatch(BATCH_SIZE).input(0, 'image', 'float32')
    print('loading network...')
    network = create_network()
    network.load_weights()
    print('predicting...')
    samples = ReadLabelDirs('images', '*.png') >> ReadImage(0) >> Collect()
    predictions = (samples >> transform >> pred_batch >>
                   network.predict() >> Map(ArgMax()))
    samples >> Get(0) >> Zip(predictions) >> show_image >> Consume()
| 955 | 33.142857 | 75 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/__init__.py | """
Example nuts-ml pipelines for MNIST
""" | 43 | 13.666667 | 35 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/mlp_train.py | """
.. module:: mlp_train
:synopsis: Example nuts-ml pipeline for training and evaluation
This is code is based on a Keras example (see here)
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
to train a Multi-layer perceptron on the MNIST data and modified to
use nuts for the data-preprocessing.
"""
from __future__ import print_function
from six.moves import zip, range
from nutsflow import PrintProgress, Collect, Unzip, Mean
from nutsml import (KerasNetwork, TransformImage, BuildBatch, PlotLines,
PrintType)
NUM_EPOCHS = 5
BATCH_SIZE = 128
NUM_CLASSES = 10
def load_samples():
    """Return (train, test) lists of (image, int_label) MNIST samples."""
    from tensorflow.python.keras.datasets import mnist
    (xs_tr, ys_tr), (xs_te, ys_te) = mnist.load_data()
    # Convert numpy scalar labels to plain Python ints.
    pairs = lambda xs, ys: [(x, int(y)) for x, y in zip(xs, ys)]
    return pairs(xs_tr, ys_tr), pairs(xs_te, ys_te)
def create_network():
    """Build and compile the MLP (784 -> 512 -> 512 -> 10) as a KerasNetwork."""
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Activation
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # Weights are checkpointed to / restored from this file by nuts-ml.
    return KerasNetwork(model, 'mlp_weights.hd5')
def train():
    """Train the MLP on MNIST and checkpoint the best test accuracy."""
    from tensorflow.keras.metrics import categorical_accuracy
    # Flatten 28x28 images to 784-vectors after scaling to [0, 1].
    TransformImage.register('flatten', lambda img: img.flatten())
    transform = (TransformImage(0)
                 .by('rerange', 0, 255, 0, 1, 'float32')
                 .by('flatten'))
    build_batch = (BuildBatch(BATCH_SIZE)
                   .input(0, 'vector', 'float32')
                   .output(1, 'one_hot', 'uint8', NUM_CLASSES))
    plot = PlotLines((0, 1), layout=(2, 1), every_sec=1)
    print('loading data...')
    train_samples, test_samples = load_samples()
    print('creating network ...')
    network = create_network()
    print('training...', NUM_EPOCHS)
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)
        t_loss, t_acc = (train_samples >> PrintProgress(train_samples) >>
                         transform >> build_batch >>
                         network.train() >> plot >> Unzip())
        print('train loss : {:.6f}'.format(t_loss >> Mean()))
        print('train acc : {:.1f}'.format(100 * (t_acc >> Mean())))
        e_acc = (test_samples >> transform >> build_batch >>
                 network.evaluate([categorical_accuracy]))
        print('test acc : {:.1f}'.format(100 * e_acc))
        # isloss=False: larger accuracy is better when checkpointing.
        network.save_best(e_acc, isloss=False)
if __name__ == "__main__":
    # Train the MLP when run as a script.
    train()
| 2,820 | 31.056818 | 73 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/cifar/view_train_images.py | """
.. module:: view_train_images
:synopsis: Example nuts-ml pipeline reading and viewing image data
"""
from nutsflow import Take, Consume, MapCol
from nutsml import ViewImageAnnotation, PrintColType, ConvertLabel
if __name__ == "__main__":
    # Display CIFAR-10 training images annotated with their class names.
    from cnn_train import load_samples, load_names
    train_samples, _ = load_samples()
    # Map integer labels in column 1 to human-readable class names.
    convert_label = ConvertLabel(1, load_names())
    show_image = ViewImageAnnotation(0, 1, pause=1, figsize=(2, 2),
                                     interpolation='spline36')
    (train_samples >> Take(10) >> convert_label >> PrintColType() >>
     show_image >> Consume())
| 619 | 30 | 69 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/cifar/cnn_train.py | """
.. module:: mlp_view_misclassified
:synopsis: Example for showing misclassified examples
"""
from __future__ import print_function
import pickle
import os.path as osp
from six.moves import zip, map, range
from nutsflow import PrintProgress, Zip, Unzip, Pick, Shuffle, Mean
from nutsml import (KerasNetwork, TransformImage, AugmentImage, BuildBatch,
SplitRandom, PlotLines, PrintType)
PICK = 0.1 # Pick 10% of the data for a quick trial
NUM_EPOCHS = 10
BATCH_SIZE = 128
NUM_CLASSES = 10
INPUT_SHAPE = (32, 32, 3)
def load_samples():
    """Return (train, test) lists of (image, int_label) CIFAR-10 samples."""
    from tensorflow.python.keras.datasets import cifar10
    (xs_tr, ys_tr), (xs_te, ys_te) = cifar10.load_data()
    # Convert the (n, 1) label arrays to plain Python ints.
    pairs = lambda xs, ys: [(x, int(y)) for x, y in zip(xs, ys)]
    return pairs(xs_tr, ys_tr), pairs(xs_te, ys_te)
def load_names():
    """Download CIFAR-10 (if needed) and return its 10 class names."""
    from tensorflow.python.keras.utils.data_utils import get_file
    dirname = 'cifar-10-batches-py'
    origin = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    path = get_file(dirname, origin=origin, untar=True)
    # 'batches.meta' is a pickled dict shipped with the CIFAR-10 archive.
    with open(osp.join(path, 'batches.meta'), 'rb') as f:
        return pickle.load(f)['label_names']
def create_network():
    """Build and compile the CIFAR-10 CNN wrapped as a nuts-ml KerasNetwork."""
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
    from tensorflow.keras.layers import Convolution2D, MaxPooling2D
    model = Sequential()
    model.add(Convolution2D(32, (3, 3), padding='same',
                            input_shape=INPUT_SHAPE))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Convolution2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['accuracy'])
    # Weights are checkpointed to / restored from this file by nuts-ml.
    return KerasNetwork(model, 'weights_cifar10.hd5')
def train():
    """Train the CIFAR-10 CNN with augmentation on a PICK-fraction of the
    data, checkpoint the best validation accuracy, then report test accuracy."""
    from tensorflow.keras.metrics import categorical_accuracy
    rerange = TransformImage(0).by('rerange', 0, 255, 0, 1, 'float32')
    build_batch = (BuildBatch(BATCH_SIZE)
                   .input(0, 'image', 'float32')
                   .output(1, 'one_hot', 'uint8', NUM_CLASSES))
    p = 0.1
    # Keep every original ('identical') and apply each augmentation with prob p.
    augment = (AugmentImage(0)
               .by('identical', 1.0)
               .by('elastic', p, [5, 5], [100, 100], [0, 100])
               .by('brightness', p, [0.7, 1.3])
               .by('color', p, [0.7, 1.3])
               .by('shear', p, [0, 0.1])
               .by('fliplr', p)
               .by('rotate', p, [-10, 10]))
    plot_eval = PlotLines((0, 1), layout=(2, 1), titles=['train', 'val'])
    print('creating network...')
    network = create_network()
    print('loading data...')
    train_samples, test_samples = load_samples()
    train_samples, val_samples = train_samples >> SplitRandom(0.8)
    print('training...', len(train_samples), len(val_samples))
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)
        t_loss, t_acc = (train_samples >> PrintProgress(train_samples) >>
                         Pick(PICK) >> augment >> rerange >> Shuffle(100) >>
                         build_batch >> network.train() >> Unzip())
        t_loss, t_acc = t_loss >> Mean(), t_acc >> Mean()
        print("train loss : {:.6f}".format(t_loss))
        print("train acc : {:.1f}".format(100 * t_acc))
        v_loss, v_acc = (val_samples >> rerange >>
                         build_batch >> network.validate() >> Unzip())
        # BUGFIX: was `v_acc >> Mean(), v_acc >> Mean()`, which reported the
        # validation accuracy as the validation loss.
        v_loss, v_acc = v_loss >> Mean(), v_acc >> Mean()
        print('val loss : {:.6f}'.format(v_loss))
        print('val acc : {:.1f}'.format(100 * v_acc))
        # isloss=False: larger accuracy is better when checkpointing.
        network.save_best(v_acc, isloss=False)
        plot_eval((t_acc, v_acc))
    print('testing...', len(test_samples))
    e_acc = (test_samples >> rerange >> build_batch >>
             network.evaluate([categorical_accuracy]))
    print('test acc : {:.1f}'.format(100 * e_acc))
if __name__ == "__main__":
    # Train the CIFAR-10 CNN when run as a script.
    train()
| 4,395 | 33.614173 | 76 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/cifar/write_images.py | """
.. module:: write_images
:synopsis: Example nuts-ml pipeline for writing of images
"""
from nutsflow import (Take, Print, Consume, Enumerate, Zip, Format, MapCol,
Get)
from nutsml import WriteImage, ConvertLabel
if __name__ == "__main__":
    # Write the first 10 CIFAR-10 training images to images/img<i>_<name>.png.
    from cnn_train import load_samples, load_names
    train_samples, _ = load_samples()
    convert_label = ConvertLabel(1, load_names())
    # File names combine a running index with the class name.
    fnames = (Enumerate() >> Zip(train_samples >> Get(1)) >> convert_label >>
              Format('{0}_{1}') >> Print())
    imagepath = 'images/img*.png'
    train_samples >> Take(10) >> WriteImage(0, imagepath, fnames) >> Consume()
| 648 | 29.904762 | 78 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/cifar/read_images.py | """
.. module:: read_images
:synopsis: Example nuts-ml pipeline for reading and viewing image data
"""
from glob import glob
from nutsflow import Consume
from nutsml import ReadImage, ViewImage, PrintColType
if __name__ == "__main__":
    # Read PNGs from the images/ directory and display them one by one.
    show_image = ViewImage(0, pause=1, figsize=(2, 2), interpolation='spline36')
    paths = glob('images/*.png')
    paths >> ReadImage(None) >> PrintColType() >> show_image >> Consume()
| 429 | 27.666667 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/cifar/cnn_classify.py | """
.. module:: cnn_predict
:synopsis: Example pipeline for viewing annotations and classification
"""
from __future__ import print_function
from glob import glob
from nutsflow import Collect, Consume, Get, Zip, Map, ArgMax, Print
from nutsml import (TransformImage, BuildBatch, ReadImage, ViewImageAnnotation,
ConvertLabel)
BATCH_SIZE = 128
if __name__ == "__main__":
    # Classify PNGs in images/ with the trained CIFAR-10 CNN and display each
    # image annotated with its predicted class name.
    from cnn_train import create_network, load_names
    convert_label = ConvertLabel(None, load_names())
    rerange = TransformImage(0).by('rerange', 0, 255, 0, 1, 'float32')
    show_image = ViewImageAnnotation(0, 1, pause=1, figsize=(3, 3),
                                     interpolation='spline36')
    pred_batch = BuildBatch(BATCH_SIZE).input(0, 'image', 'float32')
    print('loading network...')
    network = create_network()
    network.load_weights()
    print('predicting...')
    samples = glob('images/*.png') >> Print() >> ReadImage(None) >> Collect()
    predictions = (samples >> rerange >> pred_batch >>
                   network.predict() >> convert_label)
    samples >> Get(0) >> Zip(predictions) >> show_image >> Consume()
| 1,151 | 32.882353 | 79 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/cifar/view_data.py | """
.. module:: view_data
:synopsis: Example nuts-ml pipeline viewing CIFAR-10 image data
"""
from nutsflow import Take, Consume
from nutsml import ViewImage
if __name__ == "__main__":
    # Show a few raw CIFAR-10 images (train + test) without labels.
    from cnn_train import load_samples
    train_samples, test_samples = load_samples()
    samples = train_samples + test_samples
    show_image = ViewImage(0, pause=1, figsize=(2, 2), interpolation='spline36')
    samples >> Take(10) >> show_image >> Consume()
| 455 | 25.823529 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/cifar/__init__.py | """
Example nuts-ml pipelines for CIFAR-10
""" | 46 | 14.666667 | 38 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/cifar/view_augmented_images.py | """
.. module:: view_augmented_images
:synopsis: Example nuts-ml pipeline for viewing augmented image data
"""
from nutsflow import Take, Consume
from nutsml import ViewImageAnnotation, AugmentImage
if __name__ == "__main__":
    # Display randomly augmented CIFAR-10 training images with annotations.
    from cnn_train import load_samples
    train_samples, _ = load_samples()
    p = 0.5
    # Keep each original ('identical') and apply each augmentation with prob p.
    augment = (AugmentImage(0)
               .by('identical', 1.0)
               .by('elastic', p, [5, 5], [50, 50], [0, 100])
               .by('brightness', p, [0.7, 1.3])
               .by('rotate', p, [-10, 10])
               .by('fliplr', p)
               )
    show_image = ViewImageAnnotation(0, 1, pause=1, figsize=(2, 2),
                                     interpolation='spline36')
    (train_samples >> Take(10) >> augment >> show_image >> Consume())
| 793 | 29.538462 | 71 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/invariant_l0_attack.py | import tensorflow as tf
import random
import time
import numpy as np
from keras.datasets import mnist
import sys
import os
import itertools
import sklearn.cluster
import scipy.misc
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
DTYPE = tf.float32
def make_model(filters=64, s1=5, s2=5, s3=3,
               d1=0, d2=0, fc=256,
               lr=1e-3, decay=1e-3):
    """Build the MNIST CNN used as the attacked classifier.

    Returns (model, final): `model` ends in a 10-unit Dense layer (logits,
    no softmax); `final` wraps `model` with a Softmax activation. Both are
    compiled with Adam(lr, decay) and categorical cross-entropy.
    s1/s2/s3 are kernel sizes, d1/d2 dropout rates, fc the dense width.
    """
    model = Sequential()
    model.add(Conv2D(filters, kernel_size=(s1, s1),
                     activation='relu',
                     input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filters*2, (s2, s2), activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(filters*2, (s3, s3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(d1))
    model.add(Flatten())
    model.add(Dense(fc, activation='relu'))
    model.add(Dropout(d2))
    model.add(Dense(10))
    opt = keras.optimizers.Adam(lr, decay=decay)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])
    final = Sequential()
    final.add(model)
    final.add(Activation('softmax'))
    final.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])
    return model, final
def train_model(model, x_train, y_train, batch_size=256,
                epochs=20):
    """Fit *model* on (x_train, y_train) and return it.

    Labels are one-hot encoded to 10 classes; training shuffles each epoch
    and logs one line per epoch (verbose=2).
    """
    onehot_labels = keras.utils.to_categorical(y_train, 10)
    model.fit(x_train, onehot_labels, batch_size=batch_size,
              epochs=epochs, shuffle=True, verbose=2)
    return model
def show(img):
    """Print a 28x28 image (values in [0, 1]) as ASCII art on stdout.

    Pixels are scaled by 3 and mapped through a density ramp; a 'START'
    marker line precedes the 28 rows.
    """
    ramp = " .*#" + "#" * 100
    flat = img.flatten() * 3
    print("START")
    for row in range(28):
        cells = flat[row * 28:row * 28 + 28]
        print("".join(ramp[int(round(v))] for v in cells))
def compute_mat(angle, sx, sy, ax, ay, tx, ty, da, db):
    """Compose a 3x3 homogeneous transform and its inverse.

    Composition order: shear(ax, ay) . scale(sx, sy) . translate(tx, ty)
    . rotation-like(angle). `da` and `db` (darkening exponents) are accepted
    for signature compatibility but unused here.
    NOTE(review): both off-diagonal rotation terms are +sin(angle), so this
    is not a standard rotation matrix; the inverse is computed numerically,
    so the (mat, inv) pair stays consistent regardless.
    """
    shear = np.array([[1.0, ax, 0.0],
                      [ay, 1.0, 0.0],
                      [0.0, 0.0, 1.0]])
    scale = np.array([[sx, 0.0, 0.0],
                      [0.0, sy, 0.0],
                      [0.0, 0.0, 1.0]])
    shift = np.array([[1.0, 0.0, tx],
                      [0.0, 1.0, ty],
                      [0.0, 0.0, 1.0]])
    c, s = np.cos(angle), np.sin(angle)
    rot = np.array([[c, s, 0.0],
                    [s, c, 0.0],
                    [0.0, 0.0, 1.0]])
    mat = shear.dot(scale).dot(shift).dot(rot)
    return mat, np.linalg.inv(mat)
def cluster(mask):
    """Group the set pixels of a boolean mask into spatially connected regions.

    *mask* is indexed as mask[0, i, j, 0] over a 28x28 grid. DBSCAN with
    eps=2 and min_samples=5 clusters the (i, j) coordinates of set pixels.
    Returns a (28, 28) float array: 0 = background, -1 = noise pixels,
    k+1 = pixels of DBSCAN cluster k.
    """
    dbscan = sklearn.cluster.DBSCAN(2, min_samples=5)
    points = [(i,j) for i in range(28) for j in range(28) if mask[0,i,j,0]]
    points = np.array(points)
    dbscan.fit(points)
    # Flatten (i, j) coordinates to row-major indices into a 784 vector.
    flat = points[:,0]*28+points[:,1]
    labels = dbscan.labels_
    arr = np.zeros((28*28))
    arr[flat] = -1
    for i in range(max(labels)+1):
        arr[flat[labels==i]] = 1+i
    arr = arr.reshape((28,28))
    return arr
def improve_transform():
    """Refine saved transform-attack candidates with an ACGAN realism score.

    Loads the best transforms/targets saved under best/ by find_transform(),
    scores hybrid images (changed clusters pasted onto the original) with a
    GAN reconstruction score, writes an HTML report to /tmp/out.html and
    pickles all candidates to /tmp/candidates.p. Relies on the module
    globals x_train, y_train, x_test, y_test and use_idx.
    """
    sys.path.append("gan/")
    from gan.acgan_mnist import Generator
    # Graph: generate images from latent z and compare against a target image.
    zin = tf.placeholder(tf.float32, [None, 74])
    x_target = tf.placeholder(tf.float32, [None, 28, 28, 1])
    generated_images, _ = Generator(None, zin)
    generated_images = tf.reshape(generated_images, [-1, 28, 28, 1])
    similarity_loss = tf.reduce_sum(np.abs(generated_images - x_target),axis=(1,2,3))
    z_loss = 0.01*tf.reduce_sum(zin[:,10:]**2, axis=1)
    total_loss = similarity_loss + z_loss
    grads = tf.gradients(similarity_loss, [zin])[0]
    sess = tf.Session()
    touse = [x for x in tf.trainable_variables() if 'Generator' in x.name]
    saver = tf.train.Saver(touse)
    saver.restore(sess, 'gan/model/mnist-acgan-2')
    keras.backend.set_learning_phase(False)
    def score(image, label):
        # GAN score: best total loss over 128 random z vectors, each refined
        # with 30 gradient steps; the first 10 z entries hold the one-hot label.
        #show(image)
        zs = np.random.normal(0, 1, size=(128, 74))
        zs[:,:10] = 0
        zs[:,label] = 1
        for _ in range(30):
            #print("generate")
            ell, l_sim, l_z, nimg, delta = sess.run((total_loss, similarity_loss,
                                                     z_loss, generated_images,grads),
                                                    {zin: zs,
                                                     x_target: image[np.newaxis,:,:,:]})
            #print(l_sim)
            #show(nimg)
            zs[:,10:] -= delta[:,10:]*.01
        return np.min(ell)
    transformation_matrix = tf.placeholder(tf.float32, [8])
    xs = tf.placeholder(DTYPE, [None, 28, 28, 1])
    transformed = tf.contrib.image.transform(xs, transformation_matrix,
                                             'BILINEAR')
    # Collect run ids and the latest saved tick of each run under best/.
    uids = list(set([int(x.split("_")[1]) for x in os.listdir("best") if 'best_' in x and "_10000" in x]))
    num = [max([int(x.split("_")[2][:-4]) for x in os.listdir("best") if str(uids[i]) in x and 'idx' not in x and 'tran' not in x]) for i in range(4)]
    arr = []
    for fileid, filecount in zip(uids, num):
        best = np.load("best/best_%d_%d.npy"%(fileid,filecount))
        best_idx = np.array(np.load("best/best_%d_%d_idx.npy"%(fileid,filecount)), dtype=np.int32)
        best_transforms = np.load("best/best_%d_transforms_%d.npy"%(fileid,filecount))
        mask = (abs(best-x_test[use_idx]) > .5)
        delta = np.sum(mask,axis=(1,2,3))
        arr.append(delta)
        print(delta)
        print(np.median(delta))
    arr = np.min(arr,axis=0)
    fout = open("/tmp/out.html","w")
    def write(txt, img, lab, delta, doinv=False, do=True):
        # Save img to /tmp/<txt>, append a report cell and (if do) GAN-score it,
        # optionally mapping it back through the inverse transform first.
        if do:
            if len(img.shape) == 4:
                img = img[0]
            if doinv:
                timg = sess.run(transformed, {xs: img[np.newaxis,:,:,:],
                                              transformation_matrix: inv.flatten()[:-1]})[0]
            else:
                timg = img
            s = score(timg, lab)
        else:
            s = 0
        print(lab, type(lab))
        print(delta, type(delta))
        fout.write('<div style="float: left; padding: 3px">%d[%d]@%d<br/><img style="width:50px; height:50px;" src="%s"/></div>'%(int(s),lab,delta,txt))
        scipy.misc.imsave("/tmp/"+txt, img.reshape((28,28)))
        print("score of being", lab, "is:", s)
        show(img)
        fout.flush()
        return s
    candidates = []
    for IDX in range(100):
        fout.write("<br/><div style='clear: both'></div><br/>")
        mat, inv = compute_mat(*best_transforms[IDX])
        img = sess.run(transformed, {xs: x_train[best_idx[IDX:IDX+1]],
                                     transformation_matrix: mat.flatten()[:-1]})
        print("Source image")
        write("img_%d_0.png"%IDX, x_test[use_idx[IDX]], y_test[use_idx[IDX]],0)
        print("Target image")
        write("img_%d_2.png"%IDX, x_train[best_idx[IDX]], y_train[best_idx[IDX]],0)
        mask = (abs(x_test[use_idx[IDX]]-img) > .5)
        #origs.append(np.sum(mask))
        print("Transformed target image")
        write("img_%d_1.png"%IDX, img, y_train[best_idx[IDX]],np.sum(mask), True)
        write("img_%d_1.5.png"%IDX, np.array(mask,dtype=np.int32), y_train[best_idx[IDX]], np.sum(mask), True, do=False)
        print("Mask delta", np.sum(mask))
        show(mask)
        clusters = cluster(mask)
        print("\n".join(["".join([str(int(x)) for x in y]) for y in clusters]).replace("0"," ").replace("-1","*"))
        write("img_%d_1.6.png"%IDX, np.array(mask,dtype=np.int32), y_train[best_idx[IDX]], np.sum(mask), True, do=False)
        import matplotlib
        # Render each pixel cluster in a distinct matplotlib cycle color.
        colored = np.zeros((28,28,3))
        for i in range(28):
            for j in range(28):
                if mask[0,i,j,0] != 0:
                    colored[i,j,:] = matplotlib.colors.to_rgb("C"+str(int(clusters[i,j]+1)))
        scipy.misc.imsave("/tmp/img_%d_1.6.png"%IDX, colored)
        possible = []
        # Try every non-empty subset of clusters as the changed-pixel mask.
        for nid,subset in enumerate(itertools.product([False,True], repeat=int(np.max(clusters)))):
            if np.sum(subset) == 0: continue
            mask = np.any([clusters==(i+1) for i,x in enumerate(subset) if x], axis=0)+0.0
            mask = mask.reshape(img.shape)
            print("Mask weight", np.sum(mask))
            out = ((mask)*img) + ((1-mask)*x_test[use_idx[IDX]])
            print("New Image")
            s = write("img_%d_%d.png"%(IDX,3+nid), out, y_train[best_idx[IDX]], np.sum(mask), True)
            possible.append((out,s))
        candidates.append(possible)
        print("-"*80)
    import pickle
    pickle.dump(candidates, open("/tmp/candidates.p","wb"))
def find_transform():
    """Random search for affine transforms that map training digits of OTHER
    classes onto the 100 target test digits with minimal L0 difference.

    Periodically saves the best images/indices/transform parameters under
    best/. Mutates the module globals x_train/x_test (binarizes them) and
    reads y_train, y_test and use_idx.
    """
    global x_train, x_test
    # Binarize the images to {0, 1}.
    x_train = (x_train>.5) + 0
    x_test = (x_test>.5) + 0
    UID = random.randint(0,1000000)
    transformation_matrix = tf.placeholder(tf.float32, [8])
    inverse_matrix = tf.placeholder(tf.float32, [8])
    darkena = tf.placeholder(DTYPE, [])
    darkenb = tf.placeholder(DTYPE, [])
    print('shape',x_train.shape)
    dataset = tf.constant(x_train, dtype=DTYPE)
    labels = tf.constant(y_train, dtype=tf.int32)
    print('a1')
    transformed_dataset = tf.contrib.image.transform(dataset, transformation_matrix,
                                                     'BILINEAR')
    inverted_dataset = tf.contrib.image.transform(transformed_dataset, inverse_matrix,
                                                  'BILINEAR')
    # A transform is "ok" if applying its inverse keeps >= 85% of the pixel mass.
    ok_transform = tf.reduce_sum(inverted_dataset,axis=(1,2,3)) > tf.reduce_sum(dataset,axis=(1,2,3))*.85
    transformed_dataset = (1-(1-transformed_dataset)**darkenb)**(1./darkenb)
    print('a2')
    flat_transformed = tf.cast(tf.reshape(transformed_dataset, [-1, 28*28]), dtype=DTYPE)
    query = tf.placeholder(DTYPE, (None, 28, 28, 1))
    query_y = tf.placeholder(tf.int32, [None])
    query_t = tf.transpose(tf.reshape(query, [-1, 28*28]))
    query_t = (1-(1-query_t)**darkena)**(1./darkena)
    print('a3')
    # Squared distance (up to a per-query constant) between every transformed
    # training image and every query image.
    norms = tf.reduce_sum(tf.square(flat_transformed), axis=1)[:, tf.newaxis] \
            - 2*tf.matmul(flat_transformed, query_t)
    # Large penalties for invalid transforms and for same-class matches.
    badness1 = 1000*tf.reshape((1-tf.cast(ok_transform,dtype=DTYPE)),[-1,1])
    badness2 = 1000*tf.cast(tf.equal(tf.reshape(query_y, [1, -1]), tf.reshape(labels, [-1, 1])), dtype=DTYPE)
    print(norms, badness1, badness2, query_y, labels)
    norms = norms + badness1 + badness2
    _, topk_indices = tf.nn.top_k(-tf.transpose(norms), k=1, sorted=False)
    print('done')
    def rand(low,high):
        # Uniform random float in [low, high).
        return random.random()*(high-low)+low
    sess = tf.Session()
    best = np.zeros((100,28,28,1))
    l0 = np.zeros(100)+10000
    best_idx = np.zeros(100)
    best_transforms = [None]*100
    for tick in range(10000000):
        # Sample a random affine transform plus darkening exponents.
        angle = rand(-.25,.25)
        sx, sy = rand(.8,1.2), rand(.8,1.2)
        ax, ay = rand(-.2,.2), rand(-.2,.2)
        tx, ty = rand(-8,8), rand(-8,8)
        da, db = rand(-.25,4), rand(-.25,4)
        mat, inv = compute_mat(angle, sx, sy, ax, ay, tx, ty, da, db)
        now = time.time()
        ns, topk, dat, is_ok = sess.run((norms, topk_indices, transformed_dataset, ok_transform),
                                        {transformation_matrix: mat.flatten()[:-1],
                                         inverse_matrix: inv.flatten()[:-1],
                                         query: x_test[use_idx],
                                         query_y: y_test[use_idx],
                                         darkena: db,
                                         darkenb: db})
        #print(time.time()-now)
        for i in range(100):
            e = topk[i][0]
            v = ns[e, i]
            # L0 distance between binarized query and its best-matching transform.
            dd = np.sum((x_test[use_idx[i]]>.5)^(dat[e]>.5))
            #print('check', 'idx',i, 'to',e, 'val',v, 'was',best[i])
            if dd < l0[i]:
                #print("new better", 'idx',i, 'map to',e, 'was', best[i], 'now', v)
                #print('l0 diff',np.sum((x_train[i]>.5)^(dat[e]>.5)))
                l0[i] = min(l0[i], dd)
                best[i] = dat[e]
                best_idx[i] = e
                best_transforms[i] = [angle, sx, sy, ax ,ay, tx, ty, da, db]
        if tick%1000 == 0:
            # Periodic progress report and checkpoint of the best results.
            print('mean',np.mean(l0),'median',np.median(l0))
            print(sorted(l0))
            np.save("best/best_%d_%d.npy"%(UID,tick),best)
            np.save("best/best_%d_%d_idx.npy"%(UID,tick),best_idx)
            np.save("best/best_%d_transforms_%d.npy"%(UID,tick),best_transforms)
        if tick%10000 == 0:
            # Occasionally dump ASCII previews of the current best matches.
            for i in range(100):
                print("is",l0[i])
                show(x_test[use_idx[i]])
                show(best[i])
                show((x_test[use_idx[i]]>.5)^(best[i]>.5))
x_train = y_train = None
if __name__ == "__main__":
    # Load MNIST and reshape to (n, 28, 28, 1) float32 images in [0, 1].
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    img_rows = img_cols = 28
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    #data_scores = np.load("mnist_scores.npy")
    #x_train = x_train[data_scores>1.0] # only keep the best 80% of the data
    #y_train = y_train[data_scores>1.0] # only keep the best 80% of the data
    # Fixed set of 100 test-set indices targeted by the attack.
    use_idx = [159, 235, 247, 452, 651, 828, 937, 1018, 1021, 1543, 1567, 1692, 1899, 1904, 1930, 1944, 2027, 2082, 2084,
               2232, 2273, 2306, 2635, 2733, 2805, 2822, 3169, 3290, 3335, 3364, 3394, 3469, 3471, 3540, 3628, 3735, 3999,
               4014, 4086, 4329, 4456, 4471, 4482, 4496, 4503, 4504, 4611, 4630, 4649, 4726, 4840, 4974, 4980, 5089, 5209,
               5281, 5447, 5522, 5700, 5820, 5909, 5926, 5946, 5988, 6054, 6130, 6408, 6506, 6558, 6693, 6759, 6762, 6779,
               6881, 6947, 6997, 7031, 7063, 7154, 7377, 7547, 7625, 7759, 7790, 7796, 7826, 8334, 8535, 9073, 9181, 9195,
               9231, 9375, 9458, 9563, 9639, 9695, 9720, 9811, 9825]
    #model, final = make_model()
    #train_model(final, x_train, y_train)
    #model.save("baseline.model")
    # Run the random transform search; improve_transform() refines its output.
    find_transform()
    #improve_transform()
| 14,333 | 36.03876 | 152 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/acgan_mnist.py | import os, sys
sys.path.append(os.getcwd())
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets
import tensorflow as tf
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.save_images
import tflib.mnist
import tflib.plot
MODE = 'wgan-gp' # dcgan, wgan, or wgan-gp
DIM = 128#64 # Model dimensionality
BATCH_SIZE = 100 # Batch size
CRITIC_ITERS = 5 # For WGAN and WGAN-GP, number of critic iters per gen iter
LAMBDA = 10 # Gradient penalty lambda hyperparameter
ITERS = 2000000 # How many generator iterations to train for
OUTPUT_DIM = 784 # Number of pixels in MNIST (28*28)
NOISY = False
lib.print_model_settings(locals().copy())
def LeakyReLU(x, alpha=0.2):
    """Leaky rectifier: x for positive values, alpha*x for negative ones."""
    negative_branch = alpha * x
    return tf.maximum(negative_branch, x)

def ReLULayer(name, n_in, n_out, inputs):
    """He-initialized fully-connected layer followed by a ReLU."""
    pre_activation = lib.ops.linear.Linear(
        name+'.Linear',
        n_in,
        n_out,
        inputs,
        initialization='he'
    )
    return tf.nn.relu(pre_activation)

def LeakyReLULayer(name, n_in, n_out, inputs):
    """He-initialized fully-connected layer followed by a LeakyReLU."""
    pre_activation = lib.ops.linear.Linear(
        name+'.Linear',
        n_in,
        n_out,
        inputs,
        initialization='he'
    )
    return LeakyReLU(pre_activation)
def Generator(n_samples, noise=None):
    """AC-GAN generator: (one-hot class + 64-d Gaussian) -> 784-d MNIST image.

    When `noise` is None, a random class label is drawn, one-hot encoded, and
    concatenated with fresh Gaussian noise; the label is returned alongside
    the samples. When `noise` is supplied, the caller is responsible for
    embedding the label in it (the first 10 dims, per the fixed_noise usage
    below) and `label` is returned as None.
    """
    label = None
    if noise is None:
        # Sample a class uniformly in [0, 10) and build the 74-d latent input.
        label = tf.random_uniform([n_samples],0,10,dtype=tf.int32)
        label = tf.one_hot(label, 10)
        noise = tf.random_normal([n_samples, 64])
        noise = tf.concat([label, noise], axis=1)
    # Project to a 4x4 feature map with 4*DIM channels (NCHW layout).
    output = lib.ops.linear.Linear('Generator.Input', 64+10, 4*4*4*DIM, noise)
    output = tf.nn.relu(output)
    output = tf.reshape(output, [-1, 4*DIM, 4, 4])
    # Upsample 4x4 -> 8x8, then crop to 7x7 so two more doublings give 28x28.
    output = lib.ops.deconv2d.Deconv2D('Generator.2', 4*DIM, 2*DIM, 5, output)
    output = tf.nn.relu(output)
    output = output[:,:,:7,:7]
    output = lib.ops.deconv2d.Deconv2D('Generator.3', 2*DIM, DIM, 5, output)
    output = tf.nn.relu(output)
    output = lib.ops.deconv2d.Deconv2D('Generator.5', DIM, 1, 5, output)
    if NOISY:
        # Optionally corrupt pre-sigmoid activations with Gaussian noise.
        output += tf.random_normal((n_samples,1,28,28), stddev=.1)
    output = tf.nn.sigmoid(output)
    return tf.reshape(output, [-1, OUTPUT_DIM]), label
def Discriminator(inputs):
    """AC-GAN critic: flat 784-d images -> (critic score per sample, 10-way class logits)."""
    output = tf.reshape(inputs, [-1, 1, 28, 28])
    # Three stride-2 convs: 28x28 -> 14x14 -> 7x7 -> 4x4 (NCHW).
    output = lib.ops.conv2d.Conv2D('Discriminator.1',1,DIM,5,output,stride=2)
    output = LeakyReLU(output)
    output = lib.ops.conv2d.Conv2D('Discriminator.2', DIM, 2*DIM, 5, output, stride=2)
    output = LeakyReLU(output)
    output = lib.ops.conv2d.Conv2D('Discriminator.3', 2*DIM, 4*DIM, 5, output, stride=2)
    output = LeakyReLU(output)
    output = tf.reshape(output, [-1, 4*4*4*DIM])
    # NOTE(review): both heads use the SAME layer name 'Discriminator.Output'.
    # lib.param reuses an existing variable when the name matches, so the
    # second Linear call (output_dim=1) appears to collide with the first
    # (output_dim=10) — verify this is intentional weight sharing.
    preds = lib.ops.linear.Linear('Discriminator.Output', 4*4*4*DIM, 10, output)
    output = lib.ops.linear.Linear('Discriminator.Output', 4*4*4*DIM, 1, output)
    return tf.reshape(output, [-1]), preds
if __name__ == "__main__":
    # Graph inputs: a batch of real images and their integer class labels.
    real_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, OUTPUT_DIM])
    labels_real = tf.placeholder(tf.int32, shape=[BATCH_SIZE])
    fake_data, labels_fake = Generator(BATCH_SIZE)
    disc_real, preds_real = Discriminator(real_data)
    disc_fake, preds_fake = Discriminator(fake_data)
    gen_params = lib.params_with_name('Generator')
    disc_params = lib.params_with_name('Discriminator')
    if True:
        # Auxiliary-classifier losses: real labels are ints (sparse), fake
        # labels are one-hot tensors produced by the Generator.
        classifier_cost_real = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_real,
                                                                              logits=preds_real)
        classifier_cost_fake = tf.nn.softmax_cross_entropy_with_logits(labels=labels_fake,
                                                                       logits=preds_fake)
        classifier_cost = classifier_cost_real + classifier_cost_fake
        # WGAN losses plus the shared classification term.
        gen_cost = -tf.reduce_mean(disc_fake) + classifier_cost
        disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) + classifier_cost
        # WGAN-GP gradient penalty on random interpolates between real and fake.
        alpha = tf.random_uniform(
            shape=[BATCH_SIZE,1],
            minval=0.,
            maxval=1.
        )
        differences = fake_data - real_data
        interpolates = real_data + (alpha*differences)
        gradients = tf.gradients(Discriminator(interpolates), [interpolates])[0]
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes-1.)**2)
        disc_cost += LAMBDA*gradient_penalty
        gen_train_op = tf.train.AdamOptimizer(
            learning_rate=1e-4,
            beta1=0.5,
            beta2=0.9
        ).minimize(gen_cost, var_list=gen_params)
        disc_train_op = tf.train.AdamOptimizer(
            learning_rate=1e-4,
            beta1=0.5,
            beta2=0.9
        ).minimize(disc_cost, var_list=disc_params)
        # Only used by plain WGAN (weight clipping); unused for wgan-gp.
        clip_disc_weights = None
    # For saving samples
    # Fixed latent batch: first 10 dims are a one-hot label cycling 0..9.
    fixed_noise = np.random.normal(size=(128, 74))
    fixed_noise[:,:10] = 0
    for i in range(128):
        fixed_noise[i,i%10] = 1
    fixed_noise = tf.constant(fixed_noise.astype('float32'))
    fixed_noise_samples, _ = Generator(128, noise=fixed_noise)
    def generate_image(frame, true_dist):
        # Render the fixed-noise samples to a tiled PNG for this iteration.
        samples = session.run(fixed_noise_samples)
        lib.save_images.save_images(
            samples.reshape((128, 28, 28)),
            ("noisy-" if NOISY else "")+'mnist_acgan_samples_{0:09d}.png'.format(frame)
        )
    # Dataset iterator
    train_gen, dev_gen, test_gen = lib.mnist.load(BATCH_SIZE, BATCH_SIZE)
    def inf_train_gen():
        # Endless stream of (images, targets) batches over the training set.
        while True:
            for images,targets in train_gen():
                yield images,targets
    saver = tf.train.Saver()
    # Train loop
    with tf.Session() as session:
        session.run(tf.initialize_all_variables())
        gen = inf_train_gen()
        for iteration in range(ITERS):
            start_time = time.time()
            if iteration > 0:
                _ = session.run(gen_train_op)
            # DCGAN trains the critic once per generator step; WGAN variants
            # train it CRITIC_ITERS times.
            if MODE == 'dcgan':
                disc_iters = 1
            else:
                disc_iters = CRITIC_ITERS
            for i in range(disc_iters):
                _data,_targets = next(gen)
                _disc_cost, _ = session.run(
                    [disc_cost, disc_train_op],
                    feed_dict={real_data: _data,
                               labels_real: _targets}
                )
                if clip_disc_weights is not None:
                    _ = session.run(clip_disc_weights)
            lib.plot.plot('train disc cost', _disc_cost)
            lib.plot.plot('time', time.time() - start_time)
            # Calculate dev loss and generate samples every 100 iters
            if iteration % 100 == 99:
                dev_disc_costs = []
                for images,targets in dev_gen():
                    _dev_disc_cost, _creal, _cfake = session.run(
                        (disc_cost, classifier_cost_real, classifier_cost_fake),
                        feed_dict={real_data: images,
                                   labels_real: targets}
                    )
                    dev_disc_costs.append(_dev_disc_cost)
                lib.plot.plot('dev disc cost', np.mean(dev_disc_costs))
                lib.plot.plot('dev classreal cost', np.mean(_creal))
                lib.plot.plot('dev classfake cost', np.mean(_cfake))
                generate_image(iteration, _data)
                saver.save(session, 'model/mnist-acgan-2'+("-noisy" if NOISY else ""))
            # Write logs every 100 iters
            if (iteration < 5) or (iteration % 100 == 99):
                lib.plot.flush()
            lib.plot.tick()
| 7,752 | 33.154185 | 97 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/small_imagenet.py | import numpy as np
import scipy.misc
import time
def make_generator(path, n_files, batch_size):
    """Return an epoch-generator factory over numbered 64x64 PNG files.

    Each call to the returned function yields full batches as a 1-tuple of an
    int32 array shaped (batch_size, 3, 64, 64) in NCHW order. Files are named
    "0001.png".."<n_files>.png" (zero-padded to the width of n_files). The
    shuffle order is deterministic per epoch (seeded by the epoch index).
    """
    epoch_count = [1]
    def get_epoch():
        images = np.zeros((batch_size, 3, 64, 64), dtype='int32')
        files = list(range(n_files))
        random_state = np.random.RandomState(epoch_count[0])
        random_state.shuffle(files)
        epoch_count[0] += 1
        for n, i in enumerate(files):
            image = scipy.misc.imread("{}/{}.png".format(path, str(i+1).zfill(len(str(n_files)))))
            # HWC -> CHW
            images[n % batch_size] = image.transpose(2,0,1)
            # Yield exactly when the buffer is full. The previous condition
            # (`n > 0 and n % batch_size == 0`) fired one image late: it
            # overwrote slot 0 with the next batch's first image before
            # yielding, silently skipped the true first image, and dropped
            # the final batch of every epoch.
            if n % batch_size == batch_size - 1:
                yield (images,)
    return get_epoch
def load(batch_size, data_dir='/home/ishaan/data/imagenet64'):
    """Return (train, valid) epoch-generator factories for 64x64 ImageNet."""
    train = make_generator(data_dir+'/train_64x64', 1281149, batch_size)
    valid = make_generator(data_dir+'/valid_64x64', 49999, batch_size)
    return (train, valid)
if __name__ == '__main__':
    # Smoke-test: time the first 1000 training batches and print one pixel
    # from each batch as a sanity check.
    train_gen, valid_gen = load(64)
    t0 = time.time()
    for i, batch in enumerate(train_gen(), start=1):
        print("{}\t{}".format(str(time.time() - t0), batch[0][0,0,0,0]))
        if i == 1000:
            break
        t0 = time.time()
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/save_images.py | """
Image grid saver, based on color_grid_vis from github.com/Newmu
"""
import numpy as np
import scipy.misc
from scipy.misc import imsave
def save_images(X, save_path):
    """Save a batch of images as one tiled grid image.

    X: array of shape (n, h*w) flat grayscale, (n, h, w) grayscale, or
       (n, c, h, w) BCHW color. Float inputs are assumed in [0, 1] and are
       rescaled to uint8; integer inputs are used as-is.
    save_path: destination image path.
    """
    # [0, 1] -> [0,255]. Check the dtype directly instead of materializing a
    # flattened copy of the whole batch (the old X.flatten()[0] probe also
    # raised IndexError on an empty batch).
    if np.issubdtype(X.dtype, np.floating):
        X = (255.99*X).astype('uint8')

    # Choose the most-square grid that tiles n_samples exactly.
    n_samples = X.shape[0]
    rows = int(np.sqrt(n_samples))
    while n_samples % rows != 0:
        rows -= 1

    nh, nw = rows, n_samples//rows

    if X.ndim == 2:
        # Flat vectors: assume square images.
        X = np.reshape(X, (X.shape[0], int(np.sqrt(X.shape[1])), int(np.sqrt(X.shape[1]))))

    if X.ndim == 4:
        # BCHW -> BHWC
        X = X.transpose(0,2,3,1)
        h, w = X[0].shape[:2]
        img = np.zeros((h*nh, w*nw, 3))
    elif X.ndim == 3:
        h, w = X[0].shape[:2]
        img = np.zeros((h*nh, w*nw))

    # Paste each sample into its grid cell (row-major).
    for n, x in enumerate(X):
        j = n//nw
        i = n%nw
        img[j*h:j*h+h, i*w:i*w+w] = x

    imsave(save_path, img)
| 914 | 22.461538 | 91 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/inception_score.py | # From https://github.com/openai/improved-gan/blob/master/inception_score/model.py
# Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import glob
import scipy.misc
import math
import sys
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
    """Compute the Inception score for a list of images.

    images: list of HWC uint8-range numpy arrays (values in [0, 255], and at
        least one value > 10 as a sanity check that inputs are not [0, 1]).
    splits: number of chunks to average over; returns (mean, std) of
        exp(KL(p(y|x) || p(y))) across splits.
    Requires the module-level `softmax` graph built by _init_inception().
    """
    assert(type(images) == list)
    assert(type(images[0]) == np.ndarray)
    assert(len(images[0].shape) == 3)
    assert(np.max(images[0]) > 10)
    assert(np.min(images[0]) >= 0.0)
    inps = []
    for img in images:
        img = img.astype(np.float32)
        inps.append(np.expand_dims(img, 0))
    bs = 100
    with tf.Session() as sess:
        preds = []
        # Run the Inception softmax in batches of `bs`.
        n_batches = int(math.ceil(float(len(inps)) / float(bs)))
        for i in range(n_batches):
            # sys.stdout.write(".")
            # sys.stdout.flush()
            inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
            inp = np.concatenate(inp, 0)
            pred = sess.run(softmax, {'ExpandDims:0': inp})
            preds.append(pred)
        preds = np.concatenate(preds, 0)
        scores = []
        # Per split: mean KL between conditional and marginal label
        # distributions, exponentiated.
        for i in range(splits):
            part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
            kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
            kl = np.mean(np.sum(kl, 1))
            scores.append(np.exp(kl))
        return np.mean(scores), np.std(scores)
# This function is called automatically.
# This function is called automatically.
def _init_inception():
    """Download (if needed) and load the Inception graph, then build the
    module-level `softmax` tensor used by get_inception_score().

    The pool3 output's static batch dimension is patched to None so the graph
    accepts arbitrary minibatch sizes.
    """
    global softmax
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    if not os.path.exists(filepath):
        # Download the pretrained Inception archive with a progress meter.
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
    with tf.gfile.FastGFile(os.path.join(
            MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    # Works with an arbitrary minibatch size.
    with tf.Session() as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        # Loosen the batch dimension of every op output so batch size is dynamic.
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = o.get_shape()
                shape = [s.value for s in shape]
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                o._shape = tf.TensorShape(new_shape)
        # Rebuild the logits/softmax head on top of the (squeezed) pool3 features.
        w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
        logits = tf.matmul(tf.squeeze(pool3), w)
        softmax = tf.nn.softmax(logits)
if softmax is None:
    _init_inception()
| 3,375 | 33.44898 | 90 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/cifar10.py | import numpy as np
import os
import urllib.request, urllib.parse, urllib.error
import gzip
import pickle as pickle
def unpickle(file):
    """Load one CIFAR-10 batch file and return its (data, labels) pair."""
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='latin1')
    return batch['data'], batch['labels']
def cifar_generator(filenames, batch_size, data_dir):
    """Build an epoch-generator factory over the given CIFAR batch files.

    Each epoch reshuffles images and labels in lockstep and yields
    (image_batch, label_batch) slices of size batch_size; any remainder
    is dropped.
    """
    data_chunks = []
    label_chunks = []
    for filename in filenames:
        data, labels = unpickle(data_dir + '/' + filename)
        data_chunks.append(data)
        label_chunks.append(labels)

    images = np.concatenate(data_chunks, axis=0)
    labels = np.concatenate(label_chunks, axis=0)

    def get_epoch():
        # Replay the same RNG state for both shuffles so images and labels
        # stay aligned.
        rng_state = np.random.get_state()
        np.random.shuffle(images)
        np.random.set_state(rng_state)
        np.random.shuffle(labels)

        n_batches = len(images) // batch_size
        for batch_idx in range(n_batches):
            start = batch_idx * batch_size
            stop = start + batch_size
            yield (images[start:stop], labels[start:stop])

    return get_epoch
def load(batch_size, data_dir):
    """Return (train, test) epoch-generator factories for CIFAR-10."""
    train_files = ['data_batch_1','data_batch_2','data_batch_3','data_batch_4','data_batch_5']
    train = cifar_generator(train_files, batch_size, data_dir)
    test = cifar_generator(['test_batch'], batch_size, data_dir)
    return (train, test)
| 1,197 | 27.52381 | 125 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/plot.py | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import collections
import time
import pickle as pickle
_since_beginning = collections.defaultdict(lambda: {})
_since_last_flush = collections.defaultdict(lambda: {})
_iter = [0]
def tick():
    """Advance the global iteration counter by one."""
    _iter[0] = _iter[0] + 1

def plot(name, value):
    """Record *value* for series *name* at the current iteration."""
    current_iteration = _iter[0]
    _since_last_flush[name][current_iteration] = value
def flush():
    """Print the mean of every series logged since the last flush, merge those
    values into the per-series history, redraw each series' <name>.jpg plot,
    and pickle the full history to log.pkl."""
    prints = []

    for name, vals in list(_since_last_flush.items()):
        prints.append("{}\t{}".format(name, np.mean(list(vals.values()))))
        _since_beginning[name].update(vals)

        # Redraw the full history plot for this series.
        x_vals = np.sort(list(_since_beginning[name].keys()))
        y_vals = [_since_beginning[name][x] for x in x_vals]

        plt.clf()
        plt.plot(x_vals, y_vals)
        plt.xlabel('iteration')
        plt.ylabel(name)
        plt.savefig(name.replace(' ', '_')+'.jpg')

    print("iter {}\t{}".format(_iter[0], "\t".join(prints)))
    _since_last_flush.clear()

    with open('log.pkl', 'wb') as f:
        pickle.dump(dict(_since_beginning), f, pickle.HIGHEST_PROTOCOL)
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/__init__.py | import numpy as np
import tensorflow as tf
import locale
locale.setlocale(locale.LC_ALL, '')
_params = {}
_param_aliases = {}
def param(name, *args, **kwargs):
    """
    A wrapper for `tf.Variable` which enables parameter sharing in models.

    Creates and returns theano shared variables similarly to `tf.Variable`,
    except if you try to create a param with the same name as a
    previously-created one, `param(...)` will just return the old one instead of
    making a new one.

    This constructor also adds a `param` attribute to the shared variables it
    creates, so that you can easily search a graph for all params.
    """
    if name not in _params:
        kwargs['name'] = name
        param = tf.Variable(*args, **kwargs)
        # Tag the variable so graph searches can identify params.
        param.param = True
        _params[name] = param
    result = _params[name]
    i = 0
    # Follow the alias chain (set via alias_params) to the final variable.
    while result in _param_aliases:
        # print 'following alias {}: {} to {}'.format(i, result, _param_aliases[result])
        i += 1
        result = _param_aliases[result]
    return result
def params_with_name(name):
    """Return every registered param whose name contains *name*."""
    matches = []
    for param_name, registered in _params.items():
        if name in param_name:
            matches.append(registered)
    return matches

def delete_all_params():
    """Forget every registered param (e.g. to build a fresh graph)."""
    _params.clear()

def alias_params(replace_dict):
    """Make `param(...)` lookups of each old variable resolve to its replacement."""
    # print "aliasing {} to {}".format(old,new)
    _param_aliases.update(replace_dict)

def delete_param_aliases():
    """Drop all param aliases registered via alias_params()."""
    _param_aliases.clear()
# def search(node, critereon):
# """
# Traverse the Theano graph starting at `node` and return a list of all nodes
# which match the `critereon` function. When optimizing a cost function, you
# can use this to get a list of all of the trainable params in the graph, like
# so:
# `lib.search(cost, lambda x: hasattr(x, "param"))`
# """
# def _search(node, critereon, visited):
# if node in visited:
# return []
# visited.add(node)
# results = []
# if isinstance(node, T.Apply):
# for inp in node.inputs:
# results += _search(inp, critereon, visited)
# else: # Variable node
# if critereon(node):
# results.append(node)
# if node.owner is not None:
# results += _search(node.owner, critereon, visited)
# return results
# return _search(node, critereon, set())
# def print_params_info(params):
# """Print information about the parameters in the given param set."""
# params = sorted(params, key=lambda p: p.name)
# values = [p.get_value(borrow=True) for p in params]
# shapes = [p.shape for p in values]
# print "Params for cost:"
# for param, value, shape in zip(params, values, shapes):
# print "\t{0} ({1})".format(
# param.name,
# ",".join([str(x) for x in shape])
# )
# total_param_count = 0
# for shape in shapes:
# param_count = 1
# for dim in shape:
# param_count *= dim
# total_param_count += param_count
# print "Total parameter count: {0}".format(
# locale.format("%d", total_param_count, grouping=True)
# )
def print_model_settings(locals_):
    """Print every all-uppercase variable in *locals_*, sorted by name.

    The bookkeeping names 'T', 'SETTINGS' and 'ALL_SETTINGS' are excluded.
    """
    print("Uppercase local vars:")
    excluded = ('T', 'SETTINGS', 'ALL_SETTINGS')
    selected = sorted(
        (k, v) for k, v in locals_.items()
        if k.isupper() and k not in excluded
    )
    for var_name, var_value in selected:
        print("\t{}: {}".format(var_name, var_value))
def print_model_settings_dict(settings):
    """Print every (key, value) pair in *settings*, sorted by key."""
    print("Settings dict:")
    for var_name, var_value in sorted(settings.items(), key=lambda kv: kv[0]):
        print("\t{}: {}".format(var_name, var_value))
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/mnist.py | import numpy
import os
import urllib.request, urllib.parse, urllib.error
import gzip
import pickle as pickle
def mnist_generator(data, batch_size, n_labelled, limit=None):
    """Build an epoch-generator factory over an MNIST split.

    data: (images, targets) where images is (N, 784) and targets is (N,).
    batch_size: must divide N (reshape below requires it).
    n_labelled: if not None, the first n_labelled examples (before shuffling)
        are flagged 1 in a per-example `labelled` array, and each yielded
        batch is a 3-tuple (images, targets, labelled_flags).
    limit: if set, truncate the split to its first `limit` examples.

    Bug fix: the labelled branch previously yielded a copy of the WHOLE
    `labelled` array for every batch instead of the per-batch slice
    (`labelled_batches` was computed but never used).
    """
    images, targets = data

    # Shuffle images and targets identically by replaying the RNG state.
    rng_state = numpy.random.get_state()
    numpy.random.shuffle(images)
    numpy.random.set_state(rng_state)
    numpy.random.shuffle(targets)
    if limit is not None:
        print("WARNING ONLY FIRST {} MNIST DIGITS".format(limit))
        images = images.astype('float32')[:limit]
        targets = targets.astype('int32')[:limit]
    if n_labelled is not None:
        labelled = numpy.zeros(len(images), dtype='int32')
        labelled[:n_labelled] = 1

    def get_epoch():
        # Fresh lockstep shuffle of images/targets (and labelled flags) per epoch.
        rng_state = numpy.random.get_state()
        numpy.random.shuffle(images)
        numpy.random.set_state(rng_state)
        numpy.random.shuffle(targets)

        if n_labelled is not None:
            numpy.random.set_state(rng_state)
            numpy.random.shuffle(labelled)

        image_batches = images.reshape(-1, batch_size, 784)
        target_batches = targets.reshape(-1, batch_size)

        if n_labelled is not None:
            labelled_batches = labelled.reshape(-1, batch_size)

            for i in range(len(image_batches)):
                # Yield the per-batch labelled slice (was the full array).
                yield (numpy.copy(image_batches[i]), numpy.copy(target_batches[i]), numpy.copy(labelled_batches[i]))

        else:

            for i in range(len(image_batches)):
                yield (numpy.copy(image_batches[i]), numpy.copy(target_batches[i]))

    return get_epoch
def load(batch_size, test_batch_size, n_labelled=None):
    """Download MNIST to /tmp if missing and return (train, dev, test)
    epoch-generator factories built by mnist_generator()."""
    filepath = '/tmp/mnist.pkl.gz'
    url = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'

    if not os.path.isfile(filepath):
        print("Couldn't find MNIST dataset in /tmp, downloading...")
        urllib.request.urlretrieve(url, filepath)

    # Reuse `filepath` instead of repeating the literal path (was hard-coded
    # a second time, so the two could silently drift apart).
    with gzip.open(filepath, 'rb') as f:
        train_data, dev_data, test_data = pickle.load(f, encoding='latin1')

    return (
        mnist_generator(train_data, batch_size, n_labelled),
        mnist_generator(dev_data, test_batch_size, n_labelled),
        mnist_generator(test_data, test_batch_size, n_labelled)
    )
| 2,180 | 32.553846 | 105 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/ops/conv2d.py | import tflib as lib
import numpy as np
import tensorflow as tf
_default_weightnorm = False
def enable_default_weightnorm():
    """Make weight normalization the default for all subsequent Conv2D calls."""
    global _default_weightnorm
    _default_weightnorm = True

# Optional global override for the filter-init stdev (None = shape-derived).
_weights_stdev = None
def set_weights_stdev(weights_stdev):
    """Force Conv2D filter init to use this stdev until unset_weights_stdev()."""
    global _weights_stdev
    _weights_stdev = weights_stdev

def unset_weights_stdev():
    """Revert Conv2D filter init to the shape-derived (He/Glorot) stdev."""
    global _weights_stdev
    _weights_stdev = None
def Conv2D(name, input_dim, output_dim, filter_size, inputs, he_init=True, mask_type=None, stride=1, weightnorm=None, biases=True, gain=1.):
    """
    2D convolution (NCHW, SAME padding) with shared params via lib.param.

    inputs: tensor of shape (batch size, num channels, height, width)
    mask_type: one of None, 'a', 'b' — PixelCNN-style causal masks, given as
        a (type, n_channels) pair.
    he_init: He init when True, Glorot otherwise (both overridden by
        set_weights_stdev).
    weightnorm: None = module default; True adds a learned per-filter norm.

    returns: tensor of shape (batch size, num channels, height, width)
    """
    with tf.name_scope(name) as scope:

        if mask_type is not None:
            mask_type, mask_n_channels = mask_type

            mask = np.ones(
                (filter_size, filter_size, input_dim, output_dim), 
                dtype='float32'
            )
            center = filter_size // 2

            # Mask out future locations
            # filter shape is (height, width, input channels, output channels)
            mask[center+1:, :, :, :] = 0.
            mask[center, center+1:, :, :] = 0.

            # Mask out future channels
            for i in range(mask_n_channels):
                for j in range(mask_n_channels):
                    if (mask_type=='a' and i >= j) or (mask_type=='b' and i > j):
                        mask[
                            center,
                            center,
                            i::mask_n_channels,
                            j::mask_n_channels
                        ] = 0.


        def uniform(stdev, size):
            # Uniform init with the given stdev (hence the sqrt(3) bounds).
            return np.random.uniform(
                low=-stdev * np.sqrt(3),
                high=stdev * np.sqrt(3),
                size=size
            ).astype('float32')

        fan_in = input_dim * filter_size**2
        fan_out = output_dim * filter_size**2 / (stride**2)

        if mask_type is not None: # only approximately correct
            fan_in /= 2.
            fan_out /= 2.

        if he_init:
            filters_stdev = np.sqrt(4./(fan_in+fan_out))
        else: # Normalized init (Glorot & Bengio)
            filters_stdev = np.sqrt(2./(fan_in+fan_out))

        if _weights_stdev is not None:
            filter_values = uniform(
                _weights_stdev,
                (filter_size, filter_size, input_dim, output_dim)
            )
        else:
            filter_values = uniform(
                filters_stdev,
                (filter_size, filter_size, input_dim, output_dim)
            )

        # print "WARNING IGNORING GAIN"
        filter_values *= gain

        filters = lib.param(name+'.Filters', filter_values)

        if weightnorm==None:
            weightnorm = _default_weightnorm
        if weightnorm:
            # Rescale filters so each output filter has a learned norm `g`.
            norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1,2)))
            target_norms = lib.param(
                name + '.g',
                norm_values
            )
            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1,2]))
                filters = filters * (target_norms / norms)

        if mask_type is not None:
            with tf.name_scope('filter_mask'):
                filters = filters * mask

        result = tf.nn.conv2d(
            input=inputs, 
            filter=filters, 
            strides=[1, 1, stride, stride],
            padding='SAME',
            data_format='NCHW'
        )

        if biases:
            _biases = lib.param(
                name+'.Biases',
                np.zeros(output_dim, dtype='float32')
            )

            result = tf.nn.bias_add(result, _biases, data_format='NCHW')


        return result
| 3,860 | 30.137097 | 140 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/ops/cond_batchnorm.py | import tflib as lib
import numpy as np
import tensorflow as tf
def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True, labels=None, n_labels=None):
    """conditional batchnorm (dumoulin et al 2016) for BCHW conv filtermaps

    Normalizes with per-batch statistics, then applies a per-class offset and
    scale looked up by `labels` (int ids in [0, n_labels)). Only axes=[0,2,3]
    (BCHW) is supported; is_training/stats_iter/update_moving_stats/fused are
    accepted for signature compatibility but unused here.
    """
    if axes != [0,2,3]:
        raise Exception('unsupported')
    mean, var = tf.nn.moments(inputs, axes, keep_dims=True)
    shape = mean.get_shape().as_list() # shape is [1,n,1,1]
    # One (offset, scale) row per class.
    offset_m = lib.param(name+'.offset', np.zeros([n_labels,shape[1]], dtype='float32'))
    scale_m = lib.param(name+'.scale', np.ones([n_labels,shape[1]], dtype='float32'))
    offset = tf.nn.embedding_lookup(offset_m, labels)
    scale = tf.nn.embedding_lookup(scale_m, labels)
    # Broadcast the per-class (batch, channels) params over H and W.
    result = tf.nn.batch_normalization(inputs, mean, var, offset[:,:,None,None], scale[:,:,None,None], 1e-5)
    return result
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/ops/batchnorm.py | import tflib as lib
import numpy as np
import tensorflow as tf
def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True):
    """Batch normalization with optional fused kernels and moving statistics.

    axes: [0,2,3] (BCHW) or [0,2] (BCW) take the fused fast path; anything
        else falls back to plain tf.nn.batch_normalization.
    is_training: bool tensor; None means always use batch statistics.
    stats_iter: int tensor counting stats updates (running-average weight).
    update_moving_stats: when True and is_training is given, moving mean/var
        are updated on training steps.
    """
    if ((axes == [0,2,3]) or (axes == [0,2])) and fused==True:
        if axes==[0,2]:
            # Add a dummy spatial dim so the NCHW fused kernel applies.
            inputs = tf.expand_dims(inputs, 3)
        # Old (working but pretty slow) implementation:
        ##########

        # inputs = tf.transpose(inputs, [0,2,3,1])

        # mean, var = tf.nn.moments(inputs, [0,1,2], keep_dims=False)
        # offset = lib.param(name+'.offset', np.zeros(mean.get_shape()[-1], dtype='float32'))
        # scale = lib.param(name+'.scale', np.ones(var.get_shape()[-1], dtype='float32'))
        # result = tf.nn.batch_normalization(inputs, mean, var, offset, scale, 1e-4)

        # return tf.transpose(result, [0,3,1,2])

        # New (super fast but untested) implementation:
        offset = lib.param(name+'.offset', np.zeros(inputs.get_shape()[1], dtype='float32'))
        scale = lib.param(name+'.scale', np.ones(inputs.get_shape()[1], dtype='float32'))

        moving_mean = lib.param(name+'.moving_mean', np.zeros(inputs.get_shape()[1], dtype='float32'), trainable=False)
        moving_variance = lib.param(name+'.moving_variance', np.ones(inputs.get_shape()[1], dtype='float32'), trainable=False)

        def _fused_batch_norm_training():
            return tf.nn.fused_batch_norm(inputs, scale, offset, epsilon=1e-5, data_format='NCHW')
        def _fused_batch_norm_inference():
            # Version which blends in the current item's statistics
            batch_size = tf.cast(tf.shape(inputs)[0], 'float32')
            mean, var = tf.nn.moments(inputs, [2,3], keep_dims=True)
            mean = ((1./batch_size)*mean) + (((batch_size-1.)/batch_size)*moving_mean)[None,:,None,None]
            var = ((1./batch_size)*var) + (((batch_size-1.)/batch_size)*moving_variance)[None,:,None,None]
            return tf.nn.batch_normalization(inputs, mean, var, offset[None,:,None,None], scale[None,:,None,None], 1e-5), mean, var

            # Standard version
            # return tf.nn.fused_batch_norm(
            #     inputs,
            #     scale,
            #     offset,
            #     epsilon=1e-2, 
            #     mean=moving_mean,
            #     variance=moving_variance,
            #     is_training=False,
            #     data_format='NCHW'
            # )

        if is_training is None:
            outputs, batch_mean, batch_var = _fused_batch_norm_training()
        else:
            outputs, batch_mean, batch_var = tf.cond(is_training,
                                                       _fused_batch_norm_training,
                                                       _fused_batch_norm_inference)
            if update_moving_stats:
                no_updates = lambda: outputs
                def _force_updates():
                    """Internal function forces updates moving_vars if is_training."""
                    float_stats_iter = tf.cast(stats_iter, tf.float32)

                    # Running average weighted by the number of stats updates so far.
                    update_moving_mean = tf.assign(moving_mean, ((float_stats_iter/(float_stats_iter+1))*moving_mean) + ((1/(float_stats_iter+1))*batch_mean))
                    update_moving_variance = tf.assign(moving_variance, ((float_stats_iter/(float_stats_iter+1))*moving_variance) + ((1/(float_stats_iter+1))*batch_var))

                    with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                        return tf.identity(outputs)
                outputs = tf.cond(is_training, _force_updates, no_updates)

        if axes == [0,2]:
            return outputs[:,:,:,0] # collapse last dim
        else:
            return outputs
    else:
        # raise Exception('old BN')
        # TODO we can probably use nn.fused_batch_norm here too for speedup
        mean, var = tf.nn.moments(inputs, axes, keep_dims=True)
        shape = mean.get_shape().as_list()
        if 0 not in axes:
            print("WARNING ({}): didn't find 0 in axes, but not using separate BN params for each item in batch".format(name))
            shape[0] = 1
        offset = lib.param(name+'.offset', np.zeros(shape, dtype='float32'))
        scale = lib.param(name+'.scale', np.ones(shape, dtype='float32'))
        result = tf.nn.batch_normalization(inputs, mean, var, offset, scale, 1e-5)

        return result
| 4,374 | 48.715909 | 169 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/ops/deconv2d.py | import tflib as lib
import numpy as np
import tensorflow as tf
_default_weightnorm = False
def enable_default_weightnorm():
    """Make weight normalization the default for all subsequent Deconv2D calls."""
    global _default_weightnorm
    _default_weightnorm = True

# Optional global override for the filter-init stdev (None = shape-derived).
_weights_stdev = None
def set_weights_stdev(weights_stdev):
    """Force Deconv2D filter init to use this stdev until unset_weights_stdev()."""
    global _weights_stdev
    _weights_stdev = weights_stdev

def unset_weights_stdev():
    """Revert Deconv2D filter init to the shape-derived (He/Glorot) stdev."""
    global _weights_stdev
    _weights_stdev = None
def Deconv2D(
    name, 
    input_dim, 
    output_dim, 
    filter_size, 
    inputs, 
    he_init=True,
    weightnorm=None,
    biases=True,
    gain=1.,
    mask_type=None,
    ):
    """
    Transposed 2D convolution that doubles spatial resolution (fixed stride 2).
    Takes and returns NCHW tensors; internally transposes to NHWC for
    tf.nn.conv2d_transpose. mask_type is not supported.

    inputs: tensor of shape (batch size, height, width, input_dim)
    returns: tensor of shape (batch size, 2*height, 2*width, output_dim)
    """
    with tf.name_scope(name) as scope:

        if mask_type != None:
            raise Exception('Unsupported configuration')

        def uniform(stdev, size):
            # Uniform init with the given stdev (hence the sqrt(3) bounds).
            return np.random.uniform(
                low=-stdev * np.sqrt(3),
                high=stdev * np.sqrt(3),
                size=size
            ).astype('float32')

        stride = 2
        fan_in = input_dim * filter_size**2 / (stride**2)
        fan_out = output_dim * filter_size**2

        if he_init:
            filters_stdev = np.sqrt(4./(fan_in+fan_out))
        else: # Normalized init (Glorot & Bengio)
            filters_stdev = np.sqrt(2./(fan_in+fan_out))


        if _weights_stdev is not None:
            filter_values = uniform(
                _weights_stdev,
                (filter_size, filter_size, output_dim, input_dim)
            )
        else:
            filter_values = uniform(
                filters_stdev,
                (filter_size, filter_size, output_dim, input_dim)
            )

        filter_values *= gain

        filters = lib.param(
            name+'.Filters',
            filter_values
        )

        if weightnorm==None:
            weightnorm = _default_weightnorm
        if weightnorm:
            # Rescale filters so each has a learned norm `g`.
            norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1,3)))
            target_norms = lib.param(
                name + '.g',
                norm_values
            )
            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1,3]))
                filters = filters * tf.expand_dims(target_norms / norms, 1)


        inputs = tf.transpose(inputs, [0,2,3,1], name='NCHW_to_NHWC')

        input_shape = tf.shape(inputs)
        try: # tf pre-1.0 (top) vs 1.0 (bottom)
            output_shape = tf.pack([input_shape[0], 2*input_shape[1], 2*input_shape[2], output_dim])
        except Exception as e:
            output_shape = tf.stack([input_shape[0], 2*input_shape[1], 2*input_shape[2], output_dim])

        result = tf.nn.conv2d_transpose(
            value=inputs, 
            filter=filters,
            output_shape=output_shape, 
            strides=[1, 2, 2, 1],
            padding='SAME'
        )

        if biases:
            _biases = lib.param(
                name+'.Biases',
                np.zeros(output_dim, dtype='float32')
            )
            result = tf.nn.bias_add(result, _biases)

        result = tf.transpose(result, [0,3,1,2], name='NHWC_to_NCHW')


        return result
| 3,276 | 27.25 | 101 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/ops/layernorm.py | import tflib as lib
import numpy as np
import tensorflow as tf
def Layernorm(name, norm_axes, inputs):
    """Layer normalization over `norm_axes`, with learned per-neuron offset
    and scale registered via lib.param under `name`."""
    mean, var = tf.nn.moments(inputs, norm_axes, keep_dims=True)

    # Assume the 'neurons' axis is the first of norm_axes. This is the case for fully-connected and BCHW conv layers.
    n_neurons = inputs.get_shape().as_list()[norm_axes[0]]

    offset = lib.param(name+'.offset', np.zeros(n_neurons, dtype='float32'))
    scale = lib.param(name+'.scale', np.ones(n_neurons, dtype='float32'))

    # Add broadcasting dims to offset and scale (e.g. BCHW conv data)
    offset = tf.reshape(offset, [-1] + [1 for i in range(len(norm_axes)-1)])
    scale = tf.reshape(scale, [-1] + [1 for i in range(len(norm_axes)-1)])

    result = tf.nn.batch_normalization(inputs, mean, var, offset, scale, 1e-5)

    return result
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/ops/conv1d.py | import tflib as lib
import numpy as np
import tensorflow as tf
_default_weightnorm = False
def enable_default_weightnorm():
    """Make weight normalization the default for all subsequent Conv1D calls."""
    global _default_weightnorm
    _default_weightnorm = True
def Conv1D(name, input_dim, output_dim, filter_size, inputs, he_init=True, mask_type=None, stride=1, weightnorm=None, biases=True, gain=1.):
    """
    1D convolution (NCW, SAME padding) with shared params via lib.param.

    inputs: tensor of shape (batch size, num channels, width)
    mask_type: one of None, 'a', 'b' — PixelCNN-style causal masks, given as
        a (type, n_channels) pair.
    returns: tensor of shape (batch size, num channels, width)
    """
    with tf.name_scope(name) as scope:

        if mask_type is not None:
            mask_type, mask_n_channels = mask_type

            mask = np.ones(
                (filter_size, input_dim, output_dim), 
                dtype='float32'
            )
            center = filter_size // 2

            # Mask out future locations
            # filter shape is (width, input channels, output channels)
            mask[center+1:, :, :] = 0.

            # Mask out future channels
            for i in range(mask_n_channels):
                for j in range(mask_n_channels):
                    if (mask_type=='a' and i >= j) or (mask_type=='b' and i > j):
                        mask[
                            center,
                            i::mask_n_channels,
                            j::mask_n_channels
                        ] = 0.

        def uniform(stdev, size):
            # Uniform init with the given stdev (hence the sqrt(3) bounds).
            return np.random.uniform(
                low=-stdev * np.sqrt(3),
                high=stdev * np.sqrt(3),
                size=size
            ).astype('float32')

        fan_in = input_dim * filter_size
        fan_out = output_dim * filter_size / stride

        if mask_type is not None: # only approximately correct
            fan_in /= 2.
            fan_out /= 2.

        if he_init:
            filters_stdev = np.sqrt(4./(fan_in+fan_out))
        else: # Normalized init (Glorot & Bengio)
            filters_stdev = np.sqrt(2./(fan_in+fan_out))

        filter_values = uniform(
            filters_stdev,
            (filter_size, input_dim, output_dim)
        )
        # print "WARNING IGNORING GAIN"
        filter_values *= gain

        filters = lib.param(name+'.Filters', filter_values)

        if weightnorm==None:
            weightnorm = _default_weightnorm
        if weightnorm:
            # Rescale filters so each output filter has a learned norm `g`.
            norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1)))
            target_norms = lib.param(
                name + '.g',
                norm_values
            )
            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1]))
                filters = filters * (target_norms / norms)

        if mask_type is not None:
            with tf.name_scope('filter_mask'):
                filters = filters * mask

        result = tf.nn.conv1d(
            value=inputs, 
            filters=filters, 
            stride=stride,
            padding='SAME',
            data_format='NCHW'
        )

        if biases:
            _biases = lib.param(
                name+'.Biases',
                np.zeros([output_dim], dtype='float32')
            )

            # result = result + _biases

            # bias_add needs 4D NCHW: add a dummy dim, add bias, squeeze back.
            result = tf.expand_dims(result, 3)
            result = tf.nn.bias_add(result, _biases, data_format='NCHW')
            result = tf.squeeze(result)

        return result
| 3,401 | 30.211009 | 140 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/ops/linear.py | import tflib as lib
import numpy as np
import tensorflow as tf
# Module-wide default for the `weightnorm` option of the ops in this file.
_default_weightnorm = False


def enable_default_weightnorm():
    """Make weight normalization the default for subsequently built ops."""
    global _default_weightnorm
    _default_weightnorm = True


def disable_default_weightnorm():
    """Restore the default of building ops without weight normalization."""
    global _default_weightnorm
    _default_weightnorm = False
# Optional module-wide override for the weight-init standard deviation.
# When not None, it takes precedence over the per-op computed stdev.
_weights_stdev = None


def set_weights_stdev(weights_stdev):
    """Force all subsequent weight initializations to use `weights_stdev`."""
    global _weights_stdev
    _weights_stdev = weights_stdev


def unset_weights_stdev():
    """Clear the stdev override; ops compute their own stdev again."""
    global _weights_stdev
    _weights_stdev = None
def Linear(
        name,
        input_dim,
        output_dim,
        inputs,
        biases=True,
        initialization=None,
        weightnorm=None,
        gain=1.
        ):
    """Affine op: ``inputs @ W (+ b)`` with selectable weight init.

    initialization: None, `lecun`, 'glorot', `he`, 'glorot_he',
    `orthogonal`, `("uniform", range)`
    """
    with tf.name_scope(name) as scope:

        def uniform(stdev, size):
            # the module-level override (set_weights_stdev) wins over the
            # stdev computed by the chosen initialization scheme
            if _weights_stdev is not None:
                stdev = _weights_stdev
            bound = stdev * np.sqrt(3)
            return np.random.uniform(
                low=-bound, high=bound, size=size
            ).astype('float32')

        shape = (input_dim, output_dim)

        if initialization == 'lecun':
            # orthogonal init for square lecun layers is disabled (too slow)
            weight_values = uniform(np.sqrt(1. / input_dim), shape)
        elif initialization == 'glorot' or (initialization is None):
            weight_values = uniform(
                np.sqrt(2. / (input_dim + output_dim)), shape
            )
        elif initialization == 'he':
            weight_values = uniform(np.sqrt(2. / input_dim), shape)
        elif initialization == 'glorot_he':
            weight_values = uniform(
                np.sqrt(4. / (input_dim + output_dim)), shape
            )
        elif initialization == 'orthogonal' or \
                (initialization is None and input_dim == output_dim):
            # NOTE: the None case is unreachable here (caught by the glorot
            # branch above); branch order kept to mirror the original flow.

            def sample(shp):
                # orthogonal init via SVD (from lasagne)
                if len(shp) < 2:
                    raise RuntimeError("Only shapes of length 2 or more are "
                                       "supported.")
                flat = (shp[0], np.prod(shp[1:]))
                # TODO: why normal and not uniform?
                a = np.random.normal(0.0, 1.0, flat)
                u, _, v = np.linalg.svd(a, full_matrices=False)
                # pick the factor with the correct shape
                q = u if u.shape == flat else v
                return q.reshape(shp).astype('float32')

            weight_values = sample(shape)
        elif initialization[0] == 'uniform':
            # explicit ("uniform", range) pair
            weight_values = np.random.uniform(
                low=-initialization[1],
                high=initialization[1],
                size=shape,
            ).astype('float32')
        else:
            raise Exception('Invalid initialization!')

        weight_values *= gain
        weight = lib.param(name + '.W', weight_values)

        # fall back to the module-wide weightnorm default
        if weightnorm is None:
            weightnorm = _default_weightnorm
        if weightnorm:
            norm_values = np.sqrt(np.sum(np.square(weight_values), axis=0))
            target_norms = lib.param(name + '.g', norm_values)
            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(
                    tf.reduce_sum(tf.square(weight), reduction_indices=[0])
                )
                weight = weight * (target_norms / norms)

        if inputs.get_shape().ndims == 2:
            result = tf.matmul(inputs, weight)
        else:
            # flatten leading dims, multiply, then restore the shape with
            # the last axis replaced by output_dim
            flat_inputs = tf.reshape(inputs, [-1, input_dim])
            result = tf.matmul(flat_inputs, weight)
            result = tf.reshape(
                result,
                tf.pack(tf.unpack(tf.shape(inputs))[:-1] + [output_dim])
            )

        if biases:
            result = tf.nn.bias_add(
                result,
                lib.param(name + '.b', np.zeros((output_dim,), dtype='float32'))
            )

        return result
Excessive-Invariance | Excessive-Invariance-master/l0/gan/tflib/ops/__init__.py | 0 | 0 | 0 | py | |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/significance.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : compute significance
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""Compute Stream Significance.
Routine Listings
----------------
calculate_significance
"""
__author__ = "Nathaniel Starkman"
__all__ = ["calculate_significance"]
##############################################################################
# IMPORTS
# GENERAL
import numpy as np
from scipy.stats import poisson
from astropy.coordinates import SkyCoord
from astropy.table import Table, QTable
import matplotlib.pyplot as plt
from typing import Optional, Union
# CUSTOM
from astroPHD import ObjDict
##############################################################################
# PARAMETERS
data_typing = Union[SkyCoord, Table]
array_like = Union[list, tuple, np.ndarray]
##############################################################################
# CODE
##############################################################################
def calculate_significance(
    data: data_typing,
    orbit: data_typing,
    idxres: Optional[ObjDict],
    skycut: array_like,
    nsample: int = 27,
    cdf: array_like = (),
    plot=True,
):
    """Calculate the significance of a stream detection.

    Parameters
    ----------
    data : SkyCoord or (Q)Table
        catalog stars; must expose a ``phi2`` component / column.
    orbit : SkyCoord or (Q)Table
        orbit points; must expose a ``phi2`` component / column.
    idxres : ObjDict
        orbit cross-match result; must provide ``idxcatalog``, ``idxo``,
        ``arc`` and ``wsky``.  ``None`` is rejected, since ``arc`` and
        ``wsky`` have no fallback.
    skycut : array_like
        boolean / index array selecting the on-sky region of interest.
    nsample : int, optional
        number of histogram bins across the stream (default 27).
    cdf : array_like, optional
        bin counts at which to evaluate the Poisson survival function;
        must be non-empty.
    plot : bool, optional
        whether to draw the diagnostic plots.

    Returns
    -------
    significance : float
        the significance (p-value) of the stream detection.

    Raises
    ------
    TypeError
        if `data` or `orbit` is not a SkyCoord or (Q)Table.
    ValueError
        if `idxres` is None or `cdf` is empty.

    """
    # --------------------
    if idxres is None:
        # .arc / .wsky below have no fallback, so None cannot be supported
        # (previously this path crashed later with an AttributeError)
        raise ValueError("idxres must provide .idxcatalog, .idxo, .arc, .wsky")
    idxcatalog = idxres.idxcatalog
    idxo = idxres.idxo

    # --------------------
    # latitude of the matched catalog stars
    if isinstance(data, SkyCoord):
        dataphi2 = data.phi2[idxcatalog]
    elif isinstance(data, (Table, QTable)):
        dataphi2 = data["phi2"][idxcatalog]
    else:
        raise TypeError("data is not a SkyCoord or (Q)Table")

    # --------------------
    # latitude of the matched orbit points
    if isinstance(orbit, SkyCoord):
        orbitphi2 = orbit.phi2[idxo]
    elif isinstance(orbit, (Table, QTable)):
        orbitphi2 = orbit["phi2"][idxo]
    else:
        raise TypeError("orbit is not a SkyCoord or (Q)Table")

    # --------------------
    # x: distance along the orbit arc; y: signed sky separation from it
    x = idxres.arc[idxres.idxo][skycut].to_value("deg")
    y = idxres.wsky.copy()
    y[dataphi2 < orbitphi2] *= -1  # stars below the orbit get negative offset
    y = y[skycut].to_value("deg")

    # --------------------
    # bin counts across the stream; computed unconditionally because `mu`
    # is needed for the significance below (previously it was only defined
    # inside the `if plot:` branch -> NameError when plot=False)
    freq, ybin = np.histogram(y, bins=nsample)
    mu = np.median(freq)

    if plot:
        # plot 1: offsets and their histogram
        fig, ax = plt.subplots(1, 2, figsize=(6, 3))
        ax[0].scatter(x, y)
        ax[1].hist(y, bins=nsample)
        ax[1].axhline(np.mean(freq), c="k", ls=":")

        # plot 2: histogram against a Poisson draw at the median count
        sample = poisson.rvs(mu, size=nsample)
        plt.figure()
        plt.hist(y, bins=nsample)
        plt.axhline(np.mean(freq), c="k", ls=":")
        plt.scatter(ybin[:-1], sample, c="k", zorder=2)
    # /if

    if not len(cdf):
        raise ValueError("cdf must be a non-empty sequence of bin counts")

    # joint Poisson survival probability with an (nsample - 2) trials factor
    significance = (nsample - 2) * np.prod(
        [(1 - poisson.cdf(c, mu)) for c in cdf]
    )
    print(f"p value = {significance * 100:.10f} %")

    return significance


# /def
##############################################################################
# END
| 3,599 | 22.225806 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/projection.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : projection
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""Projection.
Routine Listings
----------------
get_data_along_orbit
select_stars_in_an_arm
digitize_star_along_stream
"""
__author__ = "Nathaniel Starkman"
__all__ = [
"get_data_along_orbit",
"select_stars_in_an_arm",
"digitize_star_along_stream",
]
##############################################################################
# IMPORTS
# GENERAL
import numpy as np
from scipy.stats import binned_statistic
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table, QTable
# CUSTOM
from astroPHD import ObjDict, LogFile
from astroPHD.plot import starkplot as plt
# PROJECT-SPECIFIC
from .plot import plot_data_along_orbit, plot_data_along_orbit_in_window
##############################################################################
# PARAMETERS
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible with LogFile
Table_like = (Table, QTable)
##############################################################################
# CODE
##############################################################################
def get_data_along_orbit(
    data,
    orbit,
    plot=True,
    frame="cust",
    lon_angle="phi1",
    lat_angle="phi2",
    **kw
):
    """Match data stars to an orbit track and compute arc-length offsets.

    Parameters
    ----------
    data : astropy SkyCoord
        the data; matched on-sky against `orbit`
    orbit : astropy SkyCoord
        the orbit track; must also expose `lon_angle` / `lat_angle`
    plot : bool
        whether to draw the two diagnostic plots
    frame : str
        frame name forwarded to the plotting helpers
    lon_angle, lat_angle : str
        names of the longitude / latitude components (default phi1 / phi2)

    Other Parameters
    ----------------
    kw
        passed to starkplot's plt.set
        see documentation for options.

    Returns
    -------
    idxres : ObjDict
        contains idxcatalog, idxo, arc, wsky:
        idxcatalog - indices into `data` of stars within 3 deg of the orbit;
        idxo - for each such star, the index of its nearest orbit point;
        arc - cumulative arc length along the orbit (one value per orbit
        point); wsky - on-sky separation of each matched star

    """
    # match to catalog for distance reference
    idx2, d2d, _ = data.match_to_catalog_sky(orbit, nthneighbor=1)

    # data subset
    sep_ind = d2d <= 3 * u.deg  # points within width of orbit (3 deg, hard-coded)
    idxo = idx2[sep_ind]  # index into orbit
    wsky = d2d[sep_ind]  # sky separation
    idxcatalog = np.where(sep_ind == np.True_)[0]  # index into data

    # getting distance along the arc of the orbit
    orbphi1 = getattr(orbit, lon_angle)
    orbphi2 = getattr(orbit, lat_angle)
    # cumulative small-angle path length: sqrt((dlon*cos(lat))^2 + dlat^2),
    # evaluated with the latitude of the segment's starting point
    arc = np.zeros_like(orbphi1)
    arc[1:] = np.cumsum(
        np.sqrt(
            np.diff(orbphi1) ** 2 * np.cos(orbphi2[:-1]) ** 2
            + np.diff(orbphi2) ** 2
        )
    )

    # storing
    idxres = ObjDict(
        "Data Along Orbit",
        idxcatalog=idxcatalog,
        idxo=idxo,
        # along & perpendicular to arc
        arc=arc,
        wsky=wsky,
    )

    # plotting
    if plot:
        # 1st Plot: Closeup
        plot_data_along_orbit(
            data,
            orbit,
            idxres,
            frame=frame,
            lon_angle=lon_angle,
            lat_angle=lat_angle,
            **kw
        )

        # 2nd Plot: SkyWindow
        plot_data_along_orbit_in_window(
            data,
            orbit,
            idxres,
            frame=frame,
            lon_angle=lon_angle,
            lat_angle=lat_angle,
        )
        plt.plot(orbit.phi1, orbit.phi2)
    # /if

    return idxres


# /def
# -----------------------------------------------------------------------------
def select_stars_in_an_arm(
    data,
    orbit,
    idxres,
    skycut=Ellipsis,
    bins=61,
    num_side=1,
    digitize=True,
    numstars=20,
):
    """Select the stars belonging to a stream arm.

    Bins the signed sky offset of the matched stars and keeps the stars
    falling in the central bins.

    Parameters
    ----------
    data : SkyCoord or (Q)Table
    orbit : SkyCoord or (Q)Table
    idxres : ObjDict
        returned by get_data_along_orbit; must provide ``idxcatalog``,
        ``idxo``, ``arc`` and ``wsky``
    skycut : bool array, optional (default Ellipsis)
        selection within the matched stars; Ellipsis keeps everything
    bins : int
        number of offset bins; must be odd so a central bin exists
    num_side : int, optional
        number of bins kept on each side of the central bin
    digitize : bool, optional
        unused; kept for backward compatibility of the signature
    numstars : int, optional
        unused; kept for backward compatibility of the signature

    Returns
    -------
    res : ObjDict
        .statistic : per-bin star counts
        .bin_meds : bin midpoints
        .binnumber : bin index of each star
        .cent_inds : indices of the central bins
        .instream_catalog : selection within the matched stars
        .instream : selection within the whole dataset

    Raises
    ------
    ValueError
        if `bins` is even or `idxres` is None
    TypeError
        if `data` or `orbit` is not a SkyCoord or (Q)Table

    """
    # --------------------
    if not bool(bins % 2):  # it's even
        raise ValueError("bins must be odd")

    # --------------------
    if idxres is None:
        # .arc / .wsky below have no fallback, so None cannot be supported
        # (previously this path crashed later with an AttributeError)
        raise ValueError("idxres must provide .idxcatalog, .idxo, .arc, .wsky")
    idxcatalog = idxres.idxcatalog
    idxo = idxres.idxo

    # --------------------
    if isinstance(data, SkyCoord):
        dataphi2 = data.phi2[idxcatalog]
    elif isinstance(data, (Table, QTable)):
        dataphi2 = data["phi2"][idxcatalog]
    else:
        raise TypeError("data is not a SkyCoord or (Q)Table")

    # --------------------
    if isinstance(orbit, SkyCoord):
        orbitphi2 = orbit.phi2[idxo]
    elif isinstance(orbit, (Table, QTable)):
        orbitphi2 = orbit["phi2"][idxo]
    else:
        raise TypeError("orbit is not a SkyCoord or (Q)Table")

    # --------------------
    # x: distance along the arc; y: signed offset from the orbit
    x = idxres.arc[idxo][skycut].to_value("deg")
    y = idxres.wsky.copy()
    y[dataphi2 < orbitphi2] *= -1  # stars below the orbit -> negative offset
    y = y[skycut].to_value("deg")

    # --------------------
    stats, bin_edges, binnum = binned_statistic(
        y, x, bins=bins, statistic="count"
    )

    # medians of bins
    bin_meds = bin_edges[:-1] + np.diff(bin_edges) / 2

    # indices of the stream: central bin +/- num_side
    cent_ind = int(np.around(np.max(binnum) / 2))
    cent_inds = np.arange(num_side * 2 + 1) - num_side + cent_ind - 1

    # --------------------
    # backing out the indices of stars in the stream
    # inside skycut, inside catalog
    streaminskycut = np.zeros_like(binnum, dtype=bool)
    for ind in cent_inds:
        streaminskycut |= binnum == ind

    # inside catalog
    if skycut is Ellipsis:
        # no sky cut applied: the mask already spans all matched stars
        # (previously the Ellipsis default crashed on skycut.copy())
        streamincatalog = streaminskycut
    else:
        streamincatalog = skycut.copy()
        streamincatalog[streamincatalog] = streaminskycut

    # in the whole dataset
    instream = idxcatalog[streamincatalog]

    # --------------------
    res = ObjDict(
        "instream",
        statistic=stats,
        bin_meds=bin_meds,
        binnumber=binnum,
        cent_inds=cent_inds,
        instream_catalog=streamincatalog,
        instream=instream,
    )

    return res


# /def
# -----------------------------------------------------------------------------
def digitize_star_along_stream(data, instream, bins=10):
    """Bin stream stars along phi1 and average their (ra, dec) per bin.

    Parameters
    ----------
    data : SkyCoord or (Q)Table
        must provide phi1 and (ra, dec); values are astropy Quantities
    instream : array_like
        index / bool array selecting the stream stars in `data`
    bins : int, optional
        number of bins along phi1 (default 10)

    Returns
    -------
    df : (bins, 3) ndarray
        columns ra, dec, dec_err; rows of empty bins are left NaN

    Notes
    -----
    dec_err estimated as bin_width / sqrt(numpoints)
    works with output of select_stars_in_an_arm

    """
    if isinstance(data, SkyCoord):
        dataphi1 = data.phi1
        ra, dec = data.icrs.ra, data.icrs.dec
    elif isinstance(data, (Table, QTable)):
        dataphi1 = data["phi1"]
        ra, dec = data["ra"], data["dec"]
    else:
        raise TypeError("data is not a SkyCoord or (Q)Table")

    x = dataphi1[instream]

    bin_edges = np.histogram_bin_edges(x, bins=bins)  # binning
    widths = np.diff(bin_edges)
    binnums = np.digitize(x, bin_edges[:-1])  # bin numbers in 1..bins

    avg_ra = np.full(bins, np.nan)
    avg_dec = np.full(bins, np.nan)
    avg_dec_err = np.full(bins, np.nan)

    # index rows by (bin number - 1) so results land in the correct row
    # even when some bins are empty (the previous enumerate-based indexing
    # shifted rows and picked the wrong bin width in that case)
    for b in np.unique(binnums):
        ind = binnums == b
        i = b - 1
        avg_ra[i] = ra[instream][ind].mean().value
        avg_dec[i] = dec[instream][ind].mean().value
        # width / sqrt(numpoints)
        avg_dec_err[i] = widths[i] / np.sqrt(ind.sum())

    return np.c_[avg_ra, avg_dec, avg_dec_err]


# /def
##############################################################################
# END
| 7,816 | 22.264881 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/plot.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : plotfuncs
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""plot functions.
Routine Listings
----------------
plot_sky_window
plot_proper_motions
plot_data_along_orbit
plot_data_along_orbit_in_window
plot_data_orbit_projection
plot_radec_track_residual
"""
__author__ = "Nathaniel Starkman"
__all__ = [
"plot_sky_window",
"plot_proper_motions",
"plot_data_along_orbit",
"plot_data_along_orbit_in_window",
"plot_data_orbit_projection",
"plot_radec_track_residual",
]
##############################################################################
# IMPORTS
# GENERAL
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from matplotlib import colors
from matplotlib.patches import Ellipse
from astropy.coordinates import SkyCoord
from astropy.table import Table, QTable
from typing import Optional
# CUSTOM
from astroPHD.plot import starkplot as plt, mpl_decorator
# PROJECT-SPECIFIC
from .select import inRange
##############################################################################
# CODE
##############################################################################
@mpl_decorator(
    fig="new",
    figsize=(10, 5),
    xlabel=r"$\phi_1$ (deg)",
    ylabel=r"$\phi_2$ (deg)",
    aspect="equal",
    invert_axis="xyz",
    xkw={"fontsize": 15, "title": {"fontsize": 20}},
)
def plot_sky_window(
    data,
    ind=...,
    step: float = 0.05,
    sigma: tuple = (2, 2),
    nbody: Optional[dict] = None,
    indwinstr=None,
    orb: Optional[SkyCoord] = None,
    orbrng=None,
    frame="cust",
    lon_angle="phi1",
    lat_angle="phi2",
    cmap="Spectral",
    **kw,
):
    """Plot the sky data window as a Gaussian-smoothed 2D histogram.

    Parameters
    ----------
    data : SkyCoord or (Q)Table
        the sky-window data; must provide `lon_angle` / `lat_angle`
    ind : Ellipsis or array_like, optional
        selection applied to `data`
    step : float or (2,) list of floats, optional (default = 0.05)
        the number of degrees by which to bin the sky data
        if (2,) list of floats then taken as (xstep, ystep)
    sigma : float or (2,) list of floats, optional (default = (2, 2))
        the sigma for the Gaussian filter
        if (2,) list of floats then taken as (xsigma, ysigma)
    nbody : dictionary, optional
        dictionary must have `frame` as a key, mapping to a SkyCoord
    indwinstr : array_like, optional
        selection applied to the N-body points
    orb : SkyCoord-like, optional
        progenitor orbit, indexable by `frame`
    orbrng : array_like, optional
        required if `orb` is given; forwarded to `inRange`
    frame, lon_angle, lat_angle : str, optional
    cmap : str or matplotlib colormap
        the colormap to use for the Gaussian-filtered image

    Other Parameters
    ----------------
    nbody_cmap : (default = 'nipy_spectral_r')
        colormap for the N-body scatter

    Returns
    -------
    im : pyplot imshow
        Gaussian-filtered image

    """
    plt.grid(False)

    # --------------------
    # per-axis bin step
    if np.isscalar(step):
        xstep = ystep = step
    else:
        xstep, ystep = step
    # per-axis filter sigma
    if np.isscalar(sigma):
        xsigma = ysigma = sigma
    else:
        xsigma, ysigma = sigma

    # --------------------
    # getting x, y data
    if isinstance(data, SkyCoord):
        x = getattr(data, lon_angle)[ind].to_value("deg")
        y = getattr(data, lat_angle)[ind].to_value("deg")
    elif isinstance(data, (Table, QTable)):
        x = data[lon_angle][ind].to_value("deg")
        y = data[lat_angle][ind].to_value("deg")
    else:
        raise TypeError("data is not a SkyCoord or (Q)Table")

    # --------------------
    # bins
    xbins = np.arange(x.min(), x.max() + xstep, xstep)
    ybins = np.arange(y.min(), y.max() + ystep, ystep)

    # raw-count 2D histogram
    # (the `normed` keyword was removed from numpy >= 1.24; raw counts,
    # i.e. the old normed=False, are the default)
    H, xedges, yedges = np.histogram2d(x, y, bins=[xbins, ybins])
    H = np.rot90(H)  # rotating to correct orientation: rows are now y

    # getting the figure edges
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]

    # gaussian filter; after rot90 axis 0 is y and axis 1 is x, so the
    # per-axis sigmas are passed in (y, x) order (previously the unpacked
    # xsigma/ysigma were unused and the raw tuple was applied as given)
    fH = gaussian_filter(H, sigma=(ysigma, xsigma), order=0)

    # plotting
    im = plt.imshow(
        fH,
        extent=extent,
        interpolation="nearest",
        cmap=cmap,
        norm=colors.LogNorm(),
    )

    # -----------------------------------------------------------
    # nbody
    if nbody is not None:
        plt.scatter(
            getattr(nbody[frame], lon_angle)[indwinstr][::100],
            getattr(nbody[frame], lat_angle)[indwinstr][::100],
            c=nbody[frame].distance[indwinstr][::100].distmod,
            s=1,
            colorbar=True,
            cmap=kw.get("nbody_cmap", "nipy_spectral_r"),
        )

    # -----------------------------------------------------------
    # progenitor orbit
    if orb is not None:
        if orbrng is None:
            raise ValueError("orbrng cannot be <None>")

        orbwin = inRange(
            getattr(orb[frame], lon_angle),
            getattr(orb[frame], lat_angle),
            rng=orbrng,
        )
        plt.plot(
            getattr(orb[frame], lon_angle)[orbwin],
            getattr(orb[frame], lat_angle)[orbwin],
        )

    return im


# /def
# -----------------------------------------------------------------------------
@mpl_decorator(
    fig="new",
    figsize=(10, 10),
    title=(r"Pal5 PM", dict(fontsize=24)),
    xlabel=r"$\mu_{\alpha^*} \ (\rm{mas} \, \rm{yr}^{-1})$",
    ylabel=r"$\mu_\delta \ (\rm{mas} \, \rm{yr}^{-1})$",
    unit_labels=False,
    xlim=(-15, 10),
    ylim=(-15, 10),
    tight_layout=True,
    legend=dict(fontsize=15),
    xkw=dict(fontsize=20),
)
def plot_proper_motions(
    data,
    idx,
    bakidx: slice = slice(None, None, 100),
    orbit: Optional[SkyCoord] = None,
    orbitinwindow: Optional[list] = None,
    cutspm=None,
    **kw,
):
    """Plot proper motions of the selection over a background subsample.

    Parameters
    ----------
    data : SkyCoord, (Q)Table
        must have pmra & pmdec columns
        if SkyCoord: .pm_ra_cosdec (or .pm_ra) & .pm_dec
        if (Q)Table: ['pmra'], ['pmdec']
    idx : slicer
        index or bool array to select the rows from `data`
    bakidx : slicer, optional
        background subsampling (default: every 100th row)
    orbit : optional
        (default None)
        accessed as a mapping with an 'eq' SkyCoord and an 'orb' entry
        providing .pmra() / .pmdec() -- presumably a galpy Orbit
    orbitinwindow : optional
        (default None)
        only used if orbit is not None
    cutspm : optional
        (default None)
        NOTE(review): accessed via attributes .x0_lon, .x0_lat, .dx_lon,
        .dx_lat (an object, not a list as previously documented); dx_* are
        used as the full width/height of the drawn ellipse

    Returns
    -------
    line : scatterplot result
        plt.scatter(pmra, pmdec, **kw)

    """
    # --------------------
    # pull proper motions: SkyCoord attribute access (pm_ra_cosdec with a
    # fallback to pm_ra) or table columns
    if isinstance(data, SkyCoord):
        try:
            datapmra = data.pm_ra_cosdec
        except Exception:
            datapmra = data.pm_ra
        datapmdec = data.pm_dec
    elif isinstance(data, (Table, QTable)):
        datapmra = data["pmra"]
        datapmdec = data["pmdec"]
    else:
        raise TypeError("data is not a SkyCoord or (Q)Table")

    # --------------------
    # rows where both proper-motion components are measured
    notnan = ~(np.isnan(datapmra) | np.isnan(datapmdec))

    # Background: convert the `bakidx` slicer into a boolean mask so it can
    # be combined with `notnan`
    _bakidx = np.zeros(len(datapmra), dtype=bool)
    _bakidx[bakidx] = True
    plt.scatter(datapmra[notnan & _bakidx], datapmdec[notnan & _bakidx], s=1)

    # Plotting: same mask treatment for the selection
    _idx = np.zeros(len(datapmra), dtype=bool)
    _idx[idx] = True
    line = plt.scatter(datapmra[notnan & _idx], datapmdec[notnan & _idx], **kw)

    # Start value circle: outline of the proper-motion cut
    if cutspm is not None:
        ellipse = Ellipse(
            xy=(cutspm.x0_lon, cutspm.x0_lat),
            width=cutspm.dx_lon,
            height=cutspm.dx_lat,
            fill=False,
            edgecolor="k",
        )
        plt.gca().add_patch(ellipse)

    # orbit track (full, then the in-window part highlighted on top) and
    # the progenitor's proper motion
    if orbit is not None:
        plt.plot(
            orbit["eq"].pm_ra_cosdec,
            orbit["eq"].pm_dec,
            label="all",
            lw=7,
            c="k",
        )
        if orbitinwindow is not None:
            plt.plot(
                orbit["eq"].pm_ra_cosdec[orbitinwindow],
                orbit["eq"].pm_dec[orbitinwindow],
                label="all",
                lw=7,
                c="tab:purple",
            )
        plt.scatter(
            orbit["orb"].pmra(t=None),
            orbit["orb"].pmdec(),
            label="prog",
            s=30,
            zorder=2,
        )

    return line


# /def
# -----------------------------------------------------------------------------
def plot_data_along_orbit(
    data, orbit, idxres, frame="cust", lon_angle="phi1", lat_angle="phi2", **kw
):
    """Overplot the matched data stars on the orbit track.

    `data` and `orbit` may each be a SkyCoord (attribute access) or a
    (Q)Table (column access); `idxres` is the match result from
    `get_data_along_orbit`.  Extra keywords are forwarded to plt.set.
    """

    def _angles(obj, rows, label):
        # pull (lon, lat) either from SkyCoord attributes or table columns
        if isinstance(obj, SkyCoord):
            return getattr(obj, lon_angle)[rows], getattr(obj, lat_angle)[rows]
        if isinstance(obj, (Table, QTable)):
            return obj[lon_angle][rows], obj[lat_angle][rows]
        raise TypeError(f"{label} is not a SkyCoord or (Q)Table")

    dataphi1, dataphi2 = _angles(data, idxres.idxcatalog, "data")
    orbitphi1, orbitphi2 = _angles(orbit, idxres.idxo, "orbit")

    # orbit track first (black), then the data on top
    plt.scatter(orbitphi1, orbitphi2, c="k", s=0.1)
    plt.scatter(dataphi1, dataphi2, s=0.1)

    # plot properties; user keywords may override the defaults below
    plt.set(
        aspect="equal",
        title=kw.pop("title", "Data Along Orbit"),
        xlabel=kw.pop("xlabel", rf"{lon_angle} [deg]"),
        ylabel=kw.pop("ylabel", rf"{lat_angle} [deg]"),
    )
    plt.set(**kw)  # any extra user keywords

    return


# /def
# -----------------------------------------------------------------------------
@mpl_decorator(fig="new")
def plot_data_along_orbit_in_window(
    data,
    orbit,
    idxres,
    windowcut=...,
    frame="cust",
    lon_angle="phi1",
    lat_angle="phi2",
    **kw,
):
    """Plot the matched stars on top of the full sky window.

    Parameters
    ----------
    data : SkyCoord or (Q)Table
        the full sky-window data; the matched stars are selected with
        ``idxres.idxcatalog``
    orbit : SkyCoord or (Q)Table
        NOTE(review): accepted but not used by this function's body --
        presumably kept for signature symmetry with the other helpers
    idxres : ObjDict
        match result from `get_data_along_orbit`
    windowcut : optional
        forwarded to `plot_sky_window` as ``orbrng``
    frame, lon_angle, lat_angle : str, optional
    **kw
        passed to starkplot's plt.set

    """
    # --------------------
    # angles of the matched stars only
    if isinstance(data, SkyCoord):
        phi1 = getattr(data, lon_angle)[idxres.idxcatalog]
        phi2 = getattr(data, lat_angle)[idxres.idxcatalog]
    elif isinstance(data, (Table, QTable)):
        phi1 = data[lon_angle][idxres.idxcatalog]
        phi2 = data[lat_angle][idxres.idxcatalog]
    else:
        raise TypeError("data is not a SkyCoord or (Q)Table")

    # --------------------
    # full window underneath, matched stars as translucent black points
    plot_sky_window(
        data,
        ind=...,
        orbrng=windowcut,
        fig=None,
        frame=frame,
        lon_angle=lon_angle,
        lat_angle=lat_angle,
    )
    plt.scatter(phi1, phi2, s=40, c="k", alpha=0.05)

    plt.set(**kw)

    return


# /def
# -----------------------------------------------------------------------------
def plot_data_orbit_projection(
    data,
    orbit,
    idxres,
    skycut=Ellipsis,
    filtered=True,
    step=0.07,
    sigma=(2, 2),
    add_sidehists=True,
    frame="cust",
    lon_angle="phi1",
    lat_angle="phi2",
    **kw,
):
    """Plot stars in orbit-projected coordinates (arc length, sky offset).

    Parameters
    ----------
    data : SkyCoord, (Q)Table
    orbit : SkyCoord, (Q)Table
    idxres : ObjDict
        returned by get_data_along_orbit; must provide ``idxcatalog``,
        ``idxo``, ``arc`` and ``wsky``
    skycut : index array, optional (default Ellipsis)
    filtered : bool, optional (default True)
        whether to apply a gaussian filter
    step : float, optional (default .07)
        the step size in the gaussian filter
    sigma : list, optional (default (2, 2))
        the sigma in the gaussian filter

    Other Parameters
    ----------------
    keywords starting with ``sh`` are forwarded to the side-histogram
    scatter; everything else goes to plt.set.

    Returns
    -------
    im : matplotlib plot
        if not filtered:
            scatter(*, s=2, sidehists=True)
        if filtered:
            imshow(*, cmap='Spectral', norm=colors.LogNorm(),
                   interpolation='nearest')

    Raises
    ------
    ValueError
        if `idxres` is None
    TypeError
        if `data` or `orbit` is not a SkyCoord or (Q)Table

    """
    # --------------------
    if idxres is None:
        # .arc / .wsky below have no fallback, so None cannot be supported
        # (previously this path crashed later with an AttributeError)
        raise ValueError("idxres must provide .idxcatalog, .idxo, .arc, .wsky")
    idxcatalog = idxres.idxcatalog
    idxo = idxres.idxo

    # --------------------
    if isinstance(data, SkyCoord):
        dataphi2 = getattr(data, lat_angle)[idxcatalog]
    elif isinstance(data, (Table, QTable)):
        dataphi2 = data[lat_angle][idxcatalog]
    else:
        raise TypeError("data is not a SkyCoord or (Q)Table")

    # --------------------
    if isinstance(orbit, SkyCoord):
        orbphi2 = getattr(orbit, lat_angle)
    elif isinstance(orbit, (Table, QTable)):
        orbphi2 = orbit[lat_angle]
    else:
        raise TypeError("orbit is not a SkyCoord or (Q)Table")

    # --------------------
    # distance along the arc
    x = idxres.arc[idxo][skycut].to_value("deg")
    # perpendicular to arc (signed: below the orbit -> negative)
    y = idxres.wsky.copy()
    y[dataphi2 < orbphi2[idxo]] *= -1
    y = y[skycut].to_value("deg")

    # --------------------
    plt.grid(False)
    ax = plt.gca()

    if not filtered:
        im = plt.scatter(x, y, s=2, sidehists=add_sidehists)

    else:
        xbins = np.arange(x.min(), x.max() + step, step)
        ybins = np.arange(y.min(), y.max() + step, step)
        # `normed` was removed from numpy (>= 1.24); `density` is the
        # supported equivalent for a normalized 2D histogram
        H, xedges, yedges = np.histogram2d(
            x, y, bins=[xbins, ybins], density=True
        )
        H = np.rot90(H)  # rotate so rows correspond to y
        extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]

        fH = gaussian_filter(H, sigma=sigma, order=0)

        im = plt.imshow(
            fH,
            extent=extent,
            cmap="Spectral",
            norm=colors.LogNorm(),
            interpolation="nearest",
            xlabel=r"$s_1$",
            ylabel=r"$s_2$",
        )

        # forward sh* keywords to the invisible scatter that draws the side
        # histograms; remove them from kw so plt.set does not see them
        shargs = {k: v for k, v in kw.items() if k.startswith("sh")}
        for k in shargs:
            kw.pop(k)
        plt.scatter(x, y, alpha=0, sidehists=add_sidehists, **shargs)

        # cosmetic cleanup of the main axes and the two side-hist axes
        axs = plt.gcf().axes
        axs[0].set_aspect("equal")
        axs[0].tick_params(
            axis="both", labelsize=13, color="black", colors="black"
        )
        axs[1].tick_params(axis="both", labelsize=12)
        axs[2].tick_params(axis="both", labelsize=12)

        axs[0].set_xlabel(r"$s_1 \ (\rm{deg})$", fontsize=20)
        axs[0].set_ylabel(r"$s_2 \ (\rm{deg})$", fontsize=20)

        if not isinstance(add_sidehists, bool):
            if "left" in add_sidehists:
                axs[0].yaxis.set_label_position("right")

        plt.set(figsize=(8, 3.5))
        plt.tight_layout()

    plt.set(**kw)
    plt.sca(ax)

    return im


# /def
# -----------------------------------------------------------------------------
def plot_radec_track_residual(data_radec, track_radec, track_interp, **kw):
    """Plot measured (ra, dec) against an interpolated track, with residuals.

    The residual is computed as ``dec - track_interp(ra)`` (in degrees; it
    is NOT divided by dec_err).

    Parameters
    ----------
    data_radec : (n, 3) array
        measured data.
        columns are [ra, dec, dec_err]
    track_radec : (m, 2) array
        track to draw; columns are [ra, dec]
    track_interp : function
        maps ra -> track dec; evaluated at the data ra values
    **kw
        passed to starkplot's plt.set (applied to the main panel)

    Returns
    -------
    fig : matplotlib figure
    (frame1, frame2) : matplotlib axes
        main panel and residual panel

    """
    # Preplotting
    ra, dec, dec_err = data_radec.T
    residual = dec - track_interp(ra)

    # axis limits padded 2.5 deg beyond the data range
    xmin, xmax = min(ra), max(ra)
    xlim = [xmin - 2.5, xmax + 2.5]
    ymin, ymax = min(dec), max(dec)
    ylim = [ymin - 2.5, ymax + 2.5]

    # Plotting
    fig = plt.figure()

    # main plot
    frame1 = fig.add_axes((0.1, 0.3, 0.8, 0.6))
    plt.errorbar(ra, dec, yerr=dec_err, fmt=".r", zorder=0, label="data")
    plt.plot(track_radec[:, 0], track_radec[:, 1], label="track")
    plt.scatter(ra, track_interp(ra), s=10, c="k", label="match")
    frame1.set_xticklabels([])  # x tick labels only on the residual panel
    frame1.set(xlim=xlim, ylim=ylim, ylabel="Dec [degree]")
    plt.grid(True)

    # residual plot
    # NOTE(review): ylim here reuses the dec-range limits, not a range
    # scaled to the residuals -- confirm this is intended
    frame2 = fig.add_axes((0.1, 0.1, 0.8, 0.2))
    plt.axhline(0, c="tab:blue")
    plt.plot(ra, residual, "or")
    frame2.set(xlim=xlim, ylim=ylim, xlabel="RA [degree]")
    plt.grid(False)

    plt.set(**kw, ax=frame1)

    return fig, (frame1, frame2)


# /def
##############################################################################
# END
| 16,398 | 24.385449 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/__init__.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : src initialization
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""AST1501.
Routine Listings
----------------
makefilepaths
load_orbit_and_stream
load_Pal5_orbit_and_stream
"""
__author__ = "Nathaniel Starkman"
##############################################################################
# IMPORTS
# CUSTOM
from astroPHD import LogFile, ObjDict
# PROJECT-SPECIFIC
from ._loadorbits import load_orbit_and_stream, load_Pal5_orbit_and_stream
###############################################################################
# PARAMETERS
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible with LogFile
###############################################################################
# CODE
def makefilepaths(
    datapath="/geir_data/scr/nstarkman/Pal-5-in-Gaia-DR2/",
    nbody="data/nbody/pal5.dat",
    nbodyformat="ascii.ecsv",
    skydata="scripts/get_gaia_window/base/output/window.fits",
    logger=_LOGFILE,
    verbose=None,
    **kw,
):
    """Resolve and record the data file paths used by the notebooks.

    Parameters
    ----------
    datapath : str
        root path of the repository data
        (default '/geir_data/scr/nstarkman/Pal-5-in-Gaia-DR2/')
    nbody : str
        path of the N-body data, relative to `datapath`
        (default 'data/nbody/pal5.dat')
    nbodyformat : str
        astropy table format of the N-body file (default 'ascii.ecsv')
    skydata : str
        path of the sky-window FITS file, relative to `datapath`
        (default 'scripts/get_gaia_window/base/output/window.fits')
    logger : LogFile
        logger used to record the resolved paths
    verbose :
        accepted but not used in this function's body
    **kw
        extra items stored on the returned ObjDict

    Returns
    -------
    opts : ObjDict
        with keys 'datapath', 'nbody', 'nbodyformat', 'skydata',
        'nbodypath', 'skydatapath', plus any extra `kw` items

    """
    # the path of the N-Body
    nbodypath = datapath + nbody

    # the data path
    # (older versions supported a 'modified' variant; kept for reference)
    # if modified in ('Modified', 'Modified_pmsub'):
    #     skydatapath = f'{datapath}/{nbody}/skydata/{skydata}_{modified}.fits'
    # else:
    #     skydatapath = f'{datapath}/{nbody}/skydata/{skydata}.fits'

    skydatapath = datapath + skydata

    # bundle everything, letting callers stash extra options via **kw
    opts = ObjDict(
        "options",
        datapath=datapath,
        nbody=nbody,
        nbodyformat=nbodyformat,
        skydata=skydata,
        nbodypath=nbodypath,
        skydatapath=skydatapath,
        **kw,
    )

    # record the resolved paths for reproducibility
    logger.write(
        f"""makefilepaths:
    loading data from {datapath}
    nbodypath: {nbodypath}
    skydatapath: {skydatapath}
"""
    )

    return opts


# /def
# -------------------------------------------------------------------------
| 2,834 | 23.652174 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/_loadorbits.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : loadorbits
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""load orbits.
Routine Listings
----------------
load_orbit_and_stream
plot_stream_orbit_mollweide
plot_spatial_closeup
plot_spatial
plot_velocity
plot_angular_momentum
load_Pal5_orbit_and_stream
"""
__author__ = "Nathaniel Starkman"
__all__ = [
"load_orbit_and_stream",
"plot_stream_orbit_mollweide",
"plot_spatial_closeup",
"plot_spatial",
"plot_velocity",
"plot_angular_momentum",
"load_Pal5_orbit_and_stream",
]
#############################################################################
# IMPORTS
# GENERAL
import numpy as np # numerical python
# galpy
from galpy.potential import MWPotential2014
from galpy.orbit import Orbit
from galpy.util import bovy_plot, bovy_coords
# astropy
from astropy import units as u, coordinates as coord
from astropy.table import Table, QTable
from astropy.coordinates.representation import CartesianDifferential
# CUSTOM
from astroPHD.plot import starkplot as plt
from astroPHD import ObjDict, LogFile
# PROJECT-SPECIFIC
from .progenitors import loadProgenitor
from .util.coordinates.frameattrs import convert_repr
from .orbit import SequentialOrbits
##############################################################################
# SETUP
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible with LogFile
##############################################################################
# CODE
##############################################################################
##############################################################################
# LOAD ORBITS
def load_orbit_and_stream(
    nbodypath,
    adj=None,
    nbodyformat="ascii.commented_header",
    orbitpath=None,
    orbitname=None,
    plot=False,
    returnplots=False,
    logger=_LOGFILE,
    verbose=None,
):
    """Load Orbit and N-Body data.
    N-Body requires Galactocentric coordinates
    Parameters
    ----------
    nbodypath: str
        relative path to nbody data
    adj: list or None (default None -> [0., 0., 0., 0., 0., 0.])
        spatial and kinematic adjustments to the N-Body data
        form: [x (kpc), y (kpc), z (kpc), v_x (km/s), v_y (km/s), v_z (km/s)]
        item elements must have units compatible with listed in 'form'
        or no units, in which case they are assumed to have above unit
        NOTE: if a plain list is passed, unit-less entries are converted to
        Quantities IN PLACE (pre-existing behavior).
    orbitpath: str (or None)
        relative path to orbit .json file
        see .json form below in 'Examples'
    plot: bool
        whether to make and show plots
    returnplots: bool
        whether to return plots
    Returns
    -------
    orbit: ObjDict
        orbit properties
        {orb / orbb: galpy Orbit
            integrated forward/backward orbit
         sc / scb: astropy SkyCoord
            forward / backward SkyCoord from orbit
         gc / gcb: astropy SkyCoord
            Galactocentric coordinates
         gal / galb: astropy SkyCoord
            Galactic coordinates
         eq / eqb: astropy SkyCoord
            Equatorial coordinates
         Lgc / Lgcb: ndarray
            forward / backward angular moment array
            Galactocentric coordinates
         Lgal / Lgalb: ndarray
            forward / backward angular moment array
            Galactic coordinates
        }
    stream: ObjDict
        stream properties
        {orb: astropy QTable
            N-Body Table
         sc: astropy SkyCoord
            SkyCoord from orbit
         gc: astropy SkyCoord
            Galactocentric coordinates
         gal: astropy SkyCoord
            Galactic coordinates
         eq: astropy SkyCoord
            Equatorial coordinates
         Lgc: ndarray
            angular moment array
            Galactocentric coordinates
         Lgal: ndarray
            angular moment array
            Galactic coordinates
        }
    plot: matplotlib.pyplot figure
        only returned when `returnplots` is True
    Raises
    ------
    ValueError
        if both *orbitpath* and *orbitname* are None
    NotImplementedError
        if *orbitpath* is given (loading from file not yet supported)
    """
    # BUG FIX: `adj` previously defaulted to a mutable list that the
    # unit-coercion loop below modified in place, so the shared default
    # object changed across calls. Use a None sentinel instead.
    if adj is None:
        adj = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]

    def sphericalcoslat(sc, wrap_angle=None):
        # helper: view `sc` in spherical representation with cos(lat)
        # differentials; `wrap_angle` is currently unused (see TODO below)
        # sc.representation_type = 'spherical'
        # sc.differential_type = coord.SphericalCosLatDifferential
        # if wrap_angle is not None:
        # getattr(sc, wrap_angle).wrap_angle = 180 * u.deg # TODO
        return convert_repr(
            sc, representation="spherical", differential="sphericalcoslat"
        )
    # ------------------------------------------------------------------------
    # Progenitor
    if orbitpath is None and orbitname is None:
        # BUG FIX: message previously read "is not None", the inverse of the
        # condition actually being reported
        raise ValueError("orbitpath and orbitname are both None")
    elif orbitpath is None:
        prog = loadProgenitor(orbitname, logger=logger)
    else:
        raise NotImplementedError("not yet implemented")
    logger.report(f"loading Progenitor {prog.name} orbit", verbose=verbose)
    # Parameter Values
    vxvv = prog.coord
    ro = 8.0 * u.kpc
    vo = 220 * u.km / u.s
    zo = 0.025 * u.kpc
    vsun = [-11.1, 24.0, 7.25] * u.km / u.s  # -11.1 b/c handedness
    vsunGC = [11.1, 244, 7.25] * u.km / u.s  # +11.1 b/c handedness
    # Making Orbit (Galactic)
    # forward and backward integration times from the progenitor metadata
    t_arr = np.linspace(0, prog["t+"]["end"], num=prog["t+"]["num"])
    t_arr = t_arr * u.Gyr if not hasattr(t_arr, "unit") else t_arr
    tb_arr = np.linspace(0, prog["t-"]["end"], num=prog["t-"]["num"])
    tb_arr = tb_arr * u.Gyr if not hasattr(tb_arr, "unit") else tb_arr
    o = SequentialOrbits(
        vxvv=vxvv, ro=ro, vo=vo, zo=zo, solarmotion=vsun, pot=MWPotential2014
    )
    o.integrate(t_arr)
    # import pdb; pdb.set_trace()
    o.add_backward_orbit()
    o.integrate(tb_arr)
    logger.report("\tmade orbit", verbose=verbose, start_at=2)
    # ---------------------------------------------------
    # SkyCoordinates
    orbSkyCrd = sphericalcoslat(o.SkyCoord("full"))
    # Galactocentric (x, y, z, v_xyz)
    gc_orb = sphericalcoslat(orbSkyCrd.galactocentric, "lon")
    # Galactic (l, b)
    gal_orb = sphericalcoslat(orbSkyCrd.galactic, "l")
    # Equatorial (ra, dec)
    eq_orb = sphericalcoslat(orbSkyCrd.icrs, "ra")
    logger.report("\tmade skycoords", verbose=verbose, start_at=2)
    # ---------------------------------------------------
    # Angular Momentum
    # Angular Momentum in the Galactocentric Reference Frame
    Lgc_orb = o.L("full")
    # # Angular Momentum in the Galactic Reference Frame
    Lgal_orb = np.cross(
        gal_orb.cartesian.xyz.transpose(), gal_orb.velocity.d_xyz.transpose()
    )
    logger.report("\tmade angular momentum", verbose=verbose, start_at=2)
    # ---------------------------------------------------
    # Storing
    orbit = ObjDict(
        "orbit",
        orb=o,
        sc=orbSkyCrd,
        # skycoords
        eq=eq_orb,
        icrs=eq_orb,
        gc=gc_orb,
        galactocentric=gc_orb,
        gal=gal_orb,
        galactic=gal_orb,
        # angular momentum
        Lgc=Lgc_orb,
        Lgal=Lgal_orb,
        # info
        info=prog,
    )
    logger.report("\tstored orbit", verbose=verbose, start_at=2)
    # ------------------------------------------------------------------------
    # N-body Stream Data
    logger.report("making N-body", verbose=verbose)
    # print(nbodypath, nbodyformat)
    df = Table.read(nbodypath, format=nbodyformat)
    # units
    logger.report("\tfixing units", verbose=verbose, start_at=2)
    udict = {
        "mass": u.Msun,
        "x": u.kpc,
        "y": u.kpc,
        "z": u.kpc,
        "v_x": u.km / u.s,
        "v_y": u.km / u.s,
        "v_z": u.km / u.s,
    }
    for key, unit in udict.items():
        if key in df.colnames:
            setattr(df[key], "unit", unit)
    # Manual Adjustments
    logger.report("\tadjusted position", verbose=verbose, start_at=2)
    # coerce each adjustment to a Quantity with the expected unit
    for i, unit in enumerate([*[u.kpc] * 3, *[u.km / u.s] * 3]):
        try:
            adj[i].to(unit)
        except AttributeError:  # doesn't have units
            adj[i] *= unit  # assigning correct units
        except u.UnitConversionError:  # not in correct units
            raise u.UnitConversionError(
                f"{adj[i]} is not compatible with {unit}"
            )
    df["x"] = (df["x"] + adj[0]).to(u.kpc)
    df["y"] = (df["y"] + adj[1]).to(u.kpc)
    df["z"] = (df["z"] + adj[2]).to(u.kpc)
    df["v_x"] = (df["v_x"] + adj[3]).to(u.km / u.s)
    df["v_y"] = (df["v_y"] + adj[4]).to(u.km / u.s)
    df["v_z"] = (df["v_z"] + adj[5]).to(u.km / u.s)
    # ---------------------------------------------------
    # SkyCoords
    streamSkyCrd = coord.SkyCoord(
        x=-df["x"],
        y=df["y"],
        z=df["z"],
        v_x=-df["v_x"],  # -v_x b/c handedness
        v_y=df["v_y"],
        v_z=df["v_z"],
        galcen_distance=np.sqrt(ro ** 2 + zo ** 2),
        # galcen_distance=8.3 * u.kpc,
        z_sun=zo,
        galcen_v_sun=CartesianDifferential(*vsunGC),
        frame="galactocentric",
        # galcen_coord=coord.ICRS(ra=266.4051 * u.deg, dec=-28.936175 * u.deg)
    ).icrs
    streamSkyCrd = sphericalcoslat(streamSkyCrd, "ra")
    # streamSkyCrd.representation_type = 'spherical'
    # streamSkyCrd.differential_type = coord.SphericalCosLatDifferential
    # Galactocentric (x, y, z, v_xyz)
    gc_stream = sphericalcoslat(streamSkyCrd.galactocentric, "lon")
    # Galactic (l, b)
    gal_stream = sphericalcoslat(streamSkyCrd.galactic, "l")
    # Equatorial (ra, dec)
    eq_stream = sphericalcoslat(streamSkyCrd.icrs, "ra")
    logger.report("\tmade skycoords", verbose=verbose, start_at=2)
    # ---------------------------------------------------
    # Galactocentric Angular Momentum
    # computed directly from the raw table (frame round-trip was found to
    # transform incorrectly, see commented-out version below)
    Lgc_stream = np.cross(
        df["x", "y", "z"].to_pandas().values,
        df["v_x", "v_y", "v_z"].to_pandas().values,
    )
    # Lgc_stream = np.cross( # DIDN'T TRANSFORM CORRECTLY
    # gc_stream.cartesian.xyz.transpose(),
    # gc_stream.velocity.d_xyz.transpose())
    # Angular Momentum in the Galactic Reference Frame
    Lgal_stream = np.cross(
        gal_stream.cartesian.xyz.transpose(),
        gal_stream.velocity.d_xyz.transpose(),
    )
    # np.cross(uvw, vuvvvw)
    logger.report("\tmade angular momentum", verbose=verbose, start_at=2)
    # ---------------------------------------------------
    df = QTable(df)
    stream = ObjDict(
        "stream",
        orb=df,
        sc=streamSkyCrd,
        # skycoords
        gc=gc_stream,
        galactocentric=gc_stream,
        gal=gal_stream,
        galactic=gal_stream,
        eq=eq_stream,
        icrs=eq_stream,
        # angular momentum
        Lgc=Lgc_stream,
        Lgal=Lgal_stream,
    )
    logger.report("\tstored stream", verbose=verbose, start_at=2)
    # ------------------------------------------------------------------------
    # Plotting
    if plot is True:
        logger.report("plotting", verbose=verbose)
        fig0 = plot_stream_orbit_mollweide(eq_stream, eq_orb)
        fig1 = plot_spatial_closeup(gc_orb, gc_stream)  # closeup Plots
        fig2 = plot_spatial(gal_stream, gal_orb)  # Position
        fig3 = plot_velocity(gc_orb, gc_stream)  # Velocity
        fig4 = plot_angular_momentum(Lgal_orb, Lgal_stream)  # Angular Momentum
        plt.show()
        if returnplots is True:
            print("done loading orbits")
            return orbit, stream, (fig0, fig1, fig2, fig3, fig4)
    else:
        logger.report("not plotting", verbose=verbose)
    # ------------------------------------------------------------------------
    logger.report("done loading orbits", verbose=verbose)
    # NOTE: when returnplots is True but plot is False, the third return
    # value is None (no figures were made)
    if returnplots is True:
        return orbit, stream, None
    return orbit, stream
# /def
def _scatterhelp(ax, source, q1="ra", q2="dec", label="", wrap=False, **kw):
if wrap:
res = ax.scatter(
getattr(source, q1).wrap_at(180 * u.deg).rad,
getattr(source, q2).wrap_at(180 * u.deg).rad,
label=label,
**kw,
)
else:
res = ax.scatter(
getattr(source, q1), getattr(source, q2), label=label, **kw
)
return res
# /def
# ----------------------------------------------------------------------------
def _scatterhelpradec(ax, source, label, **kw):
    """Scatter ra/dec of *source* on *ax*, angle-wrapped for mollweide.

    BUG FIX: ``**kw`` (e.g. ``s=``, ``c=``, ``edgecolor=`` from the
    callers) was accepted but silently dropped; it is now forwarded to
    `_scatterhelp` so marker styling actually takes effect.
    """
    return _scatterhelp(
        ax, source, q1="ra", q2="dec", label=label, wrap=True, **kw
    )
# /def
# ----------------------------------------------------------------------------
@plt.mpl_decorator(
    fig="new",
    figsize=(8, 6),
    title=r"$\alpha$ & $\delta$ (Equatorial)",
    xlabel=r"$\alpha$",
    ylabel=r"$\delta$",
)
def plot_stream_orbit_mollweide(eq_stream, eq_orb, **kw):
    """Mollweide scatter of the stream and progenitor orbit (equatorial).

    BUG FIX: the parameters were declared ``(eq_orb, eq_stream)`` but the
    only caller (`load_orbit_and_stream`) passes ``(eq_stream, eq_orb)``,
    so the 'stream' and 'progenitor' labels were swapped. The parameter
    order now matches the call site.
    """
    fig = plt.gcf()
    ax = fig.add_subplot(111, projection="mollweide")
    # stream particles
    _scatterhelpradec(ax, eq_stream, "stream", s=2)
    # integrated progenitor orbit
    _scatterhelpradec(ax, eq_orb, "progenitor", s=2, c="r")
    # progenitor's current position (first orbit point)
    _scatterhelpradec(ax, eq_orb[0], "Pal5", s=30, c="r", edgecolor="k")
    return fig
# /def
# ----------------------------------------------------------------------------
def plot_spatial_closeup(gc_orb, gc_stream):
    """Close-up Galactocentric cartesian plots around the progenitor.

    Three panels (x-y, x-z, y-z) with hard-coded window limits; stream
    particles outside the window are dropped before plotting.
    TODO
    ----
    make general (the window limits are hard-coded for Pal 5)
    """
    fig, axs = plt.subplots(1, 3, figsize=(15, 5))
    qpairs = [("x", "y"), ("x", "z"), ("y", "z")]
    q1lims = [(8, 8.4), (8, 8.4), (0, 0.5)]
    q2lims = [(-0.5, 1), (16.2, 16.8), (16.2, 16.8)]
    for ax, (qx, qy), xlim, ylim in zip(axs, qpairs, q1lims, q2lims):
        sx = getattr(gc_stream.cartesian, qx)
        sy = getattr(gc_stream.cartesian, qy)
        # keep only the stream particles inside the close-up window
        keep = (
            (xlim[0] < sx.value)
            & (sx.value < xlim[1])
            & (ylim[0] < sy.value)
            & (sy.value < ylim[1])
        )
        plt.sca(ax)
        # TODO switch to _scatterhelp
        # stream
        plt.scatter(sx[keep], sy[keep], s=2, label="stream")
        # integrated progenitor orbit
        ox = getattr(gc_orb.cartesian, qx)
        oy = getattr(gc_orb.cartesian, qy)
        plt.scatter(ox, oy, s=1, c="r", label="Pal5")
        # progenitor's current position
        plt.scatter(ox[0], oy[0], s=30, c="r", edgecolor="k", label="Pal5")
        plt.set(
            title="{q1} & {q2} (Galactocentric)".format(q1=qx, q2=qy),
            xlabel=qx,
            ylabel=qy,
            xlim=xlim,
            ylim=ylim,
        )
    fig.tight_layout()
    return fig
# /def
# ----------------------------------------------------------------------------
def plot_spatial(gal_stream, gal_orb):
    """Galactic cartesian positions: stream (thinned 50x) and orbit.

    Three panels (x-y, x-z, y-z); returns the figure.
    """
    fig, axs = plt.subplots(1, 3, figsize=(15, 5))
    for ax, (qx, qy) in zip(axs, (("x", "y"), ("x", "z"), ("y", "z"))):
        # thin the stream by a factor of 50 to keep the scatter light
        sx = getattr(gal_stream.cartesian, qx)[::50]
        sy = getattr(gal_stream.cartesian, qy)[::50]
        ax.scatter(sx, sy, s=2, label="stream")
        ox = getattr(gal_orb.cartesian, qx)
        oy = getattr(gal_orb.cartesian, qy)
        ax.scatter(ox, oy, s=2, c="r", label="progenitor")
        plt.set(
            ax=ax,
            title="{q1} & {q2} (Galactic)".format(q1=qx, q2=qy),
            xlabel=r"{q1} [{unit}]".format(q1=qx, unit=sx.unit),
            ylabel=r"{q2} [{unit}]".format(q2=qy, unit=sy.unit),
        )
    fig.tight_layout()
    return fig
# /def
# ----------------------------------------------------------------------------
def plot_velocity(gc_orb, gc_stream):
    """Plot Galactocentric cartesian velocity components.

    Parameters
    ----------
    gc_orb: SkyCoord
        orbit in galactocentric coordinates
    gc_stream: SkyCoord
        stream in galactocentric coordinates

    Returns
    -------
    fig: pyplot.Figure
        three panels (d_x-d_y, d_x-d_z, d_y-d_z) with hard-coded limits;
        stream points outside the limits are dropped before plotting
    """
    fig, axs = plt.subplots(1, 3, figsize=(15, 5))
    pairs = (("d_x", "d_y"), ("d_x", "d_z"), ("d_y", "d_z"))
    xlims = [(-100, -20), (-100, -20), (-130, -100)]
    ylims = [(-130, -100), (-100, 25), (-100, 25)]
    vel_stream = gc_stream.cartesian.differentials["s"]
    vel_orb = gc_orb.cartesian.differentials["s"]
    for ax, (qx, qy), xlim, ylim in zip(axs, pairs, xlims, ylims):
        sx = getattr(vel_stream, qx)
        sy = getattr(vel_stream, qy)
        # keep only the stream points inside the plotting window
        keep = (
            (xlim[0] < sx.value)
            & (sx.value < xlim[1])
            & (ylim[0] < sy.value)
            & (sy.value < ylim[1])
        )
        sx, sy = sx[keep], sy[keep]
        ax.scatter(sx, sy, s=2, label="stream")
        ox = getattr(vel_orb, qx)
        oy = getattr(vel_orb, qy)
        ax.scatter(ox, oy, s=2, c="r", label="progenitor")
        # progenitor's current velocity (first orbit point)
        ax.scatter(
            ox[0],
            oy[0],
            s=30,
            c="r",
            edgecolor="k",
            label="Pal5",
        )
        # plot properties
        ax.set_title("{q1} & {q2} (Galactocentric)".format(q1=qx, q2=qy))
        ax.set_xlabel(r"{q1} [{unit}]".format(q1=qx, unit=sx.unit))
        ax.set_ylabel(r"{q2} [{unit}]".format(q2=qy, unit=sy.unit))
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        ax.legend()
    # /for
    fig.tight_layout()
    return fig
# /def
# ----------------------------------------------------------------------------
def plot_angular_momentum(Lgal_orb, Lgal_stream):
    """Plot Galactic angular-momentum components of orbit and stream.

    Parameters
    ----------
    Lgal_orb: ndarray
        (n, 3) angular momentum of the orbit, columns = (x, y, z)
    Lgal_stream: ndarray
        (n, 3) angular momentum of the stream, columns = (x, y, z)

    Returns
    -------
    fig: pyplot.Figure
        three panels: L_x-L_y, L_x-L_z, L_y-L_z
    """
    axis_index = {"x": 0, "y": 1, "z": 2}  # column index per component
    fig, axs = plt.subplots(1, 3, figsize=(15, 5))
    for ax, (qx, qy) in zip(axs, (("x", "y"), ("x", "z"), ("y", "z"))):
        i, j = axis_index[qx], axis_index[qy]
        ax.scatter(Lgal_stream[:, i], Lgal_stream[:, j], s=2, label="stream")
        ax.scatter(
            Lgal_orb[:, i], Lgal_orb[:, j], s=2, c="r", label="progenitor"
        )
        ax.set_title("$L_{q1}$ & $L_{q2}$ (Galactic)".format(q1=qx, q2=qy))
        ax.set_xlabel(r"$L_{q1}$ [{unit}]".format(q1=qx, unit="todo"))
        ax.set_ylabel(r"$L_{q2}$ [{unit}]".format(q2=qy, unit="todo"))
        ax.legend()
    # /for
    fig.tight_layout()
    return fig
# /def
##############################################################################
# loadPal5orbits
def load_Pal5_orbit_and_stream(
    nbodypath,
    adj=None,
    nbodyformat="ascii.ecsv",
    plot=False,
    returnplots=False,
    logger=_LOGFILE,
    verbose=None,
):
    """Load Pal 5 Orbit and N-Body data.

    Thin wrapper around `load_orbit_and_stream` with ``orbitname`` fixed
    to "Palomar 5".

    Parameters
    ----------
    nbodypath: str
        relative path to nbody data
    adj: list or None (default None -> [0., 0., 0., 0., 0., 0.])
        spatial and kinematic adjustments to the N-Body data
        form: [x (kpc), y (kpc), z (kpc), v_x (km/s), v_y (km/s), v_z (km/s)]
        item elements must have units compatible with listed in 'form'
        or no units, in which case they are assumed to have above unit
    plot: bool
        whether to make and show plots
    returnplots: bool
        whether to return plots

    Returns
    -------
    pal5 : dict
    stream : dict
    plot : matplotlib.pyplot.Figure
        only when `returnplots` is True
    """
    # BUG FIX: `adj` previously defaulted to a mutable list; the callee's
    # unit-coercion loop mutates it in place, so the shared default object
    # changed across calls. Use a None sentinel and build a fresh list here.
    if adj is None:
        adj = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    return load_orbit_and_stream(
        nbodypath,
        adj=adj,
        nbodyformat=nbodyformat,
        orbitname="Palomar 5",
        plot=plot,
        returnplots=returnplots,
        logger=logger,
        verbose=verbose,
    )
# /def
##############################################################################
# End
| 19,689 | 26.347222 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/select/gsel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : g selection
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""g selection."""
__author__ = "Nathaniel Starkman"
##############################################################################
# IMPORTS
# GENERAL
from astropy import units as u
# CUSTOM
from astroPHD import ObjDict, LogFile
# PROJECT-SPECIFIC
from .select import inRange
##############################################################################
# PARAMETERS
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible with LogFile
##############################################################################
# CODE
##############################################################################
def select_g_range(
    df, low=20, up=20.7, g_name="g dx", logger=_LOGFILE, verbose=None
):
    """Select rows whose g magnitude lies in [low, up).

    The lower bound is inclusive, the upper exclusive (inRange defaults).

    Parameters
    ----------
    df : (Q)Table
        must contain a magnitude column named *g_name*
    low : float
        lower bound, units of magnitudes (inclusive)
    up : float
        upper bound, units of magnitudes (exclusive)
    g_name : str, optional (default 'g dx')
        name of the g column in *df*

    Returns
    -------
    res : bool ndarray
        True for rows inside the magnitude range
    """
    res = inRange(df[g_name], rng=[low, up] * u.mag)
    # -----------------------------------------------------
    # report
    # BUG FIX: the log message previously printed 'lhs={low}' although the
    # parameter is named 'low'
    logger.report(
        "Made g range Selection",
        f"select_g_range:\n\tlow={low}, up={up}\
            \n\tg_name={g_name}",
        verbose=verbose,
    )
    # -----------------------------------------------------
    return res
# /def
##############################################################################
# END
| 1,663 | 20.333333 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/select/cmd.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : initializing selection functions
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""logging initialization."""
__author__ = "Nathaniel Starkman"
##############################################################################
# IMPORTS
# GENERAL
from scipy.interpolate import interp1d
from astropy import units as u
# CUSTOM
from astroPHD import LogFile
# PROJECT-SPECIFIC
from .select import inRange
##############################################################################
# PARAMETERS
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible with LogFile
##############################################################################
# CODE
##############################################################################
def select_shift_CMD(
    data,
    isochrone,
    lhs=0.0,
    rhs=0.08,
    low=0.035,
    up=0.0385,
    isorng=[18, 24],
    datarng=[18.1, 23.5],
    g_name="g dx",
    gmr_name="g-r dx",
    iso_g_name="g PS",
    iso_gmr_name="g-r PS",
    fill_value="extrapolate",
    logger=_LOGFILE,
    verbose=None,
):
    """Shifted CMD selection.
    Selects stars whose color falls between two copies of the isochrone:
    one shifted (lhs, low) and one shifted (rhs, up) in the color-magnitude
    diagram.
    Parameters
    ----------
    data : QTable
        the sky datatable
    isochrone : QTable
        isochrone datatable
    lhs : scalar
        shift the isochrone left
    rhs : scalar
        shift the isochrone right
        rhs > lhs
    low : scalar
        shift the isochrone down
    up : scalar
        shift the isochrone up
    isorng : (2,) list
        restrict the range applied on the isochrone
    datarng : (2,) list
        restrict the range applied on the data
    g_name : str, optional (default 'g dx')
        name of the g column in *data*
    gmr_name : str, optional (default 'g-r dx')
        name of the g-r column in *data*
    iso_g_name : str, optional (default 'g PS')
        name of the g column in *isochrone*
    iso_gmr_name : str, optional (default 'g-r PS')
        name of the g-r column in *isochrone*
    fill_value : str, optional (default 'extrapolate')
        interp1d fill_value for lhs & rhs
    # Logging
    logfile : logger, optional
    verbose : int, optional
        the degree of verbosity
        None) (default): use instantiated value
        0) None; 1) status report, >=2) step-by-step
    Returns
    -------
    shiftCMD : bool ndarray
        selection over *data*: True where the star is inside *datarng* and
        its color lies between the two shifted isochrone splines
    """
    # -----------------------------------------------------
    # Checking
    # the two isochrone copies must be shifted apart: left < right, low < up
    assert lhs < rhs
    assert low < up
    # -----------------------------------------------------
    # restricting isochrone range
    _iso_ind = inRange(isochrone[iso_g_name], rng=isorng * u.mag)
    # isochrone lhs, and left shift
    # spline: g -> g-r along the isochrone, with g shifted by lhs
    isocr_spl_lhs = interp1d(
        isochrone[iso_g_name][_iso_ind] + lhs * u.mag,
        isochrone[iso_gmr_name][_iso_ind],
        fill_value=fill_value,
    )
    # isochrone rhs, and right shift
    isocr_spl_rhs = interp1d(
        isochrone[iso_g_name][_iso_ind] + rhs * u.mag,
        isochrone[iso_gmr_name][_iso_ind],
        fill_value=fill_value,
    )
    # -----------------------------------------------------
    # restricting data range
    _data_ind = inRange(data[g_name], rng=datarng * u.mag)
    # evaluating isochrone splines
    # shifting low and up
    # per-star color bounds: spline evaluated at each star's g, +/- shifts
    evl_spl_MSp_low = isocr_spl_lhs(data[g_name][_data_ind]) + low
    evl_spl_MSp_up = isocr_spl_rhs(data[g_name][_data_ind]) + up
    rng = [evl_spl_MSp_low, evl_spl_MSp_up] * u.mag
    # -----------------------------------------------------
    # CMD
    # start from the g-range mask (same computation as _data_ind), then
    # refine the True entries by the per-star color test
    shiftCMD = inRange(data[g_name], rng=datarng * u.mag)
    _cmd_ind = inRange(data[gmr_name][_data_ind], rng=rng)
    shiftCMD[shiftCMD] = _cmd_ind
    # -----------------------------------------------------
    # report
    logger.report(
        f"made select_shift_CMD",
        f"select_shift_CMD:\n\tlhs={lhs}, rhs={rhs}\n\tlow={low}, up={up}\
            \n\tisorng={isorng}, datarng={datarng}\
            \n\tg_name={g_name}, gmr_name={gmr_name}\
            \n\tiso_g_name={iso_g_name}, iso_gmr_name={iso_gmr_name}\
            \n\tfill_value={fill_value}",
        verbose=verbose,
    )
    # -----------------------------------------------------
    return shiftCMD
# /def
##############################################################################
# END
| 4,434 | 26.042683 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/select/pm.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : initializing selection functions
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""Proper motion selections."""
__author__ = "Nathaniel Starkman"
##############################################################################
# IMPORTS
# GENERAL
from astropy import units as u
# CUSTOM
from astroPHD import LogFile
# Project-SPECIFIC
from .select import ellipse
##############################################################################
# PARAMETERS
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible with LogFile
##############################################################################
# CODE
##############################################################################
##############################################################################
# Select Proper Motion
def select_pm_circle(
    df,
    x0_lon=-2.5,
    x0_lat=-2.5,
    dx_lon=3,
    dx_lat=3,
    lon_name="pmra",
    lat_name="pmdec",
    lon_units=u.mas / u.yr,
    lat_units=u.mas / u.yr,
    logger=_LOGFILE,
    verbose=None,
):
    """Select stars inside an ellipse in proper-motion space.

    Parameters
    ----------
    df : QTable
        the sky datatable; must contain columns *lon_name*, *lat_name*
        whose units convert to *lon_units* / *lat_units*
    x0_lon : float
        ellipse center in the longitude direction (in *lon_units*)
    x0_lat : float
        ellipse center in the latitude direction (in *lat_units*)
    dx_lon : float
        ellipse semi-axis in the longitude direction (in *lon_units*)
    dx_lat : float
        ellipse semi-axis in the latitude direction (in *lat_units*)
    lon_name : str, optional (default 'pmra')
        name of the longitudinal proper-motion column
    lat_name : str, optional (default 'pmdec')
        name of the latitudinal proper-motion column
    lon_units : astropy units, optional (default u.mas / u.yr)
        units of the longitudinal direction
    lat_units : astropy units, optional (default u.mas / u.yr)
        units of the latitudinal direction
    # Logging
    logfile : logger, optional
    verbose : int, optional
        the degree of verbosity
        None) (default): use instantiated value
        0) None; 1) status report, >=2) step-by-step

    Returns
    -------
    selection : bool ndarray
        True for rows inside the proper-motion ellipse
    """
    # strip units and apply the elliptical cut
    pm_lon = df[lon_name].to_value(lon_units)
    pm_lat = df[lat_name].to_value(lat_units)
    selection = ellipse(
        pm_lon, pm_lat, x0=(x0_lon, x0_lat), dx=(dx_lon, dx_lat)
    )
    # -----------------------------------------------------
    # report
    logger.report(
        "Made PM Selection",
        f"select_pm_circle:\n\tx0_lon={x0_lon}, x0_lat={x0_lat}\
            \n\tdx_lon={dx_lon}, dx_lat={dx_lat}\
            \n\tlon_name={lon_name}, lat_name={lat_name}\
            \n\tlon_units={lon_units}, lat_units={lat_units}",
        verbose=verbose,
    )
    # -----------------------------------------------------
    return selection
# /def
##############################################################################
# END
| 3,195 | 24.98374 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/select/select.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : select
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""Selection Functions.
Routine Listings
----------------
.box
.circle
.ellipse
.inRange
.ioRange
.outRange
"""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import numpy as np
from functools import wraps
#############################################################################
# CODE
#############################################################################
#############################################################################
# Decorator
class idxDecorator:
    """Decorator controlling whether a selection returns a bool array or indices.

    Adds an ``as_ind`` keyword argument to the decorated function:
    - ``as_ind=False`` (default): return the boolean result as an ndarray.
    - ``as_ind=True``: return ``np.where(result == True)``, i.e. a tuple of
      index arrays where the selection is True.

    Usable both bare and with arguments::

        @idxDecorator
        def sel(...): ...

        @idxDecorator(as_ind=True)
        def sel(...): ...
    """

    def __new__(cls, func=None, as_ind=False):
        """Create the decorator; decorate immediately if *func* is given.

        When used bare (``@idxDecorator``), Python passes the function as
        *func* and we apply the decorator right away; with arguments
        (``@idxDecorator(as_ind=True)``) *func* is None and the instance
        itself is returned to be called on the function.
        """
        self = super().__new__(cls)
        self.as_ind = as_ind  # default value of the added *as_ind* kwarg
        if func is None:
            return self
        return self(func)

    def __call__(self, wrapped_func):
        """Wrap *wrapped_func*, adding the *as_ind* keyword argument."""
        @wraps(wrapped_func)  # copies __doc__, __name__, etc.
        def wrapper(*args, as_ind=self.as_ind, **kwargs):
            result = np.asarray(wrapped_func(*args, **kwargs))
            if as_ind:
                # tuple of index arrays where the selection is True
                return np.where(result == True)  # noqa: E712
            return result
        # FIX: removed redundant `wrapper.__doc__ = wrapped_func.__doc__` —
        # @wraps above already copies the docstring.
        # TODO modify documentation to include as_ind
        return wrapper
#############################################################################
# Functions
def _inRange(x, rng, lbi=True, ubi=False):
"""_inRange.
Parameters
----------
x : array
the array on which to test for membership in the range
rng : list
the range. (lower, upper)
lbi : bool (default True)
Lower Bound Inclusive, whether to be inclusive on the lower bound
ubi : bool (default False)
Upper Bound Inclusive, whether to be inclusive on the upper bound
Returns
-------
idx: bool array
bool index array
shape matches `x`
"""
if lbi and ubi: # both true
return (rng[0] <= x) & (x <= rng[1])
elif lbi: # only lbi is true
return (rng[0] <= x) & (x < rng[1])
elif ubi: # only ubi is true
return (rng[0] < x) & (x <= rng[1])
else: # neither true
return (rng[0] < x) & (x < rng[1])
# /def
# -----------------------------------------------------------------------------
@idxDecorator
def inRange(*args, rng=None, lbi=True, ubi=False):
    """Multidimensional box selection.

    Parameters
    ----------
    args : list
        either list of values along each dimension or list of values & bounds
        the input type depends on rng
    rng : None, list (default None)
        if rng is not None:
            for domains x
            args = [[x1], [x2], ...]
            rng = [1st [lower, upper],
                   2nd [lower, upper],
                   ...]
        else:
            args are the lists
            list of (x, [lower, upper])
    lbi : bool (default True)
        Lower Bound Inclusive, whether to be inclusive on the lower bound
    ubi : bool (default False)
        Upper Bound Inclusive, whether to be inclusive on the upper bound

    Returns
    -------
    inrange : bool ndarray
        boolean array to select values in box selection

    Notes
    -----
    BUG FIX: *lbi* and *ubi* were accepted and documented but never
    forwarded to ``_inRange``, so they had no effect; they are now passed
    through (this also makes `outRange`'s differing defaults meaningful).

    TODO
    ----
    allow lbi & ubi to be lists, matching args, for individual adjustment
    """
    # Compare
    # If args contains lists of [list, [low, up]]
    if rng is None:
        rowbool = np.array(
            [_inRange(v, lu, lbi=lbi, ubi=ubi) for v, lu in args]
        )
        numtrues = len(args)
    # If args and low,up are in separate lists
    else:
        if len(args) == 1:
            args = (args[0],)
            rng = (rng,)  # single range: wrap so zip pairs it with args[0]
        rowbool = np.array(
            [_inRange(v, lu, lbi=lbi, ubi=ubi) for v, lu in zip(args, rng)]
        )
        numtrues = len(args)
    # now getting where all the dimensions are inside the bounds
    # collapses column to 1 row
    # check were all rows=True for each collapsed column
    allbool = rowbool.sum(axis=0)
    inrange = allbool == numtrues
    return inrange
# /def
# box as proxy to inRange
box = inRange
# -----------------------------------------------------------------------------
@idxDecorator
def outRange(*args, rng=None, lbi=False, ubi=True):
    """Multidimensional box exclusion — the complement of `inRange`.

    Equivalent to ``~inRange(...)`` with the given flags.

    Parameters
    ----------
    args : list
        either list of values along each dimension or list of values & bounds
        the input type depends on rng
    rng : None, list (default None)
        if rng is not None:
            for domains x
            args = [[x1], [x2], ...]
            rng = [1st [lower, upper],
                   2nd [lower, upper],
                   ...]
        else:
            args are the lists
            list of (x, [lower, upper])
    lbi : bool (default False)
        Lower Bound Inclusive (of the *inclusion* region being negated)
    ubi : bool (default True)
        Upper Bound Inclusive (of the *inclusion* region being negated)

    Returns
    -------
    outrange : bool ndarray
        boolean array to select values outside the box selection

    TODO
    ----
    allow lbi & ubi to be lists, matching args, for individual adjustment
    """
    inside = inRange(*args, rng=rng, lbi=lbi, ubi=ubi)
    return ~inside
# /def
# -----------------------------------------------------------------------------
@idxDecorator
def ioRange(incl=None, excl=None, rng=None):
    """Combined inclusion/exclusion selection via `inRange` and `outRange`.

    Parameters
    ----------
    incl : list
        list of inRange args (values to keep inside their bounds)
    excl : list
        list of outRange args (values to reject inside their bounds)
    rng : list
        concatenated bounds for both; must be ordered
        [*inRange bounds, *outRange bounds]

    Returns
    -------
    sel : bool ndarray
        combined selection mask

    Raises
    ------
    ValueError
        if neither *incl* nor *excl* is given
    """
    if incl is None and excl is None:
        # nothing to select on
        raise ValueError("incl and excl are None")
    if incl is None:
        # exclusion only
        return outRange(*excl, rng=rng)
    if excl is None:
        # inclusion only
        return inRange(*incl, rng=rng)
    # both: split rng between the inclusion and exclusion parts
    ninc = len(incl)
    rng_in = rng[:ninc] if rng is not None else None
    rng_out = rng[ninc:] if rng is not None else None
    return inRange(*incl, rng=rng_in) & outRange(*excl, rng=rng_out)
# /def
# -----------------------------------------------------------------------------
@idxDecorator
def ellipse(*x, x0=0.0, dx=1.0):
    """Elliptical selection of data in many dimensions.

    A point is selected when
    ``sqrt(((x - x0) / dx)**2 + ...) < 1``.

    Arguments
    ---------
    *x : m arrays of shape (n,)
        coordinates, one array per dimension
    x0 : scalar or (m,) array (default 0.)
        center position per dimension; a scalar broadcasts to all
    dx : scalar or (m,) array (default 1.)
        semi-axis per dimension; a scalar broadcasts to all

    Returns
    -------
    sel : bool ndarray of shape (n,)
        True for points inside the ellipse
    """
    # broadcast centers/semi-axes to one row per dimension
    shape = (len(x[0]), len(x))
    centers = np.broadcast_to(x0, shape).T
    semiaxes = np.broadcast_to(dx, shape).T
    scaled = np.divide(np.subtract(x, centers), semiaxes)
    distance = np.sqrt(np.sum(np.square(scaled), axis=0))
    return distance < 1
# /def
# -----------------------------------------------------------------------------
@idxDecorator
def circle(*x, x0=0.0, radius=1.0):
    """Circular selection of data in many dimensions.

    Special case of `ellipse` where every dimension shares one radius:
    ``sqrt(((x - x0) / radius)**2 + ...) < 1``.

    Arguments
    ---------
    *x : m arrays of shape (n,)
        coordinates, one array per dimension
    x0 : scalar or (m,) array (default 0.)
        center position per dimension; a scalar broadcasts to all
    radius : scalar
        the common radius

    Returns
    -------
    sel : bool ndarray of shape (n,)
        True for points inside the circle
    """
    # delegate to ellipse with equal semi-axes
    return ellipse(*x, x0=x0, dx=radius)
# /def
#############################################################################
# END
| 8,358 | 23.730769 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/select/__init__.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : initializing selection functions
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""selection function."""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
from .select import inRange, outRange, ioRange, ellipse, circle
from .cmd import select_shift_CMD
from .pm import select_pm_circle
from .gsel import select_g_range
# combined
from .combined import select_pm_cmd_g_cut
#############################################################################
| 758 | 25.172414 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/select/combined.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE :
# AUTHOR : Nathaniel Starkman
# PROJECT :
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""combine selections."""
__author__ = "Nathaniel Starkman"
###############################################################################
# IMPORTS
# GENERAL
from astropy import units as u
# CUSTOM
from astroPHD import LogFile
# PROJECT-SPECIFIC
from .cmd import select_shift_CMD
from .pm import select_pm_circle
from .gsel import select_g_range
###############################################################################
# PARAMETERS
_LOGFILE = LogFile(header=False, verbose=0)
###############################################################################
# CODE
##############################################################################
def select_pm_cmd_g_cut(
    data,
    isochrone,
    # PM
    pm_x0_lon=-2.5,
    pm_x0_lat=-2.5,
    pm_dx_lon=3,
    pm_dx_lat=3,
    # CMD
    cmd_lhs=0.0,
    cmd_rhs=0.08,
    cmd_low=0.035,
    cmd_up=0.0385,
    # G
    g_low=20,
    g_up=20.7,
    # kwargs for PM, CMD, G
    lon_name="pmra",
    lat_name="pmdec",  # pm
    lon_units=u.mas / u.yr,
    lat_units=u.mas / u.yr,  # pm
    g_name="g dx",
    gmr_name="g-r dx",  # cmd, g
    iso_g_name="g PS",
    iso_gmr_name="g-r PS",  # cmd
    isorng=[18, 24],
    datarng=[18.1, 23.5],  # cmd
    fill_value="extrapolate",  # cmd
    # logging
    logger=_LOGFILE,
    verbose=None,
    **kw
):
    """Combined proper-motion, CMD, and g-magnitude membership selection.

    Builds three boolean masks over *data* — a circular cut in proper-motion
    space, a shifted-isochrone cut in the color-magnitude diagram, and a
    g-magnitude range cut — and returns their logical AND.

    Parameters
    ----------
    data : QTable
        the sky datatable.
        must contain columns *lon_name*, *lat_name* in units
        *lon_units*, *lat_units* respectively, plus the *g_name* and
        *gmr_name* photometry columns.
    isochrone : QTable
        isochrone datatable (columns *iso_g_name*, *iso_gmr_name*)
    pm_x0_lon : float
        proper motion center in the longitude direction (in *lon_units*)
    pm_x0_lat : float
        proper motion center in the latitude direction (in *lat_units*)
    pm_dx_lon : float
        proper motion half-width in the longitude direction (in *lon_units*)
    pm_dx_lat : float
        proper motion half-width in the latitude direction (in *lat_units*)
    cmd_lhs : scalar
        shift the isochrone left
    cmd_rhs : scalar
        shift the isochrone right (rhs > lhs)
    cmd_low : scalar
        shift the isochrone down
    cmd_up : scalar
        shift the isochrone up
    g_low, g_up : scalar
        lower / upper bound of the g-magnitude cut
    lon_name, lat_name : str, optional
        names of the proper-motion columns in *data*
    lon_units, lat_units : astropy units, optional
        units of the proper-motion columns
    g_name, gmr_name : str, optional
        names of the g and g-r columns in *data*
    iso_g_name, iso_gmr_name : str, optional
        names of the g and g-r columns in *isochrone*
    isorng : (2,) list
        restrict the range applied on the isochrone
    datarng : (2,) list
        restrict the range applied on the data
    fill_value : str, optional (default 'extrapolate')
        interp1d fill_value for lhs & rhs
    logger : logger, optional
        forwarded to the sub-selection functions
    verbose : int, optional
        the degree of verbosity
        None (default): use instantiated value
        0) None; 1) status report, >=2) step-by-step

    Returns
    -------
    comb_sel : ndarray[bool]
        elementwise AND of the PM, CMD, and g selections

    """
    # showing unused kwargs for safety (in case write in wrong kw)
    # have this to absorb lmfit extra parameters
    logger.report("unused kwargs: {}".format(kw), verbose=verbose)
    # proper-motion circle selection
    selpm = select_pm_circle(
        data,
        x0_lon=pm_x0_lon,
        x0_lat=pm_x0_lat,
        dx_lon=pm_dx_lon,
        dx_lat=pm_dx_lat,
        lon_name=lon_name,
        lat_name=lat_name,
        lon_units=lon_units,
        lat_units=lat_units,
        logger=logger,  # FIX: forward caller's logger (was hard-coded _LOGFILE)
        verbose=verbose,  # FIX: forward caller's verbosity (was always None)
    )
    # shifted-isochrone CMD selection
    selcmd = select_shift_CMD(
        data,
        isochrone,
        lhs=cmd_lhs,
        rhs=cmd_rhs,
        low=cmd_low,
        up=cmd_up,
        isorng=isorng,
        datarng=datarng,
        g_name=g_name,
        gmr_name=gmr_name,
        iso_g_name=iso_g_name,
        iso_gmr_name=iso_gmr_name,
        fill_value=fill_value,
        logger=logger,  # FIX: forward caller's logger
        verbose=verbose,  # FIX: forward caller's verbosity
    )
    # g-magnitude range selection
    selg = select_g_range(
        data, low=g_low, up=g_up, g_name=g_name, logger=logger, verbose=verbose
    )
    # combine: a row must pass all three cuts
    comb_sel = selpm & selcmd & selg
    return comb_sel
# /def
###############################################################################
# END
| 5,018 | 24.348485 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/mwdust_util.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : mwdust_util
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""utilities for mwdust.
TODO use _LOGFILE inside load_dust_color
TODO use _LOGFILE inside load_dust_gri
"""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import sys
import numpy as np
import mwdust
# Astropy
from astropy import units as u
from astropy.table import QTable
from astropy.coordinates import SkyCoord
# CUSTOM
from astroPHD import LogFile
#############################################################################
# PARAMETERS
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible with LogFile
#############################################################################
# CODE
#############################################################################
def load_dust_color(
    color,
    fname,
    df=None,
    distance=None,
    save=True,
    logger=_LOGFILE,
    verbose=None,
):
    """Load (or compute) PanSTARRS1 SFD dust extinction in one band.

    First tries to load a previously-saved extinction array from
    ``fname.format(color)``; on a cache miss it evaluates the mwdust SFD
    map at the (l, b) coordinates in *df* and the given *distance*,
    optionally persisting the result to disk.

    Parameters
    ----------
    color : str
        PS1 band name ('g', 'r', 'i', ...); substituted into *fname*
    fname : str
        file-path template with a ``{}`` slot for the band name
    df : QTable, optional
        table with 'l' and 'b' columns; required only on a cache miss
    distance : Quantity, optional
        distance at which to evaluate the map; required only on a cache miss
    save : bool
        whether to persist a freshly-computed array to disk
    logger, verbose :
        accepted for interface consistency  # NOTE(review): currently unused

    Returns
    -------
    dust_c : Quantity
        extinction values in magnitudes

    Raises
    ------
    ValueError
        on a cache miss when *df* or *distance* is missing

    """
    path = fname.format(color)
    # fast path: a previous run already dumped the raw array to disk
    try:
        return np.load(path) * u.mag
    except FileNotFoundError:
        pass
    if (df is None) or (distance is None):
        raise ValueError(
            f"load_dust_color({color}) needs *df* & *distance*"
        )
    sys.stdout.write(f"could not load {color}")
    sys.stdout.flush()
    # evaluate the SFD dust map in this PS1 band at every (l, b) in df
    sfd_map = mwdust.SFD(filter=f"PS1 {color}")
    dust_c = sfd_map(df["l"], df["b"], distance.to_value(u.kpc)) * u.mag
    if save:
        dust_c.value.dump(path)  # pickle the bare ndarray (units re-applied on load)
        status = f"\033 -> finised {color}, "
    else:
        status = f"\033 {color}, "
    sys.stdout.write(status)
    sys.stdout.flush()
    return dust_c
# /def
# ----------------------------------------------------------------------------
def load_dust_gri(
    fname,
    df=None,
    distance=None,
    recalculate=False,
    save=True,
    save_intermediate=False,
    logger=_LOGFILE,
    verbose=None,
):
    """Load (or assemble) the PanSTARRS1 g,r,i dust-extinction table.

    Tries to read the combined table from ``fname.format('gri')``; on a
    miss (or when *recalculate* is True) it computes per-band extinctions
    via ``load_dust_color`` and assembles them into one QTable.

    Arguments
    ---------
    fname : str
        path template; must support color substitution
        ex: f'../data/nbody3/skydata/ps1dust_040112_{}.dat'
    df : QTable
        table of coordinates ('l', 'b' columns).
        only used if load fails
    distance : Quantity
        the distance at which to calculate the dust extinction
        only used if load fails
    recalculate : bool
        force recomputation even if a cached table exists
    save : bool
        whether to save the assembled table
        only used if load fails
    save_intermediate : bool
        whether to save the intermediate per-band arrays
        only used if load fails
    logger, verbose :
        logging control, forwarded to ``load_dust_color``

    Returns
    -------
    ps1dust_gri : QTable
        table of dust extinction with columns ('coord', 'g', 'r', 'i')

    """
    df = QTable(df)  # just making sure
    try:
        if recalculate:
            # force the except branch below to rebuild from scratch
            raise FileNotFoundError
        ps1dust_gri = QTable.read(fname.format("gri"), format="ascii.ecsv")
    except FileNotFoundError:
        sys.stdout.write("\ncould not load gri table")
        sys.stdout.flush()
        # loading g
        ps1dust_g = load_dust_color(
            "g",
            fname,
            df=df,
            distance=distance,
            save=save_intermediate,
            logger=logger,
            verbose=verbose,
        )
        # loading r
        ps1dust_r = load_dust_color(
            "r",
            fname,
            df=df,
            distance=distance,
            save=save_intermediate,
            logger=logger,
            verbose=verbose,
        )
        # loading i
        ps1dust_i = load_dust_color(
            "i",
            fname,
            df=df,
            distance=distance,
            save=save_intermediate,
            logger=logger,
            verbose=verbose,
        )
        # one SkyCoord column for the (l, b, distance) of every row
        coord = SkyCoord(
            l=df["l"], b=df["b"], distance=distance, frame="galactic"
        )
        ps1dust_gri = QTable(
            [coord, ps1dust_g, ps1dust_r, ps1dust_i],
            names=("coord", "g", "r", "i"),
        )
        if save:
            ps1dust_gri.write(fname.format("gri"), format="ascii.ecsv")
        sys.stdout.write("\033" + " -> assembled gri table, ")
        sys.stdout.flush()
    else:
        # check if need to recalculate the dust because doesn't match df
        # NOTE(review): compares len(df["g"]) — assumes *df* carries a 'g'
        # column whose length tracks the row count; verify against callers
        if len(df["g"]) != len(ps1dust_gri["g"]):
            logger.write("need to recalculate dust")
            # recurse once with recalculate=True to rebuild the stale cache
            ps1dust_gri = load_dust_gri(
                fname,
                df=df,
                distance=distance,
                recalculate=True,
                save=save,
                save_intermediate=save_intermediate,
                logger=logger,
                verbose=verbose,
            )
        sys.stdout.write("loaded gri table")
        sys.stdout.flush()
    return ps1dust_gri
# /def
##############################################################################
# END
| 5,230 | 21.450644 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/pickle.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : pickle
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""pickle."""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import pickle
############################################################################
# CODE
############################################################################
def dump(obj, fname, protocol=None, *, fopt="b", fix_imports=True):
    """Pickle *obj* to the file named *fname*.

    Thin wrapper around :func:`pickle.dump` in which *fname* replaces
    *file* and is a path string; the file is opened (mode ``'w' + fopt``)
    and closed automatically.
    """
    mode = "w" + fopt
    with open(fname, mode) as fp:
        pickle.dump(obj, fp, protocol=protocol, fix_imports=fix_imports)
    return
# /def
# ----------------------------------------------------------------------------
def load(
    fname, *, fopt="b", fix_imports=True, encoding="ASCII", errors="strict"
):
    """Unpickle and return the object stored in the file named *fname*.

    Thin wrapper around :func:`pickle.load` in which *fname* replaces
    *file* and is a path string; the file is opened (mode ``'r' + fopt``)
    and closed automatically.
    """
    mode = "r" + fopt
    with open(fname, mode) as fp:
        obj = pickle.load(
            fp, fix_imports=fix_imports, encoding=encoding, errors=errors
        )
    return obj
# /def
############################################################################
# END
| 1,526 | 21.130435 | 78 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.